Lines matching +full:1 +full:eb (every line containing both tokens "1" and "eb", drawn from btrfs's extent I/O code, fs/btrfs/extent_io.c). The leading numbers are the source file's own line numbers; each match is annotated with its enclosing function.
41 static inline void btrfs_leak_debug_add_eb(struct extent_buffer *eb) in btrfs_leak_debug_add_eb() argument
43 struct btrfs_fs_info *fs_info = eb->fs_info; in btrfs_leak_debug_add_eb()
47 list_add(&eb->leak_list, &fs_info->allocated_ebs); in btrfs_leak_debug_add_eb()
51 static inline void btrfs_leak_debug_del_eb(struct extent_buffer *eb) in btrfs_leak_debug_del_eb() argument
53 struct btrfs_fs_info *fs_info = eb->fs_info; in btrfs_leak_debug_del_eb()
57 list_del(&eb->leak_list); in btrfs_leak_debug_del_eb()
63 struct extent_buffer *eb; in btrfs_extent_buffer_leak_debug_check() local
76 eb = list_first_entry(&fs_info->allocated_ebs, in btrfs_extent_buffer_leak_debug_check()
80 eb->start, eb->len, atomic_read(&eb->refs), eb->bflags, in btrfs_extent_buffer_leak_debug_check()
81 btrfs_header_owner(eb)); in btrfs_extent_buffer_leak_debug_check()
82 list_del(&eb->leak_list); in btrfs_extent_buffer_leak_debug_check()
83 WARN_ON_ONCE(1); in btrfs_extent_buffer_leak_debug_check()
84 kmem_cache_free(extent_buffer_cache, eb); in btrfs_extent_buffer_leak_debug_check()
89 #define btrfs_leak_debug_add_eb(eb) do {} while (0) argument
90 #define btrfs_leak_debug_del_eb(eb) do {} while (0) argument
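The two empty do {} while (0) macros above are the non-debug stubs for the leak-tracking helpers at lines 41-57: the debug build does real bookkeeping, the regular build keeps identical call sites that compile to nothing. A minimal userspace sketch of the same compile-out pattern; LEAK_DEBUG and the counter are hypothetical stand-ins for CONFIG_BTRFS_DEBUG and the fs_info->allocated_ebs list.

#include <stdio.h>

#ifdef LEAK_DEBUG
static long live_ebs;   /* stand-in for the allocated_ebs list */
#define leak_debug_add_eb(eb)   do { live_ebs++; } while (0)
#define leak_debug_del_eb(eb)   do { live_ebs--; } while (0)
#define leak_debug_check() \
        do { if (live_ebs) fprintf(stderr, "leaked %ld ebs\n", live_ebs); } while (0)
#else
/* Non-debug build: identical call sites, zero cost. */
#define leak_debug_add_eb(eb)   do {} while (0)
#define leak_debug_del_eb(eb)   do {} while (0)
#define leak_debug_check()      do {} while (0)
#endif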
180 ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX); in process_one_folio()
181 len = end + 1 - start; in process_one_folio()
276 end + 1) - range_start; in lock_delalloc_folios()
279 processed_end = range_start + range_len - 1; in lock_delalloc_folios()
341 /* @delalloc_end can be -1, never go beyond @orig_end */ in find_lock_delalloc_range()
358 if (delalloc_end + 1 - delalloc_start > max_bytes) in find_lock_delalloc_range()
359 delalloc_end = delalloc_start + max_bytes - 1; in find_lock_delalloc_range()
373 loops = 1; in find_lock_delalloc_range()
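The +1/-1 arithmetic above and throughout this listing (lines 180-181, 358-359, 2317-2318, and more) follows from btrfs ranges being inclusive at both ends. A self-contained check of the two idioms, length and clamp:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t start = 4096, end = 12287;     /* [start, end], both inclusive */
        uint64_t max_bytes = 4096;

        uint64_t len = end + 1 - start;         /* inclusive end, so +1 */
        assert(len == 8192);

        if (end + 1 - start > max_bytes)        /* clamp, keeping end inclusive */
                end = start + max_bytes - 1;
        assert(end == 8191 && end + 1 - start == max_bytes);
        return 0;
}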
548 end = start + fi.length - 1; in end_bbio_data_read()
568 u32 zero_len = offset_in_folio(folio, end) + 1 - in end_bbio_data_read()
649 static int alloc_eb_folio_array(struct extent_buffer *eb, bool nofail) in alloc_eb_folio_array() argument
652 int num_pages = num_extent_pages(eb); in alloc_eb_folio_array()
660 eb->folios[i] = page_folio(page_array[i]); in alloc_eb_folio_array()
661 eb->folio_size = PAGE_SIZE; in alloc_eb_folio_array()
662 eb->folio_shift = PAGE_SHIFT; in alloc_eb_folio_array()
686 * 1) The folios belong to the same inode in btrfs_bio_is_contig()
823 static int attach_extent_buffer_folio(struct extent_buffer *eb, in attach_extent_buffer_folio() argument
827 struct btrfs_fs_info *fs_info = eb->fs_info; in attach_extent_buffer_folio()
841 folio_attach_private(folio, eb); in attach_extent_buffer_folio()
843 WARN_ON(folio_get_private(folio) != eb); in attach_extent_buffer_folio()
938 const u64 end = start + PAGE_SIZE - 1; in btrfs_do_readpage()
977 em = get_extent_map(BTRFS_I(inode), folio, cur, end - cur + 1, em_cached); in btrfs_do_readpage()
979 end_folio_read(folio, false, cur, end + 1 - cur); in btrfs_do_readpage()
988 iosize = min(extent_map_end(em) - cur, end - cur + 1); in btrfs_do_readpage()
1033 prev_em_start && *prev_em_start != (u64)-1 && in btrfs_do_readpage()
1080 const u64 end = start + folio_size(folio) - 1; in btrfs_read_folio()
1162 const u64 page_end = page_start + folio_size(folio) - 1; in writepage_delalloc()
1188 ASSERT(fs_info->sectors_per_page > 1); in writepage_delalloc()
1191 bio_ctrl->submit_bitmap = 1; in writepage_delalloc()
1205 delalloc_start = delalloc_end + 1; in writepage_delalloc()
1209 min(delalloc_end, page_end) + 1 - delalloc_start); in writepage_delalloc()
1211 delalloc_start = delalloc_end + 1; in writepage_delalloc()
1231 found_len = last_delalloc_end + 1 - found_start; in writepage_delalloc()
1245 found_len = last_delalloc_end + 1 - found_start; in writepage_delalloc()
1257 found_start + found_len - 1, in writepage_delalloc()
1276 found_start + found_len - 1, NULL); in writepage_delalloc()
1279 found_start + found_len - 1); in writepage_delalloc()
1291 unsigned int end_bit = (min(page_end + 1, found_start + found_len) - in writepage_delalloc()
1299 if (found_start + found_len >= last_delalloc_end + 1) in writepage_delalloc()
1330 DIV_ROUND_UP(delalloc_end + 1 - page_start, PAGE_SIZE); in writepage_delalloc()
1338 return 1; in writepage_delalloc()
1422 * We return 1 if the IO is started and the page is unlocked,
1449 return 1; in extent_writepage_io()
1553 bio_ctrl->submit_bitmap = (unsigned long)-1; in extent_writepage()
1559 if (ret == 1) in extent_writepage()
1566 if (ret == 1) in extent_writepage()
1596 static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *eb, in lock_extent_buffer_for_io() argument
1599 struct btrfs_fs_info *fs_info = eb->fs_info; in lock_extent_buffer_for_io()
1602 btrfs_tree_lock(eb); in lock_extent_buffer_for_io()
1603 while (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) { in lock_extent_buffer_for_io()
1604 btrfs_tree_unlock(eb); in lock_extent_buffer_for_io()
1607 wait_on_extent_buffer_writeback(eb); in lock_extent_buffer_for_io()
1608 btrfs_tree_lock(eb); in lock_extent_buffer_for_io()
1612 * We need to do this to prevent races in people who check if the eb is in lock_extent_buffer_for_io()
1616 spin_lock(&eb->refs_lock); in lock_extent_buffer_for_io()
1617 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) { in lock_extent_buffer_for_io()
1618 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); in lock_extent_buffer_for_io()
1619 spin_unlock(&eb->refs_lock); in lock_extent_buffer_for_io()
1620 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN); in lock_extent_buffer_for_io()
1622 -eb->len, in lock_extent_buffer_for_io()
1626 spin_unlock(&eb->refs_lock); in lock_extent_buffer_for_io()
1628 btrfs_tree_unlock(eb); in lock_extent_buffer_for_io()
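The loop at lines 1603-1608 never sleeps while holding the tree lock: it drops the lock, waits for writeback to clear, re-takes the lock, and re-checks the flag. A minimal pthread sketch of that unlock/wait/relock shape; all names here are illustrative, not kernel API.

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t wb_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wb_cond = PTHREAD_COND_INITIALIZER;
static bool writeback;

static bool writeback_active(void)
{
        pthread_mutex_lock(&wb_lock);
        bool active = writeback;
        pthread_mutex_unlock(&wb_lock);
        return active;
}

static void wait_for_writeback(void)
{
        pthread_mutex_lock(&wb_lock);
        while (writeback)
                pthread_cond_wait(&wb_cond, &wb_lock);
        pthread_mutex_unlock(&wb_lock);
}

static void lock_buffer_for_io(void)
{
        pthread_mutex_lock(&tree_lock);
        while (writeback_active()) {
                pthread_mutex_unlock(&tree_lock);  /* drop before sleeping */
                wait_for_writeback();
                pthread_mutex_lock(&tree_lock);    /* re-take, then re-check */
        }
        /* tree_lock held and writeback clear: safe to proceed */
}

int main(void)
{
        lock_buffer_for_io();
        pthread_mutex_unlock(&tree_lock);
        return 0;
}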
1632 static void set_btree_ioerr(struct extent_buffer *eb) in set_btree_ioerr() argument
1634 struct btrfs_fs_info *fs_info = eb->fs_info; in set_btree_ioerr()
1636 set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags); in set_btree_ioerr()
1642 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in set_btree_ioerr()
1650 mapping_set_error(eb->fs_info->btree_inode->i_mapping, -EIO); in set_btree_ioerr()
1676 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is in set_btree_ioerr()
1677 * not done and would not be reliable - the eb might have been released in set_btree_ioerr()
1690 switch (eb->log_index) { in set_btree_ioerr()
1691 case -1: in set_btree_ioerr()
1697 case 1: in set_btree_ioerr()
1712 struct extent_buffer *eb; in find_extent_buffer_nolock() local
1715 eb = radix_tree_lookup(&fs_info->buffer_radix, in find_extent_buffer_nolock()
1717 if (eb && atomic_inc_not_zero(&eb->refs)) { in find_extent_buffer_nolock()
1719 return eb; in find_extent_buffer_nolock()
1727 struct extent_buffer *eb = bbio->private; in end_bbio_meta_write() local
1728 struct btrfs_fs_info *fs_info = eb->fs_info; in end_bbio_meta_write()
1733 set_btree_ioerr(eb); in end_bbio_meta_write()
1736 u64 start = eb->start + bio_offset; in end_bbio_meta_write()
1744 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); in end_bbio_meta_write()
1746 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK); in end_bbio_meta_write()
1751 static void prepare_eb_write(struct extent_buffer *eb) in prepare_eb_write() argument
1757 clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags); in prepare_eb_write()
1760 nritems = btrfs_header_nritems(eb); in prepare_eb_write()
1761 if (btrfs_header_level(eb) > 0) { in prepare_eb_write()
1762 end = btrfs_node_key_ptr_offset(eb, nritems); in prepare_eb_write()
1763 memzero_extent_buffer(eb, end, eb->len - end); in prepare_eb_write()
1767 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0 in prepare_eb_write()
1769 start = btrfs_item_nr_offset(eb, nritems); in prepare_eb_write()
1770 end = btrfs_item_nr_offset(eb, 0); in prepare_eb_write()
1772 end += BTRFS_LEAF_DATA_SIZE(eb->fs_info); in prepare_eb_write()
1774 end += btrfs_item_offset(eb, nritems - 1); in prepare_eb_write()
1775 memzero_extent_buffer(eb, start, end - start); in prepare_eb_write()
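prepare_eb_write() zeroes whatever the block leaves unused so stale memory never reaches disk: for a leaf, that is the gap between the last item header and the lowest item data (the "header 0 1 2 .. N ... data_N .. data_2 data_1 data_0" layout from the comment at line 1767). A toy sketch of the same computation over a made-up leaf struct, not the real on-disk format:

#include <stdint.h>
#include <string.h>

#define LEAF_SIZE 4096

struct item {
        uint32_t offset;        /* data offset from the start of l->data */
        uint32_t size;
};

struct leaf {
        uint32_t nritems;
        struct item items[32];   /* item headers grow forward */
        uint8_t data[LEAF_SIZE]; /* item data is packed from the end backward */
};

/* Zero the unused middle: end of used headers up to the lowest used data. */
static void zero_leaf_hole(struct leaf *l)
{
        uint8_t *hole_start = (uint8_t *)&l->items[l->nritems];
        uint8_t *hole_end;

        if (l->nritems == 0)
                hole_end = l->data + LEAF_SIZE;  /* whole data area is unused */
        else
                hole_end = l->data + l->items[l->nritems - 1].offset;
        memset(hole_start, 0, hole_end - hole_start);
}

int main(void)
{
        struct leaf l = { .nritems = 1 };

        l.items[0].size = 100;
        l.items[0].offset = LEAF_SIZE - 100;    /* lowest (and only) data */
        zero_leaf_hole(&l);
        return 0;
}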
1779 static noinline_for_stack void write_one_eb(struct extent_buffer *eb, in write_one_eb() argument
1782 struct btrfs_fs_info *fs_info = eb->fs_info; in write_one_eb()
1785 prepare_eb_write(eb); in write_one_eb()
1789 eb->fs_info, end_bbio_meta_write, eb); in write_one_eb()
1790 bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT; in write_one_eb()
1793 bbio->inode = BTRFS_I(eb->fs_info->btree_inode); in write_one_eb()
1794 bbio->file_offset = eb->start; in write_one_eb()
1796 struct folio *folio = eb->folios[0]; in write_one_eb()
1800 btrfs_subpage_set_writeback(fs_info, folio, eb->start, eb->len); in write_one_eb()
1801 if (btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start, in write_one_eb()
1802 eb->len)) { in write_one_eb()
1806 ret = bio_add_folio(&bbio->bio, folio, eb->len, in write_one_eb()
1807 eb->start - folio_pos(folio)); in write_one_eb()
1809 wbc_account_cgroup_owner(wbc, folio, eb->len); in write_one_eb()
1812 int num_folios = num_extent_folios(eb); in write_one_eb()
1815 struct folio *folio = eb->folios[i]; in write_one_eb()
1821 ret = bio_add_folio(&bbio->bio, folio, eb->folio_size, 0); in write_one_eb()
1823 wbc_account_cgroup_owner(wbc, folio, eb->folio_size); in write_one_eb()
1856 struct extent_buffer *eb; in submit_eb_subpage() local
1882 * Here we just want to grab the eb without touching extra in submit_eb_subpage()
1885 eb = find_extent_buffer_nolock(fs_info, start); in submit_eb_subpage()
1890 * The eb has already reached 0 refs thus find_extent_buffer() in submit_eb_subpage()
1891 * doesn't return it. We don't need to write back such eb in submit_eb_subpage()
1894 if (!eb) in submit_eb_subpage()
1897 if (lock_extent_buffer_for_io(eb, wbc)) { in submit_eb_subpage()
1898 write_one_eb(eb, wbc); in submit_eb_subpage()
1901 free_extent_buffer(eb); in submit_eb_subpage()
1911 * belongs to this eb, we don't need to submit
1930 struct extent_buffer *eb; in submit_eb_page() local
1945 eb = folio_get_private(folio); in submit_eb_page()
1951 if (WARN_ON(!eb)) { in submit_eb_page()
1956 if (eb == ctx->eb) { in submit_eb_page()
1960 ret = atomic_inc_not_zero(&eb->refs); in submit_eb_page()
1965 ctx->eb = eb; in submit_eb_page()
1967 ret = btrfs_check_meta_write_pointer(eb->fs_info, ctx); in submit_eb_page()
1971 free_extent_buffer(eb); in submit_eb_page()
1975 if (!lock_extent_buffer_for_io(eb, wbc)) { in submit_eb_page()
1976 free_extent_buffer(eb); in submit_eb_page()
1981 /* Mark the last eb in the block group. */ in submit_eb_page()
1982 btrfs_schedule_zone_finish_bg(ctx->zoned_bg, eb); in submit_eb_page()
1983 ctx->zoned_bg->meta_write_pointer += eb->len; in submit_eb_page()
1985 write_one_eb(eb, wbc); in submit_eb_page()
1986 free_extent_buffer(eb); in submit_eb_page()
1987 return 1; in submit_eb_page()
2008 end = -1; in btree_write_cache_pages()
2017 scanned = 1; in btree_write_cache_pages()
2039 done = 1; in btree_write_cache_pages()
2058 scanned = 1; in btree_write_cache_pages()
2086 * extent io tree. Thus we don't want to submit such wild eb in btree_write_cache_pages()
2150 end = -1; in extent_write_cache_pages()
2160 range_whole = 1; in extent_write_cache_pages()
2161 scanned = 1; in extent_write_cache_pages()
2174 wbc->tagged_writepages = 1; in extent_write_cache_pages()
2250 done = 1; in extent_write_cache_pages()
2270 scanned = 1; in extent_write_cache_pages()
2314 ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize)); in extent_write_locked_range()
2317 u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end); in extent_write_locked_range()
2318 u32 cur_len = cur_end + 1 - cur; in extent_write_locked_range()
2331 cur = cur_end + 1; in extent_write_locked_range()
2343 bio_ctrl.submit_bitmap = (unsigned long)-1; in extent_write_locked_range()
2346 if (ret == 1) in extent_write_locked_range()
2356 cur = cur_end + 1; in extent_write_locked_range()
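The cur_end/cur stepping above (lines 2317-2318, 2331, 2356) walks an inclusive byte range one page at a time: clamp to the end of the current page, then to the overall end, then step past the chunk. Standalone:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

int main(void)
{
        uint64_t start = 5000, end = 14000;     /* inclusive byte range */

        for (uint64_t cur = start; cur <= end; ) {
                uint64_t page_start = cur - (cur % PAGE_SIZE);  /* round_down */
                uint64_t cur_end = min_u64(page_start + PAGE_SIZE - 1, end);
                uint32_t cur_len = (uint32_t)(cur_end + 1 - cur);

                printf("chunk [%llu, %llu] len %u\n",
                       (unsigned long long)cur,
                       (unsigned long long)cur_end, cur_len);
                cur = cur_end + 1;
        }
        return 0;
}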
2388 const u64 end = start + readahead_length(rac) - 1; in btrfs_readahead()
2391 u64 prev_em_start = (u64)-1; in btrfs_readahead()
2415 u64 end = start + folio_size(folio) - 1; in extent_invalidate_folio()
2446 u64 end = start + PAGE_SIZE - 1; in try_release_extent_state()
2484 u64 end = start + PAGE_SIZE - 1; in try_release_extent_mapping()
2490 const u64 len = end - start + 1; in try_release_extent_mapping()
2506 extent_map_end(em) - 1, EXTENT_LOCKED)) in try_release_extent_mapping()
2557 static int extent_buffer_under_io(const struct extent_buffer *eb) in extent_buffer_under_io() argument
2559 return (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) || in extent_buffer_under_io()
2560 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); in extent_buffer_under_io()
2577 static void detach_extent_buffer_folio(const struct extent_buffer *eb, struct folio *folio) in detach_extent_buffer_folio() argument
2579 struct btrfs_fs_info *fs_info = eb->fs_info; in detach_extent_buffer_folio()
2580 const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags); in detach_extent_buffer_folio()
2583 * For a mapped eb, we're going to change the folio private, which should in detach_extent_buffer_folio()
2598 * removed the eb from the radix tree, so we could race in detach_extent_buffer_folio()
2599 * and have this page now attached to the new eb. So in detach_extent_buffer_folio()
2601 * this eb. in detach_extent_buffer_folio()
2603 if (folio_test_private(folio) && folio_get_private(folio) == eb) { in detach_extent_buffer_folio()
2604 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); in detach_extent_buffer_folio()
2607 /* We need to make sure we haven't been attached to a new eb. */ in detach_extent_buffer_folio()
2616 * For subpage, we can have dummy eb with folio private attached. In in detach_extent_buffer_folio()
2618 * attached to one dummy eb, no sharing. in detach_extent_buffer_folio()
2638 static void btrfs_release_extent_buffer_folios(const struct extent_buffer *eb) in btrfs_release_extent_buffer_folios() argument
2640 ASSERT(!extent_buffer_under_io(eb)); in btrfs_release_extent_buffer_folios()
2643 struct folio *folio = eb->folios[i]; in btrfs_release_extent_buffer_folios()
2648 detach_extent_buffer_folio(eb, folio); in btrfs_release_extent_buffer_folios()
2658 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb) in btrfs_release_extent_buffer() argument
2660 btrfs_release_extent_buffer_folios(eb); in btrfs_release_extent_buffer()
2661 btrfs_leak_debug_del_eb(eb); in btrfs_release_extent_buffer()
2662 kmem_cache_free(extent_buffer_cache, eb); in btrfs_release_extent_buffer()
2669 struct extent_buffer *eb = NULL; in __alloc_extent_buffer() local
2671 eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL); in __alloc_extent_buffer()
2672 eb->start = start; in __alloc_extent_buffer()
2673 eb->len = len; in __alloc_extent_buffer()
2674 eb->fs_info = fs_info; in __alloc_extent_buffer()
2675 init_rwsem(&eb->lock); in __alloc_extent_buffer()
2677 btrfs_leak_debug_add_eb(eb); in __alloc_extent_buffer()
2679 spin_lock_init(&eb->refs_lock); in __alloc_extent_buffer()
2680 atomic_set(&eb->refs, 1); in __alloc_extent_buffer()
2684 return eb; in __alloc_extent_buffer()
2729 struct extent_buffer *eb; in __alloc_dummy_extent_buffer() local
2733 eb = __alloc_extent_buffer(fs_info, start, len); in __alloc_dummy_extent_buffer()
2734 if (!eb) in __alloc_dummy_extent_buffer()
2737 ret = alloc_eb_folio_array(eb, false); in __alloc_dummy_extent_buffer()
2741 num_folios = num_extent_folios(eb); in __alloc_dummy_extent_buffer()
2743 ret = attach_extent_buffer_folio(eb, eb->folios[i], NULL); in __alloc_dummy_extent_buffer()
2748 set_extent_buffer_uptodate(eb); in __alloc_dummy_extent_buffer()
2749 btrfs_set_header_nritems(eb, 0); in __alloc_dummy_extent_buffer()
2750 set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags); in __alloc_dummy_extent_buffer()
2752 return eb; in __alloc_dummy_extent_buffer()
2755 if (eb->folios[i]) { in __alloc_dummy_extent_buffer()
2756 detach_extent_buffer_folio(eb, eb->folios[i]); in __alloc_dummy_extent_buffer()
2757 folio_put(eb->folios[i]); in __alloc_dummy_extent_buffer()
2760 kmem_cache_free(extent_buffer_cache, eb); in __alloc_dummy_extent_buffer()
2770 static void check_buffer_tree_ref(struct extent_buffer *eb) in check_buffer_tree_ref() argument
2796 refs = atomic_read(&eb->refs); in check_buffer_tree_ref()
2797 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in check_buffer_tree_ref()
2800 spin_lock(&eb->refs_lock); in check_buffer_tree_ref()
2801 if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in check_buffer_tree_ref()
2802 atomic_inc(&eb->refs); in check_buffer_tree_ref()
2803 spin_unlock(&eb->refs_lock); in check_buffer_tree_ref()
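check_buffer_tree_ref() takes the long-lived "tree ref" at most once, pairing a test-and-set of EXTENT_BUFFER_TREE_REF with the increment under refs_lock. A compressed C11 sketch of "increment exactly once"; it deliberately omits the spinlock and the subtleties the comments around line 2796 cover.

#include <stdatomic.h>

struct buf {
        atomic_int refs;
        atomic_flag tree_ref;   /* stands in for EXTENT_BUFFER_TREE_REF */
};

/* Add the tree's own reference no more than once, however often called. */
static void take_tree_ref(struct buf *b)
{
        if (!atomic_flag_test_and_set(&b->tree_ref))
                atomic_fetch_add(&b->refs, 1);
}

int main(void)
{
        struct buf b = { .refs = 1, .tree_ref = ATOMIC_FLAG_INIT };

        take_tree_ref(&b);
        take_tree_ref(&b);      /* no-op: the flag is already set */
        return atomic_load(&b.refs) == 2 ? 0 : 1;
}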
2806 static void mark_extent_buffer_accessed(struct extent_buffer *eb) in mark_extent_buffer_accessed() argument
2808 int num_folios = num_extent_folios(eb); in mark_extent_buffer_accessed()
2810 check_buffer_tree_ref(eb); in mark_extent_buffer_accessed()
2813 folio_mark_accessed(eb->folios[i]); in mark_extent_buffer_accessed()
2819 struct extent_buffer *eb; in find_extent_buffer() local
2821 eb = find_extent_buffer_nolock(fs_info, start); in find_extent_buffer()
2822 if (!eb) in find_extent_buffer()
2825 * Lock our eb's refs_lock to avoid races with free_extent_buffer(). in find_extent_buffer()
2826 * When we get our eb it might be flagged with EXTENT_BUFFER_STALE and in find_extent_buffer()
2828 * set, eb->refs == 2, that the buffer isn't under IO (dirty and in find_extent_buffer()
2832 * could race and increment the eb's reference count, clear its stale in find_extent_buffer()
2837 if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) { in find_extent_buffer()
2838 spin_lock(&eb->refs_lock); in find_extent_buffer()
2839 spin_unlock(&eb->refs_lock); in find_extent_buffer()
2841 mark_extent_buffer_accessed(eb); in find_extent_buffer()
2842 return eb; in find_extent_buffer()
2849 struct extent_buffer *eb, *exists = NULL; in alloc_test_extent_buffer() local
2852 eb = find_extent_buffer(fs_info, start); in alloc_test_extent_buffer()
2853 if (eb) in alloc_test_extent_buffer()
2854 return eb; in alloc_test_extent_buffer()
2855 eb = alloc_dummy_extent_buffer(fs_info, start); in alloc_test_extent_buffer()
2856 if (!eb) in alloc_test_extent_buffer()
2858 eb->fs_info = fs_info; in alloc_test_extent_buffer()
2867 start >> fs_info->sectorsize_bits, eb); in alloc_test_extent_buffer()
2877 check_buffer_tree_ref(eb); in alloc_test_extent_buffer()
2878 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags); in alloc_test_extent_buffer()
2880 return eb; in alloc_test_extent_buffer()
2882 btrfs_release_extent_buffer(eb); in alloc_test_extent_buffer()
2907 * We could have already allocated an eb for this folio and attached one in grab_extent_buffer()
2908 * so let's see if we can get a ref on the existing eb, and if we can we in grab_extent_buffer()
2922 * Validate alignment constraints of eb at logical address @start.
2955 * Return 0 if eb->folios[i] is attached to btree inode successfully.
2959 * than @eb.
2962 static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i, in attach_eb_folio_to_filemap() argument
2967 struct btrfs_fs_info *fs_info = eb->fs_info; in attach_eb_folio_to_filemap()
2969 const unsigned long index = eb->start >> PAGE_SHIFT; in attach_eb_folio_to_filemap()
2976 ASSERT(eb->folios[i]); in attach_eb_folio_to_filemap()
2979 ret = filemap_add_folio(mapping, eb->folios[i], index + i, in attach_eb_folio_to_filemap()
2992 ASSERT(folio_nr_pages(existing_folio) == 1); in attach_eb_folio_to_filemap()
2994 if (folio_size(existing_folio) != eb->folio_size) { in attach_eb_folio_to_filemap()
3004 __free_page(folio_page(eb->folios[i], 0)); in attach_eb_folio_to_filemap()
3005 eb->folios[i] = existing_folio; in attach_eb_folio_to_filemap()
3016 return 1; in attach_eb_folio_to_filemap()
3019 __free_page(folio_page(eb->folios[i], 0)); in attach_eb_folio_to_filemap()
3020 eb->folios[i] = existing_folio; in attach_eb_folio_to_filemap()
3022 eb->folio_size = folio_size(eb->folios[i]); in attach_eb_folio_to_filemap()
3023 eb->folio_shift = folio_shift(eb->folios[i]); in attach_eb_folio_to_filemap()
3025 ret = attach_extent_buffer_folio(eb, eb->folios[i], prealloc); in attach_eb_folio_to_filemap()
3028 * To signal that we have an extra eb under allocation, so that in attach_eb_folio_to_filemap()
3030 * eb hasn't been inserted into radix tree yet. in attach_eb_folio_to_filemap()
3032 * The ref will be decreased when the eb releases the page, in in attach_eb_folio_to_filemap()
3036 btrfs_folio_inc_eb_refs(fs_info, eb->folios[i]); in attach_eb_folio_to_filemap()
3047 struct extent_buffer *eb; in alloc_extent_buffer() local
3052 int uptodate = 1; in alloc_extent_buffer()
3069 eb = find_extent_buffer(fs_info, start); in alloc_extent_buffer()
3070 if (eb) in alloc_extent_buffer()
3071 return eb; in alloc_extent_buffer()
3073 eb = __alloc_extent_buffer(fs_info, start, len); in alloc_extent_buffer()
3074 if (!eb) in alloc_extent_buffer()
3084 btrfs_set_buffer_lockdep_class(lockdep_owner, eb, level); in alloc_extent_buffer()
3103 ret = alloc_eb_folio_array(eb, true); in alloc_extent_buffer()
3109 num_folios = num_extent_folios(eb); in alloc_extent_buffer()
3114 ret = attach_eb_folio_to_filemap(eb, i, prealloc, &existing_eb); in alloc_extent_buffer()
3122 * folios mismatch between the new eb and filemap. in alloc_extent_buffer()
3126 * - the new eb is using higher order folio in alloc_extent_buffer()
3129 * This can happen at the previous eb allocation, and we don't in alloc_extent_buffer()
3132 * - the existing eb has already been freed in alloc_extent_buffer()
3146 * Only after attach_eb_folio_to_filemap(), eb->folios[] is in alloc_extent_buffer()
3150 folio = eb->folios[i]; in alloc_extent_buffer()
3151 WARN_ON(btrfs_folio_test_dirty(fs_info, folio, eb->start, eb->len)); in alloc_extent_buffer()
3154 * Check if the current page is physically contiguous with previous eb in alloc_extent_buffer()
3159 if (i && folio_page(eb->folios[i - 1], 0) + 1 != folio_page(folio, 0)) in alloc_extent_buffer()
3162 if (!btrfs_folio_test_uptodate(fs_info, folio, eb->start, eb->len)) in alloc_extent_buffer()
3174 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in alloc_extent_buffer()
3177 eb->addr = folio_address(eb->folios[0]) + offset_in_page(eb->start); in alloc_extent_buffer()
3185 start >> fs_info->sectorsize_bits, eb); in alloc_extent_buffer()
3197 check_buffer_tree_ref(eb); in alloc_extent_buffer()
3198 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags); in alloc_extent_buffer()
3206 folio_unlock(eb->folios[i]); in alloc_extent_buffer()
3207 return eb; in alloc_extent_buffer()
3210 WARN_ON(!atomic_dec_and_test(&eb->refs)); in alloc_extent_buffer()
3215 * then attaching our eb to that folio. If we fail to insert our folio in alloc_extent_buffer()
3216 * we'll lookup the folio for that index, and grab that EB. We do not in alloc_extent_buffer()
3217 * want that to grab this eb, as we're getting ready to free it. So we in alloc_extent_buffer()
3221 * subpage case detaching does a btrfs_folio_dec_eb_refs() for our eb. in alloc_extent_buffer()
3224 * case. If we left eb->folios[i] populated in the subpage case we'd in alloc_extent_buffer()
3228 ASSERT(eb->folios[i]); in alloc_extent_buffer()
3229 detach_extent_buffer_folio(eb, eb->folios[i]); in alloc_extent_buffer()
3230 folio_unlock(eb->folios[i]); in alloc_extent_buffer()
3231 folio_put(eb->folios[i]); in alloc_extent_buffer()
3232 eb->folios[i] = NULL; in alloc_extent_buffer()
3238 set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags); in alloc_extent_buffer()
3240 btrfs_release_extent_buffer(eb); in alloc_extent_buffer()
3249 struct extent_buffer *eb = in btrfs_release_extent_buffer_rcu() local
3252 kmem_cache_free(extent_buffer_cache, eb); in btrfs_release_extent_buffer_rcu()
3255 static int release_extent_buffer(struct extent_buffer *eb) in release_extent_buffer() argument
3256 __releases(&eb->refs_lock) in release_extent_buffer()
3258 lockdep_assert_held(&eb->refs_lock); in release_extent_buffer()
3260 WARN_ON(atomic_read(&eb->refs) == 0); in release_extent_buffer()
3261 if (atomic_dec_and_test(&eb->refs)) { in release_extent_buffer()
3262 if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) { in release_extent_buffer()
3263 struct btrfs_fs_info *fs_info = eb->fs_info; in release_extent_buffer()
3265 spin_unlock(&eb->refs_lock); in release_extent_buffer()
3269 eb->start >> fs_info->sectorsize_bits); in release_extent_buffer()
3272 spin_unlock(&eb->refs_lock); in release_extent_buffer()
3275 btrfs_leak_debug_del_eb(eb); in release_extent_buffer()
3277 btrfs_release_extent_buffer_folios(eb); in release_extent_buffer()
3279 if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) { in release_extent_buffer()
3280 kmem_cache_free(extent_buffer_cache, eb); in release_extent_buffer()
3281 return 1; in release_extent_buffer()
3284 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu); in release_extent_buffer()
3285 return 1; in release_extent_buffer()
3287 spin_unlock(&eb->refs_lock); in release_extent_buffer()
3292 void free_extent_buffer(struct extent_buffer *eb) in free_extent_buffer() argument
3295 if (!eb) in free_extent_buffer()
3298 refs = atomic_read(&eb->refs); in free_extent_buffer()
3299 while (1) { in free_extent_buffer()
3300 if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3) in free_extent_buffer()
3301 || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && in free_extent_buffer()
3302 refs == 1)) in free_extent_buffer()
3304 if (atomic_try_cmpxchg(&eb->refs, &refs, refs - 1)) in free_extent_buffer()
3308 spin_lock(&eb->refs_lock); in free_extent_buffer()
3309 if (atomic_read(&eb->refs) == 2 && in free_extent_buffer()
3310 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) && in free_extent_buffer()
3311 !extent_buffer_under_io(eb) && in free_extent_buffer()
3312 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in free_extent_buffer()
3313 atomic_dec(&eb->refs); in free_extent_buffer()
3319 release_extent_buffer(eb); in free_extent_buffer()
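The refs loop above (lines 3298-3304) is the lockless fast path: keep decrementing with a compare-exchange while the count stays above the threshold that forces the locked slow path. A minimal C11 sketch of the same shape:

#include <stdatomic.h>
#include <stdbool.h>

/* Decrement *refs only while it stays above threshold; on false the
 * caller must take the lock and re-check, as free_extent_buffer() does. */
static bool fast_put(atomic_int *refs, int threshold)
{
        int cur = atomic_load(refs);

        while (cur > threshold) {
                /* On failure, cur is refreshed with the current value. */
                if (atomic_compare_exchange_weak(refs, &cur, cur - 1))
                        return true;
        }
        return false;
}

int main(void)
{
        atomic_int refs = 5;

        while (fast_put(&refs, 3))
                ;       /* stops once refs hits the threshold */
        return atomic_load(&refs) == 3 ? 0 : 1;
}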
3322 void free_extent_buffer_stale(struct extent_buffer *eb) in free_extent_buffer_stale() argument
3324 if (!eb) in free_extent_buffer_stale()
3327 spin_lock(&eb->refs_lock); in free_extent_buffer_stale()
3328 set_bit(EXTENT_BUFFER_STALE, &eb->bflags); in free_extent_buffer_stale()
3330 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) && in free_extent_buffer_stale()
3331 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in free_extent_buffer_stale()
3332 atomic_dec(&eb->refs); in free_extent_buffer_stale()
3333 release_extent_buffer(eb); in free_extent_buffer_stale()
3348 static void clear_subpage_extent_buffer_dirty(const struct extent_buffer *eb) in clear_subpage_extent_buffer_dirty() argument
3350 struct btrfs_fs_info *fs_info = eb->fs_info; in clear_subpage_extent_buffer_dirty()
3351 struct folio *folio = eb->folios[0]; in clear_subpage_extent_buffer_dirty()
3356 last = btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start, eb->len); in clear_subpage_extent_buffer_dirty()
3360 WARN_ON(atomic_read(&eb->refs) == 0); in clear_subpage_extent_buffer_dirty()
3364 struct extent_buffer *eb) in btrfs_clear_buffer_dirty() argument
3366 struct btrfs_fs_info *fs_info = eb->fs_info; in btrfs_clear_buffer_dirty()
3369 btrfs_assert_tree_write_locked(eb); in btrfs_clear_buffer_dirty()
3371 if (trans && btrfs_header_generation(eb) != trans->transid) in btrfs_clear_buffer_dirty()
3383 if (btrfs_is_zoned(fs_info) && test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) { in btrfs_clear_buffer_dirty()
3384 set_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags); in btrfs_clear_buffer_dirty()
3388 if (!test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) in btrfs_clear_buffer_dirty()
3391 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, -eb->len, in btrfs_clear_buffer_dirty()
3394 if (eb->fs_info->nodesize < PAGE_SIZE) in btrfs_clear_buffer_dirty()
3395 return clear_subpage_extent_buffer_dirty(eb); in btrfs_clear_buffer_dirty()
3397 num_folios = num_extent_folios(eb); in btrfs_clear_buffer_dirty()
3399 struct folio *folio = eb->folios[i]; in btrfs_clear_buffer_dirty()
3407 WARN_ON(atomic_read(&eb->refs) == 0); in btrfs_clear_buffer_dirty()
3410 void set_extent_buffer_dirty(struct extent_buffer *eb) in set_extent_buffer_dirty() argument
3415 check_buffer_tree_ref(eb); in set_extent_buffer_dirty()
3417 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags); in set_extent_buffer_dirty()
3419 num_folios = num_extent_folios(eb); in set_extent_buffer_dirty()
3420 WARN_ON(atomic_read(&eb->refs) == 0); in set_extent_buffer_dirty()
3421 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)); in set_extent_buffer_dirty()
3422 WARN_ON(test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags)); in set_extent_buffer_dirty()
3425 bool subpage = eb->fs_info->nodesize < PAGE_SIZE; in set_extent_buffer_dirty()
3439 folio_lock(eb->folios[0]); in set_extent_buffer_dirty()
3441 btrfs_folio_set_dirty(eb->fs_info, eb->folios[i], in set_extent_buffer_dirty()
3442 eb->start, eb->len); in set_extent_buffer_dirty()
3444 folio_unlock(eb->folios[0]); in set_extent_buffer_dirty()
3445 percpu_counter_add_batch(&eb->fs_info->dirty_metadata_bytes, in set_extent_buffer_dirty()
3446 eb->len, in set_extent_buffer_dirty()
3447 eb->fs_info->dirty_metadata_batch); in set_extent_buffer_dirty()
3451 ASSERT(folio_test_dirty(eb->folios[i])); in set_extent_buffer_dirty()
3455 void clear_extent_buffer_uptodate(struct extent_buffer *eb) in clear_extent_buffer_uptodate() argument
3457 struct btrfs_fs_info *fs_info = eb->fs_info; in clear_extent_buffer_uptodate()
3458 int num_folios = num_extent_folios(eb); in clear_extent_buffer_uptodate()
3460 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in clear_extent_buffer_uptodate()
3462 struct folio *folio = eb->folios[i]; in clear_extent_buffer_uptodate()
3475 eb->start, eb->len); in clear_extent_buffer_uptodate()
3479 void set_extent_buffer_uptodate(struct extent_buffer *eb) in set_extent_buffer_uptodate() argument
3481 struct btrfs_fs_info *fs_info = eb->fs_info; in set_extent_buffer_uptodate()
3482 int num_folios = num_extent_folios(eb); in set_extent_buffer_uptodate()
3484 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in set_extent_buffer_uptodate()
3486 struct folio *folio = eb->folios[i]; in set_extent_buffer_uptodate()
3496 eb->start, eb->len); in set_extent_buffer_uptodate()
3500 static void clear_extent_buffer_reading(struct extent_buffer *eb) in clear_extent_buffer_reading() argument
3502 clear_bit(EXTENT_BUFFER_READING, &eb->bflags); in clear_extent_buffer_reading()
3504 wake_up_bit(&eb->bflags, EXTENT_BUFFER_READING); in clear_extent_buffer_reading()
3509 struct extent_buffer *eb = bbio->private; in end_bbio_meta_read() local
3510 struct btrfs_fs_info *fs_info = eb->fs_info; in end_bbio_meta_read()
3520 WARN_ON(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)); in end_bbio_meta_read()
3522 eb->read_mirror = bbio->mirror_num; in end_bbio_meta_read()
3525 btrfs_validate_extent_buffer(eb, &bbio->parent_check) < 0) in end_bbio_meta_read()
3529 set_extent_buffer_uptodate(eb); in end_bbio_meta_read()
3531 clear_extent_buffer_uptodate(eb); in end_bbio_meta_read()
3532 set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); in end_bbio_meta_read()
3537 u64 start = eb->start + bio_offset; in end_bbio_meta_read()
3548 clear_extent_buffer_reading(eb); in end_bbio_meta_read()
3549 free_extent_buffer(eb); in end_bbio_meta_read()
3554 int read_extent_buffer_pages_nowait(struct extent_buffer *eb, int mirror_num, in read_extent_buffer_pages_nowait() argument
3560 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)) in read_extent_buffer_pages_nowait()
3568 if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))) in read_extent_buffer_pages_nowait()
3572 if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags)) in read_extent_buffer_pages_nowait()
3578 * started and finished reading the same eb. In this case, UPTODATE in read_extent_buffer_pages_nowait()
3581 if (unlikely(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))) { in read_extent_buffer_pages_nowait()
3582 clear_extent_buffer_reading(eb); in read_extent_buffer_pages_nowait()
3586 clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); in read_extent_buffer_pages_nowait()
3587 eb->read_mirror = 0; in read_extent_buffer_pages_nowait()
3588 check_buffer_tree_ref(eb); in read_extent_buffer_pages_nowait()
3589 atomic_inc(&eb->refs); in read_extent_buffer_pages_nowait()
3592 REQ_OP_READ | REQ_META, eb->fs_info, in read_extent_buffer_pages_nowait()
3593 end_bbio_meta_read, eb); in read_extent_buffer_pages_nowait()
3594 bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT; in read_extent_buffer_pages_nowait()
3595 bbio->inode = BTRFS_I(eb->fs_info->btree_inode); in read_extent_buffer_pages_nowait()
3596 bbio->file_offset = eb->start; in read_extent_buffer_pages_nowait()
3598 if (eb->fs_info->nodesize < PAGE_SIZE) { in read_extent_buffer_pages_nowait()
3599 ret = bio_add_folio(&bbio->bio, eb->folios[0], eb->len, in read_extent_buffer_pages_nowait()
3600 eb->start - folio_pos(eb->folios[0])); in read_extent_buffer_pages_nowait()
3603 int num_folios = num_extent_folios(eb); in read_extent_buffer_pages_nowait()
3606 struct folio *folio = eb->folios[i]; in read_extent_buffer_pages_nowait()
3608 ret = bio_add_folio(&bbio->bio, folio, eb->folio_size, 0); in read_extent_buffer_pages_nowait()
3616 int read_extent_buffer_pages(struct extent_buffer *eb, int mirror_num, in read_extent_buffer_pages() argument
3621 ret = read_extent_buffer_pages_nowait(eb, mirror_num, check); in read_extent_buffer_pages()
3625 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE); in read_extent_buffer_pages()
3626 if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)) in read_extent_buffer_pages()
3631 static bool report_eb_range(const struct extent_buffer *eb, unsigned long start, in report_eb_range() argument
3634 btrfs_warn(eb->fs_info, in report_eb_range()
3635 "access to eb bytenr %llu len %u out of range start %lu len %lu", in report_eb_range()
3636 eb->start, eb->len, start, len); in report_eb_range()
3644 * the eb.
3645 * NOTE: @start and @len are offsets inside the eb, not logical addresses.
3649 static inline int check_eb_range(const struct extent_buffer *eb, in check_eb_range() argument
3654 /* start, start + len should not go beyond eb->len nor overflow */ in check_eb_range()
3655 if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len)) in check_eb_range()
3656 return report_eb_range(eb, start, len); in check_eb_range()
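check_eb_range() rejects (start, len) pairs whose sum overflows or runs past eb->len. A userspace equivalent built on the GCC/Clang builtin; the kernel's check_add_overflow() wraps the same idea.

#include <stdbool.h>
#include <stdio.h>

static bool range_ok(unsigned long start, unsigned long len,
                     unsigned long buf_len)
{
        unsigned long end;

        /* Overflow and out-of-bounds are both rejected in one test. */
        if (__builtin_add_overflow(start, len, &end) || end > buf_len) {
                fprintf(stderr, "access start %lu len %lu beyond len %lu\n",
                        start, len, buf_len);
                return false;
        }
        return true;
}

int main(void)
{
        return (range_ok(10, 20, 64) && !range_ok(-1UL, 2, 64)) ? 0 : 1;
}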
3661 void read_extent_buffer(const struct extent_buffer *eb, void *dstv, in read_extent_buffer() argument
3664 const int unit_size = eb->folio_size; in read_extent_buffer()
3668 unsigned long i = get_eb_folio_index(eb, start); in read_extent_buffer()
3670 if (check_eb_range(eb, start, len)) { in read_extent_buffer()
3679 if (eb->addr) { in read_extent_buffer()
3680 memcpy(dstv, eb->addr + start, len); in read_extent_buffer()
3684 offset = get_eb_offset_in_folio(eb, start); in read_extent_buffer()
3690 kaddr = folio_address(eb->folios[i]); in read_extent_buffer()
3700 int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb, in read_extent_buffer_to_user_nofault() argument
3704 const int unit_size = eb->folio_size; in read_extent_buffer_to_user_nofault()
3708 unsigned long i = get_eb_folio_index(eb, start); in read_extent_buffer_to_user_nofault()
3711 WARN_ON(start > eb->len); in read_extent_buffer_to_user_nofault()
3712 WARN_ON(start + len > eb->start + eb->len); in read_extent_buffer_to_user_nofault()
3714 if (eb->addr) { in read_extent_buffer_to_user_nofault()
3715 if (copy_to_user_nofault(dstv, eb->addr + start, len)) in read_extent_buffer_to_user_nofault()
3720 offset = get_eb_offset_in_folio(eb, start); in read_extent_buffer_to_user_nofault()
3726 kaddr = folio_address(eb->folios[i]); in read_extent_buffer_to_user_nofault()
3741 int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv, in memcmp_extent_buffer() argument
3744 const int unit_size = eb->folio_size; in memcmp_extent_buffer()
3749 unsigned long i = get_eb_folio_index(eb, start); in memcmp_extent_buffer()
3752 if (check_eb_range(eb, start, len)) in memcmp_extent_buffer()
3755 if (eb->addr) in memcmp_extent_buffer()
3756 return memcmp(ptrv, eb->addr + start, len); in memcmp_extent_buffer()
3758 offset = get_eb_offset_in_folio(eb, start); in memcmp_extent_buffer()
3762 kaddr = folio_address(eb->folios[i]); in memcmp_extent_buffer()
3779 * For the subpage case, check whether the range covered by the eb has EXTENT_UPTODATE.
3781 static void assert_eb_folio_uptodate(const struct extent_buffer *eb, int i) in assert_eb_folio_uptodate() argument
3783 struct btrfs_fs_info *fs_info = eb->fs_info; in assert_eb_folio_uptodate()
3784 struct folio *folio = eb->folios[i]; in assert_eb_folio_uptodate()
3796 if (test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) in assert_eb_folio_uptodate()
3800 folio = eb->folios[0]; in assert_eb_folio_uptodate()
3803 eb->start, eb->len))) in assert_eb_folio_uptodate()
3804 btrfs_subpage_dump_bitmap(fs_info, folio, eb->start, eb->len); in assert_eb_folio_uptodate()
3810 static void __write_extent_buffer(const struct extent_buffer *eb, in __write_extent_buffer() argument
3814 const int unit_size = eb->folio_size; in __write_extent_buffer()
3819 unsigned long i = get_eb_folio_index(eb, start); in __write_extent_buffer()
3821 const bool check_uptodate = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags); in __write_extent_buffer()
3823 if (check_eb_range(eb, start, len)) in __write_extent_buffer()
3826 if (eb->addr) { in __write_extent_buffer()
3828 memmove(eb->addr + start, srcv, len); in __write_extent_buffer()
3830 memcpy(eb->addr + start, srcv, len); in __write_extent_buffer()
3834 offset = get_eb_offset_in_folio(eb, start); in __write_extent_buffer()
3838 assert_eb_folio_uptodate(eb, i); in __write_extent_buffer()
3841 kaddr = folio_address(eb->folios[i]); in __write_extent_buffer()
3854 void write_extent_buffer(const struct extent_buffer *eb, const void *srcv, in write_extent_buffer() argument
3857 return __write_extent_buffer(eb, srcv, start, len, false); in write_extent_buffer()
3860 static void memset_extent_buffer(const struct extent_buffer *eb, int c, in memset_extent_buffer() argument
3863 const int unit_size = eb->folio_size; in memset_extent_buffer()
3866 if (eb->addr) { in memset_extent_buffer()
3867 memset(eb->addr + start, c, len); in memset_extent_buffer()
3872 unsigned long index = get_eb_folio_index(eb, cur); in memset_extent_buffer()
3873 unsigned int offset = get_eb_offset_in_folio(eb, cur); in memset_extent_buffer()
3876 assert_eb_folio_uptodate(eb, index); in memset_extent_buffer()
3877 memset(folio_address(eb->folios[index]) + offset, c, cur_len); in memset_extent_buffer()
3883 void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start, in memzero_extent_buffer() argument
3886 if (check_eb_range(eb, start, len)) in memzero_extent_buffer()
3888 return memset_extent_buffer(eb, 0, start, len); in memzero_extent_buffer()
3949 * @eb: the extent buffer
3959 static inline void eb_bitmap_offset(const struct extent_buffer *eb, in eb_bitmap_offset() argument
3972 offset = start + offset_in_eb_folio(eb, eb->start) + byte_offset; in eb_bitmap_offset()
3974 *folio_index = offset >> eb->folio_shift; in eb_bitmap_offset()
3975 *folio_offset = offset_in_eb_folio(eb, offset); in eb_bitmap_offset()
3981 * @eb: the extent buffer
3985 int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start, in extent_buffer_test_bit() argument
3992 eb_bitmap_offset(eb, start, nr, &i, &offset); in extent_buffer_test_bit()
3993 assert_eb_folio_uptodate(eb, i); in extent_buffer_test_bit()
3994 kaddr = folio_address(eb->folios[i]); in extent_buffer_test_bit()
3995 return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1))); in extent_buffer_test_bit()
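The return expression at line 3995 is plain byte-granular bit addressing: index the byte with BIT_BYTE(nr), shift by the bit's position inside the byte, mask with 1. Self-contained:

#include <assert.h>
#include <stdint.h>

#define BITS_PER_BYTE 8
#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)

static int test_bit_in_map(const uint8_t *map, unsigned long nr)
{
        return 1U & (map[BIT_BYTE(nr)] >> (nr & (BITS_PER_BYTE - 1)));
}

int main(void)
{
        uint8_t map[2] = { 0x01, 0x80 };        /* bits 0 and 15 set */

        assert(test_bit_in_map(map, 0) == 1);
        assert(test_bit_in_map(map, 1) == 0);
        assert(test_bit_in_map(map, 15) == 1);
        return 0;
}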
3998 static u8 *extent_buffer_get_byte(const struct extent_buffer *eb, unsigned long bytenr) in extent_buffer_get_byte() argument
4000 unsigned long index = get_eb_folio_index(eb, bytenr); in extent_buffer_get_byte()
4002 if (check_eb_range(eb, bytenr, 1)) in extent_buffer_get_byte()
4004 return folio_address(eb->folios[index]) + get_eb_offset_in_folio(eb, bytenr); in extent_buffer_get_byte()
4008 * Set an area of a bitmap to 1.
4010 * @eb: the extent buffer
4015 void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start, in extent_buffer_bitmap_set() argument
4019 unsigned int last_byte = start + BIT_BYTE(pos + len - 1); in extent_buffer_bitmap_set()
4028 kaddr = extent_buffer_get_byte(eb, first_byte); in extent_buffer_bitmap_set()
4034 ASSERT(first_byte + 1 <= last_byte); in extent_buffer_bitmap_set()
4035 memset_extent_buffer(eb, 0xff, first_byte + 1, last_byte - first_byte - 1); in extent_buffer_bitmap_set()
4038 kaddr = extent_buffer_get_byte(eb, last_byte); in extent_buffer_bitmap_set()
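extent_buffer_bitmap_set() splits a run of bits into three pieces: a masked partial first byte, whole middle bytes set with memset, and a masked partial last byte; extent_buffer_bitmap_clear() below (lines 4051-4075) is the mirror image with inverted masks. A standalone sketch of that decomposition:

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define BITS_PER_BYTE 8
#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)

static void bitmap_set_range(uint8_t *map, unsigned long pos, unsigned long len)
{
        unsigned long first_byte = BIT_BYTE(pos);
        unsigned long last_byte = BIT_BYTE(pos + len - 1);
        uint8_t first_mask = 0xffu << (pos & (BITS_PER_BYTE - 1));
        uint8_t last_mask =
                0xffu >> (BITS_PER_BYTE - 1 - ((pos + len - 1) & (BITS_PER_BYTE - 1)));

        if (first_byte == last_byte) {          /* run fits in one byte */
                map[first_byte] |= first_mask & last_mask;
                return;
        }
        map[first_byte] |= first_mask;          /* partial first byte */
        memset(&map[first_byte + 1], 0xff, last_byte - first_byte - 1);
        map[last_byte] |= last_mask;            /* partial last byte */
}

int main(void)
{
        uint8_t map[4] = { 0 };

        bitmap_set_range(map, 5, 14);           /* bits 5..18 */
        assert(map[0] == 0xe0 && map[1] == 0xff && map[2] == 0x07);
        return 0;
}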
4046 * @eb: the extent buffer
4051 void extent_buffer_bitmap_clear(const struct extent_buffer *eb, in extent_buffer_bitmap_clear() argument
4056 unsigned int last_byte = start + BIT_BYTE(pos + len - 1); in extent_buffer_bitmap_clear()
4065 kaddr = extent_buffer_get_byte(eb, first_byte); in extent_buffer_bitmap_clear()
4071 ASSERT(first_byte + 1 <= last_byte); in extent_buffer_bitmap_clear()
4072 memset_extent_buffer(eb, 0, first_byte + 1, last_byte - first_byte - 1); in extent_buffer_bitmap_clear()
4075 kaddr = extent_buffer_get_byte(eb, last_byte); in extent_buffer_bitmap_clear()
4126 unsigned long dst_end = dst_offset + len - 1; in memmove_extent_buffer()
4127 unsigned long src_end = src_offset + len - 1; in memmove_extent_buffer()
4156 cur = min_t(unsigned long, len, src_off_in_folio + 1); in memmove_extent_buffer()
4157 cur = min(cur, dst_off_in_folio + 1); in memmove_extent_buffer()
4160 cur + 1; in memmove_extent_buffer()
4161 use_memmove = areas_overlap(src_end - cur + 1, dst_end - cur + 1, in memmove_extent_buffer()
4164 __write_extent_buffer(dst, src_addr, dst_end - cur + 1, cur, in memmove_extent_buffer()
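memmove_extent_buffer() walks the copy from the range ends backwards and picks memcpy or memmove per chunk depending on whether source and destination actually overlap. A sketch of the overlap test and dispatch, matching the semantics of the file's areas_overlap() helper:

#include <stdbool.h>
#include <string.h>

/* Two len-byte ranges overlap iff their starts are closer than len. */
static bool areas_overlap(unsigned long src, unsigned long dst,
                          unsigned long len)
{
        unsigned long delta = src > dst ? src - dst : dst - src;

        return delta < len;
}

static void copy_range(char *buf, unsigned long dst, unsigned long src,
                       unsigned long len)
{
        if (areas_overlap(src, dst, len))
                memmove(buf + dst, buf + src, len);     /* overlap-safe */
        else
                memcpy(buf + dst, buf + src, len);      /* cheaper if disjoint */
}

int main(void)
{
        char buf[16] = "abcdefgh";

        copy_range(buf, 2, 0, 4);       /* overlapping: takes the memmove path */
        return buf[2] == 'a' ? 0 : 1;
}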
4205 cur = gang[ret - 1]->start + gang[ret - 1]->len; in get_next_extent_buffer()
4219 struct extent_buffer *eb = NULL; in try_release_subpage_extent_buffer() local
4230 eb = get_next_extent_buffer(fs_info, folio, cur); in try_release_subpage_extent_buffer()
4231 if (!eb) { in try_release_subpage_extent_buffer()
4232 /* No more eb in the page range after or at cur */ in try_release_subpage_extent_buffer()
4236 cur = eb->start + eb->len; in try_release_subpage_extent_buffer()
4239 * The same as try_release_extent_buffer(), to ensure the eb in try_release_subpage_extent_buffer()
4242 spin_lock(&eb->refs_lock); in try_release_subpage_extent_buffer()
4243 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) { in try_release_subpage_extent_buffer()
4244 spin_unlock(&eb->refs_lock); in try_release_subpage_extent_buffer()
4251 * If tree ref isn't set then we know the ref on this eb is a in try_release_subpage_extent_buffer()
4252 * real ref, so just return, this eb will likely be freed soon in try_release_subpage_extent_buffer()
4255 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) { in try_release_subpage_extent_buffer()
4256 spin_unlock(&eb->refs_lock); in try_release_subpage_extent_buffer()
4265 release_extent_buffer(eb); in try_release_subpage_extent_buffer()
4273 ret = 1; in try_release_subpage_extent_buffer()
4283 struct extent_buffer *eb; in try_release_extent_buffer() local
4295 return 1; in try_release_extent_buffer()
4298 eb = folio_get_private(folio); in try_release_extent_buffer()
4299 BUG_ON(!eb); in try_release_extent_buffer()
4303 * the eb doesn't disappear out from under us while we're looking at in try_release_extent_buffer()
4306 spin_lock(&eb->refs_lock); in try_release_extent_buffer()
4307 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) { in try_release_extent_buffer()
4308 spin_unlock(&eb->refs_lock); in try_release_extent_buffer()
4315 * If tree ref isn't set then we know the ref on this eb is a real ref, in try_release_extent_buffer()
4318 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) { in try_release_extent_buffer()
4319 spin_unlock(&eb->refs_lock); in try_release_extent_buffer()
4323 return release_extent_buffer(eb); in try_release_extent_buffer()
4331 * @owner_root: objectid of the root that owns this eb
4333 * @level: level for the eb
4336 * normal uptodate check of the eb, without checking the generation. If we have
4346 struct extent_buffer *eb; in btrfs_readahead_tree_block() local
4349 eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level); in btrfs_readahead_tree_block()
4350 if (IS_ERR(eb)) in btrfs_readahead_tree_block()
4353 if (btrfs_buffer_uptodate(eb, gen, 1)) { in btrfs_readahead_tree_block()
4354 free_extent_buffer(eb); in btrfs_readahead_tree_block()
4358 ret = read_extent_buffer_pages_nowait(eb, 0, &check); in btrfs_readahead_tree_block()
4360 free_extent_buffer_stale(eb); in btrfs_readahead_tree_block()
4362 free_extent_buffer(eb); in btrfs_readahead_tree_block()
4380 btrfs_header_level(node) - 1); in btrfs_readahead_node_child()