Lines Matching +full:non +full:- +full:inclusive

1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 1994-1999 Linus Torvalds
30 #include <linux/error-injection.h>
33 #include <linux/backing-dev.h>
72 * finished 'unifying' the page and buffer cache and SMP-threaded the
73 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
75 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
81 * ->i_mmap_rwsem (truncate_pagecache)
82 * ->private_lock (__free_pte->block_dirty_folio)
83 * ->swap_lock (exclusive_swap_page, others)
84 * ->i_pages lock
86 * ->i_rwsem
87 * ->invalidate_lock (acquired by fs in truncate path)
88 * ->i_mmap_rwsem (truncate->unmap_mapping_range)
90 * ->mmap_lock
91 * ->i_mmap_rwsem
92 * ->page_table_lock or pte_lock (various, mainly in memory.c)
93 * ->i_pages lock (arch-dependent flush_dcache_mmap_lock)
95 * ->mmap_lock
96 * ->invalidate_lock (filemap_fault)
97 * ->lock_page (filemap_fault, access_process_vm)
99 * ->i_rwsem (generic_perform_write)
100 * ->mmap_lock (fault_in_readable->do_page_fault)
102 * bdi->wb.list_lock
103 * sb_lock (fs/fs-writeback.c)
104 * ->i_pages lock (__sync_single_inode)
106 * ->i_mmap_rwsem
107 * ->anon_vma.lock (vma_merge)
109 * ->anon_vma.lock
110 * ->page_table_lock or pte_lock (anon_vma_prepare and various)
112 * ->page_table_lock or pte_lock
113 * ->swap_lock (try_to_unmap_one)
114 * ->private_lock (try_to_unmap_one)
115 * ->i_pages lock (try_to_unmap_one)
116 * ->lruvec->lru_lock (follow_page_mask->mark_page_accessed)
117 * ->lruvec->lru_lock (check_pte_range->folio_isolate_lru)
118 * ->private_lock (folio_remove_rmap_pte->set_page_dirty)
119 * ->i_pages lock (folio_remove_rmap_pte->set_page_dirty)
120 * bdi.wb->list_lock (folio_remove_rmap_pte->set_page_dirty)
121 * ->inode->i_lock (folio_remove_rmap_pte->set_page_dirty)
122 * bdi.wb->list_lock (zap_pte_range->set_page_dirty)
123 * ->inode->i_lock (zap_pte_range->set_page_dirty)
124 * ->private_lock (zap_pte_range->block_dirty_folio)
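
The hierarchy above fixes the order in which the truncate path must take its
locks. A minimal sketch of how a filesystem honours it, assuming the generic
helpers are used (example_fs_truncate is a hypothetical function, not part of
this file):

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/* Hypothetical truncate path, for illustration only. */
static void example_fs_truncate(struct inode *inode, loff_t newsize)
{
	inode_lock(inode);				/* ->i_rwsem */
	filemap_invalidate_lock(inode->i_mapping);	/* ->invalidate_lock */
	/*
	 * truncate_setsize() reaches unmap_mapping_range(), which nests
	 * ->i_mmap_rwsem under the two locks taken above.
	 */
	truncate_setsize(inode, newsize);
	filemap_invalidate_unlock(inode->i_mapping);
	inode_unlock(inode);
}
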
130 XA_STATE(xas, &mapping->i_pages, folio->index); in page_cache_delete()
135 xas_set_order(&xas, folio->index, folio_order(folio)); in page_cache_delete()
143 folio->mapping = NULL; in page_cache_delete()
144 /* Leave page->index set: truncation lookup relies upon it */ in page_cache_delete()
145 mapping->nrpages -= nr; in page_cache_delete()
156 current->comm, folio_pfn(folio)); in filemap_unaccount_folio()
157 dump_page(&folio->page, "still mapped when deleted"); in filemap_unaccount_folio()
171 atomic_set(&folio->_mapcount, -1); in filemap_unaccount_folio()
183 __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr); in filemap_unaccount_folio()
185 __lruvec_stat_mod_folio(folio, NR_SHMEM, -nr); in filemap_unaccount_folio()
187 __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr); in filemap_unaccount_folio()
189 __lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr); in filemap_unaccount_folio()
196 * unwritten data - on ordinary filesystems. in filemap_unaccount_folio()
198 * But it's harmless on in-memory filesystems like tmpfs; and can in filemap_unaccount_folio()
209 folio_account_cleaned(folio, inode_to_wb(mapping->host)); in filemap_unaccount_folio()
214 * sure the page is locked and that nobody else uses it - or that usage
219 struct address_space *mapping = folio->mapping; in __filemap_remove_folio()
231 free_folio = mapping->a_ops->free_folio; in filemap_free_folio()
241 * filemap_remove_folio - Remove folio from page cache.
250 struct address_space *mapping = folio->mapping; in filemap_remove_folio()
253 spin_lock(&mapping->host->i_lock); in filemap_remove_folio()
254 xa_lock_irq(&mapping->i_pages); in filemap_remove_folio()
256 xa_unlock_irq(&mapping->i_pages); in filemap_remove_folio()
258 inode_add_lru(mapping->host); in filemap_remove_folio()
259 spin_unlock(&mapping->host->i_lock); in filemap_remove_folio()
265 * page_cache_delete_batch - delete several folios from page cache
269 * The function walks over mapping->i_pages and removes folios passed in
280 XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index); in page_cache_delete_batch()
300 if (folio != fbatch->folios[i]) { in page_cache_delete_batch()
301 VM_BUG_ON_FOLIO(folio->index > in page_cache_delete_batch()
302 fbatch->folios[i]->index, folio); in page_cache_delete_batch()
308 folio->mapping = NULL; in page_cache_delete_batch()
309 /* Leave folio->index set: truncation lookup relies on it */ in page_cache_delete_batch()
315 mapping->nrpages -= total_pages; in page_cache_delete_batch()
326 spin_lock(&mapping->host->i_lock); in delete_from_page_cache_batch()
327 xa_lock_irq(&mapping->i_pages); in delete_from_page_cache_batch()
329 struct folio *folio = fbatch->folios[i]; in delete_from_page_cache_batch()
335 xa_unlock_irq(&mapping->i_pages); in delete_from_page_cache_batch()
337 inode_add_lru(mapping->host); in delete_from_page_cache_batch()
338 spin_unlock(&mapping->host->i_lock); in delete_from_page_cache_batch()
341 filemap_free_folio(mapping, fbatch->folios[i]); in delete_from_page_cache_batch()
348 if (test_bit(AS_ENOSPC, &mapping->flags) && in filemap_check_errors()
349 test_and_clear_bit(AS_ENOSPC, &mapping->flags)) in filemap_check_errors()
350 ret = -ENOSPC; in filemap_check_errors()
351 if (test_bit(AS_EIO, &mapping->flags) && in filemap_check_errors()
352 test_and_clear_bit(AS_EIO, &mapping->flags)) in filemap_check_errors()
353 ret = -EIO; in filemap_check_errors()
361 if (test_bit(AS_EIO, &mapping->flags)) in filemap_check_and_keep_errors()
362 return -EIO; in filemap_check_and_keep_errors()
363 if (test_bit(AS_ENOSPC, &mapping->flags)) in filemap_check_and_keep_errors()
364 return -ENOSPC; in filemap_check_and_keep_errors()
369 * filemap_fdatawrite_wbc - start writeback on mapping dirty pages in range
387 wbc_attach_fdatawrite_inode(wbc, mapping->host); in filemap_fdatawrite_wbc()
395 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
398 * @end: offset in bytes where the range ends (inclusive)
402 * within the byte offsets <start, end> inclusive.
444 * filemap_fdatawrite_range_kick - start writeback on a range
447 * @end: last (inclusive) index for writeback
449 * This is a non-integrity writeback helper, to start writing back folios
462 * filemap_flush - mostly a non-blocking flush
465 * This is a mostly non-blocking flush. Not suitable for data-integrity
466 * purposes - I/O may not be started against all dirty pages.
477 * filemap_range_has_page - check if a page exists in range.
480 * @end_byte: offset in bytes where the range ends (inclusive)
492 XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT); in filemap_range_has_page()
549 * filemap_fdatawait_range - wait for writeback to complete
552 * @end_byte: offset in bytes where the range ends (inclusive)
554 * Walk the list of under-writeback pages of the given address space
573 * filemap_fdatawait_range_keep_errors - wait for writeback to complete
576 * @end_byte: offset in bytes where the range ends (inclusive)
578 * Walk the list of under-writeback pages of the given address space in the
583 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
595 * file_fdatawait_range - wait for writeback to complete
598 * @end_byte: offset in bytes where the range ends (inclusive)
600 * Walk the list of under-writeback pages of the address space that file
602 * status of the address space vs. the file->f_wb_err cursor and return it.
608 * Return: error status of the address space vs. the file->f_wb_err cursor.
612 struct address_space *mapping = file->f_mapping; in file_fdatawait_range()
620 * filemap_fdatawait_keep_errors - wait for writeback without clearing errors
623 * Walk the list of under-writeback pages of the given address space
628 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
643 return mapping->nrpages; in mapping_needs_writeback()
649 XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT); in filemap_range_has_writeback()
672 * filemap_write_and_wait_range - write out & wait on a file range
675 * @lend: offset in bytes where the range ends (inclusive)
677 * Write out and wait upon file offsets lstart->lend, inclusive.
679 * Note that @lend is inclusive (describes the last byte to be written) so
680 * that this function can be used to write to the very end-of-file (end = -1).
697 * written partially (e.g. -ENOSPC), so we wait for it. in filemap_write_and_wait_range()
698 * But the -EIO is special case, it may indicate the worst in filemap_write_and_wait_range()
701 if (err != -EIO) in filemap_write_and_wait_range()
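
Since @lend names the last byte rather than one past it, callers convert a
(pos, len) pair with a -1. A minimal usage sketch (example_sync_range is a
hypothetical wrapper):

#include <linux/fs.h>

/* Hypothetical wrapper, for illustration only. */
static int example_sync_range(struct inode *inode, loff_t pos, size_t len)
{
	/* @lend is inclusive, hence the -1; (0, -1) covers the whole file. */
	return filemap_write_and_wait_range(inode->i_mapping,
					    pos, pos + len - 1);
}
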
713 errseq_t eseq = errseq_set(&mapping->wb_err, err); in __filemap_set_wb_err()
720 * file_check_and_advance_wb_err - report wb error (if any) that was previously
737 * While we handle mapping->wb_err with atomic operations, the f_wb_err
746 errseq_t old = READ_ONCE(file->f_wb_err); in file_check_and_advance_wb_err()
747 struct address_space *mapping = file->f_mapping; in file_check_and_advance_wb_err()
750 if (errseq_check(&mapping->wb_err, old)) { in file_check_and_advance_wb_err()
752 spin_lock(&file->f_lock); in file_check_and_advance_wb_err()
753 old = file->f_wb_err; in file_check_and_advance_wb_err()
754 err = errseq_check_and_advance(&mapping->wb_err, in file_check_and_advance_wb_err()
755 &file->f_wb_err); in file_check_and_advance_wb_err()
757 spin_unlock(&file->f_lock); in file_check_and_advance_wb_err()
765 clear_bit(AS_EIO, &mapping->flags); in file_check_and_advance_wb_err()
766 clear_bit(AS_ENOSPC, &mapping->flags); in file_check_and_advance_wb_err()
772 * file_write_and_wait_range - write out & wait on a file range
775 * @lend: offset in bytes where the range ends (inclusive)
777 * Write out and wait upon file offsets lstart->lend, inclusive.
779 * Note that @lend is inclusive (describes the last byte to be written) so
780 * that this function can be used to write to the very end-of-file (end = -1).
790 struct address_space *mapping = file->f_mapping; in file_write_and_wait_range()
799 if (err != -EIO) in file_write_and_wait_range()
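
The errseq_t machinery above lets every struct file sample mapping->wb_err
once and advance a private cursor, so each opener observes a given writeback
error exactly once. A sketch of a ->fsync built on file_write_and_wait_range(),
which performs exactly that check-and-advance (example_fsync is hypothetical):

#include <linux/fs.h>

/* Hypothetical ->fsync, for illustration only. */
static int example_fsync(struct file *file, loff_t start, loff_t end,
			 int datasync)
{
	/*
	 * Writes the range, waits for it, then compares mapping->wb_err
	 * against file->f_wb_err and advances the cursor, so another
	 * struct file on the same inode still sees the error later.
	 */
	return file_write_and_wait_range(file, start, end);
}
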
810 * replace_page_cache_folio - replace a pagecache folio with a new one
824 struct address_space *mapping = old->mapping; in replace_page_cache_folio()
825 void (*free_folio)(struct folio *) = mapping->a_ops->free_folio; in replace_page_cache_folio()
826 pgoff_t offset = old->index; in replace_page_cache_folio()
827 XA_STATE(xas, &mapping->i_pages, offset); in replace_page_cache_folio()
831 VM_BUG_ON_FOLIO(new->mapping, new); in replace_page_cache_folio()
834 new->mapping = mapping; in replace_page_cache_folio()
835 new->index = offset; in replace_page_cache_folio()
842 old->mapping = NULL; in replace_page_cache_folio()
862 XA_STATE(xas, &mapping->i_pages, index); in __filemap_add_folio()
874 VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio); in __filemap_add_folio()
881 folio->mapping = mapping; in __filemap_add_folio()
882 folio->index = xas.xa_index; in __filemap_add_folio()
885 int order = -1, split_order = 0; in __filemap_add_folio()
892 xas_set_err(&xas, -EEXIST); in __filemap_add_folio()
899 if (order == -1) in __filemap_add_folio()
903 /* entry may have changed before we re-acquire the lock */ in __filemap_add_folio()
928 mapping->nrpages += nr; in __filemap_add_folio()
962 folio->mapping = NULL; in __filemap_add_folio()
963 /* Leave page->index set: truncation relies upon it */ in __filemap_add_folio()
1024 * filemap_invalidate_lock_two - lock invalidate_lock for two mappings
1037 down_write(&mapping1->invalidate_lock); in filemap_invalidate_lock_two()
1039 down_write_nested(&mapping2->invalidate_lock, 1); in filemap_invalidate_lock_two()
1044 * filemap_invalidate_unlock_two - unlock invalidate_lock for two mappings
1055 up_write(&mapping1->invalidate_lock); in filemap_invalidate_unlock_two()
1057 up_write(&mapping2->invalidate_lock); in filemap_invalidate_unlock_two()
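
A sketch of the intended use: an operation that must stabilise the page cache
of two files at once, such as a clone or dedupe (example_lock_pair is
hypothetical; the helper itself orders the two rwsems by address, so the
caller need not):

#include <linux/fs.h>

/* Hypothetical cross-file section, for illustration only. */
static void example_lock_pair(struct file *src, struct file *dst)
{
	filemap_invalidate_lock_two(src->f_mapping, dst->f_mapping);
	/* ... invalidate or exchange page-cache contents of both ... */
	filemap_invalidate_unlock_two(src->f_mapping, dst->f_mapping);
}
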
1091 * The page wait code treats the "wait->flags" somewhat unusually, because
1138 flags = wait->flags; in wake_page_function()
1140 if (test_bit(key->bit_nr, &key->folio->flags)) in wake_page_function()
1141 return -1; in wake_page_function()
1143 if (test_and_set_bit(key->bit_nr, &key->folio->flags)) in wake_page_function()
1144 return -1; in wake_page_function()
1150 * We are holding the wait-queue lock, but the waiter that in wake_page_function()
1155 * afterwards to avoid any races. This store-release pairs in wake_page_function()
1156 * with the load-acquire in folio_wait_bit_common(). in wake_page_function()
1158 smp_store_release(&wait->flags, flags | WQ_FLAG_WOKEN); in wake_page_function()
1159 wake_up_state(wait->private, mode); in wake_page_function()
1167 * After this list_del_init(&wait->entry) the wait entry in wake_page_function()
1168 * might be de-allocated and the process might even have in wake_page_function()
1171 list_del_init_careful(&wait->entry); in wake_page_function()
1185 spin_lock_irqsave(&q->lock, flags); in folio_wake_bit()
1200 spin_unlock_irqrestore(&q->lock, flags); in folio_wake_bit()
1225 if (wait->flags & WQ_FLAG_EXCLUSIVE) { in folio_trylock_flag()
1226 if (test_and_set_bit(bit_nr, &folio->flags)) in folio_trylock_flag()
1228 } else if (test_bit(bit_nr, &folio->flags)) in folio_trylock_flag()
1231 wait->flags |= WQ_FLAG_WOKEN | WQ_FLAG_DONE; in folio_trylock_flag()
1257 wait->func = wake_page_function; in folio_wait_bit_common()
1262 wait->flags = 0; in folio_wait_bit_common()
1264 wait->flags = WQ_FLAG_EXCLUSIVE; in folio_wait_bit_common()
1265 if (--unfairness < 0) in folio_wait_bit_common()
1266 wait->flags |= WQ_FLAG_CUSTOM; in folio_wait_bit_common()
1283 spin_lock_irq(&q->lock); in folio_wait_bit_common()
1287 spin_unlock_irq(&q->lock); in folio_wait_bit_common()
1303 * be very careful with the 'wait->flags', because in folio_wait_bit_common()
1312 flags = smp_load_acquire(&wait->flags); in folio_wait_bit_common()
1321 /* If we were non-exclusive, we're done */ in folio_wait_bit_common()
1338 wait->flags |= WQ_FLAG_DONE; in folio_wait_bit_common()
1344 * waiter from the wait-queues, but the folio waiters bit will remain in folio_wait_bit_common()
1356 * NOTE! The wait->flags weren't stable until we've done the in folio_wait_bit_common()
1365 * Also note that WQ_FLAG_WOKEN is sufficient for a non-exclusive in folio_wait_bit_common()
1369 return wait->flags & WQ_FLAG_DONE ? 0 : -EINTR; in folio_wait_bit_common()
1371 return wait->flags & WQ_FLAG_WOKEN ? 0 : -EINTR; in folio_wait_bit_common()
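
The comments above lean on one ordering idiom: the waker publishes
WQ_FLAG_WOKEN with a store-release and the waiter reads it with a
load-acquire, so everything the waker did beforehand is visible once the flag
is observed. Reduced to a self-contained sketch (names hypothetical; the real
wake_page_function() reads the old flags under the waitqueue lock first):

#include <linux/wait.h>

static void example_mark_woken(struct wait_queue_entry *wait)
{
	/* Stores before this release are visible to whoever sees WOKEN. */
	smp_store_release(&wait->flags, wait->flags | WQ_FLAG_WOKEN);
}

static bool example_saw_wakeup(struct wait_queue_entry *wait)
{
	/* Pairs with the store-release above. */
	return smp_load_acquire(&wait->flags) & WQ_FLAG_WOKEN;
}
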
1376 * migration_entry_wait_on_locked - Wait for a migration entry to be removed
1410 wait->func = wake_page_function; in migration_entry_wait_on_locked()
1413 wait->flags = 0; in migration_entry_wait_on_locked()
1415 spin_lock_irq(&q->lock); in migration_entry_wait_on_locked()
1419 spin_unlock_irq(&q->lock); in migration_entry_wait_on_locked()
1434 flags = smp_load_acquire(&wait->flags); in migration_entry_wait_on_locked()
1467 * folio_put_wait_locked - Drop a reference and wait for it to be unlocked
1477 * Return: 0 if the folio was unlocked or -EINTR if interrupted by a signal.
1485 * folio_unlock - Unlock a locked folio.
1505 * folio_end_read - End read on a folio.
1535 * folio_end_private_2 - Clear PG_private_2 and wake any waiters.
1555 * folio_wait_private_2 - Wait for PG_private_2 to be cleared on a folio.
1568 * folio_wait_private_2_killable - Wait for PG_private_2 to be cleared on a folio.
1575 * - 0 if successful.
1576 * - -EINTR if a fatal signal was encountered.
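
folio_end_read() above folds "mark uptodate" and "unlock and wake readers"
into a single atomic update of folio->flags. A sketch of the typical caller,
a read-completion handler, assuming a bio that spans exactly one folio
(example_read_end_io is hypothetical):

#include <linux/bio.h>
#include <linux/pagemap.h>

/* Hypothetical bi_end_io for a single-folio read, illustration only. */
static void example_read_end_io(struct bio *bio)
{
	struct folio *folio = bio_first_folio_all(bio);

	/* Sets uptodate on success, then clears PG_locked and wakes. */
	folio_end_read(folio, bio->bi_status == BLK_STS_OK);
	bio_put(bio);
}
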
1594 * completes. Do that now. If we fail, it's likely because of a big folio -
1603 * would otherwise not need non-IRQ handling. Just skip the in folio_end_dropbehind_write()
1607 if (folio->mapping) in folio_end_dropbehind_write()
1608 folio_unmap_invalidate(folio->mapping, folio, 0); in folio_end_dropbehind_write()
1614 * folio_end_writeback - End writeback against a folio.
1659 * __folio_lock - Get a lock on the folio, assuming we need to sleep to get it.
1681 wait->folio = folio; in __folio_lock_async()
1682 wait->bit_nr = PG_locked; in __folio_lock_async()
1684 spin_lock_irq(&q->lock); in __folio_lock_async()
1685 __add_wait_queue_entry_tail(q, &wait->wait); in __folio_lock_async()
1695 __remove_wait_queue(q, &wait->wait); in __folio_lock_async()
1697 ret = -EIOCBQUEUED; in __folio_lock_async()
1698 spin_unlock_irq(&q->lock); in __folio_lock_async()
1704 * 0 - folio is locked.
1705 * non-zero - folio is not locked.
1706 * mmap_lock or per-VMA lock has been released (mmap_read_unlock() or
1711 * with the folio locked and the mmap_lock/per-VMA lock is left unperturbed.
1715 unsigned int flags = vmf->flags; in __folio_lock_or_retry()
1719 * CAUTION! In this case, mmap_lock/per-VMA lock is not in __folio_lock_or_retry()
1748 * page_cache_next_miss() - Find the next gap in the page cache.
1753 * Search the range [index, min(index + max_scan - 1, ULONG_MAX)] for the
1763 * range specified (in which case 'return - index >= max_scan' will be true).
1764 * In the rare case of index wrap-around, 0 will be returned.
1769 XA_STATE(xas, &mapping->i_pages, index); in page_cache_next_miss()
1771 while (max_scan--) { in page_cache_next_miss()
1784 * page_cache_prev_miss() - Find the previous gap in the page cache.
1789 * Search the range [max(index - max_scan + 1, 0), index] for the
1799 * range specified (in which case 'index - return >= max_scan' will be true).
1800 * In the rare case of wrap-around, ULONG_MAX will be returned.
1805 XA_STATE(xas, &mapping->i_pages, index); in page_cache_prev_miss()
1807 while (max_scan--) { in page_cache_prev_miss()
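
Both gap finders walk the XArray locklessly, so callers such as the readahead
code wrap them in rcu_read_lock(). A minimal probe (example_first_gap is
hypothetical):

#include <linux/pagemap.h>

/* Hypothetical probe, for illustration only; call under rcu_read_lock(). */
static pgoff_t example_first_gap(struct address_space *mapping, pgoff_t index)
{
	/* Scan at most 16 slots starting at @index. */
	return page_cache_next_miss(mapping, index, 16);
}
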
1832 * increased by a speculative page cache (or GUP-fast) lookup as it can
1840 * filemap_get_entry - Get a page cache entry.
1853 XA_STATE(xas, &mapping->i_pages, index); in filemap_get_entry()
1883 * __filemap_get_folio - Find and get a reference to a folio.
1914 return ERR_PTR(-EAGAIN); in __filemap_get_folio()
1921 if (unlikely(folio->mapping != mapping)) { in __filemap_get_folio()
1960 if (index & ((1UL << order) - 1)) in __filemap_get_folio()
1966 err = -ENOMEM; in __filemap_get_folio()
1984 } while (order-- > min_order); in __filemap_get_folio()
1986 if (err == -EEXIST) in __filemap_get_folio()
1993 * Return -EAGAIN so that the caller retries in a in __filemap_get_folio()
1994 * blocking fashion instead of propagating -ENOMEM in __filemap_get_folio()
1997 if ((fgp_flags & FGP_NOWAIT) && err == -ENOMEM) in __filemap_get_folio()
1998 err = -EAGAIN; in __filemap_get_folio()
2010 return ERR_PTR(-ENOENT); in __filemap_get_folio()
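
A sketch of the common lookup-or-create pattern built on the FGP flags:
FGP_LOCK | FGP_CREAT returns the folio covering @pos locked, allocating it if
absent, and reports failure as an ERR_PTR(), never NULL (example_get_locked
is hypothetical):

#include <linux/pagemap.h>

/* Hypothetical lookup-or-create, for illustration only. */
static struct folio *example_get_locked(struct address_space *mapping,
					loff_t pos)
{
	return __filemap_get_folio(mapping, pos >> PAGE_SHIFT,
				   FGP_LOCK | FGP_CREAT,
				   mapping_gfp_mask(mapping));
}
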
2054 * find_get_entries - gang pagecache lookup
2057 * @end: The final page index (inclusive).
2066 * due to not-present entries or large folios.
2076 XA_STATE(xas, &mapping->i_pages, *start); in find_get_entries()
2081 indices[fbatch->nr] = xas.xa_index; in find_get_entries()
2088 int idx = folio_batch_count(fbatch) - 1; in find_get_entries()
2090 folio = fbatch->folios[idx]; in find_get_entries()
2094 nr = 1 << xa_get_order(&mapping->i_pages, indices[idx]); in find_get_entries()
2103 * find_lock_entries - Find a batch of pagecache entries.
2106 * @end: The final page index (inclusive).
2117 * due to not-present entries, large folios, folios which could not be
2125 XA_STATE(xas, &mapping->i_pages, *start); in find_lock_entries()
2135 base = folio->index; in find_lock_entries()
2140 if (base + nr - 1 > end) in find_lock_entries()
2144 if (folio->mapping != mapping || in find_lock_entries()
2151 base = xas.xa_index & ~(nr - 1); in find_lock_entries()
2156 if (base + nr - 1 > end) in find_lock_entries()
2162 indices[fbatch->nr] = xas.xa_index; in find_lock_entries()
2177 * filemap_get_folios - Get a batch of folios
2180 * @end: The final page index (inclusive)
2184 * index @start and up to index @end (inclusive). The folios are returned
2198 * filemap_get_folios_contig - Get a batch of contiguous folios
2201 * @end: The final page index (inclusive)
2215 XA_STATE(xas, &mapping->i_pages, *start); in filemap_get_folios_contig()
2244 *start = folio->index + nr; in filemap_get_folios_contig()
2247 xas_advance(&xas, folio_next_index(folio) - 1); in filemap_get_folios_contig()
2260 folio = fbatch->folios[nr - 1]; in filemap_get_folios_contig()
2270 * filemap_get_folios_tag - Get a batch of folios matching @tag
2273 * @end: The final page index (inclusive)
2291 XA_STATE(xas, &mapping->i_pages, *start); in filemap_get_folios_tag()
2305 *start = folio->index + nr; in filemap_get_folios_tag()
2312 * breaks the iteration when there is a page at index -1 but that is in filemap_get_folios_tag()
2315 if (end == (pgoff_t)-1) in filemap_get_folios_tag()
2316 *start = (pgoff_t)-1; in filemap_get_folios_tag()
2330 * ---R__________________________________________B__________
2343 ra->ra_pages /= 4; in shrink_readahead_size_eio()
2347 * filemap_get_read_batch - Get a batch of folios for read
2358 XA_STATE(xas, &mapping->i_pages, index); in filemap_get_read_batch()
2381 xas_advance(&xas, folio_next_index(folio) - 1); in filemap_get_read_batch()
2413 shrink_readahead_size_eio(&file->f_ra); in filemap_read_folio()
2414 return -EIO; in filemap_read_folio()
2426 if (!mapping->a_ops->is_partially_uptodate) in filemap_range_uptodate()
2428 if (mapping->host->i_blkbits >= folio_shift(folio)) in filemap_range_uptodate()
2432 count -= folio_pos(folio) - pos; in filemap_range_uptodate()
2435 pos -= folio_pos(folio); in filemap_range_uptodate()
2438 return mapping->a_ops->is_partially_uptodate(folio, pos, count); in filemap_range_uptodate()
2447 if (iocb->ki_flags & IOCB_NOWAIT) { in filemap_update_page()
2449 return -EAGAIN; in filemap_update_page()
2455 error = -EAGAIN; in filemap_update_page()
2456 if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO)) in filemap_update_page()
2458 if (!(iocb->ki_flags & IOCB_WAITQ)) { in filemap_update_page()
2467 error = __folio_lock_async(folio, iocb->ki_waitq); in filemap_update_page()
2473 if (!folio->mapping) in filemap_update_page()
2477 if (filemap_range_uptodate(mapping, iocb->ki_pos, count, folio, in filemap_update_page()
2481 error = -EAGAIN; in filemap_update_page()
2482 if (iocb->ki_flags & (IOCB_NOIO | IOCB_NOWAIT | IOCB_WAITQ)) in filemap_update_page()
2485 error = filemap_read_folio(iocb->ki_filp, mapping->a_ops->read_folio, in filemap_update_page()
2499 struct address_space *mapping = iocb->ki_filp->f_mapping; in filemap_create_folio()
2505 if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ)) in filemap_create_folio()
2506 return -EAGAIN; in filemap_create_folio()
2510 return -ENOMEM; in filemap_create_folio()
2511 if (iocb->ki_flags & IOCB_DONTCACHE) in filemap_create_folio()
2523 * pages or ->readahead() that need to hold invalidate_lock in filemap_create_folio()
2528 index = (iocb->ki_pos >> (PAGE_SHIFT + min_order)) << min_order; in filemap_create_folio()
2531 if (error == -EEXIST) in filemap_create_folio()
2536 error = filemap_read_folio(iocb->ki_filp, mapping->a_ops->read_folio, in filemap_create_folio()
2554 DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, folio->index); in filemap_readahead()
2556 if (iocb->ki_flags & IOCB_NOIO) in filemap_readahead()
2557 return -EAGAIN; in filemap_readahead()
2558 if (iocb->ki_flags & IOCB_DONTCACHE) in filemap_readahead()
2560 page_cache_async_ra(&ractl, folio, last_index - folio->index); in filemap_readahead()
2567 struct file *filp = iocb->ki_filp; in filemap_get_pages()
2568 struct address_space *mapping = filp->f_mapping; in filemap_get_pages()
2569 pgoff_t index = iocb->ki_pos >> PAGE_SHIFT; in filemap_get_pages()
2576 last_index = DIV_ROUND_UP(iocb->ki_pos + count, PAGE_SIZE); in filemap_get_pages()
2579 return -EINTR; in filemap_get_pages()
2581 filemap_get_read_batch(mapping, index, last_index - 1, fbatch); in filemap_get_pages()
2583 DEFINE_READAHEAD(ractl, filp, &filp->f_ra, mapping, index); in filemap_get_pages()
2585 if (iocb->ki_flags & IOCB_NOIO) in filemap_get_pages()
2586 return -EAGAIN; in filemap_get_pages()
2587 if (iocb->ki_flags & IOCB_NOWAIT) in filemap_get_pages()
2589 if (iocb->ki_flags & IOCB_DONTCACHE) in filemap_get_pages()
2591 page_cache_sync_ra(&ractl, last_index - index); in filemap_get_pages()
2592 if (iocb->ki_flags & IOCB_NOWAIT) in filemap_get_pages()
2594 filemap_get_read_batch(mapping, index, last_index - 1, fbatch); in filemap_get_pages()
2603 folio = fbatch->folios[folio_batch_count(fbatch) - 1]; in filemap_get_pages()
2610 if ((iocb->ki_flags & IOCB_WAITQ) && in filemap_get_pages()
2612 iocb->ki_flags |= IOCB_NOWAIT; in filemap_get_pages()
2619 trace_mm_filemap_get_pages(mapping, index, last_index - 1); in filemap_get_pages()
2624 if (likely(--fbatch->nr)) in filemap_get_pages()
2653 * filemap_read - Read data from the page cache.
2668 struct file *filp = iocb->ki_filp; in filemap_read()
2669 struct file_ra_state *ra = &filp->f_ra; in filemap_read()
2670 struct address_space *mapping = filp->f_mapping; in filemap_read()
2671 struct inode *inode = mapping->host; in filemap_read()
2676 loff_t last_pos = ra->prev_pos; in filemap_read()
2678 if (unlikely(iocb->ki_pos < 0)) in filemap_read()
2679 return -EINVAL; in filemap_read()
2680 if (unlikely(iocb->ki_pos >= inode->i_sb->s_maxbytes)) in filemap_read()
2685 iov_iter_truncate(iter, inode->i_sb->s_maxbytes - iocb->ki_pos); in filemap_read()
2693 * can no longer safely return -EIOCBQUEUED. Hence mark in filemap_read()
2696 if ((iocb->ki_flags & IOCB_WAITQ) && already_read) in filemap_read()
2697 iocb->ki_flags |= IOCB_NOWAIT; in filemap_read()
2699 if (unlikely(iocb->ki_pos >= i_size_read(inode))) in filemap_read()
2702 error = filemap_get_pages(iocb, iter->count, &fbatch, false); in filemap_read()
2710 * the correct value for "nr", which means the zero-filled in filemap_read()
2712 * another truncate extends the file - this is desired though). in filemap_read()
2715 if (unlikely(iocb->ki_pos >= isize)) in filemap_read()
2717 end_offset = min_t(loff_t, isize, iocb->ki_pos + iter->count); in filemap_read()
2729 if (!pos_same_folio(iocb->ki_pos, last_pos - 1, in filemap_read()
2736 size_t offset = iocb->ki_pos & (fsize - 1); in filemap_read()
2737 size_t bytes = min_t(loff_t, end_offset - iocb->ki_pos, in filemap_read()
2738 fsize - offset); in filemap_read()
2756 iocb->ki_pos += copied; in filemap_read()
2757 last_pos = iocb->ki_pos; in filemap_read()
2760 error = -EFAULT; in filemap_read()
2772 } while (iov_iter_count(iter) && iocb->ki_pos < isize && !error); in filemap_read()
2775 ra->prev_pos = last_pos; in filemap_read()
2782 struct address_space *mapping = iocb->ki_filp->f_mapping; in kiocb_write_and_wait()
2783 loff_t pos = iocb->ki_pos; in kiocb_write_and_wait()
2784 loff_t end = pos + count - 1; in kiocb_write_and_wait()
2786 if (iocb->ki_flags & IOCB_NOWAIT) { in kiocb_write_and_wait()
2788 return -EAGAIN; in kiocb_write_and_wait()
2804 return -EAGAIN; in filemap_invalidate_pages()
2815 * without clobbering -EIOCBQUEUED from ->direct_IO(). in filemap_invalidate_pages()
2823 struct address_space *mapping = iocb->ki_filp->f_mapping; in kiocb_invalidate_pages()
2825 return filemap_invalidate_pages(mapping, iocb->ki_pos, in kiocb_invalidate_pages()
2826 iocb->ki_pos + count - 1, in kiocb_invalidate_pages()
2827 iocb->ki_flags & IOCB_NOWAIT); in kiocb_invalidate_pages()
2832 * generic_file_read_iter - generic filesystem read routine
2839 * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall
2843 * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O
2845 * can be read, -EAGAIN shall be returned. When readahead would be
2861 if (iocb->ki_flags & IOCB_DIRECT) { in generic_file_read_iter()
2862 struct file *file = iocb->ki_filp; in generic_file_read_iter()
2863 struct address_space *mapping = file->f_mapping; in generic_file_read_iter()
2864 struct inode *inode = mapping->host; in generic_file_read_iter()
2871 retval = mapping->a_ops->direct_IO(iocb, iter); in generic_file_read_iter()
2873 iocb->ki_pos += retval; in generic_file_read_iter()
2874 count -= retval; in generic_file_read_iter()
2876 if (retval != -EIOCBQUEUED) in generic_file_read_iter()
2877 iov_iter_revert(iter, count - iov_iter_count(iter)); in generic_file_read_iter()
2890 if (iocb->ki_pos >= i_size_read(inode)) in generic_file_read_iter()
2908 size = min(size, folio_size(folio) - offset); in splice_folio_into_pipe()
2913 size_t part = min_t(size_t, PAGE_SIZE - offset, size - spliced); in splice_folio_into_pipe()
2922 pipe->head++; in splice_folio_into_pipe()
2932 * filemap_splice_read - Splice data from a file's pagecache into a pipe
2945 * to be read; -EAGAIN will be returned if the pipe had no space, and some
2961 if (unlikely(*ppos >= in->f_mapping->host->i_sb->s_maxbytes)) in filemap_splice_read()
2969 npages = max_t(ssize_t, pipe->max_usage - used, 0); in filemap_splice_read()
2977 if (*ppos >= i_size_read(in->f_mapping->host)) in filemap_splice_read()
2989 * the correct value for "nr", which means the zero-filled in filemap_splice_read()
2991 * another truncate extends the file - this is desired though). in filemap_splice_read()
2993 isize = i_size_read(in->f_mapping->host); in filemap_splice_read()
3002 writably_mapped = mapping_writably_mapped(in->f_mapping); in filemap_splice_read()
3020 n = min_t(loff_t, len, isize - *ppos); in filemap_splice_read()
3024 len -= n; in filemap_splice_read()
3027 in->f_ra.prev_pos = *ppos; in filemap_splice_read()
3047 const struct address_space_operations *ops = mapping->a_ops; in folio_seek_hole_data()
3048 size_t offset, bsz = i_blocksize(mapping->host); in folio_seek_hole_data()
3052 if (!ops->is_partially_uptodate) in folio_seek_hole_data()
3058 if (unlikely(folio->mapping != mapping)) in folio_seek_hole_data()
3061 offset = offset_in_folio(folio, start) & ~(bsz - 1); in folio_seek_hole_data()
3064 if (ops->is_partially_uptodate(folio, offset, bsz) == in folio_seek_hole_data()
3067 start = (start + bsz) & ~((u64)bsz - 1); in folio_seek_hole_data()
3084 * mapping_seek_hole_data - Seek for SEEK_DATA / SEEK_HOLE in the page cache.
3093 * entirely memory-based such as tmpfs, and filesystems which support
3096 * Return: The requested offset on success, or -ENXIO if @whence specifies
3098 * after @end - 1, so SEEK_HOLE returns @end if all the bytes between @start
3104 XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT); in mapping_seek_hole_data()
3105 pgoff_t max = (end - 1) >> PAGE_SHIFT; in mapping_seek_hole_data()
3110 return -ENXIO; in mapping_seek_hole_data()
3137 start = -ENXIO; in mapping_seek_hole_data()
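
mapping_seek_hole_data() backs the generic SEEK_HOLE/SEEK_DATA support, so
the same semantics are visible from userspace through lseek(2). A userspace
sketch (example_next_hole is hypothetical):

#define _GNU_SOURCE
#include <unistd.h>

/* Next hole at or after @start; returns -1 with errno == ENXIO past EOF. */
static off_t example_next_hole(int fd, off_t start)
{
	return lseek(fd, start, SEEK_HOLE);
}
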
3150 * lock_folio_maybe_drop_mmap - lock the page, possibly dropping the mmap_lock
3151 * @vmf: the vm_fault for this fault.
3152 * @folio: the folio to lock.
3153 * @fpin: the pointer to the file we may pin (or is already pinned).
3172 if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) in lock_folio_maybe_drop_mmap()
3176 if (vmf->flags & FAULT_FLAG_KILLABLE) { in lock_folio_maybe_drop_mmap()
3204 struct file *file = vmf->vma->vm_file; in do_sync_mmap_readahead()
3205 struct file_ra_state *ra = &file->f_ra; in do_sync_mmap_readahead()
3206 struct address_space *mapping = file->f_mapping; in do_sync_mmap_readahead()
3207 DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff); in do_sync_mmap_readahead()
3209 unsigned long vm_flags = vmf->vma->vm_flags; in do_sync_mmap_readahead()
3216 ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1); in do_sync_mmap_readahead()
3217 ra->size = HPAGE_PMD_NR; in do_sync_mmap_readahead()
3223 ra->size *= 2; in do_sync_mmap_readahead()
3224 ra->async_size = HPAGE_PMD_NR; in do_sync_mmap_readahead()
3230 /* If we don't want any read-ahead, don't bother */ in do_sync_mmap_readahead()
3233 if (!ra->ra_pages) in do_sync_mmap_readahead()
3238 page_cache_sync_ra(&ractl, ra->ra_pages); in do_sync_mmap_readahead()
3243 mmap_miss = READ_ONCE(ra->mmap_miss); in do_sync_mmap_readahead()
3245 WRITE_ONCE(ra->mmap_miss, ++mmap_miss); in do_sync_mmap_readahead()
3249 * stop bothering with read-ahead. It will only hurt. in do_sync_mmap_readahead()
3255 * mmap read-around in do_sync_mmap_readahead()
3258 ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2); in do_sync_mmap_readahead()
3259 ra->size = ra->ra_pages; in do_sync_mmap_readahead()
3260 ra->async_size = ra->ra_pages / 4; in do_sync_mmap_readahead()
3261 ractl._index = ra->start; in do_sync_mmap_readahead()
3274 struct file *file = vmf->vma->vm_file; in do_async_mmap_readahead()
3275 struct file_ra_state *ra = &file->f_ra; in do_async_mmap_readahead()
3276 DEFINE_READAHEAD(ractl, file, ra, file->f_mapping, vmf->pgoff); in do_async_mmap_readahead()
3280 /* If we don't want any read-ahead, don't bother */ in do_async_mmap_readahead()
3281 if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages) in do_async_mmap_readahead()
3284 mmap_miss = READ_ONCE(ra->mmap_miss); in do_async_mmap_readahead()
3286 WRITE_ONCE(ra->mmap_miss, --mmap_miss); in do_async_mmap_readahead()
3290 page_cache_async_ra(&ractl, folio, ra->ra_pages); in do_async_mmap_readahead()
3297 struct vm_area_struct *vma = vmf->vma; in filemap_fault_recheck_pte_none()
3311 * scenario while holding the PT lock, to not degrade non-mlocked in filemap_fault_recheck_pte_none()
3315 if (!(vma->vm_flags & VM_LOCKED)) in filemap_fault_recheck_pte_none()
3318 if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID)) in filemap_fault_recheck_pte_none()
3321 ptep = pte_offset_map_ro_nolock(vma->vm_mm, vmf->pmd, vmf->address, in filemap_fault_recheck_pte_none()
3322 &vmf->ptl); in filemap_fault_recheck_pte_none()
3329 spin_lock(vmf->ptl); in filemap_fault_recheck_pte_none()
3332 spin_unlock(vmf->ptl); in filemap_fault_recheck_pte_none()
3339 * filemap_fault - read in file data for page fault handling
3349 * vma->vm_mm->mmap_lock must be held on entry.
3359 * Return: bitwise-OR of %VM_FAULT_ codes.
3364 struct file *file = vmf->vma->vm_file; in filemap_fault()
3366 struct address_space *mapping = file->f_mapping; in filemap_fault()
3367 struct inode *inode = mapping->host; in filemap_fault()
3368 pgoff_t max_idx, index = vmf->pgoff; in filemap_fault()
3388 if (!(vmf->flags & FAULT_FLAG_TRIED)) in filemap_fault()
3401 count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT); in filemap_fault()
3415 vmf->gfp_mask); in filemap_fault()
3428 if (unlikely(folio->mapping != mapping)) { in filemap_fault()
3437 * that it's up-to-date. If not, it is going to be due to an error, in filemap_fault()
3463 * time to return to the upper layer and have it re-find the vma and in filemap_fault()
3484 vmf->page = folio_file_page(folio, index); in filemap_fault()
3489 * Umm, take care of errors if the page isn't up-to-date. in filemap_fault()
3490 * Try to re-read it _once_. We do this synchronously, in filemap_fault()
3495 error = filemap_read_folio(file, mapping->a_ops->read_folio, folio); in filemap_fault()
3509 * re-find the vma and come back and find our hopefully still populated in filemap_fault()
3525 struct mm_struct *mm = vmf->vma->vm_mm; in filemap_map_pmd()
3528 if (pmd_trans_huge(*vmf->pmd)) { in filemap_map_pmd()
3534 if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) { in filemap_map_pmd()
3544 if (pmd_none(*vmf->pmd) && vmf->prealloc_pte) in filemap_map_pmd()
3545 pmd_install(mm, vmf->pmd, &vmf->prealloc_pte); in filemap_map_pmd()
3574 if (folio->mapping != mapping) in next_uptodate_folio()
3578 max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE); in next_uptodate_folio()
3579 if (xas->xa_index >= max_idx) in next_uptodate_folio()
3603 pte_t *old_ptep = vmf->pte; in filemap_map_folio_range()
3612 * In such a situation, read-ahead is only a waste of IO. in filemap_map_folio_range()
3614 * we can stop read-ahead. in filemap_map_folio_range()
3622 * fault-around logic. in filemap_map_folio_range()
3624 if (!pte_none(ptep_get(&vmf->pte[count]))) in filemap_map_folio_range()
3634 if (in_range(vmf->address, addr, count * PAGE_SIZE)) in filemap_map_folio_range()
3640 vmf->pte += count; in filemap_map_folio_range()
3643 } while (--nr_pages > 0); in filemap_map_folio_range()
3649 if (in_range(vmf->address, addr, count * PAGE_SIZE)) in filemap_map_folio_range()
3653 vmf->pte = old_ptep; in filemap_map_folio_range()
3663 struct page *page = &folio->page; in filemap_map_order0_folio()
3675 * the fault-around logic. in filemap_map_order0_folio()
3677 if (!pte_none(ptep_get(vmf->pte))) in filemap_map_order0_folio()
3680 if (vmf->address == addr) in filemap_map_order0_folio()
3693 struct vm_area_struct *vma = vmf->vma; in filemap_map_pages()
3694 struct file *file = vma->vm_file; in filemap_map_pages()
3695 struct address_space *mapping = file->f_mapping; in filemap_map_pages()
3698 XA_STATE(xas, &mapping->i_pages, start_pgoff); in filemap_map_pages()
3714 addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT); in filemap_map_pages()
3715 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl); in filemap_map_pages()
3716 if (!vmf->pte) { in filemap_map_pages()
3722 file_end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE) - 1; in filemap_map_pages()
3730 addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT; in filemap_map_pages()
3731 vmf->pte += xas.xa_index - last_pgoff; in filemap_map_pages()
3733 end = folio_next_index(folio) - 1; in filemap_map_pages()
3734 nr_pages = min(end, end_pgoff) - xas.xa_index + 1; in filemap_map_pages()
3741 xas.xa_index - folio->index, addr, in filemap_map_pages()
3747 add_mm_counter(vma->vm_mm, folio_type, rss); in filemap_map_pages()
3748 pte_unmap_unlock(vmf->pte, vmf->ptl); in filemap_map_pages()
3753 mmap_miss_saved = READ_ONCE(file->f_ra.mmap_miss); in filemap_map_pages()
3755 WRITE_ONCE(file->f_ra.mmap_miss, 0); in filemap_map_pages()
3757 WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss_saved - mmap_miss); in filemap_map_pages()
3765 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in filemap_page_mkwrite()
3766 struct folio *folio = page_folio(vmf->page); in filemap_page_mkwrite()
3769 sb_start_pagefault(mapping->host->i_sb); in filemap_page_mkwrite()
3770 file_update_time(vmf->vma->vm_file); in filemap_page_mkwrite()
3772 if (folio->mapping != mapping) { in filemap_page_mkwrite()
3785 sb_end_pagefault(mapping->host->i_sb); in filemap_page_mkwrite()
3799 struct address_space *mapping = file->f_mapping; in generic_file_mmap()
3801 if (!mapping->a_ops->read_folio) in generic_file_mmap()
3802 return -ENOEXEC; in generic_file_mmap()
3804 vma->vm_ops = &generic_file_vm_ops; in generic_file_mmap()
3809 * This is for filesystems which do not implement ->writepage.
3814 return -EINVAL; in generic_file_readonly_mmap()
3824 return -ENOSYS; in generic_file_mmap()
3828 return -ENOSYS; in generic_file_readonly_mmap()
3843 filler = mapping->a_ops->read_folio; in do_read_cache_folio()
3850 return ERR_PTR(-ENOMEM); in do_read_cache_folio()
3855 if (err == -EEXIST) in do_read_cache_folio()
3872 if (!folio->mapping) { in do_read_cache_folio()
3899 * read_cache_folio - Read into page cache, fill it if needed.
3902 * @filler: Function to perform the read, or NULL to use aops->read_folio().
3911 * Context: May sleep. Expects mapping->invalidate_lock to be held.
3923 * mapping_read_folio_gfp - Read into page cache, using specified allocation flags.
3932 * possible and so is EINTR. If ->read_folio returns another error,
3935 * The function expects mapping->invalidate_lock to be already held.
3953 return &folio->page; in do_read_cache_page()
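
A usage sketch for the reader above: with a NULL filler the mapping's own
->read_folio() is used, and per the kernel-doc the caller already holds
mapping->invalidate_lock (example_read_index is hypothetical):

#include <linux/pagemap.h>

/* Hypothetical wrapper, for illustration only. */
static struct folio *example_read_index(struct address_space *mapping,
					pgoff_t index)
{
	/* Returns an uptodate folio with a reference, or an ERR_PTR(). */
	return read_cache_folio(mapping, index, NULL, NULL);
}
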
3966 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
3974 * If the page does not get brought uptodate, return -EIO.
3976 * The function expects mapping->invalidate_lock to be already held.
3997 errseq_set(&filp->f_mapping->wb_err, -EIO); in dio_warn_stale_pagecache()
4003 pr_crit("File: %s PID: %d Comm: %.20s\n", path, current->pid, in dio_warn_stale_pagecache()
4004 current->comm); in dio_warn_stale_pagecache()
4010 struct address_space *mapping = iocb->ki_filp->f_mapping; in kiocb_invalidate_post_direct_write()
4012 if (mapping->nrpages && in kiocb_invalidate_post_direct_write()
4014 iocb->ki_pos >> PAGE_SHIFT, in kiocb_invalidate_post_direct_write()
4015 (iocb->ki_pos + count - 1) >> PAGE_SHIFT)) in kiocb_invalidate_post_direct_write()
4016 dio_warn_stale_pagecache(iocb->ki_filp); in kiocb_invalidate_post_direct_write()
4022 struct address_space *mapping = iocb->ki_filp->f_mapping; in generic_file_direct_write()
4032 if (written == -EBUSY) in generic_file_direct_write()
4037 written = mapping->a_ops->direct_IO(iocb, from); in generic_file_direct_write()
4041 * cached by non-direct readahead, or faulted in by get_user_pages() in generic_file_direct_write()
4057 struct inode *inode = mapping->host; in generic_file_direct_write()
4058 loff_t pos = iocb->ki_pos; in generic_file_direct_write()
4062 write_len -= written; in generic_file_direct_write()
4063 if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) { in generic_file_direct_write()
4067 iocb->ki_pos = pos; in generic_file_direct_write()
4069 if (written != -EIOCBQUEUED) in generic_file_direct_write()
4070 iov_iter_revert(from, write_len - iov_iter_count(from)); in generic_file_direct_write()
4077 struct file *file = iocb->ki_filp; in generic_perform_write()
4078 loff_t pos = iocb->ki_pos; in generic_perform_write()
4079 struct address_space *mapping = file->f_mapping; in generic_perform_write()
4080 const struct address_space_operations *a_ops = mapping->a_ops; in generic_perform_write()
4094 offset = pos & (chunk - 1); in generic_perform_write()
4095 bytes = min(chunk - offset, bytes); in generic_perform_write()
4099 status = -EINTR; in generic_perform_write()
4103 status = a_ops->write_begin(file, mapping, pos, bytes, in generic_perform_write()
4109 if (bytes > folio_size(folio) - offset) in generic_perform_write()
4110 bytes = folio_size(folio) - offset; in generic_perform_write()
4124 status = a_ops->write_end(file, mapping, pos, bytes, copied, in generic_perform_write()
4127 iov_iter_revert(i, copied - max(status, 0L)); in generic_perform_write()
4135 * A short copy made ->write_end() reject the in generic_perform_write()
4153 status = -EFAULT; in generic_perform_write()
4164 iocb->ki_pos += written; in generic_perform_write()
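
A sketch of how a buffered-only filesystem might sit on top of the loop
above, assuming the stock generic helpers (example_write_iter is
hypothetical):

#include <linux/fs.h>
#include <linux/uio.h>

/* Hypothetical ->write_iter, for illustration only. */
static ssize_t example_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = generic_perform_write(iocb, from);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);	/* honours O_SYNC */
	return ret;
}
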
4170 * __generic_file_write_iter - write data to a file
4192 struct file *file = iocb->ki_filp; in __generic_file_write_iter()
4193 struct address_space *mapping = file->f_mapping; in __generic_file_write_iter()
4194 struct inode *inode = mapping->host; in __generic_file_write_iter()
4205 if (iocb->ki_flags & IOCB_DIRECT) { in __generic_file_write_iter()
4212 * page-cache pages correctly). in __generic_file_write_iter()
4225 * generic_file_write_iter - write data to a file
4239 struct file *file = iocb->ki_filp; in generic_file_write_iter()
4240 struct inode *inode = file->f_mapping->host; in generic_file_write_iter()
4256 * filemap_release_folio() - Release fs-specific metadata on a folio.
4261 * (presumably at folio->private).
4274 struct address_space * const mapping = folio->mapping; in filemap_release_folio()
4282 if (mapping && mapping->a_ops->release_folio) in filemap_release_folio()
4283 return mapping->a_ops->release_folio(folio, gfp); in filemap_release_folio()
4289 * filemap_invalidate_inode - Invalidate/forcibly write back a range of an inode's pagecache
4293 * @end: Last byte in range (inclusive), or LLONG_MAX for everything from start
4304 struct address_space *mapping = inode->i_mapping; in filemap_invalidate_inode()
4307 pgoff_t nr = end == LLONG_MAX ? ULONG_MAX : last - first + 1; in filemap_invalidate_inode()
4309 if (!mapping || !mapping->nrpages || end < start) in filemap_invalidate_inode()
4315 if (!mapping->nrpages) in filemap_invalidate_inode()
4344 * filemap_cachestat() - compute the page cache statistics of a mapping
4347 * @last_index: The final page index (inclusive).
4351 * page range of [first_index, last_index] (inclusive). The statistics
4358 XA_STATE(xas, &mapping->i_pages, first_index); in filemap_cachestat()
4378 * the rcu-protected xarray. in filemap_cachestat()
4387 folio_last_index = folio_first_index + nr_pages - 1; in filemap_cachestat()
4391 nr_pages -= first_index - folio_first_index; in filemap_cachestat()
4394 nr_pages -= folio_last_index - last_index; in filemap_cachestat()
4401 cs->nr_evicted += nr_pages; in filemap_cachestat()
4405 /* shmem file - in swap cache */ in filemap_cachestat()
4428 cs->nr_recently_evicted += nr_pages; in filemap_cachestat()
4434 cs->nr_cache += nr_pages; in filemap_cachestat()
4437 cs->nr_dirty += nr_pages; in filemap_cachestat()
4440 cs->nr_writeback += nr_pages; in filemap_cachestat()
4458 if (f->f_mode & FMODE_WRITE) in can_do_cachestat()
4479 * `off` and `len` must be non-negative integers. If `len` > 0,
4493 * zero - success
4494 * -EFAULT - cstat or cstat_range points to an illegal address
4495 * -EINVAL - invalid flags
4496 * -EBADF - invalid file descriptor
4497 * -EOPNOTSUPP - file descriptor is of a hugetlbfs file
4510 return -EBADF; in SYSCALL_DEFINE4()
4514 return -EFAULT; in SYSCALL_DEFINE4()
4518 return -EOPNOTSUPP; in SYSCALL_DEFINE4()
4521 return -EPERM; in SYSCALL_DEFINE4()
4524 return -EINVAL; in SYSCALL_DEFINE4()
4528 csr.len == 0 ? ULONG_MAX : (csr.off + csr.len - 1) >> PAGE_SHIFT; in SYSCALL_DEFINE4()
4530 mapping = fd_file(f)->f_mapping; in SYSCALL_DEFINE4()
4534 return -EFAULT; in SYSCALL_DEFINE4()
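
From userspace the syscall looks like this; a sketch assuming kernel headers
new enough (>= 6.4) to provide __NR_cachestat and the uapi structures
(example_cachestat is hypothetical):

#include <sys/syscall.h>
#include <unistd.h>
#include <linux/mman.h>	/* struct cachestat, struct cachestat_range */

/* Residency stats for the whole file: len == 0 means "to EOF". */
static long example_cachestat(int fd, struct cachestat *cs)
{
	struct cachestat_range range = { .off = 0, .len = 0 };

	return syscall(__NR_cachestat, fd, &range, cs, 0);
}
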