mm/truncate.c: lines matching "non", "-", "inclusive"

1 // SPDX-License-Identifier: GPL-2.0-only
3 * mm/truncate.c - code for taking down pages from address_spaces
12 #include <linux/backing-dev.h>
29 XA_STATE(xas, &mapping->i_pages, start); in clear_shadow_entries()
38 spin_lock(&mapping->host->i_lock); in clear_shadow_entries()
49 inode_add_lru(mapping->host); in clear_shadow_entries()
50 spin_unlock(&mapping->host->i_lock); in clear_shadow_entries()
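clear_shadow_entries() walks mapping->i_pages and erases only the workingset shadow entries in the given range, under the i_pages and inode locks shown above. A minimal sketch of that xas_for_each() pattern, with the inode LRU and shrinkability bookkeeping omitted (illustrative only, not the kernel's implementation):

#include <linux/pagemap.h>
#include <linux/xarray.h>

/* Erase only value (shadow) entries between start and end, inclusive. */
static void drop_value_entries(struct address_space *mapping,
			       pgoff_t start, pgoff_t end)
{
	XA_STATE(xas, &mapping->i_pages, start);
	void *entry;

	xas_lock_irq(&xas);
	xas_for_each(&xas, entry, end) {
		if (xa_is_value(entry))		/* shadow, not a folio */
			xas_store(&xas, NULL);
	}
	xas_unlock_irq(&xas);
}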
63 XA_STATE(xas, &mapping->i_pages, indices[0]); in truncate_folio_batch_exceptionals()
73 if (xa_is_value(fbatch->folios[j])) in truncate_folio_batch_exceptionals()
81 if (xa_is_value(fbatch->folios[i])) in truncate_folio_batch_exceptionals()
90 spin_lock(&mapping->host->i_lock); in truncate_folio_batch_exceptionals()
93 xas_for_each(&xas, folio, indices[nr-1]) { in truncate_folio_batch_exceptionals()
100 inode_add_lru(mapping->host); in truncate_folio_batch_exceptionals()
101 spin_unlock(&mapping->host->i_lock); in truncate_folio_batch_exceptionals()
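In truncate_folio_batch_exceptionals(), exceptional (shadow) entries share the batch with real folios and are told apart with xa_is_value(), as on lines 73 and 81 above. A small illustrative helper using the same test (assumed, not part of mm/truncate.c):

#include <linux/pagevec.h>
#include <linux/xarray.h>

/* Count how many slots of a batch hold shadow entries, not folios. */
static unsigned int count_shadow_entries(struct folio_batch *fbatch)
{
	unsigned int i, nr = 0;

	for (i = 0; i < folio_batch_count(fbatch); i++)
		if (xa_is_value(fbatch->folios[i]))
			nr++;
	return nr;
}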
107 * folio_invalidate - Invalidate part or all of a folio.
119 * blocks on-disk.
123 const struct address_space_operations *aops = folio->mapping->a_ops; in folio_invalidate()
125 if (aops->invalidate_folio) in folio_invalidate()
126 aops->invalidate_folio(folio, offset, length); in folio_invalidate()
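folio_invalidate() simply dispatches to the filesystem's ->invalidate_folio() so private metadata for the affected byte range can be released. A hypothetical minimal implementation (the "myfs_" name is illustrative, not from the kernel tree):

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>

static void myfs_invalidate_folio(struct folio *folio, size_t offset,
				  size_t length)
{
	/* Drop per-folio private data only on a full invalidation. */
	if (offset == 0 && length == folio_size(folio))
		kfree(folio_detach_private(folio));
}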
131 * If truncate cannot remove the fs-private metadata from the page, the page
135 * We need to bail out if page->mapping is no longer equal to the original
149 * Some filesystems seem to re-dirty the page even after in truncate_cleanup_folio()
158 if (folio->mapping != mapping) in truncate_inode_folio()
159 return -EIO; in truncate_inode_folio()
183 offset = start - pos; in truncate_inode_partial_folio()
188 length = length - offset; in truncate_inode_partial_folio()
190 length = end + 1 - pos - offset; in truncate_inode_partial_folio()
194 truncate_inode_folio(folio->mapping, folio); in truncate_inode_partial_folio()
203 if (!mapping_inaccessible(folio->mapping)) in truncate_inode_partial_folio()
214 truncate_inode_folio(folio->mapping, folio); in truncate_inode_partial_folio()
225 return -EINVAL; in generic_error_remove_folio()
230 if (!S_ISREG(mapping->host->i_mode)) in generic_error_remove_folio()
231 return -EIO; in generic_error_remove_folio()
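generic_error_remove_folio() rejects non-regular files (the -EIO above) and is what most filesystems plug into their address_space_operations for memory-failure handling. A minimal, hypothetical wiring:

#include <linux/fs.h>

static const struct address_space_operations myfs_aops = {
	.error_remove_folio	= generic_error_remove_folio,
	/* other operations omitted from this sketch */
};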
237 * mapping_evict_folio() - Remove an unused folio from the page-cache.
265 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
268 * @lend: offset to which to truncate (inclusive)
274 * Truncate takes two passes - the first pass is nonblocking. It will not
280 * We pass down the cache-hot hint to the page freeing code. Even if the
284 * Note that since ->invalidate_folio() accepts a range to invalidate
291 pgoff_t start; /* inclusive */ in truncate_inode_pages_range()
307 * Note that 'end' is exclusive while 'lend' is inclusive. in truncate_inode_pages_range()
309 start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT; in truncate_inode_pages_range()
310 if (lend == -1) in truncate_inode_pages_range()
312 * lend == -1 indicates end-of-file so we have to set 'end' in truncate_inode_pages_range()
313 * to the highest possible pgoff_t and since the type is in truncate_inode_pages_range()
314 * unsigned we're using -1. in truncate_inode_pages_range()
316 end = -1; in truncate_inode_pages_range()
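Worked example (illustrative, assuming 4096-byte pages): for lstart = 6000 and lend = -1, start = (6000 + 4095) >> 12 = 2, so the folio covering bytes 4096..8191 is only partially inside the range and is left to truncate_inode_partial_folio(), while end becomes (pgoff_t)-1, the highest possible index, meaning end-of-file.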
322 while (index < end && find_lock_entries(mapping, &index, end - 1, in truncate_inode_pages_range()
341 end = folio->index; in truncate_inode_pages_range()
353 end = folio->index; in truncate_inode_pages_range()
362 if (!find_get_entries(mapping, &index, end - 1, &fbatch, in truncate_inode_pages_range()
375 /* We rely upon deletion not changing page->index */ in truncate_inode_pages_range()
393 * truncate_inode_pages - truncate *all* the pages from an offset
397 * Called under (and serialised by) inode->i_rwsem and
398 * mapping->invalidate_lock.
402 * mapping->nrpages can be non-zero when this function returns even after
407 truncate_inode_pages_range(mapping, lstart, (loff_t)-1); in truncate_inode_pages()
412 * truncate_inode_pages_final - truncate *all* pages before inode dies
415 * Called under (and serialized by) inode->i_rwsem.
438 xa_lock_irq(&mapping->i_pages); in truncate_inode_pages_final()
439 xa_unlock_irq(&mapping->i_pages); in truncate_inode_pages_final()
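The paired xa_lock_irq()/xa_unlock_irq() above serialises truncate_inode_pages_final() against a racing page-cache lookup before the final truncate. The typical caller is a filesystem's ->evict_inode; a sketch (assumed, simplified):

#include <linux/fs.h>
#include <linux/mm.h>

static void myfs_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);
}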
447 * mapping_try_invalidate - Invalidate all the evictable folios of one inode
450 * @end: the offset 'to' which to invalidate (inclusive)
474 /* We rely upon deletion not changing folio->index */ in mapping_try_invalidate()
498 clear_shadow_entries(mapping, indices[0], indices[nr-1]); in mapping_try_invalidate()
508 * invalidate_mapping_pages - Invalidate all clean, unlocked cache of one inode
511 * @end: the offset 'to' which to invalidate (inclusive)
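invalidate_mapping_pages() is the best-effort variant: it skips dirty, locked and mapped folios rather than failing. A sketched caller, assumed here in the spirit of POSIX_FADV_DONTNEED or drop_caches:

#include <linux/fs.h>
#include <linux/printk.h>

static void myfs_drop_clean_cache(struct address_space *mapping)
{
	/* end == -1 covers every possible page index. */
	unsigned long nr = invalidate_mapping_pages(mapping, 0, (pgoff_t)-1);

	pr_debug("myfs: dropped %lu clean folios\n", nr);
}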
532 if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL) in folio_launder()
534 return mapping->a_ops->launder_folio(folio); in folio_launder()
558 if (folio->mapping != mapping) in folio_unmap_invalidate()
559 return -EBUSY; in folio_unmap_invalidate()
561 return -EBUSY; in folio_unmap_invalidate()
563 spin_lock(&mapping->host->i_lock); in folio_unmap_invalidate()
564 xa_lock_irq(&mapping->i_pages); in folio_unmap_invalidate()
570 xa_unlock_irq(&mapping->i_pages); in folio_unmap_invalidate()
572 inode_add_lru(mapping->host); in folio_unmap_invalidate()
573 spin_unlock(&mapping->host->i_lock); in folio_unmap_invalidate()
578 xa_unlock_irq(&mapping->i_pages); in folio_unmap_invalidate()
579 spin_unlock(&mapping->host->i_lock); in folio_unmap_invalidate()
580 return -EBUSY; in folio_unmap_invalidate()
584 * invalidate_inode_pages2_range - remove range of pages from an address_space
587 * @end: the page offset 'to' which to invalidate (inclusive)
592 * Return: -EBUSY if any pages could not be invalidated.
617 /* We rely upon deletion not changing folio->index */ in invalidate_inode_pages2_range()
623 ret = -EBUSY; in invalidate_inode_pages2_range()
633 (1 + end - indices[i]), false); in invalidate_inode_pages2_range()
638 if (unlikely(folio->mapping != mapping)) { in invalidate_inode_pages2_range()
651 clear_shadow_entries(mapping, indices[0], indices[nr-1]); in invalidate_inode_pages2_range()
665 unmap_mapping_pages(mapping, start, end - start + 1, false); in invalidate_inode_pages2_range()
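invalidate_inode_pages2_range() is the strict variant: any folio it cannot take down makes the whole call return -EBUSY. A sketched direct-I/O style helper (assumed names) that writes dirty data back first, then forces the byte range out of cache:

#include <linux/fs.h>
#include <linux/pagemap.h>

static int myfs_flush_and_invalidate(struct address_space *mapping,
				     loff_t pos, loff_t len)
{
	int err;

	/* Write dirty data back first so invalidation does not hit busy folios. */
	err = filemap_write_and_wait_range(mapping, pos, pos + len - 1);
	if (err)
		return err;

	return invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
					     (pos + len - 1) >> PAGE_SHIFT);
}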
672 * invalidate_inode_pages2 - remove all pages from an address_space
678 * Return: -EBUSY if any pages could not be invalidated.
682 return invalidate_inode_pages2_range(mapping, 0, -1); in invalidate_inode_pages2()
687 * truncate_pagecache - unmap and remove pagecache that has been truncated
697 * with on-disk format, and the filesystem would not have to deal with
703 struct address_space *mapping = inode->i_mapping; in truncate_pagecache()
709 * single-page unmaps. However after this first call, and in truncate_pagecache()
722 * truncate_setsize - update inode and pagecache for a new file size
736 loff_t oldsize = inode->i_size; in truncate_setsize()
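truncate_setsize() updates i_size and then trims the pagecache via truncate_pagecache(), and must run before the filesystem frees on-disk blocks. A sketched shrink path (assumed; myfs_free_blocks is hypothetical):

#include <linux/fs.h>
#include <linux/mm.h>

/* Called with inode->i_rwsem held, shrinking the file to newsize. */
static void myfs_shrink(struct inode *inode, loff_t newsize)
{
	truncate_setsize(inode, newsize);
	/* myfs_free_blocks(inode, newsize); -- reclaim disk space after */
}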
746 * pagecache_isize_extended - update pagecache after extension of i_size
754 * write access to the page. The filesystem will update its per-block
760 * The function must be called while we still hold i_rwsem - this not only
770 WARN_ON(to > inode->i_size); in pagecache_isize_extended()
776 if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1))) in pagecache_isize_extended()
779 folio = filemap_lock_folio(inode->i_mapping, from / PAGE_SIZE); in pagecache_isize_extended()
791 * The post-eof range of the folio must be zeroed before it is exposed in pagecache_isize_extended()
798 offset = from - folio_pos(folio); in pagecache_isize_extended()
799 end = min_t(unsigned int, to - folio_pos(folio), in pagecache_isize_extended()
800 folio_size(folio)); in pagecache_isize_extended()
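pagecache_isize_extended() matters when the block size is smaller than PAGE_SIZE: the folio straddling the old EOF may still be mapped writably, and write faults must be re-triggered so the filesystem can allocate blocks for the newly valid range; the zeroing above keeps the post-EOF part of that folio clean. A sketched extension path (assumed names, i_rwsem held by the caller):

#include <linux/fs.h>
#include <linux/mm.h>

static void myfs_extend(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = i_size_read(inode);

	i_size_write(inode, newsize);		/* publish the new size */
	pagecache_isize_extended(inode, oldsize, newsize);
}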
810 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
818 * with on-disk format, and the filesystem would not have to deal with
824 struct address_space *mapping = inode->i_mapping; in truncate_pagecache_range()
826 loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1; in truncate_pagecache_range()
832 * allows holelen 0 for all, and we allow lend -1 for end of file. in truncate_pagecache_range()
838 * hole-punching should not remove private COWed pages from the hole. in truncate_pagecache_range()
842 1 + unmap_end - unmap_start, 0); in truncate_pagecache_range()
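truncate_pagecache_range() takes inclusive byte offsets, and the rounding above deliberately keeps partially covered folios in cache so private COWed pages survive the hole punch. A sketched fallocate(FALLOC_FL_PUNCH_HOLE) style caller (assumed; myfs_free_range is hypothetical):

#include <linux/fs.h>
#include <linux/mm.h>

static void myfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	/* lend is inclusive, hence the -1. */
	truncate_pagecache_range(inode, offset, offset + len - 1);
	/* myfs_free_range(inode, offset, len); -- then free the extent */
}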