Lines Matching +full:not +full:- +full:swapped (search hits in mm/shmem.c)
6 * 2000-2001 Christoph Rohland
7 * 2000-2001 SAP AG
9 * Copyright (C) 2002-2011 Hugh Dickins.
11 * Copyright (C) 2002-2005 VERITAS Software Corporation.
18 * tiny-shmem:
62 #include <linux/backing-dev.h>
103 * inode->i_private (with i_rwsem making sure that it has only one user at
104 * a time): we would prefer not to enlarge the shmem inode just for that.
157 return min3(nr_pages - totalhigh_pages(), nr_pages / 2, in shmem_default_max_inodes()
168 return sb->s_fs_info; in SHMEM_SB()
172 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
175 * consistent with the pre-accounting of private mappings ...
180 0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size)); in shmem_acct_size()
194 return security_vm_enough_memory_mm(current->mm, in shmem_reacct_size()
195 VM_ACCT(newsize) - VM_ACCT(oldsize)); in shmem_reacct_size()
197 vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize)); in shmem_reacct_size()
205 * shmem_get_folio reports shmem_acct_blocks failure as -ENOSPC not -ENOMEM,
206 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
213 return security_vm_enough_memory_mm(current->mm, in shmem_acct_blocks()
226 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_inode_acct_blocks()
227 int err = -ENOSPC; in shmem_inode_acct_blocks()
229 if (shmem_acct_blocks(info->flags, pages)) in shmem_inode_acct_blocks()
233 if (sbinfo->max_blocks) { in shmem_inode_acct_blocks()
234 if (!percpu_counter_limited_add(&sbinfo->used_blocks, in shmem_inode_acct_blocks()
235 sbinfo->max_blocks, pages)) in shmem_inode_acct_blocks()
240 percpu_counter_sub(&sbinfo->used_blocks, pages); in shmem_inode_acct_blocks()
252 shmem_unacct_blocks(info->flags, pages); in shmem_inode_acct_blocks()
259 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_inode_unacct_blocks()
264 if (sbinfo->max_blocks) in shmem_inode_unacct_blocks()
265 percpu_counter_sub(&sbinfo->used_blocks, pages); in shmem_inode_unacct_blocks()
266 shmem_unacct_blocks(info->flags, pages); in shmem_inode_unacct_blocks()
281 return mapping->a_ops == &shmem_aops; in shmem_mapping()
287 return vma->vm_ops == &shmem_anon_vm_ops; in vma_is_anon_shmem()
292 return vma_is_anon_shmem(vma) || vma->vm_ops == &shmem_vm_ops; in vma_is_shmem()
305 sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY; in shmem_enable_quotas()
320 for (type--; type >= 0; type--) in shmem_enable_quotas()
335 return SHMEM_I(inode)->i_dquot; in shmem_get_dquots()
354 if (!(sb->s_flags & SB_KERNMOUNT)) { in shmem_reserve_inode()
355 raw_spin_lock(&sbinfo->stat_lock); in shmem_reserve_inode()
356 if (sbinfo->max_inodes) { in shmem_reserve_inode()
357 if (sbinfo->free_ispace < BOGO_INODE_SIZE) { in shmem_reserve_inode()
358 raw_spin_unlock(&sbinfo->stat_lock); in shmem_reserve_inode()
359 return -ENOSPC; in shmem_reserve_inode()
361 sbinfo->free_ispace -= BOGO_INODE_SIZE; in shmem_reserve_inode()
364 ino = sbinfo->next_ino++; in shmem_reserve_inode()
366 ino = sbinfo->next_ino++; in shmem_reserve_inode()
367 if (unlikely(!sbinfo->full_inums && in shmem_reserve_inode()
375 __func__, MINOR(sb->s_dev)); in shmem_reserve_inode()
376 sbinfo->next_ino = 1; in shmem_reserve_inode()
377 ino = sbinfo->next_ino++; in shmem_reserve_inode()
381 raw_spin_unlock(&sbinfo->stat_lock); in shmem_reserve_inode()
384 * __shmem_file_setup, one of our callers, is lock-free: it in shmem_reserve_inode()
387 * unknown contexts. As such, use a per-cpu batched allocator in shmem_reserve_inode()
388 * which doesn't require the per-sb stat_lock unless we are at in shmem_reserve_inode()
392 * shmem mounts are not exposed to userspace, so we don't need in shmem_reserve_inode()
397 next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu()); in shmem_reserve_inode()
400 raw_spin_lock(&sbinfo->stat_lock); in shmem_reserve_inode()
401 ino = sbinfo->next_ino; in shmem_reserve_inode()
402 sbinfo->next_ino += SHMEM_INO_BATCH; in shmem_reserve_inode()
403 raw_spin_unlock(&sbinfo->stat_lock); in shmem_reserve_inode()
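
The shmem_reserve_inode() fragments above describe a per-cpu batched inode-number allocator that only takes the per-sb stat_lock when a batch needs refilling. A rough userspace analogue of that batching pattern, assuming per-thread storage and a mutex in place of per-cpu data and a raw spinlock (batch size and names are illustrative, not the kernel's):

#include <pthread.h>
#include <stdio.h>

/* Userspace analogue of the batched allocator sketched above: each thread
 * serves numbers from a private batch and only takes the shared lock when
 * the batch runs out. */
#define INO_BATCH 1024UL

static pthread_mutex_t next_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long next_ino = 2;              /* 0 and 1 stay reserved */

static __thread unsigned long batch_next, batch_end;

static unsigned long reserve_ino(void)
{
        if (batch_next == batch_end) {          /* exhausted: refill under lock */
                pthread_mutex_lock(&next_lock);
                batch_next = next_ino;
                next_ino += INO_BATCH;
                pthread_mutex_unlock(&next_lock);
                batch_end = batch_next + INO_BATCH;
        }
        return batch_next++;
}

int main(void)
{
        for (int i = 0; i < 3; i++)
                printf("ino %lu\n", reserve_ino());
        return 0;
}
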
418 if (sbinfo->max_inodes) { in shmem_free_inode()
419 raw_spin_lock(&sbinfo->stat_lock); in shmem_free_inode()
420 sbinfo->free_ispace += BOGO_INODE_SIZE + freed_ispace; in shmem_free_inode()
421 raw_spin_unlock(&sbinfo->stat_lock); in shmem_free_inode()
426 * shmem_recalc_inode - recalculate the block usage of an inode
429 * @swapped: the change in number of pages swapped from inode
434 * But normally info->alloced == inode->i_mapping->nrpages + info->swapped
435 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
437 static void shmem_recalc_inode(struct inode *inode, long alloced, long swapped) in shmem_recalc_inode() argument
442 spin_lock(&info->lock); in shmem_recalc_inode()
443 info->alloced += alloced; in shmem_recalc_inode()
444 info->swapped += swapped; in shmem_recalc_inode()
445 freed = info->alloced - info->swapped - in shmem_recalc_inode()
446 READ_ONCE(inode->i_mapping->nrpages); in shmem_recalc_inode()
449 * after i_mapping->nrpages has already been adjusted (up or down), in shmem_recalc_inode()
450 * shmem_writepage() has to raise swapped before nrpages is lowered - in shmem_recalc_inode()
454 if (swapped > 0) in shmem_recalc_inode()
455 freed += swapped; in shmem_recalc_inode()
457 info->alloced -= freed; in shmem_recalc_inode()
458 spin_unlock(&info->lock); in shmem_recalc_inode()
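
The shmem_recalc_inode() fragments above boil down to one identity: pages the inode has accounted (alloced) but which are neither resident (nrpages) nor swapped out must have been reclaimed. A minimal standalone illustration of that arithmetic, with invented values:

#include <stdio.h>

/* Illustration of the invariant quoted above:
 *   freed = alloced - swapped - nrpages   (only meaningful when positive)
 * The numbers below are made up for the example. */
int main(void)
{
        long alloced = 10;      /* pages the inode has accounted for */
        long swapped = 3;       /* pages currently swapped out */
        long nrpages = 5;       /* pages still in the page cache */

        long freed = alloced - swapped - nrpages;
        if (freed > 0)
                alloced -= freed;       /* 2 pages were reclaimed behind our back */

        printf("freed=%ld alloced=%ld\n", freed > 0 ? freed : 0, alloced);
        return 0;
}
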
467 struct address_space *mapping = inode->i_mapping; in shmem_charge()
473 xa_lock_irq(&mapping->i_pages); in shmem_charge()
474 mapping->nrpages += pages; in shmem_charge()
475 xa_unlock_irq(&mapping->i_pages); in shmem_charge()
495 XA_STATE(xas, &mapping->i_pages, index); in shmem_replace_entry()
502 return -ENOENT; in shmem_replace_entry()
509 * that an entry was not already brought back from swap by a racing thread.
511 * Checking folio is not enough: by the time a swapcache folio is locked, it
517 return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap); in shmem_confirm_swap()
549 #define SHMEM_HUGE_DENY (-1)
550 #define SHMEM_HUGE_FORCE (-2)
553 /* ifdef here to avoid bloating shmem.o when not necessary */
559 * shmem_mapping_size_orders - Get allowable folio orders for the given file size.
581 size = write_end - (index << PAGE_SHIFT); in shmem_mapping_size_orders()
586 /* If we're not aligned, allocate a smaller folio */ in shmem_mapping_size_orders()
587 if (index & ((1UL << order) - 1)) in shmem_mapping_size_orders()
591 return order > 0 ? BIT(order + 1) - 1 : 0; in shmem_mapping_size_orders()
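
The shmem_mapping_size_orders() fragments above derive allowable folio orders from two constraints: the folio must fit within the write, and the starting index must be aligned to the folio size. A simplified standalone sketch of that calculation, returning only the single largest order rather than the kernel's bitmask of orders (constants and the helper name are illustrative):

#include <stdio.h>

#define PAGE_SHIFT 12   /* assume 4 KiB pages for the example */

/* Largest power-of-two folio (in pages) that both fits in the write and
 * keeps 'index' aligned; a simplification of the kernel routine above. */
static unsigned int size_order(unsigned long index, unsigned long long write_end)
{
        unsigned long long size = write_end - ((unsigned long long)index << PAGE_SHIFT);
        unsigned int order = 0;

        while ((1ULL << (order + 1 + PAGE_SHIFT)) <= size)
                order++;                                /* grow while it still fits */
        while (order && (index & ((1UL << order) - 1)))
                order--;                                /* shrink until index is aligned */
        return order;
}

int main(void)
{
        /* a 2 MiB write starting at page index 512 (2 MiB aligned) */
        unsigned long index = 512;
        unsigned long long write_end = ((unsigned long long)index << PAGE_SHIFT) + (2ULL << 20);

        printf("order = %u\n", size_order(index, write_end));  /* 9 == PMD order here */
        return 0;
}
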
606 if (!S_ISREG(inode->i_mode)) in shmem_huge_global_enabled()
615 * the mTHP interface, so we still use PMD-sized huge order to in shmem_huge_global_enabled()
618 * For tmpfs mmap()'s huge order, we still use PMD-sized order to in shmem_huge_global_enabled()
625 switch (SHMEM_SB(inode->i_sb)->huge) { in shmem_huge_global_enabled()
630 return shmem_mapping_size_orders(inode->i_mapping, index, write_end); in shmem_huge_global_enabled()
635 within_size_orders = shmem_mapping_size_orders(inode->i_mapping, in shmem_huge_global_enabled()
663 return -EINVAL; in shmem_parse_huge()
678 return -EINVAL; in shmem_parse_huge()
682 return -EINVAL; in shmem_parse_huge()
684 /* Do not override huge allocation policy with non-PMD sized mTHP */ in shmem_parse_huge()
687 return -EINVAL; in shmem_parse_huge()
722 unsigned long batch = sc ? sc->nr_to_scan : 128; in shmem_unused_huge_shrink()
725 if (list_empty(&sbinfo->shrinklist)) in shmem_unused_huge_shrink()
728 spin_lock(&sbinfo->shrinklist_lock); in shmem_unused_huge_shrink()
729 list_for_each_safe(pos, next, &sbinfo->shrinklist) { in shmem_unused_huge_shrink()
733 inode = igrab(&info->vfs_inode); in shmem_unused_huge_shrink()
737 list_del_init(&info->shrinklist); in shmem_unused_huge_shrink()
741 list_move(&info->shrinklist, &list); in shmem_unused_huge_shrink()
743 sbinfo->shrinklist_len--; in shmem_unused_huge_shrink()
744 if (!--batch) in shmem_unused_huge_shrink()
747 spin_unlock(&sbinfo->shrinklist_lock); in shmem_unused_huge_shrink()
755 inode = &info->vfs_inode; in shmem_unused_huge_shrink()
761 folio = filemap_get_entry(inode->i_mapping, i_size / PAGE_SIZE); in shmem_unused_huge_shrink()
774 if (end <= folio->index || end >= next) { in shmem_unused_huge_shrink()
799 freed += next - end; in shmem_unused_huge_shrink()
802 list_del_init(&info->shrinklist); in shmem_unused_huge_shrink()
811 spin_lock(&sbinfo->shrinklist_lock); in shmem_unused_huge_shrink()
812 list_move(&info->shrinklist, &sbinfo->shrinklist); in shmem_unused_huge_shrink()
813 sbinfo->shrinklist_len++; in shmem_unused_huge_shrink()
814 spin_unlock(&sbinfo->shrinklist_lock); in shmem_unused_huge_shrink()
827 if (!READ_ONCE(sbinfo->shrinklist_len)) in shmem_unused_huge_scan()
837 return READ_ONCE(sbinfo->shrinklist_len); in shmem_unused_huge_count()
873 XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio)); in shmem_add_to_page_cache()
881 folio->mapping = mapping; in shmem_add_to_page_cache()
882 folio->index = index; in shmem_add_to_page_cache()
890 xas_set_err(&xas, -EEXIST); in shmem_add_to_page_cache()
894 xas_set_err(&xas, -EEXIST); in shmem_add_to_page_cache()
901 mapping->nrpages += nr; in shmem_add_to_page_cache()
907 folio->mapping = NULL; in shmem_add_to_page_cache()
920 struct address_space *mapping = folio->mapping; in shmem_delete_from_page_cache()
924 xa_lock_irq(&mapping->i_pages); in shmem_delete_from_page_cache()
925 error = shmem_replace_entry(mapping, folio->index, folio, radswap); in shmem_delete_from_page_cache()
926 folio->mapping = NULL; in shmem_delete_from_page_cache()
927 mapping->nrpages -= nr; in shmem_delete_from_page_cache()
928 shmem_update_stats(folio, -nr); in shmem_delete_from_page_cache()
929 xa_unlock_irq(&mapping->i_pages); in shmem_delete_from_page_cache()
936 * the number of pages being freed. 0 means entry not found in XArray (0 pages
942 int order = xa_get_order(&mapping->i_pages, index); in shmem_free_swap()
945 old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0); in shmem_free_swap()
955 * given offsets are swapped out.
958 * as long as the inode doesn't go away and racy results are not a problem.
963 XA_STATE(xas, &mapping->i_pages, start); in shmem_partial_swap_usage()
965 unsigned long swapped = 0; in shmem_partial_swap_usage() local
966 unsigned long max = end - 1; in shmem_partial_swap_usage()
973 swapped += 1 << xas_get_order(&xas); in shmem_partial_swap_usage()
983 return swapped << PAGE_SHIFT; in shmem_partial_swap_usage()
988 * given vma is swapped out.
991 * as long as the inode doesn't go away and racy results are not a problem.
995 struct inode *inode = file_inode(vma->vm_file); in shmem_swap_usage()
997 struct address_space *mapping = inode->i_mapping; in shmem_swap_usage()
998 unsigned long swapped; in shmem_swap_usage() local
1000 /* Be careful as we don't hold info->lock */ in shmem_swap_usage()
1001 swapped = READ_ONCE(info->swapped); in shmem_swap_usage()
1008 if (!swapped) in shmem_swap_usage()
1011 if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size) in shmem_swap_usage()
1012 return swapped << PAGE_SHIFT; in shmem_swap_usage()
1015 return shmem_partial_swap_usage(mapping, vma->vm_pgoff, in shmem_swap_usage()
1016 vma->vm_pgoff + vma_pages(vma)); in shmem_swap_usage()
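
The shmem_swap_usage() fragments above take a shortcut: info->swapped is read once without the lock, zero means nothing is swapped, a mapping that starts at offset 0 and covers the whole file can report swapped << PAGE_SHIFT directly, and only a partial mapping needs the per-range walk. A small sketch of that decision, with invented numbers:

#include <stdio.h>

#define PAGE_SHIFT 12

/* Sketch of the fast path above: full-file mappings report the inode-wide
 * count; partial mappings fall back to walking the range. */
int main(void)
{
        unsigned long swapped = 37;                  /* pages, read once, racily */
        unsigned long vm_pgoff = 0;                  /* mapping starts at file offset 0 */
        unsigned long long map_len = 8ULL << 20;     /* 8 MiB mapping */
        unsigned long long i_size = 4ULL << 20;      /* 4 MiB file */

        if (!swapped)
                printf("0 bytes swapped\n");
        else if (!vm_pgoff && map_len >= i_size)
                printf("%llu bytes swapped\n",
                       (unsigned long long)swapped << PAGE_SHIFT);
        else
                printf("partial mapping: walk the range\n");
        return 0;
}
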
1047 folio = filemap_get_entry(inode->i_mapping, index); in shmem_get_partial_folio()
1052 if (folio->mapping == inode->i_mapping) in shmem_get_partial_folio()
1054 /* The folio has been swapped out */ in shmem_get_partial_folio()
1074 struct address_space *mapping = inode->i_mapping; in shmem_undo_range()
1076 pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT; in shmem_undo_range()
1086 if (lend == -1) in shmem_undo_range()
1087 end = -1; /* unsigned, so actually very big */ in shmem_undo_range()
1089 if (info->fallocend > start && info->fallocend <= end && !unfalloc) in shmem_undo_range()
1090 info->fallocend = start; in shmem_undo_range()
1094 while (index < end && find_lock_entries(mapping, &index, end - 1, in shmem_undo_range()
1133 end = folio->index; in shmem_undo_range()
1145 end = folio->index; in shmem_undo_range()
1156 if (!find_get_entries(mapping, &index, end - 1, &fbatch, in shmem_undo_range()
1158 /* If all gone or hole-punch or unfalloc, we're done */ in shmem_undo_range()
1159 if (index == start || end != -1) in shmem_undo_range()
1219 shmem_recalc_inode(inode, 0, -nr_swaps_freed); in shmem_undo_range()
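
The shmem_undo_range() fragments above turn a byte range into page indices: the start offset is rounded up so a partially covered first page survives, and lend == -1 ("truncate to end of file") becomes a very large unsigned end index. A tiny worked example of those two conversions, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SIZE  4096UL
#define PAGE_SHIFT 12

/* Worked example of the conversions quoted above, with made-up offsets. */
int main(void)
{
        long long lstart = 5000;     /* byte offset inside page 1 */

        /* round up: the partially covered first page is left alone */
        unsigned long start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;

        /* lend == -1 ("whole file"): as an unsigned index it is simply huge */
        unsigned long end = (unsigned long)-1;

        printf("start index %lu, end index %lu\n", start, end);
        return 0;
}
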
1234 struct inode *inode = path->dentry->d_inode; in shmem_getattr()
1237 if (info->alloced - info->swapped != inode->i_mapping->nrpages) in shmem_getattr()
1240 if (info->fsflags & FS_APPEND_FL) in shmem_getattr()
1241 stat->attributes |= STATX_ATTR_APPEND; in shmem_getattr()
1242 if (info->fsflags & FS_IMMUTABLE_FL) in shmem_getattr()
1243 stat->attributes |= STATX_ATTR_IMMUTABLE; in shmem_getattr()
1244 if (info->fsflags & FS_NODUMP_FL) in shmem_getattr()
1245 stat->attributes |= STATX_ATTR_NODUMP; in shmem_getattr()
1246 stat->attributes_mask |= (STATX_ATTR_APPEND | in shmem_getattr()
1252 stat->blksize = HPAGE_PMD_SIZE; in shmem_getattr()
1255 stat->result_mask |= STATX_BTIME; in shmem_getattr()
1256 stat->btime.tv_sec = info->i_crtime.tv_sec; in shmem_getattr()
1257 stat->btime.tv_nsec = info->i_crtime.tv_nsec; in shmem_getattr()
1276 if ((info->seals & F_SEAL_EXEC) && (attr->ia_valid & ATTR_MODE)) { in shmem_setattr()
1277 if ((inode->i_mode ^ attr->ia_mode) & 0111) { in shmem_setattr()
1278 return -EPERM; in shmem_setattr()
1282 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { in shmem_setattr()
1283 loff_t oldsize = inode->i_size; in shmem_setattr()
1284 loff_t newsize = attr->ia_size; in shmem_setattr()
1287 if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) || in shmem_setattr()
1288 (newsize > oldsize && (info->seals & F_SEAL_GROW))) in shmem_setattr()
1289 return -EPERM; in shmem_setattr()
1292 error = shmem_reacct_size(SHMEM_I(inode)->flags, in shmem_setattr()
1304 unmap_mapping_range(inode->i_mapping, in shmem_setattr()
1306 if (info->alloced) in shmem_setattr()
1308 newsize, (loff_t)-1); in shmem_setattr()
1311 unmap_mapping_range(inode->i_mapping, in shmem_setattr()
1331 if (attr->ia_valid & ATTR_MODE) in shmem_setattr()
1332 error = posix_acl_chmod(idmap, dentry, inode->i_mode); in shmem_setattr()
1345 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_evict_inode()
1348 if (shmem_mapping(inode->i_mapping)) { in shmem_evict_inode()
1349 shmem_unacct_size(info->flags, inode->i_size); in shmem_evict_inode()
1350 inode->i_size = 0; in shmem_evict_inode()
1351 mapping_set_exiting(inode->i_mapping); in shmem_evict_inode()
1352 shmem_truncate_range(inode, 0, (loff_t)-1); in shmem_evict_inode()
1353 if (!list_empty(&info->shrinklist)) { in shmem_evict_inode()
1354 spin_lock(&sbinfo->shrinklist_lock); in shmem_evict_inode()
1355 if (!list_empty(&info->shrinklist)) { in shmem_evict_inode()
1356 list_del_init(&info->shrinklist); in shmem_evict_inode()
1357 sbinfo->shrinklist_len--; in shmem_evict_inode()
1359 spin_unlock(&sbinfo->shrinklist_lock); in shmem_evict_inode()
1361 while (!list_empty(&info->swaplist)) { in shmem_evict_inode()
1363 wait_var_event(&info->stop_eviction, in shmem_evict_inode()
1364 !atomic_read(&info->stop_eviction)); in shmem_evict_inode()
1367 if (!atomic_read(&info->stop_eviction)) in shmem_evict_inode()
1368 list_del_init(&info->swaplist); in shmem_evict_inode()
1373 simple_xattrs_free(&info->xattrs, sbinfo->max_inodes ? &freed : NULL); in shmem_evict_inode()
1374 shmem_free_inode(inode->i_sb, freed); in shmem_evict_inode()
1375 WARN_ON(inode->i_blocks); in shmem_evict_inode()
1387 XA_STATE(xas, &mapping->i_pages, start); in shmem_find_swap_entries()
1422 * Move the swapped pages for an inode to page cache. Returns the count
1423 * of pages swapped in, or the error in case of failure.
1431 struct address_space *mapping = inode->i_mapping; in shmem_unuse_swap_entries()
1434 struct folio *folio = fbatch->folios[i]; in shmem_unuse_swap_entries()
1445 if (error == -ENOMEM) in shmem_unuse_swap_entries()
1457 struct address_space *mapping = inode->i_mapping; in shmem_unuse_inode()
1475 start = indices[folio_batch_count(&fbatch) - 1]; in shmem_unuse_inode()
1496 if (!info->swapped) { in shmem_unuse()
1497 list_del_init(&info->swaplist); in shmem_unuse()
1502 * but before doing so, make sure shmem_evict_inode() will not in shmem_unuse()
1504 * (igrab() would protect from unlink, but not from unmount). in shmem_unuse()
1506 atomic_inc(&info->stop_eviction); in shmem_unuse()
1509 error = shmem_unuse_inode(&info->vfs_inode, type); in shmem_unuse()
1514 if (!info->swapped) in shmem_unuse()
1515 list_del_init(&info->swaplist); in shmem_unuse()
1516 if (atomic_dec_and_test(&info->stop_eviction)) in shmem_unuse()
1517 wake_up_var(&info->stop_eviction); in shmem_unuse()
1532 struct address_space *mapping = folio->mapping; in shmem_writepage()
1533 struct inode *inode = mapping->host; in shmem_writepage()
1535 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_writepage()
1543 * shmem_writepage; but a stacking filesystem might use ->writepage of in shmem_writepage()
1545 * swap only in response to memory pressure, and not for the writeback in shmem_writepage()
1548 if (WARN_ON_ONCE(!wbc->for_reclaim)) in shmem_writepage()
1551 if ((info->flags & VM_LOCKED) || sbinfo->noswap) in shmem_writepage()
1558 * If CONFIG_THP_SWAP is not enabled, the large folio should be in shmem_writepage()
1561 * And shrinkage of pages beyond i_size does not split swap, so in shmem_writepage()
1568 if ((index > folio->index && index < folio_next_index(folio)) || in shmem_writepage()
1577 if (split_huge_page_to_list_to_order(page, wbc->list, 0)) in shmem_writepage()
1583 index = folio->index; in shmem_writepage()
1592 * not yet completed the fallocation, then (a) we want to keep track in shmem_writepage()
1593 * of this folio in case we have to undo it, and (b) it may not be a in shmem_writepage()
1598 if (inode->i_private) { in shmem_writepage()
1600 spin_lock(&inode->i_lock); in shmem_writepage()
1601 shmem_falloc = inode->i_private; in shmem_writepage()
1603 !shmem_falloc->waitq && in shmem_writepage()
1604 index >= shmem_falloc->start && in shmem_writepage()
1605 index < shmem_falloc->next) in shmem_writepage()
1606 shmem_falloc->nr_unswapped += nr_pages; in shmem_writepage()
1609 spin_unlock(&inode->i_lock); in shmem_writepage()
1627 * Add inode to shmem_unuse()'s list of swapped-out inodes, in shmem_writepage()
1628 * if it's not already there. Do it now before the folio is in shmem_writepage()
1631 * we've incremented swapped, because shmem_unuse_inode() will in shmem_writepage()
1632 * prune a !swapped inode from the swaplist under this mutex. in shmem_writepage()
1635 if (list_empty(&info->swaplist)) in shmem_writepage()
1636 list_add(&info->swaplist, &shmem_swaplist); in shmem_writepage()
1647 return swap_writepage(&folio->page, wbc); in shmem_writepage()
1654 if (wbc->for_reclaim) in shmem_writepage()
1665 if (!mpol || mpol->mode == MPOL_DEFAULT) in shmem_show_mpol()
1676 if (sbinfo->mpol) { in shmem_get_sbmpol()
1677 raw_spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */ in shmem_get_sbmpol()
1678 mpol = sbinfo->mpol; in shmem_get_sbmpol()
1680 raw_spin_unlock(&sbinfo->stat_lock); in shmem_get_sbmpol()
1759 unsigned long vm_flags = vma ? vma->vm_flags : 0; in shmem_allowable_huge_orders()
1782 * Only allow inherit orders if the top-level value is 'force', which in shmem_allowable_huge_orders()
1783 * means non-PMD sized THP can not override 'huge' mount option now. in shmem_allowable_huge_orders()
1814 struct vm_area_struct *vma = vmf ? vmf->vma : NULL; in shmem_suitable_orders()
1820 orders = thp_vma_suitable_orders(vma, vmf->address, orders); in shmem_suitable_orders()
1835 * Be careful to retry when appropriate, but not forever! in shmem_suitable_orders()
1836 * Elsewhere -EEXIST would be the right code, but not here. in shmem_suitable_orders()
1838 if (!xa_find(&mapping->i_pages, &aligned_index, in shmem_suitable_orders()
1839 aligned_index + pages - 1, XA_PRESENT)) in shmem_suitable_orders()
1873 struct address_space *mapping = inode->i_mapping; in shmem_alloc_and_add_folio()
1905 return ERR_PTR(-ENOMEM); in shmem_alloc_and_add_folio()
1914 if (xa_find(&mapping->i_pages, &index, in shmem_alloc_and_add_folio()
1915 index + pages - 1, XA_PRESENT)) { in shmem_alloc_and_add_folio()
1916 error = -EEXIST; in shmem_alloc_and_add_folio()
1934 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_alloc_and_add_folio()
1943 * except our folio is there in cache, so not quite balanced. in shmem_alloc_and_add_folio()
1945 spin_lock(&info->lock); in shmem_alloc_and_add_folio()
1946 freed = pages + info->alloced - info->swapped - in shmem_alloc_and_add_folio()
1947 READ_ONCE(mapping->nrpages); in shmem_alloc_and_add_folio()
1949 info->alloced -= freed; in shmem_alloc_and_add_folio()
1950 spin_unlock(&info->lock); in shmem_alloc_and_add_folio()
1992 return ERR_PTR(-ENOMEM); in shmem_swap_alloc_folio()
1995 if (mem_cgroup_swapin_charge_folio(new, vma ? vma->vm_mm : NULL, in shmem_swap_alloc_folio()
1998 return ERR_PTR(-ENOMEM); in shmem_swap_alloc_folio()
2009 * concurrent swapin and return -EEXIST. in shmem_swap_alloc_folio()
2013 return ERR_PTR(-EEXIST); in shmem_swap_alloc_folio()
2018 new->swap = entry; in shmem_swap_alloc_folio()
2051 swp_entry_t entry = old->swap; in shmem_replace_folio()
2054 XA_STATE(xas, &swap_mapping->i_pages, swap_index); in shmem_replace_folio()
2073 return -ENOMEM; in shmem_replace_folio()
2082 new->swap = entry; in shmem_replace_folio()
2085 /* Swap cache still stores N entries instead of a high-order entry */ in shmem_replace_folio()
2086 xa_lock_irq(&swap_mapping->i_pages); in shmem_replace_folio()
2091 error = -ENOENT; in shmem_replace_folio()
2101 shmem_update_stats(old, -nr_pages); in shmem_replace_folio()
2103 xa_unlock_irq(&swap_mapping->i_pages); in shmem_replace_folio()
2107 * Is this possible? I think not, now that our callers in shmem_replace_folio()
2108 * check both the swapcache flag and folio->private in shmem_replace_folio()
2119 old->private = NULL; in shmem_replace_folio()
2135 struct address_space *mapping = inode->i_mapping; in shmem_set_folio_swapin_error()
2141 old = xa_cmpxchg_irq(&mapping->i_pages, index, in shmem_set_folio_swapin_error()
2152 * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks in shmem_set_folio_swapin_error()
2156 shmem_recalc_inode(inode, -nr_pages, -nr_pages); in shmem_set_folio_swapin_error()
2163 struct address_space *mapping = inode->i_mapping; in shmem_split_large_entry()
2164 XA_STATE_ORDER(xas, &mapping->i_pages, index, 0); in shmem_split_large_entry()
2172 int order = -1, split_order = 0; in shmem_split_large_entry()
2178 xas_set_err(&xas, -EEXIST); in shmem_split_large_entry()
2184 /* Swap entry may have changed before we re-acquire the lock */ in shmem_split_large_entry()
2200 * Re-set the swap entry after splitting, and the swap in shmem_split_large_entry()
2208 __xa_store(&mapping->i_pages, aligned_index + i, in shmem_split_large_entry()
2240 * Caller has to make sure that *foliop contains a valid swapped folio.
2249 struct address_space *mapping = inode->i_mapping; in shmem_swapin_folio()
2250 struct mm_struct *fault_mm = vma ? vma->vm_mm : NULL; in shmem_swapin_folio()
2263 return -EIO; in shmem_swapin_folio()
2268 return -EEXIST; in shmem_swapin_folio()
2270 return -EINVAL; in shmem_swapin_folio()
2275 order = xa_get_order(&mapping->i_pages, index); in shmem_swapin_folio()
2287 * If uffd is active for the vma, we need per-page fault in shmem_swapin_folio()
2289 * to swapin order-0 folio, as well as for zswap case. in shmem_swapin_folio()
2296 if (!fallback_order0 && data_race(si->flags & SWP_SYNCHRONOUS_IO)) { in shmem_swapin_folio()
2304 * Fallback to swapin order-0 folio unless the swap entry in shmem_swapin_folio()
2309 if (error == -EEXIST) in shmem_swapin_folio()
2330 pgoff_t offset = index - round_down(index, 1 << split_order); in shmem_swapin_folio()
2338 error = -ENOMEM; in shmem_swapin_folio()
2360 pgoff_t offset = index - round_down(index, 1 << split_order); in shmem_swapin_folio()
2370 folio->swap.val != swap.val || in shmem_swapin_folio()
2372 xa_get_order(&mapping->i_pages, index) != folio_order(folio)) { in shmem_swapin_folio()
2373 error = -EEXIST; in shmem_swapin_folio()
2377 error = -EIO; in shmem_swapin_folio()
2401 shmem_recalc_inode(inode, 0, -nr_pages); in shmem_swapin_folio()
2407 folio->swap.val = 0; in shmem_swapin_folio()
2420 error = -EEXIST; in shmem_swapin_folio()
2421 if (error == -EIO) in shmem_swapin_folio()
2437 * shmem_get_folio_gfp - find page in cache, or get from swap, or allocate
2439 * If we allocate a new one we do not mark it dirty. That's up to the
2449 struct vm_area_struct *vma = vmf ? vmf->vma : NULL; in shmem_get_folio_gfp()
2456 if (WARN_ON_ONCE(!shmem_mapping(inode->i_mapping))) in shmem_get_folio_gfp()
2457 return -EINVAL; in shmem_get_folio_gfp()
2460 return -EFBIG; in shmem_get_folio_gfp()
2464 return -EINVAL; in shmem_get_folio_gfp()
2467 fault_mm = vma ? vma->vm_mm : NULL; in shmem_get_folio_gfp()
2469 folio = filemap_get_entry(inode->i_mapping, index); in shmem_get_folio_gfp()
2480 if (error == -EEXIST) in shmem_get_folio_gfp()
2490 /* Has the folio been truncated or swapped out? */ in shmem_get_folio_gfp()
2491 if (unlikely(folio->mapping != inode->i_mapping)) { in shmem_get_folio_gfp()
2515 return -ENOENT; in shmem_get_folio_gfp()
2518 * Fast cache lookup and swap lookup did not find it: allocate. in shmem_get_folio_gfp()
2541 if (PTR_ERR(folio) == -EEXIST) in shmem_get_folio_gfp()
2548 if (error == -EEXIST) in shmem_get_folio_gfp()
2559 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_get_folio_gfp()
2565 spin_lock(&sbinfo->shrinklist_lock); in shmem_get_folio_gfp()
2568 * ->shrink_list in shmem_unused_huge_shrink() in shmem_get_folio_gfp()
2570 if (list_empty_careful(&info->shrinklist)) { in shmem_get_folio_gfp()
2571 list_add_tail(&info->shrinklist, in shmem_get_folio_gfp()
2572 &sbinfo->shrinklist); in shmem_get_folio_gfp()
2573 sbinfo->shrinklist_len++; in shmem_get_folio_gfp()
2575 spin_unlock(&sbinfo->shrinklist_lock); in shmem_get_folio_gfp()
2587 * Let SGP_WRITE caller clear ends if write does not fill folio; in shmem_get_folio_gfp()
2603 error = -EINVAL; in shmem_get_folio_gfp()
2625 * shmem_get_folio - find, and lock a shmem folio.
2636 * before unlocking the folio to ensure that the folio is not reclaimed.
2640 * - for SGP_READ, *@foliop is %NULL and 0 is returned
2641 * - for SGP_NOALLOC, *@foliop is %NULL and -ENOENT is returned
2642 * - for all other flags a new folio is allocated, inserted into the
2652 mapping_gfp_mask(inode->i_mapping), NULL, NULL); in shmem_get_folio()
2658 * entry unconditionally - even if something else had already woken the
2665 list_del_init(&wait->entry); in synchronous_wake_function()
2671 * prevent the hole-punch from ever completing: which in turn
2678 * It does not matter if we sometimes reach this check just before the
2679 * hole-punch begins, so that one fault then races with the punch:
2692 spin_lock(&inode->i_lock); in shmem_falloc_wait()
2693 shmem_falloc = inode->i_private; in shmem_falloc_wait()
2695 shmem_falloc->waitq && in shmem_falloc_wait()
2696 vmf->pgoff >= shmem_falloc->start && in shmem_falloc_wait()
2697 vmf->pgoff < shmem_falloc->next) { in shmem_falloc_wait()
2703 shmem_falloc_waitq = shmem_falloc->waitq; in shmem_falloc_wait()
2706 spin_unlock(&inode->i_lock); in shmem_falloc_wait()
2711 * stack of the hole-punching task: shmem_falloc_waitq in shmem_falloc_wait()
2713 * finish_wait() does not dereference it in that case; in shmem_falloc_wait()
2716 spin_lock(&inode->i_lock); in shmem_falloc_wait()
2719 spin_unlock(&inode->i_lock); in shmem_falloc_wait()
2729 struct inode *inode = file_inode(vmf->vma->vm_file); in shmem_fault()
2730 gfp_t gfp = mapping_gfp_mask(inode->i_mapping); in shmem_fault()
2737 * prevent the hole-punch from ever completing: noted in i_private. in shmem_fault()
2739 if (unlikely(inode->i_private)) { in shmem_fault()
2745 WARN_ON_ONCE(vmf->page != NULL); in shmem_fault()
2746 err = shmem_get_folio_gfp(inode, vmf->pgoff, 0, &folio, SGP_CACHE, in shmem_fault()
2751 vmf->page = folio_file_page(folio, vmf->pgoff); in shmem_fault()
2769 return -ENOMEM; in shmem_get_unmapped_area()
2771 addr = mm_get_unmapped_area(current->mm, file, uaddr, len, pgoff, in shmem_get_unmapped_area()
2780 if (addr > TASK_SIZE - len) in shmem_get_unmapped_area()
2803 VM_BUG_ON(file->f_op != &shmem_file_operations); in shmem_get_unmapped_area()
2804 sb = file_inode(file)->i_sb; in shmem_get_unmapped_area()
2812 sb = shm_mnt->mnt_sb; in shmem_get_unmapped_area()
2822 if (SHMEM_SB(sb)->huge != SHMEM_HUGE_NEVER) in shmem_get_unmapped_area()
2831 if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER && !order) in shmem_get_unmapped_area()
2838 offset = (pgoff << PAGE_SHIFT) & (hpage_size - 1); in shmem_get_unmapped_area()
2841 if ((addr & (hpage_size - 1)) == offset) in shmem_get_unmapped_area()
2844 inflated_len = len + hpage_size - PAGE_SIZE; in shmem_get_unmapped_area()
2850 inflated_addr = mm_get_unmapped_area(current->mm, NULL, uaddr, in shmem_get_unmapped_area()
2857 inflated_offset = inflated_addr & (hpage_size - 1); in shmem_get_unmapped_area()
2858 inflated_addr += offset - inflated_offset; in shmem_get_unmapped_area()
2862 if (inflated_addr > TASK_SIZE - len) in shmem_get_unmapped_area()
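
The shmem_get_unmapped_area() fragments above over-allocate by almost one huge page and then slide the returned address so that its offset within a huge page matches the file offset's. A standalone worked example of that fix-up with a 2 MiB huge page; the addresses, offsets and the final wrap-around correction are invented for illustration:

#include <stdio.h>

#define PAGE_SHIFT 12

/* Worked example of the alignment fix-up above: shift the over-sized
 * mapping so (addr % hpage_size) == (file offset % hpage_size). */
int main(void)
{
        unsigned long hpage_size = 2UL << 20;               /* 2 MiB */
        unsigned long pgoff = 513;                          /* file offset, in pages */
        unsigned long inflated_addr = 0x7f1234561000UL;     /* hypothetical hint result */

        unsigned long offset = (pgoff << PAGE_SHIFT) & (hpage_size - 1);
        unsigned long inflated_offset = inflated_addr & (hpage_size - 1);

        inflated_addr += offset - inflated_offset;
        if (inflated_offset > offset)        /* slid backwards: move up one huge page */
                inflated_addr += hpage_size;

        printf("aligned addr = %#lx\n", inflated_addr);      /* ends in ...601000 */
        return 0;
}
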
2870 struct inode *inode = file_inode(vma->vm_file); in shmem_set_policy()
2871 return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol); in shmem_set_policy()
2877 struct inode *inode = file_inode(vma->vm_file); in shmem_get_policy()
2886 *ilx = inode->i_ino; in shmem_get_policy()
2887 index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; in shmem_get_policy()
2888 return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index); in shmem_get_policy()
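
The shmem_get_policy() fragments above translate a user address back into a page index within the file by combining the offset inside the mapping with vm_pgoff. A tiny worked example of that index calculation, with invented values:

#include <stdio.h>

#define PAGE_SHIFT 12

/* Worked example of the index calculation quoted above. */
int main(void)
{
        unsigned long vm_start = 0x7f0000000000UL;
        unsigned long vm_pgoff = 16;                 /* mapping starts 64 KiB into the file */
        unsigned long addr = vm_start + 0x3000;      /* offset 0x3000 into the mapping */

        unsigned long index = ((addr - vm_start) >> PAGE_SHIFT) + vm_pgoff;
        printf("file page index = %lu\n", index);    /* 19 */
        return 0;
}
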
2897 *ilx = info->vfs_inode.i_ino + (index >> order); in shmem_get_pgoff_policy()
2899 mpol = mpol_shared_policy_lookup(&info->policy, index); in shmem_get_pgoff_policy()
2915 int retval = -ENOMEM; in shmem_lock()
2918 * What serializes the accesses to info->flags? in shmem_lock()
2922 if (lock && !(info->flags & VM_LOCKED)) { in shmem_lock()
2923 if (!user_shm_lock(inode->i_size, ucounts)) in shmem_lock()
2925 info->flags |= VM_LOCKED; in shmem_lock()
2926 mapping_set_unevictable(file->f_mapping); in shmem_lock()
2928 if (!lock && (info->flags & VM_LOCKED) && ucounts) { in shmem_lock()
2929 user_shm_unlock(inode->i_size, ucounts); in shmem_lock()
2930 info->flags &= ~VM_LOCKED; in shmem_lock()
2931 mapping_clear_unevictable(file->f_mapping); in shmem_lock()
2945 if (inode->i_nlink) in shmem_mmap()
2946 vma->vm_ops = &shmem_vm_ops; in shmem_mmap()
2948 vma->vm_ops = &shmem_anon_vm_ops; in shmem_mmap()
2954 file->f_mode |= FMODE_CAN_ODIRECT; in shmem_file_open()
2963 * shmem_inode_casefold_flags - Deal with casefold file attribute flag
2966 * an empty dir, and can't be removed from a non-empty dir.
2971 unsigned int old = inode->i_flags; in shmem_inode_casefold_flags()
2972 struct super_block *sb = inode->i_sb; in shmem_inode_casefold_flags()
2976 if (!sb->s_encoding) in shmem_inode_casefold_flags()
2977 return -EOPNOTSUPP; in shmem_inode_casefold_flags()
2979 if (!S_ISDIR(inode->i_mode)) in shmem_inode_casefold_flags()
2980 return -ENOTDIR; in shmem_inode_casefold_flags()
2983 return -ENOTEMPTY; in shmem_inode_casefold_flags()
2989 return -ENOTEMPTY; in shmem_inode_casefold_flags()
2999 return -EOPNOTSUPP; in shmem_inode_casefold_flags()
3025 * But FS_NODUMP_FL does not require any action in i_flags. in shmem_set_inode_flags()
3040 return &SHMEM_I(inode)->dir_offsets; in shmem_get_offset_ctx()
3061 return ERR_PTR(-ENOSPC); in __shmem_get_inode()
3064 inode->i_ino = ino; in __shmem_get_inode()
3066 inode->i_blocks = 0; in __shmem_get_inode()
3068 inode->i_generation = get_random_u32(); in __shmem_get_inode()
3070 memset(info, 0, (char *)inode - (char *)info); in __shmem_get_inode()
3071 spin_lock_init(&info->lock); in __shmem_get_inode()
3072 atomic_set(&info->stop_eviction, 0); in __shmem_get_inode()
3073 info->seals = F_SEAL_SEAL; in __shmem_get_inode()
3074 info->flags = flags & VM_NORESERVE; in __shmem_get_inode()
3075 info->i_crtime = inode_get_mtime(inode); in __shmem_get_inode()
3076 info->fsflags = (dir == NULL) ? 0 : in __shmem_get_inode()
3077 SHMEM_I(dir)->fsflags & SHMEM_FL_INHERITED; in __shmem_get_inode()
3078 if (info->fsflags) in __shmem_get_inode()
3079 shmem_set_inode_flags(inode, info->fsflags, NULL); in __shmem_get_inode()
3080 INIT_LIST_HEAD(&info->shrinklist); in __shmem_get_inode()
3081 INIT_LIST_HEAD(&info->swaplist); in __shmem_get_inode()
3082 simple_xattrs_init(&info->xattrs); in __shmem_get_inode()
3084 if (sbinfo->noswap) in __shmem_get_inode()
3085 mapping_set_unevictable(inode->i_mapping); in __shmem_get_inode()
3088 if (sbinfo->huge) in __shmem_get_inode()
3089 mapping_set_large_folios(inode->i_mapping); in __shmem_get_inode()
3093 inode->i_op = &shmem_special_inode_operations; in __shmem_get_inode()
3097 inode->i_mapping->a_ops = &shmem_aops; in __shmem_get_inode()
3098 inode->i_op = &shmem_inode_operations; in __shmem_get_inode()
3099 inode->i_fop = &shmem_file_operations; in __shmem_get_inode()
3100 mpol_shared_policy_init(&info->policy, in __shmem_get_inode()
3106 inode->i_size = 2 * BOGO_DIRENT_SIZE; in __shmem_get_inode()
3107 inode->i_op = &shmem_dir_inode_operations; in __shmem_get_inode()
3108 inode->i_fop = &simple_offset_dir_operations; in __shmem_get_inode()
3113 * Must not load anything in the rbtree, in __shmem_get_inode()
3114 * mpol_free_shared_policy will not be called. in __shmem_get_inode()
3116 mpol_shared_policy_init(&info->policy, NULL); in __shmem_get_inode()
3148 inode->i_flags |= S_NOQUOTA; in shmem_get_inode()
3169 struct inode *inode = file_inode(dst_vma->vm_file); in shmem_mfill_atomic_pte()
3171 struct address_space *mapping = inode->i_mapping; in shmem_mfill_atomic_pte()
3181 * We may have got a page, returned -ENOENT triggering a retry, in shmem_mfill_atomic_pte()
3182 * and now we find ourselves with -ENOMEM. Release the page, to in shmem_mfill_atomic_pte()
3189 return -ENOMEM; in shmem_mfill_atomic_pte()
3193 ret = -ENOMEM; in shmem_mfill_atomic_pte()
3225 ret = -ENOENT; in shmem_mfill_atomic_pte()
3232 clear_user_highpage(&folio->page, dst_addr); in shmem_mfill_atomic_pte()
3246 ret = -EFAULT; in shmem_mfill_atomic_pte()
3251 ret = mem_cgroup_charge(folio, dst_vma->vm_mm, gfp); in shmem_mfill_atomic_pte()
3259 &folio->page, true, flags); in shmem_mfill_atomic_pte()
3286 struct inode *inode = mapping->host; in shmem_write_begin()
3293 if (unlikely(info->seals & (F_SEAL_GROW | in shmem_write_begin()
3295 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) in shmem_write_begin()
3296 return -EPERM; in shmem_write_begin()
3297 if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size) in shmem_write_begin()
3298 return -EPERM; in shmem_write_begin()
3308 return -EIO; in shmem_write_begin()
3320 struct inode *inode = mapping->host; in shmem_write_end()
3322 if (pos + copied > inode->i_size) in shmem_write_end()
3342 struct file *file = iocb->ki_filp; in shmem_file_read_iter()
3344 struct address_space *mapping = inode->i_mapping; in shmem_file_read_iter()
3358 if (unlikely(iocb->ki_pos >= i_size)) in shmem_file_read_iter()
3361 index = iocb->ki_pos >> PAGE_SHIFT; in shmem_file_read_iter()
3364 if (error == -EINVAL) in shmem_file_read_iter()
3374 error = -EIO; in shmem_file_read_iter()
3388 if (unlikely(iocb->ki_pos >= i_size)) { in shmem_file_read_iter()
3393 end_offset = min_t(loff_t, i_size, iocb->ki_pos + to->count); in shmem_file_read_iter()
3398 offset = iocb->ki_pos & (fsize - 1); in shmem_file_read_iter()
3399 nr = min_t(loff_t, end_offset - iocb->ki_pos, fsize - offset); in shmem_file_read_iter()
3420 * Ok, we have the page, and it's up-to-date, so in shmem_file_read_iter()
3431 * clear_user() not so much, that it is noticeably in shmem_file_read_iter()
3438 * splice() - or others? - can result in confusion: in shmem_file_read_iter()
3445 iocb->ki_pos += ret; in shmem_file_read_iter()
3450 error = -EFAULT; in shmem_file_read_iter()
3462 struct file *file = iocb->ki_filp; in shmem_file_write_iter()
3463 struct inode *inode = file->f_mapping->host; in shmem_file_write_iter()
3510 size = min_t(size_t, size, PAGE_SIZE - offset); in splice_zeropage_into_pipe()
3521 pipe->head++; in splice_zeropage_into_pipe()
3532 struct address_space *mapping = inode->i_mapping; in shmem_file_splice_read()
3540 npages = max_t(ssize_t, pipe->max_usage - used, 0); in shmem_file_splice_read()
3555 if (error == -EINVAL) in shmem_file_splice_read()
3564 error = -EIO; in shmem_file_splice_read()
3577 * the correct value for "nr", which means the zero-filled in shmem_file_splice_read()
3578 * part of the page is not copied back to userspace (unless in shmem_file_splice_read()
3579 * another truncate extends the file - this is desired though). in shmem_file_splice_read()
3592 size = umin(size, PAGE_SIZE - offset); in shmem_file_splice_read()
3594 part = min_t(loff_t, isize - *ppos, size); in shmem_file_splice_read()
3610 * Ok, we have the page, and it's up-to-date, so we can in shmem_file_splice_read()
3622 len -= n; in shmem_file_splice_read()
3625 in->f_ra.prev_pos = *ppos; in shmem_file_splice_read()
3641 struct address_space *mapping = file->f_mapping; in shmem_file_llseek()
3642 struct inode *inode = mapping->host; in shmem_file_llseek()
3648 return -ENXIO; in shmem_file_llseek()
3652 offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence); in shmem_file_llseek()
3663 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_fallocate()
3670 return -EOPNOTSUPP; in shmem_fallocate()
3675 struct address_space *mapping = file->f_mapping; in shmem_fallocate()
3677 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1; in shmem_fallocate()
3681 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) { in shmem_fallocate()
3682 error = -EPERM; in shmem_fallocate()
3689 spin_lock(&inode->i_lock); in shmem_fallocate()
3690 inode->i_private = &shmem_falloc; in shmem_fallocate()
3691 spin_unlock(&inode->i_lock); in shmem_fallocate()
3695 1 + unmap_end - unmap_start, 0); in shmem_fallocate()
3696 shmem_truncate_range(inode, offset, offset + len - 1); in shmem_fallocate()
3697 /* No need to unmap again: hole-punching leaves COWed pages */ in shmem_fallocate()
3699 spin_lock(&inode->i_lock); in shmem_fallocate()
3700 inode->i_private = NULL; in shmem_fallocate()
3703 spin_unlock(&inode->i_lock); in shmem_fallocate()
3713 if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) { in shmem_fallocate()
3714 error = -EPERM; in shmem_fallocate()
3719 end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT; in shmem_fallocate()
3721 if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) { in shmem_fallocate()
3722 error = -ENOSPC; in shmem_fallocate()
3731 spin_lock(&inode->i_lock); in shmem_fallocate()
3732 inode->i_private = &shmem_falloc; in shmem_fallocate()
3733 spin_unlock(&inode->i_lock); in shmem_fallocate()
3736 * info->fallocend is only relevant when huge pages might be in shmem_fallocate()
3740 undo_fallocend = info->fallocend; in shmem_fallocate()
3741 if (info->fallocend < end) in shmem_fallocate()
3742 info->fallocend = end; in shmem_fallocate()
3749 * situations. We don't want to abort in case of non-fatal in shmem_fallocate()
3755 error = -EINTR; in shmem_fallocate()
3757 error = -ENOMEM; in shmem_fallocate()
3762 info->fallocend = undo_fallocend; in shmem_fallocate()
3767 ((loff_t)index << PAGE_SHIFT) - 1, true); in shmem_fallocate()
3775 * making it uptodate and un-undoable if we fail later. in shmem_fallocate()
3778 /* Beware 32-bit wraparound */ in shmem_fallocate()
3780 index--; in shmem_fallocate()
3787 shmem_falloc.nr_falloced += index - shmem_falloc.next; in shmem_fallocate()
3803 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) in shmem_fallocate()
3806 spin_lock(&inode->i_lock); in shmem_fallocate()
3807 inode->i_private = NULL; in shmem_fallocate()
3808 spin_unlock(&inode->i_lock); in shmem_fallocate()
3818 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); in shmem_statfs()
3820 buf->f_type = TMPFS_MAGIC; in shmem_statfs()
3821 buf->f_bsize = PAGE_SIZE; in shmem_statfs()
3822 buf->f_namelen = NAME_MAX; in shmem_statfs()
3823 if (sbinfo->max_blocks) { in shmem_statfs()
3824 buf->f_blocks = sbinfo->max_blocks; in shmem_statfs()
3825 buf->f_bavail = in shmem_statfs()
3826 buf->f_bfree = sbinfo->max_blocks - in shmem_statfs()
3827 percpu_counter_sum(&sbinfo->used_blocks); in shmem_statfs()
3829 if (sbinfo->max_inodes) { in shmem_statfs()
3830 buf->f_files = sbinfo->max_inodes; in shmem_statfs()
3831 buf->f_ffree = sbinfo->free_ispace / BOGO_INODE_SIZE; in shmem_statfs()
3835 buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b); in shmem_statfs()
3850 if (!generic_ci_validate_strict_name(dir, &dentry->d_name)) in shmem_mknod()
3851 return -EINVAL; in shmem_mknod()
3853 inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, dev, VM_NORESERVE); in shmem_mknod()
3860 error = security_inode_init_security(inode, dir, &dentry->d_name, in shmem_mknod()
3862 if (error && error != -EOPNOTSUPP) in shmem_mknod()
3869 dir->i_size += BOGO_DIRENT_SIZE; in shmem_mknod()
3878 dget(dentry); /* Extra count - pin the dentry in core */ in shmem_mknod()
3893 inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, 0, VM_NORESERVE); in shmem_tmpfile()
3900 if (error && error != -EOPNOTSUPP) in shmem_tmpfile()
3948 if (inode->i_nlink) { in shmem_link()
3949 ret = shmem_reserve_inode(inode->i_sb, NULL); in shmem_link()
3956 if (inode->i_nlink) in shmem_link()
3957 shmem_free_inode(inode->i_sb, 0); in shmem_link()
3961 dir->i_size += BOGO_DIRENT_SIZE; in shmem_link()
3980 if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) in shmem_unlink()
3981 shmem_free_inode(inode->i_sb, 0); in shmem_unlink()
3985 dir->i_size -= BOGO_DIRENT_SIZE; in shmem_unlink()
3990 dput(dentry); /* Undo the count from "create" - does all the work */ in shmem_unlink()
3993 * For now, VFS can't deal with case-insensitive negative dentries, so in shmem_unlink()
4005 return -ENOTEMPTY; in shmem_rmdir()
4018 whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name); in shmem_whiteout()
4020 return -ENOMEM; in shmem_whiteout()
4033 * not sure which one, but that isn't even important. in shmem_whiteout()
4051 int they_are_dirs = S_ISDIR(inode->i_mode); in shmem_rename2()
4055 return -EINVAL; in shmem_rename2()
4062 return -ENOTEMPTY; in shmem_rename2()
4085 old_dir->i_size -= BOGO_DIRENT_SIZE; in shmem_rename2()
4086 new_dir->i_size += BOGO_DIRENT_SIZE; in shmem_rename2()
4104 return -ENAMETOOLONG; in shmem_symlink()
4106 inode = shmem_get_inode(idmap, dir->i_sb, dir, S_IFLNK | 0777, 0, in shmem_symlink()
4111 error = security_inode_init_security(inode, dir, &dentry->d_name, in shmem_symlink()
4113 if (error && error != -EOPNOTSUPP) in shmem_symlink()
4120 inode->i_size = len-1; in shmem_symlink()
4124 error = -ENOMEM; in shmem_symlink()
4127 inode->i_op = &shmem_short_symlink_operations; in shmem_symlink()
4128 inode_set_cached_link(inode, link, len - 1); in shmem_symlink()
4131 inode->i_mapping->a_ops = &shmem_aops; in shmem_symlink()
4135 inode->i_op = &shmem_symlink_inode_operations; in shmem_symlink()
4142 dir->i_size += BOGO_DIRENT_SIZE; in shmem_symlink()
4172 folio = filemap_get_folio(inode->i_mapping, 0); in shmem_get_link()
4174 return ERR_PTR(-ECHILD); in shmem_get_link()
4178 return ERR_PTR(-ECHILD); in shmem_get_link()
4185 return ERR_PTR(-ECHILD); in shmem_get_link()
4189 return ERR_PTR(-ECHILD); in shmem_get_link()
4203 fileattr_fill_flags(fa, info->fsflags & SHMEM_FL_USER_VISIBLE); in shmem_fileattr_get()
4216 return -EOPNOTSUPP; in shmem_fileattr_set()
4217 if (fa->flags & ~SHMEM_FL_USER_MODIFIABLE) in shmem_fileattr_set()
4218 return -EOPNOTSUPP; in shmem_fileattr_set()
4220 flags = (info->fsflags & ~SHMEM_FL_USER_MODIFIABLE) | in shmem_fileattr_set()
4221 (fa->flags & SHMEM_FL_USER_MODIFIABLE); in shmem_fileattr_set()
4228 info->fsflags = flags; in shmem_fileattr_set()
4249 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_initxattrs()
4255 if (sbinfo->max_inodes) { in shmem_initxattrs()
4256 for (xattr = xattr_array; xattr->name != NULL; xattr++) { in shmem_initxattrs()
4257 ispace += simple_xattr_space(xattr->name, in shmem_initxattrs()
4258 xattr->value_len + XATTR_SECURITY_PREFIX_LEN); in shmem_initxattrs()
4261 raw_spin_lock(&sbinfo->stat_lock); in shmem_initxattrs()
4262 if (sbinfo->free_ispace < ispace) in shmem_initxattrs()
4265 sbinfo->free_ispace -= ispace; in shmem_initxattrs()
4266 raw_spin_unlock(&sbinfo->stat_lock); in shmem_initxattrs()
4268 return -ENOSPC; in shmem_initxattrs()
4272 for (xattr = xattr_array; xattr->name != NULL; xattr++) { in shmem_initxattrs()
4273 new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len); in shmem_initxattrs()
4277 len = strlen(xattr->name) + 1; in shmem_initxattrs()
4278 new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len, in shmem_initxattrs()
4280 if (!new_xattr->name) { in shmem_initxattrs()
4285 memcpy(new_xattr->name, XATTR_SECURITY_PREFIX, in shmem_initxattrs()
4287 memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN, in shmem_initxattrs()
4288 xattr->name, len); in shmem_initxattrs()
4290 simple_xattr_add(&info->xattrs, new_xattr); in shmem_initxattrs()
4293 if (xattr->name != NULL) { in shmem_initxattrs()
4295 raw_spin_lock(&sbinfo->stat_lock); in shmem_initxattrs()
4296 sbinfo->free_ispace += ispace; in shmem_initxattrs()
4297 raw_spin_unlock(&sbinfo->stat_lock); in shmem_initxattrs()
4299 simple_xattrs_free(&info->xattrs, NULL); in shmem_initxattrs()
4300 return -ENOMEM; in shmem_initxattrs()
4313 return simple_xattr_get(&info->xattrs, name, buffer, size); in shmem_xattr_handler_get()
4323 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_xattr_handler_set()
4328 if (value && sbinfo->max_inodes) { in shmem_xattr_handler_set()
4330 raw_spin_lock(&sbinfo->stat_lock); in shmem_xattr_handler_set()
4331 if (sbinfo->free_ispace < ispace) in shmem_xattr_handler_set()
4334 sbinfo->free_ispace -= ispace; in shmem_xattr_handler_set()
4335 raw_spin_unlock(&sbinfo->stat_lock); in shmem_xattr_handler_set()
4337 return -ENOSPC; in shmem_xattr_handler_set()
4340 old_xattr = simple_xattr_set(&info->xattrs, name, value, size, flags); in shmem_xattr_handler_set()
4343 if (old_xattr && sbinfo->max_inodes) in shmem_xattr_handler_set()
4344 ispace = simple_xattr_space(old_xattr->name, in shmem_xattr_handler_set()
4345 old_xattr->size); in shmem_xattr_handler_set()
4352 raw_spin_lock(&sbinfo->stat_lock); in shmem_xattr_handler_set()
4353 sbinfo->free_ispace += ispace; in shmem_xattr_handler_set()
4354 raw_spin_unlock(&sbinfo->stat_lock); in shmem_xattr_handler_set()
4387 return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size); in shmem_listxattr()
4411 return ERR_PTR(-ESTALE); in shmem_get_parent()
4419 return ino->i_ino == inum && fh[0] == ino->i_generation; in shmem_match()
4440 inum = fid->raw[2]; in shmem_fh_to_dentry()
4441 inum = (inum << 32) | fid->raw[1]; in shmem_fh_to_dentry()
4443 inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]), in shmem_fh_to_dentry()
4444 shmem_match, fid->raw); in shmem_fh_to_dentry()
4462 /* Unfortunately insert_inode_hash is not idempotent, in shmem_encode_fh()
4471 inode->i_ino + inode->i_generation); in shmem_encode_fh()
4475 fh[0] = inode->i_generation; in shmem_encode_fh()
4476 fh[1] = inode->i_ino; in shmem_encode_fh()
4477 fh[2] = ((__u64)inode->i_ino) >> 32; in shmem_encode_fh()
4552 struct shmem_options *ctx = fc->fs_private; in shmem_parse_opt_casefold()
4555 char *version_str = param->string + 5; in shmem_parse_opt_casefold()
4558 if (strncmp(param->string, "utf8-", 5)) in shmem_parse_opt_casefold()
4559 return invalfc(fc, "Only UTF-8 encodings are supported " in shmem_parse_opt_casefold()
4560 "in the format: utf8-<version number>"); in shmem_parse_opt_casefold()
4564 return invalfc(fc, "Invalid UTF-8 version: %s", version_str); in shmem_parse_opt_casefold()
4570 return invalfc(fc, "Failed loading UTF-8 version: utf8-%u.%u.%u\n", in shmem_parse_opt_casefold()
4575 pr_info("tmpfs: Using encoding : utf8-%u.%u.%u\n", in shmem_parse_opt_casefold()
4578 ctx->encoding = encoding; in shmem_parse_opt_casefold()
4586 return invalfc(fc, "tmpfs: Kernel not built with CONFIG_UNICODE\n"); in shmem_parse_opt_casefold()
4592 struct shmem_options *ctx = fc->fs_private; in shmem_parse_one()
4606 size = memparse(param->string, &rest); in shmem_parse_one()
4615 ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE); in shmem_parse_one()
4616 ctx->seen |= SHMEM_SEEN_BLOCKS; in shmem_parse_one()
4619 ctx->blocks = memparse(param->string, &rest); in shmem_parse_one()
4620 if (*rest || ctx->blocks > LONG_MAX) in shmem_parse_one()
4622 ctx->seen |= SHMEM_SEEN_BLOCKS; in shmem_parse_one()
4625 ctx->inodes = memparse(param->string, &rest); in shmem_parse_one()
4626 if (*rest || ctx->inodes > ULONG_MAX / BOGO_INODE_SIZE) in shmem_parse_one()
4628 ctx->seen |= SHMEM_SEEN_INODES; in shmem_parse_one()
4631 ctx->mode = result.uint_32 & 07777; in shmem_parse_one()
4640 if (!kuid_has_mapping(fc->user_ns, kuid)) in shmem_parse_one()
4643 ctx->uid = kuid; in shmem_parse_one()
4652 if (!kgid_has_mapping(fc->user_ns, kgid)) in shmem_parse_one()
4655 ctx->gid = kgid; in shmem_parse_one()
4658 ctx->huge = result.uint_32; in shmem_parse_one()
4659 if (ctx->huge != SHMEM_HUGE_NEVER && in shmem_parse_one()
4663 ctx->seen |= SHMEM_SEEN_HUGE; in shmem_parse_one()
4667 mpol_put(ctx->mpol); in shmem_parse_one()
4668 ctx->mpol = NULL; in shmem_parse_one()
4669 if (mpol_parse_str(param->string, &ctx->mpol)) in shmem_parse_one()
4675 ctx->full_inums = false; in shmem_parse_one()
4676 ctx->seen |= SHMEM_SEEN_INUMS; in shmem_parse_one()
4683 ctx->full_inums = true; in shmem_parse_one()
4684 ctx->seen |= SHMEM_SEEN_INUMS; in shmem_parse_one()
4687 if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN)) { in shmem_parse_one()
4691 ctx->noswap = true; in shmem_parse_one()
4692 ctx->seen |= SHMEM_SEEN_NOSWAP; in shmem_parse_one()
4695 if (fc->user_ns != &init_user_ns) in shmem_parse_one()
4697 ctx->seen |= SHMEM_SEEN_QUOTA; in shmem_parse_one()
4698 ctx->quota_types |= (QTYPE_MASK_USR | QTYPE_MASK_GRP); in shmem_parse_one()
4701 if (fc->user_ns != &init_user_ns) in shmem_parse_one()
4703 ctx->seen |= SHMEM_SEEN_QUOTA; in shmem_parse_one()
4704 ctx->quota_types |= QTYPE_MASK_USR; in shmem_parse_one()
4707 if (fc->user_ns != &init_user_ns) in shmem_parse_one()
4709 ctx->seen |= SHMEM_SEEN_QUOTA; in shmem_parse_one()
4710 ctx->quota_types |= QTYPE_MASK_GRP; in shmem_parse_one()
4713 size = memparse(param->string, &rest); in shmem_parse_one()
4719 ctx->qlimits.usrquota_bhardlimit = size; in shmem_parse_one()
4722 size = memparse(param->string, &rest); in shmem_parse_one()
4728 ctx->qlimits.grpquota_bhardlimit = size; in shmem_parse_one()
4731 size = memparse(param->string, &rest); in shmem_parse_one()
4737 ctx->qlimits.usrquota_ihardlimit = size; in shmem_parse_one()
4740 size = memparse(param->string, &rest); in shmem_parse_one()
4746 ctx->qlimits.grpquota_ihardlimit = size; in shmem_parse_one()
4754 ctx->strict_encoding = true; in shmem_parse_one()
4757 return invalfc(fc, "tmpfs: Kernel not built with CONFIG_UNICODE\n"); in shmem_parse_one()
4763 return invalfc(fc, "Unsupported parameter '%s'", param->key); in shmem_parse_one()
4765 return invalfc(fc, "Bad value for '%s'", param->key); in shmem_parse_one()
4777 * NUL-terminate this option: unfortunately, in shmem_next_opt()
4778 * mount options form a comma-separated list, in shmem_next_opt()
4806 struct shmem_options *ctx = fc->fs_private; in shmem_reconfigure()
4807 struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb); in shmem_reconfigure()
4812 raw_spin_lock(&sbinfo->stat_lock); in shmem_reconfigure()
4813 used_isp = sbinfo->max_inodes * BOGO_INODE_SIZE - sbinfo->free_ispace; in shmem_reconfigure()
4815 if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) { in shmem_reconfigure()
4816 if (!sbinfo->max_blocks) { in shmem_reconfigure()
4820 if (percpu_counter_compare(&sbinfo->used_blocks, in shmem_reconfigure()
4821 ctx->blocks) > 0) { in shmem_reconfigure()
4826 if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) { in shmem_reconfigure()
4827 if (!sbinfo->max_inodes) { in shmem_reconfigure()
4831 if (ctx->inodes * BOGO_INODE_SIZE < used_isp) { in shmem_reconfigure()
4837 if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums && in shmem_reconfigure()
4838 sbinfo->next_ino > UINT_MAX) { in shmem_reconfigure()
4839 err = "Current inum too high to switch to 32-bit inums"; in shmem_reconfigure()
4842 if ((ctx->seen & SHMEM_SEEN_NOSWAP) && ctx->noswap && !sbinfo->noswap) { in shmem_reconfigure()
4846 if (!(ctx->seen & SHMEM_SEEN_NOSWAP) && !ctx->noswap && sbinfo->noswap) { in shmem_reconfigure()
4851 if (ctx->seen & SHMEM_SEEN_QUOTA && in shmem_reconfigure()
4852 !sb_any_quota_loaded(fc->root->d_sb)) { in shmem_reconfigure()
4859 (ctx->qlimits.name## hardlimit && \ in shmem_reconfigure()
4860 (ctx->qlimits.name## hardlimit != sbinfo->qlimits.name## hardlimit)) in shmem_reconfigure()
4869 if (ctx->seen & SHMEM_SEEN_HUGE) in shmem_reconfigure()
4870 sbinfo->huge = ctx->huge; in shmem_reconfigure()
4871 if (ctx->seen & SHMEM_SEEN_INUMS) in shmem_reconfigure()
4872 sbinfo->full_inums = ctx->full_inums; in shmem_reconfigure()
4873 if (ctx->seen & SHMEM_SEEN_BLOCKS) in shmem_reconfigure()
4874 sbinfo->max_blocks = ctx->blocks; in shmem_reconfigure()
4875 if (ctx->seen & SHMEM_SEEN_INODES) { in shmem_reconfigure()
4876 sbinfo->max_inodes = ctx->inodes; in shmem_reconfigure()
4877 sbinfo->free_ispace = ctx->inodes * BOGO_INODE_SIZE - used_isp; in shmem_reconfigure()
4883 if (ctx->mpol) { in shmem_reconfigure()
4884 mpol = sbinfo->mpol; in shmem_reconfigure()
4885 sbinfo->mpol = ctx->mpol; /* transfers initial ref */ in shmem_reconfigure()
4886 ctx->mpol = NULL; in shmem_reconfigure()
4889 if (ctx->noswap) in shmem_reconfigure()
4890 sbinfo->noswap = true; in shmem_reconfigure()
4892 raw_spin_unlock(&sbinfo->stat_lock); in shmem_reconfigure()
4896 raw_spin_unlock(&sbinfo->stat_lock); in shmem_reconfigure()
4902 struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb); in shmem_show_options()
4905 if (sbinfo->max_blocks != shmem_default_max_blocks()) in shmem_show_options()
4906 seq_printf(seq, ",size=%luk", K(sbinfo->max_blocks)); in shmem_show_options()
4907 if (sbinfo->max_inodes != shmem_default_max_inodes()) in shmem_show_options()
4908 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); in shmem_show_options()
4909 if (sbinfo->mode != (0777 | S_ISVTX)) in shmem_show_options()
4910 seq_printf(seq, ",mode=%03ho", sbinfo->mode); in shmem_show_options()
4911 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID)) in shmem_show_options()
4913 from_kuid_munged(&init_user_ns, sbinfo->uid)); in shmem_show_options()
4914 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID)) in shmem_show_options()
4916 from_kgid_munged(&init_user_ns, sbinfo->gid)); in shmem_show_options()
4921 * /proc/config.gz to confirm 64-bit inums were successfully applied in shmem_show_options()
4922 * (which may not even exist if IKCONFIG_PROC isn't enabled). in shmem_show_options()
4924 * We hide it when inode64 isn't the default and we are using 32-bit in shmem_show_options()
4930 * +-----------------+-----------------+ in shmem_show_options()
4932 * +------------------+-----------------+-----------------+ in shmem_show_options()
4935 * +------------------+-----------------+-----------------+ in shmem_show_options()
4938 if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums) in shmem_show_options()
4939 seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32)); in shmem_show_options()
4942 if (sbinfo->huge) in shmem_show_options()
4943 seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge)); in shmem_show_options()
4948 if (sbinfo->noswap) in shmem_show_options()
4951 if (sb_has_quota_active(root->d_sb, USRQUOTA)) in shmem_show_options()
4953 if (sb_has_quota_active(root->d_sb, GRPQUOTA)) in shmem_show_options()
4955 if (sbinfo->qlimits.usrquota_bhardlimit) in shmem_show_options()
4957 sbinfo->qlimits.usrquota_bhardlimit); in shmem_show_options()
4958 if (sbinfo->qlimits.grpquota_bhardlimit) in shmem_show_options()
4960 sbinfo->qlimits.grpquota_bhardlimit); in shmem_show_options()
4961 if (sbinfo->qlimits.usrquota_ihardlimit) in shmem_show_options()
4963 sbinfo->qlimits.usrquota_ihardlimit); in shmem_show_options()
4964 if (sbinfo->qlimits.grpquota_ihardlimit) in shmem_show_options()
4966 sbinfo->qlimits.grpquota_ihardlimit); in shmem_show_options()
4978 if (sb->s_encoding) in shmem_put_super()
4979 utf8_unload(sb->s_encoding); in shmem_put_super()
4985 free_percpu(sbinfo->ino_batch); in shmem_put_super()
4986 percpu_counter_destroy(&sbinfo->used_blocks); in shmem_put_super()
4987 mpol_put(sbinfo->mpol); in shmem_put_super()
4989 sb->s_fs_info = NULL; in shmem_put_super()
5002 struct shmem_options *ctx = fc->fs_private; in shmem_fill_super()
5005 int error = -ENOMEM; in shmem_fill_super()
5013 sb->s_fs_info = sbinfo; in shmem_fill_super()
5021 if (!(sb->s_flags & SB_KERNMOUNT)) { in shmem_fill_super()
5022 if (!(ctx->seen & SHMEM_SEEN_BLOCKS)) in shmem_fill_super()
5023 ctx->blocks = shmem_default_max_blocks(); in shmem_fill_super()
5024 if (!(ctx->seen & SHMEM_SEEN_INODES)) in shmem_fill_super()
5025 ctx->inodes = shmem_default_max_inodes(); in shmem_fill_super()
5026 if (!(ctx->seen & SHMEM_SEEN_INUMS)) in shmem_fill_super()
5027 ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64); in shmem_fill_super()
5028 sbinfo->noswap = ctx->noswap; in shmem_fill_super()
5030 sb->s_flags |= SB_NOUSER; in shmem_fill_super()
5032 sb->s_export_op = &shmem_export_ops; in shmem_fill_super()
5033 sb->s_flags |= SB_NOSEC | SB_I_VERSION; in shmem_fill_super()
5036 if (!ctx->encoding && ctx->strict_encoding) { in shmem_fill_super()
5038 error = -EINVAL; in shmem_fill_super()
5042 if (ctx->encoding) { in shmem_fill_super()
5043 sb->s_encoding = ctx->encoding; in shmem_fill_super()
5044 sb->s_d_op = &shmem_ci_dentry_ops; in shmem_fill_super()
5045 if (ctx->strict_encoding) in shmem_fill_super()
5046 sb->s_encoding_flags = SB_ENC_STRICT_MODE_FL; in shmem_fill_super()
5051 sb->s_flags |= SB_NOUSER; in shmem_fill_super()
5053 sbinfo->max_blocks = ctx->blocks; in shmem_fill_super()
5054 sbinfo->max_inodes = ctx->inodes; in shmem_fill_super()
5055 sbinfo->free_ispace = sbinfo->max_inodes * BOGO_INODE_SIZE; in shmem_fill_super()
5056 if (sb->s_flags & SB_KERNMOUNT) { in shmem_fill_super()
5057 sbinfo->ino_batch = alloc_percpu(ino_t); in shmem_fill_super()
5058 if (!sbinfo->ino_batch) in shmem_fill_super()
5061 sbinfo->uid = ctx->uid; in shmem_fill_super()
5062 sbinfo->gid = ctx->gid; in shmem_fill_super()
5063 sbinfo->full_inums = ctx->full_inums; in shmem_fill_super()
5064 sbinfo->mode = ctx->mode; in shmem_fill_super()
5066 if (ctx->seen & SHMEM_SEEN_HUGE) in shmem_fill_super()
5067 sbinfo->huge = ctx->huge; in shmem_fill_super()
5069 sbinfo->huge = tmpfs_huge; in shmem_fill_super()
5071 sbinfo->mpol = ctx->mpol; in shmem_fill_super()
5072 ctx->mpol = NULL; in shmem_fill_super()
5074 raw_spin_lock_init(&sbinfo->stat_lock); in shmem_fill_super()
5075 if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL)) in shmem_fill_super()
5077 spin_lock_init(&sbinfo->shrinklist_lock); in shmem_fill_super()
5078 INIT_LIST_HEAD(&sbinfo->shrinklist); in shmem_fill_super()
5080 sb->s_maxbytes = MAX_LFS_FILESIZE; in shmem_fill_super()
5081 sb->s_blocksize = PAGE_SIZE; in shmem_fill_super()
5082 sb->s_blocksize_bits = PAGE_SHIFT; in shmem_fill_super()
5083 sb->s_magic = TMPFS_MAGIC; in shmem_fill_super()
5084 sb->s_op = &shmem_ops; in shmem_fill_super()
5085 sb->s_time_gran = 1; in shmem_fill_super()
5087 sb->s_xattr = shmem_xattr_handlers; in shmem_fill_super()
5090 sb->s_flags |= SB_POSIXACL; in shmem_fill_super()
5097 if (ctx->seen & SHMEM_SEEN_QUOTA) { in shmem_fill_super()
5098 sb->dq_op = &shmem_quota_operations; in shmem_fill_super()
5099 sb->s_qcop = &dquot_quotactl_sysfile_ops; in shmem_fill_super()
5100 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP; in shmem_fill_super()
5103 memcpy(&sbinfo->qlimits, &ctx->qlimits, in shmem_fill_super()
5106 if (shmem_enable_quotas(sb, ctx->quota_types)) in shmem_fill_super()
5112 S_IFDIR | sbinfo->mode, 0, VM_NORESERVE); in shmem_fill_super()
5117 inode->i_uid = sbinfo->uid; in shmem_fill_super()
5118 inode->i_gid = sbinfo->gid; in shmem_fill_super()
5119 sb->s_root = d_make_root(inode); in shmem_fill_super()
5120 if (!sb->s_root) in shmem_fill_super()
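Among the things shmem_fill_super() wires up above is quota support: dq_op, s_qcop and the copy of ctx->qlimits are only set up when quota mount options were seen. A hypothetical mount that would exercise that path (the quota option names are the tmpfs ones handled by the options parser; the values are made up):

        mount -t tmpfs -o size=512M,usrquota,grpquota,grpquota_inode_hardlimit=1000 tmpfs /mnt/quota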
5136 struct shmem_options *ctx = fc->fs_private; in shmem_free_fc()
5139 mpol_put(ctx->mpol); in shmem_free_fc()
5162 return &info->vfs_inode; in shmem_alloc_inode()
5167 if (S_ISLNK(inode->i_mode)) in shmem_free_in_core_inode()
5168 kfree(inode->i_link); in shmem_free_in_core_inode()
5174 if (S_ISREG(inode->i_mode)) in shmem_destroy_inode()
5175 mpol_free_shared_policy(&SHMEM_I(inode)->policy); in shmem_destroy_inode()
5176 if (S_ISDIR(inode->i_mode)) in shmem_destroy_inode()
5183 inode_init_once(&info->vfs_inode); in shmem_init_inode()
5325 return -ENOMEM; in shmem_init_fs_context()
5327 ctx->mode = 0777 | S_ISVTX; in shmem_init_fs_context()
5328 ctx->uid = current_fsuid(); in shmem_init_fs_context()
5329 ctx->gid = current_fsgid(); in shmem_init_fs_context()
5332 ctx->encoding = NULL; in shmem_init_fs_context()
5335 fc->fs_private = ctx; in shmem_init_fs_context()
5336 fc->ops = &shmem_fs_context_ops; in shmem_init_fs_context()
5401 return -ENOMEM; in tmpfs_sysfs_init()
5423 pr_err("Could not register tmpfs\n"); in shmem_init()
5430 pr_err("Could not kern_mount tmpfs\n"); in shmem_init()
5437 pr_err("Could not init tmpfs sysfs\n"); in shmem_init()
5444 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; in shmem_init()
5449 * Default to setting PMD-sized THP to inherit the global setting and in shmem_init()
5450 * disable all other multi-size THPs. in shmem_init()
5499 return -EINVAL; in shmem_enabled_store()
5502 if (count && tmp[count - 1] == '\n') in shmem_enabled_store()
5503 tmp[count - 1] = '\0'; in shmem_enabled_store()
5506 if (huge == -EINVAL) in shmem_enabled_store()
5511 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; in shmem_enabled_store()
5523 int order = to_thpsize(kobj)->order; in thpsize_shmem_enabled_show()
5544 int order = to_thpsize(kobj)->order; in thpsize_shmem_enabled_store()
5555 /* Do not override huge allocation policy with non-PMD sized mTHP */ in thpsize_shmem_enabled_store()
5558 return -EINVAL; in thpsize_shmem_enabled_store()
5588 ret = -EINVAL; in thpsize_shmem_enabled_store()
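The two store handlers above back the sysfs knobs for the tmpfs huge-page policy, globally and per mTHP size. As a usage sketch (the paths are the standard transparent_hugepage sysfs locations; hugepages-64kB is just one example size):

        echo within_size > /sys/kernel/mm/transparent_hugepage/shmem_enabled
        echo always > /sys/kernel/mm/transparent_hugepage/hugepages-64kB/shmem_enabled

Per the non-PMD check above, policies that override the huge allocation policy (force/deny) are presumably rejected with -EINVAL for every size except the PMD-sized entry.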
5611 if (huge == -EINVAL) { in setup_transparent_hugepage_shmem()
5662 if (strchr(subtoken, '-')) { in setup_thp_shmem()
5663 start_size = strsep(&subtoken, "-"); in setup_thp_shmem()
5676 if (start == -EINVAL) { in setup_thp_shmem()
5682 if (end == -EINVAL) { in setup_thp_shmem()
5691 nr = end - start + 1; in setup_thp_shmem()
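setup_thp_shmem() parses per-size shmem huge-page policies from the kernel command line, including '-' separated size ranges as handled above. A hypothetical setting consistent with that parsing (check the kernel-parameters documentation for the authoritative syntax) might be:

        thp_shmem=64K-256K:advise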
5742 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
5745 * shmem code (swap-backed and resource-limited) are outweighed by
5785 return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags); in shmem_get_unmapped_area()
5791 truncate_inode_pages_range(inode->i_mapping, lstart, lend); in shmem_truncate_range()
5806 return inode ? inode : ERR_PTR(-ENOSPC); in shmem_get_inode()
5823 return ERR_PTR(-EINVAL); in __shmem_file_setup()
5826 return ERR_PTR(-ENOMEM); in __shmem_file_setup()
5829 return ERR_PTR(-EINVAL); in __shmem_file_setup()
5831 inode = shmem_get_inode(&nop_mnt_idmap, mnt->mnt_sb, NULL, in __shmem_file_setup()
5837 inode->i_flags |= i_flags; in __shmem_file_setup()
5838 inode->i_size = size; in __shmem_file_setup()
5850 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
5857 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
5866 * shmem_file_setup - get an unlinked file living in tmpfs
5869 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
5878 * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
5882 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
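The three setup helpers documented above all hand back an unlinked file on the internal tmpfs mount. A minimal in-kernel usage sketch, assuming the caller only needs a temporary swap-backed buffer and that <linux/shmem_fs.h>, <linux/err.h> and <linux/sizes.h> are included (the name "scratch" is just an example):

        struct file *file;

        /* unlinked tmpfs file, up to 1 MiB; without VM_NORESERVE the whole size is pre-accounted */
        file = shmem_kernel_file_setup("scratch", SZ_1M, 0);
        if (IS_ERR(file))
                return PTR_ERR(file);

        /* ... work against file->f_mapping ... */

        fput(file);     /* drop the last reference; the data is freed */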
5892 * shmem_zero_setup - setup a shared anonymous mapping
5898 loff_t size = vma->vm_end - vma->vm_start; in shmem_zero_setup()
5906 file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags); in shmem_zero_setup()
5910 if (vma->vm_file) in shmem_zero_setup()
5911 fput(vma->vm_file); in shmem_zero_setup()
5912 vma->vm_file = file; in shmem_zero_setup()
5913 vma->vm_ops = &shmem_anon_vm_ops; in shmem_zero_setup()
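shmem_zero_setup() above is how ordinary shared anonymous memory ends up backed by an internal "dev/zero" shmem file. From userspace, a mapping that reaches this path is as simple as (illustrative only):

        #include <sys/mman.h>

        /* shared anonymous mapping: the kernel backs it with shmem via shmem_zero_setup() */
        void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                       MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return -1;      /* handle error */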
5919 * shmem_read_folio_gfp - read into page cache, using specified page allocation flags.
5926 * But read_cache_page_gfp() uses the ->read_folio() method: which does not
5937 struct inode *inode = mapping->host; in shmem_read_folio_gfp()
5964 return &folio->page; in shmem_read_mapping_page_gfp()
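shmem_read_mapping_page_gfp() is the entry point drivers use instead of the generic read_cache_page_gfp() path described above. A minimal sketch, assuming the caller already holds a reference on a shmem-backed file; index 0 and GFP_KERNEL are only example choices:

        struct address_space *mapping = file->f_mapping;
        struct page *page;

        page = shmem_read_mapping_page_gfp(mapping, 0, GFP_KERNEL);
        if (IS_ERR(page))
                return PTR_ERR(page);

        /* ... read or write the page contents ... */

        put_page(page);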
5969 return ERR_PTR(-EIO); in shmem_read_mapping_page_gfp()