1 /*
2  * Resizable virtual memory filesystem for Linux.
3  *
4  * Copyright (C) 2000 Linus Torvalds.
5  *		 2000 Transmeta Corp.
6  *		 2000-2001 Christoph Rohland
7  *		 2000-2001 SAP AG
8  *		 2002 Red Hat Inc.
9  * Copyright (C) 2002-2011 Hugh Dickins.
10  * Copyright (C) 2011 Google Inc.
11  * Copyright (C) 2002-2005 VERITAS Software Corporation.
12  * Copyright (C) 2004 Andi Kleen, SuSE Labs
13  *
14  * Extended attribute support for tmpfs:
15  * Copyright (c) 2004, Luke Kenneth Casson Leighton <[email protected]>
16  * Copyright (c) 2004 Red Hat, Inc., James Morris <[email protected]>
17  *
18  * tiny-shmem:
19  * Copyright (c) 2004, 2008 Matt Mackall <[email protected]>
20  *
21  * This file is released under the GPL.
22  */
23 
24 #include <linux/fs.h>
25 #include <linux/init.h>
26 #include <linux/vfs.h>
27 #include <linux/mount.h>
28 #include <linux/ramfs.h>
29 #include <linux/pagemap.h>
30 #include <linux/file.h>
31 #include <linux/fileattr.h>
32 #include <linux/mm.h>
33 #include <linux/random.h>
34 #include <linux/sched/signal.h>
35 #include <linux/export.h>
36 #include <linux/shmem_fs.h>
37 #include <linux/swap.h>
38 #include <linux/uio.h>
39 #include <linux/hugetlb.h>
40 #include <linux/fs_parser.h>
41 #include <linux/swapfile.h>
42 #include <linux/iversion.h>
43 #include <linux/unicode.h>
44 #include "swap.h"
45 
46 static struct vfsmount *shm_mnt __ro_after_init;
47 
48 #ifdef CONFIG_SHMEM
49 /*
50  * This virtual memory filesystem is heavily based on the ramfs. It
51  * extends ramfs by the ability to use swap and honor resource limits
52  * which makes it a completely usable filesystem.
53  */
54 
55 #include <linux/xattr.h>
56 #include <linux/exportfs.h>
57 #include <linux/posix_acl.h>
58 #include <linux/posix_acl_xattr.h>
59 #include <linux/mman.h>
60 #include <linux/string.h>
61 #include <linux/slab.h>
62 #include <linux/backing-dev.h>
63 #include <linux/writeback.h>
64 #include <linux/pagevec.h>
65 #include <linux/percpu_counter.h>
66 #include <linux/falloc.h>
67 #include <linux/splice.h>
68 #include <linux/security.h>
69 #include <linux/swapops.h>
70 #include <linux/mempolicy.h>
71 #include <linux/namei.h>
72 #include <linux/ctype.h>
73 #include <linux/migrate.h>
74 #include <linux/highmem.h>
75 #include <linux/seq_file.h>
76 #include <linux/magic.h>
77 #include <linux/syscalls.h>
78 #include <linux/fcntl.h>
79 #include <uapi/linux/memfd.h>
80 #include <linux/rmap.h>
81 #include <linux/uuid.h>
82 #include <linux/quotaops.h>
83 #include <linux/rcupdate_wait.h>
84 
85 #include <linux/uaccess.h>
86 
87 #include "internal.h"
88 
89 #define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
90 #define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)
91 
92 /* Pretend that each entry is of this size in directory's i_size */
93 #define BOGO_DIRENT_SIZE 20
94 
95 /* Pretend that one inode + its dentry occupy this much memory */
96 #define BOGO_INODE_SIZE 1024
97 
98 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
99 #define SHORT_SYMLINK_LEN 128
100 
101 /*
102  * shmem_fallocate communicates with shmem_fault or shmem_writepage via
103  * inode->i_private (with i_rwsem making sure that it has only one user at
104  * a time): we would prefer not to enlarge the shmem inode just for that.
105  */
106 struct shmem_falloc {
107 	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
108 	pgoff_t start;		/* start of range currently being fallocated */
109 	pgoff_t next;		/* the next page offset to be fallocated */
110 	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
111 	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
112 };
113 
114 struct shmem_options {
115 	unsigned long long blocks;
116 	unsigned long long inodes;
117 	struct mempolicy *mpol;
118 	kuid_t uid;
119 	kgid_t gid;
120 	umode_t mode;
121 	bool full_inums;
122 	int huge;
123 	int seen;
124 	bool noswap;
125 	unsigned short quota_types;
126 	struct shmem_quota_limits qlimits;
127 #if IS_ENABLED(CONFIG_UNICODE)
128 	struct unicode_map *encoding;
129 	bool strict_encoding;
130 #endif
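/* Bits for the "seen" field above, set as each corresponding mount option is parsed */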
131 #define SHMEM_SEEN_BLOCKS 1
132 #define SHMEM_SEEN_INODES 2
133 #define SHMEM_SEEN_HUGE 4
134 #define SHMEM_SEEN_INUMS 8
135 #define SHMEM_SEEN_NOSWAP 16
136 #define SHMEM_SEEN_QUOTA 32
137 };
138 
139 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
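/*
 * Per-policy bitmasks of mTHP folio orders enabled through the sysfs
 * shmem_enabled controls (always/within_size/madvise/inherit).
 */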
140 static unsigned long huge_shmem_orders_always __read_mostly;
141 static unsigned long huge_shmem_orders_madvise __read_mostly;
142 static unsigned long huge_shmem_orders_inherit __read_mostly;
143 static unsigned long huge_shmem_orders_within_size __read_mostly;
144 static bool shmem_orders_configured __initdata;
145 #endif
146 
147 #ifdef CONFIG_TMPFS
148 static unsigned long shmem_default_max_blocks(void)
149 {
150 	return totalram_pages() / 2;
151 }
152 
153 static unsigned long shmem_default_max_inodes(void)
154 {
155 	unsigned long nr_pages = totalram_pages();
156 
157 	return min3(nr_pages - totalhigh_pages(), nr_pages / 2,
158 			ULONG_MAX / BOGO_INODE_SIZE);
159 }
160 #endif
161 
162 static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
163 			struct folio **foliop, enum sgp_type sgp, gfp_t gfp,
164 			struct vm_area_struct *vma, vm_fault_t *fault_type);
165 
166 static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
167 {
168 	return sb->s_fs_info;
169 }
170 
171 /*
172  * shmem_file_setup pre-accounts the whole fixed size of a VM object,
173  * for shared memory and for shared anonymous (/dev/zero) mappings
174  * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
175  * consistent with the pre-accounting of private mappings ...
176  */
177 static inline int shmem_acct_size(unsigned long flags, loff_t size)
178 {
179 	return (flags & VM_NORESERVE) ?
180 		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
181 }
182 
183 static inline void shmem_unacct_size(unsigned long flags, loff_t size)
184 {
185 	if (!(flags & VM_NORESERVE))
186 		vm_unacct_memory(VM_ACCT(size));
187 }
188 
189 static inline int shmem_reacct_size(unsigned long flags,
190 		loff_t oldsize, loff_t newsize)
191 {
192 	if (!(flags & VM_NORESERVE)) {
193 		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
194 			return security_vm_enough_memory_mm(current->mm,
195 					VM_ACCT(newsize) - VM_ACCT(oldsize));
196 		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
197 			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
198 	}
199 	return 0;
200 }
201 
202 /*
203  * ... whereas tmpfs objects are accounted incrementally as
204  * pages are allocated, in order to allow large sparse files.
205  * shmem_get_folio reports shmem_acct_blocks failure as -ENOSPC not -ENOMEM,
206  * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
207  */
208 static inline int shmem_acct_blocks(unsigned long flags, long pages)
209 {
210 	if (!(flags & VM_NORESERVE))
211 		return 0;
212 
213 	return security_vm_enough_memory_mm(current->mm,
214 			pages * VM_ACCT(PAGE_SIZE));
215 }
216 
217 static inline void shmem_unacct_blocks(unsigned long flags, long pages)
218 {
219 	if (flags & VM_NORESERVE)
220 		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
221 }
222 
223 static int shmem_inode_acct_blocks(struct inode *inode, long pages)
224 {
225 	struct shmem_inode_info *info = SHMEM_I(inode);
226 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
227 	int err = -ENOSPC;
228 
229 	if (shmem_acct_blocks(info->flags, pages))
230 		return err;
231 
232 	might_sleep();	/* when quotas */
233 	if (sbinfo->max_blocks) {
234 		if (!percpu_counter_limited_add(&sbinfo->used_blocks,
235 						sbinfo->max_blocks, pages))
236 			goto unacct;
237 
238 		err = dquot_alloc_block_nodirty(inode, pages);
239 		if (err) {
240 			percpu_counter_sub(&sbinfo->used_blocks, pages);
241 			goto unacct;
242 		}
243 	} else {
244 		err = dquot_alloc_block_nodirty(inode, pages);
245 		if (err)
246 			goto unacct;
247 	}
248 
249 	return 0;
250 
251 unacct:
252 	shmem_unacct_blocks(info->flags, pages);
253 	return err;
254 }
255 
256 static void shmem_inode_unacct_blocks(struct inode *inode, long pages)
257 {
258 	struct shmem_inode_info *info = SHMEM_I(inode);
259 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
260 
261 	might_sleep();	/* when quotas */
262 	dquot_free_block_nodirty(inode, pages);
263 
264 	if (sbinfo->max_blocks)
265 		percpu_counter_sub(&sbinfo->used_blocks, pages);
266 	shmem_unacct_blocks(info->flags, pages);
267 }
268 
269 static const struct super_operations shmem_ops;
270 static const struct address_space_operations shmem_aops;
271 static const struct file_operations shmem_file_operations;
272 static const struct inode_operations shmem_inode_operations;
273 static const struct inode_operations shmem_dir_inode_operations;
274 static const struct inode_operations shmem_special_inode_operations;
275 static const struct vm_operations_struct shmem_vm_ops;
276 static const struct vm_operations_struct shmem_anon_vm_ops;
277 static struct file_system_type shmem_fs_type;
278 
279 bool shmem_mapping(struct address_space *mapping)
280 {
281 	return mapping->a_ops == &shmem_aops;
282 }
283 EXPORT_SYMBOL_GPL(shmem_mapping);
284 
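/*
 * Shared anonymous mappings (MAP_ANONYMOUS|MAP_SHARED) are backed by an
 * internal shmem file on shm_mnt; their vmas carry shmem_anon_vm_ops.
 */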
285 bool vma_is_anon_shmem(struct vm_area_struct *vma)
286 {
287 	return vma->vm_ops == &shmem_anon_vm_ops;
288 }
289 
290 bool vma_is_shmem(struct vm_area_struct *vma)
291 {
292 	return vma_is_anon_shmem(vma) || vma->vm_ops == &shmem_vm_ops;
293 }
294 
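/*
 * Inodes with pages out on swap are kept on shmem_swaplist, so that
 * shmem_unuse() can find them again at swapoff time.
 */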
295 static LIST_HEAD(shmem_swaplist);
296 static DEFINE_MUTEX(shmem_swaplist_mutex);
297 
298 #ifdef CONFIG_TMPFS_QUOTA
299 
300 static int shmem_enable_quotas(struct super_block *sb,
301 			       unsigned short quota_types)
302 {
303 	int type, err = 0;
304 
305 	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
306 	for (type = 0; type < SHMEM_MAXQUOTAS; type++) {
307 		if (!(quota_types & (1 << type)))
308 			continue;
309 		err = dquot_load_quota_sb(sb, type, QFMT_SHMEM,
310 					  DQUOT_USAGE_ENABLED |
311 					  DQUOT_LIMITS_ENABLED);
312 		if (err)
313 			goto out_err;
314 	}
315 	return 0;
316 
317 out_err:
318 	pr_warn("tmpfs: failed to enable quota tracking (type=%d, err=%d)\n",
319 		type, err);
320 	for (type--; type >= 0; type--)
321 		dquot_quota_off(sb, type);
322 	return err;
323 }
324 
325 static void shmem_disable_quotas(struct super_block *sb)
326 {
327 	int type;
328 
329 	for (type = 0; type < SHMEM_MAXQUOTAS; type++)
330 		dquot_quota_off(sb, type);
331 }
332 
333 static struct dquot __rcu **shmem_get_dquots(struct inode *inode)
334 {
335 	return SHMEM_I(inode)->i_dquot;
336 }
337 #endif /* CONFIG_TMPFS_QUOTA */
338 
339 /*
340  * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
341  * produces a novel ino for the newly allocated inode.
342  *
343  * It may also be called when making a hard link to permit the space needed by
344  * each dentry. However, in that case, no new inode number is needed since that
345  * internally draws from another pool of inode numbers (currently global
346  * get_next_ino()). This case is indicated by passing NULL as inop.
347  */
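/* SB_KERNMOUNT mounts hand out inode numbers in per-cpu batches of this size */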
348 #define SHMEM_INO_BATCH 1024
349 static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
350 {
351 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
352 	ino_t ino;
353 
354 	if (!(sb->s_flags & SB_KERNMOUNT)) {
355 		raw_spin_lock(&sbinfo->stat_lock);
356 		if (sbinfo->max_inodes) {
357 			if (sbinfo->free_ispace < BOGO_INODE_SIZE) {
358 				raw_spin_unlock(&sbinfo->stat_lock);
359 				return -ENOSPC;
360 			}
361 			sbinfo->free_ispace -= BOGO_INODE_SIZE;
362 		}
363 		if (inop) {
364 			ino = sbinfo->next_ino++;
365 			if (unlikely(is_zero_ino(ino)))
366 				ino = sbinfo->next_ino++;
367 			if (unlikely(!sbinfo->full_inums &&
368 				     ino > UINT_MAX)) {
369 				/*
370 				 * Emulate get_next_ino uint wraparound for
371 				 * compatibility
372 				 */
373 				if (IS_ENABLED(CONFIG_64BIT))
374 					pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
375 						__func__, MINOR(sb->s_dev));
376 				sbinfo->next_ino = 1;
377 				ino = sbinfo->next_ino++;
378 			}
379 			*inop = ino;
380 		}
381 		raw_spin_unlock(&sbinfo->stat_lock);
382 	} else if (inop) {
383 		/*
384 		 * __shmem_file_setup, one of our callers, is lock-free: it
385 		 * doesn't hold stat_lock in shmem_reserve_inode since
386 		 * max_inodes is always 0, and is called from potentially
387 		 * unknown contexts. As such, use a per-cpu batched allocator
388 		 * which doesn't require the per-sb stat_lock unless we are at
389 		 * the batch boundary.
390 		 *
391 		 * We don't need to worry about inode{32,64} since SB_KERNMOUNT
392 		 * shmem mounts are not exposed to userspace, so we don't need
393 		 * to worry about things like glibc compatibility.
394 		 */
395 		ino_t *next_ino;
396 
397 		next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
398 		ino = *next_ino;
399 		if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
400 			raw_spin_lock(&sbinfo->stat_lock);
401 			ino = sbinfo->next_ino;
402 			sbinfo->next_ino += SHMEM_INO_BATCH;
403 			raw_spin_unlock(&sbinfo->stat_lock);
404 			if (unlikely(is_zero_ino(ino)))
405 				ino++;
406 		}
407 		*inop = ino;
408 		*next_ino = ++ino;
409 		put_cpu();
410 	}
411 
412 	return 0;
413 }
414 
415 static void shmem_free_inode(struct super_block *sb, size_t freed_ispace)
416 {
417 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
418 	if (sbinfo->max_inodes) {
419 		raw_spin_lock(&sbinfo->stat_lock);
420 		sbinfo->free_ispace += BOGO_INODE_SIZE + freed_ispace;
421 		raw_spin_unlock(&sbinfo->stat_lock);
422 	}
423 }
424 
425 /**
426  * shmem_recalc_inode - recalculate the block usage of an inode
427  * @inode: inode to recalc
428  * @alloced: the change in number of pages allocated to inode
429  * @swapped: the change in number of pages swapped from inode
430  *
431  * We have to calculate the free blocks since the mm can drop
432  * undirtied hole pages behind our back.
433  *
434  * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
435  * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
436  */
437 static void shmem_recalc_inode(struct inode *inode, long alloced, long swapped)
438 {
439 	struct shmem_inode_info *info = SHMEM_I(inode);
440 	long freed;
441 
442 	spin_lock(&info->lock);
443 	info->alloced += alloced;
444 	info->swapped += swapped;
445 	freed = info->alloced - info->swapped -
446 		READ_ONCE(inode->i_mapping->nrpages);
447 	/*
448 	 * Special case: whereas normally shmem_recalc_inode() is called
449 	 * after i_mapping->nrpages has already been adjusted (up or down),
450 	 * shmem_writepage() has to raise swapped before nrpages is lowered -
451 	 * to stop a racing shmem_recalc_inode() from thinking that a page has
452 	 * been freed.  Compensate here, to avoid the need for a followup call.
453 	 */
454 	if (swapped > 0)
455 		freed += swapped;
456 	if (freed > 0)
457 		info->alloced -= freed;
458 	spin_unlock(&info->lock);
459 
460 	/* The quota case may block */
461 	if (freed > 0)
462 		shmem_inode_unacct_blocks(inode, freed);
463 }
464 
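/*
 * shmem_charge() and shmem_uncharge() are for callers (such as khugepaged's
 * collapse of shmem pages) which add or remove page cache pages directly,
 * bypassing shmem_add_to_page_cache(), and so must fix up block accounting.
 */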
465 bool shmem_charge(struct inode *inode, long pages)
466 {
467 	struct address_space *mapping = inode->i_mapping;
468 
469 	if (shmem_inode_acct_blocks(inode, pages))
470 		return false;
471 
472 	/* nrpages adjustment first, then shmem_recalc_inode() when balanced */
473 	xa_lock_irq(&mapping->i_pages);
474 	mapping->nrpages += pages;
475 	xa_unlock_irq(&mapping->i_pages);
476 
477 	shmem_recalc_inode(inode, pages, 0);
478 	return true;
479 }
480 
481 void shmem_uncharge(struct inode *inode, long pages)
482 {
483 	/* pages argument is currently unused: keep it to help debugging */
484 	/* nrpages adjustment done by __filemap_remove_folio() or caller */
485 
486 	shmem_recalc_inode(inode, 0, 0);
487 }
488 
489 /*
490  * Replace item expected in xarray by a new item, while holding xa_lock.
491  */
492 static int shmem_replace_entry(struct address_space *mapping,
493 			pgoff_t index, void *expected, void *replacement)
494 {
495 	XA_STATE(xas, &mapping->i_pages, index);
496 	void *item;
497 
498 	VM_BUG_ON(!expected);
499 	VM_BUG_ON(!replacement);
500 	item = xas_load(&xas);
501 	if (item != expected)
502 		return -ENOENT;
503 	xas_store(&xas, replacement);
504 	return 0;
505 }
506 
507 /*
508  * Sometimes, before we decide whether to proceed or to fail, we must check
509  * that an entry was not already brought back from swap by a racing thread.
510  *
511  * Checking folio is not enough: by the time a swapcache folio is locked, it
512  * might be reused, and again be swapcache, using the same swap as before.
513  */
514 static bool shmem_confirm_swap(struct address_space *mapping,
515 			       pgoff_t index, swp_entry_t swap)
516 {
517 	return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
518 }
519 
520 /*
521  * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
522  *
523  * SHMEM_HUGE_NEVER:
524  *	disables huge pages for the mount;
525  * SHMEM_HUGE_ALWAYS:
526  *	enables huge pages for the mount;
527  * SHMEM_HUGE_WITHIN_SIZE:
528  *	only allocate huge pages if the page will be fully within i_size,
529  *	also respect fadvise()/madvise() hints;
530  * SHMEM_HUGE_ADVISE:
531  *	only allocate huge pages if requested with fadvise()/madvise();
532  */
533 
534 #define SHMEM_HUGE_NEVER	0
535 #define SHMEM_HUGE_ALWAYS	1
536 #define SHMEM_HUGE_WITHIN_SIZE	2
537 #define SHMEM_HUGE_ADVISE	3
538 
539 /*
540  * Special values.
541  * Only can be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
542  *
543  * SHMEM_HUGE_DENY:
544  *	disables huge on shm_mnt and all mounts, for emergency use;
545  * SHMEM_HUGE_FORCE:
546  *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
547  *
548  */
549 #define SHMEM_HUGE_DENY		(-1)
550 #define SHMEM_HUGE_FORCE	(-2)
551 
552 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
553 /* ifdef here to avoid bloating shmem.o when not necessary */
554 
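/*
 * shmem_huge is the global shmem_enabled policy; tmpfs_huge is the default
 * huge= policy for tmpfs mounts that do not specify one.
 */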
555 static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
556 static int tmpfs_huge __read_mostly = SHMEM_HUGE_NEVER;
557 
558 /**
559  * shmem_mapping_size_orders - Get allowable folio orders for the given file size.
560  * @mapping: Target address_space.
561  * @index: The page index.
562  * @write_end: end of a write, could extend inode size.
563  *
564  * This returns huge orders for folios (when supported) based on the file size
565  * which the mapping currently allows at the given index. The index is relevant
566  * due to alignment considerations the mapping might have. The returned order
567  * may be less than the size passed.
568  *
569  * Return: The orders.
570  */
571 static inline unsigned int
572 shmem_mapping_size_orders(struct address_space *mapping, pgoff_t index, loff_t write_end)
573 {
574 	unsigned int order;
575 	size_t size;
576 
577 	if (!mapping_large_folio_support(mapping) || !write_end)
578 		return 0;
579 
580 	/* Calculate the write size based on the write_end */
581 	size = write_end - (index << PAGE_SHIFT);
582 	order = filemap_get_order(size);
583 	if (!order)
584 		return 0;
585 
586 	/* If we're not aligned, allocate a smaller folio */
587 	if (index & ((1UL << order) - 1))
588 		order = __ffs(index);
589 
590 	order = min_t(size_t, order, MAX_PAGECACHE_ORDER);
591 	return order > 0 ? BIT(order + 1) - 1 : 0;
592 }
593 
594 static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
595 					      loff_t write_end, bool shmem_huge_force,
596 					      struct vm_area_struct *vma,
597 					      unsigned long vm_flags)
598 {
599 	unsigned int maybe_pmd_order = HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER ?
600 		0 : BIT(HPAGE_PMD_ORDER);
601 	unsigned long within_size_orders;
602 	unsigned int order;
603 	pgoff_t aligned_index;
604 	loff_t i_size;
605 
606 	if (!S_ISREG(inode->i_mode))
607 		return 0;
608 	if (shmem_huge == SHMEM_HUGE_DENY)
609 		return 0;
610 	if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
611 		return maybe_pmd_order;
612 
613 	/*
614 	 * The huge order allocation for anon shmem is controlled through
615 	 * the mTHP interface, so we still use PMD-sized huge order to
616 	 * check whether global control is enabled.
617 	 *
618 	 * For tmpfs mmap()'s huge order, we still use PMD-sized order to
619 	 * allocate huge pages due to lack of a write size hint.
620 	 *
621 	 * Otherwise, tmpfs will allow getting a highest order hint based on
622 	 * the size of write and fallocate paths, then will try each allowable
623 	 * huge orders.
624 	 */
625 	switch (SHMEM_SB(inode->i_sb)->huge) {
626 	case SHMEM_HUGE_ALWAYS:
627 		if (vma)
628 			return maybe_pmd_order;
629 
630 		return shmem_mapping_size_orders(inode->i_mapping, index, write_end);
631 	case SHMEM_HUGE_WITHIN_SIZE:
632 		if (vma)
633 			within_size_orders = maybe_pmd_order;
634 		else
635 			within_size_orders = shmem_mapping_size_orders(inode->i_mapping,
636 								       index, write_end);
637 
638 		order = highest_order(within_size_orders);
639 		while (within_size_orders) {
640 			aligned_index = round_up(index + 1, 1 << order);
641 			i_size = max(write_end, i_size_read(inode));
642 			i_size = round_up(i_size, PAGE_SIZE);
643 			if (i_size >> PAGE_SHIFT >= aligned_index)
644 				return within_size_orders;
645 
646 			order = next_order(&within_size_orders, order);
647 		}
648 		fallthrough;
649 	case SHMEM_HUGE_ADVISE:
650 		if (vm_flags & VM_HUGEPAGE)
651 			return maybe_pmd_order;
652 		fallthrough;
653 	default:
654 		return 0;
655 	}
656 }
657 
658 static int shmem_parse_huge(const char *str)
659 {
660 	int huge;
661 
662 	if (!str)
663 		return -EINVAL;
664 
665 	if (!strcmp(str, "never"))
666 		huge = SHMEM_HUGE_NEVER;
667 	else if (!strcmp(str, "always"))
668 		huge = SHMEM_HUGE_ALWAYS;
669 	else if (!strcmp(str, "within_size"))
670 		huge = SHMEM_HUGE_WITHIN_SIZE;
671 	else if (!strcmp(str, "advise"))
672 		huge = SHMEM_HUGE_ADVISE;
673 	else if (!strcmp(str, "deny"))
674 		huge = SHMEM_HUGE_DENY;
675 	else if (!strcmp(str, "force"))
676 		huge = SHMEM_HUGE_FORCE;
677 	else
678 		return -EINVAL;
679 
680 	if (!has_transparent_hugepage() &&
681 	    huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
682 		return -EINVAL;
683 
684 	/* Do not override huge allocation policy with non-PMD sized mTHP */
685 	if (huge == SHMEM_HUGE_FORCE &&
686 	    huge_shmem_orders_inherit != BIT(HPAGE_PMD_ORDER))
687 		return -EINVAL;
688 
689 	return huge;
690 }
691 
692 #if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
693 static const char *shmem_format_huge(int huge)
694 {
695 	switch (huge) {
696 	case SHMEM_HUGE_NEVER:
697 		return "never";
698 	case SHMEM_HUGE_ALWAYS:
699 		return "always";
700 	case SHMEM_HUGE_WITHIN_SIZE:
701 		return "within_size";
702 	case SHMEM_HUGE_ADVISE:
703 		return "advise";
704 	case SHMEM_HUGE_DENY:
705 		return "deny";
706 	case SHMEM_HUGE_FORCE:
707 		return "force";
708 	default:
709 		VM_BUG_ON(1);
710 		return "bad_val";
711 	}
712 }
713 #endif
714 
715 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
716 		struct shrink_control *sc, unsigned long nr_to_free)
717 {
718 	LIST_HEAD(list), *pos, *next;
719 	struct inode *inode;
720 	struct shmem_inode_info *info;
721 	struct folio *folio;
722 	unsigned long batch = sc ? sc->nr_to_scan : 128;
723 	unsigned long split = 0, freed = 0;
724 
725 	if (list_empty(&sbinfo->shrinklist))
726 		return SHRINK_STOP;
727 
728 	spin_lock(&sbinfo->shrinklist_lock);
729 	list_for_each_safe(pos, next, &sbinfo->shrinklist) {
730 		info = list_entry(pos, struct shmem_inode_info, shrinklist);
731 
732 		/* pin the inode */
733 		inode = igrab(&info->vfs_inode);
734 
735 		/* inode is about to be evicted */
736 		if (!inode) {
737 			list_del_init(&info->shrinklist);
738 			goto next;
739 		}
740 
741 		list_move(&info->shrinklist, &list);
742 next:
743 		sbinfo->shrinklist_len--;
744 		if (!--batch)
745 			break;
746 	}
747 	spin_unlock(&sbinfo->shrinklist_lock);
748 
749 	list_for_each_safe(pos, next, &list) {
750 		pgoff_t next, end;
751 		loff_t i_size;
752 		int ret;
753 
754 		info = list_entry(pos, struct shmem_inode_info, shrinklist);
755 		inode = &info->vfs_inode;
756 
757 		if (nr_to_free && freed >= nr_to_free)
758 			goto move_back;
759 
760 		i_size = i_size_read(inode);
761 		folio = filemap_get_entry(inode->i_mapping, i_size / PAGE_SIZE);
762 		if (!folio || xa_is_value(folio))
763 			goto drop;
764 
765 		/* No large folio at the end of the file: nothing to split */
766 		if (!folio_test_large(folio)) {
767 			folio_put(folio);
768 			goto drop;
769 		}
770 
771 		/* Check if there is anything to gain from splitting */
772 		next = folio_next_index(folio);
773 		end = shmem_fallocend(inode, DIV_ROUND_UP(i_size, PAGE_SIZE));
774 		if (end <= folio->index || end >= next) {
775 			folio_put(folio);
776 			goto drop;
777 		}
778 
779 		/*
780 		 * Move the inode on the list back to shrinklist if we failed
781 		 * to lock the page at this time.
782 		 *
783 		 * Waiting for the lock may lead to deadlock in the
784 		 * reclaim path.
785 		 */
786 		if (!folio_trylock(folio)) {
787 			folio_put(folio);
788 			goto move_back;
789 		}
790 
791 		ret = split_folio(folio);
792 		folio_unlock(folio);
793 		folio_put(folio);
794 
795 		/* If split failed move the inode on the list back to shrinklist */
796 		if (ret)
797 			goto move_back;
798 
799 		freed += next - end;
800 		split++;
801 drop:
802 		list_del_init(&info->shrinklist);
803 		goto put;
804 move_back:
805 		/*
806 		 * Make sure the inode is either on the global list or deleted
807 		 * from any local list before iput() since it could be deleted
808 		 * in another thread once we put the inode (then the local list
809 		 * is corrupted).
810 		 */
811 		spin_lock(&sbinfo->shrinklist_lock);
812 		list_move(&info->shrinklist, &sbinfo->shrinklist);
813 		sbinfo->shrinklist_len++;
814 		spin_unlock(&sbinfo->shrinklist_lock);
815 put:
816 		iput(inode);
817 	}
818 
819 	return split;
820 }
821 
822 static long shmem_unused_huge_scan(struct super_block *sb,
823 		struct shrink_control *sc)
824 {
825 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
826 
827 	if (!READ_ONCE(sbinfo->shrinklist_len))
828 		return SHRINK_STOP;
829 
830 	return shmem_unused_huge_shrink(sbinfo, sc, 0);
831 }
832 
833 static long shmem_unused_huge_count(struct super_block *sb,
834 		struct shrink_control *sc)
835 {
836 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
837 	return READ_ONCE(sbinfo->shrinklist_len);
838 }
839 #else /* !CONFIG_TRANSPARENT_HUGEPAGE */
840 
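/* Without CONFIG_TRANSPARENT_HUGEPAGE, behave as if huge pages are always denied */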
841 #define shmem_huge SHMEM_HUGE_DENY
842 
843 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
844 		struct shrink_control *sc, unsigned long nr_to_free)
845 {
846 	return 0;
847 }
848 
849 static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
850 					      loff_t write_end, bool shmem_huge_force,
851 					      struct vm_area_struct *vma,
852 					      unsigned long vm_flags)
853 {
854 	return 0;
855 }
856 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
857 
858 static void shmem_update_stats(struct folio *folio, int nr_pages)
859 {
860 	if (folio_test_pmd_mappable(folio))
861 		__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr_pages);
862 	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr_pages);
863 	__lruvec_stat_mod_folio(folio, NR_SHMEM, nr_pages);
864 }
865 
866 /*
867  * Somewhat like filemap_add_folio, but error if expected item has gone.
868  */
869 static int shmem_add_to_page_cache(struct folio *folio,
870 				   struct address_space *mapping,
871 				   pgoff_t index, void *expected, gfp_t gfp)
872 {
873 	XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
874 	long nr = folio_nr_pages(folio);
875 
876 	VM_BUG_ON_FOLIO(index != round_down(index, nr), folio);
877 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
878 	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
879 
880 	folio_ref_add(folio, nr);
881 	folio->mapping = mapping;
882 	folio->index = index;
883 
884 	gfp &= GFP_RECLAIM_MASK;
885 	folio_throttle_swaprate(folio, gfp);
886 
887 	do {
888 		xas_lock_irq(&xas);
889 		if (expected != xas_find_conflict(&xas)) {
890 			xas_set_err(&xas, -EEXIST);
891 			goto unlock;
892 		}
893 		if (expected && xas_find_conflict(&xas)) {
894 			xas_set_err(&xas, -EEXIST);
895 			goto unlock;
896 		}
897 		xas_store(&xas, folio);
898 		if (xas_error(&xas))
899 			goto unlock;
900 		shmem_update_stats(folio, nr);
901 		mapping->nrpages += nr;
902 unlock:
903 		xas_unlock_irq(&xas);
904 	} while (xas_nomem(&xas, gfp));
905 
906 	if (xas_error(&xas)) {
907 		folio->mapping = NULL;
908 		folio_ref_sub(folio, nr);
909 		return xas_error(&xas);
910 	}
911 
912 	return 0;
913 }
914 
915 /*
916  * Somewhat like filemap_remove_folio, but substitutes swap for @folio.
917  */
918 static void shmem_delete_from_page_cache(struct folio *folio, void *radswap)
919 {
920 	struct address_space *mapping = folio->mapping;
921 	long nr = folio_nr_pages(folio);
922 	int error;
923 
924 	xa_lock_irq(&mapping->i_pages);
925 	error = shmem_replace_entry(mapping, folio->index, folio, radswap);
926 	folio->mapping = NULL;
927 	mapping->nrpages -= nr;
928 	shmem_update_stats(folio, -nr);
929 	xa_unlock_irq(&mapping->i_pages);
930 	folio_put_refs(folio, nr);
931 	BUG_ON(error);
932 }
933 
934 /*
935  * Remove swap entry from page cache, free the swap and its page cache. Returns
936  * the number of pages being freed. 0 means entry not found in XArray (0 pages
937  * being freed).
938  */
939 static long shmem_free_swap(struct address_space *mapping,
940 			    pgoff_t index, void *radswap)
941 {
942 	int order = xa_get_order(&mapping->i_pages, index);
943 	void *old;
944 
945 	old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
946 	if (old != radswap)
947 		return 0;
948 	free_swap_and_cache_nr(radix_to_swp_entry(radswap), 1 << order);
949 
950 	return 1 << order;
951 }
952 
953 /*
954  * Determine (in bytes) how many of the shmem object's pages mapped by the
955  * given offsets are swapped out.
956  *
957  * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
958  * as long as the inode doesn't go away and racy results are not a problem.
959  */
960 unsigned long shmem_partial_swap_usage(struct address_space *mapping,
961 						pgoff_t start, pgoff_t end)
962 {
963 	XA_STATE(xas, &mapping->i_pages, start);
964 	struct page *page;
965 	unsigned long swapped = 0;
966 	unsigned long max = end - 1;
967 
968 	rcu_read_lock();
969 	xas_for_each(&xas, page, max) {
970 		if (xas_retry(&xas, page))
971 			continue;
972 		if (xa_is_value(page))
973 			swapped += 1 << xas_get_order(&xas);
974 		if (xas.xa_index == max)
975 			break;
976 		if (need_resched()) {
977 			xas_pause(&xas);
978 			cond_resched_rcu();
979 		}
980 	}
981 	rcu_read_unlock();
982 
983 	return swapped << PAGE_SHIFT;
984 }
985 
986 /*
987  * Determine (in bytes) how many of the shmem object's pages mapped by the
988  * given vma are swapped out.
989  *
990  * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
991  * as long as the inode doesn't go away and racy results are not a problem.
992  */
993 unsigned long shmem_swap_usage(struct vm_area_struct *vma)
994 {
995 	struct inode *inode = file_inode(vma->vm_file);
996 	struct shmem_inode_info *info = SHMEM_I(inode);
997 	struct address_space *mapping = inode->i_mapping;
998 	unsigned long swapped;
999 
1000 	/* Be careful as we don't hold info->lock */
1001 	swapped = READ_ONCE(info->swapped);
1002 
1003 	/*
1004 	 * The easier cases are when the shmem object has nothing in swap, or
1005 	 * the vma maps it whole. Then we can simply use the stats that we
1006 	 * already track.
1007 	 */
1008 	if (!swapped)
1009 		return 0;
1010 
1011 	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
1012 		return swapped << PAGE_SHIFT;
1013 
1014 	/* Here comes the more involved part */
1015 	return shmem_partial_swap_usage(mapping, vma->vm_pgoff,
1016 					vma->vm_pgoff + vma_pages(vma));
1017 }
1018 
1019 /*
1020  * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
1021  */
1022 void shmem_unlock_mapping(struct address_space *mapping)
1023 {
1024 	struct folio_batch fbatch;
1025 	pgoff_t index = 0;
1026 
1027 	folio_batch_init(&fbatch);
1028 	/*
1029 	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
1030 	 */
1031 	while (!mapping_unevictable(mapping) &&
1032 	       filemap_get_folios(mapping, &index, ~0UL, &fbatch)) {
1033 		check_move_unevictable_folios(&fbatch);
1034 		folio_batch_release(&fbatch);
1035 		cond_resched();
1036 	}
1037 }
1038 
1039 static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index)
1040 {
1041 	struct folio *folio;
1042 
1043 	/*
1044 	 * At first avoid shmem_get_folio(,,,SGP_READ): that fails
1045 	 * beyond i_size, and reports fallocated folios as holes.
1046 	 */
1047 	folio = filemap_get_entry(inode->i_mapping, index);
1048 	if (!folio)
1049 		return folio;
1050 	if (!xa_is_value(folio)) {
1051 		folio_lock(folio);
1052 		if (folio->mapping == inode->i_mapping)
1053 			return folio;
1054 		/* The folio has been swapped out */
1055 		folio_unlock(folio);
1056 		folio_put(folio);
1057 	}
1058 	/*
1059 	 * But read a folio back from swap if any of it is within i_size
1060 	 * (although in some cases this is just a waste of time).
1061 	 */
1062 	folio = NULL;
1063 	shmem_get_folio(inode, index, 0, &folio, SGP_READ);
1064 	return folio;
1065 }
1066 
1067 /*
1068  * Remove range of pages and swap entries from page cache, and free them.
1069  * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
1070  */
1071 static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
1072 								 bool unfalloc)
1073 {
1074 	struct address_space *mapping = inode->i_mapping;
1075 	struct shmem_inode_info *info = SHMEM_I(inode);
1076 	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
1077 	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
1078 	struct folio_batch fbatch;
1079 	pgoff_t indices[PAGEVEC_SIZE];
1080 	struct folio *folio;
1081 	bool same_folio;
1082 	long nr_swaps_freed = 0;
1083 	pgoff_t index;
1084 	int i;
1085 
1086 	if (lend == -1)
1087 		end = -1;	/* unsigned, so actually very big */
1088 
1089 	if (info->fallocend > start && info->fallocend <= end && !unfalloc)
1090 		info->fallocend = start;
1091 
1092 	folio_batch_init(&fbatch);
1093 	index = start;
1094 	while (index < end && find_lock_entries(mapping, &index, end - 1,
1095 			&fbatch, indices)) {
1096 		for (i = 0; i < folio_batch_count(&fbatch); i++) {
1097 			folio = fbatch.folios[i];
1098 
1099 			if (xa_is_value(folio)) {
1100 				if (unfalloc)
1101 					continue;
1102 				nr_swaps_freed += shmem_free_swap(mapping,
1103 							indices[i], folio);
1104 				continue;
1105 			}
1106 
1107 			if (!unfalloc || !folio_test_uptodate(folio))
1108 				truncate_inode_folio(mapping, folio);
1109 			folio_unlock(folio);
1110 		}
1111 		folio_batch_remove_exceptionals(&fbatch);
1112 		folio_batch_release(&fbatch);
1113 		cond_resched();
1114 	}
1115 
1116 	/*
1117 	 * When undoing a failed fallocate, we want none of the partial folio
1118 	 * zeroing and splitting below, but shall want to truncate the whole
1119 	 * folio when !uptodate indicates that it was added by this fallocate,
1120 	 * even when [lstart, lend] covers only a part of the folio.
1121 	 */
1122 	if (unfalloc)
1123 		goto whole_folios;
1124 
1125 	same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
1126 	folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
1127 	if (folio) {
1128 		same_folio = lend < folio_pos(folio) + folio_size(folio);
1129 		folio_mark_dirty(folio);
1130 		if (!truncate_inode_partial_folio(folio, lstart, lend)) {
1131 			start = folio_next_index(folio);
1132 			if (same_folio)
1133 				end = folio->index;
1134 		}
1135 		folio_unlock(folio);
1136 		folio_put(folio);
1137 		folio = NULL;
1138 	}
1139 
1140 	if (!same_folio)
1141 		folio = shmem_get_partial_folio(inode, lend >> PAGE_SHIFT);
1142 	if (folio) {
1143 		folio_mark_dirty(folio);
1144 		if (!truncate_inode_partial_folio(folio, lstart, lend))
1145 			end = folio->index;
1146 		folio_unlock(folio);
1147 		folio_put(folio);
1148 	}
1149 
1150 whole_folios:
1151 
1152 	index = start;
1153 	while (index < end) {
1154 		cond_resched();
1155 
1156 		if (!find_get_entries(mapping, &index, end - 1, &fbatch,
1157 				indices)) {
1158 			/* If all gone or hole-punch or unfalloc, we're done */
1159 			if (index == start || end != -1)
1160 				break;
1161 			/* But if truncating, restart to make sure all gone */
1162 			index = start;
1163 			continue;
1164 		}
1165 		for (i = 0; i < folio_batch_count(&fbatch); i++) {
1166 			folio = fbatch.folios[i];
1167 
1168 			if (xa_is_value(folio)) {
1169 				long swaps_freed;
1170 
1171 				if (unfalloc)
1172 					continue;
1173 				swaps_freed = shmem_free_swap(mapping, indices[i], folio);
1174 				if (!swaps_freed) {
1175 					/* Swap was replaced by page: retry */
1176 					index = indices[i];
1177 					break;
1178 				}
1179 				nr_swaps_freed += swaps_freed;
1180 				continue;
1181 			}
1182 
1183 			folio_lock(folio);
1184 
1185 			if (!unfalloc || !folio_test_uptodate(folio)) {
1186 				if (folio_mapping(folio) != mapping) {
1187 					/* Page was replaced by swap: retry */
1188 					folio_unlock(folio);
1189 					index = indices[i];
1190 					break;
1191 				}
1192 				VM_BUG_ON_FOLIO(folio_test_writeback(folio),
1193 						folio);
1194 
1195 				if (!folio_test_large(folio)) {
1196 					truncate_inode_folio(mapping, folio);
1197 				} else if (truncate_inode_partial_folio(folio, lstart, lend)) {
1198 					/*
1199 					 * If we split a page, reset the loop so
1200 					 * that we pick up the new sub pages.
1201 					 * Otherwise the THP was entirely
1202 					 * dropped or the target range was
1203 					 * zeroed, so just continue the loop as
1204 					 * is.
1205 					 */
1206 					if (!folio_test_large(folio)) {
1207 						folio_unlock(folio);
1208 						index = start;
1209 						break;
1210 					}
1211 				}
1212 			}
1213 			folio_unlock(folio);
1214 		}
1215 		folio_batch_remove_exceptionals(&fbatch);
1216 		folio_batch_release(&fbatch);
1217 	}
1218 
1219 	shmem_recalc_inode(inode, 0, -nr_swaps_freed);
1220 }
1221 
1222 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
1223 {
1224 	shmem_undo_range(inode, lstart, lend, false);
1225 	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
1226 	inode_inc_iversion(inode);
1227 }
1228 EXPORT_SYMBOL_GPL(shmem_truncate_range);
1229 
1230 static int shmem_getattr(struct mnt_idmap *idmap,
1231 			 const struct path *path, struct kstat *stat,
1232 			 u32 request_mask, unsigned int query_flags)
1233 {
1234 	struct inode *inode = path->dentry->d_inode;
1235 	struct shmem_inode_info *info = SHMEM_I(inode);
1236 
1237 	if (info->alloced - info->swapped != inode->i_mapping->nrpages)
1238 		shmem_recalc_inode(inode, 0, 0);
1239 
1240 	if (info->fsflags & FS_APPEND_FL)
1241 		stat->attributes |= STATX_ATTR_APPEND;
1242 	if (info->fsflags & FS_IMMUTABLE_FL)
1243 		stat->attributes |= STATX_ATTR_IMMUTABLE;
1244 	if (info->fsflags & FS_NODUMP_FL)
1245 		stat->attributes |= STATX_ATTR_NODUMP;
1246 	stat->attributes_mask |= (STATX_ATTR_APPEND |
1247 			STATX_ATTR_IMMUTABLE |
1248 			STATX_ATTR_NODUMP);
1249 	generic_fillattr(idmap, request_mask, inode, stat);
1250 
1251 	if (shmem_huge_global_enabled(inode, 0, 0, false, NULL, 0))
1252 		stat->blksize = HPAGE_PMD_SIZE;
1253 
1254 	if (request_mask & STATX_BTIME) {
1255 		stat->result_mask |= STATX_BTIME;
1256 		stat->btime.tv_sec = info->i_crtime.tv_sec;
1257 		stat->btime.tv_nsec = info->i_crtime.tv_nsec;
1258 	}
1259 
1260 	return 0;
1261 }
1262 
1263 static int shmem_setattr(struct mnt_idmap *idmap,
1264 			 struct dentry *dentry, struct iattr *attr)
1265 {
1266 	struct inode *inode = d_inode(dentry);
1267 	struct shmem_inode_info *info = SHMEM_I(inode);
1268 	int error;
1269 	bool update_mtime = false;
1270 	bool update_ctime = true;
1271 
1272 	error = setattr_prepare(idmap, dentry, attr);
1273 	if (error)
1274 		return error;
1275 
1276 	if ((info->seals & F_SEAL_EXEC) && (attr->ia_valid & ATTR_MODE)) {
1277 		if ((inode->i_mode ^ attr->ia_mode) & 0111) {
1278 			return -EPERM;
1279 		}
1280 	}
1281 
1282 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
1283 		loff_t oldsize = inode->i_size;
1284 		loff_t newsize = attr->ia_size;
1285 
1286 		/* protected by i_rwsem */
1287 		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
1288 		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
1289 			return -EPERM;
1290 
1291 		if (newsize != oldsize) {
1292 			error = shmem_reacct_size(SHMEM_I(inode)->flags,
1293 					oldsize, newsize);
1294 			if (error)
1295 				return error;
1296 			i_size_write(inode, newsize);
1297 			update_mtime = true;
1298 		} else {
1299 			update_ctime = false;
1300 		}
1301 		if (newsize <= oldsize) {
1302 			loff_t holebegin = round_up(newsize, PAGE_SIZE);
1303 			if (oldsize > holebegin)
1304 				unmap_mapping_range(inode->i_mapping,
1305 							holebegin, 0, 1);
1306 			if (info->alloced)
1307 				shmem_truncate_range(inode,
1308 							newsize, (loff_t)-1);
1309 			/* unmap again to remove racily COWed private pages */
1310 			if (oldsize > holebegin)
1311 				unmap_mapping_range(inode->i_mapping,
1312 							holebegin, 0, 1);
1313 		}
1314 	}
1315 
1316 	if (is_quota_modification(idmap, inode, attr)) {
1317 		error = dquot_initialize(inode);
1318 		if (error)
1319 			return error;
1320 	}
1321 
1322 	/* Transfer quota accounting */
1323 	if (i_uid_needs_update(idmap, attr, inode) ||
1324 	    i_gid_needs_update(idmap, attr, inode)) {
1325 		error = dquot_transfer(idmap, inode, attr);
1326 		if (error)
1327 			return error;
1328 	}
1329 
1330 	setattr_copy(idmap, inode, attr);
1331 	if (attr->ia_valid & ATTR_MODE)
1332 		error = posix_acl_chmod(idmap, dentry, inode->i_mode);
1333 	if (!error && update_ctime) {
1334 		inode_set_ctime_current(inode);
1335 		if (update_mtime)
1336 			inode_set_mtime_to_ts(inode, inode_get_ctime(inode));
1337 		inode_inc_iversion(inode);
1338 	}
1339 	return error;
1340 }
1341 
1342 static void shmem_evict_inode(struct inode *inode)
1343 {
1344 	struct shmem_inode_info *info = SHMEM_I(inode);
1345 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1346 	size_t freed = 0;
1347 
1348 	if (shmem_mapping(inode->i_mapping)) {
1349 		shmem_unacct_size(info->flags, inode->i_size);
1350 		inode->i_size = 0;
1351 		mapping_set_exiting(inode->i_mapping);
1352 		shmem_truncate_range(inode, 0, (loff_t)-1);
1353 		if (!list_empty(&info->shrinklist)) {
1354 			spin_lock(&sbinfo->shrinklist_lock);
1355 			if (!list_empty(&info->shrinklist)) {
1356 				list_del_init(&info->shrinklist);
1357 				sbinfo->shrinklist_len--;
1358 			}
1359 			spin_unlock(&sbinfo->shrinklist_lock);
1360 		}
1361 		while (!list_empty(&info->swaplist)) {
1362 			/* Wait while shmem_unuse() is scanning this inode... */
1363 			wait_var_event(&info->stop_eviction,
1364 				       !atomic_read(&info->stop_eviction));
1365 			mutex_lock(&shmem_swaplist_mutex);
1366 			/* ...but beware of the race if we peeked too early */
1367 			if (!atomic_read(&info->stop_eviction))
1368 				list_del_init(&info->swaplist);
1369 			mutex_unlock(&shmem_swaplist_mutex);
1370 		}
1371 	}
1372 
1373 	simple_xattrs_free(&info->xattrs, sbinfo->max_inodes ? &freed : NULL);
1374 	shmem_free_inode(inode->i_sb, freed);
1375 	WARN_ON(inode->i_blocks);
1376 	clear_inode(inode);
1377 #ifdef CONFIG_TMPFS_QUOTA
1378 	dquot_free_inode(inode);
1379 	dquot_drop(inode);
1380 #endif
1381 }
1382 
1383 static int shmem_find_swap_entries(struct address_space *mapping,
1384 				   pgoff_t start, struct folio_batch *fbatch,
1385 				   pgoff_t *indices, unsigned int type)
1386 {
1387 	XA_STATE(xas, &mapping->i_pages, start);
1388 	struct folio *folio;
1389 	swp_entry_t entry;
1390 
1391 	rcu_read_lock();
1392 	xas_for_each(&xas, folio, ULONG_MAX) {
1393 		if (xas_retry(&xas, folio))
1394 			continue;
1395 
1396 		if (!xa_is_value(folio))
1397 			continue;
1398 
1399 		entry = radix_to_swp_entry(folio);
1400 		/*
1401 		 * swapin error entries can be found in the mapping. But they're
1402 		 * deliberately ignored here as we've done everything we can do.
1403 		 */
1404 		if (swp_type(entry) != type)
1405 			continue;
1406 
1407 		indices[folio_batch_count(fbatch)] = xas.xa_index;
1408 		if (!folio_batch_add(fbatch, folio))
1409 			break;
1410 
1411 		if (need_resched()) {
1412 			xas_pause(&xas);
1413 			cond_resched_rcu();
1414 		}
1415 	}
1416 	rcu_read_unlock();
1417 
1418 	return xas.xa_index;
1419 }
1420 
1421 /*
1422  * Move the swapped pages for an inode to page cache. Returns the count
1423  * of pages swapped in, or the error in case of failure.
1424  */
1425 static int shmem_unuse_swap_entries(struct inode *inode,
1426 		struct folio_batch *fbatch, pgoff_t *indices)
1427 {
1428 	int i = 0;
1429 	int ret = 0;
1430 	int error = 0;
1431 	struct address_space *mapping = inode->i_mapping;
1432 
1433 	for (i = 0; i < folio_batch_count(fbatch); i++) {
1434 		struct folio *folio = fbatch->folios[i];
1435 
1436 		if (!xa_is_value(folio))
1437 			continue;
1438 		error = shmem_swapin_folio(inode, indices[i], &folio, SGP_CACHE,
1439 					mapping_gfp_mask(mapping), NULL, NULL);
1440 		if (error == 0) {
1441 			folio_unlock(folio);
1442 			folio_put(folio);
1443 			ret++;
1444 		}
1445 		if (error == -ENOMEM)
1446 			break;
1447 		error = 0;
1448 	}
1449 	return error ? error : ret;
1450 }
1451 
1452 /*
1453  * If swap found in inode, free it and move page from swapcache to filecache.
1454  */
1455 static int shmem_unuse_inode(struct inode *inode, unsigned int type)
1456 {
1457 	struct address_space *mapping = inode->i_mapping;
1458 	pgoff_t start = 0;
1459 	struct folio_batch fbatch;
1460 	pgoff_t indices[PAGEVEC_SIZE];
1461 	int ret = 0;
1462 
1463 	do {
1464 		folio_batch_init(&fbatch);
1465 		shmem_find_swap_entries(mapping, start, &fbatch, indices, type);
1466 		if (folio_batch_count(&fbatch) == 0) {
1467 			ret = 0;
1468 			break;
1469 		}
1470 
1471 		ret = shmem_unuse_swap_entries(inode, &fbatch, indices);
1472 		if (ret < 0)
1473 			break;
1474 
1475 		start = indices[folio_batch_count(&fbatch) - 1];
1476 	} while (true);
1477 
1478 	return ret;
1479 }
1480 
1481 /*
1482  * Read all the shared memory data that resides in the swap
1483  * device 'type' back into memory, so the swap device can be
1484  * unused.
1485  */
1486 int shmem_unuse(unsigned int type)
1487 {
1488 	struct shmem_inode_info *info, *next;
1489 	int error = 0;
1490 
1491 	if (list_empty(&shmem_swaplist))
1492 		return 0;
1493 
1494 	mutex_lock(&shmem_swaplist_mutex);
1495 	list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
1496 		if (!info->swapped) {
1497 			list_del_init(&info->swaplist);
1498 			continue;
1499 		}
1500 		/*
1501 		 * Drop the swaplist mutex while searching the inode for swap;
1502 		 * but before doing so, make sure shmem_evict_inode() will not
1503 		 * remove placeholder inode from swaplist, nor let it be freed
1504 		 * (igrab() would protect from unlink, but not from unmount).
1505 		 */
1506 		atomic_inc(&info->stop_eviction);
1507 		mutex_unlock(&shmem_swaplist_mutex);
1508 
1509 		error = shmem_unuse_inode(&info->vfs_inode, type);
1510 		cond_resched();
1511 
1512 		mutex_lock(&shmem_swaplist_mutex);
1513 		next = list_next_entry(info, swaplist);
1514 		if (!info->swapped)
1515 			list_del_init(&info->swaplist);
1516 		if (atomic_dec_and_test(&info->stop_eviction))
1517 			wake_up_var(&info->stop_eviction);
1518 		if (error)
1519 			break;
1520 	}
1521 	mutex_unlock(&shmem_swaplist_mutex);
1522 
1523 	return error;
1524 }
1525 
1526 /*
1527  * Move the page from the page cache to the swap cache.
1528  */
1529 static int shmem_writepage(struct page *page, struct writeback_control *wbc)
1530 {
1531 	struct folio *folio = page_folio(page);
1532 	struct address_space *mapping = folio->mapping;
1533 	struct inode *inode = mapping->host;
1534 	struct shmem_inode_info *info = SHMEM_I(inode);
1535 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1536 	swp_entry_t swap;
1537 	pgoff_t index;
1538 	int nr_pages;
1539 	bool split = false;
1540 
1541 	/*
1542 	 * Our capabilities prevent regular writeback or sync from ever calling
1543 	 * shmem_writepage; but a stacking filesystem might use ->writepage of
1544 	 * its underlying filesystem, in which case tmpfs should write out to
1545 	 * swap only in response to memory pressure, and not for the writeback
1546 	 * threads or sync.
1547 	 */
1548 	if (WARN_ON_ONCE(!wbc->for_reclaim))
1549 		goto redirty;
1550 
1551 	if ((info->flags & VM_LOCKED) || sbinfo->noswap)
1552 		goto redirty;
1553 
1554 	if (!total_swap_pages)
1555 		goto redirty;
1556 
1557 	/*
1558 	 * If CONFIG_THP_SWAP is not enabled, the large folio should be
1559 	 * split when swapping.
1560 	 *
1561 	 * And shrinkage of pages beyond i_size does not split swap, so
1562 	 * swapout of a large folio crossing i_size needs to split too
1563 	 * (unless fallocate has been used to preallocate beyond EOF).
1564 	 */
1565 	if (folio_test_large(folio)) {
1566 		index = shmem_fallocend(inode,
1567 			DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE));
1568 		if ((index > folio->index && index < folio_next_index(folio)) ||
1569 		    !IS_ENABLED(CONFIG_THP_SWAP))
1570 			split = true;
1571 	}
1572 
1573 	if (split) {
1574 try_split:
1575 		/* Ensure the subpages are still dirty */
1576 		folio_test_set_dirty(folio);
1577 		if (split_huge_page_to_list_to_order(page, wbc->list, 0))
1578 			goto redirty;
1579 		folio = page_folio(page);
1580 		folio_clear_dirty(folio);
1581 	}
1582 
1583 	index = folio->index;
1584 	nr_pages = folio_nr_pages(folio);
1585 
1586 	/*
1587 	 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
1588 	 * value into swapfile.c, the only way we can correctly account for a
1589 	 * fallocated folio arriving here is now to initialize it and write it.
1590 	 *
1591 	 * That's okay for a folio already fallocated earlier, but if we have
1592 	 * not yet completed the fallocation, then (a) we want to keep track
1593 	 * of this folio in case we have to undo it, and (b) it may not be a
1594 	 * good idea to continue anyway, once we're pushing into swap.  So
1595 	 * reactivate the folio, and let shmem_fallocate() quit when too many.
1596 	 */
1597 	if (!folio_test_uptodate(folio)) {
1598 		if (inode->i_private) {
1599 			struct shmem_falloc *shmem_falloc;
1600 			spin_lock(&inode->i_lock);
1601 			shmem_falloc = inode->i_private;
1602 			if (shmem_falloc &&
1603 			    !shmem_falloc->waitq &&
1604 			    index >= shmem_falloc->start &&
1605 			    index < shmem_falloc->next)
1606 				shmem_falloc->nr_unswapped += nr_pages;
1607 			else
1608 				shmem_falloc = NULL;
1609 			spin_unlock(&inode->i_lock);
1610 			if (shmem_falloc)
1611 				goto redirty;
1612 		}
1613 		folio_zero_range(folio, 0, folio_size(folio));
1614 		flush_dcache_folio(folio);
1615 		folio_mark_uptodate(folio);
1616 	}
1617 
1618 	swap = folio_alloc_swap(folio);
1619 	if (!swap.val) {
1620 		if (nr_pages > 1)
1621 			goto try_split;
1622 
1623 		goto redirty;
1624 	}
1625 
1626 	/*
1627 	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
1628 	 * if it's not already there.  Do it now before the folio is
1629 	 * moved to swap cache, when its pagelock no longer protects
1630 	 * the inode from eviction.  But don't unlock the mutex until
1631 	 * we've incremented swapped, because shmem_unuse_inode() will
1632 	 * prune a !swapped inode from the swaplist under this mutex.
1633 	 */
1634 	mutex_lock(&shmem_swaplist_mutex);
1635 	if (list_empty(&info->swaplist))
1636 		list_add(&info->swaplist, &shmem_swaplist);
1637 
1638 	if (add_to_swap_cache(folio, swap,
1639 			__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
1640 			NULL) == 0) {
1641 		shmem_recalc_inode(inode, 0, nr_pages);
1642 		swap_shmem_alloc(swap, nr_pages);
1643 		shmem_delete_from_page_cache(folio, swp_to_radix_entry(swap));
1644 
1645 		mutex_unlock(&shmem_swaplist_mutex);
1646 		BUG_ON(folio_mapped(folio));
1647 		return swap_writepage(&folio->page, wbc);
1648 	}
1649 
1650 	mutex_unlock(&shmem_swaplist_mutex);
1651 	put_swap_folio(folio, swap);
1652 redirty:
1653 	folio_mark_dirty(folio);
1654 	if (wbc->for_reclaim)
1655 		return AOP_WRITEPAGE_ACTIVATE;	/* Return with folio locked */
1656 	folio_unlock(folio);
1657 	return 0;
1658 }
1659 
1660 #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
1661 static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1662 {
1663 	char buffer[64];
1664 
1665 	if (!mpol || mpol->mode == MPOL_DEFAULT)
1666 		return;		/* show nothing */
1667 
1668 	mpol_to_str(buffer, sizeof(buffer), mpol);
1669 
1670 	seq_printf(seq, ",mpol=%s", buffer);
1671 }
1672 
1673 static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1674 {
1675 	struct mempolicy *mpol = NULL;
1676 	if (sbinfo->mpol) {
1677 		raw_spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
1678 		mpol = sbinfo->mpol;
1679 		mpol_get(mpol);
1680 		raw_spin_unlock(&sbinfo->stat_lock);
1681 	}
1682 	return mpol;
1683 }
1684 #else /* !CONFIG_NUMA || !CONFIG_TMPFS */
1685 static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1686 {
1687 }
1688 static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1689 {
1690 	return NULL;
1691 }
1692 #endif /* CONFIG_NUMA && CONFIG_TMPFS */
1693 
1694 static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info,
1695 			pgoff_t index, unsigned int order, pgoff_t *ilx);
1696 
1697 static struct folio *shmem_swapin_cluster(swp_entry_t swap, gfp_t gfp,
1698 			struct shmem_inode_info *info, pgoff_t index)
1699 {
1700 	struct mempolicy *mpol;
1701 	pgoff_t ilx;
1702 	struct folio *folio;
1703 
1704 	mpol = shmem_get_pgoff_policy(info, index, 0, &ilx);
1705 	folio = swap_cluster_readahead(swap, gfp, mpol, ilx);
1706 	mpol_cond_put(mpol);
1707 
1708 	return folio;
1709 }
1710 
1711 /*
1712  * Make sure huge_gfp is always more limited than limit_gfp.
1713  * Some of the flags set permissions, while others set limitations.
1714  */
1715 static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
1716 {
1717 	gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
1718 	gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY;
1719 	gfp_t zoneflags = limit_gfp & GFP_ZONEMASK;
1720 	gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK);
1721 
1722 	/* Allow allocations only from the originally specified zones. */
1723 	result |= zoneflags;
1724 
1725 	/*
1726 	 * Minimize the result gfp by taking the union with the deny flags,
1727 	 * and the intersection of the allow flags.
1728 	 */
1729 	result |= (limit_gfp & denyflags);
1730 	result |= (huge_gfp & limit_gfp) & allowflags;
1731 
1732 	return result;
1733 }
1734 
1735 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1736 bool shmem_hpage_pmd_enabled(void)
1737 {
1738 	if (shmem_huge == SHMEM_HUGE_DENY)
1739 		return false;
1740 	if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_always))
1741 		return true;
1742 	if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_madvise))
1743 		return true;
1744 	if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_within_size))
1745 		return true;
1746 	if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_inherit) &&
1747 	    shmem_huge != SHMEM_HUGE_NEVER)
1748 		return true;
1749 
1750 	return false;
1751 }
1752 
1753 unsigned long shmem_allowable_huge_orders(struct inode *inode,
1754 				struct vm_area_struct *vma, pgoff_t index,
1755 				loff_t write_end, bool shmem_huge_force)
1756 {
1757 	unsigned long mask = READ_ONCE(huge_shmem_orders_always);
1758 	unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
1759 	unsigned long vm_flags = vma ? vma->vm_flags : 0;
1760 	pgoff_t aligned_index;
1761 	unsigned int global_orders;
1762 	loff_t i_size;
1763 	int order;
1764 
1765 	if (thp_disabled_by_hw() || (vma && vma_thp_disabled(vma, vm_flags)))
1766 		return 0;
1767 
1768 	global_orders = shmem_huge_global_enabled(inode, index, write_end,
1769 						  shmem_huge_force, vma, vm_flags);
1770 	/* Tmpfs huge pages allocation */
1771 	if (!vma || !vma_is_anon_shmem(vma))
1772 		return global_orders;
1773 
1774 	/*
1775 	 * Following the 'deny' semantics of the top level, force the huge
1776 	 * option off from all mounts.
1777 	 */
1778 	if (shmem_huge == SHMEM_HUGE_DENY)
1779 		return 0;
1780 
1781 	/*
1782 	 * Only allow inherit orders if the top-level value is 'force', which
1783 	 * means non-PMD sized THP can not override 'huge' mount option now.
1784 	 */
1785 	if (shmem_huge == SHMEM_HUGE_FORCE)
1786 		return READ_ONCE(huge_shmem_orders_inherit);
1787 
1788 	/* Allow mTHP that will be fully within i_size. */
1789 	order = highest_order(within_size_orders);
1790 	while (within_size_orders) {
1791 		aligned_index = round_up(index + 1, 1 << order);
1792 		i_size = round_up(i_size_read(inode), PAGE_SIZE);
1793 		if (i_size >> PAGE_SHIFT >= aligned_index) {
1794 			mask |= within_size_orders;
1795 			break;
1796 		}
1797 
1798 		order = next_order(&within_size_orders, order);
1799 	}
1800 
1801 	if (vm_flags & VM_HUGEPAGE)
1802 		mask |= READ_ONCE(huge_shmem_orders_madvise);
1803 
1804 	if (global_orders > 0)
1805 		mask |= READ_ONCE(huge_shmem_orders_inherit);
1806 
1807 	return THP_ORDERS_ALL_FILE_DEFAULT & mask;
1808 }
1809 
1810 static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf,
1811 					   struct address_space *mapping, pgoff_t index,
1812 					   unsigned long orders)
1813 {
1814 	struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
1815 	pgoff_t aligned_index;
1816 	unsigned long pages;
1817 	int order;
1818 
1819 	if (vma) {
1820 		orders = thp_vma_suitable_orders(vma, vmf->address, orders);
1821 		if (!orders)
1822 			return 0;
1823 	}
1824 
1825 	/* Find the highest order that can add into the page cache */
1826 	order = highest_order(orders);
1827 	while (orders) {
1828 		pages = 1UL << order;
1829 		aligned_index = round_down(index, pages);
1830 		/*
1831 		 * Check for conflict before waiting on a huge allocation.
1832 		 * Conflict might be that a huge page has just been allocated
1833 		 * and added to page cache by a racing thread, or that there
1834 		 * is already at least one small page in the huge extent.
1835 		 * Be careful to retry when appropriate, but not forever!
1836 		 * Elsewhere -EEXIST would be the right code, but not here.
1837 		 */
1838 		if (!xa_find(&mapping->i_pages, &aligned_index,
1839 			     aligned_index + pages - 1, XA_PRESENT))
1840 			break;
1841 		order = next_order(&orders, order);
1842 	}
1843 
1844 	return orders;
1845 }
1846 #else
1847 static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf,
1848 					   struct address_space *mapping, pgoff_t index,
1849 					   unsigned long orders)
1850 {
1851 	return 0;
1852 }
1853 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1854 
1855 static struct folio *shmem_alloc_folio(gfp_t gfp, int order,
1856 		struct shmem_inode_info *info, pgoff_t index)
1857 {
1858 	struct mempolicy *mpol;
1859 	pgoff_t ilx;
1860 	struct folio *folio;
1861 
1862 	mpol = shmem_get_pgoff_policy(info, index, order, &ilx);
1863 	folio = folio_alloc_mpol(gfp, order, mpol, ilx, numa_node_id());
1864 	mpol_cond_put(mpol);
1865 
1866 	return folio;
1867 }
1868 
1869 static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
1870 		gfp_t gfp, struct inode *inode, pgoff_t index,
1871 		struct mm_struct *fault_mm, unsigned long orders)
1872 {
1873 	struct address_space *mapping = inode->i_mapping;
1874 	struct shmem_inode_info *info = SHMEM_I(inode);
1875 	unsigned long suitable_orders = 0;
1876 	struct folio *folio = NULL;
1877 	long pages;
1878 	int error, order;
1879 
1880 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
1881 		orders = 0;
1882 
1883 	if (orders > 0) {
1884 		suitable_orders = shmem_suitable_orders(inode, vmf,
1885 							mapping, index, orders);
1886 
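		/* Try the suitable orders from highest to lowest until one succeeds */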
1887 		order = highest_order(suitable_orders);
1888 		while (suitable_orders) {
1889 			pages = 1UL << order;
1890 			index = round_down(index, pages);
1891 			folio = shmem_alloc_folio(gfp, order, info, index);
1892 			if (folio)
1893 				goto allocated;
1894 
1895 			if (pages == HPAGE_PMD_NR)
1896 				count_vm_event(THP_FILE_FALLBACK);
1897 			count_mthp_stat(order, MTHP_STAT_SHMEM_FALLBACK);
1898 			order = next_order(&suitable_orders, order);
1899 		}
1900 	} else {
1901 		pages = 1;
1902 		folio = shmem_alloc_folio(gfp, 0, info, index);
1903 	}
1904 	if (!folio)
1905 		return ERR_PTR(-ENOMEM);
1906 
1907 allocated:
1908 	__folio_set_locked(folio);
1909 	__folio_set_swapbacked(folio);
1910 
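	/*
	 * Keep only reclaim-related gfp flags for the memcg charge and the
	 * page cache insertion below.
	 */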
1911 	gfp &= GFP_RECLAIM_MASK;
1912 	error = mem_cgroup_charge(folio, fault_mm, gfp);
1913 	if (error) {
1914 		if (xa_find(&mapping->i_pages, &index,
1915 				index + pages - 1, XA_PRESENT)) {
1916 			error = -EEXIST;
1917 		} else if (pages > 1) {
1918 			if (pages == HPAGE_PMD_NR) {
1919 				count_vm_event(THP_FILE_FALLBACK);
1920 				count_vm_event(THP_FILE_FALLBACK_CHARGE);
1921 			}
1922 			count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK);
1923 			count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK_CHARGE);
1924 		}
1925 		goto unlock;
1926 	}
1927 
1928 	error = shmem_add_to_page_cache(folio, mapping, index, NULL, gfp);
1929 	if (error)
1930 		goto unlock;
1931 
1932 	error = shmem_inode_acct_blocks(inode, pages);
1933 	if (error) {
1934 		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1935 		long freed;
1936 		/*
1937 		 * Try to reclaim some space by splitting a few
1938 		 * large folios beyond i_size on the filesystem.
1939 		 */
1940 		shmem_unused_huge_shrink(sbinfo, NULL, pages);
1941 		/*
1942 		 * And do a shmem_recalc_inode() to account for freed pages:
1943 		 * except our folio is there in cache, so not quite balanced.
1944 		 */
1945 		spin_lock(&info->lock);
1946 		freed = pages + info->alloced - info->swapped -
1947 			READ_ONCE(mapping->nrpages);
1948 		if (freed > 0)
1949 			info->alloced -= freed;
1950 		spin_unlock(&info->lock);
1951 		if (freed > 0)
1952 			shmem_inode_unacct_blocks(inode, freed);
1953 		error = shmem_inode_acct_blocks(inode, pages);
1954 		if (error) {
1955 			filemap_remove_folio(folio);
1956 			goto unlock;
1957 		}
1958 	}
1959 
1960 	shmem_recalc_inode(inode, pages, 0);
1961 	folio_add_lru(folio);
1962 	return folio;
1963 
1964 unlock:
1965 	folio_unlock(folio);
1966 	folio_put(folio);
1967 	return ERR_PTR(error);
1968 }
1969 
1970 static struct folio *shmem_swap_alloc_folio(struct inode *inode,
1971 		struct vm_area_struct *vma, pgoff_t index,
1972 		swp_entry_t entry, int order, gfp_t gfp)
1973 {
1974 	struct shmem_inode_info *info = SHMEM_I(inode);
1975 	struct folio *new;
1976 	void *shadow;
1977 	int nr_pages;
1978 
1979 	/*
1980 	 * We have arrived here because our zones are constrained, so don't
1981 	 * limit chance of success with further cpuset and node constraints.
1982 	 */
1983 	gfp &= ~GFP_CONSTRAINT_MASK;
1984 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && order > 0) {
1985 		gfp_t huge_gfp = vma_thp_gfp_mask(vma);
1986 
1987 		gfp = limit_gfp_mask(huge_gfp, gfp);
1988 	}
1989 
1990 	new = shmem_alloc_folio(gfp, order, info, index);
1991 	if (!new)
1992 		return ERR_PTR(-ENOMEM);
1993 
1994 	nr_pages = folio_nr_pages(new);
1995 	if (mem_cgroup_swapin_charge_folio(new, vma ? vma->vm_mm : NULL,
1996 					   gfp, entry)) {
1997 		folio_put(new);
1998 		return ERR_PTR(-ENOMEM);
1999 	}
2000 
2001 	/*
2002 	 * Prevent parallel swapin from proceeding with the swap cache flag.
2003 	 *
2004 	 * Of course there is another possible concurrent scenario as well,
2005 	 * that is to say, the swap cache flag of a large folio has already
2006 	 * been set by swapcache_prepare(), while another thread may have
2007 	 * already split the large swap entry stored in the shmem mapping.
2008 	 * In this case, shmem_add_to_page_cache() will help identify the
2009 	 * concurrent swapin and return -EEXIST.
2010 	 */
2011 	if (swapcache_prepare(entry, nr_pages)) {
2012 		folio_put(new);
2013 		return ERR_PTR(-EEXIST);
2014 	}
2015 
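	/*
	 * The folio bypasses the swap cache: mark it locked and swap-backed,
	 * record its swap entry, and read it in directly.
	 */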
2016 	__folio_set_locked(new);
2017 	__folio_set_swapbacked(new);
2018 	new->swap = entry;
2019 
2020 	mem_cgroup_swapin_uncharge_swap(entry, nr_pages);
2021 	shadow = get_shadow_from_swap_cache(entry);
2022 	if (shadow)
2023 		workingset_refault(new, shadow);
2024 	folio_add_lru(new);
2025 	swap_read_folio(new, NULL);
2026 	return new;
2027 }
2028 
2029 /*
2030  * When a page is moved from swapcache to shmem filecache (either by the
2031  * usual swapin of shmem_get_folio_gfp(), or by the less common swapoff of
2032  * shmem_unuse_inode()), it may have been read in earlier from swap, in
2033  * ignorance of the mapping it belongs to.  If that mapping has special
2034  * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
2035  * we may need to copy to a suitable page before moving to filecache.
2036  *
2037  * In a future release, this may well be extended to respect cpuset and
2038  * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
2039  * but for now it is a simple matter of zone.
2040  */
2041 static bool shmem_should_replace_folio(struct folio *folio, gfp_t gfp)
2042 {
2043 	return folio_zonenum(folio) > gfp_zone(gfp);
2044 }
2045 
2046 static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
2047 				struct shmem_inode_info *info, pgoff_t index,
2048 				struct vm_area_struct *vma)
2049 {
2050 	struct folio *new, *old = *foliop;
2051 	swp_entry_t entry = old->swap;
2052 	struct address_space *swap_mapping = swap_address_space(entry);
2053 	pgoff_t swap_index = swap_cache_index(entry);
2054 	XA_STATE(xas, &swap_mapping->i_pages, swap_index);
2055 	int nr_pages = folio_nr_pages(old);
2056 	int error = 0, i;
2057 
2058 	/*
2059 	 * We have arrived here because our zones are constrained, so don't
2060 	 * limit chance of success by further cpuset and node constraints.
2061 	 */
2062 	gfp &= ~GFP_CONSTRAINT_MASK;
2063 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2064 	if (nr_pages > 1) {
2065 		gfp_t huge_gfp = vma_thp_gfp_mask(vma);
2066 
2067 		gfp = limit_gfp_mask(huge_gfp, gfp);
2068 	}
2069 #endif
2070 
2071 	new = shmem_alloc_folio(gfp, folio_order(old), info, index);
2072 	if (!new)
2073 		return -ENOMEM;
2074 
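	/*
	 * Take one reference per page for the swap cache slots that will be
	 * switched over to the new folio below.
	 */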
2075 	folio_ref_add(new, nr_pages);
2076 	folio_copy(new, old);
2077 	flush_dcache_folio(new);
2078 
2079 	__folio_set_locked(new);
2080 	__folio_set_swapbacked(new);
2081 	folio_mark_uptodate(new);
2082 	new->swap = entry;
2083 	folio_set_swapcache(new);
2084 
2085 	/* Swap cache still stores N entries instead of a high-order entry */
2086 	xa_lock_irq(&swap_mapping->i_pages);
2087 	for (i = 0; i < nr_pages; i++) {
2088 		void *item = xas_load(&xas);
2089 
2090 		if (item != old) {
2091 			error = -ENOENT;
2092 			break;
2093 		}
2094 
2095 		xas_store(&xas, new);
2096 		xas_next(&xas);
2097 	}
2098 	if (!error) {
2099 		mem_cgroup_replace_folio(old, new);
2100 		shmem_update_stats(new, nr_pages);
2101 		shmem_update_stats(old, -nr_pages);
2102 	}
2103 	xa_unlock_irq(&swap_mapping->i_pages);
2104 
2105 	if (unlikely(error)) {
2106 		/*
2107 		 * Is this possible?  I think not, now that our callers
2108 		 * check both the swapcache flag and folio->private
2109 		 * after getting the folio lock; but be defensive.
2110 		 * Reverse old to newpage for clear and free.
2111 		 */
2112 		old = new;
2113 	} else {
2114 		folio_add_lru(new);
2115 		*foliop = new;
2116 	}
2117 
2118 	folio_clear_swapcache(old);
2119 	old->private = NULL;
2120 
2121 	folio_unlock(old);
2122 	/*
2123 	 * The old folio has been removed from the swap cache: drop the
2124 	 * 'nr_pages' references it held there, as well as the one temporary
2125 	 * reference obtained when looking it up in the swap cache.
2126 	 */
2127 	folio_put_refs(old, nr_pages + 1);
2128 	return error;
2129 }
2130 
2131 static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
2132 					 struct folio *folio, swp_entry_t swap,
2133 					 bool skip_swapcache)
2134 {
2135 	struct address_space *mapping = inode->i_mapping;
2136 	swp_entry_t swapin_error;
2137 	void *old;
2138 	int nr_pages;
2139 
2140 	swapin_error = make_poisoned_swp_entry();
2141 	old = xa_cmpxchg_irq(&mapping->i_pages, index,
2142 			     swp_to_radix_entry(swap),
2143 			     swp_to_radix_entry(swapin_error), 0);
2144 	if (old != swp_to_radix_entry(swap))
2145 		return;
2146 
2147 	nr_pages = folio_nr_pages(folio);
2148 	folio_wait_writeback(folio);
2149 	if (!skip_swapcache)
2150 		delete_from_swap_cache(folio);
2151 	/*
2152 	 * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks
2153 	 * won't be 0 when inode is released and thus trigger WARN_ON(i_blocks)
2154 	 * in shmem_evict_inode().
2155 	 */
2156 	shmem_recalc_inode(inode, -nr_pages, -nr_pages);
2157 	swap_free_nr(swap, nr_pages);
2158 }
2159 
2160 static int shmem_split_large_entry(struct inode *inode, pgoff_t index,
2161 				   swp_entry_t swap, gfp_t gfp)
2162 {
2163 	struct address_space *mapping = inode->i_mapping;
2164 	XA_STATE_ORDER(xas, &mapping->i_pages, index, 0);
2165 	void *alloced_shadow = NULL;
2166 	int alloced_order = 0, i;
2167 
2168 	/* Convert user data gfp flags to xarray node gfp flags */
2169 	gfp &= GFP_RECLAIM_MASK;
2170 
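	/*
	 * The first pass under the lock only discovers the entry's order;
	 * the xarray nodes needed for the split are allocated outside the
	 * lock, and the split is retried after re-validating the entry.
	 */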
2171 	for (;;) {
2172 		int order = -1, split_order = 0;
2173 		void *old = NULL;
2174 
2175 		xas_lock_irq(&xas);
2176 		old = xas_load(&xas);
2177 		if (!xa_is_value(old) || swp_to_radix_entry(swap) != old) {
2178 			xas_set_err(&xas, -EEXIST);
2179 			goto unlock;
2180 		}
2181 
2182 		order = xas_get_order(&xas);
2183 
2184 		/* Swap entry may have changed before we re-acquire the lock */
2185 		if (alloced_order &&
2186 		    (old != alloced_shadow || order != alloced_order)) {
2187 			xas_destroy(&xas);
2188 			alloced_order = 0;
2189 		}
2190 
2191 		/* Try to split large swap entry in pagecache */
2192 		if (order > 0) {
2193 			if (!alloced_order) {
2194 				split_order = order;
2195 				goto unlock;
2196 			}
2197 			xas_split(&xas, old, order);
2198 
2199 			/*
2200 			 * Re-set the swap entries after splitting: the swap
2201 			 * offsets within the original large entry are contiguous.
2202 			 */
2203 			for (i = 0; i < 1 << order; i++) {
2204 				pgoff_t aligned_index = round_down(index, 1 << order);
2205 				swp_entry_t tmp;
2206 
2207 				tmp = swp_entry(swp_type(swap), swp_offset(swap) + i);
2208 				__xa_store(&mapping->i_pages, aligned_index + i,
2209 					   swp_to_radix_entry(tmp), 0);
2210 			}
2211 		}
2212 
2213 unlock:
2214 		xas_unlock_irq(&xas);
2215 
2216 		/* split needed, alloc here and retry. */
2217 		if (split_order) {
2218 			xas_split_alloc(&xas, old, split_order, gfp);
2219 			if (xas_error(&xas))
2220 				goto error;
2221 			alloced_shadow = old;
2222 			alloced_order = split_order;
2223 			xas_reset(&xas);
2224 			continue;
2225 		}
2226 
2227 		if (!xas_nomem(&xas, gfp))
2228 			break;
2229 	}
2230 
2231 error:
2232 	if (xas_error(&xas))
2233 		return xas_error(&xas);
2234 
2235 	return alloced_order;
2236 }
2237 
2238 /*
2239  * Swap in the folio pointed to by *foliop.
2240  * Caller has to make sure that *foliop contains a valid swapped folio.
2241  * Returns 0 and the folio in foliop if success. On failure, returns the
2242  * error code and NULL in *foliop.
2243  */
2244 static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
2245 			     struct folio **foliop, enum sgp_type sgp,
2246 			     gfp_t gfp, struct vm_area_struct *vma,
2247 			     vm_fault_t *fault_type)
2248 {
2249 	struct address_space *mapping = inode->i_mapping;
2250 	struct mm_struct *fault_mm = vma ? vma->vm_mm : NULL;
2251 	struct shmem_inode_info *info = SHMEM_I(inode);
2252 	struct swap_info_struct *si;
2253 	struct folio *folio = NULL;
2254 	bool skip_swapcache = false;
2255 	swp_entry_t swap;
2256 	int error, nr_pages, order, split_order;
2257 
2258 	VM_BUG_ON(!*foliop || !xa_is_value(*foliop));
2259 	swap = radix_to_swp_entry(*foliop);
2260 	*foliop = NULL;
2261 
2262 	if (is_poisoned_swp_entry(swap))
2263 		return -EIO;
2264 
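	/*
	 * Pin the swap device: if it has been swapped off, return -EEXIST
	 * when the mapping no longer holds this entry (the caller retries),
	 * or -EINVAL for a genuinely stale entry.
	 */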
2265 	si = get_swap_device(swap);
2266 	if (!si) {
2267 		if (!shmem_confirm_swap(mapping, index, swap))
2268 			return -EEXIST;
2269 		else
2270 			return -EINVAL;
2271 	}
2272 
2273 	/* Look it up and read it in.. */
2274 	folio = swap_cache_get_folio(swap, NULL, 0);
2275 	order = xa_get_order(&mapping->i_pages, index);
2276 	if (!folio) {
2277 		bool fallback_order0 = false;
2278 
2279 		/* Or update major stats only when swapin succeeds?? */
2280 		if (fault_type) {
2281 			*fault_type |= VM_FAULT_MAJOR;
2282 			count_vm_event(PGMAJFAULT);
2283 			count_memcg_event_mm(fault_mm, PGMAJFAULT);
2284 		}
2285 
2286 		/*
2287 		 * If uffd is active for the vma, we need per-page fault
2288 		 * fidelity to maintain the uffd semantics, so fall back to
2289 		 * swapping in an order-0 folio; the same applies to the zswap case.
2290 		 */
2291 		if (order > 0 && ((vma && unlikely(userfaultfd_armed(vma))) ||
2292 				  !zswap_never_enabled()))
2293 			fallback_order0 = true;
2294 
2295 		/* Skip swapcache for synchronous device. */
2296 		if (!fallback_order0 && data_race(si->flags & SWP_SYNCHRONOUS_IO)) {
2297 			folio = shmem_swap_alloc_folio(inode, vma, index, swap, order, gfp);
2298 			if (!IS_ERR(folio)) {
2299 				skip_swapcache = true;
2300 				goto alloced;
2301 			}
2302 
2303 			/*
2304 			 * Fallback to swapin order-0 folio unless the swap entry
2305 			 * already exists.
2306 			 */
2307 			error = PTR_ERR(folio);
2308 			folio = NULL;
2309 			if (error == -EEXIST)
2310 				goto failed;
2311 		}
2312 
2313 		/*
2314 		 * The swap device can now only swap in order-0 folios, so
2315 		 * split the large swap entry stored in the pagecache if
2316 		 * necessary.
2317 		 */
2318 		split_order = shmem_split_large_entry(inode, index, swap, gfp);
2319 		if (split_order < 0) {
2320 			error = split_order;
2321 			goto failed;
2322 		}
2323 
2324 		/*
2325 		 * If the large swap entry has already been split, it is
2326 		 * necessary to recalculate the new swap entry based on
2327 		 * the old order alignment.
2328 		 */
2329 		if (split_order > 0) {
2330 			pgoff_t offset = index - round_down(index, 1 << split_order);
2331 
2332 			swap = swp_entry(swp_type(swap), swp_offset(swap) + offset);
2333 		}
2334 
2335 		/* Here we actually start the io */
2336 		folio = shmem_swapin_cluster(swap, gfp, info, index);
2337 		if (!folio) {
2338 			error = -ENOMEM;
2339 			goto failed;
2340 		}
2341 	} else if (order != folio_order(folio)) {
2342 		/*
2343 		 * Swap readahead may swap in order 0 folios into swapcache
2344 		 * asynchronously, while the shmem mapping can still store
2345 		 * large swap entries. In such cases, we should split the
2346 		 * large swap entry to prevent possible data corruption.
2347 		 */
2348 		split_order = shmem_split_large_entry(inode, index, swap, gfp);
2349 		if (split_order < 0) {
2350 			error = split_order;
2351 			goto failed;
2352 		}
2353 
2354 		/*
2355 		 * If the large swap entry has already been split, it is
2356 		 * necessary to recalculate the new swap entry based on
2357 		 * the old order alignment.
2358 		 */
2359 		if (split_order > 0) {
2360 			pgoff_t offset = index - round_down(index, 1 << split_order);
2361 
2362 			swap = swp_entry(swp_type(swap), swp_offset(swap) + offset);
2363 		}
2364 	}
2365 
2366 alloced:
2367 	/* We have to do this with folio locked to prevent races */
2368 	folio_lock(folio);
2369 	if ((!skip_swapcache && !folio_test_swapcache(folio)) ||
2370 	    folio->swap.val != swap.val ||
2371 	    !shmem_confirm_swap(mapping, index, swap) ||
2372 	    xa_get_order(&mapping->i_pages, index) != folio_order(folio)) {
2373 		error = -EEXIST;
2374 		goto unlock;
2375 	}
2376 	if (!folio_test_uptodate(folio)) {
2377 		error = -EIO;
2378 		goto failed;
2379 	}
2380 	folio_wait_writeback(folio);
2381 	nr_pages = folio_nr_pages(folio);
2382 
2383 	/*
2384 	 * Some architectures may have to restore extra metadata to the
2385 	 * folio after reading from swap.
2386 	 */
2387 	arch_swap_restore(folio_swap(swap, folio), folio);
2388 
2389 	if (shmem_should_replace_folio(folio, gfp)) {
2390 		error = shmem_replace_folio(&folio, gfp, info, index, vma);
2391 		if (error)
2392 			goto failed;
2393 	}
2394 
2395 	error = shmem_add_to_page_cache(folio, mapping,
2396 					round_down(index, nr_pages),
2397 					swp_to_radix_entry(swap), gfp);
2398 	if (error)
2399 		goto failed;
2400 
2401 	shmem_recalc_inode(inode, 0, -nr_pages);
2402 
2403 	if (sgp == SGP_WRITE)
2404 		folio_mark_accessed(folio);
2405 
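	/*
	 * When the swap cache was bypassed, undo the swapcache_prepare()
	 * reservation; otherwise remove the folio from the swap cache.
	 */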
2406 	if (skip_swapcache) {
2407 		folio->swap.val = 0;
2408 		swapcache_clear(si, swap, nr_pages);
2409 	} else {
2410 		delete_from_swap_cache(folio);
2411 	}
2412 	folio_mark_dirty(folio);
2413 	swap_free_nr(swap, nr_pages);
2414 	put_swap_device(si);
2415 
2416 	*foliop = folio;
2417 	return 0;
2418 failed:
2419 	if (!shmem_confirm_swap(mapping, index, swap))
2420 		error = -EEXIST;
2421 	if (error == -EIO)
2422 		shmem_set_folio_swapin_error(inode, index, folio, swap,
2423 					     skip_swapcache);
2424 unlock:
2425 	if (skip_swapcache)
2426 		swapcache_clear(si, swap, folio_nr_pages(folio));
2427 	if (folio) {
2428 		folio_unlock(folio);
2429 		folio_put(folio);
2430 	}
2431 	put_swap_device(si);
2432 
2433 	return error;
2434 }
2435 
2436 /*
2437  * shmem_get_folio_gfp - find page in cache, or get from swap, or allocate
2438  *
2439  * If we allocate a new one we do not mark it dirty. That's up to the
2440  * vm. If we swap it in we mark it dirty since we also free the swap
2441  * entry since a page cannot live in both the swap and page cache.
2442  *
2443  * vmf and fault_type are only supplied by shmem_fault: otherwise they are NULL.
2444  */
2445 static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
2446 		loff_t write_end, struct folio **foliop, enum sgp_type sgp,
2447 		gfp_t gfp, struct vm_fault *vmf, vm_fault_t *fault_type)
2448 {
2449 	struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
2450 	struct mm_struct *fault_mm;
2451 	struct folio *folio;
2452 	int error;
2453 	bool alloced;
2454 	unsigned long orders = 0;
2455 
2456 	if (WARN_ON_ONCE(!shmem_mapping(inode->i_mapping)))
2457 		return -EINVAL;
2458 
2459 	if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
2460 		return -EFBIG;
2461 repeat:
2462 	if (sgp <= SGP_CACHE &&
2463 	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode))
2464 		return -EINVAL;
2465 
2466 	alloced = false;
2467 	fault_mm = vma ? vma->vm_mm : NULL;
2468 
2469 	folio = filemap_get_entry(inode->i_mapping, index);
2470 	if (folio && vma && userfaultfd_minor(vma)) {
2471 		if (!xa_is_value(folio))
2472 			folio_put(folio);
2473 		*fault_type = handle_userfault(vmf, VM_UFFD_MINOR);
2474 		return 0;
2475 	}
2476 
2477 	if (xa_is_value(folio)) {
2478 		error = shmem_swapin_folio(inode, index, &folio,
2479 					   sgp, gfp, vma, fault_type);
2480 		if (error == -EEXIST)
2481 			goto repeat;
2482 
2483 		*foliop = folio;
2484 		return error;
2485 	}
2486 
2487 	if (folio) {
2488 		folio_lock(folio);
2489 
2490 		/* Has the folio been truncated or swapped out? */
2491 		if (unlikely(folio->mapping != inode->i_mapping)) {
2492 			folio_unlock(folio);
2493 			folio_put(folio);
2494 			goto repeat;
2495 		}
2496 		if (sgp == SGP_WRITE)
2497 			folio_mark_accessed(folio);
2498 		if (folio_test_uptodate(folio))
2499 			goto out;
2500 		/* fallocated folio */
2501 		if (sgp != SGP_READ)
2502 			goto clear;
2503 		folio_unlock(folio);
2504 		folio_put(folio);
2505 	}
2506 
2507 	/*
2508 	 * SGP_READ: succeed on hole, with NULL folio, letting caller zero.
2509 	 * SGP_NOALLOC: fail on hole, with NULL folio, letting caller fail.
2510 	 */
2511 	*foliop = NULL;
2512 	if (sgp == SGP_READ)
2513 		return 0;
2514 	if (sgp == SGP_NOALLOC)
2515 		return -ENOENT;
2516 
2517 	/*
2518 	 * Fast cache lookup and swap lookup did not find it: allocate.
2519 	 */
2520 
2521 	if (vma && userfaultfd_missing(vma)) {
2522 		*fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
2523 		return 0;
2524 	}
2525 
2526 	/* Find hugepage orders that are allowed for anonymous shmem and tmpfs. */
2527 	orders = shmem_allowable_huge_orders(inode, vma, index, write_end, false);
2528 	if (orders > 0) {
2529 		gfp_t huge_gfp;
2530 
2531 		huge_gfp = vma_thp_gfp_mask(vma);
2532 		huge_gfp = limit_gfp_mask(huge_gfp, gfp);
2533 		folio = shmem_alloc_and_add_folio(vmf, huge_gfp,
2534 				inode, index, fault_mm, orders);
2535 		if (!IS_ERR(folio)) {
2536 			if (folio_test_pmd_mappable(folio))
2537 				count_vm_event(THP_FILE_ALLOC);
2538 			count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_ALLOC);
2539 			goto alloced;
2540 		}
2541 		if (PTR_ERR(folio) == -EEXIST)
2542 			goto repeat;
2543 	}
2544 
2545 	folio = shmem_alloc_and_add_folio(vmf, gfp, inode, index, fault_mm, 0);
2546 	if (IS_ERR(folio)) {
2547 		error = PTR_ERR(folio);
2548 		if (error == -EEXIST)
2549 			goto repeat;
2550 		folio = NULL;
2551 		goto unlock;
2552 	}
2553 
2554 alloced:
2555 	alloced = true;
2556 	if (folio_test_large(folio) &&
2557 	    DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
2558 					folio_next_index(folio)) {
2559 		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2560 		struct shmem_inode_info *info = SHMEM_I(inode);
2561 		/*
2562 		 * Part of the large folio is beyond i_size: subject
2563 		 * to shrink under memory pressure.
2564 		 */
2565 		spin_lock(&sbinfo->shrinklist_lock);
2566 		/*
2567 		 * _careful to defend against unlocked access to
2568 		 * ->shrink_list in shmem_unused_huge_shrink()
2569 		 */
2570 		if (list_empty_careful(&info->shrinklist)) {
2571 			list_add_tail(&info->shrinklist,
2572 				      &sbinfo->shrinklist);
2573 			sbinfo->shrinklist_len++;
2574 		}
2575 		spin_unlock(&sbinfo->shrinklist_lock);
2576 	}
2577 
2578 	if (sgp == SGP_WRITE)
2579 		folio_set_referenced(folio);
2580 	/*
2581 	 * Let SGP_FALLOC use the SGP_WRITE optimization on a new folio.
2582 	 */
2583 	if (sgp == SGP_FALLOC)
2584 		sgp = SGP_WRITE;
2585 clear:
2586 	/*
2587 	 * Let SGP_WRITE caller clear ends if write does not fill folio;
2588 	 * but SGP_FALLOC on a folio fallocated earlier must initialize
2589 	 * it now, lest undo on failure cancel our earlier guarantee.
2590 	 */
2591 	if (sgp != SGP_WRITE && !folio_test_uptodate(folio)) {
2592 		long i, n = folio_nr_pages(folio);
2593 
2594 		for (i = 0; i < n; i++)
2595 			clear_highpage(folio_page(folio, i));
2596 		flush_dcache_folio(folio);
2597 		folio_mark_uptodate(folio);
2598 	}
2599 
2600 	/* Perhaps the file has been truncated since we checked */
2601 	if (sgp <= SGP_CACHE &&
2602 	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
2603 		error = -EINVAL;
2604 		goto unlock;
2605 	}
2606 out:
2607 	*foliop = folio;
2608 	return 0;
2609 
2610 	/*
2611 	 * Error recovery.
2612 	 */
2613 unlock:
2614 	if (alloced)
2615 		filemap_remove_folio(folio);
2616 	shmem_recalc_inode(inode, 0, 0);
2617 	if (folio) {
2618 		folio_unlock(folio);
2619 		folio_put(folio);
2620 	}
2621 	return error;
2622 }
2623 
2624 /**
2625  * shmem_get_folio - find, and lock a shmem folio.
2626  * @inode:	inode to search
2627  * @index:	the page index.
2628  * @write_end:	end of a write, could extend inode size
2629  * @foliop:	pointer to the folio if found
2630  * @sgp:	SGP_* flags to control behavior
2631  *
2632  * Looks up the page cache entry at @inode & @index.  If a folio is
2633  * present, it is returned locked with an increased refcount.
2634  *
2635  * If the caller modifies data in the folio, it must call folio_mark_dirty()
2636  * before unlocking the folio to ensure that the folio is not reclaimed.
2637  * There is no need to reserve space before calling folio_mark_dirty().
2638  *
2639  * When no folio is found, the behavior depends on @sgp:
2640  *  - for SGP_READ, *@foliop is %NULL and 0 is returned
2641  *  - for SGP_NOALLOC, *@foliop is %NULL and -ENOENT is returned
2642  *  - for all other flags a new folio is allocated, inserted into the
2643  *    page cache and returned locked in @foliop.
2644  *
2645  * Context: May sleep.
2646  * Return: 0 if successful, else a negative error code.
2647  */
2648 int shmem_get_folio(struct inode *inode, pgoff_t index, loff_t write_end,
2649 		    struct folio **foliop, enum sgp_type sgp)
2650 {
2651 	return shmem_get_folio_gfp(inode, index, write_end, foliop, sgp,
2652 			mapping_gfp_mask(inode->i_mapping), NULL, NULL);
2653 }
2654 EXPORT_SYMBOL_GPL(shmem_get_folio);
2655 
2656 /*
2657  * This is like autoremove_wake_function, but it removes the wait queue
2658  * entry unconditionally - even if something else had already woken the
2659  * target.
2660  */
2661 static int synchronous_wake_function(wait_queue_entry_t *wait,
2662 			unsigned int mode, int sync, void *key)
2663 {
2664 	int ret = default_wake_function(wait, mode, sync, key);
2665 	list_del_init(&wait->entry);
2666 	return ret;
2667 }
2668 
2669 /*
2670  * Trinity finds that probing a hole which tmpfs is punching can
2671  * prevent the hole-punch from ever completing: which in turn
2672  * locks writers out with its hold on i_rwsem.  So refrain from
2673  * faulting pages into the hole while it's being punched.  Although
2674  * shmem_undo_range() does remove the additions, it may be unable to
2675  * keep up, as each new page needs its own unmap_mapping_range() call,
2676  * and the i_mmap tree grows ever slower to scan if new vmas are added.
2677  *
2678  * It does not matter if we sometimes reach this check just before the
2679  * hole-punch begins, so that one fault then races with the punch:
2680  * we just need to make racing faults a rare case.
2681  *
2682  * The implementation below would be much simpler if we just used a
2683  * standard mutex or completion: but we cannot take i_rwsem in fault,
2684  * and bloating every shmem inode for this unlikely case would be sad.
2685  */
2686 static vm_fault_t shmem_falloc_wait(struct vm_fault *vmf, struct inode *inode)
2687 {
2688 	struct shmem_falloc *shmem_falloc;
2689 	struct file *fpin = NULL;
2690 	vm_fault_t ret = 0;
2691 
2692 	spin_lock(&inode->i_lock);
2693 	shmem_falloc = inode->i_private;
2694 	if (shmem_falloc &&
2695 	    shmem_falloc->waitq &&
2696 	    vmf->pgoff >= shmem_falloc->start &&
2697 	    vmf->pgoff < shmem_falloc->next) {
2698 		wait_queue_head_t *shmem_falloc_waitq;
2699 		DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
2700 
2701 		ret = VM_FAULT_NOPAGE;
2702 		fpin = maybe_unlock_mmap_for_io(vmf, NULL);
2703 		shmem_falloc_waitq = shmem_falloc->waitq;
2704 		prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
2705 				TASK_UNINTERRUPTIBLE);
2706 		spin_unlock(&inode->i_lock);
2707 		schedule();
2708 
2709 		/*
2710 		 * shmem_falloc_waitq points into the shmem_fallocate()
2711 		 * stack of the hole-punching task: shmem_falloc_waitq
2712 		 * is usually invalid by the time we reach here, but
2713 		 * finish_wait() does not dereference it in that case;
2714 		 * though i_lock is still needed lest we race with wake_up_all().
2715 		 */
2716 		spin_lock(&inode->i_lock);
2717 		finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
2718 	}
2719 	spin_unlock(&inode->i_lock);
2720 	if (fpin) {
2721 		fput(fpin);
2722 		ret = VM_FAULT_RETRY;
2723 	}
2724 	return ret;
2725 }
2726 
2727 static vm_fault_t shmem_fault(struct vm_fault *vmf)
2728 {
2729 	struct inode *inode = file_inode(vmf->vma->vm_file);
2730 	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
2731 	struct folio *folio = NULL;
2732 	vm_fault_t ret = 0;
2733 	int err;
2734 
2735 	/*
2736 	 * Trinity finds that probing a hole which tmpfs is punching can
2737 	 * prevent the hole-punch from ever completing: noted in i_private.
2738 	 */
2739 	if (unlikely(inode->i_private)) {
2740 		ret = shmem_falloc_wait(vmf, inode);
2741 		if (ret)
2742 			return ret;
2743 	}
2744 
2745 	WARN_ON_ONCE(vmf->page != NULL);
2746 	err = shmem_get_folio_gfp(inode, vmf->pgoff, 0, &folio, SGP_CACHE,
2747 				  gfp, vmf, &ret);
2748 	if (err)
2749 		return vmf_error(err);
2750 	if (folio) {
2751 		vmf->page = folio_file_page(folio, vmf->pgoff);
2752 		ret |= VM_FAULT_LOCKED;
2753 	}
2754 	return ret;
2755 }
2756 
2757 unsigned long shmem_get_unmapped_area(struct file *file,
2758 				      unsigned long uaddr, unsigned long len,
2759 				      unsigned long pgoff, unsigned long flags)
2760 {
2761 	unsigned long addr;
2762 	unsigned long offset;
2763 	unsigned long inflated_len;
2764 	unsigned long inflated_addr;
2765 	unsigned long inflated_offset;
2766 	unsigned long hpage_size;
2767 
2768 	if (len > TASK_SIZE)
2769 		return -ENOMEM;
2770 
2771 	addr = mm_get_unmapped_area(current->mm, file, uaddr, len, pgoff,
2772 				    flags);
2773 
2774 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
2775 		return addr;
2776 	if (IS_ERR_VALUE(addr))
2777 		return addr;
2778 	if (addr & ~PAGE_MASK)
2779 		return addr;
2780 	if (addr > TASK_SIZE - len)
2781 		return addr;
2782 
2783 	if (shmem_huge == SHMEM_HUGE_DENY)
2784 		return addr;
2785 	if (flags & MAP_FIXED)
2786 		return addr;
2787 	/*
2788 	 * Our priority is to support MAP_SHARED mapped hugely;
2789 	 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
2790 	 * But if caller specified an address hint and we allocated area there
2791 	 * successfully, respect that as before.
2792 	 */
2793 	if (uaddr == addr)
2794 		return addr;
2795 
2796 	hpage_size = HPAGE_PMD_SIZE;
2797 	if (shmem_huge != SHMEM_HUGE_FORCE) {
2798 		struct super_block *sb;
2799 		unsigned long __maybe_unused hpage_orders;
2800 		int order = 0;
2801 
2802 		if (file) {
2803 			VM_BUG_ON(file->f_op != &shmem_file_operations);
2804 			sb = file_inode(file)->i_sb;
2805 		} else {
2806 			/*
2807 			 * Called directly from mm/mmap.c, or drivers/char/mem.c
2808 			 * for "/dev/zero", to create a shared anonymous object.
2809 			 */
2810 			if (IS_ERR(shm_mnt))
2811 				return addr;
2812 			sb = shm_mnt->mnt_sb;
2813 
2814 			/*
2815 			 * Find the highest mTHP order used for anonymous shmem to
2816 			 * provide a suitable alignment address.
2817 			 */
2818 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2819 			hpage_orders = READ_ONCE(huge_shmem_orders_always);
2820 			hpage_orders |= READ_ONCE(huge_shmem_orders_within_size);
2821 			hpage_orders |= READ_ONCE(huge_shmem_orders_madvise);
2822 			if (SHMEM_SB(sb)->huge != SHMEM_HUGE_NEVER)
2823 				hpage_orders |= READ_ONCE(huge_shmem_orders_inherit);
2824 
2825 			if (hpage_orders > 0) {
2826 				order = highest_order(hpage_orders);
2827 				hpage_size = PAGE_SIZE << order;
2828 			}
2829 #endif
2830 		}
2831 		if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER && !order)
2832 			return addr;
2833 	}
2834 
2835 	if (len < hpage_size)
2836 		return addr;
2837 
2838 	offset = (pgoff << PAGE_SHIFT) & (hpage_size - 1);
2839 	if (offset && offset + len < 2 * hpage_size)
2840 		return addr;
2841 	if ((addr & (hpage_size - 1)) == offset)
2842 		return addr;
2843 
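	/*
	 * Ask for an area inflated by almost one huge page, so that a start
	 * address with the required huge-page phase can be carved out of it.
	 */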
2844 	inflated_len = len + hpage_size - PAGE_SIZE;
2845 	if (inflated_len > TASK_SIZE)
2846 		return addr;
2847 	if (inflated_len < len)
2848 		return addr;
2849 
2850 	inflated_addr = mm_get_unmapped_area(current->mm, NULL, uaddr,
2851 					     inflated_len, 0, flags);
2852 	if (IS_ERR_VALUE(inflated_addr))
2853 		return addr;
2854 	if (inflated_addr & ~PAGE_MASK)
2855 		return addr;
2856 
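	/* Shift the inflated area to match the file offset's phase within a huge page */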
2857 	inflated_offset = inflated_addr & (hpage_size - 1);
2858 	inflated_addr += offset - inflated_offset;
2859 	if (inflated_offset > offset)
2860 		inflated_addr += hpage_size;
2861 
2862 	if (inflated_addr > TASK_SIZE - len)
2863 		return addr;
2864 	return inflated_addr;
2865 }
2866 
2867 #ifdef CONFIG_NUMA
2868 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
2869 {
2870 	struct inode *inode = file_inode(vma->vm_file);
2871 	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
2872 }
2873 
2874 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
2875 					  unsigned long addr, pgoff_t *ilx)
2876 {
2877 	struct inode *inode = file_inode(vma->vm_file);
2878 	pgoff_t index;
2879 
2880 	/*
2881 	 * Bias interleave by inode number to distribute better across nodes;
2882 	 * but this interface is independent of which page order is used, so
2883 	 * supplies only that bias, letting caller apply the offset (adjusted
2884 	 * by page order, as in shmem_get_pgoff_policy() and get_vma_policy()).
2885 	 */
2886 	*ilx = inode->i_ino;
2887 	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2888 	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
2889 }
2890 
2891 static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info,
2892 			pgoff_t index, unsigned int order, pgoff_t *ilx)
2893 {
2894 	struct mempolicy *mpol;
2895 
2896 	/* Bias interleave by inode number to distribute better across nodes */
2897 	*ilx = info->vfs_inode.i_ino + (index >> order);
2898 
2899 	mpol = mpol_shared_policy_lookup(&info->policy, index);
2900 	return mpol ? mpol : get_task_policy(current);
2901 }
2902 #else
2903 static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info,
2904 			pgoff_t index, unsigned int order, pgoff_t *ilx)
2905 {
2906 	*ilx = 0;
2907 	return NULL;
2908 }
2909 #endif /* CONFIG_NUMA */
2910 
2911 int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
2912 {
2913 	struct inode *inode = file_inode(file);
2914 	struct shmem_inode_info *info = SHMEM_I(inode);
2915 	int retval = -ENOMEM;
2916 
2917 	/*
2918 	 * What serializes the accesses to info->flags?
2919 	 * ipc_lock_object() when called from shmctl_do_lock(),
2920 	 * no serialization needed when called from shm_destroy().
2921 	 */
2922 	if (lock && !(info->flags & VM_LOCKED)) {
2923 		if (!user_shm_lock(inode->i_size, ucounts))
2924 			goto out_nomem;
2925 		info->flags |= VM_LOCKED;
2926 		mapping_set_unevictable(file->f_mapping);
2927 	}
2928 	if (!lock && (info->flags & VM_LOCKED) && ucounts) {
2929 		user_shm_unlock(inode->i_size, ucounts);
2930 		info->flags &= ~VM_LOCKED;
2931 		mapping_clear_unevictable(file->f_mapping);
2932 	}
2933 	retval = 0;
2934 
2935 out_nomem:
2936 	return retval;
2937 }
2938 
2939 static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
2940 {
2941 	struct inode *inode = file_inode(file);
2942 
2943 	file_accessed(file);
2944 	/* This is anonymous shared memory if it is unlinked at the time of mmap */
2945 	if (inode->i_nlink)
2946 		vma->vm_ops = &shmem_vm_ops;
2947 	else
2948 		vma->vm_ops = &shmem_anon_vm_ops;
2949 	return 0;
2950 }
2951 
2952 static int shmem_file_open(struct inode *inode, struct file *file)
2953 {
2954 	file->f_mode |= FMODE_CAN_ODIRECT;
2955 	return generic_file_open(inode, file);
2956 }
2957 
2958 #ifdef CONFIG_TMPFS_XATTR
2959 static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
2960 
2961 #if IS_ENABLED(CONFIG_UNICODE)
2962 /*
2963  * shmem_inode_casefold_flags - Deal with casefold file attribute flag
2964  *
2965  * The casefold file attribute needs some special checks: it can only be added
2966  * to an empty dir, and can't be removed from a non-empty dir.
2967  */
2968 static int shmem_inode_casefold_flags(struct inode *inode, unsigned int fsflags,
2969 				      struct dentry *dentry, unsigned int *i_flags)
2970 {
2971 	unsigned int old = inode->i_flags;
2972 	struct super_block *sb = inode->i_sb;
2973 
2974 	if (fsflags & FS_CASEFOLD_FL) {
2975 		if (!(old & S_CASEFOLD)) {
2976 			if (!sb->s_encoding)
2977 				return -EOPNOTSUPP;
2978 
2979 			if (!S_ISDIR(inode->i_mode))
2980 				return -ENOTDIR;
2981 
2982 			if (dentry && !simple_empty(dentry))
2983 				return -ENOTEMPTY;
2984 		}
2985 
2986 		*i_flags = *i_flags | S_CASEFOLD;
2987 	} else if (old & S_CASEFOLD) {
2988 		if (dentry && !simple_empty(dentry))
2989 			return -ENOTEMPTY;
2990 	}
2991 
2992 	return 0;
2993 }
2994 #else
2995 static int shmem_inode_casefold_flags(struct inode *inode, unsigned int fsflags,
2996 				      struct dentry *dentry, unsigned int *i_flags)
2997 {
2998 	if (fsflags & FS_CASEFOLD_FL)
2999 		return -EOPNOTSUPP;
3000 
3001 	return 0;
3002 }
3003 #endif
3004 
3005 /*
3006  * chattr's fsflags are unrelated to extended attributes,
3007  * but tmpfs has chosen to enable them under the same config option.
3008  */
3009 static int shmem_set_inode_flags(struct inode *inode, unsigned int fsflags, struct dentry *dentry)
3010 {
3011 	unsigned int i_flags = 0;
3012 	int ret;
3013 
3014 	ret = shmem_inode_casefold_flags(inode, fsflags, dentry, &i_flags);
3015 	if (ret)
3016 		return ret;
3017 
3018 	if (fsflags & FS_NOATIME_FL)
3019 		i_flags |= S_NOATIME;
3020 	if (fsflags & FS_APPEND_FL)
3021 		i_flags |= S_APPEND;
3022 	if (fsflags & FS_IMMUTABLE_FL)
3023 		i_flags |= S_IMMUTABLE;
3024 	/*
3025 	 * But FS_NODUMP_FL does not require any action in i_flags.
3026 	 */
3027 	inode_set_flags(inode, i_flags, S_NOATIME | S_APPEND | S_IMMUTABLE | S_CASEFOLD);
3028 
3029 	return 0;
3030 }
3031 #else
3032 static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags, struct dentry *dentry)
3033 {
3034 }
3035 #define shmem_initxattrs NULL
3036 #endif
3037 
3038 static struct offset_ctx *shmem_get_offset_ctx(struct inode *inode)
3039 {
3040 	return &SHMEM_I(inode)->dir_offsets;
3041 }
3042 
3043 static struct inode *__shmem_get_inode(struct mnt_idmap *idmap,
3044 					     struct super_block *sb,
3045 					     struct inode *dir, umode_t mode,
3046 					     dev_t dev, unsigned long flags)
3047 {
3048 	struct inode *inode;
3049 	struct shmem_inode_info *info;
3050 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3051 	ino_t ino;
3052 	int err;
3053 
3054 	err = shmem_reserve_inode(sb, &ino);
3055 	if (err)
3056 		return ERR_PTR(err);
3057 
3058 	inode = new_inode(sb);
3059 	if (!inode) {
3060 		shmem_free_inode(sb, 0);
3061 		return ERR_PTR(-ENOSPC);
3062 	}
3063 
3064 	inode->i_ino = ino;
3065 	inode_init_owner(idmap, inode, dir, mode);
3066 	inode->i_blocks = 0;
3067 	simple_inode_init_ts(inode);
3068 	inode->i_generation = get_random_u32();
3069 	info = SHMEM_I(inode);
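	/* Zero the shmem-specific fields, which precede the embedded vfs inode */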
3070 	memset(info, 0, (char *)inode - (char *)info);
3071 	spin_lock_init(&info->lock);
3072 	atomic_set(&info->stop_eviction, 0);
3073 	info->seals = F_SEAL_SEAL;
3074 	info->flags = flags & VM_NORESERVE;
3075 	info->i_crtime = inode_get_mtime(inode);
3076 	info->fsflags = (dir == NULL) ? 0 :
3077 		SHMEM_I(dir)->fsflags & SHMEM_FL_INHERITED;
3078 	if (info->fsflags)
3079 		shmem_set_inode_flags(inode, info->fsflags, NULL);
3080 	INIT_LIST_HEAD(&info->shrinklist);
3081 	INIT_LIST_HEAD(&info->swaplist);
3082 	simple_xattrs_init(&info->xattrs);
3083 	cache_no_acl(inode);
3084 	if (sbinfo->noswap)
3085 		mapping_set_unevictable(inode->i_mapping);
3086 
3087 	/* Don't consider 'deny' for emergencies and 'force' for testing */
3088 	if (sbinfo->huge)
3089 		mapping_set_large_folios(inode->i_mapping);
3090 
3091 	switch (mode & S_IFMT) {
3092 	default:
3093 		inode->i_op = &shmem_special_inode_operations;
3094 		init_special_inode(inode, mode, dev);
3095 		break;
3096 	case S_IFREG:
3097 		inode->i_mapping->a_ops = &shmem_aops;
3098 		inode->i_op = &shmem_inode_operations;
3099 		inode->i_fop = &shmem_file_operations;
3100 		mpol_shared_policy_init(&info->policy,
3101 					 shmem_get_sbmpol(sbinfo));
3102 		break;
3103 	case S_IFDIR:
3104 		inc_nlink(inode);
3105 		/* Some things misbehave if size == 0 on a directory */
3106 		inode->i_size = 2 * BOGO_DIRENT_SIZE;
3107 		inode->i_op = &shmem_dir_inode_operations;
3108 		inode->i_fop = &simple_offset_dir_operations;
3109 		simple_offset_init(shmem_get_offset_ctx(inode));
3110 		break;
3111 	case S_IFLNK:
3112 		/*
3113 		 * Must not load anything in the rbtree,
3114 		 * mpol_free_shared_policy will not be called.
3115 		 */
3116 		mpol_shared_policy_init(&info->policy, NULL);
3117 		break;
3118 	}
3119 
3120 	lockdep_annotate_inode_mutex_key(inode);
3121 	return inode;
3122 }
3123 
3124 #ifdef CONFIG_TMPFS_QUOTA
3125 static struct inode *shmem_get_inode(struct mnt_idmap *idmap,
3126 				     struct super_block *sb, struct inode *dir,
3127 				     umode_t mode, dev_t dev, unsigned long flags)
3128 {
3129 	int err;
3130 	struct inode *inode;
3131 
3132 	inode = __shmem_get_inode(idmap, sb, dir, mode, dev, flags);
3133 	if (IS_ERR(inode))
3134 		return inode;
3135 
3136 	err = dquot_initialize(inode);
3137 	if (err)
3138 		goto errout;
3139 
3140 	err = dquot_alloc_inode(inode);
3141 	if (err) {
3142 		dquot_drop(inode);
3143 		goto errout;
3144 	}
3145 	return inode;
3146 
3147 errout:
3148 	inode->i_flags |= S_NOQUOTA;
3149 	iput(inode);
3150 	return ERR_PTR(err);
3151 }
3152 #else
3153 static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap,
3154 				     struct super_block *sb, struct inode *dir,
3155 				     umode_t mode, dev_t dev, unsigned long flags)
3156 {
3157 	return __shmem_get_inode(idmap, sb, dir, mode, dev, flags);
3158 }
3159 #endif /* CONFIG_TMPFS_QUOTA */
3160 
3161 #ifdef CONFIG_USERFAULTFD
3162 int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
3163 			   struct vm_area_struct *dst_vma,
3164 			   unsigned long dst_addr,
3165 			   unsigned long src_addr,
3166 			   uffd_flags_t flags,
3167 			   struct folio **foliop)
3168 {
3169 	struct inode *inode = file_inode(dst_vma->vm_file);
3170 	struct shmem_inode_info *info = SHMEM_I(inode);
3171 	struct address_space *mapping = inode->i_mapping;
3172 	gfp_t gfp = mapping_gfp_mask(mapping);
3173 	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
3174 	void *page_kaddr;
3175 	struct folio *folio;
3176 	int ret;
3177 	pgoff_t max_off;
3178 
3179 	if (shmem_inode_acct_blocks(inode, 1)) {
3180 		/*
3181 		 * We may have got a page, returned -ENOENT triggering a retry,
3182 		 * and now we find ourselves with -ENOMEM. Release the page, to
3183 		 * avoid a BUG_ON in our caller.
3184 		 */
3185 		if (unlikely(*foliop)) {
3186 			folio_put(*foliop);
3187 			*foliop = NULL;
3188 		}
3189 		return -ENOMEM;
3190 	}
3191 
3192 	if (!*foliop) {
3193 		ret = -ENOMEM;
3194 		folio = shmem_alloc_folio(gfp, 0, info, pgoff);
3195 		if (!folio)
3196 			goto out_unacct_blocks;
3197 
3198 		if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY)) {
3199 			page_kaddr = kmap_local_folio(folio, 0);
3200 			/*
3201 			 * The read mmap_lock is held here.  Despite the
3202 			 * mmap_lock being read recursive a deadlock is still
3203 			 * possible if a writer has taken a lock.  For example:
3204 			 *
3205 			 * process A thread 1 takes read lock on own mmap_lock
3206 			 * process A thread 2 calls mmap, blocks taking write lock
3207 			 * process B thread 1 takes page fault, read lock on own mmap lock
3208 			 * process B thread 2 calls mmap, blocks taking write lock
3209 			 * process A thread 1 blocks taking read lock on process B
3210 			 * process B thread 1 blocks taking read lock on process A
3211 			 *
3212 			 * Disable page faults to prevent potential deadlock
3213 			 * and retry the copy outside the mmap_lock.
3214 			 */
3215 			pagefault_disable();
3216 			ret = copy_from_user(page_kaddr,
3217 					     (const void __user *)src_addr,
3218 					     PAGE_SIZE);
3219 			pagefault_enable();
3220 			kunmap_local(page_kaddr);
3221 
3222 			/* fallback to copy_from_user outside mmap_lock */
3223 			if (unlikely(ret)) {
3224 				*foliop = folio;
3225 				ret = -ENOENT;
3226 				/* don't free the page */
3227 				goto out_unacct_blocks;
3228 			}
3229 
3230 			flush_dcache_folio(folio);
3231 		} else {		/* ZEROPAGE */
3232 			clear_user_highpage(&folio->page, dst_addr);
3233 		}
3234 	} else {
3235 		folio = *foliop;
3236 		VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
3237 		*foliop = NULL;
3238 	}
3239 
3240 	VM_BUG_ON(folio_test_locked(folio));
3241 	VM_BUG_ON(folio_test_swapbacked(folio));
3242 	__folio_set_locked(folio);
3243 	__folio_set_swapbacked(folio);
3244 	__folio_mark_uptodate(folio);
3245 
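	/* The destination offset may now be beyond EOF (e.g. after truncation) */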
3246 	ret = -EFAULT;
3247 	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3248 	if (unlikely(pgoff >= max_off))
3249 		goto out_release;
3250 
3251 	ret = mem_cgroup_charge(folio, dst_vma->vm_mm, gfp);
3252 	if (ret)
3253 		goto out_release;
3254 	ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL, gfp);
3255 	if (ret)
3256 		goto out_release;
3257 
3258 	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
3259 				       &folio->page, true, flags);
3260 	if (ret)
3261 		goto out_delete_from_cache;
3262 
3263 	shmem_recalc_inode(inode, 1, 0);
3264 	folio_unlock(folio);
3265 	return 0;
3266 out_delete_from_cache:
3267 	filemap_remove_folio(folio);
3268 out_release:
3269 	folio_unlock(folio);
3270 	folio_put(folio);
3271 out_unacct_blocks:
3272 	shmem_inode_unacct_blocks(inode, 1);
3273 	return ret;
3274 }
3275 #endif /* CONFIG_USERFAULTFD */
3276 
3277 #ifdef CONFIG_TMPFS
3278 static const struct inode_operations shmem_symlink_inode_operations;
3279 static const struct inode_operations shmem_short_symlink_operations;
3280 
3281 static int
3282 shmem_write_begin(struct file *file, struct address_space *mapping,
3283 			loff_t pos, unsigned len,
3284 			struct folio **foliop, void **fsdata)
3285 {
3286 	struct inode *inode = mapping->host;
3287 	struct shmem_inode_info *info = SHMEM_I(inode);
3288 	pgoff_t index = pos >> PAGE_SHIFT;
3289 	struct folio *folio;
3290 	int ret = 0;
3291 
3292 	/* i_rwsem is held by caller */
3293 	if (unlikely(info->seals & (F_SEAL_GROW |
3294 				   F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
3295 		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
3296 			return -EPERM;
3297 		if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
3298 			return -EPERM;
3299 	}
3300 
3301 	ret = shmem_get_folio(inode, index, pos + len, &folio, SGP_WRITE);
3302 	if (ret)
3303 		return ret;
3304 
3305 	if (folio_contain_hwpoisoned_page(folio)) {
3306 		folio_unlock(folio);
3307 		folio_put(folio);
3308 		return -EIO;
3309 	}
3310 
3311 	*foliop = folio;
3312 	return 0;
3313 }
3314 
3315 static int
3316 shmem_write_end(struct file *file, struct address_space *mapping,
3317 			loff_t pos, unsigned len, unsigned copied,
3318 			struct folio *folio, void *fsdata)
3319 {
3320 	struct inode *inode = mapping->host;
3321 
3322 	if (pos + copied > inode->i_size)
3323 		i_size_write(inode, pos + copied);
3324 
3325 	if (!folio_test_uptodate(folio)) {
3326 		if (copied < folio_size(folio)) {
3327 			size_t from = offset_in_folio(folio, pos);
3328 			folio_zero_segments(folio, 0, from,
3329 					from + copied, folio_size(folio));
3330 		}
3331 		folio_mark_uptodate(folio);
3332 	}
3333 	folio_mark_dirty(folio);
3334 	folio_unlock(folio);
3335 	folio_put(folio);
3336 
3337 	return copied;
3338 }
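/*
 * Sketch of the partial-write path above (offsets hypothetical, not part of
 * the build): a 100-byte write at pos 1000 into a not-yet-uptodate 4 KiB
 * folio copies [1000, 1100), and folio_zero_segments() then clears [0, 1000)
 * and [1100, 4096), so the folio can be marked uptodate without exposing
 * stale data.
 */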
3339 
3340 static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
3341 {
3342 	struct file *file = iocb->ki_filp;
3343 	struct inode *inode = file_inode(file);
3344 	struct address_space *mapping = inode->i_mapping;
3345 	pgoff_t index;
3346 	unsigned long offset;
3347 	int error = 0;
3348 	ssize_t retval = 0;
3349 
3350 	for (;;) {
3351 		struct folio *folio = NULL;
3352 		struct page *page = NULL;
3353 		unsigned long nr, ret;
3354 		loff_t end_offset, i_size = i_size_read(inode);
3355 		bool fallback_page_copy = false;
3356 		size_t fsize;
3357 
3358 		if (unlikely(iocb->ki_pos >= i_size))
3359 			break;
3360 
3361 		index = iocb->ki_pos >> PAGE_SHIFT;
3362 		error = shmem_get_folio(inode, index, 0, &folio, SGP_READ);
3363 		if (error) {
3364 			if (error == -EINVAL)
3365 				error = 0;
3366 			break;
3367 		}
3368 		if (folio) {
3369 			folio_unlock(folio);
3370 
3371 			page = folio_file_page(folio, index);
3372 			if (PageHWPoison(page)) {
3373 				folio_put(folio);
3374 				error = -EIO;
3375 				break;
3376 			}
3377 
3378 			if (folio_test_large(folio) &&
3379 			    folio_test_has_hwpoisoned(folio))
3380 				fallback_page_copy = true;
3381 		}
3382 
3383 		/*
3384 		 * We must re-evaluate i_size now, since reads (unlike writes)
3385 		 * are called without i_rwsem protection against truncate.
3386 		 */
3387 		i_size = i_size_read(inode);
3388 		if (unlikely(iocb->ki_pos >= i_size)) {
3389 			if (folio)
3390 				folio_put(folio);
3391 			break;
3392 		}
3393 		end_offset = min_t(loff_t, i_size, iocb->ki_pos + to->count);
3394 		if (folio && likely(!fallback_page_copy))
3395 			fsize = folio_size(folio);
3396 		else
3397 			fsize = PAGE_SIZE;
3398 		offset = iocb->ki_pos & (fsize - 1);
3399 		nr = min_t(loff_t, end_offset - iocb->ki_pos, fsize - offset);
3400 
3401 		if (folio) {
3402 			/*
3403 			 * If users can be writing to this page using arbitrary
3404 			 * virtual addresses, take care about potential aliasing
3405 			 * before reading the page on the kernel side.
3406 			 */
3407 			if (mapping_writably_mapped(mapping)) {
3408 				if (likely(!fallback_page_copy))
3409 					flush_dcache_folio(folio);
3410 				else
3411 					flush_dcache_page(page);
3412 			}
3413 
3414 			/*
3415 			 * Mark the folio accessed if we read the beginning.
3416 			 */
3417 			if (!offset)
3418 				folio_mark_accessed(folio);
3419 			/*
3420 			 * Ok, we have the page, and it's up-to-date, so
3421 			 * now we can copy it to user space...
3422 			 */
3423 			if (likely(!fallback_page_copy))
3424 				ret = copy_folio_to_iter(folio, offset, nr, to);
3425 			else
3426 				ret = copy_page_to_iter(page, offset, nr, to);
3427 			folio_put(folio);
3428 		} else if (user_backed_iter(to)) {
3429 			/*
3430 			 * Copying to user space tends to be so well optimized,
3431 			 * and clear_user() so much less so, that it is noticeably
3432 			 * faster to copy the zero page instead of clearing.
3433 			 */
3434 			ret = copy_page_to_iter(ZERO_PAGE(0), offset, nr, to);
3435 		} else {
3436 			/*
3437 			 * But submitting the same page twice in a row to
3438 			 * splice() - or others? - can result in confusion:
3439 			 * so don't attempt that optimization on pipes etc.
3440 			 */
3441 			ret = iov_iter_zero(nr, to);
3442 		}
3443 
3444 		retval += ret;
3445 		iocb->ki_pos += ret;
3446 
3447 		if (!iov_iter_count(to))
3448 			break;
3449 		if (ret < nr) {
3450 			error = -EFAULT;
3451 			break;
3452 		}
3453 		cond_resched();
3454 	}
3455 
3456 	file_accessed(file);
3457 	return retval ? retval : error;
3458 }
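/*
 * Reader's note: shmem_get_folio(..., SGP_READ) returns a NULL folio for a
 * hole, so the loop above copies from ZERO_PAGE(0) (or zeroes the iterator
 * for pipes and other kernel-backed iterators) instead of instantiating
 * pages - reading a sparse tmpfs file does not allocate memory for it.
 */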
3459 
3460 static ssize_t shmem_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
3461 {
3462 	struct file *file = iocb->ki_filp;
3463 	struct inode *inode = file->f_mapping->host;
3464 	ssize_t ret;
3465 
3466 	inode_lock(inode);
3467 	ret = generic_write_checks(iocb, from);
3468 	if (ret <= 0)
3469 		goto unlock;
3470 	ret = file_remove_privs(file);
3471 	if (ret)
3472 		goto unlock;
3473 	ret = file_update_time(file);
3474 	if (ret)
3475 		goto unlock;
3476 	ret = generic_perform_write(iocb, from);
3477 unlock:
3478 	inode_unlock(inode);
3479 	return ret;
3480 }
3481 
3482 static bool zero_pipe_buf_get(struct pipe_inode_info *pipe,
3483 			      struct pipe_buffer *buf)
3484 {
3485 	return true;
3486 }
3487 
3488 static void zero_pipe_buf_release(struct pipe_inode_info *pipe,
3489 				  struct pipe_buffer *buf)
3490 {
3491 }
3492 
3493 static bool zero_pipe_buf_try_steal(struct pipe_inode_info *pipe,
3494 				    struct pipe_buffer *buf)
3495 {
3496 	return false;
3497 }
3498 
3499 static const struct pipe_buf_operations zero_pipe_buf_ops = {
3500 	.release	= zero_pipe_buf_release,
3501 	.try_steal	= zero_pipe_buf_try_steal,
3502 	.get		= zero_pipe_buf_get,
3503 };
3504 
3505 static size_t splice_zeropage_into_pipe(struct pipe_inode_info *pipe,
3506 					loff_t fpos, size_t size)
3507 {
3508 	size_t offset = fpos & ~PAGE_MASK;
3509 
3510 	size = min_t(size_t, size, PAGE_SIZE - offset);
3511 
3512 	if (!pipe_is_full(pipe)) {
3513 		struct pipe_buffer *buf = pipe_head_buf(pipe);
3514 
3515 		*buf = (struct pipe_buffer) {
3516 			.ops	= &zero_pipe_buf_ops,
3517 			.page	= ZERO_PAGE(0),
3518 			.offset	= offset,
3519 			.len	= size,
3520 		};
3521 		pipe->head++;
3522 	}
3523 
3524 	return size;
3525 }
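/*
 * Reader's note: holes in a sparse tmpfs file have no folio at all, so the
 * splice path below feeds the shared ZERO_PAGE(0) into the pipe instead of
 * allocating and clearing memory; the no-op pipe_buf_operations above keep
 * that page from being stolen or released.
 */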
3526 
3527 static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
3528 				      struct pipe_inode_info *pipe,
3529 				      size_t len, unsigned int flags)
3530 {
3531 	struct inode *inode = file_inode(in);
3532 	struct address_space *mapping = inode->i_mapping;
3533 	struct folio *folio = NULL;
3534 	size_t total_spliced = 0, used, npages, n, part;
3535 	loff_t isize;
3536 	int error = 0;
3537 
3538 	/* Work out how much data we can actually add into the pipe */
3539 	used = pipe_buf_usage(pipe);
3540 	npages = max_t(ssize_t, pipe->max_usage - used, 0);
3541 	len = min_t(size_t, len, npages * PAGE_SIZE);
3542 
3543 	do {
3544 		bool fallback_page_splice = false;
3545 		struct page *page = NULL;
3546 		pgoff_t index;
3547 		size_t size;
3548 
3549 		if (*ppos >= i_size_read(inode))
3550 			break;
3551 
3552 		index = *ppos >> PAGE_SHIFT;
3553 		error = shmem_get_folio(inode, index, 0, &folio, SGP_READ);
3554 		if (error) {
3555 			if (error == -EINVAL)
3556 				error = 0;
3557 			break;
3558 		}
3559 		if (folio) {
3560 			folio_unlock(folio);
3561 
3562 			page = folio_file_page(folio, index);
3563 			if (PageHWPoison(page)) {
3564 				error = -EIO;
3565 				break;
3566 			}
3567 
3568 			if (folio_test_large(folio) &&
3569 			    folio_test_has_hwpoisoned(folio))
3570 				fallback_page_splice = true;
3571 		}
3572 
3573 		/*
3574 		 * i_size must be checked after we know the pages are Uptodate.
3575 		 *
3576 		 * Checking i_size only after the folio lookup allows us to
3577 		 * calculate the correct value for "part", so the zero-filled
3578 		 * part of the page is not copied into the pipe (unless
3579 		 * another truncate extends the file - this is desired though).
3580 		 */
3581 		isize = i_size_read(inode);
3582 		if (unlikely(*ppos >= isize))
3583 			break;
3584 		/*
3585 		 * Fallback to PAGE_SIZE splice if the large folio has hwpoisoned
3586 		 * pages.
3587 		 */
3588 		size = len;
3589 		if (unlikely(fallback_page_splice)) {
3590 			size_t offset = *ppos & ~PAGE_MASK;
3591 
3592 			size = umin(size, PAGE_SIZE - offset);
3593 		}
3594 		part = min_t(loff_t, isize - *ppos, size);
3595 
3596 		if (folio) {
3597 			/*
3598 			 * If users can be writing to this page using arbitrary
3599 			 * virtual addresses, take care about potential aliasing
3600 			 * before reading the page on the kernel side.
3601 			 */
3602 			if (mapping_writably_mapped(mapping)) {
3603 				if (likely(!fallback_page_splice))
3604 					flush_dcache_folio(folio);
3605 				else
3606 					flush_dcache_page(page);
3607 			}
3608 			folio_mark_accessed(folio);
3609 			/*
3610 			 * Ok, we have the page, and it's up-to-date, so we can
3611 			 * now splice it into the pipe.
3612 			 */
3613 			n = splice_folio_into_pipe(pipe, folio, *ppos, part);
3614 			folio_put(folio);
3615 			folio = NULL;
3616 		} else {
3617 			n = splice_zeropage_into_pipe(pipe, *ppos, part);
3618 		}
3619 
3620 		if (!n)
3621 			break;
3622 		len -= n;
3623 		total_spliced += n;
3624 		*ppos += n;
3625 		in->f_ra.prev_pos = *ppos;
3626 		if (pipe_is_full(pipe))
3627 			break;
3628 
3629 		cond_resched();
3630 	} while (len);
3631 
3632 	if (folio)
3633 		folio_put(folio);
3634 
3635 	file_accessed(in);
3636 	return total_spliced ? total_spliced : error;
3637 }
3638 
3639 static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
3640 {
3641 	struct address_space *mapping = file->f_mapping;
3642 	struct inode *inode = mapping->host;
3643 
3644 	if (whence != SEEK_DATA && whence != SEEK_HOLE)
3645 		return generic_file_llseek_size(file, offset, whence,
3646 					MAX_LFS_FILESIZE, i_size_read(inode));
3647 	if (offset < 0)
3648 		return -ENXIO;
3649 
3650 	inode_lock(inode);
3651 	/* We're holding i_rwsem so we can access i_size directly */
3652 	offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence);
3653 	if (offset >= 0)
3654 		offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
3655 	inode_unlock(inode);
3656 	return offset;
3657 }
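/*
 * Usage sketch (hypothetical fd, not part of the build): the SEEK_HOLE /
 * SEEK_DATA handling above is reached from user space via lseek(2), e.g.
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);		/- start of first data extent
 *	off_t hole = lseek(fd, data, SEEK_HOLE);	/- end of that extent
 *
 * An offset at or beyond EOF makes mapping_seek_hole_data() return -ENXIO,
 * which lseek(2) reports as errno ENXIO.
 */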
3658 
3659 static long shmem_fallocate(struct file *file, int mode, loff_t offset,
3660 							 loff_t len)
3661 {
3662 	struct inode *inode = file_inode(file);
3663 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
3664 	struct shmem_inode_info *info = SHMEM_I(inode);
3665 	struct shmem_falloc shmem_falloc;
3666 	pgoff_t start, index, end, undo_fallocend;
3667 	int error;
3668 
3669 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
3670 		return -EOPNOTSUPP;
3671 
3672 	inode_lock(inode);
3673 
3674 	if (mode & FALLOC_FL_PUNCH_HOLE) {
3675 		struct address_space *mapping = file->f_mapping;
3676 		loff_t unmap_start = round_up(offset, PAGE_SIZE);
3677 		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
3678 		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
3679 
3680 		/* protected by i_rwsem */
3681 		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
3682 			error = -EPERM;
3683 			goto out;
3684 		}
3685 
3686 		shmem_falloc.waitq = &shmem_falloc_waitq;
3687 		shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT;
3688 		shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
3689 		spin_lock(&inode->i_lock);
3690 		inode->i_private = &shmem_falloc;
3691 		spin_unlock(&inode->i_lock);
3692 
3693 		if ((u64)unmap_end > (u64)unmap_start)
3694 			unmap_mapping_range(mapping, unmap_start,
3695 					    1 + unmap_end - unmap_start, 0);
3696 		shmem_truncate_range(inode, offset, offset + len - 1);
3697 		/* No need to unmap again: hole-punching leaves COWed pages */
3698 
3699 		spin_lock(&inode->i_lock);
3700 		inode->i_private = NULL;
3701 		wake_up_all(&shmem_falloc_waitq);
3702 		WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
3703 		spin_unlock(&inode->i_lock);
3704 		error = 0;
3705 		goto out;
3706 	}
3707 
3708 	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
3709 	error = inode_newsize_ok(inode, offset + len);
3710 	if (error)
3711 		goto out;
3712 
3713 	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
3714 		error = -EPERM;
3715 		goto out;
3716 	}
3717 
3718 	start = offset >> PAGE_SHIFT;
3719 	end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
3720 	/* Try to avoid a swapstorm if len is impossible to satisfy */
3721 	if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
3722 		error = -ENOSPC;
3723 		goto out;
3724 	}
3725 
3726 	shmem_falloc.waitq = NULL;
3727 	shmem_falloc.start = start;
3728 	shmem_falloc.next  = start;
3729 	shmem_falloc.nr_falloced = 0;
3730 	shmem_falloc.nr_unswapped = 0;
3731 	spin_lock(&inode->i_lock);
3732 	inode->i_private = &shmem_falloc;
3733 	spin_unlock(&inode->i_lock);
3734 
3735 	/*
3736 	 * info->fallocend is only relevant when huge pages might be
3737 	 * involved: to prevent split_huge_page() freeing fallocated
3738 	 * pages when FALLOC_FL_KEEP_SIZE committed beyond i_size.
3739 	 */
3740 	undo_fallocend = info->fallocend;
3741 	if (info->fallocend < end)
3742 		info->fallocend = end;
3743 
3744 	for (index = start; index < end; ) {
3745 		struct folio *folio;
3746 
3747 		/*
3748 		 * Check for fatal signal so that we abort early in OOM
3749 		 * situations. We don't want to abort in case of non-fatal
3750 		 * signals as large fallocate can take noticeable time and
3751 		 * e.g. periodic timers may result in fallocate constantly
3752 		 * restarting.
3753 		 */
3754 		if (fatal_signal_pending(current))
3755 			error = -EINTR;
3756 		else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
3757 			error = -ENOMEM;
3758 		else
3759 			error = shmem_get_folio(inode, index, offset + len,
3760 						&folio, SGP_FALLOC);
3761 		if (error) {
3762 			info->fallocend = undo_fallocend;
3763 			/* Remove the !uptodate folios we added */
3764 			if (index > start) {
3765 				shmem_undo_range(inode,
3766 				    (loff_t)start << PAGE_SHIFT,
3767 				    ((loff_t)index << PAGE_SHIFT) - 1, true);
3768 			}
3769 			goto undone;
3770 		}
3771 
3772 		/*
3773 		 * Here is a more important optimization than it appears:
3774 		 * a second SGP_FALLOC on the same large folio will clear it,
3775 		 * making it uptodate and un-undoable if we fail later.
3776 		 */
3777 		index = folio_next_index(folio);
3778 		/* Beware 32-bit wraparound */
3779 		if (!index)
3780 			index--;
3781 
3782 		/*
3783 		 * Inform shmem_writepage() how far we have reached.
3784 		 * No need for lock or barrier: we have the page lock.
3785 		 */
3786 		if (!folio_test_uptodate(folio))
3787 			shmem_falloc.nr_falloced += index - shmem_falloc.next;
3788 		shmem_falloc.next = index;
3789 
3790 		/*
3791 		 * If !uptodate, leave it that way so that freeable folios
3792 		 * can be recognized if we need to rollback on error later.
3793 		 * But mark it dirty so that memory pressure will swap rather
3794 		 * than free the folios we are allocating (and SGP_CACHE folios
3795 		 * might still be clean: we now need to mark those dirty too).
3796 		 */
3797 		folio_mark_dirty(folio);
3798 		folio_unlock(folio);
3799 		folio_put(folio);
3800 		cond_resched();
3801 	}
3802 
3803 	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
3804 		i_size_write(inode, offset + len);
3805 undone:
3806 	spin_lock(&inode->i_lock);
3807 	inode->i_private = NULL;
3808 	spin_unlock(&inode->i_lock);
3809 out:
3810 	if (!error)
3811 		file_modified(file);
3812 	inode_unlock(inode);
3813 	return error;
3814 }
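/*
 * Usage sketch (hypothetical fd, not part of the build): both branches above
 * correspond to fallocate(2) from user space, e.g.
 *
 *	- punch a hole: folios covering [4096, 12288) are truncated away,
 *	  i_size is left unchanged:
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 4096, 8192);
 *
 *	- preallocate: folios for [0, 1 MiB) are instantiated and accounted,
 *	  and i_size grows to 1 MiB because KEEP_SIZE is not set:
 *	fallocate(fd, 0, 0, 1 << 20);
 */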
3815 
3816 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
3817 {
3818 	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
3819 
3820 	buf->f_type = TMPFS_MAGIC;
3821 	buf->f_bsize = PAGE_SIZE;
3822 	buf->f_namelen = NAME_MAX;
3823 	if (sbinfo->max_blocks) {
3824 		buf->f_blocks = sbinfo->max_blocks;
3825 		buf->f_bavail =
3826 		buf->f_bfree  = sbinfo->max_blocks -
3827 				percpu_counter_sum(&sbinfo->used_blocks);
3828 	}
3829 	if (sbinfo->max_inodes) {
3830 		buf->f_files = sbinfo->max_inodes;
3831 		buf->f_ffree = sbinfo->free_ispace / BOGO_INODE_SIZE;
3832 	}
3833 	/* else leave those fields 0 like simple_statfs */
3834 
3835 	buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b);
3836 
3837 	return 0;
3838 }
3839 
3840 /*
3841  * File creation. Allocate an inode, and we're done.
3842  */
3843 static int
3844 shmem_mknod(struct mnt_idmap *idmap, struct inode *dir,
3845 	    struct dentry *dentry, umode_t mode, dev_t dev)
3846 {
3847 	struct inode *inode;
3848 	int error;
3849 
3850 	if (!generic_ci_validate_strict_name(dir, &dentry->d_name))
3851 		return -EINVAL;
3852 
3853 	inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, dev, VM_NORESERVE);
3854 	if (IS_ERR(inode))
3855 		return PTR_ERR(inode);
3856 
3857 	error = simple_acl_create(dir, inode);
3858 	if (error)
3859 		goto out_iput;
3860 	error = security_inode_init_security(inode, dir, &dentry->d_name,
3861 					     shmem_initxattrs, NULL);
3862 	if (error && error != -EOPNOTSUPP)
3863 		goto out_iput;
3864 
3865 	error = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
3866 	if (error)
3867 		goto out_iput;
3868 
3869 	dir->i_size += BOGO_DIRENT_SIZE;
3870 	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
3871 	inode_inc_iversion(dir);
3872 
3873 	if (IS_ENABLED(CONFIG_UNICODE) && IS_CASEFOLDED(dir))
3874 		d_add(dentry, inode);
3875 	else
3876 		d_instantiate(dentry, inode);
3877 
3878 	dget(dentry); /* Extra count - pin the dentry in core */
3879 	return error;
3880 
3881 out_iput:
3882 	iput(inode);
3883 	return error;
3884 }
3885 
3886 static int
3887 shmem_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
3888 	      struct file *file, umode_t mode)
3889 {
3890 	struct inode *inode;
3891 	int error;
3892 
3893 	inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, 0, VM_NORESERVE);
3894 	if (IS_ERR(inode)) {
3895 		error = PTR_ERR(inode);
3896 		goto err_out;
3897 	}
3898 	error = security_inode_init_security(inode, dir, NULL,
3899 					     shmem_initxattrs, NULL);
3900 	if (error && error != -EOPNOTSUPP)
3901 		goto out_iput;
3902 	error = simple_acl_create(dir, inode);
3903 	if (error)
3904 		goto out_iput;
3905 	d_tmpfile(file, inode);
3906 
3907 err_out:
3908 	return finish_open_simple(file, error);
3909 out_iput:
3910 	iput(inode);
3911 	return error;
3912 }
3913 
3914 static int shmem_mkdir(struct mnt_idmap *idmap, struct inode *dir,
3915 		       struct dentry *dentry, umode_t mode)
3916 {
3917 	int error;
3918 
3919 	error = shmem_mknod(idmap, dir, dentry, mode | S_IFDIR, 0);
3920 	if (error)
3921 		return error;
3922 	inc_nlink(dir);
3923 	return 0;
3924 }
3925 
3926 static int shmem_create(struct mnt_idmap *idmap, struct inode *dir,
3927 			struct dentry *dentry, umode_t mode, bool excl)
3928 {
3929 	return shmem_mknod(idmap, dir, dentry, mode | S_IFREG, 0);
3930 }
3931 
3932 /*
3933  * Link a file.
3934  */
3935 static int shmem_link(struct dentry *old_dentry, struct inode *dir,
3936 		      struct dentry *dentry)
3937 {
3938 	struct inode *inode = d_inode(old_dentry);
3939 	int ret = 0;
3940 
3941 	/*
3942 	 * No ordinary (disk based) filesystem counts links as inodes;
3943 	 * but each new link needs a new dentry, pinning lowmem, and
3944 	 * tmpfs dentries cannot be pruned until they are unlinked.
3945 	 * But if an O_TMPFILE file is linked into the tmpfs, the
3946 	 * first link must skip that, to get the accounting right.
3947 	 */
3948 	if (inode->i_nlink) {
3949 		ret = shmem_reserve_inode(inode->i_sb, NULL);
3950 		if (ret)
3951 			goto out;
3952 	}
3953 
3954 	ret = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
3955 	if (ret) {
3956 		if (inode->i_nlink)
3957 			shmem_free_inode(inode->i_sb, 0);
3958 		goto out;
3959 	}
3960 
3961 	dir->i_size += BOGO_DIRENT_SIZE;
3962 	inode_set_mtime_to_ts(dir,
3963 			      inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
3964 	inode_inc_iversion(dir);
3965 	inc_nlink(inode);
3966 	ihold(inode);	/* New dentry reference */
3967 	dget(dentry);	/* Extra pinning count for the created dentry */
3968 	if (IS_ENABLED(CONFIG_UNICODE) && IS_CASEFOLDED(dir))
3969 		d_add(dentry, inode);
3970 	else
3971 		d_instantiate(dentry, inode);
3972 out:
3973 	return ret;
3974 }
3975 
3976 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
3977 {
3978 	struct inode *inode = d_inode(dentry);
3979 
3980 	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
3981 		shmem_free_inode(inode->i_sb, 0);
3982 
3983 	simple_offset_remove(shmem_get_offset_ctx(dir), dentry);
3984 
3985 	dir->i_size -= BOGO_DIRENT_SIZE;
3986 	inode_set_mtime_to_ts(dir,
3987 			      inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
3988 	inode_inc_iversion(dir);
3989 	drop_nlink(inode);
3990 	dput(dentry);	/* Undo the count from "create" - does all the work */
3991 
3992 	/*
3993 	 * For now, VFS can't deal with case-insensitive negative dentries, so
3994 	 * we invalidate them
3995 	 */
3996 	if (IS_ENABLED(CONFIG_UNICODE) && IS_CASEFOLDED(dir))
3997 		d_invalidate(dentry);
3998 
3999 	return 0;
4000 }
4001 
4002 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
4003 {
4004 	if (!simple_empty(dentry))
4005 		return -ENOTEMPTY;
4006 
4007 	drop_nlink(d_inode(dentry));
4008 	drop_nlink(dir);
4009 	return shmem_unlink(dir, dentry);
4010 }
4011 
4012 static int shmem_whiteout(struct mnt_idmap *idmap,
4013 			  struct inode *old_dir, struct dentry *old_dentry)
4014 {
4015 	struct dentry *whiteout;
4016 	int error;
4017 
4018 	whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
4019 	if (!whiteout)
4020 		return -ENOMEM;
4021 
4022 	error = shmem_mknod(idmap, old_dir, whiteout,
4023 			    S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
4024 	dput(whiteout);
4025 	if (error)
4026 		return error;
4027 
4028 	/*
4029 	 * Cheat and hash the whiteout while the old dentry is still in
4030 	 * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
4031 	 *
4032 	 * d_lookup() will consistently find one of them at this point,
4033 	 * not sure which one, but that isn't even important.
4034 	 */
4035 	d_rehash(whiteout);
4036 	return 0;
4037 }
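/*
 * Reader's note: a whiteout (as used by overlayfs via RENAME_WHITEOUT) is a
 * character-device node with device number 0 (WHITEOUT_DEV); shmem_mknod()
 * above creates it under the old name so that name keeps covering any
 * lower-layer entry after the rename.
 */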
4038 
4039 /*
4040  * The VFS layer already does all the dentry stuff for rename;
4041  * we just have to decrement the usage count for the target if
4042  * it exists, so that the VFS layer correctly frees it when it
4043  * gets overwritten.
4044  */
4045 static int shmem_rename2(struct mnt_idmap *idmap,
4046 			 struct inode *old_dir, struct dentry *old_dentry,
4047 			 struct inode *new_dir, struct dentry *new_dentry,
4048 			 unsigned int flags)
4049 {
4050 	struct inode *inode = d_inode(old_dentry);
4051 	int they_are_dirs = S_ISDIR(inode->i_mode);
4052 	int error;
4053 
4054 	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
4055 		return -EINVAL;
4056 
4057 	if (flags & RENAME_EXCHANGE)
4058 		return simple_offset_rename_exchange(old_dir, old_dentry,
4059 						     new_dir, new_dentry);
4060 
4061 	if (!simple_empty(new_dentry))
4062 		return -ENOTEMPTY;
4063 
4064 	if (flags & RENAME_WHITEOUT) {
4065 		error = shmem_whiteout(idmap, old_dir, old_dentry);
4066 		if (error)
4067 			return error;
4068 	}
4069 
4070 	error = simple_offset_rename(old_dir, old_dentry, new_dir, new_dentry);
4071 	if (error)
4072 		return error;
4073 
4074 	if (d_really_is_positive(new_dentry)) {
4075 		(void) shmem_unlink(new_dir, new_dentry);
4076 		if (they_are_dirs) {
4077 			drop_nlink(d_inode(new_dentry));
4078 			drop_nlink(old_dir);
4079 		}
4080 	} else if (they_are_dirs) {
4081 		drop_nlink(old_dir);
4082 		inc_nlink(new_dir);
4083 	}
4084 
4085 	old_dir->i_size -= BOGO_DIRENT_SIZE;
4086 	new_dir->i_size += BOGO_DIRENT_SIZE;
4087 	simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
4088 	inode_inc_iversion(old_dir);
4089 	inode_inc_iversion(new_dir);
4090 	return 0;
4091 }
4092 
4093 static int shmem_symlink(struct mnt_idmap *idmap, struct inode *dir,
4094 			 struct dentry *dentry, const char *symname)
4095 {
4096 	int error;
4097 	int len;
4098 	struct inode *inode;
4099 	struct folio *folio;
4100 	char *link;
4101 
4102 	len = strlen(symname) + 1;
4103 	if (len > PAGE_SIZE)
4104 		return -ENAMETOOLONG;
4105 
4106 	inode = shmem_get_inode(idmap, dir->i_sb, dir, S_IFLNK | 0777, 0,
4107 				VM_NORESERVE);
4108 	if (IS_ERR(inode))
4109 		return PTR_ERR(inode);
4110 
4111 	error = security_inode_init_security(inode, dir, &dentry->d_name,
4112 					     shmem_initxattrs, NULL);
4113 	if (error && error != -EOPNOTSUPP)
4114 		goto out_iput;
4115 
4116 	error = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
4117 	if (error)
4118 		goto out_iput;
4119 
4120 	inode->i_size = len-1;
4121 	if (len <= SHORT_SYMLINK_LEN) {
4122 		link = kmemdup(symname, len, GFP_KERNEL);
4123 		if (!link) {
4124 			error = -ENOMEM;
4125 			goto out_remove_offset;
4126 		}
4127 		inode->i_op = &shmem_short_symlink_operations;
4128 		inode_set_cached_link(inode, link, len - 1);
4129 	} else {
4130 		inode_nohighmem(inode);
4131 		inode->i_mapping->a_ops = &shmem_aops;
4132 		error = shmem_get_folio(inode, 0, 0, &folio, SGP_WRITE);
4133 		if (error)
4134 			goto out_remove_offset;
4135 		inode->i_op = &shmem_symlink_inode_operations;
4136 		memcpy(folio_address(folio), symname, len);
4137 		folio_mark_uptodate(folio);
4138 		folio_mark_dirty(folio);
4139 		folio_unlock(folio);
4140 		folio_put(folio);
4141 	}
4142 	dir->i_size += BOGO_DIRENT_SIZE;
4143 	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
4144 	inode_inc_iversion(dir);
4145 	if (IS_ENABLED(CONFIG_UNICODE) && IS_CASEFOLDED(dir))
4146 		d_add(dentry, inode);
4147 	else
4148 		d_instantiate(dentry, inode);
4149 	dget(dentry);
4150 	return 0;
4151 
4152 out_remove_offset:
4153 	simple_offset_remove(shmem_get_offset_ctx(dir), dentry);
4154 out_iput:
4155 	iput(inode);
4156 	return error;
4157 }
4158 
4159 static void shmem_put_link(void *arg)
4160 {
4161 	folio_mark_accessed(arg);
4162 	folio_put(arg);
4163 }
4164 
4165 static const char *shmem_get_link(struct dentry *dentry, struct inode *inode,
4166 				  struct delayed_call *done)
4167 {
4168 	struct folio *folio = NULL;
4169 	int error;
4170 
4171 	if (!dentry) {
4172 		folio = filemap_get_folio(inode->i_mapping, 0);
4173 		if (IS_ERR(folio))
4174 			return ERR_PTR(-ECHILD);
4175 		if (PageHWPoison(folio_page(folio, 0)) ||
4176 		    !folio_test_uptodate(folio)) {
4177 			folio_put(folio);
4178 			return ERR_PTR(-ECHILD);
4179 		}
4180 	} else {
4181 		error = shmem_get_folio(inode, 0, 0, &folio, SGP_READ);
4182 		if (error)
4183 			return ERR_PTR(error);
4184 		if (!folio)
4185 			return ERR_PTR(-ECHILD);
4186 		if (PageHWPoison(folio_page(folio, 0))) {
4187 			folio_unlock(folio);
4188 			folio_put(folio);
4189 			return ERR_PTR(-ECHILD);
4190 		}
4191 		folio_unlock(folio);
4192 	}
4193 	set_delayed_call(done, shmem_put_link, folio);
4194 	return folio_address(folio);
4195 }
4196 
4197 #ifdef CONFIG_TMPFS_XATTR
4198 
4199 static int shmem_fileattr_get(struct dentry *dentry, struct fileattr *fa)
4200 {
4201 	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
4202 
4203 	fileattr_fill_flags(fa, info->fsflags & SHMEM_FL_USER_VISIBLE);
4204 
4205 	return 0;
4206 }
4207 
4208 static int shmem_fileattr_set(struct mnt_idmap *idmap,
4209 			      struct dentry *dentry, struct fileattr *fa)
4210 {
4211 	struct inode *inode = d_inode(dentry);
4212 	struct shmem_inode_info *info = SHMEM_I(inode);
4213 	int ret, flags;
4214 
4215 	if (fileattr_has_fsx(fa))
4216 		return -EOPNOTSUPP;
4217 	if (fa->flags & ~SHMEM_FL_USER_MODIFIABLE)
4218 		return -EOPNOTSUPP;
4219 
4220 	flags = (info->fsflags & ~SHMEM_FL_USER_MODIFIABLE) |
4221 		(fa->flags & SHMEM_FL_USER_MODIFIABLE);
4222 
4223 	ret = shmem_set_inode_flags(inode, flags, dentry);
4224 
4225 	if (ret)
4226 		return ret;
4227 
4228 	info->fsflags = flags;
4229 
4230 	inode_set_ctime_current(inode);
4231 	inode_inc_iversion(inode);
4232 	return 0;
4233 }
4234 
4235 /*
4236  * Superblocks without xattr inode operations may get some security.* xattr
4237  * support from the LSM "for free". As soon as we have any other xattrs
4238  * like ACLs, we also need to implement the security.* handlers at
4239  * filesystem level, though.
4240  */
4241 
4242 /*
4243  * Callback for security_inode_init_security() for acquiring xattrs.
4244  */
4245 static int shmem_initxattrs(struct inode *inode,
4246 			    const struct xattr *xattr_array, void *fs_info)
4247 {
4248 	struct shmem_inode_info *info = SHMEM_I(inode);
4249 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
4250 	const struct xattr *xattr;
4251 	struct simple_xattr *new_xattr;
4252 	size_t ispace = 0;
4253 	size_t len;
4254 
4255 	if (sbinfo->max_inodes) {
4256 		for (xattr = xattr_array; xattr->name != NULL; xattr++) {
4257 			ispace += simple_xattr_space(xattr->name,
4258 				xattr->value_len + XATTR_SECURITY_PREFIX_LEN);
4259 		}
4260 		if (ispace) {
4261 			raw_spin_lock(&sbinfo->stat_lock);
4262 			if (sbinfo->free_ispace < ispace)
4263 				ispace = 0;
4264 			else
4265 				sbinfo->free_ispace -= ispace;
4266 			raw_spin_unlock(&sbinfo->stat_lock);
4267 			if (!ispace)
4268 				return -ENOSPC;
4269 		}
4270 	}
4271 
4272 	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
4273 		new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
4274 		if (!new_xattr)
4275 			break;
4276 
4277 		len = strlen(xattr->name) + 1;
4278 		new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
4279 					  GFP_KERNEL_ACCOUNT);
4280 		if (!new_xattr->name) {
4281 			kvfree(new_xattr);
4282 			break;
4283 		}
4284 
4285 		memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
4286 		       XATTR_SECURITY_PREFIX_LEN);
4287 		memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
4288 		       xattr->name, len);
4289 
4290 		simple_xattr_add(&info->xattrs, new_xattr);
4291 	}
4292 
4293 	if (xattr->name != NULL) {
4294 		if (ispace) {
4295 			raw_spin_lock(&sbinfo->stat_lock);
4296 			sbinfo->free_ispace += ispace;
4297 			raw_spin_unlock(&sbinfo->stat_lock);
4298 		}
4299 		simple_xattrs_free(&info->xattrs, NULL);
4300 		return -ENOMEM;
4301 	}
4302 
4303 	return 0;
4304 }
4305 
4306 static int shmem_xattr_handler_get(const struct xattr_handler *handler,
4307 				   struct dentry *unused, struct inode *inode,
4308 				   const char *name, void *buffer, size_t size)
4309 {
4310 	struct shmem_inode_info *info = SHMEM_I(inode);
4311 
4312 	name = xattr_full_name(handler, name);
4313 	return simple_xattr_get(&info->xattrs, name, buffer, size);
4314 }
4315 
4316 static int shmem_xattr_handler_set(const struct xattr_handler *handler,
4317 				   struct mnt_idmap *idmap,
4318 				   struct dentry *unused, struct inode *inode,
4319 				   const char *name, const void *value,
4320 				   size_t size, int flags)
4321 {
4322 	struct shmem_inode_info *info = SHMEM_I(inode);
4323 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
4324 	struct simple_xattr *old_xattr;
4325 	size_t ispace = 0;
4326 
4327 	name = xattr_full_name(handler, name);
4328 	if (value && sbinfo->max_inodes) {
4329 		ispace = simple_xattr_space(name, size);
4330 		raw_spin_lock(&sbinfo->stat_lock);
4331 		if (sbinfo->free_ispace < ispace)
4332 			ispace = 0;
4333 		else
4334 			sbinfo->free_ispace -= ispace;
4335 		raw_spin_unlock(&sbinfo->stat_lock);
4336 		if (!ispace)
4337 			return -ENOSPC;
4338 	}
4339 
4340 	old_xattr = simple_xattr_set(&info->xattrs, name, value, size, flags);
4341 	if (!IS_ERR(old_xattr)) {
4342 		ispace = 0;
4343 		if (old_xattr && sbinfo->max_inodes)
4344 			ispace = simple_xattr_space(old_xattr->name,
4345 						    old_xattr->size);
4346 		simple_xattr_free(old_xattr);
4347 		old_xattr = NULL;
4348 		inode_set_ctime_current(inode);
4349 		inode_inc_iversion(inode);
4350 	}
4351 	if (ispace) {
4352 		raw_spin_lock(&sbinfo->stat_lock);
4353 		sbinfo->free_ispace += ispace;
4354 		raw_spin_unlock(&sbinfo->stat_lock);
4355 	}
4356 	return PTR_ERR(old_xattr);
4357 }
4358 
4359 static const struct xattr_handler shmem_security_xattr_handler = {
4360 	.prefix = XATTR_SECURITY_PREFIX,
4361 	.get = shmem_xattr_handler_get,
4362 	.set = shmem_xattr_handler_set,
4363 };
4364 
4365 static const struct xattr_handler shmem_trusted_xattr_handler = {
4366 	.prefix = XATTR_TRUSTED_PREFIX,
4367 	.get = shmem_xattr_handler_get,
4368 	.set = shmem_xattr_handler_set,
4369 };
4370 
4371 static const struct xattr_handler shmem_user_xattr_handler = {
4372 	.prefix = XATTR_USER_PREFIX,
4373 	.get = shmem_xattr_handler_get,
4374 	.set = shmem_xattr_handler_set,
4375 };
4376 
4377 static const struct xattr_handler * const shmem_xattr_handlers[] = {
4378 	&shmem_security_xattr_handler,
4379 	&shmem_trusted_xattr_handler,
4380 	&shmem_user_xattr_handler,
4381 	NULL
4382 };
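/*
 * Usage sketch (hypothetical path and names, not part of the build): the
 * handlers above back the regular xattr syscalls on tmpfs, e.g.
 *
 *	setxattr("/mnt/tmp/file", "user.origin", "build-cache", 11, 0);
 *	getxattr("/mnt/tmp/file", "user.origin", buf, sizeof(buf));
 *
 * When the mount has an inode limit, new xattr values are charged against
 * the superblock's free_ispace in shmem_xattr_handler_set() above.
 */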
4383 
4384 static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
4385 {
4386 	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
4387 	return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
4388 }
4389 #endif /* CONFIG_TMPFS_XATTR */
4390 
4391 static const struct inode_operations shmem_short_symlink_operations = {
4392 	.getattr	= shmem_getattr,
4393 	.setattr	= shmem_setattr,
4394 	.get_link	= simple_get_link,
4395 #ifdef CONFIG_TMPFS_XATTR
4396 	.listxattr	= shmem_listxattr,
4397 #endif
4398 };
4399 
4400 static const struct inode_operations shmem_symlink_inode_operations = {
4401 	.getattr	= shmem_getattr,
4402 	.setattr	= shmem_setattr,
4403 	.get_link	= shmem_get_link,
4404 #ifdef CONFIG_TMPFS_XATTR
4405 	.listxattr	= shmem_listxattr,
4406 #endif
4407 };
4408 
4409 static struct dentry *shmem_get_parent(struct dentry *child)
4410 {
4411 	return ERR_PTR(-ESTALE);
4412 }
4413 
4414 static int shmem_match(struct inode *ino, void *vfh)
4415 {
4416 	__u32 *fh = vfh;
4417 	__u64 inum = fh[2];
4418 	inum = (inum << 32) | fh[1];
4419 	return ino->i_ino == inum && fh[0] == ino->i_generation;
4420 }
4421 
4422 /* Find any alias of inode, but prefer a hashed alias */
4423 static struct dentry *shmem_find_alias(struct inode *inode)
4424 {
4425 	struct dentry *alias = d_find_alias(inode);
4426 
4427 	return alias ?: d_find_any_alias(inode);
4428 }
4429 
4430 static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
4431 		struct fid *fid, int fh_len, int fh_type)
4432 {
4433 	struct inode *inode;
4434 	struct dentry *dentry = NULL;
4435 	u64 inum;
4436 
4437 	if (fh_len < 3)
4438 		return NULL;
4439 
4440 	inum = fid->raw[2];
4441 	inum = (inum << 32) | fid->raw[1];
4442 
4443 	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
4444 			shmem_match, fid->raw);
4445 	if (inode) {
4446 		dentry = shmem_find_alias(inode);
4447 		iput(inode);
4448 	}
4449 
4450 	return dentry;
4451 }
4452 
4453 static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
4454 				struct inode *parent)
4455 {
4456 	if (*len < 3) {
4457 		*len = 3;
4458 		return FILEID_INVALID;
4459 	}
4460 
4461 	if (inode_unhashed(inode)) {
4462 		/* Unfortunately insert_inode_hash is not idempotent,
4463 		 * so as we hash inodes here rather than at creation
4464 		 * time, we need a lock to ensure we only try
4465 		 * to do it once
4466 		 */
4467 		static DEFINE_SPINLOCK(lock);
4468 		spin_lock(&lock);
4469 		if (inode_unhashed(inode))
4470 			__insert_inode_hash(inode,
4471 					    inode->i_ino + inode->i_generation);
4472 		spin_unlock(&lock);
4473 	}
4474 
4475 	fh[0] = inode->i_generation;
4476 	fh[1] = inode->i_ino;
4477 	fh[2] = ((__u64)inode->i_ino) >> 32;
4478 
4479 	*len = 3;
4480 	return 1;
4481 }
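/*
 * Layout note for the 3-word file handle produced above: fh[0] holds
 * i_generation, fh[1] the low 32 bits of i_ino and fh[2] the high 32 bits;
 * shmem_fh_to_dentry() reassembles the inum from fh[2]:fh[1] and looks the
 * inode up hashed on i_ino + i_generation, matching the hashing done here.
 */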
4482 
4483 static const struct export_operations shmem_export_ops = {
4484 	.get_parent     = shmem_get_parent,
4485 	.encode_fh      = shmem_encode_fh,
4486 	.fh_to_dentry	= shmem_fh_to_dentry,
4487 };
4488 
4489 enum shmem_param {
4490 	Opt_gid,
4491 	Opt_huge,
4492 	Opt_mode,
4493 	Opt_mpol,
4494 	Opt_nr_blocks,
4495 	Opt_nr_inodes,
4496 	Opt_size,
4497 	Opt_uid,
4498 	Opt_inode32,
4499 	Opt_inode64,
4500 	Opt_noswap,
4501 	Opt_quota,
4502 	Opt_usrquota,
4503 	Opt_grpquota,
4504 	Opt_usrquota_block_hardlimit,
4505 	Opt_usrquota_inode_hardlimit,
4506 	Opt_grpquota_block_hardlimit,
4507 	Opt_grpquota_inode_hardlimit,
4508 	Opt_casefold_version,
4509 	Opt_casefold,
4510 	Opt_strict_encoding,
4511 };
4512 
4513 static const struct constant_table shmem_param_enums_huge[] = {
4514 	{"never",	SHMEM_HUGE_NEVER },
4515 	{"always",	SHMEM_HUGE_ALWAYS },
4516 	{"within_size",	SHMEM_HUGE_WITHIN_SIZE },
4517 	{"advise",	SHMEM_HUGE_ADVISE },
4518 	{}
4519 };
4520 
4521 const struct fs_parameter_spec shmem_fs_parameters[] = {
4522 	fsparam_gid   ("gid",		Opt_gid),
4523 	fsparam_enum  ("huge",		Opt_huge,  shmem_param_enums_huge),
4524 	fsparam_u32oct("mode",		Opt_mode),
4525 	fsparam_string("mpol",		Opt_mpol),
4526 	fsparam_string("nr_blocks",	Opt_nr_blocks),
4527 	fsparam_string("nr_inodes",	Opt_nr_inodes),
4528 	fsparam_string("size",		Opt_size),
4529 	fsparam_uid   ("uid",		Opt_uid),
4530 	fsparam_flag  ("inode32",	Opt_inode32),
4531 	fsparam_flag  ("inode64",	Opt_inode64),
4532 	fsparam_flag  ("noswap",	Opt_noswap),
4533 #ifdef CONFIG_TMPFS_QUOTA
4534 	fsparam_flag  ("quota",		Opt_quota),
4535 	fsparam_flag  ("usrquota",	Opt_usrquota),
4536 	fsparam_flag  ("grpquota",	Opt_grpquota),
4537 	fsparam_string("usrquota_block_hardlimit", Opt_usrquota_block_hardlimit),
4538 	fsparam_string("usrquota_inode_hardlimit", Opt_usrquota_inode_hardlimit),
4539 	fsparam_string("grpquota_block_hardlimit", Opt_grpquota_block_hardlimit),
4540 	fsparam_string("grpquota_inode_hardlimit", Opt_grpquota_inode_hardlimit),
4541 #endif
4542 	fsparam_string("casefold",	Opt_casefold_version),
4543 	fsparam_flag  ("casefold",	Opt_casefold),
4544 	fsparam_flag  ("strict_encoding", Opt_strict_encoding),
4545 	{}
4546 };
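/*
 * Usage sketch (hypothetical mount, not part of the build): the parameters
 * above map one-to-one to tmpfs mount options, e.g.
 *
 *	mount -t tmpfs -o size=50%,nr_inodes=1m,mode=1777,huge=within_size \
 *		tmpfs /mnt/tmp
 *
 * "size=50%" is resolved against totalram_pages() in shmem_parse_one().
 */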
4547 
4548 #if IS_ENABLED(CONFIG_UNICODE)
4549 static int shmem_parse_opt_casefold(struct fs_context *fc, struct fs_parameter *param,
4550 				    bool latest_version)
4551 {
4552 	struct shmem_options *ctx = fc->fs_private;
4553 	int version = UTF8_LATEST;
4554 	struct unicode_map *encoding;
4555 	char *version_str = param->string + 5;
4556 
4557 	if (!latest_version) {
4558 		if (strncmp(param->string, "utf8-", 5))
4559 			return invalfc(fc, "Only UTF-8 encodings are supported "
4560 				       "in the format: utf8-<version number>");
4561 
4562 		version = utf8_parse_version(version_str);
4563 		if (version < 0)
4564 			return invalfc(fc, "Invalid UTF-8 version: %s", version_str);
4565 	}
4566 
4567 	encoding = utf8_load(version);
4568 
4569 	if (IS_ERR(encoding)) {
4570 		return invalfc(fc, "Failed loading UTF-8 version: utf8-%u.%u.%u\n",
4571 			       unicode_major(version), unicode_minor(version),
4572 			       unicode_rev(version));
4573 	}
4574 
4575 	pr_info("tmpfs: Using encoding : utf8-%u.%u.%u\n",
4576 		unicode_major(version), unicode_minor(version), unicode_rev(version));
4577 
4578 	ctx->encoding = encoding;
4579 
4580 	return 0;
4581 }
4582 #else
4583 static int shmem_parse_opt_casefold(struct fs_context *fc, struct fs_parameter *param,
4584 				    bool latest_version)
4585 {
4586 	return invalfc(fc, "tmpfs: Kernel not built with CONFIG_UNICODE\n");
4587 }
4588 #endif
4589 
4590 static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
4591 {
4592 	struct shmem_options *ctx = fc->fs_private;
4593 	struct fs_parse_result result;
4594 	unsigned long long size;
4595 	char *rest;
4596 	int opt;
4597 	kuid_t kuid;
4598 	kgid_t kgid;
4599 
4600 	opt = fs_parse(fc, shmem_fs_parameters, param, &result);
4601 	if (opt < 0)
4602 		return opt;
4603 
4604 	switch (opt) {
4605 	case Opt_size:
4606 		size = memparse(param->string, &rest);
4607 		if (*rest == '%') {
4608 			size <<= PAGE_SHIFT;
4609 			size *= totalram_pages();
4610 			do_div(size, 100);
4611 			rest++;
4612 		}
4613 		if (*rest)
4614 			goto bad_value;
4615 		ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
4616 		ctx->seen |= SHMEM_SEEN_BLOCKS;
4617 		break;
4618 	case Opt_nr_blocks:
4619 		ctx->blocks = memparse(param->string, &rest);
4620 		if (*rest || ctx->blocks > LONG_MAX)
4621 			goto bad_value;
4622 		ctx->seen |= SHMEM_SEEN_BLOCKS;
4623 		break;
4624 	case Opt_nr_inodes:
4625 		ctx->inodes = memparse(param->string, &rest);
4626 		if (*rest || ctx->inodes > ULONG_MAX / BOGO_INODE_SIZE)
4627 			goto bad_value;
4628 		ctx->seen |= SHMEM_SEEN_INODES;
4629 		break;
4630 	case Opt_mode:
4631 		ctx->mode = result.uint_32 & 07777;
4632 		break;
4633 	case Opt_uid:
4634 		kuid = result.uid;
4635 
4636 		/*
4637 		 * The requested uid must be representable in the
4638 		 * filesystem's idmapping.
4639 		 */
4640 		if (!kuid_has_mapping(fc->user_ns, kuid))
4641 			goto bad_value;
4642 
4643 		ctx->uid = kuid;
4644 		break;
4645 	case Opt_gid:
4646 		kgid = result.gid;
4647 
4648 		/*
4649 		 * The requested gid must be representable in the
4650 		 * filesystem's idmapping.
4651 		 */
4652 		if (!kgid_has_mapping(fc->user_ns, kgid))
4653 			goto bad_value;
4654 
4655 		ctx->gid = kgid;
4656 		break;
4657 	case Opt_huge:
4658 		ctx->huge = result.uint_32;
4659 		if (ctx->huge != SHMEM_HUGE_NEVER &&
4660 		    !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
4661 		      has_transparent_hugepage()))
4662 			goto unsupported_parameter;
4663 		ctx->seen |= SHMEM_SEEN_HUGE;
4664 		break;
4665 	case Opt_mpol:
4666 		if (IS_ENABLED(CONFIG_NUMA)) {
4667 			mpol_put(ctx->mpol);
4668 			ctx->mpol = NULL;
4669 			if (mpol_parse_str(param->string, &ctx->mpol))
4670 				goto bad_value;
4671 			break;
4672 		}
4673 		goto unsupported_parameter;
4674 	case Opt_inode32:
4675 		ctx->full_inums = false;
4676 		ctx->seen |= SHMEM_SEEN_INUMS;
4677 		break;
4678 	case Opt_inode64:
4679 		if (sizeof(ino_t) < 8) {
4680 			return invalfc(fc,
4681 				       "Cannot use inode64 with <64bit inums in kernel\n");
4682 		}
4683 		ctx->full_inums = true;
4684 		ctx->seen |= SHMEM_SEEN_INUMS;
4685 		break;
4686 	case Opt_noswap:
4687 		if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN)) {
4688 			return invalfc(fc,
4689 				       "Turning off swap in unprivileged tmpfs mounts unsupported");
4690 		}
4691 		ctx->noswap = true;
4692 		ctx->seen |= SHMEM_SEEN_NOSWAP;
4693 		break;
4694 	case Opt_quota:
4695 		if (fc->user_ns != &init_user_ns)
4696 			return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
4697 		ctx->seen |= SHMEM_SEEN_QUOTA;
4698 		ctx->quota_types |= (QTYPE_MASK_USR | QTYPE_MASK_GRP);
4699 		break;
4700 	case Opt_usrquota:
4701 		if (fc->user_ns != &init_user_ns)
4702 			return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
4703 		ctx->seen |= SHMEM_SEEN_QUOTA;
4704 		ctx->quota_types |= QTYPE_MASK_USR;
4705 		break;
4706 	case Opt_grpquota:
4707 		if (fc->user_ns != &init_user_ns)
4708 			return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
4709 		ctx->seen |= SHMEM_SEEN_QUOTA;
4710 		ctx->quota_types |= QTYPE_MASK_GRP;
4711 		break;
4712 	case Opt_usrquota_block_hardlimit:
4713 		size = memparse(param->string, &rest);
4714 		if (*rest || !size)
4715 			goto bad_value;
4716 		if (size > SHMEM_QUOTA_MAX_SPC_LIMIT)
4717 			return invalfc(fc,
4718 				       "User quota block hardlimit too large.");
4719 		ctx->qlimits.usrquota_bhardlimit = size;
4720 		break;
4721 	case Opt_grpquota_block_hardlimit:
4722 		size = memparse(param->string, &rest);
4723 		if (*rest || !size)
4724 			goto bad_value;
4725 		if (size > SHMEM_QUOTA_MAX_SPC_LIMIT)
4726 			return invalfc(fc,
4727 				       "Group quota block hardlimit too large.");
4728 		ctx->qlimits.grpquota_bhardlimit = size;
4729 		break;
4730 	case Opt_usrquota_inode_hardlimit:
4731 		size = memparse(param->string, &rest);
4732 		if (*rest || !size)
4733 			goto bad_value;
4734 		if (size > SHMEM_QUOTA_MAX_INO_LIMIT)
4735 			return invalfc(fc,
4736 				       "User quota inode hardlimit too large.");
4737 		ctx->qlimits.usrquota_ihardlimit = size;
4738 		break;
4739 	case Opt_grpquota_inode_hardlimit:
4740 		size = memparse(param->string, &rest);
4741 		if (*rest || !size)
4742 			goto bad_value;
4743 		if (size > SHMEM_QUOTA_MAX_INO_LIMIT)
4744 			return invalfc(fc,
4745 				       "Group quota inode hardlimit too large.");
4746 		ctx->qlimits.grpquota_ihardlimit = size;
4747 		break;
4748 	case Opt_casefold_version:
4749 		return shmem_parse_opt_casefold(fc, param, false);
4750 	case Opt_casefold:
4751 		return shmem_parse_opt_casefold(fc, param, true);
4752 	case Opt_strict_encoding:
4753 #if IS_ENABLED(CONFIG_UNICODE)
4754 		ctx->strict_encoding = true;
4755 		break;
4756 #else
4757 		return invalfc(fc, "tmpfs: Kernel not built with CONFIG_UNICODE\n");
4758 #endif
4759 	}
4760 	return 0;
4761 
4762 unsupported_parameter:
4763 	return invalfc(fc, "Unsupported parameter '%s'", param->key);
4764 bad_value:
4765 	return invalfc(fc, "Bad value for '%s'", param->key);
4766 }
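/*
 * Worked example of the Opt_size percentage arithmetic above (numbers are
 * hypothetical): with 4 KiB pages and 8 GiB of RAM (2097152 pages),
 * "size=50%" gives size = 50 << PAGE_SHIFT = 204800, then
 * size *= totalram_pages() and do_div(size, 100) yield 4 GiB, and
 * DIV_ROUND_UP(size, PAGE_SIZE) stores 1048576 blocks in ctx->blocks.
 */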
4767 
4768 static char *shmem_next_opt(char **s)
4769 {
4770 	char *sbegin = *s;
4771 	char *p;
4772 
4773 	if (sbegin == NULL)
4774 		return NULL;
4775 
4776 	/*
4777 	 * NUL-terminate this option: unfortunately,
4778 	 * mount options form a comma-separated list,
4779 	 * but mpol's nodelist may also contain commas.
4780 	 */
4781 	for (;;) {
4782 		p = strchr(*s, ',');
4783 		if (p == NULL)
4784 			break;
4785 		*s = p + 1;
4786 		if (!isdigit(*(p+1))) {
4787 			*p = '\0';
4788 			return sbegin;
4789 		}
4790 	}
4791 
4792 	*s = NULL;
4793 	return sbegin;
4794 }
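/*
 * Example of why the splitter above cannot simply break on every ','
 * (option string hypothetical): in
 *
 *	"size=1g,mpol=bind:0,2,huge=always"
 *
 * the comma inside the nodelist "0,2" is followed by a digit and therefore
 * skipped, so "mpol=bind:0,2" is handed to shmem_parse_one() as one option.
 */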
4795 
4796 static int shmem_parse_monolithic(struct fs_context *fc, void *data)
4797 {
4798 	return vfs_parse_monolithic_sep(fc, data, shmem_next_opt);
4799 }
4800 
4801 /*
4802  * Reconfigure a shmem filesystem.
4803  */
4804 static int shmem_reconfigure(struct fs_context *fc)
4805 {
4806 	struct shmem_options *ctx = fc->fs_private;
4807 	struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
4808 	unsigned long used_isp;
4809 	struct mempolicy *mpol = NULL;
4810 	const char *err;
4811 
4812 	raw_spin_lock(&sbinfo->stat_lock);
4813 	used_isp = sbinfo->max_inodes * BOGO_INODE_SIZE - sbinfo->free_ispace;
4814 
4815 	if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
4816 		if (!sbinfo->max_blocks) {
4817 			err = "Cannot retroactively limit size";
4818 			goto out;
4819 		}
4820 		if (percpu_counter_compare(&sbinfo->used_blocks,
4821 					   ctx->blocks) > 0) {
4822 			err = "Too small a size for current use";
4823 			goto out;
4824 		}
4825 	}
4826 	if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
4827 		if (!sbinfo->max_inodes) {
4828 			err = "Cannot retroactively limit inodes";
4829 			goto out;
4830 		}
4831 		if (ctx->inodes * BOGO_INODE_SIZE < used_isp) {
4832 			err = "Too few inodes for current use";
4833 			goto out;
4834 		}
4835 	}
4836 
4837 	if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums &&
4838 	    sbinfo->next_ino > UINT_MAX) {
4839 		err = "Current inum too high to switch to 32-bit inums";
4840 		goto out;
4841 	}
4842 	if ((ctx->seen & SHMEM_SEEN_NOSWAP) && ctx->noswap && !sbinfo->noswap) {
4843 		err = "Cannot disable swap on remount";
4844 		goto out;
4845 	}
4846 	if (!(ctx->seen & SHMEM_SEEN_NOSWAP) && !ctx->noswap && sbinfo->noswap) {
4847 		err = "Cannot enable swap on remount if it was disabled on first mount";
4848 		goto out;
4849 	}
4850 
4851 	if (ctx->seen & SHMEM_SEEN_QUOTA &&
4852 	    !sb_any_quota_loaded(fc->root->d_sb)) {
4853 		err = "Cannot enable quota on remount";
4854 		goto out;
4855 	}
4856 
4857 #ifdef CONFIG_TMPFS_QUOTA
4858 #define CHANGED_LIMIT(name)						\
4859 	(ctx->qlimits.name## hardlimit &&				\
4860 	(ctx->qlimits.name## hardlimit != sbinfo->qlimits.name## hardlimit))
4861 
4862 	if (CHANGED_LIMIT(usrquota_b) || CHANGED_LIMIT(usrquota_i) ||
4863 	    CHANGED_LIMIT(grpquota_b) || CHANGED_LIMIT(grpquota_i)) {
4864 		err = "Cannot change global quota limit on remount";
4865 		goto out;
4866 	}
4867 #endif /* CONFIG_TMPFS_QUOTA */
4868 
4869 	if (ctx->seen & SHMEM_SEEN_HUGE)
4870 		sbinfo->huge = ctx->huge;
4871 	if (ctx->seen & SHMEM_SEEN_INUMS)
4872 		sbinfo->full_inums = ctx->full_inums;
4873 	if (ctx->seen & SHMEM_SEEN_BLOCKS)
4874 		sbinfo->max_blocks  = ctx->blocks;
4875 	if (ctx->seen & SHMEM_SEEN_INODES) {
4876 		sbinfo->max_inodes  = ctx->inodes;
4877 		sbinfo->free_ispace = ctx->inodes * BOGO_INODE_SIZE - used_isp;
4878 	}
4879 
4880 	/*
4881 	 * Preserve previous mempolicy unless mpol remount option was specified.
4882 	 */
4883 	if (ctx->mpol) {
4884 		mpol = sbinfo->mpol;
4885 		sbinfo->mpol = ctx->mpol;	/* transfers initial ref */
4886 		ctx->mpol = NULL;
4887 	}
4888 
4889 	if (ctx->noswap)
4890 		sbinfo->noswap = true;
4891 
4892 	raw_spin_unlock(&sbinfo->stat_lock);
4893 	mpol_put(mpol);
4894 	return 0;
4895 out:
4896 	raw_spin_unlock(&sbinfo->stat_lock);
4897 	return invalfc(fc, "%s", err);
4898 }
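/*
 * Remount sketch (hypothetical mount point): "mount -o remount,size=2g
 * /mnt/tmp" ends up here with SHMEM_SEEN_BLOCKS set; shrinking below the
 * blocks already in use, or below the inode space already consumed, is
 * rejected with the corresponding error strings above.
 */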
4899 
4900 static int shmem_show_options(struct seq_file *seq, struct dentry *root)
4901 {
4902 	struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
4903 	struct mempolicy *mpol;
4904 
4905 	if (sbinfo->max_blocks != shmem_default_max_blocks())
4906 		seq_printf(seq, ",size=%luk", K(sbinfo->max_blocks));
4907 	if (sbinfo->max_inodes != shmem_default_max_inodes())
4908 		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
4909 	if (sbinfo->mode != (0777 | S_ISVTX))
4910 		seq_printf(seq, ",mode=%03ho", sbinfo->mode);
4911 	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
4912 		seq_printf(seq, ",uid=%u",
4913 				from_kuid_munged(&init_user_ns, sbinfo->uid));
4914 	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
4915 		seq_printf(seq, ",gid=%u",
4916 				from_kgid_munged(&init_user_ns, sbinfo->gid));
4917 
4918 	/*
4919 	 * Showing inode{64,32} might be useful even if it's the system default,
4920 	 * since then people don't have to resort to checking both here and
4921 	 * /proc/config.gz to confirm 64-bit inums were successfully applied
4922 	 * (which may not even exist if IKCONFIG_PROC isn't enabled).
4923 	 *
4924 	 * We hide it when inode64 isn't the default and we are using 32-bit
4925 	 * inodes, since that probably just means the feature isn't even under
4926 	 * consideration.
4927 	 *
4928 	 * As such:
4929 	 *
4930 	 *                     +-----------------+-----------------+
4931 	 *                     | TMPFS_INODE64=y | TMPFS_INODE64=n |
4932 	 *  +------------------+-----------------+-----------------+
4933 	 *  | full_inums=true  | show            | show            |
4934 	 *  | full_inums=false | show            | hide            |
4935 	 *  +------------------+-----------------+-----------------+
4936 	 *
4937 	 */
4938 	if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums)
4939 		seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32));
4940 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4941 	/* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
4942 	if (sbinfo->huge)
4943 		seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
4944 #endif
4945 	mpol = shmem_get_sbmpol(sbinfo);
4946 	shmem_show_mpol(seq, mpol);
4947 	mpol_put(mpol);
4948 	if (sbinfo->noswap)
4949 		seq_printf(seq, ",noswap");
4950 #ifdef CONFIG_TMPFS_QUOTA
4951 	if (sb_has_quota_active(root->d_sb, USRQUOTA))
4952 		seq_printf(seq, ",usrquota");
4953 	if (sb_has_quota_active(root->d_sb, GRPQUOTA))
4954 		seq_printf(seq, ",grpquota");
4955 	if (sbinfo->qlimits.usrquota_bhardlimit)
4956 		seq_printf(seq, ",usrquota_block_hardlimit=%lld",
4957 			   sbinfo->qlimits.usrquota_bhardlimit);
4958 	if (sbinfo->qlimits.grpquota_bhardlimit)
4959 		seq_printf(seq, ",grpquota_block_hardlimit=%lld",
4960 			   sbinfo->qlimits.grpquota_bhardlimit);
4961 	if (sbinfo->qlimits.usrquota_ihardlimit)
4962 		seq_printf(seq, ",usrquota_inode_hardlimit=%lld",
4963 			   sbinfo->qlimits.usrquota_ihardlimit);
4964 	if (sbinfo->qlimits.grpquota_ihardlimit)
4965 		seq_printf(seq, ",grpquota_inode_hardlimit=%lld",
4966 			   sbinfo->qlimits.grpquota_ihardlimit);
4967 #endif
4968 	return 0;
4969 }
4970 
4971 #endif /* CONFIG_TMPFS */
4972 
4973 static void shmem_put_super(struct super_block *sb)
4974 {
4975 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
4976 
4977 #if IS_ENABLED(CONFIG_UNICODE)
4978 	if (sb->s_encoding)
4979 		utf8_unload(sb->s_encoding);
4980 #endif
4981 
4982 #ifdef CONFIG_TMPFS_QUOTA
4983 	shmem_disable_quotas(sb);
4984 #endif
4985 	free_percpu(sbinfo->ino_batch);
4986 	percpu_counter_destroy(&sbinfo->used_blocks);
4987 	mpol_put(sbinfo->mpol);
4988 	kfree(sbinfo);
4989 	sb->s_fs_info = NULL;
4990 }
4991 
4992 #if IS_ENABLED(CONFIG_UNICODE) && defined(CONFIG_TMPFS)
4993 static const struct dentry_operations shmem_ci_dentry_ops = {
4994 	.d_hash = generic_ci_d_hash,
4995 	.d_compare = generic_ci_d_compare,
4996 	.d_delete = always_delete_dentry,
4997 };
4998 #endif
4999 
5000 static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
5001 {
5002 	struct shmem_options *ctx = fc->fs_private;
5003 	struct inode *inode;
5004 	struct shmem_sb_info *sbinfo;
5005 	int error = -ENOMEM;
5006 
5007 	/* Round up to L1_CACHE_BYTES to resist false sharing */
5008 	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
5009 				L1_CACHE_BYTES), GFP_KERNEL);
5010 	if (!sbinfo)
5011 		return error;
5012 
5013 	sb->s_fs_info = sbinfo;
5014 
5015 #ifdef CONFIG_TMPFS
5016 	/*
5017 	 * By default we only allow half of the physical RAM per
5018 	 * tmpfs instance, limiting inodes to one per page of lowmem;
5019 	 * but the internal instance is left unlimited.
5020 	 */
5021 	if (!(sb->s_flags & SB_KERNMOUNT)) {
5022 		if (!(ctx->seen & SHMEM_SEEN_BLOCKS))
5023 			ctx->blocks = shmem_default_max_blocks();
5024 		if (!(ctx->seen & SHMEM_SEEN_INODES))
5025 			ctx->inodes = shmem_default_max_inodes();
5026 		if (!(ctx->seen & SHMEM_SEEN_INUMS))
5027 			ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64);
5028 		sbinfo->noswap = ctx->noswap;
5029 	} else {
5030 		sb->s_flags |= SB_NOUSER;
5031 	}
5032 	sb->s_export_op = &shmem_export_ops;
5033 	sb->s_flags |= SB_NOSEC | SB_I_VERSION;
5034 
5035 #if IS_ENABLED(CONFIG_UNICODE)
5036 	if (!ctx->encoding && ctx->strict_encoding) {
5037 		pr_err("tmpfs: strict_encoding option without encoding is forbidden\n");
5038 		error = -EINVAL;
5039 		goto failed;
5040 	}
5041 
5042 	if (ctx->encoding) {
5043 		sb->s_encoding = ctx->encoding;
5044 		sb->s_d_op = &shmem_ci_dentry_ops;
5045 		if (ctx->strict_encoding)
5046 			sb->s_encoding_flags = SB_ENC_STRICT_MODE_FL;
5047 	}
5048 #endif
5049 
5050 #else
5051 	sb->s_flags |= SB_NOUSER;
5052 #endif /* CONFIG_TMPFS */
5053 	sbinfo->max_blocks = ctx->blocks;
5054 	sbinfo->max_inodes = ctx->inodes;
5055 	sbinfo->free_ispace = sbinfo->max_inodes * BOGO_INODE_SIZE;
5056 	if (sb->s_flags & SB_KERNMOUNT) {
5057 		sbinfo->ino_batch = alloc_percpu(ino_t);
5058 		if (!sbinfo->ino_batch)
5059 			goto failed;
5060 	}
5061 	sbinfo->uid = ctx->uid;
5062 	sbinfo->gid = ctx->gid;
5063 	sbinfo->full_inums = ctx->full_inums;
5064 	sbinfo->mode = ctx->mode;
5065 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5066 	if (ctx->seen & SHMEM_SEEN_HUGE)
5067 		sbinfo->huge = ctx->huge;
5068 	else
5069 		sbinfo->huge = tmpfs_huge;
5070 #endif
5071 	sbinfo->mpol = ctx->mpol;
5072 	ctx->mpol = NULL;
5073 
5074 	raw_spin_lock_init(&sbinfo->stat_lock);
5075 	if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
5076 		goto failed;
5077 	spin_lock_init(&sbinfo->shrinklist_lock);
5078 	INIT_LIST_HEAD(&sbinfo->shrinklist);
5079 
5080 	sb->s_maxbytes = MAX_LFS_FILESIZE;
5081 	sb->s_blocksize = PAGE_SIZE;
5082 	sb->s_blocksize_bits = PAGE_SHIFT;
5083 	sb->s_magic = TMPFS_MAGIC;
5084 	sb->s_op = &shmem_ops;
5085 	sb->s_time_gran = 1;
5086 #ifdef CONFIG_TMPFS_XATTR
5087 	sb->s_xattr = shmem_xattr_handlers;
5088 #endif
5089 #ifdef CONFIG_TMPFS_POSIX_ACL
5090 	sb->s_flags |= SB_POSIXACL;
5091 #endif
5092 	uuid_t uuid;
5093 	uuid_gen(&uuid);
5094 	super_set_uuid(sb, uuid.b, sizeof(uuid));
5095 
5096 #ifdef CONFIG_TMPFS_QUOTA
5097 	if (ctx->seen & SHMEM_SEEN_QUOTA) {
5098 		sb->dq_op = &shmem_quota_operations;
5099 		sb->s_qcop = &dquot_quotactl_sysfile_ops;
5100 		sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
5101 
5102 		/* Copy the default limits from ctx into sbinfo */
5103 		memcpy(&sbinfo->qlimits, &ctx->qlimits,
5104 		       sizeof(struct shmem_quota_limits));
5105 
5106 		if (shmem_enable_quotas(sb, ctx->quota_types))
5107 			goto failed;
5108 	}
5109 #endif /* CONFIG_TMPFS_QUOTA */
5110 
5111 	inode = shmem_get_inode(&nop_mnt_idmap, sb, NULL,
5112 				S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
5113 	if (IS_ERR(inode)) {
5114 		error = PTR_ERR(inode);
5115 		goto failed;
5116 	}
5117 	inode->i_uid = sbinfo->uid;
5118 	inode->i_gid = sbinfo->gid;
5119 	sb->s_root = d_make_root(inode);
5120 	if (!sb->s_root)
5121 		goto failed;
5122 	return 0;
5123 
5124 failed:
5125 	shmem_put_super(sb);
5126 	return error;
5127 }
5128 
5129 static int shmem_get_tree(struct fs_context *fc)
5130 {
5131 	return get_tree_nodev(fc, shmem_fill_super);
5132 }
5133 
5134 static void shmem_free_fc(struct fs_context *fc)
5135 {
5136 	struct shmem_options *ctx = fc->fs_private;
5137 
5138 	if (ctx) {
5139 		mpol_put(ctx->mpol);
5140 		kfree(ctx);
5141 	}
5142 }
5143 
5144 static const struct fs_context_operations shmem_fs_context_ops = {
5145 	.free			= shmem_free_fc,
5146 	.get_tree		= shmem_get_tree,
5147 #ifdef CONFIG_TMPFS
5148 	.parse_monolithic	= shmem_parse_monolithic,
5149 	.parse_param		= shmem_parse_one,
5150 	.reconfigure		= shmem_reconfigure,
5151 #endif
5152 };
5153 
5154 static struct kmem_cache *shmem_inode_cachep __ro_after_init;
5155 
5156 static struct inode *shmem_alloc_inode(struct super_block *sb)
5157 {
5158 	struct shmem_inode_info *info;
5159 	info = alloc_inode_sb(sb, shmem_inode_cachep, GFP_KERNEL);
5160 	if (!info)
5161 		return NULL;
5162 	return &info->vfs_inode;
5163 }
5164 
5165 static void shmem_free_in_core_inode(struct inode *inode)
5166 {
5167 	if (S_ISLNK(inode->i_mode))
5168 		kfree(inode->i_link);
5169 	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
5170 }
5171 
5172 static void shmem_destroy_inode(struct inode *inode)
5173 {
5174 	if (S_ISREG(inode->i_mode))
5175 		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
5176 	if (S_ISDIR(inode->i_mode))
5177 		simple_offset_destroy(shmem_get_offset_ctx(inode));
5178 }
5179 
5180 static void shmem_init_inode(void *foo)
5181 {
5182 	struct shmem_inode_info *info = foo;
5183 	inode_init_once(&info->vfs_inode);
5184 }
5185 
5186 static void __init shmem_init_inodecache(void)
5187 {
5188 	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
5189 				sizeof(struct shmem_inode_info),
5190 				0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
5191 }
5192 
5193 static void __init shmem_destroy_inodecache(void)
5194 {
5195 	kmem_cache_destroy(shmem_inode_cachep);
5196 }
5197 
5198 /* Keep the folio in page cache instead of truncating it */
5199 static int shmem_error_remove_folio(struct address_space *mapping,
5200 				   struct folio *folio)
5201 {
5202 	return 0;
5203 }
5204 
5205 static const struct address_space_operations shmem_aops = {
5206 	.writepage	= shmem_writepage,
5207 	.dirty_folio	= noop_dirty_folio,
5208 #ifdef CONFIG_TMPFS
5209 	.write_begin	= shmem_write_begin,
5210 	.write_end	= shmem_write_end,
5211 #endif
5212 #ifdef CONFIG_MIGRATION
5213 	.migrate_folio	= migrate_folio,
5214 #endif
5215 	.error_remove_folio = shmem_error_remove_folio,
5216 };
5217 
5218 static const struct file_operations shmem_file_operations = {
5219 	.mmap		= shmem_mmap,
5220 	.open		= shmem_file_open,
5221 	.get_unmapped_area = shmem_get_unmapped_area,
5222 #ifdef CONFIG_TMPFS
5223 	.llseek		= shmem_file_llseek,
5224 	.read_iter	= shmem_file_read_iter,
5225 	.write_iter	= shmem_file_write_iter,
5226 	.fsync		= noop_fsync,
5227 	.splice_read	= shmem_file_splice_read,
5228 	.splice_write	= iter_file_splice_write,
5229 	.fallocate	= shmem_fallocate,
5230 #endif
5231 };
5232 
5233 static const struct inode_operations shmem_inode_operations = {
5234 	.getattr	= shmem_getattr,
5235 	.setattr	= shmem_setattr,
5236 #ifdef CONFIG_TMPFS_XATTR
5237 	.listxattr	= shmem_listxattr,
5238 	.set_acl	= simple_set_acl,
5239 	.fileattr_get	= shmem_fileattr_get,
5240 	.fileattr_set	= shmem_fileattr_set,
5241 #endif
5242 };
5243 
5244 static const struct inode_operations shmem_dir_inode_operations = {
5245 #ifdef CONFIG_TMPFS
5246 	.getattr	= shmem_getattr,
5247 	.create		= shmem_create,
5248 	.lookup		= simple_lookup,
5249 	.link		= shmem_link,
5250 	.unlink		= shmem_unlink,
5251 	.symlink	= shmem_symlink,
5252 	.mkdir		= shmem_mkdir,
5253 	.rmdir		= shmem_rmdir,
5254 	.mknod		= shmem_mknod,
5255 	.rename		= shmem_rename2,
5256 	.tmpfile	= shmem_tmpfile,
5257 	.get_offset_ctx	= shmem_get_offset_ctx,
5258 #endif
5259 #ifdef CONFIG_TMPFS_XATTR
5260 	.listxattr	= shmem_listxattr,
5261 	.fileattr_get	= shmem_fileattr_get,
5262 	.fileattr_set	= shmem_fileattr_set,
5263 #endif
5264 #ifdef CONFIG_TMPFS_POSIX_ACL
5265 	.setattr	= shmem_setattr,
5266 	.set_acl	= simple_set_acl,
5267 #endif
5268 };
5269 
5270 static const struct inode_operations shmem_special_inode_operations = {
5271 	.getattr	= shmem_getattr,
5272 #ifdef CONFIG_TMPFS_XATTR
5273 	.listxattr	= shmem_listxattr,
5274 #endif
5275 #ifdef CONFIG_TMPFS_POSIX_ACL
5276 	.setattr	= shmem_setattr,
5277 	.set_acl	= simple_set_acl,
5278 #endif
5279 };
5280 
5281 static const struct super_operations shmem_ops = {
5282 	.alloc_inode	= shmem_alloc_inode,
5283 	.free_inode	= shmem_free_in_core_inode,
5284 	.destroy_inode	= shmem_destroy_inode,
5285 #ifdef CONFIG_TMPFS
5286 	.statfs		= shmem_statfs,
5287 	.show_options	= shmem_show_options,
5288 #endif
5289 #ifdef CONFIG_TMPFS_QUOTA
5290 	.get_dquots	= shmem_get_dquots,
5291 #endif
5292 	.evict_inode	= shmem_evict_inode,
5293 	.drop_inode	= generic_delete_inode,
5294 	.put_super	= shmem_put_super,
5295 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5296 	.nr_cached_objects	= shmem_unused_huge_count,
5297 	.free_cached_objects	= shmem_unused_huge_scan,
5298 #endif
5299 };
5300 
5301 static const struct vm_operations_struct shmem_vm_ops = {
5302 	.fault		= shmem_fault,
5303 	.map_pages	= filemap_map_pages,
5304 #ifdef CONFIG_NUMA
5305 	.set_policy     = shmem_set_policy,
5306 	.get_policy     = shmem_get_policy,
5307 #endif
5308 };
5309 
5310 static const struct vm_operations_struct shmem_anon_vm_ops = {
5311 	.fault		= shmem_fault,
5312 	.map_pages	= filemap_map_pages,
5313 #ifdef CONFIG_NUMA
5314 	.set_policy     = shmem_set_policy,
5315 	.get_policy     = shmem_get_policy,
5316 #endif
5317 };
5318 
5319 int shmem_init_fs_context(struct fs_context *fc)
5320 {
5321 	struct shmem_options *ctx;
5322 
5323 	ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL);
5324 	if (!ctx)
5325 		return -ENOMEM;
5326 
5327 	ctx->mode = 0777 | S_ISVTX;
5328 	ctx->uid = current_fsuid();
5329 	ctx->gid = current_fsgid();
5330 
5331 #if IS_ENABLED(CONFIG_UNICODE)
5332 	ctx->encoding = NULL;
5333 #endif
5334 
5335 	fc->fs_private = ctx;
5336 	fc->ops = &shmem_fs_context_ops;
5337 	return 0;
5338 }
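/*
 * Illustrative note: shmem_init_fs_context() above is wired up as
 * .init_fs_context in shmem_fs_type below, so both a classic mount(2) of
 * tmpfs and the new-style fsopen("tmpfs") / fsconfig() / fsmount()
 * sequence allocate this context and then run the shmem_fs_context_ops
 * callbacks (shmem_parse_one, shmem_get_tree, ...).
 */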
5339 
5340 static struct file_system_type shmem_fs_type = {
5341 	.owner		= THIS_MODULE,
5342 	.name		= "tmpfs",
5343 	.init_fs_context = shmem_init_fs_context,
5344 #ifdef CONFIG_TMPFS
5345 	.parameters	= shmem_fs_parameters,
5346 #endif
5347 	.kill_sb	= kill_litter_super,
5348 	.fs_flags	= FS_USERNS_MOUNT | FS_ALLOW_IDMAP | FS_MGTIME,
5349 };
5350 
5351 #if defined(CONFIG_SYSFS) && defined(CONFIG_TMPFS)
5352 
5353 #define __INIT_KOBJ_ATTR(_name, _mode, _show, _store)			\
5354 {									\
5355 	.attr	= { .name = __stringify(_name), .mode = _mode },	\
5356 	.show	= _show,						\
5357 	.store	= _store,						\
5358 }
5359 
5360 #define TMPFS_ATTR_W(_name, _store)				\
5361 	static struct kobj_attribute tmpfs_attr_##_name =	\
5362 			__INIT_KOBJ_ATTR(_name, 0200, NULL, _store)
5363 
5364 #define TMPFS_ATTR_RW(_name, _show, _store)			\
5365 	static struct kobj_attribute tmpfs_attr_##_name =	\
5366 			__INIT_KOBJ_ATTR(_name, 0644, _show, _store)
5367 
5368 #define TMPFS_ATTR_RO(_name, _show)				\
5369 	static struct kobj_attribute tmpfs_attr_##_name =	\
5370 			__INIT_KOBJ_ATTR(_name, 0444, _show, NULL)
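/*
 * For reference (illustrative, mirrors the macros above): an invocation
 * such as TMPFS_ATTR_RO(casefold, casefold_show) expands to
 *
 *	static struct kobj_attribute tmpfs_attr_casefold =
 *		{ .attr = { .name = "casefold", .mode = 0444 },
 *		  .show = casefold_show, .store = NULL };
 */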
5371 
5372 #if IS_ENABLED(CONFIG_UNICODE)
5373 static ssize_t casefold_show(struct kobject *kobj, struct kobj_attribute *a,
5374 			char *buf)
5375 {
5376 	return sysfs_emit(buf, "supported\n");
5377 }
5378 TMPFS_ATTR_RO(casefold, casefold_show);
5379 #endif
5380 
5381 static struct attribute *tmpfs_attributes[] = {
5382 #if IS_ENABLED(CONFIG_UNICODE)
5383 	&tmpfs_attr_casefold.attr,
5384 #endif
5385 	NULL
5386 };
5387 
5388 static const struct attribute_group tmpfs_attribute_group = {
5389 	.attrs = tmpfs_attributes,
5390 	.name = "features"
5391 };
5392 
5393 static struct kobject *tmpfs_kobj;
5394 
5395 static int __init tmpfs_sysfs_init(void)
5396 {
5397 	int ret;
5398 
5399 	tmpfs_kobj = kobject_create_and_add("tmpfs", fs_kobj);
5400 	if (!tmpfs_kobj)
5401 		return -ENOMEM;
5402 
5403 	ret = sysfs_create_group(tmpfs_kobj, &tmpfs_attribute_group);
5404 	if (ret)
5405 		kobject_put(tmpfs_kobj);
5406 
5407 	return ret;
5408 }
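/*
 * Note (illustrative): kobject_create_and_add() above parents the "tmpfs"
 * kobject under fs_kobj, so the attribute group is expected to show up as
 * /sys/fs/tmpfs/features/, e.g. /sys/fs/tmpfs/features/casefold.
 */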
5409 #endif /* CONFIG_SYSFS && CONFIG_TMPFS */
5410 
5411 void __init shmem_init(void)
5412 {
5413 	int error;
5414 
5415 	shmem_init_inodecache();
5416 
5417 #ifdef CONFIG_TMPFS_QUOTA
5418 	register_quota_format(&shmem_quota_format);
5419 #endif
5420 
5421 	error = register_filesystem(&shmem_fs_type);
5422 	if (error) {
5423 		pr_err("Could not register tmpfs\n");
5424 		goto out2;
5425 	}
5426 
5427 	shm_mnt = kern_mount(&shmem_fs_type);
5428 	if (IS_ERR(shm_mnt)) {
5429 		error = PTR_ERR(shm_mnt);
5430 		pr_err("Could not kern_mount tmpfs\n");
5431 		goto out1;
5432 	}
5433 
5434 #if defined(CONFIG_SYSFS) && defined(CONFIG_TMPFS)
5435 	error = tmpfs_sysfs_init();
5436 	if (error) {
5437 		pr_err("Could not init tmpfs sysfs\n");
5438 		goto out1;
5439 	}
5440 #endif
5441 
5442 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5443 	if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
5444 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
5445 	else
5446 		shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */
5447 
5448 	/*
5449 	 * Default to setting PMD-sized THP to inherit the global setting and
5450 	 * disable all other multi-size THPs.
5451 	 */
5452 	if (!shmem_orders_configured)
5453 		huge_shmem_orders_inherit = BIT(HPAGE_PMD_ORDER);
5454 #endif
5455 	return;
5456 
5457 out1:
5458 	unregister_filesystem(&shmem_fs_type);
5459 out2:
5460 #ifdef CONFIG_TMPFS_QUOTA
5461 	unregister_quota_format(&shmem_quota_format);
5462 #endif
5463 	shmem_destroy_inodecache();
5464 	shm_mnt = ERR_PTR(error);
5465 }
5466 
5467 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
5468 static ssize_t shmem_enabled_show(struct kobject *kobj,
5469 				  struct kobj_attribute *attr, char *buf)
5470 {
5471 	static const int values[] = {
5472 		SHMEM_HUGE_ALWAYS,
5473 		SHMEM_HUGE_WITHIN_SIZE,
5474 		SHMEM_HUGE_ADVISE,
5475 		SHMEM_HUGE_NEVER,
5476 		SHMEM_HUGE_DENY,
5477 		SHMEM_HUGE_FORCE,
5478 	};
5479 	int len = 0;
5480 	int i;
5481 
5482 	for (i = 0; i < ARRAY_SIZE(values); i++) {
5483 		len += sysfs_emit_at(buf, len,
5484 				shmem_huge == values[i] ? "%s[%s]" : "%s%s",
5485 				i ? " " : "", shmem_format_huge(values[i]));
5486 	}
5487 	len += sysfs_emit_at(buf, len, "\n");
5488 
5489 	return len;
5490 }
5491 
5492 static ssize_t shmem_enabled_store(struct kobject *kobj,
5493 		struct kobj_attribute *attr, const char *buf, size_t count)
5494 {
5495 	char tmp[16];
5496 	int huge, err;
5497 
5498 	if (count + 1 > sizeof(tmp))
5499 		return -EINVAL;
5500 	memcpy(tmp, buf, count);
5501 	tmp[count] = '\0';
5502 	if (count && tmp[count - 1] == '\n')
5503 		tmp[count - 1] = '\0';
5504 
5505 	huge = shmem_parse_huge(tmp);
5506 	if (huge == -EINVAL)
5507 		return huge;
5508 
5509 	shmem_huge = huge;
5510 	if (shmem_huge > SHMEM_HUGE_DENY)
5511 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
5512 
5513 	err = start_stop_khugepaged();
5514 	return err ? err : count;
5515 }
5516 
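/*
 * Illustrative usage (assuming the conventional sysfs location of this
 * attribute under /sys/kernel/mm/transparent_hugepage/):
 *
 *	echo within_size > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 *
 * selects SHMEM_HUGE_WITHIN_SIZE via shmem_enabled_store() above and
 * applies it to the internal mount.
 */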
5517 struct kobj_attribute shmem_enabled_attr = __ATTR_RW(shmem_enabled);
5518 static DEFINE_SPINLOCK(huge_shmem_orders_lock);
5519 
5520 static ssize_t thpsize_shmem_enabled_show(struct kobject *kobj,
5521 					  struct kobj_attribute *attr, char *buf)
5522 {
5523 	int order = to_thpsize(kobj)->order;
5524 	const char *output;
5525 
5526 	if (test_bit(order, &huge_shmem_orders_always))
5527 		output = "[always] inherit within_size advise never";
5528 	else if (test_bit(order, &huge_shmem_orders_inherit))
5529 		output = "always [inherit] within_size advise never";
5530 	else if (test_bit(order, &huge_shmem_orders_within_size))
5531 		output = "always inherit [within_size] advise never";
5532 	else if (test_bit(order, &huge_shmem_orders_madvise))
5533 		output = "always inherit within_size [advise] never";
5534 	else
5535 		output = "always inherit within_size advise [never]";
5536 
5537 	return sysfs_emit(buf, "%s\n", output);
5538 }
5539 
5540 static ssize_t thpsize_shmem_enabled_store(struct kobject *kobj,
5541 					   struct kobj_attribute *attr,
5542 					   const char *buf, size_t count)
5543 {
5544 	int order = to_thpsize(kobj)->order;
5545 	ssize_t ret = count;
5546 
5547 	if (sysfs_streq(buf, "always")) {
5548 		spin_lock(&huge_shmem_orders_lock);
5549 		clear_bit(order, &huge_shmem_orders_inherit);
5550 		clear_bit(order, &huge_shmem_orders_madvise);
5551 		clear_bit(order, &huge_shmem_orders_within_size);
5552 		set_bit(order, &huge_shmem_orders_always);
5553 		spin_unlock(&huge_shmem_orders_lock);
5554 	} else if (sysfs_streq(buf, "inherit")) {
5555 		/* Do not override huge allocation policy with non-PMD sized mTHP */
5556 		if (shmem_huge == SHMEM_HUGE_FORCE &&
5557 		    order != HPAGE_PMD_ORDER)
5558 			return -EINVAL;
5559 
5560 		spin_lock(&huge_shmem_orders_lock);
5561 		clear_bit(order, &huge_shmem_orders_always);
5562 		clear_bit(order, &huge_shmem_orders_madvise);
5563 		clear_bit(order, &huge_shmem_orders_within_size);
5564 		set_bit(order, &huge_shmem_orders_inherit);
5565 		spin_unlock(&huge_shmem_orders_lock);
5566 	} else if (sysfs_streq(buf, "within_size")) {
5567 		spin_lock(&huge_shmem_orders_lock);
5568 		clear_bit(order, &huge_shmem_orders_always);
5569 		clear_bit(order, &huge_shmem_orders_inherit);
5570 		clear_bit(order, &huge_shmem_orders_madvise);
5571 		set_bit(order, &huge_shmem_orders_within_size);
5572 		spin_unlock(&huge_shmem_orders_lock);
5573 	} else if (sysfs_streq(buf, "advise")) {
5574 		spin_lock(&huge_shmem_orders_lock);
5575 		clear_bit(order, &huge_shmem_orders_always);
5576 		clear_bit(order, &huge_shmem_orders_inherit);
5577 		clear_bit(order, &huge_shmem_orders_within_size);
5578 		set_bit(order, &huge_shmem_orders_madvise);
5579 		spin_unlock(&huge_shmem_orders_lock);
5580 	} else if (sysfs_streq(buf, "never")) {
5581 		spin_lock(&huge_shmem_orders_lock);
5582 		clear_bit(order, &huge_shmem_orders_always);
5583 		clear_bit(order, &huge_shmem_orders_inherit);
5584 		clear_bit(order, &huge_shmem_orders_within_size);
5585 		clear_bit(order, &huge_shmem_orders_madvise);
5586 		spin_unlock(&huge_shmem_orders_lock);
5587 	} else {
5588 		ret = -EINVAL;
5589 	}
5590 
5591 	if (ret > 0) {
5592 		int err = start_stop_khugepaged();
5593 
5594 		if (err)
5595 			ret = err;
5596 	}
5597 	return ret;
5598 }
5599 
5600 struct kobj_attribute thpsize_shmem_enabled_attr =
5601 	__ATTR(shmem_enabled, 0644, thpsize_shmem_enabled_show, thpsize_shmem_enabled_store);
5602 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */
5603 
5604 #if defined(CONFIG_TRANSPARENT_HUGEPAGE)
5605 
5606 static int __init setup_transparent_hugepage_shmem(char *str)
5607 {
5608 	int huge;
5609 
5610 	huge = shmem_parse_huge(str);
5611 	if (huge == -EINVAL) {
5612 		pr_warn("transparent_hugepage_shmem= cannot parse, ignored\n");
5613 		return huge;
5614 	}
5615 
5616 	shmem_huge = huge;
5617 	return 1;
5618 }
5619 __setup("transparent_hugepage_shmem=", setup_transparent_hugepage_shmem);
5620 
5621 static int __init setup_transparent_hugepage_tmpfs(char *str)
5622 {
5623 	int huge;
5624 
5625 	huge = shmem_parse_huge(str);
5626 	if (huge < 0) {
5627 		pr_warn("transparent_hugepage_tmpfs= cannot parse, ignored\n");
5628 		return huge;
5629 	}
5630 
5631 	tmpfs_huge = huge;
5632 	return 1;
5633 }
5634 __setup("transparent_hugepage_tmpfs=", setup_transparent_hugepage_tmpfs);
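/*
 * Illustrative boot-time usage (values as accepted by shmem_parse_huge()):
 *
 *	transparent_hugepage_shmem=within_size
 *	transparent_hugepage_tmpfs=never
 *
 * The former sets the default for the internal shmem mount (shmem_huge),
 * the latter the default huge policy used by tmpfs mounts that do not
 * pass an explicit huge= option (tmpfs_huge).
 */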
5635 
5636 static char str_dup[PAGE_SIZE] __initdata;
5637 static int __init setup_thp_shmem(char *str)
5638 {
5639 	char *token, *range, *policy, *subtoken;
5640 	unsigned long always, inherit, madvise, within_size;
5641 	char *start_size, *end_size;
5642 	int start, end, nr;
5643 	char *p;
5644 
5645 	if (!str || strlen(str) + 1 > PAGE_SIZE)
5646 		goto err;
5647 	strscpy(str_dup, str);
5648 
5649 	always = huge_shmem_orders_always;
5650 	inherit = huge_shmem_orders_inherit;
5651 	madvise = huge_shmem_orders_madvise;
5652 	within_size = huge_shmem_orders_within_size;
5653 	p = str_dup;
5654 	while ((token = strsep(&p, ";")) != NULL) {
5655 		range = strsep(&token, ":");
5656 		policy = token;
5657 
5658 		if (!policy)
5659 			goto err;
5660 
5661 		while ((subtoken = strsep(&range, ",")) != NULL) {
5662 			if (strchr(subtoken, '-')) {
5663 				start_size = strsep(&subtoken, "-");
5664 				end_size = subtoken;
5665 
5666 				start = get_order_from_str(start_size,
5667 							   THP_ORDERS_ALL_FILE_DEFAULT);
5668 				end = get_order_from_str(end_size,
5669 							 THP_ORDERS_ALL_FILE_DEFAULT);
5670 			} else {
5671 				start_size = end_size = subtoken;
5672 				start = end = get_order_from_str(subtoken,
5673 								 THP_ORDERS_ALL_FILE_DEFAULT);
5674 			}
5675 
5676 			if (start == -EINVAL) {
5677 				pr_err("invalid size %s in thp_shmem boot parameter\n",
5678 				       start_size);
5679 				goto err;
5680 			}
5681 
5682 			if (end == -EINVAL) {
5683 				pr_err("invalid size %s in thp_shmem boot parameter\n",
5684 				       end_size);
5685 				goto err;
5686 			}
5687 
5688 			if (start < 0 || end < 0 || start > end)
5689 				goto err;
5690 
5691 			nr = end - start + 1;
5692 			if (!strcmp(policy, "always")) {
5693 				bitmap_set(&always, start, nr);
5694 				bitmap_clear(&inherit, start, nr);
5695 				bitmap_clear(&madvise, start, nr);
5696 				bitmap_clear(&within_size, start, nr);
5697 			} else if (!strcmp(policy, "advise")) {
5698 				bitmap_set(&madvise, start, nr);
5699 				bitmap_clear(&inherit, start, nr);
5700 				bitmap_clear(&always, start, nr);
5701 				bitmap_clear(&within_size, start, nr);
5702 			} else if (!strcmp(policy, "inherit")) {
5703 				bitmap_set(&inherit, start, nr);
5704 				bitmap_clear(&madvise, start, nr);
5705 				bitmap_clear(&always, start, nr);
5706 				bitmap_clear(&within_size, start, nr);
5707 			} else if (!strcmp(policy, "within_size")) {
5708 				bitmap_set(&within_size, start, nr);
5709 				bitmap_clear(&inherit, start, nr);
5710 				bitmap_clear(&madvise, start, nr);
5711 				bitmap_clear(&always, start, nr);
5712 			} else if (!strcmp(policy, "never")) {
5713 				bitmap_clear(&inherit, start, nr);
5714 				bitmap_clear(&madvise, start, nr);
5715 				bitmap_clear(&always, start, nr);
5716 				bitmap_clear(&within_size, start, nr);
5717 			} else {
5718 				pr_err("invalid policy %s in thp_shmem boot parameter\n", policy);
5719 				goto err;
5720 			}
5721 		}
5722 	}
5723 
5724 	huge_shmem_orders_always = always;
5725 	huge_shmem_orders_madvise = madvise;
5726 	huge_shmem_orders_inherit = inherit;
5727 	huge_shmem_orders_within_size = within_size;
5728 	shmem_orders_configured = true;
5729 	return 1;
5730 
5731 err:
5732 	pr_warn("thp_shmem=%s: error parsing string, ignoring setting\n", str);
5733 	return 0;
5734 }
5735 __setup("thp_shmem=", setup_thp_shmem);
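/*
 * Illustrative boot-time usage, following the grammar parsed above
 * (ranges are separated by ';', sizes by ',', a '-' denotes an inclusive
 * size range, and the policy follows the ':'); the exact sizes accepted
 * depend on the architecture's supported orders:
 *
 *	thp_shmem=16K-64K:always;128K:advise;2M:never
 */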
5736 
5737 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
5738 
5739 #else /* !CONFIG_SHMEM */
5740 
5741 /*
5742  * tiny-shmem: simple shmemfs and tmpfs using ramfs code
5743  *
5744  * This is intended for small systems where the benefits of the full
5745  * shmem code (swap-backed and resource-limited) are outweighed by
5746  * its complexity. On systems without swap this code should be
5747  * effectively equivalent, but much lighter weight.
5748  */
5749 
5750 static struct file_system_type shmem_fs_type = {
5751 	.name		= "tmpfs",
5752 	.init_fs_context = ramfs_init_fs_context,
5753 	.parameters	= ramfs_fs_parameters,
5754 	.kill_sb	= ramfs_kill_sb,
5755 	.fs_flags	= FS_USERNS_MOUNT,
5756 };
5757 
5758 void __init shmem_init(void)
5759 {
5760 	BUG_ON(register_filesystem(&shmem_fs_type) != 0);
5761 
5762 	shm_mnt = kern_mount(&shmem_fs_type);
5763 	BUG_ON(IS_ERR(shm_mnt));
5764 }
5765 
5766 int shmem_unuse(unsigned int type)
5767 {
5768 	return 0;
5769 }
5770 
5771 int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
5772 {
5773 	return 0;
5774 }
5775 
5776 void shmem_unlock_mapping(struct address_space *mapping)
5777 {
5778 }
5779 
5780 #ifdef CONFIG_MMU
5781 unsigned long shmem_get_unmapped_area(struct file *file,
5782 				      unsigned long addr, unsigned long len,
5783 				      unsigned long pgoff, unsigned long flags)
5784 {
5785 	return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags);
5786 }
5787 #endif
5788 
5789 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
5790 {
5791 	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
5792 }
5793 EXPORT_SYMBOL_GPL(shmem_truncate_range);
5794 
5795 #define shmem_vm_ops				generic_file_vm_ops
5796 #define shmem_anon_vm_ops			generic_file_vm_ops
5797 #define shmem_file_operations			ramfs_file_operations
5798 #define shmem_acct_size(flags, size)		0
5799 #define shmem_unacct_size(flags, size)		do {} while (0)
5800 
5801 static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap,
5802 				struct super_block *sb, struct inode *dir,
5803 				umode_t mode, dev_t dev, unsigned long flags)
5804 {
5805 	struct inode *inode = ramfs_get_inode(sb, dir, mode, dev);
5806 	return inode ? inode : ERR_PTR(-ENOSPC);
5807 }
5808 
5809 #endif /* CONFIG_SHMEM */
5810 
5811 /* common code */
5812 
5813 static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name,
5814 			loff_t size, unsigned long flags, unsigned int i_flags)
5815 {
5816 	struct inode *inode;
5817 	struct file *res;
5818 
5819 	if (IS_ERR(mnt))
5820 		return ERR_CAST(mnt);
5821 
5822 	if (size < 0 || size > MAX_LFS_FILESIZE)
5823 		return ERR_PTR(-EINVAL);
5824 
5825 	if (shmem_acct_size(flags, size))
5826 		return ERR_PTR(-ENOMEM);
5827 
5828 	if (is_idmapped_mnt(mnt))
5829 		return ERR_PTR(-EINVAL);
5830 
5831 	inode = shmem_get_inode(&nop_mnt_idmap, mnt->mnt_sb, NULL,
5832 				S_IFREG | S_IRWXUGO, 0, flags);
5833 	if (IS_ERR(inode)) {
5834 		shmem_unacct_size(flags, size);
5835 		return ERR_CAST(inode);
5836 	}
5837 	inode->i_flags |= i_flags;
5838 	inode->i_size = size;
5839 	clear_nlink(inode);	/* It is unlinked */
5840 	res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
5841 	if (!IS_ERR(res))
5842 		res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
5843 				&shmem_file_operations);
5844 	if (IS_ERR(res))
5845 		iput(inode);
5846 	return res;
5847 }
5848 
5849 /**
5850  * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
5851  * 	kernel internal.  There will be NO LSM permission checks against the
5852  * 	underlying inode.  So users of this interface must do LSM checks at a
5853  *	higher layer.  The users are the big_key and shm implementations.  LSM
5854  *	checks are provided at the key or shm level rather than at the inode.
5855  * @name: name for dentry (to be seen in /proc/<pid>/maps)
5856  * @size: size to be set for the file
5857  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
5858  */
5859 struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
5860 {
5861 	return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
5862 }
5863 EXPORT_SYMBOL_GPL(shmem_kernel_file_setup);
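/*
 * Minimal sketch (not compiled in, guarded by #if 0) of how a kernel-
 * internal user such as big_key might use the helper above; the function
 * name and the 1 MiB size are hypothetical.
 */
#if 0
static int example_shmem_blob(void)
{
	struct file *filp;

	filp = shmem_kernel_file_setup("example-blob", 1024 * 1024, 0);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	/* ... access the contents through filp->f_mapping ... */

	fput(filp);	/* dropping the last reference frees the unlinked file */
	return 0;
}
#endif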
5864 
5865 /**
5866  * shmem_file_setup - get an unlinked file living in tmpfs
5867  * @name: name for dentry (to be seen in /proc/<pid>/maps)
5868  * @size: size to be set for the file
5869  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
5870  */
5871 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
5872 {
5873 	return __shmem_file_setup(shm_mnt, name, size, flags, 0);
5874 }
5875 EXPORT_SYMBOL_GPL(shmem_file_setup);
5876 
5877 /**
5878  * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
5879  * @mnt: the tmpfs mount where the file will be created
5880  * @name: name for dentry (to be seen in /proc/<pid>/maps)
5881  * @size: size to be set for the file
5882  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
5883  */
5884 struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
5885 				       loff_t size, unsigned long flags)
5886 {
5887 	return __shmem_file_setup(mnt, name, size, flags, 0);
5888 }
5889 EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
5890 
5891 /**
5892  * shmem_zero_setup - setup a shared anonymous mapping
5893  * @vma: the vma to be mmapped, as prepared by do_mmap
5894  */
5895 int shmem_zero_setup(struct vm_area_struct *vma)
5896 {
5897 	struct file *file;
5898 	loff_t size = vma->vm_end - vma->vm_start;
5899 
5900 	/*
5901 	 * Cloning a new file under mmap_lock leads to a lock ordering conflict
5902 	 * between XFS directory reading and selinux: since this file is only
5903 	 * accessible to the user through its mapping, use S_PRIVATE flag to
5904 	 * bypass file security, in the same way as shmem_kernel_file_setup().
5905 	 */
5906 	file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
5907 	if (IS_ERR(file))
5908 		return PTR_ERR(file);
5909 
5910 	if (vma->vm_file)
5911 		fput(vma->vm_file);
5912 	vma->vm_file = file;
5913 	vma->vm_ops = &shmem_anon_vm_ops;
5914 
5915 	return 0;
5916 }
5917 
5918 /**
5919  * shmem_read_folio_gfp - read into page cache, using specified page allocation flags.
5920  * @mapping:	the folio's address_space
5921  * @index:	the folio index
5922  * @gfp:	the page allocator flags to use if allocating
5923  *
5924  * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
5925  * with any new page allocations done using the specified allocation flags.
5926  * But read_cache_page_gfp() uses the ->read_folio() method: which does not
5927  * suit tmpfs, since it may have pages in swapcache, and needs to find those
5928  * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
5929  *
5930  * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
5931  * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
5932  */
5933 struct folio *shmem_read_folio_gfp(struct address_space *mapping,
5934 		pgoff_t index, gfp_t gfp)
5935 {
5936 #ifdef CONFIG_SHMEM
5937 	struct inode *inode = mapping->host;
5938 	struct folio *folio;
5939 	int error;
5940 
5941 	error = shmem_get_folio_gfp(inode, index, 0, &folio, SGP_CACHE,
5942 				    gfp, NULL, NULL);
5943 	if (error)
5944 		return ERR_PTR(error);
5945 
5946 	folio_unlock(folio);
5947 	return folio;
5948 #else
5949 	/*
5950 	 * The tiny !SHMEM case uses ramfs without swap
5951 	 */
5952 	return mapping_read_folio_gfp(mapping, index, gfp);
5953 #endif
5954 }
5955 EXPORT_SYMBOL_GPL(shmem_read_folio_gfp);
5956 
5957 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
5958 					 pgoff_t index, gfp_t gfp)
5959 {
5960 	struct folio *folio = shmem_read_folio_gfp(mapping, index, gfp);
5961 	struct page *page;
5962 
5963 	if (IS_ERR(folio))
5964 		return &folio->page;
5965 
5966 	page = folio_file_page(folio, index);
5967 	if (PageHWPoison(page)) {
5968 		folio_put(folio);
5969 		return ERR_PTR(-EIO);
5970 	}
5971 
5972 	return page;
5973 }
5974 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
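/*
 * Minimal sketch (not compiled in, guarded by #if 0) of the pattern the
 * shmem_read_folio_gfp() comment above describes for i915/ttm: mix
 * __GFP_NORETRY | __GFP_NOWARN into the mapping's gfp mask so that an
 * allocation failure does not OOM the machine unnecessarily. The function
 * name is hypothetical.
 */
#if 0
static struct page *example_pin_shmem_page(struct address_space *mapping,
					   pgoff_t index)
{
	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;

	return shmem_read_mapping_page_gfp(mapping, index, gfp);
}
#endif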
5975