// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/memory.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
 *		Found it. Everything seems to work now.
 * 20.12.91  -  Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94  -  Multi-page memory management added for v1.1.
 *		Idea by Alex Bligh ([email protected])
 *
 * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
 *		([email protected])
 *
 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
 */

#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/memremap.h>
#include <linux/kmsan.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/delayacct.h>
#include <linux/init.h>
#include <linux/pfn_t.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/swapops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/string.h>
#include <linux/memory-tiers.h>
#include <linux/debugfs.h>
#include <linux/userfaultfd_k.h>
#include <linux/dax.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/vmalloc.h>
#include <linux/sched/sysctl.h>

#include <trace/events/kmem.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#include "pgalloc-track.h"
#include "internal.h"
#include "swap.h"

#if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
#endif

#ifndef CONFIG_NUMA
unsigned long max_mapnr;
EXPORT_SYMBOL(max_mapnr);

struct page *mem_map;
EXPORT_SYMBOL(mem_map);
#endif

static vm_fault_t do_fault(struct vm_fault *vmf);
static vm_fault_t do_anonymous_page(struct vm_fault *vmf);
static bool vmf_pte_changed(struct vm_fault *vmf);

/*
 * Return true if the original pte was a uffd-wp pte marker (so the pte was
 * wr-protected).
 */
static __always_inline bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf)
{
	if (!userfaultfd_wp(vmf->vma))
		return false;
	if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID))
		return false;

	return pte_marker_uffd_wp(vmf->orig_pte);
}

/*
 * A number of key systems in x86 including ioremap() rely on the assumption
 * that high_memory defines the upper bound on direct map memory, i.e. the
 * end of ZONE_NORMAL.
 */
void *high_memory;
EXPORT_SYMBOL(high_memory);

/*
 * Randomize the address space (stacks, mmaps, brk, etc.).
 *
 * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
 *   as ancient (libc5 based) binaries can segfault. )
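 *
 * ( Values: 0 = off, 1 = randomize everything except brk,
 *   2 = also randomize brk. )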
 */
int randomize_va_space __read_mostly =
#ifdef CONFIG_COMPAT_BRK
					1;
#else
					2;
#endif

#ifndef arch_wants_old_prefaulted_pte
static inline bool arch_wants_old_prefaulted_pte(void)
{
	/*
	 * Transitioning a PTE from 'old' to 'young' can be expensive on
	 * some architectures, even if it's performed in hardware. By
	 * default, "false" means prefaulted entries will be 'young'.
	 */
	return false;
}
#endif

static int __init disable_randmaps(char *s)
{
	randomize_va_space = 0;
	return 1;
}
__setup("norandmaps", disable_randmaps);

unsigned long zero_pfn __read_mostly;
EXPORT_SYMBOL(zero_pfn);

unsigned long highest_memmap_pfn __read_mostly;

/*
 * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
 */
static int __init init_zero_pfn(void)
{
	zero_pfn = page_to_pfn(ZERO_PAGE(0));
	return 0;
}
early_initcall(init_zero_pfn);

void mm_trace_rss_stat(struct mm_struct *mm, int member)
{
	trace_rss_stat(mm, member);
}

/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
			   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	mm_dec_nr_ptes(tlb->mm);
}

static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				  unsigned long addr, unsigned long end,
				  unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		free_pte_range(tlb, pmd, addr);
	} while (pmd++, addr = next, addr != end);

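	/*
	 * Only free the pmd table itself if the whole pud-aligned range
	 * lies within [floor, ceiling); see the "- 1" commentary in
	 * free_pgd_range() below for why these checks look the way they do.
	 */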
	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}

static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
				  unsigned long addr, unsigned long end,
				  unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
	} while (pud++, addr = next, addr != end);

	start &= P4D_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= P4D_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(p4d, start);
	p4d_clear(p4d);
	pud_free_tlb(tlb, pud, start);
	mm_dec_nr_puds(tlb->mm);
}

static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
				  unsigned long addr, unsigned long end,
				  unsigned long floor, unsigned long ceiling)
{
	p4d_t *p4d;
	unsigned long next;
	unsigned long start;

	start = addr;
	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		free_pud_range(tlb, p4d, addr, next, floor, ceiling);
	} while (p4d++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	p4d = p4d_offset(pgd, start);
	pgd_clear(pgd);
	p4d_free_tlb(tlb, p4d, start);
}

/*
 * This function frees user-level page tables of a process.
 */
void free_pgd_range(struct mmu_gather *tlb,
		    unsigned long addr, unsigned long end,
		    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing PMD* at this top level?  Because often
	 * there will be no work to do at all, and we'd prefer not to
	 * go all the way down to the bottom just to discover that.
	 *
	 * Why all these "- 1"s?  Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top.
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we must
	 * be careful to reject "the opposite 0" before it confuses the
	 * subsequent tests.  But what about where end is brought down
	 * by PMD_SIZE below? no, end can't go down to 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */

	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;
	/*
	 * Page table cache pages are added with PAGE_SIZE granularity
	 * (see pte_free_tlb()), so flush the TLB if needed.
	 */
	tlb_change_page_size(tlb, PAGE_SIZE);
	pgd = pgd_offset(tlb->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}

void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
		   struct vm_area_struct *vma, unsigned long floor,
		   unsigned long ceiling, bool mm_wr_locked)
{
	struct unlink_vma_file_batch vb;

	do {
		unsigned long addr = vma->vm_start;
		struct vm_area_struct *next;

		/*
		 * Note: USER_PGTABLES_CEILING may be passed as ceiling and may
		 * be 0.  This will underflow and is okay.
		 */
		next = mas_find(mas, ceiling - 1);
		if (unlikely(xa_is_zero(next)))
			next = NULL;

		/*
		 * Hide vma from rmap and truncate_pagecache before freeing
		 * pgtables
		 */
		if (mm_wr_locked)
			vma_start_write(vma);
		unlink_anon_vmas(vma);

		if (is_vm_hugetlb_page(vma)) {
			unlink_file_vma(vma);
			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
				floor, next ? next->vm_start : ceiling);
		} else {
			unlink_file_vma_batch_init(&vb);
			unlink_file_vma_batch_add(&vb, vma);

			/*
			 * Optimization: gather nearby vmas into one call down
			 */
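			/*
			 * (Page tables are freed at PMD granularity, hence the
			 * PMD_SIZE gathering window below: a gap smaller than
			 * that cannot free a table on its own.)
			 */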
			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
			       && !is_vm_hugetlb_page(next)) {
				vma = next;
				next = mas_find(mas, ceiling - 1);
				if (unlikely(xa_is_zero(next)))
					next = NULL;
				if (mm_wr_locked)
					vma_start_write(vma);
				unlink_anon_vmas(vma);
				unlink_file_vma_batch_add(&vb, vma);
			}
			unlink_file_vma_batch_final(&vb);
			free_pgd_range(tlb, addr, vma->vm_end,
				floor, next ? next->vm_start : ceiling);
		}
		vma = next;
	} while (vma);
}

void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
{
	spinlock_t *ptl = pmd_lock(mm, pmd);

	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
		mm_inc_nr_ptes(mm);
		/*
		 * Ensure all pte setup (eg. pte page lock and page clearing) are
		 * visible before the pte is made visible to other CPUs by being
		 * put into page tables.
		 *
		 * The other side of the story is the pointer chasing in the page
		 * table walking code (when walking the page table without locking;
		 * ie. most of the time). Fortunately, these data accesses consist
		 * of a chain of data-dependent loads, meaning most CPUs (alpha
		 * being the notable exception) will already guarantee loads are
		 * seen in-order. See the alpha page table accessors for the
		 * smp_rmb() barriers in page table walking code.
		 */
		smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
		pmd_populate(mm, pmd, *pte);
		*pte = NULL;
	}
	spin_unlock(ptl);
}

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
{
	pgtable_t new = pte_alloc_one(mm);
	if (!new)
		return -ENOMEM;

	pmd_install(mm, pmd, &new);
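	/*
	 * pmd_install() consumed "new" (setting it to NULL) unless the pmd
	 * was already populated, in which case free the spare pte page.
	 */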
	if (new)
		pte_free(mm, new);
	return 0;
}

int __pte_alloc_kernel(pmd_t *pmd)
{
	pte_t *new = pte_alloc_one_kernel(&init_mm);
	if (!new)
		return -ENOMEM;

	spin_lock(&init_mm.page_table_lock);
	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
		smp_wmb(); /* See comment in pmd_install() */
		pmd_populate_kernel(&init_mm, pmd, new);
		new = NULL;
	}
	spin_unlock(&init_mm.page_table_lock);
	if (new)
		pte_free_kernel(&init_mm, new);
	return 0;
}

static inline void init_rss_vec(int *rss)
{
	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
}

static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
{
	int i;

	for (i = 0; i < NR_MM_COUNTERS; i++)
		if (rss[i])
			add_mm_counter(mm, i, rss[i]);
}

/*
 * This function is called to print an error when a bad pte
 * is found. For example, we might have a PFN-mapped pte in
 * a region that doesn't allow it.
 *
 * The calling function must still handle the error.
 */
static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
			  pte_t pte, struct page *page)
{
	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd = pmd_offset(pud, addr);
	struct address_space *mapping;
	pgoff_t index;
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			return;
		}
		if (nr_unshown) {
			pr_alert("BUG: Bad page map: %lu messages suppressed\n",
				 nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
	index = linear_page_index(vma, addr);

	pr_alert("BUG: Bad page map in process %s pte:%08llx pmd:%08llx\n",
		 current->comm,
		 (long long)pte_val(pte), (long long)pmd_val(*pmd));
	if (page)
		dump_page(page, "bad pte");
	pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
		 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
	pr_alert("file:%pD fault:%ps mmap:%ps read_folio:%ps\n",
		 vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->fault : NULL,
		 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
		 mapping ? mapping->a_ops->read_folio : NULL);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

/*
 * vm_normal_page -- This function gets the "struct page" associated with a pte.
 *
 * "Special" mappings do not wish to be associated with a "struct page" (either
 * it doesn't exist, or it exists but they don't want to touch it). In this
 * case, NULL is returned here. "Normal" mappings do have a struct page.
 *
 * There are 2 broad cases. Firstly, an architecture may define a pte_special()
 * pte bit, in which case this function is trivial. Secondly, an architecture
 * may not have a spare pte bit, which requires a more complicated scheme,
 * described below.
 *
 * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
 * special mapping (even if there are underlying and valid "struct pages").
 * COWed pages of a VM_PFNMAP are always normal.
 *
 * The way we recognize COWed pages within VM_PFNMAP mappings is through the
 * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
 * set, and the vm_pgoff will point to the first PFN mapped: thus every special
 * mapping will always honor the rule
 *
 *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 *
 * And for normal mappings this is false.
 *
 * This restricts such mappings to be a linear translation from virtual address
 * to pfn. To get around this restriction, we allow arbitrary mappings so long
 * as the vma is not a COW mapping; in that case, we know that all ptes are
 * special (because none can have been COWed).
 *
 *
 * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
 *
 * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
 * page" backing, however the difference is that _all_ pages with a struct
 * page (that is, those where pfn_valid is true) are refcounted and considered
 * normal pages by the VM. The only exceptions are zeropages, which are
 * *never* refcounted.
 *
 * The disadvantage is that pages are refcounted (which can be slower and
 * simply not an option for some PFNMAP users). The advantage is that we
 * don't have to follow the strict linearity rule of PFNMAP mappings in
 * order to support COWable mappings.
 *
 */
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
			    pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
		if (likely(!pte_special(pte)))
			goto check_pfn;
		if (vma->vm_ops && vma->vm_ops->find_special_page)
			return vma->vm_ops->find_special_page(vma, addr);
		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
			return NULL;
		if (is_zero_pfn(pfn))
			return NULL;
		if (pte_devmap(pte))
			/*
			 * NOTE: New users of ZONE_DEVICE will not set pte_devmap()
			 * and will have refcounts incremented on their struct pages
			 * when they are inserted into PTEs, thus they are safe to
			 * return here. Legacy ZONE_DEVICE pages that set pte_devmap()
			 * do not have refcounts. Example of legacy ZONE_DEVICE is
			 * MEMORY_DEVICE_FS_DAX type in pmem or virtio_fs drivers.
			 */
			return NULL;

		print_bad_pte(vma, addr, pte, NULL);
		return NULL;
	}

	/* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */

	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
		if (vma->vm_flags & VM_MIXEDMAP) {
			if (!pfn_valid(pfn))
				return NULL;
			if (is_zero_pfn(pfn))
				return NULL;
			goto out;
		} else {
			unsigned long off;
			off = (addr - vma->vm_start) >> PAGE_SHIFT;
			if (pfn == vma->vm_pgoff + off)
				return NULL;
			if (!is_cow_mapping(vma->vm_flags))
				return NULL;
		}
	}

	if (is_zero_pfn(pfn))
		return NULL;

check_pfn:
	if (unlikely(pfn > highest_memmap_pfn)) {
		print_bad_pte(vma, addr, pte, NULL);
		return NULL;
	}

	/*
	 * NOTE! We still have PageReserved() pages in the page tables.
	 * eg. VDSO mappings can cause them to exist.
	 */
out:
	VM_WARN_ON_ONCE(is_zero_pfn(pfn));
	return pfn_to_page(pfn);
}

struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
			      pte_t pte)
{
	struct page *page = vm_normal_page(vma, addr, pte);

	if (page)
		return page_folio(page);
	return NULL;
}

#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
				pmd_t pmd)
{
	unsigned long pfn = pmd_pfn(pmd);

	/* Currently it's only used for huge pfnmaps */
	if (unlikely(pmd_special(pmd)))
		return NULL;

	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
		if (vma->vm_flags & VM_MIXEDMAP) {
			if (!pfn_valid(pfn))
				return NULL;
			goto out;
		} else {
			unsigned long off;
			off = (addr - vma->vm_start) >> PAGE_SHIFT;
			if (pfn == vma->vm_pgoff + off)
				return NULL;
			if (!is_cow_mapping(vma->vm_flags))
				return NULL;
		}
	}

	if (pmd_devmap(pmd))
		return NULL;
	if (is_huge_zero_pmd(pmd))
		return NULL;
	if (unlikely(pfn > highest_memmap_pfn))
		return NULL;

	/*
	 * NOTE! We still have PageReserved() pages in the page tables.
	 * eg. VDSO mappings can cause them to exist.
	 */
out:
	return pfn_to_page(pfn);
}

struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
				  unsigned long addr, pmd_t pmd)
{
	struct page *page = vm_normal_page_pmd(vma, addr, pmd);

	if (page)
		return page_folio(page);
	return NULL;
}
#endif

static void restore_exclusive_pte(struct vm_area_struct *vma,
				  struct page *page, unsigned long address,
				  pte_t *ptep)
{
	struct folio *folio = page_folio(page);
	pte_t orig_pte;
	pte_t pte;
	swp_entry_t entry;

	orig_pte = ptep_get(ptep);
	pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));
	if (pte_swp_soft_dirty(orig_pte))
		pte = pte_mksoft_dirty(pte);

	entry = pte_to_swp_entry(orig_pte);
	if (pte_swp_uffd_wp(orig_pte))
		pte = pte_mkuffd_wp(pte);
	else if (is_writable_device_exclusive_entry(entry))
		pte = maybe_mkwrite(pte_mkdirty(pte), vma);

	VM_BUG_ON_FOLIO(pte_write(pte) && (!folio_test_anon(folio) &&
					   PageAnonExclusive(page)), folio);

	/*
	 * No need to take a page reference as one was already
	 * created when the swap entry was made.
	 */
	if (folio_test_anon(folio))
		folio_add_anon_rmap_pte(folio, page, vma, address, RMAP_NONE);
	else
		/*
		 * Currently device exclusive access only supports anonymous
		 * memory so the entry shouldn't point to a filebacked page.
		 */
		WARN_ON_ONCE(1);

	set_pte_at(vma->vm_mm, address, ptep, pte);

	/*
	 * No need to invalidate - it was non-present before. However
	 * secondary CPUs may have mappings that need invalidating.
	 */
	update_mmu_cache(vma, address, ptep);
}

/*
 * Tries to restore an exclusive pte if the page lock can be acquired without
 * sleeping.
 */
static int
try_restore_exclusive_pte(pte_t *src_pte, struct vm_area_struct *vma,
			  unsigned long addr)
{
	swp_entry_t entry = pte_to_swp_entry(ptep_get(src_pte));
	struct page *page = pfn_swap_entry_to_page(entry);

	if (trylock_page(page)) {
		restore_exclusive_pte(vma, page, addr, src_pte);
		unlock_page(page);
		return 0;
	}

	return -EBUSY;
}

/*
 * copy one vm_area from one task to the other. Assumes the page tables
 * already present in the new task are cleared in the whole range
 * covered by this vma.
 */

static unsigned long
copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,
		struct vm_area_struct *src_vma, unsigned long addr, int *rss)
{
	unsigned long vm_flags = dst_vma->vm_flags;
	pte_t orig_pte = ptep_get(src_pte);
	pte_t pte = orig_pte;
	struct folio *folio;
	struct page *page;
	swp_entry_t entry = pte_to_swp_entry(orig_pte);

	if (likely(!non_swap_entry(entry))) {
		if (swap_duplicate(entry) < 0)
			return -EIO;

		/* make sure dst_mm is on swapoff's mmlist. */
		if (unlikely(list_empty(&dst_mm->mmlist))) {
			spin_lock(&mmlist_lock);
			if (list_empty(&dst_mm->mmlist))
				list_add(&dst_mm->mmlist,
					 &src_mm->mmlist);
			spin_unlock(&mmlist_lock);
		}
		/* Mark the swap entry as shared. */
		if (pte_swp_exclusive(orig_pte)) {
			pte = pte_swp_clear_exclusive(orig_pte);
			set_pte_at(src_mm, addr, src_pte, pte);
		}
		rss[MM_SWAPENTS]++;
	} else if (is_migration_entry(entry)) {
		folio = pfn_swap_entry_folio(entry);

		rss[mm_counter(folio)]++;

		if (!is_readable_migration_entry(entry) &&
		    is_cow_mapping(vm_flags)) {
826 /*
827 * COW mappings require pages in both parent and child
828 * to be set to read. A previously exclusive entry is
829 * now shared.
830 */
			entry = make_readable_migration_entry(
							swp_offset(entry));
			pte = swp_entry_to_pte(entry);
			if (pte_swp_soft_dirty(orig_pte))
				pte = pte_swp_mksoft_dirty(pte);
			if (pte_swp_uffd_wp(orig_pte))
				pte = pte_swp_mkuffd_wp(pte);
			set_pte_at(src_mm, addr, src_pte, pte);
		}
	} else if (is_device_private_entry(entry)) {
		page = pfn_swap_entry_to_page(entry);
		folio = page_folio(page);

		/*
		 * Update rss count even for unaddressable pages, as
		 * they should be treated just like normal pages in this
		 * respect.
		 *
		 * We will likely want to have some new rss counters
		 * for unaddressable pages, at some point. But for now
		 * keep things as they are.
		 */
		folio_get(folio);
		rss[mm_counter(folio)]++;
		/* Cannot fail as these pages cannot get pinned. */
		folio_try_dup_anon_rmap_pte(folio, page, src_vma);

		/*
		 * We do not preserve soft-dirty information, because so
		 * far, checkpoint/restore is the only feature that
		 * requires that. And checkpoint/restore does not work
		 * when a device driver is involved (you cannot easily
		 * save and restore device driver state).
		 */
		if (is_writable_device_private_entry(entry) &&
		    is_cow_mapping(vm_flags)) {
			entry = make_readable_device_private_entry(
							swp_offset(entry));
			pte = swp_entry_to_pte(entry);
			if (pte_swp_uffd_wp(orig_pte))
				pte = pte_swp_mkuffd_wp(pte);
			set_pte_at(src_mm, addr, src_pte, pte);
		}
	} else if (is_device_exclusive_entry(entry)) {
		/*
		 * Make device exclusive entries present by restoring the
		 * original entry then copying as for a present pte. Device
		 * exclusive entries currently only support private writable
		 * (ie. COW) mappings.
		 */
		VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags));
		if (try_restore_exclusive_pte(src_pte, src_vma, addr))
			return -EBUSY;
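		/*
		 * The entry is now a present pte: returning -ENOENT tells
		 * the caller to copy it as one (see copy_pte_range()).
		 */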
		return -ENOENT;
	} else if (is_pte_marker_entry(entry)) {
		pte_marker marker = copy_pte_marker(entry, dst_vma);

		if (marker)
			set_pte_at(dst_mm, addr, dst_pte,
				   make_pte_marker(marker));
		return 0;
	}
	if (!userfaultfd_wp(dst_vma))
		pte = pte_swp_clear_uffd_wp(pte);
	set_pte_at(dst_mm, addr, dst_pte, pte);
	return 0;
}

/*
 * Copy a present and normal page.
 *
 * NOTE! The usual case is that this isn't required;
 * instead, the caller can just increase the page refcount
 * and re-use the pte the traditional way.
 *
 * And if we need a pre-allocated page but don't yet have
 * one, return a negative error to let the preallocation
 * code know so that it can do so outside the page table
 * lock.
 */
static inline int
copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
		  pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
		  struct folio **prealloc, struct page *page)
{
	struct folio *new_folio;
	pte_t pte;

	new_folio = *prealloc;
	if (!new_folio)
		return -EAGAIN;

	/*
	 * We have a prealloc page, all good!  Take it
	 * over and copy the page & arm it.
	 */

	if (copy_mc_user_highpage(&new_folio->page, page, addr, src_vma))
		return -EHWPOISON;

	*prealloc = NULL;
	__folio_mark_uptodate(new_folio);
	folio_add_new_anon_rmap(new_folio, dst_vma, addr, RMAP_EXCLUSIVE);
	folio_add_lru_vma(new_folio, dst_vma);
	rss[MM_ANONPAGES]++;

	/* All done, just insert the new page copy in the child */
	pte = mk_pte(&new_folio->page, dst_vma->vm_page_prot);
	pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
	if (userfaultfd_pte_wp(dst_vma, ptep_get(src_pte)))
		/* Uffd-wp needs to be delivered to dest pte as well */
		pte = pte_mkuffd_wp(pte);
	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
	return 0;
}

static __always_inline void __copy_present_ptes(struct vm_area_struct *dst_vma,
		struct vm_area_struct *src_vma, pte_t *dst_pte, pte_t *src_pte,
		pte_t pte, unsigned long addr, int nr)
{
	struct mm_struct *src_mm = src_vma->vm_mm;

	/* If it's a COW mapping, write protect it in both processes. */
	if (is_cow_mapping(src_vma->vm_flags) && pte_write(pte)) {
		wrprotect_ptes(src_mm, addr, src_pte, nr);
		pte = pte_wrprotect(pte);
	}

	/* If it's a shared mapping, mark it clean in the child. */
	if (src_vma->vm_flags & VM_SHARED)
		pte = pte_mkclean(pte);
	pte = pte_mkold(pte);

	if (!userfaultfd_wp(dst_vma))
		pte = pte_clear_uffd_wp(pte);

	set_ptes(dst_vma->vm_mm, addr, dst_pte, pte, nr);
}

/*
 * Copy one present PTE, trying to batch-process subsequent PTEs that map
 * consecutive pages of the same folio by copying them as well.
 *
 * Returns -EAGAIN if one preallocated page is required to copy the next PTE.
 * Otherwise, returns the number of copied PTEs (at least 1).
 */
static inline int
copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
		  pte_t *dst_pte, pte_t *src_pte, pte_t pte, unsigned long addr,
		  int max_nr, int *rss, struct folio **prealloc)
{
	struct page *page;
	struct folio *folio;
	bool any_writable;
	fpb_t flags = 0;
	int err, nr;

	page = vm_normal_page(src_vma, addr, pte);
	if (unlikely(!page))
		goto copy_pte;

	folio = page_folio(page);

	/*
	 * If we likely have to copy, just don't bother with batching. Make
	 * sure that the common "small folio" case is as fast as possible
	 * by keeping the batching logic separate.
	 */
	if (unlikely(!*prealloc && folio_test_large(folio) && max_nr != 1)) {
		if (src_vma->vm_flags & VM_SHARED)
			flags |= FPB_IGNORE_DIRTY;
		if (!vma_soft_dirty_enabled(src_vma))
			flags |= FPB_IGNORE_SOFT_DIRTY;

		nr = folio_pte_batch(folio, addr, src_pte, pte, max_nr, flags,
				     &any_writable, NULL, NULL);
		folio_ref_add(folio, nr);
		if (folio_test_anon(folio)) {
			if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page,
								  nr, src_vma))) {
				folio_ref_sub(folio, nr);
				return -EAGAIN;
			}
			rss[MM_ANONPAGES] += nr;
			VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio);
		} else {
			folio_dup_file_rmap_ptes(folio, page, nr);
			rss[mm_counter_file(folio)] += nr;
		}
		if (any_writable)
			pte = pte_mkwrite(pte, src_vma);
		__copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte,
				    addr, nr);
		return nr;
	}

	folio_get(folio);
	if (folio_test_anon(folio)) {
		/*
		 * If this page may have been pinned by the parent process,
		 * copy the page immediately for the child so that we'll always
		 * guarantee the pinned page won't be randomly replaced in the
		 * future.
		 */
		if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, src_vma))) {
			/* Page may be pinned, we have to copy. */
			folio_put(folio);
			err = copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
						addr, rss, prealloc, page);
			return err ? err : 1;
		}
		rss[MM_ANONPAGES]++;
		VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio);
	} else {
		folio_dup_file_rmap_pte(folio, page);
		rss[mm_counter_file(folio)]++;
	}

copy_pte:
	__copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte, addr, 1);
	return 1;
}

static inline struct folio *folio_prealloc(struct mm_struct *src_mm,
		struct vm_area_struct *vma, unsigned long addr, bool need_zero)
{
	struct folio *new_folio;

	if (need_zero)
		new_folio = vma_alloc_zeroed_movable_folio(vma, addr);
	else
		new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr);

	if (!new_folio)
		return NULL;

	if (mem_cgroup_charge(new_folio, src_mm, GFP_KERNEL)) {
		folio_put(new_folio);
		return NULL;
	}
	folio_throttle_swaprate(new_folio, GFP_KERNEL);

	return new_folio;
}

static int
copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	pte_t *orig_src_pte, *orig_dst_pte;
	pte_t *src_pte, *dst_pte;
	pmd_t dummy_pmdval;
	pte_t ptent;
	spinlock_t *src_ptl, *dst_ptl;
	int progress, max_nr, ret = 0;
	int rss[NR_MM_COUNTERS];
	swp_entry_t entry = (swp_entry_t){0};
	struct folio *prealloc = NULL;
	int nr;

again:
	progress = 0;
	init_rss_vec(rss);

	/*
	 * copy_pmd_range()'s prior pmd_none_or_clear_bad(src_pmd), and the
	 * error handling here, assume that exclusive mmap_lock on dst and src
	 * protects anon from unexpected THP transitions; with shmem and file
	 * protected by mmap_lock-less collapse skipping areas with anon_vma
	 * (whereas vma_needs_copy() skips areas without anon_vma).  A rework
	 * can remove such assumptions later, but this is good enough for now.
	 */
	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
	if (!dst_pte) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * We already hold the exclusive mmap_lock, the copy_pte_range() and
	 * retract_page_tables() are using vma->anon_vma to be exclusive, so
	 * the PTE page is stable, and there is no need to get pmdval and do
	 * pmd_same() check.
	 */
	src_pte = pte_offset_map_rw_nolock(src_mm, src_pmd, addr, &dummy_pmdval,
					   &src_ptl);
	if (!src_pte) {
		pte_unmap_unlock(dst_pte, dst_ptl);
		/* ret == 0 */
		goto out;
	}
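	/*
	 * Both ptls are page-table locks of the same lock class, so
	 * annotate the nesting to keep lockdep quiet.
	 */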
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
	orig_src_pte = src_pte;
	orig_dst_pte = dst_pte;
	arch_enter_lazy_mmu_mode();

	do {
		nr = 1;

		/*
		 * We are holding two locks at this point - either of them
		 * could generate latencies in another task on another CPU.
		 */
		if (progress >= 32) {
			progress = 0;
			if (need_resched() ||
			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
				break;
		}
		ptent = ptep_get(src_pte);
		if (pte_none(ptent)) {
			progress++;
			continue;
		}
		if (unlikely(!pte_present(ptent))) {
			ret = copy_nonpresent_pte(dst_mm, src_mm,
						  dst_pte, src_pte,
						  dst_vma, src_vma,
						  addr, rss);
			if (ret == -EIO) {
				entry = pte_to_swp_entry(ptep_get(src_pte));
				break;
			} else if (ret == -EBUSY) {
				break;
			} else if (!ret) {
				progress += 8;
				continue;
			}
			ptent = ptep_get(src_pte);
			VM_WARN_ON_ONCE(!pte_present(ptent));

			/*
			 * Device exclusive entry restored, continue by copying
			 * the now present pte.
			 */
			WARN_ON_ONCE(ret != -ENOENT);
		}
		/* copy_present_ptes() will clear `*prealloc' if consumed */
		max_nr = (end - addr) / PAGE_SIZE;
		ret = copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte,
					ptent, addr, max_nr, rss, &prealloc);
		/*
		 * If we need a pre-allocated page for this pte, drop the
		 * locks, allocate, and try again.
		 * If copy failed due to hwpoison in source page, break out.
		 */
		if (unlikely(ret == -EAGAIN || ret == -EHWPOISON))
			break;
		if (unlikely(prealloc)) {
			/*
			 * The preallocated folio cannot be reused for the next
			 * address, since allocation must strictly follow the
			 * mempolicy for that address (e.g. alloc_page_vma()
			 * allocates according to address). This can only
			 * happen when a pinned pte changed under us.
			 */
			folio_put(prealloc);
			prealloc = NULL;
		}
		nr = ret;
		progress += 8 * nr;
	} while (dst_pte += nr, src_pte += nr, addr += PAGE_SIZE * nr,
		 addr != end);

	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_src_pte, src_ptl);
	add_mm_rss_vec(dst_mm, rss);
	pte_unmap_unlock(orig_dst_pte, dst_ptl);
	cond_resched();

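	/*
	 * Resolve the reason the copy loop stopped: -EIO needs a swap count
	 * continuation, -EBUSY failed to lock a device exclusive page,
	 * -EHWPOISON hit a poisoned source page, and -EAGAIN needs a folio
	 * preallocated before retrying.
	 */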
	if (ret == -EIO) {
		VM_WARN_ON_ONCE(!entry.val);
		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) {
			ret = -ENOMEM;
			goto out;
		}
		entry.val = 0;
	} else if (ret == -EBUSY || unlikely(ret == -EHWPOISON)) {
		goto out;
	} else if (ret == -EAGAIN) {
		prealloc = folio_prealloc(src_mm, src_vma, addr, false);
		if (!prealloc)
			return -ENOMEM;
	} else if (ret < 0) {
		VM_WARN_ON_ONCE(1);
	}

	/* We've captured and resolved the error. Reset, try again. */
	ret = 0;

	if (addr != end)
		goto again;
out:
	if (unlikely(prealloc))
		folio_put(prealloc);
	return ret;
}

static inline int
copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	pmd_t *src_pmd, *dst_pmd;
	unsigned long next;

	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
	if (!dst_pmd)
		return -ENOMEM;
	src_pmd = pmd_offset(src_pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
			|| pmd_devmap(*src_pmd)) {
			int err;
			VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);
			err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd,
					    addr, dst_vma, src_vma);
			if (err == -ENOMEM)
				return -ENOMEM;
			if (!err)
				continue;
			/* fall through */
		}
		if (pmd_none_or_clear_bad(src_pmd))
			continue;
		if (copy_pte_range(dst_vma, src_vma, dst_pmd, src_pmd,
				   addr, next))
			return -ENOMEM;
	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
	return 0;
}

static inline int
copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       p4d_t *dst_p4d, p4d_t *src_p4d, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	pud_t *src_pud, *dst_pud;
	unsigned long next;

	dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
	if (!dst_pud)
		return -ENOMEM;
	src_pud = pud_offset(src_p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
			int err;

			VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma);
			err = copy_huge_pud(dst_mm, src_mm,
					    dst_pud, src_pud, addr, src_vma);
			if (err == -ENOMEM)
				return -ENOMEM;
			if (!err)
				continue;
			/* fall through */
		}
		if (pud_none_or_clear_bad(src_pud))
			continue;
		if (copy_pmd_range(dst_vma, src_vma, dst_pud, src_pud,
				   addr, next))
			return -ENOMEM;
	} while (dst_pud++, src_pud++, addr = next, addr != end);
	return 0;
}

static inline int
copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	p4d_t *src_p4d, *dst_p4d;
	unsigned long next;

	dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
	if (!dst_p4d)
		return -ENOMEM;
	src_p4d = p4d_offset(src_pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(src_p4d))
			continue;
		if (copy_pud_range(dst_vma, src_vma, dst_p4d, src_p4d,
				   addr, next))
			return -ENOMEM;
	} while (dst_p4d++, src_p4d++, addr = next, addr != end);
	return 0;
}

/*
 * Return true if the vma needs to copy the pgtable during this fork().  Return
 * false when we can speed up fork() by allowing lazy page faults later, until
 * the child accesses the memory range.
 */
static bool
vma_needs_copy(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
	/*
	 * Always copy pgtables when dst_vma has uffd-wp enabled even if it's
	 * file-backed (e.g. shmem). Because when uffd-wp is enabled, the
	 * pgtable contains uffd-wp protection information that we can't
	 * retrieve from the page cache, and skipping the copy would lose it.
	 */
	if (userfaultfd_wp(dst_vma))
		return true;

	if (src_vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
		return true;

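	/*
	 * Anonymous pages exist only in the parent's page tables; they
	 * cannot be refaulted from the page cache, so copy them up front.
	 */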
	if (src_vma->anon_vma)
		return true;

	/*
	 * Don't copy ptes where a page fault will fill them correctly.  Fork
	 * becomes much lighter when there are big shared or private readonly
	 * mappings. The tradeoff is that copy_page_range is more efficient
	 * than faulting.
	 */
	return false;
}

int
copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
	pgd_t *src_pgd, *dst_pgd;
	unsigned long addr = src_vma->vm_start;
	unsigned long end = src_vma->vm_end;
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	struct mmu_notifier_range range;
	unsigned long next, pfn;
	bool is_cow;
	int ret;

	if (!vma_needs_copy(dst_vma, src_vma))
		return 0;

	if (is_vm_hugetlb_page(src_vma))
		return copy_hugetlb_page_range(dst_mm, src_mm, dst_vma, src_vma);

	if (unlikely(src_vma->vm_flags & VM_PFNMAP)) {
		ret = track_pfn_copy(dst_vma, src_vma, &pfn);
		if (ret)
			return ret;
	}
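	/*
	 * (On x86, track_pfn_copy() duplicates the PAT memtype reservation
	 * for the child; on most other architectures it is a no-op.)
	 */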

	/*
	 * We need to invalidate the secondary MMU mappings only when
	 * there could be a permission downgrade on the ptes of the
	 * parent mm. And a permission downgrade will only happen if
	 * is_cow_mapping() returns true.
	 */
	is_cow = is_cow_mapping(src_vma->vm_flags);

	if (is_cow) {
		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
					0, src_mm, addr, end);
		mmu_notifier_invalidate_range_start(&range);
		/*
		 * Disabling preemption is not needed for the write side, as
		 * the read side doesn't spin, but goes to the mmap_lock.
		 *
		 * Use the raw variant of the seqcount_t write API to avoid
		 * lockdep complaining about preemptibility.
		 */
		vma_assert_write_locked(src_vma);
		raw_write_seqcount_begin(&src_mm->write_protect_seq);
	}

	ret = 0;
	dst_pgd = pgd_offset(dst_mm, addr);
	src_pgd = pgd_offset(src_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(src_pgd))
			continue;
		if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd,
					    addr, next))) {
			ret = -ENOMEM;
			break;
		}
	} while (dst_pgd++, src_pgd++, addr = next, addr != end);

	if (is_cow) {
		raw_write_seqcount_end(&src_mm->write_protect_seq);
		mmu_notifier_invalidate_range_end(&range);
	}
	if (ret && unlikely(src_vma->vm_flags & VM_PFNMAP))
		untrack_pfn_copy(dst_vma, pfn);
	return ret;
}

/* Whether we should zap all COWed (private) pages too */
static inline bool should_zap_cows(struct zap_details *details)
{
	/* By default, zap all pages */
	if (!details || details->reclaim_pt)
		return true;

	/* Or, we zap COWed pages only if the caller wants to */
	return details->even_cows;
}

/* Decides whether we should zap this folio with the folio pointer specified */
static inline bool should_zap_folio(struct zap_details *details,
				    struct folio *folio)
{
	/* If we can make a decision without *folio.. */
	if (should_zap_cows(details))
		return true;

	/* Otherwise we should only zap non-anon folios */
	return !folio_test_anon(folio);
}

static inline bool zap_drop_markers(struct zap_details *details)
{
	if (!details)
		return false;

	return details->zap_flags & ZAP_FLAG_DROP_MARKER;
}

/*
 * This function makes sure that we'll replace the none pte with an uffd-wp
 * swap special pte marker when necessary. Must be called with the pgtable
 * lock held.
 *
 * Returns true if uffd-wp ptes were installed, false otherwise.
 */
static inline bool
zap_install_uffd_wp_if_needed(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *pte, int nr,
			      struct zap_details *details, pte_t pteval)
{
	bool was_installed = false;

#ifdef CONFIG_PTE_MARKER_UFFD_WP
	/* Zap on anonymous always means dropping everything */
	if (vma_is_anonymous(vma))
		return false;

	if (zap_drop_markers(details))
		return false;

	for (;;) {
		/* the PFN in the PTE is irrelevant. */
		if (pte_install_uffd_wp_if_needed(vma, addr, pte, pteval))
			was_installed = true;
		if (--nr == 0)
			break;
		pte++;
		addr += PAGE_SIZE;
	}
#endif
	return was_installed;
}

static __always_inline void zap_present_folio_ptes(struct mmu_gather *tlb,
		struct vm_area_struct *vma, struct folio *folio,
		struct page *page, pte_t *pte, pte_t ptent, unsigned int nr,
		unsigned long addr, struct zap_details *details, int *rss,
		bool *force_flush, bool *force_break, bool *any_skipped)
{
	struct mm_struct *mm = tlb->mm;
	bool delay_rmap = false;

	if (!folio_test_anon(folio)) {
		ptent = get_and_clear_full_ptes(mm, addr, pte, nr, tlb->fullmm);
		if (pte_dirty(ptent)) {
			folio_mark_dirty(folio);
			if (tlb_delay_rmap(tlb)) {
				delay_rmap = true;
				*force_flush = true;
			}
		}
		if (pte_young(ptent) && likely(vma_has_recency(vma)))
			folio_mark_accessed(folio);
		rss[mm_counter(folio)] -= nr;
	} else {
		/* We don't need up-to-date accessed/dirty bits. */
		clear_full_ptes(mm, addr, pte, nr, tlb->fullmm);
		rss[MM_ANONPAGES] -= nr;
	}
	/* Checking a single PTE in a batch is sufficient. */
	arch_check_zapped_pte(vma, ptent);
	tlb_remove_tlb_entries(tlb, pte, nr, addr);
	if (unlikely(userfaultfd_pte_wp(vma, ptent)))
		*any_skipped = zap_install_uffd_wp_if_needed(vma, addr, pte,
							     nr, details, ptent);

	if (!delay_rmap) {
		folio_remove_rmap_ptes(folio, page, nr, vma);

		if (unlikely(folio_mapcount(folio) < 0))
			print_bad_pte(vma, addr, ptent, page);
	}
	if (unlikely(__tlb_remove_folio_pages(tlb, page, nr, delay_rmap))) {
		*force_flush = true;
		*force_break = true;
	}
}

/*
 * Zap or skip at least one present PTE, trying to batch-process subsequent
 * PTEs that map consecutive pages of the same folio.
 *
 * Returns the number of processed (skipped or zapped) PTEs (at least 1).
 */
static inline int zap_present_ptes(struct mmu_gather *tlb,
		struct vm_area_struct *vma, pte_t *pte, pte_t ptent,
		unsigned int max_nr, unsigned long addr,
		struct zap_details *details, int *rss, bool *force_flush,
		bool *force_break, bool *any_skipped)
{
	const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
	struct mm_struct *mm = tlb->mm;
	struct folio *folio;
	struct page *page;
	int nr;

	page = vm_normal_page(vma, addr, ptent);
	if (!page) {
		/* We don't need up-to-date accessed/dirty bits. */
		ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
		arch_check_zapped_pte(vma, ptent);
		tlb_remove_tlb_entry(tlb, pte, addr);
		if (userfaultfd_pte_wp(vma, ptent))
			*any_skipped = zap_install_uffd_wp_if_needed(vma, addr,
						pte, 1, details, ptent);
		ksm_might_unmap_zero_page(mm, ptent);
		return 1;
	}

	folio = page_folio(page);
	if (unlikely(!should_zap_folio(details, folio))) {
		*any_skipped = true;
		return 1;
	}

	/*
	 * Make sure that the common "small folio" case is as fast as possible
	 * by keeping the batching logic separate.
	 */
	if (unlikely(folio_test_large(folio) && max_nr != 1)) {
		nr = folio_pte_batch(folio, addr, pte, ptent, max_nr, fpb_flags,
				     NULL, NULL, NULL);

		zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, nr,
				       addr, details, rss, force_flush,
				       force_break, any_skipped);
		return nr;
	}
	zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, 1, addr,
			       details, rss, force_flush, force_break, any_skipped);
	return 1;
}

static inline int zap_nonpresent_ptes(struct mmu_gather *tlb,
		struct vm_area_struct *vma, pte_t *pte, pte_t ptent,
		unsigned int max_nr, unsigned long addr,
		struct zap_details *details, int *rss, bool *any_skipped)
{
	swp_entry_t entry;
	int nr = 1;

	*any_skipped = true;
	entry = pte_to_swp_entry(ptent);
	if (is_device_private_entry(entry) ||
	    is_device_exclusive_entry(entry)) {
		struct page *page = pfn_swap_entry_to_page(entry);
		struct folio *folio = page_folio(page);

		if (unlikely(!should_zap_folio(details, folio)))
			return 1;
		/*
		 * Both device private/exclusive mappings should only
		 * work with anonymous pages so far, so we don't need to
		 * consider the uffd-wp bit when zapping. For more
		 * information, see zap_install_uffd_wp_if_needed().
		 */
		WARN_ON_ONCE(!vma_is_anonymous(vma));
		rss[mm_counter(folio)]--;
		if (is_device_private_entry(entry))
			folio_remove_rmap_pte(folio, page, vma);
		folio_put(folio);
	} else if (!non_swap_entry(entry)) {
		/* Genuine swap entries, hence private anon pages */
		if (!should_zap_cows(details))
			return 1;

		nr = swap_pte_batch(pte, max_nr, ptent);
		rss[MM_SWAPENTS] -= nr;
		free_swap_and_cache_nr(entry, nr);
	} else if (is_migration_entry(entry)) {
		struct folio *folio = pfn_swap_entry_folio(entry);

		if (!should_zap_folio(details, folio))
			return 1;
		rss[mm_counter(folio)]--;
	} else if (pte_marker_entry_uffd_wp(entry)) {
		/*
		 * For anon: always drop the marker; for file: only
		 * drop the marker if explicitly requested.
		 */
		if (!vma_is_anonymous(vma) && !zap_drop_markers(details))
			return 1;
	} else if (is_guard_swp_entry(entry)) {
		/*
		 * Ordinary zapping should not remove guard PTE
		 * markers. Only do so if we should remove PTE markers
		 * in general.
		 */
		if (!zap_drop_markers(details))
			return 1;
	} else if (is_hwpoison_entry(entry) || is_poisoned_swp_entry(entry)) {
		if (!should_zap_cows(details))
			return 1;
	} else {
		/* We should have covered all the swap entry types */
		pr_alert("unrecognized swap entry 0x%lx\n", entry.val);
		WARN_ON_ONCE(1);
	}
	clear_not_present_full_ptes(vma->vm_mm, addr, pte, nr, tlb->fullmm);
	*any_skipped = zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details, ptent);

	return nr;
}

static inline int do_zap_pte_range(struct mmu_gather *tlb,
				   struct vm_area_struct *vma, pte_t *pte,
				   unsigned long addr, unsigned long end,
				   struct zap_details *details, int *rss,
				   bool *force_flush, bool *force_break,
				   bool *any_skipped)
{
	pte_t ptent = ptep_get(pte);
	int max_nr = (end - addr) / PAGE_SIZE;
	int nr = 0;

	/* Skip all consecutive none ptes */
	if (pte_none(ptent)) {
		for (nr = 1; nr < max_nr; nr++) {
			ptent = ptep_get(pte + nr);
			if (!pte_none(ptent))
				break;
		}
		max_nr -= nr;
		if (!max_nr)
			return nr;
		pte += nr;
		addr += nr * PAGE_SIZE;
	}

	if (pte_present(ptent))
		nr += zap_present_ptes(tlb, vma, pte, ptent, max_nr, addr,
				       details, rss, force_flush, force_break,
				       any_skipped);
	else
		nr += zap_nonpresent_ptes(tlb, vma, pte, ptent, max_nr, addr,
					  details, rss, any_skipped);

	return nr;
}

static unsigned long zap_pte_range(struct mmu_gather *tlb,
				   struct vm_area_struct *vma, pmd_t *pmd,
				   unsigned long addr, unsigned long end,
				   struct zap_details *details)
{
	bool force_flush = false, force_break = false;
	struct mm_struct *mm = tlb->mm;
	int rss[NR_MM_COUNTERS];
	spinlock_t *ptl;
	pte_t *start_pte;
	pte_t *pte;
	pmd_t pmdval;
	unsigned long start = addr;
	bool can_reclaim_pt = reclaim_pt_is_enabled(start, end, details);
	bool direct_reclaim = true;
	int nr;
1720
1721 retry:
1722 tlb_change_page_size(tlb, PAGE_SIZE);
1723 init_rss_vec(rss);
1724 start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
1725 if (!pte)
1726 return addr;
1727
1728 flush_tlb_batched_pending(mm);
1729 arch_enter_lazy_mmu_mode();
1730 do {
1731 bool any_skipped = false;
1732
1733 if (need_resched()) {
1734 direct_reclaim = false;
1735 break;
1736 }
1737
1738 nr = do_zap_pte_range(tlb, vma, pte, addr, end, details, rss,
1739 &force_flush, &force_break, &any_skipped);
1740 if (any_skipped)
1741 can_reclaim_pt = false;
1742 if (unlikely(force_break)) {
1743 addr += nr * PAGE_SIZE;
1744 direct_reclaim = false;
1745 break;
1746 }
1747 } while (pte += nr, addr += PAGE_SIZE * nr, addr != end);
1748
1749 /*
1750 * Fast path: try to hold the pmd lock and unmap the PTE page.
1751 *
1752 * If the pte lock was released midway (retry case), or if the attempt
1753 * to hold the pmd lock failed, then we need to recheck all pte entries
1754 * to ensure they are still none, thereby preventing the pte entries
1755 * from being repopulated by another thread.
1756 */
1757 if (can_reclaim_pt && direct_reclaim && addr == end)
1758 direct_reclaim = try_get_and_clear_pmd(mm, pmd, &pmdval);
1759
1760 add_mm_rss_vec(mm, rss);
1761 arch_leave_lazy_mmu_mode();
1762
1763 /* Do the actual TLB flush before dropping ptl */
1764 if (force_flush) {
1765 tlb_flush_mmu_tlbonly(tlb);
1766 tlb_flush_rmaps(tlb, vma);
1767 }
1768 pte_unmap_unlock(start_pte, ptl);
1769
1770 /*
1771 * If we forced a TLB flush (either due to running out of
1772 * batch buffers or because we needed to flush dirty TLB
1773 * entries before releasing the ptl), free the batched
1774 * memory too. Come back again if we didn't do everything.
1775 */
1776 if (force_flush)
1777 tlb_flush_mmu(tlb);
1778
1779 if (addr != end) {
1780 cond_resched();
1781 force_flush = false;
1782 force_break = false;
1783 goto retry;
1784 }
1785
1786 if (can_reclaim_pt) {
1787 if (direct_reclaim)
1788 free_pte(mm, start, tlb, pmdval);
1789 else
1790 try_to_free_pte(mm, pmd, start, tlb);
1791 }
1792
1793 return addr;
1794 }
1795
1796 static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
1797 struct vm_area_struct *vma, pud_t *pud,
1798 unsigned long addr, unsigned long end,
1799 struct zap_details *details)
1800 {
1801 pmd_t *pmd;
1802 unsigned long next;
1803
1804 pmd = pmd_offset(pud, addr);
1805 do {
1806 next = pmd_addr_end(addr, end);
1807 if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
1808 if (next - addr != HPAGE_PMD_SIZE)
1809 __split_huge_pmd(vma, pmd, addr, false, NULL);
1810 else if (zap_huge_pmd(tlb, vma, pmd, addr)) {
1811 addr = next;
1812 continue;
1813 }
1814 /* fall through */
1815 } else if (details && details->single_folio &&
1816 folio_test_pmd_mappable(details->single_folio) &&
1817 next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
1818 spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
1819 /*
1820 * Take and drop THP pmd lock so that we cannot return
1821 * prematurely, while zap_huge_pmd() has cleared *pmd,
1822 * but not yet decremented compound_mapcount().
1823 */
1824 spin_unlock(ptl);
1825 }
1826 if (pmd_none(*pmd)) {
1827 addr = next;
1828 continue;
1829 }
1830 addr = zap_pte_range(tlb, vma, pmd, addr, next, details);
1831 if (addr != next)
1832 pmd--;
1833 } while (pmd++, cond_resched(), addr != end);
1834
1835 return addr;
1836 }
1837
1838 static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
1839 struct vm_area_struct *vma, p4d_t *p4d,
1840 unsigned long addr, unsigned long end,
1841 struct zap_details *details)
1842 {
1843 pud_t *pud;
1844 unsigned long next;
1845
1846 pud = pud_offset(p4d, addr);
1847 do {
1848 next = pud_addr_end(addr, end);
1849 if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
1850 if (next - addr != HPAGE_PUD_SIZE) {
1851 mmap_assert_locked(tlb->mm);
1852 split_huge_pud(vma, pud, addr);
1853 } else if (zap_huge_pud(tlb, vma, pud, addr))
1854 goto next;
1855 /* fall through */
1856 }
1857 if (pud_none_or_clear_bad(pud))
1858 continue;
1859 next = zap_pmd_range(tlb, vma, pud, addr, next, details);
1860 next:
1861 cond_resched();
1862 } while (pud++, addr = next, addr != end);
1863
1864 return addr;
1865 }
1866
1867 static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
1868 struct vm_area_struct *vma, pgd_t *pgd,
1869 unsigned long addr, unsigned long end,
1870 struct zap_details *details)
1871 {
1872 p4d_t *p4d;
1873 unsigned long next;
1874
1875 p4d = p4d_offset(pgd, addr);
1876 do {
1877 next = p4d_addr_end(addr, end);
1878 if (p4d_none_or_clear_bad(p4d))
1879 continue;
1880 next = zap_pud_range(tlb, vma, p4d, addr, next, details);
1881 } while (p4d++, addr = next, addr != end);
1882
1883 return addr;
1884 }
1885
1886 void unmap_page_range(struct mmu_gather *tlb,
1887 struct vm_area_struct *vma,
1888 unsigned long addr, unsigned long end,
1889 struct zap_details *details)
1890 {
1891 pgd_t *pgd;
1892 unsigned long next;
1893
1894 BUG_ON(addr >= end);
1895 tlb_start_vma(tlb, vma);
1896 pgd = pgd_offset(vma->vm_mm, addr);
1897 do {
1898 next = pgd_addr_end(addr, end);
1899 if (pgd_none_or_clear_bad(pgd))
1900 continue;
1901 next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
1902 } while (pgd++, addr = next, addr != end);
1903 tlb_end_vma(tlb, vma);
1904 }
1905
1906
1907 static void unmap_single_vma(struct mmu_gather *tlb,
1908 struct vm_area_struct *vma, unsigned long start_addr,
1909 unsigned long end_addr,
1910 struct zap_details *details, bool mm_wr_locked)
1911 {
1912 unsigned long start = max(vma->vm_start, start_addr);
1913 unsigned long end;
1914
1915 if (start >= vma->vm_end)
1916 return;
1917 end = min(vma->vm_end, end_addr);
1918 if (end <= vma->vm_start)
1919 return;
1920
1921 if (vma->vm_file)
1922 uprobe_munmap(vma, start, end);
1923
1924 if (unlikely(vma->vm_flags & VM_PFNMAP))
1925 untrack_pfn(vma, 0, 0, mm_wr_locked);
1926
1927 if (start != end) {
1928 if (unlikely(is_vm_hugetlb_page(vma))) {
1929 /*
1930 			 * It is undesirable to test vma->vm_file as it
1931 			 * should be non-NULL for a valid hugetlb area.
1932 			 * However, vm_file will be NULL in the error
1933 			 * cleanup path of mmap_region. When the
1934 			 * hugetlbfs ->mmap method fails,
1935 			 * mmap_region() nullifies vma->vm_file
1936 			 * before calling this function to clean up.
1937 			 * Since no pte has actually been set up, it is
1938 			 * safe to do nothing in this case.
1939 */
1940 if (vma->vm_file) {
1941 zap_flags_t zap_flags = details ?
1942 details->zap_flags : 0;
1943 __unmap_hugepage_range(tlb, vma, start, end,
1944 NULL, zap_flags);
1945 }
1946 } else
1947 unmap_page_range(tlb, vma, start, end, details);
1948 }
1949 }
1950
1951 /**
1952 * unmap_vmas - unmap a range of memory covered by a list of vma's
1953 * @tlb: address of the caller's struct mmu_gather
1954 * @mas: the maple state
1955 * @vma: the starting vma
1956 * @start_addr: virtual address at which to start unmapping
1957 * @end_addr: virtual address at which to end unmapping
1958 * @tree_end: The maximum index to check
1959  * @mm_wr_locked: whether the mmap_lock is held for writing
1960 *
1961 * Unmap all pages in the vma list.
1962 *
1963  * Only addresses between @start_addr and @end_addr will be unmapped.
1964 *
1965 * The VMA list must be sorted in ascending virtual address order.
1966 *
1967 * unmap_vmas() assumes that the caller will flush the whole unmapped address
1968 * range after unmap_vmas() returns. So the only responsibility here is to
1969 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
1970 * drops the lock and schedules.
1971 */
1972 void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
1973 struct vm_area_struct *vma, unsigned long start_addr,
1974 unsigned long end_addr, unsigned long tree_end,
1975 bool mm_wr_locked)
1976 {
1977 struct mmu_notifier_range range;
1978 struct zap_details details = {
1979 .zap_flags = ZAP_FLAG_DROP_MARKER | ZAP_FLAG_UNMAP,
1980 /* Careful - we need to zap private pages too! */
1981 .even_cows = true,
1982 };
1983
1984 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm,
1985 start_addr, end_addr);
1986 mmu_notifier_invalidate_range_start(&range);
1987 do {
1988 unsigned long start = start_addr;
1989 unsigned long end = end_addr;
1990 hugetlb_zap_begin(vma, &start, &end);
1991 unmap_single_vma(tlb, vma, start, end, &details,
1992 mm_wr_locked);
1993 hugetlb_zap_end(vma, &details);
1994 vma = mas_find(mas, tree_end - 1);
1995 } while (vma && likely(!xa_is_zero(vma)));
1996 mmu_notifier_invalidate_range_end(&range);
1997 }
1998
1999 /**
2000 * zap_page_range_single - remove user pages in a given range
2001 * @vma: vm_area_struct holding the applicable pages
2002 * @address: starting address of pages to zap
2003 * @size: number of bytes to zap
2004 * @details: details of shared cache invalidation
2005 *
2006 * The range must fit into one VMA.
2007 */
2008 void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
2009 unsigned long size, struct zap_details *details)
2010 {
2011 const unsigned long end = address + size;
2012 struct mmu_notifier_range range;
2013 struct mmu_gather tlb;
2014
2015 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
2016 address, end);
2017 hugetlb_zap_begin(vma, &range.start, &range.end);
2018 tlb_gather_mmu(&tlb, vma->vm_mm);
2019 update_hiwater_rss(vma->vm_mm);
2020 mmu_notifier_invalidate_range_start(&range);
2021 /*
2022 * unmap 'address-end' not 'range.start-range.end' as range
2023 * could have been expanded for hugetlb pmd sharing.
2024 */
2025 unmap_single_vma(&tlb, vma, address, end, details, false);
2026 mmu_notifier_invalidate_range_end(&range);
2027 tlb_finish_mmu(&tlb);
2028 hugetlb_zap_end(vma, details);
2029 }
2030
2031 /**
2032 * zap_vma_ptes - remove ptes mapping the vma
2033 * @vma: vm_area_struct holding ptes to be zapped
2034 * @address: starting address of pages to zap
2035 * @size: number of bytes to zap
2036 *
2037 * This function only unmaps ptes assigned to VM_PFNMAP vmas.
2038 *
2039 * The entire address range must be fully contained within the vma.
2040 *
2041 */
2042 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
2043 unsigned long size)
2044 {
2045 if (!range_in_vma(vma, address, address + size) ||
2046 !(vma->vm_flags & VM_PFNMAP))
2047 return;
2048
2049 zap_page_range_single(vma, address, size, NULL);
2050 }
2051 EXPORT_SYMBOL_GPL(zap_vma_ptes);
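
/*
 * Example (illustrative sketch, not used by this file): a driver that set up
 * PFN mappings can revoke them when the backing resource goes away. The
 * "mydev" structure and its fields are hypothetical.
 *
 *	static void mydev_revoke_mappings(struct mydev *dev)
 *	{
 *		struct vm_area_struct *vma = dev->vma;
 *
 *		zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
 *	}
 *
 * Subsequent faults on the range then go through the driver's fault handler,
 * if any, which may re-establish the mappings or fail the access.
 */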
2052
2053 static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
2054 {
2055 pgd_t *pgd;
2056 p4d_t *p4d;
2057 pud_t *pud;
2058 pmd_t *pmd;
2059
2060 pgd = pgd_offset(mm, addr);
2061 p4d = p4d_alloc(mm, pgd, addr);
2062 if (!p4d)
2063 return NULL;
2064 pud = pud_alloc(mm, p4d, addr);
2065 if (!pud)
2066 return NULL;
2067 pmd = pmd_alloc(mm, pud, addr);
2068 if (!pmd)
2069 return NULL;
2070
2071 VM_BUG_ON(pmd_trans_huge(*pmd));
2072 return pmd;
2073 }
2074
2075 pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
2076 spinlock_t **ptl)
2077 {
2078 pmd_t *pmd = walk_to_pmd(mm, addr);
2079
2080 if (!pmd)
2081 return NULL;
2082 return pte_alloc_map_lock(mm, pmd, addr, ptl);
2083 }
2084
2085 static bool vm_mixed_zeropage_allowed(struct vm_area_struct *vma)
2086 {
2087 VM_WARN_ON_ONCE(vma->vm_flags & VM_PFNMAP);
2088 /*
2089 * Whoever wants to forbid the zeropage after some zeropages
2090 * might already have been mapped has to scan the page tables and
2091 * bail out on any zeropages. Zeropages in COW mappings can
2092 * be unshared using FAULT_FLAG_UNSHARE faults.
2093 */
2094 if (mm_forbids_zeropage(vma->vm_mm))
2095 return false;
2096 /* zeropages in COW mappings are common and unproblematic. */
2097 if (is_cow_mapping(vma->vm_flags))
2098 return true;
2099 /* Mappings that do not allow for writable PTEs are unproblematic. */
2100 if (!(vma->vm_flags & (VM_WRITE | VM_MAYWRITE)))
2101 return true;
2102 /*
2103 * Why not allow any VMA that has vm_ops->pfn_mkwrite? GUP could
2104 * find the shared zeropage and longterm-pin it, which would
2105 * be problematic as soon as the zeropage gets replaced by a different
2106 * page due to vma->vm_ops->pfn_mkwrite, because what's mapped would
2107 	 * now differ from what GUP looked up. FSDAX is incompatible with
2108 	 * FOLL_LONGTERM and VM_IO is incompatible with GUP completely (see
2109 * check_vma_flags).
2110 */
2111 return vma->vm_ops && vma->vm_ops->pfn_mkwrite &&
2112 (vma_is_fsdax(vma) || vma->vm_flags & VM_IO);
2113 }
2114
2115 static int validate_page_before_insert(struct vm_area_struct *vma,
2116 struct page *page)
2117 {
2118 struct folio *folio = page_folio(page);
2119
2120 if (!folio_ref_count(folio))
2121 return -EINVAL;
2122 if (unlikely(is_zero_folio(folio))) {
2123 if (!vm_mixed_zeropage_allowed(vma))
2124 return -EINVAL;
2125 return 0;
2126 }
2127 if (folio_test_anon(folio) || folio_test_slab(folio) ||
2128 page_has_type(page))
2129 return -EINVAL;
2130 flush_dcache_folio(folio);
2131 return 0;
2132 }
2133
2134 static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte,
2135 unsigned long addr, struct page *page, pgprot_t prot)
2136 {
2137 struct folio *folio = page_folio(page);
2138 pte_t pteval;
2139
2140 if (!pte_none(ptep_get(pte)))
2141 return -EBUSY;
2142 /* Ok, finally just insert the thing.. */
2143 pteval = mk_pte(page, prot);
2144 if (unlikely(is_zero_folio(folio))) {
2145 pteval = pte_mkspecial(pteval);
2146 } else {
2147 folio_get(folio);
2148 inc_mm_counter(vma->vm_mm, mm_counter_file(folio));
2149 folio_add_file_rmap_pte(folio, page, vma);
2150 }
2151 set_pte_at(vma->vm_mm, addr, pte, pteval);
2152 return 0;
2153 }
2154
2155 static int insert_page(struct vm_area_struct *vma, unsigned long addr,
2156 struct page *page, pgprot_t prot)
2157 {
2158 int retval;
2159 pte_t *pte;
2160 spinlock_t *ptl;
2161
2162 retval = validate_page_before_insert(vma, page);
2163 if (retval)
2164 goto out;
2165 retval = -ENOMEM;
2166 pte = get_locked_pte(vma->vm_mm, addr, &ptl);
2167 if (!pte)
2168 goto out;
2169 retval = insert_page_into_pte_locked(vma, pte, addr, page, prot);
2170 pte_unmap_unlock(pte, ptl);
2171 out:
2172 return retval;
2173 }
2174
2175 static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte,
2176 unsigned long addr, struct page *page, pgprot_t prot)
2177 {
2178 int err;
2179
2180 err = validate_page_before_insert(vma, page);
2181 if (err)
2182 return err;
2183 return insert_page_into_pte_locked(vma, pte, addr, page, prot);
2184 }
2185
2186 /* insert_pages() amortizes the cost of spinlock operations
2187 * when inserting pages in a loop.
2188 */
2189 static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
2190 struct page **pages, unsigned long *num, pgprot_t prot)
2191 {
2192 pmd_t *pmd = NULL;
2193 pte_t *start_pte, *pte;
2194 spinlock_t *pte_lock;
2195 struct mm_struct *const mm = vma->vm_mm;
2196 unsigned long curr_page_idx = 0;
2197 unsigned long remaining_pages_total = *num;
2198 unsigned long pages_to_write_in_pmd;
2199 int ret;
2200 more:
2201 ret = -EFAULT;
2202 pmd = walk_to_pmd(mm, addr);
2203 if (!pmd)
2204 goto out;
2205
2206 pages_to_write_in_pmd = min_t(unsigned long,
2207 remaining_pages_total, PTRS_PER_PTE - pte_index(addr));
2208
2209 /* Allocate the PTE if necessary; takes PMD lock once only. */
2210 ret = -ENOMEM;
2211 if (pte_alloc(mm, pmd))
2212 goto out;
2213
2214 while (pages_to_write_in_pmd) {
2215 int pte_idx = 0;
2216 const int batch_size = min_t(int, pages_to_write_in_pmd, 8);
2217
2218 start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
2219 if (!start_pte) {
2220 ret = -EFAULT;
2221 goto out;
2222 }
2223 for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) {
2224 int err = insert_page_in_batch_locked(vma, pte,
2225 addr, pages[curr_page_idx], prot);
2226 if (unlikely(err)) {
2227 pte_unmap_unlock(start_pte, pte_lock);
2228 ret = err;
2229 remaining_pages_total -= pte_idx;
2230 goto out;
2231 }
2232 addr += PAGE_SIZE;
2233 ++curr_page_idx;
2234 }
2235 pte_unmap_unlock(start_pte, pte_lock);
2236 pages_to_write_in_pmd -= batch_size;
2237 remaining_pages_total -= batch_size;
2238 }
2239 if (remaining_pages_total)
2240 goto more;
2241 ret = 0;
2242 out:
2243 *num = remaining_pages_total;
2244 return ret;
2245 }
2246
2247 /**
2248 * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
2249 * @vma: user vma to map to
2250 * @addr: target start user address of these pages
2251 * @pages: source kernel pages
2252 * @num: in: number of pages to map. out: number of pages that were *not*
2253 * mapped. (0 means all pages were successfully mapped).
2254 *
2255 * Preferred over vm_insert_page() when inserting multiple pages.
2256 *
2257 * In case of error, we may have mapped a subset of the provided
2258 * pages. It is the caller's responsibility to account for this case.
2259 *
2260 * The same restrictions apply as in vm_insert_page().
2261 */
2262 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
2263 struct page **pages, unsigned long *num)
2264 {
2265 const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
2266
2267 if (addr < vma->vm_start || end_addr >= vma->vm_end)
2268 return -EFAULT;
2269 if (!(vma->vm_flags & VM_MIXEDMAP)) {
2270 BUG_ON(mmap_read_trylock(vma->vm_mm));
2271 BUG_ON(vma->vm_flags & VM_PFNMAP);
2272 vm_flags_set(vma, VM_MIXEDMAP);
2273 }
2274 /* Defer page refcount checking till we're about to map that page. */
2275 return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
2276 }
2277 EXPORT_SYMBOL(vm_insert_pages);
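
/*
 * Example (illustrative sketch, not used by this file): an mmap handler
 * inserting a preallocated page array with one batched call. "mybuf" and
 * its fields are hypothetical; on failure, @num reports how many pages
 * were left unmapped.
 *
 *	static int mybuf_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mybuf *buf = file->private_data;
 *		unsigned long num = vma_pages(vma);
 *
 *		if (num > buf->nr_pages)
 *			return -EINVAL;
 *		return vm_insert_pages(vma, vma->vm_start, buf->pages, &num);
 *	}
 */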
2278
2279 /**
2280 * vm_insert_page - insert single page into user vma
2281 * @vma: user vma to map to
2282 * @addr: target user address of this page
2283 * @page: source kernel page
2284 *
2285 * This allows drivers to insert individual pages they've allocated
2286 * into a user vma. The zeropage is supported in some VMAs,
2287 * see vm_mixed_zeropage_allowed().
2288 *
2289 * The page has to be a nice clean _individual_ kernel allocation.
2290 * If you allocate a compound page, you need to have marked it as
2291 * such (__GFP_COMP), or manually just split the page up yourself
2292 * (see split_page()).
2293 *
2294 * NOTE! Traditionally this was done with "remap_pfn_range()" which
2295 * took an arbitrary page protection parameter. This doesn't allow
2296 * that. Your vma protection will have to be set up correctly, which
2297 * means that if you want a shared writable mapping, you'd better
2298 * ask for a shared writable mapping!
2299 *
2300 * The page does not need to be reserved.
2301 *
2302 * Usually this function is called from f_op->mmap() handler
2303 * under mm->mmap_lock write-lock, so it can change vma->vm_flags.
2304 * Caller must set VM_MIXEDMAP on vma if it wants to call this
2305 * function from other places, for example from page-fault handler.
2306 *
2307 * Return: %0 on success, negative error code otherwise.
2308 */
2309 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
2310 struct page *page)
2311 {
2312 if (addr < vma->vm_start || addr >= vma->vm_end)
2313 return -EFAULT;
2314 if (!(vma->vm_flags & VM_MIXEDMAP)) {
2315 BUG_ON(mmap_read_trylock(vma->vm_mm));
2316 BUG_ON(vma->vm_flags & VM_PFNMAP);
2317 vm_flags_set(vma, VM_MIXEDMAP);
2318 }
2319 return insert_page(vma, addr, page, vma->vm_page_prot);
2320 }
2321 EXPORT_SYMBOL(vm_insert_page);
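
/*
 * Example (illustrative sketch): the single-page variant of the above, again
 * with a hypothetical "mybuf". When many pages are inserted,
 * vm_insert_pages() is preferable since it batches the page table locking.
 *
 *	static int mybuf_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mybuf *buf = file->private_data;
 *		unsigned long uaddr = vma->vm_start;
 *		unsigned long i;
 *		int err;
 *
 *		if (vma_pages(vma) > buf->nr_pages)
 *			return -EINVAL;
 *		for (i = 0; i < vma_pages(vma); i++, uaddr += PAGE_SIZE) {
 *			err = vm_insert_page(vma, uaddr, buf->pages[i]);
 *			if (err)
 *				return err;
 *		}
 *		return 0;
 *	}
 */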
2322
2323 /*
2324 * __vm_map_pages - maps range of kernel pages into user vma
2325 * @vma: user vma to map to
2326 * @pages: pointer to array of source kernel pages
2327 * @num: number of pages in page array
2328 * @offset: user's requested vm_pgoff
2329 *
2330 * This allows drivers to map range of kernel pages into a user vma.
2331 * The zeropage is supported in some VMAs, see
2332 * vm_mixed_zeropage_allowed().
2333 *
2334 * Return: 0 on success and error code otherwise.
2335 */
2336 static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2337 unsigned long num, unsigned long offset)
2338 {
2339 unsigned long count = vma_pages(vma);
2340 unsigned long uaddr = vma->vm_start;
2341 int ret, i;
2342
2343 /* Fail if the user requested offset is beyond the end of the object */
2344 if (offset >= num)
2345 return -ENXIO;
2346
2347 /* Fail if the user requested size exceeds available object size */
2348 if (count > num - offset)
2349 return -ENXIO;
2350
2351 for (i = 0; i < count; i++) {
2352 ret = vm_insert_page(vma, uaddr, pages[offset + i]);
2353 if (ret < 0)
2354 return ret;
2355 uaddr += PAGE_SIZE;
2356 }
2357
2358 return 0;
2359 }
2360
2361 /**
2362  * vm_map_pages - map a range of kernel pages starting at a non-zero offset
2363 * @vma: user vma to map to
2364 * @pages: pointer to array of source kernel pages
2365 * @num: number of pages in page array
2366 *
2367 * Maps an object consisting of @num pages, catering for the user's
2368 * requested vm_pgoff
2369 *
2370 * If we fail to insert any page into the vma, the function will return
2371 * immediately leaving any previously inserted pages present. Callers
2372 * from the mmap handler may immediately return the error as their caller
2373 * will destroy the vma, removing any successfully inserted pages. Other
2374 * callers should make their own arrangements for calling unmap_region().
2375 *
2376 * Context: Process context. Called by mmap handlers.
2377 * Return: 0 on success and error code otherwise.
2378 */
2379 int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2380 unsigned long num)
2381 {
2382 return __vm_map_pages(vma, pages, num, vma->vm_pgoff);
2383 }
2384 EXPORT_SYMBOL(vm_map_pages);
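
/*
 * Example (illustrative sketch): when the whole page array is exposed and
 * userspace selects a sub-range via the mmap offset, the handler reduces to
 * a single call ("mybuf" is hypothetical); the offset and size checks
 * against the vma happen inside vm_map_pages().
 *
 *	static int mybuf_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mybuf *buf = file->private_data;
 *
 *		return vm_map_pages(vma, buf->pages, buf->nr_pages);
 *	}
 */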
2385
2386 /**
2387  * vm_map_pages_zero - map a range of kernel pages starting at offset zero
2388 * @vma: user vma to map to
2389 * @pages: pointer to array of source kernel pages
2390 * @num: number of pages in page array
2391 *
2392 * Similar to vm_map_pages(), except that it explicitly sets the offset
2393  * to 0. This function is intended for drivers that did not consider
2394 * vm_pgoff.
2395 *
2396 * Context: Process context. Called by mmap handlers.
2397 * Return: 0 on success and error code otherwise.
2398 */
2399 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
2400 unsigned long num)
2401 {
2402 return __vm_map_pages(vma, pages, num, 0);
2403 }
2404 EXPORT_SYMBOL(vm_map_pages_zero);
2405
2406 static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2407 pfn_t pfn, pgprot_t prot, bool mkwrite)
2408 {
2409 struct mm_struct *mm = vma->vm_mm;
2410 pte_t *pte, entry;
2411 spinlock_t *ptl;
2412
2413 pte = get_locked_pte(mm, addr, &ptl);
2414 if (!pte)
2415 return VM_FAULT_OOM;
2416 entry = ptep_get(pte);
2417 if (!pte_none(entry)) {
2418 if (mkwrite) {
2419 /*
2420 * For read faults on private mappings the PFN passed
2421 * in may not match the PFN we have mapped if the
2422 * mapped PFN is a writeable COW page. In the mkwrite
2423 * case we are creating a writable PTE for a shared
2424 * mapping and we expect the PFNs to match. If they
2425 * don't match, we are likely racing with block
2426 * allocation and mapping invalidation so just skip the
2427 * update.
2428 */
2429 if (pte_pfn(entry) != pfn_t_to_pfn(pfn)) {
2430 WARN_ON_ONCE(!is_zero_pfn(pte_pfn(entry)));
2431 goto out_unlock;
2432 }
2433 entry = pte_mkyoung(entry);
2434 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2435 if (ptep_set_access_flags(vma, addr, pte, entry, 1))
2436 update_mmu_cache(vma, addr, pte);
2437 }
2438 goto out_unlock;
2439 }
2440
2441 /* Ok, finally just insert the thing.. */
2442 if (pfn_t_devmap(pfn))
2443 entry = pte_mkdevmap(pfn_t_pte(pfn, prot));
2444 else
2445 entry = pte_mkspecial(pfn_t_pte(pfn, prot));
2446
2447 if (mkwrite) {
2448 entry = pte_mkyoung(entry);
2449 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2450 }
2451
2452 set_pte_at(mm, addr, pte, entry);
2453 update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
2454
2455 out_unlock:
2456 pte_unmap_unlock(pte, ptl);
2457 return VM_FAULT_NOPAGE;
2458 }
2459
2460 /**
2461 * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot
2462 * @vma: user vma to map to
2463 * @addr: target user address of this page
2464 * @pfn: source kernel pfn
2465 * @pgprot: pgprot flags for the inserted page
2466 *
2467 * This is exactly like vmf_insert_pfn(), except that it allows drivers
2468 * to override pgprot on a per-page basis.
2469 *
2470 * This only makes sense for IO mappings, and it makes no sense for
2471 * COW mappings. In general, using multiple vmas is preferable;
2472 * vmf_insert_pfn_prot should only be used if using multiple VMAs is
2473 * impractical.
2474 *
2475 * pgprot typically only differs from @vma->vm_page_prot when drivers set
2476 * caching- and encryption bits different than those of @vma->vm_page_prot,
2477 * because the caching- or encryption mode may not be known at mmap() time.
2478 *
2479 * This is ok as long as @vma->vm_page_prot is not used by the core vm
2480 * to set caching and encryption bits for those vmas (except for COW pages).
2481 * This is ensured by core vm only modifying these page table entries using
2482 * functions that don't touch caching- or encryption bits, using pte_modify()
2483 * if needed. (See for example mprotect()).
2484 *
2485 * Also when new page-table entries are created, this is only done using the
2486 * fault() callback, and never using the value of vma->vm_page_prot,
2487 * except for page-table entries that point to anonymous pages as the result
2488 * of COW.
2489 *
2490 * Context: Process context. May allocate using %GFP_KERNEL.
2491 * Return: vm_fault_t value.
2492 */
2493 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
2494 unsigned long pfn, pgprot_t pgprot)
2495 {
2496 /*
2497 * Technically, architectures with pte_special can avoid all these
2498 * restrictions (same for remap_pfn_range). However we would like
2499 * consistency in testing and feature parity among all, so we should
2500 * try to keep these invariants in place for everybody.
2501 */
2502 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
2503 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
2504 (VM_PFNMAP|VM_MIXEDMAP));
2505 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
2506 BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
2507
2508 if (addr < vma->vm_start || addr >= vma->vm_end)
2509 return VM_FAULT_SIGBUS;
2510
2511 if (!pfn_modify_allowed(pfn, pgprot))
2512 return VM_FAULT_SIGBUS;
2513
2514 track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
2515
2516 return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
2517 false);
2518 }
2519 EXPORT_SYMBOL(vmf_insert_pfn_prot);
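
/*
 * Example (illustrative sketch): a fault handler inserting a write-combined
 * mapping of device memory; "mydev" and its "base_pfn" field are
 * hypothetical.
 *
 *	static vm_fault_t mydev_fault(struct vm_fault *vmf)
 *	{
 *		struct mydev *dev = vmf->vma->vm_private_data;
 *		pgprot_t prot = pgprot_writecombine(vmf->vma->vm_page_prot);
 *
 *		return vmf_insert_pfn_prot(vmf->vma, vmf->address,
 *					   dev->base_pfn + vmf->pgoff, prot);
 *	}
 */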
2520
2521 /**
2522 * vmf_insert_pfn - insert single pfn into user vma
2523 * @vma: user vma to map to
2524 * @addr: target user address of this page
2525 * @pfn: source kernel pfn
2526 *
2527 * Similar to vm_insert_page, this allows drivers to insert individual pages
2528 * they've allocated into a user vma. Same comments apply.
2529 *
2530 * This function should only be called from a vm_ops->fault handler, and
2531 * in that case the handler should return the result of this function.
2532 *
2533 * vma cannot be a COW mapping.
2534 *
2535 * As this is called only for pages that do not currently exist, we
2536 * do not need to flush old virtual caches or the TLB.
2537 *
2538 * Context: Process context. May allocate using %GFP_KERNEL.
2539 * Return: vm_fault_t value.
2540 */
2541 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2542 unsigned long pfn)
2543 {
2544 return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
2545 }
2546 EXPORT_SYMBOL(vmf_insert_pfn);
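
/*
 * Example (illustrative sketch): when vma->vm_page_prot is already correct,
 * the fault handler simplifies to the following ("mydev" is hypothetical):
 *
 *	static vm_fault_t mydev_fault(struct vm_fault *vmf)
 *	{
 *		struct mydev *dev = vmf->vma->vm_private_data;
 *
 *		return vmf_insert_pfn(vmf->vma, vmf->address,
 *				      dev->base_pfn + vmf->pgoff);
 *	}
 */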
2547
2548 static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn, bool mkwrite)
2549 {
2550 if (unlikely(is_zero_pfn(pfn_t_to_pfn(pfn))) &&
2551 (mkwrite || !vm_mixed_zeropage_allowed(vma)))
2552 return false;
2553 /* these checks mirror the abort conditions in vm_normal_page */
2554 if (vma->vm_flags & VM_MIXEDMAP)
2555 return true;
2556 if (pfn_t_devmap(pfn))
2557 return true;
2558 if (pfn_t_special(pfn))
2559 return true;
2560 if (is_zero_pfn(pfn_t_to_pfn(pfn)))
2561 return true;
2562 return false;
2563 }
2564
2565 static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
2566 unsigned long addr, pfn_t pfn, bool mkwrite)
2567 {
2568 pgprot_t pgprot = vma->vm_page_prot;
2569 int err;
2570
2571 if (!vm_mixed_ok(vma, pfn, mkwrite))
2572 return VM_FAULT_SIGBUS;
2573
2574 if (addr < vma->vm_start || addr >= vma->vm_end)
2575 return VM_FAULT_SIGBUS;
2576
2577 track_pfn_insert(vma, &pgprot, pfn);
2578
2579 if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
2580 return VM_FAULT_SIGBUS;
2581
2582 /*
2583 * If we don't have pte special, then we have to use the pfn_valid()
2584 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
2585 * refcount the page if pfn_valid is true (hence insert_page rather
2586 * than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP
2587 	 * without pte special, it would then be refcounted as a normal page.
2588 */
2589 if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) &&
2590 !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
2591 struct page *page;
2592
2593 /*
2594 * At this point we are committed to insert_page()
2595 * regardless of whether the caller specified flags that
2596 * result in pfn_t_has_page() == false.
2597 */
2598 page = pfn_to_page(pfn_t_to_pfn(pfn));
2599 err = insert_page(vma, addr, page, pgprot);
2600 } else {
2601 return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
2602 }
2603
2604 if (err == -ENOMEM)
2605 return VM_FAULT_OOM;
2606 if (err < 0 && err != -EBUSY)
2607 return VM_FAULT_SIGBUS;
2608
2609 return VM_FAULT_NOPAGE;
2610 }
2611
2612 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2613 pfn_t pfn)
2614 {
2615 return __vm_insert_mixed(vma, addr, pfn, false);
2616 }
2617 EXPORT_SYMBOL(vmf_insert_mixed);
2618
2619 /*
2620  * If insertion of the PTE failed because someone else already added a
2621  * different entry in the meantime, we treat that as success as we assume
2622 * the same entry was actually inserted.
2623 */
2624 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
2625 unsigned long addr, pfn_t pfn)
2626 {
2627 return __vm_insert_mixed(vma, addr, pfn, true);
2628 }
2629
2630 /*
2631  * Maps a range of physical memory into the requested pages. The old
2632  * mappings are removed. Any references to nonexistent pages result
2633  * in null mappings (currently treated as "copy-on-access").
2634 */
2635 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
2636 unsigned long addr, unsigned long end,
2637 unsigned long pfn, pgprot_t prot)
2638 {
2639 pte_t *pte, *mapped_pte;
2640 spinlock_t *ptl;
2641 int err = 0;
2642
2643 mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
2644 if (!pte)
2645 return -ENOMEM;
2646 arch_enter_lazy_mmu_mode();
2647 do {
2648 BUG_ON(!pte_none(ptep_get(pte)));
2649 if (!pfn_modify_allowed(pfn, prot)) {
2650 err = -EACCES;
2651 break;
2652 }
2653 set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
2654 pfn++;
2655 } while (pte++, addr += PAGE_SIZE, addr != end);
2656 arch_leave_lazy_mmu_mode();
2657 pte_unmap_unlock(mapped_pte, ptl);
2658 return err;
2659 }
2660
2661 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
2662 unsigned long addr, unsigned long end,
2663 unsigned long pfn, pgprot_t prot)
2664 {
2665 pmd_t *pmd;
2666 unsigned long next;
2667 int err;
2668
2669 pfn -= addr >> PAGE_SHIFT;
2670 pmd = pmd_alloc(mm, pud, addr);
2671 if (!pmd)
2672 return -ENOMEM;
2673 VM_BUG_ON(pmd_trans_huge(*pmd));
2674 do {
2675 next = pmd_addr_end(addr, end);
2676 err = remap_pte_range(mm, pmd, addr, next,
2677 pfn + (addr >> PAGE_SHIFT), prot);
2678 if (err)
2679 return err;
2680 } while (pmd++, addr = next, addr != end);
2681 return 0;
2682 }
2683
2684 static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
2685 unsigned long addr, unsigned long end,
2686 unsigned long pfn, pgprot_t prot)
2687 {
2688 pud_t *pud;
2689 unsigned long next;
2690 int err;
2691
2692 pfn -= addr >> PAGE_SHIFT;
2693 pud = pud_alloc(mm, p4d, addr);
2694 if (!pud)
2695 return -ENOMEM;
2696 do {
2697 next = pud_addr_end(addr, end);
2698 err = remap_pmd_range(mm, pud, addr, next,
2699 pfn + (addr >> PAGE_SHIFT), prot);
2700 if (err)
2701 return err;
2702 } while (pud++, addr = next, addr != end);
2703 return 0;
2704 }
2705
2706 static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2707 unsigned long addr, unsigned long end,
2708 unsigned long pfn, pgprot_t prot)
2709 {
2710 p4d_t *p4d;
2711 unsigned long next;
2712 int err;
2713
2714 pfn -= addr >> PAGE_SHIFT;
2715 p4d = p4d_alloc(mm, pgd, addr);
2716 if (!p4d)
2717 return -ENOMEM;
2718 do {
2719 next = p4d_addr_end(addr, end);
2720 err = remap_pud_range(mm, p4d, addr, next,
2721 pfn + (addr >> PAGE_SHIFT), prot);
2722 if (err)
2723 return err;
2724 } while (p4d++, addr = next, addr != end);
2725 return 0;
2726 }
2727
2728 static int remap_pfn_range_internal(struct vm_area_struct *vma, unsigned long addr,
2729 unsigned long pfn, unsigned long size, pgprot_t prot)
2730 {
2731 pgd_t *pgd;
2732 unsigned long next;
2733 unsigned long end = addr + PAGE_ALIGN(size);
2734 struct mm_struct *mm = vma->vm_mm;
2735 int err;
2736
2737 if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
2738 return -EINVAL;
2739
2740 /*
2741 * Physically remapped pages are special. Tell the
2742 * rest of the world about it:
2743 * VM_IO tells people not to look at these pages
2744 * (accesses can have side effects).
2745 * VM_PFNMAP tells the core MM that the base pages are just
2746 * raw PFN mappings, and do not have a "struct page" associated
2747 * with them.
2748 * VM_DONTEXPAND
2749 * Disable vma merging and expanding with mremap().
2750 * VM_DONTDUMP
2751 * Omit vma from core dump, even when VM_IO turned off.
2752 *
2753 * There's a horrible special case to handle copy-on-write
2754 * behaviour that some programs depend on. We mark the "original"
2755 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
2756 * See vm_normal_page() for details.
2757 */
2758 if (is_cow_mapping(vma->vm_flags)) {
2759 if (addr != vma->vm_start || end != vma->vm_end)
2760 return -EINVAL;
2761 vma->vm_pgoff = pfn;
2762 }
2763
2764 vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
2765
2766 BUG_ON(addr >= end);
2767 pfn -= addr >> PAGE_SHIFT;
2768 pgd = pgd_offset(mm, addr);
2769 flush_cache_range(vma, addr, end);
2770 do {
2771 next = pgd_addr_end(addr, end);
2772 err = remap_p4d_range(mm, pgd, addr, next,
2773 pfn + (addr >> PAGE_SHIFT), prot);
2774 if (err)
2775 return err;
2776 } while (pgd++, addr = next, addr != end);
2777
2778 return 0;
2779 }
2780
2781 /*
2782 * Variant of remap_pfn_range that does not call track_pfn_remap. The caller
2783 * must have pre-validated the caching bits of the pgprot_t.
2784 */
2785 int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
2786 unsigned long pfn, unsigned long size, pgprot_t prot)
2787 {
2788 int error = remap_pfn_range_internal(vma, addr, pfn, size, prot);
2789
2790 if (!error)
2791 return 0;
2792
2793 /*
2794 * A partial pfn range mapping is dangerous: it does not
2795 * maintain page reference counts, and callers may free
2796 * pages due to the error. So zap it early.
2797 */
2798 zap_page_range_single(vma, addr, size, NULL);
2799 return error;
2800 }
2801
2802 /**
2803 * remap_pfn_range - remap kernel memory to userspace
2804 * @vma: user vma to map to
2805 * @addr: target page aligned user address to start at
2806 * @pfn: page frame number of kernel physical memory address
2807 * @size: size of mapping area
2808 * @prot: page protection flags for this mapping
2809 *
2810 * Note: this is only safe if the mm semaphore is held when called.
2811 *
2812 * Return: %0 on success, negative error code otherwise.
2813 */
2814 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
2815 unsigned long pfn, unsigned long size, pgprot_t prot)
2816 {
2817 int err;
2818
2819 err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
2820 if (err)
2821 return -EINVAL;
2822
2823 err = remap_pfn_range_notrack(vma, addr, pfn, size, prot);
2824 if (err)
2825 untrack_pfn(vma, pfn, PAGE_ALIGN(size), true);
2826 return err;
2827 }
2828 EXPORT_SYMBOL(remap_pfn_range);
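
/*
 * Example (illustrative sketch): the classic use is a driver's mmap handler
 * mapping a physically contiguous region, here a hypothetical framebuffer
 * at "dev->fb_paddr" that is at least as large as the requested range:
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydev *dev = file->private_data;
 *		unsigned long size = vma->vm_end - vma->vm_start;
 *
 *		return remap_pfn_range(vma, vma->vm_start,
 *				       dev->fb_paddr >> PAGE_SHIFT,
 *				       size, vma->vm_page_prot);
 *	}
 */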
2829
2830 /**
2831 * vm_iomap_memory - remap memory to userspace
2832 * @vma: user vma to map to
2833 * @start: start of the physical memory to be mapped
2834 * @len: size of area
2835 *
2836 * This is a simplified io_remap_pfn_range() for common driver use. The
2837 * driver just needs to give us the physical memory range to be mapped,
2838 * we'll figure out the rest from the vma information.
2839 *
2840  * NOTE! Some drivers might want to tweak vma->vm_page_prot first to set
2841  * up write-combining or similar behaviour.
2842 *
2843 * Return: %0 on success, negative error code otherwise.
2844 */
2845 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
2846 {
2847 unsigned long vm_len, pfn, pages;
2848
2849 /* Check that the physical memory area passed in looks valid */
2850 if (start + len < start)
2851 return -EINVAL;
2852 /*
2853 * You *really* shouldn't map things that aren't page-aligned,
2854 * but we've historically allowed it because IO memory might
2855 * just have smaller alignment.
2856 */
2857 len += start & ~PAGE_MASK;
2858 pfn = start >> PAGE_SHIFT;
2859 pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
2860 if (pfn + pages < pfn)
2861 return -EINVAL;
2862
2863 /* We start the mapping 'vm_pgoff' pages into the area */
2864 if (vma->vm_pgoff > pages)
2865 return -EINVAL;
2866 pfn += vma->vm_pgoff;
2867 pages -= vma->vm_pgoff;
2868
2869 /* Can we fit all of the mapping? */
2870 vm_len = vma->vm_end - vma->vm_start;
2871 if (vm_len >> PAGE_SHIFT > pages)
2872 return -EINVAL;
2873
2874 /* Ok, let it rip */
2875 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
2876 }
2877 EXPORT_SYMBOL(vm_iomap_memory);
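
/*
 * Example (illustrative sketch): compared to the remap_pfn_range() example
 * above, the driver only supplies the physical range; the offset and size
 * checks against the vma are done here ("mydev" and its fields are
 * hypothetical):
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydev *dev = file->private_data;
 *
 *		return vm_iomap_memory(vma, dev->mmio_start, dev->mmio_len);
 *	}
 */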
2878
2879 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
2880 unsigned long addr, unsigned long end,
2881 pte_fn_t fn, void *data, bool create,
2882 pgtbl_mod_mask *mask)
2883 {
2884 pte_t *pte, *mapped_pte;
2885 int err = 0;
2886 spinlock_t *ptl;
2887
2888 if (create) {
2889 mapped_pte = pte = (mm == &init_mm) ?
2890 pte_alloc_kernel_track(pmd, addr, mask) :
2891 pte_alloc_map_lock(mm, pmd, addr, &ptl);
2892 if (!pte)
2893 return -ENOMEM;
2894 } else {
2895 mapped_pte = pte = (mm == &init_mm) ?
2896 pte_offset_kernel(pmd, addr) :
2897 pte_offset_map_lock(mm, pmd, addr, &ptl);
2898 if (!pte)
2899 return -EINVAL;
2900 }
2901
2902 arch_enter_lazy_mmu_mode();
2903
2904 if (fn) {
2905 do {
2906 if (create || !pte_none(ptep_get(pte))) {
2907 err = fn(pte, addr, data);
2908 if (err)
2909 break;
2910 }
2911 } while (pte++, addr += PAGE_SIZE, addr != end);
2912 }
2913 *mask |= PGTBL_PTE_MODIFIED;
2914
2915 arch_leave_lazy_mmu_mode();
2916
2917 if (mm != &init_mm)
2918 pte_unmap_unlock(mapped_pte, ptl);
2919 return err;
2920 }
2921
2922 static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
2923 unsigned long addr, unsigned long end,
2924 pte_fn_t fn, void *data, bool create,
2925 pgtbl_mod_mask *mask)
2926 {
2927 pmd_t *pmd;
2928 unsigned long next;
2929 int err = 0;
2930
2931 BUG_ON(pud_leaf(*pud));
2932
2933 if (create) {
2934 pmd = pmd_alloc_track(mm, pud, addr, mask);
2935 if (!pmd)
2936 return -ENOMEM;
2937 } else {
2938 pmd = pmd_offset(pud, addr);
2939 }
2940 do {
2941 next = pmd_addr_end(addr, end);
2942 if (pmd_none(*pmd) && !create)
2943 continue;
2944 if (WARN_ON_ONCE(pmd_leaf(*pmd)))
2945 return -EINVAL;
2946 if (!pmd_none(*pmd) && WARN_ON_ONCE(pmd_bad(*pmd))) {
2947 if (!create)
2948 continue;
2949 pmd_clear_bad(pmd);
2950 }
2951 err = apply_to_pte_range(mm, pmd, addr, next,
2952 fn, data, create, mask);
2953 if (err)
2954 break;
2955 } while (pmd++, addr = next, addr != end);
2956
2957 return err;
2958 }
2959
2960 static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
2961 unsigned long addr, unsigned long end,
2962 pte_fn_t fn, void *data, bool create,
2963 pgtbl_mod_mask *mask)
2964 {
2965 pud_t *pud;
2966 unsigned long next;
2967 int err = 0;
2968
2969 if (create) {
2970 pud = pud_alloc_track(mm, p4d, addr, mask);
2971 if (!pud)
2972 return -ENOMEM;
2973 } else {
2974 pud = pud_offset(p4d, addr);
2975 }
2976 do {
2977 next = pud_addr_end(addr, end);
2978 if (pud_none(*pud) && !create)
2979 continue;
2980 if (WARN_ON_ONCE(pud_leaf(*pud)))
2981 return -EINVAL;
2982 if (!pud_none(*pud) && WARN_ON_ONCE(pud_bad(*pud))) {
2983 if (!create)
2984 continue;
2985 pud_clear_bad(pud);
2986 }
2987 err = apply_to_pmd_range(mm, pud, addr, next,
2988 fn, data, create, mask);
2989 if (err)
2990 break;
2991 } while (pud++, addr = next, addr != end);
2992
2993 return err;
2994 }
2995
2996 static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2997 unsigned long addr, unsigned long end,
2998 pte_fn_t fn, void *data, bool create,
2999 pgtbl_mod_mask *mask)
3000 {
3001 p4d_t *p4d;
3002 unsigned long next;
3003 int err = 0;
3004
3005 if (create) {
3006 p4d = p4d_alloc_track(mm, pgd, addr, mask);
3007 if (!p4d)
3008 return -ENOMEM;
3009 } else {
3010 p4d = p4d_offset(pgd, addr);
3011 }
3012 do {
3013 next = p4d_addr_end(addr, end);
3014 if (p4d_none(*p4d) && !create)
3015 continue;
3016 if (WARN_ON_ONCE(p4d_leaf(*p4d)))
3017 return -EINVAL;
3018 if (!p4d_none(*p4d) && WARN_ON_ONCE(p4d_bad(*p4d))) {
3019 if (!create)
3020 continue;
3021 p4d_clear_bad(p4d);
3022 }
3023 err = apply_to_pud_range(mm, p4d, addr, next,
3024 fn, data, create, mask);
3025 if (err)
3026 break;
3027 } while (p4d++, addr = next, addr != end);
3028
3029 return err;
3030 }
3031
3032 static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
3033 unsigned long size, pte_fn_t fn,
3034 void *data, bool create)
3035 {
3036 pgd_t *pgd;
3037 unsigned long start = addr, next;
3038 unsigned long end = addr + size;
3039 pgtbl_mod_mask mask = 0;
3040 int err = 0;
3041
3042 if (WARN_ON(addr >= end))
3043 return -EINVAL;
3044
3045 pgd = pgd_offset(mm, addr);
3046 do {
3047 next = pgd_addr_end(addr, end);
3048 if (pgd_none(*pgd) && !create)
3049 continue;
3050 if (WARN_ON_ONCE(pgd_leaf(*pgd))) {
3051 err = -EINVAL;
3052 break;
3053 }
3054 if (!pgd_none(*pgd) && WARN_ON_ONCE(pgd_bad(*pgd))) {
3055 if (!create)
3056 continue;
3057 pgd_clear_bad(pgd);
3058 }
3059 err = apply_to_p4d_range(mm, pgd, addr, next,
3060 fn, data, create, &mask);
3061 if (err)
3062 break;
3063 } while (pgd++, addr = next, addr != end);
3064
3065 if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
3066 arch_sync_kernel_mappings(start, start + size);
3067
3068 return err;
3069 }
3070
3071 /*
3072 * Scan a region of virtual memory, filling in page tables as necessary
3073 * and calling a provided function on each leaf page table.
3074 */
3075 int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
3076 unsigned long size, pte_fn_t fn, void *data)
3077 {
3078 return __apply_to_page_range(mm, addr, size, fn, data, true);
3079 }
3080 EXPORT_SYMBOL_GPL(apply_to_page_range);
3081
3082 /*
3083 * Scan a region of virtual memory, calling a provided function on
3084 * each leaf page table where it exists.
3085 *
3086 * Unlike apply_to_page_range, this does _not_ fill in page tables
3087 * where they are absent.
3088 */
3089 int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr,
3090 unsigned long size, pte_fn_t fn, void *data)
3091 {
3092 return __apply_to_page_range(mm, addr, size, fn, data, false);
3093 }
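
/*
 * Example (illustrative sketch): a pte_fn_t callback that counts populated
 * PTEs in a range of kernel virtual memory, using the non-allocating
 * variant above so no page tables are created as a side effect:
 *
 *	static int count_present_pte(pte_t *pte, unsigned long addr, void *data)
 *	{
 *		unsigned long *count = data;
 *
 *		if (!pte_none(ptep_get(pte)))
 *			(*count)++;
 *		return 0;
 *	}
 *
 *	static unsigned long count_ptes(unsigned long addr, unsigned long size)
 *	{
 *		unsigned long count = 0;
 *
 *		apply_to_existing_page_range(&init_mm, addr, size,
 *					     count_present_pte, &count);
 *		return count;
 *	}
 */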
3094
3095 /*
3096  * handle_pte_fault chooses the page fault handler according to an entry which was
3097 * read non-atomically. Before making any commitment, on those architectures
3098 * or configurations (e.g. i386 with PAE) which might give a mix of unmatched
3099 * parts, do_swap_page must check under lock before unmapping the pte and
3100 * proceeding (but do_wp_page is only called after already making such a check;
3101 * and do_anonymous_page can safely check later on).
3102 */
3103 static inline int pte_unmap_same(struct vm_fault *vmf)
3104 {
3105 int same = 1;
3106 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION)
3107 if (sizeof(pte_t) > sizeof(unsigned long)) {
3108 spin_lock(vmf->ptl);
3109 same = pte_same(ptep_get(vmf->pte), vmf->orig_pte);
3110 spin_unlock(vmf->ptl);
3111 }
3112 #endif
3113 pte_unmap(vmf->pte);
3114 vmf->pte = NULL;
3115 return same;
3116 }
3117
3118 /*
3119 * Return:
3120  *	0:		copy succeeded
3121  *	-EHWPOISON:	copy failed due to hwpoison in source page
3122  *	-EAGAIN:	copy failed (some other reason)
3123 */
3124 static inline int __wp_page_copy_user(struct page *dst, struct page *src,
3125 struct vm_fault *vmf)
3126 {
3127 int ret;
3128 void *kaddr;
3129 void __user *uaddr;
3130 struct vm_area_struct *vma = vmf->vma;
3131 struct mm_struct *mm = vma->vm_mm;
3132 unsigned long addr = vmf->address;
3133
3134 if (likely(src)) {
3135 if (copy_mc_user_highpage(dst, src, addr, vma))
3136 return -EHWPOISON;
3137 return 0;
3138 }
3139
3140 /*
3141 * If the source page was a PFN mapping, we don't have
3142 * a "struct page" for it. We do a best-effort copy by
3143 * just copying from the original user address. If that
3144 * fails, we just zero-fill it. Live with it.
3145 */
3146 kaddr = kmap_local_page(dst);
3147 pagefault_disable();
3148 uaddr = (void __user *)(addr & PAGE_MASK);
3149
3150 /*
3151 * On architectures with software "accessed" bits, we would
3152 * take a double page fault, so mark it accessed here.
3153 */
3154 vmf->pte = NULL;
3155 if (!arch_has_hw_pte_young() && !pte_young(vmf->orig_pte)) {
3156 pte_t entry;
3157
3158 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
3159 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
3160 /*
3161 			 * Another thread has already handled the fault;
3162 			 * just update the local TLB.
3163 */
3164 if (vmf->pte)
3165 update_mmu_tlb(vma, addr, vmf->pte);
3166 ret = -EAGAIN;
3167 goto pte_unlock;
3168 }
3169
3170 entry = pte_mkyoung(vmf->orig_pte);
3171 if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
3172 update_mmu_cache_range(vmf, vma, addr, vmf->pte, 1);
3173 }
3174
3175 /*
3176 * This really shouldn't fail, because the page is there
3177 * in the page tables. But it might just be unreadable,
3178 * in which case we just give up and fill the result with
3179 * zeroes.
3180 */
3181 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
3182 if (vmf->pte)
3183 goto warn;
3184
3185 /* Re-validate under PTL if the page is still mapped */
3186 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
3187 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
3188 /* The PTE changed under us, update local tlb */
3189 if (vmf->pte)
3190 update_mmu_tlb(vma, addr, vmf->pte);
3191 ret = -EAGAIN;
3192 goto pte_unlock;
3193 }
3194
3195 /*
3196 		 * The same page may have been mapped back in since the last
3197 		 * copy attempt. Try copying again under the PTL.
3198 */
3199 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
3200 /*
3201 			 * Warn in case there is some obscure
3202 			 * use-case.
3203 */
3204 warn:
3205 WARN_ON_ONCE(1);
3206 clear_page(kaddr);
3207 }
3208 }
3209
3210 ret = 0;
3211
3212 pte_unlock:
3213 if (vmf->pte)
3214 pte_unmap_unlock(vmf->pte, vmf->ptl);
3215 pagefault_enable();
3216 kunmap_local(kaddr);
3217 flush_dcache_page(dst);
3218
3219 return ret;
3220 }
3221
3222 static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
3223 {
3224 struct file *vm_file = vma->vm_file;
3225
3226 if (vm_file)
3227 return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
3228
3229 /*
3230 * Special mappings (e.g. VDSO) do not have any file so fake
3231 * a default GFP_KERNEL for them.
3232 */
3233 return GFP_KERNEL;
3234 }
3235
3236 /*
3237 * Notify the address space that the page is about to become writable so that
3238 * it can prohibit this or wait for the page to get into an appropriate state.
3239 *
3240 * We do this without the lock held, so that it can sleep if it needs to.
3241 */
3242 static vm_fault_t do_page_mkwrite(struct vm_fault *vmf, struct folio *folio)
3243 {
3244 vm_fault_t ret;
3245 unsigned int old_flags = vmf->flags;
3246
3247 vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
3248
3249 if (vmf->vma->vm_file &&
3250 IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host))
3251 return VM_FAULT_SIGBUS;
3252
3253 ret = vmf->vma->vm_ops->page_mkwrite(vmf);
3254 /* Restore original flags so that caller is not surprised */
3255 vmf->flags = old_flags;
3256 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
3257 return ret;
3258 if (unlikely(!(ret & VM_FAULT_LOCKED))) {
3259 folio_lock(folio);
3260 if (!folio->mapping) {
3261 folio_unlock(folio);
3262 return 0; /* retry */
3263 }
3264 ret |= VM_FAULT_LOCKED;
3265 } else
3266 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
3267 return ret;
3268 }
3269
3270 /*
3271 * Handle dirtying of a page in shared file mapping on a write fault.
3272 *
3273 * The function expects the page to be locked and unlocks it.
3274 */
3275 static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
3276 {
3277 struct vm_area_struct *vma = vmf->vma;
3278 struct address_space *mapping;
3279 struct folio *folio = page_folio(vmf->page);
3280 bool dirtied;
3281 bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
3282
3283 dirtied = folio_mark_dirty(folio);
3284 VM_BUG_ON_FOLIO(folio_test_anon(folio), folio);
3285 /*
3286 * Take a local copy of the address_space - folio.mapping may be zeroed
3287 * by truncate after folio_unlock(). The address_space itself remains
3288 * pinned by vma->vm_file's reference. We rely on folio_unlock()'s
3289 * release semantics to prevent the compiler from undoing this copying.
3290 */
3291 mapping = folio_raw_mapping(folio);
3292 folio_unlock(folio);
3293
3294 if (!page_mkwrite)
3295 file_update_time(vma->vm_file);
3296
3297 /*
3298 * Throttle page dirtying rate down to writeback speed.
3299 *
3300 * mapping may be NULL here because some device drivers do not
3301 	 * set page.mapping but still dirty their pages.
3302 *
3303 * Drop the mmap_lock before waiting on IO, if we can. The file
3304 * is pinning the mapping, as per above.
3305 */
3306 if ((dirtied || page_mkwrite) && mapping) {
3307 struct file *fpin;
3308
3309 fpin = maybe_unlock_mmap_for_io(vmf, NULL);
3310 balance_dirty_pages_ratelimited(mapping);
3311 if (fpin) {
3312 fput(fpin);
3313 return VM_FAULT_COMPLETED;
3314 }
3315 }
3316
3317 return 0;
3318 }
3319
3320 /*
3321 * Handle write page faults for pages that can be reused in the current vma
3322 *
3323  * This can happen either due to the mapping carrying the VM_SHARED flag,
3324  * or due to us being the last remaining reference to the page. In either
3325 * case, all we need to do here is to mark the page as writable and update
3326 * any related book-keeping.
3327 */
3328 static inline void wp_page_reuse(struct vm_fault *vmf, struct folio *folio)
3329 __releases(vmf->ptl)
3330 {
3331 struct vm_area_struct *vma = vmf->vma;
3332 pte_t entry;
3333
3334 VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE));
3335 VM_WARN_ON(is_zero_pfn(pte_pfn(vmf->orig_pte)));
3336
3337 if (folio) {
3338 VM_BUG_ON(folio_test_anon(folio) &&
3339 !PageAnonExclusive(vmf->page));
3340 /*
3341 * Clear the folio's cpupid information as the existing
3342 * information potentially belongs to a now completely
3343 * unrelated process.
3344 */
3345 folio_xchg_last_cpupid(folio, (1 << LAST_CPUPID_SHIFT) - 1);
3346 }
3347
3348 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3349 entry = pte_mkyoung(vmf->orig_pte);
3350 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3351 if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
3352 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
3353 pte_unmap_unlock(vmf->pte, vmf->ptl);
3354 count_vm_event(PGREUSE);
3355 }
3356
3357 /*
3358 * We could add a bitflag somewhere, but for now, we know that all
3359 * vm_ops that have a ->map_pages have been audited and don't need
3360 * the mmap_lock to be held.
3361 */
3362 static inline vm_fault_t vmf_can_call_fault(const struct vm_fault *vmf)
3363 {
3364 struct vm_area_struct *vma = vmf->vma;
3365
3366 if (vma->vm_ops->map_pages || !(vmf->flags & FAULT_FLAG_VMA_LOCK))
3367 return 0;
3368 vma_end_read(vma);
3369 return VM_FAULT_RETRY;
3370 }
3371
3372 /**
3373 * __vmf_anon_prepare - Prepare to handle an anonymous fault.
3374 * @vmf: The vm_fault descriptor passed from the fault handler.
3375 *
3376 * When preparing to insert an anonymous page into a VMA from a
3377 * fault handler, call this function rather than anon_vma_prepare().
3378 * If this vma does not already have an associated anon_vma and we are
3379 * only protected by the per-VMA lock, the caller must retry with the
3380 * mmap_lock held. __anon_vma_prepare() will look at adjacent VMAs to
3381 * determine if this VMA can share its anon_vma, and that's not safe to
3382 * do with only the per-VMA lock held for this VMA.
3383 *
3384 * Return: 0 if fault handling can proceed. Any other value should be
3385 * returned to the caller.
3386 */
3387 vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf)
3388 {
3389 struct vm_area_struct *vma = vmf->vma;
3390 vm_fault_t ret = 0;
3391
3392 if (likely(vma->anon_vma))
3393 return 0;
3394 if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
3395 if (!mmap_read_trylock(vma->vm_mm))
3396 return VM_FAULT_RETRY;
3397 }
3398 if (__anon_vma_prepare(vma))
3399 ret = VM_FAULT_OOM;
3400 if (vmf->flags & FAULT_FLAG_VMA_LOCK)
3401 mmap_read_unlock(vma->vm_mm);
3402 return ret;
3403 }
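
/*
 * Typical call site (illustrative sketch): fault handlers reach this
 * through the vmf_anon_prepare() wrapper and simply propagate failure:
 *
 *	ret = vmf_anon_prepare(vmf);
 *	if (unlikely(ret))
 *		return ret;	(VM_FAULT_RETRY or VM_FAULT_OOM)
 *
 * wp_page_copy() below follows exactly this pattern before allocating
 * the new folio.
 */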
3404
3405 /*
3406 * Handle the case of a page which we actually need to copy to a new page,
3407 * either due to COW or unsharing.
3408 *
3409 * Called with mmap_lock locked and the old page referenced, but
3410 * without the ptl held.
3411 *
3412 * High level logic flow:
3413 *
3414 * - Allocate a page, copy the content of the old page to the new one.
3415 * - Handle bookkeeping and accounting - cgroups, mmu-notifiers, etc.
3416 * - Take the PTL. If the pte changed, bail out and release the allocated page
3417 * - If the pte is still the way we remember it, update the page table and all
3418 * relevant references. This includes dropping the reference the page-table
3419 * held to the old page, as well as updating the rmap.
3420 * - In any case, unlock the PTL and drop the reference we took to the old page.
3421 */
3422 static vm_fault_t wp_page_copy(struct vm_fault *vmf)
3423 {
3424 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
3425 struct vm_area_struct *vma = vmf->vma;
3426 struct mm_struct *mm = vma->vm_mm;
3427 struct folio *old_folio = NULL;
3428 struct folio *new_folio = NULL;
3429 pte_t entry;
3430 int page_copied = 0;
3431 struct mmu_notifier_range range;
3432 vm_fault_t ret;
3433 bool pfn_is_zero;
3434
3435 delayacct_wpcopy_start();
3436
3437 if (vmf->page)
3438 old_folio = page_folio(vmf->page);
3439 ret = vmf_anon_prepare(vmf);
3440 if (unlikely(ret))
3441 goto out;
3442
3443 pfn_is_zero = is_zero_pfn(pte_pfn(vmf->orig_pte));
3444 new_folio = folio_prealloc(mm, vma, vmf->address, pfn_is_zero);
3445 if (!new_folio)
3446 goto oom;
3447
3448 if (!pfn_is_zero) {
3449 int err;
3450
3451 err = __wp_page_copy_user(&new_folio->page, vmf->page, vmf);
3452 if (err) {
3453 /*
3454 * COW failed; if the fault was resolved by another
3455 * thread, that's fine. If not, userspace will re-fault
3456 * on the same address and we will handle the fault
3457 * from the second attempt.
3458 * The -EHWPOISON case will not be retried.
3459 */
3460 folio_put(new_folio);
3461 if (old_folio)
3462 folio_put(old_folio);
3463
3464 delayacct_wpcopy_end();
3465 return err == -EHWPOISON ? VM_FAULT_HWPOISON : 0;
3466 }
3467 kmsan_copy_page_meta(&new_folio->page, vmf->page);
3468 }
3469
3470 __folio_mark_uptodate(new_folio);
3471
3472 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
3473 vmf->address & PAGE_MASK,
3474 (vmf->address & PAGE_MASK) + PAGE_SIZE);
3475 mmu_notifier_invalidate_range_start(&range);
3476
3477 /*
3478 * Re-check the pte - we dropped the lock
3479 */
3480 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
3481 if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
3482 if (old_folio) {
3483 if (!folio_test_anon(old_folio)) {
3484 dec_mm_counter(mm, mm_counter_file(old_folio));
3485 inc_mm_counter(mm, MM_ANONPAGES);
3486 }
3487 } else {
3488 ksm_might_unmap_zero_page(mm, vmf->orig_pte);
3489 inc_mm_counter(mm, MM_ANONPAGES);
3490 }
3491 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3492 entry = mk_pte(&new_folio->page, vma->vm_page_prot);
3493 entry = pte_sw_mkyoung(entry);
3494 if (unlikely(unshare)) {
3495 if (pte_soft_dirty(vmf->orig_pte))
3496 entry = pte_mksoft_dirty(entry);
3497 if (pte_uffd_wp(vmf->orig_pte))
3498 entry = pte_mkuffd_wp(entry);
3499 } else {
3500 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3501 }
3502
3503 /*
3504 * Clear the pte entry and flush it first, before updating the
3505 * pte with the new entry, to keep TLBs on different CPUs in
3506 * sync. This code used to set the new PTE then flush TLBs, but
3507 * that left a window where the new PTE could be loaded into
3508 * some TLBs while the old PTE remains in others.
3509 */
3510 ptep_clear_flush(vma, vmf->address, vmf->pte);
3511 folio_add_new_anon_rmap(new_folio, vma, vmf->address, RMAP_EXCLUSIVE);
3512 folio_add_lru_vma(new_folio, vma);
3513 BUG_ON(unshare && pte_write(entry));
3514 set_pte_at(mm, vmf->address, vmf->pte, entry);
3515 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
3516 if (old_folio) {
3517 /*
3518 * Only after switching the pte to the new page may
3519 * we remove the mapcount here. Otherwise another
3520 * process may come and find the rmap count decremented
3521 * before the pte is switched to the new page, and
3522 * "reuse" the old page writing into it while our pte
3523 * here still points into it and can be read by other
3524 * threads.
3525 *
3526 * The critical issue is to order this
3527 * folio_remove_rmap_pte() with the ptep_clear_flush
3528 * above. Those stores are ordered by (if nothing else,)
3529 * the barrier present in the atomic_add_negative
3530 * in folio_remove_rmap_pte();
3531 *
3532 * Then the TLB flush in ptep_clear_flush ensures that
3533 * no process can access the old page before the
3534 * decremented mapcount is visible. And the old page
3535 * cannot be reused until after the decremented
3536 * mapcount is visible. So transitively, TLBs to
3537 * old page will be flushed before it can be reused.
3538 */
3539 folio_remove_rmap_pte(old_folio, vmf->page, vma);
3540 }
3541
3542 /* Free the old page.. */
3543 new_folio = old_folio;
3544 page_copied = 1;
3545 pte_unmap_unlock(vmf->pte, vmf->ptl);
3546 } else if (vmf->pte) {
3547 update_mmu_tlb(vma, vmf->address, vmf->pte);
3548 pte_unmap_unlock(vmf->pte, vmf->ptl);
3549 }
3550
3551 mmu_notifier_invalidate_range_end(&range);
3552
3553 if (new_folio)
3554 folio_put(new_folio);
3555 if (old_folio) {
3556 if (page_copied)
3557 free_swap_cache(old_folio);
3558 folio_put(old_folio);
3559 }
3560
3561 delayacct_wpcopy_end();
3562 return 0;
3563 oom:
3564 ret = VM_FAULT_OOM;
3565 out:
3566 if (old_folio)
3567 folio_put(old_folio);
3568
3569 delayacct_wpcopy_end();
3570 return ret;
3571 }
3572
3573 /**
3574 * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
3575 * writeable once the page is prepared
3576 *
3577 * @vmf: structure describing the fault
3578 * @folio: the folio of vmf->page
3579 *
3580 * This function handles all that is needed to finish a write page fault in a
3581 * shared mapping due to PTE being read-only once the mapped page is prepared.
3582 * It handles locking of PTE and modifying it.
3583 *
3584 * The function expects the page to be locked or other protection against
3585 * concurrent faults / writeback (such as DAX radix tree locks).
3586 *
3587 * Return: %0 on success, %VM_FAULT_NOPAGE when PTE got changed before
3588 * we acquired PTE lock.
3589 */
3590 static vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf, struct folio *folio)
3591 {
3592 WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
3593 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
3594 &vmf->ptl);
3595 if (!vmf->pte)
3596 return VM_FAULT_NOPAGE;
3597 /*
3598 * We might have raced with another page fault while we released the
3599 * pte_offset_map_lock.
3600 */
3601 if (!pte_same(ptep_get(vmf->pte), vmf->orig_pte)) {
3602 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
3603 pte_unmap_unlock(vmf->pte, vmf->ptl);
3604 return VM_FAULT_NOPAGE;
3605 }
3606 wp_page_reuse(vmf, folio);
3607 return 0;
3608 }
3609
3610 /*
3611 * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
3612 * mapping
3613 */
3614 static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
3615 {
3616 struct vm_area_struct *vma = vmf->vma;
3617
3618 if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
3619 vm_fault_t ret;
3620
3621 pte_unmap_unlock(vmf->pte, vmf->ptl);
3622 ret = vmf_can_call_fault(vmf);
3623 if (ret)
3624 return ret;
3625
3626 vmf->flags |= FAULT_FLAG_MKWRITE;
3627 ret = vma->vm_ops->pfn_mkwrite(vmf);
3628 if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
3629 return ret;
3630 return finish_mkwrite_fault(vmf, NULL);
3631 }
3632 wp_page_reuse(vmf, NULL);
3633 return 0;
3634 }
3635
3636 static vm_fault_t wp_page_shared(struct vm_fault *vmf, struct folio *folio)
3637 __releases(vmf->ptl)
3638 {
3639 struct vm_area_struct *vma = vmf->vma;
3640 vm_fault_t ret = 0;
3641
3642 folio_get(folio);
3643
3644 if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
3645 vm_fault_t tmp;
3646
3647 pte_unmap_unlock(vmf->pte, vmf->ptl);
3648 tmp = vmf_can_call_fault(vmf);
3649 if (tmp) {
3650 folio_put(folio);
3651 return tmp;
3652 }
3653
3654 tmp = do_page_mkwrite(vmf, folio);
3655 if (unlikely(!tmp || (tmp &
3656 (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
3657 folio_put(folio);
3658 return tmp;
3659 }
3660 tmp = finish_mkwrite_fault(vmf, folio);
3661 if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
3662 folio_unlock(folio);
3663 folio_put(folio);
3664 return tmp;
3665 }
3666 } else {
3667 wp_page_reuse(vmf, folio);
3668 folio_lock(folio);
3669 }
3670 ret |= fault_dirty_shared_page(vmf);
3671 folio_put(folio);
3672
3673 return ret;
3674 }
3675
3676 static bool wp_can_reuse_anon_folio(struct folio *folio,
3677 struct vm_area_struct *vma)
3678 {
3679 /*
3680 * We could currently only reuse a subpage of a large folio if no
3681 * other subpages of the large folio are still mapped. However,
3682 * let's just consistently not reuse subpages even if we could
3683 * reuse in that scenario, and give back a large folio a bit
3684 * sooner.
3685 */
3686 if (folio_test_large(folio))
3687 return false;
3688
3689 /*
3690 * We have to verify under folio lock: these early checks are
3691 * just an optimization to avoid locking the folio and freeing
3692 * the swapcache if there is little hope that we can reuse.
3693 *
3694 * KSM doesn't necessarily raise the folio refcount.
3695 */
3696 if (folio_test_ksm(folio) || folio_ref_count(folio) > 3)
3697 return false;
3698 if (!folio_test_lru(folio))
3699 /*
3700 * We cannot easily detect+handle references from
3701 * remote LRU caches or references to LRU folios.
3702 */
3703 lru_add_drain();
3704 if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio))
3705 return false;
3706 if (!folio_trylock(folio))
3707 return false;
3708 if (folio_test_swapcache(folio))
3709 folio_free_swap(folio);
3710 if (folio_test_ksm(folio) || folio_ref_count(folio) != 1) {
3711 folio_unlock(folio);
3712 return false;
3713 }
3714 /*
3715 * Ok, we've got the only folio reference from our mapping
3716 * and the folio is locked, it's dark out, and we're wearing
3717 * sunglasses. Hit it.
3718 */
3719 folio_move_anon_rmap(folio, vma);
3720 folio_unlock(folio);
3721 return true;
3722 }
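
/*
 * Worked example for the checks above (illustrative): an anon folio
 * mapped by exactly one PTE and still in the swapcache typically has
 * folio_ref_count() == 2 (one reference from the page table mapping,
 * one from the swapcache), so it passes the
 * "> 1 + folio_test_swapcache()" filter. Once folio_free_swap() drops
 * the swapcache reference, the final "!= 1" test succeeds only if our
 * mapping holds the single remaining reference, i.e. nobody else can
 * observe the folio and reusing it in place is safe.
 */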
3723
3724 /*
3725 * This routine handles present pages, when
3726 * * users try to write to a shared page (FAULT_FLAG_WRITE)
3727 * * GUP wants to take a R/O pin on a possibly shared anonymous page
3728 * (FAULT_FLAG_UNSHARE)
3729 *
3730 * It is done by copying the page to a new address and decrementing the
3731 * shared-page counter for the old page.
3732 *
3733 * Note that this routine assumes that the protection checks have been
3734 * done by the caller (the low-level page fault routine in most cases).
3735 * Thus, with FAULT_FLAG_WRITE, we can safely just mark it writable once we've
3736 * done any necessary COW.
3737 *
3738 * In case of FAULT_FLAG_WRITE, we also mark the page dirty at this point even
3739 * though the page will change only once the write actually happens. This
3740 * avoids a few races, and potentially makes it more efficient.
3741 *
3742 * We enter with non-exclusive mmap_lock (to exclude vma changes,
3743 * but allow concurrent faults), with pte both mapped and locked.
3744 * We return with mmap_lock still held, but pte unmapped and unlocked.
3745 */
3746 static vm_fault_t do_wp_page(struct vm_fault *vmf)
3747 __releases(vmf->ptl)
3748 {
3749 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
3750 struct vm_area_struct *vma = vmf->vma;
3751 struct folio *folio = NULL;
3752 pte_t pte;
3753
3754 if (likely(!unshare)) {
3755 if (userfaultfd_pte_wp(vma, ptep_get(vmf->pte))) {
3756 if (!userfaultfd_wp_async(vma)) {
3757 pte_unmap_unlock(vmf->pte, vmf->ptl);
3758 return handle_userfault(vmf, VM_UFFD_WP);
3759 }
3760
3761 /*
3762 * Nothing needed (cache flush, TLB invalidations,
3763 * etc.) because we're only removing the uffd-wp bit,
3764 * which is completely invisible to the user.
3765 */
3766 pte = pte_clear_uffd_wp(ptep_get(vmf->pte));
3767
3768 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
3769 /*
3770 * Update this to be prepared for following up CoW
3771 * handling
3772 */
3773 vmf->orig_pte = pte;
3774 }
3775
3776 /*
3777 * Userfaultfd write-protect can defer flushes. Ensure the TLB
3778 * is flushed in this case before copying.
3779 */
3780 if (unlikely(userfaultfd_wp(vmf->vma) &&
3781 mm_tlb_flush_pending(vmf->vma->vm_mm)))
3782 flush_tlb_page(vmf->vma, vmf->address);
3783 }
3784
3785 vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
3786
3787 if (vmf->page)
3788 folio = page_folio(vmf->page);
3789
3790 /*
3791 * Shared mapping: we are guaranteed to have VM_WRITE and
3792 * FAULT_FLAG_WRITE set at this point.
3793 */
3794 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
3795 /*
3796 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
3797 * VM_PFNMAP VMA.
3798 *
3799 * We should not cow pages in a shared writeable mapping.
3800 * Just mark the pages writable and/or call ops->pfn_mkwrite.
3801 */
3802 if (!vmf->page)
3803 return wp_pfn_shared(vmf);
3804 return wp_page_shared(vmf, folio);
3805 }
3806
3807 /*
3808 * Private mapping: create an exclusive anonymous page copy if reuse
3809 * is impossible. We might miss VM_WRITE for FOLL_FORCE handling.
3810 *
3811 * If we encounter a page that is marked exclusive, we must reuse
3812 * the page without further checks.
3813 */
3814 if (folio && folio_test_anon(folio) &&
3815 (PageAnonExclusive(vmf->page) || wp_can_reuse_anon_folio(folio, vma))) {
3816 if (!PageAnonExclusive(vmf->page))
3817 SetPageAnonExclusive(vmf->page);
3818 if (unlikely(unshare)) {
3819 pte_unmap_unlock(vmf->pte, vmf->ptl);
3820 return 0;
3821 }
3822 wp_page_reuse(vmf, folio);
3823 return 0;
3824 }
3825 /*
3826 * Ok, we need to copy. Oh, well..
3827 */
3828 if (folio)
3829 folio_get(folio);
3830
3831 pte_unmap_unlock(vmf->pte, vmf->ptl);
3832 #ifdef CONFIG_KSM
3833 if (folio && folio_test_ksm(folio))
3834 count_vm_event(COW_KSM);
3835 #endif
3836 return wp_page_copy(vmf);
3837 }
3838
3839 static void unmap_mapping_range_vma(struct vm_area_struct *vma,
3840 unsigned long start_addr, unsigned long end_addr,
3841 struct zap_details *details)
3842 {
3843 zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
3844 }
3845
3846 static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
3847 pgoff_t first_index,
3848 pgoff_t last_index,
3849 struct zap_details *details)
3850 {
3851 struct vm_area_struct *vma;
3852 pgoff_t vba, vea, zba, zea;
3853
3854 vma_interval_tree_foreach(vma, root, first_index, last_index) {
3855 vba = vma->vm_pgoff;
3856 vea = vba + vma_pages(vma) - 1;
3857 zba = max(first_index, vba);
3858 zea = min(last_index, vea);
3859
3860 unmap_mapping_range_vma(vma,
3861 ((zba - vba) << PAGE_SHIFT) + vma->vm_start,
3862 ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
3863 details);
3864 }
3865 }
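
/*
 * Worked example for the clamping above (illustrative numbers): a VMA
 * with vm_pgoff == 10 and vma_pages() == 5 spans file pages 10..14, so
 * vba == 10 and vea == 14. Zapping first_index == 12, last_index == 100
 * clamps to zba == 12, zea == 14: the last three pages of the VMA, i.e.
 * the byte range [vm_start + 2 * PAGE_SIZE, vm_start + 5 * PAGE_SIZE).
 */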
3866
3867 /**
3868 * unmap_mapping_folio() - Unmap single folio from processes.
3869 * @folio: The locked folio to be unmapped.
3870 *
3871 * Unmap this folio from any userspace process which still has it mmaped.
3872 * Typically, for efficiency, the range of nearby pages has already been
3873 * unmapped by unmap_mapping_pages() or unmap_mapping_range(). But once
3874 * truncation or invalidation holds the lock on a folio, it may find that
3875 * the page has been remapped again: it then uses unmap_mapping_folio()
3876 * to finally unmap it.
3877 */
3878 void unmap_mapping_folio(struct folio *folio)
3879 {
3880 struct address_space *mapping = folio->mapping;
3881 struct zap_details details = { };
3882 pgoff_t first_index;
3883 pgoff_t last_index;
3884
3885 VM_BUG_ON(!folio_test_locked(folio));
3886
3887 first_index = folio->index;
3888 last_index = folio_next_index(folio) - 1;
3889
3890 details.even_cows = false;
3891 details.single_folio = folio;
3892 details.zap_flags = ZAP_FLAG_DROP_MARKER;
3893
3894 i_mmap_lock_read(mapping);
3895 if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
3896 unmap_mapping_range_tree(&mapping->i_mmap, first_index,
3897 last_index, &details);
3898 i_mmap_unlock_read(mapping);
3899 }
3900
3901 /**
3902 * unmap_mapping_pages() - Unmap pages from processes.
3903 * @mapping: The address space containing pages to be unmapped.
3904 * @start: Index of first page to be unmapped.
3905 * @nr: Number of pages to be unmapped. 0 to unmap to end of file.
3906 * @even_cows: Whether to unmap even private COWed pages.
3907 *
3908 * Unmap the pages in this address space from any userspace process which
3909 * has them mmaped. Generally, you want to remove COWed pages as well when
3910 * a file is being truncated, but not when invalidating pages from the page
3911 * cache.
3912 */
3913 void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
3914 pgoff_t nr, bool even_cows)
3915 {
3916 struct zap_details details = { };
3917 pgoff_t first_index = start;
3918 pgoff_t last_index = start + nr - 1;
3919
3920 details.even_cows = even_cows;
3921 if (last_index < first_index)
3922 last_index = ULONG_MAX;
3923
3924 i_mmap_lock_read(mapping);
3925 if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
3926 unmap_mapping_range_tree(&mapping->i_mmap, first_index,
3927 last_index, &details);
3928 i_mmap_unlock_read(mapping);
3929 }
3930 EXPORT_SYMBOL_GPL(unmap_mapping_pages);
3931
3932 /**
3933 * unmap_mapping_range - unmap the portion of all mmaps in the specified
3934 * address_space corresponding to the specified byte range in the underlying
3935 * file.
3936 *
3937 * @mapping: the address space containing mmaps to be unmapped.
3938 * @holebegin: byte in first page to unmap, relative to the start of
3939 * the underlying file. This will be rounded down to a PAGE_SIZE
3940 * boundary. Note that this is different from truncate_pagecache(), which
3941 * must keep the partial page. In contrast, we must get rid of
3942 * partial pages.
3943 * @holelen: size of prospective hole in bytes. This will be rounded
3944 * up to a PAGE_SIZE boundary. A holelen of zero truncates to the
3945 * end of the file.
3946 * @even_cows: 1 when truncating a file, unmap even private COWed pages;
3947 * but 0 when invalidating pagecache, don't throw away private data.
3948 */
3949 void unmap_mapping_range(struct address_space *mapping,
3950 loff_t const holebegin, loff_t const holelen, int even_cows)
3951 {
3952 pgoff_t hba = (pgoff_t)(holebegin) >> PAGE_SHIFT;
3953 pgoff_t hlen = ((pgoff_t)(holelen) + PAGE_SIZE - 1) >> PAGE_SHIFT;
3954
3955 /* Check for overflow. */
3956 if (sizeof(holelen) > sizeof(hlen)) {
3957 long long holeend =
3958 (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3959 if (holeend & ~(long long)ULONG_MAX)
3960 hlen = ULONG_MAX - hba + 1;
3961 }
3962
3963 unmap_mapping_pages(mapping, hba, hlen, even_cows);
3964 }
3965 EXPORT_SYMBOL(unmap_mapping_range);
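
/*
 * Illustrative usage (sketch): when truncating an inode down to newsize,
 * a filesystem typically unmaps everything past the new EOF before
 * removing the pagecache, roughly:
 *
 *	unmap_mapping_range(inode->i_mapping, newsize, 0, 1);
 *	truncate_inode_pages(inode->i_mapping, newsize);
 *
 * holelen == 0 means "unmap to end of file", and even_cows == 1 because
 * truncation must also drop private COWed copies of the removed range.
 */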
3966
3967 /*
3968 * Restore a potential device exclusive pte to a working pte entry
3969 */
3970 static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
3971 {
3972 struct folio *folio = page_folio(vmf->page);
3973 struct vm_area_struct *vma = vmf->vma;
3974 struct mmu_notifier_range range;
3975 vm_fault_t ret;
3976
3977 /*
3978 * We need a reference to lock the folio because we don't hold
3979 * the PTL so a racing thread can remove the device-exclusive
3980 * entry and unmap it. If the folio is free, the entry must
3981 * have been removed already. If it happens to have already
3982 * been re-allocated after being freed, all we do is lock and
3983 * unlock it.
3984 */
3985 if (!folio_try_get(folio))
3986 return 0;
3987
3988 ret = folio_lock_or_retry(folio, vmf);
3989 if (ret) {
3990 folio_put(folio);
3991 return ret;
3992 }
3993 mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0,
3994 vma->vm_mm, vmf->address & PAGE_MASK,
3995 (vmf->address & PAGE_MASK) + PAGE_SIZE, NULL);
3996 mmu_notifier_invalidate_range_start(&range);
3997
3998 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3999 &vmf->ptl);
4000 if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
4001 restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte);
4002
4003 if (vmf->pte)
4004 pte_unmap_unlock(vmf->pte, vmf->ptl);
4005 folio_unlock(folio);
4006 folio_put(folio);
4007
4008 mmu_notifier_invalidate_range_end(&range);
4009 return 0;
4010 }
4011
4012 static inline bool should_try_to_free_swap(struct folio *folio,
4013 struct vm_area_struct *vma,
4014 unsigned int fault_flags)
4015 {
4016 if (!folio_test_swapcache(folio))
4017 return false;
4018 if (mem_cgroup_swap_full(folio) || (vma->vm_flags & VM_LOCKED) ||
4019 folio_test_mlocked(folio))
4020 return true;
4021 /*
4022 * If we want to map a page that's in the swapcache writable, we
4023 * have to detect via the refcount if we're really the exclusive
4024 * user. Try freeing the swapcache to get rid of the swapcache
4025 * reference only in case it's likely that we'll be the exclusive user.
4026 */
4027 return (fault_flags & FAULT_FLAG_WRITE) && !folio_test_ksm(folio) &&
4028 folio_ref_count(folio) == (1 + folio_nr_pages(folio));
4029 }
4030
4031 static vm_fault_t pte_marker_clear(struct vm_fault *vmf)
4032 {
4033 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
4034 vmf->address, &vmf->ptl);
4035 if (!vmf->pte)
4036 return 0;
4037 /*
4038 * Be careful so that we will only recover a special uffd-wp pte into a
4039 * none pte. Otherwise it means the pte could have changed, so retry.
4040 *
4041 * This should also cover the case where e.g. the pte changed
4042 * quickly from a PTE_MARKER_UFFD_WP into PTE_MARKER_POISONED.
4043 * So the is_pte_marker() check alone is not enough to safely drop the pte.
4044 */
4045 if (pte_same(vmf->orig_pte, ptep_get(vmf->pte)))
4046 pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte);
4047 pte_unmap_unlock(vmf->pte, vmf->ptl);
4048 return 0;
4049 }
4050
4051 static vm_fault_t do_pte_missing(struct vm_fault *vmf)
4052 {
4053 if (vma_is_anonymous(vmf->vma))
4054 return do_anonymous_page(vmf);
4055 else
4056 return do_fault(vmf);
4057 }
4058
4059 /*
4060 * This is actually a page-missing access, but with uffd-wp special pte
4061 * installed. It means this pte was wr-protected before being unmapped.
4062 */
4063 static vm_fault_t pte_marker_handle_uffd_wp(struct vm_fault *vmf)
4064 {
4065 /*
4066 * Just in case there're leftover special ptes even after the region
4067 * got unregistered - we can simply clear them.
4068 */
4069 if (unlikely(!userfaultfd_wp(vmf->vma)))
4070 return pte_marker_clear(vmf);
4071
4072 return do_pte_missing(vmf);
4073 }
4074
4075 static vm_fault_t handle_pte_marker(struct vm_fault *vmf)
4076 {
4077 swp_entry_t entry = pte_to_swp_entry(vmf->orig_pte);
4078 unsigned long marker = pte_marker_get(entry);
4079
4080 /*
4081 * PTE markers should never be empty. If anything weird happened,
4082 * the best thing to do is to kill the process along with its mm.
4083 */
4084 if (WARN_ON_ONCE(!marker))
4085 return VM_FAULT_SIGBUS;
4086
4087 /* Higher priority than uffd-wp when data corrupted */
4088 if (marker & PTE_MARKER_POISONED)
4089 return VM_FAULT_HWPOISON;
4090
4091 /* Hitting a guard page is always a fatal condition. */
4092 if (marker & PTE_MARKER_GUARD)
4093 return VM_FAULT_SIGSEGV;
4094
4095 if (pte_marker_entry_uffd_wp(entry))
4096 return pte_marker_handle_uffd_wp(vmf);
4097
4098 /* This is an unknown pte marker */
4099 return VM_FAULT_SIGBUS;
4100 }
4101
4102 static struct folio *__alloc_swap_folio(struct vm_fault *vmf)
4103 {
4104 struct vm_area_struct *vma = vmf->vma;
4105 struct folio *folio;
4106 swp_entry_t entry;
4107
4108 folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vmf->address);
4109 if (!folio)
4110 return NULL;
4111
4112 entry = pte_to_swp_entry(vmf->orig_pte);
4113 if (mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
4114 GFP_KERNEL, entry)) {
4115 folio_put(folio);
4116 return NULL;
4117 }
4118
4119 return folio;
4120 }
4121
4122 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4123 static inline int non_swapcache_batch(swp_entry_t entry, int max_nr)
4124 {
4125 struct swap_info_struct *si = swp_swap_info(entry);
4126 pgoff_t offset = swp_offset(entry);
4127 int i;
4128
4129 /*
4130 * When allocating a large folio and doing swap_read_folio(), which is
4131 * the case when the faulted pte has no swapcache, we need to ensure
4132 * that none of the other PTEs have swapcache either; otherwise we might
4133 * read from the swap device while the content is still in the swapcache.
4134 */
4135 for (i = 0; i < max_nr; i++) {
4136 if ((si->swap_map[offset + i] & SWAP_HAS_CACHE))
4137 return i;
4138 }
4139
4140 return i;
4141 }
4142
4143 /*
4144 * Check if the PTEs within a range are contiguous swap entries
4145 * and have consistent swapcache, zeromap.
4146 */
4147 static bool can_swapin_thp(struct vm_fault *vmf, pte_t *ptep, int nr_pages)
4148 {
4149 unsigned long addr;
4150 swp_entry_t entry;
4151 int idx;
4152 pte_t pte;
4153
4154 addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE);
4155 idx = (vmf->address - addr) / PAGE_SIZE;
4156 pte = ptep_get(ptep);
4157
4158 if (!pte_same(pte, pte_move_swp_offset(vmf->orig_pte, -idx)))
4159 return false;
4160 entry = pte_to_swp_entry(pte);
4161 if (swap_pte_batch(ptep, nr_pages, pte) != nr_pages)
4162 return false;
4163
4164 /*
4165 * swap_read_folio() can't handle the case where a large folio is backed
4166 * by a mix of different backends, and those are likely corner cases.
4167 * Similar handling might be added once zswap supports large folios.
4168 */
4169 if (unlikely(swap_zeromap_batch(entry, nr_pages, NULL) != nr_pages))
4170 return false;
4171 if (unlikely(non_swapcache_batch(entry, nr_pages) != nr_pages))
4172 return false;
4173
4174 return true;
4175 }
4176
4177 static inline unsigned long thp_swap_suitable_orders(pgoff_t swp_offset,
4178 unsigned long addr,
4179 unsigned long orders)
4180 {
4181 int order, nr;
4182
4183 order = highest_order(orders);
4184
4185 /*
4186 * To swap in a THP with nr pages, we require that its first swap_offset
4187 * is aligned with that number, as it was when the THP was swapped out.
4188 * This helps filter out most invalid entries.
4189 */
4190 while (orders) {
4191 nr = 1 << order;
4192 if ((addr >> PAGE_SHIFT) % nr == swp_offset % nr)
4193 break;
4194 order = next_order(&orders, order);
4195 }
4196
4197 return orders;
4198 }
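
/*
 * Worked example for the filter above (illustrative numbers): a fault at
 * virtual page index 35 on a swap entry with offset 99 keeps order-4 in
 * the mask, because 35 % 16 == 99 % 16 == 3; the candidate THP would
 * cover page indexes 32..47 backed by swap offsets 96..111. With swap
 * offset 100 instead, 100 % 16 == 4 != 3, and in fact every order above
 * 0 is filtered out (100 % 2 == 0 while 35 % 2 == 1).
 */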
4199
4200 static struct folio *alloc_swap_folio(struct vm_fault *vmf)
4201 {
4202 struct vm_area_struct *vma = vmf->vma;
4203 unsigned long orders;
4204 struct folio *folio;
4205 unsigned long addr;
4206 swp_entry_t entry;
4207 spinlock_t *ptl;
4208 pte_t *pte;
4209 gfp_t gfp;
4210 int order;
4211
4212 /*
4213 * If uffd is active for the vma we need per-page fault fidelity to
4214 * maintain the uffd semantics.
4215 */
4216 if (unlikely(userfaultfd_armed(vma)))
4217 goto fallback;
4218
4219 /*
4220 * A large swapped out folio could be partially or fully in zswap. We
4221 * lack handling for such cases, so fallback to swapping in order-0
4222 * folio.
4223 */
4224 if (!zswap_never_enabled())
4225 goto fallback;
4226
4227 entry = pte_to_swp_entry(vmf->orig_pte);
4228 /*
4229 * Get a list of all the (large) orders below PMD_ORDER that are enabled
4230 * and suitable for swapping THP.
4231 */
4232 orders = thp_vma_allowable_orders(vma, vma->vm_flags,
4233 TVA_IN_PF | TVA_ENFORCE_SYSFS, BIT(PMD_ORDER) - 1);
4234 orders = thp_vma_suitable_orders(vma, vmf->address, orders);
4235 orders = thp_swap_suitable_orders(swp_offset(entry),
4236 vmf->address, orders);
4237
4238 if (!orders)
4239 goto fallback;
4240
4241 pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
4242 vmf->address & PMD_MASK, &ptl);
4243 if (unlikely(!pte))
4244 goto fallback;
4245
4246 /*
4247 * For do_swap_page, find the highest order where the aligned range is
4248 * completely swap entries with contiguous swap offsets.
4249 */
4250 order = highest_order(orders);
4251 while (orders) {
4252 addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
4253 if (can_swapin_thp(vmf, pte + pte_index(addr), 1 << order))
4254 break;
4255 order = next_order(&orders, order);
4256 }
4257
4258 pte_unmap_unlock(pte, ptl);
4259
4260 /* Try allocating the highest of the remaining orders. */
4261 gfp = vma_thp_gfp_mask(vma);
4262 while (orders) {
4263 addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
4264 folio = vma_alloc_folio(gfp, order, vma, addr);
4265 if (folio) {
4266 if (!mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
4267 gfp, entry))
4268 return folio;
4269 count_mthp_stat(order, MTHP_STAT_SWPIN_FALLBACK_CHARGE);
4270 folio_put(folio);
4271 }
4272 count_mthp_stat(order, MTHP_STAT_SWPIN_FALLBACK);
4273 order = next_order(&orders, order);
4274 }
4275
4276 fallback:
4277 return __alloc_swap_folio(vmf);
4278 }
4279 #else /* !CONFIG_TRANSPARENT_HUGEPAGE */
4280 static struct folio *alloc_swap_folio(struct vm_fault *vmf)
4281 {
4282 return __alloc_swap_folio(vmf);
4283 }
4284 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
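
/*
 * Illustrative sketch of the orders-bitmask walk used by the allocators
 * above (hypothetical mask value): with orders == BIT(4) | BIT(2),
 * highest_order() returns 4; if the order-4 attempt fails its checks or
 * allocation, next_order(&orders, 4) clears BIT(4) and returns 2; once
 * the mask is empty, while (orders) terminates and the code falls back
 * to the order-0 path via __alloc_swap_folio().
 */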
4285
4286 static DECLARE_WAIT_QUEUE_HEAD(swapcache_wq);
4287
4288 /*
4289 * We enter with non-exclusive mmap_lock (to exclude vma changes,
4290 * but allow concurrent faults), and pte mapped but not yet locked.
4291 * We return with pte unmapped and unlocked.
4292 *
4293 * We return with the mmap_lock locked or unlocked in the same cases
4294 * as does filemap_fault().
4295 */
4296 vm_fault_t do_swap_page(struct vm_fault *vmf)
4297 {
4298 struct vm_area_struct *vma = vmf->vma;
4299 struct folio *swapcache, *folio = NULL;
4300 DECLARE_WAITQUEUE(wait, current);
4301 struct page *page;
4302 struct swap_info_struct *si = NULL;
4303 rmap_t rmap_flags = RMAP_NONE;
4304 bool need_clear_cache = false;
4305 bool exclusive = false;
4306 swp_entry_t entry;
4307 pte_t pte;
4308 vm_fault_t ret = 0;
4309 void *shadow = NULL;
4310 int nr_pages;
4311 unsigned long page_idx;
4312 unsigned long address;
4313 pte_t *ptep;
4314
4315 if (!pte_unmap_same(vmf))
4316 goto out;
4317
4318 entry = pte_to_swp_entry(vmf->orig_pte);
4319 if (unlikely(non_swap_entry(entry))) {
4320 if (is_migration_entry(entry)) {
4321 migration_entry_wait(vma->vm_mm, vmf->pmd,
4322 vmf->address);
4323 } else if (is_device_exclusive_entry(entry)) {
4324 vmf->page = pfn_swap_entry_to_page(entry);
4325 ret = remove_device_exclusive_entry(vmf);
4326 } else if (is_device_private_entry(entry)) {
4327 if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
4328 /*
4329 * migrate_to_ram is not yet ready to operate
4330 * under VMA lock.
4331 */
4332 vma_end_read(vma);
4333 ret = VM_FAULT_RETRY;
4334 goto out;
4335 }
4336
4337 vmf->page = pfn_swap_entry_to_page(entry);
4338 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4339 vmf->address, &vmf->ptl);
4340 if (unlikely(!vmf->pte ||
4341 !pte_same(ptep_get(vmf->pte),
4342 vmf->orig_pte)))
4343 goto unlock;
4344
4345 /*
4346 * Get a page reference while we know the page can't be
4347 * freed.
4348 */
4349 get_page(vmf->page);
4350 pte_unmap_unlock(vmf->pte, vmf->ptl);
4351 ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
4352 put_page(vmf->page);
4353 } else if (is_hwpoison_entry(entry)) {
4354 ret = VM_FAULT_HWPOISON;
4355 } else if (is_pte_marker_entry(entry)) {
4356 ret = handle_pte_marker(vmf);
4357 } else {
4358 print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
4359 ret = VM_FAULT_SIGBUS;
4360 }
4361 goto out;
4362 }
4363
4364 /* Prevent swapoff from happening to us. */
4365 si = get_swap_device(entry);
4366 if (unlikely(!si))
4367 goto out;
4368
4369 folio = swap_cache_get_folio(entry, vma, vmf->address);
4370 if (folio)
4371 page = folio_file_page(folio, swp_offset(entry));
4372 swapcache = folio;
4373
4374 if (!folio) {
4375 if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
4376 __swap_count(entry) == 1) {
4377 /* skip swapcache */
4378 folio = alloc_swap_folio(vmf);
4379 if (folio) {
4380 __folio_set_locked(folio);
4381 __folio_set_swapbacked(folio);
4382
4383 nr_pages = folio_nr_pages(folio);
4384 if (folio_test_large(folio))
4385 entry.val = ALIGN_DOWN(entry.val, nr_pages);
4386 /*
4387 * Prevent parallel swapin from proceeding with
4388 * the cache flag. Otherwise, another thread
4389 * may finish swapin first, free the entry, and
4390 * swapout reusing the same entry. It's
4391 * undetectable as pte_same() returns true due
4392 * to entry reuse.
4393 */
4394 if (swapcache_prepare(entry, nr_pages)) {
4395 /*
4396 * Relax a bit to prevent rapid
4397 * repeated page faults.
4398 */
4399 add_wait_queue(&swapcache_wq, &wait);
4400 schedule_timeout_uninterruptible(1);
4401 remove_wait_queue(&swapcache_wq, &wait);
4402 goto out_page;
4403 }
4404 need_clear_cache = true;
4405
4406 mem_cgroup_swapin_uncharge_swap(entry, nr_pages);
4407
4408 shadow = get_shadow_from_swap_cache(entry);
4409 if (shadow)
4410 workingset_refault(folio, shadow);
4411
4412 folio_add_lru(folio);
4413
4414 /* To provide entry to swap_read_folio() */
4415 folio->swap = entry;
4416 swap_read_folio(folio, NULL);
4417 folio->private = NULL;
4418 }
4419 } else {
4420 folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
4421 vmf);
4422 swapcache = folio;
4423 }
4424
4425 if (!folio) {
4426 /*
4427 * Back out if somebody else faulted in this pte
4428 * while we released the pte lock.
4429 */
4430 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4431 vmf->address, &vmf->ptl);
4432 if (likely(vmf->pte &&
4433 pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
4434 ret = VM_FAULT_OOM;
4435 goto unlock;
4436 }
4437
4438 /* Had to read the page from swap area: Major fault */
4439 ret = VM_FAULT_MAJOR;
4440 count_vm_event(PGMAJFAULT);
4441 count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
4442 page = folio_file_page(folio, swp_offset(entry));
4443 } else if (PageHWPoison(page)) {
4444 /*
4445 * hwpoisoned dirty swapcache pages are kept for killing
4446 * owner processes (which may be unknown at hwpoison time)
4447 */
4448 ret = VM_FAULT_HWPOISON;
4449 goto out_release;
4450 }
4451
4452 ret |= folio_lock_or_retry(folio, vmf);
4453 if (ret & VM_FAULT_RETRY)
4454 goto out_release;
4455
4456 if (swapcache) {
4457 /*
4458 * Make sure folio_free_swap() or swapoff did not release the
4459 * swapcache from under us. The page pin, and pte_same test
4460 * below, are not enough to exclude that. Even if it is still
4461 * swapcache, we need to check that the page's swap has not
4462 * changed.
4463 */
4464 if (unlikely(!folio_test_swapcache(folio) ||
4465 page_swap_entry(page).val != entry.val))
4466 goto out_page;
4467
4468 /*
4469 * KSM sometimes has to copy on read faults, for example, if
4470 * page->index of !PageKSM() pages would be nonlinear inside the
4471 * anon VMA -- PageKSM() is lost on actual swapout.
4472 */
4473 folio = ksm_might_need_to_copy(folio, vma, vmf->address);
4474 if (unlikely(!folio)) {
4475 ret = VM_FAULT_OOM;
4476 folio = swapcache;
4477 goto out_page;
4478 } else if (unlikely(folio == ERR_PTR(-EHWPOISON))) {
4479 ret = VM_FAULT_HWPOISON;
4480 folio = swapcache;
4481 goto out_page;
4482 }
4483 if (folio != swapcache)
4484 page = folio_page(folio, 0);
4485
4486 /*
4487 * If we want to map a page that's in the swapcache writable, we
4488 * have to detect via the refcount if we're really the exclusive
4489 * owner. Try removing the extra reference from the local LRU
4490 * caches if required.
4491 */
4492 if ((vmf->flags & FAULT_FLAG_WRITE) && folio == swapcache &&
4493 !folio_test_ksm(folio) && !folio_test_lru(folio))
4494 lru_add_drain();
4495 }
4496
4497 folio_throttle_swaprate(folio, GFP_KERNEL);
4498
4499 /*
4500 * Back out if somebody else already faulted in this pte.
4501 */
4502 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
4503 &vmf->ptl);
4504 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
4505 goto out_nomap;
4506
4507 if (unlikely(!folio_test_uptodate(folio))) {
4508 ret = VM_FAULT_SIGBUS;
4509 goto out_nomap;
4510 }
4511
4512 /* allocated large folios for SWP_SYNCHRONOUS_IO */
4513 if (folio_test_large(folio) && !folio_test_swapcache(folio)) {
4514 unsigned long nr = folio_nr_pages(folio);
4515 unsigned long folio_start = ALIGN_DOWN(vmf->address, nr * PAGE_SIZE);
4516 unsigned long idx = (vmf->address - folio_start) / PAGE_SIZE;
4517 pte_t *folio_ptep = vmf->pte - idx;
4518 pte_t folio_pte = ptep_get(folio_ptep);
4519
4520 if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) ||
4521 swap_pte_batch(folio_ptep, nr, folio_pte) != nr)
4522 goto out_nomap;
4523
4524 page_idx = idx;
4525 address = folio_start;
4526 ptep = folio_ptep;
4527 goto check_folio;
4528 }
4529
4530 nr_pages = 1;
4531 page_idx = 0;
4532 address = vmf->address;
4533 ptep = vmf->pte;
4534 if (folio_test_large(folio) && folio_test_swapcache(folio)) {
4535 int nr = folio_nr_pages(folio);
4536 unsigned long idx = folio_page_idx(folio, page);
4537 unsigned long folio_start = address - idx * PAGE_SIZE;
4538 unsigned long folio_end = folio_start + nr * PAGE_SIZE;
4539 pte_t *folio_ptep;
4540 pte_t folio_pte;
4541
4542 if (unlikely(folio_start < max(address & PMD_MASK, vma->vm_start)))
4543 goto check_folio;
4544 if (unlikely(folio_end > pmd_addr_end(address, vma->vm_end)))
4545 goto check_folio;
4546
4547 folio_ptep = vmf->pte - idx;
4548 folio_pte = ptep_get(folio_ptep);
4549 if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) ||
4550 swap_pte_batch(folio_ptep, nr, folio_pte) != nr)
4551 goto check_folio;
4552
4553 page_idx = idx;
4554 address = folio_start;
4555 ptep = folio_ptep;
4556 nr_pages = nr;
4557 entry = folio->swap;
4558 page = &folio->page;
4559 }
4560
4561 check_folio:
4562 /*
4563 * PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte
4564 * must never point at an anonymous page in the swapcache that is
4565 * PG_anon_exclusive. Sanity check that this holds and especially, that
4566 * no filesystem set PG_mappedtodisk on a page in the swapcache. Sanity
4567 * check after taking the PT lock and making sure that nobody
4568 * concurrently faulted in this page and set PG_anon_exclusive.
4569 */
4570 BUG_ON(!folio_test_anon(folio) && folio_test_mappedtodisk(folio));
4571 BUG_ON(folio_test_anon(folio) && PageAnonExclusive(page));
4572
4573 /*
4574 * Check under PT lock (to protect against concurrent fork() sharing
4575 * the swap entry concurrently) for certainly exclusive pages.
4576 */
4577 if (!folio_test_ksm(folio)) {
4578 exclusive = pte_swp_exclusive(vmf->orig_pte);
4579 if (folio != swapcache) {
4580 /*
4581 * We have a fresh page that is not exposed to the
4582 * swapcache -> certainly exclusive.
4583 */
4584 exclusive = true;
4585 } else if (exclusive && folio_test_writeback(folio) &&
4586 data_race(si->flags & SWP_STABLE_WRITES)) {
4587 /*
4588 * This is tricky: not all swap backends support
4589 * concurrent page modifications while under writeback.
4590 *
4591 * So if we stumble over such a page in the swapcache
4592 * we must not set the page exclusive, otherwise we can
4593 * map it writable without further checks and modify it
4594 * while still under writeback.
4595 *
4596 * For these problematic swap backends, simply drop the
4597 * exclusive marker: this is perfectly fine as we start
4598 * writeback only if we fully unmapped the page and
4599 * there are no unexpected references on the page after
4600 * unmapping succeeded. After fully unmapped, no
4601 * further GUP references (FOLL_GET and FOLL_PIN) can
4602 * appear, so dropping the exclusive marker and mapping
4603 * it only R/O is fine.
4604 */
4605 exclusive = false;
4606 }
4607 }
4608
4609 /*
4610 * Some architectures may have to restore extra metadata to the page
4611 * when reading from swap. This metadata may be indexed by swap entry
4612 * so this must be called before swap_free().
4613 */
4614 arch_swap_restore(folio_swap(entry, folio), folio);
4615
4616 /*
4617 * Remove the swap entry and conditionally try to free up the swapcache.
4618 * We're already holding a reference on the page but haven't mapped it
4619 * yet.
4620 */
4621 swap_free_nr(entry, nr_pages);
4622 if (should_try_to_free_swap(folio, vma, vmf->flags))
4623 folio_free_swap(folio);
4624
4625 add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
4626 add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages);
4627 pte = mk_pte(page, vma->vm_page_prot);
4628 if (pte_swp_soft_dirty(vmf->orig_pte))
4629 pte = pte_mksoft_dirty(pte);
4630 if (pte_swp_uffd_wp(vmf->orig_pte))
4631 pte = pte_mkuffd_wp(pte);
4632
4633 /*
4634 * Same logic as in do_wp_page(); however, optimize for pages that are
4635 * certainly not shared either because we just allocated them without
4636 * exposing them to the swapcache or because the swap entry indicates
4637 * exclusivity.
4638 */
4639 if (!folio_test_ksm(folio) &&
4640 (exclusive || folio_ref_count(folio) == 1)) {
4641 if ((vma->vm_flags & VM_WRITE) && !userfaultfd_pte_wp(vma, pte) &&
4642 !pte_needs_soft_dirty_wp(vma, pte)) {
4643 pte = pte_mkwrite(pte, vma);
4644 if (vmf->flags & FAULT_FLAG_WRITE) {
4645 pte = pte_mkdirty(pte);
4646 vmf->flags &= ~FAULT_FLAG_WRITE;
4647 }
4648 }
4649 rmap_flags |= RMAP_EXCLUSIVE;
4650 }
4651 folio_ref_add(folio, nr_pages - 1);
4652 flush_icache_pages(vma, page, nr_pages);
4653 vmf->orig_pte = pte_advance_pfn(pte, page_idx);
4654
4655 /* ksm created a completely new copy */
4656 if (unlikely(folio != swapcache && swapcache)) {
4657 folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE);
4658 folio_add_lru_vma(folio, vma);
4659 } else if (!folio_test_anon(folio)) {
4660 /*
4661 * We currently only expect small !anon folios which are either
4662 * fully exclusive or fully shared, or newly allocated large
4663 * folios which are fully exclusive. If we ever get large
4664 * folios within swapcache here, we have to be careful.
4665 */
4666 VM_WARN_ON_ONCE(folio_test_large(folio) && folio_test_swapcache(folio));
4667 VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
4668 folio_add_new_anon_rmap(folio, vma, address, rmap_flags);
4669 } else {
4670 folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, address,
4671 rmap_flags);
4672 }
4673
4674 VM_BUG_ON(!folio_test_anon(folio) ||
4675 (pte_write(pte) && !PageAnonExclusive(page)));
4676 set_ptes(vma->vm_mm, address, ptep, pte, nr_pages);
4677 arch_do_swap_page_nr(vma->vm_mm, vma, address,
4678 pte, pte, nr_pages);
4679
4680 folio_unlock(folio);
4681 if (folio != swapcache && swapcache) {
4682 /*
4683 * Hold the lock to avoid the swap entry to be reused
4684 * until we take the PT lock for the pte_same() check
4685 * (to avoid false positives from pte_same). For
4686 * further safety release the lock after the swap_free
4687 * so that the swap count won't change under a
4688 * parallel locked swapcache.
4689 */
4690 folio_unlock(swapcache);
4691 folio_put(swapcache);
4692 }
4693
4694 if (vmf->flags & FAULT_FLAG_WRITE) {
4695 ret |= do_wp_page(vmf);
4696 if (ret & VM_FAULT_ERROR)
4697 ret &= VM_FAULT_ERROR;
4698 goto out;
4699 }
4700
4701 /* No need to invalidate - it was non-present before */
4702 update_mmu_cache_range(vmf, vma, address, ptep, nr_pages);
4703 unlock:
4704 if (vmf->pte)
4705 pte_unmap_unlock(vmf->pte, vmf->ptl);
4706 out:
4707 /* Clear the swap cache pin for direct swapin after PTL unlock */
4708 if (need_clear_cache) {
4709 swapcache_clear(si, entry, nr_pages);
4710 if (waitqueue_active(&swapcache_wq))
4711 wake_up(&swapcache_wq);
4712 }
4713 if (si)
4714 put_swap_device(si);
4715 return ret;
4716 out_nomap:
4717 if (vmf->pte)
4718 pte_unmap_unlock(vmf->pte, vmf->ptl);
4719 out_page:
4720 folio_unlock(folio);
4721 out_release:
4722 folio_put(folio);
4723 if (folio != swapcache && swapcache) {
4724 folio_unlock(swapcache);
4725 folio_put(swapcache);
4726 }
4727 if (need_clear_cache) {
4728 swapcache_clear(si, entry, nr_pages);
4729 if (waitqueue_active(&swapcache_wq))
4730 wake_up(&swapcache_wq);
4731 }
4732 if (si)
4733 put_swap_device(si);
4734 return ret;
4735 }
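
/*
 * Illustrative timeline (sketch) of the swap entry reuse race that the
 * swapcache_prepare() call in the SWP_SYNCHRONOUS_IO path above guards
 * against:
 *
 *	CPU0					CPU1
 *	faults on swap entry E
 *	swap_read_folio() begins
 *						faults on E, reads it, maps it,
 *						frees E, then E is reused for a
 *						new swapout of the same address
 *	takes PTL, pte_same() passes
 *	maps stale data
 *
 * Setting SWAP_HAS_CACHE via swapcache_prepare() makes the second
 * faulter wait on swapcache_wq and retry instead.
 */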
4736
4737 static bool pte_range_none(pte_t *pte, int nr_pages)
4738 {
4739 int i;
4740
4741 for (i = 0; i < nr_pages; i++) {
4742 if (!pte_none(ptep_get_lockless(pte + i)))
4743 return false;
4744 }
4745
4746 return true;
4747 }
4748
4749 static struct folio *alloc_anon_folio(struct vm_fault *vmf)
4750 {
4751 struct vm_area_struct *vma = vmf->vma;
4752 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4753 unsigned long orders;
4754 struct folio *folio;
4755 unsigned long addr;
4756 pte_t *pte;
4757 gfp_t gfp;
4758 int order;
4759
4760 /*
4761 * If uffd is active for the vma we need per-page fault fidelity to
4762 * maintain the uffd semantics.
4763 */
4764 if (unlikely(userfaultfd_armed(vma)))
4765 goto fallback;
4766
4767 /*
4768 * Get a list of all the (large) orders below PMD_ORDER that are enabled
4769 * for this vma. Then filter out the orders that can't be allocated over
4770 * the faulting address and still be fully contained in the vma.
4771 */
4772 orders = thp_vma_allowable_orders(vma, vma->vm_flags,
4773 TVA_IN_PF | TVA_ENFORCE_SYSFS, BIT(PMD_ORDER) - 1);
4774 orders = thp_vma_suitable_orders(vma, vmf->address, orders);
4775
4776 if (!orders)
4777 goto fallback;
4778
4779 pte = pte_offset_map(vmf->pmd, vmf->address & PMD_MASK);
4780 if (!pte)
4781 return ERR_PTR(-EAGAIN);
4782
4783 /*
4784 * Find the highest order where the aligned range is completely
4785 * pte_none(). Note that all remaining orders will be completely
4786 * pte_none().
4787 */
4788 order = highest_order(orders);
4789 while (orders) {
4790 addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
4791 if (pte_range_none(pte + pte_index(addr), 1 << order))
4792 break;
4793 order = next_order(&orders, order);
4794 }
4795
4796 pte_unmap(pte);
4797
4798 if (!orders)
4799 goto fallback;
4800
4801 /* Try allocating the highest of the remaining orders. */
4802 gfp = vma_thp_gfp_mask(vma);
4803 while (orders) {
4804 addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
4805 folio = vma_alloc_folio(gfp, order, vma, addr);
4806 if (folio) {
4807 if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
4808 count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
4809 folio_put(folio);
4810 goto next;
4811 }
4812 folio_throttle_swaprate(folio, gfp);
4813 /*
4814 * When a folio is not zeroed during allocation
4815 * (__GFP_ZERO not used) or user folios require special
4816 * handling, folio_zero_user() is used to make sure
4817 * that the page corresponding to the faulting address
4818 * will be hot in the cache after zeroing.
4819 */
4820 if (user_alloc_needs_zeroing())
4821 folio_zero_user(folio, vmf->address);
4822 return folio;
4823 }
4824 next:
4825 count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK);
4826 order = next_order(&orders, order);
4827 }
4828
4829 fallback:
4830 #endif
4831 return folio_prealloc(vma->vm_mm, vma, vmf->address, true);
4832 }
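
/*
 * Illustrative caller contract (sketch): alloc_anon_folio() has a
 * three-way return, which callers handle as
 *
 *	folio = alloc_anon_folio(vmf);
 *	if (IS_ERR(folio))
 *		return 0;		(transient: let the fault be retried)
 *	if (!folio)
 *		return VM_FAULT_OOM;	(allocation or charge failed)
 *
 * do_anonymous_page() below follows exactly this pattern.
 */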
4833
4834 /*
4835 * We enter with non-exclusive mmap_lock (to exclude vma changes,
4836 * but allow concurrent faults), and pte mapped but not yet locked.
4837 * We return with mmap_lock still held, but pte unmapped and unlocked.
4838 */
4839 static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
4840 {
4841 struct vm_area_struct *vma = vmf->vma;
4842 unsigned long addr = vmf->address;
4843 struct folio *folio;
4844 vm_fault_t ret = 0;
4845 int nr_pages = 1;
4846 pte_t entry;
4847
4848 /* File mapping without ->vm_ops ? */
4849 if (vma->vm_flags & VM_SHARED)
4850 return VM_FAULT_SIGBUS;
4851
4852 /*
4853 * Use pte_alloc() instead of pte_alloc_map(), so that OOM can
4854 * be distinguished from a transient failure of pte_offset_map().
4855 */
4856 if (pte_alloc(vma->vm_mm, vmf->pmd))
4857 return VM_FAULT_OOM;
4858
4859 /* Use the zero-page for reads */
4860 if (!(vmf->flags & FAULT_FLAG_WRITE) &&
4861 !mm_forbids_zeropage(vma->vm_mm)) {
4862 entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
4863 vma->vm_page_prot));
4864 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4865 vmf->address, &vmf->ptl);
4866 if (!vmf->pte)
4867 goto unlock;
4868 if (vmf_pte_changed(vmf)) {
4869 update_mmu_tlb(vma, vmf->address, vmf->pte);
4870 goto unlock;
4871 }
4872 ret = check_stable_address_space(vma->vm_mm);
4873 if (ret)
4874 goto unlock;
4875 /* Deliver the page fault to userland, check inside PT lock */
4876 if (userfaultfd_missing(vma)) {
4877 pte_unmap_unlock(vmf->pte, vmf->ptl);
4878 return handle_userfault(vmf, VM_UFFD_MISSING);
4879 }
4880 goto setpte;
4881 }
4882
4883 /* Allocate our own private page. */
4884 ret = vmf_anon_prepare(vmf);
4885 if (ret)
4886 return ret;
4887 /* Returns NULL on OOM or ERR_PTR(-EAGAIN) if we must retry the fault */
4888 folio = alloc_anon_folio(vmf);
4889 if (IS_ERR(folio))
4890 return 0;
4891 if (!folio)
4892 goto oom;
4893
4894 nr_pages = folio_nr_pages(folio);
4895 addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE);
4896
4897 /*
4898 * The memory barrier inside __folio_mark_uptodate makes sure that
4899 * preceding stores to the page contents become visible before
4900 * the set_pte_at() write.
4901 */
4902 __folio_mark_uptodate(folio);
4903
4904 entry = mk_pte(&folio->page, vma->vm_page_prot);
4905 entry = pte_sw_mkyoung(entry);
4906 if (vma->vm_flags & VM_WRITE)
4907 entry = pte_mkwrite(pte_mkdirty(entry), vma);
4908
4909 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
4910 if (!vmf->pte)
4911 goto release;
4912 if (nr_pages == 1 && vmf_pte_changed(vmf)) {
4913 update_mmu_tlb(vma, addr, vmf->pte);
4914 goto release;
4915 } else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) {
4916 update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages);
4917 goto release;
4918 }
4919
4920 ret = check_stable_address_space(vma->vm_mm);
4921 if (ret)
4922 goto release;
4923
4924 /* Deliver the page fault to userland, check inside PT lock */
4925 if (userfaultfd_missing(vma)) {
4926 pte_unmap_unlock(vmf->pte, vmf->ptl);
4927 folio_put(folio);
4928 return handle_userfault(vmf, VM_UFFD_MISSING);
4929 }
4930
4931 folio_ref_add(folio, nr_pages - 1);
4932 add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
4933 count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_FAULT_ALLOC);
4934 folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
4935 folio_add_lru_vma(folio, vma);
4936 setpte:
4937 if (vmf_orig_pte_uffd_wp(vmf))
4938 entry = pte_mkuffd_wp(entry);
4939 set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr_pages);
4940
4941 /* No need to invalidate - it was non-present before */
4942 update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr_pages);
4943 unlock:
4944 if (vmf->pte)
4945 pte_unmap_unlock(vmf->pte, vmf->ptl);
4946 return ret;
4947 release:
4948 folio_put(folio);
4949 goto unlock;
4950 oom:
4951 return VM_FAULT_OOM;
4952 }
4953
4954 /*
4955 * The mmap_lock must have been held on entry, and may have been
4956 * released depending on flags and vma->vm_ops->fault() return value.
4957 * See filemap_fault() and __lock_page_retry().
4958 */
4959 static vm_fault_t __do_fault(struct vm_fault *vmf)
4960 {
4961 struct vm_area_struct *vma = vmf->vma;
4962 struct folio *folio;
4963 vm_fault_t ret;
4964
4965 /*
4966 * Preallocate pte before we take page_lock because this might lead to
4967 * deadlocks for memcg reclaim which waits for pages under writeback:
4968 * lock_page(A)
4969 * SetPageWriteback(A)
4970 * unlock_page(A)
4971 * lock_page(B)
4972 * lock_page(B)
4973 * pte_alloc_one
4974 * shrink_folio_list
4975 * wait_on_page_writeback(A)
4976 * SetPageWriteback(B)
4977 * unlock_page(B)
4978 * # flush A, B to clear the writeback
4979 */
4980 if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
4981 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
4982 if (!vmf->prealloc_pte)
4983 return VM_FAULT_OOM;
4984 }
4985
4986 ret = vma->vm_ops->fault(vmf);
4987 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
4988 VM_FAULT_DONE_COW)))
4989 return ret;
4990
4991 folio = page_folio(vmf->page);
4992 if (unlikely(PageHWPoison(vmf->page))) {
4993 vm_fault_t poisonret = VM_FAULT_HWPOISON;
4994 if (ret & VM_FAULT_LOCKED) {
4995 if (page_mapped(vmf->page))
4996 unmap_mapping_folio(folio);
4997 /* Retry if a clean folio was removed from the cache. */
4998 if (mapping_evict_folio(folio->mapping, folio))
4999 poisonret = VM_FAULT_NOPAGE;
5000 folio_unlock(folio);
5001 }
5002 folio_put(folio);
5003 vmf->page = NULL;
5004 return poisonret;
5005 }
5006
5007 if (unlikely(!(ret & VM_FAULT_LOCKED)))
5008 folio_lock(folio);
5009 else
5010 VM_BUG_ON_PAGE(!folio_test_locked(folio), vmf->page);
5011
5012 return ret;
5013 }
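
/*
 * Illustrative sketch (not part of this file): the minimal contract a
 * driver's ->fault() handler must honour for __do_fault() above - set
 * vmf->page to a referenced page and return 0 (or VM_FAULT_LOCKED if
 * the folio is returned locked). All names below are hypothetical.
 */
#if 0
static vm_fault_t example_fault(struct vm_fault *vmf)
{
	struct example_buffer *buf = vmf->vma->vm_private_data;

	if (vmf->pgoff >= buf->nr_pages)
		return VM_FAULT_SIGBUS;

	/* Hand back a referenced, unlocked page; __do_fault() locks it. */
	get_page(buf->pages[vmf->pgoff]);
	vmf->page = buf->pages[vmf->pgoff];
	return 0;
}
#endif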
5014
5015 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5016 static void deposit_prealloc_pte(struct vm_fault *vmf)
5017 {
5018 struct vm_area_struct *vma = vmf->vma;
5019
5020 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
5021 /*
5022 * We are going to consume the prealloc table,
5023 * count that as nr_ptes.
5024 */
5025 mm_inc_nr_ptes(vma->vm_mm);
5026 vmf->prealloc_pte = NULL;
5027 }
5028
5029 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
5030 {
5031 struct folio *folio = page_folio(page);
5032 struct vm_area_struct *vma = vmf->vma;
5033 bool write = vmf->flags & FAULT_FLAG_WRITE;
5034 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
5035 pmd_t entry;
5036 vm_fault_t ret = VM_FAULT_FALLBACK;
5037
5038 /*
5039 * It is too late to allocate a small folio; we already have a large
5040 * folio in the pagecache: especially s390 KVM cannot tolerate any
5041 * PMD mappings, but PTE-mapped THP are fine. So let's simply refuse any
5042 * PMD mappings if THPs are disabled.
5043 */
5044 if (thp_disabled_by_hw() || vma_thp_disabled(vma, vma->vm_flags))
5045 return ret;
5046
5047 if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
5048 return ret;
5049
5050 if (folio_order(folio) != HPAGE_PMD_ORDER)
5051 return ret;
5052 page = &folio->page;
5053
5054 /*
5055 * Just back off if any subpage of a THP is corrupted; otherwise
5056 * the corrupted page may be mapped by PMD silently to escape the
5057 * check. This kind of THP can only be PTE-mapped. Access to
5058 * the corrupted subpage should trigger SIGBUS as expected.
5059 */
5060 if (unlikely(folio_test_has_hwpoisoned(folio)))
5061 return ret;
5062
5063 /*
5064 * Archs like ppc64 need additional space to store information
5065 * related to pte entry. Use the preallocated table for that.
5066 */
5067 if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
5068 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
5069 if (!vmf->prealloc_pte)
5070 return VM_FAULT_OOM;
5071 }
5072
5073 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
5074 if (unlikely(!pmd_none(*vmf->pmd)))
5075 goto out;
5076
5077 flush_icache_pages(vma, page, HPAGE_PMD_NR);
5078
5079 entry = mk_huge_pmd(page, vma->vm_page_prot);
5080 if (write)
5081 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
5082
5083 add_mm_counter(vma->vm_mm, mm_counter_file(folio), HPAGE_PMD_NR);
5084 folio_add_file_rmap_pmd(folio, page, vma);
5085
5086 /*
5087 * deposit and withdraw with pmd lock held
5088 */
5089 if (arch_needs_pgtable_deposit())
5090 deposit_prealloc_pte(vmf);
5091
5092 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
5093
5094 update_mmu_cache_pmd(vma, haddr, vmf->pmd);
5095
5096 /* fault is handled */
5097 ret = 0;
5098 count_vm_event(THP_FILE_MAPPED);
5099 out:
5100 spin_unlock(vmf->ptl);
5101 return ret;
5102 }
5103 #else
5104 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
5105 {
5106 return VM_FAULT_FALLBACK;
5107 }
5108 #endif
5109
5110 /**
5111 * set_pte_range - Set a range of PTEs to point to pages in a folio.
5112 * @vmf: Fault description.
5113 * @folio: The folio that contains @page.
5114 * @page: The first page to create a PTE for.
5115 * @nr: The number of PTEs to create.
5116 * @addr: The first address to create a PTE for.
5117 */
5118 void set_pte_range(struct vm_fault *vmf, struct folio *folio,
5119 struct page *page, unsigned int nr, unsigned long addr)
5120 {
5121 struct vm_area_struct *vma = vmf->vma;
5122 bool write = vmf->flags & FAULT_FLAG_WRITE;
5123 bool prefault = !in_range(vmf->address, addr, nr * PAGE_SIZE);
5124 pte_t entry;
5125
5126 flush_icache_pages(vma, page, nr);
5127 entry = mk_pte(page, vma->vm_page_prot);
5128
5129 if (prefault && arch_wants_old_prefaulted_pte())
5130 entry = pte_mkold(entry);
5131 else
5132 entry = pte_sw_mkyoung(entry);
5133
5134 if (write)
5135 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
5136 if (unlikely(vmf_orig_pte_uffd_wp(vmf)))
5137 entry = pte_mkuffd_wp(entry);
5138 /* copy-on-write page */
5139 if (write && !(vma->vm_flags & VM_SHARED)) {
5140 VM_BUG_ON_FOLIO(nr != 1, folio);
5141 folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
5142 folio_add_lru_vma(folio, vma);
5143 } else {
5144 folio_add_file_rmap_ptes(folio, page, nr, vma);
5145 }
5146 set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
5147
5148 /* no need to invalidate: a not-present page won't be cached */
5149 update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr);
5150 }
5151
5152 static bool vmf_pte_changed(struct vm_fault *vmf)
5153 {
5154 if (vmf->flags & FAULT_FLAG_ORIG_PTE_VALID)
5155 return !pte_same(ptep_get(vmf->pte), vmf->orig_pte);
5156
5157 return !pte_none(ptep_get(vmf->pte));
5158 }
5159
5160 /**
5161 * finish_fault - finish page fault once we have prepared the page to fault
5162 *
5163 * @vmf: structure describing the fault
5164 *
5165 * This function handles all that is needed to finish a page fault once the
5166 * page to fault in is prepared. It handles locking of PTEs, inserts PTE for
5167 * given page, adds reverse page mapping, handles memcg charges and LRU
5168 * addition.
5169 *
5170 * The function expects the page to be locked and on success it consumes a
5171 * reference of a page being mapped (for the PTE which maps it).
5172 *
5173 * Return: %0 on success, %VM_FAULT_ code in case of error.
5174 */
5175 vm_fault_t finish_fault(struct vm_fault *vmf)
5176 {
5177 struct vm_area_struct *vma = vmf->vma;
5178 struct page *page;
5179 struct folio *folio;
5180 vm_fault_t ret;
5181 bool is_cow = (vmf->flags & FAULT_FLAG_WRITE) &&
5182 !(vma->vm_flags & VM_SHARED);
5183 int type, nr_pages;
5184 unsigned long addr;
5185 bool needs_fallback = false;
5186
5187 fallback:
5188 addr = vmf->address;
5189
5190 /* Did we COW the page? */
5191 if (is_cow)
5192 page = vmf->cow_page;
5193 else
5194 page = vmf->page;
5195
5196 /*
5197 * Check even for read faults, because we might have lost our CoWed
5198 * page.
5199 */
5200 if (!(vma->vm_flags & VM_SHARED)) {
5201 ret = check_stable_address_space(vma->vm_mm);
5202 if (ret)
5203 return ret;
5204 }
5205
5206 if (pmd_none(*vmf->pmd)) {
5207 if (PageTransCompound(page)) {
5208 ret = do_set_pmd(vmf, page);
5209 if (ret != VM_FAULT_FALLBACK)
5210 return ret;
5211 }
5212
5213 if (vmf->prealloc_pte)
5214 pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte);
5215 else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd)))
5216 return VM_FAULT_OOM;
5217 }
5218
5219 folio = page_folio(page);
5220 nr_pages = folio_nr_pages(folio);
5221
5222 /*
5223 * Use per-page faults to maintain the uffd semantics; the same
5224 * approach also applies to non-anonymous-shmem faults to avoid
5225 * inflating the RSS of the process.
5226 */
5227 if (!vma_is_anon_shmem(vma) || unlikely(userfaultfd_armed(vma)) ||
5228 unlikely(needs_fallback)) {
5229 nr_pages = 1;
5230 } else if (nr_pages > 1) {
5231 pgoff_t idx = folio_page_idx(folio, page);
5232 /* The page offset of vmf->address within the VMA. */
5233 pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff;
5234 /* The index of the entry in the pagetable for fault page. */
5235 pgoff_t pte_off = pte_index(vmf->address);
5236
5237 /*
5238 * Fall back to per-page fault if the folio in the page cache
5239 * extends beyond the VMA limits or the PMD pagetable limits.
5240 */
5241 if (unlikely(vma_off < idx ||
5242 vma_off + (nr_pages - idx) > vma_pages(vma) ||
5243 pte_off < idx ||
5244 pte_off + (nr_pages - idx) > PTRS_PER_PTE)) {
5245 nr_pages = 1;
5246 } else {
5247 /* Now we can set mappings for the whole large folio. */
5248 addr = vmf->address - idx * PAGE_SIZE;
5249 page = &folio->page;
5250 }
5251 }
5252
5253 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
5254 addr, &vmf->ptl);
5255 if (!vmf->pte)
5256 return VM_FAULT_NOPAGE;
5257
5258 /* Re-check under ptl */
5259 if (nr_pages == 1 && unlikely(vmf_pte_changed(vmf))) {
5260 update_mmu_tlb(vma, addr, vmf->pte);
5261 ret = VM_FAULT_NOPAGE;
5262 goto unlock;
5263 } else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) {
5264 needs_fallback = true;
5265 pte_unmap_unlock(vmf->pte, vmf->ptl);
5266 goto fallback;
5267 }
5268
5269 folio_ref_add(folio, nr_pages - 1);
5270 set_pte_range(vmf, folio, page, nr_pages, addr);
5271 type = is_cow ? MM_ANONPAGES : mm_counter_file(folio);
5272 add_mm_counter(vma->vm_mm, type, nr_pages);
5273 ret = 0;
5274
5275 unlock:
5276 pte_unmap_unlock(vmf->pte, vmf->ptl);
5277 return ret;
5278 }
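/*
 * Worked example for the large-folio path above (assuming 4KiB pages):
 * an anon-shmem fault hitting subpage idx = 3 of a 16-page folio maps
 * the whole folio at addr = vmf->address - 3 * PAGE_SIZE, takes 15
 * extra folio references and installs all 16 PTEs under a single PT
 * lock, provided the folio fits within both the VMA and the PTE table.
 */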
5279
5280 static unsigned long fault_around_pages __read_mostly =
5281 65536 >> PAGE_SHIFT;
5282
5283 #ifdef CONFIG_DEBUG_FS
5284 static int fault_around_bytes_get(void *data, u64 *val)
5285 {
5286 *val = fault_around_pages << PAGE_SHIFT;
5287 return 0;
5288 }
5289
5290 /*
5291 * fault_around_bytes must be rounded down to the nearest page order as it's
5292 * what do_fault_around() expects to see.
5293 */
5294 static int fault_around_bytes_set(void *data, u64 val)
5295 {
5296 if (val / PAGE_SIZE > PTRS_PER_PTE)
5297 return -EINVAL;
5298
5299 /*
5300 * The minimum value is 1 page; however, this results in no fault-around
5301 * at all. See should_fault_around().
5302 */
5303 val = max(val, PAGE_SIZE);
5304 fault_around_pages = rounddown_pow_of_two(val) >> PAGE_SHIFT;
5305
5306 return 0;
5307 }
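/*
 * Worked example (assuming 4KiB pages): writing 20480 to
 * fault_around_bytes stores rounddown_pow_of_two(20480) >> PAGE_SHIFT,
 * i.e. 16384 >> 12 = 4 pages; anything above PTRS_PER_PTE pages is
 * rejected with -EINVAL by the check above.
 */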
5308 DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops,
5309 fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
5310
5311 static int __init fault_around_debugfs(void)
5312 {
5313 debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL,
5314 &fault_around_bytes_fops);
5315 return 0;
5316 }
5317 late_initcall(fault_around_debugfs);
5318 #endif
5319
5320 /*
5321 * do_fault_around() tries to map a few pages around the fault address. The hope
5322 * is that the pages will be needed soon and this will lower the number of
5323 * faults to handle.
5324 *
5325 * It uses vm_ops->map_pages() to map the pages, which skips the page if it's
5326 * not ready to be mapped: not up-to-date, locked, etc.
5327 *
5328 * This function doesn't cross VMA or page table boundaries, in order to call
5329 * map_pages() and acquire a PTE lock only once.
5330 *
5331 * fault_around_pages defines how many pages we'll try to map.
5332 * do_fault_around() expects it to be set to a power of two less than or equal
5333 * to PTRS_PER_PTE.
5334 *
5335 * The virtual address of the area that we map is naturally aligned to
5336 * fault_around_pages * PAGE_SIZE rounded down to the machine page size
5337 * (and therefore to page order). This way it's easier to guarantee
5338 * that we don't cross page table boundaries.
5339 */
5340 static vm_fault_t do_fault_around(struct vm_fault *vmf)
5341 {
5342 pgoff_t nr_pages = READ_ONCE(fault_around_pages);
5343 pgoff_t pte_off = pte_index(vmf->address);
5344 /* The page offset of vmf->address within the VMA. */
5345 pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff;
5346 pgoff_t from_pte, to_pte;
5347 vm_fault_t ret;
5348
5349 /* The PTE offset of the start address, clamped to the VMA. */
5350 from_pte = max(ALIGN_DOWN(pte_off, nr_pages),
5351 pte_off - min(pte_off, vma_off));
5352
5353 /* The PTE offset of the end address, clamped to the VMA and PTE. */
5354 to_pte = min3(from_pte + nr_pages, (pgoff_t)PTRS_PER_PTE,
5355 pte_off + vma_pages(vmf->vma) - vma_off) - 1;
5356
5357 if (pmd_none(*vmf->pmd)) {
5358 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
5359 if (!vmf->prealloc_pte)
5360 return VM_FAULT_OOM;
5361 }
5362
5363 rcu_read_lock();
5364 ret = vmf->vma->vm_ops->map_pages(vmf,
5365 vmf->pgoff + from_pte - pte_off,
5366 vmf->pgoff + to_pte - pte_off);
5367 rcu_read_unlock();
5368
5369 return ret;
5370 }
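/*
 * Worked example (4KiB pages, fault_around_pages = 16): with
 * pte_off = 100 and a 10-page VMA that starts 3 pages before the fault
 * (vma_off = 3), from_pte = max(96, 97) = 97 and
 * to_pte = min3(113, 512, 107) - 1 = 106, so ->map_pages() is asked for
 * the ten page-cache entries from vmf->pgoff - 3 to vmf->pgoff + 6.
 */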
5371
5372 /* Return true if we should do read fault-around, false otherwise */
5373 static inline bool should_fault_around(struct vm_fault *vmf)
5374 {
5375 /* No ->map_pages? No way to fault around... */
5376 if (!vmf->vma->vm_ops->map_pages)
5377 return false;
5378
5379 if (uffd_disable_fault_around(vmf->vma))
5380 return false;
5381
5382 /* A single page implies no faulting 'around' at all. */
5383 return fault_around_pages > 1;
5384 }
5385
5386 static vm_fault_t do_read_fault(struct vm_fault *vmf)
5387 {
5388 vm_fault_t ret = 0;
5389 struct folio *folio;
5390
5391 /*
5392 * Let's call ->map_pages() first and use ->fault() as fallback
5393 * if the page at the offset is not ready to be mapped (cold
5394 * page cache or something).
5395 */
5396 if (should_fault_around(vmf)) {
5397 ret = do_fault_around(vmf);
5398 if (ret)
5399 return ret;
5400 }
5401
5402 ret = vmf_can_call_fault(vmf);
5403 if (ret)
5404 return ret;
5405
5406 ret = __do_fault(vmf);
5407 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5408 return ret;
5409
5410 ret |= finish_fault(vmf);
5411 folio = page_folio(vmf->page);
5412 folio_unlock(folio);
5413 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5414 folio_put(folio);
5415 return ret;
5416 }
5417
5418 static vm_fault_t do_cow_fault(struct vm_fault *vmf)
5419 {
5420 struct vm_area_struct *vma = vmf->vma;
5421 struct folio *folio;
5422 vm_fault_t ret;
5423
5424 ret = vmf_can_call_fault(vmf);
5425 if (!ret)
5426 ret = vmf_anon_prepare(vmf);
5427 if (ret)
5428 return ret;
5429
5430 folio = folio_prealloc(vma->vm_mm, vma, vmf->address, false);
5431 if (!folio)
5432 return VM_FAULT_OOM;
5433
5434 vmf->cow_page = &folio->page;
5435
5436 ret = __do_fault(vmf);
5437 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5438 goto uncharge_out;
5439 if (ret & VM_FAULT_DONE_COW)
5440 return ret;
5441
5442 if (copy_mc_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma)) {
5443 ret = VM_FAULT_HWPOISON;
5444 goto unlock;
5445 }
5446 __folio_mark_uptodate(folio);
5447
5448 ret |= finish_fault(vmf);
5449 unlock:
5450 unlock_page(vmf->page);
5451 put_page(vmf->page);
5452 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5453 goto uncharge_out;
5454 return ret;
5455 uncharge_out:
5456 folio_put(folio);
5457 return ret;
5458 }
5459
5460 static vm_fault_t do_shared_fault(struct vm_fault *vmf)
5461 {
5462 struct vm_area_struct *vma = vmf->vma;
5463 vm_fault_t ret, tmp;
5464 struct folio *folio;
5465
5466 ret = vmf_can_call_fault(vmf);
5467 if (ret)
5468 return ret;
5469
5470 ret = __do_fault(vmf);
5471 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
5472 return ret;
5473
5474 folio = page_folio(vmf->page);
5475
5476 /*
5477 * Check if the backing address space wants to know that the page is
5478 * about to become writable
5479 */
5480 if (vma->vm_ops->page_mkwrite) {
5481 folio_unlock(folio);
5482 tmp = do_page_mkwrite(vmf, folio);
5483 if (unlikely(!tmp ||
5484 (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
5485 folio_put(folio);
5486 return tmp;
5487 }
5488 }
5489
5490 ret |= finish_fault(vmf);
5491 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
5492 VM_FAULT_RETRY))) {
5493 folio_unlock(folio);
5494 folio_put(folio);
5495 return ret;
5496 }
5497
5498 ret |= fault_dirty_shared_page(vmf);
5499 return ret;
5500 }
5501
5502 /*
5503 * We enter with non-exclusive mmap_lock (to exclude vma changes,
5504 * but allow concurrent faults).
5505 * The mmap_lock may have been released depending on flags and our
5506 * return value. See filemap_fault() and __folio_lock_or_retry().
5507 * If mmap_lock is released, vma may become invalid (for example
5508 * by other thread calling munmap()).
5509 */
5510 static vm_fault_t do_fault(struct vm_fault *vmf)
5511 {
5512 struct vm_area_struct *vma = vmf->vma;
5513 struct mm_struct *vm_mm = vma->vm_mm;
5514 vm_fault_t ret;
5515
5516 /*
5517 * The VMA was not fully populated on mmap(), or is missing VM_DONTEXPAND.
5518 */
5519 if (!vma->vm_ops->fault) {
5520 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
5521 vmf->address, &vmf->ptl);
5522 if (unlikely(!vmf->pte))
5523 ret = VM_FAULT_SIGBUS;
5524 else {
5525 /*
5526 * Make sure this is not a temporary clearing of the pte
5527 * by holding the ptl and checking again. An R/M/W update
5528 * of the pte involves taking the ptl and clearing the pte,
5529 * so that we don't race with concurrent modification by
5530 * hardware, followed by an update.
5531 */
5532 if (unlikely(pte_none(ptep_get(vmf->pte))))
5533 ret = VM_FAULT_SIGBUS;
5534 else
5535 ret = VM_FAULT_NOPAGE;
5536
5537 pte_unmap_unlock(vmf->pte, vmf->ptl);
5538 }
5539 } else if (!(vmf->flags & FAULT_FLAG_WRITE))
5540 ret = do_read_fault(vmf);
5541 else if (!(vma->vm_flags & VM_SHARED))
5542 ret = do_cow_fault(vmf);
5543 else
5544 ret = do_shared_fault(vmf);
5545
5546 /* preallocated pagetable is unused: free it */
5547 if (vmf->prealloc_pte) {
5548 pte_free(vm_mm, vmf->prealloc_pte);
5549 vmf->prealloc_pte = NULL;
5550 }
5551 return ret;
5552 }
5553
5554 int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
5555 unsigned long addr, int *flags,
5556 bool writable, int *last_cpupid)
5557 {
5558 struct vm_area_struct *vma = vmf->vma;
5559
5560 /*
5561 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
5562 * much anyway since they can be in shared cache state. This misses
5563 * the case where a mapping is writable but the process never writes
5564 * to it, while pte_write gets cleared during protection updates and
5565 * pte_dirty has unpredictable behaviour between PTE scan updates,
5566 * background writeback, dirty balancing and application behaviour.
5567 */
5568 if (!writable)
5569 *flags |= TNF_NO_GROUP;
5570
5571 /*
5572 * Flag if the folio is shared between multiple address spaces. This
5573 * is later used when determining whether to group tasks together
5574 */
5575 if (folio_likely_mapped_shared(folio) && (vma->vm_flags & VM_SHARED))
5576 *flags |= TNF_SHARED;
5577 /*
5578 * In memory tiering mode, the cpupid of a slow-memory page is
5579 * used to record the page access time, so use the default value.
5580 */
5581 if (folio_use_access_time(folio))
5582 *last_cpupid = (-1 & LAST_CPUPID_MASK);
5583 else
5584 *last_cpupid = folio_last_cpupid(folio);
5585
5586 /* Record the current PID accessing the VMA */
5587 vma_set_access_pid_bit(vma);
5588
5589 count_vm_numa_event(NUMA_HINT_FAULTS);
5590 #ifdef CONFIG_NUMA_BALANCING
5591 count_memcg_folio_events(folio, NUMA_HINT_FAULTS, 1);
5592 #endif
5593 if (folio_nid(folio) == numa_node_id()) {
5594 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
5595 *flags |= TNF_FAULT_LOCAL;
5596 }
5597
5598 return mpol_misplaced(folio, vmf, addr);
5599 }
5600
5601 static void numa_rebuild_single_mapping(struct vm_fault *vmf, struct vm_area_struct *vma,
5602 unsigned long fault_addr, pte_t *fault_pte,
5603 bool writable)
5604 {
5605 pte_t pte, old_pte;
5606
5607 old_pte = ptep_modify_prot_start(vma, fault_addr, fault_pte);
5608 pte = pte_modify(old_pte, vma->vm_page_prot);
5609 pte = pte_mkyoung(pte);
5610 if (writable)
5611 pte = pte_mkwrite(pte, vma);
5612 ptep_modify_prot_commit(vma, fault_addr, fault_pte, old_pte, pte);
5613 update_mmu_cache_range(vmf, vma, fault_addr, fault_pte, 1);
5614 }
5615
5616 static void numa_rebuild_large_mapping(struct vm_fault *vmf, struct vm_area_struct *vma,
5617 struct folio *folio, pte_t fault_pte,
5618 bool ignore_writable, bool pte_write_upgrade)
5619 {
5620 int nr = pte_pfn(fault_pte) - folio_pfn(folio);
5621 unsigned long start, end, addr = vmf->address;
5622 unsigned long addr_start = addr - (nr << PAGE_SHIFT);
5623 unsigned long pt_start = ALIGN_DOWN(addr, PMD_SIZE);
5624 pte_t *start_ptep;
5625
5626 /* Stay within the VMA and within the page table. */
5627 start = max3(addr_start, pt_start, vma->vm_start);
5628 end = min3(addr_start + folio_size(folio), pt_start + PMD_SIZE,
5629 vma->vm_end);
5630 start_ptep = vmf->pte - ((addr - start) >> PAGE_SHIFT);
5631
5632 /* Restore all PTEs' mapping of the large folio */
5633 for (addr = start; addr != end; start_ptep++, addr += PAGE_SIZE) {
5634 pte_t ptent = ptep_get(start_ptep);
5635 bool writable = false;
5636
5637 if (!pte_present(ptent) || !pte_protnone(ptent))
5638 continue;
5639
5640 if (pfn_folio(pte_pfn(ptent)) != folio)
5641 continue;
5642
5643 if (!ignore_writable) {
5644 ptent = pte_modify(ptent, vma->vm_page_prot);
5645 writable = pte_write(ptent);
5646 if (!writable && pte_write_upgrade &&
5647 can_change_pte_writable(vma, addr, ptent))
5648 writable = true;
5649 }
5650
5651 numa_rebuild_single_mapping(vmf, vma, addr, start_ptep, writable);
5652 }
5653 }
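/*
 * Worked example: if the faulting PTE maps subpage 5 of a 16-page
 * folio, addr_start = vmf->address - 5 * PAGE_SIZE; start and end are
 * then clamped so the restore loop never leaves the VMA or the
 * PMD-sized page table that vmf->pte belongs to.
 */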
5654
5655 static vm_fault_t do_numa_page(struct vm_fault *vmf)
5656 {
5657 struct vm_area_struct *vma = vmf->vma;
5658 struct folio *folio = NULL;
5659 int nid = NUMA_NO_NODE;
5660 bool writable = false, ignore_writable = false;
5661 bool pte_write_upgrade = vma_wants_manual_pte_write_upgrade(vma);
5662 int last_cpupid;
5663 int target_nid;
5664 pte_t pte, old_pte;
5665 int flags = 0, nr_pages;
5666
5667 /*
5668 * The pte cannot be used safely until we verify, while holding the page
5669 * table lock, that its contents have not changed during fault handling.
5670 */
5671 spin_lock(vmf->ptl);
5672 /* Read the live PTE from the page tables: */
5673 old_pte = ptep_get(vmf->pte);
5674
5675 if (unlikely(!pte_same(old_pte, vmf->orig_pte))) {
5676 pte_unmap_unlock(vmf->pte, vmf->ptl);
5677 return 0;
5678 }
5679
5680 pte = pte_modify(old_pte, vma->vm_page_prot);
5681
5682 /*
5683 * Detect now whether the PTE could be writable; this information
5684 * is only valid while holding the PT lock.
5685 */
5686 writable = pte_write(pte);
5687 if (!writable && pte_write_upgrade &&
5688 can_change_pte_writable(vma, vmf->address, pte))
5689 writable = true;
5690
5691 folio = vm_normal_folio(vma, vmf->address, pte);
5692 if (!folio || folio_is_zone_device(folio))
5693 goto out_map;
5694
5695 nid = folio_nid(folio);
5696 nr_pages = folio_nr_pages(folio);
5697
5698 target_nid = numa_migrate_check(folio, vmf, vmf->address, &flags,
5699 writable, &last_cpupid);
5700 if (target_nid == NUMA_NO_NODE)
5701 goto out_map;
5702 if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
5703 flags |= TNF_MIGRATE_FAIL;
5704 goto out_map;
5705 }
5706 /* The folio is isolated and isolation code holds a folio reference. */
5707 pte_unmap_unlock(vmf->pte, vmf->ptl);
5708 writable = false;
5709 ignore_writable = true;
5710
5711 /* Migrate to the requested node */
5712 if (!migrate_misplaced_folio(folio, target_nid)) {
5713 nid = target_nid;
5714 flags |= TNF_MIGRATED;
5715 task_numa_fault(last_cpupid, nid, nr_pages, flags);
5716 return 0;
5717 }
5718
5719 flags |= TNF_MIGRATE_FAIL;
5720 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
5721 vmf->address, &vmf->ptl);
5722 if (unlikely(!vmf->pte))
5723 return 0;
5724 if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
5725 pte_unmap_unlock(vmf->pte, vmf->ptl);
5726 return 0;
5727 }
5728 out_map:
5729 /*
5730 * Make it present again. Depending on how the arch implements
5731 * non-accessible ptes, some can allow access by kernel mode.
5732 */
5733 if (folio && folio_test_large(folio))
5734 numa_rebuild_large_mapping(vmf, vma, folio, pte, ignore_writable,
5735 pte_write_upgrade);
5736 else
5737 numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte,
5738 writable);
5739 pte_unmap_unlock(vmf->pte, vmf->ptl);
5740
5741 if (nid != NUMA_NO_NODE)
5742 task_numa_fault(last_cpupid, nid, nr_pages, flags);
5743 return 0;
5744 }
5745
5746 static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
5747 {
5748 struct vm_area_struct *vma = vmf->vma;
5749 if (vma_is_anonymous(vma))
5750 return do_huge_pmd_anonymous_page(vmf);
5751 if (vma->vm_ops->huge_fault)
5752 return vma->vm_ops->huge_fault(vmf, PMD_ORDER);
5753 return VM_FAULT_FALLBACK;
5754 }
5755
5756 /* `inline' is required to avoid gcc 4.1.2 build error */
5757 static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf)
5758 {
5759 struct vm_area_struct *vma = vmf->vma;
5760 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
5761 vm_fault_t ret;
5762
5763 if (vma_is_anonymous(vma)) {
5764 if (likely(!unshare) &&
5765 userfaultfd_huge_pmd_wp(vma, vmf->orig_pmd)) {
5766 if (userfaultfd_wp_async(vmf->vma))
5767 goto split;
5768 return handle_userfault(vmf, VM_UFFD_WP);
5769 }
5770 return do_huge_pmd_wp_page(vmf);
5771 }
5772
5773 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
5774 if (vma->vm_ops->huge_fault) {
5775 ret = vma->vm_ops->huge_fault(vmf, PMD_ORDER);
5776 if (!(ret & VM_FAULT_FALLBACK))
5777 return ret;
5778 }
5779 }
5780
5781 split:
5782 /* COW or write-notify handled on pte level: split pmd. */
5783 __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
5784
5785 return VM_FAULT_FALLBACK;
5786 }
5787
5788 static vm_fault_t create_huge_pud(struct vm_fault *vmf)
5789 {
5790 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
5791 defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
5792 struct vm_area_struct *vma = vmf->vma;
5793 /* No support for anonymous transparent PUD pages yet */
5794 if (vma_is_anonymous(vma))
5795 return VM_FAULT_FALLBACK;
5796 if (vma->vm_ops->huge_fault)
5797 return vma->vm_ops->huge_fault(vmf, PUD_ORDER);
5798 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
5799 return VM_FAULT_FALLBACK;
5800 }
5801
5802 static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
5803 {
5804 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
5805 defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
5806 struct vm_area_struct *vma = vmf->vma;
5807 vm_fault_t ret;
5808
5809 /* No support for anonymous transparent PUD pages yet */
5810 if (vma_is_anonymous(vma))
5811 goto split;
5812 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
5813 if (vma->vm_ops->huge_fault) {
5814 ret = vma->vm_ops->huge_fault(vmf, PUD_ORDER);
5815 if (!(ret & VM_FAULT_FALLBACK))
5816 return ret;
5817 }
5818 }
5819 split:
5820 /* COW or write-notify not handled on PUD level: split pud. */
5821 __split_huge_pud(vma, vmf->pud, vmf->address);
5822 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
5823 return VM_FAULT_FALLBACK;
5824 }
5825
5826 /*
5827 * These routines also need to handle stuff like marking pages dirty
5828 * and/or accessed for architectures that don't do it in hardware (most
5829 * RISC architectures). The early dirtying is also good on the i386.
5830 *
5831 * There is also a hook called "update_mmu_cache()" that architectures
5832 * with external mmu caches can use to update those (ie the Sparc or
5833 * PowerPC hashed page tables that act as extended TLBs).
5834 *
5835 * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow
5836 * concurrent faults).
5837 *
5838 * The mmap_lock may have been released depending on flags and our return value.
5839 * See filemap_fault() and __folio_lock_or_retry().
5840 */
5841 static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
5842 {
5843 pte_t entry;
5844
5845 if (unlikely(pmd_none(*vmf->pmd))) {
5846 /*
5847 * Leave __pte_alloc() until later: because vm_ops->fault may
5848 * want to allocate a huge page, and if we expose the page table
5849 * for an instant, it will be difficult to retract from
5850 * concurrent faults and from rmap lookups.
5851 */
5852 vmf->pte = NULL;
5853 vmf->flags &= ~FAULT_FLAG_ORIG_PTE_VALID;
5854 } else {
5855 pmd_t dummy_pmdval;
5856
5857 /*
5858 * A regular pmd is established and it can't morph into a huge
5859 * pmd by anon khugepaged, since that takes mmap_lock in write
5860 * mode; but shmem or file collapse to THP could still morph
5861 * it into a huge pmd: just retry later if so.
5862 *
5863 * Use the maywrite version to indicate that vmf->pte may be
5864 * modified, but since we will use pte_same() to detect the
5865 * change of the !pte_none() entry, there is no need to recheck
5866 * the pmdval. Here we choose to pass a dummy variable instead
5867 * of NULL, which helps new users think about why this place is
5868 * special.
5869 */
5870 vmf->pte = pte_offset_map_rw_nolock(vmf->vma->vm_mm, vmf->pmd,
5871 vmf->address, &dummy_pmdval,
5872 &vmf->ptl);
5873 if (unlikely(!vmf->pte))
5874 return 0;
5875 vmf->orig_pte = ptep_get_lockless(vmf->pte);
5876 vmf->flags |= FAULT_FLAG_ORIG_PTE_VALID;
5877
5878 if (pte_none(vmf->orig_pte)) {
5879 pte_unmap(vmf->pte);
5880 vmf->pte = NULL;
5881 }
5882 }
5883
5884 if (!vmf->pte)
5885 return do_pte_missing(vmf);
5886
5887 if (!pte_present(vmf->orig_pte))
5888 return do_swap_page(vmf);
5889
5890 if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
5891 return do_numa_page(vmf);
5892
5893 spin_lock(vmf->ptl);
5894 entry = vmf->orig_pte;
5895 if (unlikely(!pte_same(ptep_get(vmf->pte), entry))) {
5896 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
5897 goto unlock;
5898 }
5899 if (vmf->flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
5900 if (!pte_write(entry))
5901 return do_wp_page(vmf);
5902 else if (likely(vmf->flags & FAULT_FLAG_WRITE))
5903 entry = pte_mkdirty(entry);
5904 }
5905 entry = pte_mkyoung(entry);
5906 if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
5907 vmf->flags & FAULT_FLAG_WRITE)) {
5908 update_mmu_cache_range(vmf, vmf->vma, vmf->address,
5909 vmf->pte, 1);
5910 } else {
5911 /* Skip spurious TLB flush for retried page fault */
5912 if (vmf->flags & FAULT_FLAG_TRIED)
5913 goto unlock;
5914 /*
5915 * This is needed only for protection faults but the arch code
5916 * is not yet telling us if this is a protection fault or not.
5917 * This still avoids useless tlb flushes for .text page faults
5918 * with threads.
5919 */
5920 if (vmf->flags & FAULT_FLAG_WRITE)
5921 flush_tlb_fix_spurious_fault(vmf->vma, vmf->address,
5922 vmf->pte);
5923 }
5924 unlock:
5925 pte_unmap_unlock(vmf->pte, vmf->ptl);
5926 return 0;
5927 }
5928
5929 /*
5930 * On entry, we hold either the VMA lock or the mmap_lock
5931 * (FAULT_FLAG_VMA_LOCK tells you which). If VM_FAULT_RETRY is set in
5932 * the result, the mmap_lock is not held on exit. See filemap_fault()
5933 * and __folio_lock_or_retry().
5934 */
5935 static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
5936 unsigned long address, unsigned int flags)
5937 {
5938 struct vm_fault vmf = {
5939 .vma = vma,
5940 .address = address & PAGE_MASK,
5941 .real_address = address,
5942 .flags = flags,
5943 .pgoff = linear_page_index(vma, address),
5944 .gfp_mask = __get_fault_gfp_mask(vma),
5945 };
5946 struct mm_struct *mm = vma->vm_mm;
5947 unsigned long vm_flags = vma->vm_flags;
5948 pgd_t *pgd;
5949 p4d_t *p4d;
5950 vm_fault_t ret;
5951
5952 pgd = pgd_offset(mm, address);
5953 p4d = p4d_alloc(mm, pgd, address);
5954 if (!p4d)
5955 return VM_FAULT_OOM;
5956
5957 vmf.pud = pud_alloc(mm, p4d, address);
5958 if (!vmf.pud)
5959 return VM_FAULT_OOM;
5960 retry_pud:
5961 if (pud_none(*vmf.pud) &&
5962 thp_vma_allowable_order(vma, vm_flags,
5963 TVA_IN_PF | TVA_ENFORCE_SYSFS, PUD_ORDER)) {
5964 ret = create_huge_pud(&vmf);
5965 if (!(ret & VM_FAULT_FALLBACK))
5966 return ret;
5967 } else {
5968 pud_t orig_pud = *vmf.pud;
5969
5970 barrier();
5971 if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
5972
5973 /*
5974 * TODO once we support anonymous PUDs: NUMA case and
5975 * FAULT_FLAG_UNSHARE handling.
5976 */
5977 if ((flags & FAULT_FLAG_WRITE) && !pud_write(orig_pud)) {
5978 ret = wp_huge_pud(&vmf, orig_pud);
5979 if (!(ret & VM_FAULT_FALLBACK))
5980 return ret;
5981 } else {
5982 huge_pud_set_accessed(&vmf, orig_pud);
5983 return 0;
5984 }
5985 }
5986 }
5987
5988 vmf.pmd = pmd_alloc(mm, vmf.pud, address);
5989 if (!vmf.pmd)
5990 return VM_FAULT_OOM;
5991
5992 /* Huge pud page fault raced with pmd_alloc? */
5993 if (pud_trans_unstable(vmf.pud))
5994 goto retry_pud;
5995
5996 if (pmd_none(*vmf.pmd) &&
5997 thp_vma_allowable_order(vma, vm_flags,
5998 TVA_IN_PF | TVA_ENFORCE_SYSFS, PMD_ORDER)) {
5999 ret = create_huge_pmd(&vmf);
6000 if (!(ret & VM_FAULT_FALLBACK))
6001 return ret;
6002 } else {
6003 vmf.orig_pmd = pmdp_get_lockless(vmf.pmd);
6004
6005 if (unlikely(is_swap_pmd(vmf.orig_pmd))) {
6006 VM_BUG_ON(thp_migration_supported() &&
6007 !is_pmd_migration_entry(vmf.orig_pmd));
6008 if (is_pmd_migration_entry(vmf.orig_pmd))
6009 pmd_migration_entry_wait(mm, vmf.pmd);
6010 return 0;
6011 }
6012 if (pmd_trans_huge(vmf.orig_pmd) || pmd_devmap(vmf.orig_pmd)) {
6013 if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma))
6014 return do_huge_pmd_numa_page(&vmf);
6015
6016 if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
6017 !pmd_write(vmf.orig_pmd)) {
6018 ret = wp_huge_pmd(&vmf);
6019 if (!(ret & VM_FAULT_FALLBACK))
6020 return ret;
6021 } else {
6022 huge_pmd_set_accessed(&vmf);
6023 return 0;
6024 }
6025 }
6026 }
6027
6028 return handle_pte_fault(&vmf);
6029 }
6030
6031 /**
6032 * mm_account_fault - Do page fault accounting
6033 * @mm: mm from which memcg should be extracted. It can be NULL.
6034 * @regs: the pt_regs struct pointer. When set to NULL, will skip accounting
6035 * of perf event counters, but we'll still do the per-task accounting to
6036 * the task that triggered this page fault.
6037 * @address: the faulted address.
6038 * @flags: the fault flags.
6039 * @ret: the fault retcode.
6040 *
6041 * This will take care of most of the page fault accounting. Meanwhile, it
6042 * will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter
6043 * updates. However, note that the handling of PERF_COUNT_SW_PAGE_FAULTS should
6044 * still be in per-arch page fault handlers at the entry of page fault.
6045 */
6046 static inline void mm_account_fault(struct mm_struct *mm, struct pt_regs *regs,
6047 unsigned long address, unsigned int flags,
6048 vm_fault_t ret)
6049 {
6050 bool major;
6051
6052 /* Incomplete faults will be accounted upon completion. */
6053 if (ret & VM_FAULT_RETRY)
6054 return;
6055
6056 /*
6057 * To preserve the behavior of older kernels, PGFAULT counters record
6058 * both successful and failed faults, as opposed to perf counters,
6059 * which ignore failed cases.
6060 */
6061 count_vm_event(PGFAULT);
6062 count_memcg_event_mm(mm, PGFAULT);
6063
6064 /*
6065 * Do not account for unsuccessful faults (e.g. when the address wasn't
6066 * valid). That includes arch_vma_access_permitted() failing before
6067 * reaching here. So this is not a "this many hardware page faults"
6068 * counter. We should use the hw profiling for that.
6069 */
6070 if (ret & VM_FAULT_ERROR)
6071 return;
6072
6073 /*
6074 * We define the fault as a major fault when the final successful fault
6075 * is VM_FAULT_MAJOR, or if it retried (which implies that we couldn't
6076 * handle it immediately previously).
6077 */
6078 major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED);
6079
6080 if (major)
6081 current->maj_flt++;
6082 else
6083 current->min_flt++;
6084
6085 /*
6086 * If the fault is done for GUP, regs will be NULL. We only do the
6087 * accounting for the per-thread fault counters of the task that
6088 * triggered the fault, and we skip the perf event updates.
6089 */
6090 if (!regs)
6091 return;
6092
6093 if (major)
6094 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
6095 else
6096 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
6097 }
6098
6099 #ifdef CONFIG_LRU_GEN
6100 static void lru_gen_enter_fault(struct vm_area_struct *vma)
6101 {
6102 /* the LRU algorithm only applies to accesses with recency */
6103 current->in_lru_fault = vma_has_recency(vma);
6104 }
6105
6106 static void lru_gen_exit_fault(void)
6107 {
6108 current->in_lru_fault = false;
6109 }
6110 #else
6111 static void lru_gen_enter_fault(struct vm_area_struct *vma)
6112 {
6113 }
6114
6115 static void lru_gen_exit_fault(void)
6116 {
6117 }
6118 #endif /* CONFIG_LRU_GEN */
6119
6120 static vm_fault_t sanitize_fault_flags(struct vm_area_struct *vma,
6121 unsigned int *flags)
6122 {
6123 if (unlikely(*flags & FAULT_FLAG_UNSHARE)) {
6124 if (WARN_ON_ONCE(*flags & FAULT_FLAG_WRITE))
6125 return VM_FAULT_SIGSEGV;
6126 /*
6127 * FAULT_FLAG_UNSHARE only applies to COW mappings. Let's
6128 * just treat it like an ordinary read-fault otherwise.
6129 */
6130 if (!is_cow_mapping(vma->vm_flags))
6131 *flags &= ~FAULT_FLAG_UNSHARE;
6132 } else if (*flags & FAULT_FLAG_WRITE) {
6133 /* Write faults on read-only mappings are impossible ... */
6134 if (WARN_ON_ONCE(!(vma->vm_flags & VM_MAYWRITE)))
6135 return VM_FAULT_SIGSEGV;
6136 /* ... and FOLL_FORCE only applies to COW mappings. */
6137 if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE) &&
6138 !is_cow_mapping(vma->vm_flags)))
6139 return VM_FAULT_SIGSEGV;
6140 }
6141 #ifdef CONFIG_PER_VMA_LOCK
6142 /*
6143 * Per-VMA locks can't be used with FAULT_FLAG_RETRY_NOWAIT because of
6144 * the assumption that the lock is dropped on VM_FAULT_RETRY.
6145 */
6146 if (WARN_ON_ONCE((*flags &
6147 (FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT)) ==
6148 (FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT)))
6149 return VM_FAULT_SIGSEGV;
6150 #endif
6151
6152 return 0;
6153 }
6154
6155 /*
6156 * By the time we get here, we already hold either the VMA lock or the
6157 * mmap_lock (FAULT_FLAG_VMA_LOCK tells you which).
6158 *
6159 * The mmap_lock may have been released depending on flags and our
6160 * return value. See filemap_fault() and __folio_lock_or_retry().
6161 */
6162 vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
6163 unsigned int flags, struct pt_regs *regs)
6164 {
6165 /* If the fault handler drops the mmap_lock, vma may be freed */
6166 struct mm_struct *mm = vma->vm_mm;
6167 vm_fault_t ret;
6168 bool is_droppable;
6169
6170 __set_current_state(TASK_RUNNING);
6171
6172 ret = sanitize_fault_flags(vma, &flags);
6173 if (ret)
6174 goto out;
6175
6176 if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
6177 flags & FAULT_FLAG_INSTRUCTION,
6178 flags & FAULT_FLAG_REMOTE)) {
6179 ret = VM_FAULT_SIGSEGV;
6180 goto out;
6181 }
6182
6183 is_droppable = !!(vma->vm_flags & VM_DROPPABLE);
6184
6185 /*
6186 * Enable the memcg OOM handling for faults triggered in user
6187 * space. Kernel faults are handled more gracefully.
6188 */
6189 if (flags & FAULT_FLAG_USER)
6190 mem_cgroup_enter_user_fault();
6191
6192 lru_gen_enter_fault(vma);
6193
6194 if (unlikely(is_vm_hugetlb_page(vma)))
6195 ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
6196 else
6197 ret = __handle_mm_fault(vma, address, flags);
6198
6199 /*
6200 * Warning: It is no longer safe to dereference vma-> after this point,
6201 * because mmap_lock might have been dropped by __handle_mm_fault(), so
6202 * vma might be destroyed from underneath us.
6203 */
6204
6205 lru_gen_exit_fault();
6206
6207 /* If the mapping is droppable, then errors due to OOM aren't fatal. */
6208 if (is_droppable)
6209 ret &= ~VM_FAULT_OOM;
6210
6211 if (flags & FAULT_FLAG_USER) {
6212 mem_cgroup_exit_user_fault();
6213 /*
6214 * The task may have entered a memcg OOM situation but
6215 * if the allocation error was handled gracefully (no
6216 * VM_FAULT_OOM), there is no need to kill anything.
6217 * Just clean up the OOM state peacefully.
6218 */
6219 if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
6220 mem_cgroup_oom_synchronize(false);
6221 }
6222 out:
6223 mm_account_fault(mm, regs, address, flags, ret);
6224
6225 return ret;
6226 }
6227 EXPORT_SYMBOL_GPL(handle_mm_fault);
6228
6229 #ifdef CONFIG_LOCK_MM_AND_FIND_VMA
6230 #include <linux/extable.h>
6231
6232 static inline bool get_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs)
6233 {
6234 if (likely(mmap_read_trylock(mm)))
6235 return true;
6236
6237 if (regs && !user_mode(regs)) {
6238 unsigned long ip = exception_ip(regs);
6239 if (!search_exception_tables(ip))
6240 return false;
6241 }
6242
6243 return !mmap_read_lock_killable(mm);
6244 }
6245
6246 static inline bool mmap_upgrade_trylock(struct mm_struct *mm)
6247 {
6248 /*
6249 * We don't have this operation yet.
6250 *
6251 * It should be easy enough to do: it's basically a
6252 * atomic_long_try_cmpxchg_acquire()
6253 * from RWSEM_READER_BIAS -> RWSEM_WRITER_LOCKED, but
6254 * it also needs the proper lockdep magic etc.
6255 */
6256 return false;
6257 }
6258
6259 static inline bool upgrade_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs)
6260 {
6261 mmap_read_unlock(mm);
6262 if (regs && !user_mode(regs)) {
6263 unsigned long ip = exception_ip(regs);
6264 if (!search_exception_tables(ip))
6265 return false;
6266 }
6267 return !mmap_write_lock_killable(mm);
6268 }
6269
6270 /*
6271 * Helper for page fault handling.
6272 *
6273 * This is kind of equivalent to "mmap_read_lock()" followed
6274 * by "find_extend_vma()", except it's a lot more careful about
6275 * the locking (and will drop the lock on failure).
6276 *
6277 * For example, if we have a kernel bug that causes a page
6278 * fault, we don't want to just use mmap_read_lock() to get
6279 * the mm lock, because that would deadlock if the bug were
6280 * to happen while we're holding the mm lock for writing.
6281 *
6282 * So this checks the exception tables on kernel faults in
6283 * order to only do this all for instructions that are actually
6284 * expected to fault.
6285 *
6286 * We can also actually take the mm lock for writing if we
6287 * need to extend the vma, which helps the VM layer a lot.
6288 */
6289 struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
6290 unsigned long addr, struct pt_regs *regs)
6291 {
6292 struct vm_area_struct *vma;
6293
6294 if (!get_mmap_lock_carefully(mm, regs))
6295 return NULL;
6296
6297 vma = find_vma(mm, addr);
6298 if (likely(vma && (vma->vm_start <= addr)))
6299 return vma;
6300
6301 /*
6302 * Well, dang. We might still be successful, but only
6303 * if we can extend a vma to do so.
6304 */
6305 if (!vma || !(vma->vm_flags & VM_GROWSDOWN)) {
6306 mmap_read_unlock(mm);
6307 return NULL;
6308 }
6309
6310 /*
6311 * We can try to upgrade the mmap lock atomically,
6312 * in which case we can continue to use the vma
6313 * we already looked up.
6314 *
6315 * Otherwise we'll have to drop the mmap lock and
6316 * re-take it, and also look up the vma again,
6317 * re-checking it.
6318 */
6319 if (!mmap_upgrade_trylock(mm)) {
6320 if (!upgrade_mmap_lock_carefully(mm, regs))
6321 return NULL;
6322
6323 vma = find_vma(mm, addr);
6324 if (!vma)
6325 goto fail;
6326 if (vma->vm_start <= addr)
6327 goto success;
6328 if (!(vma->vm_flags & VM_GROWSDOWN))
6329 goto fail;
6330 }
6331
6332 if (expand_stack_locked(vma, addr))
6333 goto fail;
6334
6335 success:
6336 mmap_write_downgrade(mm);
6337 return vma;
6338
6339 fail:
6340 mmap_write_unlock(mm);
6341 return NULL;
6342 }
6343 #endif
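/*
 * Illustrative sketch (not built, #if 0): how an architecture's fault
 * entry point typically strings lock_mm_and_find_vma() and
 * handle_mm_fault() together. Signal delivery, the hugetlb path and
 * the per-VMA-lock fast path are omitted; example_arch_page_fault()
 * is hypothetical.
 */
#if 0
static void example_arch_page_fault(struct pt_regs *regs,
				    unsigned long addr, bool is_write)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	vm_fault_t fault;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (is_write)
		flags |= FAULT_FLAG_WRITE;
retry:
	vma = lock_mm_and_find_vma(mm, addr, regs);
	if (!vma)
		return;		/* a real handler raises SIGSEGV here */

	fault = handle_mm_fault(vma, addr, flags, regs);
	if (fault & VM_FAULT_COMPLETED)
		return;		/* mmap_lock already released for us */
	if (fault & VM_FAULT_RETRY) {
		flags |= FAULT_FLAG_TRIED;
		goto retry;	/* mmap_lock was dropped before returning */
	}
	mmap_read_unlock(mm);
}
#endif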
6344
6345 #ifdef CONFIG_PER_VMA_LOCK
6346 /*
6347 * Lookup and lock a VMA under RCU protection. Returned VMA is guaranteed to be
6348 * stable and not isolated. If the VMA is not found or is being modified,
6349 * the function returns NULL.
6350 */
6351 struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
6352 unsigned long address)
6353 {
6354 MA_STATE(mas, &mm->mm_mt, address, address);
6355 struct vm_area_struct *vma;
6356
6357 rcu_read_lock();
6358 retry:
6359 vma = mas_walk(&mas);
6360 if (!vma)
6361 goto inval;
6362
6363 if (!vma_start_read(vma))
6364 goto inval;
6365
6366 /* Check if the VMA got isolated after we found it */
6367 if (vma->detached) {
6368 vma_end_read(vma);
6369 count_vm_vma_lock_event(VMA_LOCK_MISS);
6370 /* The area was replaced with another one */
6371 goto retry;
6372 }
6373 /*
6374 * At this point, we have a stable reference to a VMA: The VMA is
6375 * locked and we know it hasn't already been isolated.
6376 * From here on, we can access the VMA without worrying about which
6377 * fields are accessible for RCU readers.
6378 */
6379
6380 /* Check since vm_start/vm_end might change before we lock the VMA */
6381 if (unlikely(address < vma->vm_start || address >= vma->vm_end))
6382 goto inval_end_read;
6383
6384 rcu_read_unlock();
6385 return vma;
6386
6387 inval_end_read:
6388 vma_end_read(vma);
6389 inval:
6390 rcu_read_unlock();
6391 count_vm_vma_lock_event(VMA_LOCK_ABORT);
6392 return NULL;
6393 }
6394 #endif /* CONFIG_PER_VMA_LOCK */
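/*
 * Illustrative sketch (not built, #if 0): the lockless fast path an
 * architecture tries before falling back to the mmap_lock path above.
 * example_vma_lock_fault() is hypothetical; error handling is elided.
 */
#if 0
static vm_fault_t example_vma_lock_fault(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned int flags,
					 struct pt_regs *regs)
{
	struct vm_area_struct *vma = lock_vma_under_rcu(mm, addr);
	vm_fault_t fault;

	if (!vma)
		return VM_FAULT_RETRY;	/* take the mmap_lock path instead */

	fault = handle_mm_fault(vma, addr, flags | FAULT_FLAG_VMA_LOCK, regs);
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		vma_end_read(vma);
	return fault;
}
#endif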
6395
6396 #ifndef __PAGETABLE_P4D_FOLDED
6397 /*
6398 * Allocate p4d page table.
6399 * We've already handled the fast-path in-line.
6400 */
6401 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
6402 {
6403 p4d_t *new = p4d_alloc_one(mm, address);
6404 if (!new)
6405 return -ENOMEM;
6406
6407 spin_lock(&mm->page_table_lock);
6408 if (pgd_present(*pgd)) { /* Another has populated it */
6409 p4d_free(mm, new);
6410 } else {
6411 smp_wmb(); /* See comment in pmd_install() */
6412 pgd_populate(mm, pgd, new);
6413 }
6414 spin_unlock(&mm->page_table_lock);
6415 return 0;
6416 }
6417 #endif /* __PAGETABLE_P4D_FOLDED */
6418
6419 #ifndef __PAGETABLE_PUD_FOLDED
6420 /*
6421 * Allocate page upper directory.
6422 * We've already handled the fast-path in-line.
6423 */
6424 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
6425 {
6426 pud_t *new = pud_alloc_one(mm, address);
6427 if (!new)
6428 return -ENOMEM;
6429
6430 spin_lock(&mm->page_table_lock);
6431 if (!p4d_present(*p4d)) {
6432 mm_inc_nr_puds(mm);
6433 smp_wmb(); /* See comment in pmd_install() */
6434 p4d_populate(mm, p4d, new);
6435 } else /* Another has populated it */
6436 pud_free(mm, new);
6437 spin_unlock(&mm->page_table_lock);
6438 return 0;
6439 }
6440 #endif /* __PAGETABLE_PUD_FOLDED */
6441
6442 #ifndef __PAGETABLE_PMD_FOLDED
6443 /*
6444 * Allocate page middle directory.
6445 * We've already handled the fast-path in-line.
6446 */
6447 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
6448 {
6449 spinlock_t *ptl;
6450 pmd_t *new = pmd_alloc_one(mm, address);
6451 if (!new)
6452 return -ENOMEM;
6453
6454 ptl = pud_lock(mm, pud);
6455 if (!pud_present(*pud)) {
6456 mm_inc_nr_pmds(mm);
6457 smp_wmb(); /* See comment in pmd_install() */
6458 pud_populate(mm, pud, new);
6459 } else { /* Another has populated it */
6460 pmd_free(mm, new);
6461 }
6462 spin_unlock(ptl);
6463 return 0;
6464 }
6465 #endif /* __PAGETABLE_PMD_FOLDED */
6466
6467 static inline void pfnmap_args_setup(struct follow_pfnmap_args *args,
6468 spinlock_t *lock, pte_t *ptep,
6469 pgprot_t pgprot, unsigned long pfn_base,
6470 unsigned long addr_mask, bool writable,
6471 bool special)
6472 {
6473 args->lock = lock;
6474 args->ptep = ptep;
6475 args->pfn = pfn_base + ((args->address & ~addr_mask) >> PAGE_SHIFT);
6476 args->pgprot = pgprot;
6477 args->writable = writable;
6478 args->special = special;
6479 }
6480
6481 static inline void pfnmap_lockdep_assert(struct vm_area_struct *vma)
6482 {
6483 #ifdef CONFIG_LOCKDEP
6484 struct file *file = vma->vm_file;
6485 struct address_space *mapping = file ? file->f_mapping : NULL;
6486
6487 if (mapping)
6488 lockdep_assert(lockdep_is_held(&mapping->i_mmap_rwsem) ||
6489 lockdep_is_held(&vma->vm_mm->mmap_lock));
6490 else
6491 lockdep_assert(lockdep_is_held(&vma->vm_mm->mmap_lock));
6492 #endif
6493 }
6494
6495 /**
6496 * follow_pfnmap_start() - Look up a pfn mapping at a user virtual address
6497 * @args: Pointer to struct @follow_pfnmap_args
6498 *
6499 * The caller needs to set up args->vma and args->address to point to the
6500 * virtual address that is the target of the lookup. On a successful
6501 * return, the results will be put into the other output fields.
6502 *
6503 * After the caller has finished using the fields, it must invoke
6504 * follow_pfnmap_end() to properly release the locks and resources of
6505 * such a lookup request.
6506 *
6507 * Between the start() and end() calls, the results in @args will be valid
6508 * as proper locks will be held. After end() is called, the fields in
6509 * @follow_pfnmap_args must no longer be accessed. Any further
6510 * use of such information after end() requires proper synchronization
6511 * by the caller with page table updates, otherwise it can create a
6512 * security bug.
6513 *
6514 * If the PTE maps a refcounted page, callers are responsible to protect
6515 * against invalidation with MMU notifiers; otherwise access to the PFN at
6516 * a later point in time can trigger use-after-free.
6517 *
6518 * Only IO mappings and raw PFN mappings are allowed. The mmap semaphore
6519 * should be taken for read, and the mmap semaphore cannot be released
6520 * before the end() is invoked.
6521 *
6522 * This function must not be used to modify PTE content.
6523 *
6524 * Return: zero on success, negative otherwise.
6525 */
6526 int follow_pfnmap_start(struct follow_pfnmap_args *args)
6527 {
6528 struct vm_area_struct *vma = args->vma;
6529 unsigned long address = args->address;
6530 struct mm_struct *mm = vma->vm_mm;
6531 spinlock_t *lock;
6532 pgd_t *pgdp;
6533 p4d_t *p4dp, p4d;
6534 pud_t *pudp, pud;
6535 pmd_t *pmdp, pmd;
6536 pte_t *ptep, pte;
6537
6538 pfnmap_lockdep_assert(vma);
6539
6540 if (unlikely(address < vma->vm_start || address >= vma->vm_end))
6541 goto out;
6542
6543 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
6544 goto out;
6545 retry:
6546 pgdp = pgd_offset(mm, address);
6547 if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))
6548 goto out;
6549
6550 p4dp = p4d_offset(pgdp, address);
6551 p4d = READ_ONCE(*p4dp);
6552 if (p4d_none(p4d) || unlikely(p4d_bad(p4d)))
6553 goto out;
6554
6555 pudp = pud_offset(p4dp, address);
6556 pud = READ_ONCE(*pudp);
6557 if (pud_none(pud))
6558 goto out;
6559 if (pud_leaf(pud)) {
6560 lock = pud_lock(mm, pudp);
6561 if (!unlikely(pud_leaf(pud))) {
6562 spin_unlock(lock);
6563 goto retry;
6564 }
6565 pfnmap_args_setup(args, lock, NULL, pud_pgprot(pud),
6566 pud_pfn(pud), PUD_MASK, pud_write(pud),
6567 pud_special(pud));
6568 return 0;
6569 }
6570
6571 pmdp = pmd_offset(pudp, address);
6572 pmd = pmdp_get_lockless(pmdp);
6573 if (pmd_leaf(pmd)) {
6574 lock = pmd_lock(mm, pmdp);
6575 if (!unlikely(pmd_leaf(pmd))) {
6576 spin_unlock(lock);
6577 goto retry;
6578 }
6579 pfnmap_args_setup(args, lock, NULL, pmd_pgprot(pmd),
6580 pmd_pfn(pmd), PMD_MASK, pmd_write(pmd),
6581 pmd_special(pmd));
6582 return 0;
6583 }
6584
6585 ptep = pte_offset_map_lock(mm, pmdp, address, &lock);
6586 if (!ptep)
6587 goto out;
6588 pte = ptep_get(ptep);
6589 if (!pte_present(pte))
6590 goto unlock;
6591 pfnmap_args_setup(args, lock, ptep, pte_pgprot(pte),
6592 pte_pfn(pte), PAGE_MASK, pte_write(pte),
6593 pte_special(pte));
6594 return 0;
6595 unlock:
6596 pte_unmap_unlock(ptep, lock);
6597 out:
6598 return -EINVAL;
6599 }
6600 EXPORT_SYMBOL_GPL(follow_pfnmap_start);
6601
6602 /**
6603 * follow_pfnmap_end(): End a follow_pfnmap_start() process
6604 * @args: Pointer to struct @follow_pfnmap_args
6605 *
6606 * Must be used in pair with follow_pfnmap_start(). See the start() function
6607 * above for more information.
6608 */
6609 void follow_pfnmap_end(struct follow_pfnmap_args *args)
6610 {
6611 if (args->lock)
6612 spin_unlock(args->lock);
6613 if (args->ptep)
6614 pte_unmap(args->ptep);
6615 }
6616 EXPORT_SYMBOL_GPL(follow_pfnmap_end);
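/*
 * Illustrative sketch (not built, #if 0): the minimal start()/end()
 * pairing described by the kernel-doc above. The PFN must be consumed
 * (here: copied) before follow_pfnmap_end() drops the locks.
 * example_addr_to_pfn() is hypothetical.
 */
#if 0
static int example_addr_to_pfn(struct vm_area_struct *vma,
			       unsigned long addr, unsigned long *pfn)
{
	struct follow_pfnmap_args args = { .vma = vma, .address = addr };

	if (follow_pfnmap_start(&args))
		return -EINVAL;
	*pfn = args.pfn;	/* may be stale once _end() runs */
	follow_pfnmap_end(&args);
	return 0;
}
#endif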
6617
6618 #ifdef CONFIG_HAVE_IOREMAP_PROT
6619 /**
6620 * generic_access_phys - generic implementation for iomem mmap access
6621 * @vma: the vma to access
6622 * @addr: userspace address, not relative offset within @vma
6623 * @buf: buffer to read/write
6624 * @len: length of transfer
6625 * @write: set to FOLL_WRITE when writing, otherwise reading
6626 *
6627 * This is a generic implementation for &vm_operations_struct.access for an
6628 * iomem mapping. This callback is used by access_process_vm() when the @vma is
6629 * not page based.
6630 */
6631 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
6632 void *buf, int len, int write)
6633 {
6634 resource_size_t phys_addr;
6635 unsigned long prot = 0;
6636 void __iomem *maddr;
6637 int offset = offset_in_page(addr);
6638 int ret = -EINVAL;
6639 bool writable;
6640 struct follow_pfnmap_args args = { .vma = vma, .address = addr };
6641
6642 retry:
6643 if (follow_pfnmap_start(&args))
6644 return -EINVAL;
6645 prot = pgprot_val(args.pgprot);
6646 phys_addr = (resource_size_t)args.pfn << PAGE_SHIFT;
6647 writable = args.writable;
6648 follow_pfnmap_end(&args);
6649
6650 if ((write & FOLL_WRITE) && !writable)
6651 return -EINVAL;
6652
6653 maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
6654 if (!maddr)
6655 return -ENOMEM;
6656
6657 if (follow_pfnmap_start(&args))
6658 goto out_unmap;
6659
6660 if ((prot != pgprot_val(args.pgprot)) ||
6661 (phys_addr != (args.pfn << PAGE_SHIFT)) ||
6662 (writable != args.writable)) {
6663 follow_pfnmap_end(&args);
6664 iounmap(maddr);
6665 goto retry;
6666 }
6667
6668 if (write)
6669 memcpy_toio(maddr + offset, buf, len);
6670 else
6671 memcpy_fromio(buf, maddr + offset, len);
6672 ret = len;
6673 follow_pfnmap_end(&args);
6674 out_unmap:
6675 iounmap(maddr);
6676
6677 return ret;
6678 }
6679 EXPORT_SYMBOL_GPL(generic_access_phys);
6680 #endif
6681
6682 /*
6683 * Access another process' address space as given in mm.
6684 */
6685 static int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
6686 void *buf, int len, unsigned int gup_flags)
6687 {
6688 void *old_buf = buf;
6689 int write = gup_flags & FOLL_WRITE;
6690
6691 if (mmap_read_lock_killable(mm))
6692 return 0;
6693
6694 /* Untag the address before looking up the VMA */
6695 addr = untagged_addr_remote(mm, addr);
6696
6697 /* Avoid triggering the temporary warning in __get_user_pages */
6698 if (!vma_lookup(mm, addr) && !expand_stack(mm, addr))
6699 return 0;
6700
6701 /* ignore errors, just check how much was successfully transferred */
6702 while (len) {
6703 int bytes, offset;
6704 void *maddr;
6705 struct vm_area_struct *vma = NULL;
6706 struct page *page = get_user_page_vma_remote(mm, addr,
6707 gup_flags, &vma);
6708
6709 if (IS_ERR(page)) {
6710 /* We might need to expand the stack to access it */
6711 vma = vma_lookup(mm, addr);
6712 if (!vma) {
6713 vma = expand_stack(mm, addr);
6714
6715 /* mmap_lock was dropped on failure */
6716 if (!vma)
6717 return buf - old_buf;
6718
6719 /* Try again if stack expansion worked */
6720 continue;
6721 }
6722
6723 /*
6724 * Check if this is a VM_IO | VM_PFNMAP VMA, which
6725 * we can access using slightly different code.
6726 */
6727 bytes = 0;
6728 #ifdef CONFIG_HAVE_IOREMAP_PROT
6729 if (vma->vm_ops && vma->vm_ops->access)
6730 bytes = vma->vm_ops->access(vma, addr, buf,
6731 len, write);
6732 #endif
6733 if (bytes <= 0)
6734 break;
6735 } else {
6736 bytes = len;
6737 offset = addr & (PAGE_SIZE-1);
6738 if (bytes > PAGE_SIZE-offset)
6739 bytes = PAGE_SIZE-offset;
6740
6741 maddr = kmap_local_page(page);
6742 if (write) {
6743 copy_to_user_page(vma, page, addr,
6744 maddr + offset, buf, bytes);
6745 set_page_dirty_lock(page);
6746 } else {
6747 copy_from_user_page(vma, page, addr,
6748 buf, maddr + offset, bytes);
6749 }
6750 unmap_and_put_page(page, maddr);
6751 }
6752 len -= bytes;
6753 buf += bytes;
6754 addr += bytes;
6755 }
6756 mmap_read_unlock(mm);
6757
6758 return buf - old_buf;
6759 }
6760
6761 /**
6762 * access_remote_vm - access another process' address space
6763 * @mm: the mm_struct of the target address space
6764 * @addr: start address to access
6765 * @buf: source or destination buffer
6766 * @len: number of bytes to transfer
6767 * @gup_flags: flags modifying lookup behaviour
6768 *
6769 * The caller must hold a reference on @mm.
6770 *
6771 * Return: number of bytes copied from source to destination.
6772 */
access_remote_vm(struct mm_struct * mm,unsigned long addr,void * buf,int len,unsigned int gup_flags)6773 int access_remote_vm(struct mm_struct *mm, unsigned long addr,
6774 void *buf, int len, unsigned int gup_flags)
6775 {
6776 return __access_remote_vm(mm, addr, buf, len, gup_flags);
6777 }
6778

/*
 * Access another process' address space.
 * Source/target buffer must be in kernel space.
 * Do not walk the page table directly; use get_user_pages().
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr,
		      void *buf, int len, unsigned int gup_flags)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	ret = __access_remote_vm(mm, addr, buf, len, gup_flags);

	mmput(mm);

	return ret;
}
EXPORT_SYMBOL_GPL(access_process_vm);
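
/*
 * Illustrative sketch, not part of this file: a hypothetical helper
 * reading one word from a traced task, in the style of the
 * PTRACE_PEEKDATA implementation, using access_process_vm() above:
 *
 *	static long peek_word(struct task_struct *child, unsigned long addr)
 *	{
 *		unsigned long word;
 *		int copied;
 *
 *		copied = access_process_vm(child, addr, &word, sizeof(word),
 *					   FOLL_FORCE);
 *		return copied == sizeof(word) ? word : -EIO;
 *	}
 */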

/*
 * Print the name of a VMA.
 */
void print_vma_addr(char *prefix, unsigned long ip)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	/*
	 * we might be running from an atomic context so we cannot sleep
	 */
	if (!mmap_read_trylock(mm))
		return;

	vma = vma_lookup(mm, ip);
	if (vma && vma->vm_file) {
		struct file *f = vma->vm_file;
		ip -= vma->vm_start;
		ip += vma->vm_pgoff << PAGE_SHIFT;
		printk("%s%pD[%lx,%lx+%lx]", prefix, f, ip,
				vma->vm_start,
				vma->vm_end - vma->vm_start);
	}
	mmap_read_unlock(mm);
}
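
/*
 * For a fault at @ip inside a file-backed mapping the printk above
 * emits, with illustrative values:
 *
 *	<prefix>libfoo.so[2f31,7f3a81c00000+22000]
 *
 * i.e. %pD prints the file's name, followed by the file offset of ip,
 * the start address of the mapping, and the mapping's length.
 */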

#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
void __might_fault(const char *file, int line)
{
	if (pagefault_disabled())
		return;
	__might_sleep(file, line);
	if (current->mm)
		might_lock_read(&current->mm->mmap_lock);
}
EXPORT_SYMBOL(__might_fault);
#endif

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
/*
 * Process all subpages of the specified huge page with the specified
 * operation. The target subpage will be processed last to keep its
 * cache lines hot.
 */
static inline int process_huge_page(
	unsigned long addr_hint, unsigned int nr_pages,
	int (*process_subpage)(unsigned long addr, int idx, void *arg),
	void *arg)
{
	int i, n, base, l, ret;
	unsigned long addr = addr_hint &
		~(((unsigned long)nr_pages << PAGE_SHIFT) - 1);

	/* Process target subpage last to keep its cache lines hot */
	might_sleep();
	n = (addr_hint - addr) / PAGE_SIZE;
	if (2 * n <= nr_pages) {
		/* If target subpage in first half of huge page */
		base = 0;
		l = n;
		/* Process subpages at the end of huge page */
		for (i = nr_pages - 1; i >= 2 * n; i--) {
			cond_resched();
			ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
			if (ret)
				return ret;
		}
	} else {
		/* If target subpage in second half of huge page */
		base = nr_pages - 2 * (nr_pages - n);
		l = nr_pages - n;
		/* Process subpages at the beginning of huge page */
		for (i = 0; i < base; i++) {
			cond_resched();
			ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
			if (ret)
				return ret;
		}
	}
	/*
	 * Process remaining subpages in left-right-left-right pattern
	 * towards the target subpage
	 */
	for (i = 0; i < l; i++) {
		int left_idx = base + i;
		int right_idx = base + 2 * l - 1 - i;

		cond_resched();
		ret = process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
		if (ret)
			return ret;
		cond_resched();
		ret = process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
		if (ret)
			return ret;
	}
	return 0;
}
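
/*
 * Worked example of the ordering above: with nr_pages == 8 and the
 * target in subpage 2 (n == 2, first half), the subpages are processed
 * in the order 7, 6, 5, 4, 0, 3, 1, 2 -- the tail first, then
 * converging left/right towards the target, which comes last so its
 * cache lines are still hot when the caller touches it.
 */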

static void clear_gigantic_page(struct folio *folio, unsigned long addr_hint,
				unsigned int nr_pages)
{
	unsigned long addr = ALIGN_DOWN(addr_hint, folio_size(folio));
	int i;

	might_sleep();
	for (i = 0; i < nr_pages; i++) {
		cond_resched();
		clear_user_highpage(folio_page(folio, i), addr + i * PAGE_SIZE);
	}
}

static int clear_subpage(unsigned long addr, int idx, void *arg)
{
	struct folio *folio = arg;

	clear_user_highpage(folio_page(folio, idx), addr);
	return 0;
}

/**
 * folio_zero_user - Zero a folio which will be mapped to userspace.
 * @folio: The folio to zero.
 * @addr_hint: The address that will be accessed, or the base address if unclear.
 */
void folio_zero_user(struct folio *folio, unsigned long addr_hint)
{
	unsigned int nr_pages = folio_nr_pages(folio);

	if (unlikely(nr_pages > MAX_ORDER_NR_PAGES))
		clear_gigantic_page(folio, addr_hint, nr_pages);
	else
		process_huge_page(addr_hint, nr_pages, clear_subpage, folio);
}
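
/*
 * Illustrative sketch, not part of this file: a fault handler that has
 * just allocated a large anonymous folio for a fault at vmf->address
 * would zero it with that address as the hint, so the faulted-on
 * subpage is cleared last and stays cache-hot:
 *
 *	// allocation of "folio" elided
 *	folio_zero_user(folio, vmf->address);
 */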

static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
				   unsigned long addr_hint,
				   struct vm_area_struct *vma,
				   unsigned int nr_pages)
{
	unsigned long addr = ALIGN_DOWN(addr_hint, folio_size(dst));
	struct page *dst_page;
	struct page *src_page;
	int i;

	for (i = 0; i < nr_pages; i++) {
		dst_page = folio_page(dst, i);
		src_page = folio_page(src, i);

		cond_resched();
		if (copy_mc_user_highpage(dst_page, src_page,
					  addr + i * PAGE_SIZE, vma))
			return -EHWPOISON;
	}
	return 0;
}

struct copy_subpage_arg {
	struct folio *dst;
	struct folio *src;
	struct vm_area_struct *vma;
};

static int copy_subpage(unsigned long addr, int idx, void *arg)
{
	struct copy_subpage_arg *copy_arg = arg;
	struct page *dst = folio_page(copy_arg->dst, idx);
	struct page *src = folio_page(copy_arg->src, idx);

	if (copy_mc_user_highpage(dst, src, addr, copy_arg->vma))
		return -EHWPOISON;
	return 0;
}

int copy_user_large_folio(struct folio *dst, struct folio *src,
			  unsigned long addr_hint, struct vm_area_struct *vma)
{
	unsigned int nr_pages = folio_nr_pages(dst);
	struct copy_subpage_arg arg = {
		.dst = dst,
		.src = src,
		.vma = vma,
	};

	if (unlikely(nr_pages > MAX_ORDER_NR_PAGES))
		return copy_user_gigantic_page(dst, src, addr_hint, vma, nr_pages);

	return process_huge_page(addr_hint, nr_pages, copy_subpage, &arg);
}

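/*
 * Copy folio contents from userspace, mirroring copy_from_user()
 * semantics: the return value is the number of bytes that could NOT be
 * copied, i.e. 0 on complete success. With allow_pagefault == false
 * the copy runs with page faults disabled and stops at the first fault.
 */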
long copy_folio_from_user(struct folio *dst_folio,
			  const void __user *usr_src,
			  bool allow_pagefault)
{
	void *kaddr;
	unsigned long i, rc = 0;
	unsigned int nr_pages = folio_nr_pages(dst_folio);
	unsigned long ret_val = nr_pages * PAGE_SIZE;
	struct page *subpage;

	for (i = 0; i < nr_pages; i++) {
		subpage = folio_page(dst_folio, i);
		kaddr = kmap_local_page(subpage);
		if (!allow_pagefault)
			pagefault_disable();
		rc = copy_from_user(kaddr, usr_src + i * PAGE_SIZE, PAGE_SIZE);
		if (!allow_pagefault)
			pagefault_enable();
		kunmap_local(kaddr);

		ret_val -= (PAGE_SIZE - rc);
		if (rc)
			break;

		flush_dcache_page(subpage);

		cond_resched();
	}
	return ret_val;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */

#if defined(CONFIG_SPLIT_PTE_PTLOCKS) && ALLOC_SPLIT_PTLOCKS

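/*
 * With split PTE locks, spinlock_t can be too large to embed directly
 * in struct ptdesc (e.g. when lockdep is enabled); in that case the
 * locks are allocated dynamically from this dedicated slab cache.
 */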
static struct kmem_cache *page_ptl_cachep;

void __init ptlock_cache_init(void)
{
	page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
			SLAB_PANIC, NULL);
}

bool ptlock_alloc(struct ptdesc *ptdesc)
{
	spinlock_t *ptl;

	ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
	if (!ptl)
		return false;
	ptdesc->ptl = ptl;
	return true;
}

void ptlock_free(struct ptdesc *ptdesc)
{
	if (ptdesc->ptl)
		kmem_cache_free(page_ptl_cachep, ptdesc->ptl);
}
#endif

void vma_pgtable_walk_begin(struct vm_area_struct *vma)
{
	if (is_vm_hugetlb_page(vma))
		hugetlb_vma_lock_read(vma);
}

void vma_pgtable_walk_end(struct vm_area_struct *vma)
{
	if (is_vm_hugetlb_page(vma))
		hugetlb_vma_unlock_read(vma);
}
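
/*
 * Illustrative sketch, not part of this file: a generic page-table
 * walker brackets its walk with the helpers above, so hugetlb VMAs are
 * traversed under the hugetlb VMA lock:
 *
 *	vma_pgtable_walk_begin(vma);
 *	// ... walk vma's page tables ...
 *	vma_pgtable_walk_end(vma);
 */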