// SPDX-License-Identifier: GPL-2.0-or-later

/*
 * VMA-specific functions.
 */

#include "vma_internal.h"
#include "vma.h"

struct mmap_state {
	struct mm_struct *mm;
	struct vma_iterator *vmi;

	unsigned long addr;
	unsigned long end;
	pgoff_t pgoff;
	unsigned long pglen;
	unsigned long flags;
	struct file *file;

	unsigned long charged;
	bool retry_merge;

	struct vm_area_struct *prev;
	struct vm_area_struct *next;

	/* Unmapping state. */
	struct vma_munmap_struct vms;
	struct ma_state mas_detach;
	struct maple_tree mt_detach;
};

#define MMAP_STATE(name, mm_, vmi_, addr_, len_, pgoff_, flags_, file_) \
	struct mmap_state name = {				\
		.mm = mm_,					\
		.vmi = vmi_,					\
		.addr = addr_,					\
		.end = (addr_) + (len_),			\
		.pgoff = pgoff_,				\
		.pglen = PHYS_PFN(len_),			\
		.flags = flags_,				\
		.file = file_,					\
	}

#define VMG_MMAP_STATE(name, map_, vma_)			\
	struct vma_merge_struct name = {			\
		.mm = (map_)->mm,				\
		.vmi = (map_)->vmi,				\
		.start = (map_)->addr,				\
		.end = (map_)->end,				\
		.flags = (map_)->flags,				\
		.pgoff = (map_)->pgoff,				\
		.file = (map_)->file,				\
		.prev = (map_)->prev,				\
		.vma = vma_,					\
		.next = (vma_) ? NULL : (map_)->next,		\
		.state = VMA_MERGE_START,			\
		.merge_flags = VMG_FLAG_DEFAULT,		\
	}
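
/*
 * Illustrative sketch (not part of the kernel build; the identifiers "map"
 * and "vmg" are hypothetical): how a caller in the mmap path might
 * instantiate the two helpers above.
 *
 *	MMAP_STATE(map, mm, &vmi, addr, len, pgoff, flags, file);
 *	...
 *	VMG_MMAP_STATE(vmg, &map, NULL);
 */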

static inline bool is_mergeable_vma(struct vma_merge_struct *vmg, bool merge_next)
{
	struct vm_area_struct *vma = merge_next ? vmg->next : vmg->prev;

	if (!mpol_equal(vmg->policy, vma_policy(vma)))
		return false;
	/*
	 * VM_SOFTDIRTY should not prevent VMA merging when the flags match
	 * apart from the dirty bit -- the caller should mark the merged VMA
	 * as dirty. If the dirty bit were included in the comparison, we
	 * would increase pressure on the memory system, forcing the kernel
	 * to generate new VMAs where an old one could be extended instead.
	 */
	if ((vma->vm_flags ^ vmg->flags) & ~VM_SOFTDIRTY)
		return false;
	if (vma->vm_file != vmg->file)
		return false;
	if (!is_mergeable_vm_userfaultfd_ctx(vma, vmg->uffd_ctx))
		return false;
	if (!anon_vma_name_eq(anon_vma_name(vma), vmg->anon_name))
		return false;
	return true;
}

static inline bool is_mergeable_anon_vma(struct anon_vma *anon_vma1,
		struct anon_vma *anon_vma2, struct vm_area_struct *vma)
{
	/*
	 * The list_is_singular() test is to avoid merging VMAs cloned from
	 * parents; this improves scalability by reducing anon_vma lock
	 * contention.
	 */
	if ((!anon_vma1 || !anon_vma2) && (!vma ||
	    list_is_singular(&vma->anon_vma_chain)))
		return true;
	return anon_vma1 == anon_vma2;
}

/* Are the anon_vma's belonging to each VMA compatible with one another? */
static inline bool are_anon_vmas_compatible(struct vm_area_struct *vma1,
					    struct vm_area_struct *vma2)
{
	return is_mergeable_anon_vma(vma1->anon_vma, vma2->anon_vma, NULL);
}

/*
 * init_multi_vma_prep() - Initializer for struct vma_prepare
 * @vp: The vma_prepare struct
 * @vma: The vma that will be altered once locked
 * @next: The next vma if it is to be adjusted
 * @remove: The first vma to be removed
 * @remove2: The second vma to be removed
 */
static void init_multi_vma_prep(struct vma_prepare *vp,
				struct vm_area_struct *vma,
				struct vm_area_struct *next,
				struct vm_area_struct *remove,
				struct vm_area_struct *remove2)
{
	memset(vp, 0, sizeof(struct vma_prepare));
	vp->vma = vma;
	vp->anon_vma = vma->anon_vma;
	vp->remove = remove;
	vp->remove2 = remove2;
	vp->adj_next = next;
	if (!vp->anon_vma && next)
		vp->anon_vma = next->anon_vma;

	vp->file = vma->vm_file;
	if (vp->file)
		vp->mapping = vma->vm_file->f_mapping;
}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * in front of (at a lower virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if the same anon_vma is assigned but offsets are incompatible.
 *
 * We don't check here for the merged mmap wrapping around the end of pagecache
 * indices (16TB on ia32) because do_mmap() does not permit mmap's which
 * wrap, nor mmaps which cover the final page at index -1UL.
 *
 * We assume the vma may be removed as part of the merge.
 */
static bool can_vma_merge_before(struct vma_merge_struct *vmg)
{
	pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);

	if (is_mergeable_vma(vmg, /* merge_next = */ true) &&
	    is_mergeable_anon_vma(vmg->anon_vma, vmg->next->anon_vma, vmg->next)) {
		if (vmg->next->vm_pgoff == vmg->pgoff + pglen)
			return true;
	}

	return false;
}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * beyond (at a higher virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if the same anon_vma is assigned but offsets are incompatible.
 *
 * We assume that vma is not removed as part of the merge.
 */
static bool can_vma_merge_after(struct vma_merge_struct *vmg)
{
	if (is_mergeable_vma(vmg, /* merge_next = */ false) &&
	    is_mergeable_anon_vma(vmg->anon_vma, vmg->prev->anon_vma, vmg->prev)) {
		if (vmg->prev->vm_pgoff + vma_pages(vmg->prev) == vmg->pgoff)
			return true;
	}
	return false;
}
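
/*
 * Worked example of the offset checks above (illustrative values, assuming
 * 4KiB pages): prev spans [0x1000, 0x3000) with vm_pgoff 10, so
 * vma_pages(prev) == 2 and prev covers file pages [10, 12). A proposed range
 * starting at 0x3000 can merge after prev only if vmg->pgoff == 12, i.e. if
 * the file mapping continues contiguously.
 */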

static void __vma_link_file(struct vm_area_struct *vma,
			    struct address_space *mapping)
{
	if (vma_is_shared_maywrite(vma))
		mapping_allow_writable(mapping);

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_insert(vma, &mapping->i_mmap);
	flush_dcache_mmap_unlock(mapping);
}

/*
 * Requires inode->i_mapping->i_mmap_rwsem
 */
static void __remove_shared_vm_struct(struct vm_area_struct *vma,
				      struct address_space *mapping)
{
	if (vma_is_shared_maywrite(vma))
		mapping_unmap_writable(mapping);

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_remove(vma, &mapping->i_mmap);
	flush_dcache_mmap_unlock(mapping);
}

/*
 * vma has some anon_vma assigned, and is already inserted on that
 * anon_vma's interval trees.
 *
 * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
 * vma must be removed from the anon_vma's interval trees using
 * anon_vma_interval_tree_pre_update_vma().
 *
 * After the update, the vma will be reinserted using
 * anon_vma_interval_tree_post_update_vma().
 *
 * The entire update must be protected by exclusive mmap_lock and by
 * the root anon_vma's mutex.
 */
static void
anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
}

static void
anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
}
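
/*
 * Sketch of the update protocol described above (hypothetical caller;
 * vma_prepare()/vma_complete() below follow this same sequence):
 *
 *	anon_vma_lock_write(vma->anon_vma);
 *	anon_vma_interval_tree_pre_update_vma(vma);
 *	vma->vm_start = new_start;  // safe: vma is off the interval trees
 *	anon_vma_interval_tree_post_update_vma(vma);
 *	anon_vma_unlock_write(vma->anon_vma);
 */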

/*
 * vma_prepare() - Helper function for handling locking VMAs prior to altering
 * @vp: The initialized vma_prepare struct
 */
static void vma_prepare(struct vma_prepare *vp)
{
	if (vp->file) {
		uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);

		if (vp->adj_next)
			uprobe_munmap(vp->adj_next, vp->adj_next->vm_start,
				      vp->adj_next->vm_end);

		i_mmap_lock_write(vp->mapping);
		if (vp->insert && vp->insert->vm_file) {
			/*
			 * Put into interval tree now, so instantiated pages
			 * are visible to arm/parisc __flush_dcache_page
			 * throughout; but we cannot insert into address
			 * space until vma start or end is updated.
			 */
			__vma_link_file(vp->insert,
					vp->insert->vm_file->f_mapping);
		}
	}

	if (vp->anon_vma) {
		anon_vma_lock_write(vp->anon_vma);
		anon_vma_interval_tree_pre_update_vma(vp->vma);
		if (vp->adj_next)
			anon_vma_interval_tree_pre_update_vma(vp->adj_next);
	}

	if (vp->file) {
		flush_dcache_mmap_lock(vp->mapping);
		vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap);
		if (vp->adj_next)
			vma_interval_tree_remove(vp->adj_next,
						 &vp->mapping->i_mmap);
	}
}

/*
 * vma_complete() - Helper function for handling the unlocking after altering
 * VMAs, or for inserting a VMA.
 *
 * @vp: The vma_prepare struct
 * @vmi: The vma iterator
 * @mm: The mm_struct
 */
static void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi,
			 struct mm_struct *mm)
{
	if (vp->file) {
		if (vp->adj_next)
			vma_interval_tree_insert(vp->adj_next,
						 &vp->mapping->i_mmap);
		vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap);
		flush_dcache_mmap_unlock(vp->mapping);
	}

	if (vp->remove && vp->file) {
		__remove_shared_vm_struct(vp->remove, vp->mapping);
		if (vp->remove2)
			__remove_shared_vm_struct(vp->remove2, vp->mapping);
	} else if (vp->insert) {
		/*
		 * split_vma has split insert from vma, and needs
		 * us to insert it before dropping the locks
		 * (it may either follow vma or precede it).
		 */
		vma_iter_store(vmi, vp->insert);
		mm->map_count++;
	}

	if (vp->anon_vma) {
		anon_vma_interval_tree_post_update_vma(vp->vma);
		if (vp->adj_next)
			anon_vma_interval_tree_post_update_vma(vp->adj_next);
		anon_vma_unlock_write(vp->anon_vma);
	}

	if (vp->file) {
		i_mmap_unlock_write(vp->mapping);
		uprobe_mmap(vp->vma);

		if (vp->adj_next)
			uprobe_mmap(vp->adj_next);
	}

	if (vp->remove) {
again:
		vma_mark_detached(vp->remove, true);
		if (vp->file) {
			uprobe_munmap(vp->remove, vp->remove->vm_start,
				      vp->remove->vm_end);
			fput(vp->file);
		}
		if (vp->remove->anon_vma)
			anon_vma_merge(vp->vma, vp->remove);
		mm->map_count--;
		mpol_put(vma_policy(vp->remove));
		if (!vp->remove2)
			WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end);
		vm_area_free(vp->remove);

		/*
		 * In mprotect's case 6 (see comments on vma_merge),
		 * we are removing both mid and next vmas.
		 */
		if (vp->remove2) {
			vp->remove = vp->remove2;
			vp->remove2 = NULL;
			goto again;
		}
	}
	if (vp->insert && vp->file)
		uprobe_mmap(vp->insert);
}

/*
 * init_vma_prep() - Initializer wrapper for vma_prepare struct
 * @vp: The vma_prepare struct
 * @vma: The vma that will be altered once locked
 */
static void init_vma_prep(struct vma_prepare *vp, struct vm_area_struct *vma)
{
	init_multi_vma_prep(vp, vma, NULL, NULL, NULL);
}

/*
 * Can the proposed VMA be merged with the left (previous) VMA taking into
 * account the start position of the proposed range.
 */
static bool can_vma_merge_left(struct vma_merge_struct *vmg)
{
	return vmg->prev && vmg->prev->vm_end == vmg->start &&
	       can_vma_merge_after(vmg);
}

/*
 * Can the proposed VMA be merged with the right (next) VMA taking into
 * account the end position of the proposed range.
 *
 * In addition, if we can merge with the left VMA, ensure that left and right
 * anon_vma's are also compatible.
 */
static bool can_vma_merge_right(struct vma_merge_struct *vmg,
				bool can_merge_left)
{
	if (!vmg->next || vmg->end != vmg->next->vm_start ||
	    !can_vma_merge_before(vmg))
		return false;

	if (!can_merge_left)
		return true;

	/*
	 * If we can merge with prev (left) and next (right), indicating that
	 * each VMA's anon_vma is compatible with the proposed anon_vma, this
	 * does not mean prev and next are compatible with EACH OTHER.
	 *
	 * We therefore check this in addition to mergeability to either side.
	 */
	return are_anon_vmas_compatible(vmg->prev, vmg->next);
}

/*
 * Close a vm structure and free it.
 */
void remove_vma(struct vm_area_struct *vma, bool unreachable)
{
	might_sleep();
	vma_close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	mpol_put(vma_policy(vma));
	if (unreachable)
		__vm_area_free(vma);
	else
		vm_area_free(vma);
}

/*
 * Get rid of page table information in the indicated region.
 *
 * Called with the mm semaphore held.
 */
void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
		  struct vm_area_struct *prev, struct vm_area_struct *next)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm);
	update_hiwater_rss(mm);
	unmap_vmas(&tlb, mas, vma, vma->vm_start, vma->vm_end, vma->vm_end,
		   /* mm_wr_locked = */ true);
	mas_set(mas, vma->vm_end);
	free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
		      next ? next->vm_start : USER_PGTABLES_CEILING,
		      /* mm_wr_locked = */ true);
	tlb_finish_mmu(&tlb);
}

/*
 * __split_vma() bypasses sysctl_max_map_count checking. We use this where it
 * has already been checked or doesn't make sense to fail.
 * VMA Iterator will point to the original VMA.
 */
static __must_check int
__split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
	    unsigned long addr, int new_below)
{
	struct vma_prepare vp;
	struct vm_area_struct *new;
	int err;

	WARN_ON(vma->vm_start >= addr);
	WARN_ON(vma->vm_end <= addr);

	if (vma->vm_ops && vma->vm_ops->may_split) {
		err = vma->vm_ops->may_split(vma, addr);
		if (err)
			return err;
	}

	new = vm_area_dup(vma);
	if (!new)
		return -ENOMEM;

	if (new_below) {
		new->vm_end = addr;
	} else {
		new->vm_start = addr;
		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
	}

	err = -ENOMEM;
	vma_iter_config(vmi, new->vm_start, new->vm_end);
	if (vma_iter_prealloc(vmi, new))
		goto out_free_vma;

	err = vma_dup_policy(vma, new);
	if (err)
		goto out_free_vmi;

	err = anon_vma_clone(new, vma);
	if (err)
		goto out_free_mpol;

	if (new->vm_file)
		get_file(new->vm_file);

	if (new->vm_ops && new->vm_ops->open)
		new->vm_ops->open(new);

	vma_start_write(vma);
	vma_start_write(new);

	init_vma_prep(&vp, vma);
	vp.insert = new;
	vma_prepare(&vp);
	vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);

	if (new_below) {
		vma->vm_start = addr;
		vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
	} else {
		vma->vm_end = addr;
	}

	/* vma_complete stores the new vma */
	vma_complete(&vp, vmi, vma->vm_mm);
	validate_mm(vma->vm_mm);

	/* Success. */
	if (new_below)
		vma_next(vmi);
	else
		vma_prev(vmi);

	return 0;

out_free_mpol:
	mpol_put(vma_policy(new));
out_free_vmi:
	vma_iter_free(vmi);
out_free_vma:
	vm_area_free(new);
	return err;
}

/*
 * Split a vma into two pieces at address 'addr': a new vma is allocated for
 * either the first part or the tail.
 */
static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
		     unsigned long addr, int new_below)
{
	if (vma->vm_mm->map_count >= sysctl_max_map_count)
		return -ENOMEM;

	return __split_vma(vmi, vma, addr, new_below);
}
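
/*
 * Illustrative sketch (hypothetical caller): splitting off the tail of a VMA
 * at addr. With new_below == 0 the new VMA covers [addr, vm_end) and, per
 * __split_vma(), the iterator is left pointing at the original (lower) VMA.
 *
 *	error = split_vma(&vmi, vma, addr, 0);
 *	if (error)
 *		return error;
 */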

/*
 * dup_anon_vma() - Helper function to duplicate anon_vma
 * @dst: The destination VMA
 * @src: The source VMA
 * @dup: Pointer to the destination VMA when successful.
 *
 * Returns: 0 on success.
 */
static int dup_anon_vma(struct vm_area_struct *dst,
			struct vm_area_struct *src, struct vm_area_struct **dup)
{
	/*
	 * Easily overlooked: when mprotect shifts the boundary, make sure the
	 * expanding vma has an anon_vma set if the shrinking vma had one, to
	 * cover any anon pages imported.
	 */
	if (src->anon_vma && !dst->anon_vma) {
		int ret;

		vma_assert_write_locked(dst);
		dst->anon_vma = src->anon_vma;
		ret = anon_vma_clone(dst, src);
		if (ret)
			return ret;

		*dup = dst;
	}

	return 0;
}
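
/*
 * Example of the boundary-shift scenario above (illustrative): prev expands
 * over pages that vma may have faulted in anonymously, so the caller clones
 * vma's anon_vma into prev and remembers the clone so that a later failure
 * can be unwound (this mirrors the usage in vma_merge_existing_range()):
 *
 *	struct vm_area_struct *anon_dup = NULL;
 *
 *	err = dup_anon_vma(prev, vma, &anon_dup);
 *	...
 *	if (failed && anon_dup)
 *		unlink_anon_vmas(anon_dup);
 */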

#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
void validate_mm(struct mm_struct *mm)
{
	int bug = 0;
	int i = 0;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mt_validate(&mm->mm_mt);
	for_each_vma(vmi, vma) {
#ifdef CONFIG_DEBUG_VM_RB
		struct anon_vma *anon_vma = vma->anon_vma;
		struct anon_vma_chain *avc;
#endif
		unsigned long vmi_start, vmi_end;
		bool warn = 0;

		vmi_start = vma_iter_addr(&vmi);
		vmi_end = vma_iter_end(&vmi);
		if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
			warn = 1;

		if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm))
			warn = 1;

		if (warn) {
			pr_emerg("issue in %s\n", current->comm);
			dump_stack();
			dump_vma(vma);
			pr_emerg("tree range: %px start %lx end %lx\n", vma,
				 vmi_start, vmi_end - 1);
			vma_iter_dump_tree(&vmi);
		}

#ifdef CONFIG_DEBUG_VM_RB
		if (anon_vma) {
			anon_vma_lock_read(anon_vma);
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				anon_vma_interval_tree_verify(avc);
			anon_vma_unlock_read(anon_vma);
		}
#endif
		/* Check for an infinite loop. */
		if (++i > mm->map_count + 10) {
			i = -1;
			break;
		}
	}
	if (i != mm->map_count) {
		pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
		bug = 1;
	}
	VM_BUG_ON_MM(bug, mm);
}
#endif /* CONFIG_DEBUG_VM_MAPLE_TREE */

/* Actually perform the VMA merge operation. */
static int commit_merge(struct vma_merge_struct *vmg,
			struct vm_area_struct *adjust,
			struct vm_area_struct *remove,
			struct vm_area_struct *remove2,
			long adj_start,
			bool expanded)
{
	struct vma_prepare vp;

	init_multi_vma_prep(&vp, vmg->vma, adjust, remove, remove2);

	VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
		   vp.anon_vma != adjust->anon_vma);

	if (expanded) {
		/* Note: vma iterator must be pointing to 'start'. */
		vma_iter_config(vmg->vmi, vmg->start, vmg->end);
	} else {
		vma_iter_config(vmg->vmi, adjust->vm_start + adj_start,
				adjust->vm_end);
	}

	if (vma_iter_prealloc(vmg->vmi, vmg->vma))
		return -ENOMEM;

	vma_prepare(&vp);
	vma_adjust_trans_huge(vmg->vma, vmg->start, vmg->end, adj_start);
	vma_set_range(vmg->vma, vmg->start, vmg->end, vmg->pgoff);

	if (expanded)
		vma_iter_store(vmg->vmi, vmg->vma);

	if (adj_start) {
		adjust->vm_start += adj_start;
		adjust->vm_pgoff += PHYS_PFN(adj_start);
		if (adj_start < 0) {
			WARN_ON(expanded);
			vma_iter_store(vmg->vmi, adjust);
		}
	}

	vma_complete(&vp, vmg->vmi, vmg->vma->vm_mm);

	return 0;
}

/* We can only remove VMAs when merging if they do not have a close hook. */
static bool can_merge_remove_vma(struct vm_area_struct *vma)
{
	return !vma->vm_ops || !vma->vm_ops->close;
}

/*
 * vma_merge_existing_range - Attempt to merge VMAs based on a VMA having its
 * attributes modified.
 *
 * @vmg: Describes the modifications being made to a VMA and associated
 *       metadata.
 *
 * When the attributes of a range within a VMA change, then it might be possible
 * for immediately adjacent VMAs to be merged into that VMA due to having
 * identical properties.
 *
 * This function checks for the existence of any such mergeable VMAs and updates
 * the maple tree describing the @vmg->vma->vm_mm address space to account for
 * this, as well as any VMAs shrunk/expanded/deleted as a result of this merge.
 *
 * As part of this operation, if a merge occurs, the @vmg object will have its
 * vma, start, end, and pgoff fields modified to execute the merge. Subsequent
 * calls to this function should reset these fields.
 *
 * Returns: The merged VMA if merge succeeds, or NULL otherwise.
 *
 * ASSUMPTIONS:
 * - The caller must assign the VMA to be modified to @vmg->vma.
 * - The caller must have set @vmg->prev to the previous VMA, if there is one.
 * - The caller must not set @vmg->next, as we determine this.
 * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
 * - vmi must be positioned within [@vmg->vma->vm_start, @vmg->vma->vm_end).
 */
static __must_check struct vm_area_struct *vma_merge_existing_range(
		struct vma_merge_struct *vmg)
{
	struct vm_area_struct *vma = vmg->vma;
	struct vm_area_struct *prev = vmg->prev;
	struct vm_area_struct *next, *res;
	struct vm_area_struct *anon_dup = NULL;
	struct vm_area_struct *adjust = NULL;
	unsigned long start = vmg->start;
	unsigned long end = vmg->end;
	bool left_side = vma && start == vma->vm_start;
	bool right_side = vma && end == vma->vm_end;
	int err = 0;
	long adj_start = 0;
	bool merge_will_delete_vma, merge_will_delete_next;
	bool merge_left, merge_right, merge_both;
	bool expanded;

	mmap_assert_write_locked(vmg->mm);
	VM_WARN_ON_VMG(!vma, vmg); /* We are modifying a VMA, so caller must specify. */
	VM_WARN_ON_VMG(vmg->next, vmg); /* We set this. */
	VM_WARN_ON_VMG(prev && start <= prev->vm_start, vmg);
	VM_WARN_ON_VMG(start >= end, vmg);

	/*
	 * If vma == prev, then we are offset into a VMA. Otherwise, we must
	 * span a portion of the VMA.
	 */
	VM_WARN_ON_VMG(vma && ((vma != prev && vmg->start != vma->vm_start) ||
			       vmg->end > vma->vm_end), vmg);
	/* The vmi must be positioned within vmg->vma. */
	VM_WARN_ON_VMG(vma && !(vma_iter_addr(vmg->vmi) >= vma->vm_start &&
				vma_iter_addr(vmg->vmi) < vma->vm_end), vmg);

	vmg->state = VMA_MERGE_NOMERGE;

	/*
	 * If this is a special mapping, or if the range being modified is
	 * neither at the furthermost left nor right side of the VMA, then we
	 * have no chance of merging and should abort.
	 */
	if (vmg->flags & VM_SPECIAL || (!left_side && !right_side))
		return NULL;

	if (left_side)
		merge_left = can_vma_merge_left(vmg);
	else
		merge_left = false;

	if (right_side) {
		next = vmg->next = vma_iter_next_range(vmg->vmi);
		vma_iter_prev_range(vmg->vmi);

		merge_right = can_vma_merge_right(vmg, merge_left);
	} else {
		merge_right = false;
		next = NULL;
	}

	if (merge_left)		/* If merging prev, position iterator there. */
		vma_prev(vmg->vmi);
	else if (!merge_right)	/* If we have nothing to merge, abort. */
		return NULL;

	merge_both = merge_left && merge_right;
	/* If we span the entire VMA, a merge implies it will be deleted. */
	merge_will_delete_vma = left_side && right_side;

	/*
	 * If we need to remove vma in its entirety but are unable to do so,
	 * we have no sensible recourse but to abort the merge.
	 */
	if (merge_will_delete_vma && !can_merge_remove_vma(vma))
		return NULL;

	/*
	 * If we merge both VMAs, then next is also deleted. This implies
	 * merge_will_delete_vma also.
	 */
	merge_will_delete_next = merge_both;

	/*
	 * If we cannot delete next, then we can reduce the operation to merging
	 * prev and vma (thereby deleting vma).
	 */
	if (merge_will_delete_next && !can_merge_remove_vma(next)) {
		merge_will_delete_next = false;
		merge_right = false;
		merge_both = false;
	}

	/* No matter what happens, we will be adjusting vma. */
	vma_start_write(vma);

	if (merge_left)
		vma_start_write(prev);

	if (merge_right)
		vma_start_write(next);

	if (merge_both) {
		/*
		 *         |<----->|
		 * |-------*********-------|
		 *   prev     vma     next
		 *  extend  delete  delete
		 */

		vmg->vma = prev;
		vmg->start = prev->vm_start;
		vmg->end = next->vm_end;
		vmg->pgoff = prev->vm_pgoff;

		/*
		 * We already ensured anon_vma compatibility above, so now it's
		 * simply a case of, if prev has no anon_vma object, which of
		 * next or vma contains the anon_vma we must duplicate.
		 */
		err = dup_anon_vma(prev, next->anon_vma ? next : vma, &anon_dup);
	} else if (merge_left) {
		/*
		 *         |<----->| OR
		 *         |<--------->|
		 * |-------*************
		 *   prev       vma
		 *  extend shrink/delete
		 */

		vmg->vma = prev;
		vmg->start = prev->vm_start;
		vmg->pgoff = prev->vm_pgoff;

		if (!merge_will_delete_vma) {
			adjust = vma;
			adj_start = vmg->end - vma->vm_start;
		}

		err = dup_anon_vma(prev, vma, &anon_dup);
	} else { /* merge_right */
		/*
		 *     |<----->| OR
		 * |<--------->|
		 * *************-------|
		 *      vma       next
		 * shrink/delete extend
		 */

		pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);

		VM_WARN_ON_VMG(!merge_right, vmg);
		/* If we are offset into a VMA, then prev must be vma. */
		VM_WARN_ON_VMG(vmg->start > vma->vm_start && prev && vma != prev, vmg);

		if (merge_will_delete_vma) {
			vmg->vma = next;
			vmg->end = next->vm_end;
			vmg->pgoff = next->vm_pgoff - pglen;
		} else {
			/*
			 * We shrink vma and expand next.
			 *
			 * IMPORTANT: This is the ONLY case where the final
			 * merged VMA is NOT vmg->vma, but rather vmg->next.
			 */

			vmg->start = vma->vm_start;
			vmg->end = start;
			vmg->pgoff = vma->vm_pgoff;

			adjust = next;
			adj_start = -(vma->vm_end - start);
		}

		err = dup_anon_vma(next, vma, &anon_dup);
	}

	if (err)
		goto abort;

	/*
	 * In nearly all cases, we expand vmg->vma. There is one exception -
	 * merge_right where we partially span the VMA. In this case we shrink
	 * the end of vmg->vma and adjust the start of vmg->next accordingly.
	 */
	expanded = !merge_right || merge_will_delete_vma;

	if (commit_merge(vmg, adjust,
			 merge_will_delete_vma ? vma : NULL,
			 merge_will_delete_next ? next : NULL,
			 adj_start, expanded)) {
		if (anon_dup)
			unlink_anon_vmas(anon_dup);

		/*
		 * We've cleaned up any cloned anon_vma's, no VMAs have been
		 * modified, no harm no foul if the user requests that we not
		 * report this and just give up, leaving the VMAs unmerged.
		 */
		if (!vmg->give_up_on_oom)
			vmg->state = VMA_MERGE_ERROR_NOMEM;
		return NULL;
	}

	res = merge_left ? prev : next;
	khugepaged_enter_vma(res, vmg->flags);

	vmg->state = VMA_MERGE_SUCCESS;
	return res;

abort:
	vma_iter_set(vmg->vmi, start);
	vma_iter_load(vmg->vmi);

	/*
	 * This means we have failed to clone anon_vma's correctly, but no
	 * actual changes to VMAs have occurred, so no harm no foul - if the
	 * user doesn't want this reported and instead just wants to give up on
	 * the merge, allow it.
	 */
	if (!vmg->give_up_on_oom)
		vmg->state = VMA_MERGE_ERROR_NOMEM;
	return NULL;
}

/*
 * vma_merge_new_range - Attempt to merge a new VMA into address space
 *
 * @vmg: Describes the VMA we are adding, in the range @vmg->start to @vmg->end
 *       (exclusive), which we try to merge with any adjacent VMAs if possible.
 *
 * We are about to add a VMA to the address space starting at @vmg->start and
 * ending at @vmg->end. There are three different possible scenarios:
 *
 * 1. There is a VMA with identical properties immediately adjacent to the
 *    proposed new VMA [@vmg->start, @vmg->end) either before or after it -
 *    EXPAND that VMA:
 *
 *    Proposed:       |-----|  or  |-----|
 *    Existing:  |----|                  |----|
 *
 * 2. There are VMAs with identical properties immediately adjacent to the
 *    proposed new VMA [@vmg->start, @vmg->end) both before AND after it -
 *    EXPAND the former and REMOVE the latter:
 *
 *    Proposed:       |-----|
 *    Existing:  |----|     |----|
 *
 * 3. There are no VMAs immediately adjacent to the proposed new VMA or those
 *    VMAs do not have identical attributes - NO MERGE POSSIBLE.
 *
 * In instances where we can merge, this function returns the expanded VMA which
 * will have its range adjusted accordingly and the underlying maple tree also
 * adjusted.
 *
 * Returns: In instances where no merge was possible, NULL. Otherwise, a pointer
 *          to the VMA we expanded.
 *
 * This function adjusts @vmg to provide @vmg->next if not already specified,
 * and adjusts [@vmg->start, @vmg->end) to span the expanded range.
 *
 * ASSUMPTIONS:
 * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
 * - The caller must have determined that [@vmg->start, @vmg->end) is empty,
 *   other than VMAs that will be unmapped should the operation succeed.
 * - The caller must have specified the previous vma in @vmg->prev.
 * - The caller must have specified the next vma in @vmg->next.
 * - The caller must have positioned the vmi at or before the gap.
 */
struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
{
	struct vm_area_struct *prev = vmg->prev;
	struct vm_area_struct *next = vmg->next;
	unsigned long end = vmg->end;
	bool can_merge_left, can_merge_right;
	bool just_expand = vmg->merge_flags & VMG_FLAG_JUST_EXPAND;

	mmap_assert_write_locked(vmg->mm);
	VM_WARN_ON_VMG(vmg->vma, vmg);
	/* vmi must point at or before the gap. */
	VM_WARN_ON_VMG(vma_iter_addr(vmg->vmi) > end, vmg);

	vmg->state = VMA_MERGE_NOMERGE;

	/* Special VMAs are unmergeable, also if no prev/next. */
	if ((vmg->flags & VM_SPECIAL) || (!prev && !next))
		return NULL;

	can_merge_left = can_vma_merge_left(vmg);
	can_merge_right = !just_expand && can_vma_merge_right(vmg, can_merge_left);

	/* If we can merge with the next VMA, adjust vmg accordingly. */
	if (can_merge_right) {
		vmg->end = next->vm_end;
		vmg->vma = next;
	}

	/* If we can merge with the previous VMA, adjust vmg accordingly. */
	if (can_merge_left) {
		vmg->start = prev->vm_start;
		vmg->vma = prev;
		vmg->pgoff = prev->vm_pgoff;

		/*
		 * If this merge would result in removal of the next VMA but we
		 * are not permitted to do so, reduce the operation to merging
		 * prev and vma.
		 */
		if (can_merge_right && !can_merge_remove_vma(next))
			vmg->end = end;

		/* In expand-only case we are already positioned at prev. */
		if (!just_expand) {
			/* Equivalent to going to the previous range. */
			vma_prev(vmg->vmi);
		}
	}

	/*
	 * Now try to expand adjacent VMA(s). This takes care of removing the
	 * following VMA if we have VMAs on both sides.
	 */
	if (vmg->vma && !vma_expand(vmg)) {
		khugepaged_enter_vma(vmg->vma, vmg->flags);
		vmg->state = VMA_MERGE_SUCCESS;
		return vmg->vma;
	}

	return NULL;
}

/*
 * vma_expand - Expand an existing VMA
 *
 * @vmg: Describes a VMA expansion operation.
 *
 * Expand @vma to vmg->start and vmg->end. Can expand off the start and end.
 * Will expand over vmg->next if it's different from vmg->vma and vmg->end ==
 * vmg->next->vm_end. Checking if the vmg->vma can expand and merge with
 * vmg->next needs to be handled by the caller.
 *
 * Returns: 0 on success.
 *
 * ASSUMPTIONS:
 * - The caller must hold a WRITE lock on vmg->vma->mm->mmap_lock.
 * - The caller must have set @vmg->vma and @vmg->next.
 */
int vma_expand(struct vma_merge_struct *vmg)
{
	struct vm_area_struct *anon_dup = NULL;
	bool remove_next = false;
	struct vm_area_struct *vma = vmg->vma;
	struct vm_area_struct *next = vmg->next;

	mmap_assert_write_locked(vmg->mm);

	vma_start_write(vma);
	if (next && (vma != next) && (vmg->end == next->vm_end)) {
		int ret;

		remove_next = true;
		/* This should already have been checked by this point. */
		VM_WARN_ON_VMG(!can_merge_remove_vma(next), vmg);
		vma_start_write(next);
		ret = dup_anon_vma(vma, next, &anon_dup);
		if (ret)
			return ret;
	}

	/* Not merging but overwriting any part of next is not handled. */
	VM_WARN_ON_VMG(next && !remove_next &&
		       next != vma && vmg->end > next->vm_start, vmg);
	/* Only handles expanding. */
	VM_WARN_ON_VMG(vma->vm_start < vmg->start || vma->vm_end > vmg->end, vmg);

	if (commit_merge(vmg, NULL, remove_next ? next : NULL, NULL, 0, true))
		goto nomem;

	return 0;

nomem:
	if (anon_dup)
		unlink_anon_vmas(anon_dup);
	/*
	 * If the user requests that we just give up on OOM, we are safe to do
	 * so here, as commit_merge() provides this contract to us. Nothing has
	 * been changed - no harm no foul, just don't report it.
	 */
	if (!vmg->give_up_on_oom)
		vmg->state = VMA_MERGE_ERROR_NOMEM;
	return -ENOMEM;
}

/*
 * vma_shrink() - Reduce an existing VMA's memory area
 * @vmi: The vma iterator
 * @vma: The VMA to modify
 * @start: The new start
 * @end: The new end
 *
 * Returns: 0 on success, -ENOMEM otherwise
 */
int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
	       unsigned long start, unsigned long end, pgoff_t pgoff)
{
	struct vma_prepare vp;

	WARN_ON((vma->vm_start != start) && (vma->vm_end != end));

	if (vma->vm_start < start)
		vma_iter_config(vmi, vma->vm_start, start);
	else
		vma_iter_config(vmi, end, vma->vm_end);

	if (vma_iter_prealloc(vmi, NULL))
		return -ENOMEM;

	vma_start_write(vma);

	init_vma_prep(&vp, vma);
	vma_prepare(&vp);
	vma_adjust_trans_huge(vma, start, end, 0);

	vma_iter_clear(vmi);
	vma_set_range(vma, start, end, pgoff);
	vma_complete(&vp, vmi, vma->vm_mm);
	validate_mm(vma->vm_mm);
	return 0;
}
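
/*
 * Illustrative sketch (hypothetical values, assuming the iterator is
 * positioned over vma): trimming a VMA spanning [0x1000, 0x5000) down to
 * [0x1000, 0x3000). The start is retained, so the pgoff is unchanged:
 *
 *	error = vma_shrink(&vmi, vma, 0x1000, 0x3000, vma->vm_pgoff);
 */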

static inline void vms_clear_ptes(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach, bool mm_wr_locked)
{
	struct mmu_gather tlb;

	if (!vms->clear_ptes) /* Nothing to do */
		return;

	/*
	 * We can free page tables without write-locking mmap_lock because VMAs
	 * were isolated before we downgraded mmap_lock.
	 */
	mas_set(mas_detach, 1);
	tlb_gather_mmu(&tlb, vms->vma->vm_mm);
	update_hiwater_rss(vms->vma->vm_mm);
	unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end,
		   vms->vma_count, mm_wr_locked);

	mas_set(mas_detach, 1);
	/* start and end may be different if there is no prev or next vma. */
	free_pgtables(&tlb, mas_detach, vms->vma, vms->unmap_start,
		      vms->unmap_end, mm_wr_locked);
	tlb_finish_mmu(&tlb);
	vms->clear_ptes = false;
}

static void vms_clean_up_area(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach)
{
	struct vm_area_struct *vma;

	if (!vms->nr_pages)
		return;

	vms_clear_ptes(vms, mas_detach, true);
	mas_set(mas_detach, 0);
	mas_for_each(mas_detach, vma, ULONG_MAX)
		vma_close(vma);
}

/*
 * vms_complete_munmap_vmas() - Finish the munmap() operation
 * @vms: The vma munmap struct
 * @mas_detach: The maple state of the detached vmas
 *
 * This updates the mm_struct, unmaps the region, frees the resources used for
 * the munmap() and may downgrade the lock, if requested. It covers everything
 * that needs to be done once the vma maple tree has been updated.
 */
static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;

	mm = current->mm;
	mm->map_count -= vms->vma_count;
	mm->locked_vm -= vms->locked_vm;
	if (vms->unlock)
		mmap_write_downgrade(mm);

	if (!vms->nr_pages)
		return;

	vms_clear_ptes(vms, mas_detach, !vms->unlock);
	/* Update high watermark before we lower total_vm */
	update_hiwater_vm(mm);
	/* Stat accounting */
	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) - vms->nr_pages);
	/* Paranoid bookkeeping */
	VM_WARN_ON(vms->exec_vm > mm->exec_vm);
	VM_WARN_ON(vms->stack_vm > mm->stack_vm);
	VM_WARN_ON(vms->data_vm > mm->data_vm);
	mm->exec_vm -= vms->exec_vm;
	mm->stack_vm -= vms->stack_vm;
	mm->data_vm -= vms->data_vm;

	/* Remove and clean up vmas */
	mas_set(mas_detach, 0);
	mas_for_each(mas_detach, vma, ULONG_MAX)
		remove_vma(vma, /* unreachable = */ false);

	vm_unacct_memory(vms->nr_accounted);
	validate_mm(mm);
	if (vms->unlock)
		mmap_read_unlock(mm);

	__mt_destroy(mas_detach->tree);
}

/*
 * reattach_vmas() - Undo any munmap work and free resources
 * @mas_detach: The maple state with the detached maple tree
 *
 * Reattach any detached vmas and free up the maple tree used to track the vmas.
 */
static void reattach_vmas(struct ma_state *mas_detach)
{
	struct vm_area_struct *vma;

	mas_set(mas_detach, 0);
	mas_for_each(mas_detach, vma, ULONG_MAX)
		vma_mark_detached(vma, false);

	__mt_destroy(mas_detach->tree);
}

/*
 * vms_gather_munmap_vmas() - Put all VMAs within a range into a maple tree
 * for removal at a later date. Handles splitting first and last if necessary
 * and marking the vmas as isolated.
 *
 * @vms: The vma munmap struct
 * @mas_detach: The maple state tracking the detached tree
 *
 * Return: 0 on success, error otherwise
 */
static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach)
{
	struct vm_area_struct *next = NULL;
	int error;

	/*
	 * If we need to split any vma, do it now to save pain later.
	 * Does it split the first one?
	 */
	if (vms->start > vms->vma->vm_start) {

		/*
		 * Make sure that map_count on return from munmap() will
		 * not exceed its limit; but let map_count go just above
		 * its limit temporarily, to help free resources as expected.
		 */
		if (vms->end < vms->vma->vm_end &&
		    vms->vma->vm_mm->map_count >= sysctl_max_map_count) {
			error = -ENOMEM;
			goto map_count_exceeded;
		}

		/* Don't bother splitting the VMA if we can't unmap it anyway */
		if (!can_modify_vma(vms->vma)) {
			error = -EPERM;
			goto start_split_failed;
		}

		error = __split_vma(vms->vmi, vms->vma, vms->start, 1);
		if (error)
			goto start_split_failed;
	}
	vms->prev = vma_prev(vms->vmi);
	if (vms->prev)
		vms->unmap_start = vms->prev->vm_end;

	/*
	 * Detach a range of VMAs from the mm. Using next as a temp variable as
	 * it is always overwritten.
	 */
	for_each_vma_range(*(vms->vmi), next, vms->end) {
		long nrpages;

		if (!can_modify_vma(next)) {
			error = -EPERM;
			goto modify_vma_failed;
		}
		/* Does it split the end? */
		if (next->vm_end > vms->end) {
			error = __split_vma(vms->vmi, next, vms->end, 0);
			if (error)
				goto end_split_failed;
		}
		vma_start_write(next);
		mas_set(mas_detach, vms->vma_count++);
		error = mas_store_gfp(mas_detach, next, GFP_KERNEL);
		if (error)
			goto munmap_gather_failed;

		vma_mark_detached(next, true);
		nrpages = vma_pages(next);

		vms->nr_pages += nrpages;
		if (next->vm_flags & VM_LOCKED)
			vms->locked_vm += nrpages;

		if (next->vm_flags & VM_ACCOUNT)
			vms->nr_accounted += nrpages;

		if (is_exec_mapping(next->vm_flags))
			vms->exec_vm += nrpages;
		else if (is_stack_mapping(next->vm_flags))
			vms->stack_vm += nrpages;
		else if (is_data_mapping(next->vm_flags))
			vms->data_vm += nrpages;

		if (vms->uf) {
			/*
			 * If userfaultfd_unmap_prep returns an error, the vmas
			 * will remain split, but userland will get a
			 * highly unexpected error anyway. This is no
			 * different than the case where the first of the two
			 * __split_vma fails, but we don't undo the first
			 * split, even though we could. This failure is
			 * unlikely enough that it's not worth optimizing for.
			 */
			error = userfaultfd_unmap_prep(next, vms->start,
						       vms->end, vms->uf);
			if (error)
				goto userfaultfd_error;
		}
#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
		BUG_ON(next->vm_start < vms->start);
		BUG_ON(next->vm_start > vms->end);
#endif
	}

	vms->next = vma_next(vms->vmi);
	if (vms->next)
		vms->unmap_end = vms->next->vm_start;

#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	/* Make sure no VMAs are about to be lost. */
	{
		MA_STATE(test, mas_detach->tree, 0, 0);
		struct vm_area_struct *vma_mas, *vma_test;
		int test_count = 0;

		vma_iter_set(vms->vmi, vms->start);
		rcu_read_lock();
		vma_test = mas_find(&test, vms->vma_count - 1);
		for_each_vma_range(*(vms->vmi), vma_mas, vms->end) {
			BUG_ON(vma_mas != vma_test);
			test_count++;
			vma_test = mas_next(&test, vms->vma_count - 1);
		}
		rcu_read_unlock();
		BUG_ON(vms->vma_count != test_count);
	}
#endif

	while (vma_iter_addr(vms->vmi) > vms->start)
		vma_iter_prev_range(vms->vmi);

	vms->clear_ptes = true;
	return 0;

userfaultfd_error:
munmap_gather_failed:
end_split_failed:
modify_vma_failed:
	reattach_vmas(mas_detach);
start_split_failed:
map_count_exceeded:
	return error;
}

/*
 * init_vma_munmap() - Initializer wrapper for vma_munmap_struct
 * @vms: The vma munmap struct
 * @vmi: The vma iterator
 * @vma: The first vm_area_struct to munmap
 * @start: The aligned start address to munmap
 * @end: The aligned end address to munmap
 * @uf: The userfaultfd list_head
 * @unlock: Unlock after the operation. Only unlocked on success
 */
static void init_vma_munmap(struct vma_munmap_struct *vms,
		struct vma_iterator *vmi, struct vm_area_struct *vma,
		unsigned long start, unsigned long end, struct list_head *uf,
		bool unlock)
{
	vms->vmi = vmi;
	vms->vma = vma;
	if (vma) {
		vms->start = start;
		vms->end = end;
	} else {
		vms->start = vms->end = 0;
	}
	vms->unlock = unlock;
	vms->uf = uf;
	vms->vma_count = 0;
	vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;
	vms->exec_vm = vms->stack_vm = vms->data_vm = 0;
	vms->unmap_start = FIRST_USER_ADDRESS;
	vms->unmap_end = USER_PGTABLES_CEILING;
	vms->clear_ptes = false;
}

/*
 * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
 * @vmi: The vma iterator
 * @vma: The starting vm_area_struct
 * @mm: The mm_struct
 * @start: The aligned start address to munmap.
 * @end: The aligned end address to munmap.
 * @uf: The userfaultfd list_head
 * @unlock: Set to true to drop the mmap_lock. Unlocking only happens on
 *          success.
 *
 * Return: 0 on success and drops the lock if so directed, error and leaves the
 * lock held otherwise.
 */
int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
		struct mm_struct *mm, unsigned long start, unsigned long end,
		struct list_head *uf, bool unlock)
{
	struct maple_tree mt_detach;
	MA_STATE(mas_detach, &mt_detach, 0, 0);
	mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
	mt_on_stack(mt_detach);
	struct vma_munmap_struct vms;
	int error;

	init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock);
	error = vms_gather_munmap_vmas(&vms, &mas_detach);
	if (error)
		goto gather_failed;

	error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
	if (error)
		goto clear_tree_failed;

	/* Point of no return */
	vms_complete_munmap_vmas(&vms, &mas_detach);
	return 0;

clear_tree_failed:
	reattach_vmas(&mas_detach);
gather_failed:
	validate_mm(mm);
	return error;
}

/*
 * do_vmi_munmap() - munmap a given range.
 * @vmi: The vma iterator
 * @mm: The mm_struct
 * @start: The start address to munmap
 * @len: The length of the range to munmap
 * @uf: The userfaultfd list_head
 * @unlock: set to true if the user wants to drop the mmap_lock on success
 *
 * This function takes a @vmi that is either pointing to the previous VMA or
 * set to MA_START and sets it up to remove the mapping(s). The @len will be
 * aligned.
 *
 * Return: 0 on success and drops the lock if so directed, error and leaves the
 * lock held otherwise.
 */
int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
		  unsigned long start, size_t len, struct list_head *uf,
		  bool unlock)
{
	unsigned long end;
	struct vm_area_struct *vma;

	if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE - start)
		return -EINVAL;

	end = start + PAGE_ALIGN(len);
	if (end == start)
		return -EINVAL;

	/* Find the first overlapping VMA */
	vma = vma_find(vmi, end);
	if (!vma) {
		if (unlock)
			mmap_write_unlock(mm);
		return 0;
	}

	return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
}
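
/*
 * Illustrative sketch (hypothetical caller): unmapping [start, start + len)
 * with the mmap_lock held for writing, dropping it on success via @unlock:
 *
 *	VMA_ITERATOR(vmi, mm, start);
 *	LIST_HEAD(uf);
 *
 *	mmap_write_lock(mm);
 *	ret = do_vmi_munmap(&vmi, mm, start, len, &uf, true);
 *	if (ret)
 *		mmap_write_unlock(mm);
 */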

/*
 * We are about to modify one or multiple of a VMA's flags, policy, userfaultfd
 * context and anonymous VMA name within the range [start, end).
 *
 * As a result, we might be able to merge the newly modified VMA range with an
 * adjacent VMA with identical properties.
 *
 * If no merge is possible and the range does not span the entirety of the VMA,
 * we then need to split the VMA to accommodate the change.
 *
 * The function returns either the merged VMA, the original VMA if a split was
 * required instead, or an error if the split failed.
 */
static struct vm_area_struct *vma_modify(struct vma_merge_struct *vmg)
{
	struct vm_area_struct *vma = vmg->vma;
	unsigned long start = vmg->start;
	unsigned long end = vmg->end;
	struct vm_area_struct *merged;

	/* First, try to merge. */
	merged = vma_merge_existing_range(vmg);
	if (merged)
		return merged;
	if (vmg_nomem(vmg))
		return ERR_PTR(-ENOMEM);

	/*
	 * Split can fail for reasons other than OOM, so if the user requests
	 * this it's probably a mistake.
	 */
	VM_WARN_ON(vmg->give_up_on_oom &&
		   (vma->vm_start != start || vma->vm_end != end));

	/* Split any preceding portion of the VMA. */
	if (vma->vm_start < start) {
		int err = split_vma(vmg->vmi, vma, start, 1);

		if (err)
			return ERR_PTR(err);
	}

	/* Split any trailing portion of the VMA. */
	if (vma->vm_end > end) {
		int err = split_vma(vmg->vmi, vma, end, 0);

		if (err)
			return ERR_PTR(err);
	}

	return vma;
}

struct vm_area_struct *vma_modify_flags(
		struct vma_iterator *vmi, struct vm_area_struct *prev,
		struct vm_area_struct *vma, unsigned long start, unsigned long end,
		unsigned long new_flags)
{
	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);

	vmg.flags = new_flags;

	return vma_modify(&vmg);
}

struct vm_area_struct
*vma_modify_flags_name(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start,
		       unsigned long end,
		       unsigned long new_flags,
		       struct anon_vma_name *new_name)
{
	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);

	vmg.flags = new_flags;
	vmg.anon_name = new_name;

	return vma_modify(&vmg);
}

struct vm_area_struct
*vma_modify_policy(struct vma_iterator *vmi,
		   struct vm_area_struct *prev,
		   struct vm_area_struct *vma,
		   unsigned long start, unsigned long end,
		   struct mempolicy *new_pol)
{
	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);

	vmg.policy = new_pol;

	return vma_modify(&vmg);
}

struct vm_area_struct
*vma_modify_flags_uffd(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start, unsigned long end,
		       unsigned long new_flags,
		       struct vm_userfaultfd_ctx new_ctx,
		       bool give_up_on_oom)
{
	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);

	vmg.flags = new_flags;
	vmg.uffd_ctx = new_ctx;
	if (give_up_on_oom)
		vmg.give_up_on_oom = true;

	return vma_modify(&vmg);
}

/*
 * Expand vma by delta bytes, potentially merging with an immediately adjacent
 * VMA with identical properties.
 */
struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
					struct vm_area_struct *vma,
					unsigned long delta)
{
	VMG_VMA_STATE(vmg, vmi, vma, vma, vma->vm_end, vma->vm_end + delta);

	vmg.next = vma_iter_next_rewind(vmi, NULL);
	vmg.vma = NULL; /* We use the VMA to populate VMG fields only. */

	return vma_merge_new_range(&vmg);
}

void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb)
{
	vb->count = 0;
}

static void unlink_file_vma_batch_process(struct unlink_vma_file_batch *vb)
{
	struct address_space *mapping;
	int i;

	mapping = vb->vmas[0]->vm_file->f_mapping;
	i_mmap_lock_write(mapping);
	for (i = 0; i < vb->count; i++) {
		VM_WARN_ON_ONCE(vb->vmas[i]->vm_file->f_mapping != mapping);
		__remove_shared_vm_struct(vb->vmas[i], mapping);
	}
	i_mmap_unlock_write(mapping);

	unlink_file_vma_batch_init(vb);
}

void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
			       struct vm_area_struct *vma)
{
	if (vma->vm_file == NULL)
		return;

	if ((vb->count > 0 && vb->vmas[0]->vm_file != vma->vm_file) ||
	    vb->count == ARRAY_SIZE(vb->vmas))
		unlink_file_vma_batch_process(vb);

	vb->vmas[vb->count] = vma;
	vb->count++;
}

void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb)
{
	if (vb->count > 0)
		unlink_file_vma_batch_process(vb);
}
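
/*
 * Illustrative usage of the batch API above (hypothetical loop): the batch
 * flushes itself whenever the file changes or the array fills, so callers
 * only need init/add/final:
 *
 *	struct unlink_vma_file_batch vb;
 *
 *	unlink_file_vma_batch_init(&vb);
 *	for_each_vma(vmi, vma)
 *		unlink_file_vma_batch_add(&vb, vma);
 *	unlink_file_vma_batch_final(&vb);
 */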

/*
 * Unlink a file-based vm structure from its interval tree, to hide
 * vma from rmap and vmtruncate before freeing its page tables.
 */
void unlink_file_vma(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;

	if (file) {
		struct address_space *mapping = file->f_mapping;

		i_mmap_lock_write(mapping);
		__remove_shared_vm_struct(vma, mapping);
		i_mmap_unlock_write(mapping);
	}
}

void vma_link_file(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping;

	if (file) {
		mapping = file->f_mapping;
		i_mmap_lock_write(mapping);
		__vma_link_file(vma, mapping);
		i_mmap_unlock_write(mapping);
	}
}

int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
{
	VMA_ITERATOR(vmi, mm, 0);

	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
	if (vma_iter_prealloc(&vmi, vma))
		return -ENOMEM;

	vma_start_write(vma);
	vma_iter_store(&vmi, vma);
	vma_link_file(vma);
	mm->map_count++;
	validate_mm(mm);
	return 0;
}

/*
 * Copy the vma structure to a new location in the same mm,
 * prior to moving page table entries, to effect an mremap move.
 */
struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks)
{
	struct vm_area_struct *vma = *vmap;
	unsigned long vma_start = vma->vm_start;
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	bool faulted_in_anon_vma = true;
	VMA_ITERATOR(vmi, mm, addr);
	VMG_VMA_STATE(vmg, &vmi, NULL, vma, addr, addr + len);

	/*
	 * If the anonymous vma has not yet been faulted, update the new pgoff
	 * to match the new location, to increase its chance of merging.
	 */
	if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
		pgoff = addr >> PAGE_SHIFT;
		faulted_in_anon_vma = false;
	}

	new_vma = find_vma_prev(mm, addr, &vmg.prev);
	if (new_vma && new_vma->vm_start < addr + len)
		return NULL;	/* should never get here */
1762
1763 vmg.vma = NULL; /* New VMA range. */
1764 vmg.pgoff = pgoff;
1765 vmg.next = vma_iter_next_rewind(&vmi, NULL);
1766 new_vma = vma_merge_new_range(&vmg);
1767
1768 if (new_vma) {
1769 /*
1770 * Source vma may have been merged into new_vma
1771 */
1772 if (unlikely(vma_start >= new_vma->vm_start &&
1773 vma_start < new_vma->vm_end)) {
1774 /*
1775 * The only way we can get a vma_merge with
1776 * self during an mremap is if the vma hasn't
1777 * been faulted in yet and we were allowed to
1778 * reset the dst vma->vm_pgoff to the
1779 * destination address of the mremap to allow
1780 * the merge to happen. mremap must change the
1781 * vm_pgoff linearity between src and dst vmas
1782 * (in turn preventing a vma_merge) to be
1783 * safe. It is only safe to keep the vm_pgoff
1784 * linear if there are no pages mapped yet.
1785 */
1786 VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
1787 *vmap = vma = new_vma;
1788 }
1789 *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
1790 } else {
1791 new_vma = vm_area_dup(vma);
1792 if (!new_vma)
1793 goto out;
1794 vma_set_range(new_vma, addr, addr + len, pgoff);
1795 if (vma_dup_policy(vma, new_vma))
1796 goto out_free_vma;
1797 if (anon_vma_clone(new_vma, vma))
1798 goto out_free_mempol;
1799 if (new_vma->vm_file)
1800 get_file(new_vma->vm_file);
1801 if (new_vma->vm_ops && new_vma->vm_ops->open)
1802 new_vma->vm_ops->open(new_vma);
1803 if (vma_link(mm, new_vma))
1804 goto out_vma_link;
1805 *need_rmap_locks = false;
1806 }
1807 return new_vma;
1808
1809 out_vma_link:
1810 vma_close(new_vma);
1811
1812 if (new_vma->vm_file)
1813 fput(new_vma->vm_file);
1814
1815 unlink_anon_vmas(new_vma);
1816 out_free_mempol:
1817 mpol_put(vma_policy(new_vma));
1818 out_free_vma:
1819 vm_area_free(new_vma);
1820 out:
1821 return NULL;
1822 }
1823
1824 /*
1825 * Rough compatibility check to quickly see if it's even worth looking
1826 * at sharing an anon_vma.
1827 *
1828 * They need to have the same vm_file, and the flags can only differ
1829 * in things that mprotect may change.
1830 *
1831 * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
1832 * we can merge the two vma's. For example, we refuse to merge a vma if
1833 * there is a vm_ops->close() function, because that indicates that the
1834 * driver is doing some kind of reference counting. But that doesn't
1835 * really matter for the anon_vma sharing case.
1836 */
1837 static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
1838 {
1839 return a->vm_end == b->vm_start &&
1840 mpol_equal(vma_policy(a), vma_policy(b)) &&
1841 a->vm_file == b->vm_file &&
1842 !((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) &&
1843 b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
1844 }
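/*
 * Editorial worked example for the pgoff check above (illustrative
 * numbers, 4 KiB pages): suppose a maps [0x1000, 0x3000) with
 * a->vm_pgoff == 4. A vma b starting at 0x3000 continues the file
 * linearly iff
 *
 *	b->vm_pgoff == 4 + ((0x3000 - 0x1000) >> PAGE_SHIFT) == 4 + 2 == 6
 *
 * i.e. b picks up at exactly the file page where a leaves off.
 */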
1845
1846 /*
1847 * Do some basic sanity checking to see if we can re-use the anon_vma
1848 * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
1849 * the same as 'old', the other will be the new one that is trying
1850 * to share the anon_vma.
1851 *
1852 * NOTE! This runs with mmap_lock held for reading, so it is possible that
1853 * the anon_vma of 'old' is concurrently in the process of being set up
1854 * by another page fault trying to merge _that_. But that's ok: if it
1855 * is being set up, that automatically means that it will be a singleton
1856 * acceptable for merging, so we can do all of this optimistically. But
1857 * we do that READ_ONCE() to make sure that we never re-load the pointer.
1858 *
1859 * IOW: that the "list_is_singular()" test on the anon_vma_chain only
1860 * matters for the 'stable anon_vma' case (ie the thing we want to avoid
1861 * is to return an anon_vma that is "complex" due to having gone through
1862 * a fork).
1863 *
1864 * We also make sure that the two vma's are compatible (adjacent,
1865 * and with the same memory policies). That's all stable, even with just
1866 * a read lock on the mmap_lock.
1867 */
1868 static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old,
1869 struct vm_area_struct *a,
1870 struct vm_area_struct *b)
1871 {
1872 if (anon_vma_compatible(a, b)) {
1873 struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);
1874
1875 if (anon_vma && list_is_singular(&old->anon_vma_chain))
1876 return anon_vma;
1877 }
1878 return NULL;
1879 }
1880
1881 /*
1882 * find_mergeable_anon_vma is used by anon_vma_prepare, to check
1883 * neighbouring vmas for a suitable anon_vma, before it goes off
1884 * to allocate a new anon_vma. It checks because a repetitive
1885 * sequence of mprotects and faults may otherwise lead to distinct
1886 * anon_vmas being allocated, preventing vma merging in subsequent
1887 * mprotect calls.
1888 */
1889 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
1890 {
1891 struct anon_vma *anon_vma = NULL;
1892 struct vm_area_struct *prev, *next;
1893 VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_end);
1894
1895 /* Try next first. */
1896 next = vma_iter_load(&vmi);
1897 if (next) {
1898 anon_vma = reusable_anon_vma(next, vma, next);
1899 if (anon_vma)
1900 return anon_vma;
1901 }
1902
1903 prev = vma_prev(&vmi);
1904 VM_BUG_ON_VMA(prev != vma, vma);
1905 prev = vma_prev(&vmi);
1906 /* Now try prev. */
1907 if (prev)
1908 anon_vma = reusable_anon_vma(prev, prev, vma);
1909
1910 /*
1911 * We might reach here with anon_vma == NULL if we can't find
1912 * any reusable anon_vma.
1913 * There's no absolute need to look only at touching neighbours:
1914 * we could search further afield for "compatible" anon_vmas.
1915 * But it would probably just be a waste of time searching,
1916 * or lead to too many vmas hanging off the same anon_vma.
1917 * We're trying to allow mprotect remerging later on,
1918 * not trying to minimize memory used for anon_vmas.
1919 */
1920 return anon_vma;
1921 }
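/*
 * Editorial sketch (simplified and hedged; modelled on
 * __anon_vma_prepare() in mm/rmap.c, so treat the surrounding details
 * as assumptions): the typical caller tries to reuse a neighbour's
 * anon_vma before paying for a fresh allocation:
 *
 *	anon_vma = find_mergeable_anon_vma(vma);
 *	if (!anon_vma) {
 *		anon_vma = anon_vma_alloc();
 *		if (!anon_vma)
 *			return -ENOMEM;
 *	}
 */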
1922
1923 static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops)
1924 {
1925 return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite);
1926 }
1927
1928 static bool vma_is_shared_writable(struct vm_area_struct *vma)
1929 {
1930 return (vma->vm_flags & (VM_WRITE | VM_SHARED)) ==
1931 (VM_WRITE | VM_SHARED);
1932 }
1933
1934 static bool vma_fs_can_writeback(struct vm_area_struct *vma)
1935 {
1936 /* No managed pages to writeback. */
1937 if (vma->vm_flags & VM_PFNMAP)
1938 return false;
1939
1940 return vma->vm_file && vma->vm_file->f_mapping &&
1941 mapping_can_writeback(vma->vm_file->f_mapping);
1942 }
1943
1944 /*
1945 * Does this VMA require the underlying folios to have their dirty state
1946 * tracked?
1947 */
1948 bool vma_needs_dirty_tracking(struct vm_area_struct *vma)
1949 {
1950 /* Only shared, writable VMAs require dirty tracking. */
1951 if (!vma_is_shared_writable(vma))
1952 return false;
1953
1954 /* Does the filesystem need to be notified? */
1955 if (vm_ops_needs_writenotify(vma->vm_ops))
1956 return true;
1957
1958 /*
1959 * Even if the filesystem doesn't indicate a need for writenotify, if it
1960 * can writeback, dirty tracking is still required.
1961 */
1962 return vma_fs_can_writeback(vma);
1963 }
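/*
 * Editorial sketch (an assumption, modelled on the long-term pinning
 * checks in mm/gup.c): a caller that must not let pages be dirtied
 * behind the filesystem's back can use the helper above as a gate:
 *
 *	if ((gup_flags & FOLL_LONGTERM) && vma_needs_dirty_tracking(vma))
 *		return false;	// refuse to pin; writeback wants notification
 */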
1964
1965 /*
1966 * Some shared mappings will want the pages marked read-only
1967 * to track write events. If so, we'll downgrade vm_page_prot
1968 * to the private version (using protection_map[] without the
1969 * VM_SHARED bit).
1970 */
1971 bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
1972 {
1973 /* If it was private or non-writable, the write bit is already clear */
1974 if (!vma_is_shared_writable(vma))
1975 return false;
1976
1977 /* The backer wishes to know when pages are first written to? */
1978 if (vm_ops_needs_writenotify(vma->vm_ops))
1979 return true;
1980
1981 /* The open routine did something to the protections that pgprot_modify
1982 * won't preserve? */
1983 if (pgprot_val(vm_page_prot) !=
1984 pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags)))
1985 return false;
1986
1987 /*
1988 * Do we need to track softdirty? hugetlb does not support softdirty
1989 * tracking yet.
1990 */
1991 if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
1992 return true;
1993
1994 /* Do we need write faults for uffd-wp tracking? */
1995 if (userfaultfd_wp(vma))
1996 return true;
1997
1998 /* Can the mapping track the dirty pages? */
1999 return vma_fs_can_writeback(vma);
2000 }
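/*
 * Editorial sketch (an assumption, modelled on vma_set_page_prot()): a
 * caller acting on the result recomputes vm_page_prot without VM_SHARED
 * so the first write to each page faults and can be tracked:
 *
 *	unsigned long vm_flags = vma->vm_flags;
 *	pgprot_t prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
 *
 *	if (vma_wants_writenotify(vma, prot)) {
 *		vm_flags &= ~VM_SHARED;
 *		prot = vm_pgprot_modify(prot, vm_flags);
 *	}
 */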
2001
2002 static DEFINE_MUTEX(mm_all_locks_mutex);
2003
2004 static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
2005 {
2006 if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
2007 /*
2008 * The LSB of head.next can't change from under us
2009 * because we hold the mm_all_locks_mutex.
2010 */
2011 down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
2012 /*
2013 * We can safely modify head.next after taking the
2014 * anon_vma->root->rwsem. If some other vma in this mm shares
2015 * the same anon_vma we won't take it again.
2016 *
2017 * No need for atomic instructions here; head.next
2018 * can't change from under us thanks to the
2019 * anon_vma->root->rwsem.
2020 */
2021 if (__test_and_set_bit(0, (unsigned long *)
2022 &anon_vma->root->rb_root.rb_root.rb_node))
2023 BUG();
2024 }
2025 }
2026
2027 static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
2028 {
2029 if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
2030 /*
2031 * AS_MM_ALL_LOCKS can't change from under us because
2032 * we hold the mm_all_locks_mutex.
2033 *
2034 * Operations on ->flags have to be atomic because
2035 * even if AS_MM_ALL_LOCKS is stable thanks to the
2036 * mm_all_locks_mutex, there may be other cpus
2037 * changing other bitflags in parallel to us.
2038 */
2039 if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
2040 BUG();
2041 down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
2042 }
2043 }
2044
2045 /*
2046 * This operation locks against the VM for all pte/vma/mm related
2047 * operations that could ever happen on a certain mm. This includes
2048 * vmtruncate, try_to_unmap, and all page faults.
2049 *
2050 * The caller must take the mmap_lock in write mode before calling
2051 * mm_take_all_locks(). The caller isn't allowed to release the
2052 * mmap_lock until mm_drop_all_locks() returns.
2053 *
2054 * mmap_lock in write mode is required in order to block all operations
2055 * that could modify pagetables and free pages without the need to
2056 * alter the vma layout. It's also needed in write mode to prevent new
2057 * anon_vmas from being associated with existing vmas.
2058 *
2059 * A single task can't take more than one mm_take_all_locks() in a row
2060 * or it would deadlock.
2061 *
2062 * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
2063 * mapping->flags avoid taking the same lock twice if more than one
2064 * vma in this mm is backed by the same anon_vma or address_space.
2065 *
2066 * We take locks in the following order, per the comment at the beginning
2067 * of mm/rmap.c:
2068 * - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
2069 * hugetlb mapping);
2070 * - all vmas marked locked;
2071 * - all i_mmap_rwsem locks;
2072 * - all anon_vma->rwsems.
2073 *
2074 * We can take all locks within these types randomly because the VM code
2075 * doesn't nest them, and we are protected from parallel mm_take_all_locks() by
2076 * mm_all_locks_mutex.
2077 *
2078 * mm_take_all_locks() and mm_drop_all_locks() are expensive operations
2079 * that may have to take thousands of locks.
2080 *
2081 * mm_take_all_locks() can fail if it's interrupted by signals.
2082 */
2083 int mm_take_all_locks(struct mm_struct *mm)
2084 {
2085 struct vm_area_struct *vma;
2086 struct anon_vma_chain *avc;
2087 VMA_ITERATOR(vmi, mm, 0);
2088
2089 mmap_assert_write_locked(mm);
2090
2091 mutex_lock(&mm_all_locks_mutex);
2092
2093 /*
2094 * vma_start_write() does not have a complement in mm_drop_all_locks()
2095 * because vma_start_write() is always asymmetrical; it marks a VMA as
2096 * being written to until mmap_write_unlock() or mmap_write_downgrade()
2097 * is reached.
2098 */
2099 for_each_vma(vmi, vma) {
2100 if (signal_pending(current))
2101 goto out_unlock;
2102 vma_start_write(vma);
2103 }
2104
2105 vma_iter_init(&vmi, mm, 0);
2106 for_each_vma(vmi, vma) {
2107 if (signal_pending(current))
2108 goto out_unlock;
2109 if (vma->vm_file && vma->vm_file->f_mapping &&
2110 is_vm_hugetlb_page(vma))
2111 vm_lock_mapping(mm, vma->vm_file->f_mapping);
2112 }
2113
2114 vma_iter_init(&vmi, mm, 0);
2115 for_each_vma(vmi, vma) {
2116 if (signal_pending(current))
2117 goto out_unlock;
2118 if (vma->vm_file && vma->vm_file->f_mapping &&
2119 !is_vm_hugetlb_page(vma))
2120 vm_lock_mapping(mm, vma->vm_file->f_mapping);
2121 }
2122
2123 vma_iter_init(&vmi, mm, 0);
2124 for_each_vma(vmi, vma) {
2125 if (signal_pending(current))
2126 goto out_unlock;
2127 if (vma->anon_vma)
2128 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
2129 vm_lock_anon_vma(mm, avc->anon_vma);
2130 }
2131
2132 return 0;
2133
2134 out_unlock:
2135 mm_drop_all_locks(mm);
2136 return -EINTR;
2137 }
2138
2139 static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
2140 {
2141 if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
2142 /*
2143 * The LSB of head.next can't change to 0 from under
2144 * us because we hold the mm_all_locks_mutex.
2145 *
2146 * We must however clear the bitflag before unlocking
2147 * the vma so the users using the anon_vma->rb_root will
2148 * never see our bitflag.
2149 *
2150 * No need for atomic instructions here; head.next
2151 * can't change from under us until we release the
2152 * anon_vma->root->rwsem.
2153 */
2154 if (!__test_and_clear_bit(0, (unsigned long *)
2155 &anon_vma->root->rb_root.rb_root.rb_node))
2156 BUG();
2157 anon_vma_unlock_write(anon_vma);
2158 }
2159 }
2160
2161 static void vm_unlock_mapping(struct address_space *mapping)
2162 {
2163 if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
2164 /*
2165 * AS_MM_ALL_LOCKS can't change to 0 from under us
2166 * because we hold the mm_all_locks_mutex.
2167 */
2168 i_mmap_unlock_write(mapping);
2169 if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
2170 &mapping->flags))
2171 BUG();
2172 }
2173 }
2174
2175 /*
2176 * The mmap_lock cannot be released by the caller until
2177 * mm_drop_all_locks() returns.
2178 */
2179 void mm_drop_all_locks(struct mm_struct *mm)
2180 {
2181 struct vm_area_struct *vma;
2182 struct anon_vma_chain *avc;
2183 VMA_ITERATOR(vmi, mm, 0);
2184
2185 mmap_assert_write_locked(mm);
2186 BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
2187
2188 for_each_vma(vmi, vma) {
2189 if (vma->anon_vma)
2190 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
2191 vm_unlock_anon_vma(avc->anon_vma);
2192 if (vma->vm_file && vma->vm_file->f_mapping)
2193 vm_unlock_mapping(vma->vm_file->f_mapping);
2194 }
2195
2196 mutex_unlock(&mm_all_locks_mutex);
2197 }
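/*
 * Editorial sketch (an assumption, modelled on callers such as mmu
 * notifier registration): the two functions above are used as a strict
 * pair under the write-locked mmap_lock:
 *
 *	mmap_write_lock(mm);
 *	ret = mm_take_all_locks(mm);
 *	if (ret)
 *		goto out_unlock;	// -EINTR: a signal arrived
 *	// ... every vma, anon_vma and i_mmap is now locked ...
 *	mm_drop_all_locks(mm);
 * out_unlock:
 *	mmap_write_unlock(mm);
 */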
2198
2199 /*
2200 * We account for memory if it's a private writable mapping,
2201 * not hugepage-backed, and VM_NORESERVE wasn't set.
2202 */
2203 static bool accountable_mapping(struct file *file, vm_flags_t vm_flags)
2204 {
2205 /*
2206 * hugetlb has its own accounting separate from the core VM.
2207 * VM_HUGETLB may not be set yet, so we cannot check for that flag.
2208 */
2209 if (file && is_file_hugepages(file))
2210 return false;
2211
2212 return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
2213 }
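/*
 * Editorial worked examples for the mask test above:
 *
 *	MAP_PRIVATE | PROT_WRITE                 -> VM_WRITE only        -> accounted
 *	MAP_SHARED  | PROT_WRITE                 -> VM_WRITE | VM_SHARED -> not accounted
 *	MAP_PRIVATE | PROT_READ                  -> no VM_WRITE          -> not accounted
 *	MAP_PRIVATE | PROT_WRITE | MAP_NORESERVE -> VM_NORESERVE set     -> not accounted
 *
 * Only the first combination can demand new anonymous memory on write
 * faults, so only it is charged against the overcommit limits.
 */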
2214
2215 /*
2216 * vms_abort_munmap_vmas() - Undo as much as possible from an aborted munmap()
2217 * operation.
2218 * @vms: The vma unmap structure
2219 * @mas_detach: The maple state with the detached maple tree
2220 *
2221 * Reattach any detached vmas, free up the maple tree used to track the vmas.
2222 * If that's not possible because the ptes are cleared (and vm_ops->close() may
2223 * have been called), then a NULL is written over the vmas and the vmas are
2224 * removed (munmap() completed).
2225 */
2226 static void vms_abort_munmap_vmas(struct vma_munmap_struct *vms,
2227 struct ma_state *mas_detach)
2228 {
2229 struct ma_state *mas = &vms->vmi->mas;
2230
2231 if (!vms->nr_pages)
2232 return;
2233
2234 if (vms->clear_ptes)
2235 return reattach_vmas(mas_detach);
2236
2237 /*
2238 * Aborting cannot just call the vm_ops open() because they are often
2239 * not symmetrical and state data has been lost. Resort to the old
2240 * failure method of leaving a gap where the MAP_FIXED mapping failed.
2241 */
2242 mas_set_range(mas, vms->start, vms->end - 1);
2243 mas_store_gfp(mas, NULL, GFP_KERNEL|__GFP_NOFAIL);
2244 /* Clean up the insertion of the unfortunate gap */
2245 vms_complete_munmap_vmas(vms, mas_detach);
2246 }
2247
2248 /*
2249 * __mmap_prepare() - Prepare to gather any overlapping VMAs that need to be
2250 * unmapped once the map operation is completed, check limits, account mapping
2251 * and clean up any pre-existing VMAs.
2252 *
2253 * @map: Mapping state.
2254 * @uf: Userfaultfd context list.
2255 *
2256 * Returns: 0 on success, error code otherwise.
2257 */
2258 static int __mmap_prepare(struct mmap_state *map, struct list_head *uf)
2259 {
2260 int error;
2261 struct vma_iterator *vmi = map->vmi;
2262 struct vma_munmap_struct *vms = &map->vms;
2263
2264 /* Find the first overlapping VMA and initialise unmap state. */
2265 vms->vma = vma_find(vmi, map->end);
2266 init_vma_munmap(vms, vmi, vms->vma, map->addr, map->end, uf,
2267 /* unlock = */ false);
2268
2269 /* OK, we have overlapping VMAs - prepare to unmap them. */
2270 if (vms->vma) {
2271 mt_init_flags(&map->mt_detach,
2272 vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
2273 mt_on_stack(map->mt_detach);
2274 mas_init(&map->mas_detach, &map->mt_detach, /* addr = */ 0);
2275 /* Prepare to unmap any existing mapping in the area */
2276 error = vms_gather_munmap_vmas(vms, &map->mas_detach);
2277 if (error) {
2278 /* On error VMAs will already have been reattached. */
2279 vms->nr_pages = 0;
2280 return error;
2281 }
2282
2283 map->next = vms->next;
2284 map->prev = vms->prev;
2285 } else {
2286 map->next = vma_iter_next_rewind(vmi, &map->prev);
2287 }
2288
2289 /* Check against address space limit. */
2290 if (!may_expand_vm(map->mm, map->flags, map->pglen - vms->nr_pages))
2291 return -ENOMEM;
2292
2293 /* Private writable mapping: check memory availability. */
2294 if (accountable_mapping(map->file, map->flags)) {
2295 map->charged = map->pglen;
2296 map->charged -= vms->nr_accounted;
2297 if (map->charged) {
2298 error = security_vm_enough_memory_mm(map->mm, map->charged);
2299 if (error)
2300 return error;
2301 }
2302
2303 vms->nr_accounted = 0;
2304 map->flags |= VM_ACCOUNT;
2305 }
2306
2307 /*
2308 * Clear PTEs while the vma is still in the tree so that rmap
2309 * cannot race with the freeing later in the truncate scenario.
2310 * This is also needed for mmap_file(), which is why the vm_ops
2311 * close() function is called.
2312 */
2313 vms_clean_up_area(vms, &map->mas_detach);
2314
2315 return 0;
2316 }
2317
2318
2319 static int __mmap_new_file_vma(struct mmap_state *map,
2320 struct vm_area_struct *vma)
2321 {
2322 struct vma_iterator *vmi = map->vmi;
2323 int error;
2324
2325 vma->vm_file = get_file(map->file);
2326 error = mmap_file(vma->vm_file, vma);
2327 if (error) {
2328 fput(vma->vm_file);
2329 vma->vm_file = NULL;
2330
2331 vma_iter_set(vmi, vma->vm_end);
2332 /* Undo any partial mapping done by a device driver. */
2333 unmap_region(&vmi->mas, vma, map->prev, map->next);
2334
2335 return error;
2336 }
2337
2338 /* Drivers cannot alter the address of the VMA. */
2339 WARN_ON_ONCE(map->addr != vma->vm_start);
2340 /*
2341 * Drivers should not permit writability when previously it was
2342 * disallowed.
2343 */
2344 VM_WARN_ON_ONCE(map->flags != vma->vm_flags &&
2345 !(map->flags & VM_MAYWRITE) &&
2346 (vma->vm_flags & VM_MAYWRITE));
2347
2348 /* If the flags change (and are mergeable), let's retry later. */
2349 map->retry_merge = vma->vm_flags != map->flags && !(vma->vm_flags & VM_SPECIAL);
2350 map->flags = vma->vm_flags;
2351
2352 return 0;
2353 }
2354
2355 /*
2356 * __mmap_new_vma() - Allocate a new VMA for the region, as merging was not
2357 * possible.
2358 *
2359 * @map: Mapping state.
2360 * @vmap: Output pointer for the new VMA.
2361 *
2362 * Returns: Zero on success, or an error.
2363 */
2364 static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap)
2365 {
2366 struct vma_iterator *vmi = map->vmi;
2367 int error = 0;
2368 struct vm_area_struct *vma;
2369
2370 /*
2371 * Determine the object being mapped and call the appropriate
2372 * specific mapper. The address has already been validated, but
2373 * not unmapped; the overlapping maps have been removed from the list.
2374 */
2375 vma = vm_area_alloc(map->mm);
2376 if (!vma)
2377 return -ENOMEM;
2378
2379 vma_iter_config(vmi, map->addr, map->end);
2380 vma_set_range(vma, map->addr, map->end, map->pgoff);
2381 vm_flags_init(vma, map->flags);
2382 vma->vm_page_prot = vm_get_page_prot(map->flags);
2383
2384 if (vma_iter_prealloc(vmi, vma)) {
2385 error = -ENOMEM;
2386 goto free_vma;
2387 }
2388
2389 if (map->file)
2390 error = __mmap_new_file_vma(map, vma);
2391 else if (map->flags & VM_SHARED)
2392 error = shmem_zero_setup(vma);
2393 else
2394 vma_set_anonymous(vma);
2395
2396 if (error)
2397 goto free_iter_vma;
2398
2399 #ifdef CONFIG_SPARC64
2400 /* TODO: Fix SPARC ADI! */
2401 WARN_ON_ONCE(!arch_validate_flags(map->flags));
2402 #endif
2403
2404 /* Lock the VMA since it is modified after insertion into VMA tree */
2405 vma_start_write(vma);
2406 vma_iter_store(vmi, vma);
2407 map->mm->map_count++;
2408 vma_link_file(vma);
2409
2410 /*
2411 * vma_merge_new_range() calls khugepaged_enter_vma() too; the call
2412 * below covers the non-merge case.
2413 */
2414 if (!vma_is_anonymous(vma))
2415 khugepaged_enter_vma(vma, map->flags);
2416 ksm_add_vma(vma);
2417 *vmap = vma;
2418 return 0;
2419
2420 free_iter_vma:
2421 vma_iter_free(vmi);
2422 free_vma:
2423 vm_area_free(vma);
2424 return error;
2425 }
2426
2427 /*
2428 * __mmap_complete() - Unmap any VMAs we overlap, account memory mapping
2429 * statistics, handle locking and finalise the VMA.
2430 *
2431 * @map: Mapping state.
2432 * @vma: Merged or newly allocated VMA for the mmap()'d region.
2433 */
2434 static void __mmap_complete(struct mmap_state *map, struct vm_area_struct *vma)
2435 {
2436 struct mm_struct *mm = map->mm;
2437 unsigned long vm_flags = vma->vm_flags;
2438
2439 perf_event_mmap(vma);
2440
2441 /* Unmap any existing mapping in the area. */
2442 vms_complete_munmap_vmas(&map->vms, &map->mas_detach);
2443
2444 vm_stat_account(mm, vma->vm_flags, map->pglen);
2445 if (vm_flags & VM_LOCKED) {
2446 if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
2447 is_vm_hugetlb_page(vma) ||
2448 vma == get_gate_vma(mm))
2449 vm_flags_clear(vma, VM_LOCKED_MASK);
2450 else
2451 mm->locked_vm += map->pglen;
2452 }
2453
2454 if (vma->vm_file)
2455 uprobe_mmap(vma);
2456
2457 /*
2458 * A new (or expanded) vma always gets soft-dirty status.
2459 * Otherwise the user-space soft-dirty page tracker won't
2460 * be able to distinguish the situation where a vma area was
2461 * unmapped and a new one then mapped in place (which must be
2462 * treated as a completely new data area).
2463 */
2464 vm_flags_set(vma, VM_SOFTDIRTY);
2465
2466 vma_set_page_prot(vma);
2467 }
2468
2469 static unsigned long __mmap_region(struct file *file, unsigned long addr,
2470 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
2471 struct list_head *uf)
2472 {
2473 struct mm_struct *mm = current->mm;
2474 struct vm_area_struct *vma = NULL;
2475 int error;
2476 VMA_ITERATOR(vmi, mm, addr);
2477 MMAP_STATE(map, mm, &vmi, addr, len, pgoff, vm_flags, file);
2478
2479 error = __mmap_prepare(&map, uf);
2480 if (error)
2481 goto abort_munmap;
2482
2483 /* Attempt to merge with adjacent VMAs... */
2484 if (map.prev || map.next) {
2485 VMG_MMAP_STATE(vmg, &map, /* vma = */ NULL);
2486
2487 vma = vma_merge_new_range(&vmg);
2488 }
2489
2490 /* ...but if we can't, allocate a new VMA. */
2491 if (!vma) {
2492 error = __mmap_new_vma(&map, &vma);
2493 if (error)
2494 goto unacct_error;
2495 }
2496
2497 /* If flags changed, we might be able to merge, so try again. */
2498 if (map.retry_merge) {
2499 struct vm_area_struct *merged;
2500 VMG_MMAP_STATE(vmg, &map, vma);
2501
2502 vma_iter_config(map.vmi, map.addr, map.end);
2503 merged = vma_merge_existing_range(&vmg);
2504 if (merged)
2505 vma = merged;
2506 }
2507
2508 __mmap_complete(&map, vma);
2509
2510 return addr;
2511
2512 /* Accounting was done by __mmap_prepare(). */
2513 unacct_error:
2514 if (map.charged)
2515 vm_unacct_memory(map.charged);
2516 abort_munmap:
2517 vms_abort_munmap_vmas(&map.vms, &map.mas_detach);
2518 return error;
2519 }
2520
2521 /**
2522 * mmap_region() - Actually perform the userland mapping of a VMA into
2523 * current->mm with known, aligned and overflow-checked @addr and @len, and
2524 * correctly determined VMA flags @vm_flags and page offset @pgoff.
2525 *
2526 * This is an internal memory management function, and should not be used
2527 * directly.
2528 *
2529 * The caller must write-lock current->mm->mmap_lock.
2530 *
2531 * @file: If a file-backed mapping, a pointer to the struct file describing the
2532 * file to be mapped, otherwise NULL.
2533 * @addr: The page-aligned address at which to perform the mapping.
2534 * @len: The page-aligned, non-zero, length of the mapping.
2535 * @vm_flags: The VMA flags which should be applied to the mapping.
2536 * @pgoff: If @file is specified, the page offset into the file, if not then
2537 * the virtual page offset in memory of the anonymous mapping.
2538 * @uf: Optionally, a pointer to a list head used for tracking userfaultfd unmap
2539 * events.
2540 *
2541 * Returns: Either an error, or the address at which the requested mapping has
2542 * been performed.
2543 */
2544 unsigned long mmap_region(struct file *file, unsigned long addr,
2545 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
2546 struct list_head *uf)
2547 {
2548 unsigned long ret;
2549 bool writable_file_mapping = false;
2550
2551 mmap_assert_write_locked(current->mm);
2552
2553 /* Check to see if MDWE is applicable. */
2554 if (map_deny_write_exec(vm_flags, vm_flags))
2555 return -EACCES;
2556
2557 /* Allow architectures to sanity-check the vm_flags. */
2558 if (!arch_validate_flags(vm_flags))
2559 return -EINVAL;
2560
2561 /* Map writable and ensure this isn't a sealed memfd. */
2562 if (file && is_shared_maywrite(vm_flags)) {
2563 int error = mapping_map_writable(file->f_mapping);
2564
2565 if (error)
2566 return error;
2567 writable_file_mapping = true;
2568 }
2569
2570 ret = __mmap_region(file, addr, len, vm_flags, pgoff, uf);
2571
2572 /* Clear our write mapping regardless of error. */
2573 if (writable_file_mapping)
2574 mapping_unmap_writable(file->f_mapping);
2575
2576 validate_mm(current->mm);
2577 return ret;
2578 }
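/*
 * Editorial sketch (an assumption, simplified from the do_mmap() path):
 * by the time mmap_region() is reached, the caller has aligned @addr and
 * @len, resolved @vm_flags and taken the write lock, so usage reduces to:
 *
 *	addr = mmap_region(file, addr, len, vm_flags, pgoff, &uf);
 *	if (IS_ERR_VALUE(addr))
 *		return addr;	// a negative errno encoded in the return
 */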
2579
2580 /*
2581 * do_brk_flags() - Increase the brk vma if the flags match.
2582 * @vmi: The vma iterator
2583 * @addr: The start address
2584 * @len: The length of the increase
2585 * @vma: The vma
2586 * @flags: The VMA Flags
2587 *
2588 * Extend the brk VMA from addr to addr + len. If the VMA is NULL or the flags
2589 * do not match then create a new anonymous VMA. Eventually we may be able to
2590 * do some brk-specific accounting here.
2591 */
2592 int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
2593 unsigned long addr, unsigned long len, unsigned long flags)
2594 {
2595 struct mm_struct *mm = current->mm;
2596
2597 /*
2598 * Check against address space limits by the changed size
2599 * Note: This happens *after* clearing old mappings in some code paths.
2600 */
2601 flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
2602 if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
2603 return -ENOMEM;
2604
2605 if (mm->map_count > sysctl_max_map_count)
2606 return -ENOMEM;
2607
2608 if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
2609 return -ENOMEM;
2610
2611 /*
2612 * Expand the existing vma if possible; note that singular lists do not
2613 * occur after forking, so the expand will only happen on new VMAs.
2614 */
2615 if (vma && vma->vm_end == addr) {
2616 VMG_STATE(vmg, mm, vmi, addr, addr + len, flags, PHYS_PFN(addr));
2617
2618 vmg.prev = vma;
2619 /* vmi is positioned at prev, which this mode expects. */
2620 vmg.merge_flags = VMG_FLAG_JUST_EXPAND;
2621
2622 if (vma_merge_new_range(&vmg))
2623 goto out;
2624 else if (vmg_nomem(&vmg))
2625 goto unacct_fail;
2626 }
2627
2628 if (vma)
2629 vma_iter_next_range(vmi);
2630 /* create a vma struct for an anonymous mapping */
2631 vma = vm_area_alloc(mm);
2632 if (!vma)
2633 goto unacct_fail;
2634
2635 vma_set_anonymous(vma);
2636 vma_set_range(vma, addr, addr + len, addr >> PAGE_SHIFT);
2637 vm_flags_init(vma, flags);
2638 vma->vm_page_prot = vm_get_page_prot(flags);
2639 vma_start_write(vma);
2640 if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
2641 goto mas_store_fail;
2642
2643 mm->map_count++;
2644 validate_mm(mm);
2645 ksm_add_vma(vma);
2646 out:
2647 perf_event_mmap(vma);
2648 mm->total_vm += len >> PAGE_SHIFT;
2649 mm->data_vm += len >> PAGE_SHIFT;
2650 if (flags & VM_LOCKED)
2651 mm->locked_vm += (len >> PAGE_SHIFT);
2652 vm_flags_set(vma, VM_SOFTDIRTY);
2653 return 0;
2654
2655 mas_store_fail:
2656 vm_area_free(vma);
2657 unacct_fail:
2658 vm_unacct_memory(len >> PAGE_SHIFT);
2659 return -ENOMEM;
2660 }
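/*
 * Editorial sketch (an assumption, simplified from the brk() syscall;
 * vma_prev_limit() stands in for however the caller locates the
 * preceding VMA): the iterator is positioned and the existing brk VMA,
 * if any, is passed in so it can be expanded in place:
 *
 *	VMA_ITERATOR(vmi, mm, newbrk);
 *	brkvma = vma_prev_limit(&vmi, mm->start_brk);	// hypothetical helper
 *	if (do_brk_flags(&vmi, brkvma, oldbrk, newbrk - oldbrk, 0) < 0)
 *		goto out;
 */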
2661
2662 /**
2663 * unmapped_area() - Find an area between the low_limit and the high_limit with
2664 * the correct alignment and offset, all from @info. Note: current->mm is used
2665 * for the search.
2666 *
2667 * @info: The unmapped area information including the range [low_limit,
2668 * high_limit), the alignment offset and mask.
2669 *
2670 * Return: A memory address or -ENOMEM.
2671 */
2672 unsigned long unmapped_area(struct vm_unmapped_area_info *info)
2673 {
2674 unsigned long length, gap;
2675 unsigned long low_limit, high_limit;
2676 struct vm_area_struct *tmp;
2677 VMA_ITERATOR(vmi, current->mm, 0);
2678
2679 /* Adjust search length to account for worst case alignment overhead */
2680 length = info->length + info->align_mask + info->start_gap;
2681 if (length < info->length)
2682 return -ENOMEM;
2683
2684 low_limit = info->low_limit;
2685 if (low_limit < mmap_min_addr)
2686 low_limit = mmap_min_addr;
2687 high_limit = info->high_limit;
2688 retry:
2689 if (vma_iter_area_lowest(&vmi, low_limit, high_limit, length))
2690 return -ENOMEM;
2691
2692 /*
2693 * Adjust for the gap first so it doesn't interfere with the
2694 * later alignment. The first step is the minimum needed to
2695 * fulfill the start gap, the next step is the minimum to align
2696 * that. Together they are the minimum needed to fulfill both.
2697 */
2698 gap = vma_iter_addr(&vmi) + info->start_gap;
2699 gap += (info->align_offset - gap) & info->align_mask;
2700 tmp = vma_next(&vmi);
2701 if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
2702 if (vm_start_gap(tmp) < gap + length - 1) {
2703 low_limit = tmp->vm_end;
2704 vma_iter_reset(&vmi);
2705 goto retry;
2706 }
2707 } else {
2708 tmp = vma_prev(&vmi);
2709 if (tmp && vm_end_gap(tmp) > gap) {
2710 low_limit = vm_end_gap(tmp);
2711 vma_iter_reset(&vmi);
2712 goto retry;
2713 }
2714 }
2715
2716 return gap;
2717 }
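/*
 * Editorial worked example for the alignment step above (illustrative
 * numbers, 4 KiB pages): with align_mask == 0xfff, align_offset == 0,
 * start_gap == 0x1000 and the lowest suitable gap starting at 0x1234800:
 *
 *	gap  = 0x1234800 + 0x1000;		// 0x1235800
 *	gap += (0 - 0x1235800) & 0xfff;		// += 0x800 -> 0x1236000
 *
 * i.e. the candidate is first pushed past the start gap, then rounded
 * *up* to the requested alignment.
 */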
2718
2719 /**
2720 * unmapped_area_topdown() - Find an area between the low_limit and the
2721 * high_limit with the correct alignment and offset at the highest available
2722 * address, all from @info. Note: current->mm is used for the search.
2723 *
2724 * @info: The unmapped area information including the range [low_limit,
2725 * high_limit), the alignment offset and mask.
2726 *
2727 * Return: A memory address or -ENOMEM.
2728 */
2729 unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
2730 {
2731 unsigned long length, gap, gap_end;
2732 unsigned long low_limit, high_limit;
2733 struct vm_area_struct *tmp;
2734 VMA_ITERATOR(vmi, current->mm, 0);
2735
2736 /* Adjust search length to account for worst case alignment overhead */
2737 length = info->length + info->align_mask + info->start_gap;
2738 if (length < info->length)
2739 return -ENOMEM;
2740
2741 low_limit = info->low_limit;
2742 if (low_limit < mmap_min_addr)
2743 low_limit = mmap_min_addr;
2744 high_limit = info->high_limit;
2745 retry:
2746 if (vma_iter_area_highest(&vmi, low_limit, high_limit, length))
2747 return -ENOMEM;
2748
2749 gap = vma_iter_end(&vmi) - info->length;
2750 gap -= (gap - info->align_offset) & info->align_mask;
2751 gap_end = vma_iter_end(&vmi);
2752 tmp = vma_next(&vmi);
2753 if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
2754 if (vm_start_gap(tmp) < gap_end) {
2755 high_limit = vm_start_gap(tmp);
2756 vma_iter_reset(&vmi);
2757 goto retry;
2758 }
2759 } else {
2760 tmp = vma_prev(&vmi);
2761 if (tmp && vm_end_gap(tmp) > gap) {
2762 high_limit = tmp->vm_start;
2763 vma_iter_reset(&vmi);
2764 goto retry;
2765 }
2766 }
2767
2768 return gap;
2769 }
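/*
 * Editorial worked example for the topdown variant (illustrative
 * numbers): with align_mask == 0xfff, align_offset == 0,
 * info->length == 0x3000 and the highest gap ending at 0x7fffffffe800:
 *
 *	gap  = 0x7fffffffe800 - 0x3000;		// 0x7fffffffb800
 *	gap -= (0x7fffffffb800 - 0) & 0xfff;	// -= 0x800 -> 0x7fffffffb000
 *
 * Here the candidate is rounded *down* to alignment so the mapping still
 * ends at or below the top of the gap.
 */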
2770
2771 /*
2772 * Verify that the stack growth is acceptable and
2773 * update accounting. This is shared with both the
2774 * grow-up and grow-down cases.
2775 */
2776 static int acct_stack_growth(struct vm_area_struct *vma,
2777 unsigned long size, unsigned long grow)
2778 {
2779 struct mm_struct *mm = vma->vm_mm;
2780 unsigned long new_start;
2781
2782 /* address space limit tests */
2783 if (!may_expand_vm(mm, vma->vm_flags, grow))
2784 return -ENOMEM;
2785
2786 /* Stack limit test */
2787 if (size > rlimit(RLIMIT_STACK))
2788 return -ENOMEM;
2789
2790 /* mlock limit tests */
2791 if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT))
2792 return -ENOMEM;
2793
2794 /* Check to ensure the stack will not grow into a hugetlb-only region */
2795 new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
2796 vma->vm_end - size;
2797 if (is_hugepage_only_range(vma->vm_mm, new_start, size))
2798 return -EFAULT;
2799
2800 /*
2801 * Overcommit.. This must be the final test, as it will
2802 * update security statistics.
2803 */
2804 if (security_vm_enough_memory_mm(mm, grow))
2805 return -ENOMEM;
2806
2807 return 0;
2808 }
2809
2810 #if defined(CONFIG_STACK_GROWSUP)
2811 /*
2812 * PA-RISC uses this for its stack.
2813 * vma is the last one with address > vma->vm_end. Have to extend vma.
2814 */
2815 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
2816 {
2817 struct mm_struct *mm = vma->vm_mm;
2818 struct vm_area_struct *next;
2819 unsigned long gap_addr;
2820 int error = 0;
2821 VMA_ITERATOR(vmi, mm, vma->vm_start);
2822
2823 if (!(vma->vm_flags & VM_GROWSUP))
2824 return -EFAULT;
2825
2826 mmap_assert_write_locked(mm);
2827
2828 /* Guard against exceeding limits of the address space. */
2829 address &= PAGE_MASK;
2830 if (address >= (TASK_SIZE & PAGE_MASK))
2831 return -ENOMEM;
2832 address += PAGE_SIZE;
2833
2834 /* Enforce stack_guard_gap */
2835 gap_addr = address + stack_guard_gap;
2836
2837 /* Guard against overflow */
2838 if (gap_addr < address || gap_addr > TASK_SIZE)
2839 gap_addr = TASK_SIZE;
2840
2841 next = find_vma_intersection(mm, vma->vm_end, gap_addr);
2842 if (next && vma_is_accessible(next)) {
2843 if (!(next->vm_flags & VM_GROWSUP))
2844 return -ENOMEM;
2845 /* Check that both stack segments have the same anon_vma? */
2846 }
2847
2848 if (next)
2849 vma_iter_prev_range_limit(&vmi, address);
2850
2851 vma_iter_config(&vmi, vma->vm_start, address);
2852 if (vma_iter_prealloc(&vmi, vma))
2853 return -ENOMEM;
2854
2855 /* We must make sure the anon_vma is allocated. */
2856 if (unlikely(anon_vma_prepare(vma))) {
2857 vma_iter_free(&vmi);
2858 return -ENOMEM;
2859 }
2860
2861 /* Lock the VMA before expanding to prevent concurrent page faults */
2862 vma_start_write(vma);
2863 /* We update the anon VMA tree. */
2864 anon_vma_lock_write(vma->anon_vma);
2865
2866 /* Somebody else might have raced and expanded it already */
2867 if (address > vma->vm_end) {
2868 unsigned long size, grow;
2869
2870 size = address - vma->vm_start;
2871 grow = (address - vma->vm_end) >> PAGE_SHIFT;
2872
2873 error = -ENOMEM;
2874 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
2875 error = acct_stack_growth(vma, size, grow);
2876 if (!error) {
2877 if (vma->vm_flags & VM_LOCKED)
2878 mm->locked_vm += grow;
2879 vm_stat_account(mm, vma->vm_flags, grow);
2880 anon_vma_interval_tree_pre_update_vma(vma);
2881 vma->vm_end = address;
2882 /* Overwrite old entry in mtree. */
2883 vma_iter_store(&vmi, vma);
2884 anon_vma_interval_tree_post_update_vma(vma);
2885
2886 perf_event_mmap(vma);
2887 }
2888 }
2889 }
2890 anon_vma_unlock_write(vma->anon_vma);
2891 vma_iter_free(&vmi);
2892 validate_mm(mm);
2893 return error;
2894 }
2895 #endif /* CONFIG_STACK_GROWSUP */
2896
2897 /*
2898 * vma is the first one with address < vma->vm_start. Have to extend vma.
2899 * mmap_lock held for writing.
2900 */
2901 int expand_downwards(struct vm_area_struct *vma, unsigned long address)
2902 {
2903 struct mm_struct *mm = vma->vm_mm;
2904 struct vm_area_struct *prev;
2905 int error = 0;
2906 VMA_ITERATOR(vmi, mm, vma->vm_start);
2907
2908 if (!(vma->vm_flags & VM_GROWSDOWN))
2909 return -EFAULT;
2910
2911 mmap_assert_write_locked(mm);
2912
2913 address &= PAGE_MASK;
2914 if (address < mmap_min_addr || address < FIRST_USER_ADDRESS)
2915 return -EPERM;
2916
2917 /* Enforce stack_guard_gap */
2918 prev = vma_prev(&vmi);
2919 /* Check that both stack segments have the same anon_vma? */
2920 if (prev) {
2921 if (!(prev->vm_flags & VM_GROWSDOWN) &&
2922 vma_is_accessible(prev) &&
2923 (address - prev->vm_end < stack_guard_gap))
2924 return -ENOMEM;
2925 }
2926
2927 if (prev)
2928 vma_iter_next_range_limit(&vmi, vma->vm_start);
2929
2930 vma_iter_config(&vmi, address, vma->vm_end);
2931 if (vma_iter_prealloc(&vmi, vma))
2932 return -ENOMEM;
2933
2934 /* We must make sure the anon_vma is allocated. */
2935 if (unlikely(anon_vma_prepare(vma))) {
2936 vma_iter_free(&vmi);
2937 return -ENOMEM;
2938 }
2939
2940 /* Lock the VMA before expanding to prevent concurrent page faults */
2941 vma_start_write(vma);
2942 /* We update the anon VMA tree. */
2943 anon_vma_lock_write(vma->anon_vma);
2944
2945 /* Somebody else might have raced and expanded it already */
2946 if (address < vma->vm_start) {
2947 unsigned long size, grow;
2948
2949 size = vma->vm_end - address;
2950 grow = (vma->vm_start - address) >> PAGE_SHIFT;
2951
2952 error = -ENOMEM;
2953 if (grow <= vma->vm_pgoff) {
2954 error = acct_stack_growth(vma, size, grow);
2955 if (!error) {
2956 if (vma->vm_flags & VM_LOCKED)
2957 mm->locked_vm += grow;
2958 vm_stat_account(mm, vma->vm_flags, grow);
2959 anon_vma_interval_tree_pre_update_vma(vma);
2960 vma->vm_start = address;
2961 vma->vm_pgoff -= grow;
2962 /* Overwrite old entry in mtree. */
2963 vma_iter_store(&vmi, vma);
2964 anon_vma_interval_tree_post_update_vma(vma);
2965
2966 perf_event_mmap(vma);
2967 }
2968 }
2969 }
2970 anon_vma_unlock_write(vma->anon_vma);
2971 vma_iter_free(&vmi);
2972 validate_mm(mm);
2973 return error;
2974 }
2975
2976 int __vm_munmap(unsigned long start, size_t len, bool unlock)
2977 {
2978 int ret;
2979 struct mm_struct *mm = current->mm;
2980 LIST_HEAD(uf);
2981 VMA_ITERATOR(vmi, mm, start);
2982
2983 if (mmap_write_lock_killable(mm))
2984 return -EINTR;
2985
2986 ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock);
2987 if (ret || !unlock)
2988 mmap_write_unlock(mm);
2989
2990 userfaultfd_unmap_complete(mm, &uf);
2991 return ret;
2992 }
2993
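/*
 * Editorial sketch (an assumption, matching the thin wrappers in
 * mm/mmap.c): both vm_munmap() and the munmap() syscall funnel into the
 * helper above, differing only in whether the lock is dropped early by
 * do_vmi_munmap():
 *
 *	int vm_munmap(unsigned long start, size_t len)
 *	{
 *		return __vm_munmap(start, len, false);
 *	}
 */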