/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * vma.h
 *
 * Core VMA manipulation API implemented in vma.c.
 */
#ifndef __MM_VMA_H
#define __MM_VMA_H

/*
 * VMA lock generalization
 */
struct vma_prepare {
	struct vm_area_struct *vma;
	struct vm_area_struct *adj_next;
	struct file *file;
	struct address_space *mapping;
	struct anon_vma *anon_vma;
	struct vm_area_struct *insert;
	struct vm_area_struct *remove;
	struct vm_area_struct *remove2;
};

struct unlink_vma_file_batch {
	int count;
	struct vm_area_struct *vmas[8];
};

/*
 * vma munmap operation
 */
struct vma_munmap_struct {
	struct vma_iterator *vmi;
	struct vm_area_struct *vma;	/* The first vma to munmap */
	struct vm_area_struct *prev;	/* vma before the munmap area */
	struct vm_area_struct *next;	/* vma after the munmap area */
	struct list_head *uf;		/* Userfaultfd list_head */
	unsigned long start;		/* Aligned start addr (inclusive) */
	unsigned long end;		/* Aligned end addr (exclusive) */
	unsigned long unmap_start;	/* Unmap PTE start */
	unsigned long unmap_end;	/* Unmap PTE end */
	int vma_count;			/* Number of vmas that will be removed */
	bool unlock;			/* Unlock after the munmap */
	bool clear_ptes;		/* If there are outstanding PTEs to be cleared */
	/* 2 byte hole */
	unsigned long nr_pages;		/* Number of pages being removed */
	unsigned long locked_vm;	/* Number of locked pages */
	unsigned long nr_accounted;	/* Number of VM_ACCOUNT pages */
	unsigned long exec_vm;
	unsigned long stack_vm;
	unsigned long data_vm;
};

enum vma_merge_state {
	VMA_MERGE_START,
	VMA_MERGE_ERROR_NOMEM,
	VMA_MERGE_NOMERGE,
	VMA_MERGE_SUCCESS,
};

enum vma_merge_flags {
	VMG_FLAG_DEFAULT = 0,
	/*
	 * If we can expand, simply do so. We know there is nothing to merge to
	 * the right. Does not reset state upon failure to merge. The VMA
	 * iterator is assumed to be positioned at the previous VMA, rather than
	 * at the gap.
	 */
	VMG_FLAG_JUST_EXPAND = 1 << 0,
};
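
/*
 * Usage sketch (illustrative only, not taken from a real caller): a caller
 * that already knows only the previous VMA can absorb the new range positions
 * the iterator on that VMA and requests a pure expansion, getting back the
 * expanded VMA or NULL:
 *
 *	vma_prev(vmi);
 *	vmg.merge_flags = VMG_FLAG_JUST_EXPAND;
 *	vma = vma_merge_new_range(&vmg);
 */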

/* Represents a VMA merge operation. */
struct vma_merge_struct {
	struct mm_struct *mm;
	struct vma_iterator *vmi;
	pgoff_t pgoff;
	struct vm_area_struct *prev;
	struct vm_area_struct *next;	/* Modified by vma_merge(). */
	struct vm_area_struct *vma;	/* Either a new VMA or the one being modified. */
	unsigned long start;
	unsigned long end;
	unsigned long flags;
	struct file *file;
	struct anon_vma *anon_vma;
	struct mempolicy *policy;
	struct vm_userfaultfd_ctx uffd_ctx;
	struct anon_vma_name *anon_name;
	enum vma_merge_flags merge_flags;
	enum vma_merge_state state;

	/*
	 * If a merge is possible, but an OOM error occurs, give up and don't
	 * execute the merge, returning NULL.
	 */
	bool give_up_on_oom :1;
};

static inline bool vmg_nomem(struct vma_merge_struct *vmg)
{
	return vmg->state == VMA_MERGE_ERROR_NOMEM;
}

/* Assumes addr >= vma->vm_start. */
static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma,
				       unsigned long addr)
{
	return vma->vm_pgoff + PHYS_PFN(addr - vma->vm_start);
}
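
/*
 * Worked example (illustrative, assuming 4KiB pages): for a VMA with
 * vm_start == 0x1000 and vm_pgoff == 10, vma_pgoff_offset(vma, 0x3000)
 * returns 10 + PHYS_PFN(0x2000) == 12.
 */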

#define VMG_STATE(name, mm_, vmi_, start_, end_, flags_, pgoff_)	\
	struct vma_merge_struct name = {				\
		.mm = mm_,						\
		.vmi = vmi_,						\
		.start = start_,					\
		.end = end_,						\
		.flags = flags_,					\
		.pgoff = pgoff_,					\
		.state = VMA_MERGE_START,				\
		.merge_flags = VMG_FLAG_DEFAULT,			\
	}
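
/*
 * Usage sketch (illustrative only): set up merge state for a new range,
 * attempt the merge, and distinguish "no merge" from allocation failure:
 *
 *	VMG_STATE(vmg, mm, &vmi, addr, addr + len, vm_flags, pgoff);
 *	vma = vma_merge_new_range(&vmg);
 *	if (!vma && vmg_nomem(&vmg))
 *		return -ENOMEM;
 */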

#define VMG_VMA_STATE(name, vmi_, prev_, vma_, start_, end_)	\
	struct vma_merge_struct name = {			\
		.mm = vma_->vm_mm,				\
		.vmi = vmi_,					\
		.prev = prev_,					\
		.next = NULL,					\
		.vma = vma_,					\
		.start = start_,				\
		.end = end_,					\
		.flags = vma_->vm_flags,			\
		.pgoff = vma_pgoff_offset(vma_, start_),	\
		.file = vma_->vm_file,				\
		.anon_vma = vma_->anon_vma,			\
		.policy = vma_policy(vma_),			\
		.uffd_ctx = vma_->vm_userfaultfd_ctx,		\
		.anon_name = anon_vma_name(vma_),		\
		.state = VMA_MERGE_START,			\
		.merge_flags = VMG_FLAG_DEFAULT,		\
	}

#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
void validate_mm(struct mm_struct *mm);
#else
#define validate_mm(mm) do { } while (0)
#endif

__must_check int vma_expand(struct vma_merge_struct *vmg);
__must_check int vma_shrink(struct vma_iterator *vmi,
		struct vm_area_struct *vma,
		unsigned long start, unsigned long end, pgoff_t pgoff);

static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
				     struct vm_area_struct *vma, gfp_t gfp)
{
	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_gfp(&vmi->mas, vma, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}

int
do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
		    struct mm_struct *mm, unsigned long start,
		    unsigned long end, struct list_head *uf, bool unlock);

int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
		  unsigned long start, size_t len, struct list_head *uf,
		  bool unlock);

void remove_vma(struct vm_area_struct *vma, bool unreachable);

void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct vm_area_struct *next);

/* We are about to modify the VMA's flags. */
__must_check struct vm_area_struct
*vma_modify_flags(struct vma_iterator *vmi,
		struct vm_area_struct *prev, struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		unsigned long new_flags);

/* We are about to modify the VMA's flags and/or anon_name. */
__must_check struct vm_area_struct
*vma_modify_flags_name(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start,
		       unsigned long end,
		       unsigned long new_flags,
		       struct anon_vma_name *new_name);

/* We are about to modify the VMA's memory policy. */
__must_check struct vm_area_struct
*vma_modify_policy(struct vma_iterator *vmi,
		   struct vm_area_struct *prev,
		   struct vm_area_struct *vma,
		   unsigned long start, unsigned long end,
		   struct mempolicy *new_pol);

/* We are about to modify the VMA's flags and/or uffd context. */
__must_check struct vm_area_struct
*vma_modify_flags_uffd(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start, unsigned long end,
		       unsigned long new_flags,
		       struct vm_userfaultfd_ctx new_ctx,
		       bool give_up_on_oom);

__must_check struct vm_area_struct
*vma_merge_new_range(struct vma_merge_struct *vmg);

__must_check struct vm_area_struct
*vma_merge_extend(struct vma_iterator *vmi,
		  struct vm_area_struct *vma,
		  unsigned long delta);

void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
			       struct vm_area_struct *vma);
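
/*
 * Usage sketch (illustrative only): batch the unlinking of file-backed VMAs
 * so the per-file rmap locking is amortised over up to eight VMAs at a time:
 *
 *	struct unlink_vma_file_batch vb;
 *
 *	unlink_file_vma_batch_init(&vb);
 *	for_each_vma(vmi, vma)
 *		unlink_file_vma_batch_add(&vb, vma);
 *	unlink_file_vma_batch_final(&vb);
 */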

void unlink_file_vma(struct vm_area_struct *vma);

void vma_link_file(struct vm_area_struct *vma);

int vma_link(struct mm_struct *mm, struct vm_area_struct *vma);

struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks);

struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma);

bool vma_needs_dirty_tracking(struct vm_area_struct *vma);
bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

int mm_take_all_locks(struct mm_struct *mm);
void mm_drop_all_locks(struct mm_struct *mm);

unsigned long mmap_region(struct file *file, unsigned long addr,
		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
		struct list_head *uf);

int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma,
		 unsigned long addr, unsigned long request, unsigned long flags);

unsigned long unmapped_area(struct vm_unmapped_area_info *info);
unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);

static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
{
	/*
	 * We want to check manually if we can change individual PTEs writable
	 * if we can't do that automatically for all PTEs in a mapping. For
	 * private mappings, that's always the case when we have write
	 * permissions as we properly have to handle COW.
	 */
	if (vma->vm_flags & VM_SHARED)
		return vma_wants_writenotify(vma, vma->vm_page_prot);
	return !!(vma->vm_flags & VM_WRITE);
}
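
/*
 * Usage sketch (illustrative only): mprotect-style callers can use the check
 * above to ask the page-table walker to upgrade suitable PTEs to writable
 * right away instead of leaving that to a later write fault:
 *
 *	if (vma_wants_manual_pte_write_upgrade(vma))
 *		cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;
 */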

#ifdef CONFIG_MMU
static inline pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
{
	return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
}
#endif

static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
						    unsigned long min)
{
	return mas_prev(&vmi->mas, min);
}

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area (including shadow stacks)
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}
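
/*
 * Worked examples (illustrative): a PROT_READ|PROT_EXEC file mapping
 * (VM_READ | VM_EXEC) is an exec mapping; a private PROT_READ|PROT_WRITE
 * anonymous mapping (VM_READ | VM_WRITE) is a data mapping; a MAP_SHARED
 * writable mapping is neither, since VM_SHARED excludes it from the data
 * accounting mask.
 */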

static inline void vma_iter_config(struct vma_iterator *vmi,
		unsigned long index, unsigned long last)
{
	__mas_set_range(&vmi->mas, index, last - 1);
}

static inline void vma_iter_reset(struct vma_iterator *vmi)
{
	mas_reset(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_prev_range_limit(struct vma_iterator *vmi, unsigned long min)
{
	return mas_prev_range(&vmi->mas, min);
}

static inline
struct vm_area_struct *vma_iter_next_range_limit(struct vma_iterator *vmi, unsigned long max)
{
	return mas_next_range(&vmi->mas, max);
}

static inline int vma_iter_area_lowest(struct vma_iterator *vmi, unsigned long min,
				       unsigned long max, unsigned long size)
{
	return mas_empty_area(&vmi->mas, min, max - 1, size);
}

static inline int vma_iter_area_highest(struct vma_iterator *vmi, unsigned long min,
					unsigned long max, unsigned long size)
{
	return mas_empty_area_rev(&vmi->mas, min, max - 1, size);
}

/*
 * VMA Iterator functions shared between nommu and mmap
 */
static inline int vma_iter_prealloc(struct vma_iterator *vmi,
				    struct vm_area_struct *vma)
{
	return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
}

static inline void vma_iter_clear(struct vma_iterator *vmi)
{
	mas_store_prealloc(&vmi->mas, NULL);
}

static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
{
	return mas_walk(&vmi->mas);
}

/* Store a VMA with preallocated memory */
static inline void vma_iter_store(struct vma_iterator *vmi,
				  struct vm_area_struct *vma)
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.index > vma->vm_start)) {
		pr_warn("%lx > %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
			vmi->mas.index, vma->vm_start, vma->vm_start,
			vma->vm_end, vmi->mas.index, vmi->mas.last);
	}
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.last < vma->vm_start)) {
		pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
			vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
			vmi->mas.index, vmi->mas.last);
	}
#endif

	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_prealloc(&vmi->mas, vma);
}
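
/*
 * Usage sketch (illustrative only): pair vma_iter_prealloc() with
 * vma_iter_store() so the store itself cannot fail:
 *
 *	if (vma_iter_prealloc(&vmi, vma))
 *		return -ENOMEM;
 *	vma_iter_store(&vmi, vma);
 */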

static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
{
	return vmi->mas.index;
}

static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
{
	return vmi->mas.last + 1;
}

static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
				      unsigned long count)
{
	return mas_expected_entries(&vmi->mas, count);
}

static inline
struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
{
	return mas_prev_range(&vmi->mas, 0);
}

/*
 * Retrieve the next VMA and rewind the iterator to the end of the previous
 * VMA, or to index 0 if there is no previous VMA.
 */
static inline
struct vm_area_struct *vma_iter_next_rewind(struct vma_iterator *vmi,
		struct vm_area_struct **pprev)
{
	struct vm_area_struct *next = vma_next(vmi);
	struct vm_area_struct *prev = vma_prev(vmi);

	/*
	 * Consider the case where no previous VMA exists. We advance to the
	 * next VMA, skipping any gap, then rewind to the start of the range.
	 *
	 * If we were to unconditionally advance to the next range we'd wind
	 * up at the next VMA again, so we check to ensure there is a previous
	 * VMA to skip over.
	 */
	if (prev)
		vma_iter_next_range(vmi);

	if (pprev)
		*pprev = prev;

	return next;
}
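
/*
 * Usage sketch (illustrative only): find both neighbours of an unmapped
 * address in a single pass:
 *
 *	vma_iter_set(&vmi, addr);
 *	next = vma_iter_next_rewind(&vmi, &prev);
 */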

#ifdef CONFIG_64BIT

static inline bool vma_is_sealed(struct vm_area_struct *vma)
{
	return (vma->vm_flags & VM_SEALED);
}

/*
 * Check if a vma is sealed for modification.
 * Return true if modification is allowed.
 */
static inline bool can_modify_vma(struct vm_area_struct *vma)
{
	if (unlikely(vma_is_sealed(vma)))
		return false;

	return true;
}

bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior);

#else

static inline bool can_modify_vma(struct vm_area_struct *vma)
{
	return true;
}

static inline bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior)
{
	return true;
}

#endif

#if defined(CONFIG_STACK_GROWSUP)
int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#endif

int expand_downwards(struct vm_area_struct *vma, unsigned long address);

int __vm_munmap(unsigned long start, size_t len, bool unlock);

#endif	/* __MM_VMA_H */