Matching lines from mm/mmap.c
1 // SPDX-License-Identifier: GPL-2.0-only
3 * mm/mmap.c
14 #include <linux/backing-dev.h>
15 #include <linux/mm.h>
48 #include <linux/sched/mm.h>
63 #define arch_mmap_check(addr, len, flags) (0)
80 /* Update vma->vm_page_prot to reflect vma->vm_flags. */
83 unsigned long vm_flags = vma->vm_flags; in vma_set_page_prot()
86 vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags); in vma_set_page_prot()
91 /* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */ in vma_set_page_prot()
92 WRITE_ONCE(vma->vm_page_prot, vm_page_prot); in vma_set_page_prot()
96 * check_brk_limits() - Use platform specific check of range & verify mlock
101 * Return: 0 on success.
107 mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED); in check_brk_limits()
111 return mlock_future_ok(current->mm, current->mm->def_flags, len) in check_brk_limits()
112 ? 0 : -EAGAIN; in check_brk_limits()
118 struct mm_struct *mm = current->mm; in SYSCALL_DEFINE1() local
125 if (mmap_write_lock_killable(mm)) in SYSCALL_DEFINE1()
126 return -EINTR; in SYSCALL_DEFINE1()
128 origbrk = mm->brk; in SYSCALL_DEFINE1()
133 * randomize_va_space to 2, which will still cause mm->start_brk in SYSCALL_DEFINE1()
136 if (current->brk_randomized) in SYSCALL_DEFINE1()
137 min_brk = mm->start_brk; in SYSCALL_DEFINE1()
139 min_brk = mm->end_data; in SYSCALL_DEFINE1()
141 min_brk = mm->start_brk; in SYSCALL_DEFINE1()
150 * not page aligned -Ram Gupta in SYSCALL_DEFINE1()
152 if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk, in SYSCALL_DEFINE1()
153 mm->end_data, mm->start_data)) in SYSCALL_DEFINE1()
157 oldbrk = PAGE_ALIGN(mm->brk); in SYSCALL_DEFINE1()
159 mm->brk = brk; in SYSCALL_DEFINE1()
164 if (brk <= mm->brk) { in SYSCALL_DEFINE1()
166 vma_iter_init(&vmi, mm, newbrk); in SYSCALL_DEFINE1()
168 if (!brkvma || brkvma->vm_start >= oldbrk) in SYSCALL_DEFINE1()
169 goto out; /* mapping intersects with an existing non-brk vma. */ in SYSCALL_DEFINE1()
171 * mm->brk must be protected by write mmap_lock. in SYSCALL_DEFINE1()
175 mm->brk = brk; in SYSCALL_DEFINE1()
176 if (do_vmi_align_munmap(&vmi, brkvma, mm, newbrk, oldbrk, &uf, in SYSCALL_DEFINE1()
183 if (check_brk_limits(oldbrk, newbrk - oldbrk)) in SYSCALL_DEFINE1()
190 vma_iter_init(&vmi, mm, oldbrk); in SYSCALL_DEFINE1()
195 brkvma = vma_prev_limit(&vmi, mm->start_brk); in SYSCALL_DEFINE1()
196 /* Ok, looks good - let it rip. */ in SYSCALL_DEFINE1()
197 if (do_brk_flags(&vmi, brkvma, oldbrk, newbrk - oldbrk, 0) < 0) in SYSCALL_DEFINE1()
200 mm->brk = brk; in SYSCALL_DEFINE1()
201 if (mm->def_flags & VM_LOCKED) in SYSCALL_DEFINE1()
205 mmap_write_unlock(mm); in SYSCALL_DEFINE1()
207 userfaultfd_unmap_complete(mm, &uf); in SYSCALL_DEFINE1()
209 mm_populate(oldbrk, newbrk - oldbrk); in SYSCALL_DEFINE1()
213 mm->brk = origbrk; in SYSCALL_DEFINE1()
214 mmap_write_unlock(mm); in SYSCALL_DEFINE1()
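/*
 * Worked example for the brk() paths above (not part of mm/mmap.c),
 * assuming 4 KiB pages: if mm->brk is 0x601234 and the requested brk is
 * 0x601800, PAGE_ALIGN() maps both to 0x602000, so oldbrk == newbrk and
 * only mm->brk is updated with no VMA change.  Only when the page-aligned
 * values differ does the code either munmap the tail (shrinking brk) or
 * call check_brk_limits()/do_brk_flags() (growing brk).
 */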
231 bool mlock_future_ok(struct mm_struct *mm, unsigned long flags, in mlock_future_ok() argument
240 locked_pages += mm->locked_vm; in mlock_future_ok()
250 if (S_ISREG(inode->i_mode)) in file_mmap_size_max()
253 if (S_ISBLK(inode->i_mode)) in file_mmap_size_max()
256 if (S_ISSOCK(inode->i_mode)) in file_mmap_size_max()
260 if (file->f_op->fop_flags & FOP_UNSIGNED_OFFSET) in file_mmap_size_max()
261 return 0; in file_mmap_size_max()
274 maxsize -= len; in file_mmap_ok()
281 * do_mmap() - Perform a userland memory mapping into the current process
284 * apply @vm_flags. If this is a file-backed mapping then the file is specified
288 * @uf is non-NULL, the caller has provided a list head to track unmap events
292 * @populate, which must be non-NULL, expecting the caller to actually perform
295 * This function will invoke architecture-specific (and if provided and
296 * relevant, file system-specific) logic to determine the most appropriate
311 * The caller must write-lock current->mm->mmap_lock.
314 * mapped, if a file-backed mapping.
315 * @addr: If non-zero, hints at (or if @flags has MAP_FIXED set, specifies) the
317 * page-aligned.
318 * @len: The length of the mapping. Will be page-aligned and must be at least 1
324 * @vm_flags: VMA flags which should be set by default, or 0 otherwise.
325 * @pgoff: Page offset into the @file if file-backed, should be 0 otherwise.
326 * @populate: A pointer to a value which will be set to 0 if no population of
328 * non-NULL. See mmap (2) for details as to under what circumstances population
343 struct mm_struct *mm = current->mm; in do_mmap() local
344 int pkey = 0; in do_mmap()
346 *populate = 0; in do_mmap()
348 mmap_assert_write_locked(mm); in do_mmap()
351 return -EINVAL; in do_mmap()
359 if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) in do_mmap()
360 if (!(file && path_noexec(&file->f_path))) in do_mmap()
373 return -ENOMEM; in do_mmap()
377 return -EOVERFLOW; in do_mmap()
380 if (mm->map_count > sysctl_max_map_count) in do_mmap()
381 return -ENOMEM; in do_mmap()
394 pkey = execute_only_pkey(mm); in do_mmap()
395 if (pkey < 0) in do_mmap()
396 pkey = 0; in do_mmap()
399 /* Do simple checking here so the lower-level routines won't have in do_mmap()
404 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; in do_mmap()
414 if (find_vma_intersection(mm, addr, addr + len)) in do_mmap()
415 return -EEXIST; in do_mmap()
420 return -EPERM; in do_mmap()
422 if (!mlock_future_ok(mm, vm_flags, len)) in do_mmap()
423 return -EAGAIN; in do_mmap()
431 return -EOVERFLOW; in do_mmap()
434 if (file->f_op->fop_flags & FOP_MMAP_SYNC) in do_mmap()
440 * Force use of MAP_SHARED_VALIDATE with non-legacy in do_mmap()
450 return -EOPNOTSUPP; in do_mmap()
452 if (!(file->f_mode & FMODE_WRITE)) in do_mmap()
453 return -EACCES; in do_mmap()
454 if (IS_SWAPFILE(file->f_mapping->host)) in do_mmap()
455 return -ETXTBSY; in do_mmap()
459 * Make sure we don't allow writing to an append-only in do_mmap()
462 if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE)) in do_mmap()
463 return -EACCES; in do_mmap()
466 if (!(file->f_mode & FMODE_WRITE)) in do_mmap()
470 if (!(file->f_mode & FMODE_READ)) in do_mmap()
471 return -EACCES; in do_mmap()
472 if (path_noexec(&file->f_path)) { in do_mmap()
474 return -EPERM; in do_mmap()
478 if (!file->f_op->mmap) in do_mmap()
479 return -ENODEV; in do_mmap()
481 return -EINVAL; in do_mmap()
485 return -EINVAL; in do_mmap()
499 return -EINVAL; in do_mmap()
503 pgoff = 0; in do_mmap()
508 return -ENOTSUPP; in do_mmap()
518 return -EINVAL; in do_mmap()
520 return -EINVAL; in do_mmap()
543 return -EINVAL; in do_mmap()
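/*
 * Minimal sketch of a do_mmap() caller, loosely modeled on
 * vm_mmap_pgoff() in mm/util.c; the wrapper name is illustrative, and
 * security hooks plus most error handling are omitted.
 */
static unsigned long example_mmap(struct file *file, unsigned long addr,
				  unsigned long len, unsigned long prot,
				  unsigned long flags, unsigned long pgoff)
{
	struct mm_struct *mm = current->mm;
	unsigned long populate = 0;
	unsigned long ret;
	LIST_HEAD(uf);

	if (mmap_write_lock_killable(mm))
		return -EINTR;
	ret = do_mmap(file, addr, len, prot, flags, 0, pgoff, &populate, &uf);
	mmap_write_unlock(mm);
	userfaultfd_unmap_complete(mm, &uf);
	if (populate)
		mm_populate(ret, populate);
	return ret;
}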
580 return -EBADF; in ksys_mmap_pgoff()
584 retval = -EINVAL; in ksys_mmap_pgoff()
592 return -EINVAL; in ksys_mmap_pgoff()
597 * taken when vm_ops->mmap() is called in ksys_mmap_pgoff()
636 return -EFAULT; in SYSCALL_DEFINE1()
638 return -EINVAL; in SYSCALL_DEFINE1()
654 return 0; in stack_guard_placement()
661 * - does not intersect with any VMA;
662 * - is contained within the [low_limit, high_limit) interval;
663 * - is at least the desired size.
664 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
670 if (info->flags & VM_UNMAPPED_AREA_TOPDOWN) in vm_unmapped_area()
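/*
 * Sketch of a typical vm_unmapped_area() request, mirroring what the
 * generic helpers below do; the concrete limits chosen here are
 * illustrative, not the exact values the kernel uses.
 */
static unsigned long example_find_gap(unsigned long len)
{
	struct vm_unmapped_area_info info = {};

	info.flags = 0;			/* bottom-up search */
	info.length = len;
	info.low_limit = current->mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = 0;
	info.align_offset = 0;

	return vm_unmapped_area(&info);	/* start address, or -ENOMEM */
}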
680 * For shmat() with addr=0.
688 * This function "knows" that -ENOMEM has the bits set.
695 struct mm_struct *mm = current->mm; in generic_get_unmapped_area() local
700 if (len > mmap_end - mmap_min_addr) in generic_get_unmapped_area()
701 return -ENOMEM; in generic_get_unmapped_area()
708 vma = find_vma_prev(mm, addr, &prev); in generic_get_unmapped_area()
709 if (mmap_end - len >= addr && addr >= mmap_min_addr && in generic_get_unmapped_area()
716 info.low_limit = mm->mmap_base; in generic_get_unmapped_area()
736 * This mmap-allocator allocates new areas top-down from below the
745 struct mm_struct *mm = current->mm; in generic_get_unmapped_area_topdown() local
750 if (len > mmap_end - mmap_min_addr) in generic_get_unmapped_area_topdown()
751 return -ENOMEM; in generic_get_unmapped_area_topdown()
759 vma = find_vma_prev(mm, addr, &prev); in generic_get_unmapped_area_topdown()
760 if (mmap_end - len >= addr && addr >= mmap_min_addr && in generic_get_unmapped_area_topdown()
769 info.high_limit = arch_get_mmap_base(addr, mm->mmap_base); in generic_get_unmapped_area_topdown()
777 * so fall back to the bottom-up function here. This scenario in generic_get_unmapped_area_topdown()
782 VM_BUG_ON(addr != -ENOMEM); in generic_get_unmapped_area_topdown()
783 info.flags = 0; in generic_get_unmapped_area_topdown()
803 unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm, struct file *filp, in mm_get_unmapped_area_vmflags() argument
808 if (test_bit(MMF_TOPDOWN, &mm->flags)) in mm_get_unmapped_area_vmflags()
828 return -ENOMEM; in __get_unmapped_area()
831 if (file->f_op->get_unmapped_area) in __get_unmapped_area()
832 get_area = file->f_op->get_unmapped_area; in __get_unmapped_area()
843 pgoff = 0; in __get_unmapped_area()
854 addr = mm_get_unmapped_area_vmflags(current->mm, file, addr, len, in __get_unmapped_area()
860 if (addr > TASK_SIZE - len) in __get_unmapped_area()
861 return -ENOMEM; in __get_unmapped_area()
863 return -EINVAL; in __get_unmapped_area()
870 mm_get_unmapped_area(struct mm_struct *mm, struct file *file, in mm_get_unmapped_area() argument
874 if (test_bit(MMF_TOPDOWN, &mm->flags)) in mm_get_unmapped_area()
875 return arch_get_unmapped_area_topdown(file, addr, len, pgoff, flags, 0); in mm_get_unmapped_area()
876 return arch_get_unmapped_area(file, addr, len, pgoff, flags, 0); in mm_get_unmapped_area()
881 * find_vma_intersection() - Look up the first VMA which intersects the interval
882 * @mm: The process address space.
889 struct vm_area_struct *find_vma_intersection(struct mm_struct *mm, in find_vma_intersection() argument
895 mmap_assert_locked(mm); in find_vma_intersection()
896 return mt_find(&mm->mm_mt, &index, end_addr - 1); in find_vma_intersection()
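/*
 * Hedged usage sketch for find_vma_intersection(): check whether any
 * VMA overlaps [start, end) before placing a fixed mapping there.  The
 * caller must hold mmap_lock, as the mmap_assert_locked() above enforces.
 */
static bool example_range_is_free(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	mmap_assert_locked(mm);
	return find_vma_intersection(mm, start, end) == NULL;
}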
901 * find_vma() - Find the VMA for a given address, or the next VMA.
902 * @mm: The mm_struct to check
908 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) in find_vma() argument
912 mmap_assert_locked(mm); in find_vma()
913 return mt_find(&mm->mm_mt, &index, ULONG_MAX); in find_vma()
918 * find_vma_prev() - Find the VMA for a given address, or the next vma and
920 * @mm: The mm_struct to check
931 find_vma_prev(struct mm_struct *mm, unsigned long addr, in find_vma_prev() argument
935 VMA_ITERATOR(vmi, mm, addr); in find_vma_prev()
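/*
 * Illustrative sketch contrasting the lookup helpers above: find_vma()
 * returns the first VMA ending above @addr (it may start above it too),
 * while find_vma_prev() additionally hands back the preceding VMA.
 */
static void example_lookup(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma, *prev;

	mmap_read_lock(mm);
	vma = find_vma(mm, addr);
	if (vma && vma->vm_start <= addr)
		pr_info("addr is mapped by [%lx, %lx)\n",
			vma->vm_start, vma->vm_end);

	vma = find_vma_prev(mm, addr, &prev);
	if (!vma && prev)
		pr_info("addr lies above the last VMA, which ends at %lx\n",
			prev->vm_end);
	mmap_read_unlock(mm);
}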
966 struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr) in find_extend_vma_locked() argument
971 vma = find_vma_prev(mm, addr, &prev); in find_extend_vma_locked()
972 if (vma && (vma->vm_start <= addr)) in find_extend_vma_locked()
978 if (prev->vm_flags & VM_LOCKED) in find_extend_vma_locked()
979 populate_vma_page_range(prev, addr, prev->vm_end, NULL); in find_extend_vma_locked()
988 struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr) in find_extend_vma_locked() argument
994 vma = find_vma(mm, addr); in find_extend_vma_locked()
997 if (vma->vm_start <= addr) in find_extend_vma_locked()
999 start = vma->vm_start; in find_extend_vma_locked()
1002 if (vma->vm_flags & VM_LOCKED) in find_extend_vma_locked()
1011 #define vma_expand_down(vma, addr) (-EFAULT)
1015 #define vma_expand_up(vma,addr) (-EFAULT)
1024 * This is called with the mm locked for reading, drops the lock, takes
1031 struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr) in expand_stack() argument
1035 mmap_read_unlock(mm); in expand_stack()
1036 if (mmap_write_lock_killable(mm)) in expand_stack()
1039 vma = find_vma_prev(mm, addr, &prev); in expand_stack()
1040 if (vma && vma->vm_start <= addr) in expand_stack()
1051 mmap_write_unlock(mm); in expand_stack()
1055 mmap_write_downgrade(mm); in expand_stack()
1059 /* do_munmap() - Wrapper function for non-maple tree aware do_munmap() calls.
1060 * @mm: The mm_struct
1065 * Return: 0 on success, error otherwise.
1067 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, in do_munmap() argument
1070 VMA_ITERATOR(vmi, mm, start); in do_munmap()
1072 return do_vmi_munmap(&vmi, mm, start, len, uf, false); in do_munmap()
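/*
 * Minimal sketch of a do_munmap() caller, patterned on vm_munmap();
 * the wrapper name is illustrative and error handling is trimmed.
 */
static int example_unmap(struct mm_struct *mm, unsigned long start, size_t len)
{
	LIST_HEAD(uf);
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;
	ret = do_munmap(mm, start, len, &uf);
	mmap_write_unlock(mm);
	userfaultfd_unmap_complete(mm, &uf);
	return ret;
}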
1095 struct mm_struct *mm = current->mm; in SYSCALL_DEFINE5() local
1097 unsigned long populate = 0; in SYSCALL_DEFINE5()
1098 unsigned long ret = -EINVAL; in SYSCALL_DEFINE5()
1102 …pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/mm/remap_file_… in SYSCALL_DEFINE5()
1103 current->comm, current->pid); in SYSCALL_DEFINE5()
1117 if (mmap_read_lock_killable(mm)) in SYSCALL_DEFINE5()
1118 return -EINTR; in SYSCALL_DEFINE5()
1125 vma = vma_lookup(mm, start); in SYSCALL_DEFINE5()
1127 if (!vma || !(vma->vm_flags & VM_SHARED)) { in SYSCALL_DEFINE5()
1128 mmap_read_unlock(mm); in SYSCALL_DEFINE5()
1129 return -EINVAL; in SYSCALL_DEFINE5()
1132 prot |= vma->vm_flags & VM_READ ? PROT_READ : 0; in SYSCALL_DEFINE5()
1133 prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0; in SYSCALL_DEFINE5()
1134 prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0; in SYSCALL_DEFINE5()
1138 if (vma->vm_flags & VM_LOCKED) in SYSCALL_DEFINE5()
1142 vm_flags = vma->vm_flags; in SYSCALL_DEFINE5()
1143 file = get_file(vma->vm_file); in SYSCALL_DEFINE5()
1145 mmap_read_unlock(mm); in SYSCALL_DEFINE5()
1154 ret = -EINVAL; in SYSCALL_DEFINE5()
1157 if (mmap_write_lock_killable(mm)) { in SYSCALL_DEFINE5()
1159 return -EINTR; in SYSCALL_DEFINE5()
1162 vma = vma_lookup(mm, start); in SYSCALL_DEFINE5()
1168 if (vma->vm_flags != vm_flags) in SYSCALL_DEFINE5()
1170 if (vma->vm_file != file) in SYSCALL_DEFINE5()
1173 if (start + size > vma->vm_end) { in SYSCALL_DEFINE5()
1174 VMA_ITERATOR(vmi, mm, vma->vm_end); in SYSCALL_DEFINE5()
1179 if (next->vm_start != prev->vm_end) in SYSCALL_DEFINE5()
1182 if (next->vm_file != vma->vm_file) in SYSCALL_DEFINE5()
1185 if (next->vm_flags != vma->vm_flags) in SYSCALL_DEFINE5()
1188 if (start + size <= next->vm_end) in SYSCALL_DEFINE5()
1198 ret = do_mmap(vma->vm_file, start, size, in SYSCALL_DEFINE5()
1199 prot, flags, 0, pgoff, &populate, NULL); in SYSCALL_DEFINE5()
1201 mmap_write_unlock(mm); in SYSCALL_DEFINE5()
1206 ret = 0; in SYSCALL_DEFINE5()
1212 struct mm_struct *mm = current->mm; in vm_brk_flags() local
1218 VMA_ITERATOR(vmi, mm, addr); in vm_brk_flags()
1222 return -ENOMEM; in vm_brk_flags()
1224 return 0; in vm_brk_flags()
1227 if ((flags & (~VM_EXEC)) != 0) in vm_brk_flags()
1228 return -EINVAL; in vm_brk_flags()
1230 if (mmap_write_lock_killable(mm)) in vm_brk_flags()
1231 return -EINTR; in vm_brk_flags()
1237 ret = do_vmi_munmap(&vmi, mm, addr, len, &uf, 0); in vm_brk_flags()
1243 populate = ((mm->def_flags & VM_LOCKED) != 0); in vm_brk_flags()
1244 mmap_write_unlock(mm); in vm_brk_flags()
1245 userfaultfd_unmap_complete(mm, &uf); in vm_brk_flags()
1252 mmap_write_unlock(mm); in vm_brk_flags()
1258 void exit_mmap(struct mm_struct *mm) in exit_mmap() argument
1262 unsigned long nr_accounted = 0; in exit_mmap()
1263 VMA_ITERATOR(vmi, mm, 0); in exit_mmap()
1264 int count = 0; in exit_mmap()
1266 /* mm's last user has gone, and it's about to be pulled down */ in exit_mmap()
1267 mmu_notifier_release(mm); in exit_mmap()
1269 mmap_read_lock(mm); in exit_mmap()
1270 arch_exit_mmap(mm); in exit_mmap()
1275 mmap_read_unlock(mm); in exit_mmap()
1276 mmap_write_lock(mm); in exit_mmap()
1280 flush_cache_mm(mm); in exit_mmap()
1281 tlb_gather_mmu_fullmm(&tlb, mm); in exit_mmap()
1282 /* update_hiwater_rss(mm) here? but nobody should be looking */ in exit_mmap()
1283 /* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */ in exit_mmap()
1284 unmap_vmas(&tlb, &vmi.mas, vma, 0, ULONG_MAX, ULONG_MAX, false); in exit_mmap()
1285 mmap_read_unlock(mm); in exit_mmap()
1291 set_bit(MMF_OOM_SKIP, &mm->flags); in exit_mmap()
1292 mmap_write_lock(mm); in exit_mmap()
1293 mt_clear_in_rcu(&mm->mm_mt); in exit_mmap()
1294 vma_iter_set(&vmi, vma->vm_end); in exit_mmap()
1301 * enabled, without holding any MM locks besides the unreachable in exit_mmap()
1304 vma_iter_set(&vmi, vma->vm_end); in exit_mmap()
1306 if (vma->vm_flags & VM_ACCOUNT) in exit_mmap()
1314 BUG_ON(count != mm->map_count); in exit_mmap()
1316 trace_exit_mmap(mm); in exit_mmap()
1318 __mt_destroy(&mm->mm_mt); in exit_mmap()
1319 mmap_write_unlock(mm); in exit_mmap()
1324 * and into the inode's i_mmap tree. If vm_file is non-NULL
1327 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) in insert_vm_struct() argument
1332 if (find_vma_intersection(mm, vma->vm_start, vma->vm_end)) in insert_vm_struct()
1333 return -ENOMEM; in insert_vm_struct()
1335 if ((vma->vm_flags & VM_ACCOUNT) && in insert_vm_struct()
1336 security_vm_enough_memory_mm(mm, charged)) in insert_vm_struct()
1337 return -ENOMEM; in insert_vm_struct()
1352 BUG_ON(vma->anon_vma); in insert_vm_struct()
1353 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; in insert_vm_struct()
1356 if (vma_link(mm, vma)) { in insert_vm_struct()
1357 if (vma->vm_flags & VM_ACCOUNT) in insert_vm_struct()
1359 return -ENOMEM; in insert_vm_struct()
1362 return 0; in insert_vm_struct()
1369 bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages) in may_expand_vm() argument
1371 if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT) in may_expand_vm()
1375 mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) { in may_expand_vm()
1377 if (rlimit(RLIMIT_DATA) == 0 && in may_expand_vm()
1378 mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT) in may_expand_vm()
1382 current->comm, current->pid, in may_expand_vm()
1383 (mm->data_vm + npages) << PAGE_SHIFT, in may_expand_vm()
1394 void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages) in vm_stat_account() argument
1396 WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages); in vm_stat_account()
1399 mm->exec_vm += npages; in vm_stat_account()
1401 mm->stack_vm += npages; in vm_stat_account()
1403 mm->data_vm += npages; in vm_stat_account()
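/*
 * Sketch of the usual accounting pattern around may_expand_vm() and
 * vm_stat_account(), in the style of the mapping paths above;
 * illustrative only.
 */
static int example_charge_vm(struct mm_struct *mm, vm_flags_t vm_flags,
			     unsigned long len)
{
	unsigned long npages = len >> PAGE_SHIFT;

	if (!may_expand_vm(mm, vm_flags, npages))
		return -ENOMEM;	/* would exceed RLIMIT_AS or RLIMIT_DATA */

	vm_stat_account(mm, vm_flags, npages);
	return 0;
}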
1415 const struct vm_special_mapping *sm = vma->vm_private_data; in special_mapping_close()
1417 if (sm->close) in special_mapping_close()
1418 sm->close(sm, vma); in special_mapping_close()
1423 return ((struct vm_special_mapping *)vma->vm_private_data)->name; in special_mapping_name()
1428 struct vm_special_mapping *sm = new_vma->vm_private_data; in special_mapping_mremap()
1430 if (WARN_ON_ONCE(current->mm != new_vma->vm_mm)) in special_mapping_mremap()
1431 return -EFAULT; in special_mapping_mremap()
1433 if (sm->mremap) in special_mapping_mremap()
1434 return sm->mremap(sm, new_vma); in special_mapping_mremap()
1436 return 0; in special_mapping_mremap()
1442 * Forbid splitting special mappings - kernel has expectations over in special_mapping_split()
1447 return -EINVAL; in special_mapping_split()
1462 struct vm_area_struct *vma = vmf->vma; in special_mapping_fault()
1465 struct vm_special_mapping *sm = vma->vm_private_data; in special_mapping_fault()
1467 if (sm->fault) in special_mapping_fault()
1468 return sm->fault(sm, vmf->vma, vmf); in special_mapping_fault()
1470 pages = sm->pages; in special_mapping_fault()
1472 for (pgoff = vmf->pgoff; pgoff && *pages; ++pages) in special_mapping_fault()
1473 pgoff--; in special_mapping_fault()
1478 vmf->page = page; in special_mapping_fault()
1479 return 0; in special_mapping_fault()
1486 struct mm_struct *mm, in __install_special_mapping() argument
1494 vma = vm_area_alloc(mm); in __install_special_mapping()
1496 return ERR_PTR(-ENOMEM); in __install_special_mapping()
1498 vma_set_range(vma, addr, addr + len, 0); in __install_special_mapping()
1499 vm_flags_init(vma, (vm_flags | mm->def_flags | in __install_special_mapping()
1501 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); in __install_special_mapping()
1503 vma->vm_ops = ops; in __install_special_mapping()
1504 vma->vm_private_data = priv; in __install_special_mapping()
1506 ret = insert_vm_struct(mm, vma); in __install_special_mapping()
1510 vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT); in __install_special_mapping()
1524 return vma->vm_private_data == sm && in vma_is_special_mapping()
1525 vma->vm_ops == &special_mapping_vmops; in vma_is_special_mapping()
1529 * Called with mm->mmap_lock held for writing.
1532 * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
1538 struct mm_struct *mm, in _install_special_mapping() argument
1542 return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec, in _install_special_mapping()
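/*
 * Sketch of installing a special mapping, in the style of architecture
 * vDSO setup code; names, flags and sizes here are illustrative.  The
 * caller is assumed to hold mmap_write_lock(mm), and example_pages[]
 * would be filled with a real page at init time (NULL-terminated, as
 * the comment above allows).
 */
static struct page *example_pages[2];

static const struct vm_special_mapping example_spec = {
	.name	= "[example]",
	.pages	= example_pages,
};

static int example_map_page(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;

	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_MAYREAD,
				       &example_spec);
	return PTR_ERR_OR_ZERO(vma);
}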
1553 ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL); in mmap_init()
1574 return 0; in init_user_reserve()
1595 return 0; in init_admin_reserve()
1626 if (tmp > 0 && tmp < SZ_128K) in reserve_mem_notifier()
1631 if (tmp > 0 && tmp < SZ_8K) in reserve_mem_notifier()
1661 return 0; in init_reserve_notifier()
1667 * this VMA and its relocated range, which will now reside at [vma->vm_start -
1668 * shift, vma->vm_end - shift).
1686 struct mm_struct *mm = vma->vm_mm; in relocate_vma_down() local
1687 unsigned long old_start = vma->vm_start; in relocate_vma_down()
1688 unsigned long old_end = vma->vm_end; in relocate_vma_down()
1689 unsigned long length = old_end - old_start; in relocate_vma_down()
1690 unsigned long new_start = old_start - shift; in relocate_vma_down()
1691 unsigned long new_end = old_end - shift; in relocate_vma_down()
1692 VMA_ITERATOR(vmi, mm, new_start); in relocate_vma_down()
1693 VMG_STATE(vmg, mm, &vmi, new_start, old_end, 0, vma->vm_pgoff); in relocate_vma_down()
1704 return -EFAULT; in relocate_vma_down()
1712 return -ENOMEM; in relocate_vma_down()
1720 return -ENOMEM; in relocate_vma_down()
1722 tlb_gather_mmu(&tlb, mm); in relocate_vma_down()
1729 next ? next->vm_start : USER_PGTABLES_CEILING); in relocate_vma_down()
1734 * have constraints on va-space that make this illegal (IA64) - in relocate_vma_down()
1738 next ? next->vm_start : USER_PGTABLES_CEILING); in relocate_vma_down()
1744 return vma_shrink(&vmi, vma, new_start, new_end, vma->vm_pgoff); in relocate_vma_down()
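/*
 * Concrete example of the shift arithmetic above, with illustrative
 * addresses: for a stack VMA at [0x7ffffffde000, 0x7ffffffff000) and
 * shift = 0x2000, the relocated range is [0x7ffffffdc000, 0x7fffffffd000);
 * the length (0x21000) is unchanged, only the placement moves down by
 * the shift before the old tail is shrunk away.
 */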
1749 * Obtain a read lock on mm->mmap_lock, if the specified address is below the
1751 * downward-growing stack, then attempt to expand the stack to contain it.
1757 * IMPORTANT - VMA fields are accessed without an mmap lock being held, so the
1758 * VMA referenced must not be linked in any user-visible tree, i.e. it must be a
1770 bool mmap_read_lock_maybe_expand(struct mm_struct *mm, in mmap_read_lock_maybe_expand() argument
1774 if (!write || addr >= new_vma->vm_start) { in mmap_read_lock_maybe_expand()
1775 mmap_read_lock(mm); in mmap_read_lock_maybe_expand()
1779 if (!(new_vma->vm_flags & VM_GROWSDOWN)) in mmap_read_lock_maybe_expand()
1782 mmap_write_lock(mm); in mmap_read_lock_maybe_expand()
1784 mmap_write_unlock(mm); in mmap_read_lock_maybe_expand()
1788 mmap_write_downgrade(mm); in mmap_read_lock_maybe_expand()
1792 bool mmap_read_lock_maybe_expand(struct mm_struct *mm, struct vm_area_struct *vma, in mmap_read_lock_maybe_expand() argument