Lines in mm/mlock.c matching the query +full:t +full:- +full:head +full:- +full:semi
1 // SPDX-License-Identifier: GPL-2.0
52 * in vmscan and, possibly, the fault path; and to support semi-accurate
88 folio->mlock_count++; in __mlock_folio()
95 folio->mlock_count = !!folio_test_mlocked(folio); in __mlock_folio()
114 folio->mlock_count = !!folio_test_mlocked(folio); in __mlock_new_folio()
135 if (folio->mlock_count) in __munlock_folio()
136 folio->mlock_count--; in __munlock_folio()
137 if (folio->mlock_count) in __munlock_folio()
144 __zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages); in __munlock_folio()
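Taken together, the __mlock_folio()/__munlock_folio() hits above sketch the mlock_count bookkeeping: the count is bumped when an already-unevictable folio is mlocked again (or re-initialised from the mlocked flag when the folio is first moved to the unevictable list), munlock decrements it, and only once it reaches zero is the folio's NR_MLOCK contribution withdrawn. A minimal userspace model of just that counting, with made-up names and the LRU and statistics handling left out:

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the mlock_count logic visible in __mlock_folio()/__munlock_folio(). */
struct toy_folio {
	unsigned int mlock_count;
	bool mlocked;
};

static void toy_mlock(struct toy_folio *f)
{
	f->mlocked = true;
	f->mlock_count++;		/* one more VM_LOCKED mapping covers it */
}

static void toy_munlock(struct toy_folio *f)
{
	if (f->mlock_count)
		f->mlock_count--;
	if (f->mlock_count)
		return;			/* still mlocked through another VMA */
	f->mlocked = false;		/* last locker gone; NR_MLOCK would drop here */
}

int main(void)
{
	struct toy_folio f = { 0 };

	toy_mlock(&f);
	toy_mlock(&f);
	toy_munlock(&f);
	printf("one munlock:  count=%u mlocked=%d\n", f.mlock_count, f.mlocked);
	toy_munlock(&f);
	printf("two munlocks: count=%u mlocked=%d\n", f.mlock_count, f.mlocked);
	return 0;
}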
194 folio = fbatch->folios[i]; in mlock_folio_batch()
196 folio = (struct folio *)((unsigned long)folio - mlock); in mlock_folio_batch()
197 fbatch->folios[i] = folio; in mlock_folio_batch()
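The mlock_folio_batch() hits show the decode side of a low-bit pointer tag: the requested operation was stashed in the low bits of each queued folio pointer, `mlock` extracts those bits, and subtracting them restores the real pointer before it is written back into the batch. A small standalone illustration of the same trick; the tag names and values here are illustrative, not the kernel's:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TAG_LRU  0x1UL	/* "mlock a folio already on the LRU" */
#define TAG_NEW  0x2UL	/* "mlock a newly allocated folio" */
#define TAG_MASK (TAG_LRU | TAG_NEW)

struct folio_like { long payload; };

static void *encode(struct folio_like *p, unsigned long tag)
{
	assert(((uintptr_t)p & TAG_MASK) == 0);	/* malloc alignment leaves spare low bits */
	return (void *)((uintptr_t)p + tag);
}

int main(void)
{
	struct folio_like *f = malloc(sizeof(*f));
	void *entry = encode(f, TAG_NEW);

	/* Decode side, mirroring mlock_folio_batch(): read the tag bits,
	 * then subtract them to recover the original pointer. */
	unsigned long mlock = (uintptr_t)entry & TAG_MASK;
	struct folio_like *folio = (struct folio_like *)((uintptr_t)entry - mlock);

	printf("tag=%lu, pointer restored: %s\n", mlock, folio == f ? "yes" : "no");
	free(f);
	return 0;
}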
239 * mlock_folio - mlock a folio already on (or temporarily off) LRU
264 * mlock_new_folio - mlock a newly allocated folio not yet on LRU
265 * @folio: folio to be mlocked, either normal or a THP head.
287 * munlock_folio - munlock a folio
288 * @folio: folio to be munlocked, either normal or a THP head.
311 unsigned int count = (end - addr) >> PAGE_SHIFT; in folio_mlock_step()
334 if (!(vma->vm_flags & VM_LOCKED)) in allow_mlock_munlock()
356 struct vm_area_struct *vma = walk->vma; in mlock_pte_range()
371 if (vma->vm_flags & VM_LOCKED) in mlock_pte_range()
378 start_pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in mlock_pte_range()
380 walk->action = ACTION_AGAIN; in mlock_pte_range()
396 if (vma->vm_flags & VM_LOCKED) in mlock_pte_range()
402 pte += step - 1; in mlock_pte_range()
403 addr += (step - 1) << PAGE_SHIFT; in mlock_pte_range()
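In the mlock_pte_range() hits, folio_mlock_step() computes how many PTEs remain in the range ((end - addr) >> PAGE_SHIFT) and, for a large folio, how many consecutive PTEs map it; the walker then advances pte and addr by step - 1 and lets the loop increment supply the final step, so one THP-sized folio is handled once rather than per PTE. A small arithmetic sketch of that batching, assuming a hypothetical 16-page folio:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Hypothetical: every folio in the range is 16 pages (one 64 KiB folio). */
#define FOLIO_NR_PAGES 16UL

int main(void)
{
	unsigned long addr = 0x100000UL;
	unsigned long end  = addr + 24 * PAGE_SIZE;	/* range ends mid-folio */
	unsigned int iterations = 0;

	while (addr != end) {
		/* folio_mlock_step(): PTEs left in the range, capped by the folio */
		unsigned long count = (end - addr) >> PAGE_SHIFT;
		unsigned long step = FOLIO_NR_PAGES < count ? FOLIO_NR_PAGES : count;

		iterations++;
		addr += (step - 1) << PAGE_SHIFT;	/* skip the folio's other PTEs */
		addr += PAGE_SIZE;			/* the loop increment adds the last one */
	}
	printf("24 pages covered in %u iterations\n", iterations);
	return 0;
}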
413 * mlock_vma_pages_range() - mlock any pages already in the range,
415 * @vma - vma containing range to be mlock()ed or munlock()ed
416 * @start - start address in @vma of the range
417 * @end - end of range in @vma
418 * @newflags - the new set of flags for @vma.
433 * or page reclaim finding a page of this now-VM_LOCKED vma, in mlock_vma_pages_range()
448 walk_page_range(vma->vm_mm, start, end, &mlock_walk_ops, NULL); in mlock_vma_pages_range()
458 * mlock_fixup - handle mlock[all]/munlock[all] requests.
460 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
461 * munlock is a no-op. However, for some special vmas, we go ahead and
470 struct mm_struct *mm = vma->vm_mm; in mlock_fixup()
473 vm_flags_t oldflags = vma->vm_flags; in mlock_fixup()
476 is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) || in mlock_fixup()
478 /* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */ in mlock_fixup()
490 nr_pages = (end - start) >> PAGE_SHIFT; in mlock_fixup()
492 nr_pages = -nr_pages; in mlock_fixup()
495 mm->locked_vm += nr_pages; in mlock_fixup()
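The mlock_fixup() hits reduce to simple page accounting: the affected span is (end - start) >> PAGE_SHIFT pages, the count is negated when VM_LOCKED is being cleared, and the signed result is added to mm->locked_vm. A trivial worked example of that bookkeeping, assuming a 4 KiB page size:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long locked_vm = 0;
	unsigned long start = 0x100000UL;
	unsigned long end   = start + (256UL << PAGE_SHIFT);	/* a 1 MiB range */

	long nr_pages = (long)((end - start) >> PAGE_SHIFT);	/* mlock: VM_LOCKED set */
	locked_vm += nr_pages;
	printf("after mlock:   locked_vm = %lu pages\n", locked_vm);

	nr_pages = -nr_pages;					/* munlock: count negated */
	locked_vm += nr_pages;
	printf("after munlock: locked_vm = %lu pages\n", locked_vm);
	return 0;
}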
519 VMA_ITERATOR(vmi, current->mm, start); in apply_vma_lock_flags()
525 return -EINVAL; in apply_vma_lock_flags()
530 return -ENOMEM; in apply_vma_lock_flags()
533 if (start > vma->vm_start) in apply_vma_lock_flags()
537 tmp = vma->vm_start; in apply_vma_lock_flags()
542 if (vma->vm_start != tmp) in apply_vma_lock_flags()
543 return -ENOMEM; in apply_vma_lock_flags()
545 newflags = vma->vm_flags & ~VM_LOCKED_MASK; in apply_vma_lock_flags()
547 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */ in apply_vma_lock_flags()
548 tmp = vma->vm_end; in apply_vma_lock_flags()
559 return -ENOMEM; in apply_vma_lock_flags()
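apply_vma_lock_flags() walks every VMA overlapping [start, start + len): the first VMA may be split if start falls inside it, each mlock_fixup() call is clamped to min(vma->vm_end, end), and any gap (the next VMA not starting exactly where the previous one ended, or the range running past the last VMA) fails the whole request with -ENOMEM. A simplified userspace model of that clamp-and-gap-check walk over a made-up interval list:

#include <stdio.h>

/* Hypothetical stand-in for a VMA: a half-open address interval. */
struct interval { unsigned long start, end; };

static int walk_range(const struct interval *v, int n,
		      unsigned long start, unsigned long end)
{
	unsigned long nstart = start, tmp;
	int i = 0;

	while (i < n && v[i].end <= start)	/* find the first interval past start */
		i++;
	if (i == n)
		return -1;

	tmp = v[i].start;
	for (; i < n && v[i].start < end; i++) {
		if (v[i].start != tmp)
			return -1;		/* hole in the range (-ENOMEM in the kernel) */
		tmp = v[i].end < end ? v[i].end : end;	/* clamp to the request */
		printf("fixup [%#lx, %#lx)\n", nstart, tmp);
		nstart = tmp;
	}
	return tmp < end ? -1 : 0;		/* range ran past the last interval */
}

int main(void)
{
	struct interval vmas[] = { { 0x1000, 0x4000 }, { 0x4000, 0x9000 } };

	return walk_range(vmas, 2, 0x2000, 0x8000) ? 1 : 0;
}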
579 /* Don't overflow past ULONG_MAX */ in count_mm_mlocked_page_nr()
580 if (unlikely(ULONG_MAX - len < start)) in count_mm_mlocked_page_nr()
586 if (vma->vm_flags & VM_LOCKED) { in count_mm_mlocked_page_nr()
587 if (start > vma->vm_start) in count_mm_mlocked_page_nr()
588 count -= (start - vma->vm_start); in count_mm_mlocked_page_nr()
589 if (end < vma->vm_end) { in count_mm_mlocked_page_nr()
590 count += end - vma->vm_start; in count_mm_mlocked_page_nr()
593 count += vma->vm_end - vma->vm_start; in count_mm_mlocked_page_nr()
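count_mm_mlocked_page_nr() is interval intersection: for every VM_LOCKED VMA it trims off the part before start, stops at end if the VMA extends past it, and otherwise adds the whole VMA, returning the total in pages. A standalone version of the same arithmetic over a made-up interval list:

#include <stdio.h>

#define PAGE_SHIFT 12

struct interval { unsigned long start, end; unsigned int locked; };

/* Pages of [start, end) already covered by locked intervals. */
static unsigned long count_locked_pages(const struct interval *v, int n,
					unsigned long start, unsigned long end)
{
	unsigned long count = 0;

	for (int i = 0; i < n; i++) {
		if (v[i].end <= start || v[i].start >= end || !v[i].locked)
			continue;
		if (start > v[i].start)
			count -= start - v[i].start;	/* trim the head before start */
		if (end < v[i].end) {
			count += end - v[i].start;	/* stop at the end of the request */
			break;
		}
		count += v[i].end - v[i].start;		/* whole interval lies inside */
	}
	return count >> PAGE_SHIFT;
}

int main(void)
{
	struct interval vmas[] = {
		{ 0x1000, 0x5000, 1 },	/* locked */
		{ 0x6000, 0x9000, 0 },	/* not locked */
		{ 0x9000, 0xd000, 1 },	/* locked */
	};

	printf("%lu pages already locked\n",
	       count_locked_pages(vmas, 3, 0x2000, 0xb000));
	return 0;
}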
605 if (retval == -EFAULT) in __mlock_posix_error_return()
606 retval = -ENOMEM; in __mlock_posix_error_return()
607 else if (retval == -ENOMEM) in __mlock_posix_error_return()
608 retval = -EAGAIN; in __mlock_posix_error_return()
616 int error = -ENOMEM; in do_mlock()
621 return -EPERM; in do_mlock()
630 if (mmap_write_lock_killable(current->mm)) in do_mlock()
631 return -EINTR; in do_mlock()
633 locked += current->mm->locked_vm; in do_mlock()
637 * previously mlocked areas, that part area in "mm->locked_vm" in do_mlock()
641 locked -= count_mm_mlocked_page_nr(current->mm, in do_mlock()
649 mmap_write_unlock(current->mm); in do_mlock()
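do_mlock() page-aligns the request, adds it to mm->locked_vm (minus whatever in that range is already accounted, per the comment above), and refuses the lock when the total would exceed RLIMIT_MEMLOCK and the caller lacks CAP_IPC_LOCK. From userspace this path is reached through mlock(2)/munlock(2); a minimal usage example that also reports the limit being checked against:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl;
	size_t len = 64 * 1024;
	void *buf = malloc(len);

	if (!buf)
		return 1;
	if (getrlimit(RLIMIT_MEMLOCK, &rl) == 0)
		printf("RLIMIT_MEMLOCK soft limit: %llu bytes\n",
		       (unsigned long long)rl.rlim_cur);

	memset(buf, 0, len);			/* touch the pages first */
	if (mlock(buf, len) != 0) {		/* EPERM, ENOMEM or EAGAIN on failure */
		perror("mlock");
		free(buf);
		return 1;
	}
	printf("locked %zu bytes\n", len);

	if (munlock(buf, len) != 0)
		perror("munlock");
	free(buf);
	return 0;
}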
669 return -EINVAL; in SYSCALL_DEFINE3()
686 if (mmap_write_lock_killable(current->mm)) in SYSCALL_DEFINE2()
687 return -EINTR; in SYSCALL_DEFINE2()
689 mmap_write_unlock(current->mm); in SYSCALL_DEFINE2()
696 * and translate into the appropriate modifications to mm->def_flags and/or the
702 * it, VM_LOCKED and VM_LOCKONFAULT will be cleared from mm->def_flags.
706 VMA_ITERATOR(vmi, current->mm, 0); in apply_mlockall_flags()
710 current->mm->def_flags &= ~VM_LOCKED_MASK; in apply_mlockall_flags()
712 current->mm->def_flags |= VM_LOCKED; in apply_mlockall_flags()
715 current->mm->def_flags |= VM_LOCKONFAULT; in apply_mlockall_flags()
731 newflags = vma->vm_flags & ~VM_LOCKED_MASK; in apply_mlockall_flags()
734 error = mlock_fixup(&vmi, vma, &prev, vma->vm_start, vma->vm_end, in apply_mlockall_flags()
752 return -EINVAL; in SYSCALL_DEFINE1()
755 return -EPERM; in SYSCALL_DEFINE1()
760 if (mmap_write_lock_killable(current->mm)) in SYSCALL_DEFINE1()
761 return -EINTR; in SYSCALL_DEFINE1()
763 ret = -ENOMEM; in SYSCALL_DEFINE1()
764 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) || in SYSCALL_DEFINE1()
767 mmap_write_unlock(current->mm); in SYSCALL_DEFINE1()
778 if (mmap_write_lock_killable(current->mm)) in SYSCALL_DEFINE0()
779 return -EINTR; in SYSCALL_DEFINE0()
781 mmap_write_unlock(current->mm); in SYSCALL_DEFINE0()
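For mlockall(), apply_mlockall_flags() records VM_LOCKED (plus VM_LOCKONFAULT for MCL_ONFAULT) in mm->def_flags so future mappings inherit it, and with MCL_CURRENT it rewrites every existing VMA through mlock_fixup(); munlockall() clears both again. A minimal userspace usage sketch of the two syscalls:

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	/* Lock everything currently mapped and everything mapped later.
	 * Fails with ENOMEM or EPERM if RLIMIT_MEMLOCK is too small and the
	 * caller lacks CAP_IPC_LOCK. */
	if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0) {
		perror("mlockall");
		return 1;
	}
	puts("address space locked");

	/* ... latency-sensitive work would run here ... */

	if (munlockall() != 0) {
		perror("munlockall");
		return 1;
	}
	puts("address space unlocked");
	return 0;
}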
797 locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; in user_shm_lock()
822 dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, (size + PAGE_SIZE - 1) >> PAGE_SHIFT); in user_shm_unlock()
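user_shm_lock() and user_shm_unlock() charge and release SHM_LOCK-style shared-memory locking against the UCOUNT_RLIMIT_MEMLOCK count, rounding the byte size up to whole pages with (size + PAGE_SIZE - 1) >> PAGE_SHIFT. The same round-up expressed in userspace terms, using the runtime page size:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned long page_size = (unsigned long)sysconf(_SC_PAGESIZE);
	unsigned long sizes[] = { 1, page_size, page_size + 1, 5 * page_size - 1 };

	for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		/* Same round-up as user_shm_lock(): partial pages count as full. */
		unsigned long pages = (sizes[i] + page_size - 1) / page_size;
		printf("%8lu bytes -> %lu page(s)\n", sizes[i], pages);
	}
	return 0;
}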