
1 // SPDX-License-Identifier: GPL-2.0-only
20 #include <linux/backing-dev.h>
39 #include <linux/memory-tiers.h>
93 if (!vma->vm_file) in file_thp_enabled()
96 inode = file_inode(vma->vm_file); in file_thp_enabled()
98 return !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode); in file_thp_enabled()
123 if (!vma->vm_mm) /* vdso */ in __thp_vma_allowable_orders()
150 unsigned long addr; in __thp_vma_allowable_orders() local
153 addr = vma->vm_end - (PAGE_SIZE << order); in __thp_vma_allowable_orders()
154 if (thp_vma_suitable_order(vma, addr, order)) in __thp_vma_allowable_orders()
168 if (!in_pf && shmem_file(vma->vm_file)) in __thp_vma_allowable_orders()
169 return shmem_allowable_huge_orders(file_inode(vma->vm_file), in __thp_vma_allowable_orders()
170 vma, vma->vm_pgoff, 0, in __thp_vma_allowable_orders()
184 * Trust that ->huge_fault() handlers know what they are doing in __thp_vma_allowable_orders()
187 if (((in_pf || smaps)) && vma->vm_ops->huge_fault) in __thp_vma_allowable_orders()
205 if (!vma->anon_vma) in __thp_vma_allowable_orders()
252 if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags)) in mm_get_huge_zero_folio()
258 if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags)) in mm_get_huge_zero_folio()
266 if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags)) in mm_put_huge_zero_folio()
326 ret = -EINVAL; in enabled_store()
358 return -EINVAL; in single_hugepage_flag_store()
421 return -EINVAL; in defrag_store()
494 int order = to_thpsize(kobj)->order; in anon_enabled_show()
513 int order = to_thpsize(kobj)->order; in anon_enabled_store()
541 ret = -EINVAL; in anon_enabled_store()
599 sum += this->stats[order][item]; in sum_mthp_stat()
609 int order = to_thpsize(kobj)->order; \
694 int ret = -ENOENT; in sysfs_add_group()
701 if (grp->name) in sysfs_add_group()
713 int ret = -ENOMEM; in thpsize_create()
719 thpsize->order = order; in thpsize_create()
721 ret = kobject_init_and_add(&thpsize->kobj, &thpsize_ktype, parent, in thpsize_create()
722 "hugepages-%lukB", size); in thpsize_create()
729 ret = sysfs_add_group(&thpsize->kobj, &any_ctrl_attr_grp); in thpsize_create()
733 ret = sysfs_add_group(&thpsize->kobj, &any_stats_attr_grp); in thpsize_create()
738 ret = sysfs_add_group(&thpsize->kobj, &anon_ctrl_attr_grp); in thpsize_create()
742 ret = sysfs_add_group(&thpsize->kobj, &anon_stats_attr_grp); in thpsize_create()
748 ret = sysfs_add_group(&thpsize->kobj, &file_ctrl_attr_grp); in thpsize_create()
752 ret = sysfs_add_group(&thpsize->kobj, &file_stats_attr_grp); in thpsize_create()
759 kobject_put(&thpsize->kobj); in thpsize_create()
777 * Default to setting PMD-sized THP to inherit the global setting and in hugepage_init_sysfs()
778 * disable all other sizes. powerpc's PMD_ORDER isn't a compile-time in hugepage_init_sysfs()
787 return -ENOMEM; in hugepage_init_sysfs()
811 list_add(&thpsize->node, &thpsize_list); in hugepage_init_sysfs()
832 list_del(&thpsize->node); in hugepage_exit_sysfs()
833 kobject_put(&thpsize->kobj); in hugepage_exit_sysfs()
853 huge_zero_page_shrinker = shrinker_alloc(0, "thp-zero"); in thp_shrinker_init()
855 return -ENOMEM; in thp_shrinker_init()
860 "thp-deferred_split"); in thp_shrinker_init()
863 return -ENOMEM; in thp_shrinker_init()
866 huge_zero_page_shrinker->count_objects = shrink_huge_zero_page_count; in thp_shrinker_init()
867 huge_zero_page_shrinker->scan_objects = shrink_huge_zero_page_scan; in thp_shrinker_init()
870 deferred_split_shrinker->count_objects = deferred_split_count; in thp_shrinker_init()
871 deferred_split_shrinker->scan_objects = deferred_split_scan; in thp_shrinker_init()
890 return -EINVAL; in hugepage_init()
915 if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) { in hugepage_init()
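The totalram_pages() check above encodes a 512 MiB threshold as a page count. A throwaway sketch of the same shift arithmetic, assuming the common PAGE_SHIFT of 12 (4 KiB pages):

    #include <stdio.h>

    #define PAGE_SHIFT 12	/* assumption: 4 KiB base pages, as on x86-64 */

    int main(void)
    {
    	/* 512 MiB expressed as a number of pages: 512 << (20 - PAGE_SHIFT) */
    	unsigned long threshold_pages = 512UL << (20 - PAGE_SHIFT);

    	printf("threshold = %lu pages = %lu MiB\n",
    	       threshold_pages, threshold_pages >> (20 - PAGE_SHIFT));
    	return 0;
    }

With 4 KiB pages this prints 131072 pages, i.e. the 512 MiB below which the kernel leaves transparent hugepages disabled by default.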
973 int start, end, nr; in setup_thp_anon() local
992 if (strchr(subtoken, '-')) { in setup_thp_anon()
993 start_size = strsep(&subtoken, "-"); in setup_thp_anon()
996 start = get_order_from_str(start_size, THP_ORDERS_ALL_ANON); in setup_thp_anon()
1000 start = end = get_order_from_str(subtoken, in setup_thp_anon()
1004 if (start == -EINVAL) { in setup_thp_anon()
1009 if (end == -EINVAL) { in setup_thp_anon()
1014 if (start < 0 || end < 0 || start > end) in setup_thp_anon()
1017 nr = end - start + 1; in setup_thp_anon()
1019 bitmap_set(&always, start, nr); in setup_thp_anon()
1020 bitmap_clear(&inherit, start, nr); in setup_thp_anon()
1021 bitmap_clear(&madvise, start, nr); in setup_thp_anon()
1023 bitmap_set(&madvise, start, nr); in setup_thp_anon()
1024 bitmap_clear(&inherit, start, nr); in setup_thp_anon()
1025 bitmap_clear(&always, start, nr); in setup_thp_anon()
1027 bitmap_set(&inherit, start, nr); in setup_thp_anon()
1028 bitmap_clear(&madvise, start, nr); in setup_thp_anon()
1029 bitmap_clear(&always, start, nr); in setup_thp_anon()
1031 bitmap_clear(&inherit, start, nr); in setup_thp_anon()
1032 bitmap_clear(&madvise, start, nr); in setup_thp_anon()
1033 bitmap_clear(&always, start, nr); in setup_thp_anon()
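The setup_thp_anon() lines above parse a size range such as "64K-256K", convert each endpoint to a folio order, and then mark that order range in one of the always/madvise/inherit bitmaps while clearing the other two. A minimal userspace sketch of just that range-to-bitmap step, hardcoding the "always" policy and using a hypothetical parse_order() in place of get_order_from_str() (no validation against THP_ORDERS_ALL_ANON):

    #define _DEFAULT_SOURCE
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical stand-in for get_order_from_str(): "64K" -> order 4, etc. */
    static int parse_order(const char *s)
    {
    	unsigned long kb = strtoul(s, NULL, 0);	/* strtoul stops at the 'K' */
    	int order = 0;

    	while ((4UL << order) < kb)		/* assume 4 KiB base pages */
    		order++;
    	return order;
    }

    int main(void)
    {
    	unsigned long always = 0, madvise = 0, inherit = 0;
    	char buf[] = "64K-256K";
    	char *range = buf, *start_size, *end_size;
    	int start, end, nr;

    	if (strchr(range, '-')) {
    		start_size = strsep(&range, "-");
    		end_size = range;
    		start = parse_order(start_size);
    		end = parse_order(end_size);
    	} else {
    		start = end = parse_order(range);
    	}

    	nr = end - start + 1;
    	/* Mirror bitmap_set(&always, start, nr) and the two bitmap_clear()s. */
    	always |= ((1UL << nr) - 1) << start;
    	inherit &= ~(((1UL << nr) - 1) << start);
    	madvise &= ~(((1UL << nr) - 1) << start);

    	printf("orders %d-%d -> always=%#lx\n", start, end, always);
    	return 0;
    }

For "64K-256K" this marks orders 4 through 6, matching how the boot parameter enables a contiguous span of mTHP sizes.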
1055 if (likely(vma->vm_flags & VM_WRITE)) in maybe_pmd_mkwrite()
1068 return &memcg->deferred_split_queue; in get_deferred_split_queue()
1070 return &pgdat->deferred_split_queue; in get_deferred_split_queue()
1078 return &pgdat->deferred_split_queue; in get_deferred_split_queue()
1092 unsigned long addr, unsigned long len, in __thp_get_unmapped_area() argument
1103 if (off_end <= off_align || (off_end - off_align) < size) in __thp_get_unmapped_area()
1110 ret = mm_get_unmapped_area_vmflags(current->mm, filp, addr, len_pad, in __thp_get_unmapped_area()
1124 if (ret == addr) in __thp_get_unmapped_area()
1125 return addr; in __thp_get_unmapped_area()
1127 off_sub = (off - ret) & (size - 1); in __thp_get_unmapped_area()
1129 if (test_bit(MMF_TOPDOWN, &current->mm->flags) && !off_sub) in __thp_get_unmapped_area()
1136 unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr, in thp_get_unmapped_area_vmflags() argument
1143 ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE, vm_flags); in thp_get_unmapped_area_vmflags()
1147 return mm_get_unmapped_area_vmflags(current->mm, filp, addr, len, pgoff, flags, in thp_get_unmapped_area_vmflags()
1151 unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr, in thp_get_unmapped_area() argument
1154 return thp_get_unmapped_area_vmflags(filp, addr, len, pgoff, flags, 0); in thp_get_unmapped_area()
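__thp_get_unmapped_area() above pads the search length and then adds off_sub = (off - ret) & (size - 1) to the address it got back, so that the chosen virtual address and the file offset agree modulo the PMD size and a PMD-sized mapping becomes possible. A small userspace sketch of just that arithmetic, assuming a 2 MiB PMD:

    #include <stdio.h>

    #define PMD_SIZE (2UL << 20)	/* assumption: 2 MiB PMD, as on x86-64 */

    /*
     * Given a page-aligned address returned by a plain search and the file
     * offset in bytes, shift the address so its low PMD_SIZE-1 bits match
     * those of the offset.
     */
    static unsigned long thp_align(unsigned long ret, unsigned long off)
    {
    	unsigned long off_sub = (off - ret) & (PMD_SIZE - 1);

    	return ret + off_sub;
    }

    int main(void)
    {
    	unsigned long ret = 0x7f1234561000UL;	/* arbitrary page-aligned result */
    	unsigned long off = 3 * PMD_SIZE + 0x41000UL;
    	unsigned long addr = thp_align(ret, off);

    	printf("addr %% PMD_SIZE = %#lx, off %% PMD_SIZE = %#lx\n",
    	       addr & (PMD_SIZE - 1), off & (PMD_SIZE - 1));
    	return 0;
    }

The real code only applies this when the padded search succeeded and otherwise falls back to the plain mm_get_unmapped_area_vmflags() call shown above.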
1159 unsigned long addr) in vma_alloc_anon_folio_pmd() argument
1165 folio = vma_alloc_folio(gfp, order, vma, addr & HPAGE_PMD_MASK); in vma_alloc_anon_folio_pmd()
1174 if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) { in vma_alloc_anon_folio_pmd()
1191 folio_zero_user(folio, addr); in vma_alloc_anon_folio_pmd()
1206 entry = mk_huge_pmd(&folio->page, vma->vm_page_prot); in map_anon_folio_pmd()
1210 set_pmd_at(vma->vm_mm, haddr, pmd, entry); in map_anon_folio_pmd()
1212 add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); in map_anon_folio_pmd()
1215 count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC); in map_anon_folio_pmd()
1220 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; in __do_huge_pmd_anonymous_page()
1221 struct vm_area_struct *vma = vmf->vma; in __do_huge_pmd_anonymous_page()
1226 folio = vma_alloc_anon_folio_pmd(vma, vmf->address); in __do_huge_pmd_anonymous_page()
1230 pgtable = pte_alloc_one(vma->vm_mm); in __do_huge_pmd_anonymous_page()
1236 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in __do_huge_pmd_anonymous_page()
1237 if (unlikely(!pmd_none(*vmf->pmd))) { in __do_huge_pmd_anonymous_page()
1240 ret = check_stable_address_space(vma->vm_mm); in __do_huge_pmd_anonymous_page()
1246 spin_unlock(vmf->ptl); in __do_huge_pmd_anonymous_page()
1248 pte_free(vma->vm_mm, pgtable); in __do_huge_pmd_anonymous_page()
1253 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); in __do_huge_pmd_anonymous_page()
1254 map_anon_folio_pmd(folio, vmf->pmd, vma, haddr); in __do_huge_pmd_anonymous_page()
1255 mm_inc_nr_ptes(vma->vm_mm); in __do_huge_pmd_anonymous_page()
1257 spin_unlock(vmf->ptl); in __do_huge_pmd_anonymous_page()
1262 spin_unlock(vmf->ptl); in __do_huge_pmd_anonymous_page()
1265 pte_free(vma->vm_mm, pgtable); in __do_huge_pmd_anonymous_page()
1282 const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE); in vma_thp_gfp_mask()
1314 entry = mk_pmd(&zero_folio->page, vma->vm_page_prot); in set_huge_zero_folio()
1323 struct vm_area_struct *vma = vmf->vma; in do_huge_pmd_anonymous_page()
1324 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; in do_huge_pmd_anonymous_page()
1332 khugepaged_enter_vma(vma, vma->vm_flags); in do_huge_pmd_anonymous_page()
1334 if (!(vmf->flags & FAULT_FLAG_WRITE) && in do_huge_pmd_anonymous_page()
1335 !mm_forbids_zeropage(vma->vm_mm) && in do_huge_pmd_anonymous_page()
1341 pgtable = pte_alloc_one(vma->vm_mm); in do_huge_pmd_anonymous_page()
1344 zero_folio = mm_get_huge_zero_folio(vma->vm_mm); in do_huge_pmd_anonymous_page()
1346 pte_free(vma->vm_mm, pgtable); in do_huge_pmd_anonymous_page()
1350 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_anonymous_page()
1352 if (pmd_none(*vmf->pmd)) { in do_huge_pmd_anonymous_page()
1353 ret = check_stable_address_space(vma->vm_mm); in do_huge_pmd_anonymous_page()
1355 spin_unlock(vmf->ptl); in do_huge_pmd_anonymous_page()
1356 pte_free(vma->vm_mm, pgtable); in do_huge_pmd_anonymous_page()
1358 spin_unlock(vmf->ptl); in do_huge_pmd_anonymous_page()
1359 pte_free(vma->vm_mm, pgtable); in do_huge_pmd_anonymous_page()
1363 set_huge_zero_folio(pgtable, vma->vm_mm, vma, in do_huge_pmd_anonymous_page()
1364 haddr, vmf->pmd, zero_folio); in do_huge_pmd_anonymous_page()
1365 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_anonymous_page()
1366 spin_unlock(vmf->ptl); in do_huge_pmd_anonymous_page()
1369 spin_unlock(vmf->ptl); in do_huge_pmd_anonymous_page()
1370 pte_free(vma->vm_mm, pgtable); in do_huge_pmd_anonymous_page()
1378 static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, in insert_pfn_pmd() argument
1382 struct mm_struct *mm = vma->vm_mm; in insert_pfn_pmd()
1395 if (pmdp_set_access_flags(vma, addr, pmd, entry, 1)) in insert_pfn_pmd()
1396 update_mmu_cache_pmd(vma, addr, pmd); in insert_pfn_pmd()
1418 set_pmd_at(mm, addr, pmd, entry); in insert_pfn_pmd()
1419 update_mmu_cache_pmd(vma, addr, pmd); in insert_pfn_pmd()
1428 * vmf_insert_pfn_pmd - insert a pmd size pfn
1439 unsigned long addr = vmf->address & PMD_MASK; in vmf_insert_pfn_pmd() local
1440 struct vm_area_struct *vma = vmf->vma; in vmf_insert_pfn_pmd()
1441 pgprot_t pgprot = vma->vm_page_prot; in vmf_insert_pfn_pmd()
1449 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && in vmf_insert_pfn_pmd()
1451 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == in vmf_insert_pfn_pmd()
1453 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); in vmf_insert_pfn_pmd()
1455 if (addr < vma->vm_start || addr >= vma->vm_end) in vmf_insert_pfn_pmd()
1459 pgtable = pte_alloc_one(vma->vm_mm); in vmf_insert_pfn_pmd()
1466 insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable); in vmf_insert_pfn_pmd()
1474 if (likely(vma->vm_flags & VM_WRITE)) in maybe_pud_mkwrite()
1479 static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, in insert_pfn_pud() argument
1482 struct mm_struct *mm = vma->vm_mm; in insert_pfn_pud()
1483 pgprot_t prot = vma->vm_page_prot; in insert_pfn_pud()
1494 if (pudp_set_access_flags(vma, addr, pud, entry, 1)) in insert_pfn_pud()
1495 update_mmu_cache_pud(vma, addr, pud); in insert_pfn_pud()
1509 set_pud_at(mm, addr, pud, entry); in insert_pfn_pud()
1510 update_mmu_cache_pud(vma, addr, pud); in insert_pfn_pud()
1517 * vmf_insert_pfn_pud - insert a pud size pfn
1528 unsigned long addr = vmf->address & PUD_MASK; in vmf_insert_pfn_pud() local
1529 struct vm_area_struct *vma = vmf->vma; in vmf_insert_pfn_pud()
1530 pgprot_t pgprot = vma->vm_page_prot; in vmf_insert_pfn_pud()
1537 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && in vmf_insert_pfn_pud()
1539 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == in vmf_insert_pfn_pud()
1541 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); in vmf_insert_pfn_pud()
1543 if (addr < vma->vm_start || addr >= vma->vm_end) in vmf_insert_pfn_pud()
1548 insert_pfn_pud(vma, addr, vmf->pud, pfn, write); in vmf_insert_pfn_pud()
1554 void touch_pmd(struct vm_area_struct *vma, unsigned long addr, in touch_pmd() argument
1562 if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK, in touch_pmd()
1564 update_mmu_cache_pmd(vma, addr, pmd); in touch_pmd()
1567 struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, in follow_devmap_pmd() argument
1571 struct mm_struct *mm = vma->vm_mm; in follow_devmap_pmd()
1586 touch_pmd(vma, addr, pmd, flags & FOLL_WRITE); in follow_devmap_pmd()
1593 return ERR_PTR(-EEXIST); in follow_devmap_pmd()
1595 pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT; in follow_devmap_pmd()
1598 return ERR_PTR(-EFAULT); in follow_devmap_pmd()
1608 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, in copy_huge_pmd() argument
1616 int ret = -ENOMEM; in copy_huge_pmd()
1632 VM_WARN_ON_ONCE(is_cow_mapping(src_vma->vm_flags) && pmd_write(pmd)); in copy_huge_pmd()
1636 /* Skip if it can be refilled on fault */ in copy_huge_pmd()
1648 ret = -EAGAIN; in copy_huge_pmd()
1664 set_pmd_at(src_mm, addr, src_pmd, pmd); in copy_huge_pmd()
1671 set_pmd_at(dst_mm, addr, dst_pmd, pmd); in copy_huge_pmd()
1707 __split_huge_pmd(src_vma, src_pmd, addr, false, NULL); in copy_huge_pmd()
1708 return -EAGAIN; in copy_huge_pmd()
1714 pmdp_set_wrprotect(src_mm, addr, src_pmd); in copy_huge_pmd()
1720 set_pmd_at(dst_mm, addr, dst_pmd, pmd); in copy_huge_pmd()
1731 void touch_pud(struct vm_area_struct *vma, unsigned long addr, in touch_pud() argument
1739 if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK, in touch_pud()
1741 update_mmu_cache_pud(vma, addr, pud); in touch_pud()
1745 pud_t *dst_pud, pud_t *src_pud, unsigned long addr, in copy_huge_pud() argument
1756 ret = -EAGAIN; in copy_huge_pud()
1765 if (is_cow_mapping(vma->vm_flags) && pud_write(pud)) { in copy_huge_pud()
1766 pudp_set_wrprotect(src_mm, addr, src_pud); in copy_huge_pud()
1770 set_pud_at(dst_mm, addr, dst_pud, pud); in copy_huge_pud()
1781 bool write = vmf->flags & FAULT_FLAG_WRITE; in huge_pud_set_accessed()
1783 vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud); in huge_pud_set_accessed()
1784 if (unlikely(!pud_same(*vmf->pud, orig_pud))) in huge_pud_set_accessed()
1787 touch_pud(vmf->vma, vmf->address, vmf->pud, write); in huge_pud_set_accessed()
1789 spin_unlock(vmf->ptl); in huge_pud_set_accessed()
1795 bool write = vmf->flags & FAULT_FLAG_WRITE; in huge_pmd_set_accessed()
1797 vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); in huge_pmd_set_accessed()
1798 if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd))) in huge_pmd_set_accessed()
1801 touch_pmd(vmf->vma, vmf->address, vmf->pmd, write); in huge_pmd_set_accessed()
1804 spin_unlock(vmf->ptl); in huge_pmd_set_accessed()
1809 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; in do_huge_zero_wp_pmd()
1810 struct vm_area_struct *vma = vmf->vma; in do_huge_zero_wp_pmd()
1815 folio = vma_alloc_anon_folio_pmd(vma, vmf->address); in do_huge_zero_wp_pmd()
1819 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, haddr, in do_huge_zero_wp_pmd()
1822 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_zero_wp_pmd()
1823 if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd))) in do_huge_zero_wp_pmd()
1825 ret = check_stable_address_space(vma->vm_mm); in do_huge_zero_wp_pmd()
1828 (void)pmdp_huge_clear_flush(vma, haddr, vmf->pmd); in do_huge_zero_wp_pmd()
1829 map_anon_folio_pmd(folio, vmf->pmd, vma, haddr); in do_huge_zero_wp_pmd()
1834 spin_unlock(vmf->ptl); in do_huge_zero_wp_pmd()
1841 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; in do_huge_pmd_wp_page()
1842 struct vm_area_struct *vma = vmf->vma; in do_huge_pmd_wp_page()
1845 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; in do_huge_pmd_wp_page()
1846 pmd_t orig_pmd = vmf->orig_pmd; in do_huge_pmd_wp_page()
1848 vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd); in do_huge_pmd_wp_page()
1849 VM_BUG_ON_VMA(!vma->anon_vma, vma); in do_huge_pmd_wp_page()
1861 spin_lock(vmf->ptl); in do_huge_pmd_wp_page()
1863 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { in do_huge_pmd_wp_page()
1864 spin_unlock(vmf->ptl); in do_huge_pmd_wp_page()
1878 spin_unlock(vmf->ptl); in do_huge_pmd_wp_page()
1880 spin_lock(vmf->ptl); in do_huge_pmd_wp_page()
1881 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { in do_huge_pmd_wp_page()
1882 spin_unlock(vmf->ptl); in do_huge_pmd_wp_page()
1914 spin_unlock(vmf->ptl); in do_huge_pmd_wp_page()
1919 if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1)) in do_huge_pmd_wp_page()
1920 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_wp_page()
1921 spin_unlock(vmf->ptl); in do_huge_pmd_wp_page()
1927 spin_unlock(vmf->ptl); in do_huge_pmd_wp_page()
1929 __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL); in do_huge_pmd_wp_page()
1934 unsigned long addr, pmd_t pmd) in can_change_pmd_writable() argument
1938 if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE))) in can_change_pmd_writable()
1949 /* Do we need write faults for uffd-wp tracking? */ in can_change_pmd_writable()
1953 if (!(vma->vm_flags & VM_SHARED)) { in can_change_pmd_writable()
1955 page = vm_normal_page_pmd(vma, addr, pmd); in can_change_pmd_writable()
1966 struct vm_area_struct *vma = vmf->vma; in do_huge_pmd_numa_page()
1968 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; in do_huge_pmd_numa_page()
1975 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_numa_page()
1976 old_pmd = pmdp_get(vmf->pmd); in do_huge_pmd_numa_page()
1978 if (unlikely(!pmd_same(old_pmd, vmf->orig_pmd))) { in do_huge_pmd_numa_page()
1979 spin_unlock(vmf->ptl); in do_huge_pmd_numa_page()
1983 pmd = pmd_modify(old_pmd, vma->vm_page_prot); in do_huge_pmd_numa_page()
1991 can_change_pmd_writable(vma, vmf->address, pmd)) in do_huge_pmd_numa_page()
2009 spin_unlock(vmf->ptl); in do_huge_pmd_numa_page()
2020 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_numa_page()
2021 if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd))) { in do_huge_pmd_numa_page()
2022 spin_unlock(vmf->ptl); in do_huge_pmd_numa_page()
2027 pmd = pmd_modify(pmdp_get(vmf->pmd), vma->vm_page_prot); in do_huge_pmd_numa_page()
2031 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd); in do_huge_pmd_numa_page()
2032 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_numa_page()
2033 spin_unlock(vmf->ptl); in do_huge_pmd_numa_page()
2045 pmd_t *pmd, unsigned long addr, unsigned long next) in madvise_free_huge_pmd() argument
2050 struct mm_struct *mm = tlb->mm; in madvise_free_huge_pmd()
2081 * If the user wants to discard part of the THP's pages, split it so MADV_FREE in madvise_free_huge_pmd()
2084 if (next - addr != HPAGE_PMD_SIZE) { in madvise_free_huge_pmd()
2098 pmdp_invalidate(vma, addr, pmd); in madvise_free_huge_pmd()
2102 set_pmd_at(mm, addr, pmd, orig_pmd); in madvise_free_huge_pmd()
2103 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); in madvise_free_huge_pmd()
2124 pmd_t *pmd, unsigned long addr) in zap_huge_pmd() argument
2140 orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd, in zap_huge_pmd()
2141 tlb->fullmm); in zap_huge_pmd()
2143 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); in zap_huge_pmd()
2146 zap_deposited_table(tlb->mm, pmd); in zap_huge_pmd()
2149 zap_deposited_table(tlb->mm, pmd); in zap_huge_pmd()
2173 zap_deposited_table(tlb->mm, pmd); in zap_huge_pmd()
2174 add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); in zap_huge_pmd()
2177 zap_deposited_table(tlb->mm, pmd); in zap_huge_pmd()
2178 add_mm_counter(tlb->mm, mm_counter_file(folio), in zap_huge_pmd()
2179 -HPAGE_PMD_NR); in zap_huge_pmd()
2184 tlb_remove_page_size(tlb, &folio->page, HPAGE_PMD_SIZE); in zap_huge_pmd()
2230 struct mm_struct *mm = vma->vm_mm; in move_huge_pmd()
2278 * - 0 if PMD could not be locked
2279 * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
2281 * - HPAGE_PMD_NR if protections changed and TLB flush necessary
2284 pmd_t *pmd, unsigned long addr, pgprot_t newprot, in change_huge_pmd() argument
2287 struct mm_struct *mm = vma->vm_mm; in change_huge_pmd()
2332 set_pmd_at(mm, addr, pmd, newpmd); in change_huge_pmd()
2341 * Avoid trapping faults against the zero page. The read-only in change_huge_pmd()
2342 * data is likely to be read-cached on the local CPU and in change_huge_pmd()
2370 * CPU0: CPU1: in change_huge_pmd()
2378 * // pmd is re-established in change_huge_pmd()
2386 oldpmd = pmdp_invalidate_ad(vma, addr, pmd); in change_huge_pmd()
2401 can_change_pmd_writable(vma, addr, entry)) in change_huge_pmd()
2405 set_pmd_at(mm, addr, pmd, entry); in change_huge_pmd()
2408 tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE); in change_huge_pmd()
2417 * - 0: if pud leaf changed from under us
2418 * - 1: if pud can be skipped
2419 * - HPAGE_PUD_NR: if pud was successfully processed
2423 pud_t *pudp, unsigned long addr, pgprot_t newprot, in change_huge_pud() argument
2426 struct mm_struct *mm = vma->vm_mm; in change_huge_pud()
2437 * Huge entries on userfault-wp only work with anonymous, while we in change_huge_pud()
2451 oldpud = pudp_invalidate(vma, addr, pudp); in change_huge_pud()
2453 set_pud_at(mm, addr, pudp, entry); in change_huge_pud()
2454 tlb_flush_pud_range(tlb, addr, HPAGE_PUD_SIZE); in change_huge_pud()
2466 * Return zero if succeeded in moving the page, -EAGAIN if it needs to be
2493 return -EINVAL; in move_pages_huge_pmd()
2500 return -EAGAIN; in move_pages_huge_pmd()
2502 return -ENOENT; in move_pages_huge_pmd()
2510 return -EBUSY; in move_pages_huge_pmd()
2535 err = -EAGAIN; in move_pages_huge_pmd()
2546 err = -EAGAIN; in move_pages_huge_pmd()
2551 !PageAnonExclusive(&src_folio->page)) { in move_pages_huge_pmd()
2552 err = -EBUSY; in move_pages_huge_pmd()
2558 err = -EBUSY; in move_pages_huge_pmd()
2566 err = -EBUSY; in move_pages_huge_pmd()
2571 src_folio->index = linear_page_index(dst_vma, dst_addr); in move_pages_huge_pmd()
2573 _dst_pmd = mk_huge_pmd(&src_folio->page, dst_vma->vm_page_prot); in move_pages_huge_pmd()
2578 _dst_pmd = mk_huge_pmd(src_page, dst_vma->vm_page_prot); in move_pages_huge_pmd()
2610 ptl = pmd_lock(vma->vm_mm, pmd); in __pmd_trans_huge_lock()
2628 ptl = pud_lock(vma->vm_mm, pud); in __pud_trans_huge_lock()
2637 pud_t *pud, unsigned long addr) in zap_huge_pud() argument
2646 orig_pud = pudp_huge_get_and_clear_full(vma, addr, pud, tlb->fullmm); in zap_huge_pud()
2648 tlb_remove_pud_tlb_entry(tlb, pud, addr); in zap_huge_pud()
2663 VM_BUG_ON_VMA(vma->vm_start > haddr, vma); in __split_huge_pud_locked()
2664 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma); in __split_huge_pud_locked()
2678 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, in __split_huge_pud()
2682 ptl = pud_lock(vma->vm_mm, pud); in __split_huge_pud()
2685 __split_huge_pud_locked(vma, pud, range.start); in __split_huge_pud()
2701 struct mm_struct *mm = vma->vm_mm; in __split_huge_zero_page_pmd()
2704 unsigned long addr; in __split_huge_zero_page_pmd() local
2723 for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) { in __split_huge_zero_page_pmd()
2726 entry = pfn_pte(my_zero_pfn(addr), vma->vm_page_prot); in __split_huge_zero_page_pmd()
2731 set_pte_at(mm, addr, pte, entry); in __split_huge_zero_page_pmd()
2734 pte_unmap(pte - 1); in __split_huge_zero_page_pmd()
2742 struct mm_struct *mm = vma->vm_mm; in __split_huge_pmd_locked()
2749 unsigned long addr; in __split_huge_pmd_locked() local
2754 VM_BUG_ON_VMA(vma->vm_start > haddr, vma); in __split_huge_pmd_locked()
2755 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma); in __split_huge_pmd_locked()
2786 add_mm_counter(mm, mm_counter_file(folio), -HPAGE_PMD_NR); in __split_huge_pmd_locked()
2821 * happens in place). If we overwrite the pmd with the not-huge in __split_huge_pmd_locked()
2838 * we write the non-huge version of the pmd entry with in __split_huge_pmd_locked()
2859 * each subpage -- no need to (temporarily) clear. in __split_huge_pmd_locked()
2863 * managed to clear PageAnonExclusive() -- see in __split_huge_pmd_locked()
2878 folio_ref_add(folio, HPAGE_PMD_NR - 1); in __split_huge_pmd_locked()
2901 for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) { in __split_huge_pmd_locked()
2925 set_pte_at(mm, addr, pte + i, entry); in __split_huge_pmd_locked()
2930 entry = mk_pte(page, READ_ONCE(vma->vm_page_prot)); in __split_huge_pmd_locked()
2935 /* NOTE: this may set soft-dirty too on some archs */ in __split_huge_pmd_locked()
2986 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, in __split_huge_pmd()
2990 ptl = pmd_lock(vma->vm_mm, pmd); in __split_huge_pmd()
2991 split_huge_pmd_locked(vma, range.start, pmd, freeze, folio); in __split_huge_pmd()
2999 pmd_t *pmd = mm_find_pmd(vma->vm_mm, address); in split_huge_pmd_address()
3020 unsigned long start, in vma_adjust_trans_huge() argument
3024 /* Check if we need to split start first. */ in vma_adjust_trans_huge()
3025 split_huge_pmd_if_needed(vma, start); in vma_adjust_trans_huge()
3035 struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end); in vma_adjust_trans_huge()
3036 unsigned long nstart = next->vm_start; in vma_adjust_trans_huge()
3066 unsigned long addr, pmd_t *pmdp, in __discard_anon_folio_pmd_locked() argument
3069 struct mm_struct *mm = vma->vm_mm; in __discard_anon_folio_pmd_locked()
3076 orig_pmd = pmdp_huge_clear_flush(vma, addr, pmdp); in __discard_anon_folio_pmd_locked()
3079 * Syncing against concurrent GUP-fast: in __discard_anon_folio_pmd_locked()
3080 * - clear PMD; barrier; read refcount in __discard_anon_folio_pmd_locked()
3081 * - inc refcount; barrier; read PMD in __discard_anon_folio_pmd_locked()
3103 set_pmd_at(mm, addr, pmdp, orig_pmd); in __discard_anon_folio_pmd_locked()
3109 add_mm_counter(mm, MM_ANONPAGES, -HPAGE_PMD_NR); in __discard_anon_folio_pmd_locked()
3110 if (vma->vm_flags & VM_LOCKED) in __discard_anon_folio_pmd_locked()
3117 bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr, in unmap_huge_pmd_locked() argument
3122 VM_WARN_ON_ONCE(!IS_ALIGNED(addr, HPAGE_PMD_SIZE)); in unmap_huge_pmd_locked()
3125 return __discard_anon_folio_pmd_locked(vma, addr, pmdp, folio); in unmap_huge_pmd_locked()
3151 lockdep_assert_held(&lruvec->lru_lock); in lru_add_page_tail()
3157 list_add_tail(&tail->lru, list); in lru_add_page_tail()
3162 tail->mlock_count = 0; in lru_add_page_tail()
3164 list_add_tail(&tail->lru, &folio->lru); in lru_add_page_tail()
3173 struct page *head = &folio->page; in __split_huge_page_tail()
3181 VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail); in __split_huge_page_tail()
3189 * Note that for mapped sub-pages of an anonymous THP, in __split_huge_page_tail()
3193 * unreferenced sub-pages of an anonymous THP: we can simply drop in __split_huge_page_tail()
3194 * PG_anon_exclusive (-> PG_mappedtodisk) for these here. in __split_huge_page_tail()
3196 page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; in __split_huge_page_tail()
3197 page_tail->flags |= (head->flags & in __split_huge_page_tail()
3216 /* ->mapping in first and second tail page is replaced by other uses */ in __split_huge_page_tail()
3217 VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING, in __split_huge_page_tail()
3219 new_folio->mapping = folio->mapping; in __split_huge_page_tail()
3220 new_folio->index = folio->index + tail; in __split_huge_page_tail()
3223 * page->private should not be set in tail pages. Fix up and warn once in __split_huge_page_tail()
3226 if (unlikely(page_tail->private)) { in __split_huge_page_tail()
3228 page_tail->private = 0; in __split_huge_page_tail()
3231 new_folio->swap.val = folio->swap.val + tail; in __split_huge_page_tail()
3233 /* Page flags must be visible before we make the page non-compound. */ in __split_huge_page_tail()
3262 * pages to show after the currently processed elements - e.g. in __split_huge_page_tail()
3272 struct page *head = &folio->page; in __split_huge_page()
3285 offset = swap_cache_index(folio->swap); in __split_huge_page()
3286 swap_cache = swap_address_space(folio->swap); in __split_huge_page()
3287 xa_lock(&swap_cache->i_pages); in __split_huge_page()
3295 for (i = nr - new_nr; i >= new_nr; i -= new_nr) { in __split_huge_page()
3300 if (tail->index >= end) { in __split_huge_page()
3301 if (shmem_mapping(folio->mapping)) in __split_huge_page()
3305 inode_to_wb(folio->mapping->host)); in __split_huge_page()
3309 __xa_store(&folio->mapping->i_pages, tail->index, in __split_huge_page()
3312 __xa_store(&swap_cache->i_pages, offset + i, in __split_huge_page()
3335 xa_unlock(&swap_cache->i_pages); in __split_huge_page()
3342 xa_unlock(&folio->mapping->i_pages); in __split_huge_page()
3347 shmem_uncharge(folio->mapping->host, nr_dropped); in __split_huge_page()
3351 * set page to its compound_head when split to non order-0 pages, so in __split_huge_page()
3389 return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins - in can_split_folio()
3407 * will receive an -EAGAIN.
3409 * 4) @new_order > 1, usually. Splitting to order-1 anonymous folios is not
3410 * supported for non-file-backed folios, because folio->_deferred_list, which
3411 * is used by partially mapped folios, is stored in subpage 2, but an order-1
3412 * folio only has subpages 0 and 1. File-backed order-1 folios are supported,
3426 * Returns -EAGAIN if the folio has unexpected reference (e.g., GUP) or if
3429 * Returns -EBUSY when trying to split the huge zeropage, if the folio is
3430 * under writeback, if fs-specific folio metadata cannot currently be
3435 * min-order if one is set for non-anonymous folios.
3437 * Returns -EINVAL when trying to split to an order that is incompatible
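The order-1 restriction documented above is a counting argument: an order-n folio has 1 << n subpages and the _deferred_list used for partially mapped folios lives in subpage 2, so an anonymous split target must be order 0 (no deferred list needed) or at least order 2. A toy check of that arithmetic, not the function's real validation:

    #include <stdbool.h>
    #include <stdio.h>

    /* An order-n folio has 1 << n subpages; _deferred_list sits in subpage 2. */
    static bool anon_split_order_ok(unsigned int new_order)
    {
    	unsigned int nr_subpages = 1U << new_order;

    	/* Order 0 results are not large folios, so no deferred list needed. */
    	return new_order == 0 || nr_subpages > 2;
    }

    int main(void)
    {
    	for (unsigned int order = 0; order <= 3; order++)
    		printf("new_order=%u -> %s\n", order,
    		       anon_split_order_ok(order) ? "ok" : "unsupported");
    	return 0;
    }

Only new_order == 1 fails the check, matching the "Cannot split to order-1 folio" warning emitted a few lines below.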
3446 XA_STATE_ORDER(xas, &folio->mapping->i_pages, folio->index, new_order); in split_huge_page_to_list_to_order()
3459 return -EINVAL; in split_huge_page_to_list_to_order()
3462 /* order-1 is not supported for anonymous THP. */ in split_huge_page_to_list_to_order()
3464 VM_WARN_ONCE(1, "Cannot split to order-1 folio"); in split_huge_page_to_list_to_order()
3465 return -EINVAL; in split_huge_page_to_list_to_order()
3468 /* Split shmem folio to non-zero order not supported */ in split_huge_page_to_list_to_order()
3469 if (shmem_mapping(folio->mapping)) { in split_huge_page_to_list_to_order()
3471 "Cannot split shmem folio to non-0 order"); in split_huge_page_to_list_to_order()
3472 return -EINVAL; in split_huge_page_to_list_to_order()
3481 !mapping_large_folio_support(folio->mapping)) { in split_huge_page_to_list_to_order()
3483 "Cannot split file folio to non-0 order"); in split_huge_page_to_list_to_order()
3484 return -EINVAL; in split_huge_page_to_list_to_order()
3488 /* Only swapping a whole PMD-mapped folio is supported */ in split_huge_page_to_list_to_order()
3490 return -EINVAL; in split_huge_page_to_list_to_order()
3495 return -EBUSY; in split_huge_page_to_list_to_order()
3499 return -EBUSY; in split_huge_page_to_list_to_order()
3512 ret = -EBUSY; in split_huge_page_to_list_to_order()
3515 end = -1; in split_huge_page_to_list_to_order()
3522 mapping = folio->mapping; in split_huge_page_to_list_to_order()
3526 ret = -EBUSY; in split_huge_page_to_list_to_order()
3530 min_order = mapping_min_folio_order(folio->mapping); in split_huge_page_to_list_to_order()
3532 VM_WARN_ONCE(1, "Cannot split mapped folio below min-order: %u", in split_huge_page_to_list_to_order()
3534 ret = -EINVAL; in split_huge_page_to_list_to_order()
3542 ret = -EBUSY; in split_huge_page_to_list_to_order()
3557 * but on 32-bit, i_size_read() takes an irq-unsafe seqlock, in split_huge_page_to_list_to_order()
3562 end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE); in split_huge_page_to_list_to_order()
3564 end = shmem_fallocend(mapping->host, end); in split_huge_page_to_list_to_order()
3572 ret = -EAGAIN; in split_huge_page_to_list_to_order()
3591 /* Prevent deferred_split_scan() touching ->_refcount */ in split_huge_page_to_list_to_order()
3592 spin_lock(&ds_queue->split_queue_lock); in split_huge_page_to_list_to_order()
3595 !list_empty(&folio->_deferred_list)) { in split_huge_page_to_list_to_order()
3596 ds_queue->split_queue_len--; in split_huge_page_to_list_to_order()
3600 MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1); in split_huge_page_to_list_to_order()
3608 list_del_init(&folio->_deferred_list); in split_huge_page_to_list_to_order()
3610 spin_unlock(&ds_queue->split_queue_lock); in split_huge_page_to_list_to_order()
3619 NR_SHMEM_THPS, -nr); in split_huge_page_to_list_to_order()
3622 NR_FILE_THPS, -nr); in split_huge_page_to_list_to_order()
3629 mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1); in split_huge_page_to_list_to_order()
3630 mod_mthp_stat(new_order, MTHP_STAT_NR_ANON, 1 << (order - new_order)); in split_huge_page_to_list_to_order()
3635 spin_unlock(&ds_queue->split_queue_lock); in split_huge_page_to_list_to_order()
3641 ret = -EAGAIN; in split_huge_page_to_list_to_order()
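The mod_mthp_stat() accounting a few lines above removes one folio at the old order and adds 1 << (order - new_order) folios at the new order; splitting one order-9 (PMD-sized on x86-64) anonymous folio to order 0 therefore adds 512 order-0 folios. A quick sketch with hypothetical per-order counters standing in for the mTHP stats:

    #include <stdio.h>

    int main(void)
    {
    	long nr_anon[10] = { 0 };	/* hypothetical per-order NR_ANON counters */
    	unsigned int order = 9, new_order = 0;

    	nr_anon[order] = 1;		/* one PMD-sized anonymous folio */

    	/* Mirror the split accounting: -1 at the old order, +2^(order-new_order) at the new. */
    	nr_anon[order] -= 1;
    	nr_anon[new_order] += 1L << (order - new_order);

    	printf("order-%u folios: %ld, order-%u folios: %ld\n",
    	       order, nr_anon[order], new_order, nr_anon[new_order]);
    	return 0;
    }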
3664 if (!folio->mapping) { in min_order_for_split()
3667 return -EBUSY; in min_order_for_split()
3670 return mapping_min_folio_order(folio->mapping); in min_order_for_split()
3680 return split_huge_page_to_list_to_order(&folio->page, list, ret); in split_folio_to_list()
3687 * queueing THP splits, and that list is (racily observed to be) non-empty.
3690 * zero: because even when split_queue_lock is held, a non-empty _deferred_list
3691 * might be in use on deferred_split_scan()'s unlocked on-stack list.
3706 spin_lock_irqsave(&ds_queue->split_queue_lock, flags); in __folio_unqueue_deferred_split()
3707 if (!list_empty(&folio->_deferred_list)) { in __folio_unqueue_deferred_split()
3708 ds_queue->split_queue_len--; in __folio_unqueue_deferred_split()
3712 MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1); in __folio_unqueue_deferred_split()
3714 list_del_init(&folio->_deferred_list); in __folio_unqueue_deferred_split()
3717 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); in __folio_unqueue_deferred_split()
3751 spin_lock_irqsave(&ds_queue->split_queue_lock, flags); in deferred_split_folio()
3762 /* partially mapped folios cannot become non-partially mapped */ in deferred_split_folio()
3765 if (list_empty(&folio->_deferred_list)) { in deferred_split_folio()
3766 list_add_tail(&folio->_deferred_list, &ds_queue->split_queue); in deferred_split_folio()
3767 ds_queue->split_queue_len++; in deferred_split_folio()
3771 deferred_split_shrinker->id); in deferred_split_folio()
3774 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); in deferred_split_folio()
3780 struct pglist_data *pgdata = NODE_DATA(sc->nid); in deferred_split_count()
3781 struct deferred_split *ds_queue = &pgdata->deferred_split_queue; in deferred_split_count()
3784 if (sc->memcg) in deferred_split_count()
3785 ds_queue = &sc->memcg->deferred_split_queue; in deferred_split_count()
3787 return READ_ONCE(ds_queue->split_queue_len); in deferred_split_count()
3796 if (khugepaged_max_ptes_none == HPAGE_PMD_NR - 1) in thp_underused()
3810 * of non-zero filled pages exceeds threshold. in thp_underused()
3813 if (num_filled_pages >= HPAGE_PMD_NR - khugepaged_max_ptes_none) { in thp_underused()
3826 struct pglist_data *pgdata = NODE_DATA(sc->nid); in deferred_split_scan()
3827 struct deferred_split *ds_queue = &pgdata->deferred_split_queue; in deferred_split_scan()
3834 if (sc->memcg) in deferred_split_scan()
3835 ds_queue = &sc->memcg->deferred_split_queue; in deferred_split_scan()
3838 spin_lock_irqsave(&ds_queue->split_queue_lock, flags); in deferred_split_scan()
3840 list_for_each_entry_safe(folio, next, &ds_queue->split_queue, in deferred_split_scan()
3843 list_move(&folio->_deferred_list, &list); in deferred_split_scan()
3849 MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1); in deferred_split_scan()
3851 list_del_init(&folio->_deferred_list); in deferred_split_scan()
3852 ds_queue->split_queue_len--; in deferred_split_scan()
3854 if (!--sc->nr_to_scan) in deferred_split_scan()
3857 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); in deferred_split_scan()
3888 list_del_init(&folio->_deferred_list); in deferred_split_scan()
3903 spin_lock_irqsave(&ds_queue->split_queue_lock, flags); in deferred_split_scan()
3904 list_splice_tail(&list, &ds_queue->split_queue); in deferred_split_scan()
3905 ds_queue->split_queue_len -= removed; in deferred_split_scan()
3906 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); in deferred_split_scan()
3915 if (!split && list_empty(&ds_queue->split_queue)) in deferred_split_scan()
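deferred_split_scan() above detaches up to sc->nr_to_scan folios from the per-node (or per-memcg) split queue onto a private list under split_queue_lock, tries to split them, and splices any survivors back. A minimal single-threaded userspace model of the detach step, with simplified list helpers standing in for the kernel's list_add_tail()/list_move() and no locking or refcounting:

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
    	struct node *prev, *next;
    	int id;
    };

    /* Hypothetical stand-in for the deferred split queue. */
    static struct node queue = { &queue, &queue, -1 };
    static long queue_len;

    static void list_add_tail_(struct node *n, struct node *head)
    {
    	n->prev = head->prev;
    	n->next = head;
    	head->prev->next = n;
    	head->prev = n;
    }

    static void list_move_(struct node *n, struct node *head)
    {
    	n->prev->next = n->next;
    	n->next->prev = n->prev;
    	list_add_tail_(n, head);
    }

    int main(void)
    {
    	struct node local = { &local, &local, -1 };
    	long nr_to_scan = 2;

    	for (int i = 0; i < 4; i++) {
    		struct node *n = malloc(sizeof(*n));

    		n->id = i;
    		list_add_tail_(n, &queue);	/* deferred_split_folio() */
    		queue_len++;
    	}

    	/* deferred_split_scan(): pull at most nr_to_scan entries off the queue. */
    	while (queue.next != &queue && nr_to_scan--) {
    		struct node *n = queue.next;

    		list_move_(n, &local);
    		queue_len--;
    	}

    	for (struct node *n = local.next; n != &local; n = n->next)
    		printf("would try to split folio %d\n", n->id);
    	printf("%ld folios still queued\n", queue_len);
    	return 0;
    }

The real scan additionally takes a folio reference before moving each entry and uses list_splice_tail() to return anything it could not split, as the lines above show.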
3934 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) { in split_huge_pages_all()
3960 pfn += nr_pages - 1; in split_huge_pages_all()
3973 return vma_is_special_huge(vma) || (vma->vm_flags & VM_IO) || in vma_not_suitable_for_thp_split()
3984 unsigned long addr; in split_huge_pages_pid() local
3991 ret = -ESRCH; in split_huge_pages_pid()
4000 ret = -EINVAL; in split_huge_pages_pid()
4004 pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx]\n", in split_huge_pages_pid()
4009 * always increase addr by PAGE_SIZE, since we could have a PTE page in split_huge_pages_pid()
4010 * table filled with PTE-mapped THPs, each of which is distinct. in split_huge_pages_pid()
4012 for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) { in split_huge_pages_pid()
4013 struct vm_area_struct *vma = vma_lookup(mm, addr); in split_huge_pages_pid()
4024 addr = vma->vm_end; in split_huge_pages_pid()
4028 folio = folio_walk_start(&fw, vma, addr, 0); in split_huge_pages_pid()
4036 mapping = folio->mapping; in split_huge_pages_pid()
4059 if (!folio_test_anon(folio) && folio->mapping != mapping) in split_huge_pages_pid()
4091 int ret = -EINVAL; in split_huge_pages_in_file()
4106 pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx]\n", in split_huge_pages_in_file()
4109 mapping = candidate->f_mapping; in split_huge_pages_in_file()
4132 if (folio->mapping != mapping) in split_huge_pages_in_file()
4148 pr_debug("%lu of %lu file-backed THP split\n", split, total); in split_huge_pages_in_file()
4174 ret = -EFAULT; in split_huge_pages_write()
4180 input_buf[MAX_INPUT_BUF_SZ - 1] = '\0'; in split_huge_pages_write()
4193 ret = -EINVAL; in split_huge_pages_write()
4200 ret = -EINVAL; in split_huge_pages_write()
4216 ret = -EINVAL; in split_huge_pages_write()
4248 struct vm_area_struct *vma = pvmw->vma; in set_pmd_migration_entry()
4249 struct mm_struct *mm = vma->vm_mm; in set_pmd_migration_entry()
4250 unsigned long address = pvmw->address; in set_pmd_migration_entry()
4256 if (!(pvmw->pmd && !pvmw->pte)) in set_pmd_migration_entry()
4260 pmdval = pmdp_invalidate(vma, address, pvmw->pmd); in set_pmd_migration_entry()
4265 set_pmd_at(mm, address, pvmw->pmd, pmdval); in set_pmd_migration_entry()
4266 return -EBUSY; in set_pmd_migration_entry()
4286 set_pmd_at(mm, address, pvmw->pmd, pmdswp); in set_pmd_migration_entry()
4297 struct vm_area_struct *vma = pvmw->vma; in remove_migration_pmd()
4298 struct mm_struct *mm = vma->vm_mm; in remove_migration_pmd()
4299 unsigned long address = pvmw->address; in remove_migration_pmd()
4304 if (!(pvmw->pmd && !pvmw->pte)) in remove_migration_pmd()
4307 entry = pmd_to_swp_entry(*pvmw->pmd); in remove_migration_pmd()
4309 pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot)); in remove_migration_pmd()
4310 if (pmd_swp_soft_dirty(*pvmw->pmd)) in remove_migration_pmd()
4314 if (pmd_swp_uffd_wp(*pvmw->pmd)) in remove_migration_pmd()
4318 /* NOTE: this may contain setting soft-dirty on some archs */ in remove_migration_pmd()
4333 set_pmd_at(mm, haddr, pvmw->pmd, pmde); in remove_migration_pmd()
4335 /* No need to invalidate - it was non-present before */ in remove_migration_pmd()
4336 update_mmu_cache_pmd(vma, address, pvmw->pmd); in remove_migration_pmd()