Lines Matching +full:tlb +full:- +full:split
1 /* SPDX-License-Identifier: GPL-2.0 */
28 bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
30 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
32 int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
36 int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
70 * and including PMD_ORDER, except order-0 (which is not "huge") and order-1
73 #define THP_ORDERS_ALL_ANON ((BIT(PMD_ORDER + 1) - 1) & ~(BIT(0) | BIT(1)))
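
The THP_ORDERS_ALL_ANON mask above packs one bit per allowed anonymous THP order. A minimal userspace sketch, assuming PMD_ORDER is 9 (x86-64 with 4 KiB pages; the real value is arch/config dependent), shows which orders the expression keeps:

/* Minimal userspace sketch; PMD_ORDER = 9 is an assumption (x86-64, 4 KiB pages). */
#include <stdio.h>

#define BIT(n)			(1UL << (n))
#define PMD_ORDER		9	/* assumed; arch/config dependent */
#define THP_ORDERS_ALL_ANON	((BIT(PMD_ORDER + 1) - 1) & ~(BIT(0) | BIT(1)))

int main(void)
{
	/* Bits 2..PMD_ORDER are set: every anon THP order except order-0 and order-1. */
	for (int order = 0; order <= PMD_ORDER; order++)
		printf("order-%d: %s\n", order,
		       (THP_ORDERS_ALL_ANON & BIT(order)) ? "allowed" : "excluded");
	return 0;
}
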
77 * file is never split and the MAX_PAGECACHE_ORDER limit does not apply to
83 ((BIT(MAX_PAGECACHE_ORDER + 1) - 1) & ~BIT(0))
108 #define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
110 #define HPAGE_PMD_MASK (~(HPAGE_PMD_SIZE - 1))
113 #define HPAGE_PUD_ORDER (HPAGE_PUD_SHIFT-PAGE_SHIFT)
115 #define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1))
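
The HPAGE_PMD_* and HPAGE_PUD_* constants are all derived from the corresponding SHIFT. A userspace sketch, assuming HPAGE_PMD_SHIFT is 21 and PAGE_SHIFT is 12 (x86-64 with 4 KiB pages), shows the derivation and the usual MASK idiom of rounding an address down to its huge-page-aligned base (haddr); the PUD variants follow the same pattern at a larger shift:

/* Userspace sketch; the SHIFT values are assumptions (x86-64, 4 KiB pages). */
#include <stdio.h>

#define PAGE_SHIFT	12
#define HPAGE_PMD_SHIFT	21	/* assumed: 2 MiB PMD-level huge pages */

#define HPAGE_PMD_ORDER	(HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_SIZE	(1UL << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

int main(void)
{
	unsigned long addr  = 0x7f1234587654UL;		/* arbitrary example address */
	unsigned long haddr = addr & HPAGE_PMD_MASK;	/* round down to the PMD-aligned base */

	printf("HPAGE_PMD_SIZE = %lu bytes (order %d)\n", HPAGE_PMD_SIZE, HPAGE_PMD_ORDER);
	printf("addr  = %#lx\nhaddr = %#lx\n", addr, haddr);
	return 0;
}
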
191 return fls_long(orders) - 1; in highest_order()
202 * - For file vma, check if the linear page offset of vma is
203 * order-aligned within the file. The hugepage is
204 * guaranteed to be order-aligned within the file, but we must
205 * check that the order-aligned addresses in the VMA map to
206 * order-aligned offsets within the file, else the hugepage will
208 * - For all vmas, check if the haddr is in an aligned hugepage
219 if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff, in thp_vma_suitable_order()
226 if (haddr < vma->vm_start || haddr + hpage_size > vma->vm_end) in thp_vma_suitable_order()
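
The checks above implement the rule from the comment: for file VMAs, order-aligned virtual addresses must map to order-aligned offsets within the file, and for every VMA the aligned hugepage around the faulting address must fit entirely inside the VMA. A simplified userspace restatement, using a hypothetical fake_vma struct and made-up values rather than the real vm_area_struct:

/* Simplified userspace restatement; struct fake_vma and all values are hypothetical. */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT		12
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)	/* power-of-two 'a' only */
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

struct fake_vma {			/* hypothetical stand-in for vm_area_struct */
	unsigned long vm_start, vm_end, vm_pgoff;
	bool anonymous;
};

static bool suitable_order(const struct fake_vma *vma, unsigned long addr, int order)
{
	unsigned long hpage_size = PAGE_SIZE << order;
	unsigned long haddr;

	/* File VMA: order-aligned addresses must map to order-aligned file offsets. */
	if (!vma->anonymous &&
	    !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
			hpage_size >> PAGE_SHIFT))
		return false;

	/* Any VMA: the aligned hugepage around addr must lie entirely inside the VMA. */
	haddr = ALIGN_DOWN(addr, hpage_size);
	return haddr >= vma->vm_start && haddr + hpage_size <= vma->vm_end;
}

int main(void)
{
	struct fake_vma vma = { 0x200000, 0x800000, 0, false };

	printf("order-9 suitable at 0x400000: %d\n",
	       suitable_order(&vma, 0x400000, 9));
	return 0;
}
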
265 * thp_vma_allowable_orders - determine hugepage orders that are allowed for vma
267 * @vm_flags: use these vm_flags instead of vma->vm_flags
273 * bit at the corresponding bit position (bit-2 corresponds to order-2, bit-3
274 * corresponds to order-3, etc). Order-0 is never considered a hugepage order.
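
Callers treat that return value as a bitmask and try the largest allowed order first. A userspace sketch of that walk, mirroring the fls_long()-based highest_order() shown above; the example mask is made up:

/* Userspace sketch; the example orders mask is made up. */
#include <stdio.h>

#define BIT(n)	(1UL << (n))

/* Mirrors the kernel's fls_long()-based highest_order(): index of the top set bit. */
static int highest_order(unsigned long orders)
{
	return (int)(8 * sizeof(orders)) - 1 - __builtin_clzl(orders);
}

int main(void)
{
	unsigned long orders = BIT(9) | BIT(4) | BIT(2);	/* e.g. orders 9, 4 and 2 allowed */

	/* Try the largest allowed order first, falling back to smaller ones. */
	while (orders) {
		int order = highest_order(orders);

		printf("try order-%d\n", order);
		orders &= ~BIT(order);		/* clear this order and continue */
	}
	return 0;
}
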
324 test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags); in vma_thp_disabled()
354 * expects the same page that has been split to be locked when in split_huge_page()
357 * page to be split. in split_huge_page()
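
A minimal kernel-context sketch of that locking contract (not a standalone program; reference counting and failure handling trimmed): the caller locks the page, calls split_huge_page(), and unlocks the same page afterwards whether or not the split succeeded.

/*
 * Kernel-context sketch only; refcounting and error handling are trimmed.
 */
#include <linux/pagemap.h>
#include <linux/huge_mm.h>

static int try_split(struct page *page)
{
	int ret;

	lock_page(page);		/* split_huge_page() requires the page lock */
	ret = split_huge_page(page);	/* 0 on success, nonzero if it could not be split */
	unlock_page(page);		/* the same page is still locked on return, split or not */
	return ret;
}
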
383 int change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
388 change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, in change_huge_pud() argument
435 * folio_test_pmd_mappable - Can we map this folio with a PMD?
561 return -EINVAL; in hugepage_madvise()
568 return -EINVAL; in madvise_collapse()
638 static inline int change_huge_pud(struct mmu_gather *tlb, in change_huge_pud() argument
650 return split_huge_page_to_list_to_order(&folio->page, list, new_order); in split_folio_to_list_to_order()