Lines Matching +full:entry +full:- +full:address (mm/page_vma_mapped.c)
1 // SPDX-License-Identifier: GPL-2.0
21 if (pvmw->flags & PVMW_SYNC) { in map_pte()
23 pvmw->pte = pte_offset_map_lock(pvmw->vma->vm_mm, pvmw->pmd, in map_pte()
24 pvmw->address, &pvmw->ptl); in map_pte()
25 *ptlp = pvmw->ptl; in map_pte()
26 return !!pvmw->pte; in map_pte()
32 * in case *pvmw->pmd changes underneath us; so we need to in map_pte()
37 pvmw->pte = pte_offset_map_rw_nolock(pvmw->vma->vm_mm, pvmw->pmd, in map_pte()
38 pvmw->address, pmdvalp, ptlp); in map_pte()
39 if (!pvmw->pte) in map_pte()
42 ptent = ptep_get(pvmw->pte); in map_pte()
44 if (pvmw->flags & PVMW_MIGRATION) { in map_pte()
48 swp_entry_t entry; in map_pte() local
50 * Handle un-addressable ZONE_DEVICE memory. in map_pte()
53 * device page from the process address space. Such in map_pte()
55 * a special swap entry, nonetheless it still does in map_pte()
65 entry = pte_to_swp_entry(ptent); in map_pte()
66 if (!is_device_private_entry(entry) && in map_pte()
67 !is_device_exclusive_entry(entry)) in map_pte()
73 if (unlikely(!pmd_same(*pmdvalp, pmdp_get_lockless(pvmw->pmd)))) { in map_pte()
74 pte_unmap_unlock(pvmw->pte, *ptlp); in map_pte()
77 pvmw->ptl = *ptlp; in map_pte()
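The non-present-PTE handling visible in map_pte() above comes down to one test: keep the entry only if its swap entry encodes un-addressable ZONE_DEVICE memory. Below is a minimal sketch of just that test, reusing only helpers already shown in the listing (pte_to_swp_entry(), is_device_private_entry(), is_device_exclusive_entry(), declared in linux/swapops.h); the function name is made up for illustration, and this is a simplified view, not the full map_pte() logic, which also covers PVMW_SYNC, PVMW_MIGRATION and the pmd_same() revalidation.

#include <linux/swapops.h>

/*
 * Sketch only (simplified from the listing, not the kernel function):
 * a swap PTE matters to the walk when it describes un-addressable
 * ZONE_DEVICE memory, i.e. a device-private or device-exclusive entry.
 */
static bool pvmw_swap_pte_of_interest(pte_t ptent)
{
        swp_entry_t entry = pte_to_swp_entry(ptent);

        return is_device_private_entry(entry) ||
               is_device_exclusive_entry(entry);
}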
83 * check_pte - check if [@pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages) is
84 * mapped at @pvmw->pte
87 * @pte_nr: the number of small pages described by @pvmw->pte.
92 * @pvmw->pte may point to an empty PTE, a swap PTE or a PTE pointing to
95 * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a migration
96 * entry that points to [@pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages)
98 * If the PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points to
99 * [@pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages)
107 pte_t ptent = ptep_get(pvmw->pte); in check_pte()
109 if (pvmw->flags & PVMW_MIGRATION) { in check_pte()
110 swp_entry_t entry; in check_pte() local
113 entry = pte_to_swp_entry(ptent); in check_pte()
115 if (!is_migration_entry(entry) && in check_pte()
116 !is_device_exclusive_entry(entry)) in check_pte()
119 pfn = swp_offset_pfn(entry); in check_pte()
121 swp_entry_t entry; in check_pte() local
123 /* Handle un-addressable ZONE_DEVICE memory */ in check_pte()
124 entry = pte_to_swp_entry(ptent); in check_pte()
125 if (!is_device_private_entry(entry) && in check_pte()
126 !is_device_exclusive_entry(entry)) in check_pte()
129 pfn = swp_offset_pfn(entry); in check_pte()
137 if ((pfn + pte_nr - 1) < pvmw->pfn) in check_pte()
139 if (pfn > (pvmw->pfn + pvmw->nr_pages - 1)) in check_pte()
147 if ((pfn + HPAGE_PMD_NR - 1) < pvmw->pfn) in check_pmd()
149 if (pfn > pvmw->pfn + pvmw->nr_pages - 1) in check_pmd()
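Both check_pte() and check_pmd() end with the same interval test: the entry covers PFNs [pfn, pfn + nr) and the walk is looking for [pvmw->pfn, pvmw->pfn + pvmw->nr_pages), so the entry matches exactly when the two ranges overlap. A standalone sketch of that overlap test follows; the function and variable names are illustrative, not taken from the kernel.

#include <stdbool.h>
#include <stdio.h>

/* True when [pfn, pfn + nr) overlaps [start, start + count). */
static bool pfn_range_overlaps(unsigned long pfn, unsigned long nr,
                               unsigned long start, unsigned long count)
{
        if (pfn + nr - 1 < start)       /* entry ends before the target range */
                return false;
        if (pfn > start + count - 1)    /* entry starts after the target range */
                return false;
        return true;
}

int main(void)
{
        /* A 512-page (PMD-sized) entry at PFN 0x1000 vs. a 4-page target at 0x11fe. */
        printf("%d\n", pfn_range_overlaps(0x1000, 512, 0x11fe, 4));     /* 1 */
        /* The same entry vs. a target that starts one PFN past its end. */
        printf("%d\n", pfn_range_overlaps(0x1000, 512, 0x1200, 1));     /* 0 */
        return 0;
}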
156 pvmw->address = (pvmw->address + size) & ~(size - 1); in step_forward()
157 if (!pvmw->address) in step_forward()
158 pvmw->address = ULONG_MAX; in step_forward()
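step_forward() advances the walk address to the next size-aligned boundary (size is a power of two such as PMD_SIZE), and if the addition wraps past the end of the address space it pins the address to ULONG_MAX so the caller's loop terminates. A standalone sketch of the arithmetic; the 2 MiB boundary used below is an assumed example value, not a definition from the kernel headers.

#include <limits.h>
#include <stdio.h>

#define EXAMPLE_PMD_SIZE (2UL * 1024 * 1024)    /* assumed 2 MiB boundary for illustration */

/* Advance addr to the next size-aligned boundary above it (size is a power of two). */
static unsigned long step_forward_addr(unsigned long addr, unsigned long size)
{
        addr = (addr + size) & ~(size - 1);
        if (!addr)      /* the addition wrapped around to zero */
                addr = ULONG_MAX;
        return addr;
}

int main(void)
{
        /* An unaligned address rounds up to the start of the next 2 MiB region. */
        printf("%#lx\n", step_forward_addr(0x7f001234UL, EXAMPLE_PMD_SIZE));
        /* The last aligned address wraps, so the helper returns ULONG_MAX. */
        printf("%#lx\n", step_forward_addr(ULONG_MAX & ~(EXAMPLE_PMD_SIZE - 1),
                                           EXAMPLE_PMD_SIZE));
        return 0;
}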
162 * page_vma_mapped_walk - check if @pvmw->pfn is mapped in @pvmw->vma at
163 * @pvmw->address
164 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
167 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte point
168 * to the relevant page table entries. @pvmw->ptl is locked. @pvmw->address is
169 * adjusted if needed (for PTE-mapped THPs).
171 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped page
172 * (usually a THP). For a PTE-mapped THP, you should run page_vma_mapped_walk() in
175 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
176 * regardless of which page table level the page is mapped at. @pvmw->pmd is
180 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
187 struct vm_area_struct *vma = pvmw->vma; in page_vma_mapped_walk()
188 struct mm_struct *mm = vma->vm_mm; in page_vma_mapped_walk()
197 if (pvmw->pmd && !pvmw->pte) in page_vma_mapped_walk()
204 if (pvmw->pte) in page_vma_mapped_walk()
211 pvmw->pte = hugetlb_walk(vma, pvmw->address, size); in page_vma_mapped_walk()
212 if (!pvmw->pte) in page_vma_mapped_walk()
215 pvmw->ptl = huge_pte_lock(hstate, mm, pvmw->pte); in page_vma_mapped_walk()
222 if (pvmw->pte) in page_vma_mapped_walk()
226 pgd = pgd_offset(mm, pvmw->address); in page_vma_mapped_walk()
231 p4d = p4d_offset(pgd, pvmw->address); in page_vma_mapped_walk()
236 pud = pud_offset(p4d, pvmw->address); in page_vma_mapped_walk()
242 pvmw->pmd = pmd_offset(pud, pvmw->address); in page_vma_mapped_walk()
248 pmde = pmdp_get_lockless(pvmw->pmd); in page_vma_mapped_walk()
252 pvmw->ptl = pmd_lock(mm, pvmw->pmd); in page_vma_mapped_walk()
253 pmde = *pvmw->pmd; in page_vma_mapped_walk()
255 swp_entry_t entry; in page_vma_mapped_walk() local
258 !(pvmw->flags & PVMW_MIGRATION)) in page_vma_mapped_walk()
260 entry = pmd_to_swp_entry(pmde); in page_vma_mapped_walk()
261 if (!is_migration_entry(entry) || in page_vma_mapped_walk()
262 !check_pmd(swp_offset_pfn(entry), pvmw)) in page_vma_mapped_walk()
267 if (pvmw->flags & PVMW_MIGRATION) in page_vma_mapped_walk()
274 spin_unlock(pvmw->ptl); in page_vma_mapped_walk()
275 pvmw->ptl = NULL; in page_vma_mapped_walk()
282 if ((pvmw->flags & PVMW_SYNC) && in page_vma_mapped_walk()
283 thp_vma_suitable_order(vma, pvmw->address, in page_vma_mapped_walk()
285 (pvmw->nr_pages >= HPAGE_PMD_NR)) { in page_vma_mapped_walk()
286 spinlock_t *ptl = pmd_lock(mm, pvmw->pmd); in page_vma_mapped_walk()
294 if (!pvmw->pte) in page_vma_mapped_walk()
303 pvmw->address += PAGE_SIZE; in page_vma_mapped_walk()
304 if (pvmw->address >= end) in page_vma_mapped_walk()
307 if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) { in page_vma_mapped_walk()
308 if (pvmw->ptl) { in page_vma_mapped_walk()
309 spin_unlock(pvmw->ptl); in page_vma_mapped_walk()
310 pvmw->ptl = NULL; in page_vma_mapped_walk()
312 pte_unmap(pvmw->pte); in page_vma_mapped_walk()
313 pvmw->pte = NULL; in page_vma_mapped_walk()
316 pvmw->pte++; in page_vma_mapped_walk()
317 } while (pte_none(ptep_get(pvmw->pte))); in page_vma_mapped_walk()
319 if (!pvmw->ptl) { in page_vma_mapped_walk()
321 if (unlikely(!pmd_same(pmde, pmdp_get_lockless(pvmw->pmd)))) { in page_vma_mapped_walk()
322 pte_unmap_unlock(pvmw->pte, ptl); in page_vma_mapped_walk()
323 pvmw->pte = NULL; in page_vma_mapped_walk()
326 pvmw->ptl = ptl; in page_vma_mapped_walk()
329 } while (pvmw->address < end); in page_vma_mapped_walk()
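The kernel-doc above describes the intended calling pattern: set up the walk state, call page_vma_mapped_walk() in a loop, and distinguish a PTE-level hit (pvmw.pte set) from a PMD-mapped THP (pvmw.pmd set, pvmw.pte clear). The sketch below is modelled on that pattern and assumes the DEFINE_FOLIO_VMA_WALK() and page_vma_mapped_walk_done() helpers from linux/rmap.h; the function name and the counting done in the loop body are illustrative only, not an existing kernel function.

#include <linux/rmap.h>

/* Sketch: count how many times @folio is mapped in @vma starting at @address. */
static unsigned int count_folio_mappings(struct folio *folio,
                                         struct vm_area_struct *vma,
                                         unsigned long address)
{
        DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
        unsigned int mapped = 0;

        while (page_vma_mapped_walk(&pvmw)) {
                if (pvmw.pte) {
                        /* PTE-level mapping; pvmw.ptl is held at this point. */
                        mapped++;
                } else {
                        /* pvmw.pmd set, pvmw.pte clear: a PMD-mapped THP. */
                        mapped++;
                }
                /*
                 * A caller that wants to stop early must call
                 * page_vma_mapped_walk_done(&pvmw) before breaking out;
                 * on normal termination the walk has already dropped its
                 * lock and unmapped the PTE.
                 */
        }
        return mapped;
}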
336 * page_mapped_in_vma - check whether a page is really mapped in a VMA
340 * Return: The address the page is mapped at if the page is in the range
342 * outside the VMA or not present, returns -EFAULT.
356 pvmw.address = vma_address(vma, page_pgoff(folio, page), 1); in page_mapped_in_vma()
357 if (pvmw.address == -EFAULT) in page_mapped_in_vma()
360 return -EFAULT; in page_mapped_in_vma()
363 return pvmw.address; in page_mapped_in_vma()
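page_mapped_in_vma() wraps a single walk and reports either the virtual address at which the page is mapped or -EFAULT. A minimal caller sketch, assuming declarations from linux/rmap.h, linux/errno.h and linux/printk.h; the wrapper function name and the message text are illustrative only.

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/rmap.h>

/* Sketch: report where @page is mapped inside @vma, if it is mapped at all. */
static void report_page_mapping(struct page *page, struct vm_area_struct *vma)
{
        unsigned long address = page_mapped_in_vma(page, vma);

        if (address == -EFAULT) {
                /* The page is outside the VMA or not present in its page tables. */
                return;
        }
        pr_info("page mapped in vma at %#lx\n", address);
}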