// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}

static bool map_pte(struct page_vma_mapped_walk *pvmw, pmd_t *pmdvalp,
		    spinlock_t **ptlp)
{
	pte_t ptent;

	if (pvmw->flags & PVMW_SYNC) {
		/* Use the stricter lookup */
		pvmw->pte = pte_offset_map_lock(pvmw->vma->vm_mm, pvmw->pmd,
						pvmw->address, &pvmw->ptl);
		*ptlp = pvmw->ptl;
		return !!pvmw->pte;
	}

again:
	/*
	 * It is important to return the ptl corresponding to pte,
	 * in case *pvmw->pmd changes underneath us; so we need to
	 * return it even when choosing not to lock, in case caller
	 * proceeds to loop over next ptes, and finds a match later.
	 * Though, in most cases, page lock already protects this.
	 */
	pvmw->pte = pte_offset_map_rw_nolock(pvmw->vma->vm_mm, pvmw->pmd,
					     pvmw->address, pmdvalp, ptlp);
	if (!pvmw->pte)
		return false;

	ptent = ptep_get(pvmw->pte);

	if (pvmw->flags & PVMW_MIGRATION) {
		if (!is_swap_pte(ptent))
			return false;
	} else if (is_swap_pte(ptent)) {
		swp_entry_t entry;
		/*
		 * Handle un-addressable ZONE_DEVICE memory.
		 *
		 * We get here when we are trying to unmap a private
		 * device page from the process address space. Such a
		 * page is not CPU accessible, so it is mapped as a
		 * special swap entry; nonetheless it still counts as
		 * a valid regular mapping for the page (and is
		 * accounted as such in the page's mapcount).
		 *
		 * So handle this special case as if it was a normal
		 * page mapping, i.e. lock the CPU page table and
		 * return true.
		 *
		 * For more details on device private memory see HMM
		 * (include/linux/hmm.h or mm/hmm.c).
		 */
		entry = pte_to_swp_entry(ptent);
		if (!is_device_private_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;
	} else if (!pte_present(ptent)) {
		return false;
	}
	spin_lock(*ptlp);
	if (unlikely(!pmd_same(*pmdvalp, pmdp_get_lockless(pvmw->pmd)))) {
		pte_unmap_unlock(pvmw->pte, *ptlp);
		goto again;
	}
	pvmw->ptl = *ptlp;

	return true;
}
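
/*
 * A minimal sketch of the lockless-lookup pattern map_pte() relies on
 * (illustrative only; example_pte_is_interesting() is a hypothetical
 * filter, not an existing helper):
 *
 *	pte = pte_offset_map_rw_nolock(mm, pmd, addr, &pmdval, &ptl);
 *	if (!pte)
 *		return false;			// no PTE page mapped here
 *	if (!example_pte_is_interesting(ptep_get(pte)))
 *		return false;			// bail out without taking ptl
 *	spin_lock(ptl);
 *	if (!pmd_same(pmdval, pmdp_get_lockless(pmd))) {
 *		pte_unmap_unlock(pte, ptl);	// PTE page was freed/replaced
 *		goto again;			// remap and re-read
 *	}
 *	// ptl now pins both the PTE page and the entry we examined
 */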

/**
 * check_pte - check if [pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages) is
 * mapped at the @pvmw->pte
 * @pvmw: page_vma_mapped_walk struct; holds the pte to check and the pfn
 * range to check it against
 * @pte_nr: the number of small pages described by @pvmw->pte.
 *
 * page_vma_mapped_walk() found a place where the pfn range is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * @pvmw->pte may point to an empty PTE, a swap PTE or a PTE pointing to an
 * arbitrary page.
 *
 * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points into [pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages).
 *
 * If the PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points
 * into [pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages).
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw, unsigned long pte_nr)
{
	unsigned long pfn;
	pte_t ptent = ptep_get(pvmw->pte);

	if (pvmw->flags & PVMW_MIGRATION) {
		swp_entry_t entry;
		if (!is_swap_pte(ptent))
			return false;
		entry = pte_to_swp_entry(ptent);

		if (!is_migration_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;

		pfn = swp_offset_pfn(entry);
	} else if (is_swap_pte(ptent)) {
		swp_entry_t entry;

		/* Handle un-addressable ZONE_DEVICE memory */
		entry = pte_to_swp_entry(ptent);
		if (!is_device_private_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;

		pfn = swp_offset_pfn(entry);
	} else {
		if (!pte_present(ptent))
			return false;

		pfn = pte_pfn(ptent);
	}

	if ((pfn + pte_nr - 1) < pvmw->pfn)
		return false;
	if (pfn > (pvmw->pfn + pvmw->nr_pages - 1))
		return false;
	return true;
}

/* Returns true if the two ranges overlap. Careful to not overflow. */
static bool check_pmd(unsigned long pfn, struct page_vma_mapped_walk *pvmw)
{
	if ((pfn + HPAGE_PMD_NR - 1) < pvmw->pfn)
		return false;
	if (pfn > pvmw->pfn + pvmw->nr_pages - 1)
		return false;
	return true;
}
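
/*
 * The overflow-careful overlap test used by check_pte() and check_pmd(),
 * as a stand-alone sketch (ranges_overlap() is a hypothetical helper, not
 * part of this file): [a, a + na) and [b, b + nb) overlap iff neither
 * range ends before the other starts. Writing "a + na - 1" keeps the sum
 * inside the range, so it cannot wrap past ULONG_MAX the way "a + na"
 * could for a range ending at the last pfn.
 *
 *	static bool ranges_overlap(unsigned long a, unsigned long na,
 *				   unsigned long b, unsigned long nb)
 *	{
 *		return (a + na - 1) >= b && a <= (b + nb - 1);
 *	}
 */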

static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
{
	pvmw->address = (pvmw->address + size) & ~(size - 1);
	if (!pvmw->address)
		pvmw->address = ULONG_MAX;
}
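
/*
 * A worked example of the rounding in step_forward(), assuming 4K pages and
 * a 2 MiB PMD_SIZE: stepping forward from address 0x1234000 by PMD_SIZE
 * gives (0x1234000 + 0x200000) & ~0x1fffff == 0x1400000, the start of the
 * next PMD-aligned region. If the addition wraps to 0, the address is
 * clamped to ULONG_MAX so the caller's "address < end" loop terminates
 * rather than restarting the scan from address zero.
 */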

/**
 * page_vma_mapped_walk - check if @pvmw->pfn is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. pfn, nr_pages, vma, address
 * and flags must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to the relevant page table entries. @pvmw->ptl is locked.
 * @pvmw->address is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped page
 * (usually a THP). For a PTE-mapped THP, run page_vma_mapped_walk() in a loop
 * to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returns false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long end;
	spinlock_t *ptl;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

	/* The only possible pmd mapping has been handled on last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (unlikely(is_vm_hugetlb_page(vma))) {
		struct hstate *hstate = hstate_vma(vma);
		unsigned long size = huge_page_size(hstate);
		/* The only possible mapping was handled on last iteration */
		if (pvmw->pte)
			return not_found(pvmw);
		/*
		 * All callers that get here will already hold the
		 * i_mmap_rwsem. Therefore, no additional locks need to be
		 * taken before calling hugetlb_walk().
		 */
		pvmw->pte = hugetlb_walk(vma, pvmw->address, size);
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lock(hstate, mm, pvmw->pte);
		if (!check_pte(pvmw, pages_per_huge_page(hstate)))
			return not_found(pvmw);
		return true;
	}

	end = vma_address_end(pvmw);
	if (pvmw->pte)
		goto next_pte;
restart:
	do {
		pgd = pgd_offset(mm, pvmw->address);
		if (!pgd_present(*pgd)) {
			step_forward(pvmw, PGDIR_SIZE);
			continue;
		}
		p4d = p4d_offset(pgd, pvmw->address);
		if (!p4d_present(*p4d)) {
			step_forward(pvmw, P4D_SIZE);
			continue;
		}
		pud = pud_offset(p4d, pvmw->address);
		if (!pud_present(*pud)) {
			step_forward(pvmw, PUD_SIZE);
			continue;
		}

		pvmw->pmd = pmd_offset(pud, pvmw->address);
		/*
		 * Make sure the pmd value isn't cached in a register by the
		 * compiler and used as a stale value after we've observed a
		 * subsequent update.
		 */
		pmde = pmdp_get_lockless(pvmw->pmd);

		if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde) ||
		    (pmd_present(pmde) && pmd_devmap(pmde))) {
			pvmw->ptl = pmd_lock(mm, pvmw->pmd);
			pmde = *pvmw->pmd;
			if (!pmd_present(pmde)) {
				swp_entry_t entry;

				if (!thp_migration_supported() ||
				    !(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				entry = pmd_to_swp_entry(pmde);
				if (!is_migration_entry(entry) ||
				    !check_pmd(swp_offset_pfn(entry), pvmw))
					return not_found(pvmw);
				return true;
			}
			if (likely(pmd_trans_huge(pmde) || pmd_devmap(pmde))) {
				if (pvmw->flags & PVMW_MIGRATION)
					return not_found(pvmw);
				if (!check_pmd(pmd_pfn(pmde), pvmw))
					return not_found(pvmw);
				return true;
			}
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		} else if (!pmd_present(pmde)) {
			/*
			 * If PVMW_SYNC, take and drop THP pmd lock so that we
			 * cannot return prematurely, while zap_huge_pmd() has
			 * cleared *pmd but not decremented compound_mapcount().
			 */
			if ((pvmw->flags & PVMW_SYNC) &&
			    thp_vma_suitable_order(vma, pvmw->address,
						   PMD_ORDER) &&
			    (pvmw->nr_pages >= HPAGE_PMD_NR)) {
				spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);

				spin_unlock(ptl);
			}
			step_forward(pvmw, PMD_SIZE);
			continue;
		}
		if (!map_pte(pvmw, &pmde, &ptl)) {
			if (!pvmw->pte)
				goto restart;
			goto next_pte;
		}
this_pte:
		if (check_pte(pvmw, 1))
			return true;
next_pte:
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= end)
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				pte_unmap(pvmw->pte);
				pvmw->pte = NULL;
				goto restart;
			}
			pvmw->pte++;
		} while (pte_none(ptep_get(pvmw->pte)));

		if (!pvmw->ptl) {
			spin_lock(ptl);
			if (unlikely(!pmd_same(pmde, pmdp_get_lockless(pvmw->pmd)))) {
				pte_unmap_unlock(pvmw->pte, ptl);
				pvmw->pte = NULL;
				goto restart;
			}
			pvmw->ptl = ptl;
		}
		goto this_pte;
	} while (pvmw->address < end);

	return false;
}
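
/*
 * A minimal caller sketch (not taken from an actual caller; my_folio,
 * my_vma and my_addr are placeholders). Rmap walkers typically set up the
 * walk state with DEFINE_FOLIO_VMA_WALK() and loop until the walk returns
 * false, acting on pvmw.pte or pvmw.pmd at each hit:
 *
 *	DEFINE_FOLIO_VMA_WALK(pvmw, my_folio, my_vma, my_addr, 0);
 *
 *	while (page_vma_mapped_walk(&pvmw)) {
 *		if (!pvmw.pte) {
 *			// PMD-mapped THP: pvmw.pmd is valid, pvmw.ptl is held
 *			continue;
 *		}
 *		// one PTE mapping the folio: pvmw.pte and pvmw.ptl are valid
 *	}
 */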

#ifdef CONFIG_MEMORY_FAILURE
/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Return: The address the page is mapped at if the page is in the range
 * covered by the VMA and present in the page table. If the page is
 * outside the VMA or not present, returns -EFAULT.
 * Only valid for normal file or anonymous VMAs.
 */
unsigned long page_mapped_in_vma(const struct page *page,
				 struct vm_area_struct *vma)
{
	const struct folio *folio = page_folio(page);
	struct page_vma_mapped_walk pvmw = {
		.pfn = page_to_pfn(page),
		.nr_pages = 1,
		.vma = vma,
		.flags = PVMW_SYNC,
	};

	pvmw.address = vma_address(vma, page_pgoff(folio, page), 1);
	if (pvmw.address == -EFAULT)
		goto out;
	if (!page_vma_mapped_walk(&pvmw))
		return -EFAULT;
	page_vma_mapped_walk_done(&pvmw);
out:
	return pvmw.address;
}
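
/*
 * A hedged usage sketch (the surrounding context is hypothetical, but it
 * mirrors how memory-failure code consumes the return value): the result is
 * either a user virtual address or -EFAULT, so check before use.
 *
 *	unsigned long addr = page_mapped_in_vma(page, vma);
 *
 *	if (addr == -EFAULT)
 *		return;		// page is outside @vma or not present
 *	// addr is the virtual address at which @page is mapped in @vma
 */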
#endif