// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for The Physical Address Space
 *
 * Author: SeongJae Park <[email protected]>
 */

#define pr_fmt(fmt) "damon-pa: " fmt

#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/memory-tiers.h>
#include <linux/migrate.h>
#include <linux/mm_inline.h>

#include "../internal.h"
#include "ops-common.h"

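/*
 * rmap_walk() callback that clears the accessed bit of every PTE or PMD
 * mapping of @folio.  Called once per VMA that maps the folio; returning
 * true keeps the reverse-map walk going over the remaining VMAs.
 */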
static bool damon_folio_mkold_one(struct folio *folio,
		struct vm_area_struct *vma, unsigned long addr, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte)
			damon_ptep_mkold(pvmw.pte, vma, addr);
		else
			damon_pmdp_mkold(pvmw.pmd, vma, addr);
	}
	return true;
}

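/*
 * Clear the accessed bits of all mappings of @folio via a reverse-map walk.
 * Unmapped folios cannot be tracked through page tables, so fall back to
 * the folio idle flag.  Non-anonymous and KSM folios must be locked for
 * rmap_walk(); trylock is used so that access sampling does not block on a
 * contended folio lock.
 */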
static void damon_folio_mkold(struct folio *folio)
{
	struct rmap_walk_control rwc = {
		.rmap_one = damon_folio_mkold_one,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
		folio_set_idle(folio);
		return;
	}

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio))
		return;

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);
}

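/* Clear the accessed bits for the folio backing physical address @paddr. */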
static void damon_pa_mkold(unsigned long paddr)
{
	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));

	if (!folio)
		return;

	damon_folio_mkold(folio);
	folio_put(folio);
}

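/*
 * Pick a random physical address in the region and clear the accessed bit
 * of the page containing it, so that the next check can tell whether the
 * region was accessed during the sampling interval.
 */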
static void __damon_pa_prepare_access_check(struct damon_region *r)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_pa_mkold(r->sampling_addr);
}

static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			__damon_pa_prepare_access_check(r);
	}
}

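/*
 * rmap_walk() callback that checks whether any PTE or PMD mapping of @folio
 * was referenced.  The result is passed back through the boolean that @arg
 * points to, and the walk is terminated as soon as one access is found.
 */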
static bool damon_folio_young_one(struct folio *folio,
		struct vm_area_struct *vma, unsigned long addr, void *arg)
{
	bool *accessed = arg;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

	*accessed = false;
	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte) {
			*accessed = pte_young(ptep_get(pvmw.pte)) ||
				!folio_test_idle(folio) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
		} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			*accessed = pmd_young(pmdp_get(pvmw.pmd)) ||
				!folio_test_idle(folio) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
#else
			WARN_ON_ONCE(1);
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
		}
		if (*accessed) {
			page_vma_mapped_walk_done(&pvmw);
			break;
		}
	}

	/* If accessed, returning false stops the rmap walk */
	return *accessed == false;
}

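/*
 * Whether @folio was accessed since its accessed bits were last cleared.
 * For unmapped folios, the idle flag set by damon_folio_mkold() is the only
 * available signal.  The same folio-lock rule as in damon_folio_mkold()
 * applies.
 */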
static bool damon_folio_young(struct folio *folio)
{
	bool accessed = false;
	struct rmap_walk_control rwc = {
		.arg = &accessed,
		.rmap_one = damon_folio_young_one,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!folio_mapped(folio) || !folio_raw_mapping(folio))
		return !folio_test_idle(folio);

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio))
		return false;

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);

	return accessed;
}

static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)
{
	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
	bool accessed;

	if (!folio)
		return false;

	accessed = damon_folio_young(folio);
	*folio_sz = folio_size(folio);
	folio_put(folio);
	return accessed;
}

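/*
 * Check whether the folio of the region's sampling address was accessed,
 * and update the region's access rate accordingly.  The result of the last
 * check is kept in static variables, so consecutive regions whose sampling
 * addresses fall into the same folio can reuse it instead of walking the
 * rmap again.
 */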
static void __damon_pa_check_access(struct damon_region *r,
		struct damon_attrs *attrs)
{
	static unsigned long last_addr;
	static unsigned long last_folio_sz = PAGE_SIZE;
	static bool last_accessed;

	/* If the region is in the last checked page, reuse the result */
	if (ALIGN_DOWN(last_addr, last_folio_sz) ==
				ALIGN_DOWN(r->sampling_addr, last_folio_sz)) {
		damon_update_region_access_rate(r, last_accessed, attrs);
		return;
	}

	last_accessed = damon_pa_young(r->sampling_addr, &last_folio_sz);
	damon_update_region_access_rate(r, last_accessed, attrs);

	last_addr = r->sampling_addr;
}

static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t) {
			__damon_pa_check_access(r, &ctx->attrs);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
		}
	}

	return max_nr_accesses;
}

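/*
 * Whether @folio matches @filter.  Note that matching a YOUNG type filter
 * consumes the access information: a folio found young is made old again,
 * so the next check starts fresh.
 */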
static bool damos_pa_filter_match(struct damos_filter *filter,
		struct folio *folio)
{
	bool matched = false;
	struct mem_cgroup *memcg;

	switch (filter->type) {
	case DAMOS_FILTER_TYPE_ANON:
		matched = folio_test_anon(folio);
		break;
	case DAMOS_FILTER_TYPE_MEMCG:
		rcu_read_lock();
		memcg = folio_memcg_check(folio);
		if (!memcg)
			matched = false;
		else
			matched = filter->memcg_id == mem_cgroup_id(memcg);
		rcu_read_unlock();
		break;
	case DAMOS_FILTER_TYPE_YOUNG:
		matched = damon_folio_young(folio);
		if (matched)
			damon_folio_mkold(folio);
		break;
	default:
		break;
	}

	return matched == filter->matching;
}

/*
 * damos_pa_filter_out - Return true if the folio should be filtered out.
 */
static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio)
{
	struct damos_filter *filter;

	if (scheme->core_filters_allowed)
		return false;

	damos_for_each_filter(filter, scheme) {
		if (damos_pa_filter_match(filter, folio))
			return !filter->allow;
	}
	return false;
}

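/*
 * Whether the DAMOS action should skip @folio.  NULL folios are skipped,
 * as is the folio that the scheme was last applied to (s->last_applied),
 * which keeps a large folio that spans the boundary of two regions from
 * having the action applied to it twice.
 */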
static bool damon_pa_invalid_damos_folio(struct folio *folio, struct damos *s)
{
	if (!folio)
		return true;
	if (folio == s->last_applied) {
		folio_put(folio);
		return true;
	}
	return false;
}

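/*
 * DAMOS_PAGEOUT: isolate the region's filter-passing folios and hand them
 * to reclaim_pages().  Unless the scheme already has a YOUNG type filter,
 * a temporary young-folio-rejecting filter is installed first, so that
 * pages accessed after the region stats were gathered are not paged out.
 * Returns the number of paged-out bytes.
 */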
static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s,
		unsigned long *sz_filter_passed)
{
	unsigned long addr, applied;
	LIST_HEAD(folio_list);
	bool install_young_filter = true;
	struct damos_filter *filter;
	struct folio *folio;

	/* By default, re-check access at page granularity before paging out */
	damos_for_each_filter(filter, s) {
		if (filter->type == DAMOS_FILTER_TYPE_YOUNG) {
			install_young_filter = false;
			break;
		}
	}
	if (install_young_filter) {
		filter = damos_new_filter(
				DAMOS_FILTER_TYPE_YOUNG, true, false);
		if (!filter)
			return 0;
		damos_add_filter(s, filter);
	}

	addr = r->ar.start;
	while (addr < r->ar.end) {
		folio = damon_get_folio(PHYS_PFN(addr));
		if (damon_pa_invalid_damos_folio(folio, s)) {
			addr += PAGE_SIZE;
			continue;
		}

		if (damos_pa_filter_out(s, folio))
			goto put_folio;
		else
			*sz_filter_passed += folio_size(folio);

		folio_clear_referenced(folio);
		folio_test_clear_young(folio);
		if (!folio_isolate_lru(folio))
			goto put_folio;
		if (folio_test_unevictable(folio))
			folio_putback_lru(folio);
		else
			list_add(&folio->lru, &folio_list);
put_folio:
		addr += folio_size(folio);
		folio_put(folio);
	}
	if (install_young_filter)
		damos_destroy_filter(filter);
	applied = reclaim_pages(&folio_list);
	cond_resched();
	s->last_applied = folio;
	return applied * PAGE_SIZE;
}

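/*
 * Common implementation of DAMOS_LRU_PRIO and DAMOS_LRU_DEPRIO: mark the
 * region's filter-passing folios accessed (@mark_accessed == true) or
 * deactivate them, and return the number of bytes the operation was
 * applied to.
 */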
static inline unsigned long damon_pa_mark_accessed_or_deactivate(
		struct damon_region *r, struct damos *s, bool mark_accessed,
		unsigned long *sz_filter_passed)
{
	unsigned long addr, applied = 0;
	struct folio *folio;

	addr = r->ar.start;
	while (addr < r->ar.end) {
		folio = damon_get_folio(PHYS_PFN(addr));
		if (damon_pa_invalid_damos_folio(folio, s)) {
			addr += PAGE_SIZE;
			continue;
		}

		if (damos_pa_filter_out(s, folio))
			goto put_folio;
		else
			*sz_filter_passed += folio_size(folio);

		if (mark_accessed)
			folio_mark_accessed(folio);
		else
			folio_deactivate(folio);
		applied += folio_nr_pages(folio);
put_folio:
		addr += folio_size(folio);
		folio_put(folio);
	}
	s->last_applied = folio;
	return applied * PAGE_SIZE;
}

static unsigned long damon_pa_mark_accessed(struct damon_region *r,
	struct damos *s, unsigned long *sz_filter_passed)
{
	return damon_pa_mark_accessed_or_deactivate(r, s, true,
			sz_filter_passed);
}

static unsigned long damon_pa_deactivate_pages(struct damon_region *r,
	struct damos *s, unsigned long *sz_filter_passed)
{
	return damon_pa_mark_accessed_or_deactivate(r, s, false,
			sz_filter_passed);
}

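/*
 * Migrate the folios of @migrate_folios, which the caller has collected
 * from the node of @pgdat, to @target_nid.  Folios that could not be
 * migrated are left on the list.  Returns the number of migrated pages as
 * reported by migrate_pages().
 */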
static unsigned int __damon_pa_migrate_folio_list(
		struct list_head *migrate_folios, struct pglist_data *pgdat,
		int target_nid)
{
	unsigned int nr_succeeded = 0;
	nodemask_t allowed_mask = NODE_MASK_NONE;
	struct migration_target_control mtc = {
		/*
		 * Allocate from 'node', or fail quickly and quietly.
		 * When this happens, 'page' will likely just be discarded
		 * instead of migrated.
		 */
		.gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) |
			__GFP_NOWARN | __GFP_NOMEMALLOC | GFP_NOWAIT,
		.nid = target_nid,
		.nmask = &allowed_mask
	};

	if (pgdat->node_id == target_nid || target_nid == NUMA_NO_NODE)
		return 0;

	if (list_empty(migrate_folios))
		return 0;

	/* Migration ignores all cpuset and mempolicy settings */
	migrate_pages(migrate_folios, alloc_migrate_folio, NULL,
		      (unsigned long)&mtc, MIGRATE_ASYNC, MR_DAMON,
		      &nr_succeeded);

	return nr_succeeded;
}

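/*
 * Try to migrate the folios of @folio_list to @target_nid.  Folios whose
 * lock cannot be taken immediately are skipped, and every folio that ends
 * up not migrated is put back onto its LRU list.  Returns the number of
 * migrated pages.
 */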
static unsigned int damon_pa_migrate_folio_list(struct list_head *folio_list,
						struct pglist_data *pgdat,
						int target_nid)
{
	unsigned int nr_migrated = 0;
	struct folio *folio;
	LIST_HEAD(ret_folios);
	LIST_HEAD(migrate_folios);

	while (!list_empty(folio_list)) {
		struct folio *folio;

		cond_resched();

		folio = lru_to_folio(folio_list);
		list_del(&folio->lru);

		if (!folio_trylock(folio))
			goto keep;

		/* Relocate its contents to another node. */
		list_add(&folio->lru, &migrate_folios);
		folio_unlock(folio);
		continue;
keep:
		list_add(&folio->lru, &ret_folios);
	}
	/* 'folio_list' is always empty here */

	/* Migrate folios selected for migration */
	nr_migrated += __damon_pa_migrate_folio_list(
			&migrate_folios, pgdat, target_nid);
	/*
	 * Folios that could not be migrated are still in @migrate_folios.
	 * Add those back on @folio_list.
	 */
	if (!list_empty(&migrate_folios))
		list_splice_init(&migrate_folios, folio_list);

	try_to_unmap_flush();

	list_splice(&ret_folios, folio_list);

	while (!list_empty(folio_list)) {
		folio = lru_to_folio(folio_list);
		list_del(&folio->lru);
		folio_putback_lru(folio);
	}

	return nr_migrated;
}

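/*
 * Migrate @folio_list to @target_nid, batching the folios by their source
 * node since damon_pa_migrate_folio_list() works on one node at a time.
 * memalloc_noreclaim_save() keeps the allocation of migration targets from
 * entering reclaim.
 */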
static unsigned long damon_pa_migrate_pages(struct list_head *folio_list,
					    int target_nid)
{
	int nid;
	unsigned long nr_migrated = 0;
	LIST_HEAD(node_folio_list);
	unsigned int noreclaim_flag;

	if (list_empty(folio_list))
		return nr_migrated;

	noreclaim_flag = memalloc_noreclaim_save();

	nid = folio_nid(lru_to_folio(folio_list));
	do {
		struct folio *folio = lru_to_folio(folio_list);

		if (nid == folio_nid(folio)) {
			list_move(&folio->lru, &node_folio_list);
			continue;
		}

		nr_migrated += damon_pa_migrate_folio_list(&node_folio_list,
							   NODE_DATA(nid),
							   target_nid);
		nid = folio_nid(lru_to_folio(folio_list));
	} while (!list_empty(folio_list));

	nr_migrated += damon_pa_migrate_folio_list(&node_folio_list,
						   NODE_DATA(nid),
						   target_nid);

	memalloc_noreclaim_restore(noreclaim_flag);

	return nr_migrated;
}

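/*
 * DAMOS_MIGRATE_{HOT,COLD}: isolate the region's filter-passing folios
 * from their LRU lists and migrate them to the scheme's target node.
 * Returns the number of migrated bytes.
 */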
static unsigned long damon_pa_migrate(struct damon_region *r, struct damos *s,
		unsigned long *sz_filter_passed)
{
	unsigned long addr, applied;
	LIST_HEAD(folio_list);
	struct folio *folio;

	addr = r->ar.start;
	while (addr < r->ar.end) {
		folio = damon_get_folio(PHYS_PFN(addr));
		if (damon_pa_invalid_damos_folio(folio, s)) {
			addr += PAGE_SIZE;
			continue;
		}

		if (damos_pa_filter_out(s, folio))
			goto put_folio;
		else
			*sz_filter_passed += folio_size(folio);

		if (!folio_isolate_lru(folio))
			goto put_folio;
		list_add(&folio->lru, &folio_list);
put_folio:
		addr += folio_size(folio);
		folio_put(folio);
	}
	applied = damon_pa_migrate_pages(&folio_list, s->target_nid);
	cond_resched();
	s->last_applied = folio;
	return applied * PAGE_SIZE;
}

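/* Whether at least one filter is installed for @s. */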
static bool damon_pa_scheme_has_filter(struct damos *s)
{
	struct damos_filter *f;

	damos_for_each_filter(f, s)
		return true;
	return false;
}

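/*
 * DAMOS_STAT: apply no action, but walk the region to account the bytes
 * that pass the scheme's filters in *sz_filter_passed.  If no filter is
 * installed, there is nothing to measure.
 */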
static unsigned long damon_pa_stat(struct damon_region *r, struct damos *s,
		unsigned long *sz_filter_passed)
{
	unsigned long addr;
	struct folio *folio;

	if (!damon_pa_scheme_has_filter(s))
		return 0;

	addr = r->ar.start;
	while (addr < r->ar.end) {
		folio = damon_get_folio(PHYS_PFN(addr));
		if (damon_pa_invalid_damos_folio(folio, s)) {
			addr += PAGE_SIZE;
			continue;
		}

		if (!damos_pa_filter_out(s, folio))
			*sz_filter_passed += folio_size(folio);
		addr += folio_size(folio);
		folio_put(folio);
	}
	s->last_applied = folio;
	return 0;
}

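/* Apply @scheme to the given region, dispatching on the DAMOS action. */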
static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme, unsigned long *sz_filter_passed)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_pa_pageout(r, scheme, sz_filter_passed);
	case DAMOS_LRU_PRIO:
		return damon_pa_mark_accessed(r, scheme, sz_filter_passed);
	case DAMOS_LRU_DEPRIO:
		return damon_pa_deactivate_pages(r, scheme, sz_filter_passed);
	case DAMOS_MIGRATE_HOT:
	case DAMOS_MIGRATE_COLD:
		return damon_pa_migrate(r, scheme, sz_filter_passed);
	case DAMOS_STAT:
		return damon_pa_stat(r, scheme, sz_filter_passed);
	default:
		/* DAMOS actions that are not yet supported by 'paddr'. */
		break;
	}
	return 0;
}

static int damon_pa_scheme_score(struct damon_ctx *context,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_cold_score(context, r, scheme);
	case DAMOS_LRU_PRIO:
		return damon_hot_score(context, r, scheme);
	case DAMOS_LRU_DEPRIO:
		return damon_cold_score(context, r, scheme);
	case DAMOS_MIGRATE_HOT:
		return damon_hot_score(context, r, scheme);
	case DAMOS_MIGRATE_COLD:
		return damon_cold_score(context, r, scheme);
	default:
		break;
	}

	return DAMOS_MAX_SCORE;
}

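/* Register the operations set for the physical address space to DAMON. */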
static int __init damon_pa_initcall(void)
{
	struct damon_operations ops = {
		.id = DAMON_OPS_PADDR,
		.init = NULL,
		.update = NULL,
		.prepare_access_checks = damon_pa_prepare_access_checks,
		.check_accesses = damon_pa_check_accesses,
		.reset_aggregated = NULL,
		.target_valid = NULL,
		.cleanup = NULL,
		.apply_scheme = damon_pa_apply_scheme,
		.get_scheme_score = damon_pa_scheme_score,
	};

	return damon_register_ops(&ops);
}

subsys_initcall(damon_pa_initcall);