// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for The Physical Address Space
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#define pr_fmt(fmt) "damon-pa: " fmt

#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/memory-tiers.h>
#include <linux/migrate.h>
#include <linux/mm_inline.h>

#include "../internal.h"
#include "ops-common.h"

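/*
 * rmap walk callback for clearing the accessed bit of @folio's mappings in
 * @vma.  Clears every mapping PTE or PMD found by the page table walk, and
 * always returns true so the rmap walk continues to other VMAs.
 */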
static bool damon_folio_mkold_one(struct folio *folio,
		struct vm_area_struct *vma, unsigned long addr, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte)
			damon_ptep_mkold(pvmw.pte, vma, addr);
		else
			damon_pmdp_mkold(pvmw.pmd, vma, addr);
	}
	return true;
}

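/*
 * Clear the accessed bit of all page table entries mapping @folio, using an
 * rmap walk.  Unmapped folios are just marked idle.  Folios that are not
 * anonymous, or are KSM folios, need the folio lock for the walk; if the lock
 * cannot be taken without blocking, the folio is silently skipped.
 */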
static void damon_folio_mkold(struct folio *folio)
{
	struct rmap_walk_control rwc = {
		.rmap_one = damon_folio_mkold_one,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
		folio_set_idle(folio);
		return;
	}

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio))
		return;

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);
}

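/*
 * Clear the accessed bit of the folio that contains the physical address
 * @paddr, if such a folio exists.
 */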
static void damon_pa_mkold(unsigned long paddr)
{
	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));

	if (!folio)
		return;

	damon_folio_mkold(folio);
	folio_put(folio);
}

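/*
 * Prepare the next access check of @r by picking a random sampling address in
 * the region and clearing the accessed bit of the folio containing it.
 */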
static void __damon_pa_prepare_access_check(struct damon_region *r)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_pa_mkold(r->sampling_addr);
}

static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			__damon_pa_prepare_access_check(r);
	}
}

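/*
 * rmap walk callback for checking whether @folio was accessed through @vma.
 * Sets the boolean pointed to by @arg and stops the walk early once an
 * accessed (young) mapping is found.
 */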
static bool damon_folio_young_one(struct folio *folio,
		struct vm_area_struct *vma, unsigned long addr, void *arg)
{
	bool *accessed = arg;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
	pte_t pte;

	*accessed = false;
	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte) {
			pte = ptep_get(pvmw.pte);

			/*
			 * PFN swap PTEs, such as device-exclusive ones, that
			 * actually map pages are "old" from a CPU perspective.
			 * The MMU notifier takes care of any device aspects.
			 */
			*accessed = (pte_present(pte) && pte_young(pte)) ||
				!folio_test_idle(folio) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
		} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			*accessed = pmd_young(pmdp_get(pvmw.pmd)) ||
				!folio_test_idle(folio) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
#else
			WARN_ON_ONCE(1);
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
		}
		if (*accessed) {
			page_vma_mapped_walk_done(&pvmw);
			break;
		}
	}

	/* If accessed, stop walking */
	return *accessed == false;
}

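/*
 * Check whether @folio has been accessed since its accessed bits were last
 * cleared, using an rmap walk over its mappings together with the page idle
 * flag and MMU notifier checks.  Unmapped folios are reported based on the
 * idle flag alone.
 */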
static bool damon_folio_young(struct folio *folio)
{
	bool accessed = false;
	struct rmap_walk_control rwc = {
		.arg = &accessed,
		.rmap_one = damon_folio_young_one,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
		if (folio_test_idle(folio))
			return false;
		else
			return true;
	}

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio))
		return false;

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);

	return accessed;
}

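/*
 * Check whether the folio containing the physical address @paddr was
 * accessed, and report the folio size via @folio_sz so that the caller can
 * reuse the result for other addresses in the same folio.
 */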
static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)
{
	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
	bool accessed;

	if (!folio)
		return false;

	accessed = damon_folio_young(folio);
	*folio_sz = folio_size(folio);
	folio_put(folio);
	return accessed;
}

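/*
 * Check access to the sampling address of @r and update the region's access
 * rate.  The result of the previous check is kept in static variables and
 * reused when the current sampling address falls in the same folio, to avoid
 * redundant rmap walks.
 */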
static void __damon_pa_check_access(struct damon_region *r,
		struct damon_attrs *attrs)
{
	static unsigned long last_addr;
	static unsigned long last_folio_sz = PAGE_SIZE;
	static bool last_accessed;

	/* If the region is in the last checked page, reuse the result */
	if (ALIGN_DOWN(last_addr, last_folio_sz) ==
				ALIGN_DOWN(r->sampling_addr, last_folio_sz)) {
		damon_update_region_access_rate(r, last_accessed, attrs);
		return;
	}

	last_accessed = damon_pa_young(r->sampling_addr, &last_folio_sz);
	damon_update_region_access_rate(r, last_accessed, attrs);

	last_addr = r->sampling_addr;
}

static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t) {
			__damon_pa_check_access(r, &ctx->attrs);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
		}
	}

	return max_nr_accesses;
}

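/*
 * Return whether @folio matches @filter, honoring the filter's 'matching'
 * setting.  Anonymity, memory cgroup, and recent access (young) filter types
 * are supported here; for the young type, a positive result also clears the
 * accessed bits so the next check starts fresh.
 */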
static bool damos_pa_filter_match(struct damos_filter *filter,
		struct folio *folio)
{
	bool matched = false;
	struct mem_cgroup *memcg;

	switch (filter->type) {
	case DAMOS_FILTER_TYPE_ANON:
		matched = folio_test_anon(folio);
		break;
	case DAMOS_FILTER_TYPE_MEMCG:
		rcu_read_lock();
		memcg = folio_memcg_check(folio);
		if (!memcg)
			matched = false;
		else
			matched = filter->memcg_id == mem_cgroup_id(memcg);
		rcu_read_unlock();
		break;
	case DAMOS_FILTER_TYPE_YOUNG:
		matched = damon_folio_young(folio);
		if (matched)
			damon_folio_mkold(folio);
		break;
	default:
		break;
	}

	return matched == filter->matching;
}

/*
 * damos_pa_filter_out - Return true if the folio should be filtered out.
 */
static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio)
{
	struct damos_filter *filter;

	if (scheme->core_filters_allowed)
		return false;

	damos_for_each_filter(filter, scheme) {
		if (damos_pa_filter_match(filter, folio))
			return !filter->allow;
	}
	return false;
}

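/*
 * Apply DAMOS_PAGEOUT to @r: unless @s already has a young-type filter, a
 * temporary one is installed so that recently accessed folios are excluded.
 * Folios passing the filters are isolated from the LRU and handed to
 * reclaim_pages().  Returns the reclaimed size in bytes.
 */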
static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s,
		unsigned long *sz_filter_passed)
{
	unsigned long addr, applied;
	LIST_HEAD(folio_list);
	bool install_young_filter = true;
	struct damos_filter *filter;

	/* check access at the page level again by default */
	damos_for_each_filter(filter, s) {
		if (filter->type == DAMOS_FILTER_TYPE_YOUNG) {
			install_young_filter = false;
			break;
		}
	}
	if (install_young_filter) {
		filter = damos_new_filter(
				DAMOS_FILTER_TYPE_YOUNG, true, false);
		if (!filter)
			return 0;
		damos_add_filter(s, filter);
	}

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct folio *folio = damon_get_folio(PHYS_PFN(addr));

		if (!folio)
			continue;

		if (damos_pa_filter_out(s, folio))
			goto put_folio;
		else
			*sz_filter_passed += folio_size(folio);

		folio_clear_referenced(folio);
		folio_test_clear_young(folio);
		if (!folio_isolate_lru(folio))
			goto put_folio;
		if (folio_test_unevictable(folio))
			folio_putback_lru(folio);
		else
			list_add(&folio->lru, &folio_list);
put_folio:
		folio_put(folio);
	}
	if (install_young_filter)
		damos_destroy_filter(filter);
	applied = reclaim_pages(&folio_list);
	cond_resched();
	return applied * PAGE_SIZE;
}

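/*
 * Common worker for DAMOS_LRU_PRIO and DAMOS_LRU_DEPRIO: for each folio of
 * the region that passes the scheme's filters, either mark it accessed or
 * deactivate it, depending on @mark_accessed.  Returns the handled size in
 * bytes.
 */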
static inline unsigned long damon_pa_mark_accessed_or_deactivate(
		struct damon_region *r, struct damos *s, bool mark_accessed,
		unsigned long *sz_filter_passed)
{
	unsigned long addr, applied = 0;

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct folio *folio = damon_get_folio(PHYS_PFN(addr));

		if (!folio)
			continue;

		if (damos_pa_filter_out(s, folio))
			goto put_folio;
		else
			*sz_filter_passed += folio_size(folio);

		if (mark_accessed)
			folio_mark_accessed(folio);
		else
			folio_deactivate(folio);
		applied += folio_nr_pages(folio);
put_folio:
		folio_put(folio);
	}
	return applied * PAGE_SIZE;
}

static unsigned long damon_pa_mark_accessed(struct damon_region *r,
	struct damos *s, unsigned long *sz_filter_passed)
{
	return damon_pa_mark_accessed_or_deactivate(r, s, true,
			sz_filter_passed);
}

static unsigned long damon_pa_deactivate_pages(struct damon_region *r,
	struct damos *s, unsigned long *sz_filter_passed)
{
	return damon_pa_mark_accessed_or_deactivate(r, s, false,
			sz_filter_passed);
}

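/*
 * Migrate the folios on @migrate_folios, which belong to @pgdat, to
 * @target_nid.  The destination pages are allocated with a non-blocking,
 * non-reclaiming GFP mask, so migration may quietly fail under memory
 * pressure.  Returns the number of pages migrated, as reported by
 * migrate_pages().
 */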
static unsigned int __damon_pa_migrate_folio_list(
		struct list_head *migrate_folios, struct pglist_data *pgdat,
		int target_nid)
{
	unsigned int nr_succeeded = 0;
	nodemask_t allowed_mask = NODE_MASK_NONE;
	struct migration_target_control mtc = {
		/*
		 * Allocate from 'node', or fail quickly and quietly.
		 * When this happens, 'page' will likely just be discarded
		 * instead of migrated.
		 */
		.gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) |
			__GFP_NOWARN | __GFP_NOMEMALLOC | GFP_NOWAIT,
		.nid = target_nid,
		.nmask = &allowed_mask
	};

	if (pgdat->node_id == target_nid || target_nid == NUMA_NO_NODE)
		return 0;

	if (list_empty(migrate_folios))
		return 0;

	/* Migration ignores all cpuset and mempolicy settings */
	migrate_pages(migrate_folios, alloc_migrate_folio, NULL,
		      (unsigned long)&mtc, MIGRATE_ASYNC, MR_DAMON,
		      &nr_succeeded);

	return nr_succeeded;
}

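/*
 * Migrate folios of a single source node (@pgdat) on @folio_list to
 * @target_nid.  Folios that cannot be locked, or that fail to migrate, are
 * put back to the LRU before returning.  Returns the number of migrated
 * pages.
 */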
static unsigned int damon_pa_migrate_folio_list(struct list_head *folio_list,
						struct pglist_data *pgdat,
						int target_nid)
{
	unsigned int nr_migrated = 0;
	struct folio *folio;
	LIST_HEAD(ret_folios);
	LIST_HEAD(migrate_folios);

	while (!list_empty(folio_list)) {
		struct folio *folio;

		cond_resched();

		folio = lru_to_folio(folio_list);
		list_del(&folio->lru);

		if (!folio_trylock(folio))
			goto keep;

		/* Relocate its contents to another node. */
		list_add(&folio->lru, &migrate_folios);
		folio_unlock(folio);
		continue;
keep:
		list_add(&folio->lru, &ret_folios);
	}
	/* 'folio_list' is always empty here */

	/* Migrate folios selected for migration */
	nr_migrated += __damon_pa_migrate_folio_list(
			&migrate_folios, pgdat, target_nid);
	/*
	 * Folios that could not be migrated are still in @migrate_folios.  Add
	 * those back on @folio_list
	 */
	if (!list_empty(&migrate_folios))
		list_splice_init(&migrate_folios, folio_list);

	try_to_unmap_flush();

	list_splice(&ret_folios, folio_list);

	while (!list_empty(folio_list)) {
		folio = lru_to_folio(folio_list);
		list_del(&folio->lru);
		folio_putback_lru(folio);
	}

	return nr_migrated;
}

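/*
 * Migrate all folios on @folio_list to @target_nid, batching them by source
 * node since the migration helper works on one source node at a time.
 * Reclaim is disabled for the duration.  Returns the total number of
 * migrated pages.
 */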
static unsigned long damon_pa_migrate_pages(struct list_head *folio_list,
					    int target_nid)
{
	int nid;
	unsigned long nr_migrated = 0;
	LIST_HEAD(node_folio_list);
	unsigned int noreclaim_flag;

	if (list_empty(folio_list))
		return nr_migrated;

	noreclaim_flag = memalloc_noreclaim_save();

	nid = folio_nid(lru_to_folio(folio_list));
	do {
		struct folio *folio = lru_to_folio(folio_list);

		if (nid == folio_nid(folio)) {
			list_move(&folio->lru, &node_folio_list);
			continue;
		}

		nr_migrated += damon_pa_migrate_folio_list(&node_folio_list,
							   NODE_DATA(nid),
							   target_nid);
		nid = folio_nid(lru_to_folio(folio_list));
	} while (!list_empty(folio_list));

	nr_migrated += damon_pa_migrate_folio_list(&node_folio_list,
						   NODE_DATA(nid),
						   target_nid);

	memalloc_noreclaim_restore(noreclaim_flag);

	return nr_migrated;
}

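/*
 * Apply DAMOS_MIGRATE_{HOT,COLD} to @r: isolate the folios that pass the
 * scheme's filters and migrate them to the scheme's target node.  Returns the
 * migrated size in bytes.
 */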
static unsigned long damon_pa_migrate(struct damon_region *r, struct damos *s,
		unsigned long *sz_filter_passed)
{
	unsigned long addr, applied;
	LIST_HEAD(folio_list);

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct folio *folio = damon_get_folio(PHYS_PFN(addr));

		if (!folio)
			continue;

		if (damos_pa_filter_out(s, folio))
			goto put_folio;
		else
			*sz_filter_passed += folio_size(folio);

		if (!folio_isolate_lru(folio))
			goto put_folio;
		list_add(&folio->lru, &folio_list);
put_folio:
		folio_put(folio);
	}
	applied = damon_pa_migrate_pages(&folio_list, s->target_nid);
	cond_resched();
	return applied * PAGE_SIZE;
}

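/* Return whether @s has at least one filter installed. */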
static bool damon_pa_scheme_has_filter(struct damos *s)
{
	struct damos_filter *f;

	damos_for_each_filter(f, s)
		return true;
	return false;
}

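/*
 * DAMOS_STAT for @r: walk the folios of the region only to account how many
 * bytes pass the scheme's filters, via @sz_filter_passed.  No action is
 * applied, so zero is always returned as the applied size.
 */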
static unsigned long damon_pa_stat(struct damon_region *r, struct damos *s,
		unsigned long *sz_filter_passed)
{
	unsigned long addr;
	LIST_HEAD(folio_list);

	if (!damon_pa_scheme_has_filter(s))
		return 0;

	addr = r->ar.start;
	while (addr < r->ar.end) {
		struct folio *folio = damon_get_folio(PHYS_PFN(addr));

		if (!folio) {
			addr += PAGE_SIZE;
			continue;
		}

		if (!damos_pa_filter_out(s, folio))
			*sz_filter_passed += folio_size(folio);
		addr += folio_size(folio);
		folio_put(folio);
	}
	return 0;
}

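/*
 * The 'apply_scheme' callback of the physical address space operations set.
 * Dispatches to the handler of the scheme's action and returns the number of
 * bytes the action was applied to.
 */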
static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme, unsigned long *sz_filter_passed)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_pa_pageout(r, scheme, sz_filter_passed);
	case DAMOS_LRU_PRIO:
		return damon_pa_mark_accessed(r, scheme, sz_filter_passed);
	case DAMOS_LRU_DEPRIO:
		return damon_pa_deactivate_pages(r, scheme, sz_filter_passed);
	case DAMOS_MIGRATE_HOT:
	case DAMOS_MIGRATE_COLD:
		return damon_pa_migrate(r, scheme, sz_filter_passed);
	case DAMOS_STAT:
		return damon_pa_stat(r, scheme, sz_filter_passed);
	default:
		/* DAMOS actions that are not yet supported by 'paddr'. */
		break;
	}
	return 0;
}

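/*
 * The 'get_scheme_score' callback: prioritize colder regions for reclaim-like
 * actions (pageout, LRU deprioritization, cold migration) and hotter regions
 * for the opposite ones.  Unknown actions get the maximum score.
 */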
static int damon_pa_scheme_score(struct damon_ctx *context,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_cold_score(context, r, scheme);
	case DAMOS_LRU_PRIO:
		return damon_hot_score(context, r, scheme);
	case DAMOS_LRU_DEPRIO:
		return damon_cold_score(context, r, scheme);
	case DAMOS_MIGRATE_HOT:
		return damon_hot_score(context, r, scheme);
	case DAMOS_MIGRATE_COLD:
		return damon_cold_score(context, r, scheme);
	default:
		break;
	}

	return DAMOS_MAX_SCORE;
}

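/*
 * Register the DAMON operations set for the physical address space.  The
 * callbacks that this address space does not need, such as 'init' and
 * 'update', are left NULL.
 */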
static int __init damon_pa_initcall(void)
{
	struct damon_operations ops = {
		.id = DAMON_OPS_PADDR,
		.init = NULL,
		.update = NULL,
		.prepare_access_checks = damon_pa_prepare_access_checks,
		.check_accesses = damon_pa_check_accesses,
		.reset_aggregated = NULL,
		.target_valid = NULL,
		.cleanup = NULL,
		.apply_scheme = damon_pa_apply_scheme,
		.get_scheme_score = damon_pa_scheme_score,
	};

	return damon_register_ops(&ops);
}

subsys_initcall(damon_pa_initcall);