// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Code for The Physical Address Space
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#define pr_fmt(fmt) "damon-pa: " fmt

#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/memory-tiers.h>
#include <linux/mm_inline.h>

#include "../internal.h"
#include "ops-common.h"

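/*
 * For this operations set, the DAMON core address space is the physical
 * address space scaled down by the context's ->addr_unit.  The two helpers
 * below convert between the core and the physical address spaces.  For
 * example, with an addr_unit of 16, core address 0x100 maps to physical
 * address 0x1000, and a 32-bit unsigned long core address can cover up to
 * 64 GiB of physical memory.
 */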
static phys_addr_t damon_pa_phys_addr(
		unsigned long addr, unsigned long addr_unit)
{
	return (phys_addr_t)addr * addr_unit;
}

static unsigned long damon_pa_core_addr(
		phys_addr_t pa, unsigned long addr_unit)
{
	/*
	 * Use div_u64() to avoid linking errors related with __udivdi3,
	 * __aeabi_uldivmod, or similar problems.  It should also be faster
	 * than the plain 64-bit division on 32-bit machines (see the comment
	 * of div_u64() for details).
	 */
	if (sizeof(pa) == 8 && sizeof(addr_unit) == 4)
		return div_u64(pa, addr_unit);
	return pa / addr_unit;
}

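/*
 * Clear the accessed bit(s) of the folio containing the given physical
 * address, so that the next access check can detect accesses made after
 * this call.
 */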
static void damon_pa_mkold(phys_addr_t paddr)
{
	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));

	if (!folio)
		return;

	damon_folio_mkold(folio);
	folio_put(folio);
}

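/*
 * Pick a random sampling address in the given region, and make the folio of
 * the address old as a preparation of the next access check.
 */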
static void __damon_pa_prepare_access_check(struct damon_region *r,
		unsigned long addr_unit)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_pa_mkold(damon_pa_phys_addr(r->sampling_addr, addr_unit));
}

static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			__damon_pa_prepare_access_check(r, ctx->addr_unit);
	}
}

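/*
 * Check whether the folio containing the given physical address has been
 * accessed since it was made old.  Also report the size of the folio via
 * @folio_sz, so that the caller can reuse the result for nearby addresses.
 */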
static bool damon_pa_young(phys_addr_t paddr, unsigned long *folio_sz)
{
	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
	bool accessed;

	if (!folio)
		return false;

	accessed = damon_folio_young(folio);
	*folio_sz = folio_size(folio);
	folio_put(folio);
	return accessed;
}

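/*
 * Check the access to the sampling address of the given region and update
 * the region's access rate.  The result for the lastly checked folio is
 * cached in static variables, and reused for regions whose sampling
 * addresses fall in the same folio.
 */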
static void __damon_pa_check_access(struct damon_region *r,
		struct damon_attrs *attrs, unsigned long addr_unit)
{
	static phys_addr_t last_addr;
	static unsigned long last_folio_sz = PAGE_SIZE;
	static bool last_accessed;
	phys_addr_t sampling_addr = damon_pa_phys_addr(
			r->sampling_addr, addr_unit);

	/* If the region is in the last checked folio, reuse the result */
	if (ALIGN_DOWN(last_addr, last_folio_sz) ==
				ALIGN_DOWN(sampling_addr, last_folio_sz)) {
		damon_update_region_access_rate(r, last_accessed, attrs);
		return;
	}

	last_accessed = damon_pa_young(sampling_addr, &last_folio_sz);
	damon_update_region_access_rate(r, last_accessed, attrs);

	last_addr = sampling_addr;
}

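/*
 * Check accesses to the sampling address of each region of the context, and
 * return the maximum 'nr_accesses' among the regions.
 */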
static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t) {
			__damon_pa_check_access(
					r, &ctx->attrs, ctx->addr_unit);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
		}
	}

	return max_nr_accesses;
}

/*
 * damos_pa_filter_out - Return true if the folio should be filtered out.
 */
static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio)
{
	struct damos_filter *filter;

	if (scheme->core_filters_allowed)
		return false;

	damos_for_each_ops_filter(filter, scheme) {
		if (damos_folio_filter_match(filter, folio))
			return !filter->allow;
	}
	return scheme->ops_filters_default_reject;
}

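/*
 * Return true if the given folio should be skipped by the given scheme,
 * that is, when the folio could not be gotten, or when it is the folio that
 * the scheme was lastly applied to.  Note that the folio reference is
 * dropped here in the latter case.
 */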
static bool damon_pa_invalid_damos_folio(struct folio *folio, struct damos *s)
{
	if (!folio)
		return true;
	if (folio == s->last_applied) {
		folio_put(folio);
		return true;
	}
	return false;
}

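/*
 * Reclaim folios in the given region.  Unless the scheme already has a
 * 'young' type operations filter, a temporary one is installed so that
 * recently accessed folios are excluded from the reclamation.  Returns the
 * reclaimed size in the core address space (bytes divided by addr_unit).
 */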
static unsigned long damon_pa_pageout(struct damon_region *r,
		unsigned long addr_unit, struct damos *s,
		unsigned long *sz_filter_passed)
{
	phys_addr_t addr, applied;
	LIST_HEAD(folio_list);
	bool install_young_filter = true;
	struct damos_filter *filter;
	struct folio *folio = NULL;

	/* check access at the page level again by default */
	damos_for_each_ops_filter(filter, s) {
		if (filter->type == DAMOS_FILTER_TYPE_YOUNG) {
			install_young_filter = false;
			break;
		}
	}
	if (install_young_filter) {
		filter = damos_new_filter(
				DAMOS_FILTER_TYPE_YOUNG, true, false);
		if (!filter)
			return 0;
		damos_add_filter(s, filter);
	}

	addr = damon_pa_phys_addr(r->ar.start, addr_unit);
	while (addr < damon_pa_phys_addr(r->ar.end, addr_unit)) {
		folio = damon_get_folio(PHYS_PFN(addr));
		if (damon_pa_invalid_damos_folio(folio, s)) {
			addr += PAGE_SIZE;
			continue;
		}

		if (damos_pa_filter_out(s, folio))
			goto put_folio;
		else
			*sz_filter_passed += folio_size(folio) / addr_unit;

		folio_clear_referenced(folio);
		folio_test_clear_young(folio);
		if (!folio_isolate_lru(folio))
			goto put_folio;
		if (folio_test_unevictable(folio))
			folio_putback_lru(folio);
		else
			list_add(&folio->lru, &folio_list);
put_folio:
		addr += folio_size(folio);
		folio_put(folio);
	}
	if (install_young_filter)
		damos_destroy_filter(filter);
	applied = reclaim_pages(&folio_list);
	cond_resched();
	s->last_applied = folio;
	return damon_pa_core_addr(applied * PAGE_SIZE, addr_unit);
}

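/*
 * Activate or deactivate folios in the given region on the LRU lists,
 * depending on @activate.  Returns the total size of the folios that the
 * operation has been applied to, in the core address space.
 */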
static inline unsigned long damon_pa_de_activate(
		struct damon_region *r, unsigned long addr_unit,
		struct damos *s, bool activate,
		unsigned long *sz_filter_passed)
{
	phys_addr_t addr, applied = 0;
	struct folio *folio = NULL;

	addr = damon_pa_phys_addr(r->ar.start, addr_unit);
	while (addr < damon_pa_phys_addr(r->ar.end, addr_unit)) {
		folio = damon_get_folio(PHYS_PFN(addr));
		if (damon_pa_invalid_damos_folio(folio, s)) {
			addr += PAGE_SIZE;
			continue;
		}

		if (damos_pa_filter_out(s, folio))
			goto put_folio;
		else
			*sz_filter_passed += folio_size(folio) / addr_unit;

		if (activate)
			folio_activate(folio);
		else
			folio_deactivate(folio);
		applied += folio_nr_pages(folio);
put_folio:
		addr += folio_size(folio);
		folio_put(folio);
	}
	s->last_applied = folio;
	return damon_pa_core_addr(applied * PAGE_SIZE, addr_unit);
}

static unsigned long damon_pa_activate_pages(struct damon_region *r,
		unsigned long addr_unit, struct damos *s,
		unsigned long *sz_filter_passed)
{
	return damon_pa_de_activate(r, addr_unit, s, true, sz_filter_passed);
}

static unsigned long damon_pa_deactivate_pages(struct damon_region *r,
		unsigned long addr_unit, struct damos *s,
		unsigned long *sz_filter_passed)
{
	return damon_pa_de_activate(r, addr_unit, s, false, sz_filter_passed);
}

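/*
 * Isolate folios in the given region from the LRU lists, and migrate those
 * to the NUMA node that the scheme is targeting (->target_nid).  Returns
 * the migrated size in the core address space.
 */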
static unsigned long damon_pa_migrate(struct damon_region *r,
		unsigned long addr_unit, struct damos *s,
		unsigned long *sz_filter_passed)
{
	phys_addr_t addr, applied;
	LIST_HEAD(folio_list);
	struct folio *folio = NULL;

	addr = damon_pa_phys_addr(r->ar.start, addr_unit);
	while (addr < damon_pa_phys_addr(r->ar.end, addr_unit)) {
		folio = damon_get_folio(PHYS_PFN(addr));
		if (damon_pa_invalid_damos_folio(folio, s)) {
			addr += PAGE_SIZE;
			continue;
		}

		if (damos_pa_filter_out(s, folio))
			goto put_folio;
		else
			*sz_filter_passed += folio_size(folio) / addr_unit;

		if (!folio_isolate_lru(folio))
			goto put_folio;
		list_add(&folio->lru, &folio_list);
put_folio:
		addr += folio_size(folio);
		folio_put(folio);
	}
	applied = damon_migrate_pages(&folio_list, s->target_nid);
	cond_resched();
	s->last_applied = folio;
	return damon_pa_core_addr(applied * PAGE_SIZE, addr_unit);
}

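/*
 * Update only the filter-passed bytes statistic (@sz_filter_passed) for the
 * given region, without applying a real operation.  The region walk is
 * skipped when the scheme has no operations layer filter, since there is
 * nothing to measure at this layer in that case.
 */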
static unsigned long damon_pa_stat(struct damon_region *r,
		unsigned long addr_unit, struct damos *s,
		unsigned long *sz_filter_passed)
{
	phys_addr_t addr;
	struct folio *folio = NULL;

	if (!damos_ops_has_filter(s))
		return 0;

	addr = damon_pa_phys_addr(r->ar.start, addr_unit);
	while (addr < damon_pa_phys_addr(r->ar.end, addr_unit)) {
		folio = damon_get_folio(PHYS_PFN(addr));
		if (damon_pa_invalid_damos_folio(folio, s)) {
			addr += PAGE_SIZE;
			continue;
		}

		if (!damos_pa_filter_out(s, folio))
			*sz_filter_passed += folio_size(folio) / addr_unit;
		addr += folio_size(folio);
		folio_put(folio);
	}
	s->last_applied = folio;
	return 0;
}

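/*
 * Apply the action of the given scheme to the given region, and return the
 * total size of the region that the action has been applied to, in the core
 * address space.
 */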
static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme, unsigned long *sz_filter_passed)
{
	unsigned long aunit = ctx->addr_unit;

	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_pa_pageout(r, aunit, scheme, sz_filter_passed);
	case DAMOS_LRU_PRIO:
		return damon_pa_activate_pages(r, aunit, scheme,
				sz_filter_passed);
	case DAMOS_LRU_DEPRIO:
		return damon_pa_deactivate_pages(r, aunit, scheme,
				sz_filter_passed);
	case DAMOS_MIGRATE_HOT:
	case DAMOS_MIGRATE_COLD:
		return damon_pa_migrate(r, aunit, scheme, sz_filter_passed);
	case DAMOS_STAT:
		return damon_pa_stat(r, aunit, scheme, sz_filter_passed);
	default:
		/* DAMOS actions that are not yet supported by 'paddr'. */
		break;
	}
	return 0;
}

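/*
 * Return the priority score of applying the given scheme to the given
 * region.  Colder regions score higher for reclaiming and demoting actions
 * (DAMOS_PAGEOUT, DAMOS_LRU_DEPRIO, and DAMOS_MIGRATE_COLD), while hotter
 * regions score higher for promoting ones (DAMOS_LRU_PRIO and
 * DAMOS_MIGRATE_HOT).
 */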
static int damon_pa_scheme_score(struct damon_ctx *context,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_cold_score(context, r, scheme);
	case DAMOS_LRU_PRIO:
		return damon_hot_score(context, r, scheme);
	case DAMOS_LRU_DEPRIO:
		return damon_cold_score(context, r, scheme);
	case DAMOS_MIGRATE_HOT:
		return damon_hot_score(context, r, scheme);
	case DAMOS_MIGRATE_COLD:
		return damon_cold_score(context, r, scheme);
	default:
		break;
	}

	return DAMOS_MAX_SCORE;
}

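/*
 * Register the monitoring operations set for the physical address space.
 * Since 'paddr' targets need no dedicated construction, update, or validity
 * check, the corresponding callbacks are left NULL.
 */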
static int __init damon_pa_initcall(void)
{
	struct damon_operations ops = {
		.id = DAMON_OPS_PADDR,
		.init = NULL,
		.update = NULL,
		.prepare_access_checks = damon_pa_prepare_access_checks,
		.check_accesses = damon_pa_check_accesses,
		.target_valid = NULL,
		.apply_scheme = damon_pa_apply_scheme,
		.get_scheme_score = damon_pa_scheme_score,
	};

	return damon_register_ops(&ops);
}

subsys_initcall(damon_pa_initcall);