xref: /linux/mm/damon/paddr.c (revision e9ef810dfee7a2227da9d423aecb0ced35faddbe)
// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Operations for The Physical Address Space
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#define pr_fmt(fmt) "damon-pa: " fmt

#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/memory-tiers.h>
#include <linux/mm_inline.h>

#include "../internal.h"
#include "ops-common.h"

static void damon_pa_mkold(unsigned long paddr)
{
	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));

	if (!folio)
		return;

	damon_folio_mkold(folio);
	folio_put(folio);
}

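/*
 * Note: damon_pa_mkold() implements the "prepare" half of DAMON's
 * clear-then-recheck access sampling.  damon_folio_mkold() (shared via
 * ops-common.h) clears the folio's access signals; roughly, it clears
 * the PTE Accessed bits found through an rmap walk for mapped folios,
 * and falls back to the page-idle state for unmapped ones, so that a
 * later damon_pa_young() call reports only accesses made during the
 * current sampling interval.
 */
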
static void __damon_pa_prepare_access_check(struct damon_region *r)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_pa_mkold(r->sampling_addr);
}

static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			__damon_pa_prepare_access_check(r);
	}
}

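/*
 * Note: DAMON's physical address space monitoring is sampling based: a
 * single address is drawn uniformly at random from each region, and only
 * the page holding it is aged.  For example, for a region spanning
 * [0x100000000, 0x140000000) (1 GiB), exactly one page is sampled per
 * sampling interval, and the check result for that page is treated by
 * the core logic as representative of the whole region.
 */
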
static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)
{
	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
	bool accessed;

	if (!folio)
		return false;

	accessed = damon_folio_young(folio);
	*folio_sz = folio_size(folio);
	folio_put(folio);
	return accessed;
}

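/*
 * Note: besides the access result, damon_pa_young() reports the size of
 * the folio that the sampled address belongs to.  The caller uses this
 * to reuse the result for other sampling addresses falling into the same
 * (possibly large) folio; see __damon_pa_check_access() below.
 */
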
static void __damon_pa_check_access(struct damon_region *r,
		struct damon_attrs *attrs)
{
	static unsigned long last_addr;
	static unsigned long last_folio_sz = PAGE_SIZE;
	static bool last_accessed;

	/* If the region is in the last checked page, reuse the result */
	if (ALIGN_DOWN(last_addr, last_folio_sz) ==
				ALIGN_DOWN(r->sampling_addr, last_folio_sz)) {
		damon_update_region_access_rate(r, last_accessed, attrs);
		return;
	}

	last_accessed = damon_pa_young(r->sampling_addr, &last_folio_sz);
	damon_update_region_access_rate(r, last_accessed, attrs);

	last_addr = r->sampling_addr;
}

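/*
 * Note: the function-local statics above cache the most recent check
 * result.  Since sampling addresses of adjacent regions can fall into
 * the same large folio (e.g., a 2 MiB THP), the ALIGN_DOWN() comparison
 * detects that case and reuses the result instead of repeating the
 * relatively expensive rmap-based check.  Being statics, the cache is
 * shared by all monitoring contexts that use these operations.
 */
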
static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t) {
			__damon_pa_check_access(r, &ctx->attrs);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
		}
	}

	return max_nr_accesses;
}

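/*
 * Note: the returned maximum access count is used by the DAMON core
 * layer for scaling decisions, e.g., deriving the threshold for merging
 * adjacent regions with similar access frequencies.
 */
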
/*
 * damos_pa_filter_out - Return true if the page should be filtered out.
 */
static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio)
{
	struct damos_filter *filter;

	if (scheme->core_filters_allowed)
		return false;

	damos_for_each_ops_filter(filter, scheme) {
		if (damos_folio_filter_match(filter, folio))
			return !filter->allow;
	}
	return scheme->ops_filters_default_reject;
}

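/*
 * Note on the filter semantics above: if the core layer has already
 * explicitly allowed this memory, the operations-layer filters are
 * skipped entirely.  Otherwise the first filter matching the folio
 * decides the outcome (an "allow" filter passes it, a "reject" filter
 * drops it), and folios matching no filter get the scheme's configured
 * default.  For instance, with a filter rejecting anonymous folios
 * installed ahead of one allowing a memcg, an anonymous folio of that
 * memcg is still rejected.
 */
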
static bool damon_pa_invalid_damos_folio(struct folio *folio, struct damos *s)
{
	if (!folio)
		return true;
	if (folio == s->last_applied) {
		folio_put(folio);
		return true;
	}
	return false;
}

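/*
 * Note: besides NULL folios, this also skips the folio recorded as
 * s->last_applied, i.e., the last folio the scheme acted on.  This
 * avoids applying an action twice to a single large folio, e.g., when a
 * region boundary, or a restart of the scheme, lands in the middle of
 * the folio that was just processed.
 */
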
static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s,
		unsigned long *sz_filter_passed)
{
	unsigned long addr, applied;
	LIST_HEAD(folio_list);
	bool install_young_filter = true;
	struct damos_filter *filter;
	struct folio *folio;

	/* By default, check the page-level access status once again. */
	damos_for_each_ops_filter(filter, s) {
		if (filter->type == DAMOS_FILTER_TYPE_YOUNG) {
			install_young_filter = false;
			break;
		}
	}
	if (install_young_filter) {
		filter = damos_new_filter(
				DAMOS_FILTER_TYPE_YOUNG, true, false);
		if (!filter)
			return 0;
		damos_add_filter(s, filter);
	}

	addr = r->ar.start;
	while (addr < r->ar.end) {
		folio = damon_get_folio(PHYS_PFN(addr));
		if (damon_pa_invalid_damos_folio(folio, s)) {
			addr += PAGE_SIZE;
			continue;
		}

		if (damos_pa_filter_out(s, folio))
			goto put_folio;
		else
			*sz_filter_passed += folio_size(folio);

		folio_clear_referenced(folio);
		folio_test_clear_young(folio);
		if (!folio_isolate_lru(folio))
			goto put_folio;
		if (folio_test_unevictable(folio))
			folio_putback_lru(folio);
		else
			list_add(&folio->lru, &folio_list);
put_folio:
		addr += folio_size(folio);
		folio_put(folio);
	}
	if (install_young_filter)
		damos_destroy_filter(filter);
	applied = reclaim_pages(&folio_list);
	cond_resched();
	s->last_applied = folio;
	return applied * PAGE_SIZE;
}

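/*
 * Note: unless the user configured their own page-level "young" filter,
 * damon_pa_pageout() temporarily installs a rejecting one, so pages that
 * were re-accessed after the region-level sampling are spared.  The
 * referenced/young flags of the surviving cold folios are cleared so
 * that reclaim treats them as cold, and they are then batched out via
 * reclaim_pages().  The return value is the reclaimed size in bytes.
 */
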
static inline unsigned long damon_pa_mark_accessed_or_deactivate(
		struct damon_region *r, struct damos *s, bool mark_accessed,
		unsigned long *sz_filter_passed)
{
	unsigned long addr, applied = 0;
	struct folio *folio;

	addr = r->ar.start;
	while (addr < r->ar.end) {
		folio = damon_get_folio(PHYS_PFN(addr));
		if (damon_pa_invalid_damos_folio(folio, s)) {
			addr += PAGE_SIZE;
			continue;
		}

		if (damos_pa_filter_out(s, folio))
			goto put_folio;
		else
			*sz_filter_passed += folio_size(folio);

		if (mark_accessed)
			folio_mark_accessed(folio);
		else
			folio_deactivate(folio);
		applied += folio_nr_pages(folio);
put_folio:
		addr += folio_size(folio);
		folio_put(folio);
	}
	s->last_applied = folio;
	return applied * PAGE_SIZE;
}

static unsigned long damon_pa_mark_accessed(struct damon_region *r,
	struct damos *s, unsigned long *sz_filter_passed)
{
	return damon_pa_mark_accessed_or_deactivate(r, s, true,
			sz_filter_passed);
}

static unsigned long damon_pa_deactivate_pages(struct damon_region *r,
	struct damos *s, unsigned long *sz_filter_passed)
{
	return damon_pa_mark_accessed_or_deactivate(r, s, false,
			sz_filter_passed);
}

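/*
 * Note: the two wrappers above back the DAMOS_LRU_PRIO and
 * DAMOS_LRU_DEPRIO actions.  folio_mark_accessed() nudges hot folios
 * toward (or keeps them on) the active LRU list, while
 * folio_deactivate() moves cold folios to the inactive list, making
 * them earlier reclaim candidates.  Both return the applied size in
 * bytes.
 */
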
static unsigned long damon_pa_migrate(struct damon_region *r, struct damos *s,
		unsigned long *sz_filter_passed)
{
	unsigned long addr, applied;
	LIST_HEAD(folio_list);
	struct folio *folio;

	addr = r->ar.start;
	while (addr < r->ar.end) {
		folio = damon_get_folio(PHYS_PFN(addr));
		if (damon_pa_invalid_damos_folio(folio, s)) {
			addr += PAGE_SIZE;
			continue;
		}

		if (damos_pa_filter_out(s, folio))
			goto put_folio;
		else
			*sz_filter_passed += folio_size(folio);

		if (!folio_isolate_lru(folio))
			goto put_folio;
		list_add(&folio->lru, &folio_list);
put_folio:
		addr += folio_size(folio);
		folio_put(folio);
	}
	applied = damon_migrate_pages(&folio_list, s->target_nid);
	cond_resched();
	s->last_applied = folio;
	return applied * PAGE_SIZE;
}

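/*
 * Note: damon_pa_migrate() backs both DAMOS_MIGRATE_HOT and
 * DAMOS_MIGRATE_COLD; the direction differs only in how regions are
 * selected and prioritized.  Filter-passing folios are isolated from
 * the LRU lists and moved to the scheme's target NUMA node, which is
 * the typical building block for memory-tiering style promotion and
 * demotion.
 */
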
static bool damon_pa_scheme_has_filter(struct damos *s)
{
	struct damos_filter *f;

	damos_for_each_ops_filter(f, s)
		return true;
	return false;
}

static unsigned long damon_pa_stat(struct damon_region *r, struct damos *s,
		unsigned long *sz_filter_passed)
{
	unsigned long addr;
	struct folio *folio;

	if (!damon_pa_scheme_has_filter(s))
		return 0;

	addr = r->ar.start;
	while (addr < r->ar.end) {
		folio = damon_get_folio(PHYS_PFN(addr));
		if (damon_pa_invalid_damos_folio(folio, s)) {
			addr += PAGE_SIZE;
			continue;
		}

		if (!damos_pa_filter_out(s, folio))
			*sz_filter_passed += folio_size(folio);
		addr += folio_size(folio);
		folio_put(folio);
	}
	s->last_applied = folio;
	return 0;
}

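/*
 * Note: DAMOS_STAT modifies nothing.  The walk above is done only when
 * operations-layer filters are installed, purely to account how many
 * bytes of the region pass them (*sz_filter_passed); without filters
 * there is nothing meaningful to account, so the walk is skipped.
 */
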
static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme, unsigned long *sz_filter_passed)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_pa_pageout(r, scheme, sz_filter_passed);
	case DAMOS_LRU_PRIO:
		return damon_pa_mark_accessed(r, scheme, sz_filter_passed);
	case DAMOS_LRU_DEPRIO:
		return damon_pa_deactivate_pages(r, scheme, sz_filter_passed);
	case DAMOS_MIGRATE_HOT:
	case DAMOS_MIGRATE_COLD:
		return damon_pa_migrate(r, scheme, sz_filter_passed);
	case DAMOS_STAT:
		return damon_pa_stat(r, scheme, sz_filter_passed);
	default:
		/* DAMOS actions that are not yet supported by 'paddr'. */
		break;
	}
	return 0;
}

static int damon_pa_scheme_score(struct damon_ctx *context,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_cold_score(context, r, scheme);
	case DAMOS_LRU_PRIO:
		return damon_hot_score(context, r, scheme);
	case DAMOS_LRU_DEPRIO:
		return damon_cold_score(context, r, scheme);
	case DAMOS_MIGRATE_HOT:
		return damon_hot_score(context, r, scheme);
	case DAMOS_MIGRATE_COLD:
		return damon_cold_score(context, r, scheme);
	default:
		break;
	}

	return DAMOS_MAX_SCORE;
}

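/*
 * Note: these scores prioritize regions when a scheme is quota limited.
 * Actions targeting cold memory (pageout, LRU deprioritization, cold
 * migration) use damon_cold_score(), those targeting hot memory use
 * damon_hot_score(), and the rest fall back to DAMOS_MAX_SCORE, i.e.,
 * all regions rank equally.
 */
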
static int __init damon_pa_initcall(void)
{
	struct damon_operations ops = {
		.id = DAMON_OPS_PADDR,
		.init = NULL,
		.update = NULL,
		.prepare_access_checks = damon_pa_prepare_access_checks,
		.check_accesses = damon_pa_check_accesses,
		.target_valid = NULL,
		.cleanup = NULL,
		.apply_scheme = damon_pa_apply_scheme,
		.get_scheme_score = damon_pa_scheme_score,
	};

	return damon_register_ops(&ops);
}

subsys_initcall(damon_pa_initcall);
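
/*
 * Usage sketch (not part of this file): with the DAMON sysfs interface
 * enabled, the operations set registered above is selected by writing
 * "paddr" to a context's "operations" file, for example:
 *
 *	# echo 1 > /sys/kernel/mm/damon/admin/kdamonds/nr_kdamonds
 *	# echo 1 > /sys/kernel/mm/damon/admin/kdamonds/0/contexts/nr_contexts
 *	# echo paddr > /sys/kernel/mm/damon/admin/kdamonds/0/contexts/0/operations
 *	# echo on > /sys/kernel/mm/damon/admin/kdamonds/0/state
 *
 * (Remaining setup, such as the physical address ranges to monitor, is
 * elided here.)
 */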