// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for The Physical Address Space
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#define pr_fmt(fmt) "damon-pa: " fmt

#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>

#include "../internal.h"
#include "ops-common.h"

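/*
 * rmap walk callback for clearing the accessed bit of each PTE or PMD that
 * maps the folio in the given VMA.
 */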
static bool damon_folio_mkold_one(struct folio *folio,
		struct vm_area_struct *vma, unsigned long addr, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte)
			damon_ptep_mkold(pvmw.pte, vma, addr);
		else
			damon_pmdp_mkold(pvmw.pmd, vma, addr);
	}
	return true;
}

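/*
 * Clear the accessed bit of every mapping of the folio via reverse mapping.
 * If the folio is not mapped, only mark it as idle.
 */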
static void damon_folio_mkold(struct folio *folio)
{
	struct rmap_walk_control rwc = {
		.rmap_one = damon_folio_mkold_one,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
		folio_set_idle(folio);
		return;
	}

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio))
		return;

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);
}

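/* Clear the access tracking state of the folio containing @paddr. */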
static void damon_pa_mkold(unsigned long paddr)
{
	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));

	if (!folio)
		return;

	damon_folio_mkold(folio);
	folio_put(folio);
}

static void __damon_pa_prepare_access_check(struct damon_region *r)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_pa_mkold(r->sampling_addr);
}

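/*
 * Pick a random sampling address in each region and clear its accessed bit,
 * so that the next check can tell whether the region was accessed meanwhile.
 */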
static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			__damon_pa_prepare_access_check(r);
	}
}

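/*
 * rmap walk callback for checking whether any PTE or PMD mapping the folio in
 * the given VMA has the accessed bit set.  The result is stored in the bool
 * that @arg points to, and the walk stops as soon as an access is found.
 */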
static bool damon_folio_young_one(struct folio *folio,
		struct vm_area_struct *vma, unsigned long addr, void *arg)
{
	bool *accessed = arg;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

	*accessed = false;
	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte) {
			*accessed = pte_young(ptep_get(pvmw.pte)) ||
				!folio_test_idle(folio) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
		} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			*accessed = pmd_young(pmdp_get(pvmw.pmd)) ||
				!folio_test_idle(folio) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
#else
			WARN_ON_ONCE(1);
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
		}
		if (*accessed) {
			page_vma_mapped_walk_done(&pvmw);
			break;
		}
	}

	/* If accessed, stop walking */
	return *accessed == false;
}

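/*
 * Check whether the folio was accessed since its access state was last
 * cleared, using the page idle flag and a reverse mapping walk.
 */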
static bool damon_folio_young(struct folio *folio)
{
	bool accessed = false;
	struct rmap_walk_control rwc = {
		.arg = &accessed,
		.rmap_one = damon_folio_young_one,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
		if (folio_test_idle(folio))
			return false;
		else
			return true;
	}

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio))
		return false;

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);

	return accessed;
}

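/*
 * Check whether the folio containing @paddr was accessed, and report the size
 * of the folio via @folio_sz.
 */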
static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)
{
	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
	bool accessed;

	if (!folio)
		return false;

	accessed = damon_folio_young(folio);
	*folio_sz = folio_size(folio);
	folio_put(folio);
	return accessed;
}

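/*
 * Check whether the sampling address of @r was accessed and update the
 * region's access rate.  The result for the last checked folio is cached and
 * reused for regions whose sampling addresses fall in the same folio.
 */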
static void __damon_pa_check_access(struct damon_region *r,
		struct damon_attrs *attrs)
{
	static unsigned long last_addr;
	static unsigned long last_folio_sz = PAGE_SIZE;
	static bool last_accessed;

	/* If the region is in the last checked page, reuse the result */
	if (ALIGN_DOWN(last_addr, last_folio_sz) ==
				ALIGN_DOWN(r->sampling_addr, last_folio_sz)) {
		damon_update_region_access_rate(r, last_accessed, attrs);
		return;
	}

	last_accessed = damon_pa_young(r->sampling_addr, &last_folio_sz);
	damon_update_region_access_rate(r, last_accessed, attrs);

	last_addr = r->sampling_addr;
}

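/*
 * Check the access status of every region of every target and return the
 * maximum nr_accesses that was found.
 */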
static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t) {
			__damon_pa_check_access(r, &ctx->attrs);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
		}
	}

	return max_nr_accesses;
}

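/*
 * Check whether @folio matches the type of @filter, and return whether the
 * folio should be filtered out, that is, whether the match result equals the
 * filter's 'matching' setting.
 */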
static bool __damos_pa_filter_out(struct damos_filter *filter,
		struct folio *folio)
{
	bool matched = false;
	struct mem_cgroup *memcg;

	switch (filter->type) {
	case DAMOS_FILTER_TYPE_ANON:
		matched = folio_test_anon(folio);
		break;
	case DAMOS_FILTER_TYPE_MEMCG:
		rcu_read_lock();
		memcg = folio_memcg_check(folio);
		if (!memcg)
			matched = false;
		else
			matched = filter->memcg_id == mem_cgroup_id(memcg);
		rcu_read_unlock();
		break;
	case DAMOS_FILTER_TYPE_YOUNG:
		matched = damon_folio_young(folio);
		if (matched)
			damon_folio_mkold(folio);
		break;
	default:
		break;
	}

	return matched == filter->matching;
}

/*
 * damos_pa_filter_out - Return true if the folio should be filtered out by
 * any of the scheme's filters.
 */
static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio)
{
	struct damos_filter *filter;

	damos_for_each_filter(filter, scheme) {
		if (__damos_pa_filter_out(filter, folio))
			return true;
	}
	return false;
}

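/*
 * Page out the folios of the region that pass the scheme's filters, and
 * return the reclaimed size in bytes.  Unless the scheme already has a
 * 'young' type filter, a temporary one is installed so that folios found
 * young at this point are skipped.
 */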
static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s)
{
	unsigned long addr, applied;
	LIST_HEAD(folio_list);
	bool install_young_filter = true;
	struct damos_filter *filter;

	/* check access at the page level again by default */
	damos_for_each_filter(filter, s) {
		if (filter->type == DAMOS_FILTER_TYPE_YOUNG) {
			install_young_filter = false;
			break;
		}
	}
	if (install_young_filter) {
		filter = damos_new_filter(DAMOS_FILTER_TYPE_YOUNG, true);
		if (!filter)
			return 0;
		damos_add_filter(s, filter);
	}

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct folio *folio = damon_get_folio(PHYS_PFN(addr));

		if (!folio)
			continue;

		if (damos_pa_filter_out(s, folio))
			goto put_folio;

		folio_clear_referenced(folio);
		folio_test_clear_young(folio);
		if (!folio_isolate_lru(folio))
			goto put_folio;
		if (folio_test_unevictable(folio))
			folio_putback_lru(folio);
		else
			list_add(&folio->lru, &folio_list);
put_folio:
		folio_put(folio);
	}
	if (install_young_filter)
		damos_destroy_filter(filter);
	applied = reclaim_pages(&folio_list);
	cond_resched();
	return applied * PAGE_SIZE;
}

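/*
 * Mark as accessed (for LRU prioritization) or deactivate (for LRU
 * deprioritization) the folios of the region that pass the scheme's filters,
 * and return the total size of the handled folios in bytes.
 */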
static inline unsigned long damon_pa_mark_accessed_or_deactivate(
		struct damon_region *r, struct damos *s, bool mark_accessed)
{
	unsigned long addr, applied = 0;

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct folio *folio = damon_get_folio(PHYS_PFN(addr));

		if (!folio)
			continue;

		if (damos_pa_filter_out(s, folio))
			goto put_folio;

		if (mark_accessed)
			folio_mark_accessed(folio);
		else
			folio_deactivate(folio);
		applied += folio_nr_pages(folio);
put_folio:
		folio_put(folio);
	}
	return applied * PAGE_SIZE;
}

static unsigned long damon_pa_mark_accessed(struct damon_region *r,
	struct damos *s)
{
	return damon_pa_mark_accessed_or_deactivate(r, s, true);
}

static unsigned long damon_pa_deactivate_pages(struct damon_region *r,
	struct damos *s)
{
	return damon_pa_mark_accessed_or_deactivate(r, s, false);
}

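/*
 * Apply the action of @scheme to the given region of the physical address
 * space, and return the number of bytes the action was applied to.
 */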
static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_pa_pageout(r, scheme);
	case DAMOS_LRU_PRIO:
		return damon_pa_mark_accessed(r, scheme);
	case DAMOS_LRU_DEPRIO:
		return damon_pa_deactivate_pages(r, scheme);
	case DAMOS_STAT:
		break;
	default:
		/* DAMOS actions that are not yet supported by 'paddr'. */
		break;
	}
	return 0;
}

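/*
 * Compute the priority score of the region for the scheme's action: hot
 * regions score higher for LRU prioritization, while cold regions score
 * higher for pageout and LRU deprioritization.
 */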
static int damon_pa_scheme_score(struct damon_ctx *context,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_cold_score(context, r, scheme);
	case DAMOS_LRU_PRIO:
		return damon_hot_score(context, r, scheme);
	case DAMOS_LRU_DEPRIO:
		return damon_cold_score(context, r, scheme);
	default:
		break;
	}

	return DAMOS_MAX_SCORE;
}

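/* Register the monitoring operations for the physical address space. */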
static int __init damon_pa_initcall(void)
{
	struct damon_operations ops = {
		.id = DAMON_OPS_PADDR,
		.init = NULL,
		.update = NULL,
		.prepare_access_checks = damon_pa_prepare_access_checks,
		.check_accesses = damon_pa_check_accesses,
		.reset_aggregated = NULL,
		.target_valid = NULL,
		.cleanup = NULL,
		.apply_scheme = damon_pa_apply_scheme,
		.get_scheme_score = damon_pa_scheme_score,
	};

	return damon_register_ops(&ops);
}

subsys_initcall(damon_pa_initcall);