xref: /linux/mm/damon/paddr.c (revision 5f42375904b08890f2e8e7cd955c5bf0c2c0d05a)
// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for The Physical Address Space
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#define pr_fmt(fmt) "damon-pa: " fmt

#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>

#include "../internal.h"
#include "ops-common.h"

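/*
 * rmap walk callback for damon_pa_mkold().  Clears the accessed bit of each
 * PTE or PMD that maps the folio in the given VMA, so that the next access
 * check can tell whether the folio was touched in between.
 */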
static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma,
		unsigned long addr, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte)
			damon_ptep_mkold(pvmw.pte, vma, addr);
		else
			damon_pmdp_mkold(pvmw.pmd, vma, addr);
	}
	return true;
}

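/*
 * Mark the folio of the given physical address as not accessed, by walking
 * its reverse mappings.  Unmapped folios are handled via the PG_idle flag
 * instead.  Non-anonymous and KSM folios must be locked for the rmap walk;
 * silently give up if the lock cannot be taken.
 */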
static void damon_pa_mkold(unsigned long paddr)
{
	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
	struct rmap_walk_control rwc = {
		.rmap_one = __damon_pa_mkold,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!folio)
		return;

	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
		folio_set_idle(folio);
		goto out;
	}

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio))
		goto out;

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);

out:
	folio_put(folio);
}

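/* Pick a random sampling address in the region and age the folio under it. */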
static void __damon_pa_prepare_access_check(struct damon_region *r)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_pa_mkold(r->sampling_addr);
}

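/*
 * The 'prepare_access_checks' callback of DAMON operations for the physical
 * address space: prepare the sampling of every region of every target.
 */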
static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			__damon_pa_prepare_access_check(r);
	}
}

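/*
 * rmap walk callback for damon_pa_young().  Reports via the 'arg' pointer
 * whether any PTE or PMD mapping the folio has its accessed bit set, the
 * folio has lost its idle flag, or a secondary MMU has seen an access.
 * Returns false to stop the walk as soon as an access is found.
 */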
static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
		unsigned long addr, void *arg)
{
	bool *accessed = arg;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

	*accessed = false;
	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte) {
			*accessed = pte_young(ptep_get(pvmw.pte)) ||
				!folio_test_idle(folio) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
		} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			*accessed = pmd_young(pmdp_get(pvmw.pmd)) ||
				!folio_test_idle(folio) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
#else
			WARN_ON_ONCE(1);
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
		}
		if (*accessed) {
			page_vma_mapped_walk_done(&pvmw);
			break;
		}
	}

	/* If accessed, stop walking */
	return *accessed == false;
}

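/*
 * Check whether the folio of the given physical address has been accessed
 * since the last damon_pa_mkold() on it.  The size of the folio is reported
 * via @folio_sz so that the caller can reuse the result for other addresses
 * in the same folio.
 */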
static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)
{
	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
	bool accessed = false;
	struct rmap_walk_control rwc = {
		.arg = &accessed,
		.rmap_one = __damon_pa_young,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!folio)
		return false;

	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
		accessed = !folio_test_idle(folio);
		goto out;
	}

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio))
		goto out;

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);

out:
	*folio_sz = folio_size(folio);
	folio_put(folio);
	return accessed;
}

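/*
 * Check the access to the sampling address of the region and update the
 * region's access rate.  The result of the last check is kept in static
 * variables and reused when consecutive sampling addresses fall into the
 * same folio.
 */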
static void __damon_pa_check_access(struct damon_region *r,
		struct damon_attrs *attrs)
{
	static unsigned long last_addr;
	static unsigned long last_folio_sz = PAGE_SIZE;
	static bool last_accessed;

	/* If the region is in the last checked page, reuse the result */
	if (ALIGN_DOWN(last_addr, last_folio_sz) ==
				ALIGN_DOWN(r->sampling_addr, last_folio_sz)) {
		damon_update_region_access_rate(r, last_accessed, attrs);
		return;
	}

	last_accessed = damon_pa_young(r->sampling_addr, &last_folio_sz);
	damon_update_region_access_rate(r, last_accessed, attrs);

	last_addr = r->sampling_addr;
}

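/*
 * The 'check_accesses' callback of DAMON operations for the physical address
 * space.  Returns the maximum 'nr_accesses' among all regions.
 */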
static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t) {
			__damon_pa_check_access(r, &ctx->attrs);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
		}
	}

	return max_nr_accesses;
}

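/*
 * Check the folio against a single DAMOS filter.  Returns true if the folio
 * should be filtered out, that is, when the match result agrees with the
 * filter's 'matching' setting.
 */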
static bool __damos_pa_filter_out(struct damos_filter *filter,
		struct folio *folio)
{
	bool matched = false;
	struct mem_cgroup *memcg;

	switch (filter->type) {
	case DAMOS_FILTER_TYPE_ANON:
		matched = folio_test_anon(folio);
		break;
	case DAMOS_FILTER_TYPE_MEMCG:
		rcu_read_lock();
		memcg = folio_memcg_check(folio);
		if (!memcg)
			matched = false;
		else
			matched = filter->memcg_id == mem_cgroup_id(memcg);
		rcu_read_unlock();
		break;
	default:
		break;
	}

	return matched == filter->matching;
}

/*
 * damos_pa_filter_out - Return true if the folio should be filtered out.
 */
static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio)
{
	struct damos_filter *filter;

	damos_for_each_filter(filter, scheme) {
		if (__damos_pa_filter_out(filter, folio))
			return true;
	}
	return false;
}

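/*
 * Apply the DAMOS_PAGEOUT action to the region: isolate the non-filtered,
 * evictable folios from the LRU lists and reclaim them.  Returns the number
 * of bytes that have been paged out.
 */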
static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s)
{
	unsigned long addr, applied;
	LIST_HEAD(folio_list);

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct folio *folio = damon_get_folio(PHYS_PFN(addr));

		if (!folio)
			continue;

		if (damos_pa_filter_out(s, folio))
			goto put_folio;

		folio_clear_referenced(folio);
		folio_test_clear_young(folio);
		if (!folio_isolate_lru(folio))
			goto put_folio;
		if (folio_test_unevictable(folio))
			folio_putback_lru(folio);
		else
			list_add(&folio->lru, &folio_list);
put_folio:
		folio_put(folio);
	}
	applied = reclaim_pages(&folio_list);
	cond_resched();
	return applied * PAGE_SIZE;
}

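/*
 * Common implementation of the DAMOS_LRU_PRIO and DAMOS_LRU_DEPRIO actions:
 * mark the non-filtered folios of the region as accessed, or deactivate
 * them, depending on @mark_accessed.  Returns the number of bytes applied.
 */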
static inline unsigned long damon_pa_mark_accessed_or_deactivate(
		struct damon_region *r, struct damos *s, bool mark_accessed)
{
	unsigned long addr, applied = 0;

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct folio *folio = damon_get_folio(PHYS_PFN(addr));

		if (!folio)
			continue;

		if (damos_pa_filter_out(s, folio))
			goto put_folio;

		if (mark_accessed)
			folio_mark_accessed(folio);
		else
			folio_deactivate(folio);
		applied += folio_nr_pages(folio);
put_folio:
		folio_put(folio);
	}
	return applied * PAGE_SIZE;
}

static unsigned long damon_pa_mark_accessed(struct damon_region *r,
	struct damos *s)
{
	return damon_pa_mark_accessed_or_deactivate(r, s, true);
}

static unsigned long damon_pa_deactivate_pages(struct damon_region *r,
	struct damos *s)
{
	return damon_pa_mark_accessed_or_deactivate(r, s, false);
}

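/*
 * The 'apply_scheme' callback of DAMON operations for the physical address
 * space.  Dispatches the region to the handler of the scheme's action.
 */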
static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_pa_pageout(r, scheme);
	case DAMOS_LRU_PRIO:
		return damon_pa_mark_accessed(r, scheme);
	case DAMOS_LRU_DEPRIO:
		return damon_pa_deactivate_pages(r, scheme);
	case DAMOS_STAT:
		break;
	default:
		/* DAMOS actions that are not yet supported by 'paddr'. */
		break;
	}
	return 0;
}

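/*
 * The 'get_scheme_score' callback.  Uses the coldness of the region as the
 * priority score for DAMOS_PAGEOUT and DAMOS_LRU_DEPRIO, and the hotness
 * for DAMOS_LRU_PRIO.
 */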
static int damon_pa_scheme_score(struct damon_ctx *context,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_cold_score(context, r, scheme);
	case DAMOS_LRU_PRIO:
		return damon_hot_score(context, r, scheme);
	case DAMOS_LRU_DEPRIO:
		return damon_cold_score(context, r, scheme);
	default:
		break;
	}

	return DAMOS_MAX_SCORE;
}

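/*
 * Register the monitoring operations set for the physical address space.
 * Targets in this space need no dedicated initialization, update, validity
 * check, or cleanup, so the corresponding callbacks are left NULL.
 */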
static int __init damon_pa_initcall(void)
{
	struct damon_operations ops = {
		.id = DAMON_OPS_PADDR,
		.init = NULL,
		.update = NULL,
		.prepare_access_checks = damon_pa_prepare_access_checks,
		.check_accesses = damon_pa_check_accesses,
		.reset_aggregated = NULL,
		.target_valid = NULL,
		.cleanup = NULL,
		.apply_scheme = damon_pa_apply_scheme,
		.get_scheme_score = damon_pa_scheme_score,
	};

	return damon_register_ops(&ops);
}

subsys_initcall(damon_pa_initcall);