xref: /linux/mm/damon/vaddr.c (revision dec1c62e91ba268ab2a6e339d4d7a59287d5eba1)
// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for Virtual Address Spaces
 *
 * Author: SeongJae Park <sjpark@amazon.de>
 */

#define pr_fmt(fmt) "damon-va: " fmt

#include <asm-generic/mman-common.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>

#include "ops-common.h"

#ifdef CONFIG_DAMON_VADDR_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif

/*
 * 't->pid' should be the pointer to the relevant 'struct pid' on which a
 * reference is held.  The caller must put the returned task, unless it is
 * NULL.
 */
static inline struct task_struct *damon_get_task_struct(struct damon_target *t)
{
	return get_pid_task(t->pid, PIDTYPE_PID);
}

/*
 * Get the mm_struct of the given target
 *
 * Caller _must_ put the mm_struct after use, unless it is NULL.
 *
 * Returns the mm_struct of the target on success, NULL on failure
 */
static struct mm_struct *damon_get_mm(struct damon_target *t)
{
	struct task_struct *task;
	struct mm_struct *mm;

	task = damon_get_task_struct(t);
	if (!task)
		return NULL;

	mm = get_task_mm(task);
	put_task_struct(task);
	return mm;
}

/*
 * Functions for the initial monitoring target regions construction
 */

/*
 * Size-evenly split a region into 'nr_pieces' small regions
 *
 * Returns 0 on success, or negative error code otherwise.
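 *
 * For illustration, with DAMON_MIN_REGION of 1 (as set for the kunit test
 * build above), splitting the region [0, 100) into nr_pieces == 3 gives
 * sz_piece == 33 and the pieces [0, 33), [33, 66), and [66, 100), where the
 * last piece is extended to cover the remainder left by the rounding.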
 */
static int damon_va_evenly_split_region(struct damon_target *t,
		struct damon_region *r, unsigned int nr_pieces)
{
	unsigned long sz_orig, sz_piece, orig_end;
	struct damon_region *n = NULL, *next;
	unsigned long start;

	if (!r || !nr_pieces)
		return -EINVAL;

	orig_end = r->ar.end;
	sz_orig = r->ar.end - r->ar.start;
	sz_piece = ALIGN_DOWN(sz_orig / nr_pieces, DAMON_MIN_REGION);

	if (!sz_piece)
		return -EINVAL;

	r->ar.end = r->ar.start + sz_piece;
	next = damon_next_region(r);
	for (start = r->ar.end; start + sz_piece <= orig_end;
			start += sz_piece) {
		n = damon_new_region(start, start + sz_piece);
		if (!n)
			return -ENOMEM;
		damon_insert_region(n, r, next, t);
		r = n;
	}
	/* extend the last piece to cover the remainder left by rounding */
	if (n)
		n->ar.end = orig_end;

	return 0;
}

static unsigned long sz_range(struct damon_addr_range *r)
{
	return r->end - r->start;
}

/*
 * Find the three regions separated by the two biggest unmapped regions
 *
 * vma		the head vma of the target address space
 * regions	an array of three address ranges in which the results are saved
 *
 * This function receives an address space and finds three regions in it which
 * are separated by the two biggest unmapped regions in the space.  Please
 * refer to the comments on '__damon_va_init_regions()' below to see why this
 * is necessary.
 *
 * Returns 0 on success, or negative error code otherwise.
 */
static int __damon_va_three_regions(struct vm_area_struct *vma,
				       struct damon_addr_range regions[3])
{
	struct damon_addr_range gap = {0}, first_gap = {0}, second_gap = {0};
	struct vm_area_struct *last_vma = NULL;
	unsigned long start = 0;
	struct rb_root rbroot;

	/* Find two biggest gaps so that first_gap > second_gap > others */
	for (; vma; vma = vma->vm_next) {
		if (!last_vma) {
			start = vma->vm_start;
			goto next;
		}

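		/*
		 * 'vma->rb_subtree_gap' is the largest gap below this vma or
		 * any vma in the rbtree subtree rooted at it.  If even that
		 * does not exceed the current second-biggest gap, no gap in
		 * the subtree can change the result, so skip ahead to the
		 * last (highest address) vma of the subtree.
		 */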
		if (vma->rb_subtree_gap <= sz_range(&second_gap)) {
			rbroot.rb_node = &vma->vm_rb;
			vma = rb_entry(rb_last(&rbroot),
					struct vm_area_struct, vm_rb);
			goto next;
		}

		gap.start = last_vma->vm_end;
		gap.end = vma->vm_start;
		if (sz_range(&gap) > sz_range(&second_gap)) {
			swap(gap, second_gap);
			if (sz_range(&second_gap) > sz_range(&first_gap))
				swap(second_gap, first_gap);
		}
next:
		last_vma = vma;
	}

	if (!sz_range(&second_gap) || !sz_range(&first_gap))
		return -EINVAL;

	/* Sort the two biggest gaps by address */
	if (first_gap.start > second_gap.start)
		swap(first_gap, second_gap);

	/* Store the result */
	regions[0].start = ALIGN(start, DAMON_MIN_REGION);
	regions[0].end = ALIGN(first_gap.start, DAMON_MIN_REGION);
	regions[1].start = ALIGN(first_gap.end, DAMON_MIN_REGION);
	regions[1].end = ALIGN(second_gap.start, DAMON_MIN_REGION);
	regions[2].start = ALIGN(second_gap.end, DAMON_MIN_REGION);
	regions[2].end = ALIGN(last_vma->vm_end, DAMON_MIN_REGION);

	return 0;
}

/*
 * Get the three regions in the given target (task)
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int damon_va_three_regions(struct damon_target *t,
				struct damon_addr_range regions[3])
{
	struct mm_struct *mm;
	int rc;

	mm = damon_get_mm(t);
	if (!mm)
		return -EINVAL;

	mmap_read_lock(mm);
	rc = __damon_va_three_regions(mm->mmap, regions);
	mmap_read_unlock(mm);

	mmput(mm);
	return rc;
}

/*
 * Initialize the monitoring target regions for the given target (task)
 *
 * t	the given target
 *
 * Because only small portions of the entire address space are actually mapped
 * to memory and accessed, monitoring the unmapped regions is wasteful.
 * Because we can tolerate a small amount of noise, tracking every individual
 * mapping is not strictly required; it could even incur high overhead if the
 * mappings change frequently or the number of mappings is high.  The adaptive
 * regions adjustment mechanism further helps to deal with the noise by simply
 * identifying unmapped areas as regions that receive no access.  However,
 * applying the real mappings, which would contain many unmapped areas inside,
 * would make the adaptive mechanism quite complex.  Therefore, excessively
 * large unmapped areas inside the monitoring target should be removed so that
 * the adaptive mechanism does not spend time on them.
 *
 * For this reason, we convert the complex mappings into three distinct
 * regions that together cover every mapped area of the address space.  The
 * two gaps between the three regions are the two biggest unmapped areas in
 * the given address space.  In detail, this function first identifies the
 * start and the end of the mappings and the two biggest unmapped areas of the
 * address space.  Then, it constructs the three regions as below:
 *
 *     [mappings[0]->start, big_two_unmapped_areas[0]->start)
 *     [big_two_unmapped_areas[0]->end, big_two_unmapped_areas[1]->start)
 *     [big_two_unmapped_areas[1]->end, mappings[nr_mappings - 1]->end)
 *
 * As the usual memory map of a process looks like the diagram below, the gap
 * between the heap and the uppermost mmap()-ed region, and the gap between
 * the lowermost mmap()-ed region and the stack, will be the two biggest
 * unmapped regions.  Because these gaps are exceptionally large in a usual
 * address space, excluding only these two biggest unmapped regions is a
 * sufficient trade-off.
 *
 *   <heap>
 *   <BIG UNMAPPED REGION 1>
 *   <uppermost mmap()-ed region>
 *   (other mmap()-ed regions and small unmapped regions)
 *   <lowermost mmap()-ed region>
 *   <BIG UNMAPPED REGION 2>
 *   <stack>
 */
static void __damon_va_init_regions(struct damon_ctx *ctx,
				     struct damon_target *t)
{
	struct damon_target *ti;
	struct damon_region *r;
	struct damon_addr_range regions[3];
	unsigned long sz = 0, nr_pieces;
	int i, tidx = 0;

	if (damon_va_three_regions(t, regions)) {
		damon_for_each_target(ti, ctx) {
			if (ti == t)
				break;
			tidx++;
		}
		pr_debug("Failed to get three regions of %dth target\n", tidx);
		return;
	}

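	/*
	 * Let 'sz' be the target size of each initial piece: the total size
	 * of the three regions divided by 'min_nr_regions', so that splitting
	 * each region into pieces of this size produces roughly
	 * 'min_nr_regions' initial regions in total.
	 */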
	for (i = 0; i < 3; i++)
		sz += regions[i].end - regions[i].start;
	if (ctx->min_nr_regions)
		sz /= ctx->min_nr_regions;
	if (sz < DAMON_MIN_REGION)
		sz = DAMON_MIN_REGION;

	/* Set the initial three regions of the target */
	for (i = 0; i < 3; i++) {
		r = damon_new_region(regions[i].start, regions[i].end);
		if (!r) {
			pr_err("%d'th init region creation failed\n", i);
			return;
		}
		damon_add_region(r, t);

		nr_pieces = (regions[i].end - regions[i].start) / sz;
		damon_va_evenly_split_region(t, r, nr_pieces);
	}
}

/* Initialize '->regions_list' of every target (task) */
static void damon_va_init(struct damon_ctx *ctx)
{
	struct damon_target *t;

	damon_for_each_target(t, ctx) {
		/* the user may set the target regions as they want */
		if (!damon_nr_regions(t))
			__damon_va_init_regions(ctx, t);
	}
}

/*
 * Update regions for current memory mappings
 */
static void damon_va_update(struct damon_ctx *ctx)
{
	struct damon_addr_range three_regions[3];
	struct damon_target *t;

	damon_for_each_target(t, ctx) {
		if (damon_va_three_regions(t, three_regions))
			continue;
		damon_set_regions(t, three_regions, 3);
	}
}

static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
	pte_t *pte;
	spinlock_t *ptl;

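	/*
	 * If the PMD maps a huge page, clear its accessed bit under the PMD
	 * lock.  The mapping is re-checked after taking the lock, since it
	 * may have been split in the meantime.
	 */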
	if (pmd_huge(*pmd)) {
		ptl = pmd_lock(walk->mm, pmd);
		if (pmd_huge(*pmd)) {
			damon_pmdp_mkold(pmd, walk->mm, addr);
			spin_unlock(ptl);
			return 0;
		}
		spin_unlock(ptl);
	}

	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		return 0;
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte_present(*pte))
		goto out;
	damon_ptep_mkold(pte, walk->mm, addr);
out:
	pte_unmap_unlock(pte, ptl);
	return 0;
}

#ifdef CONFIG_HUGETLB_PAGE
static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm,
				struct vm_area_struct *vma, unsigned long addr)
{
	bool referenced = false;
	pte_t entry = huge_ptep_get(pte);
	struct page *page = pte_page(entry);

	get_page(page);

	if (pte_young(entry)) {
		referenced = true;
		entry = pte_mkold(entry);
		huge_ptep_set_access_flags(vma, addr, pte, entry,
					   vma->vm_flags & VM_WRITE);
	}

#ifdef CONFIG_MMU_NOTIFIER
	if (mmu_notifier_clear_young(mm, addr,
				     addr + huge_page_size(hstate_vma(vma))))
		referenced = true;
#endif /* CONFIG_MMU_NOTIFIER */

	if (referenced)
		set_page_young(page);

	set_page_idle(page);
	put_page(page);
}

static int damon_mkold_hugetlb_entry(pte_t *pte, unsigned long hmask,
				     unsigned long addr, unsigned long end,
				     struct mm_walk *walk)
{
	struct hstate *h = hstate_vma(walk->vma);
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(h, walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto out;

	damon_hugetlb_mkold(pte, walk->mm, walk->vma, addr);

out:
	spin_unlock(ptl);
	return 0;
}
#else
#define damon_mkold_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static const struct mm_walk_ops damon_mkold_ops = {
	.pmd_entry = damon_mkold_pmd_entry,
	.hugetlb_entry = damon_mkold_hugetlb_entry,
};

static void damon_va_mkold(struct mm_struct *mm, unsigned long addr)
{
	mmap_read_lock(mm);
	walk_page_range(mm, addr, addr + 1, &damon_mkold_ops, NULL);
	mmap_read_unlock(mm);
}

/*
 * Functions for the access checking of the regions
 */

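/*
 * Pick a random sampling address in the given region and clear the accessed
 * bit of the page mapping it, so that the next access check can tell whether
 * the region was accessed in the meantime.
 */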
static void __damon_va_prepare_access_check(struct damon_ctx *ctx,
			struct mm_struct *mm, struct damon_region *r)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_va_mkold(mm, r->sampling_addr);
}

static void damon_va_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct mm_struct *mm;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		mm = damon_get_mm(t);
		if (!mm)
			continue;
		damon_for_each_region(r, t)
			__damon_va_prepare_access_check(ctx, mm, r);
		mmput(mm);
	}
}

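/*
 * Result of the page table walk for the access check: 'young' is set if the
 * page mapping the sampling address has been accessed, and in that case
 * '*page_sz' is set to the size of that page (base page, PMD-mapped huge
 * page, or hugetlb page).
 */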
struct damon_young_walk_private {
	unsigned long *page_sz;
	bool young;
};

static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
	pte_t *pte;
	spinlock_t *ptl;
	struct page *page;
	struct damon_young_walk_private *priv = walk->private;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_huge(*pmd)) {
		ptl = pmd_lock(walk->mm, pmd);
		if (!pmd_huge(*pmd)) {
			spin_unlock(ptl);
			goto regular_page;
		}
		page = damon_get_page(pmd_pfn(*pmd));
		if (!page)
			goto huge_out;
		if (pmd_young(*pmd) || !page_is_idle(page) ||
					mmu_notifier_test_young(walk->mm,
						addr)) {
			*priv->page_sz = HPAGE_PMD_SIZE;
			priv->young = true;
		}
		put_page(page);
huge_out:
		spin_unlock(ptl);
		return 0;
	}

regular_page:
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */

	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		return -EINVAL;
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte_present(*pte))
		goto out;
	page = damon_get_page(pte_pfn(*pte));
	if (!page)
		goto out;
	if (pte_young(*pte) || !page_is_idle(page) ||
			mmu_notifier_test_young(walk->mm, addr)) {
		*priv->page_sz = PAGE_SIZE;
		priv->young = true;
	}
	put_page(page);
out:
	pte_unmap_unlock(pte, ptl);
	return 0;
}

#ifdef CONFIG_HUGETLB_PAGE
static int damon_young_hugetlb_entry(pte_t *pte, unsigned long hmask,
				     unsigned long addr, unsigned long end,
				     struct mm_walk *walk)
{
	struct damon_young_walk_private *priv = walk->private;
	struct hstate *h = hstate_vma(walk->vma);
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(h, walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto out;

	page = pte_page(entry);
	get_page(page);

	if (pte_young(entry) || !page_is_idle(page) ||
	    mmu_notifier_test_young(walk->mm, addr)) {
		*priv->page_sz = huge_page_size(h);
		priv->young = true;
	}

	put_page(page);

out:
	spin_unlock(ptl);
	return 0;
}
#else
#define damon_young_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static const struct mm_walk_ops damon_young_ops = {
	.pmd_entry = damon_young_pmd_entry,
	.hugetlb_entry = damon_young_hugetlb_entry,
};

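/*
 * Check whether the page mapping 'addr' has been accessed since the last
 * damon_va_mkold() on it and, if so, report the size of that page via
 * 'page_sz'.
 */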
static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
		unsigned long *page_sz)
{
	struct damon_young_walk_private arg = {
		.page_sz = page_sz,
		.young = false,
	};

	mmap_read_lock(mm);
	walk_page_range(mm, addr, addr + 1, &damon_young_ops, &arg);
	mmap_read_unlock(mm);
	return arg.young;
}

/*
 * Check whether the region was accessed after the last preparation
 *
 * mm	'mm_struct' for the given virtual address space
 * r	the region to be checked
 */
static void __damon_va_check_access(struct damon_ctx *ctx,
			       struct mm_struct *mm, struct damon_region *r)
{
	static struct mm_struct *last_mm;
	static unsigned long last_addr;
	static unsigned long last_page_sz = PAGE_SIZE;
	static bool last_accessed;

	/* If the region is in the last checked page, reuse the result */
	if (mm == last_mm && (ALIGN_DOWN(last_addr, last_page_sz) ==
				ALIGN_DOWN(r->sampling_addr, last_page_sz))) {
		if (last_accessed)
			r->nr_accesses++;
		return;
	}

	last_accessed = damon_va_young(mm, r->sampling_addr, &last_page_sz);
	if (last_accessed)
		r->nr_accesses++;

	last_mm = mm;
	last_addr = r->sampling_addr;
}

static unsigned int damon_va_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct mm_struct *mm;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;

	damon_for_each_target(t, ctx) {
		mm = damon_get_mm(t);
		if (!mm)
			continue;
		damon_for_each_region(r, t) {
			__damon_va_check_access(ctx, mm, r);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
		}
		mmput(mm);
	}

	return max_nr_accesses;
}

/*
 * Functions for the target validity check and cleanup
 */

static bool damon_va_target_valid(void *target)
{
	struct damon_target *t = target;
	struct task_struct *task;

	task = damon_get_task_struct(t);
	if (task) {
		put_task_struct(task);
		return true;
	}

	return false;
}

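/*
 * Apply the given madvise() 'behavior' to the region using do_madvise(), and
 * return the number of bytes it was successfully applied to.  Returns zero on
 * failure, or unconditionally when CONFIG_ADVISE_SYSCALLS is not set.
 */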
#ifndef CONFIG_ADVISE_SYSCALLS
static unsigned long damos_madvise(struct damon_target *target,
		struct damon_region *r, int behavior)
{
	return 0;
}
#else
static unsigned long damos_madvise(struct damon_target *target,
		struct damon_region *r, int behavior)
{
	struct mm_struct *mm;
	unsigned long start = PAGE_ALIGN(r->ar.start);
	unsigned long len = PAGE_ALIGN(r->ar.end - r->ar.start);
	unsigned long applied;

	mm = damon_get_mm(target);
	if (!mm)
		return 0;

	applied = do_madvise(mm, start, len, behavior) ? 0 : len;
	mmput(mm);

	return applied;
}
#endif	/* CONFIG_ADVISE_SYSCALLS */

static unsigned long damon_va_apply_scheme(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	int madv_action;

	switch (scheme->action) {
	case DAMOS_WILLNEED:
		madv_action = MADV_WILLNEED;
		break;
	case DAMOS_COLD:
		madv_action = MADV_COLD;
		break;
	case DAMOS_PAGEOUT:
		madv_action = MADV_PAGEOUT;
		break;
	case DAMOS_HUGEPAGE:
		madv_action = MADV_HUGEPAGE;
		break;
	case DAMOS_NOHUGEPAGE:
		madv_action = MADV_NOHUGEPAGE;
		break;
	case DAMOS_STAT:
		return 0;
	default:
		return 0;
	}

	return damos_madvise(t, r, madv_action);
}

static int damon_va_scheme_score(struct damon_ctx *context,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_pageout_score(context, r, scheme);
	default:
		break;
	}

	return DAMOS_MAX_SCORE;
}

static int __init damon_va_initcall(void)
{
	struct damon_operations ops = {
		.id = DAMON_OPS_VADDR,
		.init = damon_va_init,
		.update = damon_va_update,
		.prepare_access_checks = damon_va_prepare_access_checks,
		.check_accesses = damon_va_check_accesses,
		.reset_aggregated = NULL,
		.target_valid = damon_va_target_valid,
		.cleanup = NULL,
		.apply_scheme = damon_va_apply_scheme,
		.get_scheme_score = damon_va_scheme_score,
	};
	/* ops for fixed virtual address ranges */
	struct damon_operations ops_fvaddr = ops;
	int err;

	/* Don't set the monitoring target regions for the entire mapping */
	ops_fvaddr.id = DAMON_OPS_FVADDR;
	ops_fvaddr.init = NULL;
	ops_fvaddr.update = NULL;

	err = damon_register_ops(&ops);
	if (err)
		return err;
	return damon_register_ops(&ops_fvaddr);
}

subsys_initcall(damon_va_initcall);

#include "vaddr-test.h"