xref: /linux/mm/damon/vaddr.c (revision 3f0a50f345f78183f6e9b39c2f45ca5dcaa511ca)
// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for Virtual Address Spaces
 *
 * Author: SeongJae Park <sjpark@amazon.de>
 */

#define pr_fmt(fmt) "damon-va: " fmt

#include <asm-generic/mman-common.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>

#include "ops-common.h"

#ifdef CONFIG_DAMON_VADDR_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif

/*
 * 't->pid' should be the pointer to the relevant 'struct pid' having a
 * reference count.  The caller must put the returned task, unless it is NULL.
 */
static inline struct task_struct *damon_get_task_struct(struct damon_target *t)
{
	return get_pid_task(t->pid, PIDTYPE_PID);
}

/*
 * Get the mm_struct of the given target
 *
 * The caller _must_ put the mm_struct after use, unless it is NULL.
 *
 * Returns the mm_struct of the target on success, NULL on failure
 */
static struct mm_struct *damon_get_mm(struct damon_target *t)
{
	struct task_struct *task;
	struct mm_struct *mm;

	task = damon_get_task_struct(t);
	if (!task)
		return NULL;

	mm = get_task_mm(task);
	put_task_struct(task);
	return mm;
}

/*
 * Functions for the initial monitoring target regions construction
 */

/*
 * Size-evenly split a region into 'nr_pieces' small regions
 *
 * Returns 0 on success, or a negative error code otherwise.
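 *
 * For example, splitting a region covering [0, 10 * DAMON_MIN_REGION) into
 * three pieces makes 'sz_piece' 3 * DAMON_MIN_REGION, so the results are
 * [0, 3), [3, 6), and [6, 10) in DAMON_MIN_REGION units; the last piece is
 * extended to the original end to cover the rounding remainder.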
 */
static int damon_va_evenly_split_region(struct damon_target *t,
		struct damon_region *r, unsigned int nr_pieces)
{
	unsigned long sz_orig, sz_piece, orig_end;
	struct damon_region *n = NULL, *next;
	unsigned long start;

	if (!r || !nr_pieces)
		return -EINVAL;

	orig_end = r->ar.end;
	sz_orig = r->ar.end - r->ar.start;
	sz_piece = ALIGN_DOWN(sz_orig / nr_pieces, DAMON_MIN_REGION);

	if (!sz_piece)
		return -EINVAL;

	r->ar.end = r->ar.start + sz_piece;
	next = damon_next_region(r);
	for (start = r->ar.end; start + sz_piece <= orig_end;
			start += sz_piece) {
		n = damon_new_region(start, start + sz_piece);
		if (!n)
			return -ENOMEM;
		damon_insert_region(n, r, next, t);
		r = n;
	}
	/* complement last region for possible rounding error */
	if (n)
		n->ar.end = orig_end;

	return 0;
}

static unsigned long sz_range(struct damon_addr_range *r)
{
	return r->end - r->start;
}

/*
 * Find the three regions separated by the two biggest unmapped regions
 *
 * vma		the head vma of the target address space
 * regions	an array of three address ranges in which the results will be saved
 *
 * This function receives an address space and finds three regions in it which
 * are separated by the two biggest unmapped regions in the space.  Please
 * refer to the comments of '__damon_va_init_regions()' below to see why this
 * is necessary.
 *
 * Returns 0 on success, or a negative error code otherwise.
 */
static int __damon_va_three_regions(struct vm_area_struct *vma,
				       struct damon_addr_range regions[3])
{
	struct damon_addr_range gap = {0}, first_gap = {0}, second_gap = {0};
	struct vm_area_struct *last_vma = NULL;
	unsigned long start = 0;
	struct rb_root rbroot;

	/* Find the two biggest gaps so that first_gap > second_gap > others */
	for (; vma; vma = vma->vm_next) {
		if (!last_vma) {
			start = vma->vm_start;
			goto next;
		}

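		/*
		 * 'rb_subtree_gap' is the biggest unmapped gap preceding any
		 * vma in the subtree rooted at this vma.  If even that cannot
		 * beat the current second biggest gap, skip the whole subtree
		 * by jumping to its last (highest address) vma.
		 */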
		if (vma->rb_subtree_gap <= sz_range(&second_gap)) {
			rbroot.rb_node = &vma->vm_rb;
			vma = rb_entry(rb_last(&rbroot),
					struct vm_area_struct, vm_rb);
			goto next;
		}

		gap.start = last_vma->vm_end;
		gap.end = vma->vm_start;
		if (sz_range(&gap) > sz_range(&second_gap)) {
			swap(gap, second_gap);
			if (sz_range(&second_gap) > sz_range(&first_gap))
				swap(second_gap, first_gap);
		}
next:
		last_vma = vma;
	}

	if (!sz_range(&second_gap) || !sz_range(&first_gap))
		return -EINVAL;

	/* Sort the two biggest gaps by address */
	if (first_gap.start > second_gap.start)
		swap(first_gap, second_gap);

	/* Store the result */
	regions[0].start = ALIGN(start, DAMON_MIN_REGION);
	regions[0].end = ALIGN(first_gap.start, DAMON_MIN_REGION);
	regions[1].start = ALIGN(first_gap.end, DAMON_MIN_REGION);
	regions[1].end = ALIGN(second_gap.start, DAMON_MIN_REGION);
	regions[2].start = ALIGN(second_gap.end, DAMON_MIN_REGION);
	regions[2].end = ALIGN(last_vma->vm_end, DAMON_MIN_REGION);

	return 0;
}

/*
 * Get the three regions in the given target (task)
 *
 * Returns 0 on success, or a negative error code otherwise.
 */
static int damon_va_three_regions(struct damon_target *t,
				struct damon_addr_range regions[3])
{
	struct mm_struct *mm;
	int rc;

	mm = damon_get_mm(t);
	if (!mm)
		return -EINVAL;

	mmap_read_lock(mm);
	rc = __damon_va_three_regions(mm->mmap, regions);
	mmap_read_unlock(mm);

	mmput(mm);
	return rc;
}

/*
 * Initialize the monitoring target regions for the given target (task)
 *
 * t	the given target
 *
 * Because only small parts of the entire address space are actually mapped to
 * memory and accessed, monitoring the unmapped regions is wasteful.  On the
 * other hand, because we can tolerate small noises, tracking every mapping is
 * not strictly required; it could even incur a high overhead if the mappings
 * frequently change or the number of mappings is high.  The adaptive regions
 * adjustment mechanism will further help to deal with the noise by simply
 * identifying the unmapped areas as regions that receive no access.  Moreover,
 * using the real mappings, which would have many unmapped areas inside, would
 * make the adaptive mechanism quite complex.  Still, too huge unmapped areas
 * inside the monitoring target should be removed so that the adaptive
 * mechanism does not waste time on them.
 *
 * For this reason, we convert the complex mappings to three distinct regions
 * that cover every mapped area of the address space.  The two gaps between
 * the three regions are the two biggest unmapped areas in the given address
 * space.  In detail, this function first identifies the start and the end of
 * the mappings and the two biggest unmapped areas of the address space.
 * Then, it constructs the three regions as below:
 *
 *     [mappings[0]->start, big_two_unmapped_areas[0]->start)
 *     [big_two_unmapped_areas[0]->end, big_two_unmapped_areas[1]->start)
 *     [big_two_unmapped_areas[1]->end, mappings[nr_mappings - 1]->end)
 *
 * Because the usual memory map of a process is as shown below, the gap
 * between the heap and the uppermost mmap()-ed region, and the gap between
 * the lowermost mmap()-ed region and the stack, will be the two biggest
 * unmapped regions.  Because these gaps are exceptionally huge in a usual
 * address space, excluding only these two is a sufficient trade-off.
 *
 *   <heap>
 *   <BIG UNMAPPED REGION 1>
 *   <uppermost mmap()-ed region>
 *   (other mmap()-ed regions and small unmapped regions)
 *   <lowermost mmap()-ed region>
 *   <BIG UNMAPPED REGION 2>
 *   <stack>
 */
static void __damon_va_init_regions(struct damon_ctx *ctx,
				     struct damon_target *t)
{
	struct damon_target *ti;
	struct damon_region *r;
	struct damon_addr_range regions[3];
	unsigned long sz = 0, nr_pieces;
	int i, tidx = 0;

	if (damon_va_three_regions(t, regions)) {
		damon_for_each_target(ti, ctx) {
			if (ti == t)
				break;
			tidx++;
		}
		pr_debug("Failed to get three regions of %dth target\n", tidx);
		return;
	}

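	/*
	 * Choose a piece size so that splitting the three regions by it
	 * results in about 'min_nr_regions' initial regions in total, but not
	 * smaller than DAMON_MIN_REGION.
	 */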
	for (i = 0; i < 3; i++)
		sz += regions[i].end - regions[i].start;
	if (ctx->min_nr_regions)
		sz /= ctx->min_nr_regions;
	if (sz < DAMON_MIN_REGION)
		sz = DAMON_MIN_REGION;

	/* Set the initial three regions of the target */
	for (i = 0; i < 3; i++) {
		r = damon_new_region(regions[i].start, regions[i].end);
		if (!r) {
			pr_err("%d'th init region creation failed\n", i);
			return;
		}
		damon_add_region(r, t);

		nr_pieces = (regions[i].end - regions[i].start) / sz;
		damon_va_evenly_split_region(t, r, nr_pieces);
	}
}

/* Initialize '->regions_list' of every target (task) */
static void damon_va_init(struct damon_ctx *ctx)
{
	struct damon_target *t;

	damon_for_each_target(t, ctx) {
		/* the user may set the target regions as they want */
		if (!damon_nr_regions(t))
			__damon_va_init_regions(ctx, t);
	}
}

/*
 * Functions for the dynamic monitoring target regions update
 */

/*
 * Check whether a region is intersecting an address range
 *
 * Returns true if it is.
 */
static bool damon_intersect(struct damon_region *r,
		struct damon_addr_range *re)
{
	return !(r->ar.end <= re->start || re->end <= r->ar.start);
}

/*
 * Update damon regions for the three big regions of the given target
 *
 * t		the given target
 * bregions	the three big regions of the target
 */
static void damon_va_apply_three_regions(struct damon_target *t,
		struct damon_addr_range bregions[3])
{
	struct damon_region *r, *next;
	unsigned int i;

	/* Remove regions which are not in the three big regions now */
	damon_for_each_region_safe(r, next, t) {
		for (i = 0; i < 3; i++) {
			if (damon_intersect(r, &bregions[i]))
				break;
		}
		if (i == 3)
			damon_destroy_region(r, t);
	}

	/* Adjust intersecting regions to fit with the three big regions */
	for (i = 0; i < 3; i++) {
		struct damon_region *first = NULL, *last;
		struct damon_region *newr;
		struct damon_addr_range *br;

		br = &bregions[i];
		/* Get the first and last regions which intersect with br */
		damon_for_each_region(r, t) {
			if (damon_intersect(r, br)) {
				if (!first)
					first = r;
				last = r;
			}
			if (r->ar.start >= br->end)
				break;
		}
		if (!first) {
			/* no damon_region intersects with this big region */
			newr = damon_new_region(
					ALIGN_DOWN(br->start,
						DAMON_MIN_REGION),
					ALIGN(br->end, DAMON_MIN_REGION));
			if (!newr)
				continue;
			damon_insert_region(newr, damon_prev_region(r), r, t);
		} else {
			first->ar.start = ALIGN_DOWN(br->start,
					DAMON_MIN_REGION);
			last->ar.end = ALIGN(br->end, DAMON_MIN_REGION);
		}
	}
}

/*
 * Update regions for current memory mappings
 */
static void damon_va_update(struct damon_ctx *ctx)
{
	struct damon_addr_range three_regions[3];
	struct damon_target *t;

	damon_for_each_target(t, ctx) {
		if (damon_va_three_regions(t, three_regions))
			continue;
		damon_va_apply_three_regions(t, three_regions);
	}
}

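/*
 * Page table walk callback for clearing the accessed bit of the page mapped
 * at 'addr'.  Handles both PMD-mapped huge pages and regular PTE-mapped
 * pages.
 */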
static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_huge(*pmd)) {
		ptl = pmd_lock(walk->mm, pmd);
		if (pmd_huge(*pmd)) {
			damon_pmdp_mkold(pmd, walk->mm, addr);
			spin_unlock(ptl);
			return 0;
		}
		spin_unlock(ptl);
	}

	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		return 0;
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte_present(*pte))
		goto out;
	damon_ptep_mkold(pte, walk->mm, addr);
out:
	pte_unmap_unlock(pte, ptl);
	return 0;
}

#ifdef CONFIG_HUGETLB_PAGE
static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm,
				struct vm_area_struct *vma, unsigned long addr)
{
	bool referenced = false;
	pte_t entry = huge_ptep_get(pte);
	struct page *page = pte_page(entry);

	get_page(page);

	if (pte_young(entry)) {
		referenced = true;
		entry = pte_mkold(entry);
		huge_ptep_set_access_flags(vma, addr, pte, entry,
					   vma->vm_flags & VM_WRITE);
	}

#ifdef CONFIG_MMU_NOTIFIER
	if (mmu_notifier_clear_young(mm, addr,
				     addr + huge_page_size(hstate_vma(vma))))
		referenced = true;
#endif /* CONFIG_MMU_NOTIFIER */

	if (referenced)
		set_page_young(page);

	set_page_idle(page);
	put_page(page);
}

static int damon_mkold_hugetlb_entry(pte_t *pte, unsigned long hmask,
				     unsigned long addr, unsigned long end,
				     struct mm_walk *walk)
{
	struct hstate *h = hstate_vma(walk->vma);
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(h, walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto out;

	damon_hugetlb_mkold(pte, walk->mm, walk->vma, addr);

out:
	spin_unlock(ptl);
	return 0;
}
#else
#define damon_mkold_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static const struct mm_walk_ops damon_mkold_ops = {
	.pmd_entry = damon_mkold_pmd_entry,
	.hugetlb_entry = damon_mkold_hugetlb_entry,
};

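/*
 * Clear the accessed bit of the page that maps 'addr' in 'mm', so that the
 * next access check can see whether the page has been accessed since.
 */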
static void damon_va_mkold(struct mm_struct *mm, unsigned long addr)
{
	mmap_read_lock(mm);
	walk_page_range(mm, addr, addr + 1, &damon_mkold_ops, NULL);
	mmap_read_unlock(mm);
}

/*
 * Functions for the access checking of the regions
 */

static void __damon_va_prepare_access_check(struct damon_ctx *ctx,
			struct mm_struct *mm, struct damon_region *r)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_va_mkold(mm, r->sampling_addr);
}

static void damon_va_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct mm_struct *mm;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		mm = damon_get_mm(t);
		if (!mm)
			continue;
		damon_for_each_region(r, t)
			__damon_va_prepare_access_check(ctx, mm, r);
		mmput(mm);
	}
}

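/*
 * Private data for the damon_young_ops page table walk: '*page_sz' is set to
 * the size of the checked page, and 'young' to whether it has been accessed.
 */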
struct damon_young_walk_private {
	unsigned long *page_sz;
	bool young;
};

static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
	pte_t *pte;
	spinlock_t *ptl;
	struct page *page;
	struct damon_young_walk_private *priv = walk->private;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_huge(*pmd)) {
		ptl = pmd_lock(walk->mm, pmd);
		if (!pmd_huge(*pmd)) {
			spin_unlock(ptl);
			goto regular_page;
		}
		page = damon_get_page(pmd_pfn(*pmd));
		if (!page)
			goto huge_out;
		if (pmd_young(*pmd) || !page_is_idle(page) ||
					mmu_notifier_test_young(walk->mm,
						addr)) {
			*priv->page_sz = ((1UL) << HPAGE_PMD_SHIFT);
			priv->young = true;
		}
		put_page(page);
huge_out:
		spin_unlock(ptl);
		return 0;
	}

regular_page:
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */

	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		return -EINVAL;
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte_present(*pte))
		goto out;
	page = damon_get_page(pte_pfn(*pte));
	if (!page)
		goto out;
	if (pte_young(*pte) || !page_is_idle(page) ||
			mmu_notifier_test_young(walk->mm, addr)) {
		*priv->page_sz = PAGE_SIZE;
		priv->young = true;
	}
	put_page(page);
out:
	pte_unmap_unlock(pte, ptl);
	return 0;
}

#ifdef CONFIG_HUGETLB_PAGE
static int damon_young_hugetlb_entry(pte_t *pte, unsigned long hmask,
				     unsigned long addr, unsigned long end,
				     struct mm_walk *walk)
{
	struct damon_young_walk_private *priv = walk->private;
	struct hstate *h = hstate_vma(walk->vma);
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(h, walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto out;

	page = pte_page(entry);
	get_page(page);

	if (pte_young(entry) || !page_is_idle(page) ||
	    mmu_notifier_test_young(walk->mm, addr)) {
		*priv->page_sz = huge_page_size(h);
		priv->young = true;
	}

	put_page(page);

out:
	spin_unlock(ptl);
	return 0;
}
#else
#define damon_young_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static const struct mm_walk_ops damon_young_ops = {
	.pmd_entry = damon_young_pmd_entry,
	.hugetlb_entry = damon_young_hugetlb_entry,
};

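/*
 * Check whether the page that maps 'addr' in 'mm' has been accessed since the
 * last damon_va_mkold() call on it.  Returns whether the page was accessed;
 * if so, the size of the checked page is also stored in '*page_sz'.
 */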
static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
		unsigned long *page_sz)
{
	struct damon_young_walk_private arg = {
		.page_sz = page_sz,
		.young = false,
	};

	mmap_read_lock(mm);
	walk_page_range(mm, addr, addr + 1, &damon_young_ops, &arg);
	mmap_read_unlock(mm);
	return arg.young;
}

/*
 * Check whether the region was accessed after the last preparation
 *
 * mm	'mm_struct' for the given virtual address space
 * r	the region to be checked
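 *
 * The result of the last check is cached in static variables and reused when
 * the sampling address of the next region falls in the same page of the same
 * 'mm'.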
 */
static void __damon_va_check_access(struct damon_ctx *ctx,
			       struct mm_struct *mm, struct damon_region *r)
{
	static struct mm_struct *last_mm;
	static unsigned long last_addr;
	static unsigned long last_page_sz = PAGE_SIZE;
	static bool last_accessed;

	/* If the region is in the last checked page, reuse the result */
	if (mm == last_mm && (ALIGN_DOWN(last_addr, last_page_sz) ==
				ALIGN_DOWN(r->sampling_addr, last_page_sz))) {
		if (last_accessed)
			r->nr_accesses++;
		return;
	}

	last_accessed = damon_va_young(mm, r->sampling_addr, &last_page_sz);
	if (last_accessed)
		r->nr_accesses++;

	last_mm = mm;
	last_addr = r->sampling_addr;
}

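/*
 * Check access to the sampling address of every region of every target, and
 * return the maximum 'nr_accesses' among all regions.
 */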
static unsigned int damon_va_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct mm_struct *mm;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;

	damon_for_each_target(t, ctx) {
		mm = damon_get_mm(t);
		if (!mm)
			continue;
		damon_for_each_region(r, t) {
			__damon_va_check_access(ctx, mm, r);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
		}
		mmput(mm);
	}

	return max_nr_accesses;
}

/*
 * Functions for the target validity check and cleanup
 */

static bool damon_va_target_valid(void *target)
{
	struct damon_target *t = target;
	struct task_struct *task;

	task = damon_get_task_struct(t);
	if (task) {
		put_task_struct(task);
		return true;
	}

	return false;
}

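/*
 * Apply the given madvise() 'behavior' to the address range of the region and
 * return the number of bytes it was applied to.  Returns 0 on failure, or
 * always when madvise() syscalls support is not built into the kernel.
 */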
#ifndef CONFIG_ADVISE_SYSCALLS
static unsigned long damos_madvise(struct damon_target *target,
		struct damon_region *r, int behavior)
{
	return 0;
}
#else
static unsigned long damos_madvise(struct damon_target *target,
		struct damon_region *r, int behavior)
{
	struct mm_struct *mm;
	unsigned long start = PAGE_ALIGN(r->ar.start);
	unsigned long len = PAGE_ALIGN(r->ar.end - r->ar.start);
	unsigned long applied;

	mm = damon_get_mm(target);
	if (!mm)
		return 0;

	applied = do_madvise(mm, start, len, behavior) ? 0 : len;
	mmput(mm);

	return applied;
}
#endif	/* CONFIG_ADVISE_SYSCALLS */

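/*
 * Apply the DAMOS action of 'scheme' to region 'r' of target 't' by
 * translating the action to the corresponding madvise() behavior.  Returns
 * the number of bytes the action was applied to.
 */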
static unsigned long damon_va_apply_scheme(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	int madv_action;

	switch (scheme->action) {
	case DAMOS_WILLNEED:
		madv_action = MADV_WILLNEED;
		break;
	case DAMOS_COLD:
		madv_action = MADV_COLD;
		break;
	case DAMOS_PAGEOUT:
		madv_action = MADV_PAGEOUT;
		break;
	case DAMOS_HUGEPAGE:
		madv_action = MADV_HUGEPAGE;
		break;
	case DAMOS_NOHUGEPAGE:
		madv_action = MADV_NOHUGEPAGE;
		break;
	case DAMOS_STAT:
		return 0;
	default:
		return 0;
	}

	return damos_madvise(t, r, madv_action);
}

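/*
 * Return the priority score (0 to DAMOS_MAX_SCORE) of region 'r' for the
 * action of the given scheme.  Only DAMOS_PAGEOUT has a dedicated score
 * function; other actions get the maximum score.
 */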
static int damon_va_scheme_score(struct damon_ctx *context,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_pageout_score(context, r, scheme);
	default:
		break;
	}

	return DAMOS_MAX_SCORE;
}

static int __init damon_va_initcall(void)
{
	struct damon_operations ops = {
		.id = DAMON_OPS_VADDR,
		.init = damon_va_init,
		.update = damon_va_update,
		.prepare_access_checks = damon_va_prepare_access_checks,
		.check_accesses = damon_va_check_accesses,
		.reset_aggregated = NULL,
		.target_valid = damon_va_target_valid,
		.cleanup = NULL,
		.apply_scheme = damon_va_apply_scheme,
		.get_scheme_score = damon_va_scheme_score,
	};

	return damon_register_ops(&ops);
}

subsys_initcall(damon_va_initcall);

#include "vaddr-test.h"