Lines matching 'mm' in mm/damon/vaddr.c
1 // SPDX-License-Identifier: GPL-2.0
8 #define pr_fmt(fmt) "damon-va: " fmt
16 #include <linux/sched/mm.h>
19 #include "ops-common.h"
27 * 't->pid' should be the pointer to the relevant 'struct pid' having reference
32 return get_pid_task(t->pid, PIDTYPE_PID); in damon_get_task_struct()
45 struct mm_struct *mm; in damon_get_mm() local
51 mm = get_task_mm(task); in damon_get_mm()
53 return mm; in damon_get_mm()
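
The two fragments above follow the usual reference-counting chain: get_pid_task() resolves the target's pinned 'struct pid' into a task reference, and get_task_mm() pins that task's address space. A minimal sketch of how the two plausibly combine (example_get_mm() is an illustrative name, not the kernel's; the caller must mmput() the result):

static struct mm_struct *example_get_mm(struct damon_target *t)
{
        struct task_struct *task;
        struct mm_struct *mm;

        task = get_pid_task(t->pid, PIDTYPE_PID);       /* takes a task reference */
        if (!task)
                return NULL;

        mm = get_task_mm(task);         /* pins the mm, or NULL if it is already gone */
        put_task_struct(task);          /* the task reference is no longer needed */
        return mm;
}
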
61 * Size-evenly split a region into 'nr_pieces' small regions
63 * Returns 0 on success, or negative error code otherwise.
74 return -EINVAL; in damon_va_evenly_split_region()
77 return 0; in damon_va_evenly_split_region()
79 orig_end = r->ar.end; in damon_va_evenly_split_region()
84 return -EINVAL; in damon_va_evenly_split_region()
86 r->ar.end = r->ar.start + sz_piece; in damon_va_evenly_split_region()
88 for (start = r->ar.end, i = 1; i < nr_pieces; start += sz_piece, i++) { in damon_va_evenly_split_region()
91 return -ENOMEM; in damon_va_evenly_split_region()
97 n->ar.end = orig_end; in damon_va_evenly_split_region()
99 return 0; in damon_va_evenly_split_region()
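
A runnable userspace sketch of the split arithmetic above, assuming a DAMON_MIN_REGION of 4096 bytes (MIN_REGION and ALIGN_DOWN below are local stand-ins for the kernel macros): every piece but the last is sz_piece bytes long, and the last piece keeps the original end, so it absorbs the rounding remainder.

#include <stdio.h>

#define MIN_REGION              4096UL
#define ALIGN_DOWN(x, a)        ((x) & ~((a) - 1))

int main(void)
{
        unsigned long start = 0x7f0000000000UL;
        unsigned long end = start + (10UL << 20);       /* a 10 MiB region */
        unsigned long nr_pieces = 3;
        unsigned long sz_piece = ALIGN_DOWN((end - start) / nr_pieces, MIN_REGION);
        unsigned long s, i;

        for (s = start, i = 0; i < nr_pieces; i++, s += sz_piece)
                printf("piece %lu: [%#lx, %#lx)\n", i, s,
                       i == nr_pieces - 1 ? end : s + sz_piece);
        return 0;
}
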
104 return r->end - r->start; in sz_range()
118 * Returns 0 on success, or negative error code otherwise.
120 static int __damon_va_three_regions(struct mm_struct *mm, in __damon_va_three_regions() argument
123 struct damon_addr_range first_gap = {0}, second_gap = {0}; in __damon_va_three_regions()
124 VMA_ITERATOR(vmi, mm, 0); in __damon_va_three_regions()
138 start = vma->vm_start; in __damon_va_three_regions()
141 gap = vma->vm_start - prev->vm_end; in __damon_va_three_regions()
145 first_gap.start = prev->vm_end; in __damon_va_three_regions()
146 first_gap.end = vma->vm_start; in __damon_va_three_regions()
148 second_gap.start = prev->vm_end; in __damon_va_three_regions()
149 second_gap.end = vma->vm_start; in __damon_va_three_regions()
157 return -EINVAL; in __damon_va_three_regions()
164 regions[0].start = ALIGN(start, DAMON_MIN_REGION); in __damon_va_three_regions()
165 regions[0].end = ALIGN(first_gap.start, DAMON_MIN_REGION); in __damon_va_three_regions()
169 regions[2].end = ALIGN(prev->vm_end, DAMON_MIN_REGION); in __damon_va_three_regions()
171 return 0; in __damon_va_three_regions()
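
The iteration above keeps track of the two largest gaps between adjacent mappings. A hedged sketch of that bookkeeping (example_track_gap() is illustrative, not the verbatim kernel logic; field names follow the excerpt): a new record gap demotes the previous largest one to second place.

static void example_track_gap(struct damon_addr_range *first_gap,
                              struct damon_addr_range *second_gap,
                              struct vm_area_struct *prev,
                              struct vm_area_struct *vma)
{
        unsigned long gap = vma->vm_start - prev->vm_end;

        if (gap > first_gap->end - first_gap->start) {
                *second_gap = *first_gap;       /* old largest becomes second */
                first_gap->start = prev->vm_end;
                first_gap->end = vma->vm_start;
        } else if (gap > second_gap->end - second_gap->start) {
                second_gap->start = prev->vm_end;
                second_gap->end = vma->vm_start;
        }
}
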
177 * Returns 0 on success, negative error code otherwise.
182 struct mm_struct *mm; in damon_va_three_regions() local
185 mm = damon_get_mm(t); in damon_va_three_regions()
186 if (!mm) in damon_va_three_regions()
187 return -EINVAL; in damon_va_three_regions()
189 mmap_read_lock(mm); in damon_va_three_regions()
190 rc = __damon_va_three_regions(mm, regions); in damon_va_three_regions()
191 mmap_read_unlock(mm); in damon_va_three_regions()
193 mmput(mm); in damon_va_three_regions()
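
Every fragment in this file that inspects a target's address space follows the discipline shown above: pin the mm with damon_get_mm(), take the mmap lock for reading around the actual work, then drop both references. A hedged generic sketch (example_with_mm() and the callback are illustrative):

static int example_with_mm(struct damon_target *t,
                           int (*fn)(struct mm_struct *mm, void *arg), void *arg)
{
        struct mm_struct *mm = damon_get_mm(t);
        int rc;

        if (!mm)
                return -EINVAL;         /* the target has already exited */

        mmap_read_lock(mm);
        rc = fn(mm, arg);               /* e.g. __damon_va_three_regions(mm, regions) */
        mmap_read_unlock(mm);
        mmput(mm);
        return rc;
}
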
221 * [mappings[0]->start, big_two_unmapped_areas[0]->start)
222 * [big_two_unmapped_areas[0]->end, big_two_unmapped_areas[1]->start)
223 * [big_two_unmapped_areas[1]->end, mappings[nr_mappings - 1]->end)
226 * the uppermost mmap()-ed region, and the gap between the lowermost mmap()-ed
229 * two biggest unmapped regions will be sufficient to make a trade-off.
233 * <uppermost mmap()-ed region>
234 * (other mmap()-ed regions and small unmapped regions)
235 * <lowermost mmap()-ed region>
245 unsigned long sz = 0, nr_pieces; in __damon_va_init_regions()
246 int i, tidx = 0; in __damon_va_init_regions()
258 for (i = 0; i < 3; i++) in __damon_va_init_regions()
259 sz += regions[i].end - regions[i].start; in __damon_va_init_regions()
260 if (ctx->attrs.min_nr_regions) in __damon_va_init_regions()
261 sz /= ctx->attrs.min_nr_regions; in __damon_va_init_regions()
266 for (i = 0; i < 3; i++) { in __damon_va_init_regions()
274 nr_pieces = (regions[i].end - regions[i].start) / sz; in __damon_va_init_regions()
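
A runnable sketch of the sizing arithmetic above, with illustrative numbers: three initial regions totalling 300 MiB and a min_nr_regions of 10 give a target piece size of 30 MiB, so the regions below are split into 4, 3, and 2 pieces respectively.

#include <stdio.h>

int main(void)
{
        unsigned long region_sz[3] = { 120UL << 20, 100UL << 20, 80UL << 20 };
        unsigned long min_nr_regions = 10, sz = 0, nr_pieces;
        int i;

        for (i = 0; i < 3; i++)
                sz += region_sz[i];
        sz /= min_nr_regions;           /* target size of one piece */

        for (i = 0; i < 3; i++) {
                nr_pieces = region_sz[i] / sz;
                printf("region %d: split into %lu pieces\n", i,
                       nr_pieces ? nr_pieces : 1);
        }
        return 0;
}
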
279 /* Initialize '->regions_list' of every target (task) */
314 ptl = pmd_lock(walk->mm, pmd); in damon_mkold_pmd_entry()
319 return 0; in damon_mkold_pmd_entry()
323 damon_pmdp_mkold(pmd, walk->vma, addr); in damon_mkold_pmd_entry()
325 return 0; in damon_mkold_pmd_entry()
330 pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); in damon_mkold_pmd_entry()
332 return 0; in damon_mkold_pmd_entry()
335 damon_ptep_mkold(pte, walk->vma, addr); in damon_mkold_pmd_entry()
338 return 0; in damon_mkold_pmd_entry()
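
The walker above handles the two page-table levels separately: the PMD lock covers a mapped huge page, otherwise the PTE is mapped and locked. A hedged sketch of that pattern (example_mkold_pmd_entry() is illustrative; it omits the CONFIG_TRANSPARENT_HUGEPAGE guards and some re-checks of the real code, but keeps the re-test after taking the PMD lock):

static int example_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
                                   unsigned long next, struct mm_walk *walk)
{
        spinlock_t *ptl;
        pte_t *pte;

        if (pmd_trans_huge(pmdp_get(pmd))) {
                ptl = pmd_lock(walk->mm, pmd);
                if (pmd_trans_huge(pmdp_get(pmd)))      /* still huge under the lock? */
                        damon_pmdp_mkold(pmd, walk->vma, addr);
                spin_unlock(ptl);
                return 0;
        }

        pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
        if (!pte)
                return 0;
        if (pte_present(ptep_get(pte)))
                damon_ptep_mkold(pte, walk->vma, addr);
        pte_unmap_unlock(pte, ptl);
        return 0;
}
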
342 static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm, in damon_hugetlb_mkold() argument
346 pte_t entry = huge_ptep_get(mm, addr, pte); in damon_hugetlb_mkold()
355 set_huge_pte_at(mm, addr, pte, entry, psize); in damon_hugetlb_mkold()
358 if (mmu_notifier_clear_young(mm, addr, in damon_hugetlb_mkold()
373 struct hstate *h = hstate_vma(walk->vma); in damon_mkold_hugetlb_entry()
377 ptl = huge_pte_lock(h, walk->mm, pte); in damon_mkold_hugetlb_entry()
378 entry = huge_ptep_get(walk->mm, addr, pte); in damon_mkold_hugetlb_entry()
382 damon_hugetlb_mkold(pte, walk->mm, walk->vma, addr); in damon_mkold_hugetlb_entry()
386 return 0; in damon_mkold_hugetlb_entry()
398 static void damon_va_mkold(struct mm_struct *mm, unsigned long addr) in damon_va_mkold() argument
400 mmap_read_lock(mm); in damon_va_mkold()
401 walk_page_range(mm, addr, addr + 1, &damon_mkold_ops, NULL); in damon_va_mkold()
402 mmap_read_unlock(mm); in damon_va_mkold()
409 static void __damon_va_prepare_access_check(struct mm_struct *mm, in __damon_va_prepare_access_check() argument
412 r->sampling_addr = damon_rand(r->ar.start, r->ar.end); in __damon_va_prepare_access_check()
414 damon_va_mkold(mm, r->sampling_addr); in __damon_va_prepare_access_check()
420 struct mm_struct *mm; in damon_va_prepare_access_checks() local
424 mm = damon_get_mm(t); in damon_va_prepare_access_checks()
425 if (!mm) in damon_va_prepare_access_checks()
428 __damon_va_prepare_access_check(mm, r); in damon_va_prepare_access_checks()
429 mmput(mm); in damon_va_prepare_access_checks()
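
The preparation step above samples one random address per region and clears its accessed bit, so the next check can tell whether that address was touched during the sampling interval. A hedged sketch combining the two fragments (example_prepare_access_check() is an illustrative name; damon_mkold_ops is the walker shown earlier):

static void example_prepare_access_check(struct mm_struct *mm,
                                         struct damon_region *r)
{
        /* damon_rand(l, h) picks a pseudo-random address in [l, h) */
        r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

        mmap_read_lock(mm);
        /* a one-byte walk is enough to reach the mapping of sampling_addr */
        walk_page_range(mm, r->sampling_addr, r->sampling_addr + 1,
                        &damon_mkold_ops, NULL);
        mmap_read_unlock(mm);
}
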
446 struct damon_young_walk_private *priv = walk->private; in damon_young_pmd_entry()
452 ptl = pmd_lock(walk->mm, pmd); in damon_young_pmd_entry()
457 return 0; in damon_young_pmd_entry()
468 mmu_notifier_test_young(walk->mm, in damon_young_pmd_entry()
470 priv->young = true; in damon_young_pmd_entry()
471 *priv->folio_sz = HPAGE_PMD_SIZE; in damon_young_pmd_entry()
475 return 0; in damon_young_pmd_entry()
481 pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); in damon_young_pmd_entry()
483 return 0; in damon_young_pmd_entry()
491 mmu_notifier_test_young(walk->mm, addr)) in damon_young_pmd_entry()
492 priv->young = true; in damon_young_pmd_entry()
493 *priv->folio_sz = folio_size(folio); in damon_young_pmd_entry()
497 return 0; in damon_young_pmd_entry()
505 struct damon_young_walk_private *priv = walk->private; in damon_young_hugetlb_entry()
506 struct hstate *h = hstate_vma(walk->vma); in damon_young_hugetlb_entry()
511 ptl = huge_pte_lock(h, walk->mm, pte); in damon_young_hugetlb_entry()
512 entry = huge_ptep_get(walk->mm, addr, pte); in damon_young_hugetlb_entry()
520 mmu_notifier_test_young(walk->mm, addr)) in damon_young_hugetlb_entry()
521 priv->young = true; in damon_young_hugetlb_entry()
522 *priv->folio_sz = huge_page_size(h); in damon_young_hugetlb_entry()
528 return 0; in damon_young_hugetlb_entry()
540 static bool damon_va_young(struct mm_struct *mm, unsigned long addr, in damon_va_young() argument
548 mmap_read_lock(mm); in damon_va_young()
549 walk_page_range(mm, addr, addr + 1, &damon_young_ops, &arg); in damon_va_young()
550 mmap_read_unlock(mm); in damon_va_young()
557 * mm 'mm_struct' for the given virtual address space
560 static void __damon_va_check_access(struct mm_struct *mm, in __damon_va_check_access() argument
568 if (!mm) { in __damon_va_check_access()
575 ALIGN_DOWN(r->sampling_addr, last_folio_sz))) { in __damon_va_check_access()
580 last_accessed = damon_va_young(mm, r->sampling_addr, &last_folio_sz); in __damon_va_check_access()
583 last_addr = r->sampling_addr; in __damon_va_check_access()
589 struct mm_struct *mm; in damon_va_check_accesses() local
591 unsigned int max_nr_accesses = 0; in damon_va_check_accesses()
595 mm = damon_get_mm(t); in damon_va_check_accesses()
598 __damon_va_check_access(mm, r, same_target, in damon_va_check_accesses()
599 &ctx->attrs); in damon_va_check_accesses()
600 max_nr_accesses = max(r->nr_accesses, max_nr_accesses); in damon_va_check_accesses()
603 if (mm) in damon_va_check_accesses()
604 mmput(mm); in damon_va_check_accesses()
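
The check above avoids redundant page-table walks: when the new sampling address falls inside the folio that the previous check of the same target already inspected, the cached result is reused. A hedged reconstruction of that caching (the static last_* variables mirror the excerpt; damon_update_region_access_rate() is the core helper that folds the result into the region's access counters):

static void example_check_access(struct mm_struct *mm, struct damon_region *r,
                                 bool same_target, struct damon_attrs *attrs)
{
        static unsigned long last_addr;
        static unsigned long last_folio_sz = PAGE_SIZE;
        static bool last_accessed;

        if (!mm) {
                /* the target is gone; count this sample as not accessed */
                damon_update_region_access_rate(r, false, attrs);
                return;
        }

        /* same folio as the previous check of this target?  Reuse the result. */
        if (same_target && (ALIGN_DOWN(last_addr, last_folio_sz) ==
                            ALIGN_DOWN(r->sampling_addr, last_folio_sz))) {
                damon_update_region_access_rate(r, last_accessed, attrs);
                return;
        }

        last_accessed = damon_va_young(mm, r->sampling_addr, &last_folio_sz);
        damon_update_region_access_rate(r, last_accessed, attrs);
        last_addr = r->sampling_addr;
}
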
622 mmu_notifier_test_young(vma->vm_mm, addr); in damos_va_filter_young_match()
629 return young == filter->matching; in damos_va_filter_young_match()
639 if (scheme->core_filters_allowed) in damos_va_filter_out()
649 if (filter->type == DAMOS_FILTER_TYPE_YOUNG) in damos_va_filter_out()
656 return !filter->allow; in damos_va_filter_out()
658 return scheme->ops_filters_default_reject; in damos_va_filter_out()
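
A hedged sketch of the decision the two fragments above implement: when the core layer already allowed the folio, the ops-level filters are skipped entirely; otherwise the first matching ops filter decides (a matching 'allow' filter keeps the folio, a matching 'deny' filter rejects it), and a folio matching no filter gets the scheme's default. example_filter_match() is a hypothetical stand-in for the per-type matching helpers such as the young check above, and the damos_for_each_ops_filter() iterator is assumed from the surrounding DAMOS code.

/* hypothetical stand-in for the per-type matchers (young, anon, ...) */
static bool example_filter_match(struct damos_filter *filter,
                                 struct folio *folio,
                                 struct vm_area_struct *vma,
                                 unsigned long addr, pte_t *ptep, pmd_t *pmdp);

static bool example_filter_out(struct damos *s, struct folio *folio,
                               struct vm_area_struct *vma, unsigned long addr,
                               pte_t *ptep, pmd_t *pmdp)
{
        struct damos_filter *filter;

        if (s->core_filters_allowed)
                return false;   /* the core layer already allowed this folio */

        damos_for_each_ops_filter(filter, s) {
                if (example_filter_match(filter, folio, vma, addr, ptep, pmdp))
                        return !filter->allow;
        }
        return s->ops_filters_default_reject;
}
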
680 unsigned int weight_total = 0; in damos_va_migrate_dests_add()
685 * to s->target_nid. in damos_va_migrate_dests_add()
687 if (!dests->nr_dests) { in damos_va_migrate_dests_add()
688 i = 0; in damos_va_migrate_dests_add()
693 ilx = vma->vm_pgoff >> order; in damos_va_migrate_dests_add()
694 ilx += (addr - vma->vm_start) >> (PAGE_SHIFT + order); in damos_va_migrate_dests_add()
696 for (i = 0; i < dests->nr_dests; i++) in damos_va_migrate_dests_add()
697 weight_total += dests->weight_arr[i]; in damos_va_migrate_dests_add()
699 /* If the total weights are somehow 0, don't migrate at all */ in damos_va_migrate_dests_add()
704 for (i = 0; i < dests->nr_dests; i++) { in damos_va_migrate_dests_add()
705 if (target < dests->weight_arr[i]) in damos_va_migrate_dests_add()
707 target -= dests->weight_arr[i]; in damos_va_migrate_dests_add()
711 if (folio_nid(folio) == dests->node_id_arr[i]) in damos_va_migrate_dests_add()
718 list_add(&folio->lru, &migration_lists[i]); in damos_va_migrate_dests_add()
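
A runnable worked example of the weighted pick above, with illustrative weights {3, 1}: the hashed index is reduced modulo the weight total (4 here), and the first destination whose weight has not been exhausted wins, so the two destinations are chosen in roughly a 3:1 ratio.

#include <stdio.h>

static int pick_dest(unsigned int target, const unsigned int *weights,
                     int nr_dests)
{
        int i;

        for (i = 0; i < nr_dests; i++) {
                if (target < weights[i])
                        return i;
                target -= weights[i];
        }
        return nr_dests - 1;    /* not reached while target < weight total */
}

int main(void)
{
        unsigned int weights[] = { 3, 1 };
        unsigned int t;

        for (t = 0; t < 4; t++) /* 4 == 3 + 1, the weight total */
                printf("target %u -> destination %d\n", t,
                       pick_dest(t, weights, 2));
        return 0;
}
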
725 struct damos_va_migrate_private *priv = walk->private; in damos_va_migrate_pmd_entry()
726 struct list_head *migration_lists = priv->migration_lists; in damos_va_migrate_pmd_entry()
727 struct damos *s = priv->scheme; in damos_va_migrate_pmd_entry()
728 struct damos_migrate_dests *dests = &s->migrate_dests; in damos_va_migrate_pmd_entry()
733 ptl = pmd_lock(walk->mm, pmd); in damos_va_migrate_pmd_entry()
740 walk->action = ACTION_CONTINUE; in damos_va_migrate_pmd_entry()
746 if (damos_va_filter_out(s, folio, walk->vma, addr, NULL, pmd)) in damos_va_migrate_pmd_entry()
749 damos_va_migrate_dests_add(folio, walk->vma, addr, dests, in damos_va_migrate_pmd_entry()
756 return 0; in damos_va_migrate_pmd_entry()
765 struct damos_va_migrate_private *priv = walk->private; in damos_va_migrate_pte_entry()
766 struct list_head *migration_lists = priv->migration_lists; in damos_va_migrate_pte_entry()
767 struct damos *s = priv->scheme; in damos_va_migrate_pte_entry()
768 struct damos_migrate_dests *dests = &s->migrate_dests; in damos_va_migrate_pte_entry()
774 return 0; in damos_va_migrate_pte_entry()
778 return 0; in damos_va_migrate_pte_entry()
780 if (damos_va_filter_out(s, folio, walk->vma, addr, pte, NULL)) in damos_va_migrate_pte_entry()
783 damos_va_migrate_dests_add(folio, walk->vma, addr, dests, in damos_va_migrate_pte_entry()
788 return 0; in damos_va_migrate_pte_entry()
810 put_pid(t->pid); in damon_va_cleanup_target()
817 return 0; in damos_madvise()
823 struct mm_struct *mm; in damos_madvise() local
824 unsigned long start = PAGE_ALIGN(r->ar.start); in damos_madvise()
828 mm = damon_get_mm(target); in damos_madvise()
829 if (!mm) in damos_madvise()
830 return 0; in damos_madvise()
832 applied = do_madvise(mm, start, len, behavior) ? 0 : len; in damos_madvise()
833 mmput(mm); in damos_madvise()
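
A hedged reconstruction of the madvise application the fragments above belong to: the region is clamped to page boundaries, do_madvise() is issued against the pinned mm, and the advised length is reported as the applied amount on success (damon_sz_region() is the core helper returning the region's size in bytes).

static unsigned long example_madvise(struct damon_target *target,
                                     struct damon_region *r, int behavior)
{
        struct mm_struct *mm;
        unsigned long start = PAGE_ALIGN(r->ar.start);
        unsigned long len = PAGE_ALIGN(damon_sz_region(r));
        unsigned long applied;

        mm = damon_get_mm(target);
        if (!mm)
                return 0;       /* the target is already gone; nothing applied */

        applied = do_madvise(mm, start, len, behavior) ? 0 : len;
        mmput(mm);
        return applied;
}
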
845 struct mm_struct *mm; in damos_va_migrate() local
849 unsigned long applied = 0; in damos_va_migrate()
850 struct damos_migrate_dests *dests = &s->migrate_dests; in damos_va_migrate()
857 use_target_nid = dests->nr_dests == 0; in damos_va_migrate()
858 nr_dests = use_target_nid ? 1 : dests->nr_dests; in damos_va_migrate()
863 return 0; in damos_va_migrate()
865 for (int i = 0; i < nr_dests; i++) in damos_va_migrate()
869 mm = damon_get_mm(target); in damos_va_migrate()
870 if (!mm) in damos_va_migrate()
873 mmap_read_lock(mm); in damos_va_migrate()
874 walk_page_range(mm, r->ar.start, r->ar.end, &walk_ops, &priv); in damos_va_migrate()
875 mmap_read_unlock(mm); in damos_va_migrate()
876 mmput(mm); in damos_va_migrate()
878 for (int i = 0; i < nr_dests; i++) { in damos_va_migrate()
879 nid = use_target_nid ? s->target_nid : dests->node_id_arr[i]; in damos_va_migrate()
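
A hedged sketch of the orchestration these fragments belong to: one folio list per destination is prepared, the region is walked once so the pmd/pte callbacks can sort passing folios into those lists, and each list is then migrated to its node. example_migrate_folio_list() is a hypothetical stand-in for the migration helper; the locking and the destination selection follow the excerpt.

/* hypothetical helper: migrate the folios on the list to 'nid' */
static unsigned long example_migrate_folio_list(struct list_head *folios, int nid);

static unsigned long example_va_migrate(struct damon_target *target,
                                        struct damon_region *r, struct damos *s,
                                        const struct mm_walk_ops *walk_ops,
                                        struct damos_va_migrate_private *priv)
{
        struct damos_migrate_dests *dests = &s->migrate_dests;
        bool use_target_nid = dests->nr_dests == 0;
        int nr_dests = use_target_nid ? 1 : dests->nr_dests;
        unsigned long applied = 0;
        struct mm_struct *mm;
        int i, nid;

        mm = damon_get_mm(target);
        if (!mm)
                return 0;

        mmap_read_lock(mm);
        walk_page_range(mm, r->ar.start, r->ar.end, walk_ops, priv);
        mmap_read_unlock(mm);
        mmput(mm);

        for (i = 0; i < nr_dests; i++) {
                nid = use_target_nid ? s->target_nid : dests->node_id_arr[i];
                applied += example_migrate_folio_list(&priv->migration_lists[i], nid);
        }
        return applied;
}
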
897 return !folio || folio == s->last_applied; in damos_va_invalid_folio()
903 struct damos_va_stat_private *priv = walk->private; in damos_va_stat_pmd_entry()
904 struct damos *s = priv->scheme; in damos_va_stat_pmd_entry()
905 unsigned long *sz_filter_passed = priv->sz_filter_passed; in damos_va_stat_pmd_entry()
906 struct vm_area_struct *vma = walk->vma; in damos_va_stat_pmd_entry()
918 return 0; in damos_va_stat_pmd_entry()
930 s->last_applied = folio; in damos_va_stat_pmd_entry()
934 return 0; in damos_va_stat_pmd_entry()
937 start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in damos_va_stat_pmd_entry()
939 return 0; in damos_va_stat_pmd_entry()
956 s->last_applied = folio; in damos_va_stat_pmd_entry()
959 return 0; in damos_va_stat_pmd_entry()
967 struct mm_struct *mm; in damos_va_stat() local
977 return 0; in damos_va_stat()
979 mm = damon_get_mm(target); in damos_va_stat()
980 if (!mm) in damos_va_stat()
981 return 0; in damos_va_stat()
983 mmap_read_lock(mm); in damos_va_stat()
984 walk_page_range(mm, r->ar.start, r->ar.end, &walk_ops, &priv); in damos_va_stat()
985 mmap_read_unlock(mm); in damos_va_stat()
986 mmput(mm); in damos_va_stat()
987 return 0; in damos_va_stat()
996 switch (scheme->action) { in damon_va_apply_scheme()
1021 return 0; in damon_va_apply_scheme()
1032 switch (scheme->action) { in damon_va_scheme_score()
1077 #include "tests/vaddr-kunit.h"