// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for The Physical Address Space
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#define pr_fmt(fmt) "damon-pa: " fmt

#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/memory-tiers.h>
#include <linux/migrate.h>
#include <linux/mm_inline.h>

#include "../internal.h"
#include "ops-common.h"

static bool damon_folio_mkold_one(struct folio *folio,
		struct vm_area_struct *vma, unsigned long addr, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte)
			damon_ptep_mkold(pvmw.pte, vma, addr);
		else
			damon_pmdp_mkold(pvmw.pmd, vma, addr);
	}
	return true;
}

static void damon_folio_mkold(struct folio *folio)
{
	struct rmap_walk_control rwc = {
		.rmap_one = damon_folio_mkold_one,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
		folio_set_idle(folio);
		return;
	}

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio))
		return;

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);
}

static void damon_pa_mkold(unsigned long paddr)
{
	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));

	if (!folio)
		return;

	damon_folio_mkold(folio);
	folio_put(folio);
}

static void __damon_pa_prepare_access_check(struct damon_region *r)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_pa_mkold(r->sampling_addr);
}

static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			__damon_pa_prepare_access_check(r);
	}
}

static bool damon_folio_young_one(struct folio *folio,
		struct vm_area_struct *vma, unsigned long addr, void *arg)
{
	bool *accessed = arg;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

	*accessed = false;
	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte) {
			*accessed = pte_young(ptep_get(pvmw.pte)) ||
				!folio_test_idle(folio) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
		} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			*accessed = pmd_young(pmdp_get(pvmw.pmd)) ||
				!folio_test_idle(folio) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
#else
			WARN_ON_ONCE(1);
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
		}
		if (*accessed) {
			page_vma_mapped_walk_done(&pvmw);
			break;
		}
	}

	/* If accessed, stop walking */
	return *accessed == false;
}

static bool damon_folio_young(struct folio *folio)
{
	bool accessed = false;
	struct rmap_walk_control rwc = {
		.arg = &accessed,
		.rmap_one = damon_folio_young_one,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
		if (folio_test_idle(folio))
			return false;
		else
			return true;
	}

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio))
		return false;

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);

	return accessed;
}
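
/*
 * Report whether the folio containing @paddr has been accessed since it was
 * last marked old, and tell the caller the folio's size via @folio_sz so
 * that the result can be reused for other sampling addresses falling into
 * the same folio.
 */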
static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)
{
	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
	bool accessed;

	if (!folio)
		return false;

	accessed = damon_folio_young(folio);
	*folio_sz = folio_size(folio);
	folio_put(folio);
	return accessed;
}

static void __damon_pa_check_access(struct damon_region *r,
		struct damon_attrs *attrs)
{
	static unsigned long last_addr;
	static unsigned long last_folio_sz = PAGE_SIZE;
	static bool last_accessed;

	/* If the region is in the last checked page, reuse the result */
	if (ALIGN_DOWN(last_addr, last_folio_sz) ==
				ALIGN_DOWN(r->sampling_addr, last_folio_sz)) {
		damon_update_region_access_rate(r, last_accessed, attrs);
		return;
	}

	last_accessed = damon_pa_young(r->sampling_addr, &last_folio_sz);
	damon_update_region_access_rate(r, last_accessed, attrs);

	last_addr = r->sampling_addr;
}

static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t) {
			__damon_pa_check_access(r, &ctx->attrs);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
		}
	}

	return max_nr_accesses;
}
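
/*
 * Return whether the folio has the property that @filter checks for
 * (anonymous, belonging to the given memcg, or young), with the result
 * inverted when 'matching' of @filter is false.  A YOUNG match also marks
 * the folio old again, so each check observes only accesses made since the
 * previous one.
 */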
static bool damos_pa_filter_match(struct damos_filter *filter,
		struct folio *folio)
{
	bool matched = false;
	struct mem_cgroup *memcg;

	switch (filter->type) {
	case DAMOS_FILTER_TYPE_ANON:
		matched = folio_test_anon(folio);
		break;
	case DAMOS_FILTER_TYPE_MEMCG:
		rcu_read_lock();
		memcg = folio_memcg_check(folio);
		if (!memcg)
			matched = false;
		else
			matched = filter->memcg_id == mem_cgroup_id(memcg);
		rcu_read_unlock();
		break;
	case DAMOS_FILTER_TYPE_YOUNG:
		matched = damon_folio_young(folio);
		if (matched)
			damon_folio_mkold(folio);
		break;
	default:
		break;
	}

	return matched == filter->matching;
}

/*
 * damos_pa_filter_out - Return true if the page should be filtered out.
 */
static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio)
{
	struct damos_filter *filter;

	damos_for_each_filter(filter, scheme) {
		if (damos_pa_filter_match(filter, folio))
			return !filter->allow;
	}
	return false;
}

static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s,
		unsigned long *sz_filter_passed)
{
	unsigned long addr, applied;
	LIST_HEAD(folio_list);
	bool install_young_filter = true;
	struct damos_filter *filter;

	/* check access at page level again by default */
	damos_for_each_filter(filter, s) {
		if (filter->type == DAMOS_FILTER_TYPE_YOUNG) {
			install_young_filter = false;
			break;
		}
	}
	if (install_young_filter) {
		filter = damos_new_filter(
				DAMOS_FILTER_TYPE_YOUNG, true, false);
		if (!filter)
			return 0;
		damos_add_filter(s, filter);
	}

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct folio *folio = damon_get_folio(PHYS_PFN(addr));

		if (!folio)
			continue;

		if (damos_pa_filter_out(s, folio))
			goto put_folio;
		else
			*sz_filter_passed += folio_size(folio);

		folio_clear_referenced(folio);
		folio_test_clear_young(folio);
		if (!folio_isolate_lru(folio))
			goto put_folio;
		if (folio_test_unevictable(folio))
			folio_putback_lru(folio);
		else
			list_add(&folio->lru, &folio_list);
put_folio:
		folio_put(folio);
	}
	if (install_young_filter)
		damos_destroy_filter(filter);
	applied = reclaim_pages(&folio_list);
	cond_resched();
	return applied * PAGE_SIZE;
}

static inline unsigned long damon_pa_mark_accessed_or_deactivate(
		struct damon_region *r, struct damos *s, bool mark_accessed,
		unsigned long *sz_filter_passed)
{
	unsigned long addr, applied = 0;

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct folio *folio = damon_get_folio(PHYS_PFN(addr));

		if (!folio)
			continue;

		if (damos_pa_filter_out(s, folio))
			goto put_folio;
		else
			*sz_filter_passed += folio_size(folio);

		if (mark_accessed)
			folio_mark_accessed(folio);
		else
			folio_deactivate(folio);
		applied += folio_nr_pages(folio);
put_folio:
		folio_put(folio);
	}
	return applied * PAGE_SIZE;
}

static unsigned long damon_pa_mark_accessed(struct damon_region *r,
		struct damos *s, unsigned long *sz_filter_passed)
{
	return damon_pa_mark_accessed_or_deactivate(r, s, true,
			sz_filter_passed);
}

static unsigned long damon_pa_deactivate_pages(struct damon_region *r,
		struct damos *s, unsigned long *sz_filter_passed)
{
	return damon_pa_mark_accessed_or_deactivate(r, s, false,
			sz_filter_passed);
}
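
/*
 * Try to move the folios on @migrate_folios to @target_nid, and return the
 * number of successfully migrated folios.  Allocation on the target node is
 * best-effort only (no reclaim, no warnings); folios that could not get a
 * page there are left on the list for the caller to put back.
 */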
static unsigned int __damon_pa_migrate_folio_list(
		struct list_head *migrate_folios, struct pglist_data *pgdat,
		int target_nid)
{
	unsigned int nr_succeeded = 0;
	nodemask_t allowed_mask = NODE_MASK_NONE;
	struct migration_target_control mtc = {
		/*
		 * Allocate from 'node', or fail quickly and quietly.
		 * When this happens, 'page' will likely just be discarded
		 * instead of migrated.
		 */
		.gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) |
			__GFP_NOWARN | __GFP_NOMEMALLOC | GFP_NOWAIT,
		.nid = target_nid,
		.nmask = &allowed_mask
	};

	if (pgdat->node_id == target_nid || target_nid == NUMA_NO_NODE)
		return 0;

	if (list_empty(migrate_folios))
		return 0;

	/* Migration ignores all cpuset and mempolicy settings */
	migrate_pages(migrate_folios, alloc_migrate_folio, NULL,
		      (unsigned long)&mtc, MIGRATE_ASYNC, MR_DAMON,
		      &nr_succeeded);

	return nr_succeeded;
}

static unsigned int damon_pa_migrate_folio_list(struct list_head *folio_list,
						struct pglist_data *pgdat,
						int target_nid)
{
	unsigned int nr_migrated = 0;
	struct folio *folio;
	LIST_HEAD(ret_folios);
	LIST_HEAD(migrate_folios);

	while (!list_empty(folio_list)) {
		struct folio *folio;

		cond_resched();

		folio = lru_to_folio(folio_list);
		list_del(&folio->lru);

		if (!folio_trylock(folio))
			goto keep;

		/* Relocate its contents to another node. */
		list_add(&folio->lru, &migrate_folios);
		folio_unlock(folio);
		continue;
keep:
		list_add(&folio->lru, &ret_folios);
	}
	/* 'folio_list' is always empty here */

	/* Migrate folios selected for migration */
	nr_migrated += __damon_pa_migrate_folio_list(
			&migrate_folios, pgdat, target_nid);
	/*
	 * Folios that could not be migrated are still in @migrate_folios.
	 * Add those back on @folio_list
	 */
	if (!list_empty(&migrate_folios))
		list_splice_init(&migrate_folios, folio_list);

	try_to_unmap_flush();

	list_splice(&ret_folios, folio_list);

	while (!list_empty(folio_list)) {
		folio = lru_to_folio(folio_list);
		list_del(&folio->lru);
		folio_putback_lru(folio);
	}

	return nr_migrated;
}

static unsigned long damon_pa_migrate_pages(struct list_head *folio_list,
					    int target_nid)
{
	int nid;
	unsigned long nr_migrated = 0;
	LIST_HEAD(node_folio_list);
	unsigned int noreclaim_flag;

	if (list_empty(folio_list))
		return nr_migrated;

	noreclaim_flag = memalloc_noreclaim_save();

	nid = folio_nid(lru_to_folio(folio_list));
	do {
		struct folio *folio = lru_to_folio(folio_list);

		if (nid == folio_nid(folio)) {
			list_move(&folio->lru, &node_folio_list);
			continue;
		}

		nr_migrated += damon_pa_migrate_folio_list(&node_folio_list,
							   NODE_DATA(nid),
							   target_nid);
		nid = folio_nid(lru_to_folio(folio_list));
	} while (!list_empty(folio_list));

	nr_migrated += damon_pa_migrate_folio_list(&node_folio_list,
						   NODE_DATA(nid),
						   target_nid);

	memalloc_noreclaim_restore(noreclaim_flag);

	return nr_migrated;
}

static unsigned long damon_pa_migrate(struct damon_region *r, struct damos *s,
		unsigned long *sz_filter_passed)
{
	unsigned long addr, applied;
	LIST_HEAD(folio_list);

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct folio *folio = damon_get_folio(PHYS_PFN(addr));

		if (!folio)
			continue;

		if (damos_pa_filter_out(s, folio))
			goto put_folio;
		else
			*sz_filter_passed += folio_size(folio);

		if (!folio_isolate_lru(folio))
			goto put_folio;
		list_add(&folio->lru, &folio_list);
put_folio:
		folio_put(folio);
	}
	applied = damon_pa_migrate_pages(&folio_list, s->target_nid);
	cond_resched();
	return applied * PAGE_SIZE;
}
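
/* Return whether at least one filter is installed on @s. */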
static bool damon_pa_scheme_has_filter(struct damos *s)
{
	struct damos_filter *f;

	damos_for_each_filter(f, s)
		return true;
	return false;
}

static unsigned long damon_pa_stat(struct damon_region *r, struct damos *s,
		unsigned long *sz_filter_passed)
{
	unsigned long addr;
	LIST_HEAD(folio_list);

	if (!damon_pa_scheme_has_filter(s))
		return 0;

	addr = r->ar.start;
	while (addr < r->ar.end) {
		struct folio *folio = damon_get_folio(PHYS_PFN(addr));

		if (!folio) {
			addr += PAGE_SIZE;
			continue;
		}

		if (!damos_pa_filter_out(s, folio))
			*sz_filter_passed += folio_size(folio);
		addr += folio_size(folio);
		folio_put(folio);
	}
	return 0;
}

static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme, unsigned long *sz_filter_passed)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_pa_pageout(r, scheme, sz_filter_passed);
	case DAMOS_LRU_PRIO:
		return damon_pa_mark_accessed(r, scheme, sz_filter_passed);
	case DAMOS_LRU_DEPRIO:
		return damon_pa_deactivate_pages(r, scheme, sz_filter_passed);
	case DAMOS_MIGRATE_HOT:
	case DAMOS_MIGRATE_COLD:
		return damon_pa_migrate(r, scheme, sz_filter_passed);
	case DAMOS_STAT:
		return damon_pa_stat(r, scheme, sz_filter_passed);
	default:
		/* DAMOS actions that are not yet supported by 'paddr'. */
		break;
	}
	return 0;
}

static int damon_pa_scheme_score(struct damon_ctx *context,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_cold_score(context, r, scheme);
	case DAMOS_LRU_PRIO:
		return damon_hot_score(context, r, scheme);
	case DAMOS_LRU_DEPRIO:
		return damon_cold_score(context, r, scheme);
	case DAMOS_MIGRATE_HOT:
		return damon_hot_score(context, r, scheme);
	case DAMOS_MIGRATE_COLD:
		return damon_cold_score(context, r, scheme);
	default:
		break;
	}

	return DAMOS_MAX_SCORE;
}

static int __init damon_pa_initcall(void)
{
	struct damon_operations ops = {
		.id = DAMON_OPS_PADDR,
		.init = NULL,
		.update = NULL,
		.prepare_access_checks = damon_pa_prepare_access_checks,
		.check_accesses = damon_pa_check_accesses,
		.reset_aggregated = NULL,
		.target_valid = NULL,
		.cleanup = NULL,
		.apply_scheme = damon_pa_apply_scheme,
		.get_scheme_score = damon_pa_scheme_score,
	};

	return damon_register_ops(&ops);
}

subsys_initcall(damon_pa_initcall);
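
/*
 * Once registered, this operations set becomes selectable from user space;
 * for example, the DAMON sysfs interface's per-context 'operations' file
 * accepts "paddr" (see Documentation/admin-guide/mm/damon/usage.rst).
 */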