/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Data Access Monitor Unit Tests
 *
 * Copyright 2019 Amazon.com, Inc. or its affiliates. All rights reserved.
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#ifdef CONFIG_DAMON_KUNIT_TEST

#ifndef _DAMON_CORE_TEST_H
#define _DAMON_CORE_TEST_H

#include <kunit/test.h>

static void damon_test_regions(struct kunit *test)
{
	struct damon_region *r;
	struct damon_target *t;

	r = damon_new_region(1, 2);
	if (!r)
		kunit_skip(test, "region alloc fail");
	KUNIT_EXPECT_EQ(test, 1ul, r->ar.start);
	KUNIT_EXPECT_EQ(test, 2ul, r->ar.end);
	KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses);

	t = damon_new_target();
	if (!t) {
		damon_free_region(r);
		kunit_skip(test, "target alloc fail");
	}
	KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t));

	damon_add_region(r, t);
	KUNIT_EXPECT_EQ(test, 1u, damon_nr_regions(t));

	damon_destroy_region(r, t);
	KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t));

	damon_free_target(t);
}

static unsigned int nr_damon_targets(struct damon_ctx *ctx)
{
	struct damon_target *t;
	unsigned int nr_targets = 0;

	damon_for_each_target(t, ctx)
		nr_targets++;

	return nr_targets;
}

static void damon_test_target(struct kunit *test)
{
	struct damon_ctx *c = damon_new_ctx();
	struct damon_target *t;

	if (!c)
		kunit_skip(test, "ctx alloc fail");

	t = damon_new_target();
	if (!t) {
		damon_destroy_ctx(c);
		kunit_skip(test, "target alloc fail");
	}
	KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c));

	damon_add_target(c, t);
	KUNIT_EXPECT_EQ(test, 1u, nr_damon_targets(c));

	damon_destroy_target(t, c);
	KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c));

	damon_destroy_ctx(c);
}

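/*
 * Note on the helpers used above: the damon_free_*() variants are expected to
 * only release the object itself (and, for targets, their regions), while the
 * damon_destroy_*() variants additionally unlink the object from its parent's
 * list before freeing it.  That is why damon_nr_regions() and
 * nr_damon_targets() drop back to zero after the destroy calls.
 */
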
/*
 * Test kdamond_reset_aggregated()
 *
 * DAMON checks access to each region and aggregates this information as the
 * access frequency of each region.  In detail, it increases '->nr_accesses'
 * of regions in which an access has been observed.
 * 'kdamond_reset_aggregated()' flushes the aggregated information
 * ('->nr_accesses' of each region) to the result buffer.  As a result of the
 * flushing, '->nr_accesses' of the regions are initialized to zero.
 */
static void damon_test_aggregate(struct kunit *test)
{
	struct damon_ctx *ctx = damon_new_ctx();
	unsigned long saddr[][3] = {{10, 20, 30}, {5, 42, 49}, {13, 33, 55} };
	unsigned long eaddr[][3] = {{15, 27, 40}, {31, 45, 55}, {23, 44, 66} };
	unsigned long accesses[][3] = {{42, 95, 84}, {10, 20, 30}, {0, 1, 2} };
	struct damon_target *t;
	struct damon_region *r;
	int it, ir;

	if (!ctx)
		kunit_skip(test, "ctx alloc fail");

	for (it = 0; it < 3; it++) {
		t = damon_new_target();
		if (!t) {
			damon_destroy_ctx(ctx);
			kunit_skip(test, "target alloc fail");
		}
		damon_add_target(ctx, t);
	}

	it = 0;
	damon_for_each_target(t, ctx) {
		for (ir = 0; ir < 3; ir++) {
			r = damon_new_region(saddr[it][ir], eaddr[it][ir]);
			if (!r) {
				damon_destroy_ctx(ctx);
				kunit_skip(test, "region alloc fail");
			}
			r->nr_accesses = accesses[it][ir];
			r->nr_accesses_bp = accesses[it][ir] * 10000;
			damon_add_region(r, t);
		}
		it++;
	}
	kdamond_reset_aggregated(ctx);
	it = 0;
	damon_for_each_target(t, ctx) {
		ir = 0;
		/* '->nr_accesses' should be zeroed */
		damon_for_each_region(r, t) {
			KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses);
			ir++;
		}
		/* regions should be preserved */
		KUNIT_EXPECT_EQ(test, 3, ir);
		it++;
	}
	/* targets also should be preserved */
	KUNIT_EXPECT_EQ(test, 3, it);

	damon_destroy_ctx(ctx);
}

static void damon_test_split_at(struct kunit *test)
{
	struct damon_target *t;
	struct damon_region *r, *r_new;

	t = damon_new_target();
	if (!t)
		kunit_skip(test, "target alloc fail");
	r = damon_new_region(0, 100);
	if (!r) {
		damon_free_target(t);
		kunit_skip(test, "region alloc fail");
	}
	r->nr_accesses_bp = 420000;
	r->nr_accesses = 42;
	r->last_nr_accesses = 15;
	damon_add_region(r, t);
	damon_split_region_at(t, r, 25);
	KUNIT_EXPECT_EQ(test, r->ar.start, 0ul);
	KUNIT_EXPECT_EQ(test, r->ar.end, 25ul);

	r_new = damon_next_region(r);
	KUNIT_EXPECT_EQ(test, r_new->ar.start, 25ul);
	KUNIT_EXPECT_EQ(test, r_new->ar.end, 100ul);

	KUNIT_EXPECT_EQ(test, r->nr_accesses_bp, r_new->nr_accesses_bp);
	KUNIT_EXPECT_EQ(test, r->nr_accesses, r_new->nr_accesses);
	KUNIT_EXPECT_EQ(test, r->last_nr_accesses, r_new->last_nr_accesses);

	damon_free_target(t);
}

static void damon_test_merge_two(struct kunit *test)
{
	struct damon_target *t;
	struct damon_region *r, *r2, *r3;
	int i;

	t = damon_new_target();
	if (!t)
		kunit_skip(test, "target alloc fail");
	r = damon_new_region(0, 100);
	if (!r) {
		damon_free_target(t);
		kunit_skip(test, "region alloc fail");
	}
	r->nr_accesses = 10;
	r->nr_accesses_bp = 100000;
	damon_add_region(r, t);
	r2 = damon_new_region(100, 300);
	if (!r2) {
		damon_free_target(t);
		kunit_skip(test, "second region alloc fail");
	}
	r2->nr_accesses = 20;
	r2->nr_accesses_bp = 200000;
	damon_add_region(r2, t);

	damon_merge_two_regions(t, r, r2);
	KUNIT_EXPECT_EQ(test, r->ar.start, 0ul);
	KUNIT_EXPECT_EQ(test, r->ar.end, 300ul);
	KUNIT_EXPECT_EQ(test, r->nr_accesses, 16u);

	i = 0;
	damon_for_each_region(r3, t) {
		KUNIT_EXPECT_PTR_EQ(test, r, r3);
		i++;
	}
	KUNIT_EXPECT_EQ(test, i, 1);

	damon_free_target(t);
}

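/*
 * The 16u expected by damon_test_merge_two() above assumes that
 * damon_merge_two_regions() sets the merged region's '->nr_accesses' to the
 * size-weighted average of the two inputs:
 * (10 * 100 + 20 * 200) / (100 + 200) = 16 (integer division).
 */
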
static struct damon_region *__nth_region_of(struct damon_target *t, int idx)
{
	struct damon_region *r;
	unsigned int i = 0;

	damon_for_each_region(r, t) {
		if (i++ == idx)
			return r;
	}

	return NULL;
}

static void damon_test_merge_regions_of(struct kunit *test)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned long sa[] = {0, 100, 114, 122, 130, 156, 170, 184};
	unsigned long ea[] = {100, 112, 122, 130, 156, 170, 184, 230};
	unsigned int nrs[] = {0, 0, 10, 10, 20, 30, 1, 2};

	unsigned long saddrs[] = {0, 114, 130, 156, 170};
	unsigned long eaddrs[] = {112, 130, 156, 170, 230};
	int i;

	t = damon_new_target();
	if (!t)
		kunit_skip(test, "target alloc fail");
	for (i = 0; i < ARRAY_SIZE(sa); i++) {
		r = damon_new_region(sa[i], ea[i]);
		if (!r) {
			damon_free_target(t);
			kunit_skip(test, "region alloc fail");
		}
		r->nr_accesses = nrs[i];
		r->nr_accesses_bp = nrs[i] * 10000;
		damon_add_region(r, t);
	}

	damon_merge_regions_of(t, 9, 9999);
	/* 0-112, 114-130, 130-156, 156-170, 170-230 */
	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 5u);
	for (i = 0; i < 5; i++) {
		r = __nth_region_of(t, i);
		KUNIT_EXPECT_EQ(test, r->ar.start, saddrs[i]);
		KUNIT_EXPECT_EQ(test, r->ar.end, eaddrs[i]);
	}
	damon_free_target(t);
}

static void damon_test_split_regions_of(struct kunit *test)
{
	struct damon_target *t;
	struct damon_region *r;

	t = damon_new_target();
	if (!t)
		kunit_skip(test, "target alloc fail");
	r = damon_new_region(0, 22);
	if (!r) {
		damon_free_target(t);
		kunit_skip(test, "region alloc fail");
	}
	damon_add_region(r, t);
	damon_split_regions_of(t, 2, DAMON_MIN_REGION);
	KUNIT_EXPECT_LE(test, damon_nr_regions(t), 2u);
	damon_free_target(t);

	t = damon_new_target();
	if (!t)
		kunit_skip(test, "second target alloc fail");
	r = damon_new_region(0, 220);
	if (!r) {
		damon_free_target(t);
		kunit_skip(test, "second region alloc fail");
	}
	damon_add_region(r, t);
	damon_split_regions_of(t, 4, DAMON_MIN_REGION);
	KUNIT_EXPECT_LE(test, damon_nr_regions(t), 4u);
	damon_free_target(t);
}

static void damon_test_ops_registration(struct kunit *test)
{
	struct damon_ctx *c = damon_new_ctx();
	struct damon_operations ops = {.id = DAMON_OPS_VADDR}, bak;
	bool need_cleanup = false;

	if (!c)
		kunit_skip(test, "ctx alloc fail");

	/* DAMON_OPS_VADDR is registered only if CONFIG_DAMON_VADDR is set */
	if (!damon_is_registered_ops(DAMON_OPS_VADDR)) {
		bak.id = DAMON_OPS_VADDR;
		KUNIT_EXPECT_EQ(test, damon_register_ops(&bak), 0);
		need_cleanup = true;
	}

	/* DAMON_OPS_VADDR is ensured to be registered */
	KUNIT_EXPECT_EQ(test, damon_select_ops(c, DAMON_OPS_VADDR), 0);

	/* Double-registration is prohibited */
	KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL);

	/* Unknown ops id cannot be registered */
	KUNIT_EXPECT_EQ(test, damon_select_ops(c, NR_DAMON_OPS), -EINVAL);

	/* Registration should succeed after unregistration */
	mutex_lock(&damon_ops_lock);
	bak = damon_registered_ops[DAMON_OPS_VADDR];
	damon_registered_ops[DAMON_OPS_VADDR] = (struct damon_operations){};
	mutex_unlock(&damon_ops_lock);

	ops.id = DAMON_OPS_VADDR;
	KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), 0);

	mutex_lock(&damon_ops_lock);
	damon_registered_ops[DAMON_OPS_VADDR] = bak;
	mutex_unlock(&damon_ops_lock);

	/* Check double-registration failure again */
	KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL);

	damon_destroy_ctx(c);

	if (need_cleanup) {
		mutex_lock(&damon_ops_lock);
		damon_registered_ops[DAMON_OPS_VADDR] =
				(struct damon_operations){};
		mutex_unlock(&damon_ops_lock);
	}
}

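/*
 * Test damon_set_regions()
 *
 * The expectations below assume damon_set_regions() keeps the parts of the
 * existing regions that overlap the requested range and creates new regions
 * for the uncovered gaps.  With old regions [4, 16) and [24, 32) and the
 * requested range [8, 28), that yields [8, 16), the gap [16, 24), and
 * [24, 28).
 */
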
static void damon_test_set_regions(struct kunit *test)
{
	struct damon_target *t = damon_new_target();
	struct damon_region *r1, *r2;
	struct damon_addr_range range = {.start = 8, .end = 28};
	unsigned long expects[] = {8, 16, 16, 24, 24, 28};
	int expect_idx = 0;
	struct damon_region *r;

	if (!t)
		kunit_skip(test, "target alloc fail");
	r1 = damon_new_region(4, 16);
	if (!r1) {
		damon_free_target(t);
		kunit_skip(test, "region alloc fail");
	}
	r2 = damon_new_region(24, 32);
	if (!r2) {
		damon_free_target(t);
		damon_free_region(r1);
		kunit_skip(test, "second region alloc fail");
	}

	damon_add_region(r1, t);
	damon_add_region(r2, t);
	damon_set_regions(t, &range, 1, DAMON_MIN_REGION);

	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 3);
	damon_for_each_region(r, t) {
		KUNIT_EXPECT_EQ(test, r->ar.start, expects[expect_idx++]);
		KUNIT_EXPECT_EQ(test, r->ar.end, expects[expect_idx++]);
	}
	damon_destroy_target(t, NULL);
}

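/*
 * Test damon_nr_accesses_to_accesses_bp()
 *
 * The conversion is expected to scale 'nr_accesses' into basis points of the
 * per-aggregation maximum, roughly
 * nr_accesses * 10000 / (aggr_interval / sample_interval).  The attrs below
 * make that divisor about 2^32, so 123 accesses should map to zero.
 */
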
static void damon_test_nr_accesses_to_accesses_bp(struct kunit *test)
{
	struct damon_attrs attrs = {
		.sample_interval = 10,
		.aggr_interval = ((unsigned long)UINT_MAX + 1) * 10
	};

	/*
	 * In some cases such as 32bit architectures where UINT_MAX is
	 * ULONG_MAX, attrs.aggr_interval becomes zero.  Calling
	 * damon_nr_accesses_to_accesses_bp() in the case will cause
	 * divide-by-zero.  Such case is prohibited in normal execution since
	 * the caution is documented in the comment of the function, and
	 * damon_update_monitoring_results() does the check.  Skip the test in
	 * the case.
	 */
	if (!attrs.aggr_interval)
		kunit_skip(test, "aggr_interval is zero.");

	KUNIT_EXPECT_EQ(test, damon_nr_accesses_to_accesses_bp(123, &attrs), 0);
}

static void damon_test_update_monitoring_result(struct kunit *test)
{
	struct damon_attrs old_attrs = {
		.sample_interval = 10, .aggr_interval = 1000,};
	struct damon_attrs new_attrs;
	struct damon_region *r = damon_new_region(3, 7);

	if (!r)
		kunit_skip(test, "region alloc fail");

	r->nr_accesses = 15;
	r->nr_accesses_bp = 150000;
	r->age = 20;

	new_attrs = (struct damon_attrs){
		.sample_interval = 100, .aggr_interval = 10000,};
	damon_update_monitoring_result(r, &old_attrs, &new_attrs, false);
	KUNIT_EXPECT_EQ(test, r->nr_accesses, 15);
	KUNIT_EXPECT_EQ(test, r->age, 2);

	new_attrs = (struct damon_attrs){
		.sample_interval = 1, .aggr_interval = 1000};
	damon_update_monitoring_result(r, &old_attrs, &new_attrs, false);
	KUNIT_EXPECT_EQ(test, r->nr_accesses, 150);
	KUNIT_EXPECT_EQ(test, r->age, 2);

	new_attrs = (struct damon_attrs){
		.sample_interval = 1, .aggr_interval = 100};
	damon_update_monitoring_result(r, &old_attrs, &new_attrs, false);
	KUNIT_EXPECT_EQ(test, r->nr_accesses, 150);
	KUNIT_EXPECT_EQ(test, r->age, 20);

	damon_free_region(r);
}

static void damon_test_set_attrs(struct kunit *test)
{
	struct damon_ctx *c = damon_new_ctx();
	struct damon_attrs valid_attrs = {
		.min_nr_regions = 10, .max_nr_regions = 1000,
		.sample_interval = 5000, .aggr_interval = 100000,};
	struct damon_attrs invalid_attrs;

	if (!c)
		kunit_skip(test, "ctx alloc fail");

	KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &valid_attrs), 0);

	invalid_attrs = valid_attrs;
	invalid_attrs.min_nr_regions = 1;
	KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);

	invalid_attrs = valid_attrs;
	invalid_attrs.max_nr_regions = 9;
	KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);

	invalid_attrs = valid_attrs;
	invalid_attrs.aggr_interval = 4999;
	KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);

	damon_destroy_ctx(c);
}

static void damon_test_moving_sum(struct kunit *test)
{
	unsigned int mvsum = 50000, nomvsum = 50000, len_window = 10;
	unsigned int new_values[] = {10000, 0, 10000, 0, 0, 0, 10000, 0, 0, 0};
	unsigned int expects[] = {55000, 50000, 55000, 50000, 45000, 40000,
			45000, 40000, 35000, 30000};
	int i;

	for (i = 0; i < ARRAY_SIZE(new_values); i++) {
		mvsum = damon_moving_sum(mvsum, nomvsum, len_window,
				new_values[i]);
		KUNIT_EXPECT_EQ(test, mvsum, expects[i]);
	}
}

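/*
 * The expectations of damon_test_moving_sum() above are consistent with
 * damon_moving_sum() discounting 'nomvsum / len_window' from the previous sum
 * and then adding the new value, e.g., 50000 - 50000 / 10 + 10000 = 55000 for
 * the first sample, and 55000 - 5000 + 0 = 50000 for the second.
 */
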
static void damos_test_new_filter(struct kunit *test)
{
	struct damos_filter *filter;

	filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, false);
	if (!filter)
		kunit_skip(test, "filter alloc fail");
	KUNIT_EXPECT_EQ(test, filter->type, DAMOS_FILTER_TYPE_ANON);
	KUNIT_EXPECT_EQ(test, filter->matching, true);
	KUNIT_EXPECT_PTR_EQ(test, filter->list.prev, &filter->list);
	KUNIT_EXPECT_PTR_EQ(test, filter->list.next, &filter->list);
	damos_destroy_filter(filter);
}

static void damos_test_commit_quota_goal_for(struct kunit *test,
		struct damos_quota_goal *dst,
		struct damos_quota_goal *src)
{
	u64 dst_last_psi_total = 0;

	if (dst->metric == DAMOS_QUOTA_SOME_MEM_PSI_US)
		dst_last_psi_total = dst->last_psi_total;
	damos_commit_quota_goal(dst, src);

	KUNIT_EXPECT_EQ(test, dst->metric, src->metric);
	KUNIT_EXPECT_EQ(test, dst->target_value, src->target_value);
	if (src->metric == DAMOS_QUOTA_USER_INPUT)
		KUNIT_EXPECT_EQ(test, dst->current_value, src->current_value);
	if (dst_last_psi_total && src->metric == DAMOS_QUOTA_SOME_MEM_PSI_US)
		KUNIT_EXPECT_EQ(test, dst->last_psi_total, dst_last_psi_total);
	switch (dst->metric) {
	case DAMOS_QUOTA_NODE_MEM_USED_BP:
	case DAMOS_QUOTA_NODE_MEM_FREE_BP:
		KUNIT_EXPECT_EQ(test, dst->nid, src->nid);
		break;
	case DAMOS_QUOTA_NODE_MEMCG_USED_BP:
	case DAMOS_QUOTA_NODE_MEMCG_FREE_BP:
		KUNIT_EXPECT_EQ(test, dst->nid, src->nid);
		KUNIT_EXPECT_EQ(test, dst->memcg_id, src->memcg_id);
		break;
	default:
		break;
	}
}

static void damos_test_commit_quota_goal(struct kunit *test)
{
	struct damos_quota_goal dst = {
		.metric = DAMOS_QUOTA_SOME_MEM_PSI_US,
		.target_value = 1000,
		.current_value = 123,
		.last_psi_total = 456,
	};

	damos_test_commit_quota_goal_for(test, &dst,
			&(struct damos_quota_goal){
			.metric = DAMOS_QUOTA_USER_INPUT,
			.target_value = 789,
			.current_value = 12});
	damos_test_commit_quota_goal_for(test, &dst,
			&(struct damos_quota_goal){
			.metric = DAMOS_QUOTA_NODE_MEM_FREE_BP,
			.target_value = 345,
			.current_value = 678,
			.nid = 9,
			});
	damos_test_commit_quota_goal_for(test, &dst,
			&(struct damos_quota_goal){
			.metric = DAMOS_QUOTA_NODE_MEM_USED_BP,
			.target_value = 12,
			.current_value = 345,
			.nid = 6,
			});
	damos_test_commit_quota_goal_for(test, &dst,
			&(struct damos_quota_goal){
			.metric = DAMOS_QUOTA_NODE_MEMCG_USED_BP,
			.target_value = 456,
			.current_value = 567,
			.nid = 6,
			.memcg_id = 7,
			});
	damos_test_commit_quota_goal_for(test, &dst,
			&(struct damos_quota_goal){
			.metric = DAMOS_QUOTA_NODE_MEMCG_FREE_BP,
			.target_value = 890,
			.current_value = 901,
			.nid = 10,
			.memcg_id = 1,
			});
	damos_test_commit_quota_goal_for(test, &dst,
			&(struct damos_quota_goal) {
			.metric = DAMOS_QUOTA_USER_INPUT,
			.target_value = 789,
			.current_value = 12,
			});
}

static void damos_test_commit_quota_goals_for(struct kunit *test,
		struct damos_quota_goal *dst_goals, int nr_dst_goals,
		struct damos_quota_goal *src_goals, int nr_src_goals)
{
	struct damos_quota dst, src;
	struct damos_quota_goal *goal, *next;
	bool skip = true;
	int i;

	INIT_LIST_HEAD(&dst.goals);
	INIT_LIST_HEAD(&src.goals);

	for (i = 0; i < nr_dst_goals; i++) {
		/*
		 * When nr_src_goals is smaller than nr_dst_goals,
		 * damos_commit_quota_goals() will kfree() the dst goals.
		 * Make them kfree()-able.
		 */
		goal = damos_new_quota_goal(dst_goals[i].metric,
				dst_goals[i].target_value);
		if (!goal)
			goto out;
		damos_add_quota_goal(&dst, goal);
	}
	skip = false;
	for (i = 0; i < nr_src_goals; i++)
		damos_add_quota_goal(&src, &src_goals[i]);

	damos_commit_quota_goals(&dst, &src);

	i = 0;
	damos_for_each_quota_goal(goal, (&dst)) {
		KUNIT_EXPECT_EQ(test, goal->metric, src_goals[i].metric);
		KUNIT_EXPECT_EQ(test, goal->target_value,
				src_goals[i++].target_value);
	}
	KUNIT_EXPECT_EQ(test, i, nr_src_goals);

out:
	damos_for_each_quota_goal_safe(goal, next, (&dst))
		damos_destroy_quota_goal(goal);
	if (skip)
		kunit_skip(test, "goal alloc fail");
}

static void damos_test_commit_quota_goals(struct kunit *test)
{
	damos_test_commit_quota_goals_for(test,
			(struct damos_quota_goal[]){}, 0,
			(struct damos_quota_goal[]){
			{
				.metric = DAMOS_QUOTA_USER_INPUT,
				.target_value = 123,
			},
			}, 1);
	damos_test_commit_quota_goals_for(test,
			(struct damos_quota_goal[]){
			{
				.metric = DAMOS_QUOTA_USER_INPUT,
				.target_value = 234,
			},
			}, 1,
			(struct damos_quota_goal[]){
			{
				.metric = DAMOS_QUOTA_USER_INPUT,
				.target_value = 345,
			},
			}, 1);
	damos_test_commit_quota_goals_for(test,
			(struct damos_quota_goal[]){
			{
				.metric = DAMOS_QUOTA_USER_INPUT,
				.target_value = 456,
			},
			}, 1,
			(struct damos_quota_goal[]){}, 0);
}

static void damos_test_commit_quota(struct kunit *test)
{
	struct damos_quota dst = {
		.reset_interval = 1,
		.ms = 2,
		.sz = 3,
		.weight_sz = 4,
		.weight_nr_accesses = 5,
		.weight_age = 6,
	};
	struct damos_quota src = {
		.reset_interval = 7,
		.ms = 8,
		.sz = 9,
		.weight_sz = 10,
		.weight_nr_accesses = 11,
		.weight_age = 12,
	};

	INIT_LIST_HEAD(&dst.goals);
	INIT_LIST_HEAD(&src.goals);

	damos_commit_quota(&dst, &src);

	KUNIT_EXPECT_EQ(test, dst.reset_interval, src.reset_interval);
	KUNIT_EXPECT_EQ(test, dst.ms, src.ms);
	KUNIT_EXPECT_EQ(test, dst.sz, src.sz);
	KUNIT_EXPECT_EQ(test, dst.weight_sz, src.weight_sz);
	KUNIT_EXPECT_EQ(test, dst.weight_nr_accesses, src.weight_nr_accesses);
	KUNIT_EXPECT_EQ(test, dst.weight_age, src.weight_age);
}

static void damos_test_commit_filter_for(struct kunit *test,
		struct damos_filter *dst, struct damos_filter *src)
{
	damos_commit_filter(dst, src);
	KUNIT_EXPECT_EQ(test, dst->type, src->type);
	KUNIT_EXPECT_EQ(test, dst->matching, src->matching);
	KUNIT_EXPECT_EQ(test, dst->allow, src->allow);
	switch (src->type) {
	case DAMOS_FILTER_TYPE_MEMCG:
		KUNIT_EXPECT_EQ(test, dst->memcg_id, src->memcg_id);
		break;
	case DAMOS_FILTER_TYPE_ADDR:
		KUNIT_EXPECT_EQ(test, dst->addr_range.start,
				src->addr_range.start);
		KUNIT_EXPECT_EQ(test, dst->addr_range.end,
				src->addr_range.end);
		break;
	case DAMOS_FILTER_TYPE_TARGET:
		KUNIT_EXPECT_EQ(test, dst->target_idx, src->target_idx);
		break;
	case DAMOS_FILTER_TYPE_HUGEPAGE_SIZE:
		KUNIT_EXPECT_EQ(test, dst->sz_range.min, src->sz_range.min);
		KUNIT_EXPECT_EQ(test, dst->sz_range.max, src->sz_range.max);
		break;
	default:
		break;
	}
}

static void damos_test_commit_filter(struct kunit *test)
{
	struct damos_filter dst = {
		.type = DAMOS_FILTER_TYPE_ACTIVE,
		.matching = false,
		.allow = false,
	};

	damos_test_commit_filter_for(test, &dst,
			&(struct damos_filter){
			.type = DAMOS_FILTER_TYPE_ANON,
			.matching = true,
			.allow = true,
			});
	damos_test_commit_filter_for(test, &dst,
			&(struct damos_filter){
			.type = DAMOS_FILTER_TYPE_MEMCG,
			.matching = false,
			.allow = false,
			.memcg_id = 123,
			});
	damos_test_commit_filter_for(test, &dst,
			&(struct damos_filter){
			.type = DAMOS_FILTER_TYPE_YOUNG,
			.matching = true,
			.allow = true,
			});
	damos_test_commit_filter_for(test, &dst,
			&(struct damos_filter){
			.type = DAMOS_FILTER_TYPE_HUGEPAGE_SIZE,
			.matching = false,
			.allow = false,
			.sz_range = {.min = 234, .max = 345},
			});
	damos_test_commit_filter_for(test, &dst,
			&(struct damos_filter){
			.type = DAMOS_FILTER_TYPE_UNMAPPED,
			.matching = true,
			.allow = true,
			});
	damos_test_commit_filter_for(test, &dst,
			&(struct damos_filter){
			.type = DAMOS_FILTER_TYPE_ADDR,
			.matching = false,
			.allow = false,
			.addr_range = {.start = 456, .end = 567},
			});
	damos_test_commit_filter_for(test, &dst,
			&(struct damos_filter){
			.type = DAMOS_FILTER_TYPE_TARGET,
			.matching = true,
			.allow = true,
			.target_idx = 6,
			});
}

static void damos_test_filter_out(struct kunit *test)
{
	struct damon_target *t;
	struct damon_region *r, *r2;
	struct damos_filter *f;

	f = damos_new_filter(DAMOS_FILTER_TYPE_ADDR, true, false);
	if (!f)
		kunit_skip(test, "filter alloc fail");
	f->addr_range = (struct damon_addr_range){
		.start = DAMON_MIN_REGION * 2, .end = DAMON_MIN_REGION * 6};

	t = damon_new_target();
	if (!t) {
		damos_destroy_filter(f);
		kunit_skip(test, "target alloc fail");
	}
	r = damon_new_region(DAMON_MIN_REGION * 3, DAMON_MIN_REGION * 5);
	if (!r) {
		damos_destroy_filter(f);
		damon_free_target(t);
		kunit_skip(test, "region alloc fail");
	}
	damon_add_region(r, t);

	/* region in the range */
	KUNIT_EXPECT_TRUE(test,
			damos_filter_match(NULL, t, r, f, DAMON_MIN_REGION));
	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);

	/* region before the range */
	r->ar.start = DAMON_MIN_REGION * 1;
	r->ar.end = DAMON_MIN_REGION * 2;
	KUNIT_EXPECT_FALSE(test,
			damos_filter_match(NULL, t, r, f, DAMON_MIN_REGION));
	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);

	/* region after the range */
	r->ar.start = DAMON_MIN_REGION * 6;
	r->ar.end = DAMON_MIN_REGION * 8;
	KUNIT_EXPECT_FALSE(test,
			damos_filter_match(NULL, t, r, f, DAMON_MIN_REGION));
	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);

	/* region started before the range */
	r->ar.start = DAMON_MIN_REGION * 1;
	r->ar.end = DAMON_MIN_REGION * 4;
	KUNIT_EXPECT_FALSE(test,
			damos_filter_match(NULL, t, r, f, DAMON_MIN_REGION));
	/* filter should have split the region */
	KUNIT_EXPECT_EQ(test, r->ar.start, DAMON_MIN_REGION * 1);
	KUNIT_EXPECT_EQ(test, r->ar.end, DAMON_MIN_REGION * 2);
	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 2);
	r2 = damon_next_region(r);
	KUNIT_EXPECT_EQ(test, r2->ar.start, DAMON_MIN_REGION * 2);
	KUNIT_EXPECT_EQ(test, r2->ar.end, DAMON_MIN_REGION * 4);
	damon_destroy_region(r2, t);

	/* region started in the range */
	r->ar.start = DAMON_MIN_REGION * 2;
	r->ar.end = DAMON_MIN_REGION * 8;
	KUNIT_EXPECT_TRUE(test,
			damos_filter_match(NULL, t, r, f, DAMON_MIN_REGION));
	/* filter should have split the region */
	KUNIT_EXPECT_EQ(test, r->ar.start, DAMON_MIN_REGION * 2);
	KUNIT_EXPECT_EQ(test, r->ar.end, DAMON_MIN_REGION * 6);
	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 2);
	r2 = damon_next_region(r);
	KUNIT_EXPECT_EQ(test, r2->ar.start, DAMON_MIN_REGION * 6);
	KUNIT_EXPECT_EQ(test, r2->ar.end, DAMON_MIN_REGION * 8);
	damon_destroy_region(r2, t);

	damon_free_target(t);
	damos_free_filter(f);
}

static void damon_test_feed_loop_next_input(struct kunit *test)
{
	unsigned long last_input = 900000, current_score = 200;

	/*
	 * If the current score is lower than the goal, which is always 10,000
	 * (see the comment of damon_feed_loop_next_input()), the next input
	 * should be higher than the last input.
	 */
	KUNIT_EXPECT_GT(test,
			damon_feed_loop_next_input(last_input, current_score),
			last_input);

	/*
	 * If the current score is higher than the goal, the next input should
	 * be lower than the last input.
	 */
	current_score = 250000000;
	KUNIT_EXPECT_LT(test,
			damon_feed_loop_next_input(last_input, current_score),
			last_input);

	/*
	 * The next input depends on the distance between the current score
	 * and the goal.
	 */
	KUNIT_EXPECT_GT(test,
			damon_feed_loop_next_input(last_input, 200),
			damon_feed_loop_next_input(last_input, 2000));
}

static void damon_test_set_filters_default_reject(struct kunit *test)
{
	struct damos scheme;
	struct damos_filter *target_filter, *anon_filter;

	INIT_LIST_HEAD(&scheme.filters);
	INIT_LIST_HEAD(&scheme.ops_filters);

	damos_set_filters_default_reject(&scheme);
	/*
	 * No filter is installed.  Allow by default on both core and ops
	 * layer filtering stages, since there are no filters at all.
	 */
	KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, false);
	KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, false);

	target_filter = damos_new_filter(DAMOS_FILTER_TYPE_TARGET, true, true);
	if (!target_filter)
		kunit_skip(test, "filter alloc fail");
	damos_add_filter(&scheme, target_filter);
	damos_set_filters_default_reject(&scheme);
	/*
	 * A core-handled allow-filter is installed.
	 * Reject by default on core layer filtering stage due to the last
	 * core-layer-filter's behavior.
	 * Allow by default on ops layer filtering stage due to the absence of
	 * ops layer filters.
	 */
	KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, true);
	KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, false);

	target_filter->allow = false;
	damos_set_filters_default_reject(&scheme);
	/*
	 * A core-handled reject-filter is installed.
	 * Allow by default on core layer filtering stage due to the last
	 * core-layer-filter's behavior.
	 * Allow by default on ops layer filtering stage due to the absence of
	 * ops layer filters.
	 */
	KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, false);
	KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, false);

	anon_filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, true);
	if (!anon_filter) {
		damos_free_filter(target_filter);
		kunit_skip(test, "anon_filter alloc fail");
	}
	damos_add_filter(&scheme, anon_filter);

	damos_set_filters_default_reject(&scheme);
	/*
	 * A core-handled reject-filter and an ops-handled allow-filter are
	 * installed.
	 * Allow by default on core layer filtering stage due to the existence
	 * of the ops-handled filter.
	 * Reject by default on ops layer filtering stage due to the last
	 * ops-layer-filter's behavior.
	 */
	KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, false);
	KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, true);

	target_filter->allow = true;
	damos_set_filters_default_reject(&scheme);
	/*
	 * A core-handled allow-filter and an ops-handled allow-filter are
	 * installed.
	 * Allow by default on core layer filtering stage due to the existence
	 * of the ops-handled filter.
	 * Reject by default on ops layer filtering stage due to the last
	 * ops-layer-filter's behavior.
	 */
	KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, false);
	KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, true);

	damos_free_filter(anon_filter);
	damos_free_filter(target_filter);
}

static struct kunit_case damon_test_cases[] = {
	KUNIT_CASE(damon_test_target),
	KUNIT_CASE(damon_test_regions),
	KUNIT_CASE(damon_test_aggregate),
	KUNIT_CASE(damon_test_split_at),
	KUNIT_CASE(damon_test_merge_two),
	KUNIT_CASE(damon_test_merge_regions_of),
	KUNIT_CASE(damon_test_split_regions_of),
	KUNIT_CASE(damon_test_ops_registration),
	KUNIT_CASE(damon_test_set_regions),
	KUNIT_CASE(damon_test_nr_accesses_to_accesses_bp),
	KUNIT_CASE(damon_test_update_monitoring_result),
	KUNIT_CASE(damon_test_set_attrs),
	KUNIT_CASE(damon_test_moving_sum),
	KUNIT_CASE(damos_test_new_filter),
	KUNIT_CASE(damos_test_commit_quota_goal),
	KUNIT_CASE(damos_test_commit_quota_goals),
	KUNIT_CASE(damos_test_commit_quota),
	KUNIT_CASE(damos_test_commit_filter),
	KUNIT_CASE(damos_test_filter_out),
	KUNIT_CASE(damon_test_feed_loop_next_input),
	KUNIT_CASE(damon_test_set_filters_default_reject),
	{},
};

static struct kunit_suite damon_test_suite = {
	.name = "damon",
	.test_cases = damon_test_cases,
};
kunit_test_suite(damon_test_suite);

#endif /* _DAMON_CORE_TEST_H */

#endif /* CONFIG_DAMON_KUNIT_TEST */

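/*
 * A minimal .kunitconfig sketch for running this suite (an assumed fragment;
 * adjust option names and paths for the tree at hand):
 *
 *	CONFIG_KUNIT=y
 *	CONFIG_DAMON=y
 *	CONFIG_DAMON_VADDR=y
 *	CONFIG_DAMON_KUNIT_TEST=y
 *
 * and then, for example:
 *
 *	./tools/testing/kunit/kunit.py run 'damon'
 */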