/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Data Access Monitor Unit Tests
 *
 * Copyright 2019 Amazon.com, Inc. or its affiliates. All rights reserved.
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#ifdef CONFIG_DAMON_KUNIT_TEST

#ifndef _DAMON_CORE_TEST_H
#define _DAMON_CORE_TEST_H

#include <kunit/test.h>

static void damon_test_regions(struct kunit *test)
{
	struct damon_region *r;
	struct damon_target *t;

	r = damon_new_region(1, 2);
	if (!r)
		kunit_skip(test, "region alloc fail");
	KUNIT_EXPECT_EQ(test, 1ul, r->ar.start);
	KUNIT_EXPECT_EQ(test, 2ul, r->ar.end);
	KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses);

	t = damon_new_target();
	if (!t) {
		damon_free_region(r);
		kunit_skip(test, "target alloc fail");
	}
	KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t));

	damon_add_region(r, t);
	KUNIT_EXPECT_EQ(test, 1u, damon_nr_regions(t));

	damon_destroy_region(r, t);
	KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t));

	damon_free_target(t);
}

static unsigned int nr_damon_targets(struct damon_ctx *ctx)
{
	struct damon_target *t;
	unsigned int nr_targets = 0;

	damon_for_each_target(t, ctx)
		nr_targets++;

	return nr_targets;
}

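/* Test damon_target creation, addition to a damon_ctx, and destruction. */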
static void damon_test_target(struct kunit *test)
{
	struct damon_ctx *c = damon_new_ctx();
	struct damon_target *t;

	if (!c)
		kunit_skip(test, "ctx alloc fail");

	t = damon_new_target();
	if (!t) {
		damon_destroy_ctx(c);
		kunit_skip(test, "target alloc fail");
	}
	KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c));

	damon_add_target(c, t);
	KUNIT_EXPECT_EQ(test, 1u, nr_damon_targets(c));

	damon_destroy_target(t, c);
	KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c));

	damon_destroy_ctx(c);
}

/*
 * Test kdamond_reset_aggregated()
 *
 * DAMON checks accesses to each region and aggregates the information as the
 * access frequency of each region. In detail, it increases '->nr_accesses' of
 * regions in which an access has been confirmed. 'kdamond_reset_aggregated()'
 * flushes the aggregated information ('->nr_accesses' of each region) to the
 * result buffer. As a result of the flushing, '->nr_accesses' of each region
 * is initialized to zero.
 */
static void damon_test_aggregate(struct kunit *test)
{
	struct damon_ctx *ctx = damon_new_ctx();
	unsigned long saddr[][3] = {{10, 20, 30}, {5, 42, 49}, {13, 33, 55} };
	unsigned long eaddr[][3] = {{15, 27, 40}, {31, 45, 55}, {23, 44, 66} };
	unsigned long accesses[][3] = {{42, 95, 84}, {10, 20, 30}, {0, 1, 2} };
	struct damon_target *t;
	struct damon_region *r;
	int it, ir;

	if (!ctx)
		kunit_skip(test, "ctx alloc fail");

	for (it = 0; it < 3; it++) {
		t = damon_new_target();
		if (!t) {
			damon_destroy_ctx(ctx);
			kunit_skip(test, "target alloc fail");
		}
		damon_add_target(ctx, t);
	}

	it = 0;
	damon_for_each_target(t, ctx) {
		for (ir = 0; ir < 3; ir++) {
			r = damon_new_region(saddr[it][ir], eaddr[it][ir]);
			if (!r) {
				damon_destroy_ctx(ctx);
				kunit_skip(test, "region alloc fail");
			}
			r->nr_accesses = accesses[it][ir];
			r->nr_accesses_bp = accesses[it][ir] * 10000;
			damon_add_region(r, t);
		}
		it++;
	}
	kdamond_reset_aggregated(ctx);
	it = 0;
	damon_for_each_target(t, ctx) {
		ir = 0;
		/* '->nr_accesses' should be zeroed */
		damon_for_each_region(r, t) {
			KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses);
			ir++;
		}
		/* regions should be preserved */
		KUNIT_EXPECT_EQ(test, 3, ir);
		it++;
	}
	/* targets also should be preserved */
	KUNIT_EXPECT_EQ(test, 3, it);

	damon_destroy_ctx(ctx);
}

static void damon_test_split_at(struct kunit *test)
{
	struct damon_target *t;
	struct damon_region *r, *r_new;

	t = damon_new_target();
	if (!t)
		kunit_skip(test, "target alloc fail");
	r = damon_new_region(0, 100);
	if (!r) {
		damon_free_target(t);
		kunit_skip(test, "region alloc fail");
	}
	r->nr_accesses_bp = 420000;
	r->nr_accesses = 42;
	r->last_nr_accesses = 15;
	damon_add_region(r, t);
	damon_split_region_at(t, r, 25);
	KUNIT_EXPECT_EQ(test, r->ar.start, 0ul);
	KUNIT_EXPECT_EQ(test, r->ar.end, 25ul);

	r_new = damon_next_region(r);
	KUNIT_EXPECT_EQ(test, r_new->ar.start, 25ul);
	KUNIT_EXPECT_EQ(test, r_new->ar.end, 100ul);

	KUNIT_EXPECT_EQ(test, r->nr_accesses_bp, r_new->nr_accesses_bp);
	KUNIT_EXPECT_EQ(test, r->nr_accesses, r_new->nr_accesses);
	KUNIT_EXPECT_EQ(test, r->last_nr_accesses, r_new->last_nr_accesses);

	damon_free_target(t);
}

static void damon_test_merge_two(struct kunit *test)
{
	struct damon_target *t;
	struct damon_region *r, *r2, *r3;
	int i;

	t = damon_new_target();
	if (!t)
		kunit_skip(test, "target alloc fail");
	r = damon_new_region(0, 100);
	if (!r) {
		damon_free_target(t);
		kunit_skip(test, "region alloc fail");
	}
	r->nr_accesses = 10;
	r->nr_accesses_bp = 100000;
	damon_add_region(r, t);
	r2 = damon_new_region(100, 300);
	if (!r2) {
		damon_free_target(t);
		kunit_skip(test, "second region alloc fail");
	}
	r2->nr_accesses = 20;
	r2->nr_accesses_bp = 200000;
	damon_add_region(r2, t);

	damon_merge_two_regions(t, r, r2);
	KUNIT_EXPECT_EQ(test, r->ar.start, 0ul);
	KUNIT_EXPECT_EQ(test, r->ar.end, 300ul);
	KUNIT_EXPECT_EQ(test, r->nr_accesses, 16u);

	i = 0;
	damon_for_each_region(r3, t) {
		KUNIT_EXPECT_PTR_EQ(test, r, r3);
		i++;
	}
	KUNIT_EXPECT_EQ(test, i, 1);

	damon_free_target(t);
}

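/* Return the idx'th region of the given target, or NULL if there is none. */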
static struct damon_region *__nth_region_of(struct damon_target *t, int idx)
{
	struct damon_region *r;
	unsigned int i = 0;

	damon_for_each_region(r, t) {
		if (i++ == idx)
			return r;
	}

	return NULL;
}

static void damon_test_merge_regions_of(struct kunit *test)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned long sa[] = {0, 100, 114, 122, 130, 156, 170, 184};
	unsigned long ea[] = {100, 112, 122, 130, 156, 170, 184, 230};
	unsigned int nrs[] = {0, 0, 10, 10, 20, 30, 1, 2};

	unsigned long saddrs[] = {0, 114, 130, 156, 170};
	unsigned long eaddrs[] = {112, 130, 156, 170, 230};
	int i;

	t = damon_new_target();
	if (!t)
		kunit_skip(test, "target alloc fail");
	for (i = 0; i < ARRAY_SIZE(sa); i++) {
		r = damon_new_region(sa[i], ea[i]);
		if (!r) {
			damon_free_target(t);
			kunit_skip(test, "region alloc fail");
		}
		r->nr_accesses = nrs[i];
		r->nr_accesses_bp = nrs[i] * 10000;
		damon_add_region(r, t);
	}

	damon_merge_regions_of(t, 9, 9999);
	/* 0-112, 114-130, 130-156, 156-170, 170-230 */
	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 5u);
	for (i = 0; i < 5; i++) {
		r = __nth_region_of(t, i);
		KUNIT_EXPECT_EQ(test, r->ar.start, saddrs[i]);
		KUNIT_EXPECT_EQ(test, r->ar.end, eaddrs[i]);
	}
	damon_free_target(t);
}

static void damon_test_split_regions_of(struct kunit *test)
{
	struct damon_target *t;
	struct damon_region *r;

	t = damon_new_target();
	if (!t)
		kunit_skip(test, "target alloc fail");
	r = damon_new_region(0, 22);
	if (!r) {
		damon_free_target(t);
		kunit_skip(test, "region alloc fail");
	}
	damon_add_region(r, t);
	damon_split_regions_of(t, 2, DAMON_MIN_REGION);
	KUNIT_EXPECT_LE(test, damon_nr_regions(t), 2u);
	damon_free_target(t);

	t = damon_new_target();
	if (!t)
		kunit_skip(test, "second target alloc fail");
	r = damon_new_region(0, 220);
	if (!r) {
		damon_free_target(t);
		kunit_skip(test, "second region alloc fail");
	}
	damon_add_region(r, t);
	damon_split_regions_of(t, 4, DAMON_MIN_REGION);
	KUNIT_EXPECT_LE(test, damon_nr_regions(t), 4u);
	damon_free_target(t);
}

static void damon_test_ops_registration(struct kunit *test)
{
	struct damon_ctx *c = damon_new_ctx();
	struct damon_operations ops = {.id = DAMON_OPS_VADDR}, bak;
	bool need_cleanup = false;

	if (!c)
		kunit_skip(test, "ctx alloc fail");

	/* DAMON_OPS_VADDR is registered only if CONFIG_DAMON_VADDR is set */
	if (!damon_is_registered_ops(DAMON_OPS_VADDR)) {
		bak.id = DAMON_OPS_VADDR;
		KUNIT_EXPECT_EQ(test, damon_register_ops(&bak), 0);
		need_cleanup = true;
	}

	/* DAMON_OPS_VADDR is ensured to be registered */
	KUNIT_EXPECT_EQ(test, damon_select_ops(c, DAMON_OPS_VADDR), 0);

	/* Double-registration is prohibited */
	KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL);

	/* Unknown ops id cannot be registered */
	KUNIT_EXPECT_EQ(test, damon_select_ops(c, NR_DAMON_OPS), -EINVAL);

	/* Registration should succeed after unregistration */
	mutex_lock(&damon_ops_lock);
	bak = damon_registered_ops[DAMON_OPS_VADDR];
	damon_registered_ops[DAMON_OPS_VADDR] = (struct damon_operations){};
	mutex_unlock(&damon_ops_lock);

	ops.id = DAMON_OPS_VADDR;
	KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), 0);

	mutex_lock(&damon_ops_lock);
	damon_registered_ops[DAMON_OPS_VADDR] = bak;
	mutex_unlock(&damon_ops_lock);

	/* Check double-registration failure again */
	KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL);

	damon_destroy_ctx(c);

	if (need_cleanup) {
		mutex_lock(&damon_ops_lock);
		damon_registered_ops[DAMON_OPS_VADDR] =
			(struct damon_operations){};
		mutex_unlock(&damon_ops_lock);
	}
}

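/*
 * Test damon_set_regions().
 *
 * The two existing regions ([4, 16) and [24, 32)) should be rearranged to
 * cover the requested range ([8, 28)) with three regions: [8, 16), [16, 24),
 * and [24, 28).
 */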
static void damon_test_set_regions(struct kunit *test)
{
	struct damon_target *t = damon_new_target();
	struct damon_region *r1, *r2;
	struct damon_addr_range range = {.start = 8, .end = 28};
	unsigned long expects[] = {8, 16, 16, 24, 24, 28};
	int expect_idx = 0;
	struct damon_region *r;

	if (!t)
		kunit_skip(test, "target alloc fail");
	r1 = damon_new_region(4, 16);
	if (!r1) {
		damon_free_target(t);
		kunit_skip(test, "region alloc fail");
	}
	r2 = damon_new_region(24, 32);
	if (!r2) {
		damon_free_target(t);
		damon_free_region(r1);
		kunit_skip(test, "second region alloc fail");
	}

	damon_add_region(r1, t);
	damon_add_region(r2, t);
	damon_set_regions(t, &range, 1, DAMON_MIN_REGION);

	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 3);
	damon_for_each_region(r, t) {
		KUNIT_EXPECT_EQ(test, r->ar.start, expects[expect_idx++]);
		KUNIT_EXPECT_EQ(test, r->ar.end, expects[expect_idx++]);
	}
	damon_destroy_target(t, NULL);
}

static void damon_test_nr_accesses_to_accesses_bp(struct kunit *test)
{
	struct damon_attrs attrs = {
		.sample_interval = 10,
		.aggr_interval = ((unsigned long)UINT_MAX + 1) * 10
	};

	/*
	 * In some cases, such as 32bit architectures where UINT_MAX is
	 * ULONG_MAX, attrs.aggr_interval becomes zero. Calling
	 * damon_nr_accesses_to_accesses_bp() in that case would cause a
	 * divide-by-zero. Such a case is prohibited in normal execution, since
	 * the caution is documented in the comment of the function, and
	 * damon_update_monitoring_results() does the check. Skip the test in
	 * the case.
	 */
	if (!attrs.aggr_interval)
		kunit_skip(test, "aggr_interval is zero.");

	KUNIT_EXPECT_EQ(test, damon_nr_accesses_to_accesses_bp(123, &attrs), 0);
}

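/*
 * Test damon_update_monitoring_result().
 *
 * When the sampling and aggregation intervals change, nr_accesses and age
 * should be rescaled to keep their meaning. For example, nr_accesses of 15
 * under a 10/1000 (sampling/aggregation) setup means 15 of 100 samplings.
 * Under a 100/10000 setup the ratio is unchanged, so nr_accesses stays 15,
 * while an age of 20 aggregation intervals becomes 2 of the ten times longer
 * new intervals.
 */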
static void damon_test_update_monitoring_result(struct kunit *test)
{
	struct damon_attrs old_attrs = {
		.sample_interval = 10, .aggr_interval = 1000,};
	struct damon_attrs new_attrs;
	struct damon_region *r = damon_new_region(3, 7);

	if (!r)
		kunit_skip(test, "region alloc fail");

	r->nr_accesses = 15;
	r->nr_accesses_bp = 150000;
	r->age = 20;

	new_attrs = (struct damon_attrs){
		.sample_interval = 100, .aggr_interval = 10000,};
	damon_update_monitoring_result(r, &old_attrs, &new_attrs, false);
	KUNIT_EXPECT_EQ(test, r->nr_accesses, 15);
	KUNIT_EXPECT_EQ(test, r->age, 2);

	new_attrs = (struct damon_attrs){
		.sample_interval = 1, .aggr_interval = 1000};
	damon_update_monitoring_result(r, &old_attrs, &new_attrs, false);
	KUNIT_EXPECT_EQ(test, r->nr_accesses, 150);
	KUNIT_EXPECT_EQ(test, r->age, 2);

	new_attrs = (struct damon_attrs){
		.sample_interval = 1, .aggr_interval = 100};
	damon_update_monitoring_result(r, &old_attrs, &new_attrs, false);
	KUNIT_EXPECT_EQ(test, r->nr_accesses, 150);
	KUNIT_EXPECT_EQ(test, r->age, 20);

	damon_free_region(r);
}

static void damon_test_set_attrs(struct kunit *test)
{
	struct damon_ctx *c = damon_new_ctx();
	struct damon_attrs valid_attrs = {
		.min_nr_regions = 10, .max_nr_regions = 1000,
		.sample_interval = 5000, .aggr_interval = 100000,};
	struct damon_attrs invalid_attrs;

	if (!c)
		kunit_skip(test, "ctx alloc fail");

	KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &valid_attrs), 0);

	invalid_attrs = valid_attrs;
	invalid_attrs.min_nr_regions = 1;
	KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);

	invalid_attrs = valid_attrs;
	invalid_attrs.max_nr_regions = 9;
	KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);

	invalid_attrs = valid_attrs;
	invalid_attrs.aggr_interval = 4999;
	KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);

	damon_destroy_ctx(c);
}

static void damon_test_moving_sum(struct kunit *test)
{
	unsigned int mvsum = 50000, nomvsum = 50000, len_window = 10;
	unsigned int new_values[] = {10000, 0, 10000, 0, 0, 0, 10000, 0, 0, 0};
	unsigned int expects[] = {55000, 50000, 55000, 50000, 45000, 40000,
		45000, 40000, 35000, 30000};
	int i;

	for (i = 0; i < ARRAY_SIZE(new_values); i++) {
		mvsum = damon_moving_sum(mvsum, nomvsum, len_window,
				new_values[i]);
		KUNIT_EXPECT_EQ(test, mvsum, expects[i]);
	}
}

static void damos_test_new_filter(struct kunit *test)
{
	struct damos_filter *filter;

	filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, false);
	if (!filter)
		kunit_skip(test, "filter alloc fail");
	KUNIT_EXPECT_EQ(test, filter->type, DAMOS_FILTER_TYPE_ANON);
	KUNIT_EXPECT_EQ(test, filter->matching, true);
	KUNIT_EXPECT_PTR_EQ(test, filter->list.prev, &filter->list);
	KUNIT_EXPECT_PTR_EQ(test, filter->list.next, &filter->list);
	damos_destroy_filter(filter);
}

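/*
 * Commit 'src' to 'dst' with damos_commit_quota_goal() and check that the
 * common and the metric-specific fields of 'dst' are updated as expected.
 */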
static void damos_test_commit_quota_goal_for(struct kunit *test,
		struct damos_quota_goal *dst,
		struct damos_quota_goal *src)
{
	u64 dst_last_psi_total = 0;

	if (dst->metric == DAMOS_QUOTA_SOME_MEM_PSI_US)
		dst_last_psi_total = dst->last_psi_total;
	damos_commit_quota_goal(dst, src);

	KUNIT_EXPECT_EQ(test, dst->metric, src->metric);
	KUNIT_EXPECT_EQ(test, dst->target_value, src->target_value);
	if (src->metric == DAMOS_QUOTA_USER_INPUT)
		KUNIT_EXPECT_EQ(test, dst->current_value, src->current_value);
	if (dst_last_psi_total && src->metric == DAMOS_QUOTA_SOME_MEM_PSI_US)
		KUNIT_EXPECT_EQ(test, dst->last_psi_total, dst_last_psi_total);
	switch (dst->metric) {
	case DAMOS_QUOTA_NODE_MEM_USED_BP:
	case DAMOS_QUOTA_NODE_MEM_FREE_BP:
		KUNIT_EXPECT_EQ(test, dst->nid, src->nid);
		break;
	case DAMOS_QUOTA_NODE_MEMCG_USED_BP:
	case DAMOS_QUOTA_NODE_MEMCG_FREE_BP:
		KUNIT_EXPECT_EQ(test, dst->nid, src->nid);
		KUNIT_EXPECT_EQ(test, dst->memcg_id, src->memcg_id);
		break;
	default:
		break;
	}
}

static void damos_test_commit_quota_goal(struct kunit *test)
{
	struct damos_quota_goal dst = {
		.metric = DAMOS_QUOTA_SOME_MEM_PSI_US,
		.target_value = 1000,
		.current_value = 123,
		.last_psi_total = 456,
	};

	damos_test_commit_quota_goal_for(test, &dst,
			&(struct damos_quota_goal){
			.metric = DAMOS_QUOTA_USER_INPUT,
			.target_value = 789,
			.current_value = 12});
	damos_test_commit_quota_goal_for(test, &dst,
			&(struct damos_quota_goal){
			.metric = DAMOS_QUOTA_NODE_MEM_FREE_BP,
			.target_value = 345,
			.current_value = 678,
			.nid = 9,
			});
	damos_test_commit_quota_goal_for(test, &dst,
			&(struct damos_quota_goal){
			.metric = DAMOS_QUOTA_NODE_MEM_USED_BP,
			.target_value = 12,
			.current_value = 345,
			.nid = 6,
			});
	damos_test_commit_quota_goal_for(test, &dst,
			&(struct damos_quota_goal){
			.metric = DAMOS_QUOTA_NODE_MEMCG_USED_BP,
			.target_value = 456,
			.current_value = 567,
			.nid = 6,
			.memcg_id = 7,
			});
	damos_test_commit_quota_goal_for(test, &dst,
			&(struct damos_quota_goal){
			.metric = DAMOS_QUOTA_NODE_MEMCG_FREE_BP,
			.target_value = 890,
			.current_value = 901,
			.nid = 10,
			.memcg_id = 1,
			});
	damos_test_commit_quota_goal_for(test, &dst,
			&(struct damos_quota_goal){
			.metric = DAMOS_QUOTA_USER_INPUT,
			.target_value = 789,
			.current_value = 12,
			});
}

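/*
 * Build 'dst' and 'src' quota goal lists from the given arrays, commit 'src'
 * to 'dst' with damos_commit_quota_goals(), and check that 'dst' ends up with
 * the same goals as 'src'.
 */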
static void damos_test_commit_quota_goals_for(struct kunit *test,
		struct damos_quota_goal *dst_goals, int nr_dst_goals,
		struct damos_quota_goal *src_goals, int nr_src_goals)
{
	struct damos_quota dst, src;
	struct damos_quota_goal *goal, *next;
	bool skip = true;
	int i;

	INIT_LIST_HEAD(&dst.goals);
	INIT_LIST_HEAD(&src.goals);

	for (i = 0; i < nr_dst_goals; i++) {
		/*
		 * When nr_src_goals is smaller than nr_dst_goals,
		 * damos_commit_quota_goals() will kfree() the dst goals.
		 * Make them kfree()-able.
		 */
		goal = damos_new_quota_goal(dst_goals[i].metric,
				dst_goals[i].target_value);
		if (!goal)
			goto out;
		damos_add_quota_goal(&dst, goal);
	}
	skip = false;
	for (i = 0; i < nr_src_goals; i++)
		damos_add_quota_goal(&src, &src_goals[i]);

	damos_commit_quota_goals(&dst, &src);

	i = 0;
	damos_for_each_quota_goal(goal, (&dst)) {
		KUNIT_EXPECT_EQ(test, goal->metric, src_goals[i].metric);
		KUNIT_EXPECT_EQ(test, goal->target_value,
				src_goals[i++].target_value);
	}
	KUNIT_EXPECT_EQ(test, i, nr_src_goals);

out:
	damos_for_each_quota_goal_safe(goal, next, (&dst))
		damos_destroy_quota_goal(goal);
	if (skip)
		kunit_skip(test, "goal alloc fail");
}

static void damos_test_commit_quota_goals(struct kunit *test)
{
	damos_test_commit_quota_goals_for(test,
			(struct damos_quota_goal[]){}, 0,
			(struct damos_quota_goal[]){
			{
				.metric = DAMOS_QUOTA_USER_INPUT,
				.target_value = 123,
			},
			}, 1);
	damos_test_commit_quota_goals_for(test,
			(struct damos_quota_goal[]){
			{
				.metric = DAMOS_QUOTA_USER_INPUT,
				.target_value = 234,
			},
			}, 1,
			(struct damos_quota_goal[]){
			{
				.metric = DAMOS_QUOTA_USER_INPUT,
				.target_value = 345,
			},
			}, 1);
	damos_test_commit_quota_goals_for(test,
			(struct damos_quota_goal[]){
			{
				.metric = DAMOS_QUOTA_USER_INPUT,
				.target_value = 456,
			},
			}, 1,
			(struct damos_quota_goal[]){}, 0);
}

static void damos_test_commit_filter_for(struct kunit *test,
		struct damos_filter *dst, struct damos_filter *src)
{
	damos_commit_filter(dst, src);
	KUNIT_EXPECT_EQ(test, dst->type, src->type);
	KUNIT_EXPECT_EQ(test, dst->matching, src->matching);
	KUNIT_EXPECT_EQ(test, dst->allow, src->allow);
	switch (src->type) {
	case DAMOS_FILTER_TYPE_MEMCG:
		KUNIT_EXPECT_EQ(test, dst->memcg_id, src->memcg_id);
		break;
	case DAMOS_FILTER_TYPE_ADDR:
		KUNIT_EXPECT_EQ(test, dst->addr_range.start,
				src->addr_range.start);
		KUNIT_EXPECT_EQ(test, dst->addr_range.end,
				src->addr_range.end);
		break;
	case DAMOS_FILTER_TYPE_TARGET:
		KUNIT_EXPECT_EQ(test, dst->target_idx, src->target_idx);
		break;
	case DAMOS_FILTER_TYPE_HUGEPAGE_SIZE:
		KUNIT_EXPECT_EQ(test, dst->sz_range.min, src->sz_range.min);
		KUNIT_EXPECT_EQ(test, dst->sz_range.max, src->sz_range.max);
		break;
	default:
		break;
	}
}

static void damos_test_commit_filter(struct kunit *test)
{
	struct damos_filter dst = {
		.type = DAMOS_FILTER_TYPE_ACTIVE,
		.matching = false,
		.allow = false,
	};

	damos_test_commit_filter_for(test, &dst,
			&(struct damos_filter){
			.type = DAMOS_FILTER_TYPE_ANON,
			.matching = true,
			.allow = true,
			});
	damos_test_commit_filter_for(test, &dst,
			&(struct damos_filter){
			.type = DAMOS_FILTER_TYPE_MEMCG,
			.matching = false,
			.allow = false,
			.memcg_id = 123,
			});
	damos_test_commit_filter_for(test, &dst,
			&(struct damos_filter){
			.type = DAMOS_FILTER_TYPE_YOUNG,
			.matching = true,
			.allow = true,
			});
	damos_test_commit_filter_for(test, &dst,
			&(struct damos_filter){
			.type = DAMOS_FILTER_TYPE_HUGEPAGE_SIZE,
			.matching = false,
			.allow = false,
			.sz_range = {.min = 234, .max = 345},
			});
	damos_test_commit_filter_for(test, &dst,
			&(struct damos_filter){
			.type = DAMOS_FILTER_TYPE_UNMAPPED,
			.matching = true,
			.allow = true,
			});
	damos_test_commit_filter_for(test, &dst,
			&(struct damos_filter){
			.type = DAMOS_FILTER_TYPE_ADDR,
			.matching = false,
			.allow = false,
			.addr_range = {.start = 456, .end = 567},
			});
	damos_test_commit_filter_for(test, &dst,
			&(struct damos_filter){
			.type = DAMOS_FILTER_TYPE_TARGET,
			.matching = true,
			.allow = true,
			.target_idx = 6,
			});
}

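/*
 * Test DAMOS address range filter matching.
 *
 * A region that partially overlaps the filter's address range should be split
 * so that only the overlapping part is matched against the filter.
 */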
static void damos_test_filter_out(struct kunit *test)
{
	struct damon_target *t;
	struct damon_region *r, *r2;
	struct damos_filter *f;

	f = damos_new_filter(DAMOS_FILTER_TYPE_ADDR, true, false);
	if (!f)
		kunit_skip(test, "filter alloc fail");
	f->addr_range = (struct damon_addr_range){
		.start = DAMON_MIN_REGION * 2, .end = DAMON_MIN_REGION * 6};

	t = damon_new_target();
	if (!t) {
		damos_destroy_filter(f);
		kunit_skip(test, "target alloc fail");
	}
	r = damon_new_region(DAMON_MIN_REGION * 3, DAMON_MIN_REGION * 5);
	if (!r) {
		damos_destroy_filter(f);
		damon_free_target(t);
		kunit_skip(test, "region alloc fail");
	}
	damon_add_region(r, t);

	/* region in the range */
	KUNIT_EXPECT_TRUE(test,
			damos_filter_match(NULL, t, r, f, DAMON_MIN_REGION));
	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);

	/* region before the range */
	r->ar.start = DAMON_MIN_REGION * 1;
	r->ar.end = DAMON_MIN_REGION * 2;
	KUNIT_EXPECT_FALSE(test,
			damos_filter_match(NULL, t, r, f, DAMON_MIN_REGION));
	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);

	/* region after the range */
	r->ar.start = DAMON_MIN_REGION * 6;
	r->ar.end = DAMON_MIN_REGION * 8;
	KUNIT_EXPECT_FALSE(test,
			damos_filter_match(NULL, t, r, f, DAMON_MIN_REGION));
	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);

	/* region started before the range */
	r->ar.start = DAMON_MIN_REGION * 1;
	r->ar.end = DAMON_MIN_REGION * 4;
	KUNIT_EXPECT_FALSE(test,
			damos_filter_match(NULL, t, r, f, DAMON_MIN_REGION));
	/* filter should have split the region */
	KUNIT_EXPECT_EQ(test, r->ar.start, DAMON_MIN_REGION * 1);
	KUNIT_EXPECT_EQ(test, r->ar.end, DAMON_MIN_REGION * 2);
	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 2);
	r2 = damon_next_region(r);
	KUNIT_EXPECT_EQ(test, r2->ar.start, DAMON_MIN_REGION * 2);
	KUNIT_EXPECT_EQ(test, r2->ar.end, DAMON_MIN_REGION * 4);
	damon_destroy_region(r2, t);

	/* region started in the range */
	r->ar.start = DAMON_MIN_REGION * 2;
	r->ar.end = DAMON_MIN_REGION * 8;
	KUNIT_EXPECT_TRUE(test,
			damos_filter_match(NULL, t, r, f, DAMON_MIN_REGION));
	/* filter should have split the region */
	KUNIT_EXPECT_EQ(test, r->ar.start, DAMON_MIN_REGION * 2);
	KUNIT_EXPECT_EQ(test, r->ar.end, DAMON_MIN_REGION * 6);
	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 2);
	r2 = damon_next_region(r);
	KUNIT_EXPECT_EQ(test, r2->ar.start, DAMON_MIN_REGION * 6);
	KUNIT_EXPECT_EQ(test, r2->ar.end, DAMON_MIN_REGION * 8);
	damon_destroy_region(r2, t);

	damon_free_target(t);
	damos_free_filter(f);
}

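/*
 * Test damon_feed_loop_next_input().
 *
 * The function suggests the next input of a feedback loop from the last input
 * and the current score, aiming at the fixed goal score of 10,000 (see the
 * comment of the function).
 */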
static void damon_test_feed_loop_next_input(struct kunit *test)
{
	unsigned long last_input = 900000, current_score = 200;

	/*
	 * If the current score is lower than the goal, which is always 10,000
	 * (see the comment of damon_feed_loop_next_input()), the next input
	 * should be higher than the last input.
	 */
	KUNIT_EXPECT_GT(test,
			damon_feed_loop_next_input(last_input, current_score),
			last_input);

	/*
	 * If the current score is higher than the goal, the next input should
	 * be lower than the last input.
	 */
	current_score = 250000000;
	KUNIT_EXPECT_LT(test,
			damon_feed_loop_next_input(last_input, current_score),
			last_input);

	/*
	 * The next input depends on the distance between the current score
	 * and the goal.
	 */
	KUNIT_EXPECT_GT(test,
			damon_feed_loop_next_input(last_input, 200),
			damon_feed_loop_next_input(last_input, 2000));
}

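/*
 * Test damos_set_filters_default_reject().
 *
 * Whether regions that match no filter should be allowed or rejected by
 * default depends on the existence of filters for each (core and ops)
 * filtering stage and on the allow/reject behavior of the last filter of the
 * stage.
 */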
static void damon_test_set_filters_default_reject(struct kunit *test)
{
	struct damos scheme;
	struct damos_filter *target_filter, *anon_filter;

	INIT_LIST_HEAD(&scheme.filters);
	INIT_LIST_HEAD(&scheme.ops_filters);

	damos_set_filters_default_reject(&scheme);
	/*
	 * No filter is installed. Allow by default on both core and ops layer
	 * filtering stages, since there are no filters at all.
	 */
	KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, false);
	KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, false);

	target_filter = damos_new_filter(DAMOS_FILTER_TYPE_TARGET, true, true);
	if (!target_filter)
		kunit_skip(test, "filter alloc fail");
	damos_add_filter(&scheme, target_filter);
	damos_set_filters_default_reject(&scheme);
	/*
	 * A core-handled allow-filter is installed.
	 * Reject by default on core layer filtering stage due to the last
	 * core-layer-filter's behavior.
	 * Allow by default on ops layer filtering stage due to the absence of
	 * ops layer filters.
	 */
	KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, true);
	KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, false);

	target_filter->allow = false;
	damos_set_filters_default_reject(&scheme);
	/*
	 * A core-handled reject-filter is installed.
	 * Allow by default on core layer filtering stage due to the last
	 * core-layer-filter's behavior.
	 * Allow by default on ops layer filtering stage due to the absence of
	 * ops layer filters.
	 */
	KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, false);
	KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, false);

	anon_filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, true);
	if (!anon_filter) {
		damos_free_filter(target_filter);
		kunit_skip(test, "anon_filter alloc fail");
	}
	damos_add_filter(&scheme, anon_filter);

	damos_set_filters_default_reject(&scheme);
	/*
	 * A core-handled reject-filter and an ops-handled allow-filter are
	 * installed.
	 * Allow by default on core layer filtering stage due to the existence
	 * of the ops-handled filter.
	 * Reject by default on ops layer filtering stage due to the last
	 * ops-layer-filter's behavior.
	 */
	KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, false);
	KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, true);

	target_filter->allow = true;
	damos_set_filters_default_reject(&scheme);
	/*
	 * A core-handled allow-filter and an ops-handled allow-filter are
	 * installed.
	 * Allow by default on core layer filtering stage due to the existence
	 * of the ops-handled filter.
	 * Reject by default on ops layer filtering stage due to the last
	 * ops-layer-filter's behavior.
	 */
	KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, false);
	KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, true);

	damos_free_filter(anon_filter);
	damos_free_filter(target_filter);
}

static struct kunit_case damon_test_cases[] = {
	KUNIT_CASE(damon_test_target),
	KUNIT_CASE(damon_test_regions),
	KUNIT_CASE(damon_test_aggregate),
	KUNIT_CASE(damon_test_split_at),
	KUNIT_CASE(damon_test_merge_two),
	KUNIT_CASE(damon_test_merge_regions_of),
	KUNIT_CASE(damon_test_split_regions_of),
	KUNIT_CASE(damon_test_ops_registration),
	KUNIT_CASE(damon_test_set_regions),
	KUNIT_CASE(damon_test_nr_accesses_to_accesses_bp),
	KUNIT_CASE(damon_test_update_monitoring_result),
	KUNIT_CASE(damon_test_set_attrs),
	KUNIT_CASE(damon_test_moving_sum),
	KUNIT_CASE(damos_test_new_filter),
	KUNIT_CASE(damos_test_commit_quota_goal),
	KUNIT_CASE(damos_test_commit_quota_goals),
	KUNIT_CASE(damos_test_commit_filter),
	KUNIT_CASE(damos_test_filter_out),
	KUNIT_CASE(damon_test_feed_loop_next_input),
	KUNIT_CASE(damon_test_set_filters_default_reject),
	{},
};

static struct kunit_suite damon_test_suite = {
	.name = "damon",
	.test_cases = damon_test_cases,
};
kunit_test_suite(damon_test_suite);

#endif /* _DAMON_CORE_TEST_H */

#endif /* CONFIG_DAMON_KUNIT_TEST */