1 /* SPDX-License-Identifier: GPL-2.0 */ 2 /* 3 * Data Access Monitor Unit Tests 4 * 5 * Copyright 2019 Amazon.com, Inc. or its affiliates. All rights reserved. 6 * 7 * Author: SeongJae Park <sj@kernel.org> 8 */ 9 10 #ifdef CONFIG_DAMON_KUNIT_TEST 11 12 #ifndef _DAMON_CORE_TEST_H 13 #define _DAMON_CORE_TEST_H 14 15 #include <kunit/test.h> 16 17 static void damon_test_regions(struct kunit *test) 18 { 19 struct damon_region *r; 20 struct damon_target *t; 21 22 r = damon_new_region(1, 2); 23 if (!r) 24 kunit_skip(test, "region alloc fail"); 25 KUNIT_EXPECT_EQ(test, 1ul, r->ar.start); 26 KUNIT_EXPECT_EQ(test, 2ul, r->ar.end); 27 KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses); 28 29 t = damon_new_target(); 30 if (!t) { 31 damon_free_region(r); 32 kunit_skip(test, "target alloc fail"); 33 } 34 KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t)); 35 36 damon_add_region(r, t); 37 KUNIT_EXPECT_EQ(test, 1u, damon_nr_regions(t)); 38 39 damon_destroy_region(r, t); 40 KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t)); 41 42 damon_free_target(t); 43 } 44 45 static unsigned int nr_damon_targets(struct damon_ctx *ctx) 46 { 47 struct damon_target *t; 48 unsigned int nr_targets = 0; 49 50 damon_for_each_target(t, ctx) 51 nr_targets++; 52 53 return nr_targets; 54 } 55 56 static void damon_test_target(struct kunit *test) 57 { 58 struct damon_ctx *c = damon_new_ctx(); 59 struct damon_target *t; 60 61 if (!c) 62 kunit_skip(test, "ctx alloc fail"); 63 64 t = damon_new_target(); 65 if (!t) { 66 damon_destroy_ctx(c); 67 kunit_skip(test, "target alloc fail"); 68 } 69 KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c)); 70 71 damon_add_target(c, t); 72 KUNIT_EXPECT_EQ(test, 1u, nr_damon_targets(c)); 73 74 damon_destroy_target(t, c); 75 KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c)); 76 77 damon_destroy_ctx(c); 78 } 79 80 /* 81 * Test kdamond_reset_aggregated() 82 * 83 * DAMON checks access to each region and aggregates this information as the 84 * access frequency of each region. In detail, it increases '->nr_accesses' of 85 * regions that an access has confirmed. 'kdamond_reset_aggregated()' flushes 86 * the aggregated information ('->nr_accesses' of each regions) to the result 87 * buffer. As a result of the flushing, the '->nr_accesses' of regions are 88 * initialized to zero. 89 */ 90 static void damon_test_aggregate(struct kunit *test) 91 { 92 struct damon_ctx *ctx = damon_new_ctx(); 93 unsigned long saddr[][3] = {{10, 20, 30}, {5, 42, 49}, {13, 33, 55} }; 94 unsigned long eaddr[][3] = {{15, 27, 40}, {31, 45, 55}, {23, 44, 66} }; 95 unsigned long accesses[][3] = {{42, 95, 84}, {10, 20, 30}, {0, 1, 2} }; 96 struct damon_target *t; 97 struct damon_region *r; 98 int it, ir; 99 100 if (!ctx) 101 kunit_skip(test, "ctx alloc fail"); 102 103 for (it = 0; it < 3; it++) { 104 t = damon_new_target(); 105 if (!t) { 106 damon_destroy_ctx(ctx); 107 kunit_skip(test, "target alloc fail"); 108 } 109 damon_add_target(ctx, t); 110 } 111 112 it = 0; 113 damon_for_each_target(t, ctx) { 114 for (ir = 0; ir < 3; ir++) { 115 r = damon_new_region(saddr[it][ir], eaddr[it][ir]); 116 if (!r) { 117 damon_destroy_ctx(ctx); 118 kunit_skip(test, "region alloc fail"); 119 } 120 r->nr_accesses = accesses[it][ir]; 121 r->nr_accesses_bp = accesses[it][ir] * 10000; 122 damon_add_region(r, t); 123 } 124 it++; 125 } 126 kdamond_reset_aggregated(ctx); 127 it = 0; 128 damon_for_each_target(t, ctx) { 129 ir = 0; 130 /* '->nr_accesses' should be zeroed */ 131 damon_for_each_region(r, t) { 132 KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses); 133 ir++; 134 } 135 /* regions should be preserved */ 136 KUNIT_EXPECT_EQ(test, 3, ir); 137 it++; 138 } 139 /* targets also should be preserved */ 140 KUNIT_EXPECT_EQ(test, 3, it); 141 142 damon_destroy_ctx(ctx); 143 } 144 145 static void damon_test_split_at(struct kunit *test) 146 { 147 struct damon_target *t; 148 struct damon_region *r, *r_new; 149 150 t = damon_new_target(); 151 if (!t) 152 kunit_skip(test, "target alloc fail"); 153 r = damon_new_region(0, 100); 154 if (!r) { 155 damon_free_target(t); 156 kunit_skip(test, "region alloc fail"); 157 } 158 r->nr_accesses_bp = 420000; 159 r->nr_accesses = 42; 160 r->last_nr_accesses = 15; 161 damon_add_region(r, t); 162 damon_split_region_at(t, r, 25); 163 KUNIT_EXPECT_EQ(test, r->ar.start, 0ul); 164 KUNIT_EXPECT_EQ(test, r->ar.end, 25ul); 165 166 r_new = damon_next_region(r); 167 KUNIT_EXPECT_EQ(test, r_new->ar.start, 25ul); 168 KUNIT_EXPECT_EQ(test, r_new->ar.end, 100ul); 169 170 KUNIT_EXPECT_EQ(test, r->nr_accesses_bp, r_new->nr_accesses_bp); 171 KUNIT_EXPECT_EQ(test, r->nr_accesses, r_new->nr_accesses); 172 KUNIT_EXPECT_EQ(test, r->last_nr_accesses, r_new->last_nr_accesses); 173 174 damon_free_target(t); 175 } 176 177 static void damon_test_merge_two(struct kunit *test) 178 { 179 struct damon_target *t; 180 struct damon_region *r, *r2, *r3; 181 int i; 182 183 t = damon_new_target(); 184 if (!t) 185 kunit_skip(test, "target alloc fail"); 186 r = damon_new_region(0, 100); 187 if (!r) { 188 damon_free_target(t); 189 kunit_skip(test, "region alloc fail"); 190 } 191 r->nr_accesses = 10; 192 r->nr_accesses_bp = 100000; 193 damon_add_region(r, t); 194 r2 = damon_new_region(100, 300); 195 if (!r2) { 196 damon_free_target(t); 197 kunit_skip(test, "second region alloc fail"); 198 } 199 r2->nr_accesses = 20; 200 r2->nr_accesses_bp = 200000; 201 damon_add_region(r2, t); 202 203 damon_merge_two_regions(t, r, r2); 204 KUNIT_EXPECT_EQ(test, r->ar.start, 0ul); 205 KUNIT_EXPECT_EQ(test, r->ar.end, 300ul); 206 KUNIT_EXPECT_EQ(test, r->nr_accesses, 16u); 207 208 i = 0; 209 damon_for_each_region(r3, t) { 210 KUNIT_EXPECT_PTR_EQ(test, r, r3); 211 i++; 212 } 213 KUNIT_EXPECT_EQ(test, i, 1); 214 215 damon_free_target(t); 216 } 217 218 static struct damon_region *__nth_region_of(struct damon_target *t, int idx) 219 { 220 struct damon_region *r; 221 unsigned int i = 0; 222 223 damon_for_each_region(r, t) { 224 if (i++ == idx) 225 return r; 226 } 227 228 return NULL; 229 } 230 231 static void damon_test_merge_regions_of(struct kunit *test) 232 { 233 struct damon_target *t; 234 struct damon_region *r; 235 unsigned long sa[] = {0, 100, 114, 122, 130, 156, 170, 184}; 236 unsigned long ea[] = {100, 112, 122, 130, 156, 170, 184, 230}; 237 unsigned int nrs[] = {0, 0, 10, 10, 20, 30, 1, 2}; 238 239 unsigned long saddrs[] = {0, 114, 130, 156, 170}; 240 unsigned long eaddrs[] = {112, 130, 156, 170, 230}; 241 int i; 242 243 t = damon_new_target(); 244 if (!t) 245 kunit_skip(test, "target alloc fail"); 246 for (i = 0; i < ARRAY_SIZE(sa); i++) { 247 r = damon_new_region(sa[i], ea[i]); 248 if (!r) { 249 damon_free_target(t); 250 kunit_skip(test, "region alloc fail"); 251 } 252 r->nr_accesses = nrs[i]; 253 r->nr_accesses_bp = nrs[i] * 10000; 254 damon_add_region(r, t); 255 } 256 257 damon_merge_regions_of(t, 9, 9999); 258 /* 0-112, 114-130, 130-156, 156-170 */ 259 KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 5u); 260 for (i = 0; i < 5; i++) { 261 r = __nth_region_of(t, i); 262 KUNIT_EXPECT_EQ(test, r->ar.start, saddrs[i]); 263 KUNIT_EXPECT_EQ(test, r->ar.end, eaddrs[i]); 264 } 265 damon_free_target(t); 266 } 267 268 static void damon_test_split_regions_of(struct kunit *test) 269 { 270 struct damon_target *t; 271 struct damon_region *r; 272 273 t = damon_new_target(); 274 if (!t) 275 kunit_skip(test, "target alloc fail"); 276 r = damon_new_region(0, 22); 277 if (!r) { 278 damon_free_target(t); 279 kunit_skip(test, "region alloc fail"); 280 } 281 damon_add_region(r, t); 282 damon_split_regions_of(t, 2, 1); 283 KUNIT_EXPECT_LE(test, damon_nr_regions(t), 2u); 284 damon_free_target(t); 285 286 t = damon_new_target(); 287 if (!t) 288 kunit_skip(test, "second target alloc fail"); 289 r = damon_new_region(0, 220); 290 if (!r) { 291 damon_free_target(t); 292 kunit_skip(test, "second region alloc fail"); 293 } 294 damon_add_region(r, t); 295 damon_split_regions_of(t, 4, 1); 296 KUNIT_EXPECT_LE(test, damon_nr_regions(t), 4u); 297 damon_free_target(t); 298 } 299 300 static void damon_test_ops_registration(struct kunit *test) 301 { 302 struct damon_ctx *c = damon_new_ctx(); 303 struct damon_operations ops = {.id = DAMON_OPS_VADDR}, bak; 304 bool need_cleanup = false; 305 306 if (!c) 307 kunit_skip(test, "ctx alloc fail"); 308 309 /* DAMON_OPS_VADDR is registered only if CONFIG_DAMON_VADDR is set */ 310 if (!damon_is_registered_ops(DAMON_OPS_VADDR)) { 311 bak.id = DAMON_OPS_VADDR; 312 KUNIT_EXPECT_EQ(test, damon_register_ops(&bak), 0); 313 need_cleanup = true; 314 } 315 316 /* DAMON_OPS_VADDR is ensured to be registered */ 317 KUNIT_EXPECT_EQ(test, damon_select_ops(c, DAMON_OPS_VADDR), 0); 318 319 /* Double-registration is prohibited */ 320 KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL); 321 322 /* Unknown ops id cannot be registered */ 323 KUNIT_EXPECT_EQ(test, damon_select_ops(c, NR_DAMON_OPS), -EINVAL); 324 325 /* Registration should success after unregistration */ 326 mutex_lock(&damon_ops_lock); 327 bak = damon_registered_ops[DAMON_OPS_VADDR]; 328 damon_registered_ops[DAMON_OPS_VADDR] = (struct damon_operations){}; 329 mutex_unlock(&damon_ops_lock); 330 331 ops.id = DAMON_OPS_VADDR; 332 KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), 0); 333 334 mutex_lock(&damon_ops_lock); 335 damon_registered_ops[DAMON_OPS_VADDR] = bak; 336 mutex_unlock(&damon_ops_lock); 337 338 /* Check double-registration failure again */ 339 KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL); 340 341 damon_destroy_ctx(c); 342 343 if (need_cleanup) { 344 mutex_lock(&damon_ops_lock); 345 damon_registered_ops[DAMON_OPS_VADDR] = 346 (struct damon_operations){}; 347 mutex_unlock(&damon_ops_lock); 348 } 349 } 350 351 static void damon_test_set_regions(struct kunit *test) 352 { 353 struct damon_target *t = damon_new_target(); 354 struct damon_region *r1, *r2; 355 struct damon_addr_range range = {.start = 8, .end = 28}; 356 unsigned long expects[] = {8, 16, 16, 24, 24, 28}; 357 int expect_idx = 0; 358 struct damon_region *r; 359 360 if (!t) 361 kunit_skip(test, "target alloc fail"); 362 r1 = damon_new_region(4, 16); 363 if (!r1) { 364 damon_free_target(t); 365 kunit_skip(test, "region alloc fail"); 366 } 367 r2 = damon_new_region(24, 32); 368 if (!r2) { 369 damon_free_target(t); 370 damon_free_region(r1); 371 kunit_skip(test, "second region alloc fail"); 372 } 373 374 damon_add_region(r1, t); 375 damon_add_region(r2, t); 376 damon_set_regions(t, &range, 1, 1); 377 378 KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 3); 379 damon_for_each_region(r, t) { 380 KUNIT_EXPECT_EQ(test, r->ar.start, expects[expect_idx++]); 381 KUNIT_EXPECT_EQ(test, r->ar.end, expects[expect_idx++]); 382 } 383 damon_destroy_target(t, NULL); 384 } 385 386 static void damon_test_nr_accesses_to_accesses_bp(struct kunit *test) 387 { 388 struct damon_attrs attrs = { 389 .sample_interval = 10, 390 .aggr_interval = ((unsigned long)UINT_MAX + 1) * 10 391 }; 392 393 /* 394 * In some cases such as 32bit architectures where UINT_MAX is 395 * ULONG_MAX, attrs.aggr_interval becomes zero. Calling 396 * damon_nr_accesses_to_accesses_bp() in the case will cause 397 * divide-by-zero. Such case is prohibited in normal execution since 398 * the caution is documented on the comment for the function, and 399 * damon_update_monitoring_results() does the check. Skip the test in 400 * the case. 401 */ 402 if (!attrs.aggr_interval) 403 kunit_skip(test, "aggr_interval is zero."); 404 405 KUNIT_EXPECT_EQ(test, damon_nr_accesses_to_accesses_bp(123, &attrs), 0); 406 } 407 408 static void damon_test_update_monitoring_result(struct kunit *test) 409 { 410 struct damon_attrs old_attrs = { 411 .sample_interval = 10, .aggr_interval = 1000,}; 412 struct damon_attrs new_attrs; 413 struct damon_region *r = damon_new_region(3, 7); 414 415 if (!r) 416 kunit_skip(test, "region alloc fail"); 417 418 r->nr_accesses = 15; 419 r->nr_accesses_bp = 150000; 420 r->age = 20; 421 422 new_attrs = (struct damon_attrs){ 423 .sample_interval = 100, .aggr_interval = 10000,}; 424 damon_update_monitoring_result(r, &old_attrs, &new_attrs, false); 425 KUNIT_EXPECT_EQ(test, r->nr_accesses, 15); 426 KUNIT_EXPECT_EQ(test, r->age, 2); 427 428 new_attrs = (struct damon_attrs){ 429 .sample_interval = 1, .aggr_interval = 1000}; 430 damon_update_monitoring_result(r, &old_attrs, &new_attrs, false); 431 KUNIT_EXPECT_EQ(test, r->nr_accesses, 150); 432 KUNIT_EXPECT_EQ(test, r->age, 2); 433 434 new_attrs = (struct damon_attrs){ 435 .sample_interval = 1, .aggr_interval = 100}; 436 damon_update_monitoring_result(r, &old_attrs, &new_attrs, false); 437 KUNIT_EXPECT_EQ(test, r->nr_accesses, 150); 438 KUNIT_EXPECT_EQ(test, r->age, 20); 439 440 damon_free_region(r); 441 } 442 443 static void damon_test_set_attrs(struct kunit *test) 444 { 445 struct damon_ctx *c = damon_new_ctx(); 446 struct damon_attrs valid_attrs = { 447 .min_nr_regions = 10, .max_nr_regions = 1000, 448 .sample_interval = 5000, .aggr_interval = 100000,}; 449 struct damon_attrs invalid_attrs; 450 451 if (!c) 452 kunit_skip(test, "ctx alloc fail"); 453 454 KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &valid_attrs), 0); 455 456 invalid_attrs = valid_attrs; 457 invalid_attrs.min_nr_regions = 1; 458 KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL); 459 460 invalid_attrs = valid_attrs; 461 invalid_attrs.max_nr_regions = 9; 462 KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL); 463 464 invalid_attrs = valid_attrs; 465 invalid_attrs.aggr_interval = 4999; 466 KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL); 467 468 damon_destroy_ctx(c); 469 } 470 471 static void damon_test_moving_sum(struct kunit *test) 472 { 473 unsigned int mvsum = 50000, nomvsum = 50000, len_window = 10; 474 unsigned int new_values[] = {10000, 0, 10000, 0, 0, 0, 10000, 0, 0, 0}; 475 unsigned int expects[] = {55000, 50000, 55000, 50000, 45000, 40000, 476 45000, 40000, 35000, 30000}; 477 int i; 478 479 for (i = 0; i < ARRAY_SIZE(new_values); i++) { 480 mvsum = damon_moving_sum(mvsum, nomvsum, len_window, 481 new_values[i]); 482 KUNIT_EXPECT_EQ(test, mvsum, expects[i]); 483 } 484 } 485 486 static void damos_test_new_filter(struct kunit *test) 487 { 488 struct damos_filter *filter; 489 490 filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, false); 491 if (!filter) 492 kunit_skip(test, "filter alloc fail"); 493 KUNIT_EXPECT_EQ(test, filter->type, DAMOS_FILTER_TYPE_ANON); 494 KUNIT_EXPECT_EQ(test, filter->matching, true); 495 KUNIT_EXPECT_PTR_EQ(test, filter->list.prev, &filter->list); 496 KUNIT_EXPECT_PTR_EQ(test, filter->list.next, &filter->list); 497 damos_destroy_filter(filter); 498 } 499 500 static void damos_test_commit_quota_goal_for(struct kunit *test, 501 struct damos_quota_goal *dst, 502 struct damos_quota_goal *src) 503 { 504 u64 dst_last_psi_total = 0; 505 506 if (dst->metric == DAMOS_QUOTA_SOME_MEM_PSI_US) 507 dst_last_psi_total = dst->last_psi_total; 508 damos_commit_quota_goal(dst, src); 509 510 KUNIT_EXPECT_EQ(test, dst->metric, src->metric); 511 KUNIT_EXPECT_EQ(test, dst->target_value, src->target_value); 512 if (src->metric == DAMOS_QUOTA_USER_INPUT) 513 KUNIT_EXPECT_EQ(test, dst->current_value, src->current_value); 514 if (dst_last_psi_total && src->metric == DAMOS_QUOTA_SOME_MEM_PSI_US) 515 KUNIT_EXPECT_EQ(test, dst->last_psi_total, dst_last_psi_total); 516 switch (dst->metric) { 517 case DAMOS_QUOTA_NODE_MEM_USED_BP: 518 case DAMOS_QUOTA_NODE_MEM_FREE_BP: 519 KUNIT_EXPECT_EQ(test, dst->nid, src->nid); 520 break; 521 case DAMOS_QUOTA_NODE_MEMCG_USED_BP: 522 case DAMOS_QUOTA_NODE_MEMCG_FREE_BP: 523 KUNIT_EXPECT_EQ(test, dst->nid, src->nid); 524 KUNIT_EXPECT_EQ(test, dst->memcg_id, src->memcg_id); 525 break; 526 default: 527 break; 528 } 529 } 530 531 static void damos_test_commit_quota_goal(struct kunit *test) 532 { 533 struct damos_quota_goal dst = { 534 .metric = DAMOS_QUOTA_SOME_MEM_PSI_US, 535 .target_value = 1000, 536 .current_value = 123, 537 .last_psi_total = 456, 538 }; 539 540 damos_test_commit_quota_goal_for(test, &dst, 541 &(struct damos_quota_goal){ 542 .metric = DAMOS_QUOTA_USER_INPUT, 543 .target_value = 789, 544 .current_value = 12}); 545 damos_test_commit_quota_goal_for(test, &dst, 546 &(struct damos_quota_goal){ 547 .metric = DAMOS_QUOTA_NODE_MEM_FREE_BP, 548 .target_value = 345, 549 .current_value = 678, 550 .nid = 9, 551 }); 552 damos_test_commit_quota_goal_for(test, &dst, 553 &(struct damos_quota_goal){ 554 .metric = DAMOS_QUOTA_NODE_MEM_USED_BP, 555 .target_value = 12, 556 .current_value = 345, 557 .nid = 6, 558 }); 559 damos_test_commit_quota_goal_for(test, &dst, 560 &(struct damos_quota_goal){ 561 .metric = DAMOS_QUOTA_NODE_MEMCG_USED_BP, 562 .target_value = 456, 563 .current_value = 567, 564 .nid = 6, 565 .memcg_id = 7, 566 }); 567 damos_test_commit_quota_goal_for(test, &dst, 568 &(struct damos_quota_goal){ 569 .metric = DAMOS_QUOTA_NODE_MEMCG_FREE_BP, 570 .target_value = 890, 571 .current_value = 901, 572 .nid = 10, 573 .memcg_id = 1, 574 }); 575 damos_test_commit_quota_goal_for(test, &dst, 576 &(struct damos_quota_goal) { 577 .metric = DAMOS_QUOTA_USER_INPUT, 578 .target_value = 789, 579 .current_value = 12, 580 }); 581 } 582 583 static void damos_test_commit_quota_goals_for(struct kunit *test, 584 struct damos_quota_goal *dst_goals, int nr_dst_goals, 585 struct damos_quota_goal *src_goals, int nr_src_goals) 586 { 587 struct damos_quota dst, src; 588 struct damos_quota_goal *goal, *next; 589 bool skip = true; 590 int i; 591 592 INIT_LIST_HEAD(&dst.goals); 593 INIT_LIST_HEAD(&src.goals); 594 595 for (i = 0; i < nr_dst_goals; i++) { 596 /* 597 * When nr_src_goals is smaller than dst_goals, 598 * damos_commit_quota_goals() will kfree() the dst goals. 599 * Make it kfree()-able. 600 */ 601 goal = damos_new_quota_goal(dst_goals[i].metric, 602 dst_goals[i].target_value); 603 if (!goal) 604 goto out; 605 damos_add_quota_goal(&dst, goal); 606 } 607 skip = false; 608 for (i = 0; i < nr_src_goals; i++) 609 damos_add_quota_goal(&src, &src_goals[i]); 610 611 damos_commit_quota_goals(&dst, &src); 612 613 i = 0; 614 damos_for_each_quota_goal(goal, (&dst)) { 615 KUNIT_EXPECT_EQ(test, goal->metric, src_goals[i].metric); 616 KUNIT_EXPECT_EQ(test, goal->target_value, 617 src_goals[i++].target_value); 618 } 619 KUNIT_EXPECT_EQ(test, i, nr_src_goals); 620 621 out: 622 damos_for_each_quota_goal_safe(goal, next, (&dst)) 623 damos_destroy_quota_goal(goal); 624 if (skip) 625 kunit_skip(test, "goal alloc fail"); 626 } 627 628 static void damos_test_commit_quota_goals(struct kunit *test) 629 { 630 damos_test_commit_quota_goals_for(test, 631 (struct damos_quota_goal[]){}, 0, 632 (struct damos_quota_goal[]){ 633 { 634 .metric = DAMOS_QUOTA_USER_INPUT, 635 .target_value = 123, 636 }, 637 }, 1); 638 damos_test_commit_quota_goals_for(test, 639 (struct damos_quota_goal[]){ 640 { 641 .metric = DAMOS_QUOTA_USER_INPUT, 642 .target_value = 234, 643 }, 644 645 }, 1, 646 (struct damos_quota_goal[]){ 647 { 648 .metric = DAMOS_QUOTA_USER_INPUT, 649 .target_value = 345, 650 }, 651 }, 1); 652 damos_test_commit_quota_goals_for(test, 653 (struct damos_quota_goal[]){ 654 { 655 .metric = DAMOS_QUOTA_USER_INPUT, 656 .target_value = 456, 657 }, 658 659 }, 1, 660 (struct damos_quota_goal[]){}, 0); 661 } 662 663 static void damos_test_commit_quota(struct kunit *test) 664 { 665 struct damos_quota dst = { 666 .reset_interval = 1, 667 .ms = 2, 668 .sz = 3, 669 .weight_sz = 4, 670 .weight_nr_accesses = 5, 671 .weight_age = 6, 672 }; 673 struct damos_quota src = { 674 .reset_interval = 7, 675 .ms = 8, 676 .sz = 9, 677 .weight_sz = 10, 678 .weight_nr_accesses = 11, 679 .weight_age = 12, 680 }; 681 682 INIT_LIST_HEAD(&dst.goals); 683 INIT_LIST_HEAD(&src.goals); 684 685 damos_commit_quota(&dst, &src); 686 687 KUNIT_EXPECT_EQ(test, dst.reset_interval, src.reset_interval); 688 KUNIT_EXPECT_EQ(test, dst.ms, src.ms); 689 KUNIT_EXPECT_EQ(test, dst.sz, src.sz); 690 KUNIT_EXPECT_EQ(test, dst.weight_sz, src.weight_sz); 691 KUNIT_EXPECT_EQ(test, dst.weight_nr_accesses, src.weight_nr_accesses); 692 KUNIT_EXPECT_EQ(test, dst.weight_age, src.weight_age); 693 } 694 695 static int damos_test_help_dests_setup(struct damos_migrate_dests *dests, 696 unsigned int *node_id_arr, unsigned int *weight_arr, 697 size_t nr_dests) 698 { 699 size_t i; 700 701 dests->node_id_arr = kmalloc_array(nr_dests, 702 sizeof(*dests->node_id_arr), GFP_KERNEL); 703 if (!dests->node_id_arr) 704 return -ENOMEM; 705 dests->weight_arr = kmalloc_array(nr_dests, 706 sizeof(*dests->weight_arr), GFP_KERNEL); 707 if (!dests->weight_arr) { 708 kfree(dests->node_id_arr); 709 dests->node_id_arr = NULL; 710 return -ENOMEM; 711 } 712 713 for (i = 0; i < nr_dests; i++) { 714 dests->node_id_arr[i] = node_id_arr[i]; 715 dests->weight_arr[i] = weight_arr[i]; 716 } 717 dests->nr_dests = nr_dests; 718 return 0; 719 } 720 721 static void damos_test_help_dests_free(struct damos_migrate_dests *dests) 722 { 723 kfree(dests->node_id_arr); 724 kfree(dests->weight_arr); 725 } 726 727 static void damos_test_commit_dests_for(struct kunit *test, 728 unsigned int *dst_node_id_arr, unsigned int *dst_weight_arr, 729 size_t dst_nr_dests, 730 unsigned int *src_node_id_arr, unsigned int *src_weight_arr, 731 size_t src_nr_dests) 732 { 733 struct damos_migrate_dests dst = {}, src = {}; 734 int i, err; 735 bool skip = true; 736 737 err = damos_test_help_dests_setup(&dst, dst_node_id_arr, 738 dst_weight_arr, dst_nr_dests); 739 if (err) 740 kunit_skip(test, "dests setup fail"); 741 err = damos_test_help_dests_setup(&src, src_node_id_arr, 742 src_weight_arr, src_nr_dests); 743 if (err) { 744 damos_test_help_dests_free(&dst); 745 kunit_skip(test, "src setup fail"); 746 } 747 err = damos_commit_dests(&dst, &src); 748 if (err) 749 goto out; 750 skip = false; 751 752 KUNIT_EXPECT_EQ(test, dst.nr_dests, src_nr_dests); 753 for (i = 0; i < dst.nr_dests; i++) { 754 KUNIT_EXPECT_EQ(test, dst.node_id_arr[i], src_node_id_arr[i]); 755 KUNIT_EXPECT_EQ(test, dst.weight_arr[i], src_weight_arr[i]); 756 } 757 758 out: 759 damos_test_help_dests_free(&dst); 760 damos_test_help_dests_free(&src); 761 if (skip) 762 kunit_skip(test, "skip"); 763 } 764 765 static void damos_test_commit_dests(struct kunit *test) 766 { 767 damos_test_commit_dests_for(test, 768 (unsigned int[]){1, 2, 3}, (unsigned int[]){2, 3, 4}, 769 3, 770 (unsigned int[]){4, 5, 6}, (unsigned int[]){5, 6, 7}, 771 3); 772 damos_test_commit_dests_for(test, 773 (unsigned int[]){1, 2}, (unsigned int[]){2, 3}, 774 2, 775 (unsigned int[]){4, 5, 6}, (unsigned int[]){5, 6, 7}, 776 3); 777 damos_test_commit_dests_for(test, 778 NULL, NULL, 0, 779 (unsigned int[]){4, 5, 6}, (unsigned int[]){5, 6, 7}, 780 3); 781 damos_test_commit_dests_for(test, 782 (unsigned int[]){1, 2, 3}, (unsigned int[]){2, 3, 4}, 783 3, 784 (unsigned int[]){4, 5}, (unsigned int[]){5, 6}, 2); 785 damos_test_commit_dests_for(test, 786 (unsigned int[]){1, 2, 3}, (unsigned int[]){2, 3, 4}, 787 3, 788 NULL, NULL, 0); 789 } 790 791 static void damos_test_commit_filter_for(struct kunit *test, 792 struct damos_filter *dst, struct damos_filter *src) 793 { 794 damos_commit_filter(dst, src); 795 KUNIT_EXPECT_EQ(test, dst->type, src->type); 796 KUNIT_EXPECT_EQ(test, dst->matching, src->matching); 797 KUNIT_EXPECT_EQ(test, dst->allow, src->allow); 798 switch (src->type) { 799 case DAMOS_FILTER_TYPE_MEMCG: 800 KUNIT_EXPECT_EQ(test, dst->memcg_id, src->memcg_id); 801 break; 802 case DAMOS_FILTER_TYPE_ADDR: 803 KUNIT_EXPECT_EQ(test, dst->addr_range.start, 804 src->addr_range.start); 805 KUNIT_EXPECT_EQ(test, dst->addr_range.end, 806 src->addr_range.end); 807 break; 808 case DAMOS_FILTER_TYPE_TARGET: 809 KUNIT_EXPECT_EQ(test, dst->target_idx, src->target_idx); 810 break; 811 case DAMOS_FILTER_TYPE_HUGEPAGE_SIZE: 812 KUNIT_EXPECT_EQ(test, dst->sz_range.min, src->sz_range.min); 813 KUNIT_EXPECT_EQ(test, dst->sz_range.max, src->sz_range.max); 814 break; 815 default: 816 break; 817 } 818 } 819 820 static void damos_test_commit_filter(struct kunit *test) 821 { 822 struct damos_filter dst = { 823 .type = DAMOS_FILTER_TYPE_ACTIVE, 824 .matching = false, 825 .allow = false, 826 }; 827 828 damos_test_commit_filter_for(test, &dst, 829 &(struct damos_filter){ 830 .type = DAMOS_FILTER_TYPE_ANON, 831 .matching = true, 832 .allow = true, 833 }); 834 damos_test_commit_filter_for(test, &dst, 835 &(struct damos_filter){ 836 .type = DAMOS_FILTER_TYPE_MEMCG, 837 .matching = false, 838 .allow = false, 839 .memcg_id = 123, 840 }); 841 damos_test_commit_filter_for(test, &dst, 842 &(struct damos_filter){ 843 .type = DAMOS_FILTER_TYPE_YOUNG, 844 .matching = true, 845 .allow = true, 846 }); 847 damos_test_commit_filter_for(test, &dst, 848 &(struct damos_filter){ 849 .type = DAMOS_FILTER_TYPE_HUGEPAGE_SIZE, 850 .matching = false, 851 .allow = false, 852 .sz_range = {.min = 234, .max = 345}, 853 }); 854 damos_test_commit_filter_for(test, &dst, 855 &(struct damos_filter){ 856 .type = DAMOS_FILTER_TYPE_UNMAPPED, 857 .matching = true, 858 .allow = true, 859 }); 860 damos_test_commit_filter_for(test, &dst, 861 &(struct damos_filter){ 862 .type = DAMOS_FILTER_TYPE_ADDR, 863 .matching = false, 864 .allow = false, 865 .addr_range = {.start = 456, .end = 567}, 866 }); 867 damos_test_commit_filter_for(test, &dst, 868 &(struct damos_filter){ 869 .type = DAMOS_FILTER_TYPE_TARGET, 870 .matching = true, 871 .allow = true, 872 .target_idx = 6, 873 }); 874 } 875 876 static void damos_test_help_initailize_scheme(struct damos *scheme) 877 { 878 INIT_LIST_HEAD(&scheme->quota.goals); 879 INIT_LIST_HEAD(&scheme->core_filters); 880 INIT_LIST_HEAD(&scheme->ops_filters); 881 } 882 883 static void damos_test_commit_for(struct kunit *test, struct damos *dst, 884 struct damos *src) 885 { 886 int err; 887 888 damos_test_help_initailize_scheme(dst); 889 damos_test_help_initailize_scheme(src); 890 891 err = damos_commit(dst, src); 892 if (err) 893 kunit_skip(test, "damos_commit fail"); 894 895 KUNIT_EXPECT_EQ(test, dst->pattern.min_sz_region, 896 src->pattern.min_sz_region); 897 KUNIT_EXPECT_EQ(test, dst->pattern.max_sz_region, 898 src->pattern.max_sz_region); 899 KUNIT_EXPECT_EQ(test, dst->pattern.min_nr_accesses, 900 src->pattern.min_nr_accesses); 901 KUNIT_EXPECT_EQ(test, dst->pattern.max_nr_accesses, 902 src->pattern.max_nr_accesses); 903 KUNIT_EXPECT_EQ(test, dst->pattern.min_age_region, 904 src->pattern.min_age_region); 905 KUNIT_EXPECT_EQ(test, dst->pattern.max_age_region, 906 src->pattern.max_age_region); 907 908 KUNIT_EXPECT_EQ(test, dst->action, src->action); 909 KUNIT_EXPECT_EQ(test, dst->apply_interval_us, src->apply_interval_us); 910 911 KUNIT_EXPECT_EQ(test, dst->wmarks.metric, src->wmarks.metric); 912 KUNIT_EXPECT_EQ(test, dst->wmarks.interval, src->wmarks.interval); 913 KUNIT_EXPECT_EQ(test, dst->wmarks.high, src->wmarks.high); 914 KUNIT_EXPECT_EQ(test, dst->wmarks.mid, src->wmarks.mid); 915 KUNIT_EXPECT_EQ(test, dst->wmarks.low, src->wmarks.low); 916 917 switch (src->action) { 918 case DAMOS_MIGRATE_COLD: 919 case DAMOS_MIGRATE_HOT: 920 KUNIT_EXPECT_EQ(test, dst->target_nid, src->target_nid); 921 break; 922 default: 923 break; 924 } 925 } 926 927 static void damos_test_commit(struct kunit *test) 928 { 929 damos_test_commit_for(test, 930 &(struct damos){ 931 .pattern = (struct damos_access_pattern){ 932 1, 2, 3, 4, 5, 6}, 933 .action = DAMOS_PAGEOUT, 934 .apply_interval_us = 1000000, 935 .wmarks = (struct damos_watermarks){ 936 DAMOS_WMARK_FREE_MEM_RATE, 937 900, 100, 50}, 938 }, 939 &(struct damos){ 940 .pattern = (struct damos_access_pattern){ 941 2, 3, 4, 5, 6, 7}, 942 .action = DAMOS_PAGEOUT, 943 .apply_interval_us = 2000000, 944 .wmarks = (struct damos_watermarks){ 945 DAMOS_WMARK_FREE_MEM_RATE, 946 800, 50, 30}, 947 }); 948 damos_test_commit_for(test, 949 &(struct damos){ 950 .pattern = (struct damos_access_pattern){ 951 1, 2, 3, 4, 5, 6}, 952 .action = DAMOS_PAGEOUT, 953 .apply_interval_us = 1000000, 954 .wmarks = (struct damos_watermarks){ 955 DAMOS_WMARK_FREE_MEM_RATE, 956 900, 100, 50}, 957 }, 958 &(struct damos){ 959 .pattern = (struct damos_access_pattern){ 960 2, 3, 4, 5, 6, 7}, 961 .action = DAMOS_MIGRATE_HOT, 962 .apply_interval_us = 2000000, 963 .target_nid = 5, 964 }); 965 } 966 967 static struct damon_target *damon_test_help_setup_target( 968 unsigned long region_start_end[][2], int nr_regions) 969 { 970 struct damon_target *t; 971 struct damon_region *r; 972 int i; 973 974 t = damon_new_target(); 975 if (!t) 976 return NULL; 977 for (i = 0; i < nr_regions; i++) { 978 r = damon_new_region(region_start_end[i][0], 979 region_start_end[i][1]); 980 if (!r) { 981 damon_free_target(t); 982 return NULL; 983 } 984 damon_add_region(r, t); 985 } 986 return t; 987 } 988 989 static void damon_test_commit_target_regions_for(struct kunit *test, 990 unsigned long dst_start_end[][2], int nr_dst_regions, 991 unsigned long src_start_end[][2], int nr_src_regions, 992 unsigned long expect_start_end[][2], int nr_expect_regions) 993 { 994 struct damon_target *dst_target, *src_target; 995 struct damon_region *r; 996 int i; 997 998 dst_target = damon_test_help_setup_target(dst_start_end, nr_dst_regions); 999 if (!dst_target) 1000 kunit_skip(test, "dst target setup fail"); 1001 src_target = damon_test_help_setup_target(src_start_end, nr_src_regions); 1002 if (!src_target) { 1003 damon_free_target(dst_target); 1004 kunit_skip(test, "src target setup fail"); 1005 } 1006 damon_commit_target_regions(dst_target, src_target, 1); 1007 i = 0; 1008 damon_for_each_region(r, dst_target) { 1009 KUNIT_EXPECT_EQ(test, r->ar.start, expect_start_end[i][0]); 1010 KUNIT_EXPECT_EQ(test, r->ar.end, expect_start_end[i][1]); 1011 i++; 1012 } 1013 KUNIT_EXPECT_EQ(test, damon_nr_regions(dst_target), nr_expect_regions); 1014 KUNIT_EXPECT_EQ(test, i, nr_expect_regions); 1015 damon_free_target(dst_target); 1016 damon_free_target(src_target); 1017 } 1018 1019 static void damon_test_commit_target_regions(struct kunit *test) 1020 { 1021 damon_test_commit_target_regions_for(test, 1022 (unsigned long[][2]) {{3, 8}, {8, 10}}, 2, 1023 (unsigned long[][2]) {{4, 6}}, 1, 1024 (unsigned long[][2]) {{4, 6}}, 1); 1025 damon_test_commit_target_regions_for(test, 1026 (unsigned long[][2]) {{3, 8}, {8, 10}}, 2, 1027 (unsigned long[][2]) {}, 0, 1028 (unsigned long[][2]) {{3, 8}, {8, 10}}, 2); 1029 } 1030 1031 static void damos_test_filter_out(struct kunit *test) 1032 { 1033 struct damon_target *t; 1034 struct damon_region *r, *r2; 1035 struct damos_filter *f; 1036 1037 f = damos_new_filter(DAMOS_FILTER_TYPE_ADDR, true, false); 1038 if (!f) 1039 kunit_skip(test, "filter alloc fail"); 1040 f->addr_range = (struct damon_addr_range){.start = 2, .end = 6}; 1041 1042 t = damon_new_target(); 1043 if (!t) { 1044 damos_destroy_filter(f); 1045 kunit_skip(test, "target alloc fail"); 1046 } 1047 r = damon_new_region(3, 5); 1048 if (!r) { 1049 damos_destroy_filter(f); 1050 damon_free_target(t); 1051 kunit_skip(test, "region alloc fail"); 1052 } 1053 damon_add_region(r, t); 1054 1055 /* region in the range */ 1056 KUNIT_EXPECT_TRUE(test, damos_filter_match(NULL, t, r, f, 1)); 1057 KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1); 1058 1059 /* region before the range */ 1060 r->ar.start = 1; 1061 r->ar.end = 2; 1062 KUNIT_EXPECT_FALSE(test, 1063 damos_filter_match(NULL, t, r, f, 1)); 1064 KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1); 1065 1066 /* region after the range */ 1067 r->ar.start = 6; 1068 r->ar.end = 8; 1069 KUNIT_EXPECT_FALSE(test, 1070 damos_filter_match(NULL, t, r, f, 1)); 1071 KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1); 1072 1073 /* region started before the range */ 1074 r->ar.start = 1; 1075 r->ar.end = 4; 1076 KUNIT_EXPECT_FALSE(test, damos_filter_match(NULL, t, r, f, 1)); 1077 /* filter should have split the region */ 1078 KUNIT_EXPECT_EQ(test, r->ar.start, 1); 1079 KUNIT_EXPECT_EQ(test, r->ar.end, 2); 1080 KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 2); 1081 r2 = damon_next_region(r); 1082 KUNIT_EXPECT_EQ(test, r2->ar.start, 2); 1083 KUNIT_EXPECT_EQ(test, r2->ar.end, 4); 1084 damon_destroy_region(r2, t); 1085 1086 /* region started in the range */ 1087 r->ar.start = 2; 1088 r->ar.end = 8; 1089 KUNIT_EXPECT_TRUE(test, 1090 damos_filter_match(NULL, t, r, f, 1)); 1091 /* filter should have split the region */ 1092 KUNIT_EXPECT_EQ(test, r->ar.start, 2); 1093 KUNIT_EXPECT_EQ(test, r->ar.end, 6); 1094 KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 2); 1095 r2 = damon_next_region(r); 1096 KUNIT_EXPECT_EQ(test, r2->ar.start, 6); 1097 KUNIT_EXPECT_EQ(test, r2->ar.end, 8); 1098 damon_destroy_region(r2, t); 1099 1100 damon_free_target(t); 1101 damos_free_filter(f); 1102 } 1103 1104 static void damon_test_feed_loop_next_input(struct kunit *test) 1105 { 1106 unsigned long last_input = 900000, current_score = 200; 1107 1108 /* 1109 * If current score is lower than the goal, which is always 10,000 1110 * (read the comment on damon_feed_loop_next_input()'s comment), next 1111 * input should be higher than the last input. 1112 */ 1113 KUNIT_EXPECT_GT(test, 1114 damon_feed_loop_next_input(last_input, current_score), 1115 last_input); 1116 1117 /* 1118 * If current score is higher than the goal, next input should be lower 1119 * than the last input. 1120 */ 1121 current_score = 250000000; 1122 KUNIT_EXPECT_LT(test, 1123 damon_feed_loop_next_input(last_input, current_score), 1124 last_input); 1125 1126 /* 1127 * The next input depends on the distance between the current score and 1128 * the goal 1129 */ 1130 KUNIT_EXPECT_GT(test, 1131 damon_feed_loop_next_input(last_input, 200), 1132 damon_feed_loop_next_input(last_input, 2000)); 1133 } 1134 1135 static void damon_test_set_filters_default_reject(struct kunit *test) 1136 { 1137 struct damos scheme; 1138 struct damos_filter *target_filter, *anon_filter; 1139 1140 INIT_LIST_HEAD(&scheme.core_filters); 1141 INIT_LIST_HEAD(&scheme.ops_filters); 1142 1143 damos_set_filters_default_reject(&scheme); 1144 /* 1145 * No filter is installed. Allow by default on both core and ops layer 1146 * filtering stages, since there are no filters at all. 1147 */ 1148 KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, false); 1149 KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, false); 1150 1151 target_filter = damos_new_filter(DAMOS_FILTER_TYPE_TARGET, true, true); 1152 if (!target_filter) 1153 kunit_skip(test, "filter alloc fail"); 1154 damos_add_filter(&scheme, target_filter); 1155 damos_set_filters_default_reject(&scheme); 1156 /* 1157 * A core-handled allow-filter is installed. 1158 * Rejct by default on core layer filtering stage due to the last 1159 * core-layer-filter's behavior. 1160 * Allow by default on ops layer filtering stage due to the absence of 1161 * ops layer filters. 1162 */ 1163 KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, true); 1164 KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, false); 1165 1166 target_filter->allow = false; 1167 damos_set_filters_default_reject(&scheme); 1168 /* 1169 * A core-handled reject-filter is installed. 1170 * Allow by default on core layer filtering stage due to the last 1171 * core-layer-filter's behavior. 1172 * Allow by default on ops layer filtering stage due to the absence of 1173 * ops layer filters. 1174 */ 1175 KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, false); 1176 KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, false); 1177 1178 anon_filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, true); 1179 if (!anon_filter) { 1180 damos_free_filter(target_filter); 1181 kunit_skip(test, "anon_filter alloc fail"); 1182 } 1183 damos_add_filter(&scheme, anon_filter); 1184 1185 damos_set_filters_default_reject(&scheme); 1186 /* 1187 * A core-handled reject-filter and ops-handled allow-filter are installed. 1188 * Allow by default on core layer filtering stage due to the existence 1189 * of the ops-handled filter. 1190 * Reject by default on ops layer filtering stage due to the last 1191 * ops-layer-filter's behavior. 1192 */ 1193 KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, false); 1194 KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, true); 1195 1196 target_filter->allow = true; 1197 damos_set_filters_default_reject(&scheme); 1198 /* 1199 * A core-handled allow-filter and ops-handled allow-filter are 1200 * installed. 1201 * Allow by default on core layer filtering stage due to the existence 1202 * of the ops-handled filter. 1203 * Reject by default on ops layer filtering stage due to the last 1204 * ops-layer-filter's behavior. 1205 */ 1206 KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, false); 1207 KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, true); 1208 1209 damos_free_filter(anon_filter); 1210 damos_free_filter(target_filter); 1211 } 1212 1213 static struct kunit_case damon_test_cases[] = { 1214 KUNIT_CASE(damon_test_target), 1215 KUNIT_CASE(damon_test_regions), 1216 KUNIT_CASE(damon_test_aggregate), 1217 KUNIT_CASE(damon_test_split_at), 1218 KUNIT_CASE(damon_test_merge_two), 1219 KUNIT_CASE(damon_test_merge_regions_of), 1220 KUNIT_CASE(damon_test_split_regions_of), 1221 KUNIT_CASE(damon_test_ops_registration), 1222 KUNIT_CASE(damon_test_set_regions), 1223 KUNIT_CASE(damon_test_nr_accesses_to_accesses_bp), 1224 KUNIT_CASE(damon_test_update_monitoring_result), 1225 KUNIT_CASE(damon_test_set_attrs), 1226 KUNIT_CASE(damon_test_moving_sum), 1227 KUNIT_CASE(damos_test_new_filter), 1228 KUNIT_CASE(damos_test_commit_quota_goal), 1229 KUNIT_CASE(damos_test_commit_quota_goals), 1230 KUNIT_CASE(damos_test_commit_quota), 1231 KUNIT_CASE(damos_test_commit_dests), 1232 KUNIT_CASE(damos_test_commit_filter), 1233 KUNIT_CASE(damos_test_commit), 1234 KUNIT_CASE(damon_test_commit_target_regions), 1235 KUNIT_CASE(damos_test_filter_out), 1236 KUNIT_CASE(damon_test_feed_loop_next_input), 1237 KUNIT_CASE(damon_test_set_filters_default_reject), 1238 {}, 1239 }; 1240 1241 static struct kunit_suite damon_test_suite = { 1242 .name = "damon", 1243 .test_cases = damon_test_cases, 1244 }; 1245 kunit_test_suite(damon_test_suite); 1246 1247 #endif /* _DAMON_CORE_TEST_H */ 1248 1249 #endif /* CONFIG_DAMON_KUNIT_TEST */ 1250