1 /* SPDX-License-Identifier: GPL-2.0 */ 2 /* 3 * Data Access Monitor Unit Tests 4 * 5 * Copyright 2019 Amazon.com, Inc. or its affiliates. All rights reserved. 6 * 7 * Author: SeongJae Park <sj@kernel.org> 8 */ 9 10 #ifdef CONFIG_DAMON_KUNIT_TEST 11 12 #ifndef _DAMON_CORE_TEST_H 13 #define _DAMON_CORE_TEST_H 14 15 #include <kunit/test.h> 16 17 static void damon_test_regions(struct kunit *test) 18 { 19 struct damon_region *r; 20 struct damon_target *t; 21 22 r = damon_new_region(1, 2); 23 if (!r) 24 kunit_skip(test, "region alloc fail"); 25 KUNIT_EXPECT_EQ(test, 1ul, r->ar.start); 26 KUNIT_EXPECT_EQ(test, 2ul, r->ar.end); 27 KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses); 28 29 t = damon_new_target(); 30 if (!t) { 31 damon_free_region(r); 32 kunit_skip(test, "target alloc fail"); 33 } 34 KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t)); 35 36 damon_add_region(r, t); 37 KUNIT_EXPECT_EQ(test, 1u, damon_nr_regions(t)); 38 39 damon_destroy_region(r, t); 40 KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t)); 41 42 damon_free_target(t); 43 } 44 45 static unsigned int nr_damon_targets(struct damon_ctx *ctx) 46 { 47 struct damon_target *t; 48 unsigned int nr_targets = 0; 49 50 damon_for_each_target(t, ctx) 51 nr_targets++; 52 53 return nr_targets; 54 } 55 56 static void damon_test_target(struct kunit *test) 57 { 58 struct damon_ctx *c = damon_new_ctx(); 59 struct damon_target *t; 60 61 if (!c) 62 kunit_skip(test, "ctx alloc fail"); 63 64 t = damon_new_target(); 65 if (!t) { 66 damon_destroy_ctx(c); 67 kunit_skip(test, "target alloc fail"); 68 } 69 KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c)); 70 71 damon_add_target(c, t); 72 KUNIT_EXPECT_EQ(test, 1u, nr_damon_targets(c)); 73 74 damon_destroy_target(t, c); 75 KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c)); 76 77 damon_destroy_ctx(c); 78 } 79 80 /* 81 * Test kdamond_reset_aggregated() 82 * 83 * DAMON checks access to each region and aggregates this information as the 84 * access frequency of each region. In detail, it increases '->nr_accesses' of 85 * regions that an access has confirmed. 'kdamond_reset_aggregated()' flushes 86 * the aggregated information ('->nr_accesses' of each regions) to the result 87 * buffer. As a result of the flushing, the '->nr_accesses' of regions are 88 * initialized to zero. 89 */ 90 static void damon_test_aggregate(struct kunit *test) 91 { 92 struct damon_ctx *ctx = damon_new_ctx(); 93 unsigned long saddr[][3] = {{10, 20, 30}, {5, 42, 49}, {13, 33, 55} }; 94 unsigned long eaddr[][3] = {{15, 27, 40}, {31, 45, 55}, {23, 44, 66} }; 95 unsigned long accesses[][3] = {{42, 95, 84}, {10, 20, 30}, {0, 1, 2} }; 96 struct damon_target *t; 97 struct damon_region *r; 98 int it, ir; 99 100 if (!ctx) 101 kunit_skip(test, "ctx alloc fail"); 102 103 for (it = 0; it < 3; it++) { 104 t = damon_new_target(); 105 if (!t) { 106 damon_destroy_ctx(ctx); 107 kunit_skip(test, "target alloc fail"); 108 } 109 damon_add_target(ctx, t); 110 } 111 112 it = 0; 113 damon_for_each_target(t, ctx) { 114 for (ir = 0; ir < 3; ir++) { 115 r = damon_new_region(saddr[it][ir], eaddr[it][ir]); 116 if (!r) { 117 damon_destroy_ctx(ctx); 118 kunit_skip(test, "region alloc fail"); 119 } 120 r->nr_accesses = accesses[it][ir]; 121 r->nr_accesses_bp = accesses[it][ir] * 10000; 122 damon_add_region(r, t); 123 } 124 it++; 125 } 126 kdamond_reset_aggregated(ctx); 127 it = 0; 128 damon_for_each_target(t, ctx) { 129 ir = 0; 130 /* '->nr_accesses' should be zeroed */ 131 damon_for_each_region(r, t) { 132 KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses); 133 ir++; 134 } 135 /* regions should be preserved */ 136 KUNIT_EXPECT_EQ(test, 3, ir); 137 it++; 138 } 139 /* targets also should be preserved */ 140 KUNIT_EXPECT_EQ(test, 3, it); 141 142 damon_destroy_ctx(ctx); 143 } 144 145 static void damon_test_split_at(struct kunit *test) 146 { 147 struct damon_ctx *c = damon_new_ctx(); 148 struct damon_target *t; 149 struct damon_region *r, *r_new; 150 151 if (!c) 152 kunit_skip(test, "ctx alloc fail"); 153 t = damon_new_target(); 154 if (!t) { 155 damon_destroy_ctx(c); 156 kunit_skip(test, "target alloc fail"); 157 } 158 r = damon_new_region(0, 100); 159 if (!r) { 160 damon_destroy_ctx(c); 161 damon_free_target(t); 162 kunit_skip(test, "region alloc fail"); 163 } 164 r->nr_accesses_bp = 420000; 165 r->nr_accesses = 42; 166 r->last_nr_accesses = 15; 167 damon_add_region(r, t); 168 damon_split_region_at(t, r, 25); 169 KUNIT_EXPECT_EQ(test, r->ar.start, 0ul); 170 KUNIT_EXPECT_EQ(test, r->ar.end, 25ul); 171 172 r_new = damon_next_region(r); 173 KUNIT_EXPECT_EQ(test, r_new->ar.start, 25ul); 174 KUNIT_EXPECT_EQ(test, r_new->ar.end, 100ul); 175 176 KUNIT_EXPECT_EQ(test, r->nr_accesses_bp, r_new->nr_accesses_bp); 177 KUNIT_EXPECT_EQ(test, r->nr_accesses, r_new->nr_accesses); 178 KUNIT_EXPECT_EQ(test, r->last_nr_accesses, r_new->last_nr_accesses); 179 180 damon_free_target(t); 181 damon_destroy_ctx(c); 182 } 183 184 static void damon_test_merge_two(struct kunit *test) 185 { 186 struct damon_target *t; 187 struct damon_region *r, *r2, *r3; 188 int i; 189 190 t = damon_new_target(); 191 if (!t) 192 kunit_skip(test, "target alloc fail"); 193 r = damon_new_region(0, 100); 194 if (!r) { 195 damon_free_target(t); 196 kunit_skip(test, "region alloc fail"); 197 } 198 r->nr_accesses = 10; 199 r->nr_accesses_bp = 100000; 200 damon_add_region(r, t); 201 r2 = damon_new_region(100, 300); 202 if (!r2) { 203 damon_free_target(t); 204 kunit_skip(test, "second region alloc fail"); 205 } 206 r2->nr_accesses = 20; 207 r2->nr_accesses_bp = 200000; 208 damon_add_region(r2, t); 209 210 damon_merge_two_regions(t, r, r2); 211 KUNIT_EXPECT_EQ(test, r->ar.start, 0ul); 212 KUNIT_EXPECT_EQ(test, r->ar.end, 300ul); 213 KUNIT_EXPECT_EQ(test, r->nr_accesses, 16u); 214 215 i = 0; 216 damon_for_each_region(r3, t) { 217 KUNIT_EXPECT_PTR_EQ(test, r, r3); 218 i++; 219 } 220 KUNIT_EXPECT_EQ(test, i, 1); 221 222 damon_free_target(t); 223 } 224 225 static struct damon_region *__nth_region_of(struct damon_target *t, int idx) 226 { 227 struct damon_region *r; 228 unsigned int i = 0; 229 230 damon_for_each_region(r, t) { 231 if (i++ == idx) 232 return r; 233 } 234 235 return NULL; 236 } 237 238 static void damon_test_merge_regions_of(struct kunit *test) 239 { 240 struct damon_target *t; 241 struct damon_region *r; 242 unsigned long sa[] = {0, 100, 114, 122, 130, 156, 170, 184}; 243 unsigned long ea[] = {100, 112, 122, 130, 156, 170, 184, 230}; 244 unsigned int nrs[] = {0, 0, 10, 10, 20, 30, 1, 2}; 245 246 unsigned long saddrs[] = {0, 114, 130, 156, 170}; 247 unsigned long eaddrs[] = {112, 130, 156, 170, 230}; 248 int i; 249 250 t = damon_new_target(); 251 if (!t) 252 kunit_skip(test, "target alloc fail"); 253 for (i = 0; i < ARRAY_SIZE(sa); i++) { 254 r = damon_new_region(sa[i], ea[i]); 255 if (!r) { 256 damon_free_target(t); 257 kunit_skip(test, "region alloc fail"); 258 } 259 r->nr_accesses = nrs[i]; 260 r->nr_accesses_bp = nrs[i] * 10000; 261 damon_add_region(r, t); 262 } 263 264 damon_merge_regions_of(t, 9, 9999); 265 /* 0-112, 114-130, 130-156, 156-170 */ 266 KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 5u); 267 for (i = 0; i < 5; i++) { 268 r = __nth_region_of(t, i); 269 KUNIT_EXPECT_EQ(test, r->ar.start, saddrs[i]); 270 KUNIT_EXPECT_EQ(test, r->ar.end, eaddrs[i]); 271 } 272 damon_free_target(t); 273 } 274 275 static void damon_test_split_regions_of(struct kunit *test) 276 { 277 struct damon_ctx *c = damon_new_ctx(); 278 struct damon_target *t; 279 struct damon_region *r; 280 281 if (!c) 282 kunit_skip("ctx alloc fail"); 283 t = damon_new_target(); 284 if (!t) { 285 damon_destroy_ctx(c); 286 kunit_skip(test, "target alloc fail"); 287 } 288 r = damon_new_region(0, 22); 289 if (!r) { 290 damon_destroy_ctx(c); 291 damon_free_target(t); 292 kunit_skip(test, "region alloc fail"); 293 } 294 damon_add_region(r, t); 295 damon_split_regions_of(t, 2, DAMON_MIN_REGION); 296 KUNIT_EXPECT_LE(test, damon_nr_regions(t), 2u); 297 damon_free_target(t); 298 299 t = damon_new_target(); 300 if (!t) { 301 damon_destroy_ctx(c); 302 kunit_skip(test, "second target alloc fail"); 303 } 304 r = damon_new_region(0, 220); 305 if (!r) { 306 damon_destroy_ctx(c); 307 damon_free_target(t); 308 kunit_skip(test, "second region alloc fail"); 309 } 310 damon_add_region(r, t); 311 damon_split_regions_of(t, 4, DAMON_MIN_REGION); 312 KUNIT_EXPECT_LE(test, damon_nr_regions(t), 4u); 313 damon_free_target(t); 314 damon_destroy_ctx(c); 315 } 316 317 static void damon_test_ops_registration(struct kunit *test) 318 { 319 struct damon_ctx *c = damon_new_ctx(); 320 struct damon_operations ops = {.id = DAMON_OPS_VADDR}, bak; 321 bool need_cleanup = false; 322 323 if (!c) 324 kunit_skip(test, "ctx alloc fail"); 325 326 /* DAMON_OPS_VADDR is registered only if CONFIG_DAMON_VADDR is set */ 327 if (!damon_is_registered_ops(DAMON_OPS_VADDR)) { 328 bak.id = DAMON_OPS_VADDR; 329 KUNIT_EXPECT_EQ(test, damon_register_ops(&bak), 0); 330 need_cleanup = true; 331 } 332 333 /* DAMON_OPS_VADDR is ensured to be registered */ 334 KUNIT_EXPECT_EQ(test, damon_select_ops(c, DAMON_OPS_VADDR), 0); 335 336 /* Double-registration is prohibited */ 337 KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL); 338 339 /* Unknown ops id cannot be registered */ 340 KUNIT_EXPECT_EQ(test, damon_select_ops(c, NR_DAMON_OPS), -EINVAL); 341 342 /* Registration should success after unregistration */ 343 mutex_lock(&damon_ops_lock); 344 bak = damon_registered_ops[DAMON_OPS_VADDR]; 345 damon_registered_ops[DAMON_OPS_VADDR] = (struct damon_operations){}; 346 mutex_unlock(&damon_ops_lock); 347 348 ops.id = DAMON_OPS_VADDR; 349 KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), 0); 350 351 mutex_lock(&damon_ops_lock); 352 damon_registered_ops[DAMON_OPS_VADDR] = bak; 353 mutex_unlock(&damon_ops_lock); 354 355 /* Check double-registration failure again */ 356 KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL); 357 358 damon_destroy_ctx(c); 359 360 if (need_cleanup) { 361 mutex_lock(&damon_ops_lock); 362 damon_registered_ops[DAMON_OPS_VADDR] = 363 (struct damon_operations){}; 364 mutex_unlock(&damon_ops_lock); 365 } 366 } 367 368 static void damon_test_set_regions(struct kunit *test) 369 { 370 struct damon_target *t = damon_new_target(); 371 struct damon_region *r1, *r2; 372 struct damon_addr_range range = {.start = 8, .end = 28}; 373 unsigned long expects[] = {8, 16, 16, 24, 24, 28}; 374 int expect_idx = 0; 375 struct damon_region *r; 376 377 if (!t) 378 kunit_skip(test, "target alloc fail"); 379 r1 = damon_new_region(4, 16); 380 if (!r1) { 381 damon_free_target(t); 382 kunit_skip(test, "region alloc fail"); 383 } 384 r2 = damon_new_region(24, 32); 385 if (!r2) { 386 damon_free_target(t); 387 damon_free_region(r1); 388 kunit_skip(test, "second region alloc fail"); 389 } 390 391 damon_add_region(r1, t); 392 damon_add_region(r2, t); 393 damon_set_regions(t, &range, 1, DAMON_MIN_REGION); 394 395 KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 3); 396 damon_for_each_region(r, t) { 397 KUNIT_EXPECT_EQ(test, r->ar.start, expects[expect_idx++]); 398 KUNIT_EXPECT_EQ(test, r->ar.end, expects[expect_idx++]); 399 } 400 damon_destroy_target(t, NULL); 401 } 402 403 static void damon_test_nr_accesses_to_accesses_bp(struct kunit *test) 404 { 405 struct damon_attrs attrs = { 406 .sample_interval = 10, 407 .aggr_interval = ((unsigned long)UINT_MAX + 1) * 10 408 }; 409 410 /* 411 * In some cases such as 32bit architectures where UINT_MAX is 412 * ULONG_MAX, attrs.aggr_interval becomes zero. Calling 413 * damon_nr_accesses_to_accesses_bp() in the case will cause 414 * divide-by-zero. Such case is prohibited in normal execution since 415 * the caution is documented on the comment for the function, and 416 * damon_update_monitoring_results() does the check. Skip the test in 417 * the case. 418 */ 419 if (!attrs.aggr_interval) 420 kunit_skip(test, "aggr_interval is zero."); 421 422 KUNIT_EXPECT_EQ(test, damon_nr_accesses_to_accesses_bp(123, &attrs), 0); 423 } 424 425 static void damon_test_update_monitoring_result(struct kunit *test) 426 { 427 struct damon_attrs old_attrs = { 428 .sample_interval = 10, .aggr_interval = 1000,}; 429 struct damon_attrs new_attrs; 430 struct damon_region *r = damon_new_region(3, 7); 431 432 if (!r) 433 kunit_skip(test, "region alloc fail"); 434 435 r->nr_accesses = 15; 436 r->nr_accesses_bp = 150000; 437 r->age = 20; 438 439 new_attrs = (struct damon_attrs){ 440 .sample_interval = 100, .aggr_interval = 10000,}; 441 damon_update_monitoring_result(r, &old_attrs, &new_attrs, false); 442 KUNIT_EXPECT_EQ(test, r->nr_accesses, 15); 443 KUNIT_EXPECT_EQ(test, r->age, 2); 444 445 new_attrs = (struct damon_attrs){ 446 .sample_interval = 1, .aggr_interval = 1000}; 447 damon_update_monitoring_result(r, &old_attrs, &new_attrs, false); 448 KUNIT_EXPECT_EQ(test, r->nr_accesses, 150); 449 KUNIT_EXPECT_EQ(test, r->age, 2); 450 451 new_attrs = (struct damon_attrs){ 452 .sample_interval = 1, .aggr_interval = 100}; 453 damon_update_monitoring_result(r, &old_attrs, &new_attrs, false); 454 KUNIT_EXPECT_EQ(test, r->nr_accesses, 150); 455 KUNIT_EXPECT_EQ(test, r->age, 20); 456 457 damon_free_region(r); 458 } 459 460 static void damon_test_set_attrs(struct kunit *test) 461 { 462 struct damon_ctx *c = damon_new_ctx(); 463 struct damon_attrs valid_attrs = { 464 .min_nr_regions = 10, .max_nr_regions = 1000, 465 .sample_interval = 5000, .aggr_interval = 100000,}; 466 struct damon_attrs invalid_attrs; 467 468 if (!c) 469 kunit_skip(test, "ctx alloc fail"); 470 471 KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &valid_attrs), 0); 472 473 invalid_attrs = valid_attrs; 474 invalid_attrs.min_nr_regions = 1; 475 KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL); 476 477 invalid_attrs = valid_attrs; 478 invalid_attrs.max_nr_regions = 9; 479 KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL); 480 481 invalid_attrs = valid_attrs; 482 invalid_attrs.aggr_interval = 4999; 483 KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL); 484 485 damon_destroy_ctx(c); 486 } 487 488 static void damon_test_moving_sum(struct kunit *test) 489 { 490 unsigned int mvsum = 50000, nomvsum = 50000, len_window = 10; 491 unsigned int new_values[] = {10000, 0, 10000, 0, 0, 0, 10000, 0, 0, 0}; 492 unsigned int expects[] = {55000, 50000, 55000, 50000, 45000, 40000, 493 45000, 40000, 35000, 30000}; 494 int i; 495 496 for (i = 0; i < ARRAY_SIZE(new_values); i++) { 497 mvsum = damon_moving_sum(mvsum, nomvsum, len_window, 498 new_values[i]); 499 KUNIT_EXPECT_EQ(test, mvsum, expects[i]); 500 } 501 } 502 503 static void damos_test_new_filter(struct kunit *test) 504 { 505 struct damos_filter *filter; 506 507 filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, false); 508 if (!filter) 509 kunit_skip(test, "filter alloc fail"); 510 KUNIT_EXPECT_EQ(test, filter->type, DAMOS_FILTER_TYPE_ANON); 511 KUNIT_EXPECT_EQ(test, filter->matching, true); 512 KUNIT_EXPECT_PTR_EQ(test, filter->list.prev, &filter->list); 513 KUNIT_EXPECT_PTR_EQ(test, filter->list.next, &filter->list); 514 damos_destroy_filter(filter); 515 } 516 517 static void damos_test_commit_filter(struct kunit *test) 518 { 519 struct damos_filter *src_filter, *dst_filter; 520 521 src_filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, true); 522 if (!src_filter) 523 kunit_skip(test, "src filter alloc fail"); 524 dst_filter = damos_new_filter(DAMOS_FILTER_TYPE_ACTIVE, false, false); 525 if (!dst_filter) { 526 damos_destroy_filter(src_filter); 527 kunit_skip(test, "dst filter alloc fail"); 528 } 529 damos_commit_filter(dst_filter, src_filter); 530 KUNIT_EXPECT_EQ(test, dst_filter->type, src_filter->type); 531 KUNIT_EXPECT_EQ(test, dst_filter->matching, src_filter->matching); 532 KUNIT_EXPECT_EQ(test, dst_filter->allow, src_filter->allow); 533 534 damos_destroy_filter(src_filter); 535 damos_destroy_filter(dst_filter); 536 } 537 538 static void damos_test_filter_out(struct kunit *test) 539 { 540 struct damon_target *t; 541 struct damon_region *r, *r2; 542 struct damos_filter *f; 543 544 f = damos_new_filter(DAMOS_FILTER_TYPE_ADDR, true, false); 545 if (!f) 546 kunit_skip(test, "filter alloc fail"); 547 f->addr_range = (struct damon_addr_range){ 548 .start = DAMON_MIN_REGION * 2, .end = DAMON_MIN_REGION * 6}; 549 550 t = damon_new_target(); 551 if (!t) { 552 damos_destroy_filter(f); 553 kunit_skip(test, "target alloc fail"); 554 } 555 r = damon_new_region(DAMON_MIN_REGION * 3, DAMON_MIN_REGION * 5); 556 if (!r) { 557 damos_destroy_filter(f); 558 damon_free_target(t); 559 kunit_skip(test, "region alloc fail"); 560 } 561 damon_add_region(r, t); 562 563 /* region in the range */ 564 KUNIT_EXPECT_TRUE(test, 565 damos_filter_match(NULL, t, r, f, DAMON_MIN_REGION)); 566 KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1); 567 568 /* region before the range */ 569 r->ar.start = DAMON_MIN_REGION * 1; 570 r->ar.end = DAMON_MIN_REGION * 2; 571 KUNIT_EXPECT_FALSE(test, 572 damos_filter_match(NULL, t, r, f, DAMON_MIN_REGION)); 573 KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1); 574 575 /* region after the range */ 576 r->ar.start = DAMON_MIN_REGION * 6; 577 r->ar.end = DAMON_MIN_REGION * 8; 578 KUNIT_EXPECT_FALSE(test, 579 damos_filter_match(NULL, t, r, f, DAMON_MIN_REGION)); 580 KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1); 581 582 /* region started before the range */ 583 r->ar.start = DAMON_MIN_REGION * 1; 584 r->ar.end = DAMON_MIN_REGION * 4; 585 KUNIT_EXPECT_FALSE(test, 586 damos_filter_match(NULL, t, r, f, DAMON_MIN_REGION)); 587 /* filter should have split the region */ 588 KUNIT_EXPECT_EQ(test, r->ar.start, DAMON_MIN_REGION * 1); 589 KUNIT_EXPECT_EQ(test, r->ar.end, DAMON_MIN_REGION * 2); 590 KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 2); 591 r2 = damon_next_region(r); 592 KUNIT_EXPECT_EQ(test, r2->ar.start, DAMON_MIN_REGION * 2); 593 KUNIT_EXPECT_EQ(test, r2->ar.end, DAMON_MIN_REGION * 4); 594 damon_destroy_region(r2, t); 595 596 /* region started in the range */ 597 r->ar.start = DAMON_MIN_REGION * 2; 598 r->ar.end = DAMON_MIN_REGION * 8; 599 KUNIT_EXPECT_TRUE(test, 600 damos_filter_match(NULL, t, r, f, DAMON_MIN_REGION)); 601 /* filter should have split the region */ 602 KUNIT_EXPECT_EQ(test, r->ar.start, DAMON_MIN_REGION * 2); 603 KUNIT_EXPECT_EQ(test, r->ar.end, DAMON_MIN_REGION * 6); 604 KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 2); 605 r2 = damon_next_region(r); 606 KUNIT_EXPECT_EQ(test, r2->ar.start, DAMON_MIN_REGION * 6); 607 KUNIT_EXPECT_EQ(test, r2->ar.end, DAMON_MIN_REGION * 8); 608 damon_destroy_region(r2, t); 609 610 damon_free_target(t); 611 damos_free_filter(f); 612 } 613 614 static void damon_test_feed_loop_next_input(struct kunit *test) 615 { 616 unsigned long last_input = 900000, current_score = 200; 617 618 /* 619 * If current score is lower than the goal, which is always 10,000 620 * (read the comment on damon_feed_loop_next_input()'s comment), next 621 * input should be higher than the last input. 622 */ 623 KUNIT_EXPECT_GT(test, 624 damon_feed_loop_next_input(last_input, current_score), 625 last_input); 626 627 /* 628 * If current score is higher than the goal, next input should be lower 629 * than the last input. 630 */ 631 current_score = 250000000; 632 KUNIT_EXPECT_LT(test, 633 damon_feed_loop_next_input(last_input, current_score), 634 last_input); 635 636 /* 637 * The next input depends on the distance between the current score and 638 * the goal 639 */ 640 KUNIT_EXPECT_GT(test, 641 damon_feed_loop_next_input(last_input, 200), 642 damon_feed_loop_next_input(last_input, 2000)); 643 } 644 645 static void damon_test_set_filters_default_reject(struct kunit *test) 646 { 647 struct damos scheme; 648 struct damos_filter *target_filter, *anon_filter; 649 650 INIT_LIST_HEAD(&scheme.filters); 651 INIT_LIST_HEAD(&scheme.ops_filters); 652 653 damos_set_filters_default_reject(&scheme); 654 /* 655 * No filter is installed. Allow by default on both core and ops layer 656 * filtering stages, since there are no filters at all. 657 */ 658 KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, false); 659 KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, false); 660 661 target_filter = damos_new_filter(DAMOS_FILTER_TYPE_TARGET, true, true); 662 if (!target_filter) 663 kunit_skip(test, "filter alloc fail"); 664 damos_add_filter(&scheme, target_filter); 665 damos_set_filters_default_reject(&scheme); 666 /* 667 * A core-handled allow-filter is installed. 668 * Rejct by default on core layer filtering stage due to the last 669 * core-layer-filter's behavior. 670 * Allow by default on ops layer filtering stage due to the absence of 671 * ops layer filters. 672 */ 673 KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, true); 674 KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, false); 675 676 target_filter->allow = false; 677 damos_set_filters_default_reject(&scheme); 678 /* 679 * A core-handled reject-filter is installed. 680 * Allow by default on core layer filtering stage due to the last 681 * core-layer-filter's behavior. 682 * Allow by default on ops layer filtering stage due to the absence of 683 * ops layer filters. 684 */ 685 KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, false); 686 KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, false); 687 688 anon_filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, true); 689 if (!anon_filter) { 690 damos_free_filter(target_filter); 691 kunit_skip(test, "anon_filter alloc fail"); 692 } 693 damos_add_filter(&scheme, anon_filter); 694 695 damos_set_filters_default_reject(&scheme); 696 /* 697 * A core-handled reject-filter and ops-handled allow-filter are installed. 698 * Allow by default on core layer filtering stage due to the existence 699 * of the ops-handled filter. 700 * Reject by default on ops layer filtering stage due to the last 701 * ops-layer-filter's behavior. 702 */ 703 KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, false); 704 KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, true); 705 706 target_filter->allow = true; 707 damos_set_filters_default_reject(&scheme); 708 /* 709 * A core-handled allow-filter and ops-handled allow-filter are 710 * installed. 711 * Allow by default on core layer filtering stage due to the existence 712 * of the ops-handled filter. 713 * Reject by default on ops layer filtering stage due to the last 714 * ops-layer-filter's behavior. 715 */ 716 KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, false); 717 KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, true); 718 719 damos_free_filter(anon_filter); 720 damos_free_filter(target_filter); 721 } 722 723 static struct kunit_case damon_test_cases[] = { 724 KUNIT_CASE(damon_test_target), 725 KUNIT_CASE(damon_test_regions), 726 KUNIT_CASE(damon_test_aggregate), 727 KUNIT_CASE(damon_test_split_at), 728 KUNIT_CASE(damon_test_merge_two), 729 KUNIT_CASE(damon_test_merge_regions_of), 730 KUNIT_CASE(damon_test_split_regions_of), 731 KUNIT_CASE(damon_test_ops_registration), 732 KUNIT_CASE(damon_test_set_regions), 733 KUNIT_CASE(damon_test_nr_accesses_to_accesses_bp), 734 KUNIT_CASE(damon_test_update_monitoring_result), 735 KUNIT_CASE(damon_test_set_attrs), 736 KUNIT_CASE(damon_test_moving_sum), 737 KUNIT_CASE(damos_test_new_filter), 738 KUNIT_CASE(damos_test_commit_filter), 739 KUNIT_CASE(damos_test_filter_out), 740 KUNIT_CASE(damon_test_feed_loop_next_input), 741 KUNIT_CASE(damon_test_set_filters_default_reject), 742 {}, 743 }; 744 745 static struct kunit_suite damon_test_suite = { 746 .name = "damon", 747 .test_cases = damon_test_cases, 748 }; 749 kunit_test_suite(damon_test_suite); 750 751 #endif /* _DAMON_CORE_TEST_H */ 752 753 #endif /* CONFIG_DAMON_KUNIT_TEST */ 754