1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * Data Access Monitor Unit Tests
4 *
5 * Copyright 2019 Amazon.com, Inc. or its affiliates. All rights reserved.
6 *
7 * Author: SeongJae Park <sj@kernel.org>
8 */
9
10 #ifdef CONFIG_DAMON_KUNIT_TEST
11
12 #ifndef _DAMON_CORE_TEST_H
13 #define _DAMON_CORE_TEST_H
14
15 #include <kunit/test.h>
16
/* Exercise basic region/target lifecycle: create, add, destroy, free. */
static void damon_test_regions(struct kunit *test)
{
	struct damon_region *r;
	struct damon_target *t;

	r = damon_new_region(1, 2);
	if (!r)
		kunit_skip(test, "region alloc fail");
	/* A fresh region keeps the requested range and starts unaccessed. */
	KUNIT_EXPECT_EQ(test, 1ul, r->ar.start);
	KUNIT_EXPECT_EQ(test, 2ul, r->ar.end);
	KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses);

	t = damon_new_target();
	if (!t) {
		/* 'r' is not yet linked to any target; free it by hand. */
		damon_free_region(r);
		kunit_skip(test, "target alloc fail");
	}
	/* A fresh target has no regions. */
	KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t));

	damon_add_region(r, t);
	KUNIT_EXPECT_EQ(test, 1u, damon_nr_regions(t));

	/* damon_destroy_region() unlinks from 't' and frees 'r'. */
	damon_destroy_region(r, t);
	KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t));

	damon_free_target(t);
}
44
nr_damon_targets(struct damon_ctx * ctx)45 static unsigned int nr_damon_targets(struct damon_ctx *ctx)
46 {
47 struct damon_target *t;
48 unsigned int nr_targets = 0;
49
50 damon_for_each_target(t, ctx)
51 nr_targets++;
52
53 return nr_targets;
54 }
55
/* Exercise target add/destroy on a context and count bookkeeping. */
static void damon_test_target(struct kunit *test)
{
	struct damon_ctx *c = damon_new_ctx();
	struct damon_target *t;

	if (!c)
		kunit_skip(test, "ctx alloc fail");

	t = damon_new_target();
	if (!t) {
		/* Clean up the context before skipping. */
		damon_destroy_ctx(c);
		kunit_skip(test, "target alloc fail");
	}
	/* 't' is not yet linked to 'c'. */
	KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c));

	damon_add_target(c, t);
	KUNIT_EXPECT_EQ(test, 1u, nr_damon_targets(c));

	/* damon_destroy_target() unlinks 't' from 'c' and frees it. */
	damon_destroy_target(t, c);
	KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c));

	damon_destroy_ctx(c);
}
79
80 /*
81 * Test kdamond_reset_aggregated()
82 *
83 * DAMON checks access to each region and aggregates this information as the
84 * access frequency of each region. In detail, it increases '->nr_accesses' of
85 * regions that an access has confirmed. 'kdamond_reset_aggregated()' flushes
86 * the aggregated information ('->nr_accesses' of each regions) to the result
87 * buffer. As a result of the flushing, the '->nr_accesses' of regions are
88 * initialized to zero.
89 */
static void damon_test_aggregate(struct kunit *test)
{
	struct damon_ctx *ctx = damon_new_ctx();
	/* Three targets, each with three regions of distinct ranges/accesses. */
	unsigned long saddr[][3] = {{10, 20, 30}, {5, 42, 49}, {13, 33, 55} };
	unsigned long eaddr[][3] = {{15, 27, 40}, {31, 45, 55}, {23, 44, 66} };
	unsigned long accesses[][3] = {{42, 95, 84}, {10, 20, 30}, {0, 1, 2} };
	struct damon_target *t;
	struct damon_region *r;
	int it, ir;

	if (!ctx)
		kunit_skip(test, "ctx alloc fail");

	for (it = 0; it < 3; it++) {
		t = damon_new_target();
		if (!t) {
			/* Frees already-added targets, too. */
			damon_destroy_ctx(ctx);
			kunit_skip(test, "target alloc fail");
		}
		damon_add_target(ctx, t);
	}

	it = 0;
	damon_for_each_target(t, ctx) {
		for (ir = 0; ir < 3; ir++) {
			r = damon_new_region(saddr[it][ir], eaddr[it][ir]);
			if (!r) {
				damon_destroy_ctx(ctx);
				kunit_skip(test, "region alloc fail");
			}
			r->nr_accesses = accesses[it][ir];
			r->nr_accesses_bp = accesses[it][ir] * 10000;
			damon_add_region(r, t);
		}
		it++;
	}
	kdamond_reset_aggregated(ctx);
	it = 0;
	damon_for_each_target(t, ctx) {
		ir = 0;
		/* '->nr_accesses' should be zeroed */
		damon_for_each_region(r, t) {
			KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses);
			ir++;
		}
		/* regions should be preserved */
		KUNIT_EXPECT_EQ(test, 3, ir);
		it++;
	}
	/* targets also should be preserved */
	KUNIT_EXPECT_EQ(test, 3, it);

	damon_destroy_ctx(ctx);
}
144
/*
 * Test damon_split_region_at(): splitting [0, 100) at 25 should yield
 * [0, 25) and [25, 100), with access stats copied to the new region.
 */
static void damon_test_split_at(struct kunit *test)
{
	struct damon_target *t;
	struct damon_region *r, *r_new;

	t = damon_new_target();
	if (!t)
		kunit_skip(test, "target alloc fail");
	r = damon_new_region(0, 100);
	if (!r) {
		damon_free_target(t);
		kunit_skip(test, "region alloc fail");
	}
	/* Non-default stats, to verify they are propagated on split. */
	r->nr_accesses_bp = 420000;
	r->nr_accesses = 42;
	r->last_nr_accesses = 15;
	r->age = 10;
	damon_add_region(r, t);
	damon_split_region_at(t, r, 25);
	KUNIT_EXPECT_EQ(test, r->ar.start, 0ul);
	KUNIT_EXPECT_EQ(test, r->ar.end, 25ul);

	/* The split-out second half is linked right after 'r'. */
	r_new = damon_next_region(r);
	KUNIT_EXPECT_EQ(test, r_new->ar.start, 25ul);
	KUNIT_EXPECT_EQ(test, r_new->ar.end, 100ul);

	/* Both halves should share the same monitoring results. */
	KUNIT_EXPECT_EQ(test, r->nr_accesses_bp, r_new->nr_accesses_bp);
	KUNIT_EXPECT_EQ(test, r->nr_accesses, r_new->nr_accesses);
	KUNIT_EXPECT_EQ(test, r->last_nr_accesses, r_new->last_nr_accesses);
	KUNIT_EXPECT_EQ(test, r->age, r_new->age);

	damon_free_target(t);
}
178
/*
 * Test damon_merge_two_regions(): merging adjacent [0, 100) and [100, 300)
 * should extend the first region and size-weight-average the stats, e.g.
 * nr_accesses: (10 * 100 + 20 * 200) / 300 == 16.
 */
static void damon_test_merge_two(struct kunit *test)
{
	struct damon_target *t;
	struct damon_region *r, *r2, *r3;
	int i;

	t = damon_new_target();
	if (!t)
		kunit_skip(test, "target alloc fail");
	r = damon_new_region(0, 100);
	if (!r) {
		damon_free_target(t);
		kunit_skip(test, "region alloc fail");
	}
	r->nr_accesses = 10;
	r->nr_accesses_bp = 100000;
	r->age = 9;
	damon_add_region(r, t);
	r2 = damon_new_region(100, 300);
	if (!r2) {
		/* 'r' is already on 't', so freeing 't' frees it, too. */
		damon_free_target(t);
		kunit_skip(test, "second region alloc fail");
	}
	r2->nr_accesses = 20;
	r2->nr_accesses_bp = 200000;
	r2->age = 21;
	damon_add_region(r2, t);

	damon_merge_two_regions(t, r, r2);
	KUNIT_EXPECT_EQ(test, r->ar.start, 0ul);
	KUNIT_EXPECT_EQ(test, r->ar.end, 300ul);
	/* Size-weighted averages of the two regions' stats. */
	KUNIT_EXPECT_EQ(test, r->nr_accesses, 16u);
	KUNIT_EXPECT_EQ(test, r->nr_accesses_bp, 160000u);
	KUNIT_EXPECT_EQ(test, r->age, 17u);

	/* Only the merged region should remain on the target. */
	i = 0;
	damon_for_each_region(r3, t) {
		KUNIT_EXPECT_PTR_EQ(test, r, r3);
		i++;
	}
	KUNIT_EXPECT_EQ(test, i, 1);

	damon_free_target(t);
}
223
__nth_region_of(struct damon_target * t,int idx)224 static struct damon_region *__nth_region_of(struct damon_target *t, int idx)
225 {
226 struct damon_region *r;
227 unsigned int i = 0;
228
229 damon_for_each_region(r, t) {
230 if (i++ == idx)
231 return r;
232 }
233
234 return NULL;
235 }
236
/*
 * Test damon_merge_regions_of() with access-diff threshold 9 and size
 * threshold 9999: adjacent regions whose nr_accesses differ by <= 9 are
 * merged, unless the merge would exceed the size threshold.
 */
static void damon_test_merge_regions_of(struct kunit *test)
{
	struct damon_target *t;
	struct damon_region *r;
	/* Input regions and their access counts. */
	unsigned long sa[] = {0, 100, 114, 122, 130, 156, 170, 184, 230};
	unsigned long ea[] = {100, 112, 122, 130, 156, 170, 184, 230, 10170};
	unsigned int nrs[] = {0, 0, 10, 10, 20, 30, 1, 2, 5};

	/* Expected regions after merging. */
	unsigned long saddrs[] = {0, 114, 130, 156, 170, 230};
	unsigned long eaddrs[] = {112, 130, 156, 170, 230, 10170};
	int i;

	t = damon_new_target();
	if (!t)
		kunit_skip(test, "target alloc fail");
	for (i = 0; i < ARRAY_SIZE(sa); i++) {
		r = damon_new_region(sa[i], ea[i]);
		if (!r) {
			/* Frees already-added regions, too. */
			damon_free_target(t);
			kunit_skip(test, "region alloc fail");
		}
		r->nr_accesses = nrs[i];
		r->nr_accesses_bp = nrs[i] * 10000;
		damon_add_region(r, t);
	}

	damon_merge_regions_of(t, 9, 9999);
	/* 0-112, 114-130, 130-156, 156-170, 170-230, 230-10170 */
	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 6u);
	for (i = 0; i < 6; i++) {
		r = __nth_region_of(t, i);
		KUNIT_EXPECT_EQ(test, r->ar.start, saddrs[i]);
		KUNIT_EXPECT_EQ(test, r->ar.end, eaddrs[i]);
	}
	damon_free_target(t);
}
273
damon_test_split_regions_of(struct kunit * test)274 static void damon_test_split_regions_of(struct kunit *test)
275 {
276 struct damon_target *t;
277 struct damon_region *r;
278 unsigned long sa[] = {0, 300, 500};
279 unsigned long ea[] = {220, 400, 700};
280 int i;
281
282 t = damon_new_target();
283 if (!t)
284 kunit_skip(test, "target alloc fail");
285 r = damon_new_region(0, 22);
286 if (!r) {
287 damon_free_target(t);
288 kunit_skip(test, "region alloc fail");
289 }
290 damon_add_region(r, t);
291 damon_split_regions_of(t, 2, 1);
292 KUNIT_EXPECT_LE(test, damon_nr_regions(t), 2u);
293 damon_free_target(t);
294
295 t = damon_new_target();
296 if (!t)
297 kunit_skip(test, "second target alloc fail");
298 r = damon_new_region(0, 220);
299 if (!r) {
300 damon_free_target(t);
301 kunit_skip(test, "second region alloc fail");
302 }
303 damon_add_region(r, t);
304 damon_split_regions_of(t, 4, 1);
305 KUNIT_EXPECT_LE(test, damon_nr_regions(t), 4u);
306 damon_free_target(t);
307
308 t = damon_new_target();
309 if (!t)
310 kunit_skip(test, "third target alloc fail");
311 for (i = 0; i < ARRAY_SIZE(sa); i++) {
312 r = damon_new_region(sa[i], ea[i]);
313 if (!r) {
314 damon_free_target(t);
315 kunit_skip(test, "region alloc fail");
316 }
317 damon_add_region(r, t);
318 }
319 damon_split_regions_of(t, 4, 5);
320 KUNIT_EXPECT_LE(test, damon_nr_regions(t), 12u);
321 damon_for_each_region(r, t)
322 KUNIT_EXPECT_GE(test, damon_sz_region(r) % 5ul, 0ul);
323 damon_free_target(t);
324 }
325
/*
 * Test damon_register_ops() and damon_select_ops() behavior: selection of a
 * registered id succeeds, double-registration and unknown ids fail, and
 * registration succeeds again after (emulated) unregistration.
 */
static void damon_test_ops_registration(struct kunit *test)
{
	struct damon_ctx *c = damon_new_ctx();
	struct damon_operations ops = {.id = DAMON_OPS_VADDR}, bak;
	bool need_cleanup = false;

	if (!c)
		kunit_skip(test, "ctx alloc fail");

	/* DAMON_OPS_VADDR is registered only if CONFIG_DAMON_VADDR is set */
	if (!damon_is_registered_ops(DAMON_OPS_VADDR)) {
		bak.id = DAMON_OPS_VADDR;
		KUNIT_EXPECT_EQ(test, damon_register_ops(&bak), 0);
		/* Remember to undo the registration we did ourselves. */
		need_cleanup = true;
	}

	/* DAMON_OPS_VADDR is ensured to be registered */
	KUNIT_EXPECT_EQ(test, damon_select_ops(c, DAMON_OPS_VADDR), 0);

	/* Double-registration is prohibited */
	KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL);

	/* Unknown ops id cannot be registered */
	KUNIT_EXPECT_EQ(test, damon_select_ops(c, NR_DAMON_OPS), -EINVAL);

	/* Registration should success after unregistration */
	mutex_lock(&damon_ops_lock);
	/* Emulate unregistration by clearing the global table slot. */
	bak = damon_registered_ops[DAMON_OPS_VADDR];
	damon_registered_ops[DAMON_OPS_VADDR] = (struct damon_operations){};
	mutex_unlock(&damon_ops_lock);

	ops.id = DAMON_OPS_VADDR;
	KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), 0);

	/* Restore the original registration. */
	mutex_lock(&damon_ops_lock);
	damon_registered_ops[DAMON_OPS_VADDR] = bak;
	mutex_unlock(&damon_ops_lock);

	/* Check double-registration failure again */
	KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL);

	damon_destroy_ctx(c);

	if (need_cleanup) {
		mutex_lock(&damon_ops_lock);
		damon_registered_ops[DAMON_OPS_VADDR] =
			(struct damon_operations){};
		mutex_unlock(&damon_ops_lock);
	}
}
376
/*
 * Test damon_set_regions(): applying range [8, 28) over existing regions
 * [4, 16) and [24, 32) should clip them to the range and fill the gap,
 * yielding [8, 16), [16, 24), and [24, 28).
 */
static void damon_test_set_regions(struct kunit *test)
{
	struct damon_target *t = damon_new_target();
	struct damon_region *r1, *r2;
	struct damon_addr_range range = {.start = 8, .end = 28};
	/* Expected start/end pairs, flattened. */
	unsigned long expects[] = {8, 16, 16, 24, 24, 28};
	int expect_idx = 0;
	struct damon_region *r;

	if (!t)
		kunit_skip(test, "target alloc fail");
	r1 = damon_new_region(4, 16);
	if (!r1) {
		damon_free_target(t);
		kunit_skip(test, "region alloc fail");
	}
	r2 = damon_new_region(24, 32);
	if (!r2) {
		/* 'r1' is not yet on 't'; free it separately. */
		damon_free_target(t);
		damon_free_region(r1);
		kunit_skip(test, "second region alloc fail");
	}

	damon_add_region(r1, t);
	damon_add_region(r2, t);
	damon_set_regions(t, &range, 1, 1);

	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 3);
	damon_for_each_region(r, t) {
		KUNIT_EXPECT_EQ(test, r->ar.start, expects[expect_idx++]);
		KUNIT_EXPECT_EQ(test, r->ar.end, expects[expect_idx++]);
	}
	damon_destroy_target(t, NULL);
}
411
/*
 * Test damon_nr_accesses_to_accesses_bp() with an aggregation interval so
 * large that the per-aggregation max access count exceeds UINT_MAX, in
 * which case the function is expected to return zero.
 */
static void damon_test_nr_accesses_to_accesses_bp(struct kunit *test)
{
	struct damon_attrs attrs = {
		.sample_interval = 10,
		.aggr_interval = ((unsigned long)UINT_MAX + 1) * 10
	};

	/*
	 * In some cases such as 32bit architectures where UINT_MAX is
	 * ULONG_MAX, attrs.aggr_interval becomes zero.  Calling
	 * damon_nr_accesses_to_accesses_bp() in the case will cause
	 * divide-by-zero.  Such case is prohibited in normal execution since
	 * the caution is documented on the comment for the function, and
	 * damon_update_monitoring_results() does the check.  Skip the test in
	 * the case.
	 */
	if (!attrs.aggr_interval)
		kunit_skip(test, "aggr_interval is zero.");

	KUNIT_EXPECT_EQ(test, damon_nr_accesses_to_accesses_bp(123, &attrs), 0);
}
433
/*
 * Test damon_update_monitoring_result(): when monitoring attributes change,
 * a region's nr_accesses and age should be rescaled to keep the same
 * meaning under the new sampling/aggregation intervals.
 */
static void damon_test_update_monitoring_result(struct kunit *test)
{
	struct damon_attrs old_attrs = {
		.sample_interval = 10, .aggr_interval = 1000,};
	struct damon_attrs new_attrs;
	struct damon_region *r = damon_new_region(3, 7);

	if (!r)
		kunit_skip(test, "region alloc fail");

	r->nr_accesses = 15;
	r->nr_accesses_bp = 150000;
	r->age = 20;

	/* Same samples per aggregation; only age (in aggrs) rescales. */
	new_attrs = (struct damon_attrs){
		.sample_interval = 100, .aggr_interval = 10000,};
	damon_update_monitoring_result(r, &old_attrs, &new_attrs, false);
	KUNIT_EXPECT_EQ(test, r->nr_accesses, 15);
	KUNIT_EXPECT_EQ(test, r->age, 2);

	/* 10x samples per aggregation; nr_accesses rescales. */
	new_attrs = (struct damon_attrs){
		.sample_interval = 1, .aggr_interval = 1000};
	damon_update_monitoring_result(r, &old_attrs, &new_attrs, false);
	KUNIT_EXPECT_EQ(test, r->nr_accesses, 150);
	KUNIT_EXPECT_EQ(test, r->age, 2);

	/* Both nr_accesses and age rescale back. */
	new_attrs = (struct damon_attrs){
		.sample_interval = 1, .aggr_interval = 100};
	damon_update_monitoring_result(r, &old_attrs, &new_attrs, false);
	KUNIT_EXPECT_EQ(test, r->nr_accesses, 150);
	KUNIT_EXPECT_EQ(test, r->age, 20);

	damon_free_region(r);
}
468
/*
 * Test damon_set_attrs() input validation: valid attributes are accepted,
 * while too-small min/max region counts or an aggregation interval shorter
 * than the sampling interval are rejected with -EINVAL.
 */
static void damon_test_set_attrs(struct kunit *test)
{
	struct damon_ctx *c = damon_new_ctx();
	struct damon_attrs valid_attrs = {
		.min_nr_regions = 10, .max_nr_regions = 1000,
		.sample_interval = 5000, .aggr_interval = 100000,};
	struct damon_attrs invalid_attrs;

	if (!c)
		kunit_skip(test, "ctx alloc fail");

	KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &valid_attrs), 0);

	/* min_nr_regions must be at least 3. */
	invalid_attrs = valid_attrs;
	invalid_attrs.min_nr_regions = 1;
	KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);

	/* max_nr_regions must not be smaller than min_nr_regions. */
	invalid_attrs = valid_attrs;
	invalid_attrs.max_nr_regions = 9;
	KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);

	/* aggr_interval must not be shorter than sample_interval. */
	invalid_attrs = valid_attrs;
	invalid_attrs.aggr_interval = 4999;
	KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);

	damon_destroy_ctx(c);
}
496
/*
 * Test damon_moving_sum(): feed a sequence of new values into a moving sum
 * of window length 10 and check each intermediate result against
 * pre-computed expectations.
 */
static void damon_test_moving_sum(struct kunit *test)
{
	unsigned int mvsum = 50000, nomvsum = 50000, len_window = 10;
	unsigned int new_values[] = {10000, 0, 10000, 0, 0, 0, 10000, 0, 0, 0};
	unsigned int expects[] = {55000, 50000, 55000, 50000, 45000, 40000,
		45000, 40000, 35000, 30000};
	int i;

	for (i = 0; i < ARRAY_SIZE(new_values); i++) {
		mvsum = damon_moving_sum(mvsum, nomvsum, len_window,
				new_values[i]);
		KUNIT_EXPECT_EQ(test, mvsum, expects[i]);
	}
}
511
/* Test damos_new_filter(): fields are set and the list node is self-linked. */
static void damos_test_new_filter(struct kunit *test)
{
	struct damos_filter *filter;

	filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, false);
	if (!filter)
		kunit_skip(test, "filter alloc fail");
	KUNIT_EXPECT_EQ(test, filter->type, DAMOS_FILTER_TYPE_ANON);
	KUNIT_EXPECT_EQ(test, filter->matching, true);
	/* A new filter should not be linked to any list yet. */
	KUNIT_EXPECT_PTR_EQ(test, filter->list.prev, &filter->list);
	KUNIT_EXPECT_PTR_EQ(test, filter->list.next, &filter->list);
	damos_destroy_filter(filter);
}
525
/*
 * Commit 'src' into 'dst' via damos_commit_quota_goal() and verify the
 * per-metric fields are copied as expected.  For the PSI metric,
 * last_psi_total of 'dst' should be preserved across the commit.
 */
static void damos_test_commit_quota_goal_for(struct kunit *test,
		struct damos_quota_goal *dst,
		struct damos_quota_goal *src)
{
	u64 dst_last_psi_total = 0;

	/* Snapshot before the commit, to check it is kept afterwards. */
	if (dst->metric == DAMOS_QUOTA_SOME_MEM_PSI_US)
		dst_last_psi_total = dst->last_psi_total;
	damos_commit_quota_goal(dst, src);

	KUNIT_EXPECT_EQ(test, dst->metric, src->metric);
	KUNIT_EXPECT_EQ(test, dst->target_value, src->target_value);
	/* current_value is user-provided only for the USER_INPUT metric. */
	if (src->metric == DAMOS_QUOTA_USER_INPUT)
		KUNIT_EXPECT_EQ(test, dst->current_value, src->current_value);
	if (dst_last_psi_total && src->metric == DAMOS_QUOTA_SOME_MEM_PSI_US)
		KUNIT_EXPECT_EQ(test, dst->last_psi_total, dst_last_psi_total);
	/* Node/memcg-based metrics carry extra identification fields. */
	switch (dst->metric) {
	case DAMOS_QUOTA_NODE_MEM_USED_BP:
	case DAMOS_QUOTA_NODE_MEM_FREE_BP:
		KUNIT_EXPECT_EQ(test, dst->nid, src->nid);
		break;
	case DAMOS_QUOTA_NODE_MEMCG_USED_BP:
	case DAMOS_QUOTA_NODE_MEMCG_FREE_BP:
		KUNIT_EXPECT_EQ(test, dst->nid, src->nid);
		KUNIT_EXPECT_EQ(test, dst->memcg_id, src->memcg_id);
		break;
	default:
		break;
	}
}
556
/*
 * Test damos_commit_quota_goal() by committing goals of every supported
 * metric, one after another, into the same destination goal.
 */
static void damos_test_commit_quota_goal(struct kunit *test)
{
	struct damos_quota_goal dst = {
		.metric = DAMOS_QUOTA_SOME_MEM_PSI_US,
		.target_value = 1000,
		.current_value = 123,
		.last_psi_total = 456,
	};

	damos_test_commit_quota_goal_for(test, &dst,
			&(struct damos_quota_goal){
			.metric = DAMOS_QUOTA_USER_INPUT,
			.target_value = 789,
			.current_value = 12});
	damos_test_commit_quota_goal_for(test, &dst,
			&(struct damos_quota_goal){
			.metric = DAMOS_QUOTA_NODE_MEM_FREE_BP,
			.target_value = 345,
			.current_value = 678,
			.nid = 9,
			});
	damos_test_commit_quota_goal_for(test, &dst,
			&(struct damos_quota_goal){
			.metric = DAMOS_QUOTA_NODE_MEM_USED_BP,
			.target_value = 12,
			.current_value = 345,
			.nid = 6,
			});
	damos_test_commit_quota_goal_for(test, &dst,
			&(struct damos_quota_goal){
			.metric = DAMOS_QUOTA_NODE_MEMCG_USED_BP,
			.target_value = 456,
			.current_value = 567,
			.nid = 6,
			.memcg_id = 7,
			});
	damos_test_commit_quota_goal_for(test, &dst,
			&(struct damos_quota_goal){
			.metric = DAMOS_QUOTA_NODE_MEMCG_FREE_BP,
			.target_value = 890,
			.current_value = 901,
			.nid = 10,
			.memcg_id = 1,
			});
	damos_test_commit_quota_goal_for(test, &dst,
			&(struct damos_quota_goal) {
			.metric = DAMOS_QUOTA_SOME_MEM_PSI_US,
			.target_value = 234,
			.current_value = 345,
			.last_psi_total = 567,
			});
}
609
/*
 * Build dst/src quota-goal lists from the given arrays, commit src into
 * dst via damos_commit_quota_goals(), and verify the dst list now mirrors
 * the src goals.  Skips (after cleanup) if a goal allocation fails.
 */
static void damos_test_commit_quota_goals_for(struct kunit *test,
		struct damos_quota_goal *dst_goals, int nr_dst_goals,
		struct damos_quota_goal *src_goals, int nr_src_goals)
{
	struct damos_quota dst, src;
	struct damos_quota_goal *goal, *next;
	bool skip = true;
	int i;

	INIT_LIST_HEAD(&dst.goals);
	INIT_LIST_HEAD(&src.goals);

	for (i = 0; i < nr_dst_goals; i++) {
		/*
		 * When nr_src_goals is smaller than dst_goals,
		 * damos_commit_quota_goals() will kfree() the dst goals.
		 * Make it kfree()-able.
		 */
		goal = damos_new_quota_goal(dst_goals[i].metric,
				dst_goals[i].target_value);
		if (!goal)
			goto out;
		damos_add_quota_goal(&dst, goal);
	}
	skip = false;
	/* src goals live on the stack of the caller; no allocation needed. */
	for (i = 0; i < nr_src_goals; i++)
		damos_add_quota_goal(&src, &src_goals[i]);

	damos_commit_quota_goals(&dst, &src);

	/* dst should now contain exactly the src goals, in order. */
	i = 0;
	damos_for_each_quota_goal(goal, (&dst)) {
		KUNIT_EXPECT_EQ(test, goal->metric, src_goals[i].metric);
		KUNIT_EXPECT_EQ(test, goal->target_value,
				src_goals[i++].target_value);
	}
	KUNIT_EXPECT_EQ(test, i, nr_src_goals);

out:
	/* Free any dynamically allocated dst goals, even on the skip path. */
	damos_for_each_quota_goal_safe(goal, next, (&dst))
		damos_destroy_quota_goal(goal);
	if (skip)
		kunit_skip(test, "goal alloc fail");
}
654
/*
 * Test damos_commit_quota_goals() for the three interesting shapes:
 * empty dst, equal-sized dst/src, and empty src (dst goals removed).
 */
static void damos_test_commit_quota_goals(struct kunit *test)
{
	damos_test_commit_quota_goals_for(test,
			(struct damos_quota_goal[]){}, 0,
			(struct damos_quota_goal[]){
			{
				.metric = DAMOS_QUOTA_USER_INPUT,
				.target_value = 123,
			},
			}, 1);
	damos_test_commit_quota_goals_for(test,
			(struct damos_quota_goal[]){
			{
				.metric = DAMOS_QUOTA_USER_INPUT,
				.target_value = 234,
			},

			}, 1,
			(struct damos_quota_goal[]){
			{
				.metric = DAMOS_QUOTA_USER_INPUT,
				.target_value = 345,
			},
			}, 1);
	damos_test_commit_quota_goals_for(test,
			(struct damos_quota_goal[]){
			{
				.metric = DAMOS_QUOTA_USER_INPUT,
				.target_value = 456,
			},

			}, 1,
			(struct damos_quota_goal[]){}, 0);
}
689
/*
 * Test damos_commit_quota(): all scalar quota fields of src should be
 * copied into dst.  Goal lists are empty here; they are covered by
 * damos_test_commit_quota_goals().
 */
static void damos_test_commit_quota(struct kunit *test)
{
	struct damos_quota dst = {
		.reset_interval = 1,
		.ms = 2,
		.sz = 3,
		.weight_sz = 4,
		.weight_nr_accesses = 5,
		.weight_age = 6,
	};
	struct damos_quota src = {
		.reset_interval = 7,
		.ms = 8,
		.sz = 9,
		.weight_sz = 10,
		.weight_nr_accesses = 11,
		.weight_age = 12,
	};

	/* damos_commit_quota() iterates the goal lists; initialize them. */
	INIT_LIST_HEAD(&dst.goals);
	INIT_LIST_HEAD(&src.goals);

	damos_commit_quota(&dst, &src);

	KUNIT_EXPECT_EQ(test, dst.reset_interval, src.reset_interval);
	KUNIT_EXPECT_EQ(test, dst.ms, src.ms);
	KUNIT_EXPECT_EQ(test, dst.sz, src.sz);
	KUNIT_EXPECT_EQ(test, dst.weight_sz, src.weight_sz);
	KUNIT_EXPECT_EQ(test, dst.weight_nr_accesses, src.weight_nr_accesses);
	KUNIT_EXPECT_EQ(test, dst.weight_age, src.weight_age);
}
721
/*
 * Allocate and fill the node-id and weight arrays of 'dests' from the
 * given arrays of 'nr_dests' entries each.
 *
 * Returns 0 on success, -ENOMEM on allocation failure (with any partial
 * allocation already cleaned up).  Caller frees via
 * damos_test_help_dests_free() on success.
 */
static int damos_test_help_dests_setup(struct damos_migrate_dests *dests,
		unsigned int *node_id_arr, unsigned int *weight_arr,
		size_t nr_dests)
{
	size_t i;

	dests->node_id_arr = kmalloc_objs(*dests->node_id_arr, nr_dests);
	if (!dests->node_id_arr)
		return -ENOMEM;
	dests->weight_arr = kmalloc_objs(*dests->weight_arr, nr_dests);
	if (!dests->weight_arr) {
		/* Undo the first allocation before failing. */
		kfree(dests->node_id_arr);
		dests->node_id_arr = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < nr_dests; i++) {
		dests->node_id_arr[i] = node_id_arr[i];
		dests->weight_arr[i] = weight_arr[i];
	}
	dests->nr_dests = nr_dests;
	return 0;
}
745
/* Free the arrays allocated by damos_test_help_dests_setup(). */
static void damos_test_help_dests_free(struct damos_migrate_dests *dests)
{
	kfree(dests->node_id_arr);
	kfree(dests->weight_arr);
}
751
/*
 * Build dst/src migrate-destination sets from the given arrays, commit src
 * into dst via damos_commit_dests(), and verify dst now equals src.
 * Skips (after freeing whatever was allocated) on any setup/commit failure.
 */
static void damos_test_commit_dests_for(struct kunit *test,
		unsigned int *dst_node_id_arr, unsigned int *dst_weight_arr,
		size_t dst_nr_dests,
		unsigned int *src_node_id_arr, unsigned int *src_weight_arr,
		size_t src_nr_dests)
{
	struct damos_migrate_dests dst = {}, src = {};
	int i, err;
	bool skip = true;

	err = damos_test_help_dests_setup(&dst, dst_node_id_arr,
			dst_weight_arr, dst_nr_dests);
	if (err)
		kunit_skip(test, "dests setup fail");
	err = damos_test_help_dests_setup(&src, src_node_id_arr,
			src_weight_arr, src_nr_dests);
	if (err) {
		/* dst was set up successfully; release it before skipping. */
		damos_test_help_dests_free(&dst);
		kunit_skip(test, "src setup fail");
	}
	err = damos_commit_dests(&dst, &src);
	if (err)
		goto out;
	skip = false;

	/* dst should now mirror src, entry by entry. */
	KUNIT_EXPECT_EQ(test, dst.nr_dests, src_nr_dests);
	for (i = 0; i < dst.nr_dests; i++) {
		KUNIT_EXPECT_EQ(test, dst.node_id_arr[i], src_node_id_arr[i]);
		KUNIT_EXPECT_EQ(test, dst.weight_arr[i], src_weight_arr[i]);
	}

out:
	damos_test_help_dests_free(&dst);
	damos_test_help_dests_free(&src);
	if (skip)
		kunit_skip(test, "skip");
}
789
/*
 * Test damos_commit_dests() for same-size, growing, from-empty, shrinking,
 * and to-empty destination sets.
 */
static void damos_test_commit_dests(struct kunit *test)
{
	damos_test_commit_dests_for(test,
			(unsigned int[]){1, 2, 3}, (unsigned int[]){2, 3, 4},
			3,
			(unsigned int[]){4, 5, 6}, (unsigned int[]){5, 6, 7},
			3);
	damos_test_commit_dests_for(test,
			(unsigned int[]){1, 2}, (unsigned int[]){2, 3},
			2,
			(unsigned int[]){4, 5, 6}, (unsigned int[]){5, 6, 7},
			3);
	damos_test_commit_dests_for(test,
			NULL, NULL, 0,
			(unsigned int[]){4, 5, 6}, (unsigned int[]){5, 6, 7},
			3);
	damos_test_commit_dests_for(test,
			(unsigned int[]){1, 2, 3}, (unsigned int[]){2, 3, 4},
			3,
			(unsigned int[]){4, 5}, (unsigned int[]){5, 6}, 2);
	damos_test_commit_dests_for(test,
			(unsigned int[]){1, 2, 3}, (unsigned int[]){2, 3, 4},
			3,
			NULL, NULL, 0);
}
815
/*
 * Commit 'src' into 'dst' via damos_commit_filter() and verify the common
 * fields plus the per-type payload (memcg id, address range, target index,
 * or hugepage size range) were copied.
 */
static void damos_test_commit_filter_for(struct kunit *test,
		struct damos_filter *dst, struct damos_filter *src)
{
	damos_commit_filter(dst, src);
	KUNIT_EXPECT_EQ(test, dst->type, src->type);
	KUNIT_EXPECT_EQ(test, dst->matching, src->matching);
	KUNIT_EXPECT_EQ(test, dst->allow, src->allow);
	/* Per-type extra payload. */
	switch (src->type) {
	case DAMOS_FILTER_TYPE_MEMCG:
		KUNIT_EXPECT_EQ(test, dst->memcg_id, src->memcg_id);
		break;
	case DAMOS_FILTER_TYPE_ADDR:
		KUNIT_EXPECT_EQ(test, dst->addr_range.start,
				src->addr_range.start);
		KUNIT_EXPECT_EQ(test, dst->addr_range.end,
				src->addr_range.end);
		break;
	case DAMOS_FILTER_TYPE_TARGET:
		KUNIT_EXPECT_EQ(test, dst->target_idx, src->target_idx);
		break;
	case DAMOS_FILTER_TYPE_HUGEPAGE_SIZE:
		KUNIT_EXPECT_EQ(test, dst->sz_range.min, src->sz_range.min);
		KUNIT_EXPECT_EQ(test, dst->sz_range.max, src->sz_range.max);
		break;
	default:
		break;
	}
}
844
/*
 * Test damos_commit_filter() by committing filters of each type, one after
 * another, into the same destination filter.
 */
static void damos_test_commit_filter(struct kunit *test)
{
	struct damos_filter dst = {
		.type = DAMOS_FILTER_TYPE_ACTIVE,
		.matching = false,
		.allow = false,
	};

	damos_test_commit_filter_for(test, &dst,
			&(struct damos_filter){
			.type = DAMOS_FILTER_TYPE_ANON,
			.matching = true,
			.allow = true,
			});
	damos_test_commit_filter_for(test, &dst,
			&(struct damos_filter){
			.type = DAMOS_FILTER_TYPE_MEMCG,
			.matching = false,
			.allow = false,
			.memcg_id = 123,
			});
	damos_test_commit_filter_for(test, &dst,
			&(struct damos_filter){
			.type = DAMOS_FILTER_TYPE_YOUNG,
			.matching = true,
			.allow = true,
			});
	damos_test_commit_filter_for(test, &dst,
			&(struct damos_filter){
			.type = DAMOS_FILTER_TYPE_HUGEPAGE_SIZE,
			.matching = false,
			.allow = false,
			.sz_range = {.min = 234, .max = 345},
			});
	damos_test_commit_filter_for(test, &dst,
			&(struct damos_filter){
			.type = DAMOS_FILTER_TYPE_UNMAPPED,
			.matching = true,
			.allow = true,
			});
	damos_test_commit_filter_for(test, &dst,
			&(struct damos_filter){
			.type = DAMOS_FILTER_TYPE_ADDR,
			.matching = false,
			.allow = false,
			.addr_range = {.start = 456, .end = 567},
			});
	damos_test_commit_filter_for(test, &dst,
			&(struct damos_filter){
			.type = DAMOS_FILTER_TYPE_TARGET,
			.matching = true,
			.allow = true,
			.target_idx = 6,
			});
}
900
/*
 * Initialize the list heads of a stack-allocated scheme so that commit
 * helpers can safely iterate them.  NOTE(review): "initailize" in the name
 * is a typo; kept as-is since renaming would touch callers.
 */
static void damos_test_help_initailize_scheme(struct damos *scheme)
{
	INIT_LIST_HEAD(&scheme->quota.goals);
	INIT_LIST_HEAD(&scheme->core_filters);
	INIT_LIST_HEAD(&scheme->ops_filters);
}
907
/*
 * Commit 'src' into 'dst' via damos_commit() and verify the access pattern,
 * action, apply interval, watermarks, and (for migrate actions) the target
 * node were copied.  Skips if damos_commit() fails.
 */
static void damos_test_commit_for(struct kunit *test, struct damos *dst,
		struct damos *src)
{
	int err;

	/* Stack-allocated schemes need their list heads initialized first. */
	damos_test_help_initailize_scheme(dst);
	damos_test_help_initailize_scheme(src);

	err = damos_commit(dst, src);
	if (err)
		kunit_skip(test, "damos_commit fail");

	KUNIT_EXPECT_EQ(test, dst->pattern.min_sz_region,
			src->pattern.min_sz_region);
	KUNIT_EXPECT_EQ(test, dst->pattern.max_sz_region,
			src->pattern.max_sz_region);
	KUNIT_EXPECT_EQ(test, dst->pattern.min_nr_accesses,
			src->pattern.min_nr_accesses);
	KUNIT_EXPECT_EQ(test, dst->pattern.max_nr_accesses,
			src->pattern.max_nr_accesses);
	KUNIT_EXPECT_EQ(test, dst->pattern.min_age_region,
			src->pattern.min_age_region);
	KUNIT_EXPECT_EQ(test, dst->pattern.max_age_region,
			src->pattern.max_age_region);

	KUNIT_EXPECT_EQ(test, dst->action, src->action);
	KUNIT_EXPECT_EQ(test, dst->apply_interval_us, src->apply_interval_us);

	KUNIT_EXPECT_EQ(test, dst->wmarks.metric, src->wmarks.metric);
	KUNIT_EXPECT_EQ(test, dst->wmarks.interval, src->wmarks.interval);
	KUNIT_EXPECT_EQ(test, dst->wmarks.high, src->wmarks.high);
	KUNIT_EXPECT_EQ(test, dst->wmarks.mid, src->wmarks.mid);
	KUNIT_EXPECT_EQ(test, dst->wmarks.low, src->wmarks.low);

	/* target_nid is meaningful only for migrate actions. */
	switch (src->action) {
	case DAMOS_MIGRATE_COLD:
	case DAMOS_MIGRATE_HOT:
		KUNIT_EXPECT_EQ(test, dst->target_nid, src->target_nid);
		break;
	default:
		break;
	}
}
951
/* Test damos_commit() between two DAMOS_PAGEOUT schemes. */
static void damos_test_commit_pageout(struct kunit *test)
{
	damos_test_commit_for(test,
			&(struct damos){
			.pattern = (struct damos_access_pattern){
				1, 2, 3, 4, 5, 6},
			.action = DAMOS_PAGEOUT,
			.apply_interval_us = 1000000,
			.wmarks = (struct damos_watermarks){
				DAMOS_WMARK_FREE_MEM_RATE,
				900, 100, 50},
			},
			&(struct damos){
			.pattern = (struct damos_access_pattern){
				2, 3, 4, 5, 6, 7},
			.action = DAMOS_PAGEOUT,
			.apply_interval_us = 2000000,
			.wmarks = (struct damos_watermarks){
				DAMOS_WMARK_FREE_MEM_RATE,
				800, 50, 30},
			});
}
974
/*
 * Test damos_commit() changing the action from DAMOS_PAGEOUT to
 * DAMOS_MIGRATE_HOT, which should also carry over the target node id.
 */
static void damos_test_commit_migrate_hot(struct kunit *test)
{
	damos_test_commit_for(test,
			&(struct damos){
			.pattern = (struct damos_access_pattern){
				1, 2, 3, 4, 5, 6},
			.action = DAMOS_PAGEOUT,
			.apply_interval_us = 1000000,
			.wmarks = (struct damos_watermarks){
				DAMOS_WMARK_FREE_MEM_RATE,
				900, 100, 50},
			},
			&(struct damos){
			.pattern = (struct damos_access_pattern){
				2, 3, 4, 5, 6, 7},
			.action = DAMOS_MIGRATE_HOT,
			.apply_interval_us = 2000000,
			.target_nid = 5,
			});
}
995
/*
 * Allocate a target with 'nr_regions' regions of the given [start, end)
 * ranges.  Returns the target, or NULL on any allocation failure (with
 * everything allocated so far already freed).
 */
static struct damon_target *damon_test_help_setup_target(
		unsigned long region_start_end[][2], int nr_regions)
{
	struct damon_target *t;
	struct damon_region *r;
	int i;

	t = damon_new_target();
	if (!t)
		return NULL;
	for (i = 0; i < nr_regions; i++) {
		r = damon_new_region(region_start_end[i][0],
				region_start_end[i][1]);
		if (!r) {
			/* Frees already-added regions, too. */
			damon_free_target(t);
			return NULL;
		}
		damon_add_region(r, t);
	}
	return t;
}
1017
/*
 * Build dst/src targets with the given regions, commit src into dst via
 * damon_commit_target_regions(), and verify dst's regions now match the
 * expected [start, end) ranges.
 */
static void damon_test_commit_target_regions_for(struct kunit *test,
		unsigned long dst_start_end[][2], int nr_dst_regions,
		unsigned long src_start_end[][2], int nr_src_regions,
		unsigned long expect_start_end[][2], int nr_expect_regions)
{
	struct damon_target *dst_target, *src_target;
	struct damon_region *r;
	int i;

	dst_target = damon_test_help_setup_target(dst_start_end, nr_dst_regions);
	if (!dst_target)
		kunit_skip(test, "dst target setup fail");
	src_target = damon_test_help_setup_target(src_start_end, nr_src_regions);
	if (!src_target) {
		/* dst was set up successfully; free it before skipping. */
		damon_free_target(dst_target);
		kunit_skip(test, "src target setup fail");
	}
	damon_commit_target_regions(dst_target, src_target, 1);
	i = 0;
	damon_for_each_region(r, dst_target) {
		KUNIT_EXPECT_EQ(test, r->ar.start, expect_start_end[i][0]);
		KUNIT_EXPECT_EQ(test, r->ar.end, expect_start_end[i][1]);
		i++;
	}
	/* Both the counter and the target's own count should match. */
	KUNIT_EXPECT_EQ(test, damon_nr_regions(dst_target), nr_expect_regions);
	KUNIT_EXPECT_EQ(test, i, nr_expect_regions);
	damon_free_target(dst_target);
	damon_free_target(src_target);
}
1047
/*
 * Test damon_commit_target_regions() with non-empty and empty source
 * region sets.
 */
static void damon_test_commit_target_regions(struct kunit *test)
{
	/* Source has regions: destination regions should be replaced. */
	damon_test_commit_target_regions_for(test,
			(unsigned long[][2]) {{3, 8}, {8, 10}}, 2,
			(unsigned long[][2]) {{4, 6}}, 1,
			(unsigned long[][2]) {{4, 6}}, 1);
	/* Source has no region: destination regions should be kept as-is. */
	damon_test_commit_target_regions_for(test,
			(unsigned long[][2]) {{3, 8}, {8, 10}}, 2,
			(unsigned long[][2]) {}, 0,
			(unsigned long[][2]) {{3, 8}, {8, 10}}, 2);
}
1059
damos_test_filter_out(struct kunit * test)1060 static void damos_test_filter_out(struct kunit *test)
1061 {
1062 struct damon_target *t;
1063 struct damon_region *r, *r2;
1064 struct damos_filter *f;
1065
1066 f = damos_new_filter(DAMOS_FILTER_TYPE_ADDR, true, false);
1067 if (!f)
1068 kunit_skip(test, "filter alloc fail");
1069 f->addr_range = (struct damon_addr_range){.start = 2, .end = 6};
1070
1071 t = damon_new_target();
1072 if (!t) {
1073 damos_destroy_filter(f);
1074 kunit_skip(test, "target alloc fail");
1075 }
1076 r = damon_new_region(3, 5);
1077 if (!r) {
1078 damos_destroy_filter(f);
1079 damon_free_target(t);
1080 kunit_skip(test, "region alloc fail");
1081 }
1082 damon_add_region(r, t);
1083
1084 /* region in the range */
1085 KUNIT_EXPECT_TRUE(test, damos_filter_match(NULL, t, r, f, 1));
1086 KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);
1087
1088 /* region before the range */
1089 r->ar.start = 1;
1090 r->ar.end = 2;
1091 KUNIT_EXPECT_FALSE(test,
1092 damos_filter_match(NULL, t, r, f, 1));
1093 KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);
1094
1095 /* region after the range */
1096 r->ar.start = 6;
1097 r->ar.end = 8;
1098 KUNIT_EXPECT_FALSE(test,
1099 damos_filter_match(NULL, t, r, f, 1));
1100 KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);
1101
1102 /* region started before the range */
1103 r->ar.start = 1;
1104 r->ar.end = 4;
1105 KUNIT_EXPECT_FALSE(test, damos_filter_match(NULL, t, r, f, 1));
1106 /* filter should have split the region */
1107 KUNIT_EXPECT_EQ(test, r->ar.start, 1);
1108 KUNIT_EXPECT_EQ(test, r->ar.end, 2);
1109 KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 2);
1110 r2 = damon_next_region(r);
1111 KUNIT_EXPECT_EQ(test, r2->ar.start, 2);
1112 KUNIT_EXPECT_EQ(test, r2->ar.end, 4);
1113 damon_destroy_region(r2, t);
1114
1115 /* region started in the range */
1116 r->ar.start = 2;
1117 r->ar.end = 8;
1118 KUNIT_EXPECT_TRUE(test,
1119 damos_filter_match(NULL, t, r, f, 1));
1120 /* filter should have split the region */
1121 KUNIT_EXPECT_EQ(test, r->ar.start, 2);
1122 KUNIT_EXPECT_EQ(test, r->ar.end, 6);
1123 KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 2);
1124 r2 = damon_next_region(r);
1125 KUNIT_EXPECT_EQ(test, r2->ar.start, 6);
1126 KUNIT_EXPECT_EQ(test, r2->ar.end, 8);
1127 damon_destroy_region(r2, t);
1128
1129 damon_free_target(t);
1130 damos_free_filter(f);
1131 }
1132
/* Test the direction and scale of damon_feed_loop_next_input()'s output. */
static void damon_test_feed_loop_next_input(struct kunit *test)
{
	unsigned long prev_input = 900000;

	/*
	 * When the score is below the fixed 10,000 goal (see the comment of
	 * damon_feed_loop_next_input()), the next input should be raised
	 * above the previous one.
	 */
	KUNIT_EXPECT_GT(test,
			damon_feed_loop_next_input(prev_input, 200),
			prev_input);

	/*
	 * When the score is above the goal, the next input should be lowered
	 * below the previous one.
	 */
	KUNIT_EXPECT_LT(test,
			damon_feed_loop_next_input(prev_input, 250000000),
			prev_input);

	/*
	 * The adjustment should scale with the distance between the score
	 * and the goal.
	 */
	KUNIT_EXPECT_GT(test,
			damon_feed_loop_next_input(prev_input, 200),
			damon_feed_loop_next_input(prev_input, 2000));
}
1163
damon_test_set_filters_default_reject(struct kunit * test)1164 static void damon_test_set_filters_default_reject(struct kunit *test)
1165 {
1166 struct damos scheme;
1167 struct damos_filter *target_filter, *anon_filter;
1168
1169 INIT_LIST_HEAD(&scheme.core_filters);
1170 INIT_LIST_HEAD(&scheme.ops_filters);
1171
1172 damos_set_filters_default_reject(&scheme);
1173 /*
1174 * No filter is installed. Allow by default on both core and ops layer
1175 * filtering stages, since there are no filters at all.
1176 */
1177 KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, false);
1178 KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, false);
1179
1180 target_filter = damos_new_filter(DAMOS_FILTER_TYPE_TARGET, true, true);
1181 if (!target_filter)
1182 kunit_skip(test, "filter alloc fail");
1183 damos_add_filter(&scheme, target_filter);
1184 damos_set_filters_default_reject(&scheme);
1185 /*
1186 * A core-handled allow-filter is installed.
1187 * Reject by default on core layer filtering stage due to the last
1188 * core-layer-filter's behavior.
1189 * Allow by default on ops layer filtering stage due to the absence of
1190 * ops layer filters.
1191 */
1192 KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, true);
1193 KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, false);
1194
1195 target_filter->allow = false;
1196 damos_set_filters_default_reject(&scheme);
1197 /*
1198 * A core-handled reject-filter is installed.
1199 * Allow by default on core layer filtering stage due to the last
1200 * core-layer-filter's behavior.
1201 * Allow by default on ops layer filtering stage due to the absence of
1202 * ops layer filters.
1203 */
1204 KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, false);
1205 KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, false);
1206
1207 anon_filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, true);
1208 if (!anon_filter) {
1209 damos_free_filter(target_filter);
1210 kunit_skip(test, "anon_filter alloc fail");
1211 }
1212 damos_add_filter(&scheme, anon_filter);
1213
1214 damos_set_filters_default_reject(&scheme);
1215 /*
1216 * A core-handled reject-filter and ops-handled allow-filter are installed.
1217 * Allow by default on core layer filtering stage due to the existence
1218 * of the ops-handled filter.
1219 * Reject by default on ops layer filtering stage due to the last
1220 * ops-layer-filter's behavior.
1221 */
1222 KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, false);
1223 KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, true);
1224
1225 target_filter->allow = true;
1226 damos_set_filters_default_reject(&scheme);
1227 /*
1228 * A core-handled allow-filter and ops-handled allow-filter are
1229 * installed.
1230 * Allow by default on core layer filtering stage due to the existence
1231 * of the ops-handled filter.
1232 * Reject by default on ops layer filtering stage due to the last
1233 * ops-layer-filter's behavior.
1234 */
1235 KUNIT_EXPECT_EQ(test, scheme.core_filters_default_reject, false);
1236 KUNIT_EXPECT_EQ(test, scheme.ops_filters_default_reject, true);
1237
1238 damos_free_filter(anon_filter);
1239 damos_free_filter(target_filter);
1240 }
1241
/* All DAMON core KUnit test cases, registered via damon_test_suite below. */
static struct kunit_case damon_test_cases[] = {
	KUNIT_CASE(damon_test_target),
	KUNIT_CASE(damon_test_regions),
	KUNIT_CASE(damon_test_aggregate),
	KUNIT_CASE(damon_test_split_at),
	KUNIT_CASE(damon_test_merge_two),
	KUNIT_CASE(damon_test_merge_regions_of),
	KUNIT_CASE(damon_test_split_regions_of),
	KUNIT_CASE(damon_test_ops_registration),
	KUNIT_CASE(damon_test_set_regions),
	KUNIT_CASE(damon_test_nr_accesses_to_accesses_bp),
	KUNIT_CASE(damon_test_update_monitoring_result),
	KUNIT_CASE(damon_test_set_attrs),
	KUNIT_CASE(damon_test_moving_sum),
	KUNIT_CASE(damos_test_new_filter),
	KUNIT_CASE(damos_test_commit_quota_goal),
	KUNIT_CASE(damos_test_commit_quota_goals),
	KUNIT_CASE(damos_test_commit_quota),
	KUNIT_CASE(damos_test_commit_dests),
	KUNIT_CASE(damos_test_commit_filter),
	KUNIT_CASE(damos_test_commit_pageout),
	KUNIT_CASE(damos_test_commit_migrate_hot),
	KUNIT_CASE(damon_test_commit_target_regions),
	KUNIT_CASE(damos_test_filter_out),
	KUNIT_CASE(damon_test_feed_loop_next_input),
	KUNIT_CASE(damon_test_set_filters_default_reject),
	{},	/* sentinel */
};
1270
/* The DAMON core test suite, visible to KUnit under the name "damon". */
static struct kunit_suite damon_test_suite = {
	.name = "damon",
	.test_cases = damon_test_cases,
};
kunit_test_suite(damon_test_suite);
1276
1277 #endif /* _DAMON_CORE_TEST_H */
1278
1279 #endif /* CONFIG_DAMON_KUNIT_TEST */
1280