xref: /linux/mm/damon/sysfs.c (revision 4359a011e259a4608afc7fb3635370c9d4ba5943)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * DAMON sysfs Interface
4  *
5  * Copyright (c) 2022 SeongJae Park <sj@kernel.org>
6  */
7 
8 #include <linux/damon.h>
9 #include <linux/kobject.h>
10 #include <linux/pid.h>
11 #include <linux/sched.h>
12 #include <linux/slab.h>
13 
14 static DEFINE_MUTEX(damon_sysfs_lock);
15 
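/*
 * Rough sketch of the directory tree the kobjects below build.  The root is
 * assumed here to be the usual /sys/kernel/mm/damon/admin directory, which
 * is set up near the end of this file:
 *
 *	kdamonds/<N>/contexts/<N>/
 *		monitoring_attrs/intervals/{sample_us,aggr_us,update_us}
 *		monitoring_attrs/nr_regions/{min,max}
 *		targets/<N>/{pid_target, regions/<N>/{start,end}}
 *		schemes/<N>/{action, access_pattern/, quotas/, watermarks/, stats/}
 */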
16 /*
17  * unsigned long range directory
18  */
19 
20 struct damon_sysfs_ul_range {
21 	struct kobject kobj;
22 	unsigned long min;
23 	unsigned long max;
24 };
25 
26 static struct damon_sysfs_ul_range *damon_sysfs_ul_range_alloc(
27 		unsigned long min,
28 		unsigned long max)
29 {
30 	struct damon_sysfs_ul_range *range = kmalloc(sizeof(*range),
31 			GFP_KERNEL);
32 
33 	if (!range)
34 		return NULL;
35 	range->kobj = (struct kobject){};
36 	range->min = min;
37 	range->max = max;
38 
39 	return range;
40 }
41 
42 static ssize_t min_show(struct kobject *kobj, struct kobj_attribute *attr,
43 		char *buf)
44 {
45 	struct damon_sysfs_ul_range *range = container_of(kobj,
46 			struct damon_sysfs_ul_range, kobj);
47 
48 	return sysfs_emit(buf, "%lu\n", range->min);
49 }
50 
51 static ssize_t min_store(struct kobject *kobj, struct kobj_attribute *attr,
52 		const char *buf, size_t count)
53 {
54 	struct damon_sysfs_ul_range *range = container_of(kobj,
55 			struct damon_sysfs_ul_range, kobj);
56 	unsigned long min;
57 	int err;
58 
59 	err = kstrtoul(buf, 0, &min);
60 	if (err)
61 		return -EINVAL;
62 
63 	range->min = min;
64 	return count;
65 }
66 
67 static ssize_t max_show(struct kobject *kobj, struct kobj_attribute *attr,
68 		char *buf)
69 {
70 	struct damon_sysfs_ul_range *range = container_of(kobj,
71 			struct damon_sysfs_ul_range, kobj);
72 
73 	return sysfs_emit(buf, "%lu\n", range->max);
74 }
75 
76 static ssize_t max_store(struct kobject *kobj, struct kobj_attribute *attr,
77 		const char *buf, size_t count)
78 {
79 	struct damon_sysfs_ul_range *range = container_of(kobj,
80 			struct damon_sysfs_ul_range, kobj);
81 	unsigned long max;
82 	int err;
83 
84 	err = kstrtoul(buf, 0, &max);
85 	if (err)
86 		return -EINVAL;
87 
88 	range->max = max;
89 	return count;
90 }
91 
92 static void damon_sysfs_ul_range_release(struct kobject *kobj)
93 {
94 	kfree(container_of(kobj, struct damon_sysfs_ul_range, kobj));
95 }
96 
97 static struct kobj_attribute damon_sysfs_ul_range_min_attr =
98 		__ATTR_RW_MODE(min, 0600);
99 
100 static struct kobj_attribute damon_sysfs_ul_range_max_attr =
101 		__ATTR_RW_MODE(max, 0600);
102 
103 static struct attribute *damon_sysfs_ul_range_attrs[] = {
104 	&damon_sysfs_ul_range_min_attr.attr,
105 	&damon_sysfs_ul_range_max_attr.attr,
106 	NULL,
107 };
108 ATTRIBUTE_GROUPS(damon_sysfs_ul_range);
109 
110 static struct kobj_type damon_sysfs_ul_range_ktype = {
111 	.release = damon_sysfs_ul_range_release,
112 	.sysfs_ops = &kobj_sysfs_ops,
113 	.default_groups = damon_sysfs_ul_range_groups,
114 };
115 
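/*
 * The min/max pair above is the building block for every range directory of
 * this interface: monitoring_attrs/nr_regions below (defaults 10 and 1000)
 * and the sz/nr_accesses/age directories under each scheme's access_pattern.
 * Illustrative use, assuming the usual /sys/kernel/mm/damon/admin root:
 *
 *	# cd /sys/kernel/mm/damon/admin/kdamonds/0/contexts/0
 *	# echo 10 > monitoring_attrs/nr_regions/min
 *	# echo 1000 > monitoring_attrs/nr_regions/max
 */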
116 /*
117  * schemes/stats directory
118  */
119 
120 struct damon_sysfs_stats {
121 	struct kobject kobj;
122 	unsigned long nr_tried;
123 	unsigned long sz_tried;
124 	unsigned long nr_applied;
125 	unsigned long sz_applied;
126 	unsigned long qt_exceeds;
127 };
128 
129 static struct damon_sysfs_stats *damon_sysfs_stats_alloc(void)
130 {
131 	return kzalloc(sizeof(struct damon_sysfs_stats), GFP_KERNEL);
132 }
133 
134 static ssize_t nr_tried_show(struct kobject *kobj, struct kobj_attribute *attr,
135 		char *buf)
136 {
137 	struct damon_sysfs_stats *stats = container_of(kobj,
138 			struct damon_sysfs_stats, kobj);
139 
140 	return sysfs_emit(buf, "%lu\n", stats->nr_tried);
141 }
142 
143 static ssize_t sz_tried_show(struct kobject *kobj, struct kobj_attribute *attr,
144 		char *buf)
145 {
146 	struct damon_sysfs_stats *stats = container_of(kobj,
147 			struct damon_sysfs_stats, kobj);
148 
149 	return sysfs_emit(buf, "%lu\n", stats->sz_tried);
150 }
151 
152 static ssize_t nr_applied_show(struct kobject *kobj,
153 		struct kobj_attribute *attr, char *buf)
154 {
155 	struct damon_sysfs_stats *stats = container_of(kobj,
156 			struct damon_sysfs_stats, kobj);
157 
158 	return sysfs_emit(buf, "%lu\n", stats->nr_applied);
159 }
160 
161 static ssize_t sz_applied_show(struct kobject *kobj,
162 		struct kobj_attribute *attr, char *buf)
163 {
164 	struct damon_sysfs_stats *stats = container_of(kobj,
165 			struct damon_sysfs_stats, kobj);
166 
167 	return sysfs_emit(buf, "%lu\n", stats->sz_applied);
168 }
169 
170 static ssize_t qt_exceeds_show(struct kobject *kobj,
171 		struct kobj_attribute *attr, char *buf)
172 {
173 	struct damon_sysfs_stats *stats = container_of(kobj,
174 			struct damon_sysfs_stats, kobj);
175 
176 	return sysfs_emit(buf, "%lu\n", stats->qt_exceeds);
177 }
178 
179 static void damon_sysfs_stats_release(struct kobject *kobj)
180 {
181 	kfree(container_of(kobj, struct damon_sysfs_stats, kobj));
182 }
183 
184 static struct kobj_attribute damon_sysfs_stats_nr_tried_attr =
185 		__ATTR_RO_MODE(nr_tried, 0400);
186 
187 static struct kobj_attribute damon_sysfs_stats_sz_tried_attr =
188 		__ATTR_RO_MODE(sz_tried, 0400);
189 
190 static struct kobj_attribute damon_sysfs_stats_nr_applied_attr =
191 		__ATTR_RO_MODE(nr_applied, 0400);
192 
193 static struct kobj_attribute damon_sysfs_stats_sz_applied_attr =
194 		__ATTR_RO_MODE(sz_applied, 0400);
195 
196 static struct kobj_attribute damon_sysfs_stats_qt_exceeds_attr =
197 		__ATTR_RO_MODE(qt_exceeds, 0400);
198 
199 static struct attribute *damon_sysfs_stats_attrs[] = {
200 	&damon_sysfs_stats_nr_tried_attr.attr,
201 	&damon_sysfs_stats_sz_tried_attr.attr,
202 	&damon_sysfs_stats_nr_applied_attr.attr,
203 	&damon_sysfs_stats_sz_applied_attr.attr,
204 	&damon_sysfs_stats_qt_exceeds_attr.attr,
205 	NULL,
206 };
207 ATTRIBUTE_GROUPS(damon_sysfs_stats);
208 
209 static struct kobj_type damon_sysfs_stats_ktype = {
210 	.release = damon_sysfs_stats_release,
211 	.sysfs_ops = &kobj_sysfs_ops,
212 	.default_groups = damon_sysfs_stats_groups,
213 };
214 
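/*
 * The stat files above are read-only snapshots of the per-scheme DAMOS
 * statistics.  They are refreshed only on request, via the
 * "update_schemes_stats" command (see damon_sysfs_cmd_strs below), which is
 * assumed to be written to the per-kdamond "state" file defined later in
 * this file.  Illustrative read, relative to the assumed
 * /sys/kernel/mm/damon/admin root:
 *
 *	# echo update_schemes_stats > kdamonds/0/state
 *	# cat kdamonds/0/contexts/0/schemes/0/stats/sz_tried
 */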
215 /*
216  * watermarks directory
217  */
218 
219 struct damon_sysfs_watermarks {
220 	struct kobject kobj;
221 	enum damos_wmark_metric metric;
222 	unsigned long interval_us;
223 	unsigned long high;
224 	unsigned long mid;
225 	unsigned long low;
226 };
227 
228 static struct damon_sysfs_watermarks *damon_sysfs_watermarks_alloc(
229 		enum damos_wmark_metric metric, unsigned long interval_us,
230 		unsigned long high, unsigned long mid, unsigned long low)
231 {
232 	struct damon_sysfs_watermarks *watermarks = kmalloc(
233 			sizeof(*watermarks), GFP_KERNEL);
234 
235 	if (!watermarks)
236 		return NULL;
237 	watermarks->kobj = (struct kobject){};
238 	watermarks->metric = metric;
239 	watermarks->interval_us = interval_us;
240 	watermarks->high = high;
241 	watermarks->mid = mid;
242 	watermarks->low = low;
243 	return watermarks;
244 }
245 
246 /* Should match with enum damos_wmark_metric */
247 static const char * const damon_sysfs_wmark_metric_strs[] = {
248 	"none",
249 	"free_mem_rate",
250 };
251 
252 static ssize_t metric_show(struct kobject *kobj, struct kobj_attribute *attr,
253 		char *buf)
254 {
255 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
256 			struct damon_sysfs_watermarks, kobj);
257 
258 	return sysfs_emit(buf, "%s\n",
259 			damon_sysfs_wmark_metric_strs[watermarks->metric]);
260 }
261 
262 static ssize_t metric_store(struct kobject *kobj, struct kobj_attribute *attr,
263 		const char *buf, size_t count)
264 {
265 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
266 			struct damon_sysfs_watermarks, kobj);
267 	enum damos_wmark_metric metric;
268 
269 	for (metric = 0; metric < NR_DAMOS_WMARK_METRICS; metric++) {
270 		if (sysfs_streq(buf, damon_sysfs_wmark_metric_strs[metric])) {
271 			watermarks->metric = metric;
272 			return count;
273 		}
274 	}
275 	return -EINVAL;
276 }
277 
278 static ssize_t interval_us_show(struct kobject *kobj,
279 		struct kobj_attribute *attr, char *buf)
280 {
281 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
282 			struct damon_sysfs_watermarks, kobj);
283 
284 	return sysfs_emit(buf, "%lu\n", watermarks->interval_us);
285 }
286 
287 static ssize_t interval_us_store(struct kobject *kobj,
288 		struct kobj_attribute *attr, const char *buf, size_t count)
289 {
290 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
291 			struct damon_sysfs_watermarks, kobj);
292 	int err = kstrtoul(buf, 0, &watermarks->interval_us);
293 
294 	if (err)
295 		return -EINVAL;
296 	return count;
297 }
298 
299 static ssize_t high_show(struct kobject *kobj,
300 		struct kobj_attribute *attr, char *buf)
301 {
302 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
303 			struct damon_sysfs_watermarks, kobj);
304 
305 	return sysfs_emit(buf, "%lu\n", watermarks->high);
306 }
307 
308 static ssize_t high_store(struct kobject *kobj,
309 		struct kobj_attribute *attr, const char *buf, size_t count)
310 {
311 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
312 			struct damon_sysfs_watermarks, kobj);
313 	int err = kstrtoul(buf, 0, &watermarks->high);
314 
315 	if (err)
316 		return -EINVAL;
317 	return count;
318 }
319 
320 static ssize_t mid_show(struct kobject *kobj,
321 		struct kobj_attribute *attr, char *buf)
322 {
323 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
324 			struct damon_sysfs_watermarks, kobj);
325 
326 	return sysfs_emit(buf, "%lu\n", watermarks->mid);
327 }
328 
329 static ssize_t mid_store(struct kobject *kobj,
330 		struct kobj_attribute *attr, const char *buf, size_t count)
331 {
332 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
333 			struct damon_sysfs_watermarks, kobj);
334 	int err = kstrtoul(buf, 0, &watermarks->mid);
335 
336 	if (err)
337 		return -EINVAL;
338 	return count;
339 }
340 
341 static ssize_t low_show(struct kobject *kobj,
342 		struct kobj_attribute *attr, char *buf)
343 {
344 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
345 			struct damon_sysfs_watermarks, kobj);
346 
347 	return sysfs_emit(buf, "%lu\n", watermarks->low);
348 }
349 
350 static ssize_t low_store(struct kobject *kobj,
351 		struct kobj_attribute *attr, const char *buf, size_t count)
352 {
353 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
354 			struct damon_sysfs_watermarks, kobj);
355 	int err = kstrtoul(buf, 0, &watermarks->low);
356 
357 	if (err)
358 		return -EINVAL;
359 	return count;
360 }
361 
362 static void damon_sysfs_watermarks_release(struct kobject *kobj)
363 {
364 	kfree(container_of(kobj, struct damon_sysfs_watermarks, kobj));
365 }
366 
367 static struct kobj_attribute damon_sysfs_watermarks_metric_attr =
368 		__ATTR_RW_MODE(metric, 0600);
369 
370 static struct kobj_attribute damon_sysfs_watermarks_interval_us_attr =
371 		__ATTR_RW_MODE(interval_us, 0600);
372 
373 static struct kobj_attribute damon_sysfs_watermarks_high_attr =
374 		__ATTR_RW_MODE(high, 0600);
375 
376 static struct kobj_attribute damon_sysfs_watermarks_mid_attr =
377 		__ATTR_RW_MODE(mid, 0600);
378 
379 static struct kobj_attribute damon_sysfs_watermarks_low_attr =
380 		__ATTR_RW_MODE(low, 0600);
381 
382 static struct attribute *damon_sysfs_watermarks_attrs[] = {
383 	&damon_sysfs_watermarks_metric_attr.attr,
384 	&damon_sysfs_watermarks_interval_us_attr.attr,
385 	&damon_sysfs_watermarks_high_attr.attr,
386 	&damon_sysfs_watermarks_mid_attr.attr,
387 	&damon_sysfs_watermarks_low_attr.attr,
388 	NULL,
389 };
390 ATTRIBUTE_GROUPS(damon_sysfs_watermarks);
391 
392 static struct kobj_type damon_sysfs_watermarks_ktype = {
393 	.release = damon_sysfs_watermarks_release,
394 	.sysfs_ops = &kobj_sysfs_ops,
395 	.default_groups = damon_sysfs_watermarks_groups,
396 };
397 
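/*
 * Watermarks sketch: with the "free_mem_rate" metric, the high/mid/low
 * values are interpreted as per-thousand of free system memory and checked
 * every interval_us, while the default "none" metric (set in
 * damon_sysfs_scheme_set_watermarks() below) keeps the scheme always active.
 * Illustrative setup, relative to the assumed /sys/kernel/mm/damon/admin
 * root:
 *
 *	# cd kdamonds/0/contexts/0/schemes/0/watermarks
 *	# echo free_mem_rate > metric
 *	# echo 1000000 > interval_us
 *	# echo 500 > high; echo 300 > mid; echo 200 > low
 */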
398 /*
399  * scheme/weights directory
400  */
401 
402 struct damon_sysfs_weights {
403 	struct kobject kobj;
404 	unsigned int sz;
405 	unsigned int nr_accesses;
406 	unsigned int age;
407 };
408 
409 static struct damon_sysfs_weights *damon_sysfs_weights_alloc(unsigned int sz,
410 		unsigned int nr_accesses, unsigned int age)
411 {
412 	struct damon_sysfs_weights *weights = kmalloc(sizeof(*weights),
413 			GFP_KERNEL);
414 
415 	if (!weights)
416 		return NULL;
417 	weights->kobj = (struct kobject){};
418 	weights->sz = sz;
419 	weights->nr_accesses = nr_accesses;
420 	weights->age = age;
421 	return weights;
422 }
423 
424 static ssize_t sz_permil_show(struct kobject *kobj,
425 		struct kobj_attribute *attr, char *buf)
426 {
427 	struct damon_sysfs_weights *weights = container_of(kobj,
428 			struct damon_sysfs_weights, kobj);
429 
430 	return sysfs_emit(buf, "%u\n", weights->sz);
431 }
432 
433 static ssize_t sz_permil_store(struct kobject *kobj,
434 		struct kobj_attribute *attr, const char *buf, size_t count)
435 {
436 	struct damon_sysfs_weights *weights = container_of(kobj,
437 			struct damon_sysfs_weights, kobj);
438 	int err = kstrtouint(buf, 0, &weights->sz);
439 
440 	if (err)
441 		return -EINVAL;
442 	return count;
443 }
444 
445 static ssize_t nr_accesses_permil_show(struct kobject *kobj,
446 		struct kobj_attribute *attr, char *buf)
447 {
448 	struct damon_sysfs_weights *weights = container_of(kobj,
449 			struct damon_sysfs_weights, kobj);
450 
451 	return sysfs_emit(buf, "%u\n", weights->nr_accesses);
452 }
453 
454 static ssize_t nr_accesses_permil_store(struct kobject *kobj,
455 		struct kobj_attribute *attr, const char *buf, size_t count)
456 {
457 	struct damon_sysfs_weights *weights = container_of(kobj,
458 			struct damon_sysfs_weights, kobj);
459 	int err = kstrtouint(buf, 0, &weights->nr_accesses);
460 
461 	if (err)
462 		return -EINVAL;
463 	return count;
464 }
465 
466 static ssize_t age_permil_show(struct kobject *kobj,
467 		struct kobj_attribute *attr, char *buf)
468 {
469 	struct damon_sysfs_weights *weights = container_of(kobj,
470 			struct damon_sysfs_weights, kobj);
471 
472 	return sysfs_emit(buf, "%u\n", weights->age);
473 }
474 
475 static ssize_t age_permil_store(struct kobject *kobj,
476 		struct kobj_attribute *attr, const char *buf, size_t count)
477 {
478 	struct damon_sysfs_weights *weights = container_of(kobj,
479 			struct damon_sysfs_weights, kobj);
480 	int err = kstrtouint(buf, 0, &weights->age);
481 
482 	if (err)
483 		return -EINVAL;
484 	return count;
485 }
486 
487 static void damon_sysfs_weights_release(struct kobject *kobj)
488 {
489 	kfree(container_of(kobj, struct damon_sysfs_weights, kobj));
490 }
491 
492 static struct kobj_attribute damon_sysfs_weights_sz_attr =
493 		__ATTR_RW_MODE(sz_permil, 0600);
494 
495 static struct kobj_attribute damon_sysfs_weights_nr_accesses_attr =
496 		__ATTR_RW_MODE(nr_accesses_permil, 0600);
497 
498 static struct kobj_attribute damon_sysfs_weights_age_attr =
499 		__ATTR_RW_MODE(age_permil, 0600);
500 
501 static struct attribute *damon_sysfs_weights_attrs[] = {
502 	&damon_sysfs_weights_sz_attr.attr,
503 	&damon_sysfs_weights_nr_accesses_attr.attr,
504 	&damon_sysfs_weights_age_attr.attr,
505 	NULL,
506 };
507 ATTRIBUTE_GROUPS(damon_sysfs_weights);
508 
509 static struct kobj_type damon_sysfs_weights_ktype = {
510 	.release = damon_sysfs_weights_release,
511 	.sysfs_ops = &kobj_sysfs_ops,
512 	.default_groups = damon_sysfs_weights_groups,
513 };
514 
515 /*
516  * quotas directory
517  */
518 
519 struct damon_sysfs_quotas {
520 	struct kobject kobj;
521 	struct damon_sysfs_weights *weights;
522 	unsigned long ms;
523 	unsigned long sz;
524 	unsigned long reset_interval_ms;
525 };
526 
527 static struct damon_sysfs_quotas *damon_sysfs_quotas_alloc(void)
528 {
529 	return kzalloc(sizeof(struct damon_sysfs_quotas), GFP_KERNEL);
530 }
531 
532 static int damon_sysfs_quotas_add_dirs(struct damon_sysfs_quotas *quotas)
533 {
534 	struct damon_sysfs_weights *weights;
535 	int err;
536 
537 	weights = damon_sysfs_weights_alloc(0, 0, 0);
538 	if (!weights)
539 		return -ENOMEM;
540 
541 	err = kobject_init_and_add(&weights->kobj, &damon_sysfs_weights_ktype,
542 			&quotas->kobj, "weights");
543 	if (err)
544 		kobject_put(&weights->kobj);
545 	else
546 		quotas->weights = weights;
547 	return err;
548 }
549 
550 static void damon_sysfs_quotas_rm_dirs(struct damon_sysfs_quotas *quotas)
551 {
552 	kobject_put(&quotas->weights->kobj);
553 }
554 
555 static ssize_t ms_show(struct kobject *kobj, struct kobj_attribute *attr,
556 		char *buf)
557 {
558 	struct damon_sysfs_quotas *quotas = container_of(kobj,
559 			struct damon_sysfs_quotas, kobj);
560 
561 	return sysfs_emit(buf, "%lu\n", quotas->ms);
562 }
563 
564 static ssize_t ms_store(struct kobject *kobj, struct kobj_attribute *attr,
565 		const char *buf, size_t count)
566 {
567 	struct damon_sysfs_quotas *quotas = container_of(kobj,
568 			struct damon_sysfs_quotas, kobj);
569 	int err = kstrtoul(buf, 0, &quotas->ms);
570 
571 	if (err)
572 		return -EINVAL;
573 	return count;
574 }
575 
576 static ssize_t bytes_show(struct kobject *kobj, struct kobj_attribute *attr,
577 		char *buf)
578 {
579 	struct damon_sysfs_quotas *quotas = container_of(kobj,
580 			struct damon_sysfs_quotas, kobj);
581 
582 	return sysfs_emit(buf, "%lu\n", quotas->sz);
583 }
584 
585 static ssize_t bytes_store(struct kobject *kobj,
586 		struct kobj_attribute *attr, const char *buf, size_t count)
587 {
588 	struct damon_sysfs_quotas *quotas = container_of(kobj,
589 			struct damon_sysfs_quotas, kobj);
590 	int err = kstrtoul(buf, 0, &quotas->sz);
591 
592 	if (err)
593 		return -EINVAL;
594 	return count;
595 }
596 
597 static ssize_t reset_interval_ms_show(struct kobject *kobj,
598 		struct kobj_attribute *attr, char *buf)
599 {
600 	struct damon_sysfs_quotas *quotas = container_of(kobj,
601 			struct damon_sysfs_quotas, kobj);
602 
603 	return sysfs_emit(buf, "%lu\n", quotas->reset_interval_ms);
604 }
605 
606 static ssize_t reset_interval_ms_store(struct kobject *kobj,
607 		struct kobj_attribute *attr, const char *buf, size_t count)
608 {
609 	struct damon_sysfs_quotas *quotas = container_of(kobj,
610 			struct damon_sysfs_quotas, kobj);
611 	int err = kstrtoul(buf, 0, &quotas->reset_interval_ms);
612 
613 	if (err)
614 		return -EINVAL;
615 	return count;
616 }
617 
618 static void damon_sysfs_quotas_release(struct kobject *kobj)
619 {
620 	kfree(container_of(kobj, struct damon_sysfs_quotas, kobj));
621 }
622 
623 static struct kobj_attribute damon_sysfs_quotas_ms_attr =
624 		__ATTR_RW_MODE(ms, 0600);
625 
626 static struct kobj_attribute damon_sysfs_quotas_sz_attr =
627 		__ATTR_RW_MODE(bytes, 0600);
628 
629 static struct kobj_attribute damon_sysfs_quotas_reset_interval_ms_attr =
630 		__ATTR_RW_MODE(reset_interval_ms, 0600);
631 
632 static struct attribute *damon_sysfs_quotas_attrs[] = {
633 	&damon_sysfs_quotas_ms_attr.attr,
634 	&damon_sysfs_quotas_sz_attr.attr,
635 	&damon_sysfs_quotas_reset_interval_ms_attr.attr,
636 	NULL,
637 };
638 ATTRIBUTE_GROUPS(damon_sysfs_quotas);
639 
640 static struct kobj_type damon_sysfs_quotas_ktype = {
641 	.release = damon_sysfs_quotas_release,
642 	.sysfs_ops = &kobj_sysfs_ops,
643 	.default_groups = damon_sysfs_quotas_groups,
644 };
645 
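/*
 * Quotas sketch: ms and bytes bound the time the scheme may consume and the
 * amount of memory it may apply its action to within each reset_interval_ms
 * window, and the weights/ files set the under-quota prioritization weights
 * in per-thousand.  Illustrative setup (at most 10ms and 1GiB per second),
 * relative to the assumed /sys/kernel/mm/damon/admin root:
 *
 *	# cd kdamonds/0/contexts/0/schemes/0/quotas
 *	# echo 10 > ms
 *	# echo $((1 << 30)) > bytes
 *	# echo 1000 > reset_interval_ms
 */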
646 /*
647  * access_pattern directory
648  */
649 
650 struct damon_sysfs_access_pattern {
651 	struct kobject kobj;
652 	struct damon_sysfs_ul_range *sz;
653 	struct damon_sysfs_ul_range *nr_accesses;
654 	struct damon_sysfs_ul_range *age;
655 };
656 
657 static
658 struct damon_sysfs_access_pattern *damon_sysfs_access_pattern_alloc(void)
659 {
660 	struct damon_sysfs_access_pattern *access_pattern =
661 		kmalloc(sizeof(*access_pattern), GFP_KERNEL);
662 
663 	if (!access_pattern)
664 		return NULL;
665 	access_pattern->kobj = (struct kobject){};
666 	return access_pattern;
667 }
668 
669 static int damon_sysfs_access_pattern_add_range_dir(
670 		struct damon_sysfs_access_pattern *access_pattern,
671 		struct damon_sysfs_ul_range **range_dir_ptr,
672 		char *name)
673 {
674 	struct damon_sysfs_ul_range *range = damon_sysfs_ul_range_alloc(0, 0);
675 	int err;
676 
677 	if (!range)
678 		return -ENOMEM;
679 	err = kobject_init_and_add(&range->kobj, &damon_sysfs_ul_range_ktype,
680 			&access_pattern->kobj, name);
681 	if (err)
682 		kobject_put(&range->kobj);
683 	else
684 		*range_dir_ptr = range;
685 	return err;
686 }
687 
688 static int damon_sysfs_access_pattern_add_dirs(
689 		struct damon_sysfs_access_pattern *access_pattern)
690 {
691 	int err;
692 
693 	err = damon_sysfs_access_pattern_add_range_dir(access_pattern,
694 			&access_pattern->sz, "sz");
695 	if (err)
696 		goto put_sz_out;
697 
698 	err = damon_sysfs_access_pattern_add_range_dir(access_pattern,
699 			&access_pattern->nr_accesses, "nr_accesses");
700 	if (err)
701 		goto put_nr_accesses_sz_out;
702 
703 	err = damon_sysfs_access_pattern_add_range_dir(access_pattern,
704 			&access_pattern->age, "age");
705 	if (err)
706 		goto put_age_nr_accesses_sz_out;
707 	return 0;
708 
709 put_age_nr_accesses_sz_out:
710 	kobject_put(&access_pattern->age->kobj);
711 	access_pattern->age = NULL;
712 put_nr_accesses_sz_out:
713 	kobject_put(&access_pattern->nr_accesses->kobj);
714 	access_pattern->nr_accesses = NULL;
715 put_sz_out:
716 	kobject_put(&access_pattern->sz->kobj);
717 	access_pattern->sz = NULL;
718 	return err;
719 }
720 
721 static void damon_sysfs_access_pattern_rm_dirs(
722 		struct damon_sysfs_access_pattern *access_pattern)
723 {
724 	kobject_put(&access_pattern->sz->kobj);
725 	kobject_put(&access_pattern->nr_accesses->kobj);
726 	kobject_put(&access_pattern->age->kobj);
727 }
728 
729 static void damon_sysfs_access_pattern_release(struct kobject *kobj)
730 {
731 	kfree(container_of(kobj, struct damon_sysfs_access_pattern, kobj));
732 }
733 
734 static struct attribute *damon_sysfs_access_pattern_attrs[] = {
735 	NULL,
736 };
737 ATTRIBUTE_GROUPS(damon_sysfs_access_pattern);
738 
739 static struct kobj_type damon_sysfs_access_pattern_ktype = {
740 	.release = damon_sysfs_access_pattern_release,
741 	.sysfs_ops = &kobj_sysfs_ops,
742 	.default_groups = damon_sysfs_access_pattern_groups,
743 };
744 
745 /*
746  * scheme directory
747  */
748 
749 struct damon_sysfs_scheme {
750 	struct kobject kobj;
751 	enum damos_action action;
752 	struct damon_sysfs_access_pattern *access_pattern;
753 	struct damon_sysfs_quotas *quotas;
754 	struct damon_sysfs_watermarks *watermarks;
755 	struct damon_sysfs_stats *stats;
756 };
757 
758 /* This should match with enum damos_action */
759 static const char * const damon_sysfs_damos_action_strs[] = {
760 	"willneed",
761 	"cold",
762 	"pageout",
763 	"hugepage",
764 	"nohugepage",
765 	"lru_prio",
766 	"lru_deprio",
767 	"stat",
768 };
769 
770 static struct damon_sysfs_scheme *damon_sysfs_scheme_alloc(
771 		enum damos_action action)
772 {
773 	struct damon_sysfs_scheme *scheme = kmalloc(sizeof(*scheme),
774 				GFP_KERNEL);
775 
776 	if (!scheme)
777 		return NULL;
778 	scheme->kobj = (struct kobject){};
779 	scheme->action = action;
780 	return scheme;
781 }
782 
783 static int damon_sysfs_scheme_set_access_pattern(
784 		struct damon_sysfs_scheme *scheme)
785 {
786 	struct damon_sysfs_access_pattern *access_pattern;
787 	int err;
788 
789 	access_pattern = damon_sysfs_access_pattern_alloc();
790 	if (!access_pattern)
791 		return -ENOMEM;
792 	err = kobject_init_and_add(&access_pattern->kobj,
793 			&damon_sysfs_access_pattern_ktype, &scheme->kobj,
794 			"access_pattern");
795 	if (err)
796 		goto out;
797 	err = damon_sysfs_access_pattern_add_dirs(access_pattern);
798 	if (err)
799 		goto out;
800 	scheme->access_pattern = access_pattern;
801 	return 0;
802 
803 out:
804 	kobject_put(&access_pattern->kobj);
805 	return err;
806 }
807 
808 static int damon_sysfs_scheme_set_quotas(struct damon_sysfs_scheme *scheme)
809 {
810 	struct damon_sysfs_quotas *quotas = damon_sysfs_quotas_alloc();
811 	int err;
812 
813 	if (!quotas)
814 		return -ENOMEM;
815 	err = kobject_init_and_add(&quotas->kobj, &damon_sysfs_quotas_ktype,
816 			&scheme->kobj, "quotas");
817 	if (err)
818 		goto out;
819 	err = damon_sysfs_quotas_add_dirs(quotas);
820 	if (err)
821 		goto out;
822 	scheme->quotas = quotas;
823 	return 0;
824 
825 out:
826 	kobject_put(&quotas->kobj);
827 	return err;
828 }
829 
830 static int damon_sysfs_scheme_set_watermarks(struct damon_sysfs_scheme *scheme)
831 {
832 	struct damon_sysfs_watermarks *watermarks =
833 		damon_sysfs_watermarks_alloc(DAMOS_WMARK_NONE, 0, 0, 0, 0);
834 	int err;
835 
836 	if (!watermarks)
837 		return -ENOMEM;
838 	err = kobject_init_and_add(&watermarks->kobj,
839 			&damon_sysfs_watermarks_ktype, &scheme->kobj,
840 			"watermarks");
841 	if (err)
842 		kobject_put(&watermarks->kobj);
843 	else
844 		scheme->watermarks = watermarks;
845 	return err;
846 }
847 
848 static int damon_sysfs_scheme_set_stats(struct damon_sysfs_scheme *scheme)
849 {
850 	struct damon_sysfs_stats *stats = damon_sysfs_stats_alloc();
851 	int err;
852 
853 	if (!stats)
854 		return -ENOMEM;
855 	err = kobject_init_and_add(&stats->kobj, &damon_sysfs_stats_ktype,
856 			&scheme->kobj, "stats");
857 	if (err)
858 		kobject_put(&stats->kobj);
859 	else
860 		scheme->stats = stats;
861 	return err;
862 }
863 
864 static int damon_sysfs_scheme_add_dirs(struct damon_sysfs_scheme *scheme)
865 {
866 	int err;
867 
868 	err = damon_sysfs_scheme_set_access_pattern(scheme);
869 	if (err)
870 		return err;
871 	err = damon_sysfs_scheme_set_quotas(scheme);
872 	if (err)
873 		goto put_access_pattern_out;
874 	err = damon_sysfs_scheme_set_watermarks(scheme);
875 	if (err)
876 		goto put_quotas_access_pattern_out;
877 	err = damon_sysfs_scheme_set_stats(scheme);
878 	if (err)
879 		goto put_watermarks_quotas_access_pattern_out;
880 	return 0;
881 
882 put_watermarks_quotas_access_pattern_out:
883 	kobject_put(&scheme->watermarks->kobj);
884 	scheme->watermarks = NULL;
885 put_quotas_access_pattern_out:
886 	kobject_put(&scheme->quotas->kobj);
887 	scheme->quotas = NULL;
888 put_access_pattern_out:
889 	kobject_put(&scheme->access_pattern->kobj);
890 	scheme->access_pattern = NULL;
891 	return err;
892 }
893 
894 static void damon_sysfs_scheme_rm_dirs(struct damon_sysfs_scheme *scheme)
895 {
896 	damon_sysfs_access_pattern_rm_dirs(scheme->access_pattern);
897 	kobject_put(&scheme->access_pattern->kobj);
898 	damon_sysfs_quotas_rm_dirs(scheme->quotas);
899 	kobject_put(&scheme->quotas->kobj);
900 	kobject_put(&scheme->watermarks->kobj);
901 	kobject_put(&scheme->stats->kobj);
902 }
903 
904 static ssize_t action_show(struct kobject *kobj, struct kobj_attribute *attr,
905 		char *buf)
906 {
907 	struct damon_sysfs_scheme *scheme = container_of(kobj,
908 			struct damon_sysfs_scheme, kobj);
909 
910 	return sysfs_emit(buf, "%s\n",
911 			damon_sysfs_damos_action_strs[scheme->action]);
912 }
913 
914 static ssize_t action_store(struct kobject *kobj, struct kobj_attribute *attr,
915 		const char *buf, size_t count)
916 {
917 	struct damon_sysfs_scheme *scheme = container_of(kobj,
918 			struct damon_sysfs_scheme, kobj);
919 	enum damos_action action;
920 
921 	for (action = 0; action < NR_DAMOS_ACTIONS; action++) {
922 		if (sysfs_streq(buf, damon_sysfs_damos_action_strs[action])) {
923 			scheme->action = action;
924 			return count;
925 		}
926 	}
927 	return -EINVAL;
928 }
929 
930 static void damon_sysfs_scheme_release(struct kobject *kobj)
931 {
932 	kfree(container_of(kobj, struct damon_sysfs_scheme, kobj));
933 }
934 
935 static struct kobj_attribute damon_sysfs_scheme_action_attr =
936 		__ATTR_RW_MODE(action, 0600);
937 
938 static struct attribute *damon_sysfs_scheme_attrs[] = {
939 	&damon_sysfs_scheme_action_attr.attr,
940 	NULL,
941 };
942 ATTRIBUTE_GROUPS(damon_sysfs_scheme);
943 
944 static struct kobj_type damon_sysfs_scheme_ktype = {
945 	.release = damon_sysfs_scheme_release,
946 	.sysfs_ops = &kobj_sysfs_ops,
947 	.default_groups = damon_sysfs_scheme_groups,
948 };
949 
950 /*
951  * schemes directory
952  */
953 
954 struct damon_sysfs_schemes {
955 	struct kobject kobj;
956 	struct damon_sysfs_scheme **schemes_arr;
957 	int nr;
958 };
959 
960 static struct damon_sysfs_schemes *damon_sysfs_schemes_alloc(void)
961 {
962 	return kzalloc(sizeof(struct damon_sysfs_schemes), GFP_KERNEL);
963 }
964 
965 static void damon_sysfs_schemes_rm_dirs(struct damon_sysfs_schemes *schemes)
966 {
967 	struct damon_sysfs_scheme **schemes_arr = schemes->schemes_arr;
968 	int i;
969 
970 	for (i = 0; i < schemes->nr; i++) {
971 		damon_sysfs_scheme_rm_dirs(schemes_arr[i]);
972 		kobject_put(&schemes_arr[i]->kobj);
973 	}
974 	schemes->nr = 0;
975 	kfree(schemes_arr);
976 	schemes->schemes_arr = NULL;
977 }
978 
979 static int damon_sysfs_schemes_add_dirs(struct damon_sysfs_schemes *schemes,
980 		int nr_schemes)
981 {
982 	struct damon_sysfs_scheme **schemes_arr, *scheme;
983 	int err, i;
984 
985 	damon_sysfs_schemes_rm_dirs(schemes);
986 	if (!nr_schemes)
987 		return 0;
988 
989 	schemes_arr = kmalloc_array(nr_schemes, sizeof(*schemes_arr),
990 			GFP_KERNEL | __GFP_NOWARN);
991 	if (!schemes_arr)
992 		return -ENOMEM;
993 	schemes->schemes_arr = schemes_arr;
994 
995 	for (i = 0; i < nr_schemes; i++) {
996 		scheme = damon_sysfs_scheme_alloc(DAMOS_STAT);
997 		if (!scheme) {
998 			damon_sysfs_schemes_rm_dirs(schemes);
999 			return -ENOMEM;
1000 		}
1001 
1002 		err = kobject_init_and_add(&scheme->kobj,
1003 				&damon_sysfs_scheme_ktype, &schemes->kobj,
1004 				"%d", i);
1005 		if (err)
1006 			goto out;
1007 		err = damon_sysfs_scheme_add_dirs(scheme);
1008 		if (err)
1009 			goto out;
1010 
1011 		schemes_arr[i] = scheme;
1012 		schemes->nr++;
1013 	}
1014 	return 0;
1015 
1016 out:
1017 	damon_sysfs_schemes_rm_dirs(schemes);
1018 	kobject_put(&scheme->kobj);
1019 	return err;
1020 }
1021 
1022 static ssize_t nr_schemes_show(struct kobject *kobj,
1023 		struct kobj_attribute *attr, char *buf)
1024 {
1025 	struct damon_sysfs_schemes *schemes = container_of(kobj,
1026 			struct damon_sysfs_schemes, kobj);
1027 
1028 	return sysfs_emit(buf, "%d\n", schemes->nr);
1029 }
1030 
1031 static ssize_t nr_schemes_store(struct kobject *kobj,
1032 		struct kobj_attribute *attr, const char *buf, size_t count)
1033 {
1034 	struct damon_sysfs_schemes *schemes = container_of(kobj,
1035 			struct damon_sysfs_schemes, kobj);
1036 	int nr, err = kstrtoint(buf, 0, &nr);
1037 
1038 	if (err)
1039 		return err;
1040 	if (nr < 0)
1041 		return -EINVAL;
1042 
1043 	if (!mutex_trylock(&damon_sysfs_lock))
1044 		return -EBUSY;
1045 	err = damon_sysfs_schemes_add_dirs(schemes, nr);
1046 	mutex_unlock(&damon_sysfs_lock);
1047 	if (err)
1048 		return err;
1049 	return count;
1050 }
1051 
1052 static void damon_sysfs_schemes_release(struct kobject *kobj)
1053 {
1054 	kfree(container_of(kobj, struct damon_sysfs_schemes, kobj));
1055 }
1056 
1057 static struct kobj_attribute damon_sysfs_schemes_nr_attr =
1058 		__ATTR_RW_MODE(nr_schemes, 0600);
1059 
1060 static struct attribute *damon_sysfs_schemes_attrs[] = {
1061 	&damon_sysfs_schemes_nr_attr.attr,
1062 	NULL,
1063 };
1064 ATTRIBUTE_GROUPS(damon_sysfs_schemes);
1065 
1066 static struct kobj_type damon_sysfs_schemes_ktype = {
1067 	.release = damon_sysfs_schemes_release,
1068 	.sysfs_ops = &kobj_sysfs_ops,
1069 	.default_groups = damon_sysfs_schemes_groups,
1070 };
1071 
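/*
 * Minimal scheme configuration sketch, relative to the assumed
 * /sys/kernel/mm/damon/admin root.  New scheme directories start with the
 * "stat" action (see the damon_sysfs_scheme_alloc(DAMOS_STAT) call in
 * damon_sysfs_schemes_add_dirs() above); the action file accepts any string
 * in damon_sysfs_damos_action_strs.
 *
 *	# echo 1 > kdamonds/0/contexts/0/schemes/nr_schemes
 *	# echo pageout > kdamonds/0/contexts/0/schemes/0/action
 *	# echo $((4 << 10)) > kdamonds/0/contexts/0/schemes/0/access_pattern/sz/min
 */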
1072 /*
1073  * init region directory (a numbered child of each target's "regions")
1074  */
1075 
1076 struct damon_sysfs_region {
1077 	struct kobject kobj;
1078 	unsigned long start;
1079 	unsigned long end;
1080 };
1081 
1082 static struct damon_sysfs_region *damon_sysfs_region_alloc(
1083 		unsigned long start,
1084 		unsigned long end)
1085 {
1086 	struct damon_sysfs_region *region = kmalloc(sizeof(*region),
1087 			GFP_KERNEL);
1088 
1089 	if (!region)
1090 		return NULL;
1091 	region->kobj = (struct kobject){};
1092 	region->start = start;
1093 	region->end = end;
1094 	return region;
1095 }
1096 
1097 static ssize_t start_show(struct kobject *kobj, struct kobj_attribute *attr,
1098 		char *buf)
1099 {
1100 	struct damon_sysfs_region *region = container_of(kobj,
1101 			struct damon_sysfs_region, kobj);
1102 
1103 	return sysfs_emit(buf, "%lu\n", region->start);
1104 }
1105 
1106 static ssize_t start_store(struct kobject *kobj, struct kobj_attribute *attr,
1107 		const char *buf, size_t count)
1108 {
1109 	struct damon_sysfs_region *region = container_of(kobj,
1110 			struct damon_sysfs_region, kobj);
1111 	int err = kstrtoul(buf, 0, &region->start);
1112 
1113 	if (err)
1114 		return -EINVAL;
1115 	return count;
1116 }
1117 
1118 static ssize_t end_show(struct kobject *kobj, struct kobj_attribute *attr,
1119 		char *buf)
1120 {
1121 	struct damon_sysfs_region *region = container_of(kobj,
1122 			struct damon_sysfs_region, kobj);
1123 
1124 	return sysfs_emit(buf, "%lu\n", region->end);
1125 }
1126 
1127 static ssize_t end_store(struct kobject *kobj, struct kobj_attribute *attr,
1128 		const char *buf, size_t count)
1129 {
1130 	struct damon_sysfs_region *region = container_of(kobj,
1131 			struct damon_sysfs_region, kobj);
1132 	int err = kstrtoul(buf, 0, &region->end);
1133 
1134 	if (err)
1135 		return -EINVAL;
1136 	return count;
1137 }
1138 
1139 static void damon_sysfs_region_release(struct kobject *kobj)
1140 {
1141 	kfree(container_of(kobj, struct damon_sysfs_region, kobj));
1142 }
1143 
1144 static struct kobj_attribute damon_sysfs_region_start_attr =
1145 		__ATTR_RW_MODE(start, 0600);
1146 
1147 static struct kobj_attribute damon_sysfs_region_end_attr =
1148 		__ATTR_RW_MODE(end, 0600);
1149 
1150 static struct attribute *damon_sysfs_region_attrs[] = {
1151 	&damon_sysfs_region_start_attr.attr,
1152 	&damon_sysfs_region_end_attr.attr,
1153 	NULL,
1154 };
1155 ATTRIBUTE_GROUPS(damon_sysfs_region);
1156 
1157 static struct kobj_type damon_sysfs_region_ktype = {
1158 	.release = damon_sysfs_region_release,
1159 	.sysfs_ops = &kobj_sysfs_ops,
1160 	.default_groups = damon_sysfs_region_groups,
1161 };
1162 
1163 /*
1164  * init_regions directory (appears as "regions" under each target)
1165  */
1166 
1167 struct damon_sysfs_regions {
1168 	struct kobject kobj;
1169 	struct damon_sysfs_region **regions_arr;
1170 	int nr;
1171 };
1172 
1173 static struct damon_sysfs_regions *damon_sysfs_regions_alloc(void)
1174 {
1175 	return kzalloc(sizeof(struct damon_sysfs_regions), GFP_KERNEL);
1176 }
1177 
1178 static void damon_sysfs_regions_rm_dirs(struct damon_sysfs_regions *regions)
1179 {
1180 	struct damon_sysfs_region **regions_arr = regions->regions_arr;
1181 	int i;
1182 
1183 	for (i = 0; i < regions->nr; i++)
1184 		kobject_put(&regions_arr[i]->kobj);
1185 	regions->nr = 0;
1186 	kfree(regions_arr);
1187 	regions->regions_arr = NULL;
1188 }
1189 
1190 static int damon_sysfs_regions_add_dirs(struct damon_sysfs_regions *regions,
1191 		int nr_regions)
1192 {
1193 	struct damon_sysfs_region **regions_arr, *region;
1194 	int err, i;
1195 
1196 	damon_sysfs_regions_rm_dirs(regions);
1197 	if (!nr_regions)
1198 		return 0;
1199 
1200 	regions_arr = kmalloc_array(nr_regions, sizeof(*regions_arr),
1201 			GFP_KERNEL | __GFP_NOWARN);
1202 	if (!regions_arr)
1203 		return -ENOMEM;
1204 	regions->regions_arr = regions_arr;
1205 
1206 	for (i = 0; i < nr_regions; i++) {
1207 		region = damon_sysfs_region_alloc(0, 0);
1208 		if (!region) {
1209 			damon_sysfs_regions_rm_dirs(regions);
1210 			return -ENOMEM;
1211 		}
1212 
1213 		err = kobject_init_and_add(&region->kobj,
1214 				&damon_sysfs_region_ktype, &regions->kobj,
1215 				"%d", i);
1216 		if (err) {
1217 			kobject_put(&region->kobj);
1218 			damon_sysfs_regions_rm_dirs(regions);
1219 			return err;
1220 		}
1221 
1222 		regions_arr[i] = region;
1223 		regions->nr++;
1224 	}
1225 	return 0;
1226 }
1227 
1228 static ssize_t nr_regions_show(struct kobject *kobj,
1229 		struct kobj_attribute *attr, char *buf)
1230 {
1231 	struct damon_sysfs_regions *regions = container_of(kobj,
1232 			struct damon_sysfs_regions, kobj);
1233 
1234 	return sysfs_emit(buf, "%d\n", regions->nr);
1235 }
1236 
1237 static ssize_t nr_regions_store(struct kobject *kobj,
1238 		struct kobj_attribute *attr, const char *buf, size_t count)
1239 {
1240 	struct damon_sysfs_regions *regions = container_of(kobj,
1241 			struct damon_sysfs_regions, kobj);
1242 	int nr, err = kstrtoint(buf, 0, &nr);
1243 
1244 	if (err)
1245 		return err;
1246 	if (nr < 0)
1247 		return -EINVAL;
1248 
1249 	if (!mutex_trylock(&damon_sysfs_lock))
1250 		return -EBUSY;
1251 	err = damon_sysfs_regions_add_dirs(regions, nr);
1252 	mutex_unlock(&damon_sysfs_lock);
1253 	if (err)
1254 		return err;
1255 
1256 	return count;
1257 }
1258 
1259 static void damon_sysfs_regions_release(struct kobject *kobj)
1260 {
1261 	kfree(container_of(kobj, struct damon_sysfs_regions, kobj));
1262 }
1263 
1264 static struct kobj_attribute damon_sysfs_regions_nr_attr =
1265 		__ATTR_RW_MODE(nr_regions, 0600);
1266 
1267 static struct attribute *damon_sysfs_regions_attrs[] = {
1268 	&damon_sysfs_regions_nr_attr.attr,
1269 	NULL,
1270 };
1271 ATTRIBUTE_GROUPS(damon_sysfs_regions);
1272 
1273 static struct kobj_type damon_sysfs_regions_ktype = {
1274 	.release = damon_sysfs_regions_release,
1275 	.sysfs_ops = &kobj_sysfs_ops,
1276 	.default_groups = damon_sysfs_regions_groups,
1277 };
1278 
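/*
 * Illustrative setup of a target's initial monitoring regions, relative to
 * the assumed /sys/kernel/mm/damon/admin root.  start and end are plain byte
 * addresses.
 *
 *	# echo 1 > kdamonds/0/contexts/0/targets/0/regions/nr_regions
 *	# echo 0x100000000 > kdamonds/0/contexts/0/targets/0/regions/0/start
 *	# echo 0x110000000 > kdamonds/0/contexts/0/targets/0/regions/0/end
 */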
1279 /*
1280  * target directory
1281  */
1282 
1283 struct damon_sysfs_target {
1284 	struct kobject kobj;
1285 	struct damon_sysfs_regions *regions;
1286 	int pid;
1287 };
1288 
1289 static struct damon_sysfs_target *damon_sysfs_target_alloc(void)
1290 {
1291 	return kzalloc(sizeof(struct damon_sysfs_target), GFP_KERNEL);
1292 }
1293 
1294 static int damon_sysfs_target_add_dirs(struct damon_sysfs_target *target)
1295 {
1296 	struct damon_sysfs_regions *regions = damon_sysfs_regions_alloc();
1297 	int err;
1298 
1299 	if (!regions)
1300 		return -ENOMEM;
1301 
1302 	err = kobject_init_and_add(&regions->kobj, &damon_sysfs_regions_ktype,
1303 			&target->kobj, "regions");
1304 	if (err)
1305 		kobject_put(&regions->kobj);
1306 	else
1307 		target->regions = regions;
1308 	return err;
1309 }
1310 
1311 static void damon_sysfs_target_rm_dirs(struct damon_sysfs_target *target)
1312 {
1313 	damon_sysfs_regions_rm_dirs(target->regions);
1314 	kobject_put(&target->regions->kobj);
1315 }
1316 
1317 static ssize_t pid_target_show(struct kobject *kobj,
1318 		struct kobj_attribute *attr, char *buf)
1319 {
1320 	struct damon_sysfs_target *target = container_of(kobj,
1321 			struct damon_sysfs_target, kobj);
1322 
1323 	return sysfs_emit(buf, "%d\n", target->pid);
1324 }
1325 
1326 static ssize_t pid_target_store(struct kobject *kobj,
1327 		struct kobj_attribute *attr, const char *buf, size_t count)
1328 {
1329 	struct damon_sysfs_target *target = container_of(kobj,
1330 			struct damon_sysfs_target, kobj);
1331 	int err = kstrtoint(buf, 0, &target->pid);
1332 
1333 	if (err)
1334 		return -EINVAL;
1335 	return count;
1336 }
1337 
1338 static void damon_sysfs_target_release(struct kobject *kobj)
1339 {
1340 	kfree(container_of(kobj, struct damon_sysfs_target, kobj));
1341 }
1342 
1343 static struct kobj_attribute damon_sysfs_target_pid_attr =
1344 		__ATTR_RW_MODE(pid_target, 0600);
1345 
1346 static struct attribute *damon_sysfs_target_attrs[] = {
1347 	&damon_sysfs_target_pid_attr.attr,
1348 	NULL,
1349 };
1350 ATTRIBUTE_GROUPS(damon_sysfs_target);
1351 
1352 static struct kobj_type damon_sysfs_target_ktype = {
1353 	.release = damon_sysfs_target_release,
1354 	.sysfs_ops = &kobj_sysfs_ops,
1355 	.default_groups = damon_sysfs_target_groups,
1356 };
1357 
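/*
 * Target setup sketch for the "vaddr" operations set, relative to the
 * assumed /sys/kernel/mm/damon/admin root.  pid_target is stored only as an
 * integer here; it is assumed to be resolved to a struct pid when the
 * kdamond is turned on, later in this file.
 *
 *	# echo 1 > kdamonds/0/contexts/0/targets/nr_targets
 *	# echo 1234 > kdamonds/0/contexts/0/targets/0/pid_target
 */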
1358 /*
1359  * targets directory
1360  */
1361 
1362 struct damon_sysfs_targets {
1363 	struct kobject kobj;
1364 	struct damon_sysfs_target **targets_arr;
1365 	int nr;
1366 };
1367 
1368 static struct damon_sysfs_targets *damon_sysfs_targets_alloc(void)
1369 {
1370 	return kzalloc(sizeof(struct damon_sysfs_targets), GFP_KERNEL);
1371 }
1372 
1373 static void damon_sysfs_targets_rm_dirs(struct damon_sysfs_targets *targets)
1374 {
1375 	struct damon_sysfs_target **targets_arr = targets->targets_arr;
1376 	int i;
1377 
1378 	for (i = 0; i < targets->nr; i++) {
1379 		damon_sysfs_target_rm_dirs(targets_arr[i]);
1380 		kobject_put(&targets_arr[i]->kobj);
1381 	}
1382 	targets->nr = 0;
1383 	kfree(targets_arr);
1384 	targets->targets_arr = NULL;
1385 }
1386 
1387 static int damon_sysfs_targets_add_dirs(struct damon_sysfs_targets *targets,
1388 		int nr_targets)
1389 {
1390 	struct damon_sysfs_target **targets_arr, *target;
1391 	int err, i;
1392 
1393 	damon_sysfs_targets_rm_dirs(targets);
1394 	if (!nr_targets)
1395 		return 0;
1396 
1397 	targets_arr = kmalloc_array(nr_targets, sizeof(*targets_arr),
1398 			GFP_KERNEL | __GFP_NOWARN);
1399 	if (!targets_arr)
1400 		return -ENOMEM;
1401 	targets->targets_arr = targets_arr;
1402 
1403 	for (i = 0; i < nr_targets; i++) {
1404 		target = damon_sysfs_target_alloc();
1405 		if (!target) {
1406 			damon_sysfs_targets_rm_dirs(targets);
1407 			return -ENOMEM;
1408 		}
1409 
1410 		err = kobject_init_and_add(&target->kobj,
1411 				&damon_sysfs_target_ktype, &targets->kobj,
1412 				"%d", i);
1413 		if (err)
1414 			goto out;
1415 
1416 		err = damon_sysfs_target_add_dirs(target);
1417 		if (err)
1418 			goto out;
1419 
1420 		targets_arr[i] = target;
1421 		targets->nr++;
1422 	}
1423 	return 0;
1424 
1425 out:
1426 	damon_sysfs_targets_rm_dirs(targets);
1427 	kobject_put(&target->kobj);
1428 	return err;
1429 }
1430 
1431 static ssize_t nr_targets_show(struct kobject *kobj,
1432 		struct kobj_attribute *attr, char *buf)
1433 {
1434 	struct damon_sysfs_targets *targets = container_of(kobj,
1435 			struct damon_sysfs_targets, kobj);
1436 
1437 	return sysfs_emit(buf, "%d\n", targets->nr);
1438 }
1439 
1440 static ssize_t nr_targets_store(struct kobject *kobj,
1441 		struct kobj_attribute *attr, const char *buf, size_t count)
1442 {
1443 	struct damon_sysfs_targets *targets = container_of(kobj,
1444 			struct damon_sysfs_targets, kobj);
1445 	int nr, err = kstrtoint(buf, 0, &nr);
1446 
1447 	if (err)
1448 		return err;
1449 	if (nr < 0)
1450 		return -EINVAL;
1451 
1452 	if (!mutex_trylock(&damon_sysfs_lock))
1453 		return -EBUSY;
1454 	err = damon_sysfs_targets_add_dirs(targets, nr);
1455 	mutex_unlock(&damon_sysfs_lock);
1456 	if (err)
1457 		return err;
1458 
1459 	return count;
1460 }
1461 
1462 static void damon_sysfs_targets_release(struct kobject *kobj)
1463 {
1464 	kfree(container_of(kobj, struct damon_sysfs_targets, kobj));
1465 }
1466 
1467 static struct kobj_attribute damon_sysfs_targets_nr_attr =
1468 		__ATTR_RW_MODE(nr_targets, 0600);
1469 
1470 static struct attribute *damon_sysfs_targets_attrs[] = {
1471 	&damon_sysfs_targets_nr_attr.attr,
1472 	NULL,
1473 };
1474 ATTRIBUTE_GROUPS(damon_sysfs_targets);
1475 
1476 static struct kobj_type damon_sysfs_targets_ktype = {
1477 	.release = damon_sysfs_targets_release,
1478 	.sysfs_ops = &kobj_sysfs_ops,
1479 	.default_groups = damon_sysfs_targets_groups,
1480 };
1481 
1482 /*
1483  * intervals directory
1484  */
1485 
1486 struct damon_sysfs_intervals {
1487 	struct kobject kobj;
1488 	unsigned long sample_us;
1489 	unsigned long aggr_us;
1490 	unsigned long update_us;
1491 };
1492 
1493 static struct damon_sysfs_intervals *damon_sysfs_intervals_alloc(
1494 		unsigned long sample_us, unsigned long aggr_us,
1495 		unsigned long update_us)
1496 {
1497 	struct damon_sysfs_intervals *intervals = kmalloc(sizeof(*intervals),
1498 			GFP_KERNEL);
1499 
1500 	if (!intervals)
1501 		return NULL;
1502 
1503 	intervals->kobj = (struct kobject){};
1504 	intervals->sample_us = sample_us;
1505 	intervals->aggr_us = aggr_us;
1506 	intervals->update_us = update_us;
1507 	return intervals;
1508 }
1509 
1510 static ssize_t sample_us_show(struct kobject *kobj,
1511 		struct kobj_attribute *attr, char *buf)
1512 {
1513 	struct damon_sysfs_intervals *intervals = container_of(kobj,
1514 			struct damon_sysfs_intervals, kobj);
1515 
1516 	return sysfs_emit(buf, "%lu\n", intervals->sample_us);
1517 }
1518 
1519 static ssize_t sample_us_store(struct kobject *kobj,
1520 		struct kobj_attribute *attr, const char *buf, size_t count)
1521 {
1522 	struct damon_sysfs_intervals *intervals = container_of(kobj,
1523 			struct damon_sysfs_intervals, kobj);
1524 	unsigned long us;
1525 	int err = kstrtoul(buf, 0, &us);
1526 
1527 	if (err)
1528 		return -EINVAL;
1529 
1530 	intervals->sample_us = us;
1531 	return count;
1532 }
1533 
1534 static ssize_t aggr_us_show(struct kobject *kobj, struct kobj_attribute *attr,
1535 		char *buf)
1536 {
1537 	struct damon_sysfs_intervals *intervals = container_of(kobj,
1538 			struct damon_sysfs_intervals, kobj);
1539 
1540 	return sysfs_emit(buf, "%lu\n", intervals->aggr_us);
1541 }
1542 
1543 static ssize_t aggr_us_store(struct kobject *kobj, struct kobj_attribute *attr,
1544 		const char *buf, size_t count)
1545 {
1546 	struct damon_sysfs_intervals *intervals = container_of(kobj,
1547 			struct damon_sysfs_intervals, kobj);
1548 	unsigned long us;
1549 	int err = kstrtoul(buf, 0, &us);
1550 
1551 	if (err)
1552 		return -EINVAL;
1553 
1554 	intervals->aggr_us = us;
1555 	return count;
1556 }
1557 
1558 static ssize_t update_us_show(struct kobject *kobj,
1559 		struct kobj_attribute *attr, char *buf)
1560 {
1561 	struct damon_sysfs_intervals *intervals = container_of(kobj,
1562 			struct damon_sysfs_intervals, kobj);
1563 
1564 	return sysfs_emit(buf, "%lu\n", intervals->update_us);
1565 }
1566 
1567 static ssize_t update_us_store(struct kobject *kobj,
1568 		struct kobj_attribute *attr, const char *buf, size_t count)
1569 {
1570 	struct damon_sysfs_intervals *intervals = container_of(kobj,
1571 			struct damon_sysfs_intervals, kobj);
1572 	unsigned long us;
1573 	int err = kstrtoul(buf, 0, &us);
1574 
1575 	if (err)
1576 		return -EINVAL;
1577 
1578 	intervals->update_us = us;
1579 	return count;
1580 }
1581 
1582 static void damon_sysfs_intervals_release(struct kobject *kobj)
1583 {
1584 	kfree(container_of(kobj, struct damon_sysfs_intervals, kobj));
1585 }
1586 
1587 static struct kobj_attribute damon_sysfs_intervals_sample_us_attr =
1588 		__ATTR_RW_MODE(sample_us, 0600);
1589 
1590 static struct kobj_attribute damon_sysfs_intervals_aggr_us_attr =
1591 		__ATTR_RW_MODE(aggr_us, 0600);
1592 
1593 static struct kobj_attribute damon_sysfs_intervals_update_us_attr =
1594 		__ATTR_RW_MODE(update_us, 0600);
1595 
1596 static struct attribute *damon_sysfs_intervals_attrs[] = {
1597 	&damon_sysfs_intervals_sample_us_attr.attr,
1598 	&damon_sysfs_intervals_aggr_us_attr.attr,
1599 	&damon_sysfs_intervals_update_us_attr.attr,
1600 	NULL,
1601 };
1602 ATTRIBUTE_GROUPS(damon_sysfs_intervals);
1603 
1604 static struct kobj_type damon_sysfs_intervals_ktype = {
1605 	.release = damon_sysfs_intervals_release,
1606 	.sysfs_ops = &kobj_sysfs_ops,
1607 	.default_groups = damon_sysfs_intervals_groups,
1608 };
1609 
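/*
 * Monitoring intervals, in microseconds.  The defaults set in
 * damon_sysfs_attrs_add_dirs() below are 5ms sampling, 100ms aggregation and
 * 60s operations update.  Illustrative tuning, relative to the assumed
 * /sys/kernel/mm/damon/admin root:
 *
 *	# echo 1000 > kdamonds/0/contexts/0/monitoring_attrs/intervals/sample_us
 *	# echo 200000 > kdamonds/0/contexts/0/monitoring_attrs/intervals/aggr_us
 */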
1610 /*
1611  * monitoring_attrs directory
1612  */
1613 
1614 struct damon_sysfs_attrs {
1615 	struct kobject kobj;
1616 	struct damon_sysfs_intervals *intervals;
1617 	struct damon_sysfs_ul_range *nr_regions_range;
1618 };
1619 
1620 static struct damon_sysfs_attrs *damon_sysfs_attrs_alloc(void)
1621 {
1622 	struct damon_sysfs_attrs *attrs = kmalloc(sizeof(*attrs), GFP_KERNEL);
1623 
1624 	if (!attrs)
1625 		return NULL;
1626 	attrs->kobj = (struct kobject){};
1627 	return attrs;
1628 }
1629 
1630 static int damon_sysfs_attrs_add_dirs(struct damon_sysfs_attrs *attrs)
1631 {
1632 	struct damon_sysfs_intervals *intervals;
1633 	struct damon_sysfs_ul_range *nr_regions_range;
1634 	int err;
1635 
1636 	intervals = damon_sysfs_intervals_alloc(5000, 100000, 60000000);
1637 	if (!intervals)
1638 		return -ENOMEM;
1639 
1640 	err = kobject_init_and_add(&intervals->kobj,
1641 			&damon_sysfs_intervals_ktype, &attrs->kobj,
1642 			"intervals");
1643 	if (err)
1644 		goto put_intervals_out;
1645 	attrs->intervals = intervals;
1646 
1647 	nr_regions_range = damon_sysfs_ul_range_alloc(10, 1000);
1648 	if (!nr_regions_range) {
1649 		err = -ENOMEM;
1650 		goto put_intervals_out;
1651 	}
1652 
1653 	err = kobject_init_and_add(&nr_regions_range->kobj,
1654 			&damon_sysfs_ul_range_ktype, &attrs->kobj,
1655 			"nr_regions");
1656 	if (err)
1657 		goto put_nr_regions_intervals_out;
1658 	attrs->nr_regions_range = nr_regions_range;
1659 	return 0;
1660 
1661 put_nr_regions_intervals_out:
1662 	kobject_put(&nr_regions_range->kobj);
1663 	attrs->nr_regions_range = NULL;
1664 put_intervals_out:
1665 	kobject_put(&intervals->kobj);
1666 	attrs->intervals = NULL;
1667 	return err;
1668 }
1669 
1670 static void damon_sysfs_attrs_rm_dirs(struct damon_sysfs_attrs *attrs)
1671 {
1672 	kobject_put(&attrs->nr_regions_range->kobj);
1673 	kobject_put(&attrs->intervals->kobj);
1674 }
1675 
1676 static void damon_sysfs_attrs_release(struct kobject *kobj)
1677 {
1678 	kfree(container_of(kobj, struct damon_sysfs_attrs, kobj));
1679 }
1680 
1681 static struct attribute *damon_sysfs_attrs_attrs[] = {
1682 	NULL,
1683 };
1684 ATTRIBUTE_GROUPS(damon_sysfs_attrs);
1685 
1686 static struct kobj_type damon_sysfs_attrs_ktype = {
1687 	.release = damon_sysfs_attrs_release,
1688 	.sysfs_ops = &kobj_sysfs_ops,
1689 	.default_groups = damon_sysfs_attrs_groups,
1690 };
1691 
1692 /*
1693  * context directory
1694  */
1695 
1696 /* This should match with enum damon_ops_id */
1697 static const char * const damon_sysfs_ops_strs[] = {
1698 	"vaddr",
1699 	"fvaddr",
1700 	"paddr",
1701 };
1702 
1703 struct damon_sysfs_context {
1704 	struct kobject kobj;
1705 	enum damon_ops_id ops_id;
1706 	struct damon_sysfs_attrs *attrs;
1707 	struct damon_sysfs_targets *targets;
1708 	struct damon_sysfs_schemes *schemes;
1709 };
1710 
1711 static struct damon_sysfs_context *damon_sysfs_context_alloc(
1712 		enum damon_ops_id ops_id)
1713 {
1714 	struct damon_sysfs_context *context = kmalloc(sizeof(*context),
1715 				GFP_KERNEL);
1716 
1717 	if (!context)
1718 		return NULL;
1719 	context->kobj = (struct kobject){};
1720 	context->ops_id = ops_id;
1721 	return context;
1722 }
1723 
1724 static int damon_sysfs_context_set_attrs(struct damon_sysfs_context *context)
1725 {
1726 	struct damon_sysfs_attrs *attrs = damon_sysfs_attrs_alloc();
1727 	int err;
1728 
1729 	if (!attrs)
1730 		return -ENOMEM;
1731 	err = kobject_init_and_add(&attrs->kobj, &damon_sysfs_attrs_ktype,
1732 			&context->kobj, "monitoring_attrs");
1733 	if (err)
1734 		goto out;
1735 	err = damon_sysfs_attrs_add_dirs(attrs);
1736 	if (err)
1737 		goto out;
1738 	context->attrs = attrs;
1739 	return 0;
1740 
1741 out:
1742 	kobject_put(&attrs->kobj);
1743 	return err;
1744 }
1745 
1746 static int damon_sysfs_context_set_targets(struct damon_sysfs_context *context)
1747 {
1748 	struct damon_sysfs_targets *targets = damon_sysfs_targets_alloc();
1749 	int err;
1750 
1751 	if (!targets)
1752 		return -ENOMEM;
1753 	err = kobject_init_and_add(&targets->kobj, &damon_sysfs_targets_ktype,
1754 			&context->kobj, "targets");
1755 	if (err) {
1756 		kobject_put(&targets->kobj);
1757 		return err;
1758 	}
1759 	context->targets = targets;
1760 	return 0;
1761 }
1762 
1763 static int damon_sysfs_context_set_schemes(struct damon_sysfs_context *context)
1764 {
1765 	struct damon_sysfs_schemes *schemes = damon_sysfs_schemes_alloc();
1766 	int err;
1767 
1768 	if (!schemes)
1769 		return -ENOMEM;
1770 	err = kobject_init_and_add(&schemes->kobj, &damon_sysfs_schemes_ktype,
1771 			&context->kobj, "schemes");
1772 	if (err) {
1773 		kobject_put(&schemes->kobj);
1774 		return err;
1775 	}
1776 	context->schemes = schemes;
1777 	return 0;
1778 }
1779 
1780 static int damon_sysfs_context_add_dirs(struct damon_sysfs_context *context)
1781 {
1782 	int err;
1783 
1784 	err = damon_sysfs_context_set_attrs(context);
1785 	if (err)
1786 		return err;
1787 
1788 	err = damon_sysfs_context_set_targets(context);
1789 	if (err)
1790 		goto put_attrs_out;
1791 
1792 	err = damon_sysfs_context_set_schemes(context);
1793 	if (err)
1794 		goto put_targets_attrs_out;
1795 	return 0;
1796 
1797 put_targets_attrs_out:
1798 	kobject_put(&context->targets->kobj);
1799 	context->targets = NULL;
1800 put_attrs_out:
1801 	kobject_put(&context->attrs->kobj);
1802 	context->attrs = NULL;
1803 	return err;
1804 }
1805 
1806 static void damon_sysfs_context_rm_dirs(struct damon_sysfs_context *context)
1807 {
1808 	damon_sysfs_attrs_rm_dirs(context->attrs);
1809 	kobject_put(&context->attrs->kobj);
1810 	damon_sysfs_targets_rm_dirs(context->targets);
1811 	kobject_put(&context->targets->kobj);
1812 	damon_sysfs_schemes_rm_dirs(context->schemes);
1813 	kobject_put(&context->schemes->kobj);
1814 }
1815 
1816 static ssize_t avail_operations_show(struct kobject *kobj,
1817 		struct kobj_attribute *attr, char *buf)
1818 {
1819 	enum damon_ops_id id;
1820 	int len = 0;
1821 
1822 	for (id = 0; id < NR_DAMON_OPS; id++) {
1823 		if (!damon_is_registered_ops(id))
1824 			continue;
1825 		len += sysfs_emit_at(buf, len, "%s\n",
1826 				damon_sysfs_ops_strs[id]);
1827 	}
1828 	return len;
1829 }
1830 
1831 static ssize_t operations_show(struct kobject *kobj,
1832 		struct kobj_attribute *attr, char *buf)
1833 {
1834 	struct damon_sysfs_context *context = container_of(kobj,
1835 			struct damon_sysfs_context, kobj);
1836 
1837 	return sysfs_emit(buf, "%s\n", damon_sysfs_ops_strs[context->ops_id]);
1838 }
1839 
1840 static ssize_t operations_store(struct kobject *kobj,
1841 		struct kobj_attribute *attr, const char *buf, size_t count)
1842 {
1843 	struct damon_sysfs_context *context = container_of(kobj,
1844 			struct damon_sysfs_context, kobj);
1845 	enum damon_ops_id id;
1846 
1847 	for (id = 0; id < NR_DAMON_OPS; id++) {
1848 		if (sysfs_streq(buf, damon_sysfs_ops_strs[id])) {
1849 			context->ops_id = id;
1850 			return count;
1851 		}
1852 	}
1853 	return -EINVAL;
1854 }
1855 
1856 static void damon_sysfs_context_release(struct kobject *kobj)
1857 {
1858 	kfree(container_of(kobj, struct damon_sysfs_context, kobj));
1859 }
1860 
1861 static struct kobj_attribute damon_sysfs_context_avail_operations_attr =
1862 		__ATTR_RO_MODE(avail_operations, 0400);
1863 
1864 static struct kobj_attribute damon_sysfs_context_operations_attr =
1865 		__ATTR_RW_MODE(operations, 0600);
1866 
1867 static struct attribute *damon_sysfs_context_attrs[] = {
1868 	&damon_sysfs_context_avail_operations_attr.attr,
1869 	&damon_sysfs_context_operations_attr.attr,
1870 	NULL,
1871 };
1872 ATTRIBUTE_GROUPS(damon_sysfs_context);
1873 
1874 static struct kobj_type damon_sysfs_context_ktype = {
1875 	.release = damon_sysfs_context_release,
1876 	.sysfs_ops = &kobj_sysfs_ops,
1877 	.default_groups = damon_sysfs_context_groups,
1878 };
1879 
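/*
 * Illustrative operations-set selection, relative to the assumed
 * /sys/kernel/mm/damon/admin root.  avail_operations is read-only and lists
 * only the sets registered per damon_is_registered_ops(); new context
 * directories default to "vaddr" (see the
 * damon_sysfs_context_alloc(DAMON_OPS_VADDR) call below).
 *
 *	# cat kdamonds/0/contexts/0/avail_operations
 *	# echo paddr > kdamonds/0/contexts/0/operations
 */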
1880 /*
1881  * contexts directory
1882  */
1883 
1884 struct damon_sysfs_contexts {
1885 	struct kobject kobj;
1886 	struct damon_sysfs_context **contexts_arr;
1887 	int nr;
1888 };
1889 
1890 static struct damon_sysfs_contexts *damon_sysfs_contexts_alloc(void)
1891 {
1892 	return kzalloc(sizeof(struct damon_sysfs_contexts), GFP_KERNEL);
1893 }
1894 
1895 static void damon_sysfs_contexts_rm_dirs(struct damon_sysfs_contexts *contexts)
1896 {
1897 	struct damon_sysfs_context **contexts_arr = contexts->contexts_arr;
1898 	int i;
1899 
1900 	for (i = 0; i < contexts->nr; i++) {
1901 		damon_sysfs_context_rm_dirs(contexts_arr[i]);
1902 		kobject_put(&contexts_arr[i]->kobj);
1903 	}
1904 	contexts->nr = 0;
1905 	kfree(contexts_arr);
1906 	contexts->contexts_arr = NULL;
1907 }
1908 
1909 static int damon_sysfs_contexts_add_dirs(struct damon_sysfs_contexts *contexts,
1910 		int nr_contexts)
1911 {
1912 	struct damon_sysfs_context **contexts_arr, *context;
1913 	int err, i;
1914 
1915 	damon_sysfs_contexts_rm_dirs(contexts);
1916 	if (!nr_contexts)
1917 		return 0;
1918 
1919 	contexts_arr = kmalloc_array(nr_contexts, sizeof(*contexts_arr),
1920 			GFP_KERNEL | __GFP_NOWARN);
1921 	if (!contexts_arr)
1922 		return -ENOMEM;
1923 	contexts->contexts_arr = contexts_arr;
1924 
1925 	for (i = 0; i < nr_contexts; i++) {
1926 		context = damon_sysfs_context_alloc(DAMON_OPS_VADDR);
1927 		if (!context) {
1928 			damon_sysfs_contexts_rm_dirs(contexts);
1929 			return -ENOMEM;
1930 		}
1931 
1932 		err = kobject_init_and_add(&context->kobj,
1933 				&damon_sysfs_context_ktype, &contexts->kobj,
1934 				"%d", i);
1935 		if (err)
1936 			goto out;
1937 
1938 		err = damon_sysfs_context_add_dirs(context);
1939 		if (err)
1940 			goto out;
1941 
1942 		contexts_arr[i] = context;
1943 		contexts->nr++;
1944 	}
1945 	return 0;
1946 
1947 out:
1948 	damon_sysfs_contexts_rm_dirs(contexts);
1949 	kobject_put(&context->kobj);
1950 	return err;
1951 }
1952 
1953 static ssize_t nr_contexts_show(struct kobject *kobj,
1954 		struct kobj_attribute *attr, char *buf)
1955 {
1956 	struct damon_sysfs_contexts *contexts = container_of(kobj,
1957 			struct damon_sysfs_contexts, kobj);
1958 
1959 	return sysfs_emit(buf, "%d\n", contexts->nr);
1960 }
1961 
1962 static ssize_t nr_contexts_store(struct kobject *kobj,
1963 		struct kobj_attribute *attr, const char *buf, size_t count)
1964 {
1965 	struct damon_sysfs_contexts *contexts = container_of(kobj,
1966 			struct damon_sysfs_contexts, kobj);
1967 	int nr, err;
1968 
1969 	err = kstrtoint(buf, 0, &nr);
1970 	if (err)
1971 		return err;
1972 	/* TODO: support multiple contexts per kdamond */
1973 	if (nr < 0 || 1 < nr)
1974 		return -EINVAL;
1975 
1976 	if (!mutex_trylock(&damon_sysfs_lock))
1977 		return -EBUSY;
1978 	err = damon_sysfs_contexts_add_dirs(contexts, nr);
1979 	mutex_unlock(&damon_sysfs_lock);
1980 	if (err)
1981 		return err;
1982 
1983 	return count;
1984 }
1985 
1986 static void damon_sysfs_contexts_release(struct kobject *kobj)
1987 {
1988 	kfree(container_of(kobj, struct damon_sysfs_contexts, kobj));
1989 }
1990 
1991 static struct kobj_attribute damon_sysfs_contexts_nr_attr
1992 		= __ATTR_RW_MODE(nr_contexts, 0600);
1993 
1994 static struct attribute *damon_sysfs_contexts_attrs[] = {
1995 	&damon_sysfs_contexts_nr_attr.attr,
1996 	NULL,
1997 };
1998 ATTRIBUTE_GROUPS(damon_sysfs_contexts);
1999 
2000 static struct kobj_type damon_sysfs_contexts_ktype = {
2001 	.release = damon_sysfs_contexts_release,
2002 	.sysfs_ops = &kobj_sysfs_ops,
2003 	.default_groups = damon_sysfs_contexts_groups,
2004 };
2005 
2006 /*
2007  * kdamond directory
2008  */
2009 
2010 struct damon_sysfs_kdamond {
2011 	struct kobject kobj;
2012 	struct damon_sysfs_contexts *contexts;
2013 	struct damon_ctx *damon_ctx;
2014 };
2015 
2016 static struct damon_sysfs_kdamond *damon_sysfs_kdamond_alloc(void)
2017 {
2018 	return kzalloc(sizeof(struct damon_sysfs_kdamond), GFP_KERNEL);
2019 }
2020 
2021 static int damon_sysfs_kdamond_add_dirs(struct damon_sysfs_kdamond *kdamond)
2022 {
2023 	struct damon_sysfs_contexts *contexts;
2024 	int err;
2025 
2026 	contexts = damon_sysfs_contexts_alloc();
2027 	if (!contexts)
2028 		return -ENOMEM;
2029 
2030 	err = kobject_init_and_add(&contexts->kobj,
2031 			&damon_sysfs_contexts_ktype, &kdamond->kobj,
2032 			"contexts");
2033 	if (err) {
2034 		kobject_put(&contexts->kobj);
2035 		return err;
2036 	}
2037 	kdamond->contexts = contexts;
2038 
2039 	return err;
2040 }
2041 
2042 static void damon_sysfs_kdamond_rm_dirs(struct damon_sysfs_kdamond *kdamond)
2043 {
2044 	damon_sysfs_contexts_rm_dirs(kdamond->contexts);
2045 	kobject_put(&kdamond->contexts->kobj);
2046 }
2047 
2048 static bool damon_sysfs_ctx_running(struct damon_ctx *ctx)
2049 {
2050 	bool running;
2051 
2052 	mutex_lock(&ctx->kdamond_lock);
2053 	running = ctx->kdamond != NULL;
2054 	mutex_unlock(&ctx->kdamond_lock);
2055 	return running;
2056 }
2057 
2058 /*
2059  * enum damon_sysfs_cmd - Commands for a specific kdamond.
2060  */
2061 enum damon_sysfs_cmd {
2062 	/* @DAMON_SYSFS_CMD_ON: Turn the kdamond on. */
2063 	DAMON_SYSFS_CMD_ON,
2064 	/* @DAMON_SYSFS_CMD_OFF: Turn the kdamond off. */
2065 	DAMON_SYSFS_CMD_OFF,
2066 	/* @DAMON_SYSFS_CMD_COMMIT: Update kdamond inputs. */
2067 	DAMON_SYSFS_CMD_COMMIT,
2068 	/*
2069 	 * @DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS: Update scheme stats sysfs
2070 	 * files.
2071 	 */
2072 	DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS,
2073 	/*
2074 	 * @NR_DAMON_SYSFS_CMDS: Total number of DAMON sysfs commands.
2075 	 */
2076 	NR_DAMON_SYSFS_CMDS,
2077 };
2078 
2079 /* Should match enum damon_sysfs_cmd */
2080 static const char * const damon_sysfs_cmd_strs[] = {
2081 	"on",
2082 	"off",
2083 	"commit",
2084 	"update_schemes_stats",
2085 };
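/*
 * A rough usage sketch (not part of this file): the command strings above are
 * written to a kdamond's "state" file under the hierarchy that the code below
 * builds, rooted at <mm_kobj>/damon, e.g.:
 *
 *	# echo on > /sys/kernel/mm/damon/admin/kdamonds/0/state
 *	# echo commit > /sys/kernel/mm/damon/admin/kdamonds/0/state
 *	# echo off > /sys/kernel/mm/damon/admin/kdamonds/0/state
 */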
2086 
2087 /*
2088  * struct damon_sysfs_cmd_request - A request to the DAMON callback.
2089  * @cmd:	The command that needs to be handled by the callback.
2090  * @kdamond:	The kobject wrapper associated with the kdamond thread.
2091  *
2092  * This structure represents a sysfs command request that needs to access some
2093  * DAMON context-internal data.  Because DAMON context-internal data can be
2094  * safely accessed from DAMON callbacks without additional synchronization, the
2095  * request will be handled by the DAMON callback.  Non-``NULL`` @kdamond means
2096  * the request is valid.
2097  */
2098 struct damon_sysfs_cmd_request {
2099 	enum damon_sysfs_cmd cmd;
2100 	struct damon_sysfs_kdamond *kdamond;
2101 };
2102 
2103 /* Current DAMON callback request.  Protected by damon_sysfs_lock. */
2104 static struct damon_sysfs_cmd_request damon_sysfs_cmd_request;
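/*
 * Lifecycle of a request: damon_sysfs_handle_cmd() fills the structure above
 * while holding damon_sysfs_lock and then polls it, and
 * damon_sysfs_cmd_request_callback(), running in the kdamond context, handles
 * the command and resets the kdamond field to NULL to mark it as handled.
 */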
2105 
2106 static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
2107 		char *buf)
2108 {
2109 	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
2110 			struct damon_sysfs_kdamond, kobj);
2111 	struct damon_ctx *ctx = kdamond->damon_ctx;
2112 	bool running;
2113 
2114 	if (!ctx)
2115 		running = false;
2116 	else
2117 		running = damon_sysfs_ctx_running(ctx);
2118 
2119 	return sysfs_emit(buf, "%s\n", running ?
2120 			damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_ON] :
2121 			damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_OFF]);
2122 }
2123 
2124 static int damon_sysfs_set_attrs(struct damon_ctx *ctx,
2125 		struct damon_sysfs_attrs *sys_attrs)
2126 {
2127 	struct damon_sysfs_intervals *sys_intervals = sys_attrs->intervals;
2128 	struct damon_sysfs_ul_range *sys_nr_regions =
2129 		sys_attrs->nr_regions_range;
2130 
2131 	return damon_set_attrs(ctx, sys_intervals->sample_us,
2132 			sys_intervals->aggr_us, sys_intervals->update_us,
2133 			sys_nr_regions->min, sys_nr_regions->max);
2134 }
2135 
2136 static void damon_sysfs_destroy_targets(struct damon_ctx *ctx)
2137 {
2138 	struct damon_target *t, *next;
2139 
2140 	damon_for_each_target_safe(t, next, ctx) {
2141 		if (damon_target_has_pid(ctx))
2142 			put_pid(t->pid);
2143 		damon_destroy_target(t);
2144 	}
2145 }
2146 
2147 static int damon_sysfs_set_regions(struct damon_target *t,
2148 		struct damon_sysfs_regions *sysfs_regions)
2149 {
2150 	struct damon_addr_range *ranges = kmalloc_array(sysfs_regions->nr,
2151 			sizeof(*ranges), GFP_KERNEL | __GFP_NOWARN);
2152 	int i, err = -EINVAL;
2153 
2154 	if (!ranges)
2155 		return -ENOMEM;
2156 	for (i = 0; i < sysfs_regions->nr; i++) {
2157 		struct damon_sysfs_region *sys_region =
2158 			sysfs_regions->regions_arr[i];
2159 
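		/*
		 * Regions must be given as monotonically increasing,
		 * non-overlapping address ranges; reject anything else.
		 */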
2160 		if (sys_region->start > sys_region->end)
2161 			goto out;
2162 
2163 		ranges[i].start = sys_region->start;
2164 		ranges[i].end = sys_region->end;
2165 		if (i == 0)
2166 			continue;
2167 		if (ranges[i - 1].end > ranges[i].start)
2168 			goto out;
2169 	}
2170 	err = damon_set_regions(t, ranges, sysfs_regions->nr);
2171 out:
2172 	kfree(ranges);
2173 	return err;
2175 }
2176 
2177 static int damon_sysfs_add_target(struct damon_sysfs_target *sys_target,
2178 		struct damon_ctx *ctx)
2179 {
2180 	struct damon_target *t = damon_new_target();
2181 	int err = -EINVAL;
2182 
2183 	if (!t)
2184 		return -ENOMEM;
2185 	if (damon_target_has_pid(ctx)) {
2186 		t->pid = find_get_pid(sys_target->pid);
2187 		if (!t->pid)
2188 			goto destroy_targets_out;
2189 	}
2190 	damon_add_target(ctx, t);
2191 	err = damon_sysfs_set_regions(t, sys_target->regions);
2192 	if (err)
2193 		goto destroy_targets_out;
2194 	return 0;
2195 
2196 destroy_targets_out:
2197 	damon_sysfs_destroy_targets(ctx);
2198 	return err;
2199 }
2200 
2201 /*
2202  * Search for a target in a context that corresponds to the sysfs target input.
2203  *
2204  * Return: pointer to the target if found, NULL if not found, or an error
2205  * pointer if the search failed.
2206  */
2207 static struct damon_target *damon_sysfs_existing_target(
2208 		struct damon_sysfs_target *sys_target, struct damon_ctx *ctx)
2209 {
2210 	struct pid *pid;
2211 	struct damon_target *t;
2212 
2213 	if (!damon_target_has_pid(ctx)) {
2214 		/* At most one target for paddr could exist */
2215 		damon_for_each_target(t, ctx)
2216 			return t;
2217 		return NULL;
2218 	}
2219 
2220 	/* ops.id should be DAMON_OPS_VADDR or DAMON_OPS_FVADDR */
2221 	pid = find_get_pid(sys_target->pid);
2222 	if (!pid)
2223 		return ERR_PTR(-EINVAL);
2224 	damon_for_each_target(t, ctx) {
2225 		if (t->pid == pid) {
2226 			put_pid(pid);
2227 			return t;
2228 		}
2229 	}
2230 	put_pid(pid);
2231 	return NULL;
2232 }
2233 
2234 static int damon_sysfs_set_targets(struct damon_ctx *ctx,
2235 		struct damon_sysfs_targets *sysfs_targets)
2236 {
2237 	int i, err;
2238 
2239 	/* Multiple physical address space monitoring targets make no sense */
2240 	if (ctx->ops.id == DAMON_OPS_PADDR && sysfs_targets->nr > 1)
2241 		return -EINVAL;
2242 
2243 	for (i = 0; i < sysfs_targets->nr; i++) {
2244 		struct damon_sysfs_target *st = sysfs_targets->targets_arr[i];
2245 		struct damon_target *t = damon_sysfs_existing_target(st, ctx);
2246 
2247 		if (IS_ERR(t))
2248 			return PTR_ERR(t);
2249 		if (!t)
2250 			err = damon_sysfs_add_target(st, ctx);
2251 		else
2252 			err = damon_sysfs_set_regions(t, st->regions);
2253 		if (err)
2254 			return err;
2255 	}
2256 	return 0;
2257 }
2258 
2259 static struct damos *damon_sysfs_mk_scheme(
2260 		struct damon_sysfs_scheme *sysfs_scheme)
2261 {
2262 	struct damon_sysfs_access_pattern *pattern =
2263 		sysfs_scheme->access_pattern;
2264 	struct damon_sysfs_quotas *sysfs_quotas = sysfs_scheme->quotas;
2265 	struct damon_sysfs_weights *sysfs_weights = sysfs_quotas->weights;
2266 	struct damon_sysfs_watermarks *sysfs_wmarks = sysfs_scheme->watermarks;
2267 	struct damos_quota quota = {
2268 		.ms = sysfs_quotas->ms,
2269 		.sz = sysfs_quotas->sz,
2270 		.reset_interval = sysfs_quotas->reset_interval_ms,
2271 		.weight_sz = sysfs_weights->sz,
2272 		.weight_nr_accesses = sysfs_weights->nr_accesses,
2273 		.weight_age = sysfs_weights->age,
2274 	};
2275 	struct damos_watermarks wmarks = {
2276 		.metric = sysfs_wmarks->metric,
2277 		.interval = sysfs_wmarks->interval_us,
2278 		.high = sysfs_wmarks->high,
2279 		.mid = sysfs_wmarks->mid,
2280 		.low = sysfs_wmarks->low,
2281 	};
2282 
2283 	return damon_new_scheme(pattern->sz->min, pattern->sz->max,
2284 			pattern->nr_accesses->min, pattern->nr_accesses->max,
2285 			pattern->age->min, pattern->age->max,
2286 			sysfs_scheme->action, &quota, &wmarks);
2287 }
2288 
2289 static int damon_sysfs_set_schemes(struct damon_ctx *ctx,
2290 		struct damon_sysfs_schemes *sysfs_schemes)
2291 {
2292 	int i;
2293 
2294 	for (i = 0; i < sysfs_schemes->nr; i++) {
2295 		struct damos *scheme, *next;
2296 
2297 		scheme = damon_sysfs_mk_scheme(sysfs_schemes->schemes_arr[i]);
2298 		if (!scheme) {
2299 			damon_for_each_scheme_safe(scheme, next, ctx)
2300 				damon_destroy_scheme(scheme);
2301 			return -ENOMEM;
2302 		}
2303 		damon_add_scheme(ctx, scheme);
2304 	}
2305 	return 0;
2306 }
2307 
2308 static void damon_sysfs_before_terminate(struct damon_ctx *ctx)
2309 {
2310 	struct damon_target *t, *next;
2311 
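	/*
	 * Only the vaddr-family operations take pid references (via
	 * find_get_pid() in damon_sysfs_add_target()), so only their targets
	 * need the put_pid() below before being destroyed.
	 */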
2312 	if (ctx->ops.id != DAMON_OPS_VADDR && ctx->ops.id != DAMON_OPS_FVADDR)
2313 		return;
2314 
2315 	mutex_lock(&ctx->kdamond_lock);
2316 	damon_for_each_target_safe(t, next, ctx) {
2317 		put_pid(t->pid);
2318 		damon_destroy_target(t);
2319 	}
2320 	mutex_unlock(&ctx->kdamond_lock);
2321 }
2322 
2323 /*
2324  * damon_sysfs_upd_schemes_stats() - Update schemes stats sysfs files.
2325  * @kdamond:	The kobject wrapper associated with the kdamond thread.
2326  *
2327  * This function reads the schemes stats of a specific kdamond and updates the
2328  * related values for sysfs files.  This function should be called from DAMON
2329  * callbacks while holding ``damon_sysfs_lock``, to safely access the DAMON
2330  * context-internal data and DAMON sysfs variables.
2331  */
2332 static int damon_sysfs_upd_schemes_stats(struct damon_sysfs_kdamond *kdamond)
2333 {
2334 	struct damon_ctx *ctx = kdamond->damon_ctx;
2335 	struct damon_sysfs_schemes *sysfs_schemes;
2336 	struct damos *scheme;
2337 	int schemes_idx = 0;
2338 
2339 	if (!ctx)
2340 		return -EINVAL;
2341 	sysfs_schemes = kdamond->contexts->contexts_arr[0]->schemes;
2342 	damon_for_each_scheme(scheme, ctx) {
2343 		struct damon_sysfs_stats *sysfs_stats;
2344 
2345 		sysfs_stats = sysfs_schemes->schemes_arr[schemes_idx++]->stats;
2346 		sysfs_stats->nr_tried = scheme->stat.nr_tried;
2347 		sysfs_stats->sz_tried = scheme->stat.sz_tried;
2348 		sysfs_stats->nr_applied = scheme->stat.nr_applied;
2349 		sysfs_stats->sz_applied = scheme->stat.sz_applied;
2350 		sysfs_stats->qt_exceeds = scheme->stat.qt_exceeds;
2351 	}
2352 	return 0;
2353 }
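/*
 * A rough sketch of the userspace side (the per-scheme directory names are
 * assumed to be the 0, 1, ... indices used elsewhere in this file):
 *
 *	# echo update_schemes_stats > /sys/kernel/mm/damon/admin/kdamonds/0/state
 *	# cat /sys/kernel/mm/damon/admin/kdamonds/0/contexts/0/schemes/0/stats/nr_tried
 */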
2354 
2355 static inline bool damon_sysfs_kdamond_running(
2356 		struct damon_sysfs_kdamond *kdamond)
2357 {
2358 	return kdamond->damon_ctx &&
2359 		damon_sysfs_ctx_running(kdamond->damon_ctx);
2360 }
2361 
2362 static int damon_sysfs_apply_inputs(struct damon_ctx *ctx,
2363 		struct damon_sysfs_context *sys_ctx)
2364 {
2365 	int err;
2366 
2367 	err = damon_select_ops(ctx, sys_ctx->ops_id);
2368 	if (err)
2369 		return err;
2370 	err = damon_sysfs_set_attrs(ctx, sys_ctx->attrs);
2371 	if (err)
2372 		return err;
2373 	err = damon_sysfs_set_targets(ctx, sys_ctx->targets);
2374 	if (err)
2375 		return err;
2376 	return damon_sysfs_set_schemes(ctx, sys_ctx->schemes);
2377 }
2378 
2379 /*
2380  * damon_sysfs_commit_input() - Commit user inputs to a running kdamond.
2381  * @kdamond:	The kobject wrapper for the associated kdamond.
2382  *
2383  * If the sysfs input is wrong, the kdamond will be terminated.
2384  */
2385 static int damon_sysfs_commit_input(struct damon_sysfs_kdamond *kdamond)
2386 {
2387 	if (!damon_sysfs_kdamond_running(kdamond))
2388 		return -EINVAL;
2389 	/* TODO: Support multiple contexts per kdamond */
2390 	if (kdamond->contexts->nr != 1)
2391 		return -EINVAL;
2392 
2393 	return damon_sysfs_apply_inputs(kdamond->damon_ctx,
2394 			kdamond->contexts->contexts_arr[0]);
2395 }
2396 
2397 /*
2398  * damon_sysfs_cmd_request_callback() - DAMON callback for handling requests.
2399  * @c:	The DAMON context of the callback.
2400  *
2401  * This function is periodically called back from the kdamond thread for @c.
2402  * Then, it checks if there is a waiting DAMON sysfs request and handles it.
2403  */
2404 static int damon_sysfs_cmd_request_callback(struct damon_ctx *c)
2405 {
2406 	struct damon_sysfs_kdamond *kdamond;
2407 	int err = 0;
2408 
2409 	/* avoid deadlock due to concurrent state_store('off') */
2410 	if (!mutex_trylock(&damon_sysfs_lock))
2411 		return 0;
2412 	kdamond = damon_sysfs_cmd_request.kdamond;
2413 	if (!kdamond || kdamond->damon_ctx != c)
2414 		goto out;
2415 	switch (damon_sysfs_cmd_request.cmd) {
2416 	case DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS:
2417 		err = damon_sysfs_upd_schemes_stats(kdamond);
2418 		break;
2419 	case DAMON_SYSFS_CMD_COMMIT:
2420 		err = damon_sysfs_commit_input(kdamond);
2421 		break;
2422 	default:
2423 		break;
2424 	}
2425 	/* Mark the request as invalid now. */
2426 	damon_sysfs_cmd_request.kdamond = NULL;
2427 out:
2428 	mutex_unlock(&damon_sysfs_lock);
2429 	return err;
2430 }
2431 
2432 static struct damon_ctx *damon_sysfs_build_ctx(
2433 		struct damon_sysfs_context *sys_ctx)
2434 {
2435 	struct damon_ctx *ctx = damon_new_ctx();
2436 	int err;
2437 
2438 	if (!ctx)
2439 		return ERR_PTR(-ENOMEM);
2440 
2441 	err = damon_sysfs_apply_inputs(ctx, sys_ctx);
2442 	if (err) {
2443 		damon_destroy_ctx(ctx);
2444 		return ERR_PTR(err);
2445 	}
2446 
2447 	ctx->callback.after_wmarks_check = damon_sysfs_cmd_request_callback;
2448 	ctx->callback.after_aggregation = damon_sysfs_cmd_request_callback;
2449 	ctx->callback.before_terminate = damon_sysfs_before_terminate;
2450 	return ctx;
2451 }
2452 
2453 static int damon_sysfs_turn_damon_on(struct damon_sysfs_kdamond *kdamond)
2454 {
2455 	struct damon_ctx *ctx;
2456 	int err;
2457 
2458 	if (kdamond->damon_ctx &&
2459 			damon_sysfs_ctx_running(kdamond->damon_ctx))
2460 		return -EBUSY;
2461 	if (damon_sysfs_cmd_request.kdamond == kdamond)
2462 		return -EBUSY;
2463 	/* TODO: support multiple contexts per kdamond */
2464 	if (kdamond->contexts->nr != 1)
2465 		return -EINVAL;
2466 
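	/*
	 * Free the context that was kept only for showing the results of the
	 * previous, already finished run (see damon_sysfs_turn_damon_off()).
	 */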
2467 	if (kdamond->damon_ctx)
2468 		damon_destroy_ctx(kdamond->damon_ctx);
2469 	kdamond->damon_ctx = NULL;
2470 
2471 	ctx = damon_sysfs_build_ctx(kdamond->contexts->contexts_arr[0]);
2472 	if (IS_ERR(ctx))
2473 		return PTR_ERR(ctx);
2474 	err = damon_start(&ctx, 1, false);
2475 	if (err) {
2476 		damon_destroy_ctx(ctx);
2477 		return err;
2478 	}
2479 	kdamond->damon_ctx = ctx;
2480 	return err;
2481 }
2482 
2483 static int damon_sysfs_turn_damon_off(struct damon_sysfs_kdamond *kdamond)
2484 {
2485 	if (!kdamond->damon_ctx)
2486 		return -EINVAL;
2487 	return damon_stop(&kdamond->damon_ctx, 1);
2488 	/*
2489 	 * To allow users to show the final monitoring results of an already
2490 	 * turned-off DAMON, we free kdamond->damon_ctx in the next
2491 	 * damon_sysfs_turn_damon_on() or kdamonds_nr_store().
2492 	 */
2493 }
2494 
2495 /*
2496  * damon_sysfs_handle_cmd() - Handle a command for a specific kdamond.
2497  * @cmd:	The command to handle.
2498  * @kdamond:	The kobject wrapper for the associated kdamond.
2499  *
2500  * This function handles a DAMON sysfs command for a kdamond.  For commands
2501  * that need to access running DAMON context-internal data, it forwards the
2502  * command to the DAMON callback
2503  * (@damon_sysfs_cmd_request_callback()) and waits until the request is
2504  * handled or the kdamond terminates.
2505  *
2506  * Return: 0 on success, negative error code otherwise.
2507  */
2508 static int damon_sysfs_handle_cmd(enum damon_sysfs_cmd cmd,
2509 		struct damon_sysfs_kdamond *kdamond)
2510 {
2511 	bool need_wait = true;
2512 
2513 	/* Handle commands that don't access DAMON context-internal data */
2514 	switch (cmd) {
2515 	case DAMON_SYSFS_CMD_ON:
2516 		return damon_sysfs_turn_damon_on(kdamond);
2517 	case DAMON_SYSFS_CMD_OFF:
2518 		return damon_sysfs_turn_damon_off(kdamond);
2519 	default:
2520 		break;
2521 	}
2522 
2523 	/* Pass the command to DAMON callback for safe DAMON context access */
2524 	if (damon_sysfs_cmd_request.kdamond)
2525 		return -EBUSY;
2526 	if (!damon_sysfs_kdamond_running(kdamond))
2527 		return -EINVAL;
2528 	damon_sysfs_cmd_request.cmd = cmd;
2529 	damon_sysfs_cmd_request.kdamond = kdamond;
2530 
2531 	/*
2532 	 * Release damon_sysfs_lock and wait until the request is handled by
2533 	 * damon_sysfs_cmd_request_callback() in the kdamond context.
2534 	 */
2535 	mutex_unlock(&damon_sysfs_lock);
2536 	while (need_wait) {
2537 		schedule_timeout_idle(msecs_to_jiffies(100));
2538 		if (!mutex_trylock(&damon_sysfs_lock))
2539 			continue;
2540 		if (!damon_sysfs_cmd_request.kdamond) {
2541 			/* damon_sysfs_cmd_request_callback() handled */
2542 			need_wait = false;
2543 		} else if (!damon_sysfs_kdamond_running(kdamond)) {
2544 			/* kdamond has already finished */
2545 			need_wait = false;
2546 			damon_sysfs_cmd_request.kdamond = NULL;
2547 		}
2548 		mutex_unlock(&damon_sysfs_lock);
2549 	}
2550 	mutex_lock(&damon_sysfs_lock);
2551 	return 0;
2552 }
2553 
2554 static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
2555 		const char *buf, size_t count)
2556 {
2557 	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
2558 			struct damon_sysfs_kdamond, kobj);
2559 	enum damon_sysfs_cmd cmd;
2560 	ssize_t ret = -EINVAL;
2561 
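	/*
	 * If another sysfs request currently holds damon_sysfs_lock, fail
	 * with -EBUSY instead of sleeping on the lock.
	 */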
2562 	if (!mutex_trylock(&damon_sysfs_lock))
2563 		return -EBUSY;
2564 	for (cmd = 0; cmd < NR_DAMON_SYSFS_CMDS; cmd++) {
2565 		if (sysfs_streq(buf, damon_sysfs_cmd_strs[cmd])) {
2566 			ret = damon_sysfs_handle_cmd(cmd, kdamond);
2567 			break;
2568 		}
2569 	}
2570 	mutex_unlock(&damon_sysfs_lock);
2571 	if (!ret)
2572 		ret = count;
2573 	return ret;
2574 }
2575 
2576 static ssize_t pid_show(struct kobject *kobj,
2577 		struct kobj_attribute *attr, char *buf)
2578 {
2579 	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
2580 			struct damon_sysfs_kdamond, kobj);
2581 	struct damon_ctx *ctx;
2582 	int pid;
2583 
2584 	if (!mutex_trylock(&damon_sysfs_lock))
2585 		return -EBUSY;
2586 	ctx = kdamond->damon_ctx;
2587 	if (!ctx) {
2588 		pid = -1;
2589 		goto out;
2590 	}
2591 	mutex_lock(&ctx->kdamond_lock);
2592 	if (!ctx->kdamond)
2593 		pid = -1;
2594 	else
2595 		pid = ctx->kdamond->pid;
2596 	mutex_unlock(&ctx->kdamond_lock);
2597 out:
2598 	mutex_unlock(&damon_sysfs_lock);
2599 	return sysfs_emit(buf, "%d\n", pid);
2600 }
2601 
2602 static void damon_sysfs_kdamond_release(struct kobject *kobj)
2603 {
2604 	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
2605 			struct damon_sysfs_kdamond, kobj);
2606 
2607 	if (kdamond->damon_ctx)
2608 		damon_destroy_ctx(kdamond->damon_ctx);
2609 	kfree(kdamond);
2610 }
2611 
2612 static struct kobj_attribute damon_sysfs_kdamond_state_attr =
2613 		__ATTR_RW_MODE(state, 0600);
2614 
2615 static struct kobj_attribute damon_sysfs_kdamond_pid_attr =
2616 		__ATTR_RO_MODE(pid, 0400);
2617 
2618 static struct attribute *damon_sysfs_kdamond_attrs[] = {
2619 	&damon_sysfs_kdamond_state_attr.attr,
2620 	&damon_sysfs_kdamond_pid_attr.attr,
2621 	NULL,
2622 };
2623 ATTRIBUTE_GROUPS(damon_sysfs_kdamond);
2624 
2625 static struct kobj_type damon_sysfs_kdamond_ktype = {
2626 	.release = damon_sysfs_kdamond_release,
2627 	.sysfs_ops = &kobj_sysfs_ops,
2628 	.default_groups = damon_sysfs_kdamond_groups,
2629 };
2630 
2631 /*
2632  * kdamonds directory
2633  */
2634 
2635 struct damon_sysfs_kdamonds {
2636 	struct kobject kobj;
2637 	struct damon_sysfs_kdamond **kdamonds_arr;
2638 	int nr;
2639 };
2640 
2641 static struct damon_sysfs_kdamonds *damon_sysfs_kdamonds_alloc(void)
2642 {
2643 	return kzalloc(sizeof(struct damon_sysfs_kdamonds), GFP_KERNEL);
2644 }
2645 
2646 static void damon_sysfs_kdamonds_rm_dirs(struct damon_sysfs_kdamonds *kdamonds)
2647 {
2648 	struct damon_sysfs_kdamond **kdamonds_arr = kdamonds->kdamonds_arr;
2649 	int i;
2650 
2651 	for (i = 0; i < kdamonds->nr; i++) {
2652 		damon_sysfs_kdamond_rm_dirs(kdamonds_arr[i]);
2653 		kobject_put(&kdamonds_arr[i]->kobj);
2654 	}
2655 	kdamonds->nr = 0;
2656 	kfree(kdamonds_arr);
2657 	kdamonds->kdamonds_arr = NULL;
2658 }
2659 
2660 static int damon_sysfs_nr_running_ctxs(struct damon_sysfs_kdamond **kdamonds,
2661 		int nr_kdamonds)
2662 {
2663 	int nr_running_ctxs = 0;
2664 	int i;
2665 
2666 	for (i = 0; i < nr_kdamonds; i++) {
2667 		struct damon_ctx *ctx = kdamonds[i]->damon_ctx;
2668 
2669 		if (!ctx)
2670 			continue;
2671 		mutex_lock(&ctx->kdamond_lock);
2672 		if (ctx->kdamond)
2673 			nr_running_ctxs++;
2674 		mutex_unlock(&ctx->kdamond_lock);
2675 	}
2676 	return nr_running_ctxs;
2677 }
2678 
2679 static int damon_sysfs_kdamonds_add_dirs(struct damon_sysfs_kdamonds *kdamonds,
2680 		int nr_kdamonds)
2681 {
2682 	struct damon_sysfs_kdamond **kdamonds_arr, *kdamond;
2683 	int err, i;
2684 
2685 	if (damon_sysfs_nr_running_ctxs(kdamonds->kdamonds_arr, kdamonds->nr))
2686 		return -EBUSY;
2687 
2688 	for (i = 0; i < kdamonds->nr; i++) {
2689 		if (damon_sysfs_cmd_request.kdamond ==
2690 				kdamonds->kdamonds_arr[i])
2691 			return -EBUSY;
2692 	}
2693 
2694 	damon_sysfs_kdamonds_rm_dirs(kdamonds);
2695 	if (!nr_kdamonds)
2696 		return 0;
2697 
2698 	kdamonds_arr = kmalloc_array(nr_kdamonds, sizeof(*kdamonds_arr),
2699 			GFP_KERNEL | __GFP_NOWARN);
2700 	if (!kdamonds_arr)
2701 		return -ENOMEM;
2702 	kdamonds->kdamonds_arr = kdamonds_arr;
2703 
2704 	for (i = 0; i < nr_kdamonds; i++) {
2705 		kdamond = damon_sysfs_kdamond_alloc();
2706 		if (!kdamond) {
2707 			damon_sysfs_kdamonds_rm_dirs(kdamonds);
2708 			return -ENOMEM;
2709 		}
2710 
2711 		err = kobject_init_and_add(&kdamond->kobj,
2712 				&damon_sysfs_kdamond_ktype, &kdamonds->kobj,
2713 				"%d", i);
2714 		if (err)
2715 			goto out;
2716 
2717 		err = damon_sysfs_kdamond_add_dirs(kdamond);
2718 		if (err)
2719 			goto out;
2720 
2721 		kdamonds_arr[i] = kdamond;
2722 		kdamonds->nr++;
2723 	}
2724 	return 0;
2725 
2726 out:
2727 	damon_sysfs_kdamonds_rm_dirs(kdamonds);
2728 	kobject_put(&kdamond->kobj);
2729 	return err;
2730 }
2731 
2732 static ssize_t nr_kdamonds_show(struct kobject *kobj,
2733 		struct kobj_attribute *attr, char *buf)
2734 {
2735 	struct damon_sysfs_kdamonds *kdamonds = container_of(kobj,
2736 			struct damon_sysfs_kdamonds, kobj);
2737 
2738 	return sysfs_emit(buf, "%d\n", kdamonds->nr);
2739 }
2740 
2741 static ssize_t nr_kdamonds_store(struct kobject *kobj,
2742 		struct kobj_attribute *attr, const char *buf, size_t count)
2743 {
2744 	struct damon_sysfs_kdamonds *kdamonds = container_of(kobj,
2745 			struct damon_sysfs_kdamonds, kobj);
2746 	int nr, err;
2747 
2748 	err = kstrtoint(buf, 0, &nr);
2749 	if (err)
2750 		return err;
2751 	if (nr < 0)
2752 		return -EINVAL;
2753 
2754 	if (!mutex_trylock(&damon_sysfs_lock))
2755 		return -EBUSY;
2756 	err = damon_sysfs_kdamonds_add_dirs(kdamonds, nr);
2757 	mutex_unlock(&damon_sysfs_lock);
2758 	if (err)
2759 		return err;
2760 
2761 	return count;
2762 }
2763 
2764 static void damon_sysfs_kdamonds_release(struct kobject *kobj)
2765 {
2766 	kfree(container_of(kobj, struct damon_sysfs_kdamonds, kobj));
2767 }
2768 
2769 static struct kobj_attribute damon_sysfs_kdamonds_nr_attr =
2770 		__ATTR_RW_MODE(nr_kdamonds, 0600);
2771 
2772 static struct attribute *damon_sysfs_kdamonds_attrs[] = {
2773 	&damon_sysfs_kdamonds_nr_attr.attr,
2774 	NULL,
2775 };
2776 ATTRIBUTE_GROUPS(damon_sysfs_kdamonds);
2777 
2778 static struct kobj_type damon_sysfs_kdamonds_ktype = {
2779 	.release = damon_sysfs_kdamonds_release,
2780 	.sysfs_ops = &kobj_sysfs_ops,
2781 	.default_groups = damon_sysfs_kdamonds_groups,
2782 };
2783 
2784 /*
2785  * damon user interface directory
2786  */
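/*
 * A sketch of the part of the resulting hierarchy that this file builds
 * (deeper per-context directories such as attrs, targets, and schemes are set
 * up by code earlier in this file):
 *
 *	<mm_kobj>/damon/admin/
 *	    kdamonds/
 *	        nr_kdamonds
 *	        0/ 1/ ...
 *	            state
 *	            pid
 *	            contexts/
 *	                nr_contexts
 *	                0/
 *	                    avail_operations
 *	                    operations
 *	                    ...
 */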
2787 
2788 struct damon_sysfs_ui_dir {
2789 	struct kobject kobj;
2790 	struct damon_sysfs_kdamonds *kdamonds;
2791 };
2792 
2793 static struct damon_sysfs_ui_dir *damon_sysfs_ui_dir_alloc(void)
2794 {
2795 	return kzalloc(sizeof(struct damon_sysfs_ui_dir), GFP_KERNEL);
2796 }
2797 
2798 static int damon_sysfs_ui_dir_add_dirs(struct damon_sysfs_ui_dir *ui_dir)
2799 {
2800 	struct damon_sysfs_kdamonds *kdamonds;
2801 	int err;
2802 
2803 	kdamonds = damon_sysfs_kdamonds_alloc();
2804 	if (!kdamonds)
2805 		return -ENOMEM;
2806 
2807 	err = kobject_init_and_add(&kdamonds->kobj,
2808 			&damon_sysfs_kdamonds_ktype, &ui_dir->kobj,
2809 			"kdamonds");
2810 	if (err) {
2811 		kobject_put(&kdamonds->kobj);
2812 		return err;
2813 	}
2814 	ui_dir->kdamonds = kdamonds;
2815 	return err;
2816 }
2817 
2818 static void damon_sysfs_ui_dir_release(struct kobject *kobj)
2819 {
2820 	kfree(container_of(kobj, struct damon_sysfs_ui_dir, kobj));
2821 }
2822 
2823 static struct attribute *damon_sysfs_ui_dir_attrs[] = {
2824 	NULL,
2825 };
2826 ATTRIBUTE_GROUPS(damon_sysfs_ui_dir);
2827 
2828 static struct kobj_type damon_sysfs_ui_dir_ktype = {
2829 	.release = damon_sysfs_ui_dir_release,
2830 	.sysfs_ops = &kobj_sysfs_ops,
2831 	.default_groups = damon_sysfs_ui_dir_groups,
2832 };
2833 
2834 static int __init damon_sysfs_init(void)
2835 {
2836 	struct kobject *damon_sysfs_root;
2837 	struct damon_sysfs_ui_dir *admin;
2838 	int err;
2839 
2840 	damon_sysfs_root = kobject_create_and_add("damon", mm_kobj);
2841 	if (!damon_sysfs_root)
2842 		return -ENOMEM;
2843 
2844 	admin = damon_sysfs_ui_dir_alloc();
2845 	if (!admin) {
2846 		kobject_put(damon_sysfs_root);
2847 		return -ENOMEM;
2848 	}
2849 	err = kobject_init_and_add(&admin->kobj, &damon_sysfs_ui_dir_ktype,
2850 			damon_sysfs_root, "admin");
2851 	if (err)
2852 		goto out;
2853 	err = damon_sysfs_ui_dir_add_dirs(admin);
2854 	if (err)
2855 		goto out;
2856 	return 0;
2857 
2858 out:
2859 	kobject_put(&admin->kobj);
2860 	kobject_put(damon_sysfs_root);
2861 	return err;
2862 }
2863 subsys_initcall(damon_sysfs_init);
2864