xref: /linux/mm/damon/sysfs.c (revision 7203ca412fc8e8a0588e9adc0f777d3163f8dff3)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * DAMON sysfs Interface
4  *
5  * Copyright (c) 2022 SeongJae Park <sj@kernel.org>
6  */
7 
8 #include <linux/pid.h>
9 #include <linux/sched.h>
10 #include <linux/slab.h>
11 
12 #include "sysfs-common.h"
13 
14 /*
15  * init region directory
16  */
17 
18 struct damon_sysfs_region {
19 	struct kobject kobj;
20 	struct damon_addr_range ar;
21 };
22 
damon_sysfs_region_alloc(void)23 static struct damon_sysfs_region *damon_sysfs_region_alloc(void)
24 {
25 	return kzalloc(sizeof(struct damon_sysfs_region), GFP_KERNEL);
26 }
27 
start_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)28 static ssize_t start_show(struct kobject *kobj, struct kobj_attribute *attr,
29 		char *buf)
30 {
31 	struct damon_sysfs_region *region = container_of(kobj,
32 			struct damon_sysfs_region, kobj);
33 
34 	return sysfs_emit(buf, "%lu\n", region->ar.start);
35 }
36 
start_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)37 static ssize_t start_store(struct kobject *kobj, struct kobj_attribute *attr,
38 		const char *buf, size_t count)
39 {
40 	struct damon_sysfs_region *region = container_of(kobj,
41 			struct damon_sysfs_region, kobj);
42 	int err = kstrtoul(buf, 0, &region->ar.start);
43 
44 	return err ? err : count;
45 }
46 
end_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)47 static ssize_t end_show(struct kobject *kobj, struct kobj_attribute *attr,
48 		char *buf)
49 {
50 	struct damon_sysfs_region *region = container_of(kobj,
51 			struct damon_sysfs_region, kobj);
52 
53 	return sysfs_emit(buf, "%lu\n", region->ar.end);
54 }
55 
end_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)56 static ssize_t end_store(struct kobject *kobj, struct kobj_attribute *attr,
57 		const char *buf, size_t count)
58 {
59 	struct damon_sysfs_region *region = container_of(kobj,
60 			struct damon_sysfs_region, kobj);
61 	int err = kstrtoul(buf, 0, &region->ar.end);
62 
63 	return err ? err : count;
64 }
65 
damon_sysfs_region_release(struct kobject * kobj)66 static void damon_sysfs_region_release(struct kobject *kobj)
67 {
68 	kfree(container_of(kobj, struct damon_sysfs_region, kobj));
69 }
70 
71 static struct kobj_attribute damon_sysfs_region_start_attr =
72 		__ATTR_RW_MODE(start, 0600);
73 
74 static struct kobj_attribute damon_sysfs_region_end_attr =
75 		__ATTR_RW_MODE(end, 0600);
76 
77 static struct attribute *damon_sysfs_region_attrs[] = {
78 	&damon_sysfs_region_start_attr.attr,
79 	&damon_sysfs_region_end_attr.attr,
80 	NULL,
81 };
82 ATTRIBUTE_GROUPS(damon_sysfs_region);
83 
84 static const struct kobj_type damon_sysfs_region_ktype = {
85 	.release = damon_sysfs_region_release,
86 	.sysfs_ops = &kobj_sysfs_ops,
87 	.default_groups = damon_sysfs_region_groups,
88 };
89 
90 /*
91  * init_regions directory
92  */
93 
94 struct damon_sysfs_regions {
95 	struct kobject kobj;
96 	struct damon_sysfs_region **regions_arr;
97 	int nr;
98 };
99 
damon_sysfs_regions_alloc(void)100 static struct damon_sysfs_regions *damon_sysfs_regions_alloc(void)
101 {
102 	return kzalloc(sizeof(struct damon_sysfs_regions), GFP_KERNEL);
103 }
104 
damon_sysfs_regions_rm_dirs(struct damon_sysfs_regions * regions)105 static void damon_sysfs_regions_rm_dirs(struct damon_sysfs_regions *regions)
106 {
107 	struct damon_sysfs_region **regions_arr = regions->regions_arr;
108 	int i;
109 
110 	for (i = 0; i < regions->nr; i++)
111 		kobject_put(&regions_arr[i]->kobj);
112 	regions->nr = 0;
113 	kfree(regions_arr);
114 	regions->regions_arr = NULL;
115 }
116 
damon_sysfs_regions_add_dirs(struct damon_sysfs_regions * regions,int nr_regions)117 static int damon_sysfs_regions_add_dirs(struct damon_sysfs_regions *regions,
118 		int nr_regions)
119 {
120 	struct damon_sysfs_region **regions_arr, *region;
121 	int err, i;
122 
123 	damon_sysfs_regions_rm_dirs(regions);
124 	if (!nr_regions)
125 		return 0;
126 
127 	regions_arr = kmalloc_array(nr_regions, sizeof(*regions_arr),
128 			GFP_KERNEL | __GFP_NOWARN);
129 	if (!regions_arr)
130 		return -ENOMEM;
131 	regions->regions_arr = regions_arr;
132 
133 	for (i = 0; i < nr_regions; i++) {
134 		region = damon_sysfs_region_alloc();
135 		if (!region) {
136 			damon_sysfs_regions_rm_dirs(regions);
137 			return -ENOMEM;
138 		}
139 
140 		err = kobject_init_and_add(&region->kobj,
141 				&damon_sysfs_region_ktype, &regions->kobj,
142 				"%d", i);
143 		if (err) {
144 			kobject_put(&region->kobj);
145 			damon_sysfs_regions_rm_dirs(regions);
146 			return err;
147 		}
148 
149 		regions_arr[i] = region;
150 		regions->nr++;
151 	}
152 	return 0;
153 }
154 
nr_regions_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)155 static ssize_t nr_regions_show(struct kobject *kobj,
156 		struct kobj_attribute *attr, char *buf)
157 {
158 	struct damon_sysfs_regions *regions = container_of(kobj,
159 			struct damon_sysfs_regions, kobj);
160 
161 	return sysfs_emit(buf, "%d\n", regions->nr);
162 }
163 
nr_regions_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)164 static ssize_t nr_regions_store(struct kobject *kobj,
165 		struct kobj_attribute *attr, const char *buf, size_t count)
166 {
167 	struct damon_sysfs_regions *regions;
168 	int nr, err = kstrtoint(buf, 0, &nr);
169 
170 	if (err)
171 		return err;
172 	if (nr < 0)
173 		return -EINVAL;
174 
175 	regions = container_of(kobj, struct damon_sysfs_regions, kobj);
176 
177 	if (!mutex_trylock(&damon_sysfs_lock))
178 		return -EBUSY;
179 	err = damon_sysfs_regions_add_dirs(regions, nr);
180 	mutex_unlock(&damon_sysfs_lock);
181 	if (err)
182 		return err;
183 
184 	return count;
185 }
186 
damon_sysfs_regions_release(struct kobject * kobj)187 static void damon_sysfs_regions_release(struct kobject *kobj)
188 {
189 	kfree(container_of(kobj, struct damon_sysfs_regions, kobj));
190 }
191 
192 static struct kobj_attribute damon_sysfs_regions_nr_attr =
193 		__ATTR_RW_MODE(nr_regions, 0600);
194 
195 static struct attribute *damon_sysfs_regions_attrs[] = {
196 	&damon_sysfs_regions_nr_attr.attr,
197 	NULL,
198 };
199 ATTRIBUTE_GROUPS(damon_sysfs_regions);
200 
201 static const struct kobj_type damon_sysfs_regions_ktype = {
202 	.release = damon_sysfs_regions_release,
203 	.sysfs_ops = &kobj_sysfs_ops,
204 	.default_groups = damon_sysfs_regions_groups,
205 };
206 
207 /*
208  * target directory
209  */
210 
211 struct damon_sysfs_target {
212 	struct kobject kobj;
213 	struct damon_sysfs_regions *regions;
214 	int pid;
215 	bool obsolete;
216 };
217 
damon_sysfs_target_alloc(void)218 static struct damon_sysfs_target *damon_sysfs_target_alloc(void)
219 {
220 	return kzalloc(sizeof(struct damon_sysfs_target), GFP_KERNEL);
221 }
222 
damon_sysfs_target_add_dirs(struct damon_sysfs_target * target)223 static int damon_sysfs_target_add_dirs(struct damon_sysfs_target *target)
224 {
225 	struct damon_sysfs_regions *regions = damon_sysfs_regions_alloc();
226 	int err;
227 
228 	if (!regions)
229 		return -ENOMEM;
230 
231 	err = kobject_init_and_add(&regions->kobj, &damon_sysfs_regions_ktype,
232 			&target->kobj, "regions");
233 	if (err)
234 		kobject_put(&regions->kobj);
235 	else
236 		target->regions = regions;
237 	return err;
238 }
239 
damon_sysfs_target_rm_dirs(struct damon_sysfs_target * target)240 static void damon_sysfs_target_rm_dirs(struct damon_sysfs_target *target)
241 {
242 	damon_sysfs_regions_rm_dirs(target->regions);
243 	kobject_put(&target->regions->kobj);
244 }
245 
pid_target_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)246 static ssize_t pid_target_show(struct kobject *kobj,
247 		struct kobj_attribute *attr, char *buf)
248 {
249 	struct damon_sysfs_target *target = container_of(kobj,
250 			struct damon_sysfs_target, kobj);
251 
252 	return sysfs_emit(buf, "%d\n", target->pid);
253 }
254 
pid_target_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)255 static ssize_t pid_target_store(struct kobject *kobj,
256 		struct kobj_attribute *attr, const char *buf, size_t count)
257 {
258 	struct damon_sysfs_target *target = container_of(kobj,
259 			struct damon_sysfs_target, kobj);
260 	int err = kstrtoint(buf, 0, &target->pid);
261 
262 	if (err)
263 		return -EINVAL;
264 	return count;
265 }
266 
obsolete_target_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)267 static ssize_t obsolete_target_show(struct kobject *kobj,
268 		struct kobj_attribute *attr, char *buf)
269 {
270 	struct damon_sysfs_target *target = container_of(kobj,
271 			struct damon_sysfs_target, kobj);
272 
273 	return sysfs_emit(buf, "%c\n", target->obsolete ? 'Y' : 'N');
274 }
275 
obsolete_target_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)276 static ssize_t obsolete_target_store(struct kobject *kobj,
277 		struct kobj_attribute *attr, const char *buf, size_t count)
278 {
279 	struct damon_sysfs_target *target = container_of(kobj,
280 			struct damon_sysfs_target, kobj);
281 	bool obsolete;
282 	int err = kstrtobool(buf, &obsolete);
283 
284 	if (err)
285 		return err;
286 	target->obsolete = obsolete;
287 	return count;
288 }
289 
damon_sysfs_target_release(struct kobject * kobj)290 static void damon_sysfs_target_release(struct kobject *kobj)
291 {
292 	kfree(container_of(kobj, struct damon_sysfs_target, kobj));
293 }
294 
295 static struct kobj_attribute damon_sysfs_target_pid_attr =
296 		__ATTR_RW_MODE(pid_target, 0600);
297 
298 static struct kobj_attribute damon_sysfs_target_obsolete_attr =
299 		__ATTR_RW_MODE(obsolete_target, 0600);
300 
301 static struct attribute *damon_sysfs_target_attrs[] = {
302 	&damon_sysfs_target_pid_attr.attr,
303 	&damon_sysfs_target_obsolete_attr.attr,
304 	NULL,
305 };
306 ATTRIBUTE_GROUPS(damon_sysfs_target);
307 
308 static const struct kobj_type damon_sysfs_target_ktype = {
309 	.release = damon_sysfs_target_release,
310 	.sysfs_ops = &kobj_sysfs_ops,
311 	.default_groups = damon_sysfs_target_groups,
312 };
313 
314 /*
315  * targets directory
316  */
317 
318 struct damon_sysfs_targets {
319 	struct kobject kobj;
320 	struct damon_sysfs_target **targets_arr;
321 	int nr;
322 };
323 
damon_sysfs_targets_alloc(void)324 static struct damon_sysfs_targets *damon_sysfs_targets_alloc(void)
325 {
326 	return kzalloc(sizeof(struct damon_sysfs_targets), GFP_KERNEL);
327 }
328 
damon_sysfs_targets_rm_dirs(struct damon_sysfs_targets * targets)329 static void damon_sysfs_targets_rm_dirs(struct damon_sysfs_targets *targets)
330 {
331 	struct damon_sysfs_target **targets_arr = targets->targets_arr;
332 	int i;
333 
334 	for (i = 0; i < targets->nr; i++) {
335 		damon_sysfs_target_rm_dirs(targets_arr[i]);
336 		kobject_put(&targets_arr[i]->kobj);
337 	}
338 	targets->nr = 0;
339 	kfree(targets_arr);
340 	targets->targets_arr = NULL;
341 }
342 
damon_sysfs_targets_add_dirs(struct damon_sysfs_targets * targets,int nr_targets)343 static int damon_sysfs_targets_add_dirs(struct damon_sysfs_targets *targets,
344 		int nr_targets)
345 {
346 	struct damon_sysfs_target **targets_arr, *target;
347 	int err, i;
348 
349 	damon_sysfs_targets_rm_dirs(targets);
350 	if (!nr_targets)
351 		return 0;
352 
353 	targets_arr = kmalloc_array(nr_targets, sizeof(*targets_arr),
354 			GFP_KERNEL | __GFP_NOWARN);
355 	if (!targets_arr)
356 		return -ENOMEM;
357 	targets->targets_arr = targets_arr;
358 
359 	for (i = 0; i < nr_targets; i++) {
360 		target = damon_sysfs_target_alloc();
361 		if (!target) {
362 			damon_sysfs_targets_rm_dirs(targets);
363 			return -ENOMEM;
364 		}
365 
366 		err = kobject_init_and_add(&target->kobj,
367 				&damon_sysfs_target_ktype, &targets->kobj,
368 				"%d", i);
369 		if (err)
370 			goto out;
371 
372 		err = damon_sysfs_target_add_dirs(target);
373 		if (err)
374 			goto out;
375 
376 		targets_arr[i] = target;
377 		targets->nr++;
378 	}
379 	return 0;
380 
381 out:
382 	damon_sysfs_targets_rm_dirs(targets);
383 	kobject_put(&target->kobj);
384 	return err;
385 }
386 
nr_targets_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)387 static ssize_t nr_targets_show(struct kobject *kobj,
388 		struct kobj_attribute *attr, char *buf)
389 {
390 	struct damon_sysfs_targets *targets = container_of(kobj,
391 			struct damon_sysfs_targets, kobj);
392 
393 	return sysfs_emit(buf, "%d\n", targets->nr);
394 }
395 
nr_targets_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)396 static ssize_t nr_targets_store(struct kobject *kobj,
397 		struct kobj_attribute *attr, const char *buf, size_t count)
398 {
399 	struct damon_sysfs_targets *targets;
400 	int nr, err = kstrtoint(buf, 0, &nr);
401 
402 	if (err)
403 		return err;
404 	if (nr < 0)
405 		return -EINVAL;
406 
407 	targets = container_of(kobj, struct damon_sysfs_targets, kobj);
408 
409 	if (!mutex_trylock(&damon_sysfs_lock))
410 		return -EBUSY;
411 	err = damon_sysfs_targets_add_dirs(targets, nr);
412 	mutex_unlock(&damon_sysfs_lock);
413 	if (err)
414 		return err;
415 
416 	return count;
417 }
418 
damon_sysfs_targets_release(struct kobject * kobj)419 static void damon_sysfs_targets_release(struct kobject *kobj)
420 {
421 	kfree(container_of(kobj, struct damon_sysfs_targets, kobj));
422 }
423 
424 static struct kobj_attribute damon_sysfs_targets_nr_attr =
425 		__ATTR_RW_MODE(nr_targets, 0600);
426 
427 static struct attribute *damon_sysfs_targets_attrs[] = {
428 	&damon_sysfs_targets_nr_attr.attr,
429 	NULL,
430 };
431 ATTRIBUTE_GROUPS(damon_sysfs_targets);
432 
433 static const struct kobj_type damon_sysfs_targets_ktype = {
434 	.release = damon_sysfs_targets_release,
435 	.sysfs_ops = &kobj_sysfs_ops,
436 	.default_groups = damon_sysfs_targets_groups,
437 };
438 
439 /*
440  * intervals goal directory
441  */
442 
443 struct damon_sysfs_intervals_goal {
444 	struct kobject kobj;
445 	unsigned long access_bp;
446 	unsigned long aggrs;
447 	unsigned long min_sample_us;
448 	unsigned long max_sample_us;
449 };
450 
damon_sysfs_intervals_goal_alloc(unsigned long access_bp,unsigned long aggrs,unsigned long min_sample_us,unsigned long max_sample_us)451 static struct damon_sysfs_intervals_goal *damon_sysfs_intervals_goal_alloc(
452 		unsigned long access_bp, unsigned long aggrs,
453 		unsigned long min_sample_us, unsigned long max_sample_us)
454 {
455 	struct damon_sysfs_intervals_goal *goal = kmalloc(sizeof(*goal),
456 			GFP_KERNEL);
457 
458 	if (!goal)
459 		return NULL;
460 
461 	goal->kobj = (struct kobject){};
462 	goal->access_bp = access_bp;
463 	goal->aggrs = aggrs;
464 	goal->min_sample_us = min_sample_us;
465 	goal->max_sample_us = max_sample_us;
466 	return goal;
467 }
468 
access_bp_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)469 static ssize_t access_bp_show(struct kobject *kobj,
470 		struct kobj_attribute *attr, char *buf)
471 {
472 	struct damon_sysfs_intervals_goal *goal = container_of(kobj,
473 			struct damon_sysfs_intervals_goal, kobj);
474 
475 	return sysfs_emit(buf, "%lu\n", goal->access_bp);
476 }
477 
access_bp_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)478 static ssize_t access_bp_store(struct kobject *kobj,
479 		struct kobj_attribute *attr, const char *buf, size_t count)
480 {
481 	struct damon_sysfs_intervals_goal *goal = container_of(kobj,
482 			struct damon_sysfs_intervals_goal, kobj);
483 	unsigned long nr;
484 	int err = kstrtoul(buf, 0, &nr);
485 
486 	if (err)
487 		return err;
488 
489 	goal->access_bp = nr;
490 	return count;
491 }
492 
aggrs_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)493 static ssize_t aggrs_show(struct kobject *kobj,
494 		struct kobj_attribute *attr, char *buf)
495 {
496 	struct damon_sysfs_intervals_goal *goal = container_of(kobj,
497 			struct damon_sysfs_intervals_goal, kobj);
498 
499 	return sysfs_emit(buf, "%lu\n", goal->aggrs);
500 }
501 
aggrs_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)502 static ssize_t aggrs_store(struct kobject *kobj,
503 		struct kobj_attribute *attr, const char *buf, size_t count)
504 {
505 	struct damon_sysfs_intervals_goal *goal = container_of(kobj,
506 			struct damon_sysfs_intervals_goal, kobj);
507 	unsigned long nr;
508 	int err = kstrtoul(buf, 0, &nr);
509 
510 	if (err)
511 		return err;
512 
513 	goal->aggrs = nr;
514 	return count;
515 }
516 
min_sample_us_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)517 static ssize_t min_sample_us_show(struct kobject *kobj,
518 		struct kobj_attribute *attr, char *buf)
519 {
520 	struct damon_sysfs_intervals_goal *goal = container_of(kobj,
521 			struct damon_sysfs_intervals_goal, kobj);
522 
523 	return sysfs_emit(buf, "%lu\n", goal->min_sample_us);
524 }
525 
min_sample_us_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)526 static ssize_t min_sample_us_store(struct kobject *kobj,
527 		struct kobj_attribute *attr, const char *buf, size_t count)
528 {
529 	struct damon_sysfs_intervals_goal *goal = container_of(kobj,
530 			struct damon_sysfs_intervals_goal, kobj);
531 	unsigned long nr;
532 	int err = kstrtoul(buf, 0, &nr);
533 
534 	if (err)
535 		return err;
536 
537 	goal->min_sample_us = nr;
538 	return count;
539 }
540 
max_sample_us_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)541 static ssize_t max_sample_us_show(struct kobject *kobj,
542 		struct kobj_attribute *attr, char *buf)
543 {
544 	struct damon_sysfs_intervals_goal *goal = container_of(kobj,
545 			struct damon_sysfs_intervals_goal, kobj);
546 
547 	return sysfs_emit(buf, "%lu\n", goal->max_sample_us);
548 }
549 
max_sample_us_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)550 static ssize_t max_sample_us_store(struct kobject *kobj,
551 		struct kobj_attribute *attr, const char *buf, size_t count)
552 {
553 	struct damon_sysfs_intervals_goal *goal = container_of(kobj,
554 			struct damon_sysfs_intervals_goal, kobj);
555 	unsigned long nr;
556 	int err = kstrtoul(buf, 0, &nr);
557 
558 	if (err)
559 		return err;
560 
561 	goal->max_sample_us = nr;
562 	return count;
563 }
564 
damon_sysfs_intervals_goal_release(struct kobject * kobj)565 static void damon_sysfs_intervals_goal_release(struct kobject *kobj)
566 {
567 	kfree(container_of(kobj, struct damon_sysfs_intervals_goal, kobj));
568 }
569 
570 static struct kobj_attribute damon_sysfs_intervals_goal_access_bp_attr =
571 		__ATTR_RW_MODE(access_bp, 0600);
572 
573 static struct kobj_attribute damon_sysfs_intervals_goal_aggrs_attr =
574 		__ATTR_RW_MODE(aggrs, 0600);
575 
576 static struct kobj_attribute damon_sysfs_intervals_goal_min_sample_us_attr =
577 		__ATTR_RW_MODE(min_sample_us, 0600);
578 
579 static struct kobj_attribute damon_sysfs_intervals_goal_max_sample_us_attr =
580 		__ATTR_RW_MODE(max_sample_us, 0600);
581 
582 static struct attribute *damon_sysfs_intervals_goal_attrs[] = {
583 	&damon_sysfs_intervals_goal_access_bp_attr.attr,
584 	&damon_sysfs_intervals_goal_aggrs_attr.attr,
585 	&damon_sysfs_intervals_goal_min_sample_us_attr.attr,
586 	&damon_sysfs_intervals_goal_max_sample_us_attr.attr,
587 	NULL,
588 };
589 ATTRIBUTE_GROUPS(damon_sysfs_intervals_goal);
590 
591 static const struct kobj_type damon_sysfs_intervals_goal_ktype = {
592 	.release = damon_sysfs_intervals_goal_release,
593 	.sysfs_ops = &kobj_sysfs_ops,
594 	.default_groups = damon_sysfs_intervals_goal_groups,
595 };
596 
597 /*
598  * intervals directory
599  */
600 
601 struct damon_sysfs_intervals {
602 	struct kobject kobj;
603 	unsigned long sample_us;
604 	unsigned long aggr_us;
605 	unsigned long update_us;
606 	struct damon_sysfs_intervals_goal *intervals_goal;
607 };
608 
damon_sysfs_intervals_alloc(unsigned long sample_us,unsigned long aggr_us,unsigned long update_us)609 static struct damon_sysfs_intervals *damon_sysfs_intervals_alloc(
610 		unsigned long sample_us, unsigned long aggr_us,
611 		unsigned long update_us)
612 {
613 	struct damon_sysfs_intervals *intervals = kmalloc(sizeof(*intervals),
614 			GFP_KERNEL);
615 
616 	if (!intervals)
617 		return NULL;
618 
619 	intervals->kobj = (struct kobject){};
620 	intervals->sample_us = sample_us;
621 	intervals->aggr_us = aggr_us;
622 	intervals->update_us = update_us;
623 	return intervals;
624 }
625 
damon_sysfs_intervals_add_dirs(struct damon_sysfs_intervals * intervals)626 static int damon_sysfs_intervals_add_dirs(struct damon_sysfs_intervals *intervals)
627 {
628 	struct damon_sysfs_intervals_goal *goal;
629 	int err;
630 
631 	goal = damon_sysfs_intervals_goal_alloc(0, 0, 0, 0);
632 	if (!goal)
633 		return -ENOMEM;
634 
635 	err = kobject_init_and_add(&goal->kobj,
636 			&damon_sysfs_intervals_goal_ktype, &intervals->kobj,
637 			"intervals_goal");
638 	if (err) {
639 		kobject_put(&goal->kobj);
640 		intervals->intervals_goal = NULL;
641 		return err;
642 	}
643 	intervals->intervals_goal = goal;
644 	return 0;
645 }
646 
damon_sysfs_intervals_rm_dirs(struct damon_sysfs_intervals * intervals)647 static void damon_sysfs_intervals_rm_dirs(struct damon_sysfs_intervals *intervals)
648 {
649 	kobject_put(&intervals->intervals_goal->kobj);
650 }
651 
sample_us_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)652 static ssize_t sample_us_show(struct kobject *kobj,
653 		struct kobj_attribute *attr, char *buf)
654 {
655 	struct damon_sysfs_intervals *intervals = container_of(kobj,
656 			struct damon_sysfs_intervals, kobj);
657 
658 	return sysfs_emit(buf, "%lu\n", intervals->sample_us);
659 }
660 
sample_us_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)661 static ssize_t sample_us_store(struct kobject *kobj,
662 		struct kobj_attribute *attr, const char *buf, size_t count)
663 {
664 	struct damon_sysfs_intervals *intervals = container_of(kobj,
665 			struct damon_sysfs_intervals, kobj);
666 	unsigned long us;
667 	int err = kstrtoul(buf, 0, &us);
668 
669 	if (err)
670 		return err;
671 
672 	intervals->sample_us = us;
673 	return count;
674 }
675 
aggr_us_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)676 static ssize_t aggr_us_show(struct kobject *kobj, struct kobj_attribute *attr,
677 		char *buf)
678 {
679 	struct damon_sysfs_intervals *intervals = container_of(kobj,
680 			struct damon_sysfs_intervals, kobj);
681 
682 	return sysfs_emit(buf, "%lu\n", intervals->aggr_us);
683 }
684 
aggr_us_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)685 static ssize_t aggr_us_store(struct kobject *kobj, struct kobj_attribute *attr,
686 		const char *buf, size_t count)
687 {
688 	struct damon_sysfs_intervals *intervals = container_of(kobj,
689 			struct damon_sysfs_intervals, kobj);
690 	unsigned long us;
691 	int err = kstrtoul(buf, 0, &us);
692 
693 	if (err)
694 		return err;
695 
696 	intervals->aggr_us = us;
697 	return count;
698 }
699 
update_us_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)700 static ssize_t update_us_show(struct kobject *kobj,
701 		struct kobj_attribute *attr, char *buf)
702 {
703 	struct damon_sysfs_intervals *intervals = container_of(kobj,
704 			struct damon_sysfs_intervals, kobj);
705 
706 	return sysfs_emit(buf, "%lu\n", intervals->update_us);
707 }
708 
update_us_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)709 static ssize_t update_us_store(struct kobject *kobj,
710 		struct kobj_attribute *attr, const char *buf, size_t count)
711 {
712 	struct damon_sysfs_intervals *intervals = container_of(kobj,
713 			struct damon_sysfs_intervals, kobj);
714 	unsigned long us;
715 	int err = kstrtoul(buf, 0, &us);
716 
717 	if (err)
718 		return err;
719 
720 	intervals->update_us = us;
721 	return count;
722 }
723 
damon_sysfs_intervals_release(struct kobject * kobj)724 static void damon_sysfs_intervals_release(struct kobject *kobj)
725 {
726 	kfree(container_of(kobj, struct damon_sysfs_intervals, kobj));
727 }
728 
729 static struct kobj_attribute damon_sysfs_intervals_sample_us_attr =
730 		__ATTR_RW_MODE(sample_us, 0600);
731 
732 static struct kobj_attribute damon_sysfs_intervals_aggr_us_attr =
733 		__ATTR_RW_MODE(aggr_us, 0600);
734 
735 static struct kobj_attribute damon_sysfs_intervals_update_us_attr =
736 		__ATTR_RW_MODE(update_us, 0600);
737 
738 static struct attribute *damon_sysfs_intervals_attrs[] = {
739 	&damon_sysfs_intervals_sample_us_attr.attr,
740 	&damon_sysfs_intervals_aggr_us_attr.attr,
741 	&damon_sysfs_intervals_update_us_attr.attr,
742 	NULL,
743 };
744 ATTRIBUTE_GROUPS(damon_sysfs_intervals);
745 
746 static const struct kobj_type damon_sysfs_intervals_ktype = {
747 	.release = damon_sysfs_intervals_release,
748 	.sysfs_ops = &kobj_sysfs_ops,
749 	.default_groups = damon_sysfs_intervals_groups,
750 };
751 
752 /*
753  * monitoring_attrs directory
754  */
755 
756 struct damon_sysfs_attrs {
757 	struct kobject kobj;
758 	struct damon_sysfs_intervals *intervals;
759 	struct damon_sysfs_ul_range *nr_regions_range;
760 };
761 
damon_sysfs_attrs_alloc(void)762 static struct damon_sysfs_attrs *damon_sysfs_attrs_alloc(void)
763 {
764 	struct damon_sysfs_attrs *attrs = kmalloc(sizeof(*attrs), GFP_KERNEL);
765 
766 	if (!attrs)
767 		return NULL;
768 	attrs->kobj = (struct kobject){};
769 	return attrs;
770 }
771 
damon_sysfs_attrs_add_dirs(struct damon_sysfs_attrs * attrs)772 static int damon_sysfs_attrs_add_dirs(struct damon_sysfs_attrs *attrs)
773 {
774 	struct damon_sysfs_intervals *intervals;
775 	struct damon_sysfs_ul_range *nr_regions_range;
776 	int err;
777 
778 	intervals = damon_sysfs_intervals_alloc(5000, 100000, 60000000);
779 	if (!intervals)
780 		return -ENOMEM;
781 
782 	err = kobject_init_and_add(&intervals->kobj,
783 			&damon_sysfs_intervals_ktype, &attrs->kobj,
784 			"intervals");
785 	if (err)
786 		goto put_intervals_out;
787 	err = damon_sysfs_intervals_add_dirs(intervals);
788 	if (err)
789 		goto put_intervals_out;
790 	attrs->intervals = intervals;
791 
792 	nr_regions_range = damon_sysfs_ul_range_alloc(10, 1000);
793 	if (!nr_regions_range) {
794 		err = -ENOMEM;
795 		goto put_intervals_out;
796 	}
797 
798 	err = kobject_init_and_add(&nr_regions_range->kobj,
799 			&damon_sysfs_ul_range_ktype, &attrs->kobj,
800 			"nr_regions");
801 	if (err)
802 		goto put_nr_regions_intervals_out;
803 	attrs->nr_regions_range = nr_regions_range;
804 	return 0;
805 
806 put_nr_regions_intervals_out:
807 	kobject_put(&nr_regions_range->kobj);
808 	attrs->nr_regions_range = NULL;
809 put_intervals_out:
810 	kobject_put(&intervals->kobj);
811 	attrs->intervals = NULL;
812 	return err;
813 }
814 
damon_sysfs_attrs_rm_dirs(struct damon_sysfs_attrs * attrs)815 static void damon_sysfs_attrs_rm_dirs(struct damon_sysfs_attrs *attrs)
816 {
817 	kobject_put(&attrs->nr_regions_range->kobj);
818 	damon_sysfs_intervals_rm_dirs(attrs->intervals);
819 	kobject_put(&attrs->intervals->kobj);
820 }
821 
damon_sysfs_attrs_release(struct kobject * kobj)822 static void damon_sysfs_attrs_release(struct kobject *kobj)
823 {
824 	kfree(container_of(kobj, struct damon_sysfs_attrs, kobj));
825 }
826 
827 static struct attribute *damon_sysfs_attrs_attrs[] = {
828 	NULL,
829 };
830 ATTRIBUTE_GROUPS(damon_sysfs_attrs);
831 
832 static const struct kobj_type damon_sysfs_attrs_ktype = {
833 	.release = damon_sysfs_attrs_release,
834 	.sysfs_ops = &kobj_sysfs_ops,
835 	.default_groups = damon_sysfs_attrs_groups,
836 };
837 
838 /*
839  * context directory
840  */
841 
842 struct damon_sysfs_ops_name {
843 	enum damon_ops_id ops_id;
844 	char *name;
845 };
846 
847 static const struct damon_sysfs_ops_name damon_sysfs_ops_names[] = {
848 	{
849 		.ops_id = DAMON_OPS_VADDR,
850 		.name = "vaddr",
851 	},
852 	{
853 		.ops_id = DAMON_OPS_FVADDR,
854 		.name = "fvaddr",
855 	},
856 	{
857 		.ops_id = DAMON_OPS_PADDR,
858 		.name = "paddr",
859 	},
860 };
861 
862 struct damon_sysfs_context {
863 	struct kobject kobj;
864 	enum damon_ops_id ops_id;
865 	unsigned long addr_unit;
866 	struct damon_sysfs_attrs *attrs;
867 	struct damon_sysfs_targets *targets;
868 	struct damon_sysfs_schemes *schemes;
869 };
870 
damon_sysfs_context_alloc(enum damon_ops_id ops_id)871 static struct damon_sysfs_context *damon_sysfs_context_alloc(
872 		enum damon_ops_id ops_id)
873 {
874 	struct damon_sysfs_context *context = kmalloc(sizeof(*context),
875 				GFP_KERNEL);
876 
877 	if (!context)
878 		return NULL;
879 	context->kobj = (struct kobject){};
880 	context->ops_id = ops_id;
881 	context->addr_unit = 1;
882 	return context;
883 }
884 
damon_sysfs_context_set_attrs(struct damon_sysfs_context * context)885 static int damon_sysfs_context_set_attrs(struct damon_sysfs_context *context)
886 {
887 	struct damon_sysfs_attrs *attrs = damon_sysfs_attrs_alloc();
888 	int err;
889 
890 	if (!attrs)
891 		return -ENOMEM;
892 	err = kobject_init_and_add(&attrs->kobj, &damon_sysfs_attrs_ktype,
893 			&context->kobj, "monitoring_attrs");
894 	if (err)
895 		goto out;
896 	err = damon_sysfs_attrs_add_dirs(attrs);
897 	if (err)
898 		goto out;
899 	context->attrs = attrs;
900 	return 0;
901 
902 out:
903 	kobject_put(&attrs->kobj);
904 	return err;
905 }
906 
damon_sysfs_context_set_targets(struct damon_sysfs_context * context)907 static int damon_sysfs_context_set_targets(struct damon_sysfs_context *context)
908 {
909 	struct damon_sysfs_targets *targets = damon_sysfs_targets_alloc();
910 	int err;
911 
912 	if (!targets)
913 		return -ENOMEM;
914 	err = kobject_init_and_add(&targets->kobj, &damon_sysfs_targets_ktype,
915 			&context->kobj, "targets");
916 	if (err) {
917 		kobject_put(&targets->kobj);
918 		return err;
919 	}
920 	context->targets = targets;
921 	return 0;
922 }
923 
damon_sysfs_context_set_schemes(struct damon_sysfs_context * context)924 static int damon_sysfs_context_set_schemes(struct damon_sysfs_context *context)
925 {
926 	struct damon_sysfs_schemes *schemes = damon_sysfs_schemes_alloc();
927 	int err;
928 
929 	if (!schemes)
930 		return -ENOMEM;
931 	err = kobject_init_and_add(&schemes->kobj, &damon_sysfs_schemes_ktype,
932 			&context->kobj, "schemes");
933 	if (err) {
934 		kobject_put(&schemes->kobj);
935 		return err;
936 	}
937 	context->schemes = schemes;
938 	return 0;
939 }
940 
damon_sysfs_context_add_dirs(struct damon_sysfs_context * context)941 static int damon_sysfs_context_add_dirs(struct damon_sysfs_context *context)
942 {
943 	int err;
944 
945 	err = damon_sysfs_context_set_attrs(context);
946 	if (err)
947 		return err;
948 
949 	err = damon_sysfs_context_set_targets(context);
950 	if (err)
951 		goto put_attrs_out;
952 
953 	err = damon_sysfs_context_set_schemes(context);
954 	if (err)
955 		goto put_targets_attrs_out;
956 	return 0;
957 
958 put_targets_attrs_out:
959 	kobject_put(&context->targets->kobj);
960 	context->targets = NULL;
961 put_attrs_out:
962 	kobject_put(&context->attrs->kobj);
963 	context->attrs = NULL;
964 	return err;
965 }
966 
damon_sysfs_context_rm_dirs(struct damon_sysfs_context * context)967 static void damon_sysfs_context_rm_dirs(struct damon_sysfs_context *context)
968 {
969 	damon_sysfs_attrs_rm_dirs(context->attrs);
970 	kobject_put(&context->attrs->kobj);
971 	damon_sysfs_targets_rm_dirs(context->targets);
972 	kobject_put(&context->targets->kobj);
973 	damon_sysfs_schemes_rm_dirs(context->schemes);
974 	kobject_put(&context->schemes->kobj);
975 }
976 
avail_operations_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)977 static ssize_t avail_operations_show(struct kobject *kobj,
978 		struct kobj_attribute *attr, char *buf)
979 {
980 	int len = 0;
981 	int i;
982 
983 	for (i = 0; i < ARRAY_SIZE(damon_sysfs_ops_names); i++) {
984 		const struct damon_sysfs_ops_name *ops_name;
985 
986 		ops_name = &damon_sysfs_ops_names[i];
987 		if (!damon_is_registered_ops(ops_name->ops_id))
988 			continue;
989 		len += sysfs_emit_at(buf, len, "%s\n", ops_name->name);
990 	}
991 	return len;
992 }
993 
operations_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)994 static ssize_t operations_show(struct kobject *kobj,
995 		struct kobj_attribute *attr, char *buf)
996 {
997 	struct damon_sysfs_context *context = container_of(kobj,
998 			struct damon_sysfs_context, kobj);
999 	int i;
1000 
1001 	for (i = 0; i < ARRAY_SIZE(damon_sysfs_ops_names); i++) {
1002 		const struct damon_sysfs_ops_name *ops_name;
1003 
1004 		ops_name = &damon_sysfs_ops_names[i];
1005 		if (ops_name->ops_id == context->ops_id)
1006 			return sysfs_emit(buf, "%s\n", ops_name->name);
1007 	}
1008 	return -EINVAL;
1009 }
1010 
operations_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)1011 static ssize_t operations_store(struct kobject *kobj,
1012 		struct kobj_attribute *attr, const char *buf, size_t count)
1013 {
1014 	struct damon_sysfs_context *context = container_of(kobj,
1015 			struct damon_sysfs_context, kobj);
1016 	int i;
1017 
1018 	for (i = 0; i < ARRAY_SIZE(damon_sysfs_ops_names); i++) {
1019 		const struct damon_sysfs_ops_name *ops_name;
1020 
1021 		ops_name = &damon_sysfs_ops_names[i];
1022 		if (sysfs_streq(buf, ops_name->name)) {
1023 			context->ops_id = ops_name->ops_id;
1024 			return count;
1025 		}
1026 	}
1027 	return -EINVAL;
1028 }
1029 
addr_unit_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)1030 static ssize_t addr_unit_show(struct kobject *kobj,
1031 		struct kobj_attribute *attr, char *buf)
1032 {
1033 	struct damon_sysfs_context *context = container_of(kobj,
1034 			struct damon_sysfs_context, kobj);
1035 
1036 	return sysfs_emit(buf, "%lu\n", context->addr_unit);
1037 }
1038 
addr_unit_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)1039 static ssize_t addr_unit_store(struct kobject *kobj,
1040 		struct kobj_attribute *attr, const char *buf, size_t count)
1041 {
1042 	struct damon_sysfs_context *context = container_of(kobj,
1043 			struct damon_sysfs_context, kobj);
1044 	unsigned long input_addr_unit;
1045 	int err = kstrtoul(buf, 0, &input_addr_unit);
1046 
1047 	if (err)
1048 		return err;
1049 	if (!input_addr_unit)
1050 		return -EINVAL;
1051 
1052 	context->addr_unit = input_addr_unit;
1053 	return count;
1054 }
1055 
/*
 * kobject release callback: frees the context wrapper once its reference
 * count drops to zero.
 */
static void damon_sysfs_context_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_context, kobj));
}
1060 
/* sysfs files of a context directory, with their permission modes */
static struct kobj_attribute damon_sysfs_context_avail_operations_attr =
		__ATTR_RO_MODE(avail_operations, 0400);

static struct kobj_attribute damon_sysfs_context_operations_attr =
		__ATTR_RW_MODE(operations, 0600);

static struct kobj_attribute damon_sysfs_context_addr_unit_attr =
		__ATTR_RW_MODE(addr_unit, 0600);

static struct attribute *damon_sysfs_context_attrs[] = {
	&damon_sysfs_context_avail_operations_attr.attr,
	&damon_sysfs_context_operations_attr.attr,
	&damon_sysfs_context_addr_unit_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_context);

static const struct kobj_type damon_sysfs_context_ktype = {
	.release = damon_sysfs_context_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_context_groups,
};
1083 
1084 /*
1085  * contexts directory
1086  */
1087 
/* wrapper of the "contexts" directory, holding its child context dirs */
struct damon_sysfs_contexts {
	struct kobject kobj;
	struct damon_sysfs_context **contexts_arr;	/* child context dirs */
	int nr;		/* number of entries in contexts_arr */
};
1093 
damon_sysfs_contexts_alloc(void)1094 static struct damon_sysfs_contexts *damon_sysfs_contexts_alloc(void)
1095 {
1096 	return kzalloc(sizeof(struct damon_sysfs_contexts), GFP_KERNEL);
1097 }
1098 
/*
 * Remove and release all child context directories.  Safe to call when no
 * children exist (nr == 0 and contexts_arr == NULL).
 */
static void damon_sysfs_contexts_rm_dirs(struct damon_sysfs_contexts *contexts)
{
	struct damon_sysfs_context **contexts_arr = contexts->contexts_arr;
	int i;

	for (i = 0; i < contexts->nr; i++) {
		damon_sysfs_context_rm_dirs(contexts_arr[i]);
		kobject_put(&contexts_arr[i]->kobj);
	}
	contexts->nr = 0;
	kfree(contexts_arr);
	contexts->contexts_arr = NULL;
}
1112 
/*
 * Create @nr_contexts child context directories, replacing any existing
 * ones.  On failure, every child created so far is removed again, so the
 * directory is left empty.
 */
static int damon_sysfs_contexts_add_dirs(struct damon_sysfs_contexts *contexts,
		int nr_contexts)
{
	struct damon_sysfs_context **contexts_arr, *context;
	int err, i;

	damon_sysfs_contexts_rm_dirs(contexts);
	if (!nr_contexts)
		return 0;

	contexts_arr = kmalloc_array(nr_contexts, sizeof(*contexts_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!contexts_arr)
		return -ENOMEM;
	contexts->contexts_arr = contexts_arr;

	for (i = 0; i < nr_contexts; i++) {
		/* vaddr is the default operations set for new contexts */
		context = damon_sysfs_context_alloc(DAMON_OPS_VADDR);
		if (!context) {
			damon_sysfs_contexts_rm_dirs(contexts);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&context->kobj,
				&damon_sysfs_context_ktype, &contexts->kobj,
				"%d", i);
		if (err)
			goto out;

		err = damon_sysfs_context_add_dirs(context);
		if (err)
			goto out;

		contexts_arr[i] = context;
		contexts->nr++;
	}
	return 0;

out:
	/* the failed context is not in contexts_arr yet; put it separately */
	damon_sysfs_contexts_rm_dirs(contexts);
	kobject_put(&context->kobj);
	return err;
}
1156 
nr_contexts_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)1157 static ssize_t nr_contexts_show(struct kobject *kobj,
1158 		struct kobj_attribute *attr, char *buf)
1159 {
1160 	struct damon_sysfs_contexts *contexts = container_of(kobj,
1161 			struct damon_sysfs_contexts, kobj);
1162 
1163 	return sysfs_emit(buf, "%d\n", contexts->nr);
1164 }
1165 
nr_contexts_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)1166 static ssize_t nr_contexts_store(struct kobject *kobj,
1167 		struct kobj_attribute *attr, const char *buf, size_t count)
1168 {
1169 	struct damon_sysfs_contexts *contexts;
1170 	int nr, err;
1171 
1172 	err = kstrtoint(buf, 0, &nr);
1173 	if (err)
1174 		return err;
1175 	/* TODO: support multiple contexts per kdamond */
1176 	if (nr < 0 || 1 < nr)
1177 		return -EINVAL;
1178 
1179 	contexts = container_of(kobj, struct damon_sysfs_contexts, kobj);
1180 	if (!mutex_trylock(&damon_sysfs_lock))
1181 		return -EBUSY;
1182 	err = damon_sysfs_contexts_add_dirs(contexts, nr);
1183 	mutex_unlock(&damon_sysfs_lock);
1184 	if (err)
1185 		return err;
1186 
1187 	return count;
1188 }
1189 
/* kobject release callback: frees the "contexts" directory wrapper. */
static void damon_sysfs_contexts_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_contexts, kobj));
}
1194 
/* sysfs files of the "contexts" directory */
static struct kobj_attribute damon_sysfs_contexts_nr_attr
		= __ATTR_RW_MODE(nr_contexts, 0600);

static struct attribute *damon_sysfs_contexts_attrs[] = {
	&damon_sysfs_contexts_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_contexts);

static const struct kobj_type damon_sysfs_contexts_ktype = {
	.release = damon_sysfs_contexts_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_contexts_groups,
};
1209 
1210 /*
1211  * kdamond directory
1212  */
1213 
/* wrapper of a kdamond directory */
struct damon_sysfs_kdamond {
	struct kobject kobj;
	struct damon_sysfs_contexts *contexts;	/* "contexts" subdirectory */
	struct damon_ctx *damon_ctx;	/* running, or last-run, context */
	unsigned int refresh_ms;	/* stats auto-refresh period; 0: off */
};
1220 
damon_sysfs_kdamond_alloc(void)1221 static struct damon_sysfs_kdamond *damon_sysfs_kdamond_alloc(void)
1222 {
1223 	return kzalloc(sizeof(struct damon_sysfs_kdamond), GFP_KERNEL);
1224 }
1225 
damon_sysfs_kdamond_add_dirs(struct damon_sysfs_kdamond * kdamond)1226 static int damon_sysfs_kdamond_add_dirs(struct damon_sysfs_kdamond *kdamond)
1227 {
1228 	struct damon_sysfs_contexts *contexts;
1229 	int err;
1230 
1231 	contexts = damon_sysfs_contexts_alloc();
1232 	if (!contexts)
1233 		return -ENOMEM;
1234 
1235 	err = kobject_init_and_add(&contexts->kobj,
1236 			&damon_sysfs_contexts_ktype, &kdamond->kobj,
1237 			"contexts");
1238 	if (err) {
1239 		kobject_put(&contexts->kobj);
1240 		return err;
1241 	}
1242 	kdamond->contexts = contexts;
1243 
1244 	return err;
1245 }
1246 
/* Remove the "contexts" subdirectory and all of its children. */
static void damon_sysfs_kdamond_rm_dirs(struct damon_sysfs_kdamond *kdamond)
{
	damon_sysfs_contexts_rm_dirs(kdamond->contexts);
	kobject_put(&kdamond->contexts->kobj);
}
1252 
/*
 * enum damon_sysfs_cmd - Commands for a specific kdamond.
 *
 * Users write one of these command names to the kdamond's 'state' file.
 * Must be kept in sync with damon_sysfs_cmd_strs[] below.
 */
enum damon_sysfs_cmd {
	/* @DAMON_SYSFS_CMD_ON: Turn the kdamond on. */
	DAMON_SYSFS_CMD_ON,
	/* @DAMON_SYSFS_CMD_OFF: Turn the kdamond off. */
	DAMON_SYSFS_CMD_OFF,
	/* @DAMON_SYSFS_CMD_COMMIT: Update kdamond inputs. */
	DAMON_SYSFS_CMD_COMMIT,
	/*
	 * @DAMON_SYSFS_CMD_COMMIT_SCHEMES_QUOTA_GOALS: Commit the quota goals
	 * to DAMON.
	 */
	DAMON_SYSFS_CMD_COMMIT_SCHEMES_QUOTA_GOALS,
	/*
	 * @DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS: Update scheme stats sysfs
	 * files.
	 */
	DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS,
	/*
	 * @DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES: Update
	 * tried_regions/total_bytes sysfs files for each scheme.
	 */
	DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES,
	/*
	 * @DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS: Update schemes tried
	 * regions
	 */
	DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS,
	/*
	 * @DAMON_SYSFS_CMD_CLEAR_SCHEMES_TRIED_REGIONS: Clear schemes tried
	 * regions
	 */
	DAMON_SYSFS_CMD_CLEAR_SCHEMES_TRIED_REGIONS,
	/*
	 * @DAMON_SYSFS_CMD_UPDATE_SCHEMES_EFFECTIVE_QUOTAS: Update the
	 * effective size quota of the scheme in bytes.
	 */
	DAMON_SYSFS_CMD_UPDATE_SCHEMES_EFFECTIVE_QUOTAS,
	/*
	 * @DAMON_SYSFS_CMD_UPDATE_TUNED_INTERVALS: Update the tuned monitoring
	 * intervals.
	 */
	DAMON_SYSFS_CMD_UPDATE_TUNED_INTERVALS,
	/*
	 * @NR_DAMON_SYSFS_CMDS: Total number of DAMON sysfs commands.
	 */
	NR_DAMON_SYSFS_CMDS,
};

/* Should match with enum damon_sysfs_cmd */
static const char * const damon_sysfs_cmd_strs[] = {
	"on",
	"off",
	"commit",
	"commit_schemes_quota_goals",
	"update_schemes_stats",
	"update_schemes_tried_bytes",
	"update_schemes_tried_regions",
	"clear_schemes_tried_regions",
	"update_schemes_effective_quotas",
	"update_tuned_intervals",
};
1317 
/*
 * Show whether the kdamond is running ("on") or not ("off").
 *
 * Uses mutex_trylock() rather than mutex_lock() so a sysfs reader never
 * blocks behind a long-running command; contention yields -EBUSY.
 */
static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
			struct damon_sysfs_kdamond, kobj);
	struct damon_ctx *ctx;
	bool running = false;

	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;

	/* ctx may be a kept-around context of an already stopped kdamond */
	ctx = kdamond->damon_ctx;
	if (ctx)
		running = damon_is_running(ctx);

	mutex_unlock(&damon_sysfs_lock);

	return sysfs_emit(buf, "%s\n", running ?
			damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_ON] :
			damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_OFF]);
}
1339 
/*
 * Translate the monitoring attribute inputs of @sys_attrs (intervals,
 * intervals auto-tuning goal, and nr_regions range) into a struct
 * damon_attrs and apply it to @ctx via damon_set_attrs().
 */
static int damon_sysfs_set_attrs(struct damon_ctx *ctx,
		struct damon_sysfs_attrs *sys_attrs)
{
	struct damon_sysfs_intervals *sys_intervals = sys_attrs->intervals;
	struct damon_sysfs_intervals_goal *sys_goal =
		sys_intervals->intervals_goal;
	struct damon_sysfs_ul_range *sys_nr_regions =
		sys_attrs->nr_regions_range;
	struct damon_attrs attrs = {
		.sample_interval = sys_intervals->sample_us,
		.aggr_interval = sys_intervals->aggr_us,
		.intervals_goal = {
			.access_bp = sys_goal->access_bp,
			.aggrs = sys_goal->aggrs,
			.min_sample_us = sys_goal->min_sample_us,
			.max_sample_us = sys_goal->max_sample_us},
		.ops_update_interval = sys_intervals->update_us,
		.min_nr_regions = sys_nr_regions->min,
		.max_nr_regions = sys_nr_regions->max,
	};
	return damon_set_attrs(ctx, &attrs);
}
1362 
damon_sysfs_set_regions(struct damon_target * t,struct damon_sysfs_regions * sysfs_regions,unsigned long min_sz_region)1363 static int damon_sysfs_set_regions(struct damon_target *t,
1364 		struct damon_sysfs_regions *sysfs_regions,
1365 		unsigned long min_sz_region)
1366 {
1367 	struct damon_addr_range *ranges = kmalloc_array(sysfs_regions->nr,
1368 			sizeof(*ranges), GFP_KERNEL | __GFP_NOWARN);
1369 	int i, err = -EINVAL;
1370 
1371 	if (!ranges)
1372 		return -ENOMEM;
1373 	for (i = 0; i < sysfs_regions->nr; i++) {
1374 		struct damon_sysfs_region *sys_region =
1375 			sysfs_regions->regions_arr[i];
1376 
1377 		if (sys_region->ar.start > sys_region->ar.end)
1378 			goto out;
1379 
1380 		ranges[i].start = sys_region->ar.start;
1381 		ranges[i].end = sys_region->ar.end;
1382 		if (i == 0)
1383 			continue;
1384 		if (ranges[i - 1].end > ranges[i].start)
1385 			goto out;
1386 	}
1387 	err = damon_set_regions(t, ranges, sysfs_regions->nr, min_sz_region);
1388 out:
1389 	kfree(ranges);
1390 	return err;
1391 
1392 }
1393 
/*
 * Construct a damon_target from a sysfs target directory and add it to @ctx.
 *
 * For operations sets that monitor via pid, a reference to the target
 * process' pid is taken (find_get_pid()).  If the pid no longer exists,
 * -EINVAL is returned with the partially-set-up target still on @ctx; the
 * caller is responsible for destroying all targets in that case.
 */
static int damon_sysfs_add_target(struct damon_sysfs_target *sys_target,
		struct damon_ctx *ctx)
{
	struct damon_target *t = damon_new_target();

	if (!t)
		return -ENOMEM;
	/* add before setup so that error paths can rely on ctx teardown */
	damon_add_target(ctx, t);
	if (damon_target_has_pid(ctx)) {
		t->pid = find_get_pid(sys_target->pid);
		if (!t->pid)
			/* caller will destroy targets */
			return -EINVAL;
	}
	t->obsolete = sys_target->obsolete;
	return damon_sysfs_set_regions(t, sys_target->regions, ctx->min_sz_region);
}
1411 
damon_sysfs_add_targets(struct damon_ctx * ctx,struct damon_sysfs_targets * sysfs_targets)1412 static int damon_sysfs_add_targets(struct damon_ctx *ctx,
1413 		struct damon_sysfs_targets *sysfs_targets)
1414 {
1415 	int i, err;
1416 
1417 	/* Multiple physical address space monitoring targets makes no sense */
1418 	if (ctx->ops.id == DAMON_OPS_PADDR && sysfs_targets->nr > 1)
1419 		return -EINVAL;
1420 
1421 	for (i = 0; i < sysfs_targets->nr; i++) {
1422 		struct damon_sysfs_target *st = sysfs_targets->targets_arr[i];
1423 
1424 		err = damon_sysfs_add_target(st, ctx);
1425 		if (err)
1426 			return err;
1427 	}
1428 	return 0;
1429 }
1430 
/*
 * damon_sysfs_upd_schemes_stats() - Update schemes stats sysfs files.
 * @data:	The kobject wrapper that associated to the kdamond thread.
 *
 * This function reads the schemes stats of specific kdamond and update the
 * related values for sysfs files.  This function should be called from DAMON
 * worker thread, to safely access the DAMON contexts-internal data.  Caller
 * should also ensure holding ``damon_sysfs_lock``, and ->damon_ctx of @data
 * is not NULL but a valid pointer, to safely access DAMON sysfs variables.
 */
static int damon_sysfs_upd_schemes_stats(void *data)
{
	struct damon_sysfs_kdamond *kdamond = data;
	struct damon_ctx *ctx = kdamond->damon_ctx;

	damon_sysfs_schemes_update_stats(
			kdamond->contexts->contexts_arr[0]->schemes, ctx);
	return 0;
}
1450 
/* Return whether @kdamond has a DAMON context that is currently running. */
static inline bool damon_sysfs_kdamond_running(
		struct damon_sysfs_kdamond *kdamond)
{
	return kdamond->damon_ctx &&
		damon_is_running(kdamond->damon_ctx);
}
1457 
/*
 * Apply all sysfs inputs of @sys_ctx (operations set, address unit,
 * monitoring attributes, targets, and schemes) to @ctx.
 */
static int damon_sysfs_apply_inputs(struct damon_ctx *ctx,
		struct damon_sysfs_context *sys_ctx)
{
	int err;

	err = damon_select_ops(ctx, sys_ctx->ops_id);
	if (err)
		return err;
	ctx->addr_unit = sys_ctx->addr_unit;
	/* addr_unit is respected by only DAMON_OPS_PADDR */
	if (sys_ctx->ops_id == DAMON_OPS_PADDR)
		/* scale min region size so its byte size stays the same */
		ctx->min_sz_region = max(
				DAMON_MIN_REGION / sys_ctx->addr_unit, 1);
	err = damon_sysfs_set_attrs(ctx, sys_ctx->attrs);
	if (err)
		return err;
	err = damon_sysfs_add_targets(ctx, sys_ctx->targets);
	if (err)
		return err;
	return damon_sysfs_add_schemes(ctx, sys_ctx->schemes);
}
1479 
1480 static struct damon_ctx *damon_sysfs_build_ctx(
1481 		struct damon_sysfs_context *sys_ctx);
1482 
1483 /*
1484  * Return a new damon_ctx for testing new parameters to commit.
1485  */
damon_sysfs_new_test_ctx(struct damon_ctx * running_ctx)1486 static struct damon_ctx *damon_sysfs_new_test_ctx(
1487 		struct damon_ctx *running_ctx)
1488 {
1489 	struct damon_ctx *test_ctx;
1490 	int err;
1491 
1492 	test_ctx = damon_new_ctx();
1493 	if (!test_ctx)
1494 		return NULL;
1495 	err = damon_commit_ctx(test_ctx, running_ctx);
1496 	if (err) {
1497 		damon_destroy_ctx(test_ctx);
1498 		return NULL;
1499 	}
1500 	return test_ctx;
1501 }
1502 
1503 /*
1504  * damon_sysfs_commit_input() - Commit user inputs to a running kdamond.
1505  * @kdamond:	The kobject wrapper for the associated kdamond.
1506  *
1507  * Returns error if the sysfs input is wrong.
1508  */
damon_sysfs_commit_input(void * data)1509 static int damon_sysfs_commit_input(void *data)
1510 {
1511 	struct damon_sysfs_kdamond *kdamond = data;
1512 	struct damon_ctx *param_ctx, *test_ctx;
1513 	int err;
1514 
1515 	if (!damon_sysfs_kdamond_running(kdamond))
1516 		return -EINVAL;
1517 	/* TODO: Support multiple contexts per kdamond */
1518 	if (kdamond->contexts->nr != 1)
1519 		return -EINVAL;
1520 
1521 	param_ctx = damon_sysfs_build_ctx(kdamond->contexts->contexts_arr[0]);
1522 	if (IS_ERR(param_ctx))
1523 		return PTR_ERR(param_ctx);
1524 	test_ctx = damon_sysfs_new_test_ctx(kdamond->damon_ctx);
1525 	if (!test_ctx)
1526 		return -ENOMEM;
1527 	err = damon_commit_ctx(test_ctx, param_ctx);
1528 	if (err)
1529 		goto out;
1530 	err = damon_commit_ctx(kdamond->damon_ctx, param_ctx);
1531 out:
1532 	damon_destroy_ctx(test_ctx);
1533 	damon_destroy_ctx(param_ctx);
1534 	return err;
1535 }
1536 
damon_sysfs_commit_schemes_quota_goals(void * data)1537 static int damon_sysfs_commit_schemes_quota_goals(void *data)
1538 {
1539 	struct damon_sysfs_kdamond *sysfs_kdamond = data;
1540 	struct damon_ctx *ctx;
1541 	struct damon_sysfs_context *sysfs_ctx;
1542 
1543 	if (!damon_sysfs_kdamond_running(sysfs_kdamond))
1544 		return -EINVAL;
1545 	/* TODO: Support multiple contexts per kdamond */
1546 	if (sysfs_kdamond->contexts->nr != 1)
1547 		return -EINVAL;
1548 
1549 	ctx = sysfs_kdamond->damon_ctx;
1550 	sysfs_ctx = sysfs_kdamond->contexts->contexts_arr[0];
1551 	return damos_sysfs_set_quota_scores(sysfs_ctx->schemes, ctx);
1552 }
1553 
/*
 * damon_sysfs_upd_schemes_effective_quotas() - Update schemes effective quotas
 * sysfs files.
 * @data:	The kobject wrapper that associated to the kdamond thread.
 *
 * This function reads the schemes' effective quotas of specific kdamond and
 * update the related values for sysfs files.  This function should be called
 * from DAMON callbacks while holding ``damon_sysfs_lock``, to safely access
 * the DAMON contexts-internal data and DAMON sysfs variables.
 */
static int damon_sysfs_upd_schemes_effective_quotas(void *data)
{
	struct damon_sysfs_kdamond *kdamond = data;
	struct damon_ctx *ctx = kdamond->damon_ctx;

	damos_sysfs_update_effective_quotas(
			kdamond->contexts->contexts_arr[0]->schemes, ctx);
	return 0;
}
1573 
damon_sysfs_upd_tuned_intervals(void * data)1574 static int damon_sysfs_upd_tuned_intervals(void *data)
1575 {
1576 	struct damon_sysfs_kdamond *kdamond = data;
1577 	struct damon_ctx *ctx = kdamond->damon_ctx;
1578 
1579 	kdamond->contexts->contexts_arr[0]->attrs->intervals->sample_us =
1580 		ctx->attrs.sample_interval;
1581 	kdamond->contexts->contexts_arr[0]->attrs->intervals->aggr_us =
1582 		ctx->attrs.aggr_interval;
1583 	return 0;
1584 }
1585 
damon_sysfs_build_ctx(struct damon_sysfs_context * sys_ctx)1586 static struct damon_ctx *damon_sysfs_build_ctx(
1587 		struct damon_sysfs_context *sys_ctx)
1588 {
1589 	struct damon_ctx *ctx = damon_new_ctx();
1590 	int err;
1591 
1592 	if (!ctx)
1593 		return ERR_PTR(-ENOMEM);
1594 
1595 	err = damon_sysfs_apply_inputs(ctx, sys_ctx);
1596 	if (err) {
1597 		damon_destroy_ctx(ctx);
1598 		return ERR_PTR(err);
1599 	}
1600 
1601 	return ctx;
1602 }
1603 
/* jiffies at which the next periodic sysfs files refresh is due */
static unsigned long damon_sysfs_next_update_jiffies;
1605 
/*
 * Periodically refresh the stat-holding sysfs files of a kdamond, invoked
 * repeatedly from the kdamond worker thread via a repeating damon_call().
 *
 * Rate-limited by ->refresh_ms; zero disables the refresh.  If
 * damon_sysfs_lock is contended, the refresh is silently skipped until the
 * next period.
 */
static int damon_sysfs_repeat_call_fn(void *data)
{
	struct damon_sysfs_kdamond *sysfs_kdamond = data;

	if (!sysfs_kdamond->refresh_ms)
		return 0;
	if (time_before(jiffies, damon_sysfs_next_update_jiffies))
		return 0;
	damon_sysfs_next_update_jiffies = jiffies +
		msecs_to_jiffies(sysfs_kdamond->refresh_ms);

	if (!mutex_trylock(&damon_sysfs_lock))
		return 0;
	damon_sysfs_upd_tuned_intervals(sysfs_kdamond);
	damon_sysfs_upd_schemes_stats(sysfs_kdamond);
	damon_sysfs_upd_schemes_effective_quotas(sysfs_kdamond);
	mutex_unlock(&damon_sysfs_lock);
	return 0;
}
1625 
/*
 * Start DAMON for @kdamond using the current sysfs inputs.
 *
 * A context kept around from a previous run (for post-mortem result
 * reading) is destroyed first.  A fresh context is built, started, and a
 * repeating damon_call() is installed to refresh the sysfs output files
 * every ->refresh_ms.  The repeat_call_control is freed by DAMON itself on
 * cancel (dealloc_on_cancel), so it must be heap-allocated here.
 */
static int damon_sysfs_turn_damon_on(struct damon_sysfs_kdamond *kdamond)
{
	struct damon_ctx *ctx;
	struct damon_call_control *repeat_call_control;
	int err;

	if (damon_sysfs_kdamond_running(kdamond))
		return -EBUSY;
	/* TODO: support multiple contexts per kdamond */
	if (kdamond->contexts->nr != 1)
		return -EINVAL;

	/* free the context kept for showing the last run's results */
	if (kdamond->damon_ctx)
		damon_destroy_ctx(kdamond->damon_ctx);
	kdamond->damon_ctx = NULL;

	repeat_call_control = kmalloc(sizeof(*repeat_call_control),
			GFP_KERNEL);
	if (!repeat_call_control)
		return -ENOMEM;

	ctx = damon_sysfs_build_ctx(kdamond->contexts->contexts_arr[0]);
	if (IS_ERR(ctx)) {
		kfree(repeat_call_control);
		return PTR_ERR(ctx);
	}
	err = damon_start(&ctx, 1, false);
	if (err) {
		kfree(repeat_call_control);
		damon_destroy_ctx(ctx);
		return err;
	}
	kdamond->damon_ctx = ctx;

	damon_sysfs_next_update_jiffies =
		jiffies + msecs_to_jiffies(kdamond->refresh_ms);

	repeat_call_control->fn = damon_sysfs_repeat_call_fn;
	repeat_call_control->data = kdamond;
	repeat_call_control->repeat = true;
	repeat_call_control->dealloc_on_cancel = true;
	damon_call(ctx, repeat_call_control);
	return err;
}
1670 
/*
 * Stop the kdamond.  Returns -EINVAL if it was never started (no context).
 */
static int damon_sysfs_turn_damon_off(struct damon_sysfs_kdamond *kdamond)
{
	if (!kdamond->damon_ctx)
		return -EINVAL;
	return damon_stop(&kdamond->damon_ctx, 1);
	/*
	 * To allow users show final monitoring results of already turned-off
	 * DAMON, we free kdamond->damon_ctx in next
	 * damon_sysfs_turn_damon_on(), or kdamonds_nr_store()
	 */
}
1682 
damon_sysfs_damon_call(int (* fn)(void * data),struct damon_sysfs_kdamond * kdamond)1683 static int damon_sysfs_damon_call(int (*fn)(void *data),
1684 		struct damon_sysfs_kdamond *kdamond)
1685 {
1686 	struct damon_call_control call_control = {};
1687 	int err;
1688 
1689 	if (!kdamond->damon_ctx)
1690 		return -EINVAL;
1691 	call_control.fn = fn;
1692 	call_control.data = kdamond;
1693 	err = damon_call(kdamond->damon_ctx, &call_control);
1694 	return err ? err : call_control.return_code;
1695 }
1696 
/* arguments for the damos_walk() based tried-regions directories update */
struct damon_sysfs_schemes_walk_data {
	struct damon_sysfs_kdamond *sysfs_kdamond;
	bool total_bytes_only;	/* update only the total_bytes files */
};
1701 
/*
 * damos_walk() callback: populate the sysfs directory for one region that
 * scheme @s tried on.  @data is a damon_sysfs_schemes_walk_data.
 */
static void damon_sysfs_schemes_tried_regions_upd_one(void *data, struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *s, unsigned long sz_filter_passed)
{
	struct damon_sysfs_schemes_walk_data *walk_data = data;
	struct damon_sysfs_kdamond *sysfs_kdamond = walk_data->sysfs_kdamond;

	damos_sysfs_populate_region_dir(
			sysfs_kdamond->contexts->contexts_arr[0]->schemes,
			ctx, t, r, s, walk_data->total_bytes_only,
			sz_filter_passed);
}
1715 
/*
 * Clear previously populated tried-regions sysfs directories and repopulate
 * them by walking the regions each scheme has tried.  If @total_bytes_only,
 * only the total_bytes files are updated.  Returns -EINVAL if the kdamond
 * has no DAMON context.
 */
static int damon_sysfs_update_schemes_tried_regions(
		struct damon_sysfs_kdamond *sysfs_kdamond, bool total_bytes_only)
{
	struct damon_sysfs_schemes_walk_data walk_data = {
		.sysfs_kdamond = sysfs_kdamond,
		.total_bytes_only = total_bytes_only,
	};
	struct damos_walk_control control = {
		.walk_fn = damon_sysfs_schemes_tried_regions_upd_one,
		.data = &walk_data,
	};
	struct damon_ctx *ctx = sysfs_kdamond->damon_ctx;

	if (!ctx)
		return -EINVAL;

	damon_sysfs_schemes_clear_regions(
			sysfs_kdamond->contexts->contexts_arr[0]->schemes);
	return damos_walk(ctx, &control);
}
1736 
/*
 * damon_sysfs_handle_cmd() - Handle a command for a specific kdamond.
 * @cmd:	The command to handle.
 * @kdamond:	The kobject wrapper for the associated kdamond.
 *
 * This function handles a DAMON sysfs command for a kdamond.  Most commands
 * are executed in the kdamond worker thread via damon_sysfs_damon_call().
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int damon_sysfs_handle_cmd(enum damon_sysfs_cmd cmd,
		struct damon_sysfs_kdamond *kdamond)
{
	switch (cmd) {
	case DAMON_SYSFS_CMD_ON:
		return damon_sysfs_turn_damon_on(kdamond);
	case DAMON_SYSFS_CMD_OFF:
		return damon_sysfs_turn_damon_off(kdamond);
	case DAMON_SYSFS_CMD_COMMIT:
		return damon_sysfs_damon_call(
				damon_sysfs_commit_input, kdamond);
	case DAMON_SYSFS_CMD_COMMIT_SCHEMES_QUOTA_GOALS:
		return damon_sysfs_damon_call(
				damon_sysfs_commit_schemes_quota_goals,
				kdamond);
	case DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS:
		return damon_sysfs_damon_call(
				damon_sysfs_upd_schemes_stats, kdamond);
	case DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES:
		return damon_sysfs_update_schemes_tried_regions(kdamond, true);
	case DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS:
		return damon_sysfs_update_schemes_tried_regions(kdamond, false);
	case DAMON_SYSFS_CMD_CLEAR_SCHEMES_TRIED_REGIONS:
		return damon_sysfs_schemes_clear_regions(
			kdamond->contexts->contexts_arr[0]->schemes);
	case DAMON_SYSFS_CMD_UPDATE_SCHEMES_EFFECTIVE_QUOTAS:
		return damon_sysfs_damon_call(
				damon_sysfs_upd_schemes_effective_quotas,
				kdamond);
	case DAMON_SYSFS_CMD_UPDATE_TUNED_INTERVALS:
		return damon_sysfs_damon_call(
				damon_sysfs_upd_tuned_intervals, kdamond);
	default:
		return -EINVAL;
	}
}
1782 
/*
 * Handle a write to the 'state' file: look the written string up in
 * damon_sysfs_cmd_strs[] and execute the matching command.  Returns -EBUSY
 * if damon_sysfs_lock is contended, -EINVAL for an unknown command, the
 * command's error on failure, or @count on success.
 */
static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
			struct damon_sysfs_kdamond, kobj);
	enum damon_sysfs_cmd cmd;
	ssize_t ret = -EINVAL;

	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	for (cmd = 0; cmd < NR_DAMON_SYSFS_CMDS; cmd++) {
		if (sysfs_streq(buf, damon_sysfs_cmd_strs[cmd])) {
			ret = damon_sysfs_handle_cmd(cmd, kdamond);
			break;
		}
	}
	mutex_unlock(&damon_sysfs_lock);
	if (!ret)
		ret = count;
	return ret;
}
1804 
/*
 * Show the pid of the kdamond worker thread, or -1 if it is not running.
 *
 * Lock ordering: damon_sysfs_lock is taken before ctx->kdamond_lock, which
 * protects ctx->kdamond itself.
 */
static ssize_t pid_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
			struct damon_sysfs_kdamond, kobj);
	struct damon_ctx *ctx;
	int pid = -1;

	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	ctx = kdamond->damon_ctx;
	if (!ctx)
		goto out;

	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond)
		pid = ctx->kdamond->pid;
	mutex_unlock(&ctx->kdamond_lock);
out:
	mutex_unlock(&damon_sysfs_lock);
	return sysfs_emit(buf, "%d\n", pid);
}
1827 
refresh_ms_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)1828 static ssize_t refresh_ms_show(struct kobject *kobj,
1829 		struct kobj_attribute *attr, char *buf)
1830 {
1831 	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
1832 			struct damon_sysfs_kdamond, kobj);
1833 
1834 	return sysfs_emit(buf, "%u\n", kdamond->refresh_ms);
1835 }
1836 
refresh_ms_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)1837 static ssize_t refresh_ms_store(struct kobject *kobj,
1838 		struct kobj_attribute *attr, const char *buf, size_t count)
1839 {
1840 	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
1841 			struct damon_sysfs_kdamond, kobj);
1842 	unsigned int nr;
1843 	int err = kstrtouint(buf, 0, &nr);
1844 
1845 	if (err)
1846 		return err;
1847 
1848 	kdamond->refresh_ms = nr;
1849 	return count;
1850 }
1851 
/*
 * kobject release callback: frees the kdamond wrapper, together with any
 * DAMON context kept around for showing the final monitoring results.
 */
static void damon_sysfs_kdamond_release(struct kobject *kobj)
{
	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
			struct damon_sysfs_kdamond, kobj);

	if (kdamond->damon_ctx)
		damon_destroy_ctx(kdamond->damon_ctx);
	kfree(kdamond);
}
1861 
/* sysfs files of a kdamond directory, with their permission modes */
static struct kobj_attribute damon_sysfs_kdamond_state_attr =
		__ATTR_RW_MODE(state, 0600);

static struct kobj_attribute damon_sysfs_kdamond_pid_attr =
		__ATTR_RO_MODE(pid, 0400);

static struct kobj_attribute damon_sysfs_kdamond_refresh_ms_attr =
		__ATTR_RW_MODE(refresh_ms, 0600);

static struct attribute *damon_sysfs_kdamond_attrs[] = {
	&damon_sysfs_kdamond_state_attr.attr,
	&damon_sysfs_kdamond_pid_attr.attr,
	&damon_sysfs_kdamond_refresh_ms_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_kdamond);

static const struct kobj_type damon_sysfs_kdamond_ktype = {
	.release = damon_sysfs_kdamond_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_kdamond_groups,
};
1884 
1885 /*
1886  * kdamonds directory
1887  */
1888 
/* wrapper of the "kdamonds" directory, holding its child kdamond dirs */
struct damon_sysfs_kdamonds {
	struct kobject kobj;
	struct damon_sysfs_kdamond **kdamonds_arr;	/* child kdamond dirs */
	int nr;		/* number of entries in kdamonds_arr */
};
1894 
damon_sysfs_kdamonds_alloc(void)1895 static struct damon_sysfs_kdamonds *damon_sysfs_kdamonds_alloc(void)
1896 {
1897 	return kzalloc(sizeof(struct damon_sysfs_kdamonds), GFP_KERNEL);
1898 }
1899 
damon_sysfs_kdamonds_rm_dirs(struct damon_sysfs_kdamonds * kdamonds)1900 static void damon_sysfs_kdamonds_rm_dirs(struct damon_sysfs_kdamonds *kdamonds)
1901 {
1902 	struct damon_sysfs_kdamond **kdamonds_arr = kdamonds->kdamonds_arr;
1903 	int i;
1904 
1905 	for (i = 0; i < kdamonds->nr; i++) {
1906 		damon_sysfs_kdamond_rm_dirs(kdamonds_arr[i]);
1907 		kobject_put(&kdamonds_arr[i]->kobj);
1908 	}
1909 	kdamonds->nr = 0;
1910 	kfree(kdamonds_arr);
1911 	kdamonds->kdamonds_arr = NULL;
1912 }
1913 
damon_sysfs_kdamonds_busy(struct damon_sysfs_kdamond ** kdamonds,int nr_kdamonds)1914 static bool damon_sysfs_kdamonds_busy(struct damon_sysfs_kdamond **kdamonds,
1915 		int nr_kdamonds)
1916 {
1917 	int i;
1918 
1919 	for (i = 0; i < nr_kdamonds; i++) {
1920 		if (damon_sysfs_kdamond_running(kdamonds[i]))
1921 			return true;
1922 	}
1923 
1924 	return false;
1925 }
1926 
/*
 * Re-populate the "kdamonds" directory with 'nr_kdamonds' numbered child
 * directories.  Existing children are removed first.  Returns 0 on success,
 * -EBUSY if any existing kdamond is still running, or -ENOMEM / the
 * kobject registration error on failure.
 */
static int damon_sysfs_kdamonds_add_dirs(struct damon_sysfs_kdamonds *kdamonds,
		int nr_kdamonds)
{
	struct damon_sysfs_kdamond **kdamonds_arr, *kdamond;
	int err, i;

	if (damon_sysfs_kdamonds_busy(kdamonds->kdamonds_arr, kdamonds->nr))
		return -EBUSY;

	damon_sysfs_kdamonds_rm_dirs(kdamonds);
	if (!nr_kdamonds)
		return 0;

	kdamonds_arr = kmalloc_array(nr_kdamonds, sizeof(*kdamonds_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!kdamonds_arr)
		return -ENOMEM;
	kdamonds->kdamonds_arr = kdamonds_arr;

	for (i = 0; i < nr_kdamonds; i++) {
		kdamond = damon_sysfs_kdamond_alloc();
		if (!kdamond) {
			/* nothing kobject-initialized yet for this slot */
			damon_sysfs_kdamonds_rm_dirs(kdamonds);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&kdamond->kobj,
				&damon_sysfs_kdamond_ktype, &kdamonds->kobj,
				"%d", i);
		if (err)
			goto out;

		err = damon_sysfs_kdamond_add_dirs(kdamond);
		if (err)
			goto out;

		/*
		 * 'nr' counts only fully constructed children, so the cleanup
		 * paths never touch the partially constructed one.
		 */
		kdamonds_arr[i] = kdamond;
		kdamonds->nr++;
	}
	return 0;

out:
	/*
	 * Remove the fully added children first, then drop the reference of
	 * the partially constructed one.  Its kobject was initialized by
	 * kobject_init_and_add() (even on failure), so kobject_put() frees
	 * it via damon_sysfs_kdamond_release().
	 */
	damon_sysfs_kdamonds_rm_dirs(kdamonds);
	kobject_put(&kdamond->kobj);
	return err;
}
1973 
nr_kdamonds_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)1974 static ssize_t nr_kdamonds_show(struct kobject *kobj,
1975 		struct kobj_attribute *attr, char *buf)
1976 {
1977 	struct damon_sysfs_kdamonds *kdamonds = container_of(kobj,
1978 			struct damon_sysfs_kdamonds, kobj);
1979 
1980 	return sysfs_emit(buf, "%d\n", kdamonds->nr);
1981 }
1982 
nr_kdamonds_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)1983 static ssize_t nr_kdamonds_store(struct kobject *kobj,
1984 		struct kobj_attribute *attr, const char *buf, size_t count)
1985 {
1986 	struct damon_sysfs_kdamonds *kdamonds;
1987 	int nr, err;
1988 
1989 	err = kstrtoint(buf, 0, &nr);
1990 	if (err)
1991 		return err;
1992 	if (nr < 0)
1993 		return -EINVAL;
1994 
1995 	kdamonds = container_of(kobj, struct damon_sysfs_kdamonds, kobj);
1996 
1997 	if (!mutex_trylock(&damon_sysfs_lock))
1998 		return -EBUSY;
1999 	err = damon_sysfs_kdamonds_add_dirs(kdamonds, nr);
2000 	mutex_unlock(&damon_sysfs_lock);
2001 	if (err)
2002 		return err;
2003 
2004 	return count;
2005 }
2006 
damon_sysfs_kdamonds_release(struct kobject * kobj)2007 static void damon_sysfs_kdamonds_release(struct kobject *kobj)
2008 {
2009 	kfree(container_of(kobj, struct damon_sysfs_kdamonds, kobj));
2010 }
2011 
/* sysfs files of the "kdamonds" directory */
static struct kobj_attribute damon_sysfs_kdamonds_nr_attr =
		__ATTR_RW_MODE(nr_kdamonds, 0600);

static struct attribute *damon_sysfs_kdamonds_attrs[] = {
	&damon_sysfs_kdamonds_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_kdamonds);

/* kobject type for the "kdamonds" directory */
static const struct kobj_type damon_sysfs_kdamonds_ktype = {
	.release = damon_sysfs_kdamonds_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_kdamonds_groups,
};
2026 
2027 /*
2028  * damon user interface directory
2029  */
2030 
/* state of the top level DAMON sysfs user interface ("admin") directory */
struct damon_sysfs_ui_dir {
	struct kobject kobj;
	struct damon_sysfs_kdamonds *kdamonds;
};
2035 
damon_sysfs_ui_dir_alloc(void)2036 static struct damon_sysfs_ui_dir *damon_sysfs_ui_dir_alloc(void)
2037 {
2038 	return kzalloc(sizeof(struct damon_sysfs_ui_dir), GFP_KERNEL);
2039 }
2040 
damon_sysfs_ui_dir_add_dirs(struct damon_sysfs_ui_dir * ui_dir)2041 static int damon_sysfs_ui_dir_add_dirs(struct damon_sysfs_ui_dir *ui_dir)
2042 {
2043 	struct damon_sysfs_kdamonds *kdamonds;
2044 	int err;
2045 
2046 	kdamonds = damon_sysfs_kdamonds_alloc();
2047 	if (!kdamonds)
2048 		return -ENOMEM;
2049 
2050 	err = kobject_init_and_add(&kdamonds->kobj,
2051 			&damon_sysfs_kdamonds_ktype, &ui_dir->kobj,
2052 			"kdamonds");
2053 	if (err) {
2054 		kobject_put(&kdamonds->kobj);
2055 		return err;
2056 	}
2057 	ui_dir->kdamonds = kdamonds;
2058 	return err;
2059 }
2060 
damon_sysfs_ui_dir_release(struct kobject * kobj)2061 static void damon_sysfs_ui_dir_release(struct kobject *kobj)
2062 {
2063 	kfree(container_of(kobj, struct damon_sysfs_ui_dir, kobj));
2064 }
2065 
/* the user interface directory has no files of its own */
static struct attribute *damon_sysfs_ui_dir_attrs[] = {
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_ui_dir);

/* kobject type for the user interface directory */
static const struct kobj_type damon_sysfs_ui_dir_ktype = {
	.release = damon_sysfs_ui_dir_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_ui_dir_groups,
};
2076 
/*
 * Build the DAMON sysfs interface hierarchy ("damon" under mm_kobj, then
 * "admin" and its "kdamonds" sub-directory) at boot.
 */
static int __init damon_sysfs_init(void)
{
	struct kobject *damon_sysfs_root;
	struct damon_sysfs_ui_dir *admin;
	int err;

	/* the root stays alive for the system's lifetime; its reference is
	 * intentionally never dropped on success */
	damon_sysfs_root = kobject_create_and_add("damon", mm_kobj);
	if (!damon_sysfs_root)
		return -ENOMEM;

	admin = damon_sysfs_ui_dir_alloc();
	if (!admin) {
		kobject_put(damon_sysfs_root);
		return -ENOMEM;
	}
	err = kobject_init_and_add(&admin->kobj, &damon_sysfs_ui_dir_ktype,
			damon_sysfs_root, "admin");
	if (err)
		goto out;
	err = damon_sysfs_ui_dir_add_dirs(admin);
	if (err)
		goto out;
	return 0;

out:
	/*
	 * kobject_init_and_add() initializes the kobject even when it fails,
	 * so kobject_put() is the correct cleanup on both error paths; it
	 * frees 'admin' via damon_sysfs_ui_dir_release().
	 */
	kobject_put(&admin->kobj);
	kobject_put(damon_sysfs_root);
	return err;
}
subsys_initcall(damon_sysfs_init);
2107 
2108 #include "tests/sysfs-kunit.h"
2109