xref: /linux/mm/damon/dbgfs.c (revision 3a39d672e7f48b8d6b91a09afa4b55352773b4b5)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * DAMON Debugfs Interface
4  *
5  * Author: SeongJae Park <sj@kernel.org>
6  */
7 
8 #define pr_fmt(fmt) "damon-dbgfs: " fmt
9 
10 #include <linux/damon.h>
11 #include <linux/debugfs.h>
12 #include <linux/file.h>
13 #include <linux/mm.h>
14 #include <linux/module.h>
15 #include <linux/page_idle.h>
16 #include <linux/slab.h>
17 
18 #define DAMON_DBGFS_DEPRECATION_NOTICE					\
19 	"DAMON debugfs interface is deprecated, so users should move "	\
20 	"to DAMON_SYSFS. If you cannot, please report your usecase to "	\
21 	"damon@lists.linux.dev and linux-mm@kvack.org.\n"
22 
23 static struct damon_ctx **dbgfs_ctxs;
24 static int dbgfs_nr_ctxs;
25 static struct dentry **dbgfs_dirs;
26 static DEFINE_MUTEX(damon_dbgfs_lock);
27 
/* Warn, once per boot, that the DAMON debugfs interface is deprecated. */
static void damon_dbgfs_warn_deprecation(void)
{
	pr_warn_once(DAMON_DBGFS_DEPRECATION_NOTICE);
}
32 
33 /*
34  * Returns non-empty string on success, negative error code otherwise.
35  */
user_input_str(const char __user * buf,size_t count,loff_t * ppos)36 static char *user_input_str(const char __user *buf, size_t count, loff_t *ppos)
37 {
38 	char *kbuf;
39 	ssize_t ret;
40 
41 	/* We do not accept continuous write */
42 	if (*ppos)
43 		return ERR_PTR(-EINVAL);
44 
45 	kbuf = kmalloc(count + 1, GFP_KERNEL | __GFP_NOWARN);
46 	if (!kbuf)
47 		return ERR_PTR(-ENOMEM);
48 
49 	ret = simple_write_to_buffer(kbuf, count + 1, ppos, buf, count);
50 	if (ret != count) {
51 		kfree(kbuf);
52 		return ERR_PTR(-EIO);
53 	}
54 	kbuf[ret] = '\0';
55 
56 	return kbuf;
57 }
58 
dbgfs_attrs_read(struct file * file,char __user * buf,size_t count,loff_t * ppos)59 static ssize_t dbgfs_attrs_read(struct file *file,
60 		char __user *buf, size_t count, loff_t *ppos)
61 {
62 	struct damon_ctx *ctx = file->private_data;
63 	char kbuf[128];
64 	int ret;
65 
66 	mutex_lock(&ctx->kdamond_lock);
67 	ret = scnprintf(kbuf, ARRAY_SIZE(kbuf), "%lu %lu %lu %lu %lu\n",
68 			ctx->attrs.sample_interval, ctx->attrs.aggr_interval,
69 			ctx->attrs.ops_update_interval,
70 			ctx->attrs.min_nr_regions, ctx->attrs.max_nr_regions);
71 	mutex_unlock(&ctx->kdamond_lock);
72 
73 	return simple_read_from_buffer(buf, count, ppos, kbuf, ret);
74 }
75 
/*
 * Parse five monitoring attributes from a single user write and apply them
 * to the context.  Returns @count on success, negative error code otherwise.
 */
static ssize_t dbgfs_attrs_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	struct damon_attrs attrs;
	char *kbuf;
	ssize_t ret;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	/* All five fields must be present, in this exact order. */
	if (sscanf(kbuf, "%lu %lu %lu %lu %lu",
				&attrs.sample_interval, &attrs.aggr_interval,
				&attrs.ops_update_interval,
				&attrs.min_nr_regions,
				&attrs.max_nr_regions) != 5) {
		ret = -EINVAL;
		goto out;
	}

	mutex_lock(&ctx->kdamond_lock);
	/* Attributes may not change while the kdamond thread is running. */
	if (ctx->kdamond) {
		ret = -EBUSY;
		goto unlock_out;
	}

	ret = damon_set_attrs(ctx, &attrs);
	if (!ret)
		ret = count;
unlock_out:
	mutex_unlock(&ctx->kdamond_lock);
out:
	kfree(kbuf);
	return ret;
}
112 
/*
 * Return corresponding dbgfs' scheme action value (int) for the given
 * damos_action if the given damos_action value is valid and supported by
 * dbgfs, negative error code otherwise.
 *
 * The numeric values are part of the debugfs ABI and must stay in sync with
 * dbgfs_scheme_action_to_damos_action() below.
 */
static int damos_action_to_dbgfs_scheme_action(enum damos_action action)
{
	switch (action) {
	case DAMOS_WILLNEED:
		return 0;
	case DAMOS_COLD:
		return 1;
	case DAMOS_PAGEOUT:
		return 2;
	case DAMOS_HUGEPAGE:
		return 3;
	case DAMOS_NOHUGEPAGE:
		return 4;
	case DAMOS_STAT:
		return 5;
	default:
		/* Actions added after this interface was deprecated. */
		return -EINVAL;
	}
}
137 
/*
 * Print every scheme of @c into @buf (at most @len bytes), one scheme per
 * line.  The field order is the debugfs schemes-file ABI: access pattern,
 * action, quota, watermarks, then the five stat counters.
 *
 * Returns the number of bytes written, or -ENOMEM if @buf is too small.
 */
static ssize_t sprint_schemes(struct damon_ctx *c, char *buf, ssize_t len)
{
	struct damos *s;
	int written = 0;
	int rc;

	damon_for_each_scheme(s, c) {
		rc = scnprintf(&buf[written], len - written,
				"%lu %lu %u %u %u %u %d %lu %lu %lu %u %u %u %d %lu %lu %lu %lu %lu %lu %lu %lu %lu\n",
				s->pattern.min_sz_region,
				s->pattern.max_sz_region,
				s->pattern.min_nr_accesses,
				s->pattern.max_nr_accesses,
				s->pattern.min_age_region,
				s->pattern.max_age_region,
				damos_action_to_dbgfs_scheme_action(s->action),
				s->quota.ms, s->quota.sz,
				s->quota.reset_interval,
				s->quota.weight_sz,
				s->quota.weight_nr_accesses,
				s->quota.weight_age,
				s->wmarks.metric, s->wmarks.interval,
				s->wmarks.high, s->wmarks.mid, s->wmarks.low,
				s->stat.nr_tried, s->stat.sz_tried,
				s->stat.nr_applied, s->stat.sz_applied,
				s->stat.qt_exceeds);
		/* scnprintf() returning 0 means the buffer is exhausted. */
		if (!rc)
			return -ENOMEM;

		written += rc;
	}
	return written;
}
171 
dbgfs_schemes_read(struct file * file,char __user * buf,size_t count,loff_t * ppos)172 static ssize_t dbgfs_schemes_read(struct file *file, char __user *buf,
173 		size_t count, loff_t *ppos)
174 {
175 	struct damon_ctx *ctx = file->private_data;
176 	char *kbuf;
177 	ssize_t len;
178 
179 	kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
180 	if (!kbuf)
181 		return -ENOMEM;
182 
183 	mutex_lock(&ctx->kdamond_lock);
184 	len = sprint_schemes(ctx, kbuf, count);
185 	mutex_unlock(&ctx->kdamond_lock);
186 	if (len < 0)
187 		goto out;
188 	len = simple_read_from_buffer(buf, count, ppos, kbuf, len);
189 
190 out:
191 	kfree(kbuf);
192 	return len;
193 }
194 
/* Free each scheme in @schemes, then the pointer array itself. */
static void free_schemes_arr(struct damos **schemes, ssize_t nr_schemes)
{
	struct damos **sp = schemes;
	struct damos **end = schemes + nr_schemes;

	for (; sp < end; sp++)
		kfree(*sp);
	kfree(schemes);
}
203 
/*
 * Return corresponding damos_action for the given dbgfs input for a scheme
 * action if the input is valid, negative error code otherwise.
 *
 * Inverse of damos_action_to_dbgfs_scheme_action(); the integer values are
 * part of the debugfs ABI.
 */
static enum damos_action dbgfs_scheme_action_to_damos_action(int dbgfs_action)
{
	switch (dbgfs_action) {
	case 0:
		return DAMOS_WILLNEED;
	case 1:
		return DAMOS_COLD;
	case 2:
		return DAMOS_PAGEOUT;
	case 3:
		return DAMOS_HUGEPAGE;
	case 4:
		return DAMOS_NOHUGEPAGE;
	case 5:
		return DAMOS_STAT;
	default:
		/* Callers check "(int)action < 0" to detect this error. */
		return -EINVAL;
	}
}
227 
/*
 * Converts a string into an array of struct damos pointers
 *
 * Returns an array of struct damos pointers that converted if the conversion
 * success, or NULL otherwise.  On success, *@nr_schemes is the number of
 * valid entries; the caller owns the array and each scheme (see
 * free_schemes_arr()).
 */
static struct damos **str_to_schemes(const char *str, ssize_t len,
				ssize_t *nr_schemes)
{
	struct damos *scheme, **schemes;
	const int max_nr_schemes = 256;
	int pos = 0, parsed, ret;
	unsigned int action_input;
	enum damos_action action;

	schemes = kmalloc_array(max_nr_schemes, sizeof(scheme),
			GFP_KERNEL);
	if (!schemes)
		return NULL;

	*nr_schemes = 0;
	while (pos < len && *nr_schemes < max_nr_schemes) {
		struct damos_access_pattern pattern = {};
		struct damos_quota quota = {};
		struct damos_watermarks wmarks;

		/* 18 fields per line; %n records how far we consumed. */
		ret = sscanf(&str[pos],
				"%lu %lu %u %u %u %u %u %lu %lu %lu %u %u %u %u %lu %lu %lu %lu%n",
				&pattern.min_sz_region, &pattern.max_sz_region,
				&pattern.min_nr_accesses,
				&pattern.max_nr_accesses,
				&pattern.min_age_region,
				&pattern.max_age_region,
				&action_input, &quota.ms,
				&quota.sz, &quota.reset_interval,
				&quota.weight_sz, &quota.weight_nr_accesses,
				&quota.weight_age, &wmarks.metric,
				&wmarks.interval, &wmarks.high, &wmarks.mid,
				&wmarks.low, &parsed);
		/* A short match ends the input; it is not an error. */
		if (ret != 18)
			break;
		action = dbgfs_scheme_action_to_damos_action(action_input);
		if ((int)action < 0)
			goto fail;

		/* Each min must not exceed its max. */
		if (pattern.min_sz_region > pattern.max_sz_region ||
		    pattern.min_nr_accesses > pattern.max_nr_accesses ||
		    pattern.min_age_region > pattern.max_age_region)
			goto fail;

		/* Watermarks must be ordered: high >= mid >= low. */
		if (wmarks.high < wmarks.mid || wmarks.high < wmarks.low ||
		    wmarks.mid <  wmarks.low)
			goto fail;

		pos += parsed;
		scheme = damon_new_scheme(&pattern, action, 0, &quota,
				&wmarks, NUMA_NO_NODE);
		if (!scheme)
			goto fail;

		schemes[*nr_schemes] = scheme;
		*nr_schemes += 1;
	}
	return schemes;
fail:
	/* Drop everything parsed so far; caller gets NULL. */
	free_schemes_arr(schemes, *nr_schemes);
	return NULL;
}
296 
/*
 * Write handler for the "schemes" file: replace the context's schemes with
 * those parsed from the user input.
 */
static ssize_t dbgfs_schemes_write(struct file *file, const char __user *buf,
		size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	char *kbuf;
	struct damos **schemes;
	ssize_t nr_schemes = 0, ret;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	schemes = str_to_schemes(kbuf, count, &nr_schemes);
	if (!schemes) {
		ret = -EINVAL;
		goto out;
	}

	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond) {
		ret = -EBUSY;
		goto unlock_out;
	}

	damon_set_schemes(ctx, schemes, nr_schemes);
	ret = count;
	/*
	 * Ownership of the schemes moved to @ctx; zero the count so
	 * free_schemes_arr() below releases only the pointer array.
	 */
	nr_schemes = 0;

unlock_out:
	mutex_unlock(&ctx->kdamond_lock);
	free_schemes_arr(schemes, nr_schemes);
out:
	kfree(kbuf);
	return ret;
}
332 
/*
 * Print the id of every monitoring target of @ctx into @buf as one
 * space-separated, newline-terminated line.
 *
 * Returns the number of bytes written, or -ENOMEM if @buf is too small.
 */
static ssize_t sprint_target_ids(struct damon_ctx *ctx, char *buf, ssize_t len)
{
	struct damon_target *t;
	int id;
	int written = 0;
	int rc;

	damon_for_each_target(t, ctx) {
		if (damon_target_has_pid(ctx))
			/* Show pid numbers to debugfs users */
			id = pid_vnr(t->pid);
		else
			/* Show 42 for physical address space, just for fun */
			id = 42;

		rc = scnprintf(&buf[written], len - written, "%d ", id);
		if (!rc)
			return -ENOMEM;
		written += rc;
	}
	/* Replace the trailing space (if any) with a newline. */
	if (written)
		written -= 1;
	written += scnprintf(&buf[written], len - written, "\n");
	return written;
}
358 
dbgfs_target_ids_read(struct file * file,char __user * buf,size_t count,loff_t * ppos)359 static ssize_t dbgfs_target_ids_read(struct file *file,
360 		char __user *buf, size_t count, loff_t *ppos)
361 {
362 	struct damon_ctx *ctx = file->private_data;
363 	ssize_t len;
364 	char ids_buf[320];
365 
366 	mutex_lock(&ctx->kdamond_lock);
367 	len = sprint_target_ids(ctx, ids_buf, 320);
368 	mutex_unlock(&ctx->kdamond_lock);
369 	if (len < 0)
370 		return len;
371 
372 	return simple_read_from_buffer(buf, count, ppos, ids_buf, len);
373 }
374 
375 /*
376  * Converts a string into an integers array
377  *
378  * Returns an array of integers array if the conversion success, or NULL
379  * otherwise.
380  */
str_to_ints(const char * str,ssize_t len,ssize_t * nr_ints)381 static int *str_to_ints(const char *str, ssize_t len, ssize_t *nr_ints)
382 {
383 	int *array;
384 	const int max_nr_ints = 32;
385 	int nr;
386 	int pos = 0, parsed, ret;
387 
388 	*nr_ints = 0;
389 	array = kmalloc_array(max_nr_ints, sizeof(*array), GFP_KERNEL);
390 	if (!array)
391 		return NULL;
392 	while (*nr_ints < max_nr_ints && pos < len) {
393 		ret = sscanf(&str[pos], "%d%n", &nr, &parsed);
394 		pos += parsed;
395 		if (ret != 1)
396 			break;
397 		array[*nr_ints] = nr;
398 		*nr_ints += 1;
399 	}
400 
401 	return array;
402 }
403 
/* Drop the reference of each of the first @nr_pids entries of @pids. */
static void dbgfs_put_pids(struct pid **pids, int nr_pids)
{
	int idx;

	for (idx = 0; idx < nr_pids; idx++)
		put_pid(pids[idx]);
}
411 
/*
 * Converts a string into an struct pid pointers array
 *
 * Returns an array of struct pid pointers if the conversion success, or NULL
 * otherwise.  Each returned pid holds a reference that the caller must
 * eventually drop (see dbgfs_put_pids()); *@nr_pids is the entry count.
 */
static struct pid **str_to_pids(const char *str, ssize_t len, ssize_t *nr_pids)
{
	int *ints;
	ssize_t nr_ints;
	struct pid **pids;

	*nr_pids = 0;

	ints = str_to_ints(str, len, &nr_ints);
	if (!ints)
		return NULL;

	pids = kmalloc_array(nr_ints, sizeof(*pids), GFP_KERNEL);
	if (!pids)
		goto out;

	for (; *nr_pids < nr_ints; (*nr_pids)++) {
		/* find_get_pid() takes a reference on the returned pid. */
		pids[*nr_pids] = find_get_pid(ints[*nr_pids]);
		if (!pids[*nr_pids]) {
			/* Unknown pid: roll back all references taken so far. */
			dbgfs_put_pids(pids, *nr_pids);
			kfree(ints);
			kfree(pids);
			return NULL;
		}
	}

out:
	/* The intermediate integer array is no longer needed. */
	kfree(ints);
	return pids;
}
448 
/*
 * dbgfs_set_targets() - Set monitoring targets.
 * @ctx:	monitoring context
 * @nr_targets:	number of targets
 * @pids:	array of target pids (size is same to @nr_targets)
 *
 * This function should not be called while the kdamond is running.  @pids is
 * ignored if the context is not configured to have pid in each target.  On
 * failure, reference counts of all pids in @pids are decremented.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int dbgfs_set_targets(struct damon_ctx *ctx, ssize_t nr_targets,
		struct pid **pids)
{
	ssize_t i;
	struct damon_target *t, *next;

	/* Remove previously configured targets, dropping their pid refs. */
	damon_for_each_target_safe(t, next, ctx) {
		if (damon_target_has_pid(ctx))
			put_pid(t->pid);
		damon_destroy_target(t);
	}

	for (i = 0; i < nr_targets; i++) {
		t = damon_new_target();
		if (!t) {
			/* Undo the targets added in earlier iterations. */
			damon_for_each_target_safe(t, next, ctx)
				damon_destroy_target(t);
			if (damon_target_has_pid(ctx))
				dbgfs_put_pids(pids, nr_targets);
			return -ENOMEM;
		}
		/* On success, the target takes over the pid reference. */
		if (damon_target_has_pid(ctx))
			t->pid = pids[i];
		damon_add_target(ctx, t);
	}

	return 0;
}
489 
/*
 * Write handler for the "target_ids" file.  Accepts either "paddr\n" for
 * physical address space monitoring, or a list of pid numbers for virtual
 * address space monitoring, and reconfigures the context accordingly.
 */
static ssize_t dbgfs_target_ids_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	bool id_is_pid = true;
	char *kbuf;
	struct pid **target_pids = NULL;
	ssize_t nr_targets;
	ssize_t ret;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	/* "paddr" means one pid-less target for the physical address space. */
	if (!strncmp(kbuf, "paddr\n", count)) {
		id_is_pid = false;
		nr_targets = 1;
	}

	if (id_is_pid) {
		/* Each returned pid carries a reference we now own. */
		target_pids = str_to_pids(kbuf, count, &nr_targets);
		if (!target_pids) {
			ret = -ENOMEM;
			goto out;
		}
	}

	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond) {
		/* Monitoring is running; drop our pid references and bail. */
		if (id_is_pid)
			dbgfs_put_pids(target_pids, nr_targets);
		ret = -EBUSY;
		goto unlock_out;
	}

	/* remove previously set targets */
	dbgfs_set_targets(ctx, 0, NULL);
	if (!nr_targets) {
		ret = count;
		goto unlock_out;
	}

	/* Configure the context for the address space type */
	if (id_is_pid)
		ret = damon_select_ops(ctx, DAMON_OPS_VADDR);
	else
		ret = damon_select_ops(ctx, DAMON_OPS_PADDR);
	if (ret)
		goto unlock_out;

	/* On success the targets own the pid refs; on failure they are put. */
	ret = dbgfs_set_targets(ctx, nr_targets, target_pids);
	if (!ret)
		ret = count;

unlock_out:
	mutex_unlock(&ctx->kdamond_lock);
	/* Only the pointer array; the pid references were handed off or put. */
	kfree(target_pids);
out:
	kfree(kbuf);
	return ret;
}
551 
/*
 * Print every region of every target of @c into @buf as
 * "<target idx> <start> <end>" lines.  Returns bytes written, or -ENOMEM
 * if @buf is too small.
 */
static ssize_t sprint_init_regions(struct damon_ctx *c, char *buf, ssize_t len)
{
	struct damon_target *t;
	struct damon_region *r;
	int tidx = 0;
	int outpos = 0;

	damon_for_each_target(t, c) {
		damon_for_each_region(r, t) {
			int n = scnprintf(buf + outpos, len - outpos,
					"%d %lu %lu\n",
					tidx, r->ar.start, r->ar.end);
			if (!n)
				return -ENOMEM;
			outpos += n;
		}
		tidx++;
	}
	return outpos;
}
573 
dbgfs_init_regions_read(struct file * file,char __user * buf,size_t count,loff_t * ppos)574 static ssize_t dbgfs_init_regions_read(struct file *file, char __user *buf,
575 		size_t count, loff_t *ppos)
576 {
577 	struct damon_ctx *ctx = file->private_data;
578 	char *kbuf;
579 	ssize_t len;
580 
581 	kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
582 	if (!kbuf)
583 		return -ENOMEM;
584 
585 	mutex_lock(&ctx->kdamond_lock);
586 	if (ctx->kdamond) {
587 		mutex_unlock(&ctx->kdamond_lock);
588 		len = -EBUSY;
589 		goto out;
590 	}
591 
592 	len = sprint_init_regions(ctx, kbuf, count);
593 	mutex_unlock(&ctx->kdamond_lock);
594 	if (len < 0)
595 		goto out;
596 	len = simple_read_from_buffer(buf, count, ppos, kbuf, len);
597 
598 out:
599 	kfree(kbuf);
600 	return len;
601 }
602 
/*
 * Add one initial monitoring region @ar to the @target_idx'th target of @c.
 *
 * Regions must be added in address order per target: the only overlap check
 * is against the region inserted immediately before this one.
 *
 * Returns 0 on success, -EINVAL for a bad range, index, or overlap, and
 * -ENOMEM on allocation failure.
 */
static int add_init_region(struct damon_ctx *c, int target_idx,
		struct damon_addr_range *ar)
{
	struct damon_target *t;
	struct damon_region *r, *prev;
	unsigned long idx = 0;
	int rc = -EINVAL;

	if (ar->start >= ar->end)
		return -EINVAL;

	damon_for_each_target(t, c) {
		if (idx++ == target_idx) {
			r = damon_new_region(ar->start, ar->end);
			if (!r)
				return -ENOMEM;
			damon_add_region(r, t);
			if (damon_nr_regions(t) > 1) {
				/* Must not overlap the previously added one. */
				prev = damon_prev_region(r);
				if (prev->ar.end > r->ar.start) {
					damon_destroy_region(r, t);
					return -EINVAL;
				}
			}
			rc = 0;
		}
	}
	/* rc stays -EINVAL when @target_idx matched no target. */
	return rc;
}
632 
/*
 * Replace all monitoring regions of @c with those parsed from @str
 * ("<target idx> <start> <end>" triples).  On any parse or insert failure,
 * every region is removed and the error is returned; on success, returns 0.
 */
static int set_init_regions(struct damon_ctx *c, const char *str, ssize_t len)
{
	struct damon_target *t;
	struct damon_region *r, *next;
	int pos = 0, parsed, ret;
	int target_idx;
	struct damon_addr_range ar;
	int err;

	/* Start from a clean slate: drop all existing regions. */
	damon_for_each_target(t, c) {
		damon_for_each_region_safe(r, next, t)
			damon_destroy_region(r, t);
	}

	while (pos < len) {
		ret = sscanf(&str[pos], "%d %lu %lu%n",
				&target_idx, &ar.start, &ar.end, &parsed);
		/* A short match simply ends the input. */
		if (ret != 3)
			break;
		err = add_init_region(c, target_idx, &ar);
		if (err)
			goto fail;
		pos += parsed;
	}

	return 0;

fail:
	/* Leave no partially-applied configuration behind. */
	damon_for_each_target(t, c) {
		damon_for_each_region_safe(r, next, t)
			damon_destroy_region(r, t);
	}
	return err;
}
667 
dbgfs_init_regions_write(struct file * file,const char __user * buf,size_t count,loff_t * ppos)668 static ssize_t dbgfs_init_regions_write(struct file *file,
669 					  const char __user *buf, size_t count,
670 					  loff_t *ppos)
671 {
672 	struct damon_ctx *ctx = file->private_data;
673 	char *kbuf;
674 	ssize_t ret = count;
675 	int err;
676 
677 	kbuf = user_input_str(buf, count, ppos);
678 	if (IS_ERR(kbuf))
679 		return PTR_ERR(kbuf);
680 
681 	mutex_lock(&ctx->kdamond_lock);
682 	if (ctx->kdamond) {
683 		ret = -EBUSY;
684 		goto unlock_out;
685 	}
686 
687 	err = set_init_regions(ctx, kbuf, ret);
688 	if (err)
689 		ret = err;
690 
691 unlock_out:
692 	mutex_unlock(&ctx->kdamond_lock);
693 	kfree(kbuf);
694 	return ret;
695 }
696 
dbgfs_kdamond_pid_read(struct file * file,char __user * buf,size_t count,loff_t * ppos)697 static ssize_t dbgfs_kdamond_pid_read(struct file *file,
698 		char __user *buf, size_t count, loff_t *ppos)
699 {
700 	struct damon_ctx *ctx = file->private_data;
701 	char *kbuf;
702 	ssize_t len;
703 
704 	kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
705 	if (!kbuf)
706 		return -ENOMEM;
707 
708 	mutex_lock(&ctx->kdamond_lock);
709 	if (ctx->kdamond)
710 		len = scnprintf(kbuf, count, "%d\n", ctx->kdamond->pid);
711 	else
712 		len = scnprintf(kbuf, count, "none\n");
713 	mutex_unlock(&ctx->kdamond_lock);
714 	if (!len)
715 		goto out;
716 	len = simple_read_from_buffer(buf, count, ppos, kbuf, len);
717 
718 out:
719 	kfree(kbuf);
720 	return len;
721 }
722 
/*
 * Open handler shared by the per-context files.  Stashes the file's
 * damon_ctx (set as i_private at creation time) for the read/write handlers.
 */
static int damon_dbgfs_open(struct inode *inode, struct file *file)
{
	damon_dbgfs_warn_deprecation();

	file->private_data = inode->i_private;

	return nonseekable_open(inode, file);
}
731 
/* File operations of the per-context debugfs files. */
static const struct file_operations attrs_fops = {
	.open = damon_dbgfs_open,
	.read = dbgfs_attrs_read,
	.write = dbgfs_attrs_write,
};

static const struct file_operations schemes_fops = {
	.open = damon_dbgfs_open,
	.read = dbgfs_schemes_read,
	.write = dbgfs_schemes_write,
};

static const struct file_operations target_ids_fops = {
	.open = damon_dbgfs_open,
	.read = dbgfs_target_ids_read,
	.write = dbgfs_target_ids_write,
};

static const struct file_operations init_regions_fops = {
	.open = damon_dbgfs_open,
	.read = dbgfs_init_regions_read,
	.write = dbgfs_init_regions_write,
};

/* Read-only: the kdamond pid is reported, never set, through debugfs. */
static const struct file_operations kdamond_pid_fops = {
	.open = damon_dbgfs_open,
	.read = dbgfs_kdamond_pid_read,
};
760 
dbgfs_fill_ctx_dir(struct dentry * dir,struct damon_ctx * ctx)761 static void dbgfs_fill_ctx_dir(struct dentry *dir, struct damon_ctx *ctx)
762 {
763 	const char * const file_names[] = {"attrs", "schemes", "target_ids",
764 		"init_regions", "kdamond_pid"};
765 	const struct file_operations *fops[] = {&attrs_fops, &schemes_fops,
766 		&target_ids_fops, &init_regions_fops, &kdamond_pid_fops};
767 	int i;
768 
769 	for (i = 0; i < ARRAY_SIZE(file_names); i++)
770 		debugfs_create_file(file_names[i], 0600, dir, ctx, fops[i]);
771 }
772 
/*
 * DAMON callback invoked before the kdamond terminates.  Releases the pid
 * references and targets so nothing leaks when monitoring ends on its own.
 */
static void dbgfs_before_terminate(struct damon_ctx *ctx)
{
	struct damon_target *t, *next;

	/* Physical-address-space targets hold no pid references. */
	if (!damon_target_has_pid(ctx))
		return;

	mutex_lock(&ctx->kdamond_lock);
	damon_for_each_target_safe(t, next, ctx) {
		put_pid(t->pid);
		damon_destroy_target(t);
	}
	mutex_unlock(&ctx->kdamond_lock);
}
787 
/*
 * Allocate a monitoring context for debugfs use, preferring virtual address
 * space operations and falling back to physical.  Returns NULL on failure.
 */
static struct damon_ctx *dbgfs_new_ctx(void)
{
	struct damon_ctx *ctx;

	ctx = damon_new_ctx();
	if (!ctx)
		return NULL;

	/* Need at least one usable operations set (vaddr, else paddr). */
	if (damon_select_ops(ctx, DAMON_OPS_VADDR) &&
			damon_select_ops(ctx, DAMON_OPS_PADDR)) {
		damon_destroy_ctx(ctx);
		return NULL;
	}
	ctx->callback.before_terminate = dbgfs_before_terminate;
	return ctx;
}
804 
/* Counterpart of dbgfs_new_ctx(): release a debugfs monitoring context. */
static void dbgfs_destroy_ctx(struct damon_ctx *ctx)
{
	damon_destroy_ctx(ctx);
}
809 
/* Read handler for the "DEPRECATED" file: emits the deprecation notice. */
static ssize_t damon_dbgfs_deprecated_read(struct file *file,
		char __user *buf, size_t count, loff_t *ppos)
{
	static const char kbuf[512] = DAMON_DBGFS_DEPRECATION_NOTICE;

	return simple_read_from_buffer(buf, count, ppos, kbuf, strlen(kbuf));
}
817 
/*
 * Make a context of @name and create a debugfs directory for it.
 *
 * This function should be called while holding damon_dbgfs_lock.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int dbgfs_mk_context(char *name)
{
	struct dentry *root, **new_dirs, *new_dir;
	struct damon_ctx **new_ctxs, *new_ctx;

	if (damon_nr_running_ctxs())
		return -EBUSY;

	/*
	 * Grow both arrays first; a successful krealloc() leaves the old
	 * contents intact, so failing midway needs no rollback.
	 */
	new_ctxs = krealloc(dbgfs_ctxs, sizeof(*dbgfs_ctxs) *
			(dbgfs_nr_ctxs + 1), GFP_KERNEL);
	if (!new_ctxs)
		return -ENOMEM;
	dbgfs_ctxs = new_ctxs;

	new_dirs = krealloc(dbgfs_dirs, sizeof(*dbgfs_dirs) *
			(dbgfs_nr_ctxs + 1), GFP_KERNEL);
	if (!new_dirs)
		return -ENOMEM;
	dbgfs_dirs = new_dirs;

	/* dbgfs_dirs[0] is the "damon" root directory. */
	root = dbgfs_dirs[0];
	if (!root)
		return -ENOENT;

	new_dir = debugfs_create_dir(name, root);
	/* Below check is required for a potential duplicated name case */
	if (IS_ERR(new_dir))
		return PTR_ERR(new_dir);
	dbgfs_dirs[dbgfs_nr_ctxs] = new_dir;

	new_ctx = dbgfs_new_ctx();
	if (!new_ctx) {
		debugfs_remove(new_dir);
		dbgfs_dirs[dbgfs_nr_ctxs] = NULL;
		return -ENOMEM;
	}

	dbgfs_ctxs[dbgfs_nr_ctxs] = new_ctx;
	dbgfs_fill_ctx_dir(dbgfs_dirs[dbgfs_nr_ctxs],
			dbgfs_ctxs[dbgfs_nr_ctxs]);
	/* Publish only after the slot is fully initialized. */
	dbgfs_nr_ctxs++;

	return 0;
}
869 
/* Write handler for "mk_contexts": create a context named by the input. */
static ssize_t dbgfs_mk_context_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	char *kbuf;
	char *ctx_name;
	ssize_t ret;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);
	/* Same size as kbuf, so the trimmed copy always fits. */
	ctx_name = kmalloc(count + 1, GFP_KERNEL);
	if (!ctx_name) {
		kfree(kbuf);
		return -ENOMEM;
	}

	/* Trim white space */
	if (sscanf(kbuf, "%s", ctx_name) != 1) {
		ret = -EINVAL;
		goto out;
	}

	mutex_lock(&damon_dbgfs_lock);
	ret = dbgfs_mk_context(ctx_name);
	if (!ret)
		ret = count;
	mutex_unlock(&damon_dbgfs_lock);

out:
	kfree(kbuf);
	kfree(ctx_name);
	return ret;
}
903 
/*
 * Remove a context of @name and its debugfs directory.
 *
 * This function should be called while holding damon_dbgfs_lock.
 *
 * Return 0 on success, negative error code otherwise.
 */
static int dbgfs_rm_context(char *name)
{
	struct dentry *root, *dir, **new_dirs;
	struct inode *inode;
	struct damon_ctx **new_ctxs;
	int i, j;
	int ret = 0;

	if (damon_nr_running_ctxs())
		return -EBUSY;

	/* dbgfs_dirs[0] is the "damon" root directory. */
	root = dbgfs_dirs[0];
	if (!root)
		return -ENOENT;

	/* debugfs_lookup() returns a referenced dentry; dput() before return. */
	dir = debugfs_lookup(name, root);
	if (!dir)
		return -ENOENT;

	/* Reject names of regular files such as "mk_contexts". */
	inode = d_inode(dir);
	if (!S_ISDIR(inode->i_mode)) {
		ret = -EINVAL;
		goto out_dput;
	}

	/* Build one-smaller replacement arrays before touching the old ones. */
	new_dirs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_dirs),
			GFP_KERNEL);
	if (!new_dirs) {
		ret = -ENOMEM;
		goto out_dput;
	}

	new_ctxs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_ctxs),
			GFP_KERNEL);
	if (!new_ctxs) {
		ret = -ENOMEM;
		goto out_new_dirs;
	}

	/* Copy all entries but the matched one, destroying that one. */
	for (i = 0, j = 0; i < dbgfs_nr_ctxs; i++) {
		if (dbgfs_dirs[i] == dir) {
			debugfs_remove(dbgfs_dirs[i]);
			dbgfs_destroy_ctx(dbgfs_ctxs[i]);
			continue;
		}
		new_dirs[j] = dbgfs_dirs[i];
		new_ctxs[j++] = dbgfs_ctxs[i];
	}

	kfree(dbgfs_dirs);
	kfree(dbgfs_ctxs);

	dbgfs_dirs = new_dirs;
	dbgfs_ctxs = new_ctxs;
	dbgfs_nr_ctxs--;

	goto out_dput;

out_new_dirs:
	kfree(new_dirs);
out_dput:
	dput(dir);
	return ret;
}
975 
dbgfs_rm_context_write(struct file * file,const char __user * buf,size_t count,loff_t * ppos)976 static ssize_t dbgfs_rm_context_write(struct file *file,
977 		const char __user *buf, size_t count, loff_t *ppos)
978 {
979 	char *kbuf;
980 	ssize_t ret;
981 	char *ctx_name;
982 
983 	kbuf = user_input_str(buf, count, ppos);
984 	if (IS_ERR(kbuf))
985 		return PTR_ERR(kbuf);
986 	ctx_name = kmalloc(count + 1, GFP_KERNEL);
987 	if (!ctx_name) {
988 		kfree(kbuf);
989 		return -ENOMEM;
990 	}
991 
992 	/* Trim white space */
993 	if (sscanf(kbuf, "%s", ctx_name) != 1) {
994 		ret = -EINVAL;
995 		goto out;
996 	}
997 
998 	mutex_lock(&damon_dbgfs_lock);
999 	ret = dbgfs_rm_context(ctx_name);
1000 	if (!ret)
1001 		ret = count;
1002 	mutex_unlock(&damon_dbgfs_lock);
1003 
1004 out:
1005 	kfree(kbuf);
1006 	kfree(ctx_name);
1007 	return ret;
1008 }
1009 
dbgfs_monitor_on_read(struct file * file,char __user * buf,size_t count,loff_t * ppos)1010 static ssize_t dbgfs_monitor_on_read(struct file *file,
1011 		char __user *buf, size_t count, loff_t *ppos)
1012 {
1013 	char monitor_on_buf[5];
1014 	bool monitor_on = damon_nr_running_ctxs() != 0;
1015 	int len;
1016 
1017 	len = scnprintf(monitor_on_buf, 5, monitor_on ? "on\n" : "off\n");
1018 
1019 	return simple_read_from_buffer(buf, count, ppos, monitor_on_buf, len);
1020 }
1021 
/*
 * Write handler for "monitor_on_DEPRECATED": "on" starts all contexts,
 * "off" stops them.
 */
static ssize_t dbgfs_monitor_on_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	ssize_t ret;
	char *kbuf;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	/*
	 * Remove white space.  NOTE(review): this scans kbuf into itself;
	 * src/dst overlap is formally unspecified for sscanf() — presumably
	 * relied on as safe for the kernel's vsscanf(), but worth confirming.
	 */
	if (sscanf(kbuf, "%s", kbuf) != 1) {
		kfree(kbuf);
		return -EINVAL;
	}

	mutex_lock(&damon_dbgfs_lock);
	if (!strncmp(kbuf, "on", count)) {
		int i;

		/* Starting a context without targets would do nothing. */
		for (i = 0; i < dbgfs_nr_ctxs; i++) {
			if (damon_targets_empty(dbgfs_ctxs[i])) {
				kfree(kbuf);
				mutex_unlock(&damon_dbgfs_lock);
				return -EINVAL;
			}
		}
		ret = damon_start(dbgfs_ctxs, dbgfs_nr_ctxs, true);
	} else if (!strncmp(kbuf, "off", count)) {
		ret = damon_stop(dbgfs_ctxs, dbgfs_nr_ctxs);
	} else {
		ret = -EINVAL;
	}
	mutex_unlock(&damon_dbgfs_lock);

	if (!ret)
		ret = count;
	kfree(kbuf);
	return ret;
}
1062 
/* Open handler for the root-level files; only warns about deprecation. */
static int damon_dbgfs_static_file_open(struct inode *inode, struct file *file)
{
	damon_dbgfs_warn_deprecation();
	return nonseekable_open(inode, file);
}
1068 
/* File operations of the root-level ("damon" directory) files. */
static const struct file_operations deprecated_fops = {
	.read = damon_dbgfs_deprecated_read,
};

static const struct file_operations mk_contexts_fops = {
	.open = damon_dbgfs_static_file_open,
	.write = dbgfs_mk_context_write,
};

static const struct file_operations rm_contexts_fops = {
	.open = damon_dbgfs_static_file_open,
	.write = dbgfs_rm_context_write,
};

static const struct file_operations monitor_on_fops = {
	.open = damon_dbgfs_static_file_open,
	.read = dbgfs_monitor_on_read,
	.write = dbgfs_monitor_on_write,
};
1088 
/*
 * Create the "damon" debugfs root, its control files, and the files of the
 * default context (dbgfs_ctxs[0], set up by the caller).
 *
 * Returns 0 on success, -ENOMEM otherwise.
 */
static int __init __damon_dbgfs_init(void)
{
	struct dentry *dbgfs_root;
	const char * const file_names[] = {"mk_contexts", "rm_contexts",
		"monitor_on_DEPRECATED", "DEPRECATED"};
	const struct file_operations *fops[] = {&mk_contexts_fops,
		&rm_contexts_fops, &monitor_on_fops, &deprecated_fops};
	int i;

	dbgfs_root = debugfs_create_dir("damon", NULL);

	for (i = 0; i < ARRAY_SIZE(file_names); i++)
		debugfs_create_file(file_names[i], 0600, dbgfs_root, NULL,
				fops[i]);
	dbgfs_fill_ctx_dir(dbgfs_root, dbgfs_ctxs[0]);

	/* dbgfs_dirs[0] always holds the root; dbgfs_mk_context() grows it. */
	dbgfs_dirs = kmalloc(sizeof(dbgfs_root), GFP_KERNEL);
	if (!dbgfs_dirs) {
		/* debugfs_remove() tears the directory down recursively. */
		debugfs_remove(dbgfs_root);
		return -ENOMEM;
	}
	dbgfs_dirs[0] = dbgfs_root;

	return 0;
}
1114 
1115 /*
1116  * Functions for the initialization
1117  */
1118 
damon_dbgfs_init(void)1119 static int __init damon_dbgfs_init(void)
1120 {
1121 	int rc = -ENOMEM;
1122 
1123 	mutex_lock(&damon_dbgfs_lock);
1124 	dbgfs_ctxs = kmalloc(sizeof(*dbgfs_ctxs), GFP_KERNEL);
1125 	if (!dbgfs_ctxs)
1126 		goto out;
1127 	dbgfs_ctxs[0] = dbgfs_new_ctx();
1128 	if (!dbgfs_ctxs[0]) {
1129 		kfree(dbgfs_ctxs);
1130 		goto out;
1131 	}
1132 	dbgfs_nr_ctxs = 1;
1133 
1134 	rc = __damon_dbgfs_init();
1135 	if (rc) {
1136 		kfree(dbgfs_ctxs[0]);
1137 		kfree(dbgfs_ctxs);
1138 		pr_err("%s: dbgfs init failed\n", __func__);
1139 	}
1140 
1141 out:
1142 	mutex_unlock(&damon_dbgfs_lock);
1143 	return rc;
1144 }
1145 
1146 module_init(damon_dbgfs_init);
1147 
1148 #include "tests/dbgfs-kunit.h"
1149