// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 Red Hat, Inc.
 * All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sysfs.h"
#include "xfs_log_priv.h"
#include "xfs_mount.h"

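/*
 * Each XFS sysfs attribute wraps a plain struct attribute with typed
 * show/store callbacks.  The shared sysfs_ops below recover the wrapper
 * with container_of() and dispatch to those callbacks, so the individual
 * attributes only need to supply <name>_show()/<name>_store().
 */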
struct xfs_sysfs_attr {
	struct attribute attr;
	ssize_t (*show)(struct kobject *kobject, char *buf);
	ssize_t (*store)(struct kobject *kobject, const char *buf,
			 size_t count);
};

static inline struct xfs_sysfs_attr *
to_attr(struct attribute *attr)
{
	return container_of(attr, struct xfs_sysfs_attr, attr);
}

#define XFS_SYSFS_ATTR_RW(name) \
	static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_RW(name)
#define XFS_SYSFS_ATTR_RO(name) \
	static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_RO(name)
#define XFS_SYSFS_ATTR_WO(name) \
	static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_WO(name)

#define ATTR_LIST(name) &xfs_sysfs_attr_##name.attr
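
/*
 * The __ATTR_RW/RO/WO() macros hook up <name>_show()/<name>_store() with the
 * conventional 0644/0444/0200 sysfs permissions.  ATTR_LIST() takes the
 * address of the embedded struct attribute for the NULL-terminated attribute
 * arrays declared throughout this file.
 */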

STATIC ssize_t
xfs_sysfs_object_show(
	struct kobject		*kobject,
	struct attribute	*attr,
	char			*buf)
{
	struct xfs_sysfs_attr *xfs_attr = to_attr(attr);

	return xfs_attr->show ? xfs_attr->show(kobject, buf) : 0;
}

STATIC ssize_t
xfs_sysfs_object_store(
	struct kobject		*kobject,
	struct attribute	*attr,
	const char		*buf,
	size_t			count)
{
	struct xfs_sysfs_attr *xfs_attr = to_attr(attr);

	return xfs_attr->store ? xfs_attr->store(kobject, buf, count) : 0;
}

static const struct sysfs_ops xfs_sysfs_ops = {
	.show = xfs_sysfs_object_show,
	.store = xfs_sysfs_object_store,
};

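/*
 * Per-mount kobject, i.e. /sys/fs/xfs/<dev>.  It exposes no attributes of
 * its own; it serves as the parent for the log, error and stats objects
 * created at mount time.
 */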
static struct attribute *xfs_mp_attrs[] = {
	NULL,
};

struct kobj_type xfs_mp_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_attrs = xfs_mp_attrs,
};

#if defined(DEBUG) || defined(XFS_WARN)
/* debug */

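/*
 * bug_on_assert: when set to 1, a failed ASSERT() calls BUG() and brings the
 * kernel down; when set to 0 it only emits a warning.  Any other value is
 * rejected.
 */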
STATIC ssize_t
bug_on_assert_store(
	struct kobject		*kobject,
	const char		*buf,
	size_t			count)
{
	int			ret;
	int			val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val == 1)
		xfs_globals.bug_on_assert = true;
	else if (val == 0)
		xfs_globals.bug_on_assert = false;
	else
		return -EINVAL;

	return count;
}

STATIC ssize_t
bug_on_assert_show(
	struct kobject		*kobject,
	char			*buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.bug_on_assert ? 1 : 0);
}
XFS_SYSFS_ATTR_RW(bug_on_assert);

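/*
 * log_recovery_delay: number of seconds (0-60) to pause before starting log
 * recovery, giving tests a window to inject failures or race other
 * operations against recovery.
 */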
STATIC ssize_t
log_recovery_delay_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val < 0 || val > 60)
		return -EINVAL;

	xfs_globals.log_recovery_delay = val;

	return count;
}

STATIC ssize_t
log_recovery_delay_show(
	struct kobject	*kobject,
	char		*buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.log_recovery_delay);
}
XFS_SYSFS_ATTR_RW(log_recovery_delay);

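/*
 * mount_delay: number of seconds (0-60) to pause in the mount path, again
 * mainly so that tests can line up operations against a mount in progress.
 */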
STATIC ssize_t
mount_delay_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val < 0 || val > 60)
		return -EINVAL;

	xfs_globals.mount_delay = val;

	return count;
}

STATIC ssize_t
mount_delay_show(
	struct kobject	*kobject,
	char		*buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.mount_delay);
}
XFS_SYSFS_ATTR_RW(mount_delay);

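/*
 * always_cow: boolean; when true, reflink-capable filesystems are forced to
 * use copy-on-write for all overwrites, which exercises the COW paths much
 * harder than normal operation would.
 */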
static ssize_t
always_cow_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	ssize_t		ret;

	ret = kstrtobool(buf, &xfs_globals.always_cow);
	if (ret < 0)
		return ret;
	return count;
}

static ssize_t
always_cow_show(
	struct kobject	*kobject,
	char		*buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.always_cow);
}
XFS_SYSFS_ATTR_RW(always_cow);

#ifdef DEBUG
/*
 * Override how many threads the parallel work queue is allowed to create.
 * This has to be a debug-only global (instead of an errortag) because one of
 * the main users of parallel workqueues is mount time quotacheck.
 */
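/*
 * The default of -1 leaves thread-count selection to the normal automatic
 * sizing; any other accepted value (0 through num_possible_cpus(), per the
 * bounds check below) overrides it.
 */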
STATIC ssize_t
pwork_threads_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val < -1 || val > num_possible_cpus())
		return -EINVAL;

	xfs_globals.pwork_threads = val;

	return count;
}

STATIC ssize_t
pwork_threads_show(
	struct kobject	*kobject,
	char		*buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.pwork_threads);
}
XFS_SYSFS_ATTR_RW(pwork_threads);
#endif /* DEBUG */

static struct attribute *xfs_dbg_attrs[] = {
	ATTR_LIST(bug_on_assert),
	ATTR_LIST(log_recovery_delay),
	ATTR_LIST(mount_delay),
	ATTR_LIST(always_cow),
#ifdef DEBUG
	ATTR_LIST(pwork_threads),
#endif
	NULL,
};

struct kobj_type xfs_dbg_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_attrs = xfs_dbg_attrs,
};

#endif /* DEBUG */

/* stats */

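/*
 * Statistics object: "stats" dumps the formatted counters via
 * xfs_stats_format(), and writing 1 to "stats_clear" resets them with
 * xfs_stats_clearall().  The same ktype backs both the global stats object
 * and the per-mount one.
 */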
static inline struct xstats *
to_xstats(struct kobject *kobject)
{
	struct xfs_kobj *kobj = to_kobj(kobject);

	return container_of(kobj, struct xstats, xs_kobj);
}

STATIC ssize_t
stats_show(
	struct kobject	*kobject,
	char		*buf)
{
	struct xstats	*stats = to_xstats(kobject);

	return xfs_stats_format(stats->xs_stats, buf);
}
XFS_SYSFS_ATTR_RO(stats);

STATIC ssize_t
stats_clear_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	int		ret;
	int		val;
	struct xstats	*stats = to_xstats(kobject);

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val != 1)
		return -EINVAL;

	xfs_stats_clearall(stats->xs_stats);
	return count;
}
XFS_SYSFS_ATTR_WO(stats_clear);

static struct attribute *xfs_stats_attrs[] = {
	ATTR_LIST(stats),
	ATTR_LIST(stats_clear),
	NULL,
};

struct kobj_type xfs_stats_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_attrs = xfs_stats_attrs,
};

/* xlog */

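/*
 * Read-only log state, exported per mount under .../<dev>/log/.
 * log_head_lsn and log_tail_lsn report "cycle:block" positions of the log
 * head and tail; reserve_grant_head and write_grant_head report
 * "cycle:bytes" for the two grant heads.
 */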
static inline struct xlog *
to_xlog(struct kobject *kobject)
{
	struct xfs_kobj *kobj = to_kobj(kobject);

	return container_of(kobj, struct xlog, l_kobj);
}

STATIC ssize_t
log_head_lsn_show(
	struct kobject	*kobject,
	char		*buf)
{
	int cycle;
	int block;
	struct xlog *log = to_xlog(kobject);

	spin_lock(&log->l_icloglock);
	cycle = log->l_curr_cycle;
	block = log->l_curr_block;
	spin_unlock(&log->l_icloglock);

	return snprintf(buf, PAGE_SIZE, "%d:%d\n", cycle, block);
}
XFS_SYSFS_ATTR_RO(log_head_lsn);

STATIC ssize_t
log_tail_lsn_show(
	struct kobject	*kobject,
	char		*buf)
{
	int cycle;
	int block;
	struct xlog *log = to_xlog(kobject);

	xlog_crack_atomic_lsn(&log->l_tail_lsn, &cycle, &block);
	return snprintf(buf, PAGE_SIZE, "%d:%d\n", cycle, block);
}
XFS_SYSFS_ATTR_RO(log_tail_lsn);

STATIC ssize_t
reserve_grant_head_show(
	struct kobject	*kobject,
	char		*buf)
{
	int cycle;
	int bytes;
	struct xlog *log = to_xlog(kobject);

	xlog_crack_grant_head(&log->l_reserve_head.grant, &cycle, &bytes);
	return snprintf(buf, PAGE_SIZE, "%d:%d\n", cycle, bytes);
}
XFS_SYSFS_ATTR_RO(reserve_grant_head);

STATIC ssize_t
write_grant_head_show(
	struct kobject	*kobject,
	char		*buf)
{
	int cycle;
	int bytes;
	struct xlog *log = to_xlog(kobject);

	xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &bytes);
	return snprintf(buf, PAGE_SIZE, "%d:%d\n", cycle, bytes);
}
XFS_SYSFS_ATTR_RO(write_grant_head);

static struct attribute *xfs_log_attrs[] = {
	ATTR_LIST(log_head_lsn),
	ATTR_LIST(log_tail_lsn),
	ATTR_LIST(reserve_grant_head),
	ATTR_LIST(write_grant_head),
	NULL,
};

struct kobj_type xfs_log_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_attrs = xfs_log_attrs,
};

/*
 * Metadata IO error configuration
 *
 * The sysfs structure here is:
 *	...xfs/<dev>/error/<class>/<errno>/<error_attrs>
 *
 * where <class> allows us to discriminate between data IO and metadata IO,
 * and any other future type of IO (e.g. special inode or directory error
 * handling) we care to support.
 */
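
/*
 * For example (device name purely illustrative), a filesystem mounted from
 * /dev/sda1 ends up with:
 *
 *	/sys/fs/xfs/sda1/error/fail_at_unmount
 *	/sys/fs/xfs/sda1/error/metadata/EIO/max_retries
 *	/sys/fs/xfs/sda1/error/metadata/EIO/retry_timeout_seconds
 *
 * and likewise for the default, ENOSPC and ENODEV directories.
 */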
static inline struct xfs_error_cfg *
to_error_cfg(struct kobject *kobject)
{
	struct xfs_kobj *kobj = to_kobj(kobject);
	return container_of(kobj, struct xfs_error_cfg, kobj);
}

static inline struct xfs_mount *
err_to_mp(struct kobject *kobject)
{
	struct xfs_kobj *kobj = to_kobj(kobject);
	return container_of(kobj, struct xfs_mount, m_error_kobj);
}

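/*
 * max_retries: -1 maps to XFS_ERR_RETRY_FOREVER; otherwise it is the number
 * of retries a failing metadata write is allowed before the error is treated
 * as permanent.
 */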
static ssize_t
max_retries_show(
	struct kobject	*kobject,
	char		*buf)
{
	int		retries;
	struct xfs_error_cfg *cfg = to_error_cfg(kobject);

	if (cfg->max_retries == XFS_ERR_RETRY_FOREVER)
		retries = -1;
	else
		retries = cfg->max_retries;

	return snprintf(buf, PAGE_SIZE, "%d\n", retries);
}

static ssize_t
max_retries_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	struct xfs_error_cfg *cfg = to_error_cfg(kobject);
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val < -1)
		return -EINVAL;

	if (val == -1)
		cfg->max_retries = XFS_ERR_RETRY_FOREVER;
	else
		cfg->max_retries = val;
	return count;
}
XFS_SYSFS_ATTR_RW(max_retries);

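/*
 * retry_timeout_seconds: -1 means no time limit (XFS_ERR_RETRY_FOREVER);
 * otherwise retries are abandoned once this many seconds have elapsed, with
 * an upper bound of one day.  The value is stored internally in jiffies.
 */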
static ssize_t
retry_timeout_seconds_show(
	struct kobject	*kobject,
	char		*buf)
{
	int		timeout;
	struct xfs_error_cfg *cfg = to_error_cfg(kobject);

	if (cfg->retry_timeout == XFS_ERR_RETRY_FOREVER)
		timeout = -1;
	else
		timeout = jiffies_to_msecs(cfg->retry_timeout) / MSEC_PER_SEC;

	return snprintf(buf, PAGE_SIZE, "%d\n", timeout);
}

static ssize_t
retry_timeout_seconds_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	struct xfs_error_cfg *cfg = to_error_cfg(kobject);
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	/* 1 day timeout maximum, -1 means infinite */
	if (val < -1 || val > 86400)
		return -EINVAL;

	if (val == -1)
		cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
	else {
		cfg->retry_timeout = msecs_to_jiffies(val * MSEC_PER_SEC);
		ASSERT(msecs_to_jiffies(val * MSEC_PER_SEC) < LONG_MAX);
	}
	return count;
}
XFS_SYSFS_ATTR_RW(retry_timeout_seconds);

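/*
 * fail_at_unmount: when set, metadata write errors still being retried at
 * unmount time are treated as permanent so the unmount can complete instead
 * of retrying forever.
 */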
static ssize_t
fail_at_unmount_show(
	struct kobject	*kobject,
	char		*buf)
{
	struct xfs_mount	*mp = err_to_mp(kobject);

	return snprintf(buf, PAGE_SIZE, "%d\n", mp->m_fail_unmount);
}

static ssize_t
fail_at_unmount_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	struct xfs_mount	*mp = err_to_mp(kobject);
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val < 0 || val > 1)
		return -EINVAL;

	mp->m_fail_unmount = val;
	return count;
}
XFS_SYSFS_ATTR_RW(fail_at_unmount);

static struct attribute *xfs_error_attrs[] = {
	ATTR_LIST(max_retries),
	ATTR_LIST(retry_timeout_seconds),
	NULL,
};

static struct kobj_type xfs_error_cfg_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_attrs = xfs_error_attrs,
};

static struct kobj_type xfs_error_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
};

/*
 * Error initialization tables. These need to be ordered in the same
 * order as the enums used to index the array. All class init tables need to
 * define a "default" behaviour as the first entry, all other entries can be
 * empty.
 */
struct xfs_error_init {
	char		*name;
	int		max_retries;
	int		retry_timeout;	/* in seconds */
};

static const struct xfs_error_init xfs_error_meta_init[XFS_ERR_ERRNO_MAX] = {
	{ .name = "default",
	  .max_retries = XFS_ERR_RETRY_FOREVER,
	  .retry_timeout = XFS_ERR_RETRY_FOREVER,
	},
	{ .name = "EIO",
	  .max_retries = XFS_ERR_RETRY_FOREVER,
	  .retry_timeout = XFS_ERR_RETRY_FOREVER,
	},
	{ .name = "ENOSPC",
	  .max_retries = XFS_ERR_RETRY_FOREVER,
	  .retry_timeout = XFS_ERR_RETRY_FOREVER,
	},
	{ .name = "ENODEV",
	  .max_retries = 0,	/* We can't recover from devices disappearing */
	  .retry_timeout = 0,
	},
};

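/*
 * Create the directory for one error class (e.g. "metadata") under
 * .../error/ and populate it with one config kobject per errno, seeded from
 * the init table above.  On failure, everything created so far is torn down
 * again before the error is returned.
 */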
static int
xfs_error_sysfs_init_class(
	struct xfs_mount	*mp,
	int			class,
	const char		*parent_name,
	struct xfs_kobj		*parent_kobj,
	const struct xfs_error_init init[])
{
	struct xfs_error_cfg	*cfg;
	int			error;
	int			i;

	ASSERT(class < XFS_ERR_CLASS_MAX);

	error = xfs_sysfs_init(parent_kobj, &xfs_error_ktype,
				&mp->m_error_kobj, parent_name);
	if (error)
		return error;

	for (i = 0; i < XFS_ERR_ERRNO_MAX; i++) {
		cfg = &mp->m_error_cfg[class][i];
		error = xfs_sysfs_init(&cfg->kobj, &xfs_error_cfg_ktype,
					parent_kobj, init[i].name);
		if (error)
			goto out_error;

		cfg->max_retries = init[i].max_retries;
		if (init[i].retry_timeout == XFS_ERR_RETRY_FOREVER)
			cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
		else
			cfg->retry_timeout = msecs_to_jiffies(
					init[i].retry_timeout * MSEC_PER_SEC);
	}
	return 0;

out_error:
	/* unwind the entries that succeeded */
	for (i--; i >= 0; i--) {
		cfg = &mp->m_error_cfg[class][i];
		xfs_sysfs_del(&cfg->kobj);
	}
	xfs_sysfs_del(parent_kobj);
	return error;
}

int
xfs_error_sysfs_init(
	struct xfs_mount	*mp)
{
	int			error;

	/* .../xfs/<dev>/error/ */
	error = xfs_sysfs_init(&mp->m_error_kobj, &xfs_error_ktype,
				&mp->m_kobj, "error");
	if (error)
		return error;

	error = sysfs_create_file(&mp->m_error_kobj.kobject,
				  ATTR_LIST(fail_at_unmount));
	if (error)
		goto out_error;

	/* .../xfs/<dev>/error/metadata/ */
	error = xfs_error_sysfs_init_class(mp, XFS_ERR_METADATA,
				"metadata", &mp->m_error_meta_kobj,
				xfs_error_meta_init);
	if (error)
		goto out_error;

	return 0;

out_error:
	xfs_sysfs_del(&mp->m_error_kobj);
	return error;
}

void
xfs_error_sysfs_del(
	struct xfs_mount	*mp)
{
	struct xfs_error_cfg	*cfg;
	int			i, j;

	for (i = 0; i < XFS_ERR_CLASS_MAX; i++) {
		for (j = 0; j < XFS_ERR_ERRNO_MAX; j++) {
			cfg = &mp->m_error_cfg[i][j];

			xfs_sysfs_del(&cfg->kobj);
		}
	}
	xfs_sysfs_del(&mp->m_error_meta_kobj);
	xfs_sysfs_del(&mp->m_error_kobj);
}

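/*
 * Look up the error configuration for a (class, errno) pair.  Positive and
 * negative errnos are both accepted; anything without a dedicated slot falls
 * back to the class's default configuration.
 */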
struct xfs_error_cfg *
xfs_error_get_cfg(
	struct xfs_mount	*mp,
	int			error_class,
	int			error)
{
	struct xfs_error_cfg	*cfg;

	if (error < 0)
		error = -error;

	switch (error) {
	case EIO:
		cfg = &mp->m_error_cfg[error_class][XFS_ERR_EIO];
		break;
	case ENOSPC:
		cfg = &mp->m_error_cfg[error_class][XFS_ERR_ENOSPC];
		break;
	case ENODEV:
		cfg = &mp->m_error_cfg[error_class][XFS_ERR_ENODEV];
		break;
	default:
		cfg = &mp->m_error_cfg[error_class][XFS_ERR_DEFAULT];
		break;
	}

	return cfg;
}
703