xref: /linux/fs/xfs/xfs_sysfs.c (revision c148bc7535650fbfa95a1f571b9ffa2ab478ea33)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 Red Hat, Inc.
 * All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sysfs.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_mount.h"
#include "xfs_zones.h"

struct xfs_sysfs_attr {
	struct attribute attr;
	ssize_t (*show)(struct kobject *kobject, char *buf);
	ssize_t (*store)(struct kobject *kobject, const char *buf,
			 size_t count);
};

static inline struct xfs_sysfs_attr *
to_attr(struct attribute *attr)
{
	return container_of(attr, struct xfs_sysfs_attr, attr);
}

#define XFS_SYSFS_ATTR_RW(name) \
	static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_RW(name)
#define XFS_SYSFS_ATTR_RO(name) \
	static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_RO(name)
#define XFS_SYSFS_ATTR_WO(name) \
	static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_WO(name)

#define ATTR_LIST(name) &xfs_sysfs_attr_##name.attr

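/*
 * A minimal sketch of what the macros above expand to: __ATTR_RW(name)
 * wires name##_show and name##_store to a 0644 attribute, so
 * XFS_SYSFS_ATTR_RW(foo) becomes roughly:
 *
 *	static struct xfs_sysfs_attr xfs_sysfs_attr_foo = {
 *		.attr  = { .name = "foo", .mode = 0644 },
 *		.show  = foo_show,
 *		.store = foo_store,
 *	};
 *
 * so each attribute below only has to supply its _show/_store pair.
 */
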
STATIC ssize_t
xfs_sysfs_object_show(
	struct kobject		*kobject,
	struct attribute	*attr,
	char			*buf)
{
	struct xfs_sysfs_attr *xfs_attr = to_attr(attr);

	return xfs_attr->show ? xfs_attr->show(kobject, buf) : 0;
}

STATIC ssize_t
xfs_sysfs_object_store(
	struct kobject		*kobject,
	struct attribute	*attr,
	const char		*buf,
	size_t			count)
{
	struct xfs_sysfs_attr *xfs_attr = to_attr(attr);

	return xfs_attr->store ? xfs_attr->store(kobject, buf, count) : 0;
}

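/*
 * Generic show/store dispatch: sysfs hands us a bare struct attribute,
 * which to_attr() upcasts to the containing xfs_sysfs_attr so the
 * per-attribute callback (if any) can run.
 */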
static const struct sysfs_ops xfs_sysfs_ops = {
	.show = xfs_sysfs_object_show,
	.store = xfs_sysfs_object_store,
};

static struct attribute *xfs_mp_attrs[] = {
	NULL,
};
ATTRIBUTE_GROUPS(xfs_mp);

static const struct kobj_type xfs_mp_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_groups = xfs_mp_groups,
};

#ifdef DEBUG
/* debug */

STATIC ssize_t
bug_on_assert_store(
	struct kobject		*kobject,
	const char		*buf,
	size_t			count)
{
	int			ret;
	int			val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val == 1)
		xfs_globals.bug_on_assert = true;
	else if (val == 0)
		xfs_globals.bug_on_assert = false;
	else
		return -EINVAL;

	return count;
}

STATIC ssize_t
bug_on_assert_show(
	struct kobject		*kobject,
	char			*buf)
{
	return sysfs_emit(buf, "%d\n", xfs_globals.bug_on_assert);
}
XFS_SYSFS_ATTR_RW(bug_on_assert);

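/*
 * Example usage, assuming this debug kobject is registered as "debug"
 * under /sys/fs/xfs at module init time (done elsewhere in the driver):
 *
 *	# echo 1 > /sys/fs/xfs/debug/bug_on_assert	(BUG() on failed ASSERT)
 *	# echo 0 > /sys/fs/xfs/debug/bug_on_assert	(just warn, keep running)
 */
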
STATIC ssize_t
log_recovery_delay_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val < 0 || val > 60)
		return -EINVAL;

	xfs_globals.log_recovery_delay = val;

	return count;
}

STATIC ssize_t
log_recovery_delay_show(
	struct kobject	*kobject,
	char		*buf)
{
	return sysfs_emit(buf, "%d\n", xfs_globals.log_recovery_delay);
}
XFS_SYSFS_ATTR_RW(log_recovery_delay);

STATIC ssize_t
mount_delay_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val < 0 || val > 60)
		return -EINVAL;

	xfs_globals.mount_delay = val;

	return count;
}

STATIC ssize_t
mount_delay_show(
	struct kobject	*kobject,
	char		*buf)
{
	return sysfs_emit(buf, "%d\n", xfs_globals.mount_delay);
}
XFS_SYSFS_ATTR_RW(mount_delay);

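/*
 * Debug knob (semantics assumed from the wider driver): when set on a
 * reflink-capable filesystem, writes take the copy-on-write path even
 * for unshared extents, widening test coverage of the CoW machinery.
 */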
static ssize_t
always_cow_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	ssize_t		ret;

	ret = kstrtobool(buf, &xfs_globals.always_cow);
	if (ret < 0)
		return ret;
	return count;
}

static ssize_t
always_cow_show(
	struct kobject	*kobject,
	char		*buf)
{
	return sysfs_emit(buf, "%d\n", xfs_globals.always_cow);
}
XFS_SYSFS_ATTR_RW(always_cow);

/*
 * Override how many threads the parallel work queue is allowed to create.
 * This has to be a debug-only global (instead of an errortag) because one of
 * the main users of parallel workqueues is mount time quotacheck.
 */
STATIC ssize_t
pwork_threads_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val < -1 || val > num_possible_cpus())
		return -EINVAL;

	xfs_globals.pwork_threads = val;

	return count;
}

STATIC ssize_t
pwork_threads_show(
	struct kobject	*kobject,
	char		*buf)
{
	return sysfs_emit(buf, "%d\n", xfs_globals.pwork_threads);
}
XFS_SYSFS_ATTR_RW(pwork_threads);
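
/*
 * The accepted range mirrors the check above: 0 through
 * num_possible_cpus() forces that many workers, while -1 (presumably
 * the "no override" default) lets the pwork code size itself.
 */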

/*
 * The "LARP" (Logged extended Attribute Recovery Persistence) debugging knob
 * sets the XFS_DA_OP_LOGGED flag on all xfs_attr_set operations performed on
 * V5 filesystems.  As a result, the intermediate progress of all setxattr and
 * removexattr operations is tracked via the log and can be restarted during
 * recovery.  This is useful for testing xattr recovery prior to merging of the
 * parent pointer feature which requires it to maintain consistency, and may be
 * enabled for userspace xattrs in the future.
 */
static ssize_t
larp_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	ssize_t		ret;

	ret = kstrtobool(buf, &xfs_globals.larp);
	if (ret < 0)
		return ret;
	return count;
}

STATIC ssize_t
larp_show(
	struct kobject	*kobject,
	char		*buf)
{
	return sysfs_emit(buf, "%d\n", xfs_globals.larp);
}
XFS_SYSFS_ATTR_RW(larp);

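/*
 * These two slack knobs are assumed to feed the btree bulk loader used
 * when rebuilding btrees (e.g. by online repair): they control how much
 * free space to leave in each leaf/node block, with -1 keeping the
 * loader's default fill heuristics.
 */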
STATIC ssize_t
bload_leaf_slack_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	xfs_globals.bload_leaf_slack = val;
	return count;
}

STATIC ssize_t
bload_leaf_slack_show(
	struct kobject	*kobject,
	char		*buf)
{
	return sysfs_emit(buf, "%d\n", xfs_globals.bload_leaf_slack);
}
XFS_SYSFS_ATTR_RW(bload_leaf_slack);

STATIC ssize_t
bload_node_slack_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	xfs_globals.bload_node_slack = val;
	return count;
}

STATIC ssize_t
bload_node_slack_show(
	struct kobject	*kobject,
	char		*buf)
{
	return sysfs_emit(buf, "%d\n", xfs_globals.bload_node_slack);
}
XFS_SYSFS_ATTR_RW(bload_node_slack);

static struct attribute *xfs_dbg_attrs[] = {
	ATTR_LIST(bug_on_assert),
	ATTR_LIST(log_recovery_delay),
	ATTR_LIST(mount_delay),
	ATTR_LIST(always_cow),
	ATTR_LIST(pwork_threads),
	ATTR_LIST(larp),
	ATTR_LIST(bload_leaf_slack),
	ATTR_LIST(bload_node_slack),
	NULL,
};
ATTRIBUTE_GROUPS(xfs_dbg);

const struct kobj_type xfs_dbg_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_groups = xfs_dbg_groups,
};

#endif /* DEBUG */

/* stats */

static inline struct xstats *
to_xstats(struct kobject *kobject)
{
	struct xfs_kobj *kobj = to_kobj(kobject);

	return container_of(kobj, struct xstats, xs_kobj);
}

STATIC ssize_t
stats_show(
	struct kobject	*kobject,
	char		*buf)
{
	struct xstats	*stats = to_xstats(kobject);

	return xfs_stats_format(stats->xs_stats, buf);
}
XFS_SYSFS_ATTR_RO(stats);

STATIC ssize_t
stats_clear_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	int		ret;
	int		val;
	struct xstats	*stats = to_xstats(kobject);

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val != 1)
		return -EINVAL;

	xfs_stats_clearall(stats->xs_stats);
	return count;
}
XFS_SYSFS_ATTR_WO(stats_clear);

static struct attribute *xfs_stats_attrs[] = {
	ATTR_LIST(stats),
	ATTR_LIST(stats_clear),
	NULL,
};
ATTRIBUTE_GROUPS(xfs_stats);

const struct kobj_type xfs_stats_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_groups = xfs_stats_groups,
};

/* xlog */

static inline struct xlog *
to_xlog(struct kobject *kobject)
{
	struct xfs_kobj *kobj = to_kobj(kobject);

	return container_of(kobj, struct xlog, l_kobj);
}

STATIC ssize_t
log_head_lsn_show(
	struct kobject	*kobject,
	char		*buf)
{
	int cycle;
	int block;
	struct xlog *log = to_xlog(kobject);

	spin_lock(&log->l_icloglock);
	cycle = log->l_curr_cycle;
	block = log->l_curr_block;
	spin_unlock(&log->l_icloglock);

	return sysfs_emit(buf, "%d:%d\n", cycle, block);
}
XFS_SYSFS_ATTR_RO(log_head_lsn);

STATIC ssize_t
log_tail_lsn_show(
	struct kobject	*kobject,
	char		*buf)
{
	int cycle;
	int block;
	struct xlog *log = to_xlog(kobject);

	xlog_crack_atomic_lsn(&log->l_tail_lsn, &cycle, &block);
	return sysfs_emit(buf, "%d:%d\n", cycle, block);
}
XFS_SYSFS_ATTR_RO(log_tail_lsn);

STATIC ssize_t
reserve_grant_head_bytes_show(
	struct kobject	*kobject,
	char		*buf)
{
	return sysfs_emit(buf, "%lld\n",
			atomic64_read(&to_xlog(kobject)->l_reserve_head.grant));
}
XFS_SYSFS_ATTR_RO(reserve_grant_head_bytes);

STATIC ssize_t
write_grant_head_bytes_show(
	struct kobject	*kobject,
	char		*buf)
{
	return sysfs_emit(buf, "%lld\n",
			atomic64_read(&to_xlog(kobject)->l_write_head.grant));
}
XFS_SYSFS_ATTR_RO(write_grant_head_bytes);

static struct attribute *xfs_log_attrs[] = {
	ATTR_LIST(log_head_lsn),
	ATTR_LIST(log_tail_lsn),
	ATTR_LIST(reserve_grant_head_bytes),
	ATTR_LIST(write_grant_head_bytes),
	NULL,
};
ATTRIBUTE_GROUPS(xfs_log);

const struct kobj_type xfs_log_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_groups = xfs_log_groups,
};

/*
 * Metadata IO error configuration
 *
 * The sysfs structure here is:
 *	...xfs/<dev>/error/<class>/<errno>/<error_attrs>
 *
 * where <class> allows us to discriminate between data IO and metadata IO,
 * and any other future type of IO (e.g. special inode or directory error
 * handling) we care to support.
 */
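
/*
 * For example, metadata write retries for EIO on a filesystem mounted
 * from sda1 (hypothetical device name) would be tuned through:
 *
 *	/sys/fs/xfs/sda1/error/metadata/EIO/max_retries
 */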
static inline struct xfs_error_cfg *
to_error_cfg(struct kobject *kobject)
{
	struct xfs_kobj *kobj = to_kobj(kobject);
	return container_of(kobj, struct xfs_error_cfg, kobj);
}

static inline struct xfs_mount *
err_to_mp(struct kobject *kobject)
{
	struct xfs_kobj *kobj = to_kobj(kobject);
	return container_of(kobj, struct xfs_mount, m_error_kobj);
}

static ssize_t
max_retries_show(
	struct kobject	*kobject,
	char		*buf)
{
	int		retries;
	struct xfs_error_cfg *cfg = to_error_cfg(kobject);

	if (cfg->max_retries == XFS_ERR_RETRY_FOREVER)
		retries = -1;
	else
		retries = cfg->max_retries;

	return sysfs_emit(buf, "%d\n", retries);
}

static ssize_t
max_retries_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	struct xfs_error_cfg *cfg = to_error_cfg(kobject);
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val < -1)
		return -EINVAL;

	if (val == -1)
		cfg->max_retries = XFS_ERR_RETRY_FOREVER;
	else
		cfg->max_retries = val;
	return count;
}
XFS_SYSFS_ATTR_RW(max_retries);

static ssize_t
retry_timeout_seconds_show(
	struct kobject	*kobject,
	char		*buf)
{
	int		timeout;
	struct xfs_error_cfg *cfg = to_error_cfg(kobject);

	if (cfg->retry_timeout == XFS_ERR_RETRY_FOREVER)
		timeout = -1;
	else
		timeout = jiffies_to_msecs(cfg->retry_timeout) / MSEC_PER_SEC;

	return sysfs_emit(buf, "%d\n", timeout);
}

static ssize_t
retry_timeout_seconds_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	struct xfs_error_cfg *cfg = to_error_cfg(kobject);
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	/* 1 day timeout maximum, -1 means infinite */
	if (val < -1 || val > 86400)
		return -EINVAL;

	if (val == -1)
		cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
	else {
		cfg->retry_timeout = msecs_to_jiffies(val * MSEC_PER_SEC);
		ASSERT(msecs_to_jiffies(val * MSEC_PER_SEC) < LONG_MAX);
	}
	return count;
}
XFS_SYSFS_ATTR_RW(retry_timeout_seconds);
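
/*
 * Both error knobs share the -1 convention established above: writing
 * -1 selects XFS_ERR_RETRY_FOREVER, e.g.
 *
 *	# echo -1 > .../error/metadata/ENOSPC/retry_timeout_seconds
 *
 * disables the timeout, while 0..86400 is converted to jiffies via
 * msecs_to_jiffies(val * MSEC_PER_SEC).
 */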

static ssize_t
fail_at_unmount_show(
	struct kobject	*kobject,
	char		*buf)
{
	struct xfs_mount	*mp = err_to_mp(kobject);

	return sysfs_emit(buf, "%d\n", mp->m_fail_unmount);
}

static ssize_t
fail_at_unmount_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	struct xfs_mount	*mp = err_to_mp(kobject);
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val < 0 || val > 1)
		return -EINVAL;

	mp->m_fail_unmount = val;
	return count;
}
XFS_SYSFS_ATTR_RW(fail_at_unmount);

static struct attribute *xfs_error_attrs[] = {
	ATTR_LIST(max_retries),
	ATTR_LIST(retry_timeout_seconds),
	NULL,
};
ATTRIBUTE_GROUPS(xfs_error);

static const struct kobj_type xfs_error_cfg_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_groups = xfs_error_groups,
};

static const struct kobj_type xfs_error_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
};

/*
 * Error initialization tables. These need to be ordered in the same
 * order as the enums used to index the array. All class init tables need to
 * define a "default" behaviour as the first entry, all other entries can be
 * empty.
 */
struct xfs_error_init {
	char		*name;
	int		max_retries;
	int		retry_timeout;	/* in seconds */
};

static const struct xfs_error_init xfs_error_meta_init[XFS_ERR_ERRNO_MAX] = {
	{ .name = "default",
	  .max_retries = XFS_ERR_RETRY_FOREVER,
	  .retry_timeout = XFS_ERR_RETRY_FOREVER,
	},
	{ .name = "EIO",
	  .max_retries = XFS_ERR_RETRY_FOREVER,
	  .retry_timeout = XFS_ERR_RETRY_FOREVER,
	},
	{ .name = "ENOSPC",
	  .max_retries = XFS_ERR_RETRY_FOREVER,
	  .retry_timeout = XFS_ERR_RETRY_FOREVER,
	},
	{ .name = "ENODEV",
	  .max_retries = 0,	/* We can't recover from devices disappearing */
	  .retry_timeout = 0,
	},
};

static int
xfs_error_sysfs_init_class(
	struct xfs_mount	*mp,
	int			class,
	const char		*parent_name,
	struct xfs_kobj		*parent_kobj,
	const struct xfs_error_init init[])
{
	struct xfs_error_cfg	*cfg;
	int			error;
	int			i;

	ASSERT(class < XFS_ERR_CLASS_MAX);

	error = xfs_sysfs_init(parent_kobj, &xfs_error_ktype,
				&mp->m_error_kobj, parent_name);
	if (error)
		return error;

	for (i = 0; i < XFS_ERR_ERRNO_MAX; i++) {
		cfg = &mp->m_error_cfg[class][i];
		error = xfs_sysfs_init(&cfg->kobj, &xfs_error_cfg_ktype,
					parent_kobj, init[i].name);
		if (error)
			goto out_error;

		cfg->max_retries = init[i].max_retries;
		if (init[i].retry_timeout == XFS_ERR_RETRY_FOREVER)
			cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
		else
			cfg->retry_timeout = msecs_to_jiffies(
					init[i].retry_timeout * MSEC_PER_SEC);
	}
	return 0;

out_error:
	/* unwind the entries that succeeded */
	for (i--; i >= 0; i--) {
		cfg = &mp->m_error_cfg[class][i];
		xfs_sysfs_del(&cfg->kobj);
	}
	xfs_sysfs_del(parent_kobj);
	return error;
}

static inline struct xfs_mount *zoned_to_mp(struct kobject *kobj)
{
	return container_of(to_kobj(kobj), struct xfs_mount, m_zoned_kobj);
}

static ssize_t
max_open_zones_show(
	struct kobject		*kobj,
	char			*buf)
{
	/* only report the open zones available for user data */
	return sysfs_emit(buf, "%u\n",
		zoned_to_mp(kobj)->m_max_open_zones - XFS_OPEN_GC_ZONES);
}
XFS_SYSFS_ATTR_RO(max_open_zones);
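
/*
 * m_max_open_zones covers both user data and garbage collection, so the
 * attribute subtracts the XFS_OPEN_GC_ZONES reserved for GC; e.g. a
 * device allowing 128 open zones reports 128 - XFS_OPEN_GC_ZONES here.
 */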

static struct attribute *xfs_zoned_attrs[] = {
	ATTR_LIST(max_open_zones),
	NULL,
};
ATTRIBUTE_GROUPS(xfs_zoned);

static const struct kobj_type xfs_zoned_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_groups = xfs_zoned_groups,
};

int
xfs_mount_sysfs_init(
	struct xfs_mount	*mp)
{
	int			error;

	super_set_sysfs_name_id(mp->m_super);

	/* .../xfs/<dev>/ */
	error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype,
			       NULL, mp->m_super->s_id);
	if (error)
		return error;

	/* .../xfs/<dev>/stats/ */
	error = xfs_sysfs_init(&mp->m_stats.xs_kobj, &xfs_stats_ktype,
			       &mp->m_kobj, "stats");
	if (error)
		goto out_remove_fsdir;

	/* .../xfs/<dev>/error/ */
	error = xfs_sysfs_init(&mp->m_error_kobj, &xfs_error_ktype,
				&mp->m_kobj, "error");
	if (error)
		goto out_remove_stats_dir;

	/* .../xfs/<dev>/error/fail_at_unmount */
	error = sysfs_create_file(&mp->m_error_kobj.kobject,
				  ATTR_LIST(fail_at_unmount));
	if (error)
		goto out_remove_error_dir;

	/* .../xfs/<dev>/error/metadata/ */
	error = xfs_error_sysfs_init_class(mp, XFS_ERR_METADATA,
				"metadata", &mp->m_error_meta_kobj,
				xfs_error_meta_init);
	if (error)
		goto out_remove_error_dir;

	if (IS_ENABLED(CONFIG_XFS_RT) && xfs_has_zoned(mp)) {
		/* .../xfs/<dev>/zoned/ */
		error = xfs_sysfs_init(&mp->m_zoned_kobj, &xfs_zoned_ktype,
					&mp->m_kobj, "zoned");
		if (error)
			goto out_remove_error_dir;
	}

	return 0;

out_remove_error_dir:
	xfs_sysfs_del(&mp->m_error_kobj);
out_remove_stats_dir:
	xfs_sysfs_del(&mp->m_stats.xs_kobj);
out_remove_fsdir:
	xfs_sysfs_del(&mp->m_kobj);
	return error;
}

void
xfs_mount_sysfs_del(
	struct xfs_mount	*mp)
{
	struct xfs_error_cfg	*cfg;
	int			i, j;

	if (IS_ENABLED(CONFIG_XFS_RT) && xfs_has_zoned(mp))
		xfs_sysfs_del(&mp->m_zoned_kobj);

	for (i = 0; i < XFS_ERR_CLASS_MAX; i++) {
		for (j = 0; j < XFS_ERR_ERRNO_MAX; j++) {
			cfg = &mp->m_error_cfg[i][j];

			xfs_sysfs_del(&cfg->kobj);
		}
	}
	xfs_sysfs_del(&mp->m_error_meta_kobj);
	xfs_sysfs_del(&mp->m_error_kobj);
	xfs_sysfs_del(&mp->m_stats.xs_kobj);
	xfs_sysfs_del(&mp->m_kobj);
}

struct xfs_error_cfg *
xfs_error_get_cfg(
	struct xfs_mount	*mp,
	int			error_class,
	int			error)
{
	struct xfs_error_cfg	*cfg;

	if (error < 0)
		error = -error;

	switch (error) {
	case EIO:
		cfg = &mp->m_error_cfg[error_class][XFS_ERR_EIO];
		break;
	case ENOSPC:
		cfg = &mp->m_error_cfg[error_class][XFS_ERR_ENOSPC];
		break;
	case ENODEV:
		cfg = &mp->m_error_cfg[error_class][XFS_ERR_ENODEV];
		break;
	default:
		cfg = &mp->m_error_cfg[error_class][XFS_ERR_DEFAULT];
		break;
	}

	return cfg;
}
843