xref: /linux/fs/xfs/xfs_stats.c (revision 6f7e6393d1ce636bb7ec77a7fe7b77458fddf701)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 #include "xfs_platform.h"
7 
8 struct xstats xfsstats;
9 
10 static int counter_val(struct xfsstats __percpu *stats, int idx)
11 {
12 	int val = 0, cpu;
13 
14 	for_each_possible_cpu(cpu)
15 		val += *(((__u32 *)per_cpu_ptr(stats, cpu) + idx));
16 	return val;
17 }
18 
/*
 * Format the full set of XFS statistics into @buf.
 *
 * Each stats group is emitted as one line: the group name followed by
 * that group's counters, each summed across all possible CPUs.  The
 * 64-bit "extra precision" byte counters ("xpc", "defer_relog",
 * "gc xpc") and the debug flag are appended after the group lines.
 *
 * Returns the number of bytes written to @buf.  Output is bounded by
 * PATH_MAX; scnprintf() truncates safely if the buffer fills up.
 */
int xfs_stats_format(struct xfsstats __percpu *stats, char *buf)
{
	int		i, j;
	int		len = 0;
	uint64_t	xs_xstrat_bytes = 0;
	uint64_t	xs_write_bytes = 0;
	uint64_t	xs_read_bytes = 0;
	uint64_t	xs_defer_relog = 0;
	uint64_t	xs_gc_bytes = 0;

	/*
	 * Table of stats groups.  Each "endpoint" is the __u32 offset of
	 * the first counter *past* the group, i.e. group i covers the
	 * half-open offset range [previous endpoint, xstats[i].endpoint).
	 */
	static const struct xstats_entry {
		char	*desc;
		int	endpoint;
	} xstats[] = {
		{ "extent_alloc",	xfsstats_offset(xs_abt_lookup)	},
		{ "abt",		xfsstats_offset(xs_blk_mapr)	},
		{ "blk_map",		xfsstats_offset(xs_bmbt_lookup)	},
		{ "bmbt",		xfsstats_offset(xs_dir_lookup)	},
		{ "dir",		xfsstats_offset(xs_trans_sync)	},
		{ "trans",		xfsstats_offset(xs_ig_attempts)	},
		{ "ig",			xfsstats_offset(xs_log_writes)	},
		{ "log",		xfsstats_offset(xs_try_logspace)},
		{ "push_ail",		xfsstats_offset(xs_xstrat_quick)},
		{ "xstrat",		xfsstats_offset(xs_write_calls)	},
		{ "rw",			xfsstats_offset(xs_attr_get)	},
		{ "attr",		xfsstats_offset(xs_iflush_count)},
		{ "icluster",		xfsstats_offset(vn_active)	},
		{ "vnodes",		xfsstats_offset(xb_get)		},
		{ "buf",		xfsstats_offset(xs_abtb_2)	},
		{ "abtb2",		xfsstats_offset(xs_abtc_2)	},
		{ "abtc2",		xfsstats_offset(xs_bmbt_2)	},
		{ "bmbt2",		xfsstats_offset(xs_ibt_2)	},
		{ "ibt2",		xfsstats_offset(xs_fibt_2)	},
		{ "fibt2",		xfsstats_offset(xs_rmap_2)	},
		{ "rmapbt",		xfsstats_offset(xs_refcbt_2)	},
		{ "refcntbt",		xfsstats_offset(xs_rmap_mem_2)	},
		{ "rmapbt_mem",		xfsstats_offset(xs_rcbag_2)	},
		{ "rcbagbt",		xfsstats_offset(xs_rtrmap_2)	},
		{ "rtrmapbt",		xfsstats_offset(xs_rtrmap_mem_2)},
		{ "rtrmapbt_mem",	xfsstats_offset(xs_rtrefcbt_2)	},
		{ "rtrefcntbt",		xfsstats_offset(xs_qm_dqreclaims)},
		/* we print both series of quota information together */
		{ "qm",			xfsstats_offset(xs_gc_read_calls)},
		{ "zoned",		xfsstats_offset(__pad1)},
	};

	/* Loop over all stats groups */

	for (i = j = 0; i < ARRAY_SIZE(xstats); i++) {
		len += scnprintf(buf + len, PATH_MAX - len, "%s",
				xstats[i].desc);
		/* inner loop does each group; j carries over between groups */
		for (; j < xstats[i].endpoint; j++)
			len += scnprintf(buf + len, PATH_MAX - len, " %u",
					counter_val(stats, j));
		len += scnprintf(buf + len, PATH_MAX - len, "\n");
	}
	/* extra precision counters */
	for_each_possible_cpu(i) {
		xs_xstrat_bytes += per_cpu_ptr(stats, i)->s.xs_xstrat_bytes;
		xs_write_bytes += per_cpu_ptr(stats, i)->s.xs_write_bytes;
		xs_read_bytes += per_cpu_ptr(stats, i)->s.xs_read_bytes;
		xs_defer_relog += per_cpu_ptr(stats, i)->s.xs_defer_relog;
		xs_gc_bytes += per_cpu_ptr(stats, i)->s.xs_gc_bytes;
	}

	len += scnprintf(buf + len, PATH_MAX-len, "xpc %llu %llu %llu\n",
			xs_xstrat_bytes, xs_write_bytes, xs_read_bytes);
	len += scnprintf(buf + len, PATH_MAX-len, "defer_relog %llu\n",
			xs_defer_relog);
	/* report whether this is a DEBUG build */
	len += scnprintf(buf + len, PATH_MAX-len, "debug %u\n",
#if defined(DEBUG)
		1);
#else
		0);
#endif
	len += scnprintf(buf + len, PATH_MAX-len, "gc xpc %llu\n", xs_gc_bytes);

	return len;
}
99 
100 void xfs_stats_clearall(struct xfsstats __percpu *stats)
101 {
102 	int		c;
103 	uint32_t	vn_active;
104 
105 	xfs_notice(NULL, "Clearing xfsstats");
106 	for_each_possible_cpu(c) {
107 		preempt_disable();
108 		/* save vn_active, it's a universal truth! */
109 		vn_active = per_cpu_ptr(stats, c)->s.vn_active;
110 		memset(per_cpu_ptr(stats, c), 0, sizeof(*stats));
111 		per_cpu_ptr(stats, c)->s.vn_active = vn_active;
112 		preempt_enable();
113 	}
114 }
115 
116 #ifdef CONFIG_PROC_FS
117 /* legacy quota interfaces */
118 #ifdef CONFIG_XFS_QUOTA
119 
120 #define XFSSTAT_START_XQMSTAT xfsstats_offset(xs_qm_dqreclaims)
121 #define XFSSTAT_END_XQMSTAT xfsstats_offset(xs_qm_dquot)
122 
/*
 * Legacy /proc/fs/xfs/xqm interface.
 *
 * Fields: maximum (always 0 now); incore dquots; ratio free to inuse
 * (always 0 now); freelist length; and a tag saying whether realtime
 * quota support is compiled in.
 */
static int xqm_proc_show(struct seq_file *m, void *v)
{
	/* maximum; incore; ratio free to inuse; freelist; rtquota */
	seq_printf(m, "%d\t%d\t%d\t%u\t%s\n",
		   0, counter_val(xfsstats.xs_stats, XFSSTAT_END_XQMSTAT),
		   0, counter_val(xfsstats.xs_stats, XFSSTAT_END_XQMSTAT + 1),
		   IS_ENABLED(CONFIG_XFS_RT) ? "rtquota" : "quota");
	return 0;
}
132 
133 /* legacy quota stats interface no 2 */
134 static int xqmstat_proc_show(struct seq_file *m, void *v)
135 {
136 	int j;
137 
138 	seq_puts(m, "qm");
139 	for (j = XFSSTAT_START_XQMSTAT; j < XFSSTAT_END_XQMSTAT; j++)
140 		seq_printf(m, " %u", counter_val(xfsstats.xs_stats, j));
141 	seq_putc(m, '\n');
142 	return 0;
143 }
144 #endif /* CONFIG_XFS_QUOTA */
145 
146 int
147 xfs_init_procfs(void)
148 {
149 	if (!proc_mkdir("fs/xfs", NULL))
150 		return -ENOMEM;
151 
152 	if (!proc_symlink("fs/xfs/stat", NULL,
153 			  "/sys/fs/xfs/stats/stats"))
154 		goto out;
155 
156 #ifdef CONFIG_XFS_QUOTA
157 	if (!proc_create_single("fs/xfs/xqmstat", 0, NULL, xqmstat_proc_show))
158 		goto out;
159 	if (!proc_create_single("fs/xfs/xqm", 0, NULL, xqm_proc_show))
160 		goto out;
161 #endif
162 	return 0;
163 
164 out:
165 	remove_proc_subtree("fs/xfs", NULL);
166 	return -ENOMEM;
167 }
168 
/*
 * Remove the /proc/fs/xfs directory and everything beneath it.
 */
void
xfs_cleanup_procfs(void)
{
	remove_proc_subtree("fs/xfs", NULL);
}
174 #endif /* CONFIG_PROC_FS */
175