xref: /linux/fs/xfs/xfs_stats.c (revision 24ce659dcc02c21f8d6c0a7589c3320a4dfa8152)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"

struct xstats xfsstats;

/*
 * Sum one 32-bit statistics counter, identified by its word index into
 * struct __xfsstats, across all possible CPUs.
 */
static int counter_val(struct xfsstats __percpu *stats, int idx)
{
	int val = 0, cpu;

	for_each_possible_cpu(cpu)
		val += *(((__u32 *)per_cpu_ptr(stats, cpu) + idx));
	return val;
}
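
/*
 * Illustrative only (not compiled here): idx is a 32-bit word offset into
 * struct __xfsstats, so summing one named counter across CPUs would look
 * something like
 *
 *	total = counter_val(xfsstats.xs_stats, xfsstats_offset(xs_log_writes));
 *
 * assuming xfsstats_offset() maps a field name to its word offset, as in
 * xfs_stats.h.
 */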

/*
 * Format all statistics groups into @buf (the sysfs/procfs stat text) and
 * return the number of bytes written.
 */
int xfs_stats_format(struct xfsstats __percpu *stats, char *buf)
{
	int		i, j;
	int		len = 0;
	uint64_t	xs_xstrat_bytes = 0;
	uint64_t	xs_write_bytes = 0;
	uint64_t	xs_read_bytes = 0;

	/*
	 * Each endpoint is the word offset of the first counter in the
	 * following group, i.e. the exclusive end of this group's counters.
	 */
	static const struct xstats_entry {
		char	*desc;
		int	endpoint;
	} xstats[] = {
		{ "extent_alloc",	xfsstats_offset(xs_abt_lookup)	},
		{ "abt",		xfsstats_offset(xs_blk_mapr)	},
		{ "blk_map",		xfsstats_offset(xs_bmbt_lookup)	},
		{ "bmbt",		xfsstats_offset(xs_dir_lookup)	},
		{ "dir",		xfsstats_offset(xs_trans_sync)	},
		{ "trans",		xfsstats_offset(xs_ig_attempts)	},
		{ "ig",			xfsstats_offset(xs_log_writes)	},
		{ "log",		xfsstats_offset(xs_try_logspace)},
		{ "push_ail",		xfsstats_offset(xs_xstrat_quick)},
		{ "xstrat",		xfsstats_offset(xs_write_calls)	},
		{ "rw",			xfsstats_offset(xs_attr_get)	},
		{ "attr",		xfsstats_offset(xs_iflush_count)},
		{ "icluster",		xfsstats_offset(vn_active)	},
		{ "vnodes",		xfsstats_offset(xb_get)		},
		{ "buf",		xfsstats_offset(xs_abtb_2)	},
		{ "abtb2",		xfsstats_offset(xs_abtc_2)	},
		{ "abtc2",		xfsstats_offset(xs_bmbt_2)	},
		{ "bmbt2",		xfsstats_offset(xs_ibt_2)	},
		{ "ibt2",		xfsstats_offset(xs_fibt_2)	},
		{ "fibt2",		xfsstats_offset(xs_rmap_2)	},
		{ "rmapbt",		xfsstats_offset(xs_refcbt_2)	},
		{ "refcntbt",		xfsstats_offset(xs_qm_dqreclaims)},
		/* we print both series of quota information together */
		{ "qm",			xfsstats_offset(xs_xstrat_bytes)},
	};

	/* Loop over all stats groups */

	for (i = j = 0; i < ARRAY_SIZE(xstats); i++) {
		len += scnprintf(buf + len, PATH_MAX - len, "%s",
				xstats[i].desc);
		/* inner loop does each group */
		for (; j < xstats[i].endpoint; j++)
			len += scnprintf(buf + len, PATH_MAX - len, " %u",
					counter_val(stats, j));
		len += scnprintf(buf + len, PATH_MAX - len, "\n");
	}
	/* extra precision counters */
	for_each_possible_cpu(i) {
		xs_xstrat_bytes += per_cpu_ptr(stats, i)->s.xs_xstrat_bytes;
		xs_write_bytes += per_cpu_ptr(stats, i)->s.xs_write_bytes;
		xs_read_bytes += per_cpu_ptr(stats, i)->s.xs_read_bytes;
	}

	len += scnprintf(buf + len, PATH_MAX-len, "xpc %Lu %Lu %Lu\n",
			xs_xstrat_bytes, xs_write_bytes, xs_read_bytes);
	len += scnprintf(buf + len, PATH_MAX-len, "debug %u\n",
#if defined(DEBUG)
		1);
#else
		0);
#endif

	return len;
}
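
/*
 * A minimal usage sketch (not the actual mainline wiring, which lives in
 * xfs_sysfs.c): a sysfs-style show callback could hand its page-sized
 * buffer straight to the formatter, e.g.
 *
 *	ssize_t xfs_stats_show(char *buf)	// hypothetical helper
 *	{
 *		return xfs_stats_format(xfsstats.xs_stats, buf);
 *	}
 *
 * The formatted text is one line per group ("extent_alloc 0 0 0 0", ...),
 * then an "xpc" line with the three byte counters and a "debug" flag line.
 */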

/*
 * Zero every per-cpu counter.  vn_active tracks currently active vnodes and
 * must survive a reset, so it is saved and restored around the memset.
 */
void xfs_stats_clearall(struct xfsstats __percpu *stats)
{
	int		c;
	uint32_t	vn_active;

	xfs_notice(NULL, "Clearing xfsstats");
	for_each_possible_cpu(c) {
		preempt_disable();
		/* save vn_active, it's a universal truth! */
		vn_active = per_cpu_ptr(stats, c)->s.vn_active;
		memset(per_cpu_ptr(stats, c), 0, sizeof(*stats));
		per_cpu_ptr(stats, c)->s.vn_active = vn_active;
		preempt_enable();
	}
}
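
/*
 * A hedged usage sketch: in mainline a write of "1" to the sysfs
 * "stats_clear" attribute (xfs_sysfs.c) triggers this reset; a caller needs
 * nothing more than
 *
 *	xfs_stats_clearall(xfsstats.xs_stats);
 *
 * for the global statistics.
 */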

#ifdef CONFIG_PROC_FS
/* legacy quota interfaces */
#ifdef CONFIG_XFS_QUOTA

#define XFSSTAT_START_XQMSTAT xfsstats_offset(xs_qm_dqreclaims)
#define XFSSTAT_END_XQMSTAT xfsstats_offset(xs_qm_dquot)

static int xqm_proc_show(struct seq_file *m, void *v)
{
	/* maximum; incore; ratio free to inuse; freelist */
	seq_printf(m, "%d\t%d\t%d\t%u\n",
		   0, counter_val(xfsstats.xs_stats, XFSSTAT_END_XQMSTAT),
		   0, counter_val(xfsstats.xs_stats, XFSSTAT_END_XQMSTAT + 1));
	return 0;
}

/* legacy quota stats interface no 2 */
static int xqmstat_proc_show(struct seq_file *m, void *v)
{
	int j;

	seq_printf(m, "qm");
	for (j = XFSSTAT_START_XQMSTAT; j < XFSSTAT_END_XQMSTAT; j++)
		seq_printf(m, " %u", counter_val(xfsstats.xs_stats, j));
	seq_putc(m, '\n');
	return 0;
}
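
/*
 * For reference, both legacy files read back as plain text, e.g. (values
 * purely illustrative):
 *
 *	/proc/fs/xfs/xqm:	"0	37	0	0"
 *				(max, incore, free:inuse ratio, freelist)
 *	/proc/fs/xfs/xqmstat:	"qm 513 11 0 102 ..."
 *
 * xqmstat dumps, in order, the dquot counters between XFSSTAT_START_XQMSTAT
 * and XFSSTAT_END_XQMSTAT.
 */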
#endif /* CONFIG_XFS_QUOTA */

/*
 * Create /proc/fs/xfs, the legacy "stat" symlink into the sysfs stats file,
 * and (with quota configured) the legacy quota files.
 */
int
xfs_init_procfs(void)
{
	if (!proc_mkdir("fs/xfs", NULL))
		return -ENOMEM;

	if (!proc_symlink("fs/xfs/stat", NULL,
			  "/sys/fs/xfs/stats/stats"))
		goto out;

#ifdef CONFIG_XFS_QUOTA
	if (!proc_create_single("fs/xfs/xqmstat", 0, NULL, xqmstat_proc_show))
		goto out;
	if (!proc_create_single("fs/xfs/xqm", 0, NULL, xqm_proc_show))
		goto out;
#endif
	return 0;

out:
	remove_proc_subtree("fs/xfs", NULL);
	return -ENOMEM;
}
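
/*
 * A hedged usage sketch: these hooks are meant to bracket the filesystem's
 * module lifetime (in mainline, init_xfs_fs()/exit_xfs_fs() in xfs_super.c),
 * roughly:
 *
 *	error = xfs_init_procfs();
 *	if (error)
 *		return error;		// or unwind earlier init steps
 *	...
 *	xfs_cleanup_procfs();		// on module exit or init failure
 *
 * xfs_cleanup_procfs() below removes the whole /proc/fs/xfs subtree.
 */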

void
xfs_cleanup_procfs(void)
{
	remove_proc_subtree("fs/xfs", NULL);
}
#endif /* CONFIG_PROC_FS */