xref: /linux/fs/xfs/scrub/stats.c (revision b477ff98d903618a1ab8247861f2ea6e70c0f0f8)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2023 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_sysfs.h"
#include "xfs_btree.h"
#include "xfs_super.h"
#include "scrub/scrub.h"
#include "scrub/stats.h"
#include "scrub/trace.h"

struct xchk_scrub_stats {
	/* all 32-bit counters here */

	/* checking stats */
	uint32_t		invocations;
	uint32_t		clean;
	uint32_t		corrupt;
	uint32_t		preen;
	uint32_t		xfail;
	uint32_t		xcorrupt;
	uint32_t		incomplete;
	uint32_t		warning;
	uint32_t		retries;

	/* repair stats */
	uint32_t		repair_invocations;
	uint32_t		repair_success;

	/* all 64-bit items here */

	/* runtimes */
	uint64_t		checktime_us;
	uint64_t		repairtime_us;

	/* non-counter state must go at the end for clearall */
	spinlock_t		css_lock;
};

struct xchk_stats {
	struct dentry		*cs_debugfs;
	struct xchk_scrub_stats	cs_stats[XFS_SCRUB_TYPE_NR];
};

static struct xchk_stats	global_stats;

static const char *name_map[XFS_SCRUB_TYPE_NR] = {
	[XFS_SCRUB_TYPE_SB]		= "sb",
	[XFS_SCRUB_TYPE_AGF]		= "agf",
	[XFS_SCRUB_TYPE_AGFL]		= "agfl",
	[XFS_SCRUB_TYPE_AGI]		= "agi",
	[XFS_SCRUB_TYPE_BNOBT]		= "bnobt",
	[XFS_SCRUB_TYPE_CNTBT]		= "cntbt",
	[XFS_SCRUB_TYPE_INOBT]		= "inobt",
	[XFS_SCRUB_TYPE_FINOBT]		= "finobt",
	[XFS_SCRUB_TYPE_RMAPBT]		= "rmapbt",
	[XFS_SCRUB_TYPE_REFCNTBT]	= "refcountbt",
	[XFS_SCRUB_TYPE_INODE]		= "inode",
	[XFS_SCRUB_TYPE_BMBTD]		= "bmapbtd",
	[XFS_SCRUB_TYPE_BMBTA]		= "bmapbta",
	[XFS_SCRUB_TYPE_BMBTC]		= "bmapbtc",
	[XFS_SCRUB_TYPE_DIR]		= "directory",
	[XFS_SCRUB_TYPE_XATTR]		= "xattr",
	[XFS_SCRUB_TYPE_SYMLINK]	= "symlink",
	[XFS_SCRUB_TYPE_PARENT]		= "parent",
	[XFS_SCRUB_TYPE_RTBITMAP]	= "rtbitmap",
	[XFS_SCRUB_TYPE_RTSUM]		= "rtsummary",
	[XFS_SCRUB_TYPE_UQUOTA]		= "usrquota",
	[XFS_SCRUB_TYPE_GQUOTA]		= "grpquota",
	[XFS_SCRUB_TYPE_PQUOTA]		= "prjquota",
	[XFS_SCRUB_TYPE_FSCOUNTERS]	= "fscounters",
	[XFS_SCRUB_TYPE_QUOTACHECK]	= "quotacheck",
	[XFS_SCRUB_TYPE_NLINKS]		= "nlinks",
	[XFS_SCRUB_TYPE_DIRTREE]	= "dirtree",
	[XFS_SCRUB_TYPE_METAPATH]	= "metapath",
	[XFS_SCRUB_TYPE_RGSUPER]	= "rgsuper",
	[XFS_SCRUB_TYPE_RTRMAPBT]	= "rtrmapbt",
	[XFS_SCRUB_TYPE_RTREFCBT]	= "rtrefcountbt",
};
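
/*
 * Scrub types that have no name here (array holes left as NULL) are skipped
 * by the formatting and size-estimation loops below; their counters still
 * exist but are never reported.
 */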

/* Format the scrub stats into a text buffer, similar to pcp style. */
STATIC ssize_t
xchk_stats_format(
	struct xchk_stats	*cs,
	char			*buf,
	size_t			remaining)
{
	struct xchk_scrub_stats	*css = &cs->cs_stats[0];
	unsigned int		i;
	ssize_t			copied = 0;
	int			ret = 0;

	for (i = 0; i < XFS_SCRUB_TYPE_NR; i++, css++) {
		if (!name_map[i])
			continue;

		ret = scnprintf(buf, remaining,
 "%s %u %u %u %u %u %u %u %u %u %llu %u %u %llu\n",
				name_map[i],
				(unsigned int)css->invocations,
				(unsigned int)css->clean,
				(unsigned int)css->corrupt,
				(unsigned int)css->preen,
				(unsigned int)css->xfail,
				(unsigned int)css->xcorrupt,
				(unsigned int)css->incomplete,
				(unsigned int)css->warning,
				(unsigned int)css->retries,
				(unsigned long long)css->checktime_us,
				(unsigned int)css->repair_invocations,
				(unsigned int)css->repair_success,
				(unsigned long long)css->repairtime_us);
		if (ret <= 0)
			break;

		remaining -= ret;
		copied += ret;
		buf += ret;
	}

	return copied > 0 ? copied : ret;
}
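
/*
 * For illustration only (the numbers below are hypothetical), each line
 * emitted by xchk_stats_format() looks like:
 *
 *	bnobt 5 5 0 0 0 0 0 0 0 137 0 0 0
 *
 * i.e. the scrub type name followed by invocations, clean, corrupt, preen,
 * xfail, xcorrupt, incomplete, warning, retries, checktime_us,
 * repair_invocations, repair_success, and repairtime_us.
 */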

/* Estimate the worst case buffer size required to hold the whole report. */
STATIC size_t
xchk_stats_estimate_bufsize(
	struct xchk_stats	*cs)
{
	struct xchk_scrub_stats	*css = &cs->cs_stats[0];
	unsigned int		i;
	size_t			field_width;
	size_t			ret = 0;

	/* "4294967295" (UINT32_MAX) plus one space for each u32 field */
	field_width = 11 * (offsetof(struct xchk_scrub_stats, checktime_us) /
			    sizeof(uint32_t));

	/* "18446744073709551615" (UINT64_MAX) plus one space for each u64 field */
	field_width += 21 * ((offsetof(struct xchk_scrub_stats, css_lock) -
			      offsetof(struct xchk_scrub_stats, checktime_us)) /
			     sizeof(uint64_t));

	for (i = 0; i < XFS_SCRUB_TYPE_NR; i++, css++) {
		if (!name_map[i])
			continue;

		/* name plus one space */
		ret += 1 + strlen(name_map[i]);

		/* all fields, plus newline */
		ret += field_width + 1;
	}

	return ret;
}
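
/*
 * Note that the u32 field count above is derived from offsetof(), so any
 * alignment padding inserted before the first u64 member would be counted
 * as one more u32 field than actually exists.  That only ever makes the
 * estimate larger, which is harmless for a worst-case buffer size.
 */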

/* Clear all counters. */
STATIC void
xchk_stats_clearall(
	struct xchk_stats	*cs)
{
	struct xchk_scrub_stats	*css = &cs->cs_stats[0];
	unsigned int		i;

	for (i = 0; i < XFS_SCRUB_TYPE_NR; i++, css++) {
		spin_lock(&css->css_lock);
		memset(css, 0, offsetof(struct xchk_scrub_stats, css_lock));
		spin_unlock(&css->css_lock);
	}
}
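
/*
 * The memset() above deliberately stops at offsetof(..., css_lock), so the
 * spinlock protecting each entry is never overwritten while it is held.
 * This is why the structure keeps all non-counter state at the end.
 */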

#define XFS_SCRUB_OFLAG_UNCLEAN	(XFS_SCRUB_OFLAG_CORRUPT | \
				 XFS_SCRUB_OFLAG_PREEN | \
				 XFS_SCRUB_OFLAG_XFAIL | \
				 XFS_SCRUB_OFLAG_XCORRUPT | \
				 XFS_SCRUB_OFLAG_INCOMPLETE | \
				 XFS_SCRUB_OFLAG_WARNING)

STATIC void
xchk_stats_merge_one(
	struct xchk_stats		*cs,
	const struct xfs_scrub_metadata	*sm,
	const struct xchk_stats_run	*run)
{
	struct xchk_scrub_stats		*css;

	if (sm->sm_type >= XFS_SCRUB_TYPE_NR) {
		ASSERT(sm->sm_type < XFS_SCRUB_TYPE_NR);
		return;
	}

	css = &cs->cs_stats[sm->sm_type];
	spin_lock(&css->css_lock);
	css->invocations++;
	if (!(sm->sm_flags & XFS_SCRUB_OFLAG_UNCLEAN))
		css->clean++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		css->corrupt++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_PREEN)
		css->preen++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_XFAIL)
		css->xfail++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_XCORRUPT)
		css->xcorrupt++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_INCOMPLETE)
		css->incomplete++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_WARNING)
		css->warning++;
	css->retries += run->retries;
	css->checktime_us += howmany_64(run->scrub_ns, NSEC_PER_USEC);

	if (run->repair_attempted)
		css->repair_invocations++;
	if (run->repair_succeeded)
		css->repair_success++;
	css->repairtime_us += howmany_64(run->repair_ns, NSEC_PER_USEC);
	spin_unlock(&css->css_lock);
}

/* Merge these scrub-run stats into the global and mount stat data. */
void
xchk_stats_merge(
	struct xfs_mount		*mp,
	const struct xfs_scrub_metadata	*sm,
	const struct xchk_stats_run	*run)
{
	xchk_stats_merge_one(&global_stats, sm, run);
	xchk_stats_merge_one(mp->m_scrub_stats, sm, run);
}
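
/*
 * A minimal sketch of how one scrub run might be folded into the counters.
 * The field names mirror how xchk_stats_merge_one() reads struct
 * xchk_stats_run, but the values and the surrounding context (mp, sm) are
 * hypothetical:
 *
 *	struct xchk_stats_run	run = {
 *		.scrub_ns		= 52000,	(about 52us of checking)
 *		.retries		= 1,
 *		.repair_attempted	= false,
 *	};
 *
 *	xchk_stats_merge(mp, sm, &run);
 */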

/* debugfs boilerplate */

static ssize_t
xchk_scrub_stats_read(
	struct file		*file,
	char __user		*ubuf,
	size_t			count,
	loff_t			*ppos)
{
	struct xchk_stats	*cs = file->private_data;
	char			*buf;
	size_t			bufsize;
	ssize_t			avail, ret;

	/*
	 * This generates a text snapshot of all the scrub counters, so we
	 * do not want userspace to receive garbled text from multiple calls.
	 * If the file position is greater than 0, return a short read.
	 */
	if (*ppos > 0)
		return 0;

	bufsize = xchk_stats_estimate_bufsize(cs);

	buf = kvmalloc(bufsize, XCHK_GFP_FLAGS);
	if (!buf)
		return -ENOMEM;

	avail = xchk_stats_format(cs, buf, bufsize);
	if (avail < 0) {
		ret = avail;
		goto out;
	}

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, avail);
out:
	kvfree(buf);
	return ret;
}

static const struct file_operations scrub_stats_fops = {
	.open			= simple_open,
	.read			= xchk_scrub_stats_read,
};

static ssize_t
xchk_clear_scrub_stats_write(
	struct file		*file,
	const char __user	*ubuf,
	size_t			count,
	loff_t			*ppos)
{
	struct xchk_stats	*cs = file->private_data;
	unsigned int		val;
	int			ret;

	ret = kstrtouint_from_user(ubuf, count, 0, &val);
	if (ret)
		return ret;

	if (val != 1)
		return -EINVAL;

	xchk_stats_clearall(cs);
	return count;
}

static const struct file_operations clear_scrub_stats_fops = {
	.open			= simple_open,
	.write			= xchk_clear_scrub_stats_write,
};

/* Initialize the stats object. */
STATIC int
xchk_stats_init(
	struct xchk_stats	*cs,
	struct xfs_mount	*mp)
{
	struct xchk_scrub_stats	*css = &cs->cs_stats[0];
	unsigned int		i;

	for (i = 0; i < XFS_SCRUB_TYPE_NR; i++, css++)
		spin_lock_init(&css->css_lock);

	return 0;
}

/* Connect the stats object to debugfs. */
void
xchk_stats_register(
	struct xchk_stats	*cs,
	struct dentry		*parent)
{
	if (!parent)
		return;

	cs->cs_debugfs = xfs_debugfs_mkdir("scrub", parent);
	if (!cs->cs_debugfs)
		return;

	debugfs_create_file("stats", 0444, cs->cs_debugfs, cs,
			&scrub_stats_fops);
	debugfs_create_file("clear_stats", 0200, cs->cs_debugfs, cs,
			&clear_scrub_stats_fops);
}
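
/*
 * With debugfs mounted (commonly at /sys/kernel/debug), the files created
 * above can be exercised roughly as follows; the exact path depends on the
 * parent dentry passed in (global vs. per-mount), so treat these paths as
 * illustrative:
 *
 *	# cat /sys/kernel/debug/xfs/scrub/stats
 *	# echo 1 > /sys/kernel/debug/xfs/scrub/clear_stats
 */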

/* Free all resources related to the stats object. */
STATIC int
xchk_stats_teardown(
	struct xchk_stats	*cs)
{
	return 0;
}

/* Disconnect the stats object from debugfs. */
void
xchk_stats_unregister(
	struct xchk_stats	*cs)
{
	debugfs_remove(cs->cs_debugfs);
}

/* Initialize global stats and register them */
int __init
xchk_global_stats_setup(
	struct dentry		*parent)
{
	int			error;

	error = xchk_stats_init(&global_stats, NULL);
	if (error)
		return error;

	xchk_stats_register(&global_stats, parent);
	return 0;
}

/* Unregister global stats and tear them down */
void
xchk_global_stats_teardown(void)
{
	xchk_stats_unregister(&global_stats);
	xchk_stats_teardown(&global_stats);
}

/* Allocate per-mount stats */
int
xchk_mount_stats_alloc(
	struct xfs_mount	*mp)
{
	struct xchk_stats	*cs;
	int			error;

	cs = kvzalloc(sizeof(struct xchk_stats), GFP_KERNEL);
	if (!cs)
		return -ENOMEM;

	error = xchk_stats_init(cs, mp);
	if (error)
		goto out_free;

	mp->m_scrub_stats = cs;
	return 0;
out_free:
	kvfree(cs);
	return error;
}

/* Free per-mount stats */
void
xchk_mount_stats_free(
	struct xfs_mount	*mp)
{
	xchk_stats_teardown(mp->m_scrub_stats);
	kvfree(mp->m_scrub_stats);
	mp->m_scrub_stats = NULL;
}