// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2023 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_sysfs.h"
#include "xfs_btree.h"
#include "xfs_super.h"
#include "scrub/scrub.h"
#include "scrub/stats.h"
#include "scrub/trace.h"

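/*
 * In-memory counters for the online scrub and repair code.  One set of
 * counters is kept globally and another per mount; both are exported
 * (and can be cleared) through debugfs.
 */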
struct xchk_scrub_stats {
	/* all 32-bit counters here */

	/* checking stats */
	uint32_t		invocations;
	uint32_t		clean;
	uint32_t		corrupt;
	uint32_t		preen;
	uint32_t		xfail;
	uint32_t		xcorrupt;
	uint32_t		incomplete;
	uint32_t		warning;
	uint32_t		retries;

	/* repair stats */
	uint32_t		repair_invocations;
	uint32_t		repair_success;

	/* all 64-bit items here */

	/* runtimes */
	uint64_t		checktime_us;
	uint64_t		repairtime_us;

	/* non-counter state must go at the end for clearall */
	spinlock_t		css_lock;
};

struct xchk_stats {
	struct dentry		*cs_debugfs;
	struct xchk_scrub_stats	cs_stats[XFS_SCRUB_TYPE_NR];
};

static struct xchk_stats	global_stats;

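/*
 * Human-readable names for each scrub type, used as the row key in the
 * stats report.  Types without a name here are skipped when formatting.
 */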
static const char *name_map[XFS_SCRUB_TYPE_NR] = {
	[XFS_SCRUB_TYPE_SB]		= "sb",
	[XFS_SCRUB_TYPE_AGF]		= "agf",
	[XFS_SCRUB_TYPE_AGFL]		= "agfl",
	[XFS_SCRUB_TYPE_AGI]		= "agi",
	[XFS_SCRUB_TYPE_BNOBT]		= "bnobt",
	[XFS_SCRUB_TYPE_CNTBT]		= "cntbt",
	[XFS_SCRUB_TYPE_INOBT]		= "inobt",
	[XFS_SCRUB_TYPE_FINOBT]		= "finobt",
	[XFS_SCRUB_TYPE_RMAPBT]		= "rmapbt",
	[XFS_SCRUB_TYPE_REFCNTBT]	= "refcountbt",
	[XFS_SCRUB_TYPE_INODE]		= "inode",
	[XFS_SCRUB_TYPE_BMBTD]		= "bmapbtd",
	[XFS_SCRUB_TYPE_BMBTA]		= "bmapbta",
	[XFS_SCRUB_TYPE_BMBTC]		= "bmapbtc",
	[XFS_SCRUB_TYPE_DIR]		= "directory",
	[XFS_SCRUB_TYPE_XATTR]		= "xattr",
	[XFS_SCRUB_TYPE_SYMLINK]	= "symlink",
	[XFS_SCRUB_TYPE_PARENT]		= "parent",
	[XFS_SCRUB_TYPE_RTBITMAP]	= "rtbitmap",
	[XFS_SCRUB_TYPE_RTSUM]		= "rtsummary",
	[XFS_SCRUB_TYPE_UQUOTA]		= "usrquota",
	[XFS_SCRUB_TYPE_GQUOTA]		= "grpquota",
	[XFS_SCRUB_TYPE_PQUOTA]		= "prjquota",
	[XFS_SCRUB_TYPE_FSCOUNTERS]	= "fscounters",
	[XFS_SCRUB_TYPE_QUOTACHECK]	= "quotacheck",
	[XFS_SCRUB_TYPE_NLINKS]		= "nlinks",
	[XFS_SCRUB_TYPE_DIRTREE]	= "dirtree",
};

/* Format the scrub stats into a text buffer, similar to pcp style. */
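/*
 * Each row is the type name followed by the counters in declaration
 * order.  A hypothetical row for the "sb" type (values illustrative
 * only) might read:
 *
 *   sb 9 8 1 0 0 0 0 0 2 4913 1 1 7002
 *
 * i.e. invocations, clean, corrupt, preen, xfail, xcorrupt, incomplete,
 * warning, retries, checktime_us, repair_invocations, repair_success,
 * and repairtime_us.
 */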
STATIC ssize_t
xchk_stats_format(
	struct xchk_stats	*cs,
	char			*buf,
	size_t			remaining)
{
	struct xchk_scrub_stats	*css = &cs->cs_stats[0];
	unsigned int		i;
	ssize_t			copied = 0;
	int			ret = 0;

	for (i = 0; i < XFS_SCRUB_TYPE_NR; i++, css++) {
		if (!name_map[i])
			continue;

		ret = scnprintf(buf, remaining,
 "%s %u %u %u %u %u %u %u %u %u %llu %u %u %llu\n",
				name_map[i],
				(unsigned int)css->invocations,
				(unsigned int)css->clean,
				(unsigned int)css->corrupt,
				(unsigned int)css->preen,
				(unsigned int)css->xfail,
				(unsigned int)css->xcorrupt,
				(unsigned int)css->incomplete,
				(unsigned int)css->warning,
				(unsigned int)css->retries,
				(unsigned long long)css->checktime_us,
				(unsigned int)css->repair_invocations,
				(unsigned int)css->repair_success,
				(unsigned long long)css->repairtime_us);
		if (ret <= 0)
			break;

		remaining -= ret;
		copied += ret;
		buf += ret;
	}

	return copied > 0 ? copied : ret;
}

/* Estimate the worst case buffer size required to hold the whole report. */
STATIC size_t
xchk_stats_estimate_bufsize(
	struct xchk_stats	*cs)
{
	struct xchk_scrub_stats	*css = &cs->cs_stats[0];
	unsigned int		i;
	size_t			field_width;
	size_t			ret = 0;

	/* 4294967295 plus one space for each u32 field */
	field_width = 11 * (offsetof(struct xchk_scrub_stats, checktime_us) /
			    sizeof(uint32_t));

	/* 18446744073709551615 plus one space for each u64 field */
	field_width += 21 * ((offsetof(struct xchk_scrub_stats, css_lock) -
			      offsetof(struct xchk_scrub_stats, checktime_us)) /
			     sizeof(uint64_t));
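
	/*
	 * Illustrative arithmetic, assuming the u64 counters are 8-byte
	 * aligned: the eleven u32 counters occupy 44 bytes, padded to 48
	 * so that checktime_us is aligned, making the u32 term 11 * 12 =
	 * 132 bytes; the two u64 counters add 2 * 21 = 42 bytes, for 174
	 * bytes of fields per row.
	 */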

	for (i = 0; i < XFS_SCRUB_TYPE_NR; i++, css++) {
		if (!name_map[i])
			continue;

		/* name plus one space */
		ret += 1 + strlen(name_map[i]);

		/* all fields, plus newline */
		ret += field_width + 1;
	}

	return ret;
}

/* Clear all counters. */
STATIC void
xchk_stats_clearall(
	struct xchk_stats	*cs)
{
	struct xchk_scrub_stats	*css = &cs->cs_stats[0];
	unsigned int		i;

	for (i = 0; i < XFS_SCRUB_TYPE_NR; i++, css++) {
		spin_lock(&css->css_lock);
		memset(css, 0, offsetof(struct xchk_scrub_stats, css_lock));
		spin_unlock(&css->css_lock);
	}
}

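/*
 * Mask of all the output flags that mean a scan did not come back
 * completely clean; used to decide whether to bump the clean counter.
 */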
#define XFS_SCRUB_OFLAG_UNCLEAN	(XFS_SCRUB_OFLAG_CORRUPT | \
				 XFS_SCRUB_OFLAG_PREEN | \
				 XFS_SCRUB_OFLAG_XFAIL | \
				 XFS_SCRUB_OFLAG_XCORRUPT | \
				 XFS_SCRUB_OFLAG_INCOMPLETE | \
				 XFS_SCRUB_OFLAG_WARNING)

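/* Merge one scrub run's results into a single stats structure. */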
STATIC void
xchk_stats_merge_one(
	struct xchk_stats		*cs,
	const struct xfs_scrub_metadata	*sm,
	const struct xchk_stats_run	*run)
{
	struct xchk_scrub_stats		*css;

	if (sm->sm_type >= XFS_SCRUB_TYPE_NR) {
		ASSERT(sm->sm_type < XFS_SCRUB_TYPE_NR);
		return;
	}

	css = &cs->cs_stats[sm->sm_type];
	spin_lock(&css->css_lock);
	css->invocations++;
	if (!(sm->sm_flags & XFS_SCRUB_OFLAG_UNCLEAN))
		css->clean++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		css->corrupt++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_PREEN)
		css->preen++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_XFAIL)
		css->xfail++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_XCORRUPT)
		css->xcorrupt++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_INCOMPLETE)
		css->incomplete++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_WARNING)
		css->warning++;
	css->retries += run->retries;
	css->checktime_us += howmany_64(run->scrub_ns, NSEC_PER_USEC);

	if (run->repair_attempted)
		css->repair_invocations++;
	if (run->repair_succeeded)
		css->repair_success++;
	css->repairtime_us += howmany_64(run->repair_ns, NSEC_PER_USEC);
	spin_unlock(&css->css_lock);
}

/* Merge these scrub-run stats into the global and mount stat data. */
void
xchk_stats_merge(
	struct xfs_mount		*mp,
	const struct xfs_scrub_metadata	*sm,
	const struct xchk_stats_run	*run)
{
	xchk_stats_merge_one(&global_stats, sm, run);
	xchk_stats_merge_one(mp->m_scrub_stats, sm, run);
}

/* debugfs boilerplate */

static ssize_t
xchk_scrub_stats_read(
	struct file		*file,
	char __user		*ubuf,
	size_t			count,
	loff_t			*ppos)
{
	struct xchk_stats	*cs = file->private_data;
	char			*buf;
	size_t			bufsize;
	ssize_t			avail, ret;

	/*
	 * This generates a string snapshot of all the scrub counters, so
	 * we do not want userspace to receive garbled text from multiple
	 * calls.  If the file position is greater than zero, return a
	 * short read.
	 */
	if (*ppos > 0)
		return 0;

	bufsize = xchk_stats_estimate_bufsize(cs);

	buf = kvmalloc(bufsize, XCHK_GFP_FLAGS);
	if (!buf)
		return -ENOMEM;

	avail = xchk_stats_format(cs, buf, bufsize);
	if (avail < 0) {
		ret = avail;
		goto out;
	}

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, avail);
out:
	kvfree(buf);
	return ret;
}

static const struct file_operations scrub_stats_fops = {
	.open			= simple_open,
	.read			= xchk_scrub_stats_read,
};
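
/*
 * Hypothetical usage sketch: the exact path depends on the parent dentry
 * passed to xchk_stats_register(); for the global object this is
 * typically /sys/kernel/debug/xfs/scrub/stats:
 *
 *   # cat /sys/kernel/debug/xfs/scrub/stats
 */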

static ssize_t
xchk_clear_scrub_stats_write(
	struct file		*file,
	const char __user	*ubuf,
	size_t			count,
	loff_t			*ppos)
{
	struct xchk_stats	*cs = file->private_data;
	unsigned int		val;
	int			ret;

	ret = kstrtouint_from_user(ubuf, count, 0, &val);
	if (ret)
		return ret;

	if (val != 1)
		return -EINVAL;

	xchk_stats_clearall(cs);
	return count;
}

static const struct file_operations clear_scrub_stats_fops = {
	.open			= simple_open,
	.write			= xchk_clear_scrub_stats_write,
};
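
/*
 * Hypothetical usage sketch: writing "1" (the only accepted value) resets
 * every counter, e.g. for the global object:
 *
 *   # echo 1 > /sys/kernel/debug/xfs/scrub/clear_stats
 */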

/* Initialize the stats object. */
STATIC int
xchk_stats_init(
	struct xchk_stats	*cs,
	struct xfs_mount	*mp)
{
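	/*
	 * Counters are assumed to start at zero: global_stats has static
	 * storage and per-mount objects come from kvzalloc(), so only the
	 * spinlocks need explicit initialization here.
	 */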
	struct xchk_scrub_stats	*css = &cs->cs_stats[0];
	unsigned int		i;

	for (i = 0; i < XFS_SCRUB_TYPE_NR; i++, css++)
		spin_lock_init(&css->css_lock);

	return 0;
}

/* Connect the stats object to debugfs. */
void
xchk_stats_register(
	struct xchk_stats	*cs,
	struct dentry		*parent)
{
	if (!parent)
		return;

	cs->cs_debugfs = xfs_debugfs_mkdir("scrub", parent);
	if (!cs->cs_debugfs)
		return;

	debugfs_create_file("stats", 0444, cs->cs_debugfs, cs,
			&scrub_stats_fops);
	debugfs_create_file("clear_stats", 0200, cs->cs_debugfs, cs,
			&clear_scrub_stats_fops);
}

/* Free all resources related to the stats object. */
STATIC int
xchk_stats_teardown(
	struct xchk_stats	*cs)
{
	return 0;
}

/* Disconnect the stats object from debugfs. */
void
xchk_stats_unregister(
	struct xchk_stats	*cs)
{
	debugfs_remove(cs->cs_debugfs);
}

/* Initialize global stats and register them */
int __init
xchk_global_stats_setup(
	struct dentry		*parent)
{
	int			error;

	error = xchk_stats_init(&global_stats, NULL);
	if (error)
		return error;

	xchk_stats_register(&global_stats, parent);
	return 0;
}

/* Unregister global stats and tear them down */
void
xchk_global_stats_teardown(void)
{
	xchk_stats_unregister(&global_stats);
	xchk_stats_teardown(&global_stats);
}

/* Allocate per-mount stats */
int
xchk_mount_stats_alloc(
	struct xfs_mount	*mp)
{
	struct xchk_stats	*cs;
	int			error;

	cs = kvzalloc(sizeof(struct xchk_stats), GFP_KERNEL);
	if (!cs)
		return -ENOMEM;

	error = xchk_stats_init(cs, mp);
	if (error)
		goto out_free;

	mp->m_scrub_stats = cs;
	return 0;
out_free:
	kvfree(cs);
	return error;
}

/* Free per-mount stats */
void
xchk_mount_stats_free(
	struct xfs_mount	*mp)
{
	xchk_stats_teardown(mp->m_scrub_stats);
	kvfree(mp->m_scrub_stats);
	mp->m_scrub_stats = NULL;
}