// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2023 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_sysfs.h"
#include "xfs_btree.h"
#include "xfs_super.h"
#include "scrub/scrub.h"
#include "scrub/stats.h"
#include "scrub/trace.h"

struct xchk_scrub_stats {
	/* all 32-bit counters here */

	/* checking stats */
	uint32_t		invocations;
	uint32_t		clean;
	uint32_t		corrupt;
	uint32_t		preen;
	uint32_t		xfail;
	uint32_t		xcorrupt;
	uint32_t		incomplete;
	uint32_t		warning;
	uint32_t		retries;

	/* repair stats */
	uint32_t		repair_invocations;
	uint32_t		repair_success;

	/* all 64-bit items here */

	/* runtimes */
	uint64_t		checktime_us;
	uint64_t		repairtime_us;

	/* non-counter state must go at the end for clearall */
	spinlock_t		css_lock;
};

struct xchk_stats {
	struct dentry		*cs_debugfs;
	struct xchk_scrub_stats	cs_stats[XFS_SCRUB_TYPE_NR];
};

static struct xchk_stats	global_stats;

static const char *name_map[XFS_SCRUB_TYPE_NR] = {
	[XFS_SCRUB_TYPE_SB]		= "sb",
	[XFS_SCRUB_TYPE_AGF]		= "agf",
	[XFS_SCRUB_TYPE_AGFL]		= "agfl",
	[XFS_SCRUB_TYPE_AGI]		= "agi",
	[XFS_SCRUB_TYPE_BNOBT]		= "bnobt",
	[XFS_SCRUB_TYPE_CNTBT]		= "cntbt",
	[XFS_SCRUB_TYPE_INOBT]		= "inobt",
	[XFS_SCRUB_TYPE_FINOBT]		= "finobt",
	[XFS_SCRUB_TYPE_RMAPBT]		= "rmapbt",
	[XFS_SCRUB_TYPE_REFCNTBT]	= "refcountbt",
	[XFS_SCRUB_TYPE_INODE]		= "inode",
	[XFS_SCRUB_TYPE_BMBTD]		= "bmapbtd",
	[XFS_SCRUB_TYPE_BMBTA]		= "bmapbta",
	[XFS_SCRUB_TYPE_BMBTC]		= "bmapbtc",
	[XFS_SCRUB_TYPE_DIR]		= "directory",
	[XFS_SCRUB_TYPE_XATTR]		= "xattr",
	[XFS_SCRUB_TYPE_SYMLINK]	= "symlink",
	[XFS_SCRUB_TYPE_PARENT]		= "parent",
	[XFS_SCRUB_TYPE_RTBITMAP]	= "rtbitmap",
	[XFS_SCRUB_TYPE_RTSUM]		= "rtsummary",
	[XFS_SCRUB_TYPE_UQUOTA]		= "usrquota",
	[XFS_SCRUB_TYPE_GQUOTA]		= "grpquota",
	[XFS_SCRUB_TYPE_PQUOTA]		= "prjquota",
	[XFS_SCRUB_TYPE_FSCOUNTERS]	= "fscounters",
};

/* Format the scrub stats into a text buffer, similar to pcp style. */
STATIC ssize_t
xchk_stats_format(
	struct xchk_stats	*cs,
	char			*buf,
	size_t			remaining)
{
	struct xchk_scrub_stats	*css = &cs->cs_stats[0];
	unsigned int		i;
	ssize_t			copied = 0;
	int			ret = 0;

	for (i = 0; i < XFS_SCRUB_TYPE_NR; i++, css++) {
		if (!name_map[i])
			continue;

		ret = scnprintf(buf, remaining,
				"%s %u %u %u %u %u %u %u %u %u %llu %u %u %llu\n",
				name_map[i],
				(unsigned int)css->invocations,
				(unsigned int)css->clean,
				(unsigned int)css->corrupt,
				(unsigned int)css->preen,
				(unsigned int)css->xfail,
				(unsigned int)css->xcorrupt,
				(unsigned int)css->incomplete,
				(unsigned int)css->warning,
				(unsigned int)css->retries,
				(unsigned long long)css->checktime_us,
				(unsigned int)css->repair_invocations,
				(unsigned int)css->repair_success,
				(unsigned long long)css->repairtime_us);
		if (ret <= 0)
			break;

		remaining -= ret;
		copied += ret;
		buf += ret;
	}

	return copied > 0 ? copied : ret;
}
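/*
 * Illustrative example of one line of the report produced above; the
 * counter values here are invented, not taken from a real run:
 *
 *	inode 462 460 1 0 0 0 1 0 5 8293 2 2 9087
 *
 * The columns are the scrub type name, then invocations, clean,
 * corrupt, preen, xfail, xcorrupt, incomplete, warning, retries,
 * checktime_us, repair_invocations, repair_success, and repairtime_us.
 */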
/* Estimate the worst case buffer size required to hold the whole report. */
STATIC size_t
xchk_stats_estimate_bufsize(
	struct xchk_stats	*cs)
{
	struct xchk_scrub_stats	*css = &cs->cs_stats[0];
	unsigned int		i;
	size_t			field_width;
	size_t			ret = 0;

	/* 4294967295 plus one space for each u32 field */
	field_width = 11 * (offsetof(struct xchk_scrub_stats, checktime_us) /
			    sizeof(uint32_t));

	/* 18446744073709551615 plus one space for each u64 field */
	field_width += 21 * ((offsetof(struct xchk_scrub_stats, css_lock) -
			      offsetof(struct xchk_scrub_stats, checktime_us)) /
			     sizeof(uint64_t));

	for (i = 0; i < XFS_SCRUB_TYPE_NR; i++, css++) {
		if (!name_map[i])
			continue;

		/* name plus one space */
		ret += 1 + strlen(name_map[i]);

		/* all fields, plus newline */
		ret += field_width + 1;
	}

	return ret;
}

/* Clear all counters. */
STATIC void
xchk_stats_clearall(
	struct xchk_stats	*cs)
{
	struct xchk_scrub_stats	*css = &cs->cs_stats[0];
	unsigned int		i;

	for (i = 0; i < XFS_SCRUB_TYPE_NR; i++, css++) {
		spin_lock(&css->css_lock);
		memset(css, 0, offsetof(struct xchk_scrub_stats, css_lock));
		spin_unlock(&css->css_lock);
	}
}

#define XFS_SCRUB_OFLAG_UNCLEAN	(XFS_SCRUB_OFLAG_CORRUPT | \
				 XFS_SCRUB_OFLAG_PREEN | \
				 XFS_SCRUB_OFLAG_XFAIL | \
				 XFS_SCRUB_OFLAG_XCORRUPT | \
				 XFS_SCRUB_OFLAG_INCOMPLETE | \
				 XFS_SCRUB_OFLAG_WARNING)

STATIC void
xchk_stats_merge_one(
	struct xchk_stats		*cs,
	const struct xfs_scrub_metadata	*sm,
	const struct xchk_stats_run	*run)
{
	struct xchk_scrub_stats		*css;

	if (sm->sm_type >= XFS_SCRUB_TYPE_NR) {
		/*
		 * ASSERT compiles out on non-DEBUG builds, so bail out
		 * explicitly rather than index off the end of cs_stats.
		 */
		ASSERT(sm->sm_type < XFS_SCRUB_TYPE_NR);
		return;
	}

	css = &cs->cs_stats[sm->sm_type];
	spin_lock(&css->css_lock);
	css->invocations++;
	if (!(sm->sm_flags & XFS_SCRUB_OFLAG_UNCLEAN))
		css->clean++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		css->corrupt++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_PREEN)
		css->preen++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_XFAIL)
		css->xfail++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_XCORRUPT)
		css->xcorrupt++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_INCOMPLETE)
		css->incomplete++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_WARNING)
		css->warning++;
	css->retries += run->retries;
	css->checktime_us += howmany_64(run->scrub_ns, NSEC_PER_USEC);

	if (run->repair_attempted)
		css->repair_invocations++;
	if (run->repair_succeeded)
		css->repair_success++;
	css->repairtime_us += howmany_64(run->repair_ns, NSEC_PER_USEC);
	spin_unlock(&css->css_lock);
}

/* Merge these scrub-run stats into the global and mount stat data. */
void
xchk_stats_merge(
	struct xfs_mount		*mp,
	const struct xfs_scrub_metadata	*sm,
	const struct xchk_stats_run	*run)
{
	xchk_stats_merge_one(&global_stats, sm, run);
	xchk_stats_merge_one(mp->m_scrub_stats, sm, run);
}
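/*
 * A note on the time accounting in xchk_stats_merge_one() above:
 * howmany_64() rounds up, so any scrub or repair that ran for a
 * nonzero number of nanoseconds is charged at least one microsecond.
 * For example, a 1500ns run adds howmany_64(1500, NSEC_PER_USEC) == 2
 * to the relevant microsecond counter.
 */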
247 */ 248 if (*ppos > 0) 249 return 0; 250 251 bufsize = xchk_stats_estimate_bufsize(cs); 252 253 buf = kvmalloc(bufsize, XCHK_GFP_FLAGS); 254 if (!buf) 255 return -ENOMEM; 256 257 avail = xchk_stats_format(cs, buf, bufsize); 258 if (avail < 0) { 259 ret = avail; 260 goto out; 261 } 262 263 ret = simple_read_from_buffer(ubuf, count, ppos, buf, avail); 264 out: 265 kvfree(buf); 266 return ret; 267 } 268 269 static const struct file_operations scrub_stats_fops = { 270 .open = simple_open, 271 .read = xchk_scrub_stats_read, 272 }; 273 274 static ssize_t 275 xchk_clear_scrub_stats_write( 276 struct file *file, 277 const char __user *ubuf, 278 size_t count, 279 loff_t *ppos) 280 { 281 struct xchk_stats *cs = file->private_data; 282 unsigned int val; 283 int ret; 284 285 ret = kstrtouint_from_user(ubuf, count, 0, &val); 286 if (ret) 287 return ret; 288 289 if (val != 1) 290 return -EINVAL; 291 292 xchk_stats_clearall(cs); 293 return count; 294 } 295 296 static const struct file_operations clear_scrub_stats_fops = { 297 .open = simple_open, 298 .write = xchk_clear_scrub_stats_write, 299 }; 300 301 /* Initialize the stats object. */ 302 STATIC int 303 xchk_stats_init( 304 struct xchk_stats *cs, 305 struct xfs_mount *mp) 306 { 307 struct xchk_scrub_stats *css = &cs->cs_stats[0]; 308 unsigned int i; 309 310 for (i = 0; i < XFS_SCRUB_TYPE_NR; i++, css++) 311 spin_lock_init(&css->css_lock); 312 313 return 0; 314 } 315 316 /* Connect the stats object to debugfs. */ 317 void 318 xchk_stats_register( 319 struct xchk_stats *cs, 320 struct dentry *parent) 321 { 322 if (!parent) 323 return; 324 325 cs->cs_debugfs = xfs_debugfs_mkdir("scrub", parent); 326 if (!cs->cs_debugfs) 327 return; 328 329 debugfs_create_file("stats", 0644, cs->cs_debugfs, cs, 330 &scrub_stats_fops); 331 debugfs_create_file("clear_stats", 0400, cs->cs_debugfs, cs, 332 &clear_scrub_stats_fops); 333 } 334 335 /* Free all resources related to the stats object. */ 336 STATIC int 337 xchk_stats_teardown( 338 struct xchk_stats *cs) 339 { 340 return 0; 341 } 342 343 /* Disconnect the stats object from debugfs. */ 344 void 345 xchk_stats_unregister( 346 struct xchk_stats *cs) 347 { 348 debugfs_remove(cs->cs_debugfs); 349 } 350 351 /* Initialize global stats and register them */ 352 int __init 353 xchk_global_stats_setup( 354 struct dentry *parent) 355 { 356 int error; 357 358 error = xchk_stats_init(&global_stats, NULL); 359 if (error) 360 return error; 361 362 xchk_stats_register(&global_stats, parent); 363 return 0; 364 } 365 366 /* Unregister global stats and tear them down */ 367 void 368 xchk_global_stats_teardown(void) 369 { 370 xchk_stats_unregister(&global_stats); 371 xchk_stats_teardown(&global_stats); 372 } 373 374 /* Allocate per-mount stats */ 375 int 376 xchk_mount_stats_alloc( 377 struct xfs_mount *mp) 378 { 379 struct xchk_stats *cs; 380 int error; 381 382 cs = kvzalloc(sizeof(struct xchk_stats), GFP_KERNEL); 383 if (!cs) 384 return -ENOMEM; 385 386 error = xchk_stats_init(cs, mp); 387 if (error) 388 goto out_free; 389 390 mp->m_scrub_stats = cs; 391 return 0; 392 out_free: 393 kvfree(cs); 394 return error; 395 } 396 397 /* Free per-mount stats */ 398 void 399 xchk_mount_stats_free( 400 struct xfs_mount *mp) 401 { 402 xchk_stats_teardown(mp->m_scrub_stats); 403 kvfree(mp->m_scrub_stats); 404 mp->m_scrub_stats = NULL; 405 } 406