// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_log_format.h"
#include "xfs_inode.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/dabtree.h"
#include "scrub/attr.h"

/*
 * Allocate enough memory to hold an attr value and attr block bitmaps,
 * reallocating the buffer if necessary.  Buffer contents are not preserved
 * across a reallocation.
 */
int
xchk_setup_xattr_buf(
	struct xfs_scrub	*sc,
	size_t			value_size,
	xfs_km_flags_t		flags)
{
	size_t			sz;
	struct xchk_xattr_buf	*ab = sc->buf;

	/*
	 * We need enough space to read an xattr value from the file or
	 * enough space to hold three copies of the xattr free space bitmap.
	 * We don't need the buffer space for both purposes at the same time.
	 */
	sz = 3 * sizeof(long) * BITS_TO_LONGS(sc->mp->m_attr_geo->blksize);
	sz = max_t(size_t, sz, value_size);

	/*
	 * If there's already a buffer, figure out if we need to reallocate
	 * it to accommodate a larger size.
	 */
	if (ab) {
		if (sz <= ab->sz)
			return 0;
		kmem_free(ab);
		sc->buf = NULL;
	}

	/*
	 * Don't zero the buffer upon allocation to avoid runtime overhead.
	 * All users must be careful never to read uninitialized contents.
	 */
	ab = kmem_alloc_large(sizeof(*ab) + sz, flags);
	if (!ab)
		return -ENOMEM;

	ab->sz = sz;
	sc->buf = ab;
	return 0;
}
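
/*
 * For illustration only: the xchk_xattr_valuebuf/usedmap/freemap/dstmap
 * accessors used below are assumed (in scrub/attr.h) to carve the single
 * xchk_xattr_buf allocation into a value buffer that doubles as three
 * consecutive block-sized bitmaps, along these lines:
 *
 *	static inline unsigned long *
 *	xchk_xattr_usedmap(struct xfs_scrub *sc)
 *	{
 *		struct xchk_xattr_buf	*ab = sc->buf;
 *
 *		return (unsigned long *)ab->buf;
 *	}
 *
 * with freemap and dstmap assumed to start one and two
 * BITS_TO_LONGS(blksize) regions further in, respectively.  This is a
 * sketch of the assumed layout (including the "buf" member name), not a
 * copy of the real helpers.
 */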

/* Set us up to scrub an inode's extended attributes. */
int
xchk_setup_xattr(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	int			error;

	/*
	 * We failed to get memory while checking attrs, so this time try to
	 * get all the memory we're ever going to need.  Allocate the buffer
	 * without the inode lock held, which means we can sleep.
	 */
	if (sc->flags & XCHK_TRY_HARDER) {
		error = xchk_setup_xattr_buf(sc, XATTR_SIZE_MAX, 0);
		if (error)
			return error;
	}

	return xchk_setup_inode_contents(sc, ip, 0);
}

/* Extended Attributes */

struct xchk_xattr {
	struct xfs_attr_list_context	context;
	struct xfs_scrub		*sc;
};

/*
 * Check that an extended attribute key can be looked up by hash.
 *
 * We use the XFS attribute list iterator (i.e. xfs_attr_list_int_ilocked)
 * to call this function for every attribute key in an inode.  Once
 * we're here, we load the attribute value to see if any errors happen,
 * or if we get more or less data than we expected.
 */
static void
xchk_xattr_listent(
	struct xfs_attr_list_context	*context,
	int				flags,
	unsigned char			*name,
	int				namelen,
	int				valuelen)
{
	struct xchk_xattr		*sx;
	struct xfs_da_args		args = { NULL };
	int				error = 0;

	sx = container_of(context, struct xchk_xattr, context);

	if (xchk_should_terminate(sx->sc, &error)) {
		context->seen_enough = error;
		return;
	}

	if (flags & XFS_ATTR_INCOMPLETE) {
		/* Incomplete attr key, just mark the inode for preening. */
		xchk_ino_set_preen(sx->sc, context->dp->i_ino);
		return;
	}

	/* Does this name make sense? */
	if (!xfs_attr_namecheck(name, namelen)) {
		xchk_fblock_set_corrupt(sx->sc, XFS_ATTR_FORK, args.blkno);
		return;
	}

	/*
	 * Try to allocate enough memory to extract the attr value.  If that
	 * doesn't work, we overload the seen_enough variable to convey
	 * the error code back to the main scrub function.
	 */
	error = xchk_setup_xattr_buf(sx->sc, valuelen, KM_MAYFAIL);
	if (error == -ENOMEM)
		error = -EDEADLOCK;
	if (error) {
		context->seen_enough = error;
		return;
	}

	args.flags = ATTR_KERNOTIME;
	if (flags & XFS_ATTR_ROOT)
		args.flags |= ATTR_ROOT;
	else if (flags & XFS_ATTR_SECURE)
		args.flags |= ATTR_SECURE;
	args.geo = context->dp->i_mount->m_attr_geo;
	args.whichfork = XFS_ATTR_FORK;
	args.dp = context->dp;
	args.name = name;
	args.namelen = namelen;
	args.hashval = xfs_da_hashname(args.name, args.namelen);
	args.trans = context->tp;
	args.value = xchk_xattr_valuebuf(sx->sc);
	args.valuelen = valuelen;

	error = xfs_attr_get_ilocked(context->dp, &args);
	if (!xchk_fblock_process_error(sx->sc, XFS_ATTR_FORK, args.blkno,
			&error))
		goto fail_xref;
	if (args.valuelen != valuelen)
		xchk_fblock_set_corrupt(sx->sc, XFS_ATTR_FORK,
				args.blkno);
fail_xref:
	if (sx->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		context->seen_enough = 1;
	return;
}

/*
 * Mark a range [start, start+len) in this map.  Returns true if the
 * region was free, and false if there's a conflict or a problem.
 *
 * Within a char, the lowest bit of the char represents the byte with
 * the smallest address.
 */
STATIC bool
xchk_xattr_set_map(
	struct xfs_scrub	*sc,
	unsigned long		*map,
	unsigned int		start,
	unsigned int		len)
{
	unsigned int		mapsize = sc->mp->m_attr_geo->blksize;
	bool			ret = true;

	if (start >= mapsize)
		return false;
	if (start + len > mapsize) {
		len = mapsize - start;
		ret = false;
	}

	if (find_next_bit(map, mapsize, start) < start + len)
		ret = false;
	bitmap_set(map, start, len);

	return ret;
}
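
/*
 * Worked example (illustrative offsets, not taken from a real leaf): with
 * one bit per byte of a 4096-byte attr block, marking a 32-byte header via
 *
 *	xchk_xattr_set_map(sc, map, 0, 32);
 *
 * succeeds on a freshly zeroed bitmap, but a later call
 *
 *	xchk_xattr_set_map(sc, map, 16, 24);
 *
 * returns false because find_next_bit() locates bit 16 already set before
 * bit 16 + 24, i.e. the two regions overlap and the block is corrupt.
 */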

/*
 * Check the leaf freemap against the usage bitmap.  Returns false if the
 * attr freemap has problems or points to used space.
 */
STATIC bool
xchk_xattr_check_freemap(
	struct xfs_scrub		*sc,
	unsigned long			*map,
	struct xfs_attr3_icleaf_hdr	*leafhdr)
{
	unsigned long			*freemap = xchk_xattr_freemap(sc);
	unsigned long			*dstmap = xchk_xattr_dstmap(sc);
	unsigned int			mapsize = sc->mp->m_attr_geo->blksize;
	int				i;

	/* Construct bitmap of freemap contents. */
	bitmap_zero(freemap, mapsize);
	for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
		if (!xchk_xattr_set_map(sc, freemap,
				leafhdr->freemap[i].base,
				leafhdr->freemap[i].size))
			return false;
	}

	/* Look for bits that are set in freemap and are marked in use. */
	return bitmap_and(dstmap, freemap, map, mapsize) == 0;
}

/*
 * Check this leaf entry's relations to everything else.
 * Adds the number of bytes used for the name/value data to *usedbytes.
 */
STATIC void
xchk_xattr_entry(
	struct xchk_da_btree		*ds,
	int				level,
	char				*buf_end,
	struct xfs_attr_leafblock	*leaf,
	struct xfs_attr3_icleaf_hdr	*leafhdr,
	struct xfs_attr_leaf_entry	*ent,
	int				idx,
	unsigned int			*usedbytes,
	__u32				*last_hashval)
{
	struct xfs_mount		*mp = ds->state->mp;
	unsigned long			*usedmap = xchk_xattr_usedmap(ds->sc);
	char				*name_end;
	struct xfs_attr_leaf_name_local	*lentry;
	struct xfs_attr_leaf_name_remote *rentry;
	unsigned int			nameidx;
	unsigned int			namesize;

	if (ent->pad2 != 0)
		xchk_da_set_corrupt(ds, level);

	/* Hash values in order? */
	if (be32_to_cpu(ent->hashval) < *last_hashval)
		xchk_da_set_corrupt(ds, level);
	*last_hashval = be32_to_cpu(ent->hashval);

	nameidx = be16_to_cpu(ent->nameidx);
	if (nameidx < leafhdr->firstused ||
	    nameidx >= mp->m_attr_geo->blksize) {
		xchk_da_set_corrupt(ds, level);
		return;
	}

	/* Check the name information. */
	if (ent->flags & XFS_ATTR_LOCAL) {
		lentry = xfs_attr3_leaf_name_local(leaf, idx);
		namesize = xfs_attr_leaf_entsize_local(lentry->namelen,
				be16_to_cpu(lentry->valuelen));
		name_end = (char *)lentry + namesize;
		if (lentry->namelen == 0)
			xchk_da_set_corrupt(ds, level);
	} else {
		rentry = xfs_attr3_leaf_name_remote(leaf, idx);
		namesize = xfs_attr_leaf_entsize_remote(rentry->namelen);
		name_end = (char *)rentry + namesize;
		if (rentry->namelen == 0 || rentry->valueblk == 0)
			xchk_da_set_corrupt(ds, level);
	}
	if (name_end > buf_end)
		xchk_da_set_corrupt(ds, level);

	if (!xchk_xattr_set_map(ds->sc, usedmap, nameidx, namesize))
		xchk_da_set_corrupt(ds, level);
	if (!(ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		*usedbytes += namesize;
}
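
/*
 * The block checks below enforce, roughly, this leaf layout (offsets grow
 * to the right; not to scale):
 *
 *	+--------+------------------+....free....+-------------------+
 *	| header | entries[count]   |            | name/value data   |
 *	+--------+------------------+....free....+-------------------+
 *	0        hdrsize                         firstused     blksize
 *
 * Every entry's nameidx must point into [firstused, blksize), the entry
 * array must end at or before firstused, and the freemap may cover only
 * bytes that belong to none of the marked regions.
 */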

/* Scrub an attribute leaf. */
STATIC int
xchk_xattr_block(
	struct xchk_da_btree		*ds,
	int				level)
{
	struct xfs_attr3_icleaf_hdr	leafhdr;
	struct xfs_mount		*mp = ds->state->mp;
	struct xfs_da_state_blk		*blk = &ds->state->path.blk[level];
	struct xfs_buf			*bp = blk->bp;
	xfs_dablk_t			*last_checked = ds->private;
	struct xfs_attr_leafblock	*leaf = bp->b_addr;
	struct xfs_attr_leaf_entry	*ent;
	struct xfs_attr_leaf_entry	*entries;
	unsigned long			*usedmap;
	char				*buf_end;
	size_t				off;
	__u32				last_hashval = 0;
	unsigned int			usedbytes = 0;
	unsigned int			hdrsize;
	int				i;
	int				error;

	if (*last_checked == blk->blkno)
		return 0;

	/* Allocate memory for block usage checking. */
	error = xchk_setup_xattr_buf(ds->sc, 0, KM_MAYFAIL);
	if (error == -ENOMEM)
		return -EDEADLOCK;
	if (error)
		return error;
	usedmap = xchk_xattr_usedmap(ds->sc);

	*last_checked = blk->blkno;
	bitmap_zero(usedmap, mp->m_attr_geo->blksize);

	/* Check all the padding. */
	if (xfs_sb_version_hascrc(&ds->sc->mp->m_sb)) {
		struct xfs_attr3_leafblock	*leaf = bp->b_addr;

		if (leaf->hdr.pad1 != 0 || leaf->hdr.pad2 != 0 ||
		    leaf->hdr.info.hdr.pad != 0)
			xchk_da_set_corrupt(ds, level);
	} else {
		if (leaf->hdr.pad1 != 0 || leaf->hdr.info.pad != 0)
			xchk_da_set_corrupt(ds, level);
	}

	/* Check the leaf header. */
	xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &leafhdr, leaf);
	hdrsize = xfs_attr3_leaf_hdr_size(leaf);

	if (leafhdr.usedbytes > mp->m_attr_geo->blksize)
		xchk_da_set_corrupt(ds, level);
	if (leafhdr.firstused > mp->m_attr_geo->blksize)
		xchk_da_set_corrupt(ds, level);
	if (leafhdr.firstused < hdrsize)
		xchk_da_set_corrupt(ds, level);
	if (!xchk_xattr_set_map(ds->sc, usedmap, 0, hdrsize))
		xchk_da_set_corrupt(ds, level);

	if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	entries = xfs_attr3_leaf_entryp(leaf);
	if ((char *)&entries[leafhdr.count] > (char *)leaf + leafhdr.firstused)
		xchk_da_set_corrupt(ds, level);

	buf_end = (char *)bp->b_addr + mp->m_attr_geo->blksize;
	for (i = 0, ent = entries; i < leafhdr.count; ent++, i++) {
		/* Mark the leaf entry itself. */
		off = (char *)ent - (char *)leaf;
		if (!xchk_xattr_set_map(ds->sc, usedmap, off,
				sizeof(xfs_attr_leaf_entry_t))) {
			xchk_da_set_corrupt(ds, level);
			goto out;
		}

		/* Check the entry and nameval. */
		xchk_xattr_entry(ds, level, buf_end, leaf, &leafhdr,
				ent, i, &usedbytes, &last_hashval);

		if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			goto out;
	}

	if (!xchk_xattr_check_freemap(ds->sc, usedmap, &leafhdr))
		xchk_da_set_corrupt(ds, level);

	if (leafhdr.usedbytes != usedbytes)
		xchk_da_set_corrupt(ds, level);

out:
	return 0;
}
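
/*
 * Note on the -ENOMEM to -EDEADLOCK conversions above and in
 * xchk_xattr_listent: the scrub core is assumed to treat -EDEADLOCK as
 * "release everything, set XCHK_TRY_HARDER, and rerun the scrubber",
 * which is why xchk_setup_xattr preallocates an XATTR_SIZE_MAX buffer on
 * the second pass.  A sketch of that assumed retry loop in the dispatcher:
 *
 *	error = sc.ops->scrub(&sc);
 *	if (error == -EDEADLOCK && !(sc.flags & XCHK_TRY_HARDER)) {
 *		sc.flags |= XCHK_TRY_HARDER;
 *		goto retry_op;
 *	}
 */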

/* Scrub an attribute btree record. */
STATIC int
xchk_xattr_rec(
	struct xchk_da_btree		*ds,
	int				level)
{
	struct xfs_mount		*mp = ds->state->mp;
	struct xfs_da_state_blk		*blk = &ds->state->path.blk[level];
	struct xfs_attr_leaf_name_local	*lentry;
	struct xfs_attr_leaf_name_remote *rentry;
	struct xfs_buf			*bp;
	struct xfs_attr_leaf_entry	*ent;
	xfs_dahash_t			calc_hash;
	xfs_dahash_t			hash;
	int				nameidx;
	int				hdrsize;
	unsigned int			badflags;
	int				error;

	ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);

	ent = xfs_attr3_leaf_entryp(blk->bp->b_addr) + blk->index;

	/* Check the whole block, if necessary. */
	error = xchk_xattr_block(ds, level);
	if (error)
		goto out;
	if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Check the hash of the entry. */
	error = xchk_da_btree_hash(ds, level, &ent->hashval);
	if (error)
		goto out;

	/* Find the attr entry's location. */
	bp = blk->bp;
	hdrsize = xfs_attr3_leaf_hdr_size(bp->b_addr);
	nameidx = be16_to_cpu(ent->nameidx);
	if (nameidx < hdrsize || nameidx >= mp->m_attr_geo->blksize) {
		xchk_da_set_corrupt(ds, level);
		goto out;
	}

	/* Retrieve the entry and check it. */
	hash = be32_to_cpu(ent->hashval);
	badflags = ~(XFS_ATTR_LOCAL | XFS_ATTR_ROOT | XFS_ATTR_SECURE |
			XFS_ATTR_INCOMPLETE);
	if ((ent->flags & badflags) != 0)
		xchk_da_set_corrupt(ds, level);
	if (ent->flags & XFS_ATTR_LOCAL) {
		lentry = (struct xfs_attr_leaf_name_local *)
				(((char *)bp->b_addr) + nameidx);
		if (lentry->namelen <= 0) {
			xchk_da_set_corrupt(ds, level);
			goto out;
		}
		calc_hash = xfs_da_hashname(lentry->nameval, lentry->namelen);
	} else {
		rentry = (struct xfs_attr_leaf_name_remote *)
				(((char *)bp->b_addr) + nameidx);
		if (rentry->namelen <= 0) {
			xchk_da_set_corrupt(ds, level);
			goto out;
		}
		calc_hash = xfs_da_hashname(rentry->name, rentry->namelen);
	}
	if (calc_hash != hash)
		xchk_da_set_corrupt(ds, level);

out:
	return error;
}

/* Scrub the extended attribute metadata. */
int
xchk_xattr(
	struct xfs_scrub		*sc)
{
	struct xchk_xattr		sx;
	struct attrlist_cursor_kern	cursor = { 0 };
	xfs_dablk_t			last_checked = -1U;
	int				error = 0;

	if (!xfs_inode_hasattr(sc->ip))
		return -ENOENT;

	memset(&sx, 0, sizeof(sx));
	/* Check attribute tree structure */
	error = xchk_da_btree(sc, XFS_ATTR_FORK, xchk_xattr_rec,
			&last_checked);
	if (error)
		goto out;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Check that every attr key can also be looked up by hash. */
	sx.context.dp = sc->ip;
	sx.context.cursor = &cursor;
	sx.context.resynch = 1;
	sx.context.put_listent = xchk_xattr_listent;
	sx.context.tp = sc->tp;
	sx.context.flags = ATTR_INCOMPLETE;
	sx.sc = sc;

	/*
	 * Look up every xattr in this file by name.
	 *
	 * Use the backend implementation of xfs_attr_list to call
	 * xchk_xattr_listent on every attribute key in this inode.
	 * In other words, we use the same iterator/callback mechanism
	 * that listattr uses to scrub extended attributes, though in our
	 * _listent function, we check the value of the attribute.
	 *
	 * The VFS only locks i_rwsem when modifying attrs, so keep all
	 * three locks held because that's the only way to ensure we're
	 * the only thread poking into the da btree.  We traverse the da
	 * btree while holding a leaf buffer locked for the xattr name
	 * iteration, which doesn't really follow the usual buffer
	 * locking order.
	 */
	error = xfs_attr_list_int_ilocked(&sx.context);
	if (!xchk_fblock_process_error(sc, XFS_ATTR_FORK, 0, &error))
		goto out;

	/* Did our listent function try to return any errors? */
	if (sx.context.seen_enough < 0)
		error = sx.context.seen_enough;
out:
	return error;
}
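
/*
 * For context (a sketch of assumed wiring, not part of this file): the
 * scrub dispatcher in scrub/scrub.c is expected to pair the setup and
 * scrub entry points above in its operations table, along the lines of:
 *
 *	[XFS_SCRUB_TYPE_XATTR] = {
 *		.type	= ST_INODE,
 *		.setup	= xchk_setup_xattr,
 *		.scrub	= xchk_xattr,
 *	},
 */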