// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_bmap.h"
#include "xfs_da_btree.h"
#include "xfs_attr.h"
#include "xfs_attr_sf.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_dir2.h"

STATIC int
xfs_attr_shortform_compare(const void *a, const void *b)
{
	xfs_attr_sf_sort_t *sa, *sb;

	sa = (xfs_attr_sf_sort_t *)a;
	sb = (xfs_attr_sf_sort_t *)b;
	if (sa->hash < sb->hash) {
		return -1;
	} else if (sa->hash > sb->hash) {
		return 1;
	} else {
		return sa->entno - sb->entno;
	}
}

/* The cursor is still in its initial, all-zeroes state. */
#define XFS_ISRESET_CURSOR(cursor) \
	(!((cursor)->initted) && !((cursor)->hashval) && \
	 !((cursor)->blkno) && !((cursor)->offset))
/*
 * Copy out entries of shortform attribute lists for attr_list().
 * Shortform attribute lists are not stored in hashval sorted order.
 * If the output buffer is not large enough to hold them all, then
 * we have to calculate each entry's hashvalue and sort them before
 * we can begin returning them to the user.
 */
static int
xfs_attr_shortform_list(
	struct xfs_attr_list_context	*context)
{
	struct xfs_attrlist_cursor_kern	*cursor = &context->cursor;
	struct xfs_inode		*dp = context->dp;
	struct xfs_attr_sf_sort		*sbuf, *sbp;
	struct xfs_attr_sf_hdr		*sf = dp->i_af.if_data;
	struct xfs_attr_sf_entry	*sfe;
	int				sbsize, nsbuf, count, i;
	int				error = 0;

	ASSERT(sf != NULL);
	if (!sf->count)
		return 0;

	trace_xfs_attr_list_sf(context);

	/*
	 * If the buffer is large enough and the cursor is at the start,
	 * do not bother with sorting since we will return everything in
	 * one buffer and another call using the cursor won't need to be
	 * made.
	 * Note the generous fudge factor of 16 overhead bytes per entry.
	 * If bufsize is zero then put_listent must be a search function
	 * and can just scan through what we have.
	 */
	if (context->bufsize == 0 ||
	    (XFS_ISRESET_CURSOR(cursor) &&
	     (dp->i_af.if_bytes + sf->count * 16) < context->bufsize)) {
		for (i = 0, sfe = xfs_attr_sf_firstentry(sf); i < sf->count; i++) {
			if (XFS_IS_CORRUPT(context->dp->i_mount,
					   !xfs_attr_namecheck(sfe->nameval,
							       sfe->namelen)))
				return -EFSCORRUPTED;
			context->put_listent(context,
					     sfe->flags,
					     sfe->nameval,
					     (int)sfe->namelen,
					     (int)sfe->valuelen);
			/*
			 * Either search callback finished early or
			 * didn't fit it all in the buffer after all.
			 */
			if (context->seen_enough)
				break;
			sfe = xfs_attr_sf_nextentry(sfe);
		}
		trace_xfs_attr_list_sf_all(context);
		return 0;
	}

	/* do no more for a search callback */
	if (context->bufsize == 0)
		return 0;

	/*
	 * It didn't all fit, so we have to sort everything on hashval.
	 */
	sbsize = sf->count * sizeof(*sbuf);
	sbp = sbuf = kmem_alloc(sbsize, KM_NOFS);

	/*
	 * Scan the attribute list for the rest of the entries, storing
	 * the relevant info from only those that match into a buffer.
	 */
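	/*
	 * Bounds-check each entry against the in-core attr fork data as we
	 * go, so that a corrupt shortform header cannot walk us off the end
	 * of the fork.
	 */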
	nsbuf = 0;
	for (i = 0, sfe = xfs_attr_sf_firstentry(sf); i < sf->count; i++) {
		if (unlikely(
		    ((char *)sfe < (char *)sf) ||
		    ((char *)sfe >= ((char *)sf + dp->i_af.if_bytes)))) {
			XFS_CORRUPTION_ERROR("xfs_attr_shortform_list",
					     XFS_ERRLEVEL_LOW,
					     context->dp->i_mount, sfe,
					     sizeof(*sfe));
			kmem_free(sbuf);
			return -EFSCORRUPTED;
		}

		sbp->entno = i;
		sbp->hash = xfs_da_hashname(sfe->nameval, sfe->namelen);
		sbp->name = sfe->nameval;
		sbp->namelen = sfe->namelen;
		/* These are bytes, and both on-disk, don't endian-flip */
		sbp->valuelen = sfe->valuelen;
		sbp->flags = sfe->flags;
		sfe = xfs_attr_sf_nextentry(sfe);
		sbp++;
		nsbuf++;
	}

	/*
	 * Sort the entries on hash then entno.
	 */
	xfs_sort(sbuf, nsbuf, sizeof(*sbuf), xfs_attr_shortform_compare);

	/*
	 * Re-find our place IN THE SORTED LIST.
	 */
	count = 0;
	cursor->initted = 1;
	cursor->blkno = 0;
	for (sbp = sbuf, i = 0; i < nsbuf; i++, sbp++) {
		if (sbp->hash == cursor->hashval) {
			if (cursor->offset == count) {
				break;
			}
			count++;
		} else if (sbp->hash > cursor->hashval) {
			break;
		}
	}
	if (i == nsbuf)
		goto out;

	/*
	 * Loop putting entries into the user buffer.
	 */
	for ( ; i < nsbuf; i++, sbp++) {
		if (cursor->hashval != sbp->hash) {
			cursor->hashval = sbp->hash;
			cursor->offset = 0;
		}
		if (XFS_IS_CORRUPT(context->dp->i_mount,
				   !xfs_attr_namecheck(sbp->name,
						       sbp->namelen))) {
			error = -EFSCORRUPTED;
			goto out;
		}
		context->put_listent(context,
				     sbp->flags,
				     sbp->name,
				     sbp->namelen,
				     sbp->valuelen);
		if (context->seen_enough)
			break;
		cursor->offset++;
	}
out:
	kmem_free(sbuf);
	return error;
}

/*
 * We didn't find the block & hash mentioned in the cursor state, so
 * walk down the attr btree looking for the hash.
 */
STATIC int
xfs_attr_node_list_lookup(
	struct xfs_attr_list_context	*context,
	struct xfs_attrlist_cursor_kern	*cursor,
	struct xfs_buf			**pbp)
{
	struct xfs_da3_icnode_hdr	nodehdr;
	struct xfs_da_intnode		*node;
	struct xfs_da_node_entry	*btree;
	struct xfs_inode		*dp = context->dp;
	struct xfs_mount		*mp = dp->i_mount;
	struct xfs_trans		*tp = context->tp;
	struct xfs_buf			*bp;
	int				i;
	int				error = 0;
	unsigned int			expected_level = 0;
	uint16_t			magic;

	ASSERT(*pbp == NULL);
	cursor->blkno = 0;
	for (;;) {
		error = xfs_da3_node_read(tp, dp, cursor->blkno, &bp,
				XFS_ATTR_FORK);
		if (error)
			return error;
		node = bp->b_addr;
		magic = be16_to_cpu(node->hdr.info.magic);
		if (magic == XFS_ATTR_LEAF_MAGIC ||
		    magic == XFS_ATTR3_LEAF_MAGIC)
			break;
		if (magic != XFS_DA_NODE_MAGIC &&
		    magic != XFS_DA3_NODE_MAGIC) {
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
					node, sizeof(*node));
			goto out_corruptbuf;
		}

		xfs_da3_node_hdr_from_disk(mp, &nodehdr, node);

		/* Tree taller than we can handle; bail out! */
		if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH)
			goto out_corruptbuf;

		/* Check the level from the root node. */
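		/*
		 * The root records the height of the tree; each node we then
		 * descend into must sit exactly one level below its parent,
		 * otherwise the btree geometry is inconsistent.
		 */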
		if (cursor->blkno == 0)
			expected_level = nodehdr.level - 1;
		else if (expected_level != nodehdr.level)
			goto out_corruptbuf;
		else
			expected_level--;

		btree = nodehdr.btree;
		for (i = 0; i < nodehdr.count; btree++, i++) {
			if (cursor->hashval <= be32_to_cpu(btree->hashval)) {
				cursor->blkno = be32_to_cpu(btree->before);
				trace_xfs_attr_list_node_descend(context,
						btree);
				break;
			}
		}
		xfs_trans_brelse(tp, bp);

		if (i == nodehdr.count)
			return 0;

		/* We can't point back to the root. */
		if (XFS_IS_CORRUPT(mp, cursor->blkno == 0))
			return -EFSCORRUPTED;
	}

	if (expected_level != 0)
		goto out_corruptbuf;

	*pbp = bp;
	return 0;

out_corruptbuf:
	xfs_buf_mark_corrupt(bp);
	xfs_trans_brelse(tp, bp);
	return -EFSCORRUPTED;
}

STATIC int
xfs_attr_node_list(
	struct xfs_attr_list_context	*context)
{
	struct xfs_attrlist_cursor_kern	*cursor = &context->cursor;
	struct xfs_attr3_icleaf_hdr	leafhdr;
	struct xfs_attr_leafblock	*leaf;
	struct xfs_da_intnode		*node;
	struct xfs_buf			*bp;
	struct xfs_inode		*dp = context->dp;
	struct xfs_mount		*mp = dp->i_mount;
	int				error = 0;

	trace_xfs_attr_node_list(context);

	cursor->initted = 1;

	/*
	 * Do all sorts of validation on the passed-in cursor structure.
	 * If anything is amiss, ignore the cursor and look up the hashval
	 * starting from the btree root.
	 */
	bp = NULL;
	if (cursor->blkno > 0) {
		error = xfs_da3_node_read(context->tp, dp, cursor->blkno, &bp,
				XFS_ATTR_FORK);
		if ((error != 0) && (error != -EFSCORRUPTED))
			return error;
		if (bp) {
			struct xfs_attr_leaf_entry *entries;

			node = bp->b_addr;
			switch (be16_to_cpu(node->hdr.info.magic)) {
			case XFS_DA_NODE_MAGIC:
			case XFS_DA3_NODE_MAGIC:
				trace_xfs_attr_list_wrong_blk(context);
				xfs_trans_brelse(context->tp, bp);
				bp = NULL;
				break;
			case XFS_ATTR_LEAF_MAGIC:
			case XFS_ATTR3_LEAF_MAGIC:
				leaf = bp->b_addr;
				xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo,
							     &leafhdr, leaf);
				entries = xfs_attr3_leaf_entryp(leaf);
				if (cursor->hashval > be32_to_cpu(
						entries[leafhdr.count - 1].hashval)) {
					trace_xfs_attr_list_wrong_blk(context);
					xfs_trans_brelse(context->tp, bp);
					bp = NULL;
				} else if (cursor->hashval <= be32_to_cpu(
						entries[0].hashval)) {
					trace_xfs_attr_list_wrong_blk(context);
					xfs_trans_brelse(context->tp, bp);
					bp = NULL;
				}
				break;
			default:
				trace_xfs_attr_list_wrong_blk(context);
				xfs_trans_brelse(context->tp, bp);
				bp = NULL;
			}
		}
	}

	/*
	 * We did not find what we expected given the cursor's contents,
	 * so we start from the top and work down based on the hash value.
	 * Note that start of node block is same as start of leaf block.
	 */
	if (bp == NULL) {
		error = xfs_attr_node_list_lookup(context, cursor, &bp);
		if (error || !bp)
			return error;
	}
	ASSERT(bp != NULL);

	/*
	 * Roll upward through the blocks, processing each leaf block in
	 * order.  As long as there is space in the result buffer, keep
	 * adding the information.
	 */
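	/*
	 * Leaf blocks are chained through the forw pointer in ascending
	 * hashval order; the walk ends when the callback reports
	 * seen_enough or when forw is zero at the last leaf.
	 */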
362 */ 363 for (;;) { 364 leaf = bp->b_addr; 365 error = xfs_attr3_leaf_list_int(bp, context); 366 if (error) 367 break; 368 xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &leafhdr, leaf); 369 if (context->seen_enough || leafhdr.forw == 0) 370 break; 371 cursor->blkno = leafhdr.forw; 372 xfs_trans_brelse(context->tp, bp); 373 error = xfs_attr3_leaf_read(context->tp, dp, cursor->blkno, 374 &bp); 375 if (error) 376 return error; 377 } 378 xfs_trans_brelse(context->tp, bp); 379 return error; 380 } 381 382 /* 383 * Copy out attribute list entries for attr_list(), for leaf attribute lists. 384 */ 385 int 386 xfs_attr3_leaf_list_int( 387 struct xfs_buf *bp, 388 struct xfs_attr_list_context *context) 389 { 390 struct xfs_attrlist_cursor_kern *cursor = &context->cursor; 391 struct xfs_attr_leafblock *leaf; 392 struct xfs_attr3_icleaf_hdr ichdr; 393 struct xfs_attr_leaf_entry *entries; 394 struct xfs_attr_leaf_entry *entry; 395 int i; 396 struct xfs_mount *mp = context->dp->i_mount; 397 398 trace_xfs_attr_list_leaf(context); 399 400 leaf = bp->b_addr; 401 xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf); 402 entries = xfs_attr3_leaf_entryp(leaf); 403 404 cursor->initted = 1; 405 406 /* 407 * Re-find our place in the leaf block if this is a new syscall. 408 */ 409 if (context->resynch) { 410 entry = &entries[0]; 411 for (i = 0; i < ichdr.count; entry++, i++) { 412 if (be32_to_cpu(entry->hashval) == cursor->hashval) { 413 if (cursor->offset == context->dupcnt) { 414 context->dupcnt = 0; 415 break; 416 } 417 context->dupcnt++; 418 } else if (be32_to_cpu(entry->hashval) > 419 cursor->hashval) { 420 context->dupcnt = 0; 421 break; 422 } 423 } 424 if (i == ichdr.count) { 425 trace_xfs_attr_list_notfound(context); 426 return 0; 427 } 428 } else { 429 entry = &entries[0]; 430 i = 0; 431 } 432 context->resynch = 0; 433 434 /* 435 * We have found our place, start copying out the new attributes. 436 */ 437 for (; i < ichdr.count; entry++, i++) { 438 char *name; 439 int namelen, valuelen; 440 441 if (be32_to_cpu(entry->hashval) != cursor->hashval) { 442 cursor->hashval = be32_to_cpu(entry->hashval); 443 cursor->offset = 0; 444 } 445 446 if ((entry->flags & XFS_ATTR_INCOMPLETE) && 447 !context->allow_incomplete) 448 continue; 449 450 if (entry->flags & XFS_ATTR_LOCAL) { 451 xfs_attr_leaf_name_local_t *name_loc; 452 453 name_loc = xfs_attr3_leaf_name_local(leaf, i); 454 name = name_loc->nameval; 455 namelen = name_loc->namelen; 456 valuelen = be16_to_cpu(name_loc->valuelen); 457 } else { 458 xfs_attr_leaf_name_remote_t *name_rmt; 459 460 name_rmt = xfs_attr3_leaf_name_remote(leaf, i); 461 name = name_rmt->name; 462 namelen = name_rmt->namelen; 463 valuelen = be32_to_cpu(name_rmt->valuelen); 464 } 465 466 if (XFS_IS_CORRUPT(context->dp->i_mount, 467 !xfs_attr_namecheck(name, namelen))) 468 return -EFSCORRUPTED; 469 context->put_listent(context, entry->flags, 470 name, namelen, valuelen); 471 if (context->seen_enough) 472 break; 473 cursor->offset++; 474 } 475 trace_xfs_attr_list_leaf_end(context); 476 return 0; 477 } 478 479 /* 480 * Copy out attribute entries for attr_list(), for leaf attribute lists. 
481 */ 482 STATIC int 483 xfs_attr_leaf_list( 484 struct xfs_attr_list_context *context) 485 { 486 struct xfs_buf *bp; 487 int error; 488 489 trace_xfs_attr_leaf_list(context); 490 491 context->cursor.blkno = 0; 492 error = xfs_attr3_leaf_read(context->tp, context->dp, 0, &bp); 493 if (error) 494 return error; 495 496 error = xfs_attr3_leaf_list_int(bp, context); 497 xfs_trans_brelse(context->tp, bp); 498 return error; 499 } 500 501 int 502 xfs_attr_list_ilocked( 503 struct xfs_attr_list_context *context) 504 { 505 struct xfs_inode *dp = context->dp; 506 507 ASSERT(xfs_isilocked(dp, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)); 508 509 /* 510 * Decide on what work routines to call based on the inode size. 511 */ 512 if (!xfs_inode_hasattr(dp)) 513 return 0; 514 if (dp->i_af.if_format == XFS_DINODE_FMT_LOCAL) 515 return xfs_attr_shortform_list(context); 516 if (xfs_attr_is_leaf(dp)) 517 return xfs_attr_leaf_list(context); 518 return xfs_attr_node_list(context); 519 } 520 521 int 522 xfs_attr_list( 523 struct xfs_attr_list_context *context) 524 { 525 struct xfs_inode *dp = context->dp; 526 uint lock_mode; 527 int error; 528 529 XFS_STATS_INC(dp->i_mount, xs_attr_list); 530 531 if (xfs_is_shutdown(dp->i_mount)) 532 return -EIO; 533 534 lock_mode = xfs_ilock_attr_map_shared(dp); 535 error = xfs_attr_list_ilocked(context); 536 xfs_iunlock(dp, lock_mode); 537 return error; 538 } 539