// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_attr_sf.h"
#include "xfs_attr_remote.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_buf_item.h"
#include "xfs_cksum.h"
#include "xfs_dir2.h"

STATIC int
xfs_attr_shortform_compare(const void *a, const void *b)
{
	xfs_attr_sf_sort_t *sa, *sb;

	sa = (xfs_attr_sf_sort_t *)a;
	sb = (xfs_attr_sf_sort_t *)b;
	if (sa->hash < sb->hash) {
		return -1;
	} else if (sa->hash > sb->hash) {
		return 1;
	} else {
		return sa->entno - sb->entno;
	}
}

#define XFS_ISRESET_CURSOR(cursor) \
	(!((cursor)->initted) && !((cursor)->hashval) && \
	 !((cursor)->blkno) && !((cursor)->offset))

/*
 * Copy out entries of shortform attribute lists for attr_list().
 * Shortform attribute lists are not stored in hashval sorted order.
 * If the output buffer is not large enough to hold them all, then
 * we have to calculate each entry's hashvalue and sort them before
 * we can begin returning them to the user.
 */
static int
xfs_attr_shortform_list(xfs_attr_list_context_t *context)
{
	attrlist_cursor_kern_t *cursor;
	xfs_attr_sf_sort_t *sbuf, *sbp;
	xfs_attr_shortform_t *sf;
	xfs_attr_sf_entry_t *sfe;
	xfs_inode_t *dp;
	int sbsize, nsbuf, count, i;

	ASSERT(context != NULL);
	dp = context->dp;
	ASSERT(dp != NULL);
	ASSERT(dp->i_afp != NULL);
	sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data;
	ASSERT(sf != NULL);
	if (!sf->hdr.count)
		return 0;
	cursor = context->cursor;
	ASSERT(cursor != NULL);

	trace_xfs_attr_list_sf(context);

	/*
	 * If the buffer is large enough and the cursor is at the start,
	 * do not bother with sorting since we will return everything in
	 * one buffer and another call using the cursor won't need to be
	 * made.
	 * Note the generous fudge factor of 16 overhead bytes per entry.
	 * If bufsize is zero then put_listent must be a search function
	 * and can just scan through what we have.
	 */
	if (context->bufsize == 0 ||
	    (XFS_ISRESET_CURSOR(cursor) &&
	     (dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize)) {
		for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
			context->put_listent(context,
					     sfe->flags,
					     sfe->nameval,
					     (int)sfe->namelen,
					     (int)sfe->valuelen);
			/*
			 * Either search callback finished early or
			 * didn't fit it all in the buffer after all.
			 */
			if (context->seen_enough)
				break;
			sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
		}
		trace_xfs_attr_list_sf_all(context);
		return 0;
	}

	/* do no more for a search callback */
	if (context->bufsize == 0)
		return 0;

	/*
	 * It didn't all fit, so we have to sort everything on hashval.
	 */
	sbsize = sf->hdr.count * sizeof(*sbuf);
	sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP | KM_NOFS);

	/*
	 * Scan the attribute list for the rest of the entries, storing
	 * the relevant info from only those that match into a buffer.
	 */
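	/*
	 * Each shortform entry is variable-sized (header plus name plus
	 * value), so XFS_ATTR_SF_NEXTENTRY() advances by a length computed
	 * from on-disk namelen/valuelen fields.  A corrupt length could
	 * therefore walk sfe straight out of the attr fork, which is why
	 * the loop below re-checks on every iteration that the pointer
	 * still lies inside [sf, sf + if_bytes) before dereferencing it.
	 */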
	nsbuf = 0;
	for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
		if (unlikely(
		    ((char *)sfe < (char *)sf) ||
		    ((char *)sfe >= ((char *)sf + dp->i_afp->if_bytes)))) {
			XFS_CORRUPTION_ERROR("xfs_attr_shortform_list",
					     XFS_ERRLEVEL_LOW,
					     context->dp->i_mount, sfe,
					     sizeof(*sfe));
			kmem_free(sbuf);
			return -EFSCORRUPTED;
		}

		sbp->entno = i;
		sbp->hash = xfs_da_hashname(sfe->nameval, sfe->namelen);
		sbp->name = sfe->nameval;
		sbp->namelen = sfe->namelen;
		/* These are bytes, and both on-disk, don't endian-flip */
		sbp->valuelen = sfe->valuelen;
		sbp->flags = sfe->flags;
		sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
		sbp++;
		nsbuf++;
	}

	/*
	 * Sort the entries on hash then entno.
	 */
	xfs_sort(sbuf, nsbuf, sizeof(*sbuf), xfs_attr_shortform_compare);

	/*
	 * Re-find our place IN THE SORTED LIST.
	 */
	count = 0;
	cursor->initted = 1;
	cursor->blkno = 0;
	for (sbp = sbuf, i = 0; i < nsbuf; i++, sbp++) {
		if (sbp->hash == cursor->hashval) {
			if (cursor->offset == count) {
				break;
			}
			count++;
		} else if (sbp->hash > cursor->hashval) {
			break;
		}
	}
	if (i == nsbuf) {
		kmem_free(sbuf);
		return 0;
	}

	/*
	 * Loop putting entries into the user buffer.
	 */
	for ( ; i < nsbuf; i++, sbp++) {
		if (cursor->hashval != sbp->hash) {
			cursor->hashval = sbp->hash;
			cursor->offset = 0;
		}
		context->put_listent(context,
				     sbp->flags,
				     sbp->name,
				     sbp->namelen,
				     sbp->valuelen);
		if (context->seen_enough)
			break;
		cursor->offset++;
	}

	kmem_free(sbuf);
	return 0;
}

/*
 * We didn't find the block & hash mentioned in the cursor state, so
 * walk down the attr btree looking for the hash.
 */
STATIC int
xfs_attr_node_list_lookup(
	struct xfs_attr_list_context	*context,
	struct attrlist_cursor_kern	*cursor,
	struct xfs_buf			**pbp)
{
	struct xfs_da3_icnode_hdr	nodehdr;
	struct xfs_da_intnode		*node;
	struct xfs_da_node_entry	*btree;
	struct xfs_inode		*dp = context->dp;
	struct xfs_mount		*mp = dp->i_mount;
	struct xfs_trans		*tp = context->tp;
	struct xfs_buf			*bp;
	int				i;
	int				error = 0;
	unsigned int			expected_level = 0;
	uint16_t			magic;

	ASSERT(*pbp == NULL);
	cursor->blkno = 0;
	for (;;) {
		error = xfs_da3_node_read(tp, dp, cursor->blkno, -1, &bp,
				XFS_ATTR_FORK);
		if (error)
			return error;
		node = bp->b_addr;
		magic = be16_to_cpu(node->hdr.info.magic);
		if (magic == XFS_ATTR_LEAF_MAGIC ||
		    magic == XFS_ATTR3_LEAF_MAGIC)
			break;
		if (magic != XFS_DA_NODE_MAGIC &&
		    magic != XFS_DA3_NODE_MAGIC) {
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
					node, sizeof(*node));
			goto out_corruptbuf;
		}

		dp->d_ops->node_hdr_from_disk(&nodehdr, node);

		/* Tree taller than we can handle; bail out! */
		if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH)
			goto out_corruptbuf;

		/* Check the level from the root node. */
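		/*
		 * The root block records the tree height in its level
		 * field, and every step of the descent should then see
		 * the level drop by exactly one until it reaches zero
		 * just above the leaves; any other sequence means the
		 * child pointers loop or skip a level.  For example, a
		 * root at level 2 should be followed by a level 1 node
		 * and then a leaf.
		 */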
		if (cursor->blkno == 0)
			expected_level = nodehdr.level - 1;
		else if (expected_level != nodehdr.level)
			goto out_corruptbuf;
		else
			expected_level--;

		btree = dp->d_ops->node_tree_p(node);
		for (i = 0; i < nodehdr.count; btree++, i++) {
			if (cursor->hashval <= be32_to_cpu(btree->hashval)) {
				cursor->blkno = be32_to_cpu(btree->before);
				trace_xfs_attr_list_node_descend(context,
						btree);
				break;
			}
		}
		xfs_trans_brelse(tp, bp);

		if (i == nodehdr.count)
			return 0;

		/* We can't point back to the root. */
		if (cursor->blkno == 0)
			return -EFSCORRUPTED;
	}

	if (expected_level != 0)
		goto out_corruptbuf;

	*pbp = bp;
	return 0;

out_corruptbuf:
	xfs_trans_brelse(tp, bp);
	return -EFSCORRUPTED;
}

STATIC int
xfs_attr_node_list(
	struct xfs_attr_list_context	*context)
{
	struct xfs_attr3_icleaf_hdr	leafhdr;
	struct attrlist_cursor_kern	*cursor;
	struct xfs_attr_leafblock	*leaf;
	struct xfs_da_intnode		*node;
	struct xfs_buf			*bp;
	struct xfs_inode		*dp = context->dp;
	struct xfs_mount		*mp = dp->i_mount;
	int				error;

	trace_xfs_attr_node_list(context);

	cursor = context->cursor;
	cursor->initted = 1;

	/*
	 * Do all sorts of validation on the passed-in cursor structure.
	 * If anything is amiss, ignore the cursor and look up the hashval
	 * starting from the btree root.
	 */
	bp = NULL;
	if (cursor->blkno > 0) {
		error = xfs_da3_node_read(context->tp, dp, cursor->blkno, -1,
					  &bp, XFS_ATTR_FORK);
		if ((error != 0) && (error != -EFSCORRUPTED))
			return error;
		if (bp) {
			struct xfs_attr_leaf_entry *entries;

			node = bp->b_addr;
			switch (be16_to_cpu(node->hdr.info.magic)) {
			case XFS_DA_NODE_MAGIC:
			case XFS_DA3_NODE_MAGIC:
				trace_xfs_attr_list_wrong_blk(context);
				xfs_trans_brelse(context->tp, bp);
				bp = NULL;
				break;
			case XFS_ATTR_LEAF_MAGIC:
			case XFS_ATTR3_LEAF_MAGIC:
				leaf = bp->b_addr;
				xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo,
							     &leafhdr, leaf);
				entries = xfs_attr3_leaf_entryp(leaf);
				if (cursor->hashval > be32_to_cpu(
						entries[leafhdr.count - 1].hashval)) {
					trace_xfs_attr_list_wrong_blk(context);
					xfs_trans_brelse(context->tp, bp);
					bp = NULL;
				} else if (cursor->hashval <= be32_to_cpu(
						entries[0].hashval)) {
					trace_xfs_attr_list_wrong_blk(context);
					xfs_trans_brelse(context->tp, bp);
					bp = NULL;
				}
				break;
			default:
				trace_xfs_attr_list_wrong_blk(context);
				xfs_trans_brelse(context->tp, bp);
				bp = NULL;
			}
		}
	}

	/*
	 * We did not find what we expected given the cursor's contents,
	 * so we start from the top and work down based on the hash value.
	 * Note that start of node block is same as start of leaf block.
	 */
	if (bp == NULL) {
		error = xfs_attr_node_list_lookup(context, cursor, &bp);
		if (error || !bp)
			return error;
	}
	ASSERT(bp != NULL);

	/*
	 * Roll upward through the blocks, processing each leaf block in
	 * order.  As long as there is space in the result buffer, keep
	 * adding the information.
	 */
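	/*
	 * Leaf blocks are chained through their forw pointers in hash
	 * order, so once the lookup above has positioned us on the first
	 * interesting leaf there is no need to revisit the interior
	 * nodes: following forw (and stashing it in cursor->blkno so a
	 * later call can resume there) reaches every remaining entry.
	 */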
	for (;;) {
		leaf = bp->b_addr;
		xfs_attr3_leaf_list_int(bp, context);
		xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &leafhdr, leaf);
		if (context->seen_enough || leafhdr.forw == 0)
			break;
		cursor->blkno = leafhdr.forw;
		xfs_trans_brelse(context->tp, bp);
		error = xfs_attr3_leaf_read(context->tp, dp, cursor->blkno,
					    -1, &bp);
		if (error)
			return error;
	}
	xfs_trans_brelse(context->tp, bp);
	return 0;
}

/*
 * Copy out attribute list entries for attr_list(), for leaf attribute lists.
 */
void
xfs_attr3_leaf_list_int(
	struct xfs_buf			*bp,
	struct xfs_attr_list_context	*context)
{
	struct attrlist_cursor_kern	*cursor;
	struct xfs_attr_leafblock	*leaf;
	struct xfs_attr3_icleaf_hdr	ichdr;
	struct xfs_attr_leaf_entry	*entries;
	struct xfs_attr_leaf_entry	*entry;
	int				i;
	struct xfs_mount		*mp = context->dp->i_mount;

	trace_xfs_attr_list_leaf(context);

	leaf = bp->b_addr;
	xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);
	entries = xfs_attr3_leaf_entryp(leaf);

	cursor = context->cursor;
	cursor->initted = 1;

	/*
	 * Re-find our place in the leaf block if this is a new syscall.
	 */
	if (context->resynch) {
		entry = &entries[0];
		for (i = 0; i < ichdr.count; entry++, i++) {
			if (be32_to_cpu(entry->hashval) == cursor->hashval) {
				if (cursor->offset == context->dupcnt) {
					context->dupcnt = 0;
					break;
				}
				context->dupcnt++;
			} else if (be32_to_cpu(entry->hashval) >
					cursor->hashval) {
				context->dupcnt = 0;
				break;
			}
		}
		if (i == ichdr.count) {
			trace_xfs_attr_list_notfound(context);
			return;
		}
	} else {
		entry = &entries[0];
		i = 0;
	}
	context->resynch = 0;

	/*
	 * We have found our place, start copying out the new attributes.
	 */
	for (; i < ichdr.count; entry++, i++) {
		char *name;
		int namelen, valuelen;

		if (be32_to_cpu(entry->hashval) != cursor->hashval) {
			cursor->hashval = be32_to_cpu(entry->hashval);
			cursor->offset = 0;
		}

		if ((entry->flags & XFS_ATTR_INCOMPLETE) &&
		    !(context->flags & ATTR_INCOMPLETE))
			continue;		/* skip incomplete entries */

		if (entry->flags & XFS_ATTR_LOCAL) {
			xfs_attr_leaf_name_local_t *name_loc;

			name_loc = xfs_attr3_leaf_name_local(leaf, i);
			name = name_loc->nameval;
			namelen = name_loc->namelen;
			valuelen = be16_to_cpu(name_loc->valuelen);
		} else {
			xfs_attr_leaf_name_remote_t *name_rmt;

			name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
			name = name_rmt->name;
			namelen = name_rmt->namelen;
			valuelen = be32_to_cpu(name_rmt->valuelen);
		}

		context->put_listent(context, entry->flags,
				     name, namelen, valuelen);
		if (context->seen_enough)
			break;
		cursor->offset++;
	}
	trace_xfs_attr_list_leaf_end(context);
	return;
}
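/*
 * Note on the two entry formats handled above: a "local" attribute keeps
 * its name and value inline in the leaf block and uses a 16-bit valuelen,
 * while a "remote" attribute stores only the name and a 32-bit valuelen
 * in the leaf, with the value itself held in separate remote blocks.
 * Either way, listing copies out only the length, never the value; a
 * remote value is fetched separately through the attr get path.
 */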
477 */ 478 STATIC int 479 xfs_attr_leaf_list(xfs_attr_list_context_t *context) 480 { 481 int error; 482 struct xfs_buf *bp; 483 484 trace_xfs_attr_leaf_list(context); 485 486 context->cursor->blkno = 0; 487 error = xfs_attr3_leaf_read(context->tp, context->dp, 0, -1, &bp); 488 if (error) 489 return error; 490 491 xfs_attr3_leaf_list_int(bp, context); 492 xfs_trans_brelse(context->tp, bp); 493 return 0; 494 } 495 496 int 497 xfs_attr_list_int_ilocked( 498 struct xfs_attr_list_context *context) 499 { 500 struct xfs_inode *dp = context->dp; 501 502 ASSERT(xfs_isilocked(dp, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)); 503 504 /* 505 * Decide on what work routines to call based on the inode size. 506 */ 507 if (!xfs_inode_hasattr(dp)) 508 return 0; 509 else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) 510 return xfs_attr_shortform_list(context); 511 else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) 512 return xfs_attr_leaf_list(context); 513 return xfs_attr_node_list(context); 514 } 515 516 int 517 xfs_attr_list_int( 518 xfs_attr_list_context_t *context) 519 { 520 int error; 521 xfs_inode_t *dp = context->dp; 522 uint lock_mode; 523 524 XFS_STATS_INC(dp->i_mount, xs_attr_list); 525 526 if (XFS_FORCED_SHUTDOWN(dp->i_mount)) 527 return -EIO; 528 529 lock_mode = xfs_ilock_attr_map_shared(dp); 530 error = xfs_attr_list_int_ilocked(context); 531 xfs_iunlock(dp, lock_mode); 532 return error; 533 } 534 535 #define ATTR_ENTBASESIZE /* minimum bytes used by an attr */ \ 536 (((struct attrlist_ent *) 0)->a_name - (char *) 0) 537 #define ATTR_ENTSIZE(namelen) /* actual bytes used by an attr */ \ 538 ((ATTR_ENTBASESIZE + (namelen) + 1 + sizeof(uint32_t)-1) \ 539 & ~(sizeof(uint32_t)-1)) 540 541 /* 542 * Format an attribute and copy it out to the user's buffer. 543 * Take care to check values and protect against them changing later, 544 * we may be reading them directly out of a user buffer. 545 */ 546 STATIC void 547 xfs_attr_put_listent( 548 xfs_attr_list_context_t *context, 549 int flags, 550 unsigned char *name, 551 int namelen, 552 int valuelen) 553 { 554 struct attrlist *alist = (struct attrlist *)context->alist; 555 attrlist_ent_t *aep; 556 int arraytop; 557 558 ASSERT(!context->seen_enough); 559 ASSERT(!(context->flags & ATTR_KERNOVAL)); 560 ASSERT(context->count >= 0); 561 ASSERT(context->count < (ATTR_MAX_VALUELEN/8)); 562 ASSERT(context->firstu >= sizeof(*alist)); 563 ASSERT(context->firstu <= context->bufsize); 564 565 /* 566 * Only list entries in the right namespace. 567 */ 568 if (((context->flags & ATTR_SECURE) == 0) != 569 ((flags & XFS_ATTR_SECURE) == 0)) 570 return; 571 if (((context->flags & ATTR_ROOT) == 0) != 572 ((flags & XFS_ATTR_ROOT) == 0)) 573 return; 574 575 arraytop = sizeof(*alist) + 576 context->count * sizeof(alist->al_offset[0]); 577 context->firstu -= ATTR_ENTSIZE(namelen); 578 if (context->firstu < arraytop) { 579 trace_xfs_attr_list_full(context); 580 alist->al_more = 1; 581 context->seen_enough = 1; 582 return; 583 } 584 585 aep = (attrlist_ent_t *)&context->alist[context->firstu]; 586 aep->a_valuelen = valuelen; 587 memcpy(aep->a_name, name, namelen); 588 aep->a_name[namelen] = 0; 589 alist->al_offset[context->count++] = context->firstu; 590 alist->al_count = context->count; 591 trace_xfs_attr_list_add(context); 592 return; 593 } 594 595 /* 596 * Generate a list of extended attribute names and optionally 597 * also value lengths. 
/*
 * Generate a list of extended attribute names and optionally
 * also value lengths.  A positive return value follows the XFS
 * convention of being an error; a zero or negative return code
 * is the length of the buffer returned (negated), indicating
 * success.
 */
int
xfs_attr_list(
	xfs_inode_t		*dp,
	char			*buffer,
	int			bufsize,
	int			flags,
	attrlist_cursor_kern_t	*cursor)
{
	xfs_attr_list_context_t	context;
	struct attrlist		*alist;
	int			error;

	/*
	 * Validate the cursor.
	 */
	if (cursor->pad1 || cursor->pad2)
		return -EINVAL;
	if ((cursor->initted == 0) &&
	    (cursor->hashval || cursor->blkno || cursor->offset))
		return -EINVAL;

	/* Only internal consumers can retrieve incomplete attrs. */
	if (flags & ATTR_INCOMPLETE)
		return -EINVAL;

	/*
	 * Check for a properly aligned buffer.
	 */
	if (((long)buffer) & (sizeof(int)-1))
		return -EFAULT;
	if (flags & ATTR_KERNOVAL)
		bufsize = 0;

	/*
	 * Initialize the output buffer.
	 */
	memset(&context, 0, sizeof(context));
	context.dp = dp;
	context.cursor = cursor;
	context.resynch = 1;
	context.flags = flags;
	context.alist = buffer;
	context.bufsize = (bufsize & ~(sizeof(int)-1));	/* align */
	context.firstu = context.bufsize;
	context.put_listent = xfs_attr_put_listent;

	alist = (struct attrlist *)context.alist;
	alist->al_count = 0;
	alist->al_more = 0;
	alist->al_offset[0] = context.bufsize;

	error = xfs_attr_list_int(&context);
	ASSERT(error <= 0);
	return error;
}
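/*
 * A minimal caller sketch (hypothetical, for illustration only) showing
 * how the cursor and al_more cooperate across calls; the buffer size and
 * error handling are the caller's choice, not dictated by this file:
 *
 *	struct attrlist_cursor_kern cursor = { 0 };	// must start zeroed
 *	struct attrlist *alist = (struct attrlist *)buf;
 *	int error;
 *
 *	do {
 *		error = xfs_attr_list(ip, buf, bufsize, 0, &cursor);
 *		if (error)
 *			break;
 *		// consume alist->al_count entries via alist->al_offset[]
 *	} while (alist->al_more);
 *
 * The cursor must be passed back unmodified between calls so that the
 * resync logic above can re-find its place by hashval and offset.
 */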