/*-
 * Copyright (c) 2007 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Stand-alone ZFS file reader.
 */

#include <sys/endian.h>
#include <sys/stat.h>
#include <sys/stdint.h>
#include <sys/list.h>
#include <machine/_inttypes.h>

#include "zfsimpl.h"
#include "zfssubr.c"


struct zfsmount {
	const spa_t	*spa;
	objset_phys_t	objset;
	uint64_t	rootobj;
};
static struct zfsmount zfsmount __unused;

/*
 * The indirect_child_t represents the vdev that we will read from, when we
 * need to read all copies of the data (e.g. for scrub or reconstruction).
 * For plain (non-mirror) top-level vdevs (i.e. is_vdev is not a mirror),
 * ic_vdev is the same as is_vdev.  However, for mirror top-level vdevs,
 * ic_vdev is a child of the mirror.
 */
typedef struct indirect_child {
	void *ic_data;
	vdev_t *ic_vdev;
} indirect_child_t;

/*
 * The indirect_split_t represents one mapped segment of an i/o to the
 * indirect vdev. For non-split (contiguously-mapped) blocks, there will be
 * only one indirect_split_t, with is_split_offset==0 and is_size==io_size.
 * For split blocks, there will be several of these.
 */
typedef struct indirect_split {
	list_node_t is_node; /* link on iv_splits */

	/*
	 * is_split_offset is the offset into the i/o.
	 * This is the sum of the previous splits' is_size's.
	 */
	uint64_t is_split_offset;

	vdev_t *is_vdev; /* top-level vdev */
	uint64_t is_target_offset; /* offset on is_vdev */
	uint64_t is_size;
	int is_children; /* number of entries in is_child[] */

	/*
	 * is_good_child is the child that we are currently using to
	 * attempt reconstruction.
	 */
	int is_good_child;

	indirect_child_t is_child[1]; /* variable-length */
} indirect_split_t;
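/*
 * Illustrative example (hypothetical numbers): a 24K i/o that the
 * indirect mapping splits into segments of 8K and 16K yields two
 * indirect_split_t entries; the first has is_split_offset == 0 and
 * is_size == 8K, the second has is_split_offset == 8K (the sum of the
 * preceding splits' sizes) and is_size == 16K.
 */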
/*
 * The indirect_vsd_t is associated with each i/o to the indirect vdev.
 * It is the "Vdev-Specific Data" in the zio_t's io_vsd.
 */
typedef struct indirect_vsd {
	boolean_t iv_split_block;
	boolean_t iv_reconstruct;

	list_t iv_splits; /* list of indirect_split_t's */
} indirect_vsd_t;

/*
 * List of all vdevs, chained through v_alllink.
 */
static vdev_list_t zfs_vdevs;

/*
 * List of ZFS features supported for read
 */
static const char *features_for_read[] = {
	"org.illumos:lz4_compress",
	"com.delphix:hole_birth",
	"com.delphix:extensible_dataset",
	"com.delphix:embedded_data",
	"org.open-zfs:large_blocks",
	"org.illumos:sha512",
	"org.illumos:skein",
	"org.zfsonlinux:large_dnode",
	"com.joyent:multi_vdev_crash_dump",
	"com.delphix:spacemap_histogram",
	"com.delphix:zpool_checkpoint",
	"com.delphix:spacemap_v2",
	"com.datto:encryption",
	"org.zfsonlinux:allocation_classes",
	"com.datto:resilver_defer",
	"com.delphix:device_removal",
	"com.delphix:obsolete_counts",
	"com.intel:allocation_classes",
	NULL
};

/*
 * List of all pools, chained through spa_link.
 */
static spa_list_t zfs_pools;

static const dnode_phys_t *dnode_cache_obj;
static uint64_t dnode_cache_bn;
static char *dnode_cache_buf;
static char *zap_scratch;
static char *zfs_temp_buf, *zfs_temp_end, *zfs_temp_ptr;

#define TEMP_SIZE	(1024 * 1024)

static int zio_read(const spa_t *spa, const blkptr_t *bp, void *buf);
static int zfs_get_root(const spa_t *spa, uint64_t *objid);
static int zfs_rlookup(const spa_t *spa, uint64_t objnum, char *result);
static int zap_lookup(const spa_t *spa, const dnode_phys_t *dnode,
    const char *name, uint64_t integer_size, uint64_t num_integers,
    void *value);
static int objset_get_dnode(const spa_t *, const objset_phys_t *, uint64_t,
    dnode_phys_t *);
static int dnode_read(const spa_t *, const dnode_phys_t *, off_t, void *,
    size_t);
static int vdev_indirect_read(vdev_t *, const blkptr_t *, void *, off_t,
    size_t);
static int vdev_mirror_read(vdev_t *, const blkptr_t *, void *, off_t, size_t);
vdev_indirect_mapping_t *vdev_indirect_mapping_open(spa_t *, objset_phys_t *,
    uint64_t);
vdev_indirect_mapping_entry_phys_t *
    vdev_indirect_mapping_duplicate_adjacent_entries(vdev_t *, uint64_t,
    uint64_t, uint64_t *);

static void
zfs_init(void)
{
	STAILQ_INIT(&zfs_vdevs);
	STAILQ_INIT(&zfs_pools);

	zfs_temp_buf = malloc(TEMP_SIZE);
	zfs_temp_end = zfs_temp_buf + TEMP_SIZE;
	zfs_temp_ptr = zfs_temp_buf;
	dnode_cache_buf = malloc(SPA_MAXBLOCKSIZE);
	zap_scratch = malloc(SPA_MAXBLOCKSIZE);

	zfs_init_crc();
}

static void *
zfs_alloc(size_t size)
{
	char *ptr;

	if (zfs_temp_ptr + size > zfs_temp_end) {
		panic("ZFS: out of temporary buffer space");
	}
	ptr = zfs_temp_ptr;
	zfs_temp_ptr += size;

	return (ptr);
}

static void
zfs_free(void *ptr, size_t size)
{

	zfs_temp_ptr -= size;
	if (zfs_temp_ptr != ptr) {
		panic("ZFS: zfs_alloc()/zfs_free() mismatch");
	}
}
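/*
 * Note that zfs_alloc() and zfs_free() implement a simple LIFO bump
 * allocator over the fixed TEMP_SIZE arena, so allocations must be
 * released in reverse order.  A minimal usage sketch (hypothetical
 * sizes):
 *
 *	void *a = zfs_alloc(512);
 *	void *b = zfs_alloc(1024);
 *	...
 *	zfs_free(b, 1024);	 last allocated, first freed
 *	zfs_free(a, 512);
 *
 * Freeing out of order trips the "zfs_alloc()/zfs_free() mismatch"
 * panic above.
 */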
static int
xdr_int(const unsigned char **xdr, int *ip)
{
	*ip = be32dec(*xdr);
	(*xdr) += 4;
	return (0);
}

static int
xdr_u_int(const unsigned char **xdr, u_int *ip)
{
	*ip = be32dec(*xdr);
	(*xdr) += 4;
	return (0);
}

static int
xdr_uint64_t(const unsigned char **xdr, uint64_t *lp)
{
	u_int hi, lo;

	xdr_u_int(xdr, &hi);
	xdr_u_int(xdr, &lo);
	*lp = (((uint64_t)hi) << 32) | lo;
	return (0);
}

static int
nvlist_find(const unsigned char *nvlist, const char *name, int type,
    int *elementsp, void *valuep)
{
	const unsigned char *p, *pair;
	int junk;
	int encoded_size, decoded_size;

	p = nvlist;
	xdr_int(&p, &junk);
	xdr_int(&p, &junk);

	pair = p;
	xdr_int(&p, &encoded_size);
	xdr_int(&p, &decoded_size);
	while (encoded_size && decoded_size) {
		int namelen, pairtype, elements;
		const char *pairname;

		xdr_int(&p, &namelen);
		pairname = (const char *)p;
		p += roundup(namelen, 4);
		xdr_int(&p, &pairtype);

		if (memcmp(name, pairname, namelen) == 0 && type == pairtype) {
			xdr_int(&p, &elements);
			if (elementsp)
				*elementsp = elements;
			if (type == DATA_TYPE_UINT64) {
				xdr_uint64_t(&p, (uint64_t *)valuep);
				return (0);
			} else if (type == DATA_TYPE_STRING) {
				int len;
				xdr_int(&p, &len);
				(*(const char **)valuep) = (const char *)p;
				return (0);
			} else if (type == DATA_TYPE_NVLIST ||
			    type == DATA_TYPE_NVLIST_ARRAY) {
				(*(const unsigned char **)valuep) =
				    (const unsigned char *)p;
				return (0);
			} else {
				return (EIO);
			}
		} else {
			/*
			 * Not the pair we are looking for, skip to the
			 * next one.
			 */
			p = pair + encoded_size;
		}

		pair = p;
		xdr_int(&p, &encoded_size);
		xdr_int(&p, &decoded_size);
	}

	return (EIO);
}

static int
nvlist_check_features_for_read(const unsigned char *nvlist)
{
	const unsigned char *p, *pair;
	int junk;
	int encoded_size, decoded_size;
	int rc;

	rc = 0;

	p = nvlist;
	xdr_int(&p, &junk);
	xdr_int(&p, &junk);

	pair = p;
	xdr_int(&p, &encoded_size);
	xdr_int(&p, &decoded_size);
	while (encoded_size && decoded_size) {
		int namelen, pairtype;
		const char *pairname;
		int i, found;

		found = 0;

		xdr_int(&p, &namelen);
		pairname = (const char *)p;
		p += roundup(namelen, 4);
		xdr_int(&p, &pairtype);

		for (i = 0; features_for_read[i] != NULL; i++) {
			if (memcmp(pairname, features_for_read[i],
			    namelen) == 0) {
				found = 1;
				break;
			}
		}

		if (!found) {
			printf("ZFS: unsupported feature: %s\n", pairname);
			rc = EIO;
		}

		p = pair + encoded_size;

		pair = p;
		xdr_int(&p, &encoded_size);
		xdr_int(&p, &decoded_size);
	}

	return (rc);
}
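/*
 * The parsers above and below assume the XDR encoding of a packed
 * nvlist, which (informally) looks like this on the wire: two 32-bit
 * header words (skipped as "junk"), then a sequence of pairs, each
 * roughly
 *
 *	int32 encoded_size;	size of this pair in the stream
 *	int32 decoded_size;	in-memory size (unused here)
 *	int32 namelen; char name[roundup(namelen, 4)];
 *	int32 type; int32 nelements; ...value...
 *
 * terminated by a pair whose encoded_size and decoded_size are both
 * zero.  This is a sketch, not the authoritative definition; see the
 * nvpair XDR code for the full layout.
 */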
/*
 * Return the next nvlist in an nvlist array.
 */
static const unsigned char *
nvlist_next(const unsigned char *nvlist)
{
	const unsigned char *p, *pair;
	int junk;
	int encoded_size, decoded_size;

	p = nvlist;
	xdr_int(&p, &junk);
	xdr_int(&p, &junk);

	pair = p;
	xdr_int(&p, &encoded_size);
	xdr_int(&p, &decoded_size);
	while (encoded_size && decoded_size) {
		p = pair + encoded_size;

		pair = p;
		xdr_int(&p, &encoded_size);
		xdr_int(&p, &decoded_size);
	}

	return (p);
}

#ifdef TEST

static const unsigned char *
nvlist_print(const unsigned char *nvlist, unsigned int indent)
{
	static const char *typenames[] = {
		"DATA_TYPE_UNKNOWN",
		"DATA_TYPE_BOOLEAN",
		"DATA_TYPE_BYTE",
		"DATA_TYPE_INT16",
		"DATA_TYPE_UINT16",
		"DATA_TYPE_INT32",
		"DATA_TYPE_UINT32",
		"DATA_TYPE_INT64",
		"DATA_TYPE_UINT64",
		"DATA_TYPE_STRING",
		"DATA_TYPE_BYTE_ARRAY",
		"DATA_TYPE_INT16_ARRAY",
		"DATA_TYPE_UINT16_ARRAY",
		"DATA_TYPE_INT32_ARRAY",
		"DATA_TYPE_UINT32_ARRAY",
		"DATA_TYPE_INT64_ARRAY",
		"DATA_TYPE_UINT64_ARRAY",
		"DATA_TYPE_STRING_ARRAY",
		"DATA_TYPE_HRTIME",
		"DATA_TYPE_NVLIST",
		"DATA_TYPE_NVLIST_ARRAY",
		"DATA_TYPE_BOOLEAN_VALUE",
		"DATA_TYPE_INT8",
		"DATA_TYPE_UINT8",
		"DATA_TYPE_BOOLEAN_ARRAY",
		"DATA_TYPE_INT8_ARRAY",
		"DATA_TYPE_UINT8_ARRAY"
	};

	unsigned int i, j;
	const unsigned char *p, *pair;
	int junk;
	int encoded_size, decoded_size;

	p = nvlist;
	xdr_int(&p, &junk);
	xdr_int(&p, &junk);

	pair = p;
	xdr_int(&p, &encoded_size);
	xdr_int(&p, &decoded_size);
	while (encoded_size && decoded_size) {
		int namelen, pairtype, elements;
		const char *pairname;

		xdr_int(&p, &namelen);
		pairname = (const char *)p;
		p += roundup(namelen, 4);
		xdr_int(&p, &pairtype);

		for (i = 0; i < indent; i++)
			printf(" ");
		printf("%s %s", typenames[pairtype], pairname);

		xdr_int(&p, &elements);
		switch (pairtype) {
		case DATA_TYPE_UINT64: {
			uint64_t val;
			xdr_uint64_t(&p, &val);
			printf(" = 0x%jx\n", (uintmax_t)val);
			break;
		}

		case DATA_TYPE_STRING: {
			int len;
			xdr_int(&p, &len);
			printf(" = \"%s\"\n", p);
			break;
		}

		case DATA_TYPE_NVLIST:
			printf("\n");
			nvlist_print(p, indent + 1);
			break;

		case DATA_TYPE_NVLIST_ARRAY:
			for (j = 0; j < elements; j++) {
				printf("[%d]\n", j);
				p = nvlist_print(p, indent + 1);
				if (j != elements - 1) {
					for (i = 0; i < indent; i++)
						printf(" ");
					printf("%s %s", typenames[pairtype],
					    pairname);
				}
			}
			break;

		default:
			printf("\n");
		}

		p = pair + encoded_size;

		pair = p;
		xdr_int(&p, &encoded_size);
		xdr_int(&p, &decoded_size);
	}

	return (p);
}

#endif

static int
vdev_read_phys(vdev_t *vdev, const blkptr_t *bp, void *buf,
    off_t offset, size_t size)
{
	size_t psize;
	int rc;

	if (!vdev->v_phys_read)
		return (EIO);

	if (bp) {
		psize = BP_GET_PSIZE(bp);
	} else {
		psize = size;
	}

	rc = vdev->v_phys_read(vdev, vdev->v_read_priv, offset, buf, psize);
	if (rc == 0) {
		if (bp != NULL)
			rc = zio_checksum_verify(vdev->v_spa, bp, buf);
	}

	return (rc);
}
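/*
 * A remap_segment_t describes a not-yet-remapped portion of an i/o to
 * an indirect vdev: rs_vd, rs_offset and rs_asize give the extent that
 * still needs to be mapped, and rs_split_offset records where that
 * extent sits within the original i/o.  Segments are kept on a stack
 * by vdev_indirect_remap() below while it chases chains of indirect
 * vdevs.  (Descriptive comment summarizing the code below.)
 */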
typedef struct remap_segment {
	vdev_t *rs_vd;
	uint64_t rs_offset;
	uint64_t rs_asize;
	uint64_t rs_split_offset;
	list_node_t rs_node;
} remap_segment_t;

static remap_segment_t *
rs_alloc(vdev_t *vd, uint64_t offset, uint64_t asize, uint64_t split_offset)
{
	remap_segment_t *rs = malloc(sizeof (remap_segment_t));

	if (rs != NULL) {
		rs->rs_vd = vd;
		rs->rs_offset = offset;
		rs->rs_asize = asize;
		rs->rs_split_offset = split_offset;
	}

	return (rs);
}

vdev_indirect_mapping_t *
vdev_indirect_mapping_open(spa_t *spa, objset_phys_t *os,
    uint64_t mapping_object)
{
	vdev_indirect_mapping_t *vim;
	vdev_indirect_mapping_phys_t *vim_phys;
	int rc;

	vim = calloc(1, sizeof (*vim));
	if (vim == NULL)
		return (NULL);

	vim->vim_dn = calloc(1, sizeof (*vim->vim_dn));
	if (vim->vim_dn == NULL) {
		free(vim);
		return (NULL);
	}

	rc = objset_get_dnode(spa, os, mapping_object, vim->vim_dn);
	if (rc != 0) {
		free(vim->vim_dn);
		free(vim);
		return (NULL);
	}

	vim->vim_spa = spa;
	vim->vim_phys = malloc(sizeof (*vim->vim_phys));
	if (vim->vim_phys == NULL) {
		free(vim->vim_dn);
		free(vim);
		return (NULL);
	}

	vim_phys = (vdev_indirect_mapping_phys_t *)DN_BONUS(vim->vim_dn);
	*vim->vim_phys = *vim_phys;

	vim->vim_objset = os;
	vim->vim_object = mapping_object;
	vim->vim_entries = NULL;

	vim->vim_havecounts =
	    (vim->vim_dn->dn_bonuslen > VDEV_INDIRECT_MAPPING_SIZE_V0);

	return (vim);
}

/*
 * Compare an offset with an indirect mapping entry; there are three
 * possible scenarios:
 *
 *     1. The offset is "less than" the mapping entry; meaning the
 *        offset is less than the source offset of the mapping entry. In
 *        this case, there is no overlap between the offset and the
 *        mapping entry and -1 will be returned.
 *
 *     2. The offset is "greater than" the mapping entry; meaning the
 *        offset is greater than the mapping entry's source offset plus
 *        the entry's size. In this case, there is no overlap between
 *        the offset and the mapping entry and 1 will be returned.
 *
 *        NOTE: If the offset is actually equal to the entry's offset
 *        plus size, this is considered to be "greater" than the entry,
 *        and this case applies (i.e. 1 will be returned). Thus, the
 *        entry's "range" can be considered to be inclusive at its
 *        start, but exclusive at its end: e.g. [src, src + size).
 *
 *     3. The last case to consider is if the offset actually falls
 *        within the mapping entry's range. If this is the case, the
 *        offset is considered to be "equal to" the mapping entry and
 *        0 will be returned.
 *
 *        NOTE: If the offset is equal to the entry's source offset,
 *        this case applies and 0 will be returned. If the offset is
 *        equal to the entry's source plus its size, this case does
 *        *not* apply (see "NOTE" above for scenario 2), and 1 will be
 *        returned.
 */
static int
dva_mapping_overlap_compare(const void *v_key, const void *v_array_elem)
{
	const uint64_t *key = v_key;
	const vdev_indirect_mapping_entry_phys_t *array_elem =
	    v_array_elem;
	uint64_t src_offset = DVA_MAPPING_GET_SRC_OFFSET(array_elem);

	if (*key < src_offset) {
		return (-1);
	} else if (*key < src_offset + DVA_GET_ASIZE(&array_elem->vimep_dst)) {
		return (0);
	} else {
		return (1);
	}
}
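/*
 * Worked example with hypothetical values: for an entry whose source
 * offset is 0x1000 and whose size is 0x200, the comparator treats the
 * entry's range as [0x1000, 0x1200): a key of 0xfff returns -1, keys
 * 0x1000..0x11ff return 0, and a key of 0x1200 returns 1.  These
 * strcmp-style results are what make the binary search in
 * vdev_indirect_mapping_entry_for_offset() below work.
 */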
/*
 * Return array entry.
 */
static vdev_indirect_mapping_entry_phys_t *
vdev_indirect_mapping_entry(vdev_indirect_mapping_t *vim, uint64_t index)
{
	uint64_t size;
	off_t offset = 0;
	int rc;

	if (vim->vim_phys->vimp_num_entries == 0)
		return (NULL);

	if (vim->vim_entries == NULL) {
		uint64_t bsize;

		bsize = vim->vim_dn->dn_datablkszsec << SPA_MINBLOCKSHIFT;
		size = vim->vim_phys->vimp_num_entries *
		    sizeof (*vim->vim_entries);
		if (size > bsize) {
			size = bsize / sizeof (*vim->vim_entries);
			size *= sizeof (*vim->vim_entries);
		}
		vim->vim_entries = malloc(size);
		if (vim->vim_entries == NULL)
			return (NULL);
		vim->vim_num_entries = size / sizeof (*vim->vim_entries);
		offset = index * sizeof (*vim->vim_entries);
	}

	/* We have data in vim_entries */
	if (offset == 0) {
		if (index >= vim->vim_entry_offset &&
		    index <= vim->vim_entry_offset + vim->vim_num_entries) {
			index -= vim->vim_entry_offset;
			return (&vim->vim_entries[index]);
		}
		offset = index * sizeof (*vim->vim_entries);
	}

	vim->vim_entry_offset = index;
	size = vim->vim_num_entries * sizeof (*vim->vim_entries);
	rc = dnode_read(vim->vim_spa, vim->vim_dn, offset, vim->vim_entries,
	    size);
	if (rc != 0) {
		/* Read error, invalidate vim_entries. */
		free(vim->vim_entries);
		vim->vim_entries = NULL;
		return (NULL);
	}
	index -= vim->vim_entry_offset;
	return (&vim->vim_entries[index]);
}

/*
 * Returns the mapping entry for the given offset.
 *
 * It's possible that the given offset will not be in the mapping table
 * (i.e. no mapping entries contain this offset), in which case, the
 * return value depends on the "next_if_missing" parameter.
 *
 * If the offset is not found in the table and "next_if_missing" is
 * B_FALSE, then NULL will always be returned. The behavior is intended
 * to allow consumers to get the entry corresponding to the offset
 * parameter, iff the offset overlaps with an entry in the table.
 *
 * If the offset is not found in the table and "next_if_missing" is
 * B_TRUE, then the entry nearest to the given offset will be returned,
 * such that the entry's source offset is greater than the offset
 * passed in (i.e. the "next" mapping entry in the table is returned, if
 * the offset is missing from the table). If there are no entries whose
 * source offset is greater than the passed in offset, NULL is returned.
 */
static vdev_indirect_mapping_entry_phys_t *
vdev_indirect_mapping_entry_for_offset(vdev_indirect_mapping_t *vim,
    uint64_t offset)
{
	ASSERT(vim->vim_phys->vimp_num_entries > 0);

	vdev_indirect_mapping_entry_phys_t *entry;

	uint64_t last = vim->vim_phys->vimp_num_entries - 1;
	uint64_t base = 0;

	/*
	 * We don't define these inside of the while loop because we use
	 * their value in the case that offset isn't in the mapping.
	 */
	uint64_t mid;
	int result;

	while (last >= base) {
		mid = base + ((last - base) >> 1);

		entry = vdev_indirect_mapping_entry(vim, mid);
		if (entry == NULL)
			break;
		result = dva_mapping_overlap_compare(&offset, entry);

		if (result == 0) {
			break;
		} else if (result < 0) {
			last = mid - 1;
		} else {
			base = mid + 1;
		}
	}
	return (entry);
}

/*
 * Given an indirect vdev and an extent on that vdev, it duplicates the
 * physical entries of the indirect mapping that correspond to the extent
 * to a new array and returns a pointer to it. In addition, copied_entries
 * is populated with the number of mapping entries that were duplicated.
 *
 * Finally, since we are doing an allocation, it is up to the caller to
 * free the array allocated in this function.
 */
vdev_indirect_mapping_entry_phys_t *
vdev_indirect_mapping_duplicate_adjacent_entries(vdev_t *vd, uint64_t offset,
    uint64_t asize, uint64_t *copied_entries)
{
	vdev_indirect_mapping_entry_phys_t *duplicate_mappings = NULL;
	vdev_indirect_mapping_t *vim = vd->v_mapping;
	uint64_t entries = 0;

	vdev_indirect_mapping_entry_phys_t *first_mapping =
	    vdev_indirect_mapping_entry_for_offset(vim, offset);
	ASSERT3P(first_mapping, !=, NULL);

	vdev_indirect_mapping_entry_phys_t *m = first_mapping;
	while (asize > 0) {
		uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);
		uint64_t inner_offset = offset - DVA_MAPPING_GET_SRC_OFFSET(m);
		uint64_t inner_size = MIN(asize, size - inner_offset);

		offset += inner_size;
		asize -= inner_size;
		entries++;
		m++;
	}

	size_t copy_length = entries * sizeof (*first_mapping);
	duplicate_mappings = malloc(copy_length);
	if (duplicate_mappings != NULL)
		bcopy(first_mapping, duplicate_mappings, copy_length);
	else
		entries = 0;

	*copied_entries = entries;

	return (duplicate_mappings);
}

static vdev_t *
vdev_lookup_top(spa_t *spa, uint64_t vdev)
{
	vdev_t *rvd;
	vdev_list_t *vlist;

	vlist = &spa->spa_root_vdev->v_children;
	STAILQ_FOREACH(rvd, vlist, v_childlink)
		if (rvd->v_id == vdev)
			break;

	return (rvd);
}
/*
 * This is a callback for vdev_indirect_remap() which allocates an
 * indirect_split_t for each split segment and adds it to iv_splits.
 */
static void
vdev_indirect_gather_splits(uint64_t split_offset, vdev_t *vd, uint64_t offset,
    uint64_t size, void *arg)
{
	int n = 1;
	zio_t *zio = arg;
	indirect_vsd_t *iv = zio->io_vsd;

	if (vd->v_read == vdev_indirect_read)
		return;

	if (vd->v_read == vdev_mirror_read)
		n = vd->v_nchildren;

	indirect_split_t *is =
	    malloc(offsetof(indirect_split_t, is_child[n]));
	if (is == NULL) {
		zio->io_error = ENOMEM;
		return;
	}
	bzero(is, offsetof(indirect_split_t, is_child[n]));

	is->is_children = n;
	is->is_size = size;
	is->is_split_offset = split_offset;
	is->is_target_offset = offset;
	is->is_vdev = vd;

	/*
	 * Note that we only consider multiple copies of the data for
	 * *mirror* vdevs.  We don't for "replacing" or "spare" vdevs, even
	 * though they use the same ops as mirror, because there's only one
	 * "good" copy under the replacing/spare.
	 */
	if (vd->v_read == vdev_mirror_read) {
		int i = 0;
		vdev_t *kid;

		STAILQ_FOREACH(kid, &vd->v_children, v_childlink) {
			is->is_child[i++].ic_vdev = kid;
		}
	} else {
		is->is_child[0].ic_vdev = vd;
	}

	list_insert_tail(&iv->iv_splits, is);
}
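/*
 * vdev_indirect_remap() below walks the mapping iteratively rather
 * than recursively: it keeps a stack of remap_segment_t's, passes
 * every mapped extent to vdev_indirect_gather_splits() (which, as
 * noted above, ignores indirect destinations), and pushes extents
 * whose destination is itself an indirect vdev back onto the stack
 * to be resolved on a later iteration.  (Descriptive summary of the
 * code below.)
 */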
static void
vdev_indirect_remap(vdev_t *vd, uint64_t offset, uint64_t asize, void *arg)
{
	list_t stack;
	spa_t *spa = vd->v_spa;
	zio_t *zio = arg;
	remap_segment_t *rs;

	list_create(&stack, sizeof (remap_segment_t),
	    offsetof(remap_segment_t, rs_node));

	rs = rs_alloc(vd, offset, asize, 0);
	if (rs == NULL) {
		printf("vdev_indirect_remap: out of memory.\n");
		zio->io_error = ENOMEM;
	}
	for (; rs != NULL; rs = list_remove_head(&stack)) {
		vdev_t *v = rs->rs_vd;
		uint64_t num_entries = 0;
		/* vdev_indirect_mapping_t *vim = v->v_mapping; */
		vdev_indirect_mapping_entry_phys_t *mapping =
		    vdev_indirect_mapping_duplicate_adjacent_entries(v,
		    rs->rs_offset, rs->rs_asize, &num_entries);

		if (num_entries == 0)
			zio->io_error = ENOMEM;

		for (uint64_t i = 0; i < num_entries; i++) {
			vdev_indirect_mapping_entry_phys_t *m = &mapping[i];
			uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);
			uint64_t dst_offset = DVA_GET_OFFSET(&m->vimep_dst);
			uint64_t dst_vdev = DVA_GET_VDEV(&m->vimep_dst);
			uint64_t inner_offset = rs->rs_offset -
			    DVA_MAPPING_GET_SRC_OFFSET(m);
			uint64_t inner_size =
			    MIN(rs->rs_asize, size - inner_offset);
			vdev_t *dst_v = vdev_lookup_top(spa, dst_vdev);

			if (dst_v->v_read == vdev_indirect_read) {
				remap_segment_t *o;

				o = rs_alloc(dst_v, dst_offset + inner_offset,
				    inner_size, rs->rs_split_offset);
				if (o == NULL) {
					printf("vdev_indirect_remap: "
					    "out of memory.\n");
					zio->io_error = ENOMEM;
					break;
				}

				list_insert_head(&stack, o);
			}
			vdev_indirect_gather_splits(rs->rs_split_offset, dst_v,
			    dst_offset + inner_offset,
			    inner_size, arg);

			/*
			 * vdev_indirect_gather_splits can fail with a
			 * memory allocation error; we cannot recover
			 * from that.
			 */
			if (zio->io_error != 0)
				break;
			rs->rs_offset += inner_size;
			rs->rs_asize -= inner_size;
			rs->rs_split_offset += inner_size;
		}

		free(mapping);
		free(rs);
		if (zio->io_error != 0)
			break;
	}

	list_destroy(&stack);
}

static void
vdev_indirect_map_free(zio_t *zio)
{
	indirect_vsd_t *iv = zio->io_vsd;
	indirect_split_t *is;

	while ((is = list_head(&iv->iv_splits)) != NULL) {
		for (int c = 0; c < is->is_children; c++) {
			indirect_child_t *ic = &is->is_child[c];
			free(ic->ic_data);
		}
		list_remove(&iv->iv_splits, is);
		free(is);
	}
	free(iv);
}
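/*
 * vdev_indirect_read() below distinguishes two cases after remapping.
 * Sketch of the flow (descriptive only):
 *
 *	remap [offset, offset + bytes) into iv_splits;
 *	if the single split covers the whole i/o:
 *		pass the original bp down so the child verifies the
 *		checksum (and e.g. a mirror can retry another leaf);
 *	else:
 *		read each split segment into its slice of the buffer
 *		and verify the checksum of the reassembled block here.
 */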
static int
vdev_indirect_read(vdev_t *vdev, const blkptr_t *bp, void *buf,
    off_t offset, size_t bytes)
{
	zio_t zio;
	spa_t *spa = vdev->v_spa;
	indirect_vsd_t *iv;
	indirect_split_t *first;
	int rc = EIO;

	iv = calloc(1, sizeof(*iv));
	if (iv == NULL)
		return (ENOMEM);

	list_create(&iv->iv_splits,
	    sizeof (indirect_split_t), offsetof(indirect_split_t, is_node));

	bzero(&zio, sizeof(zio));
	zio.io_spa = spa;
	zio.io_bp = (blkptr_t *)bp;
	zio.io_data = buf;
	zio.io_size = bytes;
	zio.io_offset = offset;
	zio.io_vd = vdev;
	zio.io_vsd = iv;

	if (vdev->v_mapping == NULL) {
		vdev_indirect_config_t *vic;

		vic = &vdev->vdev_indirect_config;
		vdev->v_mapping = vdev_indirect_mapping_open(spa,
		    &spa->spa_mos, vic->vic_mapping_object);
	}

	vdev_indirect_remap(vdev, offset, bytes, &zio);
	if (zio.io_error != 0)
		return (zio.io_error);

	first = list_head(&iv->iv_splits);
	if (first->is_size == zio.io_size) {
		/*
		 * This is not a split block; we are pointing to the entire
		 * data, which will checksum the same as the original data.
		 * Pass the BP down so that the child i/o can verify the
		 * checksum, and try a different location if available
		 * (e.g. on a mirror).
		 *
		 * While this special case could be handled the same as the
		 * general (split block) case, doing it this way ensures
		 * that the vast majority of blocks on indirect vdevs
		 * (which are not split) are handled identically to blocks
		 * on non-indirect vdevs.  This allows us to be less strict
		 * about performance in the general (but rare) case.
		 */
		rc = first->is_vdev->v_read(first->is_vdev, zio.io_bp,
		    zio.io_data, first->is_target_offset, bytes);
	} else {
		iv->iv_split_block = B_TRUE;
		/*
		 * Read one copy of each split segment, from the
		 * top-level vdev.  Since we don't know the
		 * checksum of each split individually, the child
		 * zio can't ensure that we get the right data.
		 * E.g. if it's a mirror, it will just read from a
		 * random (healthy) leaf vdev.  We have to verify
		 * the checksum of the reassembled block below, once
		 * all segments have been read.
		 */
		for (indirect_split_t *is = list_head(&iv->iv_splits);
		    is != NULL; is = list_next(&iv->iv_splits, is)) {
			char *ptr = zio.io_data;

			rc = is->is_vdev->v_read(is->is_vdev, zio.io_bp,
			    ptr + is->is_split_offset, is->is_target_offset,
			    is->is_size);
		}
		if (zio_checksum_verify(spa, zio.io_bp, zio.io_data))
			rc = ECKSUM;
		else
			rc = 0;
	}

	vdev_indirect_map_free(&zio);
	if (rc == 0)
		rc = zio.io_error;

	return (rc);
}

static int
vdev_disk_read(vdev_t *vdev, const blkptr_t *bp, void *buf,
    off_t offset, size_t bytes)
{

	return (vdev_read_phys(vdev, bp, buf,
	    offset + VDEV_LABEL_START_SIZE, bytes));
}


static int
vdev_mirror_read(vdev_t *vdev, const blkptr_t *bp, void *buf,
    off_t offset, size_t bytes)
{
	vdev_t *kid;
	int rc;

	rc = EIO;
	STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) {
		if (kid->v_state != VDEV_STATE_HEALTHY)
			continue;
		rc = kid->v_read(kid, bp, buf, offset, bytes);
		if (!rc)
			return (0);
	}

	return (rc);
}

static int
vdev_replacing_read(vdev_t *vdev, const blkptr_t *bp, void *buf,
    off_t offset, size_t bytes)
{
	vdev_t *kid;

	/*
	 * Here we should have two kids:
	 * The first is the vdev being replaced; it is the only one we
	 * can trust to have valid data, but it might not be present.
	 * The second is the vdev we are replacing it with.  It is most
	 * likely healthy, but we can't trust it to have the data we
	 * need, so we won't use it.
	 */
	kid = STAILQ_FIRST(&vdev->v_children);
	if (kid == NULL)
		return (EIO);
	if (kid->v_state != VDEV_STATE_HEALTHY)
		return (EIO);
	return (kid->v_read(kid, bp, buf, offset, bytes));
}
static vdev_t *
vdev_find(uint64_t guid)
{
	vdev_t *vdev;

	STAILQ_FOREACH(vdev, &zfs_vdevs, v_alllink)
		if (vdev->v_guid == guid)
			return (vdev);

	return (NULL);
}

static vdev_t *
vdev_create(uint64_t guid, vdev_read_t *_read)
{
	vdev_t *vdev;
	vdev_indirect_config_t *vic;

	vdev = calloc(1, sizeof(vdev_t));
	if (vdev != NULL) {
		STAILQ_INIT(&vdev->v_children);
		vdev->v_guid = guid;
		vdev->v_read = _read;

		/*
		 * root vdev has no read function.
		 * We only point root vdev from spa.
		 */
		if (_read != NULL) {
			vic = &vdev->vdev_indirect_config;
			vic->vic_prev_indirect_vdev = UINT64_MAX;
			STAILQ_INSERT_TAIL(&zfs_vdevs, vdev, v_alllink);
		}
	}

	return (vdev);
}

static void
vdev_set_initial_state(vdev_t *vdev, const unsigned char *nvlist)
{
	uint64_t is_offline, is_faulted, is_degraded, is_removed, isnt_present;
	uint64_t is_log;

	is_offline = is_removed = is_faulted = is_degraded = isnt_present = 0;
	is_log = 0;
	(void) nvlist_find(nvlist, ZPOOL_CONFIG_OFFLINE, DATA_TYPE_UINT64, NULL,
	    &is_offline);
	(void) nvlist_find(nvlist, ZPOOL_CONFIG_REMOVED, DATA_TYPE_UINT64, NULL,
	    &is_removed);
	(void) nvlist_find(nvlist, ZPOOL_CONFIG_FAULTED, DATA_TYPE_UINT64, NULL,
	    &is_faulted);
	(void) nvlist_find(nvlist, ZPOOL_CONFIG_DEGRADED, DATA_TYPE_UINT64,
	    NULL, &is_degraded);
	(void) nvlist_find(nvlist, ZPOOL_CONFIG_NOT_PRESENT, DATA_TYPE_UINT64,
	    NULL, &isnt_present);
	(void) nvlist_find(nvlist, ZPOOL_CONFIG_IS_LOG, DATA_TYPE_UINT64, NULL,
	    &is_log);

	if (is_offline != 0)
		vdev->v_state = VDEV_STATE_OFFLINE;
	else if (is_removed != 0)
		vdev->v_state = VDEV_STATE_REMOVED;
	else if (is_faulted != 0)
		vdev->v_state = VDEV_STATE_FAULTED;
	else if (is_degraded != 0)
		vdev->v_state = VDEV_STATE_DEGRADED;
	else if (isnt_present != 0)
		vdev->v_state = VDEV_STATE_CANT_OPEN;

	vdev->v_islog = is_log == 1;
}
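/*
 * vdev_init() below builds a vdev from its label/config nvlist.  For
 * reference, the keys it consumes are ZPOOL_CONFIG_ID and
 * ZPOOL_CONFIG_TYPE (mandatory), plus ZPOOL_CONFIG_ASHIFT,
 * ZPOOL_CONFIG_ASIZE, ZPOOL_CONFIG_NPARITY and ZPOOL_CONFIG_PATH when
 * present; indirect vdevs additionally carry the
 * ZPOOL_CONFIG_INDIRECT_OBJECT, ZPOOL_CONFIG_INDIRECT_BIRTHS and
 * ZPOOL_CONFIG_PREV_INDIRECT_VDEV values.  (Summary of the code below.)
 */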
static int
vdev_init(uint64_t guid, const unsigned char *nvlist, vdev_t **vdevp)
{
	uint64_t id, ashift, asize, nparity;
	const char *path;
	const char *type;
	vdev_t *vdev;

	if (nvlist_find(nvlist, ZPOOL_CONFIG_ID, DATA_TYPE_UINT64, NULL, &id) ||
	    nvlist_find(nvlist, ZPOOL_CONFIG_TYPE, DATA_TYPE_STRING,
	    NULL, &type)) {
		return (ENOENT);
	}

	if (strcmp(type, VDEV_TYPE_MIRROR) != 0 &&
	    strcmp(type, VDEV_TYPE_DISK) != 0 &&
#ifdef ZFS_TEST
	    strcmp(type, VDEV_TYPE_FILE) != 0 &&
#endif
	    strcmp(type, VDEV_TYPE_RAIDZ) != 0 &&
	    strcmp(type, VDEV_TYPE_INDIRECT) != 0 &&
	    strcmp(type, VDEV_TYPE_REPLACING) != 0) {
		printf("ZFS: can only boot from disk, mirror, raidz1, "
		    "raidz2 and raidz3 vdevs\n");
		return (EIO);
	}

	if (strcmp(type, VDEV_TYPE_MIRROR) == 0)
		vdev = vdev_create(guid, vdev_mirror_read);
	else if (strcmp(type, VDEV_TYPE_RAIDZ) == 0)
		vdev = vdev_create(guid, vdev_raidz_read);
	else if (strcmp(type, VDEV_TYPE_REPLACING) == 0)
		vdev = vdev_create(guid, vdev_replacing_read);
	else if (strcmp(type, VDEV_TYPE_INDIRECT) == 0) {
		vdev_indirect_config_t *vic;

		vdev = vdev_create(guid, vdev_indirect_read);
		if (vdev != NULL) {
			vdev->v_state = VDEV_STATE_HEALTHY;
			vic = &vdev->vdev_indirect_config;

			nvlist_find(nvlist,
			    ZPOOL_CONFIG_INDIRECT_OBJECT,
			    DATA_TYPE_UINT64,
			    NULL, &vic->vic_mapping_object);
			nvlist_find(nvlist,
			    ZPOOL_CONFIG_INDIRECT_BIRTHS,
			    DATA_TYPE_UINT64,
			    NULL, &vic->vic_births_object);
			nvlist_find(nvlist,
			    ZPOOL_CONFIG_PREV_INDIRECT_VDEV,
			    DATA_TYPE_UINT64,
			    NULL, &vic->vic_prev_indirect_vdev);
		}
	} else {
		vdev = vdev_create(guid, vdev_disk_read);
	}

	if (vdev == NULL)
		return (ENOMEM);

	vdev_set_initial_state(vdev, nvlist);
	vdev->v_id = id;
	if (nvlist_find(nvlist, ZPOOL_CONFIG_ASHIFT,
	    DATA_TYPE_UINT64, NULL, &ashift) == 0)
		vdev->v_ashift = ashift;

	if (nvlist_find(nvlist, ZPOOL_CONFIG_ASIZE,
	    DATA_TYPE_UINT64, NULL, &asize) == 0) {
		vdev->v_psize = asize +
		    VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
	}

	if (nvlist_find(nvlist, ZPOOL_CONFIG_NPARITY,
	    DATA_TYPE_UINT64, NULL, &nparity) == 0)
		vdev->v_nparity = nparity;

	if (nvlist_find(nvlist, ZPOOL_CONFIG_PATH,
	    DATA_TYPE_STRING, NULL, &path) == 0) {
		if (strncmp(path, "/dev/", 5) == 0)
			path += 5;
		vdev->v_name = strdup(path);
	} else {
		char *name;

		name = NULL;
		if (strcmp(type, "raidz") == 0) {
			if (vdev->v_nparity < 1 ||
			    vdev->v_nparity > 3) {
				printf("ZFS: can only boot from disk, "
				    "mirror, raidz1, raidz2 and raidz3 "
				    "vdevs\n");
				return (EIO);
			}
			(void) asprintf(&name, "%s%d-%" PRIu64, type,
			    vdev->v_nparity, id);
		} else {
			(void) asprintf(&name, "%s-%" PRIu64, type, id);
		}
		vdev->v_name = name;
	}
	*vdevp = vdev;
	return (0);
}

/*
 * Find slot for vdev. We return either NULL to signal to use
 * STAILQ_INSERT_HEAD, or we return link element to be used with
 * STAILQ_INSERT_AFTER.
 */
static vdev_t *
vdev_find_previous(vdev_t *top_vdev, vdev_t *vdev)
{
	vdev_t *v, *previous;

	if (STAILQ_EMPTY(&top_vdev->v_children))
		return (NULL);

	previous = NULL;
	STAILQ_FOREACH(v, &top_vdev->v_children, v_childlink) {
		if (v->v_id > vdev->v_id)
			return (previous);

		if (v->v_id == vdev->v_id)
			return (v);

		if (v->v_id < vdev->v_id)
			previous = v;
	}
	return (previous);
}

static size_t
vdev_child_count(vdev_t *vdev)
{
	vdev_t *v;
	size_t count;

	count = 0;
	STAILQ_FOREACH(v, &vdev->v_children, v_childlink) {
		count++;
	}
	return (count);
}

/*
 * Insert vdev into top_vdev children list. List is ordered by v_id.
 */
static void
vdev_insert(vdev_t *top_vdev, vdev_t *vdev)
{
	vdev_t *previous;
	size_t count;

	/*
	 * Top-level vdevs can appear in random order, depending on how
	 * the firmware presents the disk devices.
	 * However, we insert each vdev so that the list stays ordered
	 * by v_id, which lets us use either STAILQ_INSERT_HEAD or
	 * STAILQ_INSERT_AFTER, as STAILQ has no insert-before.
	 */
	previous = vdev_find_previous(top_vdev, vdev);

	if (previous == NULL) {
		STAILQ_INSERT_HEAD(&top_vdev->v_children, vdev, v_childlink);
		count = vdev_child_count(top_vdev);
		if (top_vdev->v_nchildren < count)
			top_vdev->v_nchildren = count;
		return;
	}

	if (previous->v_id == vdev->v_id)
		return;

	STAILQ_INSERT_AFTER(&top_vdev->v_children, previous, vdev, v_childlink);
	count = vdev_child_count(top_vdev);
	if (top_vdev->v_nchildren < count)
		top_vdev->v_nchildren = count;
}

static int
vdev_from_nvlist(spa_t *spa, uint64_t top_guid, const unsigned char *nvlist)
{
	vdev_t *top_vdev, *vdev;
	const unsigned char *kids;
	int rc, nkids;

	/* Get top vdev. */
	top_vdev = vdev_find(top_guid);
	if (top_vdev == NULL) {
		rc = vdev_init(top_guid, nvlist, &top_vdev);
		if (rc != 0)
			return (rc);
		top_vdev->v_spa = spa;
		top_vdev->v_top = top_vdev;
		vdev_insert(spa->spa_root_vdev, top_vdev);
	}

	/* Add children if there are any. */
	rc = nvlist_find(nvlist, ZPOOL_CONFIG_CHILDREN, DATA_TYPE_NVLIST_ARRAY,
	    &nkids, &kids);
	if (rc == 0) {
		for (int i = 0; i < nkids; i++) {
			uint64_t guid;

			rc = nvlist_find(kids, ZPOOL_CONFIG_GUID,
			    DATA_TYPE_UINT64, NULL, &guid);
			if (rc != 0)
				return (rc);
			rc = vdev_init(guid, kids, &vdev);
			if (rc != 0)
				return (rc);

			vdev->v_spa = spa;
			vdev->v_top = top_vdev;
			vdev_insert(top_vdev, vdev);

			kids = nvlist_next(kids);
		}
	} else {
		rc = 0;
	}

	return (rc);
}

static int
vdev_init_from_label(spa_t *spa, const unsigned char *nvlist)
{
	uint64_t pool_guid, top_guid;
	const unsigned char *vdevs;

	if (nvlist_find(nvlist, ZPOOL_CONFIG_POOL_GUID, DATA_TYPE_UINT64,
	    NULL, &pool_guid) ||
	    nvlist_find(nvlist, ZPOOL_CONFIG_TOP_GUID, DATA_TYPE_UINT64,
	    NULL, &top_guid) ||
	    nvlist_find(nvlist, ZPOOL_CONFIG_VDEV_TREE, DATA_TYPE_NVLIST,
	    NULL, &vdevs)) {
		printf("ZFS: can't find vdev details\n");
		return (ENOENT);
	}

	return (vdev_from_nvlist(spa, top_guid, vdevs));
}

static void
vdev_set_state(vdev_t *vdev)
{
	vdev_t *kid;
	int good_kids;
	int bad_kids;

	STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) {
		vdev_set_state(kid);
	}

	/*
	 * A mirror or raidz is healthy if all its kids are healthy.  A
	 * mirror is degraded if some (but not all) of its kids are
	 * healthy; a raidz is degraded if at most nparity kids are
	 * offline.
	 */
	if (STAILQ_FIRST(&vdev->v_children)) {
		good_kids = 0;
		bad_kids = 0;
		STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) {
			if (kid->v_state == VDEV_STATE_HEALTHY)
				good_kids++;
			else
				bad_kids++;
		}
		if (bad_kids == 0) {
			vdev->v_state = VDEV_STATE_HEALTHY;
		} else {
			if (vdev->v_read == vdev_mirror_read) {
				if (good_kids) {
					vdev->v_state = VDEV_STATE_DEGRADED;
				} else {
					vdev->v_state = VDEV_STATE_OFFLINE;
				}
			} else if (vdev->v_read == vdev_raidz_read) {
				if (bad_kids > vdev->v_nparity) {
					vdev->v_state = VDEV_STATE_OFFLINE;
				} else {
					vdev->v_state = VDEV_STATE_DEGRADED;
				}
			}
		}
	}
}

static int
vdev_update_from_nvlist(uint64_t top_guid, const unsigned char *nvlist)
{
	vdev_t *vdev;
	const unsigned char *kids;
	int rc, nkids;

	/* Update top vdev. */
	vdev = vdev_find(top_guid);
	if (vdev != NULL)
		vdev_set_initial_state(vdev, nvlist);

	/* Update children if there are any. */
	rc = nvlist_find(nvlist, ZPOOL_CONFIG_CHILDREN, DATA_TYPE_NVLIST_ARRAY,
	    &nkids, &kids);
	if (rc == 0) {
		for (int i = 0; i < nkids; i++) {
			uint64_t guid;

			rc = nvlist_find(kids, ZPOOL_CONFIG_GUID,
			    DATA_TYPE_UINT64, NULL, &guid);
			if (rc != 0)
				break;

			vdev = vdev_find(guid);
			if (vdev != NULL)
				vdev_set_initial_state(vdev, kids);

			kids = nvlist_next(kids);
		}
	} else {
		rc = 0;
	}

	return (rc);
}

static int
vdev_init_from_nvlist(spa_t *spa, const unsigned char *nvlist)
{
	uint64_t pool_guid, vdev_children;
	const unsigned char *vdevs, *kids;
	int rc, nkids;

	if (nvlist_find(nvlist, ZPOOL_CONFIG_POOL_GUID, DATA_TYPE_UINT64,
	    NULL, &pool_guid) ||
	    nvlist_find(nvlist, ZPOOL_CONFIG_VDEV_CHILDREN, DATA_TYPE_UINT64,
	    NULL, &vdev_children) ||
	    nvlist_find(nvlist, ZPOOL_CONFIG_VDEV_TREE, DATA_TYPE_NVLIST,
	    NULL, &vdevs)) {
		printf("ZFS: can't find vdev details\n");
		return (ENOENT);
	}

	/* Wrong guid?! */
	if (spa->spa_guid != pool_guid)
		return (EIO);

	spa->spa_root_vdev->v_nchildren = vdev_children;

	rc = nvlist_find(vdevs, ZPOOL_CONFIG_CHILDREN, DATA_TYPE_NVLIST_ARRAY,
	    &nkids, &kids);

	/*
	 * MOS config has at least one child for root vdev.
	 */
	if (rc != 0)
		return (EIO);

	for (int i = 0; i < nkids; i++) {
		uint64_t guid;
		vdev_t *vdev;

		rc = nvlist_find(kids, ZPOOL_CONFIG_GUID, DATA_TYPE_UINT64,
		    NULL, &guid);
		if (rc != 0)
			break;
		vdev = vdev_find(guid);
		/*
		 * Top level vdev is missing, create it.
		 */
		if (vdev == NULL)
			rc = vdev_from_nvlist(spa, guid, kids);
		else
			rc = vdev_update_from_nvlist(guid, kids);
		if (rc != 0)
			break;
		kids = nvlist_next(kids);
	}

	/*
	 * Re-evaluate top-level vdev state.
	 */
	vdev_set_state(spa->spa_root_vdev);

	return (rc);
}

static spa_t *
spa_find_by_guid(uint64_t guid)
{
	spa_t *spa;

	STAILQ_FOREACH(spa, &zfs_pools, spa_link)
		if (spa->spa_guid == guid)
			return (spa);

	return (NULL);
}

static spa_t *
spa_find_by_name(const char *name)
{
	spa_t *spa;

	STAILQ_FOREACH(spa, &zfs_pools, spa_link)
		if (strcmp(spa->spa_name, name) == 0)
			return (spa);

	return (NULL);
}

#ifdef BOOT2
static spa_t *
spa_get_primary(void)
{

	return (STAILQ_FIRST(&zfs_pools));
}

static vdev_t *
spa_get_primary_vdev(const spa_t *spa)
{
	vdev_t *vdev;
	vdev_t *kid;

	if (spa == NULL)
		spa = spa_get_primary();
	if (spa == NULL)
		return (NULL);
	vdev = spa->spa_root_vdev;
	if (vdev == NULL)
		return (NULL);
	for (kid = STAILQ_FIRST(&vdev->v_children); kid != NULL;
	    kid = STAILQ_FIRST(&vdev->v_children))
		vdev = kid;
	return (vdev);
}
#endif

static spa_t *
spa_create(uint64_t guid, const char *name)
{
	spa_t *spa;

	if ((spa = calloc(1, sizeof(spa_t))) == NULL)
		return (NULL);
	if ((spa->spa_name = strdup(name)) == NULL) {
		free(spa);
		return (NULL);
	}
	spa->spa_guid = guid;
	spa->spa_root_vdev = vdev_create(guid, NULL);
	if (spa->spa_root_vdev == NULL) {
		free(spa->spa_name);
		free(spa);
		return (NULL);
	}
	spa->spa_root_vdev->v_name = strdup("root");
	STAILQ_INSERT_TAIL(&zfs_pools, spa, spa_link);

	return (spa);
}

static const char *
state_name(vdev_state_t state)
{
	static const char *names[] = {
		"UNKNOWN",
		"CLOSED",
		"OFFLINE",
		"REMOVED",
		"CANT_OPEN",
		"FAULTED",
		"DEGRADED",
		"ONLINE"
	};
	return (names[state]);
}

#ifdef BOOT2

#define pager_printf printf

#else

static int
pager_printf(const char *fmt, ...)
{
	char line[80];
	va_list args;

	va_start(args, fmt);
	vsnprintf(line, sizeof(line), fmt, args);
	va_end(args);
	return (pager_output(line));
}

#endif

#define STATUS_FORMAT	" %s %s\n"

static int
print_state(int indent, const char *name, vdev_state_t state)
{
	int i;
	char buf[512];

	buf[0] = 0;
	for (i = 0; i < indent; i++)
		strcat(buf, " ");
	strcat(buf, name);
	return (pager_printf(STATUS_FORMAT, buf, state_name(state)));
}

static int
vdev_status(vdev_t *vdev, int indent)
{
	vdev_t *kid;
	int ret;

	if (vdev->v_islog) {
		(void) pager_output(" logs\n");
		indent++;
	}

	ret = print_state(indent, vdev->v_name, vdev->v_state);
	if (ret != 0)
		return (ret);

	STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) {
		ret = vdev_status(kid, indent + 1);
		if (ret != 0)
			return (ret);
	}
	return (ret);
}

static int
spa_status(spa_t *spa)
{
	static char bootfs[ZFS_MAXNAMELEN];
	uint64_t rootid;
	vdev_list_t *vlist;
	vdev_t *vdev;
	int good_kids, bad_kids, degraded_kids, ret;
	vdev_state_t state;

	ret = pager_printf(" pool: %s\n", spa->spa_name);
	if (ret != 0)
		return (ret);

	if (zfs_get_root(spa, &rootid) == 0 &&
	    zfs_rlookup(spa, rootid, bootfs) == 0) {
		if (bootfs[0] == '\0')
			ret = pager_printf("bootfs: %s\n", spa->spa_name);
		else
			ret = pager_printf("bootfs: %s/%s\n", spa->spa_name,
			    bootfs);
		if (ret != 0)
			return (ret);
	}
	ret = pager_printf("config:\n\n");
	if (ret != 0)
		return (ret);
	ret = pager_printf(STATUS_FORMAT, "NAME", "STATE");
	if (ret != 0)
		return (ret);

	good_kids = 0;
	degraded_kids = 0;
	bad_kids = 0;
	vlist = &spa->spa_root_vdev->v_children;
	STAILQ_FOREACH(vdev, vlist, v_childlink) {
		if (vdev->v_state == VDEV_STATE_HEALTHY)
			good_kids++;
		else if (vdev->v_state == VDEV_STATE_DEGRADED)
			degraded_kids++;
		else
			bad_kids++;
	}

	state = VDEV_STATE_CLOSED;
	if (good_kids > 0 && (degraded_kids + bad_kids) == 0)
		state = VDEV_STATE_HEALTHY;
	else if ((good_kids + degraded_kids) > 0)
		state = VDEV_STATE_DEGRADED;

	ret = print_state(0, spa->spa_name, state);
	if (ret != 0)
		return (ret);

	STAILQ_FOREACH(vdev, vlist, v_childlink) {
		ret = vdev_status(vdev, 1);
		if (ret != 0)
			return (ret);
	}
	return (ret);
}

static int
spa_all_status(void)
{
	spa_t *spa;
	int first = 1, ret = 0;

	STAILQ_FOREACH(spa, &zfs_pools, spa_link) {
		if (!first) {
			ret = pager_printf("\n");
			if (ret != 0)
				return (ret);
		}
		first = 0;
		ret = spa_status(spa);
		if (ret != 0)
			return (ret);
	}
	return (ret);
}

static uint64_t
vdev_label_offset(uint64_t psize, int l, uint64_t offset)
{
	uint64_t label_offset;

	if (l < VDEV_LABELS / 2)
		label_offset = 0;
	else
		label_offset = psize - VDEV_LABELS * sizeof (vdev_label_t);

	return (offset + l * sizeof (vdev_label_t) + label_offset);
}
static int
vdev_uberblock_compare(const uberblock_t *ub1, const uberblock_t *ub2)
{
	unsigned int seq1 = 0;
	unsigned int seq2 = 0;
	int cmp = AVL_CMP(ub1->ub_txg, ub2->ub_txg);

	if (cmp != 0)
		return (cmp);

	cmp = AVL_CMP(ub1->ub_timestamp, ub2->ub_timestamp);
	if (cmp != 0)
		return (cmp);

	if (MMP_VALID(ub1) && MMP_SEQ_VALID(ub1))
		seq1 = MMP_SEQ(ub1);

	if (MMP_VALID(ub2) && MMP_SEQ_VALID(ub2))
		seq2 = MMP_SEQ(ub2);

	return (AVL_CMP(seq1, seq2));
}

static int
uberblock_verify(uberblock_t *ub)
{
	if (ub->ub_magic == BSWAP_64((uint64_t)UBERBLOCK_MAGIC)) {
		byteswap_uint64_array(ub, sizeof (uberblock_t));
	}

	if (ub->ub_magic != UBERBLOCK_MAGIC ||
	    !SPA_VERSION_IS_SUPPORTED(ub->ub_version))
		return (EINVAL);

	return (0);
}

static int
vdev_label_read(vdev_t *vd, int l, void *buf, uint64_t offset,
    size_t size)
{
	blkptr_t bp;
	off_t off;

	off = vdev_label_offset(vd->v_psize, l, offset);

	BP_ZERO(&bp);
	BP_SET_LSIZE(&bp, size);
	BP_SET_PSIZE(&bp, size);
	BP_SET_CHECKSUM(&bp, ZIO_CHECKSUM_LABEL);
	BP_SET_COMPRESS(&bp, ZIO_COMPRESS_OFF);
	DVA_SET_OFFSET(BP_IDENTITY(&bp), off);
	ZIO_SET_CHECKSUM(&bp.blk_cksum, off, 0, 0, 0);

	return (vdev_read_phys(vd, &bp, buf, off, size));
}
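/*
 * Worked example for vdev_label_offset() above: with VDEV_LABELS == 4
 * (as in ZFS), labels 0 and 1 sit at the front of the device and
 * labels 2 and 3 at the end, so for a device of size P the label base
 * offsets are 0, sizeof (vdev_label_t), P - 2 * sizeof (vdev_label_t)
 * and P - sizeof (vdev_label_t).
 *
 * Note also that label data carries an embedded checksum salted with
 * the on-disk byte offset, which is why vdev_label_read() above seeds
 * bp.blk_cksum with "off" before handing the artificial BP to
 * vdev_read_phys() for verification.
 */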
static unsigned char *
vdev_label_read_config(vdev_t *vd, uint64_t txg)
{
	vdev_phys_t *label;
	uint64_t best_txg = 0;
	uint64_t label_txg = 0;
	uint64_t asize;
	unsigned char *nvl;
	size_t nvl_size;
	int error;

	label = malloc(sizeof (vdev_phys_t));
	if (label == NULL)
		return (NULL);

	nvl_size = VDEV_PHYS_SIZE - sizeof (zio_eck_t) - 4;
	nvl = malloc(nvl_size);
	if (nvl == NULL)
		goto done;

	for (int l = 0; l < VDEV_LABELS; l++) {
		const unsigned char *nvlist;

		if (vdev_label_read(vd, l, label,
		    offsetof(vdev_label_t, vl_vdev_phys),
		    sizeof (vdev_phys_t)))
			continue;

		if (label->vp_nvlist[0] != NV_ENCODE_XDR)
			continue;

		nvlist = (const unsigned char *) label->vp_nvlist + 4;
		error = nvlist_find(nvlist, ZPOOL_CONFIG_POOL_TXG,
		    DATA_TYPE_UINT64, NULL, &label_txg);
		if (error != 0 || label_txg == 0) {
			memcpy(nvl, nvlist, nvl_size);
			goto done;
		}

		if (label_txg <= txg && label_txg > best_txg) {
			best_txg = label_txg;
			memcpy(nvl, nvlist, nvl_size);

			/*
			 * Use asize from pool config.  We need this
			 * because the BIOS may report a bad value.
			 */
			if (nvlist_find(nvlist, ZPOOL_CONFIG_ASIZE,
			    DATA_TYPE_UINT64, NULL, &asize) == 0) {
				vd->v_psize = asize +
				    VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
			}
		}
	}

	if (best_txg == 0) {
		free(nvl);
		nvl = NULL;
	}
done:
	free(label);
	return (nvl);
}

static void
vdev_uberblock_load(vdev_t *vd, uberblock_t *ub)
{
	uberblock_t *buf;

	buf = malloc(VDEV_UBERBLOCK_SIZE(vd));
	if (buf == NULL)
		return;

	for (int l = 0; l < VDEV_LABELS; l++) {
		for (int n = 0; n < VDEV_UBERBLOCK_COUNT(vd); n++) {
			if (vdev_label_read(vd, l, buf,
			    VDEV_UBERBLOCK_OFFSET(vd, n),
			    VDEV_UBERBLOCK_SIZE(vd)))
				continue;
			if (uberblock_verify(buf) != 0)
				continue;

			if (vdev_uberblock_compare(buf, ub) > 0)
				*ub = *buf;
		}
	}
	free(buf);
}

static int
vdev_probe(vdev_phys_read_t *_read, void *read_priv, spa_t **spap)
{
	vdev_t vtmp;
	spa_t *spa;
	vdev_t *vdev;
	unsigned char *nvlist;
	uint64_t val;
	uint64_t guid, vdev_children;
	uint64_t pool_txg, pool_guid;
	const char *pool_name;
	const unsigned char *features;
	int rc;

	/*
	 * Load the vdev label and figure out which
	 * uberblock is most current.
	 */
	memset(&vtmp, 0, sizeof(vtmp));
	vtmp.v_phys_read = _read;
	vtmp.v_read_priv = read_priv;
	vtmp.v_psize = P2ALIGN(ldi_get_size(read_priv),
	    (uint64_t)sizeof (vdev_label_t));

	/* Test for minimum device size. */
	if (vtmp.v_psize < SPA_MINDEVSIZE)
		return (EIO);

	nvlist = vdev_label_read_config(&vtmp, UINT64_MAX);
	if (nvlist == NULL)
		return (EIO);

	if (nvlist_find(nvlist, ZPOOL_CONFIG_VERSION, DATA_TYPE_UINT64,
	    NULL, &val) != 0) {
		free(nvlist);
		return (EIO);
	}

	if (!SPA_VERSION_IS_SUPPORTED(val)) {
		printf("ZFS: unsupported ZFS version %u (should be %u)\n",
		    (unsigned)val, (unsigned)SPA_VERSION);
		free(nvlist);
		return (EIO);
	}

	/* Check ZFS features for read */
	if (nvlist_find(nvlist, ZPOOL_CONFIG_FEATURES_FOR_READ,
	    DATA_TYPE_NVLIST, NULL, &features) == 0 &&
	    nvlist_check_features_for_read(features) != 0) {
		free(nvlist);
		return (EIO);
	}

	if (nvlist_find(nvlist, ZPOOL_CONFIG_POOL_STATE, DATA_TYPE_UINT64,
	    NULL, &val) != 0) {
		free(nvlist);
		return (EIO);
	}

	if (val == POOL_STATE_DESTROYED) {
		/* We don't boot from destroyed pools. */
		free(nvlist);
		return (EIO);
	}

	if (nvlist_find(nvlist, ZPOOL_CONFIG_POOL_TXG, DATA_TYPE_UINT64,
	    NULL, &pool_txg) != 0 ||
	    nvlist_find(nvlist, ZPOOL_CONFIG_POOL_GUID, DATA_TYPE_UINT64,
	    NULL, &pool_guid) != 0 ||
	    nvlist_find(nvlist, ZPOOL_CONFIG_POOL_NAME, DATA_TYPE_STRING,
	    NULL, &pool_name) != 0) {
		/*
		 * Cache and spare devices end up here - just ignore
		 * them.
		 */
		free(nvlist);
		return (EIO);
	}

	/*
	 * Create the pool if this is the first time we've seen it.
	 */
	spa = spa_find_by_guid(pool_guid);
	if (spa == NULL) {
		nvlist_find(nvlist, ZPOOL_CONFIG_VDEV_CHILDREN,
		    DATA_TYPE_UINT64, NULL, &vdev_children);
		spa = spa_create(pool_guid, pool_name);
		if (spa == NULL) {
			free(nvlist);
			return (ENOMEM);
		}
		spa->spa_root_vdev->v_nchildren = vdev_children;
	}
	if (pool_txg > spa->spa_txg)
		spa->spa_txg = pool_txg;

	/*
	 * Get the vdev tree and create our in-core copy of it.
	 * If we already have a vdev with this guid, this must
	 * be some kind of alias (overlapping slices, dangerously dedicated
	 * disks etc).
	 */
	if (nvlist_find(nvlist, ZPOOL_CONFIG_GUID, DATA_TYPE_UINT64,
	    NULL, &guid) != 0) {
		free(nvlist);
		return (EIO);
	}
	vdev = vdev_find(guid);
	/* Has this vdev already been inited? */
	if (vdev && vdev->v_phys_read) {
		free(nvlist);
		return (EIO);
	}

	rc = vdev_init_from_label(spa, nvlist);
	free(nvlist);
	if (rc != 0)
		return (rc);

	/*
	 * We should already have created an incomplete vdev for this
	 * vdev. Find it and initialise it with our read proc.
	 */
	vdev = vdev_find(guid);
	if (vdev != NULL) {
		vdev->v_phys_read = _read;
		vdev->v_read_priv = read_priv;
		vdev->v_psize = vtmp.v_psize;
		/*
		 * If no other state is set, mark vdev healthy.
		 */
		if (vdev->v_state == VDEV_STATE_UNKNOWN)
			vdev->v_state = VDEV_STATE_HEALTHY;
	} else {
		printf("ZFS: inconsistent nvlist contents\n");
		return (EIO);
	}

	if (vdev->v_islog)
		spa->spa_with_log = vdev->v_islog;

	/*
	 * Re-evaluate top-level vdev state.
	 */
	vdev_set_state(vdev->v_top);

	/*
	 * Ok, we are happy with the pool so far.  Let's find
	 * the best uberblock and then we can actually access
	 * the contents of the pool.
	 */
	vdev_uberblock_load(vdev, &spa->spa_uberblock);

	if (spap != NULL)
		*spap = spa;
	return (0);
}

static int
ilog2(int n)
{
	int v;

	for (v = 0; v < 32; v++)
		if (n == (1 << v))
			return (v);
	return (-1);
}
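/*
 * Gang blocks: when a block could not be allocated contiguously, its
 * BP points at a gang header (zio_gbh_phys_t) containing up to
 * SPA_GBH_NBLKPTRS constituent BPs whose payloads, concatenated in
 * order, reconstruct the logical block.  zio_read_gang() below reads
 * the header via an artificial BP (sized SPA_GANGBLOCKSIZE and
 * checksummed as ZIO_CHECKSUM_GANG_HEADER), reads each non-hole
 * constituent in turn, and finally verifies the original BP's
 * checksum over the reassembled buffer.  (Descriptive summary of the
 * code below.)
 */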
static int
zio_read_gang(const spa_t *spa, const blkptr_t *bp, void *buf)
{
	blkptr_t gbh_bp;
	zio_gbh_phys_t zio_gb;
	char *pbuf;
	int i;

	/* Artificial BP for gang block header. */
	gbh_bp = *bp;
	BP_SET_PSIZE(&gbh_bp, SPA_GANGBLOCKSIZE);
	BP_SET_LSIZE(&gbh_bp, SPA_GANGBLOCKSIZE);
	BP_SET_CHECKSUM(&gbh_bp, ZIO_CHECKSUM_GANG_HEADER);
	BP_SET_COMPRESS(&gbh_bp, ZIO_COMPRESS_OFF);
	for (i = 0; i < SPA_DVAS_PER_BP; i++)
		DVA_SET_GANG(&gbh_bp.blk_dva[i], 0);

	/* Read gang header block using the artificial BP. */
	if (zio_read(spa, &gbh_bp, &zio_gb))
		return (EIO);

	pbuf = buf;
	for (i = 0; i < SPA_GBH_NBLKPTRS; i++) {
		blkptr_t *gbp = &zio_gb.zg_blkptr[i];

		if (BP_IS_HOLE(gbp))
			continue;
		if (zio_read(spa, gbp, pbuf))
			return (EIO);
		pbuf += BP_GET_PSIZE(gbp);
	}

	if (zio_checksum_verify(spa, bp, buf))
		return (EIO);
	return (0);
}

static int
zio_read(const spa_t *spa, const blkptr_t *bp, void *buf)
{
	int cpfunc = BP_GET_COMPRESS(bp);
	uint64_t align, size;
	void *pbuf;
	int i, error;

	/*
	 * Process data embedded in block pointer
	 */
	if (BP_IS_EMBEDDED(bp)) {
		ASSERT(BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA);

		size = BPE_GET_PSIZE(bp);
		ASSERT(size <= BPE_PAYLOAD_SIZE);

		if (cpfunc != ZIO_COMPRESS_OFF)
			pbuf = zfs_alloc(size);
		else
			pbuf = buf;

		decode_embedded_bp_compressed(bp, pbuf);
		error = 0;

		if (cpfunc != ZIO_COMPRESS_OFF) {
			error = zio_decompress_data(cpfunc, pbuf,
			    size, buf, BP_GET_LSIZE(bp));
			zfs_free(pbuf, size);
		}
		if (error != 0)
			printf("ZFS: i/o error - unable to decompress "
			    "block pointer data, error %d\n", error);
		return (error);
	}

	error = EIO;

	for (i = 0; i < SPA_DVAS_PER_BP; i++) {
		const dva_t *dva = &bp->blk_dva[i];
		vdev_t *vdev;
		vdev_list_t *vlist;
		uint64_t vdevid;
		off_t offset;

		if (!dva->dva_word[0] && !dva->dva_word[1])
			continue;

		vdevid = DVA_GET_VDEV(dva);
		offset = DVA_GET_OFFSET(dva);
		vlist = &spa->spa_root_vdev->v_children;
		STAILQ_FOREACH(vdev, vlist, v_childlink) {
			if (vdev->v_id == vdevid)
				break;
		}
		if (!vdev || !vdev->v_read)
			continue;

		size = BP_GET_PSIZE(bp);
		if (vdev->v_read == vdev_raidz_read) {
			align = 1ULL << vdev->v_ashift;
			if (P2PHASE(size, align) != 0)
				size = P2ROUNDUP(size, align);
		}
		if (size != BP_GET_PSIZE(bp) || cpfunc != ZIO_COMPRESS_OFF)
			pbuf = zfs_alloc(size);
		else
			pbuf = buf;

		if (DVA_GET_GANG(dva))
			error = zio_read_gang(spa, bp, pbuf);
		else
			error = vdev->v_read(vdev, bp, pbuf, offset, size);
		if (error == 0) {
			if (cpfunc != ZIO_COMPRESS_OFF)
				error = zio_decompress_data(cpfunc, pbuf,
				    BP_GET_PSIZE(bp), buf, BP_GET_LSIZE(bp));
			else if (size != BP_GET_PSIZE(bp))
				bcopy(pbuf, buf, BP_GET_PSIZE(bp));
		}
		if (buf != pbuf)
			zfs_free(pbuf, size);
		if (error == 0)
			break;
	}
	if (error != 0)
		printf("ZFS: i/o error - all block copies unavailable\n");
	return (error);
}
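/*
 * dnode_read() below walks a dnode's block tree.  With
 * ibshift = dn_indblkshift - SPA_BLKPTRSHIFT, each indirect block
 * holds 2^ibshift block pointers, and the walk peels off ibshift bits
 * of the data block number per level, top bits first.  Hypothetical
 * example: with 128K indirect blocks, ibshift is 17 - 7 = 10, so each
 * indirect block maps 1024 children.
 */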
2241 */ 2242 while (buflen > 0) { 2243 uint64_t bn = offset / bsize; 2244 int boff = offset % bsize; 2245 int ibn; 2246 const blkptr_t *indbp; 2247 blkptr_t bp; 2248 2249 if (bn > dnode->dn_maxblkid) 2250 return (EIO); 2251 2252 if (dnode == dnode_cache_obj && bn == dnode_cache_bn) 2253 goto cached; 2254 2255 indbp = dnode->dn_blkptr; 2256 for (i = 0; i < nlevels; i++) { 2257 /* 2258 * Copy the bp from the indirect array so that 2259 * we can re-use the scratch buffer for multi-level 2260 * objects. 2261 */ 2262 ibn = bn >> ((nlevels - i - 1) * ibshift); 2263 ibn &= ((1 << ibshift) - 1); 2264 bp = indbp[ibn]; 2265 if (BP_IS_HOLE(&bp)) { 2266 memset(dnode_cache_buf, 0, bsize); 2267 break; 2268 } 2269 rc = zio_read(spa, &bp, dnode_cache_buf); 2270 if (rc) 2271 return (rc); 2272 indbp = (const blkptr_t *) dnode_cache_buf; 2273 } 2274 dnode_cache_obj = dnode; 2275 dnode_cache_bn = bn; 2276 cached: 2277 2278 /* 2279 * The buffer contains our data block. Copy what we 2280 * need from it and loop. 2281 */ 2282 i = bsize - boff; 2283 if (i > buflen) i = buflen; 2284 memcpy(buf, &dnode_cache_buf[boff], i); 2285 buf = ((char *)buf) + i; 2286 offset += i; 2287 buflen -= i; 2288 } 2289 2290 return (0); 2291 } 2292 2293 /* 2294 * Lookup a value in a microzap directory. Assumes that the zap 2295 * scratch buffer contains the directory contents. 2296 */ 2297 static int 2298 mzap_lookup(const dnode_phys_t *dnode, const char *name, uint64_t *value) 2299 { 2300 const mzap_phys_t *mz; 2301 const mzap_ent_phys_t *mze; 2302 size_t size; 2303 int chunks, i; 2304 2305 /* 2306 * Microzap objects use exactly one block. Read the whole 2307 * thing. 2308 */ 2309 size = dnode->dn_datablkszsec * 512; 2310 2311 mz = (const mzap_phys_t *) zap_scratch; 2312 chunks = size / MZAP_ENT_LEN - 1; 2313 2314 for (i = 0; i < chunks; i++) { 2315 mze = &mz->mz_chunk[i]; 2316 if (strcmp(mze->mze_name, name) == 0) { 2317 *value = mze->mze_value; 2318 return (0); 2319 } 2320 } 2321 2322 return (ENOENT); 2323 } 2324 2325 /* 2326 * Compare a name with a zap leaf entry. Return non-zero if the name 2327 * matches. 2328 */ 2329 static int 2330 fzap_name_equal(const zap_leaf_t *zl, const zap_leaf_chunk_t *zc, 2331 const char *name) 2332 { 2333 size_t namelen; 2334 const zap_leaf_chunk_t *nc; 2335 const char *p; 2336 2337 namelen = zc->l_entry.le_name_numints; 2338 2339 nc = &ZAP_LEAF_CHUNK(zl, zc->l_entry.le_name_chunk); 2340 p = name; 2341 while (namelen > 0) { 2342 size_t len; 2343 2344 len = namelen; 2345 if (len > ZAP_LEAF_ARRAY_BYTES) 2346 len = ZAP_LEAF_ARRAY_BYTES; 2347 if (memcmp(p, nc->l_array.la_array, len)) 2348 return (0); 2349 p += len; 2350 namelen -= len; 2351 nc = &ZAP_LEAF_CHUNK(zl, nc->l_array.la_next); 2352 } 2353 2354 return (1); 2355 } 2356 2357 /* 2358 * Extract a uint64_t value from a zap leaf entry. 
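 * Leaf array bytes are assembled most-significant first, so the loop
 * below is simply a big-endian decode, equivalent to be64dec() on the
 * first eight bytes of the value chunk's array.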
2359 */ 2360 static uint64_t 2361 fzap_leaf_value(const zap_leaf_t *zl, const zap_leaf_chunk_t *zc) 2362 { 2363 const zap_leaf_chunk_t *vc; 2364 int i; 2365 uint64_t value; 2366 const uint8_t *p; 2367 2368 vc = &ZAP_LEAF_CHUNK(zl, zc->l_entry.le_value_chunk); 2369 for (i = 0, value = 0, p = vc->l_array.la_array; i < 8; i++) { 2370 value = (value << 8) | p[i]; 2371 } 2372 2373 return (value); 2374 } 2375 2376 static void 2377 stv(int len, void *addr, uint64_t value) 2378 { 2379 switch (len) { 2380 case 1: 2381 *(uint8_t *)addr = value; 2382 return; 2383 case 2: 2384 *(uint16_t *)addr = value; 2385 return; 2386 case 4: 2387 *(uint32_t *)addr = value; 2388 return; 2389 case 8: 2390 *(uint64_t *)addr = value; 2391 return; 2392 } 2393 } 2394 2395 /* 2396 * Extract a array from a zap leaf entry. 2397 */ 2398 static void 2399 fzap_leaf_array(const zap_leaf_t *zl, const zap_leaf_chunk_t *zc, 2400 uint64_t integer_size, uint64_t num_integers, void *buf) 2401 { 2402 uint64_t array_int_len = zc->l_entry.le_value_intlen; 2403 uint64_t value = 0; 2404 uint64_t *u64 = buf; 2405 char *p = buf; 2406 int len = MIN(zc->l_entry.le_value_numints, num_integers); 2407 int chunk = zc->l_entry.le_value_chunk; 2408 int byten = 0; 2409 2410 if (integer_size == 8 && len == 1) { 2411 *u64 = fzap_leaf_value(zl, zc); 2412 return; 2413 } 2414 2415 while (len > 0) { 2416 struct zap_leaf_array *la = &ZAP_LEAF_CHUNK(zl, chunk).l_array; 2417 int i; 2418 2419 ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(zl)); 2420 for (i = 0; i < ZAP_LEAF_ARRAY_BYTES && len > 0; i++) { 2421 value = (value << 8) | la->la_array[i]; 2422 byten++; 2423 if (byten == array_int_len) { 2424 stv(integer_size, p, value); 2425 byten = 0; 2426 len--; 2427 if (len == 0) 2428 return; 2429 p += integer_size; 2430 } 2431 } 2432 chunk = la->la_next; 2433 } 2434 } 2435 2436 static int 2437 fzap_check_size(uint64_t integer_size, uint64_t num_integers) 2438 { 2439 2440 switch (integer_size) { 2441 case 1: 2442 case 2: 2443 case 4: 2444 case 8: 2445 break; 2446 default: 2447 return (EINVAL); 2448 } 2449 2450 if (integer_size * num_integers > ZAP_MAXVALUELEN) 2451 return (E2BIG); 2452 2453 return (0); 2454 } 2455 2456 /* 2457 * Lookup a value in a fatzap directory. Assumes that the zap scratch 2458 * buffer contains the directory header. 2459 */ 2460 static int 2461 fzap_lookup(const spa_t *spa, const dnode_phys_t *dnode, const char *name, 2462 uint64_t integer_size, uint64_t num_integers, void *value) 2463 { 2464 int bsize = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT; 2465 zap_phys_t zh = *(zap_phys_t *)zap_scratch; 2466 fat_zap_t z; 2467 uint64_t *ptrtbl; 2468 uint64_t hash; 2469 int rc; 2470 2471 if (zh.zap_magic != ZAP_MAGIC) 2472 return (EIO); 2473 2474 if ((rc = fzap_check_size(integer_size, num_integers)) != 0) 2475 return (rc); 2476 2477 z.zap_block_shift = ilog2(bsize); 2478 z.zap_phys = (zap_phys_t *)zap_scratch; 2479 2480 /* 2481 * Figure out where the pointer table is and read it in if necessary. 
2482 */ 2483 if (zh.zap_ptrtbl.zt_blk) { 2484 rc = dnode_read(spa, dnode, zh.zap_ptrtbl.zt_blk * bsize, 2485 zap_scratch, bsize); 2486 if (rc) 2487 return (rc); 2488 ptrtbl = (uint64_t *)zap_scratch; 2489 } else { 2490 ptrtbl = &ZAP_EMBEDDED_PTRTBL_ENT(&z, 0); 2491 } 2492 2493 hash = zap_hash(zh.zap_salt, name); 2494 2495 zap_leaf_t zl; 2496 zl.l_bs = z.zap_block_shift; 2497 2498 off_t off = ptrtbl[hash >> (64 - zh.zap_ptrtbl.zt_shift)] << zl.l_bs; 2499 zap_leaf_chunk_t *zc; 2500 2501 rc = dnode_read(spa, dnode, off, zap_scratch, bsize); 2502 if (rc) 2503 return (rc); 2504 2505 zl.l_phys = (zap_leaf_phys_t *)zap_scratch; 2506 2507 /* 2508 * Make sure this chunk matches our hash. 2509 */ 2510 if (zl.l_phys->l_hdr.lh_prefix_len > 0 && 2511 zl.l_phys->l_hdr.lh_prefix != 2512 hash >> (64 - zl.l_phys->l_hdr.lh_prefix_len)) 2513 return (ENOENT); 2514 2515 /* 2516 * Hash within the chunk to find our entry. 2517 */ 2518 int shift = (64 - ZAP_LEAF_HASH_SHIFT(&zl) - 2519 zl.l_phys->l_hdr.lh_prefix_len); 2520 int h = (hash >> shift) & ((1 << ZAP_LEAF_HASH_SHIFT(&zl)) - 1); 2521 h = zl.l_phys->l_hash[h]; 2522 if (h == 0xffff) 2523 return (ENOENT); 2524 zc = &ZAP_LEAF_CHUNK(&zl, h); 2525 while (zc->l_entry.le_hash != hash) { 2526 if (zc->l_entry.le_next == 0xffff) 2527 return (ENOENT); 2528 zc = &ZAP_LEAF_CHUNK(&zl, zc->l_entry.le_next); 2529 } 2530 if (fzap_name_equal(&zl, zc, name)) { 2531 if (zc->l_entry.le_value_intlen * zc->l_entry.le_value_numints > 2532 integer_size * num_integers) 2533 return (E2BIG); 2534 fzap_leaf_array(&zl, zc, integer_size, num_integers, value); 2535 return (0); 2536 } 2537 2538 return (ENOENT); 2539 } 2540 2541 /* 2542 * Lookup a name in a zap object and return its value as a uint64_t. 2543 */ 2544 static int 2545 zap_lookup(const spa_t *spa, const dnode_phys_t *dnode, const char *name, 2546 uint64_t integer_size, uint64_t num_integers, void *value) 2547 { 2548 int rc; 2549 uint64_t zap_type; 2550 size_t size = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT; 2551 2552 rc = dnode_read(spa, dnode, 0, zap_scratch, size); 2553 if (rc) 2554 return (rc); 2555 2556 zap_type = *(uint64_t *)zap_scratch; 2557 if (zap_type == ZBT_MICRO) 2558 return (mzap_lookup(dnode, name, value)); 2559 else if (zap_type == ZBT_HEADER) { 2560 return (fzap_lookup(spa, dnode, name, integer_size, 2561 num_integers, value)); 2562 } 2563 printf("ZFS: invalid zap_type=%d\n", (int)zap_type); 2564 return (EIO); 2565 } 2566 2567 /* 2568 * List a microzap directory. Assumes that the zap scratch buffer contains 2569 * the directory contents. 2570 */ 2571 static int 2572 mzap_list(const dnode_phys_t *dnode, int (*callback)(const char *, uint64_t)) 2573 { 2574 const mzap_phys_t *mz; 2575 const mzap_ent_phys_t *mze; 2576 size_t size; 2577 int chunks, i, rc; 2578 2579 /* 2580 * Microzap objects use exactly one block. Read the whole 2581 * thing. 2582 */ 2583 size = dnode->dn_datablkszsec * 512; 2584 mz = (const mzap_phys_t *) zap_scratch; 2585 chunks = size / MZAP_ENT_LEN - 1; 2586 2587 for (i = 0; i < chunks; i++) { 2588 mze = &mz->mz_chunk[i]; 2589 if (mze->mze_name[0]) { 2590 rc = callback(mze->mze_name, mze->mze_value); 2591 if (rc != 0) 2592 return (rc); 2593 } 2594 } 2595 2596 return (0); 2597 } 2598 2599 /* 2600 * List a fatzap directory. Assumes that the zap scratch buffer contains 2601 * the directory header. 
2602 */ 2603 static int 2604 fzap_list(const spa_t *spa, const dnode_phys_t *dnode, 2605 int (*callback)(const char *, uint64_t)) 2606 { 2607 int bsize = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT; 2608 zap_phys_t zh = *(zap_phys_t *)zap_scratch; 2609 fat_zap_t z; 2610 int i, j, rc; 2611 2612 if (zh.zap_magic != ZAP_MAGIC) 2613 return (EIO); 2614 2615 z.zap_block_shift = ilog2(bsize); 2616 z.zap_phys = (zap_phys_t *)zap_scratch; 2617 2618 /* 2619 * This assumes that the leaf blocks start at block 1. The 2620 * documentation isn't exactly clear on this. 2621 */ 2622 zap_leaf_t zl; 2623 zl.l_bs = z.zap_block_shift; 2624 for (i = 0; i < zh.zap_num_leafs; i++) { 2625 off_t off = ((off_t)(i + 1)) << zl.l_bs; 2626 char name[256], *p; 2627 uint64_t value; 2628 2629 if (dnode_read(spa, dnode, off, zap_scratch, bsize)) 2630 return (EIO); 2631 2632 zl.l_phys = (zap_leaf_phys_t *)zap_scratch; 2633 2634 for (j = 0; j < ZAP_LEAF_NUMCHUNKS(&zl); j++) { 2635 zap_leaf_chunk_t *zc, *nc; 2636 int namelen; 2637 2638 zc = &ZAP_LEAF_CHUNK(&zl, j); 2639 if (zc->l_entry.le_type != ZAP_CHUNK_ENTRY) 2640 continue; 2641 namelen = zc->l_entry.le_name_numints; 2642 if (namelen > sizeof(name)) 2643 namelen = sizeof(name); 2644 2645 /* 2646 * Paste the name back together. 2647 */ 2648 nc = &ZAP_LEAF_CHUNK(&zl, zc->l_entry.le_name_chunk); 2649 p = name; 2650 while (namelen > 0) { 2651 int len; 2652 len = namelen; 2653 if (len > ZAP_LEAF_ARRAY_BYTES) 2654 len = ZAP_LEAF_ARRAY_BYTES; 2655 memcpy(p, nc->l_array.la_array, len); 2656 p += len; 2657 namelen -= len; 2658 nc = &ZAP_LEAF_CHUNK(&zl, nc->l_array.la_next); 2659 } 2660 2661 /* 2662 * Assume the first eight bytes of the value are 2663 * a uint64_t. 2664 */ 2665 value = fzap_leaf_value(&zl, zc); 2666 2667 /* printf("%s 0x%jx\n", name, (uintmax_t)value); */ 2668 rc = callback((const char *)name, value); 2669 if (rc != 0) 2670 return (rc); 2671 } 2672 } 2673 2674 return (0); 2675 } 2676 2677 static int zfs_printf(const char *name, uint64_t value __unused) 2678 { 2679 2680 printf("%s\n", name); 2681 2682 return (0); 2683 } 2684 2685 /* 2686 * List a zap directory. 2687 */ 2688 static int 2689 zap_list(const spa_t *spa, const dnode_phys_t *dnode) 2690 { 2691 uint64_t zap_type; 2692 size_t size = dnode->dn_datablkszsec * 512; 2693 2694 if (dnode_read(spa, dnode, 0, zap_scratch, size)) 2695 return (EIO); 2696 2697 zap_type = *(uint64_t *)zap_scratch; 2698 if (zap_type == ZBT_MICRO) 2699 return (mzap_list(dnode, zfs_printf)); 2700 else 2701 return (fzap_list(spa, dnode, zfs_printf)); 2702 } 2703 2704 static int 2705 objset_get_dnode(const spa_t *spa, const objset_phys_t *os, uint64_t objnum, 2706 dnode_phys_t *dnode) 2707 { 2708 off_t offset; 2709 2710 offset = objnum * sizeof(dnode_phys_t); 2711 return dnode_read(spa, &os->os_meta_dnode, offset, 2712 dnode, sizeof(dnode_phys_t)); 2713 } 2714 2715 static int 2716 mzap_rlookup(const spa_t *spa, const dnode_phys_t *dnode, char *name, 2717 uint64_t value) 2718 { 2719 const mzap_phys_t *mz; 2720 const mzap_ent_phys_t *mze; 2721 size_t size; 2722 int chunks, i; 2723 2724 /* 2725 * Microzap objects use exactly one block. Read the whole 2726 * thing. 
2727 */ 2728 size = dnode->dn_datablkszsec * 512; 2729 2730 mz = (const mzap_phys_t *)zap_scratch; 2731 chunks = size / MZAP_ENT_LEN - 1; 2732 2733 for (i = 0; i < chunks; i++) { 2734 mze = &mz->mz_chunk[i]; 2735 if (value == mze->mze_value) { 2736 strcpy(name, mze->mze_name); 2737 return (0); 2738 } 2739 } 2740 2741 return (ENOENT); 2742 } 2743 2744 static void 2745 fzap_name_copy(const zap_leaf_t *zl, const zap_leaf_chunk_t *zc, char *name) 2746 { 2747 size_t namelen; 2748 const zap_leaf_chunk_t *nc; 2749 char *p; 2750 2751 namelen = zc->l_entry.le_name_numints; 2752 2753 nc = &ZAP_LEAF_CHUNK(zl, zc->l_entry.le_name_chunk); 2754 p = name; 2755 while (namelen > 0) { 2756 size_t len; 2757 len = namelen; 2758 if (len > ZAP_LEAF_ARRAY_BYTES) 2759 len = ZAP_LEAF_ARRAY_BYTES; 2760 memcpy(p, nc->l_array.la_array, len); 2761 p += len; 2762 namelen -= len; 2763 nc = &ZAP_LEAF_CHUNK(zl, nc->l_array.la_next); 2764 } 2765 2766 *p = '\0'; 2767 } 2768 2769 static int 2770 fzap_rlookup(const spa_t *spa, const dnode_phys_t *dnode, char *name, 2771 uint64_t value) 2772 { 2773 int bsize = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT; 2774 zap_phys_t zh = *(zap_phys_t *)zap_scratch; 2775 fat_zap_t z; 2776 int i, j; 2777 2778 if (zh.zap_magic != ZAP_MAGIC) 2779 return (EIO); 2780 2781 z.zap_block_shift = ilog2(bsize); 2782 z.zap_phys = (zap_phys_t *)zap_scratch; 2783 2784 /* 2785 * This assumes that the leaf blocks start at block 1. The 2786 * documentation isn't exactly clear on this. 2787 */ 2788 zap_leaf_t zl; 2789 zl.l_bs = z.zap_block_shift; 2790 for (i = 0; i < zh.zap_num_leafs; i++) { 2791 off_t off = ((off_t)(i + 1)) << zl.l_bs; 2792 2793 if (dnode_read(spa, dnode, off, zap_scratch, bsize)) 2794 return (EIO); 2795 2796 zl.l_phys = (zap_leaf_phys_t *)zap_scratch; 2797 2798 for (j = 0; j < ZAP_LEAF_NUMCHUNKS(&zl); j++) { 2799 zap_leaf_chunk_t *zc; 2800 2801 zc = &ZAP_LEAF_CHUNK(&zl, j); 2802 if (zc->l_entry.le_type != ZAP_CHUNK_ENTRY) 2803 continue; 2804 if (zc->l_entry.le_value_intlen != 8 || 2805 zc->l_entry.le_value_numints != 1) 2806 continue; 2807 2808 if (fzap_leaf_value(&zl, zc) == value) { 2809 fzap_name_copy(&zl, zc, name); 2810 return (0); 2811 } 2812 } 2813 } 2814 2815 return (ENOENT); 2816 } 2817 2818 static int 2819 zap_rlookup(const spa_t *spa, const dnode_phys_t *dnode, char *name, 2820 uint64_t value) 2821 { 2822 int rc; 2823 uint64_t zap_type; 2824 size_t size = dnode->dn_datablkszsec * 512; 2825 2826 rc = dnode_read(spa, dnode, 0, zap_scratch, size); 2827 if (rc) 2828 return (rc); 2829 2830 zap_type = *(uint64_t *)zap_scratch; 2831 if (zap_type == ZBT_MICRO) 2832 return (mzap_rlookup(spa, dnode, name, value)); 2833 else 2834 return (fzap_rlookup(spa, dnode, name, value)); 2835 } 2836 2837 static int 2838 zfs_rlookup(const spa_t *spa, uint64_t objnum, char *result) 2839 { 2840 char name[256]; 2841 char component[256]; 2842 uint64_t dir_obj, parent_obj, child_dir_zapobj; 2843 dnode_phys_t child_dir_zap, dataset, dir, parent; 2844 dsl_dir_phys_t *dd; 2845 dsl_dataset_phys_t *ds; 2846 char *p; 2847 int len; 2848 2849 p = &name[sizeof(name) - 1]; 2850 *p = '\0'; 2851 2852 if (objset_get_dnode(spa, &spa->spa_mos, objnum, &dataset)) { 2853 printf("ZFS: can't find dataset %ju\n", (uintmax_t)objnum); 2854 return (EIO); 2855 } 2856 ds = (dsl_dataset_phys_t *)&dataset.dn_bonus; 2857 dir_obj = ds->ds_dir_obj; 2858 2859 for (;;) { 2860 if (objset_get_dnode(spa, &spa->spa_mos, dir_obj, &dir) != 0) 2861 return (EIO); 2862 dd = (dsl_dir_phys_t *)&dir.dn_bonus; 2863 2864 /* Actual loop condition. 
*/ 2865 parent_obj = dd->dd_parent_obj; 2866 if (parent_obj == 0) 2867 break; 2868 2869 if (objset_get_dnode(spa, &spa->spa_mos, parent_obj, 2870 &parent) != 0) 2871 return (EIO); 2872 dd = (dsl_dir_phys_t *)&parent.dn_bonus; 2873 child_dir_zapobj = dd->dd_child_dir_zapobj; 2874 if (objset_get_dnode(spa, &spa->spa_mos, child_dir_zapobj, 2875 &child_dir_zap) != 0) 2876 return (EIO); 2877 if (zap_rlookup(spa, &child_dir_zap, component, dir_obj) != 0) 2878 return (EIO); 2879 2880 len = strlen(component); 2881 p -= len; 2882 memcpy(p, component, len); 2883 --p; 2884 *p = '/'; 2885 2886 /* Actual loop iteration. */ 2887 dir_obj = parent_obj; 2888 } 2889 2890 if (*p != '\0') 2891 ++p; 2892 strcpy(result, p); 2893 2894 return (0); 2895 } 2896 2897 static int 2898 zfs_lookup_dataset(const spa_t *spa, const char *name, uint64_t *objnum) 2899 { 2900 char element[256]; 2901 uint64_t dir_obj, child_dir_zapobj; 2902 dnode_phys_t child_dir_zap, dir; 2903 dsl_dir_phys_t *dd; 2904 const char *p, *q; 2905 2906 if (objset_get_dnode(spa, &spa->spa_mos, 2907 DMU_POOL_DIRECTORY_OBJECT, &dir)) 2908 return (EIO); 2909 if (zap_lookup(spa, &dir, DMU_POOL_ROOT_DATASET, sizeof (dir_obj), 2910 1, &dir_obj)) 2911 return (EIO); 2912 2913 p = name; 2914 for (;;) { 2915 if (objset_get_dnode(spa, &spa->spa_mos, dir_obj, &dir)) 2916 return (EIO); 2917 dd = (dsl_dir_phys_t *)&dir.dn_bonus; 2918 2919 while (*p == '/') 2920 p++; 2921 /* Actual loop condition #1. */ 2922 if (*p == '\0') 2923 break; 2924 2925 q = strchr(p, '/'); 2926 if (q) { 2927 memcpy(element, p, q - p); 2928 element[q - p] = '\0'; 2929 p = q + 1; 2930 } else { 2931 strcpy(element, p); 2932 p += strlen(p); 2933 } 2934 2935 child_dir_zapobj = dd->dd_child_dir_zapobj; 2936 if (objset_get_dnode(spa, &spa->spa_mos, child_dir_zapobj, 2937 &child_dir_zap) != 0) 2938 return (EIO); 2939 2940 /* Actual loop condition #2. 
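 * A component missing from the parent's child-directory ZAP ends the
 * walk with ENOENT; on success dir_obj names the next dsl_dir to
 * descend into.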
*/ 2941 if (zap_lookup(spa, &child_dir_zap, element, sizeof (dir_obj), 2942 1, &dir_obj) != 0) 2943 return (ENOENT); 2944 } 2945 2946 *objnum = dd->dd_head_dataset_obj; 2947 return (0); 2948 } 2949 2950 #ifndef BOOT2 2951 static int 2952 zfs_list_dataset(const spa_t *spa, uint64_t objnum/*, int pos, char *entry*/) 2953 { 2954 uint64_t dir_obj, child_dir_zapobj; 2955 dnode_phys_t child_dir_zap, dir, dataset; 2956 dsl_dataset_phys_t *ds; 2957 dsl_dir_phys_t *dd; 2958 2959 if (objset_get_dnode(spa, &spa->spa_mos, objnum, &dataset)) { 2960 printf("ZFS: can't find dataset %ju\n", (uintmax_t)objnum); 2961 return (EIO); 2962 } 2963 ds = (dsl_dataset_phys_t *)&dataset.dn_bonus; 2964 dir_obj = ds->ds_dir_obj; 2965 2966 if (objset_get_dnode(spa, &spa->spa_mos, dir_obj, &dir)) { 2967 printf("ZFS: can't find dirobj %ju\n", (uintmax_t)dir_obj); 2968 return (EIO); 2969 } 2970 dd = (dsl_dir_phys_t *)&dir.dn_bonus; 2971 2972 child_dir_zapobj = dd->dd_child_dir_zapobj; 2973 if (objset_get_dnode(spa, &spa->spa_mos, child_dir_zapobj, 2974 &child_dir_zap) != 0) { 2975 printf("ZFS: can't find child zap %ju\n", (uintmax_t)child_dir_zapobj); 2976 return (EIO); 2977 } 2978 2979 return (zap_list(spa, &child_dir_zap) != 0); 2980 } 2981 2982 int 2983 zfs_callback_dataset(const spa_t *spa, uint64_t objnum, 2984 int (*callback)(const char *, uint64_t)) 2985 { 2986 uint64_t dir_obj, child_dir_zapobj, zap_type; 2987 dnode_phys_t child_dir_zap, dir, dataset; 2988 dsl_dataset_phys_t *ds; 2989 dsl_dir_phys_t *dd; 2990 int err; 2991 2992 err = objset_get_dnode(spa, &spa->spa_mos, objnum, &dataset); 2993 if (err != 0) { 2994 printf("ZFS: can't find dataset %ju\n", (uintmax_t)objnum); 2995 return (err); 2996 } 2997 ds = (dsl_dataset_phys_t *)&dataset.dn_bonus; 2998 dir_obj = ds->ds_dir_obj; 2999 3000 err = objset_get_dnode(spa, &spa->spa_mos, dir_obj, &dir); 3001 if (err != 0) { 3002 printf("ZFS: can't find dirobj %ju\n", (uintmax_t)dir_obj); 3003 return (err); 3004 } 3005 dd = (dsl_dir_phys_t *)&dir.dn_bonus; 3006 3007 child_dir_zapobj = dd->dd_child_dir_zapobj; 3008 err = objset_get_dnode(spa, &spa->spa_mos, child_dir_zapobj, 3009 &child_dir_zap); 3010 if (err != 0) { 3011 printf("ZFS: can't find child zap %ju\n", (uintmax_t)child_dir_zapobj); 3012 return (err); 3013 } 3014 3015 err = dnode_read(spa, &child_dir_zap, 0, zap_scratch, 3016 child_dir_zap.dn_datablkszsec * 512); 3017 if (err != 0) 3018 return (err); 3019 3020 zap_type = *(uint64_t *)zap_scratch; 3021 if (zap_type == ZBT_MICRO) 3022 return (mzap_list(&child_dir_zap, callback)); 3023 else 3024 return (fzap_list(spa, &child_dir_zap, callback)); 3025 } 3026 #endif 3027 3028 /* 3029 * Find the object set given the object number of its dataset object 3030 * and return its details in *objset 3031 */ 3032 static int 3033 zfs_mount_dataset(const spa_t *spa, uint64_t objnum, objset_phys_t *objset) 3034 { 3035 dnode_phys_t dataset; 3036 dsl_dataset_phys_t *ds; 3037 3038 if (objset_get_dnode(spa, &spa->spa_mos, objnum, &dataset)) { 3039 printf("ZFS: can't find dataset %ju\n", (uintmax_t)objnum); 3040 return (EIO); 3041 } 3042 3043 ds = (dsl_dataset_phys_t *)&dataset.dn_bonus; 3044 if (zio_read(spa, &ds->ds_bp, objset)) { 3045 printf("ZFS: can't read object set for dataset %ju\n", 3046 (uintmax_t)objnum); 3047 return (EIO); 3048 } 3049 3050 return (0); 3051 } 3052 3053 /* 3054 * Find the dataset pointed to by the BOOTFS property, or the root 3055 * dataset if there is none, and return its object number in *objid. 3056 */ 3057 static int 3058 zfs_get_root(const spa_t *spa, uint64_t *objid) 3059 {
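	/*
	 * Search order: prefer the pool's "bootfs" property; fall back to
	 * the head dataset of the root dsl_dir when it is unset.
	 */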
3060 dnode_phys_t dir, propdir; 3061 uint64_t props, bootfs, root; 3062 3063 *objid = 0; 3064 3065 /* 3066 * Start with the MOS directory object. 3067 */ 3068 if (objset_get_dnode(spa, &spa->spa_mos, 3069 DMU_POOL_DIRECTORY_OBJECT, &dir)) { 3070 printf("ZFS: can't read MOS object directory\n"); 3071 return (EIO); 3072 } 3073 3074 /* 3075 * Lookup the pool_props and see if we can find a bootfs. 3076 */ 3077 if (zap_lookup(spa, &dir, DMU_POOL_PROPS, 3078 sizeof(props), 1, &props) == 0 && 3079 objset_get_dnode(spa, &spa->spa_mos, props, &propdir) == 0 && 3080 zap_lookup(spa, &propdir, "bootfs", 3081 sizeof(bootfs), 1, &bootfs) == 0 && bootfs != 0) { 3082 *objid = bootfs; 3083 return (0); 3084 } 3085 /* 3086 * Lookup the root dataset directory 3087 */ 3088 if (zap_lookup(spa, &dir, DMU_POOL_ROOT_DATASET, 3089 sizeof(root), 1, &root) || 3090 objset_get_dnode(spa, &spa->spa_mos, root, &dir)) { 3091 printf("ZFS: can't find root dsl_dir\n"); 3092 return (EIO); 3093 } 3094 3095 /* 3096 * Use the information from the dataset directory's bonus buffer 3097 * to find the dataset object and from that the object set itself. 3098 */ 3099 dsl_dir_phys_t *dd = (dsl_dir_phys_t *)&dir.dn_bonus; 3100 *objid = dd->dd_head_dataset_obj; 3101 return (0); 3102 } 3103 3104 static int 3105 zfs_mount(const spa_t *spa, uint64_t rootobj, struct zfsmount *mount) 3106 { 3107 3108 mount->spa = spa; 3109 3110 /* 3111 * Find the root object set if not explicitly provided 3112 */ 3113 if (rootobj == 0 && zfs_get_root(spa, &rootobj)) { 3114 printf("ZFS: can't find root filesystem\n"); 3115 return (EIO); 3116 } 3117 3118 if (zfs_mount_dataset(spa, rootobj, &mount->objset)) { 3119 printf("ZFS: can't open root filesystem\n"); 3120 return (EIO); 3121 } 3122 3123 mount->rootobj = rootobj; 3124 3125 return (0); 3126 } 3127 3128 /* 3129 * Callback function for feature name checks. 3130 */ 3131 static int 3132 check_feature(const char *name, uint64_t value) 3133 { 3134 int i; 3135 3136 if (value == 0) 3137 return (0); 3138 if (name[0] == '\0') 3139 return (0); 3140 3141 for (i = 0; features_for_read[i] != NULL; i++) { 3142 if (strcmp(name, features_for_read[i]) == 0) 3143 return (0); 3144 } 3145 printf("ZFS: unsupported feature: %s\n", name); 3146 return (EIO); 3147 } 3148 3149 /* 3150 * Checks whether the MOS features that are active are supported. 3151 */ 3152 static int 3153 check_mos_features(const spa_t *spa) 3154 { 3155 dnode_phys_t dir; 3156 uint64_t objnum, zap_type; 3157 size_t size; 3158 int rc; 3159 3160 if ((rc = objset_get_dnode(spa, &spa->spa_mos, DMU_OT_OBJECT_DIRECTORY, 3161 &dir)) != 0) 3162 return (rc); 3163 if ((rc = zap_lookup(spa, &dir, DMU_POOL_FEATURES_FOR_READ, 3164 sizeof (objnum), 1, &objnum)) != 0) { 3165 /* 3166 * It is an older pool without features. As we have already 3167 * tested the label, just return without raising an error.
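 * (Pools that do have the feature ZAP are vetted below: each active
 * feature's name is passed to check_feature(), and any name that has
 * a nonzero refcount but is missing from features_for_read[] fails
 * the import with EIO.)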
3168 */ 3169 return (0); 3170 } 3171 3172 if ((rc = objset_get_dnode(spa, &spa->spa_mos, objnum, &dir)) != 0) 3173 return (rc); 3174 3175 if (dir.dn_type != DMU_OTN_ZAP_METADATA) 3176 return (EIO); 3177 3178 size = dir.dn_datablkszsec * 512; 3179 if (dnode_read(spa, &dir, 0, zap_scratch, size)) 3180 return (EIO); 3181 3182 zap_type = *(uint64_t *)zap_scratch; 3183 if (zap_type == ZBT_MICRO) 3184 rc = mzap_list(&dir, check_feature); 3185 else 3186 rc = fzap_list(spa, &dir, check_feature); 3187 3188 return (rc); 3189 } 3190 3191 static int 3192 load_nvlist(spa_t *spa, uint64_t obj, unsigned char **value) 3193 { 3194 dnode_phys_t dir; 3195 size_t size; 3196 int rc; 3197 unsigned char *nv; 3198 3199 *value = NULL; 3200 if ((rc = objset_get_dnode(spa, &spa->spa_mos, obj, &dir)) != 0) 3201 return (rc); 3202 if (dir.dn_type != DMU_OT_PACKED_NVLIST && 3203 dir.dn_bonustype != DMU_OT_PACKED_NVLIST_SIZE) { 3204 return (EIO); 3205 } 3206 3207 if (dir.dn_bonuslen != sizeof (uint64_t)) 3208 return (EIO); 3209 3210 size = *(uint64_t *)DN_BONUS(&dir); 3211 nv = malloc(size); 3212 if (nv == NULL) 3213 return (ENOMEM); 3214 3215 rc = dnode_read(spa, &dir, 0, nv, size); 3216 if (rc != 0) { 3217 free(nv); 3218 nv = NULL; 3219 return (rc); 3220 } 3221 *value = nv; 3222 return (rc); 3223 } 3224 3225 static int 3226 zfs_spa_init(spa_t *spa) 3227 { 3228 dnode_phys_t dir; 3229 uint64_t config_object; 3230 unsigned char *nvlist; 3231 int rc; 3232 3233 if (zio_read(spa, &spa->spa_uberblock.ub_rootbp, &spa->spa_mos)) { 3234 printf("ZFS: can't read MOS of pool %s\n", spa->spa_name); 3235 return (EIO); 3236 } 3237 if (spa->spa_mos.os_type != DMU_OST_META) { 3238 printf("ZFS: corrupted MOS of pool %s\n", spa->spa_name); 3239 return (EIO); 3240 } 3241 3242 if (objset_get_dnode(spa, &spa->spa_mos, DMU_POOL_DIRECTORY_OBJECT, 3243 &dir)) { 3244 printf("ZFS: failed to read pool %s directory object\n", 3245 spa->spa_name); 3246 return (EIO); 3247 } 3248 /* this is allowed to fail, older pools do not have salt */ 3249 rc = zap_lookup(spa, &dir, DMU_POOL_CHECKSUM_SALT, 1, 3250 sizeof (spa->spa_cksum_salt.zcs_bytes), 3251 spa->spa_cksum_salt.zcs_bytes); 3252 3253 rc = check_mos_features(spa); 3254 if (rc != 0) { 3255 printf("ZFS: pool %s is not supported\n", spa->spa_name); 3256 return (rc); 3257 } 3258 3259 rc = zap_lookup(spa, &dir, DMU_POOL_CONFIG, 3260 sizeof (config_object), 1, &config_object); 3261 if (rc != 0) { 3262 printf("ZFS: can not read MOS %s\n", DMU_POOL_CONFIG); 3263 return (EIO); 3264 } 3265 rc = load_nvlist(spa, config_object, &nvlist); 3266 if (rc != 0) 3267 return (rc); 3268 3269 /* Update vdevs from MOS config. 
*/ 3270 rc = vdev_init_from_nvlist(spa, nvlist + 4); 3271 free(nvlist); 3272 return (rc); 3273 } 3274 3275 static int 3276 zfs_dnode_stat(const spa_t *spa, dnode_phys_t *dn, struct stat *sb) 3277 { 3278 3279 if (dn->dn_bonustype != DMU_OT_SA) { 3280 znode_phys_t *zp = (znode_phys_t *)dn->dn_bonus; 3281 3282 sb->st_mode = zp->zp_mode; 3283 sb->st_uid = zp->zp_uid; 3284 sb->st_gid = zp->zp_gid; 3285 sb->st_size = zp->zp_size; 3286 } else { 3287 sa_hdr_phys_t *sahdrp; 3288 int hdrsize; 3289 size_t size = 0; 3290 void *buf = NULL; 3291 3292 if (dn->dn_bonuslen != 0) 3293 sahdrp = (sa_hdr_phys_t *)DN_BONUS(dn); 3294 else { 3295 if ((dn->dn_flags & DNODE_FLAG_SPILL_BLKPTR) != 0) { 3296 blkptr_t *bp = DN_SPILL_BLKPTR(dn); 3297 int error; 3298 3299 size = BP_GET_LSIZE(bp); 3300 buf = zfs_alloc(size); 3301 error = zio_read(spa, bp, buf); 3302 if (error != 0) { 3303 zfs_free(buf, size); 3304 return (error); 3305 } 3306 sahdrp = buf; 3307 } else { 3308 return (EIO); 3309 } 3310 } 3311 hdrsize = SA_HDR_SIZE(sahdrp); 3312 sb->st_mode = *(uint64_t *)((char *)sahdrp + hdrsize + 3313 SA_MODE_OFFSET); 3314 sb->st_uid = *(uint64_t *)((char *)sahdrp + hdrsize + 3315 SA_UID_OFFSET); 3316 sb->st_gid = *(uint64_t *)((char *)sahdrp + hdrsize + 3317 SA_GID_OFFSET); 3318 sb->st_size = *(uint64_t *)((char *)sahdrp + hdrsize + 3319 SA_SIZE_OFFSET); 3320 if (buf != NULL) 3321 zfs_free(buf, size); 3322 } 3323 3324 return (0); 3325 } 3326 3327 static int 3328 zfs_dnode_readlink(const spa_t *spa, dnode_phys_t *dn, char *path, size_t psize) 3329 { 3330 int rc = 0; 3331 3332 if (dn->dn_bonustype == DMU_OT_SA) { 3333 sa_hdr_phys_t *sahdrp = NULL; 3334 size_t size = 0; 3335 void *buf = NULL; 3336 int hdrsize; 3337 char *p; 3338 3339 if (dn->dn_bonuslen != 0) 3340 sahdrp = (sa_hdr_phys_t *)DN_BONUS(dn); 3341 else { 3342 blkptr_t *bp; 3343 3344 if ((dn->dn_flags & DNODE_FLAG_SPILL_BLKPTR) == 0) 3345 return (EIO); 3346 bp = DN_SPILL_BLKPTR(dn); 3347 3348 size = BP_GET_LSIZE(bp); 3349 buf = zfs_alloc(size); 3350 rc = zio_read(spa, bp, buf); 3351 if (rc != 0) { 3352 zfs_free(buf, size); 3353 return (rc); 3354 } 3355 sahdrp = buf; 3356 } 3357 hdrsize = SA_HDR_SIZE(sahdrp); 3358 p = (char *)((uintptr_t)sahdrp + hdrsize + SA_SYMLINK_OFFSET); 3359 memcpy(path, p, psize); 3360 if (buf != NULL) 3361 zfs_free(buf, size); 3362 return (0); 3363 } 3364 /* 3365 * Second test is purely to silence bogus compiler 3366 * warning about accessing past the end of dn_bonus. 3367 */ 3368 if (psize + sizeof(znode_phys_t) <= dn->dn_bonuslen && 3369 sizeof(znode_phys_t) <= sizeof(dn->dn_bonus)) { 3370 memcpy(path, &dn->dn_bonus[sizeof(znode_phys_t)], psize); 3371 } else { 3372 rc = dnode_read(spa, dn, 0, path, psize); 3373 } 3374 return (rc); 3375 } 3376 3377 struct obj_list { 3378 uint64_t objnum; 3379 STAILQ_ENTRY(obj_list) entry; 3380 }; 3381 3382 /* 3383 * Lookup a file and return its dnode. 
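 * The path is resolved one component at a time with ZAP lookups in
 * each directory dnode.  A stack of visited directory object numbers
 * (on_cache) implements "..", and a symlink restarts the walk with the
 * unconsumed remainder of the path appended to the link target.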
3384 */ 3385 static int 3386 zfs_lookup(const struct zfsmount *mount, const char *upath, dnode_phys_t *dnode) 3387 { 3388 int rc; 3389 uint64_t objnum; 3390 const spa_t *spa; 3391 dnode_phys_t dn; 3392 const char *p, *q; 3393 char element[256]; 3394 char path[1024]; 3395 int symlinks_followed = 0; 3396 struct stat sb; 3397 struct obj_list *entry, *tentry; 3398 STAILQ_HEAD(, obj_list) on_cache = STAILQ_HEAD_INITIALIZER(on_cache); 3399 3400 spa = mount->spa; 3401 if (mount->objset.os_type != DMU_OST_ZFS) { 3402 printf("ZFS: unexpected object set type %ju\n", 3403 (uintmax_t)mount->objset.os_type); 3404 return (EIO); 3405 } 3406 3407 if ((entry = malloc(sizeof(struct obj_list))) == NULL) 3408 return (ENOMEM); 3409 3410 /* 3411 * Get the root directory dnode. 3412 */ 3413 rc = objset_get_dnode(spa, &mount->objset, MASTER_NODE_OBJ, &dn); 3414 if (rc) { 3415 free(entry); 3416 return (rc); 3417 } 3418 3419 rc = zap_lookup(spa, &dn, ZFS_ROOT_OBJ, sizeof(objnum), 1, &objnum); 3420 if (rc) { 3421 free(entry); 3422 return (rc); 3423 } 3424 entry->objnum = objnum; 3425 STAILQ_INSERT_HEAD(&on_cache, entry, entry); 3426 3427 rc = objset_get_dnode(spa, &mount->objset, objnum, &dn); 3428 if (rc != 0) 3429 goto done; 3430 3431 p = upath; 3432 while (p && *p) { 3433 rc = objset_get_dnode(spa, &mount->objset, objnum, &dn); 3434 if (rc != 0) 3435 goto done; 3436 3437 while (*p == '/') 3438 p++; 3439 if (*p == '\0') 3440 break; 3441 q = p; 3442 while (*q != '\0' && *q != '/') 3443 q++; 3444 3445 /* skip dot */ 3446 if (p + 1 == q && p[0] == '.') { 3447 p++; 3448 continue; 3449 } 3450 /* double dot */ 3451 if (p + 2 == q && p[0] == '.' && p[1] == '.') { 3452 p += 2; 3453 if (STAILQ_FIRST(&on_cache) == 3454 STAILQ_LAST(&on_cache, obj_list, entry)) { 3455 rc = ENOENT; 3456 goto done; 3457 } 3458 entry = STAILQ_FIRST(&on_cache); 3459 STAILQ_REMOVE_HEAD(&on_cache, entry); 3460 free(entry); 3461 objnum = (STAILQ_FIRST(&on_cache))->objnum; 3462 continue; 3463 } 3464 if (q - p + 1 > sizeof(element)) { 3465 rc = ENAMETOOLONG; 3466 goto done; 3467 } 3468 memcpy(element, p, q - p); 3469 element[q - p] = 0; 3470 p = q; 3471 3472 if ((rc = zfs_dnode_stat(spa, &dn, &sb)) != 0) 3473 goto done; 3474 if (!S_ISDIR(sb.st_mode)) { 3475 rc = ENOTDIR; 3476 goto done; 3477 } 3478 3479 rc = zap_lookup(spa, &dn, element, sizeof (objnum), 1, &objnum); 3480 if (rc) 3481 goto done; 3482 objnum = ZFS_DIRENT_OBJ(objnum); 3483 3484 if ((entry = malloc(sizeof(struct obj_list))) == NULL) { 3485 rc = ENOMEM; 3486 goto done; 3487 } 3488 entry->objnum = objnum; 3489 STAILQ_INSERT_HEAD(&on_cache, entry, entry); 3490 rc = objset_get_dnode(spa, &mount->objset, objnum, &dn); 3491 if (rc) 3492 goto done; 3493 3494 /* 3495 * Check for symlink. 3496 */ 3497 rc = zfs_dnode_stat(spa, &dn, &sb); 3498 if (rc) 3499 goto done; 3500 if (S_ISLNK(sb.st_mode)) { 3501 if (symlinks_followed > 10) { 3502 rc = EMLINK; 3503 goto done; 3504 } 3505 symlinks_followed++; 3506 3507 /* 3508 * Read the link value and copy the tail of our 3509 * current path onto the end. 3510 */ 3511 if (sb.st_size + strlen(p) + 1 > sizeof(path)) { 3512 rc = ENAMETOOLONG; 3513 goto done; 3514 } 3515 strcpy(&path[sb.st_size], p); 3516 3517 rc = zfs_dnode_readlink(spa, &dn, path, sb.st_size); 3518 if (rc != 0) 3519 goto done; 3520 3521 /* 3522 * Restart with the new path, starting either at 3523 * the root or at the parent depending whether or 3524 * not the link is relative. 
3525 */ 3526 p = path; 3527 if (*p == '/') { 3528 while (STAILQ_FIRST(&on_cache) != 3529 STAILQ_LAST(&on_cache, obj_list, entry)) { 3530 entry = STAILQ_FIRST(&on_cache); 3531 STAILQ_REMOVE_HEAD(&on_cache, entry); 3532 free(entry); 3533 } 3534 } else { 3535 entry = STAILQ_FIRST(&on_cache); 3536 STAILQ_REMOVE_HEAD(&on_cache, entry); 3537 free(entry); 3538 } 3539 objnum = (STAILQ_FIRST(&on_cache))->objnum; 3540 } 3541 } 3542 3543 *dnode = dn; 3544 done: 3545 STAILQ_FOREACH_SAFE(entry, &on_cache, entry, tentry) 3546 free(entry); 3547 return (rc); 3548 } 3549