/*-
 * Copyright (c) 2007 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Stand-alone ZFS file reader.
 */

#include <sys/endian.h>
#include <sys/stat.h>
#include <sys/stdint.h>
#include <sys/list.h>
#include <machine/_inttypes.h>

#include "zfsimpl.h"
#include "zfssubr.c"


struct zfsmount {
	const spa_t *spa;
	objset_phys_t objset;
	uint64_t rootobj;
};
static struct zfsmount zfsmount __unused;

/*
 * The indirect_child_t represents the vdev that we will read from, when we
 * need to read all copies of the data (e.g. for scrub or reconstruction).
 * For plain (non-mirror) top-level vdevs (i.e. is_vdev is not a mirror),
 * ic_vdev is the same as is_vdev. However, for mirror top-level vdevs,
 * ic_vdev is a child of the mirror.
 */
typedef struct indirect_child {
	void *ic_data;
	vdev_t *ic_vdev;
} indirect_child_t;

/*
 * The indirect_split_t represents one mapped segment of an i/o to the
 * indirect vdev. For non-split (contiguously-mapped) blocks, there will be
 * only one indirect_split_t, with is_split_offset==0 and is_size==io_size.
 * For split blocks, there will be several of these.
 */
typedef struct indirect_split {
	list_node_t is_node; /* link on iv_splits */

	/*
	 * is_split_offset is the offset into the i/o.
	 * This is the sum of the previous splits' is_size's.
	 */
	uint64_t is_split_offset;

	vdev_t *is_vdev; /* top-level vdev */
	uint64_t is_target_offset; /* offset on is_vdev */
	uint64_t is_size;
	int is_children; /* number of entries in is_child[] */

	/*
	 * is_good_child is the child that we are currently using to
	 * attempt reconstruction.
	 */
	int is_good_child;

	indirect_child_t is_child[1]; /* variable-length */
} indirect_split_t;

/*
 * The indirect_vsd_t is associated with each i/o to the indirect vdev.
 * It is the "Vdev-Specific Data" in the zio_t's io_vsd.
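 *
 * (In this stand-alone reader there is no zio pipeline: vdev_indirect_read()
 * below allocates one indirect_vsd_t per read, fills iv_splits via
 * vdev_indirect_remap(), and releases it with vdev_indirect_map_free().)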
 */
typedef struct indirect_vsd {
	boolean_t iv_split_block;
	boolean_t iv_reconstruct;

	list_t iv_splits; /* list of indirect_split_t's */
} indirect_vsd_t;

/*
 * List of all vdevs, chained through v_alllink.
 */
static vdev_list_t zfs_vdevs;

/*
 * List of ZFS features supported for read
 */
static const char *features_for_read[] = {
	"org.illumos:lz4_compress",
	"com.delphix:hole_birth",
	"com.delphix:extensible_dataset",
	"com.delphix:embedded_data",
	"org.open-zfs:large_blocks",
	"org.illumos:sha512",
	"org.illumos:skein",
	"org.zfsonlinux:large_dnode",
	"com.joyent:multi_vdev_crash_dump",
	"com.delphix:spacemap_histogram",
	"com.delphix:zpool_checkpoint",
	"com.delphix:spacemap_v2",
	"com.datto:encryption",
	"org.zfsonlinux:allocation_classes",
	"com.datto:resilver_defer",
	"com.delphix:device_removal",
	"com.delphix:obsolete_counts",
	"com.intel:allocation_classes",
	NULL
};

/*
 * List of all pools, chained through spa_link.
 */
static spa_list_t zfs_pools;

static const dnode_phys_t *dnode_cache_obj;
static uint64_t dnode_cache_bn;
static char *dnode_cache_buf;

static int zio_read(const spa_t *spa, const blkptr_t *bp, void *buf);
static int zfs_get_root(const spa_t *spa, uint64_t *objid);
static int zfs_rlookup(const spa_t *spa, uint64_t objnum, char *result);
static int zap_lookup(const spa_t *spa, const dnode_phys_t *dnode,
    const char *name, uint64_t integer_size, uint64_t num_integers,
    void *value);
static int objset_get_dnode(const spa_t *, const objset_phys_t *, uint64_t,
    dnode_phys_t *);
static int dnode_read(const spa_t *, const dnode_phys_t *, off_t, void *,
    size_t);
static int vdev_indirect_read(vdev_t *, const blkptr_t *, void *, off_t,
    size_t);
static int vdev_mirror_read(vdev_t *, const blkptr_t *, void *, off_t, size_t);
vdev_indirect_mapping_t *vdev_indirect_mapping_open(spa_t *, objset_phys_t *,
    uint64_t);
vdev_indirect_mapping_entry_phys_t *
vdev_indirect_mapping_duplicate_adjacent_entries(vdev_t *, uint64_t,
    uint64_t, uint64_t *);

static void
zfs_init(void)
{
	STAILQ_INIT(&zfs_vdevs);
	STAILQ_INIT(&zfs_pools);

	dnode_cache_buf = malloc(SPA_MAXBLOCKSIZE);

	zfs_init_crc();
}

static int
nvlist_check_features_for_read(nvlist_t *nvl)
{
	nvlist_t *features = NULL;
	nvs_data_t *data;
	nvp_header_t *nvp;
	nv_string_t *nvp_name;
	int rc;

	rc = nvlist_find(nvl, ZPOOL_CONFIG_FEATURES_FOR_READ,
	    DATA_TYPE_NVLIST, NULL, &features, NULL);
	if (rc != 0)
		return (rc);

	data = (nvs_data_t *)features->nv_data;
	nvp = &data->nvl_pair; /* first pair in nvlist */

	while (nvp->encoded_size != 0 && nvp->decoded_size != 0) {
		int i, found;

		nvp_name = (nv_string_t *)((uintptr_t)nvp + sizeof(*nvp));
		found = 0;

		for (i = 0; features_for_read[i] != NULL; i++) {
			if (memcmp(nvp_name->nv_data, features_for_read[i],
			    nvp_name->nv_size) == 0) {
				found = 1;
				break;
			}
		}

		if (!found) {
			printf("ZFS: unsupported feature: %.*s\n",
			    nvp_name->nv_size, nvp_name->nv_data);
			rc = EIO;
		}
		nvp = (nvp_header_t *)((uint8_t *)nvp + nvp->encoded_size);
	}
	nvlist_destroy(features);

	return (rc);
}

static int
vdev_read_phys(vdev_t *vdev, const blkptr_t *bp, void *buf,
    off_t offset, size_t size)
{
	size_t psize;
	int rc;

	if (!vdev->v_phys_read)
		return (EIO);

	if (bp) {
		psize = BP_GET_PSIZE(bp);
	} else {
		psize = size;
	}

	rc = vdev->v_phys_read(vdev, vdev->v_read_priv, offset, buf, psize);
	if (rc == 0) {
		if (bp != NULL)
			rc = zio_checksum_verify(vdev->v_spa, bp, buf);
	}

	return (rc);
}

typedef struct remap_segment {
	vdev_t *rs_vd;
	uint64_t rs_offset;
	uint64_t rs_asize;
	uint64_t rs_split_offset;
	list_node_t rs_node;
} remap_segment_t;

static remap_segment_t *
rs_alloc(vdev_t *vd, uint64_t offset, uint64_t asize, uint64_t split_offset)
{
	remap_segment_t *rs = malloc(sizeof (remap_segment_t));

	if (rs != NULL) {
		rs->rs_vd = vd;
		rs->rs_offset = offset;
		rs->rs_asize = asize;
		rs->rs_split_offset = split_offset;
	}

	return (rs);
}

vdev_indirect_mapping_t *
vdev_indirect_mapping_open(spa_t *spa, objset_phys_t *os,
    uint64_t mapping_object)
{
	vdev_indirect_mapping_t *vim;
	vdev_indirect_mapping_phys_t *vim_phys;
	int rc;

	vim = calloc(1, sizeof (*vim));
	if (vim == NULL)
		return (NULL);

	vim->vim_dn = calloc(1, sizeof (*vim->vim_dn));
	if (vim->vim_dn == NULL) {
		free(vim);
		return (NULL);
	}

	rc = objset_get_dnode(spa, os, mapping_object, vim->vim_dn);
	if (rc != 0) {
		free(vim->vim_dn);
		free(vim);
		return (NULL);
	}

	vim->vim_spa = spa;
	vim->vim_phys = malloc(sizeof (*vim->vim_phys));
	if (vim->vim_phys == NULL) {
		free(vim->vim_dn);
		free(vim);
		return (NULL);
	}

	vim_phys = (vdev_indirect_mapping_phys_t *)DN_BONUS(vim->vim_dn);
	*vim->vim_phys = *vim_phys;

	vim->vim_objset = os;
	vim->vim_object = mapping_object;
	vim->vim_entries = NULL;

	vim->vim_havecounts =
	    (vim->vim_dn->dn_bonuslen > VDEV_INDIRECT_MAPPING_SIZE_V0);

	return (vim);
}

/*
 * Compare an offset with an indirect mapping entry; there are three
 * possible scenarios:
 *
 *     1. The offset is "less than" the mapping entry; meaning the
 *        offset is less than the source offset of the mapping entry. In
 *        this case, there is no overlap between the offset and the
 *        mapping entry and -1 will be returned.
 *
 *     2. The offset is "greater than" the mapping entry; meaning the
 *        offset is greater than the mapping entry's source offset plus
 *        the entry's size. In this case, there is no overlap between
 *        the offset and the mapping entry and 1 will be returned.
 *
 *        NOTE: If the offset is actually equal to the entry's offset
 *        plus size, this is considered to be "greater" than the entry,
 *        and this case applies (i.e. 1 will be returned). Thus, the
 *        entry's "range" can be considered to be inclusive at its
 *        start, but exclusive at its end: e.g. [src, src + size).
 *
 *     3. The last case to consider is if the offset actually falls
 *        within the mapping entry's range. If this is the case, the
 *        offset is considered to be "equal to" the mapping entry and
 *        0 will be returned.
 *
 *        NOTE: If the offset is equal to the entry's source offset,
 *        this case applies and 0 will be returned. If the offset is
 *        equal to the entry's source plus its size, this case does
 *        *not* apply (see "NOTE" above for scenario 2), and 1 will be
 *        returned.
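 *
 * In other words, each mapping entry is treated as the half-open source
 * range [src, src + size), and the comparator returns -1, 0 or 1 so that it
 * can drive the binary search in
 * vdev_indirect_mapping_entry_for_offset() below.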
 */
static int
dva_mapping_overlap_compare(const void *v_key, const void *v_array_elem)
{
	const uint64_t *key = v_key;
	const vdev_indirect_mapping_entry_phys_t *array_elem =
	    v_array_elem;
	uint64_t src_offset = DVA_MAPPING_GET_SRC_OFFSET(array_elem);

	if (*key < src_offset) {
		return (-1);
	} else if (*key < src_offset + DVA_GET_ASIZE(&array_elem->vimep_dst)) {
		return (0);
	} else {
		return (1);
	}
}

/*
 * Return array entry.
 */
static vdev_indirect_mapping_entry_phys_t *
vdev_indirect_mapping_entry(vdev_indirect_mapping_t *vim, uint64_t index)
{
	uint64_t size;
	off_t offset = 0;
	int rc;

	if (vim->vim_phys->vimp_num_entries == 0)
		return (NULL);

	if (vim->vim_entries == NULL) {
		uint64_t bsize;

		bsize = vim->vim_dn->dn_datablkszsec << SPA_MINBLOCKSHIFT;
		size = vim->vim_phys->vimp_num_entries *
		    sizeof (*vim->vim_entries);
		if (size > bsize) {
			size = bsize / sizeof (*vim->vim_entries);
			size *= sizeof (*vim->vim_entries);
		}
		vim->vim_entries = malloc(size);
		if (vim->vim_entries == NULL)
			return (NULL);
		vim->vim_num_entries = size / sizeof (*vim->vim_entries);
		offset = index * sizeof (*vim->vim_entries);
	}

	/* We have data in vim_entries */
	if (offset == 0) {
		if (index >= vim->vim_entry_offset &&
		    index <= vim->vim_entry_offset + vim->vim_num_entries) {
			index -= vim->vim_entry_offset;
			return (&vim->vim_entries[index]);
		}
		offset = index * sizeof (*vim->vim_entries);
	}

	vim->vim_entry_offset = index;
	size = vim->vim_num_entries * sizeof (*vim->vim_entries);
	rc = dnode_read(vim->vim_spa, vim->vim_dn, offset, vim->vim_entries,
	    size);
	if (rc != 0) {
		/* Read error, invalidate vim_entries. */
		free(vim->vim_entries);
		vim->vim_entries = NULL;
		return (NULL);
	}
	index -= vim->vim_entry_offset;
	return (&vim->vim_entries[index]);
}

/*
 * Returns the mapping entry for the given offset.
 *
 * It's possible that the given offset will not be in the mapping table
 * (i.e. no mapping entries contain this offset), in which case, the
 * return value depends on the "next_if_missing" parameter.
 *
 * If the offset is not found in the table and "next_if_missing" is
 * B_FALSE, then NULL will always be returned. The behavior is intended
 * to allow consumers to get the entry corresponding to the offset
 * parameter, iff the offset overlaps with an entry in the table.
 *
 * If the offset is not found in the table and "next_if_missing" is
 * B_TRUE, then the entry nearest to the given offset will be returned,
 * such that the entry's source offset is greater than the offset
 * passed in (i.e. the "next" mapping entry in the table is returned, if
 * the offset is missing from the table). If there are no entries whose
 * source offset is greater than the passed in offset, NULL is returned.
 */
static vdev_indirect_mapping_entry_phys_t *
vdev_indirect_mapping_entry_for_offset(vdev_indirect_mapping_t *vim,
    uint64_t offset)
{
	ASSERT(vim->vim_phys->vimp_num_entries > 0);

	vdev_indirect_mapping_entry_phys_t *entry;

	uint64_t last = vim->vim_phys->vimp_num_entries - 1;
	uint64_t base = 0;

	/*
	 * We don't define these inside of the while loop because we use
	 * their value in the case that offset isn't in the mapping.
	 */
	uint64_t mid;
	int result;

	while (last >= base) {
		mid = base + ((last - base) >> 1);

		entry = vdev_indirect_mapping_entry(vim, mid);
		if (entry == NULL)
			break;
		result = dva_mapping_overlap_compare(&offset, entry);

		if (result == 0) {
			break;
		} else if (result < 0) {
			last = mid - 1;
		} else {
			base = mid + 1;
		}
	}
	return (entry);
}

/*
 * Given an indirect vdev and an extent on that vdev, it duplicates the
 * physical entries of the indirect mapping that correspond to the extent
 * to a new array and returns a pointer to it. In addition, copied_entries
 * is populated with the number of mapping entries that were duplicated.
 *
 * Finally, since we are doing an allocation, it is up to the caller to
 * free the array allocated in this function.
 */
vdev_indirect_mapping_entry_phys_t *
vdev_indirect_mapping_duplicate_adjacent_entries(vdev_t *vd, uint64_t offset,
    uint64_t asize, uint64_t *copied_entries)
{
	vdev_indirect_mapping_entry_phys_t *duplicate_mappings = NULL;
	vdev_indirect_mapping_t *vim = vd->v_mapping;
	uint64_t entries = 0;

	vdev_indirect_mapping_entry_phys_t *first_mapping =
	    vdev_indirect_mapping_entry_for_offset(vim, offset);
	ASSERT3P(first_mapping, !=, NULL);

	vdev_indirect_mapping_entry_phys_t *m = first_mapping;
	while (asize > 0) {
		uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);
		uint64_t inner_offset = offset - DVA_MAPPING_GET_SRC_OFFSET(m);
		uint64_t inner_size = MIN(asize, size - inner_offset);

		offset += inner_size;
		asize -= inner_size;
		entries++;
		m++;
	}

	size_t copy_length = entries * sizeof (*first_mapping);
	duplicate_mappings = malloc(copy_length);
	if (duplicate_mappings != NULL)
		bcopy(first_mapping, duplicate_mappings, copy_length);
	else
		entries = 0;

	*copied_entries = entries;

	return (duplicate_mappings);
}

static vdev_t *
vdev_lookup_top(spa_t *spa, uint64_t vdev)
{
	vdev_t *rvd;
	vdev_list_t *vlist;

	vlist = &spa->spa_root_vdev->v_children;
	STAILQ_FOREACH(rvd, vlist, v_childlink)
		if (rvd->v_id == vdev)
			break;

	return (rvd);
}

/*
 * This is a callback for vdev_indirect_remap() which allocates an
 * indirect_split_t for each split segment and adds it to iv_splits.
 */
static void
vdev_indirect_gather_splits(uint64_t split_offset, vdev_t *vd, uint64_t offset,
    uint64_t size, void *arg)
{
	int n = 1;
	zio_t *zio = arg;
	indirect_vsd_t *iv = zio->io_vsd;

	if (vd->v_read == vdev_indirect_read)
		return;

	if (vd->v_read == vdev_mirror_read)
		n = vd->v_nchildren;

	indirect_split_t *is =
	    malloc(offsetof(indirect_split_t, is_child[n]));
	if (is == NULL) {
		zio->io_error = ENOMEM;
		return;
	}
	bzero(is, offsetof(indirect_split_t, is_child[n]));

	is->is_children = n;
	is->is_size = size;
	is->is_split_offset = split_offset;
	is->is_target_offset = offset;
	is->is_vdev = vd;

	/*
	 * Note that we only consider multiple copies of the data for
	 * *mirror* vdevs. We don't for "replacing" or "spare" vdevs, even
	 * though they use the same ops as mirror, because there's only one
	 * "good" copy under the replacing/spare.
	 */
	if (vd->v_read == vdev_mirror_read) {
		int i = 0;
		vdev_t *kid;

		STAILQ_FOREACH(kid, &vd->v_children, v_childlink) {
			is->is_child[i++].ic_vdev = kid;
		}
	} else {
		is->is_child[0].ic_vdev = vd;
	}

	list_insert_tail(&iv->iv_splits, is);
}

static void
vdev_indirect_remap(vdev_t *vd, uint64_t offset, uint64_t asize, void *arg)
{
	list_t stack;
	spa_t *spa = vd->v_spa;
	zio_t *zio = arg;
	remap_segment_t *rs;

	list_create(&stack, sizeof (remap_segment_t),
	    offsetof(remap_segment_t, rs_node));

	rs = rs_alloc(vd, offset, asize, 0);
	if (rs == NULL) {
		printf("vdev_indirect_remap: out of memory.\n");
		zio->io_error = ENOMEM;
	}
	for (; rs != NULL; rs = list_remove_head(&stack)) {
		vdev_t *v = rs->rs_vd;
		uint64_t num_entries = 0;
		/* vdev_indirect_mapping_t *vim = v->v_mapping; */
		vdev_indirect_mapping_entry_phys_t *mapping =
		    vdev_indirect_mapping_duplicate_adjacent_entries(v,
		    rs->rs_offset, rs->rs_asize, &num_entries);

		if (num_entries == 0)
			zio->io_error = ENOMEM;

		for (uint64_t i = 0; i < num_entries; i++) {
			vdev_indirect_mapping_entry_phys_t *m = &mapping[i];
			uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);
			uint64_t dst_offset = DVA_GET_OFFSET(&m->vimep_dst);
			uint64_t dst_vdev = DVA_GET_VDEV(&m->vimep_dst);
			uint64_t inner_offset = rs->rs_offset -
			    DVA_MAPPING_GET_SRC_OFFSET(m);
			uint64_t inner_size =
			    MIN(rs->rs_asize, size - inner_offset);
			vdev_t *dst_v = vdev_lookup_top(spa, dst_vdev);

			if (dst_v->v_read == vdev_indirect_read) {
				remap_segment_t *o;

				o = rs_alloc(dst_v, dst_offset + inner_offset,
				    inner_size, rs->rs_split_offset);
				if (o == NULL) {
					printf("vdev_indirect_remap: "
					    "out of memory.\n");
					zio->io_error = ENOMEM;
					break;
				}

				list_insert_head(&stack, o);
			}
			vdev_indirect_gather_splits(rs->rs_split_offset, dst_v,
			    dst_offset + inner_offset,
			    inner_size, arg);

			/*
			 * vdev_indirect_gather_splits can fail to allocate
			 * memory; we cannot recover from that.
			 */
			if (zio->io_error != 0)
				break;
			rs->rs_offset += inner_size;
			rs->rs_asize -= inner_size;
			rs->rs_split_offset += inner_size;
		}

		free(mapping);
		free(rs);
		if (zio->io_error != 0)
			break;
	}

	list_destroy(&stack);
}

static void
vdev_indirect_map_free(zio_t *zio)
{
	indirect_vsd_t *iv = zio->io_vsd;
	indirect_split_t *is;

	while ((is = list_head(&iv->iv_splits)) != NULL) {
		for (int c = 0; c < is->is_children; c++) {
			indirect_child_t *ic = &is->is_child[c];
			free(ic->ic_data);
		}
		list_remove(&iv->iv_splits, is);
		free(is);
	}
	free(iv);
}

static int
vdev_indirect_read(vdev_t *vdev, const blkptr_t *bp, void *buf,
    off_t offset, size_t bytes)
{
	zio_t zio;
	spa_t *spa = vdev->v_spa;
	indirect_vsd_t *iv;
	indirect_split_t *first;
	int rc = EIO;

	iv = calloc(1, sizeof(*iv));
	if (iv == NULL)
		return (ENOMEM);

	list_create(&iv->iv_splits,
	    sizeof (indirect_split_t), offsetof(indirect_split_t, is_node));

	bzero(&zio, sizeof(zio));
	zio.io_spa = spa;
	zio.io_bp = (blkptr_t *)bp;
	zio.io_data = buf;
	zio.io_size = bytes;
	zio.io_offset = offset;
	zio.io_vd = vdev;
	zio.io_vsd = iv;

	if (vdev->v_mapping == NULL) {
		vdev_indirect_config_t *vic;

		vic = &vdev->vdev_indirect_config;
		vdev->v_mapping = vdev_indirect_mapping_open(spa,
		    &spa->spa_mos, vic->vic_mapping_object);
	}

	vdev_indirect_remap(vdev, offset, bytes, &zio);
	if (zio.io_error != 0)
		return (zio.io_error);

	first = list_head(&iv->iv_splits);
	if (first->is_size == zio.io_size) {
		/*
		 * This is not a split block; we are pointing to the entire
		 * data, which will checksum the same as the original data.
		 * Pass the BP down so that the child i/o can verify the
		 * checksum, and try a different location if available
		 * (e.g. on a mirror).
		 *
		 * While this special case could be handled the same as the
		 * general (split block) case, doing it this way ensures
		 * that the vast majority of blocks on indirect vdevs
		 * (which are not split) are handled identically to blocks
		 * on non-indirect vdevs. This allows us to be less strict
		 * about performance in the general (but rare) case.
		 */
		rc = first->is_vdev->v_read(first->is_vdev, zio.io_bp,
		    zio.io_data, first->is_target_offset, bytes);
	} else {
		iv->iv_split_block = B_TRUE;
		/*
		 * Read one copy of each split segment, from the
		 * top-level vdev. Since we don't know the
		 * checksum of each split individually, the child
		 * zio can't ensure that we get the right data.
		 * E.g. if it's a mirror, it will just read from a
		 * random (healthy) leaf vdev. We have to verify
		 * the checksum in vdev_indirect_io_done().
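		 *
		 * (This stand-alone reader has no separate io_done stage;
		 * the equivalent verification happens right below via
		 * zio_checksum_verify() once all segments have been read.)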
		 */
		for (indirect_split_t *is = list_head(&iv->iv_splits);
		    is != NULL; is = list_next(&iv->iv_splits, is)) {
			char *ptr = zio.io_data;

			rc = is->is_vdev->v_read(is->is_vdev, zio.io_bp,
			    ptr + is->is_split_offset, is->is_target_offset,
			    is->is_size);
		}
		if (zio_checksum_verify(spa, zio.io_bp, zio.io_data))
			rc = ECKSUM;
		else
			rc = 0;
	}

	vdev_indirect_map_free(&zio);
	if (rc == 0)
		rc = zio.io_error;

	return (rc);
}

static int
vdev_disk_read(vdev_t *vdev, const blkptr_t *bp, void *buf,
    off_t offset, size_t bytes)
{

	return (vdev_read_phys(vdev, bp, buf,
	    offset + VDEV_LABEL_START_SIZE, bytes));
}


static int
vdev_mirror_read(vdev_t *vdev, const blkptr_t *bp, void *buf,
    off_t offset, size_t bytes)
{
	vdev_t *kid;
	int rc;

	rc = EIO;
	STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) {
		if (kid->v_state != VDEV_STATE_HEALTHY)
			continue;
		rc = kid->v_read(kid, bp, buf, offset, bytes);
		if (!rc)
			return (0);
	}

	return (rc);
}

static int
vdev_replacing_read(vdev_t *vdev, const blkptr_t *bp, void *buf,
    off_t offset, size_t bytes)
{
	vdev_t *kid;

	/*
	 * Here we should have two kids:
	 * The first one is the vdev we are replacing; only it can be
	 * trusted to have valid data, but it might not be present.
	 * The second one is the vdev we are replacing it with. It is most
	 * likely healthy, but we can't trust it has the needed data, so we
	 * won't use it.
	 */
	kid = STAILQ_FIRST(&vdev->v_children);
	if (kid == NULL)
		return (EIO);
	if (kid->v_state != VDEV_STATE_HEALTHY)
		return (EIO);
	return (kid->v_read(kid, bp, buf, offset, bytes));
}

static vdev_t *
vdev_find(uint64_t guid)
{
	vdev_t *vdev;

	STAILQ_FOREACH(vdev, &zfs_vdevs, v_alllink)
		if (vdev->v_guid == guid)
			return (vdev);

	return (NULL);
}

static vdev_t *
vdev_create(uint64_t guid, vdev_read_t *_read)
{
	vdev_t *vdev;
	vdev_indirect_config_t *vic;

	vdev = calloc(1, sizeof(vdev_t));
	if (vdev != NULL) {
		STAILQ_INIT(&vdev->v_children);
		vdev->v_guid = guid;
		vdev->v_read = _read;

		/*
		 * The root vdev has no read function; we use this fact to
		 * skip setting up data we do not need for the root vdev.
		 * The root vdev is only referenced from the spa.
837 */ 838 if (_read != NULL) { 839 vic = &vdev->vdev_indirect_config; 840 vic->vic_prev_indirect_vdev = UINT64_MAX; 841 STAILQ_INSERT_TAIL(&zfs_vdevs, vdev, v_alllink); 842 } 843 } 844 845 return (vdev); 846 } 847 848 static void 849 vdev_set_initial_state(vdev_t *vdev, const nvlist_t *nvlist) 850 { 851 uint64_t is_offline, is_faulted, is_degraded, is_removed, isnt_present; 852 uint64_t is_log; 853 854 is_offline = is_removed = is_faulted = is_degraded = isnt_present = 0; 855 is_log = 0; 856 (void) nvlist_find(nvlist, ZPOOL_CONFIG_OFFLINE, DATA_TYPE_UINT64, NULL, 857 &is_offline, NULL); 858 (void) nvlist_find(nvlist, ZPOOL_CONFIG_REMOVED, DATA_TYPE_UINT64, NULL, 859 &is_removed, NULL); 860 (void) nvlist_find(nvlist, ZPOOL_CONFIG_FAULTED, DATA_TYPE_UINT64, NULL, 861 &is_faulted, NULL); 862 (void) nvlist_find(nvlist, ZPOOL_CONFIG_DEGRADED, DATA_TYPE_UINT64, 863 NULL, &is_degraded, NULL); 864 (void) nvlist_find(nvlist, ZPOOL_CONFIG_NOT_PRESENT, DATA_TYPE_UINT64, 865 NULL, &isnt_present, NULL); 866 (void) nvlist_find(nvlist, ZPOOL_CONFIG_IS_LOG, DATA_TYPE_UINT64, NULL, 867 &is_log, NULL); 868 869 if (is_offline != 0) 870 vdev->v_state = VDEV_STATE_OFFLINE; 871 else if (is_removed != 0) 872 vdev->v_state = VDEV_STATE_REMOVED; 873 else if (is_faulted != 0) 874 vdev->v_state = VDEV_STATE_FAULTED; 875 else if (is_degraded != 0) 876 vdev->v_state = VDEV_STATE_DEGRADED; 877 else if (isnt_present != 0) 878 vdev->v_state = VDEV_STATE_CANT_OPEN; 879 880 vdev->v_islog = is_log != 0; 881 } 882 883 static int 884 vdev_init(uint64_t guid, const nvlist_t *nvlist, vdev_t **vdevp) 885 { 886 uint64_t id, ashift, asize, nparity; 887 const char *path; 888 const char *type; 889 int len, pathlen; 890 char *name; 891 vdev_t *vdev; 892 893 if (nvlist_find(nvlist, ZPOOL_CONFIG_ID, DATA_TYPE_UINT64, NULL, &id, 894 NULL) || 895 nvlist_find(nvlist, ZPOOL_CONFIG_TYPE, DATA_TYPE_STRING, NULL, 896 &type, &len)) { 897 return (ENOENT); 898 } 899 900 if (memcmp(type, VDEV_TYPE_MIRROR, len) != 0 && 901 memcmp(type, VDEV_TYPE_DISK, len) != 0 && 902 #ifdef ZFS_TEST 903 memcmp(type, VDEV_TYPE_FILE, len) != 0 && 904 #endif 905 memcmp(type, VDEV_TYPE_RAIDZ, len) != 0 && 906 memcmp(type, VDEV_TYPE_INDIRECT, len) != 0 && 907 memcmp(type, VDEV_TYPE_REPLACING, len) != 0) { 908 printf("ZFS: can only boot from disk, mirror, raidz1, " 909 "raidz2 and raidz3 vdevs\n"); 910 return (EIO); 911 } 912 913 if (memcmp(type, VDEV_TYPE_MIRROR, len) == 0) 914 vdev = vdev_create(guid, vdev_mirror_read); 915 else if (memcmp(type, VDEV_TYPE_RAIDZ, len) == 0) 916 vdev = vdev_create(guid, vdev_raidz_read); 917 else if (memcmp(type, VDEV_TYPE_REPLACING, len) == 0) 918 vdev = vdev_create(guid, vdev_replacing_read); 919 else if (memcmp(type, VDEV_TYPE_INDIRECT, len) == 0) { 920 vdev_indirect_config_t *vic; 921 922 vdev = vdev_create(guid, vdev_indirect_read); 923 if (vdev != NULL) { 924 vdev->v_state = VDEV_STATE_HEALTHY; 925 vic = &vdev->vdev_indirect_config; 926 927 nvlist_find(nvlist, 928 ZPOOL_CONFIG_INDIRECT_OBJECT, 929 DATA_TYPE_UINT64, 930 NULL, &vic->vic_mapping_object, NULL); 931 nvlist_find(nvlist, 932 ZPOOL_CONFIG_INDIRECT_BIRTHS, 933 DATA_TYPE_UINT64, 934 NULL, &vic->vic_births_object, NULL); 935 nvlist_find(nvlist, 936 ZPOOL_CONFIG_PREV_INDIRECT_VDEV, 937 DATA_TYPE_UINT64, 938 NULL, &vic->vic_prev_indirect_vdev, NULL); 939 } 940 } else { 941 vdev = vdev_create(guid, vdev_disk_read); 942 } 943 944 if (vdev == NULL) 945 return (ENOMEM); 946 947 vdev_set_initial_state(vdev, nvlist); 948 vdev->v_id = id; 949 if (nvlist_find(nvlist, 
ZPOOL_CONFIG_ASHIFT, 950 DATA_TYPE_UINT64, NULL, &ashift, NULL) == 0) 951 vdev->v_ashift = ashift; 952 953 if (nvlist_find(nvlist, ZPOOL_CONFIG_ASIZE, 954 DATA_TYPE_UINT64, NULL, &asize, NULL) == 0) { 955 vdev->v_psize = asize + 956 VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE; 957 } 958 959 if (nvlist_find(nvlist, ZPOOL_CONFIG_NPARITY, 960 DATA_TYPE_UINT64, NULL, &nparity, NULL) == 0) 961 vdev->v_nparity = nparity; 962 963 if (nvlist_find(nvlist, ZPOOL_CONFIG_PATH, 964 DATA_TYPE_STRING, NULL, &path, &pathlen) == 0) { 965 char prefix[] = "/dev/"; 966 967 len = strlen(prefix); 968 if (len < pathlen && memcmp(path, prefix, len) == 0) { 969 path += len; 970 pathlen -= len; 971 } 972 name = malloc(pathlen + 1); 973 bcopy(path, name, pathlen); 974 name[pathlen] = '\0'; 975 vdev->v_name = name; 976 } else { 977 name = NULL; 978 if (memcmp(type, VDEV_TYPE_RAIDZ, len) == 0) { 979 if (vdev->v_nparity < 1 || 980 vdev->v_nparity > 3) { 981 printf("ZFS: invalid raidz parity: %d\n", 982 vdev->v_nparity); 983 return (EIO); 984 } 985 (void) asprintf(&name, "%.*s%d-%" PRIu64, len, type, 986 vdev->v_nparity, id); 987 } else { 988 (void) asprintf(&name, "%.*s-%" PRIu64, len, type, id); 989 } 990 vdev->v_name = name; 991 } 992 *vdevp = vdev; 993 return (0); 994 } 995 996 /* 997 * Find slot for vdev. We return either NULL to signal to use 998 * STAILQ_INSERT_HEAD, or we return link element to be used with 999 * STAILQ_INSERT_AFTER. 1000 */ 1001 static vdev_t * 1002 vdev_find_previous(vdev_t *top_vdev, vdev_t *vdev) 1003 { 1004 vdev_t *v, *previous; 1005 1006 if (STAILQ_EMPTY(&top_vdev->v_children)) 1007 return (NULL); 1008 1009 previous = NULL; 1010 STAILQ_FOREACH(v, &top_vdev->v_children, v_childlink) { 1011 if (v->v_id > vdev->v_id) 1012 return (previous); 1013 1014 if (v->v_id == vdev->v_id) 1015 return (v); 1016 1017 if (v->v_id < vdev->v_id) 1018 previous = v; 1019 } 1020 return (previous); 1021 } 1022 1023 static size_t 1024 vdev_child_count(vdev_t *vdev) 1025 { 1026 vdev_t *v; 1027 size_t count; 1028 1029 count = 0; 1030 STAILQ_FOREACH(v, &vdev->v_children, v_childlink) { 1031 count++; 1032 } 1033 return (count); 1034 } 1035 1036 /* 1037 * Insert vdev into top_vdev children list. List is ordered by v_id. 1038 */ 1039 static void 1040 vdev_insert(vdev_t *top_vdev, vdev_t *vdev) 1041 { 1042 vdev_t *previous; 1043 size_t count; 1044 1045 /* 1046 * The top level vdev can appear in random order, depending how 1047 * the firmware is presenting the disk devices. 1048 * However, we will insert vdev to create list ordered by v_id, 1049 * so we can use either STAILQ_INSERT_HEAD or STAILQ_INSERT_AFTER 1050 * as STAILQ does not have insert before. 1051 */ 1052 previous = vdev_find_previous(top_vdev, vdev); 1053 1054 if (previous == NULL) { 1055 STAILQ_INSERT_HEAD(&top_vdev->v_children, vdev, v_childlink); 1056 } else if (previous->v_id == vdev->v_id) { 1057 /* 1058 * This vdev was configured from label config, 1059 * do not insert duplicate. 1060 */ 1061 return; 1062 } else { 1063 STAILQ_INSERT_AFTER(&top_vdev->v_children, previous, vdev, 1064 v_childlink); 1065 } 1066 1067 count = vdev_child_count(top_vdev); 1068 if (top_vdev->v_nchildren < count) 1069 top_vdev->v_nchildren = count; 1070 } 1071 1072 static int 1073 vdev_from_nvlist(spa_t *spa, uint64_t top_guid, const nvlist_t *nvlist) 1074 { 1075 vdev_t *top_vdev, *vdev; 1076 nvlist_t *kids = NULL; 1077 int rc, nkids; 1078 1079 /* Get top vdev. 
*/ 1080 top_vdev = vdev_find(top_guid); 1081 if (top_vdev == NULL) { 1082 rc = vdev_init(top_guid, nvlist, &top_vdev); 1083 if (rc != 0) 1084 return (rc); 1085 top_vdev->v_spa = spa; 1086 top_vdev->v_top = top_vdev; 1087 vdev_insert(spa->spa_root_vdev, top_vdev); 1088 } 1089 1090 /* Add children if there are any. */ 1091 rc = nvlist_find(nvlist, ZPOOL_CONFIG_CHILDREN, DATA_TYPE_NVLIST_ARRAY, 1092 &nkids, &kids, NULL); 1093 if (rc == 0) { 1094 for (int i = 0; i < nkids; i++) { 1095 uint64_t guid; 1096 1097 rc = nvlist_find(kids, ZPOOL_CONFIG_GUID, 1098 DATA_TYPE_UINT64, NULL, &guid, NULL); 1099 if (rc != 0) { 1100 nvlist_destroy(kids); 1101 return (rc); 1102 } 1103 rc = vdev_init(guid, kids, &vdev); 1104 if (rc != 0) 1105 return (rc); 1106 1107 vdev->v_spa = spa; 1108 vdev->v_top = top_vdev; 1109 vdev_insert(top_vdev, vdev); 1110 1111 rc = nvlist_next(kids); 1112 } 1113 } else { 1114 /* 1115 * When there are no children, nvlist_find() does return 1116 * error, reset it because leaf devices have no children. 1117 */ 1118 rc = 0; 1119 } 1120 nvlist_destroy(kids); 1121 1122 return (rc); 1123 } 1124 1125 static int 1126 vdev_init_from_label(spa_t *spa, const nvlist_t *nvlist) 1127 { 1128 uint64_t pool_guid, top_guid; 1129 nvlist_t *vdevs; 1130 int rc; 1131 1132 if (nvlist_find(nvlist, ZPOOL_CONFIG_POOL_GUID, DATA_TYPE_UINT64, 1133 NULL, &pool_guid, NULL) || 1134 nvlist_find(nvlist, ZPOOL_CONFIG_TOP_GUID, DATA_TYPE_UINT64, 1135 NULL, &top_guid, NULL) || 1136 nvlist_find(nvlist, ZPOOL_CONFIG_VDEV_TREE, DATA_TYPE_NVLIST, 1137 NULL, &vdevs, NULL)) { 1138 printf("ZFS: can't find vdev details\n"); 1139 return (ENOENT); 1140 } 1141 1142 rc = vdev_from_nvlist(spa, top_guid, vdevs); 1143 nvlist_destroy(vdevs); 1144 return (rc); 1145 } 1146 1147 static void 1148 vdev_set_state(vdev_t *vdev) 1149 { 1150 vdev_t *kid; 1151 int good_kids; 1152 int bad_kids; 1153 1154 STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) { 1155 vdev_set_state(kid); 1156 } 1157 1158 /* 1159 * A mirror or raidz is healthy if all its kids are healthy. A 1160 * mirror is degraded if any of its kids is healthy; a raidz 1161 * is degraded if at most nparity kids are offline. 1162 */ 1163 if (STAILQ_FIRST(&vdev->v_children)) { 1164 good_kids = 0; 1165 bad_kids = 0; 1166 STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) { 1167 if (kid->v_state == VDEV_STATE_HEALTHY) 1168 good_kids++; 1169 else 1170 bad_kids++; 1171 } 1172 if (bad_kids == 0) { 1173 vdev->v_state = VDEV_STATE_HEALTHY; 1174 } else { 1175 if (vdev->v_read == vdev_mirror_read) { 1176 if (good_kids) { 1177 vdev->v_state = VDEV_STATE_DEGRADED; 1178 } else { 1179 vdev->v_state = VDEV_STATE_OFFLINE; 1180 } 1181 } else if (vdev->v_read == vdev_raidz_read) { 1182 if (bad_kids > vdev->v_nparity) { 1183 vdev->v_state = VDEV_STATE_OFFLINE; 1184 } else { 1185 vdev->v_state = VDEV_STATE_DEGRADED; 1186 } 1187 } 1188 } 1189 } 1190 } 1191 1192 static int 1193 vdev_update_from_nvlist(uint64_t top_guid, const nvlist_t *nvlist) 1194 { 1195 vdev_t *vdev; 1196 nvlist_t *kids = NULL; 1197 int rc, nkids; 1198 1199 /* Update top vdev. */ 1200 vdev = vdev_find(top_guid); 1201 if (vdev != NULL) 1202 vdev_set_initial_state(vdev, nvlist); 1203 1204 /* Update children if there are any. 
*/ 1205 rc = nvlist_find(nvlist, ZPOOL_CONFIG_CHILDREN, DATA_TYPE_NVLIST_ARRAY, 1206 &nkids, &kids, NULL); 1207 if (rc == 0) { 1208 for (int i = 0; i < nkids; i++) { 1209 uint64_t guid; 1210 1211 rc = nvlist_find(kids, ZPOOL_CONFIG_GUID, 1212 DATA_TYPE_UINT64, NULL, &guid, NULL); 1213 if (rc != 0) 1214 break; 1215 1216 vdev = vdev_find(guid); 1217 if (vdev != NULL) 1218 vdev_set_initial_state(vdev, kids); 1219 1220 rc = nvlist_next(kids); 1221 } 1222 } else { 1223 rc = 0; 1224 } 1225 nvlist_destroy(kids); 1226 1227 return (rc); 1228 } 1229 1230 static int 1231 vdev_init_from_nvlist(spa_t *spa, const nvlist_t *nvlist) 1232 { 1233 uint64_t pool_guid, vdev_children; 1234 nvlist_t *vdevs = NULL, *kids = NULL; 1235 int rc, nkids; 1236 1237 if (nvlist_find(nvlist, ZPOOL_CONFIG_POOL_GUID, DATA_TYPE_UINT64, 1238 NULL, &pool_guid, NULL) || 1239 nvlist_find(nvlist, ZPOOL_CONFIG_VDEV_CHILDREN, DATA_TYPE_UINT64, 1240 NULL, &vdev_children, NULL) || 1241 nvlist_find(nvlist, ZPOOL_CONFIG_VDEV_TREE, DATA_TYPE_NVLIST, 1242 NULL, &vdevs, NULL)) { 1243 printf("ZFS: can't find vdev details\n"); 1244 return (ENOENT); 1245 } 1246 1247 /* Wrong guid?! */ 1248 if (spa->spa_guid != pool_guid) { 1249 nvlist_destroy(vdevs); 1250 return (EINVAL); 1251 } 1252 1253 spa->spa_root_vdev->v_nchildren = vdev_children; 1254 1255 rc = nvlist_find(vdevs, ZPOOL_CONFIG_CHILDREN, DATA_TYPE_NVLIST_ARRAY, 1256 &nkids, &kids, NULL); 1257 nvlist_destroy(vdevs); 1258 1259 /* 1260 * MOS config has at least one child for root vdev. 1261 */ 1262 if (rc != 0) 1263 return (rc); 1264 1265 for (int i = 0; i < nkids; i++) { 1266 uint64_t guid; 1267 vdev_t *vdev; 1268 1269 rc = nvlist_find(kids, ZPOOL_CONFIG_GUID, DATA_TYPE_UINT64, 1270 NULL, &guid, NULL); 1271 if (rc != 0) 1272 break; 1273 vdev = vdev_find(guid); 1274 /* 1275 * Top level vdev is missing, create it. 1276 */ 1277 if (vdev == NULL) 1278 rc = vdev_from_nvlist(spa, guid, kids); 1279 else 1280 rc = vdev_update_from_nvlist(guid, kids); 1281 if (rc != 0) 1282 break; 1283 nvlist_next(kids); 1284 } 1285 nvlist_destroy(kids); 1286 1287 /* 1288 * Re-evaluate top-level vdev state. 
1289 */ 1290 vdev_set_state(spa->spa_root_vdev); 1291 1292 return (rc); 1293 } 1294 1295 static spa_t * 1296 spa_find_by_guid(uint64_t guid) 1297 { 1298 spa_t *spa; 1299 1300 STAILQ_FOREACH(spa, &zfs_pools, spa_link) 1301 if (spa->spa_guid == guid) 1302 return (spa); 1303 1304 return (NULL); 1305 } 1306 1307 static spa_t * 1308 spa_find_by_name(const char *name) 1309 { 1310 spa_t *spa; 1311 1312 STAILQ_FOREACH(spa, &zfs_pools, spa_link) 1313 if (strcmp(spa->spa_name, name) == 0) 1314 return (spa); 1315 1316 return (NULL); 1317 } 1318 1319 #ifdef BOOT2 1320 static spa_t * 1321 spa_get_primary(void) 1322 { 1323 1324 return (STAILQ_FIRST(&zfs_pools)); 1325 } 1326 1327 static vdev_t * 1328 spa_get_primary_vdev(const spa_t *spa) 1329 { 1330 vdev_t *vdev; 1331 vdev_t *kid; 1332 1333 if (spa == NULL) 1334 spa = spa_get_primary(); 1335 if (spa == NULL) 1336 return (NULL); 1337 vdev = spa->spa_root_vdev; 1338 if (vdev == NULL) 1339 return (NULL); 1340 for (kid = STAILQ_FIRST(&vdev->v_children); kid != NULL; 1341 kid = STAILQ_FIRST(&vdev->v_children)) 1342 vdev = kid; 1343 return (vdev); 1344 } 1345 #endif 1346 1347 static spa_t * 1348 spa_create(uint64_t guid, const char *name) 1349 { 1350 spa_t *spa; 1351 1352 if ((spa = calloc(1, sizeof(spa_t))) == NULL) 1353 return (NULL); 1354 if ((spa->spa_name = strdup(name)) == NULL) { 1355 free(spa); 1356 return (NULL); 1357 } 1358 spa->spa_guid = guid; 1359 spa->spa_root_vdev = vdev_create(guid, NULL); 1360 if (spa->spa_root_vdev == NULL) { 1361 free(spa->spa_name); 1362 free(spa); 1363 return (NULL); 1364 } 1365 spa->spa_root_vdev->v_name = strdup("root"); 1366 STAILQ_INSERT_TAIL(&zfs_pools, spa, spa_link); 1367 1368 return (spa); 1369 } 1370 1371 static const char * 1372 state_name(vdev_state_t state) 1373 { 1374 static const char *names[] = { 1375 "UNKNOWN", 1376 "CLOSED", 1377 "OFFLINE", 1378 "REMOVED", 1379 "CANT_OPEN", 1380 "FAULTED", 1381 "DEGRADED", 1382 "ONLINE" 1383 }; 1384 return (names[state]); 1385 } 1386 1387 #ifdef BOOT2 1388 1389 #define pager_printf printf 1390 1391 #else 1392 1393 static int 1394 pager_printf(const char *fmt, ...) 
1395 { 1396 char line[80]; 1397 va_list args; 1398 1399 va_start(args, fmt); 1400 vsnprintf(line, sizeof(line), fmt, args); 1401 va_end(args); 1402 return (pager_output(line)); 1403 } 1404 1405 #endif 1406 1407 #define STATUS_FORMAT " %s %s\n" 1408 1409 static int 1410 print_state(int indent, const char *name, vdev_state_t state) 1411 { 1412 int i; 1413 char buf[512]; 1414 1415 buf[0] = 0; 1416 for (i = 0; i < indent; i++) 1417 strcat(buf, " "); 1418 strcat(buf, name); 1419 return (pager_printf(STATUS_FORMAT, buf, state_name(state))); 1420 } 1421 1422 static int 1423 vdev_status(vdev_t *vdev, int indent) 1424 { 1425 vdev_t *kid; 1426 int ret; 1427 1428 if (vdev->v_islog) { 1429 (void) pager_output(" logs\n"); 1430 indent++; 1431 } 1432 1433 ret = print_state(indent, vdev->v_name, vdev->v_state); 1434 if (ret != 0) 1435 return (ret); 1436 1437 STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) { 1438 ret = vdev_status(kid, indent + 1); 1439 if (ret != 0) 1440 return (ret); 1441 } 1442 return (ret); 1443 } 1444 1445 static int 1446 spa_status(spa_t *spa) 1447 { 1448 static char bootfs[ZFS_MAXNAMELEN]; 1449 uint64_t rootid; 1450 vdev_list_t *vlist; 1451 vdev_t *vdev; 1452 int good_kids, bad_kids, degraded_kids, ret; 1453 vdev_state_t state; 1454 1455 ret = pager_printf(" pool: %s\n", spa->spa_name); 1456 if (ret != 0) 1457 return (ret); 1458 1459 if (zfs_get_root(spa, &rootid) == 0 && 1460 zfs_rlookup(spa, rootid, bootfs) == 0) { 1461 if (bootfs[0] == '\0') 1462 ret = pager_printf("bootfs: %s\n", spa->spa_name); 1463 else 1464 ret = pager_printf("bootfs: %s/%s\n", spa->spa_name, 1465 bootfs); 1466 if (ret != 0) 1467 return (ret); 1468 } 1469 ret = pager_printf("config:\n\n"); 1470 if (ret != 0) 1471 return (ret); 1472 ret = pager_printf(STATUS_FORMAT, "NAME", "STATE"); 1473 if (ret != 0) 1474 return (ret); 1475 1476 good_kids = 0; 1477 degraded_kids = 0; 1478 bad_kids = 0; 1479 vlist = &spa->spa_root_vdev->v_children; 1480 STAILQ_FOREACH(vdev, vlist, v_childlink) { 1481 if (vdev->v_state == VDEV_STATE_HEALTHY) 1482 good_kids++; 1483 else if (vdev->v_state == VDEV_STATE_DEGRADED) 1484 degraded_kids++; 1485 else 1486 bad_kids++; 1487 } 1488 1489 state = VDEV_STATE_CLOSED; 1490 if (good_kids > 0 && (degraded_kids + bad_kids) == 0) 1491 state = VDEV_STATE_HEALTHY; 1492 else if ((good_kids + degraded_kids) > 0) 1493 state = VDEV_STATE_DEGRADED; 1494 1495 ret = print_state(0, spa->spa_name, state); 1496 if (ret != 0) 1497 return (ret); 1498 1499 STAILQ_FOREACH(vdev, vlist, v_childlink) { 1500 ret = vdev_status(vdev, 1); 1501 if (ret != 0) 1502 return (ret); 1503 } 1504 return (ret); 1505 } 1506 1507 static int 1508 spa_all_status(void) 1509 { 1510 spa_t *spa; 1511 int first = 1, ret = 0; 1512 1513 STAILQ_FOREACH(spa, &zfs_pools, spa_link) { 1514 if (!first) { 1515 ret = pager_printf("\n"); 1516 if (ret != 0) 1517 return (ret); 1518 } 1519 first = 0; 1520 ret = spa_status(spa); 1521 if (ret != 0) 1522 return (ret); 1523 } 1524 return (ret); 1525 } 1526 1527 static uint64_t 1528 vdev_label_offset(uint64_t psize, int l, uint64_t offset) 1529 { 1530 uint64_t label_offset; 1531 1532 if (l < VDEV_LABELS / 2) 1533 label_offset = 0; 1534 else 1535 label_offset = psize - VDEV_LABELS * sizeof (vdev_label_t); 1536 1537 return (offset + l * sizeof (vdev_label_t) + label_offset); 1538 } 1539 1540 static int 1541 vdev_uberblock_compare(const uberblock_t *ub1, const uberblock_t *ub2) 1542 { 1543 unsigned int seq1 = 0; 1544 unsigned int seq2 = 0; 1545 int cmp = AVL_CMP(ub1->ub_txg, ub2->ub_txg); 1546 1547 if (cmp 
!= 0) 1548 return (cmp); 1549 1550 cmp = AVL_CMP(ub1->ub_timestamp, ub2->ub_timestamp); 1551 if (cmp != 0) 1552 return (cmp); 1553 1554 if (MMP_VALID(ub1) && MMP_SEQ_VALID(ub1)) 1555 seq1 = MMP_SEQ(ub1); 1556 1557 if (MMP_VALID(ub2) && MMP_SEQ_VALID(ub2)) 1558 seq2 = MMP_SEQ(ub2); 1559 1560 return (AVL_CMP(seq1, seq2)); 1561 } 1562 1563 static int 1564 uberblock_verify(uberblock_t *ub) 1565 { 1566 if (ub->ub_magic == BSWAP_64((uint64_t)UBERBLOCK_MAGIC)) { 1567 byteswap_uint64_array(ub, sizeof (uberblock_t)); 1568 } 1569 1570 if (ub->ub_magic != UBERBLOCK_MAGIC || 1571 !SPA_VERSION_IS_SUPPORTED(ub->ub_version)) 1572 return (EINVAL); 1573 1574 return (0); 1575 } 1576 1577 static int 1578 vdev_label_read(vdev_t *vd, int l, void *buf, uint64_t offset, 1579 size_t size) 1580 { 1581 blkptr_t bp; 1582 off_t off; 1583 1584 off = vdev_label_offset(vd->v_psize, l, offset); 1585 1586 BP_ZERO(&bp); 1587 BP_SET_LSIZE(&bp, size); 1588 BP_SET_PSIZE(&bp, size); 1589 BP_SET_CHECKSUM(&bp, ZIO_CHECKSUM_LABEL); 1590 BP_SET_COMPRESS(&bp, ZIO_COMPRESS_OFF); 1591 DVA_SET_OFFSET(BP_IDENTITY(&bp), off); 1592 ZIO_SET_CHECKSUM(&bp.blk_cksum, off, 0, 0, 0); 1593 1594 return (vdev_read_phys(vd, &bp, buf, off, size)); 1595 } 1596 1597 static nvlist_t * 1598 vdev_label_read_config(vdev_t *vd, uint64_t txg) 1599 { 1600 vdev_phys_t *label; 1601 uint64_t best_txg = 0; 1602 uint64_t label_txg = 0; 1603 uint64_t asize; 1604 nvlist_t *nvl = NULL, *tmp; 1605 int error; 1606 1607 label = malloc(sizeof (vdev_phys_t)); 1608 if (label == NULL) 1609 return (NULL); 1610 1611 for (int l = 0; l < VDEV_LABELS; l++) { 1612 const unsigned char *nvlist; 1613 1614 if (vdev_label_read(vd, l, label, 1615 offsetof(vdev_label_t, vl_vdev_phys), 1616 sizeof (vdev_phys_t))) 1617 continue; 1618 1619 nvlist = (const unsigned char *) label->vp_nvlist; 1620 tmp = nvlist_import(nvlist + 4, nvlist[0], nvlist[1]); 1621 if (tmp == NULL) 1622 continue; 1623 1624 error = nvlist_find(tmp, ZPOOL_CONFIG_POOL_TXG, 1625 DATA_TYPE_UINT64, NULL, &label_txg, NULL); 1626 if (error != 0 || label_txg == 0) { 1627 nvlist_destroy(nvl); 1628 nvl = tmp; 1629 goto done; 1630 } 1631 1632 if (label_txg <= txg && label_txg > best_txg) { 1633 best_txg = label_txg; 1634 nvlist_destroy(nvl); 1635 nvl = tmp; 1636 tmp = NULL; 1637 1638 /* 1639 * Use asize from pool config. We need this 1640 * because we can get bad value from BIOS. 
1641 */ 1642 if (nvlist_find(nvl, ZPOOL_CONFIG_ASIZE, 1643 DATA_TYPE_UINT64, NULL, &asize, NULL) == 0) { 1644 vd->v_psize = asize + 1645 VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE; 1646 } 1647 } 1648 nvlist_destroy(tmp); 1649 } 1650 1651 if (best_txg == 0) { 1652 nvlist_destroy(nvl); 1653 nvl = NULL; 1654 } 1655 done: 1656 free(label); 1657 return (nvl); 1658 } 1659 1660 static void 1661 vdev_uberblock_load(vdev_t *vd, uberblock_t *ub) 1662 { 1663 uberblock_t *buf; 1664 1665 buf = malloc(VDEV_UBERBLOCK_SIZE(vd)); 1666 if (buf == NULL) 1667 return; 1668 1669 for (int l = 0; l < VDEV_LABELS; l++) { 1670 for (int n = 0; n < VDEV_UBERBLOCK_COUNT(vd); n++) { 1671 if (vdev_label_read(vd, l, buf, 1672 VDEV_UBERBLOCK_OFFSET(vd, n), 1673 VDEV_UBERBLOCK_SIZE(vd))) 1674 continue; 1675 if (uberblock_verify(buf) != 0) 1676 continue; 1677 1678 if (vdev_uberblock_compare(buf, ub) > 0) 1679 *ub = *buf; 1680 } 1681 } 1682 free(buf); 1683 } 1684 1685 static int 1686 vdev_probe(vdev_phys_read_t *_read, void *read_priv, spa_t **spap) 1687 { 1688 vdev_t vtmp; 1689 spa_t *spa; 1690 vdev_t *vdev; 1691 nvlist_t *nvl; 1692 uint64_t val; 1693 uint64_t guid, vdev_children; 1694 uint64_t pool_txg, pool_guid; 1695 const char *pool_name; 1696 int rc, namelen; 1697 1698 /* 1699 * Load the vdev label and figure out which 1700 * uberblock is most current. 1701 */ 1702 memset(&vtmp, 0, sizeof(vtmp)); 1703 vtmp.v_phys_read = _read; 1704 vtmp.v_read_priv = read_priv; 1705 vtmp.v_psize = P2ALIGN(ldi_get_size(read_priv), 1706 (uint64_t)sizeof (vdev_label_t)); 1707 1708 /* Test for minimum device size. */ 1709 if (vtmp.v_psize < SPA_MINDEVSIZE) 1710 return (EIO); 1711 1712 nvl = vdev_label_read_config(&vtmp, UINT64_MAX); 1713 if (nvl == NULL) 1714 return (EIO); 1715 1716 if (nvlist_find(nvl, ZPOOL_CONFIG_VERSION, DATA_TYPE_UINT64, 1717 NULL, &val, NULL) != 0) { 1718 nvlist_destroy(nvl); 1719 return (EIO); 1720 } 1721 1722 if (!SPA_VERSION_IS_SUPPORTED(val)) { 1723 printf("ZFS: unsupported ZFS version %u (should be %u)\n", 1724 (unsigned)val, (unsigned)SPA_VERSION); 1725 nvlist_destroy(nvl); 1726 return (EIO); 1727 } 1728 1729 /* Check ZFS features for read */ 1730 rc = nvlist_check_features_for_read(nvl); 1731 if (rc != 0) { 1732 nvlist_destroy(nvl); 1733 return (EIO); 1734 } 1735 1736 if (nvlist_find(nvl, ZPOOL_CONFIG_POOL_STATE, DATA_TYPE_UINT64, 1737 NULL, &val, NULL) != 0) { 1738 nvlist_destroy(nvl); 1739 return (EIO); 1740 } 1741 1742 if (val == POOL_STATE_DESTROYED) { 1743 /* We don't boot only from destroyed pools. */ 1744 nvlist_destroy(nvl); 1745 return (EIO); 1746 } 1747 1748 if (nvlist_find(nvl, ZPOOL_CONFIG_POOL_TXG, DATA_TYPE_UINT64, 1749 NULL, &pool_txg, NULL) != 0 || 1750 nvlist_find(nvl, ZPOOL_CONFIG_POOL_GUID, DATA_TYPE_UINT64, 1751 NULL, &pool_guid, NULL) != 0 || 1752 nvlist_find(nvl, ZPOOL_CONFIG_POOL_NAME, DATA_TYPE_STRING, 1753 NULL, &pool_name, &namelen) != 0) { 1754 /* 1755 * Cache and spare devices end up here - just ignore 1756 * them. 1757 */ 1758 nvlist_destroy(nvl); 1759 return (EIO); 1760 } 1761 1762 /* 1763 * Create the pool if this is the first time we've seen it. 
1764 */ 1765 spa = spa_find_by_guid(pool_guid); 1766 if (spa == NULL) { 1767 char *name; 1768 1769 nvlist_find(nvl, ZPOOL_CONFIG_VDEV_CHILDREN, 1770 DATA_TYPE_UINT64, NULL, &vdev_children, NULL); 1771 name = malloc(namelen + 1); 1772 if (name == NULL) { 1773 nvlist_destroy(nvl); 1774 return (ENOMEM); 1775 } 1776 bcopy(pool_name, name, namelen); 1777 name[namelen] = '\0'; 1778 spa = spa_create(pool_guid, name); 1779 free(name); 1780 if (spa == NULL) { 1781 nvlist_destroy(nvl); 1782 return (ENOMEM); 1783 } 1784 spa->spa_root_vdev->v_nchildren = vdev_children; 1785 } 1786 if (pool_txg > spa->spa_txg) 1787 spa->spa_txg = pool_txg; 1788 1789 /* 1790 * Get the vdev tree and create our in-core copy of it. 1791 * If we already have a vdev with this guid, this must 1792 * be some kind of alias (overlapping slices, dangerously dedicated 1793 * disks etc). 1794 */ 1795 if (nvlist_find(nvl, ZPOOL_CONFIG_GUID, DATA_TYPE_UINT64, 1796 NULL, &guid, NULL) != 0) { 1797 nvlist_destroy(nvl); 1798 return (EIO); 1799 } 1800 vdev = vdev_find(guid); 1801 /* Has this vdev already been inited? */ 1802 if (vdev && vdev->v_phys_read) { 1803 nvlist_destroy(nvl); 1804 return (EIO); 1805 } 1806 1807 rc = vdev_init_from_label(spa, nvl); 1808 nvlist_destroy(nvl); 1809 if (rc != 0) 1810 return (rc); 1811 1812 /* 1813 * We should already have created an incomplete vdev for this 1814 * vdev. Find it and initialise it with our read proc. 1815 */ 1816 vdev = vdev_find(guid); 1817 if (vdev != NULL) { 1818 vdev->v_phys_read = _read; 1819 vdev->v_read_priv = read_priv; 1820 vdev->v_psize = vtmp.v_psize; 1821 /* 1822 * If no other state is set, mark vdev healthy. 1823 */ 1824 if (vdev->v_state == VDEV_STATE_UNKNOWN) 1825 vdev->v_state = VDEV_STATE_HEALTHY; 1826 } else { 1827 printf("ZFS: inconsistent nvlist contents\n"); 1828 return (EIO); 1829 } 1830 1831 if (vdev->v_islog) 1832 spa->spa_with_log = vdev->v_islog; 1833 1834 /* 1835 * Re-evaluate top-level vdev state. 1836 */ 1837 vdev_set_state(vdev->v_top); 1838 1839 /* 1840 * Ok, we are happy with the pool so far. Lets find 1841 * the best uberblock and then we can actually access 1842 * the contents of the pool. 1843 */ 1844 vdev_uberblock_load(vdev, &spa->spa_uberblock); 1845 1846 if (spap != NULL) 1847 *spap = spa; 1848 return (0); 1849 } 1850 1851 static int 1852 ilog2(int n) 1853 { 1854 int v; 1855 1856 for (v = 0; v < 32; v++) 1857 if (n == (1 << v)) 1858 return (v); 1859 return (-1); 1860 } 1861 1862 static int 1863 zio_read_gang(const spa_t *spa, const blkptr_t *bp, void *buf) 1864 { 1865 blkptr_t gbh_bp; 1866 zio_gbh_phys_t zio_gb; 1867 char *pbuf; 1868 int i; 1869 1870 /* Artificial BP for gang block header. */ 1871 gbh_bp = *bp; 1872 BP_SET_PSIZE(&gbh_bp, SPA_GANGBLOCKSIZE); 1873 BP_SET_LSIZE(&gbh_bp, SPA_GANGBLOCKSIZE); 1874 BP_SET_CHECKSUM(&gbh_bp, ZIO_CHECKSUM_GANG_HEADER); 1875 BP_SET_COMPRESS(&gbh_bp, ZIO_COMPRESS_OFF); 1876 for (i = 0; i < SPA_DVAS_PER_BP; i++) 1877 DVA_SET_GANG(&gbh_bp.blk_dva[i], 0); 1878 1879 /* Read gang header block using the artificial BP. 
*/ 1880 if (zio_read(spa, &gbh_bp, &zio_gb)) 1881 return (EIO); 1882 1883 pbuf = buf; 1884 for (i = 0; i < SPA_GBH_NBLKPTRS; i++) { 1885 blkptr_t *gbp = &zio_gb.zg_blkptr[i]; 1886 1887 if (BP_IS_HOLE(gbp)) 1888 continue; 1889 if (zio_read(spa, gbp, pbuf)) 1890 return (EIO); 1891 pbuf += BP_GET_PSIZE(gbp); 1892 } 1893 1894 if (zio_checksum_verify(spa, bp, buf)) 1895 return (EIO); 1896 return (0); 1897 } 1898 1899 static int 1900 zio_read(const spa_t *spa, const blkptr_t *bp, void *buf) 1901 { 1902 int cpfunc = BP_GET_COMPRESS(bp); 1903 uint64_t align, size; 1904 void *pbuf; 1905 int i, error; 1906 1907 /* 1908 * Process data embedded in block pointer 1909 */ 1910 if (BP_IS_EMBEDDED(bp)) { 1911 ASSERT(BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA); 1912 1913 size = BPE_GET_PSIZE(bp); 1914 ASSERT(size <= BPE_PAYLOAD_SIZE); 1915 1916 if (cpfunc != ZIO_COMPRESS_OFF) 1917 pbuf = malloc(size); 1918 else 1919 pbuf = buf; 1920 1921 if (pbuf == NULL) 1922 return (ENOMEM); 1923 1924 decode_embedded_bp_compressed(bp, pbuf); 1925 error = 0; 1926 1927 if (cpfunc != ZIO_COMPRESS_OFF) { 1928 error = zio_decompress_data(cpfunc, pbuf, 1929 size, buf, BP_GET_LSIZE(bp)); 1930 free(pbuf); 1931 } 1932 if (error != 0) 1933 printf("ZFS: i/o error - unable to decompress " 1934 "block pointer data, error %d\n", error); 1935 return (error); 1936 } 1937 1938 error = EIO; 1939 1940 for (i = 0; i < SPA_DVAS_PER_BP; i++) { 1941 const dva_t *dva = &bp->blk_dva[i]; 1942 vdev_t *vdev; 1943 vdev_list_t *vlist; 1944 uint64_t vdevid; 1945 off_t offset; 1946 1947 if (!dva->dva_word[0] && !dva->dva_word[1]) 1948 continue; 1949 1950 vdevid = DVA_GET_VDEV(dva); 1951 offset = DVA_GET_OFFSET(dva); 1952 vlist = &spa->spa_root_vdev->v_children; 1953 STAILQ_FOREACH(vdev, vlist, v_childlink) { 1954 if (vdev->v_id == vdevid) 1955 break; 1956 } 1957 if (!vdev || !vdev->v_read) 1958 continue; 1959 1960 size = BP_GET_PSIZE(bp); 1961 if (vdev->v_read == vdev_raidz_read) { 1962 align = 1ULL << vdev->v_ashift; 1963 if (P2PHASE(size, align) != 0) 1964 size = P2ROUNDUP(size, align); 1965 } 1966 if (size != BP_GET_PSIZE(bp) || cpfunc != ZIO_COMPRESS_OFF) 1967 pbuf = malloc(size); 1968 else 1969 pbuf = buf; 1970 1971 if (pbuf == NULL) { 1972 error = ENOMEM; 1973 break; 1974 } 1975 1976 if (DVA_GET_GANG(dva)) 1977 error = zio_read_gang(spa, bp, pbuf); 1978 else 1979 error = vdev->v_read(vdev, bp, pbuf, offset, size); 1980 if (error == 0) { 1981 if (cpfunc != ZIO_COMPRESS_OFF) 1982 error = zio_decompress_data(cpfunc, pbuf, 1983 BP_GET_PSIZE(bp), buf, BP_GET_LSIZE(bp)); 1984 else if (size != BP_GET_PSIZE(bp)) 1985 bcopy(pbuf, buf, BP_GET_PSIZE(bp)); 1986 } else { 1987 printf("zio_read error: %d\n", error); 1988 } 1989 if (buf != pbuf) 1990 free(pbuf); 1991 if (error == 0) 1992 break; 1993 } 1994 if (error != 0) 1995 printf("ZFS: i/o error - all block copies unavailable\n"); 1996 1997 return (error); 1998 } 1999 2000 static int 2001 dnode_read(const spa_t *spa, const dnode_phys_t *dnode, off_t offset, 2002 void *buf, size_t buflen) 2003 { 2004 int ibshift = dnode->dn_indblkshift - SPA_BLKPTRSHIFT; 2005 int bsize = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT; 2006 int nlevels = dnode->dn_nlevels; 2007 int i, rc; 2008 2009 if (bsize > SPA_MAXBLOCKSIZE) { 2010 printf("ZFS: I/O error - blocks larger than %llu are not " 2011 "supported\n", SPA_MAXBLOCKSIZE); 2012 return (EIO); 2013 } 2014 2015 /* 2016 * Note: bsize may not be a power of two here so we need to do an 2017 * actual divide rather than a bitshift. 
2018 */ 2019 while (buflen > 0) { 2020 uint64_t bn = offset / bsize; 2021 int boff = offset % bsize; 2022 int ibn; 2023 const blkptr_t *indbp; 2024 blkptr_t bp; 2025 2026 if (bn > dnode->dn_maxblkid) 2027 return (EIO); 2028 2029 if (dnode == dnode_cache_obj && bn == dnode_cache_bn) 2030 goto cached; 2031 2032 indbp = dnode->dn_blkptr; 2033 for (i = 0; i < nlevels; i++) { 2034 /* 2035 * Copy the bp from the indirect array so that 2036 * we can re-use the scratch buffer for multi-level 2037 * objects. 2038 */ 2039 ibn = bn >> ((nlevels - i - 1) * ibshift); 2040 ibn &= ((1 << ibshift) - 1); 2041 bp = indbp[ibn]; 2042 if (BP_IS_HOLE(&bp)) { 2043 memset(dnode_cache_buf, 0, bsize); 2044 break; 2045 } 2046 rc = zio_read(spa, &bp, dnode_cache_buf); 2047 if (rc) 2048 return (rc); 2049 indbp = (const blkptr_t *) dnode_cache_buf; 2050 } 2051 dnode_cache_obj = dnode; 2052 dnode_cache_bn = bn; 2053 cached: 2054 2055 /* 2056 * The buffer contains our data block. Copy what we 2057 * need from it and loop. 2058 */ 2059 i = bsize - boff; 2060 if (i > buflen) i = buflen; 2061 memcpy(buf, &dnode_cache_buf[boff], i); 2062 buf = ((char *)buf) + i; 2063 offset += i; 2064 buflen -= i; 2065 } 2066 2067 return (0); 2068 } 2069 2070 /* 2071 * Lookup a value in a microzap directory. 2072 */ 2073 static int 2074 mzap_lookup(const mzap_phys_t *mz, size_t size, const char *name, 2075 uint64_t *value) 2076 { 2077 const mzap_ent_phys_t *mze; 2078 int chunks, i; 2079 2080 /* 2081 * Microzap objects use exactly one block. Read the whole 2082 * thing. 2083 */ 2084 chunks = size / MZAP_ENT_LEN - 1; 2085 for (i = 0; i < chunks; i++) { 2086 mze = &mz->mz_chunk[i]; 2087 if (strcmp(mze->mze_name, name) == 0) { 2088 *value = mze->mze_value; 2089 return (0); 2090 } 2091 } 2092 2093 return (ENOENT); 2094 } 2095 2096 /* 2097 * Compare a name with a zap leaf entry. Return non-zero if the name 2098 * matches. 2099 */ 2100 static int 2101 fzap_name_equal(const zap_leaf_t *zl, const zap_leaf_chunk_t *zc, 2102 const char *name) 2103 { 2104 size_t namelen; 2105 const zap_leaf_chunk_t *nc; 2106 const char *p; 2107 2108 namelen = zc->l_entry.le_name_numints; 2109 2110 nc = &ZAP_LEAF_CHUNK(zl, zc->l_entry.le_name_chunk); 2111 p = name; 2112 while (namelen > 0) { 2113 size_t len; 2114 2115 len = namelen; 2116 if (len > ZAP_LEAF_ARRAY_BYTES) 2117 len = ZAP_LEAF_ARRAY_BYTES; 2118 if (memcmp(p, nc->l_array.la_array, len)) 2119 return (0); 2120 p += len; 2121 namelen -= len; 2122 nc = &ZAP_LEAF_CHUNK(zl, nc->l_array.la_next); 2123 } 2124 2125 return (1); 2126 } 2127 2128 /* 2129 * Extract a uint64_t value from a zap leaf entry. 2130 */ 2131 static uint64_t 2132 fzap_leaf_value(const zap_leaf_t *zl, const zap_leaf_chunk_t *zc) 2133 { 2134 const zap_leaf_chunk_t *vc; 2135 int i; 2136 uint64_t value; 2137 const uint8_t *p; 2138 2139 vc = &ZAP_LEAF_CHUNK(zl, zc->l_entry.le_value_chunk); 2140 for (i = 0, value = 0, p = vc->l_array.la_array; i < 8; i++) { 2141 value = (value << 8) | p[i]; 2142 } 2143 2144 return (value); 2145 } 2146 2147 static void 2148 stv(int len, void *addr, uint64_t value) 2149 { 2150 switch (len) { 2151 case 1: 2152 *(uint8_t *)addr = value; 2153 return; 2154 case 2: 2155 *(uint16_t *)addr = value; 2156 return; 2157 case 4: 2158 *(uint32_t *)addr = value; 2159 return; 2160 case 8: 2161 *(uint64_t *)addr = value; 2162 return; 2163 } 2164 } 2165 2166 /* 2167 * Extract a array from a zap leaf entry. 
2168 */ 2169 static void 2170 fzap_leaf_array(const zap_leaf_t *zl, const zap_leaf_chunk_t *zc, 2171 uint64_t integer_size, uint64_t num_integers, void *buf) 2172 { 2173 uint64_t array_int_len = zc->l_entry.le_value_intlen; 2174 uint64_t value = 0; 2175 uint64_t *u64 = buf; 2176 char *p = buf; 2177 int len = MIN(zc->l_entry.le_value_numints, num_integers); 2178 int chunk = zc->l_entry.le_value_chunk; 2179 int byten = 0; 2180 2181 if (integer_size == 8 && len == 1) { 2182 *u64 = fzap_leaf_value(zl, zc); 2183 return; 2184 } 2185 2186 while (len > 0) { 2187 struct zap_leaf_array *la = &ZAP_LEAF_CHUNK(zl, chunk).l_array; 2188 int i; 2189 2190 ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(zl)); 2191 for (i = 0; i < ZAP_LEAF_ARRAY_BYTES && len > 0; i++) { 2192 value = (value << 8) | la->la_array[i]; 2193 byten++; 2194 if (byten == array_int_len) { 2195 stv(integer_size, p, value); 2196 byten = 0; 2197 len--; 2198 if (len == 0) 2199 return; 2200 p += integer_size; 2201 } 2202 } 2203 chunk = la->la_next; 2204 } 2205 } 2206 2207 static int 2208 fzap_check_size(uint64_t integer_size, uint64_t num_integers) 2209 { 2210 2211 switch (integer_size) { 2212 case 1: 2213 case 2: 2214 case 4: 2215 case 8: 2216 break; 2217 default: 2218 return (EINVAL); 2219 } 2220 2221 if (integer_size * num_integers > ZAP_MAXVALUELEN) 2222 return (E2BIG); 2223 2224 return (0); 2225 } 2226 2227 static void 2228 zap_leaf_free(zap_leaf_t *leaf) 2229 { 2230 free(leaf->l_phys); 2231 free(leaf); 2232 } 2233 2234 static int 2235 zap_get_leaf_byblk(fat_zap_t *zap, uint64_t blk, zap_leaf_t **lp) 2236 { 2237 int bs = FZAP_BLOCK_SHIFT(zap); 2238 int err; 2239 2240 *lp = malloc(sizeof(**lp)); 2241 if (*lp == NULL) 2242 return (ENOMEM); 2243 2244 (*lp)->l_bs = bs; 2245 (*lp)->l_phys = malloc(1 << bs); 2246 2247 if ((*lp)->l_phys == NULL) { 2248 free(*lp); 2249 return (ENOMEM); 2250 } 2251 err = dnode_read(zap->zap_spa, zap->zap_dnode, blk << bs, (*lp)->l_phys, 2252 1 << bs); 2253 if (err != 0) { 2254 zap_leaf_free(*lp); 2255 } 2256 return (err); 2257 } 2258 2259 static int 2260 zap_table_load(fat_zap_t *zap, zap_table_phys_t *tbl, uint64_t idx, 2261 uint64_t *valp) 2262 { 2263 int bs = FZAP_BLOCK_SHIFT(zap); 2264 uint64_t blk = idx >> (bs - 3); 2265 uint64_t off = idx & ((1 << (bs - 3)) - 1); 2266 uint64_t *buf; 2267 int rc; 2268 2269 buf = malloc(1 << zap->zap_block_shift); 2270 if (buf == NULL) 2271 return (ENOMEM); 2272 rc = dnode_read(zap->zap_spa, zap->zap_dnode, (tbl->zt_blk + blk) << bs, 2273 buf, 1 << zap->zap_block_shift); 2274 if (rc == 0) 2275 *valp = buf[off]; 2276 free(buf); 2277 return (rc); 2278 } 2279 2280 static int 2281 zap_idx_to_blk(fat_zap_t *zap, uint64_t idx, uint64_t *valp) 2282 { 2283 if (zap->zap_phys->zap_ptrtbl.zt_numblks == 0) { 2284 *valp = ZAP_EMBEDDED_PTRTBL_ENT(zap, idx); 2285 return (0); 2286 } else { 2287 return (zap_table_load(zap, &zap->zap_phys->zap_ptrtbl, 2288 idx, valp)); 2289 } 2290 } 2291 2292 #define ZAP_HASH_IDX(hash, n) (((n) == 0) ? 
0 : ((hash) >> (64 - (n)))) 2293 static int 2294 zap_deref_leaf(fat_zap_t *zap, uint64_t h, zap_leaf_t **lp) 2295 { 2296 uint64_t idx, blk; 2297 int err; 2298 2299 idx = ZAP_HASH_IDX(h, zap->zap_phys->zap_ptrtbl.zt_shift); 2300 err = zap_idx_to_blk(zap, idx, &blk); 2301 if (err != 0) 2302 return (err); 2303 return (zap_get_leaf_byblk(zap, blk, lp)); 2304 } 2305 2306 #define CHAIN_END 0xffff /* end of the chunk chain */ 2307 #define LEAF_HASH(l, h) \ 2308 ((ZAP_LEAF_HASH_NUMENTRIES(l)-1) & \ 2309 ((h) >> \ 2310 (64 - ZAP_LEAF_HASH_SHIFT(l) - (l)->l_phys->l_hdr.lh_prefix_len))) 2311 #define LEAF_HASH_ENTPTR(l, h) (&(l)->l_phys->l_hash[LEAF_HASH(l, h)]) 2312 2313 static int 2314 zap_leaf_lookup(zap_leaf_t *zl, uint64_t hash, const char *name, 2315 uint64_t integer_size, uint64_t num_integers, void *value) 2316 { 2317 int rc; 2318 uint16_t *chunkp; 2319 struct zap_leaf_entry *le; 2320 2321 /* 2322 * Make sure this chunk matches our hash. 2323 */ 2324 if (zl->l_phys->l_hdr.lh_prefix_len > 0 && 2325 zl->l_phys->l_hdr.lh_prefix != 2326 hash >> (64 - zl->l_phys->l_hdr.lh_prefix_len)) 2327 return (EIO); 2328 2329 rc = ENOENT; 2330 for (chunkp = LEAF_HASH_ENTPTR(zl, hash); 2331 *chunkp != CHAIN_END; chunkp = &le->le_next) { 2332 zap_leaf_chunk_t *zc; 2333 uint16_t chunk = *chunkp; 2334 2335 le = ZAP_LEAF_ENTRY(zl, chunk); 2336 if (le->le_hash != hash) 2337 continue; 2338 zc = &ZAP_LEAF_CHUNK(zl, chunk); 2339 if (fzap_name_equal(zl, zc, name)) { 2340 if (zc->l_entry.le_value_intlen > integer_size) { 2341 rc = EINVAL; 2342 } else { 2343 fzap_leaf_array(zl, zc, integer_size, 2344 num_integers, value); 2345 rc = 0; 2346 } 2347 break; 2348 } 2349 } 2350 return (rc); 2351 } 2352 2353 /* 2354 * Lookup a value in a fatzap directory. 2355 */ 2356 static int 2357 fzap_lookup(const spa_t *spa, const dnode_phys_t *dnode, zap_phys_t *zh, 2358 const char *name, uint64_t integer_size, uint64_t num_integers, 2359 void *value) 2360 { 2361 int bsize = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT; 2362 fat_zap_t z; 2363 zap_leaf_t *zl; 2364 uint64_t hash; 2365 int rc; 2366 2367 if (zh->zap_magic != ZAP_MAGIC) 2368 return (EIO); 2369 2370 if ((rc = fzap_check_size(integer_size, num_integers)) != 0) 2371 return (rc); 2372 2373 z.zap_block_shift = ilog2(bsize); 2374 z.zap_phys = zh; 2375 z.zap_spa = spa; 2376 z.zap_dnode = dnode; 2377 2378 hash = zap_hash(zh->zap_salt, name); 2379 rc = zap_deref_leaf(&z, hash, &zl); 2380 if (rc != 0) 2381 return (rc); 2382 2383 rc = zap_leaf_lookup(zl, hash, name, integer_size, num_integers, value); 2384 2385 zap_leaf_free(zl); 2386 return (rc); 2387 } 2388 2389 /* 2390 * Lookup a name in a zap object and return its value as a uint64_t. 
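 *
 * The object's first data block is read and zap_block_type selects
 * between the microzap (ZBT_MICRO) and fatzap (ZBT_HEADER) paths.
 * A typical call, as used elsewhere in this file:
 *
 *	uint64_t obj;
 *
 *	rc = zap_lookup(spa, &dir, DMU_POOL_ROOT_DATASET,
 *	    sizeof (obj), 1, &obj);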
2391 */ 2392 static int 2393 zap_lookup(const spa_t *spa, const dnode_phys_t *dnode, const char *name, 2394 uint64_t integer_size, uint64_t num_integers, void *value) 2395 { 2396 int rc; 2397 zap_phys_t *zap; 2398 size_t size = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT; 2399 2400 zap = malloc(size); 2401 if (zap == NULL) 2402 return (ENOMEM); 2403 2404 rc = dnode_read(spa, dnode, 0, zap, size); 2405 if (rc) 2406 goto done; 2407 2408 switch (zap->zap_block_type) { 2409 case ZBT_MICRO: 2410 rc = mzap_lookup((const mzap_phys_t *)zap, size, name, value); 2411 break; 2412 case ZBT_HEADER: 2413 rc = fzap_lookup(spa, dnode, zap, name, integer_size, 2414 num_integers, value); 2415 break; 2416 default: 2417 printf("ZFS: invalid zap_type=%" PRIx64 "\n", 2418 zap->zap_block_type); 2419 rc = EIO; 2420 } 2421 done: 2422 free(zap); 2423 return (rc); 2424 } 2425 2426 /* 2427 * List a microzap directory. 2428 */ 2429 static int 2430 mzap_list(const mzap_phys_t *mz, size_t size, 2431 int (*callback)(const char *, uint64_t)) 2432 { 2433 const mzap_ent_phys_t *mze; 2434 int chunks, i, rc; 2435 2436 /* 2437 * Microzap objects use exactly one block. Read the whole 2438 * thing. 2439 */ 2440 rc = 0; 2441 chunks = size / MZAP_ENT_LEN - 1; 2442 for (i = 0; i < chunks; i++) { 2443 mze = &mz->mz_chunk[i]; 2444 if (mze->mze_name[0]) { 2445 rc = callback(mze->mze_name, mze->mze_value); 2446 if (rc != 0) 2447 break; 2448 } 2449 } 2450 2451 return (rc); 2452 } 2453 2454 /* 2455 * List a fatzap directory. 2456 */ 2457 static int 2458 fzap_list(const spa_t *spa, const dnode_phys_t *dnode, zap_phys_t *zh, 2459 int (*callback)(const char *, uint64_t)) 2460 { 2461 int bsize = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT; 2462 fat_zap_t z; 2463 uint64_t i; 2464 int j, rc; 2465 2466 if (zh->zap_magic != ZAP_MAGIC) 2467 return (EIO); 2468 2469 z.zap_block_shift = ilog2(bsize); 2470 z.zap_phys = zh; 2471 2472 /* 2473 * This assumes that the leaf blocks start at block 1. The 2474 * documentation isn't exactly clear on this. 2475 */ 2476 zap_leaf_t zl; 2477 zl.l_bs = z.zap_block_shift; 2478 zl.l_phys = malloc(bsize); 2479 if (zl.l_phys == NULL) 2480 return (ENOMEM); 2481 2482 for (i = 0; i < zh->zap_num_leafs; i++) { 2483 off_t off = ((off_t)(i + 1)) << zl.l_bs; 2484 char name[256], *p; 2485 uint64_t value; 2486 2487 if (dnode_read(spa, dnode, off, zl.l_phys, bsize)) { 2488 free(zl.l_phys); 2489 return (EIO); 2490 } 2491 2492 for (j = 0; j < ZAP_LEAF_NUMCHUNKS(&zl); j++) { 2493 zap_leaf_chunk_t *zc, *nc; 2494 int namelen; 2495 2496 zc = &ZAP_LEAF_CHUNK(&zl, j); 2497 if (zc->l_entry.le_type != ZAP_CHUNK_ENTRY) 2498 continue; 2499 namelen = zc->l_entry.le_name_numints; 2500 if (namelen > sizeof(name)) 2501 namelen = sizeof(name); 2502 2503 /* 2504 * Paste the name back together. 2505 */ 2506 nc = &ZAP_LEAF_CHUNK(&zl, zc->l_entry.le_name_chunk); 2507 p = name; 2508 while (namelen > 0) { 2509 int len; 2510 len = namelen; 2511 if (len > ZAP_LEAF_ARRAY_BYTES) 2512 len = ZAP_LEAF_ARRAY_BYTES; 2513 memcpy(p, nc->l_array.la_array, len); 2514 p += len; 2515 namelen -= len; 2516 nc = &ZAP_LEAF_CHUNK(&zl, nc->l_array.la_next); 2517 } 2518 2519 /* 2520 * Assume the first eight bytes of the value are 2521 * a uint64_t. 
2522 */ 2523 value = fzap_leaf_value(&zl, zc); 2524 2525 /* printf("%s 0x%jx\n", name, (uintmax_t)value); */ 2526 rc = callback((const char *)name, value); 2527 if (rc != 0) { 2528 free(zl.l_phys); 2529 return (rc); 2530 } 2531 } 2532 } 2533 2534 free(zl.l_phys); 2535 return (0); 2536 } 2537 2538 static int zfs_printf(const char *name, uint64_t value __unused) 2539 { 2540 2541 printf("%s\n", name); 2542 2543 return (0); 2544 } 2545 2546 /* 2547 * List a zap directory. 2548 */ 2549 static int 2550 zap_list(const spa_t *spa, const dnode_phys_t *dnode) 2551 { 2552 zap_phys_t *zap; 2553 size_t size = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT; 2554 int rc; 2555 2556 zap = malloc(size); 2557 if (zap == NULL) 2558 return (ENOMEM); 2559 2560 rc = dnode_read(spa, dnode, 0, zap, size); 2561 if (rc == 0) { 2562 if (zap->zap_block_type == ZBT_MICRO) 2563 rc = mzap_list((const mzap_phys_t *)zap, size, 2564 zfs_printf); 2565 else 2566 rc = fzap_list(spa, dnode, zap, zfs_printf); 2567 } 2568 free(zap); 2569 return (rc); 2570 } 2571 2572 static int 2573 objset_get_dnode(const spa_t *spa, const objset_phys_t *os, uint64_t objnum, 2574 dnode_phys_t *dnode) 2575 { 2576 off_t offset; 2577 2578 offset = objnum * sizeof(dnode_phys_t); 2579 return dnode_read(spa, &os->os_meta_dnode, offset, 2580 dnode, sizeof(dnode_phys_t)); 2581 } 2582 2583 /* 2584 * Lookup a name in a microzap directory. 2585 */ 2586 static int 2587 mzap_rlookup(const mzap_phys_t *mz, size_t size, char *name, uint64_t value) 2588 { 2589 const mzap_ent_phys_t *mze; 2590 int chunks, i; 2591 2592 /* 2593 * Microzap objects use exactly one block. Read the whole 2594 * thing. 2595 */ 2596 chunks = size / MZAP_ENT_LEN - 1; 2597 for (i = 0; i < chunks; i++) { 2598 mze = &mz->mz_chunk[i]; 2599 if (value == mze->mze_value) { 2600 strcpy(name, mze->mze_name); 2601 return (0); 2602 } 2603 } 2604 2605 return (ENOENT); 2606 } 2607 2608 static void 2609 fzap_name_copy(const zap_leaf_t *zl, const zap_leaf_chunk_t *zc, char *name) 2610 { 2611 size_t namelen; 2612 const zap_leaf_chunk_t *nc; 2613 char *p; 2614 2615 namelen = zc->l_entry.le_name_numints; 2616 2617 nc = &ZAP_LEAF_CHUNK(zl, zc->l_entry.le_name_chunk); 2618 p = name; 2619 while (namelen > 0) { 2620 size_t len; 2621 len = namelen; 2622 if (len > ZAP_LEAF_ARRAY_BYTES) 2623 len = ZAP_LEAF_ARRAY_BYTES; 2624 memcpy(p, nc->l_array.la_array, len); 2625 p += len; 2626 namelen -= len; 2627 nc = &ZAP_LEAF_CHUNK(zl, nc->l_array.la_next); 2628 } 2629 2630 *p = '\0'; 2631 } 2632 2633 static int 2634 fzap_rlookup(const spa_t *spa, const dnode_phys_t *dnode, zap_phys_t *zh, 2635 char *name, uint64_t value) 2636 { 2637 int bsize = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT; 2638 fat_zap_t z; 2639 uint64_t i; 2640 int j, rc; 2641 2642 if (zh->zap_magic != ZAP_MAGIC) 2643 return (EIO); 2644 2645 z.zap_block_shift = ilog2(bsize); 2646 z.zap_phys = zh; 2647 2648 /* 2649 * This assumes that the leaf blocks start at block 1. The 2650 * documentation isn't exactly clear on this. 
2651 */ 2652 zap_leaf_t zl; 2653 zl.l_bs = z.zap_block_shift; 2654 zl.l_phys = malloc(bsize); 2655 if (zl.l_phys == NULL) 2656 return (ENOMEM); 2657 2658 for (i = 0; i < zh->zap_num_leafs; i++) { 2659 off_t off = ((off_t)(i + 1)) << zl.l_bs; 2660 2661 rc = dnode_read(spa, dnode, off, zl.l_phys, bsize); 2662 if (rc != 0) 2663 goto done; 2664 2665 for (j = 0; j < ZAP_LEAF_NUMCHUNKS(&zl); j++) { 2666 zap_leaf_chunk_t *zc; 2667 2668 zc = &ZAP_LEAF_CHUNK(&zl, j); 2669 if (zc->l_entry.le_type != ZAP_CHUNK_ENTRY) 2670 continue; 2671 if (zc->l_entry.le_value_intlen != 8 || 2672 zc->l_entry.le_value_numints != 1) 2673 continue; 2674 2675 if (fzap_leaf_value(&zl, zc) == value) { 2676 fzap_name_copy(&zl, zc, name); 2677 goto done; 2678 } 2679 } 2680 } 2681 2682 rc = ENOENT; 2683 done: 2684 free(zl.l_phys); 2685 return (rc); 2686 } 2687 2688 static int 2689 zap_rlookup(const spa_t *spa, const dnode_phys_t *dnode, char *name, 2690 uint64_t value) 2691 { 2692 zap_phys_t *zap; 2693 size_t size = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT; 2694 int rc; 2695 2696 zap = malloc(size); 2697 if (zap == NULL) 2698 return (ENOMEM); 2699 2700 rc = dnode_read(spa, dnode, 0, zap, size); 2701 if (rc == 0) { 2702 if (zap->zap_block_type == ZBT_MICRO) 2703 rc = mzap_rlookup((const mzap_phys_t *)zap, size, 2704 name, value); 2705 else 2706 rc = fzap_rlookup(spa, dnode, zap, name, value); 2707 } 2708 free(zap); 2709 return (rc); 2710 } 2711 2712 static int 2713 zfs_rlookup(const spa_t *spa, uint64_t objnum, char *result) 2714 { 2715 char name[256]; 2716 char component[256]; 2717 uint64_t dir_obj, parent_obj, child_dir_zapobj; 2718 dnode_phys_t child_dir_zap, dataset, dir, parent; 2719 dsl_dir_phys_t *dd; 2720 dsl_dataset_phys_t *ds; 2721 char *p; 2722 int len; 2723 2724 p = &name[sizeof(name) - 1]; 2725 *p = '\0'; 2726 2727 if (objset_get_dnode(spa, &spa->spa_mos, objnum, &dataset)) { 2728 printf("ZFS: can't find dataset %ju\n", (uintmax_t)objnum); 2729 return (EIO); 2730 } 2731 ds = (dsl_dataset_phys_t *)&dataset.dn_bonus; 2732 dir_obj = ds->ds_dir_obj; 2733 2734 for (;;) { 2735 if (objset_get_dnode(spa, &spa->spa_mos, dir_obj, &dir) != 0) 2736 return (EIO); 2737 dd = (dsl_dir_phys_t *)&dir.dn_bonus; 2738 2739 /* Actual loop condition. */ 2740 parent_obj = dd->dd_parent_obj; 2741 if (parent_obj == 0) 2742 break; 2743 2744 if (objset_get_dnode(spa, &spa->spa_mos, parent_obj, 2745 &parent) != 0) 2746 return (EIO); 2747 dd = (dsl_dir_phys_t *)&parent.dn_bonus; 2748 child_dir_zapobj = dd->dd_child_dir_zapobj; 2749 if (objset_get_dnode(spa, &spa->spa_mos, child_dir_zapobj, 2750 &child_dir_zap) != 0) 2751 return (EIO); 2752 if (zap_rlookup(spa, &child_dir_zap, component, dir_obj) != 0) 2753 return (EIO); 2754 2755 len = strlen(component); 2756 p -= len; 2757 memcpy(p, component, len); 2758 --p; 2759 *p = '/'; 2760 2761 /* Actual loop iteration. 
*/ 2762 dir_obj = parent_obj; 2763 } 2764 2765 if (*p != '\0') 2766 ++p; 2767 strcpy(result, p); 2768 2769 return (0); 2770 } 2771 2772 static int 2773 zfs_lookup_dataset(const spa_t *spa, const char *name, uint64_t *objnum) 2774 { 2775 char element[256]; 2776 uint64_t dir_obj, child_dir_zapobj; 2777 dnode_phys_t child_dir_zap, dir; 2778 dsl_dir_phys_t *dd; 2779 const char *p, *q; 2780 2781 if (objset_get_dnode(spa, &spa->spa_mos, 2782 DMU_POOL_DIRECTORY_OBJECT, &dir)) 2783 return (EIO); 2784 if (zap_lookup(spa, &dir, DMU_POOL_ROOT_DATASET, sizeof (dir_obj), 2785 1, &dir_obj)) 2786 return (EIO); 2787 2788 p = name; 2789 for (;;) { 2790 if (objset_get_dnode(spa, &spa->spa_mos, dir_obj, &dir)) 2791 return (EIO); 2792 dd = (dsl_dir_phys_t *)&dir.dn_bonus; 2793 2794 while (*p == '/') 2795 p++; 2796 /* Actual loop condition #1. */ 2797 if (*p == '\0') 2798 break; 2799 2800 q = strchr(p, '/'); 2801 if (q) { 2802 memcpy(element, p, q - p); 2803 element[q - p] = '\0'; 2804 p = q + 1; 2805 } else { 2806 strcpy(element, p); 2807 p += strlen(p); 2808 } 2809 2810 child_dir_zapobj = dd->dd_child_dir_zapobj; 2811 if (objset_get_dnode(spa, &spa->spa_mos, child_dir_zapobj, 2812 &child_dir_zap) != 0) 2813 return (EIO); 2814 2815 /* Actual loop condition #2. */ 2816 if (zap_lookup(spa, &child_dir_zap, element, sizeof (dir_obj), 2817 1, &dir_obj) != 0) 2818 return (ENOENT); 2819 } 2820 2821 *objnum = dd->dd_head_dataset_obj; 2822 return (0); 2823 } 2824 2825 #ifndef BOOT2 2826 static int 2827 zfs_list_dataset(const spa_t *spa, uint64_t objnum/*, int pos, char *entry*/) 2828 { 2829 uint64_t dir_obj, child_dir_zapobj; 2830 dnode_phys_t child_dir_zap, dir, dataset; 2831 dsl_dataset_phys_t *ds; 2832 dsl_dir_phys_t *dd; 2833 2834 if (objset_get_dnode(spa, &spa->spa_mos, objnum, &dataset)) { 2835 printf("ZFS: can't find dataset %ju\n", (uintmax_t)objnum); 2836 return (EIO); 2837 } 2838 ds = (dsl_dataset_phys_t *)&dataset.dn_bonus; 2839 dir_obj = ds->ds_dir_obj; 2840 2841 if (objset_get_dnode(spa, &spa->spa_mos, dir_obj, &dir)) { 2842 printf("ZFS: can't find dirobj %ju\n", (uintmax_t)dir_obj); 2843 return (EIO); 2844 } 2845 dd = (dsl_dir_phys_t *)&dir.dn_bonus; 2846 2847 child_dir_zapobj = dd->dd_child_dir_zapobj; 2848 if (objset_get_dnode(spa, &spa->spa_mos, child_dir_zapobj, 2849 &child_dir_zap) != 0) { 2850 printf("ZFS: can't find child zap %ju\n", (uintmax_t)dir_obj); 2851 return (EIO); 2852 } 2853 2854 return (zap_list(spa, &child_dir_zap) != 0); 2855 } 2856 2857 int 2858 zfs_callback_dataset(const spa_t *spa, uint64_t objnum, 2859 int (*callback)(const char *, uint64_t)) 2860 { 2861 uint64_t dir_obj, child_dir_zapobj; 2862 dnode_phys_t child_dir_zap, dir, dataset; 2863 dsl_dataset_phys_t *ds; 2864 dsl_dir_phys_t *dd; 2865 zap_phys_t *zap; 2866 size_t size; 2867 int err; 2868 2869 err = objset_get_dnode(spa, &spa->spa_mos, objnum, &dataset); 2870 if (err != 0) { 2871 printf("ZFS: can't find dataset %ju\n", (uintmax_t)objnum); 2872 return (err); 2873 } 2874 ds = (dsl_dataset_phys_t *)&dataset.dn_bonus; 2875 dir_obj = ds->ds_dir_obj; 2876 2877 err = objset_get_dnode(spa, &spa->spa_mos, dir_obj, &dir); 2878 if (err != 0) { 2879 printf("ZFS: can't find dirobj %ju\n", (uintmax_t)dir_obj); 2880 return (err); 2881 } 2882 dd = (dsl_dir_phys_t *)&dir.dn_bonus; 2883 2884 child_dir_zapobj = dd->dd_child_dir_zapobj; 2885 err = objset_get_dnode(spa, &spa->spa_mos, child_dir_zapobj, 2886 &child_dir_zap); 2887 if (err != 0) { 2888 printf("ZFS: can't find child zap %ju\n", (uintmax_t)dir_obj); 2889 return (err); 2890 } 2891 
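	/*
	 * Read the child directory ZAP into a temporary buffer and hand
	 * each name/value pair to the callback.
	 */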
2892 size = child_dir_zap.dn_datablkszsec << SPA_MINBLOCKSHIFT; 2893 zap = malloc(size); 2894 if (zap != NULL) { 2895 err = dnode_read(spa, &child_dir_zap, 0, zap, size); 2896 if (err != 0) 2897 goto done; 2898 2899 if (zap->zap_block_type == ZBT_MICRO) 2900 err = mzap_list((const mzap_phys_t *)zap, size, 2901 callback); 2902 else 2903 err = fzap_list(spa, &child_dir_zap, zap, callback); 2904 } else { 2905 err = ENOMEM; 2906 } 2907 done: 2908 free(zap); 2909 return (err); 2910 } 2911 #endif 2912 2913 /* 2914 * Find the object set given the object number of its dataset object 2915 * and return its details in *objset 2916 */ 2917 static int 2918 zfs_mount_dataset(const spa_t *spa, uint64_t objnum, objset_phys_t *objset) 2919 { 2920 dnode_phys_t dataset; 2921 dsl_dataset_phys_t *ds; 2922 2923 if (objset_get_dnode(spa, &spa->spa_mos, objnum, &dataset)) { 2924 printf("ZFS: can't find dataset %ju\n", (uintmax_t)objnum); 2925 return (EIO); 2926 } 2927 2928 ds = (dsl_dataset_phys_t *)&dataset.dn_bonus; 2929 if (zio_read(spa, &ds->ds_bp, objset)) { 2930 printf("ZFS: can't read object set for dataset %ju\n", 2931 (uintmax_t)objnum); 2932 return (EIO); 2933 } 2934 2935 return (0); 2936 } 2937 2938 /* 2939 * Find the object set pointed to by the BOOTFS property or the root 2940 * dataset if there is none and return its details in *objset 2941 */ 2942 static int 2943 zfs_get_root(const spa_t *spa, uint64_t *objid) 2944 { 2945 dnode_phys_t dir, propdir; 2946 uint64_t props, bootfs, root; 2947 2948 *objid = 0; 2949 2950 /* 2951 * Start with the MOS directory object. 2952 */ 2953 if (objset_get_dnode(spa, &spa->spa_mos, 2954 DMU_POOL_DIRECTORY_OBJECT, &dir)) { 2955 printf("ZFS: can't read MOS object directory\n"); 2956 return (EIO); 2957 } 2958 2959 /* 2960 * Lookup the pool_props and see if we can find a bootfs. 2961 */ 2962 if (zap_lookup(spa, &dir, DMU_POOL_PROPS, 2963 sizeof(props), 1, &props) == 0 && 2964 objset_get_dnode(spa, &spa->spa_mos, props, &propdir) == 0 && 2965 zap_lookup(spa, &propdir, "bootfs", 2966 sizeof(bootfs), 1, &bootfs) == 0 && bootfs != 0) { 2967 *objid = bootfs; 2968 return (0); 2969 } 2970 /* 2971 * Lookup the root dataset directory 2972 */ 2973 if (zap_lookup(spa, &dir, DMU_POOL_ROOT_DATASET, 2974 sizeof(root), 1, &root) || 2975 objset_get_dnode(spa, &spa->spa_mos, root, &dir)) { 2976 printf("ZFS: can't find root dsl_dir\n"); 2977 return (EIO); 2978 } 2979 2980 /* 2981 * Use the information from the dataset directory's bonus buffer 2982 * to find the dataset object and from that the object set itself. 2983 */ 2984 dsl_dir_phys_t *dd = (dsl_dir_phys_t *)&dir.dn_bonus; 2985 *objid = dd->dd_head_dataset_obj; 2986 return (0); 2987 } 2988 2989 static int 2990 zfs_mount(const spa_t *spa, uint64_t rootobj, struct zfsmount *mount) 2991 { 2992 2993 mount->spa = spa; 2994 2995 /* 2996 * Find the root object set if not explicitly provided 2997 */ 2998 if (rootobj == 0 && zfs_get_root(spa, &rootobj)) { 2999 printf("ZFS: can't find root filesystem\n"); 3000 return (EIO); 3001 } 3002 3003 if (zfs_mount_dataset(spa, rootobj, &mount->objset)) { 3004 printf("ZFS: can't open root filesystem\n"); 3005 return (EIO); 3006 } 3007 3008 mount->rootobj = rootobj; 3009 3010 return (0); 3011 } 3012 3013 /* 3014 * callback function for feature name checks. 
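 * Returns 0 for inactive features (value == 0), empty names, and
 * names present in features_for_read[]; any other active feature is
 * reported and EIO is returned.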
3015 */ 3016 static int 3017 check_feature(const char *name, uint64_t value) 3018 { 3019 int i; 3020 3021 if (value == 0) 3022 return (0); 3023 if (name[0] == '\0') 3024 return (0); 3025 3026 for (i = 0; features_for_read[i] != NULL; i++) { 3027 if (strcmp(name, features_for_read[i]) == 0) 3028 return (0); 3029 } 3030 printf("ZFS: unsupported feature: %s\n", name); 3031 return (EIO); 3032 } 3033 3034 /* 3035 * Checks whether the MOS features that are active are supported. 3036 */ 3037 static int 3038 check_mos_features(const spa_t *spa) 3039 { 3040 dnode_phys_t dir; 3041 zap_phys_t *zap; 3042 uint64_t objnum; 3043 size_t size; 3044 int rc; 3045 3046 if ((rc = objset_get_dnode(spa, &spa->spa_mos, DMU_OT_OBJECT_DIRECTORY, 3047 &dir)) != 0) 3048 return (rc); 3049 if ((rc = zap_lookup(spa, &dir, DMU_POOL_FEATURES_FOR_READ, 3050 sizeof (objnum), 1, &objnum)) != 0) { 3051 /* 3052 * It is older pool without features. As we have already 3053 * tested the label, just return without raising the error. 3054 */ 3055 return (0); 3056 } 3057 3058 if ((rc = objset_get_dnode(spa, &spa->spa_mos, objnum, &dir)) != 0) 3059 return (rc); 3060 3061 if (dir.dn_type != DMU_OTN_ZAP_METADATA) 3062 return (EIO); 3063 3064 size = dir.dn_datablkszsec << SPA_MINBLOCKSHIFT; 3065 zap = malloc(size); 3066 if (zap == NULL) 3067 return (ENOMEM); 3068 3069 if (dnode_read(spa, &dir, 0, zap, size)) { 3070 free(zap); 3071 return (EIO); 3072 } 3073 3074 if (zap->zap_block_type == ZBT_MICRO) 3075 rc = mzap_list((const mzap_phys_t *)zap, size, check_feature); 3076 else 3077 rc = fzap_list(spa, &dir, zap, check_feature); 3078 3079 free(zap); 3080 return (rc); 3081 } 3082 3083 static int 3084 load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value) 3085 { 3086 dnode_phys_t dir; 3087 size_t size; 3088 int rc; 3089 unsigned char *nv; 3090 3091 *value = NULL; 3092 if ((rc = objset_get_dnode(spa, &spa->spa_mos, obj, &dir)) != 0) 3093 return (rc); 3094 if (dir.dn_type != DMU_OT_PACKED_NVLIST && 3095 dir.dn_bonustype != DMU_OT_PACKED_NVLIST_SIZE) { 3096 return (EIO); 3097 } 3098 3099 if (dir.dn_bonuslen != sizeof (uint64_t)) 3100 return (EIO); 3101 3102 size = *(uint64_t *)DN_BONUS(&dir); 3103 nv = malloc(size); 3104 if (nv == NULL) 3105 return (ENOMEM); 3106 3107 rc = dnode_read(spa, &dir, 0, nv, size); 3108 if (rc != 0) { 3109 free(nv); 3110 nv = NULL; 3111 return (rc); 3112 } 3113 *value = nvlist_import(nv + 4, nv[0], nv[1]); 3114 free(nv); 3115 return (rc); 3116 } 3117 3118 static int 3119 zfs_spa_init(spa_t *spa) 3120 { 3121 dnode_phys_t dir; 3122 uint64_t config_object; 3123 nvlist_t *nvlist; 3124 int rc; 3125 3126 if (zio_read(spa, &spa->spa_uberblock.ub_rootbp, &spa->spa_mos)) { 3127 printf("ZFS: can't read MOS of pool %s\n", spa->spa_name); 3128 return (EIO); 3129 } 3130 if (spa->spa_mos.os_type != DMU_OST_META) { 3131 printf("ZFS: corrupted MOS of pool %s\n", spa->spa_name); 3132 return (EIO); 3133 } 3134 3135 if (objset_get_dnode(spa, &spa->spa_mos, DMU_POOL_DIRECTORY_OBJECT, 3136 &dir)) { 3137 printf("ZFS: failed to read pool %s directory object\n", 3138 spa->spa_name); 3139 return (EIO); 3140 } 3141 /* this is allowed to fail, older pools do not have salt */ 3142 rc = zap_lookup(spa, &dir, DMU_POOL_CHECKSUM_SALT, 1, 3143 sizeof (spa->spa_cksum_salt.zcs_bytes), 3144 spa->spa_cksum_salt.zcs_bytes); 3145 3146 rc = check_mos_features(spa); 3147 if (rc != 0) { 3148 printf("ZFS: pool %s is not supported\n", spa->spa_name); 3149 return (rc); 3150 } 3151 3152 rc = zap_lookup(spa, &dir, DMU_POOL_CONFIG, 3153 sizeof (config_object), 1, 
&config_object); 3154 if (rc != 0) { 3155 printf("ZFS: can not read MOS %s\n", DMU_POOL_CONFIG); 3156 return (EIO); 3157 } 3158 rc = load_nvlist(spa, config_object, &nvlist); 3159 if (rc != 0) 3160 return (rc); 3161 /* 3162 * Update vdevs from MOS config. Note, we do skip encoding bytes 3163 * here. See also vdev_label_read_config(). 3164 */ 3165 rc = vdev_init_from_nvlist(spa, nvlist); 3166 nvlist_destroy(nvlist); 3167 return (rc); 3168 } 3169 3170 static int 3171 zfs_dnode_stat(const spa_t *spa, dnode_phys_t *dn, struct stat *sb) 3172 { 3173 3174 if (dn->dn_bonustype != DMU_OT_SA) { 3175 znode_phys_t *zp = (znode_phys_t *)dn->dn_bonus; 3176 3177 sb->st_mode = zp->zp_mode; 3178 sb->st_uid = zp->zp_uid; 3179 sb->st_gid = zp->zp_gid; 3180 sb->st_size = zp->zp_size; 3181 } else { 3182 sa_hdr_phys_t *sahdrp; 3183 int hdrsize; 3184 size_t size = 0; 3185 void *buf = NULL; 3186 3187 if (dn->dn_bonuslen != 0) 3188 sahdrp = (sa_hdr_phys_t *)DN_BONUS(dn); 3189 else { 3190 if ((dn->dn_flags & DNODE_FLAG_SPILL_BLKPTR) != 0) { 3191 blkptr_t *bp = DN_SPILL_BLKPTR(dn); 3192 int error; 3193 3194 size = BP_GET_LSIZE(bp); 3195 buf = malloc(size); 3196 if (buf == NULL) 3197 error = ENOMEM; 3198 else 3199 error = zio_read(spa, bp, buf); 3200 3201 if (error != 0) { 3202 free(buf); 3203 return (error); 3204 } 3205 sahdrp = buf; 3206 } else { 3207 return (EIO); 3208 } 3209 } 3210 hdrsize = SA_HDR_SIZE(sahdrp); 3211 sb->st_mode = *(uint64_t *)((char *)sahdrp + hdrsize + 3212 SA_MODE_OFFSET); 3213 sb->st_uid = *(uint64_t *)((char *)sahdrp + hdrsize + 3214 SA_UID_OFFSET); 3215 sb->st_gid = *(uint64_t *)((char *)sahdrp + hdrsize + 3216 SA_GID_OFFSET); 3217 sb->st_size = *(uint64_t *)((char *)sahdrp + hdrsize + 3218 SA_SIZE_OFFSET); 3219 free(buf); 3220 } 3221 3222 return (0); 3223 } 3224 3225 static int 3226 zfs_dnode_readlink(const spa_t *spa, dnode_phys_t *dn, char *path, size_t psize) 3227 { 3228 int rc = 0; 3229 3230 if (dn->dn_bonustype == DMU_OT_SA) { 3231 sa_hdr_phys_t *sahdrp = NULL; 3232 size_t size = 0; 3233 void *buf = NULL; 3234 int hdrsize; 3235 char *p; 3236 3237 if (dn->dn_bonuslen != 0) { 3238 sahdrp = (sa_hdr_phys_t *)DN_BONUS(dn); 3239 } else { 3240 blkptr_t *bp; 3241 3242 if ((dn->dn_flags & DNODE_FLAG_SPILL_BLKPTR) == 0) 3243 return (EIO); 3244 bp = DN_SPILL_BLKPTR(dn); 3245 3246 size = BP_GET_LSIZE(bp); 3247 buf = malloc(size); 3248 if (buf == NULL) 3249 rc = ENOMEM; 3250 else 3251 rc = zio_read(spa, bp, buf); 3252 if (rc != 0) { 3253 free(buf); 3254 return (rc); 3255 } 3256 sahdrp = buf; 3257 } 3258 hdrsize = SA_HDR_SIZE(sahdrp); 3259 p = (char *)((uintptr_t)sahdrp + hdrsize + SA_SYMLINK_OFFSET); 3260 memcpy(path, p, psize); 3261 free(buf); 3262 return (0); 3263 } 3264 /* 3265 * Second test is purely to silence bogus compiler 3266 * warning about accessing past the end of dn_bonus. 3267 */ 3268 if (psize + sizeof(znode_phys_t) <= dn->dn_bonuslen && 3269 sizeof(znode_phys_t) <= sizeof(dn->dn_bonus)) { 3270 memcpy(path, &dn->dn_bonus[sizeof(znode_phys_t)], psize); 3271 } else { 3272 rc = dnode_read(spa, dn, 0, path, psize); 3273 } 3274 return (rc); 3275 } 3276 3277 struct obj_list { 3278 uint64_t objnum; 3279 STAILQ_ENTRY(obj_list) entry; 3280 }; 3281 3282 /* 3283 * Lookup a file and return its dnode. 
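 *
 * The path is walked one component at a time from the filesystem
 * root; "." and ".." are resolved against a small stack of visited
 * directories, and up to 10 levels of symlinks are followed before
 * the lookup fails with EMLINK.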
3284 */ 3285 static int 3286 zfs_lookup(const struct zfsmount *mount, const char *upath, dnode_phys_t *dnode) 3287 { 3288 int rc; 3289 uint64_t objnum; 3290 const spa_t *spa; 3291 dnode_phys_t dn; 3292 const char *p, *q; 3293 char element[256]; 3294 char path[1024]; 3295 int symlinks_followed = 0; 3296 struct stat sb; 3297 struct obj_list *entry, *tentry; 3298 STAILQ_HEAD(, obj_list) on_cache = STAILQ_HEAD_INITIALIZER(on_cache); 3299 3300 spa = mount->spa; 3301 if (mount->objset.os_type != DMU_OST_ZFS) { 3302 printf("ZFS: unexpected object set type %ju\n", 3303 (uintmax_t)mount->objset.os_type); 3304 return (EIO); 3305 } 3306 3307 if ((entry = malloc(sizeof(struct obj_list))) == NULL) 3308 return (ENOMEM); 3309 3310 /* 3311 * Get the root directory dnode. 3312 */ 3313 rc = objset_get_dnode(spa, &mount->objset, MASTER_NODE_OBJ, &dn); 3314 if (rc) { 3315 free(entry); 3316 return (rc); 3317 } 3318 3319 rc = zap_lookup(spa, &dn, ZFS_ROOT_OBJ, sizeof(objnum), 1, &objnum); 3320 if (rc) { 3321 free(entry); 3322 return (rc); 3323 } 3324 entry->objnum = objnum; 3325 STAILQ_INSERT_HEAD(&on_cache, entry, entry); 3326 3327 rc = objset_get_dnode(spa, &mount->objset, objnum, &dn); 3328 if (rc != 0) 3329 goto done; 3330 3331 p = upath; 3332 while (p && *p) { 3333 rc = objset_get_dnode(spa, &mount->objset, objnum, &dn); 3334 if (rc != 0) 3335 goto done; 3336 3337 while (*p == '/') 3338 p++; 3339 if (*p == '\0') 3340 break; 3341 q = p; 3342 while (*q != '\0' && *q != '/') 3343 q++; 3344 3345 /* skip dot */ 3346 if (p + 1 == q && p[0] == '.') { 3347 p++; 3348 continue; 3349 } 3350 /* double dot */ 3351 if (p + 2 == q && p[0] == '.' && p[1] == '.') { 3352 p += 2; 3353 if (STAILQ_FIRST(&on_cache) == 3354 STAILQ_LAST(&on_cache, obj_list, entry)) { 3355 rc = ENOENT; 3356 goto done; 3357 } 3358 entry = STAILQ_FIRST(&on_cache); 3359 STAILQ_REMOVE_HEAD(&on_cache, entry); 3360 free(entry); 3361 objnum = (STAILQ_FIRST(&on_cache))->objnum; 3362 continue; 3363 } 3364 if (q - p + 1 > sizeof(element)) { 3365 rc = ENAMETOOLONG; 3366 goto done; 3367 } 3368 memcpy(element, p, q - p); 3369 element[q - p] = 0; 3370 p = q; 3371 3372 if ((rc = zfs_dnode_stat(spa, &dn, &sb)) != 0) 3373 goto done; 3374 if (!S_ISDIR(sb.st_mode)) { 3375 rc = ENOTDIR; 3376 goto done; 3377 } 3378 3379 rc = zap_lookup(spa, &dn, element, sizeof (objnum), 1, &objnum); 3380 if (rc) 3381 goto done; 3382 objnum = ZFS_DIRENT_OBJ(objnum); 3383 3384 if ((entry = malloc(sizeof(struct obj_list))) == NULL) { 3385 rc = ENOMEM; 3386 goto done; 3387 } 3388 entry->objnum = objnum; 3389 STAILQ_INSERT_HEAD(&on_cache, entry, entry); 3390 rc = objset_get_dnode(spa, &mount->objset, objnum, &dn); 3391 if (rc) 3392 goto done; 3393 3394 /* 3395 * Check for symlink. 3396 */ 3397 rc = zfs_dnode_stat(spa, &dn, &sb); 3398 if (rc) 3399 goto done; 3400 if (S_ISLNK(sb.st_mode)) { 3401 if (symlinks_followed > 10) { 3402 rc = EMLINK; 3403 goto done; 3404 } 3405 symlinks_followed++; 3406 3407 /* 3408 * Read the link value and copy the tail of our 3409 * current path onto the end. 3410 */ 3411 if (sb.st_size + strlen(p) + 1 > sizeof(path)) { 3412 rc = ENAMETOOLONG; 3413 goto done; 3414 } 3415 strcpy(&path[sb.st_size], p); 3416 3417 rc = zfs_dnode_readlink(spa, &dn, path, sb.st_size); 3418 if (rc != 0) 3419 goto done; 3420 3421 /* 3422 * Restart with the new path, starting either at 3423 * the root or at the parent depending whether or 3424 * not the link is relative. 
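 * An absolute link resets the directory stack to the filesystem
 * root; a relative link only pops the symlink's own entry so the
 * walk resumes from its parent directory.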
3425 */ 3426 p = path; 3427 if (*p == '/') { 3428 while (STAILQ_FIRST(&on_cache) != 3429 STAILQ_LAST(&on_cache, obj_list, entry)) { 3430 entry = STAILQ_FIRST(&on_cache); 3431 STAILQ_REMOVE_HEAD(&on_cache, entry); 3432 free(entry); 3433 } 3434 } else { 3435 entry = STAILQ_FIRST(&on_cache); 3436 STAILQ_REMOVE_HEAD(&on_cache, entry); 3437 free(entry); 3438 } 3439 objnum = (STAILQ_FIRST(&on_cache))->objnum; 3440 } 3441 } 3442 3443 *dnode = dn; 3444 done: 3445 STAILQ_FOREACH_SAFE(entry, &on_cache, entry, tentry) 3446 free(entry); 3447 return (rc); 3448 } 3449