/*-
 * Copyright (c) 2007 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Stand-alone ZFS file reader.
 */

#include <stdbool.h>
#include <sys/endian.h>
#include <sys/stat.h>
#include <sys/stdint.h>
#include <sys/list.h>
#include <sys/zfs_bootenv.h>
#include <machine/_inttypes.h>

#include "zfsimpl.h"
#include "zfssubr.c"

#ifdef HAS_ZSTD_ZFS
extern int zstd_init(void);
#endif

struct zfsmount {
	char *path;
	const spa_t *spa;
	objset_phys_t objset;
	uint64_t rootobj;
	STAILQ_ENTRY(zfsmount) next;
};

typedef STAILQ_HEAD(zfs_mnt_list, zfsmount) zfs_mnt_list_t;
static zfs_mnt_list_t zfsmount = STAILQ_HEAD_INITIALIZER(zfsmount);

/*
 * The indirect_child_t represents the vdev that we will read from, when we
 * need to read all copies of the data (e.g. for scrub or reconstruction).
 * For plain (non-mirror) top-level vdevs (i.e. is_vdev is not a mirror),
 * ic_vdev is the same as is_vdev.  However, for mirror top-level vdevs,
 * ic_vdev is a child of the mirror.
 */
typedef struct indirect_child {
	void *ic_data;
	vdev_t *ic_vdev;
} indirect_child_t;

/*
 * The indirect_split_t represents one mapped segment of an i/o to the
 * indirect vdev. For non-split (contiguously-mapped) blocks, there will be
 * only one indirect_split_t, with is_split_offset==0 and is_size==io_size.
 * For split blocks, there will be several of these.
 */
typedef struct indirect_split {
	list_node_t is_node; /* link on iv_splits */

	/*
	 * is_split_offset is the offset into the i/o.
	 * This is the sum of the previous splits' is_size's.
	 */
	uint64_t is_split_offset;

	vdev_t *is_vdev; /* top-level vdev */
	uint64_t is_target_offset; /* offset on is_vdev */
	uint64_t is_size;
	int is_children; /* number of entries in is_child[] */

	/*
	 * is_good_child is the child that we are currently using to
	 * attempt reconstruction.
	 */
	int is_good_child;

	indirect_child_t is_child[1]; /* variable-length */
} indirect_split_t;

/*
 * The indirect_vsd_t is associated with each i/o to the indirect vdev.
 * It is the "Vdev-Specific Data" in the zio_t's io_vsd.
 */
typedef struct indirect_vsd {
	boolean_t iv_split_block;
	boolean_t iv_reconstruct;

	list_t iv_splits; /* list of indirect_split_t's */
} indirect_vsd_t;
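
/*
 * Illustrative example (hypothetical numbers, not on-disk data): a 128K
 * i/o that device removal split across two destination regions would be
 * described by two indirect_split_t entries on iv_splits,
 *
 *	{ is_split_offset = 0,   is_size = 64K, is_target_offset = off1 }
 *	{ is_split_offset = 64K, is_size = 64K, is_target_offset = off2 }
 *
 * so the is_size fields sum to io_size, and each is_split_offset is the
 * sum of the is_size of the preceding segments.
 */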

/*
 * List of all vdevs, chained through v_alllink.
 */
static vdev_list_t zfs_vdevs;

/*
 * List of supported read-incompatible ZFS features.  Do not add features
 * marked as ZFEATURE_FLAG_READONLY_COMPAT here; they are irrelevant for
 * read-only access!
 */
static const char *features_for_read[] = {
	"com.datto:bookmark_v2",
	"com.datto:encryption",
	"com.delphix:bookmark_written",
	"com.delphix:device_removal",
	"com.delphix:embedded_data",
	"com.delphix:extensible_dataset",
	"com.delphix:head_errlog",
	"com.delphix:hole_birth",
	"com.joyent:multi_vdev_crash_dump",
	"com.klarasystems:vdev_zaps_v2",
	"org.freebsd:zstd_compress",
	"org.illumos:lz4_compress",
	"org.illumos:sha512",
	"org.illumos:skein",
	"org.open-zfs:large_blocks",
	"org.openzfs:blake3",
	"org.zfsonlinux:large_dnode",
	NULL
};

/*
 * List of all pools, chained through spa_link.
 */
static spa_list_t zfs_pools;

static const dnode_phys_t *dnode_cache_obj;
static uint64_t dnode_cache_bn;
static char *dnode_cache_buf;

static int zio_read(const spa_t *spa, const blkptr_t *bp, void *buf);
static int zfs_get_root(const spa_t *spa, uint64_t *objid);
static int zfs_rlookup(const spa_t *spa, uint64_t objnum, char *result);
static int zap_lookup(const spa_t *spa, const dnode_phys_t *dnode,
    const char *name, uint64_t integer_size, uint64_t num_integers,
    void *value);
static int objset_get_dnode(const spa_t *, const objset_phys_t *, uint64_t,
    dnode_phys_t *);
static int dnode_read(const spa_t *, const dnode_phys_t *, off_t, void *,
    size_t);
static int vdev_indirect_read(vdev_t *, const blkptr_t *, void *, off_t,
    size_t);
static int vdev_mirror_read(vdev_t *, const blkptr_t *, void *, off_t, size_t);
vdev_indirect_mapping_t *vdev_indirect_mapping_open(spa_t *, objset_phys_t *,
    uint64_t);
vdev_indirect_mapping_entry_phys_t *
    vdev_indirect_mapping_duplicate_adjacent_entries(vdev_t *, uint64_t,
    uint64_t, uint64_t *);

static void
zfs_init(void)
{
	STAILQ_INIT(&zfs_vdevs);
	STAILQ_INIT(&zfs_pools);

	dnode_cache_buf = malloc(SPA_MAXBLOCKSIZE);

	zfs_init_crc();
#ifdef HAS_ZSTD_ZFS
	zstd_init();
#endif
}

static int
nvlist_check_features_for_read(nvlist_t *nvl)
{
	nvlist_t *features = NULL;
	nvs_data_t *data;
	nvp_header_t *nvp;
	nv_string_t *nvp_name;
	int rc;

	rc = nvlist_find(nvl, ZPOOL_CONFIG_FEATURES_FOR_READ,
	    DATA_TYPE_NVLIST, NULL, &features, NULL);
	switch (rc) {
	case 0:
		break;		/* Continue with checks */

	case ENOENT:
		return (0);	/* All features are disabled */

	default:
		return (rc);	/* Error while reading nvlist */
	}

	data = (nvs_data_t *)features->nv_data;
	nvp = &data->nvl_pair;	/* first pair in nvlist */

	while (nvp->encoded_size != 0 && nvp->decoded_size != 0) {
		int i, found;

		nvp_name = (nv_string_t *)((uintptr_t)nvp + sizeof(*nvp));
		found = 0;

		for (i = 0; features_for_read[i] != NULL; i++) {
			if (memcmp(nvp_name->nv_data, features_for_read[i],
			    nvp_name->nv_size) == 0) {
				found = 1;
				break;
			}
		}

		if (!found) {
			printf("ZFS: unsupported feature: %.*s\n",
			    nvp_name->nv_size, nvp_name->nv_data);
			rc = EIO;
		}
		nvp = (nvp_header_t *)((uint8_t *)nvp + nvp->encoded_size);
	}
	nvlist_destroy(features);

	return (rc);
}

static int
vdev_read_phys(vdev_t *vdev, const blkptr_t *bp, void *buf,
    off_t offset, size_t size)
{
	size_t psize;
	int rc;

	if (vdev->v_phys_read == NULL)
		return (ENOTSUP);

	if (bp) {
		psize = BP_GET_PSIZE(bp);
	} else {
		psize = size;
	}

	rc = vdev->v_phys_read(vdev, vdev->v_priv, offset, buf, psize);
	if (rc == 0) {
		if (bp != NULL)
			rc = zio_checksum_verify(vdev->v_spa, bp, buf);
	}

	return (rc);
}

static int
vdev_write_phys(vdev_t *vdev, void *buf, off_t offset, size_t size)
{
	if (vdev->v_phys_write == NULL)
		return (ENOTSUP);

	return (vdev->v_phys_write(vdev, offset, buf, size));
}

typedef struct remap_segment {
	vdev_t *rs_vd;
	uint64_t rs_offset;
	uint64_t rs_asize;
	uint64_t rs_split_offset;
	list_node_t rs_node;
} remap_segment_t;

static remap_segment_t *
rs_alloc(vdev_t *vd, uint64_t offset, uint64_t asize, uint64_t split_offset)
{
	remap_segment_t *rs = malloc(sizeof (remap_segment_t));

	if (rs != NULL) {
		rs->rs_vd = vd;
		rs->rs_offset = offset;
		rs->rs_asize = asize;
		rs->rs_split_offset = split_offset;
	}

	return (rs);
}

vdev_indirect_mapping_t *
vdev_indirect_mapping_open(spa_t *spa, objset_phys_t *os,
    uint64_t mapping_object)
{
	vdev_indirect_mapping_t *vim;
	vdev_indirect_mapping_phys_t *vim_phys;
	int rc;

	vim = calloc(1, sizeof (*vim));
	if (vim == NULL)
		return (NULL);

	vim->vim_dn = calloc(1, sizeof (*vim->vim_dn));
	if (vim->vim_dn == NULL) {
		free(vim);
		return (NULL);
	}

	rc = objset_get_dnode(spa, os, mapping_object, vim->vim_dn);
	if (rc != 0) {
		free(vim->vim_dn);
		free(vim);
		return (NULL);
	}

	vim->vim_spa = spa;
	vim->vim_phys = malloc(sizeof (*vim->vim_phys));
	if (vim->vim_phys == NULL) {
		free(vim->vim_dn);
		free(vim);
		return (NULL);
	}

	vim_phys = (vdev_indirect_mapping_phys_t *)DN_BONUS(vim->vim_dn);
	*vim->vim_phys = *vim_phys;

	vim->vim_objset = os;
	vim->vim_object = mapping_object;
	vim->vim_entries = NULL;

	vim->vim_havecounts =
	    (vim->vim_dn->dn_bonuslen > VDEV_INDIRECT_MAPPING_SIZE_V0);

	return (vim);
}

/*
 * Compare an offset with an indirect mapping entry; there are three
 * possible scenarios:
 *
 * 1. The offset is "less than" the mapping entry; meaning the
 *    offset is less than the source offset of the mapping entry. In
 *    this case, there is no overlap between the offset and the
 *    mapping entry and -1 will be returned.
 *
 * 2. The offset is "greater than" the mapping entry; meaning the
 *    offset is greater than the mapping entry's source offset plus
 *    the entry's size. In this case, there is no overlap between
 *    the offset and the mapping entry and 1 will be returned.
 *
 *    NOTE: If the offset is actually equal to the entry's offset
 *    plus size, this is considered to be "greater" than the entry,
 *    and this case applies (i.e. 1 will be returned). Thus, the
 *    entry's "range" can be considered to be inclusive at its
 *    start, but exclusive at its end: e.g. [src, src + size).
 *
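 *    For example (illustrative numbers): an entry with source offset
 *    0x1000 and size 0x2000 covers [0x1000, 0x3000), so a key of
 *    0x2fff compares as "equal" (0) while a key of 0x3000 compares
 *    as "greater than" (1).
 *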
 * 3. The last case to consider is if the offset actually falls
 *    within the mapping entry's range. If this is the case, the
 *    offset is considered to be "equal to" the mapping entry and
 *    0 will be returned.
 *
 *    NOTE: If the offset is equal to the entry's source offset,
 *    this case applies and 0 will be returned. If the offset is
 *    equal to the entry's source plus its size, this case does
 *    *not* apply (see "NOTE" above for scenario 2), and 1 will be
 *    returned.
 */
static int
dva_mapping_overlap_compare(const void *v_key, const void *v_array_elem)
{
	const uint64_t *key = v_key;
	const vdev_indirect_mapping_entry_phys_t *array_elem =
	    v_array_elem;
	uint64_t src_offset = DVA_MAPPING_GET_SRC_OFFSET(array_elem);

	if (*key < src_offset) {
		return (-1);
	} else if (*key < src_offset + DVA_GET_ASIZE(&array_elem->vimep_dst)) {
		return (0);
	} else {
		return (1);
	}
}

/*
 * Return array entry.
 */
static vdev_indirect_mapping_entry_phys_t *
vdev_indirect_mapping_entry(vdev_indirect_mapping_t *vim, uint64_t index)
{
	uint64_t size;
	off_t offset = 0;
	int rc;

	if (vim->vim_phys->vimp_num_entries == 0)
		return (NULL);

	if (vim->vim_entries == NULL) {
		uint64_t bsize;

		bsize = vim->vim_dn->dn_datablkszsec << SPA_MINBLOCKSHIFT;
		size = vim->vim_phys->vimp_num_entries *
		    sizeof (*vim->vim_entries);
		if (size > bsize) {
			size = bsize / sizeof (*vim->vim_entries);
			size *= sizeof (*vim->vim_entries);
		}
		vim->vim_entries = malloc(size);
		if (vim->vim_entries == NULL)
			return (NULL);
		vim->vim_num_entries = size / sizeof (*vim->vim_entries);
		offset = index * sizeof (*vim->vim_entries);
	}

	/* We have data in vim_entries */
	if (offset == 0) {
		if (index >= vim->vim_entry_offset &&
		    index <= vim->vim_entry_offset + vim->vim_num_entries) {
			index -= vim->vim_entry_offset;
			return (&vim->vim_entries[index]);
		}
		offset = index * sizeof (*vim->vim_entries);
	}

	vim->vim_entry_offset = index;
	size = vim->vim_num_entries * sizeof (*vim->vim_entries);
	rc = dnode_read(vim->vim_spa, vim->vim_dn, offset, vim->vim_entries,
	    size);
	if (rc != 0) {
		/* Read error, invalidate vim_entries. */
		free(vim->vim_entries);
		vim->vim_entries = NULL;
		return (NULL);
	}
	index -= vim->vim_entry_offset;
	return (&vim->vim_entries[index]);
}

/*
 * Returns the mapping entry for the given offset.
 *
 * It's possible that the given offset will not be in the mapping table
 * (i.e. no mapping entries contain this offset), in which case, the
 * return value depends on the "next_if_missing" parameter.
 *
 * If the offset is not found in the table and "next_if_missing" is
 * B_FALSE, then NULL will always be returned. The behavior is intended
 * to allow consumers to get the entry corresponding to the offset
 * parameter, iff the offset overlaps with an entry in the table.
 *
 * If the offset is not found in the table and "next_if_missing" is
 * B_TRUE, then the entry nearest to the given offset will be returned,
 * such that the entry's source offset is greater than the offset
 * passed in (i.e. the "next" mapping entry in the table is returned, if
 * the offset is missing from the table). If there are no entries whose
 * source offset is greater than the passed in offset, NULL is returned.
 */
static vdev_indirect_mapping_entry_phys_t *
vdev_indirect_mapping_entry_for_offset(vdev_indirect_mapping_t *vim,
    uint64_t offset)
{
	ASSERT(vim->vim_phys->vimp_num_entries > 0);

	vdev_indirect_mapping_entry_phys_t *entry;

	uint64_t last = vim->vim_phys->vimp_num_entries - 1;
	uint64_t base = 0;

	/*
	 * We don't define these inside of the while loop because we use
	 * their value in the case that offset isn't in the mapping.
	 */
	uint64_t mid;
	int result;

	while (last >= base) {
		mid = base + ((last - base) >> 1);

		entry = vdev_indirect_mapping_entry(vim, mid);
		if (entry == NULL)
			break;
		result = dva_mapping_overlap_compare(&offset, entry);

		if (result == 0) {
			break;
		} else if (result < 0) {
			last = mid - 1;
		} else {
			base = mid + 1;
		}
	}
	return (entry);
}

/*
 * Given an indirect vdev and an extent on that vdev, this function
 * duplicates the physical entries of the indirect mapping that correspond
 * to the extent into a new array and returns a pointer to it. In addition,
 * copied_entries is populated with the number of mapping entries that were
 * duplicated.
 *
 * Finally, since we are doing an allocation, it is up to the caller to
 * free the array allocated in this function.
 */
vdev_indirect_mapping_entry_phys_t *
vdev_indirect_mapping_duplicate_adjacent_entries(vdev_t *vd, uint64_t offset,
    uint64_t asize, uint64_t *copied_entries)
{
	vdev_indirect_mapping_entry_phys_t *duplicate_mappings = NULL;
	vdev_indirect_mapping_t *vim = vd->v_mapping;
	uint64_t entries = 0;

	vdev_indirect_mapping_entry_phys_t *first_mapping =
	    vdev_indirect_mapping_entry_for_offset(vim, offset);
	ASSERT3P(first_mapping, !=, NULL);

	vdev_indirect_mapping_entry_phys_t *m = first_mapping;
	while (asize > 0) {
		uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);
		uint64_t inner_offset = offset - DVA_MAPPING_GET_SRC_OFFSET(m);
		uint64_t inner_size = MIN(asize, size - inner_offset);

		offset += inner_size;
		asize -= inner_size;
		entries++;
		m++;
	}

	size_t copy_length = entries * sizeof (*first_mapping);
	duplicate_mappings = malloc(copy_length);
	if (duplicate_mappings != NULL)
		bcopy(first_mapping, duplicate_mappings, copy_length);
	else
		entries = 0;

	*copied_entries = entries;

	return (duplicate_mappings);
}
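
/*
 * Worked example for the loop above (hypothetical numbers): for an extent
 * with offset 0x3000 and asize 0x3000, and mapping entries whose sources
 * cover [0x2000, 0x4000) and [0x4000, 0x8000), two entries are copied:
 * the first is consumed with inner_offset 0x1000 and inner_size 0x1000,
 * the second with inner_offset 0 and inner_size 0x2000.
 */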

static vdev_t *
vdev_lookup_top(spa_t *spa, uint64_t vdev)
{
	vdev_t *rvd;
	vdev_list_t *vlist;

	vlist = &spa->spa_root_vdev->v_children;
	STAILQ_FOREACH(rvd, vlist, v_childlink)
		if (rvd->v_id == vdev)
			break;

	return (rvd);
}

/*
 * This is a callback for vdev_indirect_remap() which allocates an
 * indirect_split_t for each split segment and adds it to iv_splits.
 */
static void
vdev_indirect_gather_splits(uint64_t split_offset, vdev_t *vd, uint64_t offset,
    uint64_t size, void *arg)
{
	int n = 1;
	zio_t *zio = arg;
	indirect_vsd_t *iv = zio->io_vsd;

	if (vd->v_read == vdev_indirect_read)
		return;

	if (vd->v_read == vdev_mirror_read)
		n = vd->v_nchildren;

	indirect_split_t *is =
	    malloc(offsetof(indirect_split_t, is_child[n]));
	if (is == NULL) {
		zio->io_error = ENOMEM;
		return;
	}
	bzero(is, offsetof(indirect_split_t, is_child[n]));

	is->is_children = n;
	is->is_size = size;
	is->is_split_offset = split_offset;
	is->is_target_offset = offset;
	is->is_vdev = vd;

	/*
	 * Note that we only consider multiple copies of the data for
	 * *mirror* vdevs.  We don't for "replacing" or "spare" vdevs, even
	 * though they use the same ops as mirror, because there's only one
	 * "good" copy under the replacing/spare.
	 */
	if (vd->v_read == vdev_mirror_read) {
		int i = 0;
		vdev_t *kid;

		STAILQ_FOREACH(kid, &vd->v_children, v_childlink) {
			is->is_child[i++].ic_vdev = kid;
		}
	} else {
		is->is_child[0].ic_vdev = vd;
	}

	list_insert_tail(&iv->iv_splits, is);
}

static void
vdev_indirect_remap(vdev_t *vd, uint64_t offset, uint64_t asize, void *arg)
{
	list_t stack;
	spa_t *spa = vd->v_spa;
	zio_t *zio = arg;
	remap_segment_t *rs;

	list_create(&stack, sizeof (remap_segment_t),
	    offsetof(remap_segment_t, rs_node));

	rs = rs_alloc(vd, offset, asize, 0);
	if (rs == NULL) {
		printf("vdev_indirect_remap: out of memory.\n");
		zio->io_error = ENOMEM;
	}
	for (; rs != NULL; rs = list_remove_head(&stack)) {
		vdev_t *v = rs->rs_vd;
		uint64_t num_entries = 0;
		/* vdev_indirect_mapping_t *vim = v->v_mapping; */
		vdev_indirect_mapping_entry_phys_t *mapping =
		    vdev_indirect_mapping_duplicate_adjacent_entries(v,
		    rs->rs_offset, rs->rs_asize, &num_entries);

		if (num_entries == 0)
			zio->io_error = ENOMEM;

		for (uint64_t i = 0; i < num_entries; i++) {
			vdev_indirect_mapping_entry_phys_t *m = &mapping[i];
			uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);
			uint64_t dst_offset = DVA_GET_OFFSET(&m->vimep_dst);
			uint64_t dst_vdev = DVA_GET_VDEV(&m->vimep_dst);
			uint64_t inner_offset = rs->rs_offset -
			    DVA_MAPPING_GET_SRC_OFFSET(m);
			uint64_t inner_size =
			    MIN(rs->rs_asize, size - inner_offset);
			vdev_t *dst_v = vdev_lookup_top(spa, dst_vdev);

			if (dst_v->v_read == vdev_indirect_read) {
				remap_segment_t *o;

				o = rs_alloc(dst_v, dst_offset + inner_offset,
				    inner_size, rs->rs_split_offset);
				if (o == NULL) {
					printf("vdev_indirect_remap: "
					    "out of memory.\n");
					zio->io_error = ENOMEM;
					break;
				}

				list_insert_head(&stack, o);
			}
			vdev_indirect_gather_splits(rs->rs_split_offset, dst_v,
			    dst_offset + inner_offset,
			    inner_size, arg);

			/*
			 * vdev_indirect_gather_splits can fail due to a
			 * memory allocation error; we cannot recover from
			 * that.
			 */
			if (zio->io_error != 0)
				break;
			rs->rs_offset += inner_size;
			rs->rs_asize -= inner_size;
			rs->rs_split_offset += inner_size;
		}

		free(mapping);
		free(rs);
		if (zio->io_error != 0)
			break;
	}

	list_destroy(&stack);
}

static void
vdev_indirect_map_free(zio_t *zio)
{
	indirect_vsd_t *iv = zio->io_vsd;
	indirect_split_t *is;

	while ((is = list_head(&iv->iv_splits)) != NULL) {
		for (int c = 0; c < is->is_children; c++) {
			indirect_child_t *ic = &is->is_child[c];
			free(ic->ic_data);
		}
		list_remove(&iv->iv_splits, is);
		free(is);
	}
	free(iv);
}

static int
vdev_indirect_read(vdev_t *vdev, const blkptr_t *bp, void *buf,
    off_t offset, size_t bytes)
{
	zio_t zio;
	spa_t *spa = vdev->v_spa;
	indirect_vsd_t *iv;
	indirect_split_t *first;
	int rc = EIO;

	iv = calloc(1, sizeof(*iv));
	if (iv == NULL)
		return (ENOMEM);

	list_create(&iv->iv_splits,
	    sizeof (indirect_split_t), offsetof(indirect_split_t, is_node));

	bzero(&zio, sizeof(zio));
	zio.io_spa = spa;
	zio.io_bp = (blkptr_t *)bp;
	zio.io_data = buf;
	zio.io_size = bytes;
	zio.io_offset = offset;
	zio.io_vd = vdev;
	zio.io_vsd = iv;

	if (vdev->v_mapping == NULL) {
		vdev_indirect_config_t *vic;

		vic = &vdev->vdev_indirect_config;
		vdev->v_mapping = vdev_indirect_mapping_open(spa,
		    spa->spa_mos, vic->vic_mapping_object);
	}

	vdev_indirect_remap(vdev, offset, bytes, &zio);
	if (zio.io_error != 0)
		return (zio.io_error);

	first = list_head(&iv->iv_splits);
	if (first->is_size == zio.io_size) {
		/*
		 * This is not a split block; we are pointing to the entire
		 * data, which will checksum the same as the original data.
		 * Pass the BP down so that the child i/o can verify the
		 * checksum, and try a different location if available
		 * (e.g. on a mirror).
		 *
		 * While this special case could be handled the same as the
		 * general (split block) case, doing it this way ensures
		 * that the vast majority of blocks on indirect vdevs
		 * (which are not split) are handled identically to blocks
		 * on non-indirect vdevs.  This allows us to be less strict
		 * about performance in the general (but rare) case.
		 */
		rc = first->is_vdev->v_read(first->is_vdev, zio.io_bp,
		    zio.io_data, first->is_target_offset, bytes);
	} else {
		iv->iv_split_block = B_TRUE;
		/*
		 * Read one copy of each split segment, from the
		 * top-level vdev.  Since we don't know the
		 * checksum of each split individually, the child
		 * zio can't ensure that we get the right data.
		 * E.g. if it's a mirror, it will just read from a
		 * random (healthy) leaf vdev.  We have to verify
		 * the checksum in vdev_indirect_io_done().
		 */
		for (indirect_split_t *is = list_head(&iv->iv_splits);
		    is != NULL; is = list_next(&iv->iv_splits, is)) {
			char *ptr = zio.io_data;

			rc = is->is_vdev->v_read(is->is_vdev, zio.io_bp,
			    ptr + is->is_split_offset, is->is_target_offset,
			    is->is_size);
		}
		if (zio_checksum_verify(spa, zio.io_bp, zio.io_data))
			rc = ECKSUM;
		else
			rc = 0;
	}

	vdev_indirect_map_free(&zio);
	if (rc == 0)
		rc = zio.io_error;

	return (rc);
}

static int
vdev_disk_read(vdev_t *vdev, const blkptr_t *bp, void *buf,
    off_t offset, size_t bytes)
{

	return (vdev_read_phys(vdev, bp, buf,
	    offset + VDEV_LABEL_START_SIZE, bytes));
}

static int
vdev_missing_read(vdev_t *vdev __unused, const blkptr_t *bp __unused,
    void *buf __unused, off_t offset __unused, size_t bytes __unused)
{

	return (ENOTSUP);
}

static int
vdev_mirror_read(vdev_t *vdev, const blkptr_t *bp, void *buf,
    off_t offset, size_t bytes)
{
	vdev_t *kid;
	int rc;

	rc = EIO;
	STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) {
		if (kid->v_state != VDEV_STATE_HEALTHY)
			continue;
		rc = kid->v_read(kid, bp, buf, offset, bytes);
		if (!rc)
			return (0);
	}

	return (rc);
}
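
/*
 * Example: in a two-way mirror where the first healthy child returns an
 * error, the loop above simply tries the next child; rc holds the last
 * error seen, so an error is returned only if every healthy child fails.
 */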

static int
vdev_replacing_read(vdev_t *vdev, const blkptr_t *bp, void *buf,
    off_t offset, size_t bytes)
{
	vdev_t *kid;

	/*
	 * Here we should have two kids:
	 * The first one is the one we are replacing, and we can trust
	 * only this one to have valid data, but it might not be present.
	 * The second one is the one we are replacing with.  It is most
	 * likely healthy, but we can't trust it has the needed data, so
	 * we won't use it.
	 */
	kid = STAILQ_FIRST(&vdev->v_children);
	if (kid == NULL)
		return (EIO);
	if (kid->v_state != VDEV_STATE_HEALTHY)
		return (EIO);
	return (kid->v_read(kid, bp, buf, offset, bytes));
}

static vdev_t *
vdev_find(uint64_t guid)
{
	vdev_t *vdev;

	STAILQ_FOREACH(vdev, &zfs_vdevs, v_alllink)
		if (vdev->v_guid == guid)
			return (vdev);

	return (0);
}

static vdev_t *
vdev_create(uint64_t guid, vdev_read_t *_read)
{
	vdev_t *vdev;
	vdev_indirect_config_t *vic;

	vdev = calloc(1, sizeof(vdev_t));
	if (vdev != NULL) {
		STAILQ_INIT(&vdev->v_children);
		vdev->v_guid = guid;
		vdev->v_read = _read;

		/*
		 * The root vdev has no read function; we use this fact to
		 * skip setting up data we do not need for the root vdev.
		 * We only point to the root vdev from the spa.
		 */
		if (_read != NULL) {
			vic = &vdev->vdev_indirect_config;
			vic->vic_prev_indirect_vdev = UINT64_MAX;
			STAILQ_INSERT_TAIL(&zfs_vdevs, vdev, v_alllink);
		}
	}

	return (vdev);
}

static void
vdev_set_initial_state(vdev_t *vdev, const nvlist_t *nvlist)
{
	uint64_t is_offline, is_faulted, is_degraded, is_removed, isnt_present;
	uint64_t is_log;

	is_offline = is_removed = is_faulted = is_degraded = isnt_present = 0;
	is_log = 0;
	(void) nvlist_find(nvlist, ZPOOL_CONFIG_OFFLINE, DATA_TYPE_UINT64, NULL,
	    &is_offline, NULL);
	(void) nvlist_find(nvlist, ZPOOL_CONFIG_REMOVED, DATA_TYPE_UINT64, NULL,
	    &is_removed, NULL);
	(void) nvlist_find(nvlist, ZPOOL_CONFIG_FAULTED, DATA_TYPE_UINT64, NULL,
	    &is_faulted, NULL);
	(void) nvlist_find(nvlist, ZPOOL_CONFIG_DEGRADED, DATA_TYPE_UINT64,
	    NULL, &is_degraded, NULL);
	(void) nvlist_find(nvlist, ZPOOL_CONFIG_NOT_PRESENT, DATA_TYPE_UINT64,
	    NULL, &isnt_present, NULL);
	(void) nvlist_find(nvlist, ZPOOL_CONFIG_IS_LOG, DATA_TYPE_UINT64, NULL,
	    &is_log, NULL);

	if (is_offline != 0)
		vdev->v_state = VDEV_STATE_OFFLINE;
	else if (is_removed != 0)
		vdev->v_state = VDEV_STATE_REMOVED;
	else if (is_faulted != 0)
		vdev->v_state = VDEV_STATE_FAULTED;
	else if (is_degraded != 0)
		vdev->v_state = VDEV_STATE_DEGRADED;
	else if (isnt_present != 0)
		vdev->v_state = VDEV_STATE_CANT_OPEN;

	vdev->v_islog = is_log != 0;
}

static int
vdev_init(uint64_t guid, const nvlist_t *nvlist, vdev_t **vdevp)
{
	uint64_t id, ashift, asize, nparity;
	const char *path;
	const char *type;
	int len, pathlen;
	char *name;
	vdev_t *vdev;

	if (nvlist_find(nvlist, ZPOOL_CONFIG_ID, DATA_TYPE_UINT64, NULL, &id,
	    NULL) ||
	    nvlist_find(nvlist, ZPOOL_CONFIG_TYPE, DATA_TYPE_STRING, NULL,
	    &type, &len)) {
		return (ENOENT);
	}

	if (memcmp(type, VDEV_TYPE_MIRROR, len) != 0 &&
	    memcmp(type, VDEV_TYPE_DISK, len) != 0 &&
#ifdef ZFS_TEST
	    memcmp(type, VDEV_TYPE_FILE, len) != 0 &&
#endif
	    memcmp(type, VDEV_TYPE_RAIDZ, len) != 0 &&
	    memcmp(type, VDEV_TYPE_INDIRECT, len) != 0 &&
	    memcmp(type, VDEV_TYPE_REPLACING, len) != 0 &&
	    memcmp(type, VDEV_TYPE_HOLE, len) != 0) {
		printf("ZFS: can only boot from disk, mirror, raidz1, "
		    "raidz2 and raidz3 vdevs, got: %.*s\n", len, type);
		return (EIO);
	}

	if (memcmp(type, VDEV_TYPE_MIRROR, len) == 0)
		vdev = vdev_create(guid, vdev_mirror_read);
	else if (memcmp(type, VDEV_TYPE_RAIDZ, len) == 0)
		vdev = vdev_create(guid, vdev_raidz_read);
	else if (memcmp(type, VDEV_TYPE_REPLACING, len) == 0)
		vdev = vdev_create(guid, vdev_replacing_read);
	else if (memcmp(type, VDEV_TYPE_INDIRECT, len) == 0) {
		vdev_indirect_config_t *vic;

		vdev = vdev_create(guid, vdev_indirect_read);
		if (vdev != NULL) {
			vdev->v_state = VDEV_STATE_HEALTHY;
			vic = &vdev->vdev_indirect_config;

			nvlist_find(nvlist,
			    ZPOOL_CONFIG_INDIRECT_OBJECT,
			    DATA_TYPE_UINT64,
			    NULL, &vic->vic_mapping_object, NULL);
			nvlist_find(nvlist,
			    ZPOOL_CONFIG_INDIRECT_BIRTHS,
			    DATA_TYPE_UINT64,
			    NULL, &vic->vic_births_object, NULL);
			nvlist_find(nvlist,
			    ZPOOL_CONFIG_PREV_INDIRECT_VDEV,
			    DATA_TYPE_UINT64,
			    NULL, &vic->vic_prev_indirect_vdev, NULL);
		}
	} else if (memcmp(type, VDEV_TYPE_HOLE, len) == 0) {
		vdev = vdev_create(guid, vdev_missing_read);
	} else {
		vdev = vdev_create(guid, vdev_disk_read);
	}

	if (vdev == NULL)
		return (ENOMEM);

	vdev_set_initial_state(vdev, nvlist);
	vdev->v_id = id;
	if (nvlist_find(nvlist, ZPOOL_CONFIG_ASHIFT,
	    DATA_TYPE_UINT64, NULL, &ashift, NULL) == 0)
		vdev->v_ashift = ashift;

	if (nvlist_find(nvlist, ZPOOL_CONFIG_ASIZE,
	    DATA_TYPE_UINT64, NULL, &asize, NULL) == 0) {
		vdev->v_psize = asize +
		    VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
	}

	if (nvlist_find(nvlist, ZPOOL_CONFIG_NPARITY,
	    DATA_TYPE_UINT64, NULL, &nparity, NULL) == 0)
		vdev->v_nparity = nparity;

	if (nvlist_find(nvlist, ZPOOL_CONFIG_PATH,
	    DATA_TYPE_STRING, NULL, &path, &pathlen) == 0) {
		char prefix[] = "/dev/";

		len = strlen(prefix);
		if (len < pathlen && memcmp(path, prefix, len) == 0) {
			path += len;
			pathlen -= len;
		}
		name = malloc(pathlen + 1);
		bcopy(path, name, pathlen);
		name[pathlen] = '\0';
		vdev->v_name = name;
	} else {
		name = NULL;
		if (memcmp(type, VDEV_TYPE_RAIDZ, len) == 0) {
			if (vdev->v_nparity < 1 ||
			    vdev->v_nparity > 3) {
				printf("ZFS: invalid raidz parity: %d\n",
				    vdev->v_nparity);
				return (EIO);
			}
			(void) asprintf(&name, "%.*s%d-%" PRIu64, len, type,
			    vdev->v_nparity, id);
		} else {
			(void) asprintf(&name, "%.*s-%" PRIu64, len, type, id);
		}
		vdev->v_name = name;
	}
	*vdevp = vdev;
	return (0);
}

/*
 * Find the slot for a vdev.  We return either NULL, to signal that the
 * caller should use STAILQ_INSERT_HEAD, or the list element to be used
 * with STAILQ_INSERT_AFTER.
 */
static vdev_t *
vdev_find_previous(vdev_t *top_vdev, vdev_t *vdev)
{
	vdev_t *v, *previous;

	if (STAILQ_EMPTY(&top_vdev->v_children))
		return (NULL);

	previous = NULL;
	STAILQ_FOREACH(v, &top_vdev->v_children, v_childlink) {
		if (v->v_id > vdev->v_id)
			return (previous);

		if (v->v_id == vdev->v_id)
			return (v);

		if (v->v_id < vdev->v_id)
			previous = v;
	}
	return (previous);
}

static size_t
vdev_child_count(vdev_t *vdev)
{
	vdev_t *v;
	size_t count;

	count = 0;
	STAILQ_FOREACH(v, &vdev->v_children, v_childlink) {
		count++;
	}
	return (count);
}

/*
 * Insert vdev into top_vdev's children list.  The list is kept ordered
 * by v_id.
 */
static void
vdev_insert(vdev_t *top_vdev, vdev_t *vdev)
{
	vdev_t *previous;
	size_t count;

	/*
	 * The top-level vdevs can appear in random order, depending on how
	 * the firmware presents the disk devices.  However, we insert each
	 * vdev so that the list stays ordered by v_id, which lets us use
	 * either STAILQ_INSERT_HEAD or STAILQ_INSERT_AFTER, as STAILQ has
	 * no insert-before.
	 */
	previous = vdev_find_previous(top_vdev, vdev);

	if (previous == NULL) {
		STAILQ_INSERT_HEAD(&top_vdev->v_children, vdev, v_childlink);
	} else if (previous->v_id == vdev->v_id) {
		/*
		 * This vdev was configured from the label config;
		 * do not insert a duplicate.
		 */
		return;
	} else {
		STAILQ_INSERT_AFTER(&top_vdev->v_children, previous, vdev,
		    v_childlink);
	}

	count = vdev_child_count(top_vdev);
	if (top_vdev->v_nchildren < count)
		top_vdev->v_nchildren = count;
}
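
/*
 * Example: inserting children with v_id 2, 0, 1 (in that arrival order)
 * yields the list 0, 1, 2: 2 starts the empty list, 0 is inserted at the
 * head, and 1 is inserted after 0.
 */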

static int
vdev_from_nvlist(spa_t *spa, uint64_t top_guid, const nvlist_t *nvlist)
{
	vdev_t *top_vdev, *vdev;
	nvlist_t **kids = NULL;
	int rc, nkids;

	/* Get top vdev. */
	top_vdev = vdev_find(top_guid);
	if (top_vdev == NULL) {
		rc = vdev_init(top_guid, nvlist, &top_vdev);
		if (rc != 0)
			return (rc);
		top_vdev->v_spa = spa;
		top_vdev->v_top = top_vdev;
		vdev_insert(spa->spa_root_vdev, top_vdev);
	}

	/* Add children if there are any. */
	rc = nvlist_find(nvlist, ZPOOL_CONFIG_CHILDREN, DATA_TYPE_NVLIST_ARRAY,
	    &nkids, &kids, NULL);
	if (rc == 0) {
		for (int i = 0; i < nkids; i++) {
			uint64_t guid;

			rc = nvlist_find(kids[i], ZPOOL_CONFIG_GUID,
			    DATA_TYPE_UINT64, NULL, &guid, NULL);
			if (rc != 0)
				goto done;

			rc = vdev_init(guid, kids[i], &vdev);
			if (rc != 0)
				goto done;

			vdev->v_spa = spa;
			vdev->v_top = top_vdev;
			vdev_insert(top_vdev, vdev);
		}
	} else {
		/*
		 * When there are no children, nvlist_find() returns an
		 * error; reset it, because leaf devices have no children.
		 */
		rc = 0;
	}
done:
	if (kids != NULL) {
		for (int i = 0; i < nkids; i++)
			nvlist_destroy(kids[i]);
		free(kids);
	}

	return (rc);
}

static int
vdev_init_from_label(spa_t *spa, const nvlist_t *nvlist)
{
	uint64_t pool_guid, top_guid;
	nvlist_t *vdevs;
	int rc;

	if (nvlist_find(nvlist, ZPOOL_CONFIG_POOL_GUID, DATA_TYPE_UINT64,
	    NULL, &pool_guid, NULL) ||
	    nvlist_find(nvlist, ZPOOL_CONFIG_TOP_GUID, DATA_TYPE_UINT64,
	    NULL, &top_guid, NULL) ||
	    nvlist_find(nvlist, ZPOOL_CONFIG_VDEV_TREE, DATA_TYPE_NVLIST,
	    NULL, &vdevs, NULL)) {
		printf("ZFS: can't find vdev details\n");
		return (ENOENT);
	}

	rc = vdev_from_nvlist(spa, top_guid, vdevs);
	nvlist_destroy(vdevs);
	return (rc);
}

static void
vdev_set_state(vdev_t *vdev)
{
	vdev_t *kid;
	int good_kids;
	int bad_kids;

	STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) {
		vdev_set_state(kid);
	}

	/*
	 * A mirror or raidz is healthy if all its kids are healthy.  A
	 * mirror is degraded if at least one (but not every) kid is
	 * healthy; a raidz is degraded if at most nparity kids are
	 * offline.
	 */
	if (STAILQ_FIRST(&vdev->v_children)) {
		good_kids = 0;
		bad_kids = 0;
		STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) {
			if (kid->v_state == VDEV_STATE_HEALTHY)
				good_kids++;
			else
				bad_kids++;
		}
		if (bad_kids == 0) {
			vdev->v_state = VDEV_STATE_HEALTHY;
		} else {
			if (vdev->v_read == vdev_mirror_read) {
				if (good_kids) {
					vdev->v_state = VDEV_STATE_DEGRADED;
				} else {
					vdev->v_state = VDEV_STATE_OFFLINE;
				}
			} else if (vdev->v_read == vdev_raidz_read) {
				if (bad_kids > vdev->v_nparity) {
					vdev->v_state = VDEV_STATE_OFFLINE;
				} else {
					vdev->v_state = VDEV_STATE_DEGRADED;
				}
			}
		}
	}
}
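
/*
 * Example: a two-way mirror with one healthy and one offline child ends
 * up DEGRADED (good_kids > 0), while a raidz1 with two unhealthy
 * children ends up OFFLINE, because bad_kids (2) exceeds v_nparity (1).
 */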

static int
vdev_update_from_nvlist(uint64_t top_guid, const nvlist_t *nvlist)
{
	vdev_t *vdev;
	nvlist_t **kids = NULL;
	int rc, nkids;

	/* Update top vdev. */
	vdev = vdev_find(top_guid);
	if (vdev != NULL)
		vdev_set_initial_state(vdev, nvlist);

	/* Update children if there are any. */
	rc = nvlist_find(nvlist, ZPOOL_CONFIG_CHILDREN, DATA_TYPE_NVLIST_ARRAY,
	    &nkids, &kids, NULL);
	if (rc == 0) {
		for (int i = 0; i < nkids; i++) {
			uint64_t guid;

			rc = nvlist_find(kids[i], ZPOOL_CONFIG_GUID,
			    DATA_TYPE_UINT64, NULL, &guid, NULL);
			if (rc != 0)
				break;

			vdev = vdev_find(guid);
			if (vdev != NULL)
				vdev_set_initial_state(vdev, kids[i]);
		}
	} else {
		rc = 0;
	}
	if (kids != NULL) {
		for (int i = 0; i < nkids; i++)
			nvlist_destroy(kids[i]);
		free(kids);
	}

	return (rc);
}

static int
vdev_init_from_nvlist(spa_t *spa, const nvlist_t *nvlist)
{
	uint64_t pool_guid, vdev_children;
	nvlist_t *vdevs = NULL, **kids = NULL;
	int rc, nkids;

	if (nvlist_find(nvlist, ZPOOL_CONFIG_POOL_GUID, DATA_TYPE_UINT64,
	    NULL, &pool_guid, NULL) ||
	    nvlist_find(nvlist, ZPOOL_CONFIG_VDEV_CHILDREN, DATA_TYPE_UINT64,
	    NULL, &vdev_children, NULL) ||
	    nvlist_find(nvlist, ZPOOL_CONFIG_VDEV_TREE, DATA_TYPE_NVLIST,
	    NULL, &vdevs, NULL)) {
		printf("ZFS: can't find vdev details\n");
		return (ENOENT);
	}

	/* Wrong guid?! */
	if (spa->spa_guid != pool_guid) {
		nvlist_destroy(vdevs);
		return (EINVAL);
	}

	spa->spa_root_vdev->v_nchildren = vdev_children;

	rc = nvlist_find(vdevs, ZPOOL_CONFIG_CHILDREN, DATA_TYPE_NVLIST_ARRAY,
	    &nkids, &kids, NULL);
	nvlist_destroy(vdevs);

	/*
	 * MOS config has at least one child for the root vdev.
	 */
	if (rc != 0)
		return (rc);

	for (int i = 0; i < nkids; i++) {
		uint64_t guid;
		vdev_t *vdev;

		rc = nvlist_find(kids[i], ZPOOL_CONFIG_GUID, DATA_TYPE_UINT64,
		    NULL, &guid, NULL);
		if (rc != 0)
			break;
		vdev = vdev_find(guid);
		/*
		 * Top-level vdev is missing, create it.
		 */
		if (vdev == NULL)
			rc = vdev_from_nvlist(spa, guid, kids[i]);
		else
			rc = vdev_update_from_nvlist(guid, kids[i]);
		if (rc != 0)
			break;
	}
	if (kids != NULL) {
		for (int i = 0; i < nkids; i++)
			nvlist_destroy(kids[i]);
		free(kids);
	}

	/*
	 * Re-evaluate top-level vdev state.
	 */
	vdev_set_state(spa->spa_root_vdev);

	return (rc);
}

static spa_t *
spa_find_by_guid(uint64_t guid)
{
	spa_t *spa;

	STAILQ_FOREACH(spa, &zfs_pools, spa_link)
		if (spa->spa_guid == guid)
			return (spa);

	return (NULL);
}

static spa_t *
spa_find_by_name(const char *name)
{
	spa_t *spa;

	STAILQ_FOREACH(spa, &zfs_pools, spa_link)
		if (strcmp(spa->spa_name, name) == 0)
			return (spa);

	return (NULL);
}

static spa_t *
spa_create(uint64_t guid, const char *name)
{
	spa_t *spa;

	if ((spa = calloc(1, sizeof(spa_t))) == NULL)
		return (NULL);
	if ((spa->spa_name = strdup(name)) == NULL) {
		free(spa);
		return (NULL);
	}
	spa->spa_uberblock = &spa->spa_uberblock_master;
	spa->spa_mos = &spa->spa_mos_master;
	spa->spa_guid = guid;
	spa->spa_root_vdev = vdev_create(guid, NULL);
	if (spa->spa_root_vdev == NULL) {
		free(spa->spa_name);
		free(spa);
		return (NULL);
	}
	spa->spa_root_vdev->v_name = strdup("root");
	STAILQ_INSERT_TAIL(&zfs_pools, spa, spa_link);

	return (spa);
}

static const char *
state_name(vdev_state_t state)
{
	static const char *names[] = {
		"UNKNOWN",
		"CLOSED",
		"OFFLINE",
		"REMOVED",
		"CANT_OPEN",
		"FAULTED",
		"DEGRADED",
		"ONLINE"
	};
	return (names[state]);
}

#ifdef BOOT2

#define pager_printf printf

#else

static int
pager_printf(const char *fmt, ...)
{
	char line[80];
	va_list args;

	va_start(args, fmt);
	vsnprintf(line, sizeof(line), fmt, args);
	va_end(args);
	return (pager_output(line));
}

#endif

#define STATUS_FORMAT	" %s %s\n"

static int
print_state(int indent, const char *name, vdev_state_t state)
{
	int i;
	char buf[512];

	buf[0] = 0;
	for (i = 0; i < indent; i++)
		strcat(buf, " ");
	strcat(buf, name);
	return (pager_printf(STATUS_FORMAT, buf, state_name(state)));
}

static int
vdev_status(vdev_t *vdev, int indent)
{
	vdev_t *kid;
	int ret;

	if (vdev->v_islog) {
		(void) pager_output(" logs\n");
		indent++;
	}

	ret = print_state(indent, vdev->v_name, vdev->v_state);
	if (ret != 0)
		return (ret);

	STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) {
		ret = vdev_status(kid, indent + 1);
		if (ret != 0)
			return (ret);
	}
	return (ret);
}

static int
spa_status(spa_t *spa)
{
	static char bootfs[ZFS_MAXNAMELEN];
	uint64_t rootid;
	vdev_list_t *vlist;
	vdev_t *vdev;
	int good_kids, bad_kids, degraded_kids, ret;
	vdev_state_t state;

	ret = pager_printf(" pool: %s\n", spa->spa_name);
	if (ret != 0)
		return (ret);

	if (zfs_get_root(spa, &rootid) == 0 &&
	    zfs_rlookup(spa, rootid, bootfs) == 0) {
		if (bootfs[0] == '\0')
			ret = pager_printf("bootfs: %s\n", spa->spa_name);
		else
			ret = pager_printf("bootfs: %s/%s\n", spa->spa_name,
			    bootfs);
		if (ret != 0)
			return (ret);
	}
	ret = pager_printf("config:\n\n");
	if (ret != 0)
		return (ret);
	ret = pager_printf(STATUS_FORMAT, "NAME", "STATE");
	if (ret != 0)
		return (ret);

	good_kids = 0;
	degraded_kids = 0;
	bad_kids = 0;
	vlist = &spa->spa_root_vdev->v_children;
	STAILQ_FOREACH(vdev, vlist, v_childlink) {
		if (vdev->v_state == VDEV_STATE_HEALTHY)
			good_kids++;
		else if (vdev->v_state == VDEV_STATE_DEGRADED)
			degraded_kids++;
		else
			bad_kids++;
	}

	state = VDEV_STATE_CLOSED;
	if (good_kids > 0 && (degraded_kids + bad_kids) == 0)
		state = VDEV_STATE_HEALTHY;
	else if ((good_kids + degraded_kids) > 0)
		state = VDEV_STATE_DEGRADED;

	ret = print_state(0, spa->spa_name, state);
	if (ret != 0)
		return (ret);

	STAILQ_FOREACH(vdev, vlist, v_childlink) {
		ret = vdev_status(vdev, 1);
		if (ret != 0)
			return (ret);
	}
	return (ret);
}

static int
spa_all_status(void)
{
	spa_t *spa;
	int first = 1, ret = 0;

	STAILQ_FOREACH(spa, &zfs_pools, spa_link) {
		if (!first) {
			ret = pager_printf("\n");
			if (ret != 0)
				return (ret);
		}
		first = 0;
		ret = spa_status(spa);
		if (ret != 0)
			return (ret);
	}
	return (ret);
}

static uint64_t
vdev_label_offset(uint64_t psize, int l, uint64_t offset)
{
	uint64_t label_offset;

	if (l < VDEV_LABELS / 2)
		label_offset = 0;
	else
		label_offset = psize - VDEV_LABELS * sizeof (vdev_label_t);

	return (offset + l * sizeof (vdev_label_t) + label_offset);
}
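
/*
 * Example: with the standard 256K vdev_label_t, the four copies of a
 * structure at label offset 0 live at 0, 256K, psize - 512K and
 * psize - 256K: labels 0 and 1 sit at the front of the device, labels 2
 * and 3 at the end.
 */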

static int
vdev_uberblock_compare(const uberblock_t *ub1, const uberblock_t *ub2)
{
	unsigned int seq1 = 0;
	unsigned int seq2 = 0;
	int cmp = AVL_CMP(ub1->ub_txg, ub2->ub_txg);

	if (cmp != 0)
		return (cmp);

	cmp = AVL_CMP(ub1->ub_timestamp, ub2->ub_timestamp);
	if (cmp != 0)
		return (cmp);

	if (MMP_VALID(ub1) && MMP_SEQ_VALID(ub1))
		seq1 = MMP_SEQ(ub1);

	if (MMP_VALID(ub2) && MMP_SEQ_VALID(ub2))
		seq2 = MMP_SEQ(ub2);

	return (AVL_CMP(seq1, seq2));
}

static int
uberblock_verify(uberblock_t *ub)
{
	if (ub->ub_magic == BSWAP_64((uint64_t)UBERBLOCK_MAGIC)) {
		byteswap_uint64_array(ub, sizeof (uberblock_t));
	}

	if (ub->ub_magic != UBERBLOCK_MAGIC ||
	    !SPA_VERSION_IS_SUPPORTED(ub->ub_version))
		return (EINVAL);

	return (0);
}

static int
vdev_label_read(vdev_t *vd, int l, void *buf, uint64_t offset,
    size_t size)
{
	blkptr_t bp;
	off_t off;

	off = vdev_label_offset(vd->v_psize, l, offset);

	BP_ZERO(&bp);
	BP_SET_LSIZE(&bp, size);
	BP_SET_PSIZE(&bp, size);
	BP_SET_CHECKSUM(&bp, ZIO_CHECKSUM_LABEL);
	BP_SET_COMPRESS(&bp, ZIO_COMPRESS_OFF);
	DVA_SET_OFFSET(BP_IDENTITY(&bp), off);
	ZIO_SET_CHECKSUM(&bp.blk_cksum, off, 0, 0, 0);

	return (vdev_read_phys(vd, &bp, buf, off, size));
}

/*
 * We need to be sure we write to the correct location.
 * Our vdev label consists of 4 fields:
 * pad1 (8k), reserved.
 * bootenv (8k), checksummed, previously reserved, may contain garbage.
 * vdev_phys (112k), checksummed.
 * uberblock ring (128k), checksummed.
 *
 * Since the bootenv area may contain garbage, we cannot reliably read it,
 * as we can get checksum errors.
 * The next best thing is vdev_phys - it is just after bootenv.  It still
 * may be corrupted, but in that case we will miss only this one write.
 */
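
/*
 * In field offsets (assuming the usual 256K label sketched above): vl_pad1
 * at 0, vl_be (bootenv) at 8K, vl_vdev_phys at 16K and the uberblock ring
 * at 128K.  This is what vdev_label_write_validate() below relies on:
 * the bootenv offset plus VDEV_PAD_SIZE must equal the vdev_phys offset.
 */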

static int
vdev_label_write_validate(vdev_t *vd, int l, uint64_t offset)
{
	uint64_t off, o_phys;
	void *buf;
	size_t size = VDEV_PHYS_SIZE;
	int rc;

	o_phys = offsetof(vdev_label_t, vl_vdev_phys);
	off = vdev_label_offset(vd->v_psize, l, o_phys);

	/* off should be 8K from bootenv */
	if (vdev_label_offset(vd->v_psize, l, offset) + VDEV_PAD_SIZE != off)
		return (EINVAL);

	buf = malloc(size);
	if (buf == NULL)
		return (ENOMEM);

	/* Read vdev_phys */
	rc = vdev_label_read(vd, l, buf, o_phys, size);
	free(buf);
	return (rc);
}

static int
vdev_label_write(vdev_t *vd, int l, vdev_boot_envblock_t *be, uint64_t offset)
{
	zio_checksum_info_t *ci;
	zio_cksum_t cksum;
	off_t off;
	size_t size = VDEV_PAD_SIZE;
	int rc;

	if (vd->v_phys_write == NULL)
		return (ENOTSUP);

	off = vdev_label_offset(vd->v_psize, l, offset);

	rc = vdev_label_write_validate(vd, l, offset);
	if (rc != 0) {
		return (rc);
	}

	ci = &zio_checksum_table[ZIO_CHECKSUM_LABEL];
	be->vbe_zbt.zec_magic = ZEC_MAGIC;
	zio_checksum_label_verifier(&be->vbe_zbt.zec_cksum, off);
	ci->ci_func[0](be, size, NULL, &cksum);
	be->vbe_zbt.zec_cksum = cksum;

	return (vdev_write_phys(vd, be, off, size));
}

static int
vdev_write_bootenv_impl(vdev_t *vdev, vdev_boot_envblock_t *be)
{
	vdev_t *kid;
	int rv = 0, err;

	STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) {
		if (kid->v_state != VDEV_STATE_HEALTHY)
			continue;
		err = vdev_write_bootenv_impl(kid, be);
		if (err != 0)
			rv = err;
	}

	/*
	 * Non-leaf vdevs do not have v_phys_write.
	 */
	if (vdev->v_phys_write == NULL)
		return (rv);

	for (int l = 0; l < VDEV_LABELS; l++) {
		err = vdev_label_write(vdev, l, be,
		    offsetof(vdev_label_t, vl_be));
		if (err != 0) {
			printf("failed to write bootenv to %s label %d: %d\n",
			    vdev->v_name ? vdev->v_name : "unknown", l, err);
			rv = err;
		}
	}
	return (rv);
}

int
vdev_write_bootenv(vdev_t *vdev, nvlist_t *nvl)
{
	vdev_boot_envblock_t *be;
	nvlist_t nv, *nvp;
	uint64_t version;
	int rv;

	if (nvl->nv_size > sizeof(be->vbe_bootenv))
		return (E2BIG);

	version = VB_RAW;
	nvp = vdev_read_bootenv(vdev);
	if (nvp != NULL) {
		nvlist_find(nvp, BOOTENV_VERSION, DATA_TYPE_UINT64, NULL,
		    &version, NULL);
		nvlist_destroy(nvp);
	}

	be = calloc(1, sizeof(*be));
	if (be == NULL)
		return (ENOMEM);

	be->vbe_version = version;
	switch (version) {
	case VB_RAW:
		/*
		 * If there is no envmap, we will just wipe bootenv.
		 */
		nvlist_find(nvl, GRUB_ENVMAP, DATA_TYPE_STRING, NULL,
		    be->vbe_bootenv, NULL);
		rv = 0;
		break;

	case VB_NVLIST:
		nv.nv_header = nvl->nv_header;
		nv.nv_asize = nvl->nv_asize;
		nv.nv_size = nvl->nv_size;

		bcopy(&nv.nv_header, be->vbe_bootenv, sizeof(nv.nv_header));
		nv.nv_data = be->vbe_bootenv + sizeof(nvs_header_t);
		bcopy(nvl->nv_data, nv.nv_data, nv.nv_size);
		rv = nvlist_export(&nv);
		break;

	default:
		rv = EINVAL;
		break;
	}

	if (rv == 0) {
		be->vbe_version = htobe64(be->vbe_version);
		rv = vdev_write_bootenv_impl(vdev, be);
	}
	free(be);
	return (rv);
}

/*
 * Read the bootenv area from the pool label and return the nvlist from it.
 * We return after the first successful read.
 */
nvlist_t *
vdev_read_bootenv(vdev_t *vdev)
{
	vdev_t *kid;
	nvlist_t *benv;
	vdev_boot_envblock_t *be;
	char *command;
	bool ok;
	int rv;

	STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) {
		if (kid->v_state != VDEV_STATE_HEALTHY)
			continue;

		benv = vdev_read_bootenv(kid);
		if (benv != NULL)
			return (benv);
	}

	be = malloc(sizeof (*be));
	if (be == NULL)
		return (NULL);

	rv = 0;
	for (int l = 0; l < VDEV_LABELS; l++) {
		rv = vdev_label_read(vdev, l, be,
		    offsetof(vdev_label_t, vl_be),
		    sizeof (*be));
		if (rv == 0)
			break;
	}
	if (rv != 0) {
		free(be);
		return (NULL);
	}

	be->vbe_version = be64toh(be->vbe_version);
	switch (be->vbe_version) {
	case VB_RAW:
		/*
		 * We have textual data in vbe_bootenv; create an nvlist
		 * with the key "envmap".
		 */
		benv = nvlist_create(NV_UNIQUE_NAME);
		if (benv != NULL) {
			if (*be->vbe_bootenv == '\0') {
				nvlist_add_uint64(benv, BOOTENV_VERSION,
				    VB_NVLIST);
				break;
			}
			nvlist_add_uint64(benv, BOOTENV_VERSION, VB_RAW);
			be->vbe_bootenv[sizeof (be->vbe_bootenv) - 1] = '\0';
			nvlist_add_string(benv, GRUB_ENVMAP, be->vbe_bootenv);
		}
		break;

	case VB_NVLIST:
		benv = nvlist_import(be->vbe_bootenv, sizeof(be->vbe_bootenv));
		break;

	default:
		command = (char *)be;
		ok = false;

		/* Check for legacy zfsbootcfg command string */
		for (int i = 0; command[i] != '\0'; i++) {
			if (iscntrl(command[i])) {
				ok = false;
				break;
			} else {
				ok = true;
			}
		}
		benv = nvlist_create(NV_UNIQUE_NAME);
		if (benv != NULL) {
			if (ok)
				nvlist_add_string(benv, FREEBSD_BOOTONCE,
				    command);
			else
				nvlist_add_uint64(benv, BOOTENV_VERSION,
				    VB_NVLIST);
		}
		break;
	}
	free(be);
	return (benv);
}
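
/*
 * Hypothetical caller sketch (variable names are illustrative only):
 * fetch the bootenv and look up the GRUB-style environment map, if any:
 *
 *	benv = vdev_read_bootenv(spa->spa_root_vdev);
 *	if (benv != NULL && nvlist_find(benv, GRUB_ENVMAP,
 *	    DATA_TYPE_STRING, NULL, &envmap, &envlen) == 0)
 *		(envmap holds "key=value" lines)
 */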

static uint64_t
vdev_get_label_asize(nvlist_t *nvl)
{
	nvlist_t *vdevs;
	uint64_t asize;
	const char *type;
	int len;

	asize = 0;
	/* Get vdev tree */
	if (nvlist_find(nvl, ZPOOL_CONFIG_VDEV_TREE, DATA_TYPE_NVLIST,
	    NULL, &vdevs, NULL) != 0)
		return (asize);

	/*
	 * Get vdev type.  We will calculate asize for raidz, mirror and
	 * disk.  For raidz, the asize is the raw size of all children.
	 */
	if (nvlist_find(vdevs, ZPOOL_CONFIG_TYPE, DATA_TYPE_STRING,
	    NULL, &type, &len) != 0)
		goto done;

	if (memcmp(type, VDEV_TYPE_MIRROR, len) != 0 &&
	    memcmp(type, VDEV_TYPE_DISK, len) != 0 &&
	    memcmp(type, VDEV_TYPE_RAIDZ, len) != 0)
		goto done;

	if (nvlist_find(vdevs, ZPOOL_CONFIG_ASIZE, DATA_TYPE_UINT64,
	    NULL, &asize, NULL) != 0)
		goto done;

	if (memcmp(type, VDEV_TYPE_RAIDZ, len) == 0) {
		nvlist_t **kids;
		int nkids;

		if (nvlist_find(vdevs, ZPOOL_CONFIG_CHILDREN,
		    DATA_TYPE_NVLIST_ARRAY, &nkids, &kids, NULL) != 0) {
			asize = 0;
			goto done;
		}

		asize /= nkids;
		for (int i = 0; i < nkids; i++)
			nvlist_destroy(kids[i]);
		free(kids);
	}

	asize += VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
done:
	nvlist_destroy(vdevs);
	return (asize);
}

static nvlist_t *
vdev_label_read_config(vdev_t *vd, uint64_t txg)
{
	vdev_phys_t *label;
	uint64_t best_txg = 0;
	uint64_t label_txg = 0;
	uint64_t asize;
	nvlist_t *nvl = NULL, *tmp;
	int error;

	label = malloc(sizeof (vdev_phys_t));
	if (label == NULL)
		return (NULL);

	for (int l = 0; l < VDEV_LABELS; l++) {
		if (vdev_label_read(vd, l, label,
		    offsetof(vdev_label_t, vl_vdev_phys),
		    sizeof (vdev_phys_t)))
			continue;

		tmp = nvlist_import(label->vp_nvlist,
		    sizeof(label->vp_nvlist));
		if (tmp == NULL)
			continue;

		error = nvlist_find(tmp, ZPOOL_CONFIG_POOL_TXG,
		    DATA_TYPE_UINT64, NULL, &label_txg, NULL);
		if (error != 0 || label_txg == 0) {
			nvlist_destroy(nvl);
			nvl = tmp;
			goto done;
		}

		if (label_txg <= txg && label_txg > best_txg) {
			best_txg = label_txg;
			nvlist_destroy(nvl);
			nvl = tmp;
			tmp = NULL;

			/*
			 * Use asize from the pool config.  We need this
			 * because we can get a bad value from the BIOS.
			 */
			asize = vdev_get_label_asize(nvl);
			if (asize != 0) {
				vd->v_psize = asize;
			}
		}
		nvlist_destroy(tmp);
	}

	if (best_txg == 0) {
		nvlist_destroy(nvl);
		nvl = NULL;
	}
done:
	free(label);
	return (nvl);
}

static void
vdev_uberblock_load(vdev_t *vd, uberblock_t *ub)
{
	uberblock_t *buf;

	buf = malloc(VDEV_UBERBLOCK_SIZE(vd));
	if (buf == NULL)
		return;

	for (int l = 0; l < VDEV_LABELS; l++) {
		for (int n = 0; n < VDEV_UBERBLOCK_COUNT(vd); n++) {
			if (vdev_label_read(vd, l, buf,
			    VDEV_UBERBLOCK_OFFSET(vd, n),
			    VDEV_UBERBLOCK_SIZE(vd)))
				continue;
			if (uberblock_verify(buf) != 0)
				continue;

			if (vdev_uberblock_compare(buf, ub) > 0)
				*ub = *buf;
		}
	}
	free(buf);
}
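
/*
 * Example (assuming the usual VDEV_UBERBLOCK_SIZE definition,
 * 1 << MAX(ashift, UBERBLOCK_SHIFT)): with ashift 12 each uberblock slot
 * is 4K, so a 128K uberblock ring holds 32 slots per label; the loops
 * above scan all four labels and keep the best slot per
 * vdev_uberblock_compare().
 */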

static int
vdev_probe(vdev_phys_read_t *_read, vdev_phys_write_t *_write, void *priv,
    spa_t **spap)
{
	vdev_t vtmp;
	spa_t *spa;
	vdev_t *vdev;
	nvlist_t *nvl;
	uint64_t val;
	uint64_t guid, vdev_children;
	uint64_t pool_txg, pool_guid;
	const char *pool_name;
	int rc, namelen;

	/*
	 * Load the vdev label and figure out which
	 * uberblock is most current.
	 */
	memset(&vtmp, 0, sizeof(vtmp));
	vtmp.v_phys_read = _read;
	vtmp.v_phys_write = _write;
	vtmp.v_priv = priv;
	vtmp.v_psize = P2ALIGN(ldi_get_size(priv),
	    (uint64_t)sizeof (vdev_label_t));

	/* Test for minimum device size. */
	if (vtmp.v_psize < SPA_MINDEVSIZE)
		return (EIO);

	nvl = vdev_label_read_config(&vtmp, UINT64_MAX);
	if (nvl == NULL)
		return (EIO);

	if (nvlist_find(nvl, ZPOOL_CONFIG_VERSION, DATA_TYPE_UINT64,
	    NULL, &val, NULL) != 0) {
		nvlist_destroy(nvl);
		return (EIO);
	}

	if (!SPA_VERSION_IS_SUPPORTED(val)) {
		printf("ZFS: unsupported ZFS version %u (should be %u)\n",
		    (unsigned)val, (unsigned)SPA_VERSION);
		nvlist_destroy(nvl);
		return (EIO);
	}

	/* Check ZFS features for read */
	rc = nvlist_check_features_for_read(nvl);
	if (rc != 0) {
		nvlist_destroy(nvl);
		return (EIO);
	}

	if (nvlist_find(nvl, ZPOOL_CONFIG_POOL_STATE, DATA_TYPE_UINT64,
	    NULL, &val, NULL) != 0) {
		nvlist_destroy(nvl);
		return (EIO);
	}

	if (val == POOL_STATE_DESTROYED) {
		/* We can't boot from a destroyed pool. */
		nvlist_destroy(nvl);
		return (EIO);
	}

	if (nvlist_find(nvl, ZPOOL_CONFIG_POOL_TXG, DATA_TYPE_UINT64,
	    NULL, &pool_txg, NULL) != 0 ||
	    nvlist_find(nvl, ZPOOL_CONFIG_POOL_GUID, DATA_TYPE_UINT64,
	    NULL, &pool_guid, NULL) != 0 ||
	    nvlist_find(nvl, ZPOOL_CONFIG_POOL_NAME, DATA_TYPE_STRING,
	    NULL, &pool_name, &namelen) != 0) {
		/*
		 * Cache and spare devices end up here - just ignore
		 * them.
		 */
		nvlist_destroy(nvl);
		return (EIO);
	}

	/*
	 * Create the pool if this is the first time we've seen it.
	 */
	spa = spa_find_by_guid(pool_guid);
	if (spa == NULL) {
		char *name;

		nvlist_find(nvl, ZPOOL_CONFIG_VDEV_CHILDREN,
		    DATA_TYPE_UINT64, NULL, &vdev_children, NULL);
		name = malloc(namelen + 1);
		if (name == NULL) {
			nvlist_destroy(nvl);
			return (ENOMEM);
		}
		bcopy(pool_name, name, namelen);
		name[namelen] = '\0';
		spa = spa_create(pool_guid, name);
		free(name);
		if (spa == NULL) {
			nvlist_destroy(nvl);
			return (ENOMEM);
		}
		spa->spa_root_vdev->v_nchildren = vdev_children;
	}
	if (pool_txg > spa->spa_txg)
		spa->spa_txg = pool_txg;

	/*
	 * Get the vdev tree and create our in-core copy of it.
	 * If we already have a vdev with this guid, this must
	 * be some kind of alias (overlapping slices, dangerously dedicated
	 * disks etc).
	 */
	if (nvlist_find(nvl, ZPOOL_CONFIG_GUID, DATA_TYPE_UINT64,
	    NULL, &guid, NULL) != 0) {
		nvlist_destroy(nvl);
		return (EIO);
	}
	vdev = vdev_find(guid);
	/* Has this vdev already been inited? */
	if (vdev && vdev->v_phys_read) {
		nvlist_destroy(nvl);
		return (EIO);
	}

	rc = vdev_init_from_label(spa, nvl);
	nvlist_destroy(nvl);
	if (rc != 0)
		return (rc);

	/*
	 * We should already have created an incomplete vdev for this
	 * vdev.  Find it and initialise it with our read proc.
	 */
	vdev = vdev_find(guid);
	if (vdev != NULL) {
		vdev->v_phys_read = _read;
		vdev->v_phys_write = _write;
		vdev->v_priv = priv;
		vdev->v_psize = vtmp.v_psize;
		/*
		 * If no other state is set, mark vdev healthy.
		 */
		if (vdev->v_state == VDEV_STATE_UNKNOWN)
			vdev->v_state = VDEV_STATE_HEALTHY;
	} else {
		printf("ZFS: inconsistent nvlist contents\n");
		return (EIO);
	}

	if (vdev->v_islog)
		spa->spa_with_log = vdev->v_islog;

	/*
	 * Re-evaluate top-level vdev state.
	 */
	vdev_set_state(vdev->v_top);

	/*
	 * Ok, we are happy with the pool so far.  Let's find
	 * the best uberblock and then we can actually access
	 * the contents of the pool.
	 */
	vdev_uberblock_load(vdev, spa->spa_uberblock);

	if (spap != NULL)
		*spap = spa;
	return (0);
}
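
/*
 * Hypothetical usage sketch (callback names are illustrative): a disk
 * backend probes each candidate partition with
 *
 *	if (vdev_probe(my_phys_read, NULL, priv, &spa) == 0)
 *		(spa now refers to the pool, which is also on zfs_pools)
 *
 * passing NULL for the write callback on read-only media, in which case
 * vdev_write_phys() fails with ENOTSUP.
 */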
	 */
	vdev_set_state(vdev->v_top);

	/*
	 * OK, we are happy with the pool so far. Let's find
	 * the best uberblock and then we can actually access
	 * the contents of the pool.
	 */
	vdev_uberblock_load(vdev, spa->spa_uberblock);

	if (spap != NULL)
		*spap = spa;
	return (0);
}

/*
 * Return log2(n) if n is a power of two, otherwise -1.
 */
static int
ilog2(int n)
{
	int v;

	for (v = 0; v < 32; v++)
		if (n == (1 << v))
			return (v);
	return (-1);
}

static int
zio_read_gang(const spa_t *spa, const blkptr_t *bp, void *buf)
{
	blkptr_t gbh_bp;
	zio_gbh_phys_t zio_gb;
	char *pbuf;
	int i;

	/* Artificial BP for the gang block header. */
	gbh_bp = *bp;
	BP_SET_PSIZE(&gbh_bp, SPA_GANGBLOCKSIZE);
	BP_SET_LSIZE(&gbh_bp, SPA_GANGBLOCKSIZE);
	BP_SET_CHECKSUM(&gbh_bp, ZIO_CHECKSUM_GANG_HEADER);
	BP_SET_COMPRESS(&gbh_bp, ZIO_COMPRESS_OFF);
	for (i = 0; i < SPA_DVAS_PER_BP; i++)
		DVA_SET_GANG(&gbh_bp.blk_dva[i], 0);

	/* Read the gang header block using the artificial BP. */
	if (zio_read(spa, &gbh_bp, &zio_gb))
		return (EIO);

	pbuf = buf;
	for (i = 0; i < SPA_GBH_NBLKPTRS; i++) {
		blkptr_t *gbp = &zio_gb.zg_blkptr[i];

		if (BP_IS_HOLE(gbp))
			continue;
		if (zio_read(spa, gbp, pbuf))
			return (EIO);
		pbuf += BP_GET_PSIZE(gbp);
	}

	if (zio_checksum_verify(spa, bp, buf))
		return (EIO);
	return (0);
}

static int
zio_read(const spa_t *spa, const blkptr_t *bp, void *buf)
{
	int cpfunc = BP_GET_COMPRESS(bp);
	uint64_t align, size;
	void *pbuf;
	int i, error;

	/*
	 * Process data embedded in the block pointer.
	 */
	if (BP_IS_EMBEDDED(bp)) {
		ASSERT(BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA);

		size = BPE_GET_PSIZE(bp);
		ASSERT(size <= BPE_PAYLOAD_SIZE);

		if (cpfunc != ZIO_COMPRESS_OFF)
			pbuf = malloc(size);
		else
			pbuf = buf;

		if (pbuf == NULL)
			return (ENOMEM);

		decode_embedded_bp_compressed(bp, pbuf);
		error = 0;

		if (cpfunc != ZIO_COMPRESS_OFF) {
			error = zio_decompress_data(cpfunc, pbuf,
			    size, buf, BP_GET_LSIZE(bp));
			free(pbuf);
		}
		if (error != 0)
			printf("ZFS: i/o error - unable to decompress "
			    "block pointer data, error %d\n", error);
		return (error);
	}

	error = EIO;

	for (i = 0; i < SPA_DVAS_PER_BP; i++) {
		const dva_t *dva = &bp->blk_dva[i];
		vdev_t *vdev;
		vdev_list_t *vlist;
		uint64_t vdevid;
		off_t offset;

		if (!dva->dva_word[0] && !dva->dva_word[1])
			continue;

		vdevid = DVA_GET_VDEV(dva);
		offset = DVA_GET_OFFSET(dva);
		vlist = &spa->spa_root_vdev->v_children;
		STAILQ_FOREACH(vdev, vlist, v_childlink) {
			if (vdev->v_id == vdevid)
				break;
		}
		if (!vdev || !vdev->v_read)
			continue;

		size = BP_GET_PSIZE(bp);
		if (vdev->v_read == vdev_raidz_read) {
			align = 1ULL << vdev->v_ashift;
			if (P2PHASE(size, align) != 0)
				size = P2ROUNDUP(size, align);
		}
		if (size != BP_GET_PSIZE(bp) || cpfunc != ZIO_COMPRESS_OFF)
			pbuf = malloc(size);
		else
			pbuf = buf;

		if (pbuf == NULL) {
			error = ENOMEM;
			break;
		}

		if (DVA_GET_GANG(dva))
			error = zio_read_gang(spa, bp, pbuf);
		else
			error = vdev->v_read(vdev, bp, pbuf, offset, size);
		if (error == 0) {
			if (cpfunc != ZIO_COMPRESS_OFF)
				error = zio_decompress_data(cpfunc, pbuf,
				    BP_GET_PSIZE(bp), buf, BP_GET_LSIZE(bp));
			else if (size != BP_GET_PSIZE(bp))
				bcopy(pbuf, buf, BP_GET_PSIZE(bp));
		} else {
			printf("zio_read error: %d\n", error);
		}
		if (buf != pbuf)
			free(pbuf);
		if (error == 0)
			break;
	}
	if (error != 0)
		printf("ZFS: i/o error - all block copies unavailable\n");

	return (error);
}

static int
dnode_read(const spa_t *spa, const dnode_phys_t *dnode, off_t offset,
    void *buf, size_t buflen)
{
	int ibshift = dnode->dn_indblkshift - SPA_BLKPTRSHIFT;
	int bsize = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	int nlevels = dnode->dn_nlevels;
	int i, rc;

	if (bsize > SPA_MAXBLOCKSIZE) {
		printf("ZFS: I/O error - blocks larger than %llu are not "
		    "supported\n", (unsigned long long)SPA_MAXBLOCKSIZE);
		return (EIO);
	}

	/*
	 * Handle odd block sizes; mirrors dmu_read_impl(). Data can't exist
	 * past the first block, so we'll clip the read to the portion of the
	 * buffer within bsize and zero out the remainder.
	 */
	if (dnode->dn_maxblkid == 0) {
		size_t newbuflen;

		newbuflen = offset > bsize ? 0 : MIN(buflen, bsize - offset);
		bzero((char *)buf + newbuflen, buflen - newbuflen);
		buflen = newbuflen;
	}

	/*
	 * Note: bsize may not be a power of two here, so we need to do an
	 * actual divide rather than a bit shift.
	 */
	while (buflen > 0) {
		uint64_t bn = offset / bsize;
		int boff = offset % bsize;
		int ibn;
		const blkptr_t *indbp;
		blkptr_t bp;

		if (bn > dnode->dn_maxblkid)
			return (EIO);

		if (dnode == dnode_cache_obj && bn == dnode_cache_bn)
			goto cached;

		indbp = dnode->dn_blkptr;
		for (i = 0; i < nlevels; i++) {
			/*
			 * Copy the bp from the indirect array so that
			 * we can re-use the scratch buffer for multi-level
			 * objects.
			 */
			ibn = bn >> ((nlevels - i - 1) * ibshift);
			ibn &= ((1 << ibshift) - 1);
			bp = indbp[ibn];
			if (BP_IS_HOLE(&bp)) {
				memset(dnode_cache_buf, 0, bsize);
				break;
			}
			rc = zio_read(spa, &bp, dnode_cache_buf);
			if (rc)
				return (rc);
			indbp = (const blkptr_t *)dnode_cache_buf;
		}
		dnode_cache_obj = dnode;
		dnode_cache_bn = bn;
	cached:

		/*
		 * The buffer contains our data block. Copy what we
		 * need from it and loop.
		 */
		i = bsize - boff;
		if (i > buflen)
			i = buflen;
		memcpy(buf, &dnode_cache_buf[boff], i);
		buf = ((char *)buf) + i;
		offset += i;
		buflen -= i;
	}

	return (0);
}

/*
 * Lookup a value in a microzap directory.
 */
static int
mzap_lookup(const mzap_phys_t *mz, size_t size, const char *name,
    uint64_t *value)
{
	const mzap_ent_phys_t *mze;
	int chunks, i;

	/*
	 * Microzap objects use exactly one block. Read the whole
	 * thing.
	 */
	chunks = size / MZAP_ENT_LEN - 1;
	for (i = 0; i < chunks; i++) {
		mze = &mz->mz_chunk[i];
		if (strcmp(mze->mze_name, name) == 0) {
			*value = mze->mze_value;
			return (0);
		}
	}

	return (ENOENT);
}

/*
 * Compare a name with a zap leaf entry. Return non-zero if the name
 * matches.
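 * The name is not stored contiguously; it is spread across a chain of
 * leaf array chunks, ZAP_LEAF_ARRAY_BYTES at a time, so the comparison
 * walks that chain piecewise.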
 */
static int
fzap_name_equal(const zap_leaf_t *zl, const zap_leaf_chunk_t *zc,
    const char *name)
{
	size_t namelen;
	const zap_leaf_chunk_t *nc;
	const char *p;

	namelen = zc->l_entry.le_name_numints;

	nc = &ZAP_LEAF_CHUNK(zl, zc->l_entry.le_name_chunk);
	p = name;
	while (namelen > 0) {
		size_t len;

		len = namelen;
		if (len > ZAP_LEAF_ARRAY_BYTES)
			len = ZAP_LEAF_ARRAY_BYTES;
		if (memcmp(p, nc->l_array.la_array, len))
			return (0);
		p += len;
		namelen -= len;
		nc = &ZAP_LEAF_CHUNK(zl, nc->l_array.la_next);
	}

	return (1);
}

/*
 * Extract a uint64_t value from a zap leaf entry.
 */
static uint64_t
fzap_leaf_value(const zap_leaf_t *zl, const zap_leaf_chunk_t *zc)
{
	const zap_leaf_chunk_t *vc;
	int i;
	uint64_t value;
	const uint8_t *p;

	vc = &ZAP_LEAF_CHUNK(zl, zc->l_entry.le_value_chunk);
	for (i = 0, value = 0, p = vc->l_array.la_array; i < 8; i++) {
		value = (value << 8) | p[i];
	}

	return (value);
}

/*
 * Store an integer of the given width at addr.
 */
static void
stv(int len, void *addr, uint64_t value)
{
	switch (len) {
	case 1:
		*(uint8_t *)addr = value;
		return;
	case 2:
		*(uint16_t *)addr = value;
		return;
	case 4:
		*(uint32_t *)addr = value;
		return;
	case 8:
		*(uint64_t *)addr = value;
		return;
	}
}

/*
 * Extract an array from a zap leaf entry.
 */
static void
fzap_leaf_array(const zap_leaf_t *zl, const zap_leaf_chunk_t *zc,
    uint64_t integer_size, uint64_t num_integers, void *buf)
{
	uint64_t array_int_len = zc->l_entry.le_value_intlen;
	uint64_t value = 0;
	uint64_t *u64 = buf;
	char *p = buf;
	int len = MIN(zc->l_entry.le_value_numints, num_integers);
	int chunk = zc->l_entry.le_value_chunk;
	int byten = 0;

	if (integer_size == 8 && len == 1) {
		*u64 = fzap_leaf_value(zl, zc);
		return;
	}

	while (len > 0) {
		struct zap_leaf_array *la = &ZAP_LEAF_CHUNK(zl, chunk).l_array;
		int i;

		ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(zl));
		for (i = 0; i < ZAP_LEAF_ARRAY_BYTES && len > 0; i++) {
			value = (value << 8) | la->la_array[i];
			byten++;
			if (byten == array_int_len) {
				stv(integer_size, p, value);
				byten = 0;
				len--;
				if (len == 0)
					return;
				p += integer_size;
			}
		}
		chunk = la->la_next;
	}
}

static int
fzap_check_size(uint64_t integer_size, uint64_t num_integers)
{

	switch (integer_size) {
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		return (EINVAL);
	}

	if (integer_size * num_integers > ZAP_MAXVALUELEN)
		return (E2BIG);

	return (0);
}

static void
zap_leaf_free(zap_leaf_t *leaf)
{
	free(leaf->l_phys);
	free(leaf);
}

static int
zap_get_leaf_byblk(fat_zap_t *zap, uint64_t blk, zap_leaf_t **lp)
{
	int bs = FZAP_BLOCK_SHIFT(zap);
	int err;

	*lp = malloc(sizeof(**lp));
	if (*lp == NULL)
		return (ENOMEM);

	(*lp)->l_bs = bs;
	(*lp)->l_phys = malloc(1 << bs);

	if ((*lp)->l_phys == NULL) {
		free(*lp);
		return (ENOMEM);
	}
	err = dnode_read(zap->zap_spa, zap->zap_dnode, blk << bs, (*lp)->l_phys,
	    1 << bs);
	if (err != 0) {
		zap_leaf_free(*lp);
	}
	return (err);
} 2589 2590 static int 2591 zap_table_load(fat_zap_t *zap, zap_table_phys_t *tbl, uint64_t idx, 2592 uint64_t *valp) 2593 { 2594 int bs = FZAP_BLOCK_SHIFT(zap); 2595 uint64_t blk = idx >> (bs - 3); 2596 uint64_t off = idx & ((1 << (bs - 3)) - 1); 2597 uint64_t *buf; 2598 int rc; 2599 2600 buf = malloc(1 << zap->zap_block_shift); 2601 if (buf == NULL) 2602 return (ENOMEM); 2603 rc = dnode_read(zap->zap_spa, zap->zap_dnode, (tbl->zt_blk + blk) << bs, 2604 buf, 1 << zap->zap_block_shift); 2605 if (rc == 0) 2606 *valp = buf[off]; 2607 free(buf); 2608 return (rc); 2609 } 2610 2611 static int 2612 zap_idx_to_blk(fat_zap_t *zap, uint64_t idx, uint64_t *valp) 2613 { 2614 if (zap->zap_phys->zap_ptrtbl.zt_numblks == 0) { 2615 *valp = ZAP_EMBEDDED_PTRTBL_ENT(zap, idx); 2616 return (0); 2617 } else { 2618 return (zap_table_load(zap, &zap->zap_phys->zap_ptrtbl, 2619 idx, valp)); 2620 } 2621 } 2622 2623 #define ZAP_HASH_IDX(hash, n) (((n) == 0) ? 0 : ((hash) >> (64 - (n)))) 2624 static int 2625 zap_deref_leaf(fat_zap_t *zap, uint64_t h, zap_leaf_t **lp) 2626 { 2627 uint64_t idx, blk; 2628 int err; 2629 2630 idx = ZAP_HASH_IDX(h, zap->zap_phys->zap_ptrtbl.zt_shift); 2631 err = zap_idx_to_blk(zap, idx, &blk); 2632 if (err != 0) 2633 return (err); 2634 return (zap_get_leaf_byblk(zap, blk, lp)); 2635 } 2636 2637 #define CHAIN_END 0xffff /* end of the chunk chain */ 2638 #define LEAF_HASH(l, h) \ 2639 ((ZAP_LEAF_HASH_NUMENTRIES(l)-1) & \ 2640 ((h) >> \ 2641 (64 - ZAP_LEAF_HASH_SHIFT(l) - (l)->l_phys->l_hdr.lh_prefix_len))) 2642 #define LEAF_HASH_ENTPTR(l, h) (&(l)->l_phys->l_hash[LEAF_HASH(l, h)]) 2643 2644 static int 2645 zap_leaf_lookup(zap_leaf_t *zl, uint64_t hash, const char *name, 2646 uint64_t integer_size, uint64_t num_integers, void *value) 2647 { 2648 int rc; 2649 uint16_t *chunkp; 2650 struct zap_leaf_entry *le; 2651 2652 /* 2653 * Make sure this chunk matches our hash. 2654 */ 2655 if (zl->l_phys->l_hdr.lh_prefix_len > 0 && 2656 zl->l_phys->l_hdr.lh_prefix != 2657 hash >> (64 - zl->l_phys->l_hdr.lh_prefix_len)) 2658 return (EIO); 2659 2660 rc = ENOENT; 2661 for (chunkp = LEAF_HASH_ENTPTR(zl, hash); 2662 *chunkp != CHAIN_END; chunkp = &le->le_next) { 2663 zap_leaf_chunk_t *zc; 2664 uint16_t chunk = *chunkp; 2665 2666 le = ZAP_LEAF_ENTRY(zl, chunk); 2667 if (le->le_hash != hash) 2668 continue; 2669 zc = &ZAP_LEAF_CHUNK(zl, chunk); 2670 if (fzap_name_equal(zl, zc, name)) { 2671 if (zc->l_entry.le_value_intlen > integer_size) { 2672 rc = EINVAL; 2673 } else { 2674 fzap_leaf_array(zl, zc, integer_size, 2675 num_integers, value); 2676 rc = 0; 2677 } 2678 break; 2679 } 2680 } 2681 return (rc); 2682 } 2683 2684 /* 2685 * Lookup a value in a fatzap directory. 
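 * The name is hashed with the ZAP's salt; the pointer table maps the
 * high bits of the hash to a leaf block, whose hash chains are then
 * searched for an exact match.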
2686 */ 2687 static int 2688 fzap_lookup(const spa_t *spa, const dnode_phys_t *dnode, zap_phys_t *zh, 2689 const char *name, uint64_t integer_size, uint64_t num_integers, 2690 void *value) 2691 { 2692 int bsize = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT; 2693 fat_zap_t z; 2694 zap_leaf_t *zl; 2695 uint64_t hash; 2696 int rc; 2697 2698 if (zh->zap_magic != ZAP_MAGIC) 2699 return (EIO); 2700 2701 if ((rc = fzap_check_size(integer_size, num_integers)) != 0) { 2702 return (rc); 2703 } 2704 2705 z.zap_block_shift = ilog2(bsize); 2706 z.zap_phys = zh; 2707 z.zap_spa = spa; 2708 z.zap_dnode = dnode; 2709 2710 hash = zap_hash(zh->zap_salt, name); 2711 rc = zap_deref_leaf(&z, hash, &zl); 2712 if (rc != 0) 2713 return (rc); 2714 2715 rc = zap_leaf_lookup(zl, hash, name, integer_size, num_integers, value); 2716 2717 zap_leaf_free(zl); 2718 return (rc); 2719 } 2720 2721 /* 2722 * Lookup a name in a zap object and return its value as a uint64_t. 2723 */ 2724 static int 2725 zap_lookup(const spa_t *spa, const dnode_phys_t *dnode, const char *name, 2726 uint64_t integer_size, uint64_t num_integers, void *value) 2727 { 2728 int rc; 2729 zap_phys_t *zap; 2730 size_t size = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT; 2731 2732 zap = malloc(size); 2733 if (zap == NULL) 2734 return (ENOMEM); 2735 2736 rc = dnode_read(spa, dnode, 0, zap, size); 2737 if (rc) 2738 goto done; 2739 2740 switch (zap->zap_block_type) { 2741 case ZBT_MICRO: 2742 rc = mzap_lookup((const mzap_phys_t *)zap, size, name, value); 2743 break; 2744 case ZBT_HEADER: 2745 rc = fzap_lookup(spa, dnode, zap, name, integer_size, 2746 num_integers, value); 2747 break; 2748 default: 2749 printf("ZFS: invalid zap_type=%" PRIx64 "\n", 2750 zap->zap_block_type); 2751 rc = EIO; 2752 } 2753 done: 2754 free(zap); 2755 return (rc); 2756 } 2757 2758 /* 2759 * List a microzap directory. 2760 */ 2761 static int 2762 mzap_list(const mzap_phys_t *mz, size_t size, 2763 int (*callback)(const char *, uint64_t)) 2764 { 2765 const mzap_ent_phys_t *mze; 2766 int chunks, i, rc; 2767 2768 /* 2769 * Microzap objects use exactly one block. Read the whole 2770 * thing. 2771 */ 2772 rc = 0; 2773 chunks = size / MZAP_ENT_LEN - 1; 2774 for (i = 0; i < chunks; i++) { 2775 mze = &mz->mz_chunk[i]; 2776 if (mze->mze_name[0]) { 2777 rc = callback(mze->mze_name, mze->mze_value); 2778 if (rc != 0) 2779 break; 2780 } 2781 } 2782 2783 return (rc); 2784 } 2785 2786 /* 2787 * List a fatzap directory. 2788 */ 2789 static int 2790 fzap_list(const spa_t *spa, const dnode_phys_t *dnode, zap_phys_t *zh, 2791 int (*callback)(const char *, uint64_t)) 2792 { 2793 int bsize = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT; 2794 fat_zap_t z; 2795 uint64_t i; 2796 int j, rc; 2797 2798 if (zh->zap_magic != ZAP_MAGIC) 2799 return (EIO); 2800 2801 z.zap_block_shift = ilog2(bsize); 2802 z.zap_phys = zh; 2803 2804 /* 2805 * This assumes that the leaf blocks start at block 1. The 2806 * documentation isn't exactly clear on this. 
	 */
	zap_leaf_t zl;
	zl.l_bs = z.zap_block_shift;
	zl.l_phys = malloc(bsize);
	if (zl.l_phys == NULL)
		return (ENOMEM);

	for (i = 0; i < zh->zap_num_leafs; i++) {
		off_t off = ((off_t)(i + 1)) << zl.l_bs;
		char name[256], *p;
		uint64_t value;

		if (dnode_read(spa, dnode, off, zl.l_phys, bsize)) {
			free(zl.l_phys);
			return (EIO);
		}

		for (j = 0; j < ZAP_LEAF_NUMCHUNKS(&zl); j++) {
			zap_leaf_chunk_t *zc, *nc;
			int namelen;

			zc = &ZAP_LEAF_CHUNK(&zl, j);
			if (zc->l_entry.le_type != ZAP_CHUNK_ENTRY)
				continue;
			namelen = zc->l_entry.le_name_numints;
			if (namelen > sizeof(name))
				namelen = sizeof(name);

			/*
			 * Paste the name back together.
			 */
			nc = &ZAP_LEAF_CHUNK(&zl, zc->l_entry.le_name_chunk);
			p = name;
			while (namelen > 0) {
				int len;

				len = namelen;
				if (len > ZAP_LEAF_ARRAY_BYTES)
					len = ZAP_LEAF_ARRAY_BYTES;
				memcpy(p, nc->l_array.la_array, len);
				p += len;
				namelen -= len;
				nc = &ZAP_LEAF_CHUNK(&zl, nc->l_array.la_next);
			}

			/*
			 * Assume the first eight bytes of the value are
			 * a uint64_t.
			 */
			value = fzap_leaf_value(&zl, zc);

			/* printf("%s 0x%jx\n", name, (uintmax_t)value); */
			rc = callback((const char *)name, value);
			if (rc != 0) {
				free(zl.l_phys);
				return (rc);
			}
		}
	}

	free(zl.l_phys);
	return (0);
}

static int
zfs_printf(const char *name, uint64_t value __unused)
{

	printf("%s\n", name);

	return (0);
}

/*
 * List a zap directory.
 */
static int
zap_list(const spa_t *spa, const dnode_phys_t *dnode)
{
	zap_phys_t *zap;
	size_t size = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	int rc;

	zap = malloc(size);
	if (zap == NULL)
		return (ENOMEM);

	rc = dnode_read(spa, dnode, 0, zap, size);
	if (rc == 0) {
		if (zap->zap_block_type == ZBT_MICRO)
			rc = mzap_list((const mzap_phys_t *)zap, size,
			    zfs_printf);
		else
			rc = fzap_list(spa, dnode, zap, zfs_printf);
	}
	free(zap);
	return (rc);
}

static int
objset_get_dnode(const spa_t *spa, const objset_phys_t *os, uint64_t objnum,
    dnode_phys_t *dnode)
{
	off_t offset;

	offset = objnum * sizeof(dnode_phys_t);
	return (dnode_read(spa, &os->os_meta_dnode, offset,
	    dnode, sizeof(dnode_phys_t)));
}

/*
 * Reverse-lookup a value in a microzap directory: find the entry that
 * carries the given value and copy out its name.
 */
static int
mzap_rlookup(const mzap_phys_t *mz, size_t size, char *name, uint64_t value)
{
	const mzap_ent_phys_t *mze;
	int chunks, i;

	/*
	 * Microzap objects use exactly one block. Read the whole
	 * thing.
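	 * A reverse lookup has no hash to guide it, so simply scan
	 * every chunk until one carries the wanted value.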
2927 */ 2928 chunks = size / MZAP_ENT_LEN - 1; 2929 for (i = 0; i < chunks; i++) { 2930 mze = &mz->mz_chunk[i]; 2931 if (value == mze->mze_value) { 2932 strcpy(name, mze->mze_name); 2933 return (0); 2934 } 2935 } 2936 2937 return (ENOENT); 2938 } 2939 2940 static void 2941 fzap_name_copy(const zap_leaf_t *zl, const zap_leaf_chunk_t *zc, char *name) 2942 { 2943 size_t namelen; 2944 const zap_leaf_chunk_t *nc; 2945 char *p; 2946 2947 namelen = zc->l_entry.le_name_numints; 2948 2949 nc = &ZAP_LEAF_CHUNK(zl, zc->l_entry.le_name_chunk); 2950 p = name; 2951 while (namelen > 0) { 2952 size_t len; 2953 len = namelen; 2954 if (len > ZAP_LEAF_ARRAY_BYTES) 2955 len = ZAP_LEAF_ARRAY_BYTES; 2956 memcpy(p, nc->l_array.la_array, len); 2957 p += len; 2958 namelen -= len; 2959 nc = &ZAP_LEAF_CHUNK(zl, nc->l_array.la_next); 2960 } 2961 2962 *p = '\0'; 2963 } 2964 2965 static int 2966 fzap_rlookup(const spa_t *spa, const dnode_phys_t *dnode, zap_phys_t *zh, 2967 char *name, uint64_t value) 2968 { 2969 int bsize = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT; 2970 fat_zap_t z; 2971 uint64_t i; 2972 int j, rc; 2973 2974 if (zh->zap_magic != ZAP_MAGIC) 2975 return (EIO); 2976 2977 z.zap_block_shift = ilog2(bsize); 2978 z.zap_phys = zh; 2979 2980 /* 2981 * This assumes that the leaf blocks start at block 1. The 2982 * documentation isn't exactly clear on this. 2983 */ 2984 zap_leaf_t zl; 2985 zl.l_bs = z.zap_block_shift; 2986 zl.l_phys = malloc(bsize); 2987 if (zl.l_phys == NULL) 2988 return (ENOMEM); 2989 2990 for (i = 0; i < zh->zap_num_leafs; i++) { 2991 off_t off = ((off_t)(i + 1)) << zl.l_bs; 2992 2993 rc = dnode_read(spa, dnode, off, zl.l_phys, bsize); 2994 if (rc != 0) 2995 goto done; 2996 2997 for (j = 0; j < ZAP_LEAF_NUMCHUNKS(&zl); j++) { 2998 zap_leaf_chunk_t *zc; 2999 3000 zc = &ZAP_LEAF_CHUNK(&zl, j); 3001 if (zc->l_entry.le_type != ZAP_CHUNK_ENTRY) 3002 continue; 3003 if (zc->l_entry.le_value_intlen != 8 || 3004 zc->l_entry.le_value_numints != 1) 3005 continue; 3006 3007 if (fzap_leaf_value(&zl, zc) == value) { 3008 fzap_name_copy(&zl, zc, name); 3009 goto done; 3010 } 3011 } 3012 } 3013 3014 rc = ENOENT; 3015 done: 3016 free(zl.l_phys); 3017 return (rc); 3018 } 3019 3020 static int 3021 zap_rlookup(const spa_t *spa, const dnode_phys_t *dnode, char *name, 3022 uint64_t value) 3023 { 3024 zap_phys_t *zap; 3025 size_t size = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT; 3026 int rc; 3027 3028 zap = malloc(size); 3029 if (zap == NULL) 3030 return (ENOMEM); 3031 3032 rc = dnode_read(spa, dnode, 0, zap, size); 3033 if (rc == 0) { 3034 if (zap->zap_block_type == ZBT_MICRO) 3035 rc = mzap_rlookup((const mzap_phys_t *)zap, size, 3036 name, value); 3037 else 3038 rc = fzap_rlookup(spa, dnode, zap, name, value); 3039 } 3040 free(zap); 3041 return (rc); 3042 } 3043 3044 static int 3045 zfs_rlookup(const spa_t *spa, uint64_t objnum, char *result) 3046 { 3047 char name[256]; 3048 char component[256]; 3049 uint64_t dir_obj, parent_obj, child_dir_zapobj; 3050 dnode_phys_t child_dir_zap, snapnames_zap, dataset, dir, parent; 3051 dsl_dir_phys_t *dd; 3052 dsl_dataset_phys_t *ds; 3053 char *p; 3054 int len; 3055 boolean_t issnap = B_FALSE; 3056 3057 p = &name[sizeof(name) - 1]; 3058 *p = '\0'; 3059 3060 if (objset_get_dnode(spa, spa->spa_mos, objnum, &dataset)) { 3061 printf("ZFS: can't find dataset %ju\n", (uintmax_t)objnum); 3062 return (EIO); 3063 } 3064 ds = (dsl_dataset_phys_t *)&dataset.dn_bonus; 3065 dir_obj = ds->ds_dir_obj; 3066 if (ds->ds_snapnames_zapobj == 0) 3067 issnap = B_TRUE; 3068 3069 for (;;) { 3070 
		if (objset_get_dnode(spa, spa->spa_mos, dir_obj, &dir) != 0)
			return (EIO);
		dd = (dsl_dir_phys_t *)&dir.dn_bonus;

		/* Actual loop condition. */
		parent_obj = dd->dd_parent_obj;
		if (parent_obj == 0)
			break;

		if (objset_get_dnode(spa, spa->spa_mos, parent_obj,
		    &parent) != 0)
			return (EIO);
		dd = (dsl_dir_phys_t *)&parent.dn_bonus;
		if (issnap == B_TRUE) {
			/*
			 * The dataset we are looking up is a snapshot;
			 * dir_obj is already its parent, and we don't want
			 * the grandparent just yet. Reset dd to the parent.
			 */
			dd = (dsl_dir_phys_t *)&dir.dn_bonus;
			/* Look up the dataset to get the snapnames ZAP. */
			if (objset_get_dnode(spa, spa->spa_mos,
			    dd->dd_head_dataset_obj, &dataset))
				return (EIO);
			ds = (dsl_dataset_phys_t *)&dataset.dn_bonus;
			if (objset_get_dnode(spa, spa->spa_mos,
			    ds->ds_snapnames_zapobj, &snapnames_zap) != 0)
				return (EIO);
			/* Get the name of the snapshot. */
			if (zap_rlookup(spa, &snapnames_zap, component,
			    objnum) != 0)
				return (EIO);
			len = strlen(component);
			p -= len;
			memcpy(p, component, len);
			--p;
			*p = '@';
			issnap = B_FALSE;
			continue;
		}

		child_dir_zapobj = dd->dd_child_dir_zapobj;
		if (objset_get_dnode(spa, spa->spa_mos, child_dir_zapobj,
		    &child_dir_zap) != 0)
			return (EIO);
		if (zap_rlookup(spa, &child_dir_zap, component, dir_obj) != 0)
			return (EIO);

		len = strlen(component);
		p -= len;
		memcpy(p, component, len);
		--p;
		*p = '/';

		/* Actual loop iteration. */
		dir_obj = parent_obj;
	}

	if (*p != '\0')
		++p;
	strcpy(result, p);

	return (0);
}

static int
zfs_lookup_dataset(const spa_t *spa, const char *name, uint64_t *objnum)
{
	char element[256];
	uint64_t dir_obj, child_dir_zapobj;
	dnode_phys_t child_dir_zap, snapnames_zap, dir, dataset;
	dsl_dir_phys_t *dd;
	dsl_dataset_phys_t *ds;
	const char *p, *q;
	boolean_t issnap = B_FALSE;

	if (objset_get_dnode(spa, spa->spa_mos,
	    DMU_POOL_DIRECTORY_OBJECT, &dir))
		return (EIO);
	if (zap_lookup(spa, &dir, DMU_POOL_ROOT_DATASET, sizeof (dir_obj),
	    1, &dir_obj))
		return (EIO);

	p = name;
	for (;;) {
		if (objset_get_dnode(spa, spa->spa_mos, dir_obj, &dir))
			return (EIO);
		dd = (dsl_dir_phys_t *)&dir.dn_bonus;

		while (*p == '/')
			p++;
		/* Actual loop condition #1. */
		if (*p == '\0')
			break;

		q = strchr(p, '/');
		if (q) {
			memcpy(element, p, q - p);
			element[q - p] = '\0';
			p = q + 1;
		} else {
			strcpy(element, p);
			p += strlen(p);
		}

		if (issnap == B_TRUE) {
			if (objset_get_dnode(spa, spa->spa_mos,
			    dd->dd_head_dataset_obj, &dataset))
				return (EIO);
			ds = (dsl_dataset_phys_t *)&dataset.dn_bonus;
			if (objset_get_dnode(spa, spa->spa_mos,
			    ds->ds_snapnames_zapobj, &snapnames_zap) != 0)
				return (EIO);
			/*
			 * Actual loop condition #2.
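			 * A snapshot name is the final path component;
			 * resolve it in the snapnames ZAP and stop there.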
			 */
			if (zap_lookup(spa, &snapnames_zap, element,
			    sizeof (dir_obj), 1, &dir_obj) != 0)
				return (ENOENT);
			*objnum = dir_obj;
			return (0);
		} else if ((q = strchr(element, '@')) != NULL) {
			issnap = B_TRUE;
			element[q - element] = '\0';
			p = q + 1;
		}
		child_dir_zapobj = dd->dd_child_dir_zapobj;
		if (objset_get_dnode(spa, spa->spa_mos, child_dir_zapobj,
		    &child_dir_zap) != 0)
			return (EIO);

		/* Actual loop condition #2. */
		if (zap_lookup(spa, &child_dir_zap, element, sizeof (dir_obj),
		    1, &dir_obj) != 0)
			return (ENOENT);
	}

	*objnum = dd->dd_head_dataset_obj;
	return (0);
}

#ifndef BOOT2
static int
zfs_list_dataset(const spa_t *spa, uint64_t objnum/*, int pos, char *entry*/)
{
	uint64_t dir_obj, child_dir_zapobj;
	dnode_phys_t child_dir_zap, dir, dataset;
	dsl_dataset_phys_t *ds;
	dsl_dir_phys_t *dd;

	if (objset_get_dnode(spa, spa->spa_mos, objnum, &dataset)) {
		printf("ZFS: can't find dataset %ju\n", (uintmax_t)objnum);
		return (EIO);
	}
	ds = (dsl_dataset_phys_t *)&dataset.dn_bonus;
	dir_obj = ds->ds_dir_obj;

	if (objset_get_dnode(spa, spa->spa_mos, dir_obj, &dir)) {
		printf("ZFS: can't find dirobj %ju\n", (uintmax_t)dir_obj);
		return (EIO);
	}
	dd = (dsl_dir_phys_t *)&dir.dn_bonus;

	child_dir_zapobj = dd->dd_child_dir_zapobj;
	if (objset_get_dnode(spa, spa->spa_mos, child_dir_zapobj,
	    &child_dir_zap) != 0) {
		printf("ZFS: can't find child zap %ju\n",
		    (uintmax_t)child_dir_zapobj);
		return (EIO);
	}

	return (zap_list(spa, &child_dir_zap) != 0);
}

int
zfs_callback_dataset(const spa_t *spa, uint64_t objnum,
    int (*callback)(const char *, uint64_t))
{
	uint64_t dir_obj, child_dir_zapobj;
	dnode_phys_t child_dir_zap, dir, dataset;
	dsl_dataset_phys_t *ds;
	dsl_dir_phys_t *dd;
	zap_phys_t *zap;
	size_t size;
	int err;

	err = objset_get_dnode(spa, spa->spa_mos, objnum, &dataset);
	if (err != 0) {
		printf("ZFS: can't find dataset %ju\n", (uintmax_t)objnum);
		return (err);
	}
	ds = (dsl_dataset_phys_t *)&dataset.dn_bonus;
	dir_obj = ds->ds_dir_obj;

	err = objset_get_dnode(spa, spa->spa_mos, dir_obj, &dir);
	if (err != 0) {
		printf("ZFS: can't find dirobj %ju\n", (uintmax_t)dir_obj);
		return (err);
	}
	dd = (dsl_dir_phys_t *)&dir.dn_bonus;

	child_dir_zapobj = dd->dd_child_dir_zapobj;
	err = objset_get_dnode(spa, spa->spa_mos, child_dir_zapobj,
	    &child_dir_zap);
	if (err != 0) {
		printf("ZFS: can't find child zap %ju\n",
		    (uintmax_t)child_dir_zapobj);
		return (err);
	}

	size = child_dir_zap.dn_datablkszsec << SPA_MINBLOCKSHIFT;
	zap = malloc(size);
	if (zap != NULL) {
		err = dnode_read(spa, &child_dir_zap, 0, zap, size);
		if (err != 0)
			goto done;

		if (zap->zap_block_type == ZBT_MICRO)
			err = mzap_list((const mzap_phys_t *)zap, size,
			    callback);
		else
			err = fzap_list(spa, &child_dir_zap, zap, callback);
	} else {
		err = ENOMEM;
	}
done:
	free(zap);
	return (err);
}
#endif

/*
 * Find the object set given the object number of its dataset object
 * and return its details in *objset.
 */
static int
zfs_mount_dataset(const spa_t *spa, uint64_t objnum, objset_phys_t *objset)
{
	dnode_phys_t dataset;
	dsl_dataset_phys_t *ds;

	if (objset_get_dnode(spa, spa->spa_mos, objnum, &dataset)) {
		printf("ZFS: can't find dataset %ju\n", (uintmax_t)objnum);
		return (EIO);
	}

	ds = (dsl_dataset_phys_t *)&dataset.dn_bonus;
	if (zio_read(spa, &ds->ds_bp, objset)) {
		printf("ZFS: can't read object set for dataset %ju\n",
		    (uintmax_t)objnum);
		return (EIO);
	}

	return (0);
}

/*
 * Find the dataset pointed to by the "bootfs" property, or the root
 * dataset if there is none, and return its object number in *objid.
 */
static int
zfs_get_root(const spa_t *spa, uint64_t *objid)
{
	dnode_phys_t dir, propdir;
	uint64_t props, bootfs, root;

	*objid = 0;

	/*
	 * Start with the MOS directory object.
	 */
	if (objset_get_dnode(spa, spa->spa_mos,
	    DMU_POOL_DIRECTORY_OBJECT, &dir)) {
		printf("ZFS: can't read MOS object directory\n");
		return (EIO);
	}

	/*
	 * Look up the pool properties and see if we can find a bootfs.
	 */
	if (zap_lookup(spa, &dir, DMU_POOL_PROPS,
	    sizeof(props), 1, &props) == 0 &&
	    objset_get_dnode(spa, spa->spa_mos, props, &propdir) == 0 &&
	    zap_lookup(spa, &propdir, "bootfs",
	    sizeof(bootfs), 1, &bootfs) == 0 && bootfs != 0) {
		*objid = bootfs;
		return (0);
	}
	/*
	 * Look up the root dataset directory.
	 */
	if (zap_lookup(spa, &dir, DMU_POOL_ROOT_DATASET,
	    sizeof(root), 1, &root) ||
	    objset_get_dnode(spa, spa->spa_mos, root, &dir)) {
		printf("ZFS: can't find root dsl_dir\n");
		return (EIO);
	}

	/*
	 * Use the information from the dataset directory's bonus buffer
	 * to find the dataset object and from that the object set itself.
	 */
	dsl_dir_phys_t *dd = (dsl_dir_phys_t *)&dir.dn_bonus;
	*objid = dd->dd_head_dataset_obj;
	return (0);
}

static int
zfs_mount_impl(const spa_t *spa, uint64_t rootobj, struct zfsmount *mount)
{

	mount->spa = spa;

	/*
	 * Find the root object set if not explicitly provided.
	 */
	if (rootobj == 0 && zfs_get_root(spa, &rootobj)) {
		printf("ZFS: can't find root filesystem\n");
		return (EIO);
	}

	if (zfs_mount_dataset(spa, rootobj, &mount->objset)) {
		printf("ZFS: can't open root filesystem\n");
		return (EIO);
	}

	mount->rootobj = rootobj;

	return (0);
}

/*
 * Callback function for feature name checks.
 */
static int
check_feature(const char *name, uint64_t value)
{
	int i;

	if (value == 0)
		return (0);
	if (name[0] == '\0')
		return (0);

	for (i = 0; features_for_read[i] != NULL; i++) {
		if (strcmp(name, features_for_read[i]) == 0)
			return (0);
	}
	printf("ZFS: unsupported feature: %s\n", name);
	return (EIO);
}

/*
 * Checks whether the MOS features that are active are supported.
 */
static int
check_mos_features(const spa_t *spa)
{
	dnode_phys_t dir;
	zap_phys_t *zap;
	uint64_t objnum;
	size_t size;
	int rc;

	if ((rc = objset_get_dnode(spa, spa->spa_mos, DMU_OT_OBJECT_DIRECTORY,
	    &dir)) != 0)
		return (rc);
	if ((rc = zap_lookup(spa, &dir, DMU_POOL_FEATURES_FOR_READ,
	    sizeof (objnum), 1, &objnum)) != 0) {
		/*
		 * It is an older pool without features. As we have already
		 * tested the label, just return without raising an error.
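		 * (DMU_POOL_FEATURES_FOR_READ only exists on pools that
		 * have feature flags, i.e. SPA_VERSION_FEATURES or newer.)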
3438 */ 3439 return (0); 3440 } 3441 3442 if ((rc = objset_get_dnode(spa, spa->spa_mos, objnum, &dir)) != 0) 3443 return (rc); 3444 3445 if (dir.dn_type != DMU_OTN_ZAP_METADATA) 3446 return (EIO); 3447 3448 size = dir.dn_datablkszsec << SPA_MINBLOCKSHIFT; 3449 zap = malloc(size); 3450 if (zap == NULL) 3451 return (ENOMEM); 3452 3453 if (dnode_read(spa, &dir, 0, zap, size)) { 3454 free(zap); 3455 return (EIO); 3456 } 3457 3458 if (zap->zap_block_type == ZBT_MICRO) 3459 rc = mzap_list((const mzap_phys_t *)zap, size, check_feature); 3460 else 3461 rc = fzap_list(spa, &dir, zap, check_feature); 3462 3463 free(zap); 3464 return (rc); 3465 } 3466 3467 static int 3468 load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value) 3469 { 3470 dnode_phys_t dir; 3471 size_t size; 3472 int rc; 3473 char *nv; 3474 3475 *value = NULL; 3476 if ((rc = objset_get_dnode(spa, spa->spa_mos, obj, &dir)) != 0) 3477 return (rc); 3478 if (dir.dn_type != DMU_OT_PACKED_NVLIST && 3479 dir.dn_bonustype != DMU_OT_PACKED_NVLIST_SIZE) { 3480 return (EIO); 3481 } 3482 3483 if (dir.dn_bonuslen != sizeof (uint64_t)) 3484 return (EIO); 3485 3486 size = *(uint64_t *)DN_BONUS(&dir); 3487 nv = malloc(size); 3488 if (nv == NULL) 3489 return (ENOMEM); 3490 3491 rc = dnode_read(spa, &dir, 0, nv, size); 3492 if (rc != 0) { 3493 free(nv); 3494 nv = NULL; 3495 return (rc); 3496 } 3497 *value = nvlist_import(nv, size); 3498 free(nv); 3499 return (rc); 3500 } 3501 3502 static int 3503 zfs_spa_init(spa_t *spa) 3504 { 3505 struct uberblock checkpoint; 3506 dnode_phys_t dir; 3507 uint64_t config_object; 3508 nvlist_t *nvlist; 3509 int rc; 3510 3511 if (zio_read(spa, &spa->spa_uberblock->ub_rootbp, spa->spa_mos)) { 3512 printf("ZFS: can't read MOS of pool %s\n", spa->spa_name); 3513 return (EIO); 3514 } 3515 if (spa->spa_mos->os_type != DMU_OST_META) { 3516 printf("ZFS: corrupted MOS of pool %s\n", spa->spa_name); 3517 return (EIO); 3518 } 3519 3520 if (objset_get_dnode(spa, &spa->spa_mos_master, 3521 DMU_POOL_DIRECTORY_OBJECT, &dir)) { 3522 printf("ZFS: failed to read pool %s directory object\n", 3523 spa->spa_name); 3524 return (EIO); 3525 } 3526 /* this is allowed to fail, older pools do not have salt */ 3527 rc = zap_lookup(spa, &dir, DMU_POOL_CHECKSUM_SALT, 1, 3528 sizeof (spa->spa_cksum_salt.zcs_bytes), 3529 spa->spa_cksum_salt.zcs_bytes); 3530 3531 rc = check_mos_features(spa); 3532 if (rc != 0) { 3533 printf("ZFS: pool %s is not supported\n", spa->spa_name); 3534 return (rc); 3535 } 3536 3537 rc = zap_lookup(spa, &dir, DMU_POOL_CONFIG, 3538 sizeof (config_object), 1, &config_object); 3539 if (rc != 0) { 3540 printf("ZFS: can not read MOS %s\n", DMU_POOL_CONFIG); 3541 return (EIO); 3542 } 3543 rc = load_nvlist(spa, config_object, &nvlist); 3544 if (rc != 0) 3545 return (rc); 3546 3547 rc = zap_lookup(spa, &dir, DMU_POOL_ZPOOL_CHECKPOINT, 3548 sizeof(uint64_t), sizeof(checkpoint) / sizeof(uint64_t), 3549 &checkpoint); 3550 if (rc == 0 && checkpoint.ub_checkpoint_txg != 0) { 3551 memcpy(&spa->spa_uberblock_checkpoint, &checkpoint, 3552 sizeof(checkpoint)); 3553 if (zio_read(spa, &spa->spa_uberblock_checkpoint.ub_rootbp, 3554 &spa->spa_mos_checkpoint)) { 3555 printf("ZFS: can not read checkpoint data.\n"); 3556 return (EIO); 3557 } 3558 } 3559 3560 /* 3561 * Update vdevs from MOS config. Note, we do skip encoding bytes 3562 * here. See also vdev_label_read_config(). 
3563 */ 3564 rc = vdev_init_from_nvlist(spa, nvlist); 3565 nvlist_destroy(nvlist); 3566 return (rc); 3567 } 3568 3569 static int 3570 zfs_dnode_stat(const spa_t *spa, dnode_phys_t *dn, struct stat *sb) 3571 { 3572 3573 if (dn->dn_bonustype != DMU_OT_SA) { 3574 znode_phys_t *zp = (znode_phys_t *)dn->dn_bonus; 3575 3576 sb->st_mode = zp->zp_mode; 3577 sb->st_uid = zp->zp_uid; 3578 sb->st_gid = zp->zp_gid; 3579 sb->st_size = zp->zp_size; 3580 } else { 3581 sa_hdr_phys_t *sahdrp; 3582 int hdrsize; 3583 size_t size = 0; 3584 void *buf = NULL; 3585 3586 if (dn->dn_bonuslen != 0) 3587 sahdrp = (sa_hdr_phys_t *)DN_BONUS(dn); 3588 else { 3589 if ((dn->dn_flags & DNODE_FLAG_SPILL_BLKPTR) != 0) { 3590 blkptr_t *bp = DN_SPILL_BLKPTR(dn); 3591 int error; 3592 3593 size = BP_GET_LSIZE(bp); 3594 buf = malloc(size); 3595 if (buf == NULL) 3596 error = ENOMEM; 3597 else 3598 error = zio_read(spa, bp, buf); 3599 3600 if (error != 0) { 3601 free(buf); 3602 return (error); 3603 } 3604 sahdrp = buf; 3605 } else { 3606 return (EIO); 3607 } 3608 } 3609 hdrsize = SA_HDR_SIZE(sahdrp); 3610 sb->st_mode = *(uint64_t *)((char *)sahdrp + hdrsize + 3611 SA_MODE_OFFSET); 3612 sb->st_uid = *(uint64_t *)((char *)sahdrp + hdrsize + 3613 SA_UID_OFFSET); 3614 sb->st_gid = *(uint64_t *)((char *)sahdrp + hdrsize + 3615 SA_GID_OFFSET); 3616 sb->st_size = *(uint64_t *)((char *)sahdrp + hdrsize + 3617 SA_SIZE_OFFSET); 3618 free(buf); 3619 } 3620 3621 return (0); 3622 } 3623 3624 static int 3625 zfs_dnode_readlink(const spa_t *spa, dnode_phys_t *dn, char *path, size_t psize) 3626 { 3627 int rc = 0; 3628 3629 if (dn->dn_bonustype == DMU_OT_SA) { 3630 sa_hdr_phys_t *sahdrp = NULL; 3631 size_t size = 0; 3632 void *buf = NULL; 3633 int hdrsize; 3634 char *p; 3635 3636 if (dn->dn_bonuslen != 0) { 3637 sahdrp = (sa_hdr_phys_t *)DN_BONUS(dn); 3638 } else { 3639 blkptr_t *bp; 3640 3641 if ((dn->dn_flags & DNODE_FLAG_SPILL_BLKPTR) == 0) 3642 return (EIO); 3643 bp = DN_SPILL_BLKPTR(dn); 3644 3645 size = BP_GET_LSIZE(bp); 3646 buf = malloc(size); 3647 if (buf == NULL) 3648 rc = ENOMEM; 3649 else 3650 rc = zio_read(spa, bp, buf); 3651 if (rc != 0) { 3652 free(buf); 3653 return (rc); 3654 } 3655 sahdrp = buf; 3656 } 3657 hdrsize = SA_HDR_SIZE(sahdrp); 3658 p = (char *)((uintptr_t)sahdrp + hdrsize + SA_SYMLINK_OFFSET); 3659 memcpy(path, p, psize); 3660 free(buf); 3661 return (0); 3662 } 3663 /* 3664 * Second test is purely to silence bogus compiler 3665 * warning about accessing past the end of dn_bonus. 3666 */ 3667 if (psize + sizeof(znode_phys_t) <= dn->dn_bonuslen && 3668 sizeof(znode_phys_t) <= sizeof(dn->dn_bonus)) { 3669 memcpy(path, &dn->dn_bonus[sizeof(znode_phys_t)], psize); 3670 } else { 3671 rc = dnode_read(spa, dn, 0, path, psize); 3672 } 3673 return (rc); 3674 } 3675 3676 struct obj_list { 3677 uint64_t objnum; 3678 STAILQ_ENTRY(obj_list) entry; 3679 }; 3680 3681 /* 3682 * Lookup a file and return its dnode. 
3683 */ 3684 static int 3685 zfs_lookup(const struct zfsmount *mount, const char *upath, dnode_phys_t *dnode) 3686 { 3687 int rc; 3688 uint64_t objnum; 3689 const spa_t *spa; 3690 dnode_phys_t dn; 3691 const char *p, *q; 3692 char element[256]; 3693 char path[1024]; 3694 int symlinks_followed = 0; 3695 struct stat sb; 3696 struct obj_list *entry, *tentry; 3697 STAILQ_HEAD(, obj_list) on_cache = STAILQ_HEAD_INITIALIZER(on_cache); 3698 3699 spa = mount->spa; 3700 if (mount->objset.os_type != DMU_OST_ZFS) { 3701 printf("ZFS: unexpected object set type %ju\n", 3702 (uintmax_t)mount->objset.os_type); 3703 return (EIO); 3704 } 3705 3706 if ((entry = malloc(sizeof(struct obj_list))) == NULL) 3707 return (ENOMEM); 3708 3709 /* 3710 * Get the root directory dnode. 3711 */ 3712 rc = objset_get_dnode(spa, &mount->objset, MASTER_NODE_OBJ, &dn); 3713 if (rc) { 3714 free(entry); 3715 return (rc); 3716 } 3717 3718 rc = zap_lookup(spa, &dn, ZFS_ROOT_OBJ, sizeof(objnum), 1, &objnum); 3719 if (rc) { 3720 free(entry); 3721 return (rc); 3722 } 3723 entry->objnum = objnum; 3724 STAILQ_INSERT_HEAD(&on_cache, entry, entry); 3725 3726 rc = objset_get_dnode(spa, &mount->objset, objnum, &dn); 3727 if (rc != 0) 3728 goto done; 3729 3730 p = upath; 3731 while (p && *p) { 3732 rc = objset_get_dnode(spa, &mount->objset, objnum, &dn); 3733 if (rc != 0) 3734 goto done; 3735 3736 while (*p == '/') 3737 p++; 3738 if (*p == '\0') 3739 break; 3740 q = p; 3741 while (*q != '\0' && *q != '/') 3742 q++; 3743 3744 /* skip dot */ 3745 if (p + 1 == q && p[0] == '.') { 3746 p++; 3747 continue; 3748 } 3749 /* double dot */ 3750 if (p + 2 == q && p[0] == '.' && p[1] == '.') { 3751 p += 2; 3752 if (STAILQ_FIRST(&on_cache) == 3753 STAILQ_LAST(&on_cache, obj_list, entry)) { 3754 rc = ENOENT; 3755 goto done; 3756 } 3757 entry = STAILQ_FIRST(&on_cache); 3758 STAILQ_REMOVE_HEAD(&on_cache, entry); 3759 free(entry); 3760 objnum = (STAILQ_FIRST(&on_cache))->objnum; 3761 continue; 3762 } 3763 if (q - p + 1 > sizeof(element)) { 3764 rc = ENAMETOOLONG; 3765 goto done; 3766 } 3767 memcpy(element, p, q - p); 3768 element[q - p] = 0; 3769 p = q; 3770 3771 if ((rc = zfs_dnode_stat(spa, &dn, &sb)) != 0) 3772 goto done; 3773 if (!S_ISDIR(sb.st_mode)) { 3774 rc = ENOTDIR; 3775 goto done; 3776 } 3777 3778 rc = zap_lookup(spa, &dn, element, sizeof (objnum), 1, &objnum); 3779 if (rc) 3780 goto done; 3781 objnum = ZFS_DIRENT_OBJ(objnum); 3782 3783 if ((entry = malloc(sizeof(struct obj_list))) == NULL) { 3784 rc = ENOMEM; 3785 goto done; 3786 } 3787 entry->objnum = objnum; 3788 STAILQ_INSERT_HEAD(&on_cache, entry, entry); 3789 rc = objset_get_dnode(spa, &mount->objset, objnum, &dn); 3790 if (rc) 3791 goto done; 3792 3793 /* 3794 * Check for symlink. 3795 */ 3796 rc = zfs_dnode_stat(spa, &dn, &sb); 3797 if (rc) 3798 goto done; 3799 if (S_ISLNK(sb.st_mode)) { 3800 if (symlinks_followed > 10) { 3801 rc = EMLINK; 3802 goto done; 3803 } 3804 symlinks_followed++; 3805 3806 /* 3807 * Read the link value and copy the tail of our 3808 * current path onto the end. 3809 */ 3810 if (sb.st_size + strlen(p) + 1 > sizeof(path)) { 3811 rc = ENAMETOOLONG; 3812 goto done; 3813 } 3814 strcpy(&path[sb.st_size], p); 3815 3816 rc = zfs_dnode_readlink(spa, &dn, path, sb.st_size); 3817 if (rc != 0) 3818 goto done; 3819 3820 /* 3821 * Restart with the new path, starting either at 3822 * the root or at the parent depending whether or 3823 * not the link is relative. 
3824 */ 3825 p = path; 3826 if (*p == '/') { 3827 while (STAILQ_FIRST(&on_cache) != 3828 STAILQ_LAST(&on_cache, obj_list, entry)) { 3829 entry = STAILQ_FIRST(&on_cache); 3830 STAILQ_REMOVE_HEAD(&on_cache, entry); 3831 free(entry); 3832 } 3833 } else { 3834 entry = STAILQ_FIRST(&on_cache); 3835 STAILQ_REMOVE_HEAD(&on_cache, entry); 3836 free(entry); 3837 } 3838 objnum = (STAILQ_FIRST(&on_cache))->objnum; 3839 } 3840 } 3841 3842 *dnode = dn; 3843 done: 3844 STAILQ_FOREACH_SAFE(entry, &on_cache, entry, tentry) 3845 free(entry); 3846 return (rc); 3847 } 3848 3849 /* 3850 * Return either a cached copy of the bootenv, or read each of the vdev children 3851 * looking for the bootenv. Cache what's found and return the results. Returns 0 3852 * when benvp is filled in, and some errno when not. 3853 */ 3854 static int 3855 zfs_get_bootenv_spa(spa_t *spa, nvlist_t **benvp) 3856 { 3857 vdev_t *vd; 3858 nvlist_t *benv = NULL; 3859 3860 if (spa->spa_bootenv == NULL) { 3861 STAILQ_FOREACH(vd, &spa->spa_root_vdev->v_children, 3862 v_childlink) { 3863 benv = vdev_read_bootenv(vd); 3864 3865 if (benv != NULL) 3866 break; 3867 } 3868 spa->spa_bootenv = benv; 3869 } 3870 benv = spa->spa_bootenv; 3871 3872 if (benv == NULL) 3873 return (ENOENT); 3874 3875 *benvp = benv; 3876 return (0); 3877 } 3878 3879 /* 3880 * Store nvlist to pool label bootenv area. Also updates cached pointer in spa. 3881 */ 3882 static int 3883 zfs_set_bootenv_spa(spa_t *spa, nvlist_t *benv) 3884 { 3885 vdev_t *vd; 3886 3887 STAILQ_FOREACH(vd, &spa->spa_root_vdev->v_children, v_childlink) { 3888 vdev_write_bootenv(vd, benv); 3889 } 3890 3891 spa->spa_bootenv = benv; 3892 return (0); 3893 } 3894 3895 /* 3896 * Get bootonce value by key. The bootonce <key, value> pair is removed from the 3897 * bootenv nvlist and the remaining nvlist is committed back to disk. This process 3898 * the bootonce flag since we've reached the point in the boot that we've 'used' 3899 * the BE. For chained boot scenarios, we may reach this point multiple times (but 3900 * only remove it and return 0 the first time). 3901 */ 3902 static int 3903 zfs_get_bootonce_spa(spa_t *spa, const char *key, char *buf, size_t size) 3904 { 3905 nvlist_t *benv; 3906 char *result = NULL; 3907 int result_size, rv; 3908 3909 if ((rv = zfs_get_bootenv_spa(spa, &benv)) != 0) 3910 return (rv); 3911 3912 if ((rv = nvlist_find(benv, key, DATA_TYPE_STRING, NULL, 3913 &result, &result_size)) == 0) { 3914 if (result_size == 0) { 3915 /* ignore empty string */ 3916 rv = ENOENT; 3917 } else if (buf != NULL) { 3918 size = MIN((size_t)result_size + 1, size); 3919 strlcpy(buf, result, size); 3920 } 3921 (void)nvlist_remove(benv, key, DATA_TYPE_STRING); 3922 (void)zfs_set_bootenv_spa(spa, benv); 3923 } 3924 3925 return (rv); 3926 } 3927