1 /*- 2 * Copyright (c) 2007 Doug Rabson 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 27 /* 28 * Stand-alone ZFS file reader. 29 */ 30 31 #include <stdbool.h> 32 #include <sys/endian.h> 33 #include <sys/stat.h> 34 #include <sys/stdint.h> 35 #include <sys/list.h> 36 #include <sys/zfs_bootenv.h> 37 #include <machine/_inttypes.h> 38 39 #include "zfsimpl.h" 40 #include "zfssubr.c" 41 42 #ifdef HAS_ZSTD_ZFS 43 extern int zstd_init(void); 44 #endif 45 46 struct zfsmount { 47 char *path; 48 const spa_t *spa; 49 objset_phys_t objset; 50 uint64_t rootobj; 51 STAILQ_ENTRY(zfsmount) next; 52 }; 53 54 typedef STAILQ_HEAD(zfs_mnt_list, zfsmount) zfs_mnt_list_t; 55 static zfs_mnt_list_t zfsmount = STAILQ_HEAD_INITIALIZER(zfsmount); 56 57 /* 58 * The indirect_child_t represents the vdev that we will read from, when we 59 * need to read all copies of the data (e.g. for scrub or reconstruction). 60 * For plain (non-mirror) top-level vdevs (i.e. is_vdev is not a mirror), 61 * ic_vdev is the same as is_vdev. However, for mirror top-level vdevs, 62 * ic_vdev is a child of the mirror. 63 */ 64 typedef struct indirect_child { 65 void *ic_data; 66 vdev_t *ic_vdev; 67 } indirect_child_t; 68 69 /* 70 * The indirect_split_t represents one mapped segment of an i/o to the 71 * indirect vdev. For non-split (contiguously-mapped) blocks, there will be 72 * only one indirect_split_t, with is_split_offset==0 and is_size==io_size. 73 * For split blocks, there will be several of these. 74 */ 75 typedef struct indirect_split { 76 list_node_t is_node; /* link on iv_splits */ 77 78 /* 79 * is_split_offset is the offset into the i/o. 80 * This is the sum of the previous splits' is_size's. 81 */ 82 uint64_t is_split_offset; 83 84 vdev_t *is_vdev; /* top-level vdev */ 85 uint64_t is_target_offset; /* offset on is_vdev */ 86 uint64_t is_size; 87 int is_children; /* number of entries in is_child[] */ 88 89 /* 90 * is_good_child is the child that we are currently using to 91 * attempt reconstruction. 92 */ 93 int is_good_child; 94 95 indirect_child_t is_child[1]; /* variable-length */ 96 } indirect_split_t; 97 98 /* 99 * The indirect_vsd_t is associated with each i/o to the indirect vdev. 100 * It is the "Vdev-Specific Data" in the zio_t's io_vsd. 101 */ 102 typedef struct indirect_vsd { 103 boolean_t iv_split_block; 104 boolean_t iv_reconstruct; 105 106 list_t iv_splits; /* list of indirect_split_t's */ 107 } indirect_vsd_t; 108 109 /* 110 * List of supported read-incompatible ZFS features. Do not add here features 111 * marked as ZFEATURE_FLAG_READONLY_COMPAT, they are irrelevant for read-only! 112 */ 113 static const char *features_for_read[] = { 114 "com.datto:bookmark_v2", 115 "com.datto:encryption", 116 "com.delphix:bookmark_written", 117 "com.delphix:device_removal", 118 "com.delphix:embedded_data", 119 "com.delphix:extensible_dataset", 120 "com.delphix:head_errlog", 121 "com.delphix:hole_birth", 122 "com.joyent:multi_vdev_crash_dump", 123 "com.klarasystems:vdev_zaps_v2", 124 "org.freebsd:zstd_compress", 125 "org.illumos:lz4_compress", 126 "org.illumos:sha512", 127 "org.illumos:skein", 128 "org.open-zfs:large_blocks", 129 "org.openzfs:blake3", 130 "org.zfsonlinux:large_dnode", 131 NULL 132 }; 133 134 /* 135 * List of all pools, chained through spa_link. 136 */ 137 static spa_list_t zfs_pools; 138 139 static const dnode_phys_t *dnode_cache_obj; 140 static uint64_t dnode_cache_bn; 141 static char *dnode_cache_buf; 142 143 static int zio_read(const spa_t *spa, const blkptr_t *bp, void *buf); 144 static int zfs_get_root(const spa_t *spa, uint64_t *objid); 145 static int zfs_rlookup(const spa_t *spa, uint64_t objnum, char *result); 146 static int zap_lookup(const spa_t *spa, const dnode_phys_t *dnode, 147 const char *name, uint64_t integer_size, uint64_t num_integers, 148 void *value); 149 static int objset_get_dnode(const spa_t *, const objset_phys_t *, uint64_t, 150 dnode_phys_t *); 151 static int dnode_read(const spa_t *, const dnode_phys_t *, off_t, void *, 152 size_t); 153 static int vdev_indirect_read(vdev_t *, const blkptr_t *, void *, off_t, 154 size_t); 155 static int vdev_mirror_read(vdev_t *, const blkptr_t *, void *, off_t, size_t); 156 vdev_indirect_mapping_t *vdev_indirect_mapping_open(spa_t *, objset_phys_t *, 157 uint64_t); 158 vdev_indirect_mapping_entry_phys_t * 159 vdev_indirect_mapping_duplicate_adjacent_entries(vdev_t *, uint64_t, 160 uint64_t, uint64_t *); 161 162 static void 163 zfs_init(void) 164 { 165 STAILQ_INIT(&zfs_pools); 166 167 dnode_cache_buf = malloc(SPA_MAXBLOCKSIZE); 168 169 zfs_init_crc(); 170 #ifdef HAS_ZSTD_ZFS 171 zstd_init(); 172 #endif 173 } 174 175 static int 176 nvlist_check_features_for_read(nvlist_t *nvl) 177 { 178 nvlist_t *features = NULL; 179 nvs_data_t *data; 180 nvp_header_t *nvp; 181 nv_string_t *nvp_name; 182 int rc; 183 184 rc = nvlist_find(nvl, ZPOOL_CONFIG_FEATURES_FOR_READ, 185 DATA_TYPE_NVLIST, NULL, &features, NULL); 186 switch (rc) { 187 case 0: 188 break; /* Continue with checks */ 189 190 case ENOENT: 191 return (0); /* All features are disabled */ 192 193 default: 194 return (rc); /* Error while reading nvlist */ 195 } 196 197 data = (nvs_data_t *)features->nv_data; 198 nvp = &data->nvl_pair; /* first pair in nvlist */ 199 200 while (nvp->encoded_size != 0 && nvp->decoded_size != 0) { 201 int i, found; 202 203 nvp_name = (nv_string_t *)((uintptr_t)nvp + sizeof(*nvp)); 204 found = 0; 205 206 for (i = 0; features_for_read[i] != NULL; i++) { 207 if (memcmp(nvp_name->nv_data, features_for_read[i], 208 nvp_name->nv_size) == 0) { 209 found = 1; 210 break; 211 } 212 } 213 214 if (!found) { 215 printf("ZFS: unsupported feature: %.*s\n", 216 nvp_name->nv_size, nvp_name->nv_data); 217 rc = EIO; 218 } 219 nvp = (nvp_header_t *)((uint8_t *)nvp + nvp->encoded_size); 220 } 221 nvlist_destroy(features); 222 223 return (rc); 224 } 225 226 static int 227 vdev_read_phys(vdev_t *vdev, const blkptr_t *bp, void *buf, 228 off_t offset, size_t size) 229 { 230 size_t psize; 231 int rc; 232 233 if (vdev->v_phys_read == NULL) 234 return (ENOTSUP); 235 236 if (bp) { 237 psize = BP_GET_PSIZE(bp); 238 } else { 239 psize = size; 240 } 241 242 rc = vdev->v_phys_read(vdev, vdev->v_priv, offset, buf, psize); 243 if (rc == 0) { 244 if (bp != NULL) 245 rc = zio_checksum_verify(vdev->v_spa, bp, buf); 246 } 247 248 return (rc); 249 } 250 251 static int 252 vdev_write_phys(vdev_t *vdev, void *buf, off_t offset, size_t size) 253 { 254 if (vdev->v_phys_write == NULL) 255 return (ENOTSUP); 256 257 return (vdev->v_phys_write(vdev, offset, buf, size)); 258 } 259 260 typedef struct remap_segment { 261 vdev_t *rs_vd; 262 uint64_t rs_offset; 263 uint64_t rs_asize; 264 uint64_t rs_split_offset; 265 list_node_t rs_node; 266 } remap_segment_t; 267 268 static remap_segment_t * 269 rs_alloc(vdev_t *vd, uint64_t offset, uint64_t asize, uint64_t split_offset) 270 { 271 remap_segment_t *rs = malloc(sizeof (remap_segment_t)); 272 273 if (rs != NULL) { 274 rs->rs_vd = vd; 275 rs->rs_offset = offset; 276 rs->rs_asize = asize; 277 rs->rs_split_offset = split_offset; 278 } 279 280 return (rs); 281 } 282 283 vdev_indirect_mapping_t * 284 vdev_indirect_mapping_open(spa_t *spa, objset_phys_t *os, 285 uint64_t mapping_object) 286 { 287 vdev_indirect_mapping_t *vim; 288 vdev_indirect_mapping_phys_t *vim_phys; 289 int rc; 290 291 vim = calloc(1, sizeof (*vim)); 292 if (vim == NULL) 293 return (NULL); 294 295 vim->vim_dn = calloc(1, sizeof (*vim->vim_dn)); 296 if (vim->vim_dn == NULL) { 297 free(vim); 298 return (NULL); 299 } 300 301 rc = objset_get_dnode(spa, os, mapping_object, vim->vim_dn); 302 if (rc != 0) { 303 free(vim->vim_dn); 304 free(vim); 305 return (NULL); 306 } 307 308 vim->vim_spa = spa; 309 vim->vim_phys = malloc(sizeof (*vim->vim_phys)); 310 if (vim->vim_phys == NULL) { 311 free(vim->vim_dn); 312 free(vim); 313 return (NULL); 314 } 315 316 vim_phys = (vdev_indirect_mapping_phys_t *)DN_BONUS(vim->vim_dn); 317 *vim->vim_phys = *vim_phys; 318 319 vim->vim_objset = os; 320 vim->vim_object = mapping_object; 321 vim->vim_entries = NULL; 322 323 vim->vim_havecounts = 324 (vim->vim_dn->dn_bonuslen > VDEV_INDIRECT_MAPPING_SIZE_V0); 325 326 return (vim); 327 } 328 329 /* 330 * Compare an offset with an indirect mapping entry; there are three 331 * possible scenarios: 332 * 333 * 1. The offset is "less than" the mapping entry; meaning the 334 * offset is less than the source offset of the mapping entry. In 335 * this case, there is no overlap between the offset and the 336 * mapping entry and -1 will be returned. 337 * 338 * 2. The offset is "greater than" the mapping entry; meaning the 339 * offset is greater than the mapping entry's source offset plus 340 * the entry's size. In this case, there is no overlap between 341 * the offset and the mapping entry and 1 will be returned. 342 * 343 * NOTE: If the offset is actually equal to the entry's offset 344 * plus size, this is considered to be "greater" than the entry, 345 * and this case applies (i.e. 1 will be returned). Thus, the 346 * entry's "range" can be considered to be inclusive at its 347 * start, but exclusive at its end: e.g. [src, src + size). 348 * 349 * 3. The last case to consider is if the offset actually falls 350 * within the mapping entry's range. If this is the case, the 351 * offset is considered to be "equal to" the mapping entry and 352 * 0 will be returned. 353 * 354 * NOTE: If the offset is equal to the entry's source offset, 355 * this case applies and 0 will be returned. If the offset is 356 * equal to the entry's source plus its size, this case does 357 * *not* apply (see "NOTE" above for scenario 2), and 1 will be 358 * returned. 359 */ 360 static int 361 dva_mapping_overlap_compare(const void *v_key, const void *v_array_elem) 362 { 363 const uint64_t *key = v_key; 364 const vdev_indirect_mapping_entry_phys_t *array_elem = 365 v_array_elem; 366 uint64_t src_offset = DVA_MAPPING_GET_SRC_OFFSET(array_elem); 367 368 if (*key < src_offset) { 369 return (-1); 370 } else if (*key < src_offset + DVA_GET_ASIZE(&array_elem->vimep_dst)) { 371 return (0); 372 } else { 373 return (1); 374 } 375 } 376 377 /* 378 * Return array entry. 379 */ 380 static vdev_indirect_mapping_entry_phys_t * 381 vdev_indirect_mapping_entry(vdev_indirect_mapping_t *vim, uint64_t index) 382 { 383 uint64_t size; 384 off_t offset = 0; 385 int rc; 386 387 if (vim->vim_phys->vimp_num_entries == 0) 388 return (NULL); 389 390 if (vim->vim_entries == NULL) { 391 uint64_t bsize; 392 393 bsize = vim->vim_dn->dn_datablkszsec << SPA_MINBLOCKSHIFT; 394 size = vim->vim_phys->vimp_num_entries * 395 sizeof (*vim->vim_entries); 396 if (size > bsize) { 397 size = bsize / sizeof (*vim->vim_entries); 398 size *= sizeof (*vim->vim_entries); 399 } 400 vim->vim_entries = malloc(size); 401 if (vim->vim_entries == NULL) 402 return (NULL); 403 vim->vim_num_entries = size / sizeof (*vim->vim_entries); 404 offset = index * sizeof (*vim->vim_entries); 405 } 406 407 /* We have data in vim_entries */ 408 if (offset == 0) { 409 if (index >= vim->vim_entry_offset && 410 index <= vim->vim_entry_offset + vim->vim_num_entries) { 411 index -= vim->vim_entry_offset; 412 return (&vim->vim_entries[index]); 413 } 414 offset = index * sizeof (*vim->vim_entries); 415 } 416 417 vim->vim_entry_offset = index; 418 size = vim->vim_num_entries * sizeof (*vim->vim_entries); 419 rc = dnode_read(vim->vim_spa, vim->vim_dn, offset, vim->vim_entries, 420 size); 421 if (rc != 0) { 422 /* Read error, invalidate vim_entries. */ 423 free(vim->vim_entries); 424 vim->vim_entries = NULL; 425 return (NULL); 426 } 427 index -= vim->vim_entry_offset; 428 return (&vim->vim_entries[index]); 429 } 430 431 /* 432 * Returns the mapping entry for the given offset. 433 * 434 * It's possible that the given offset will not be in the mapping table 435 * (i.e. no mapping entries contain this offset), in which case, the 436 * return value depends on the "next_if_missing" parameter. 437 * 438 * If the offset is not found in the table and "next_if_missing" is 439 * B_FALSE, then NULL will always be returned. The behavior is intended 440 * to allow consumers to get the entry corresponding to the offset 441 * parameter, iff the offset overlaps with an entry in the table. 442 * 443 * If the offset is not found in the table and "next_if_missing" is 444 * B_TRUE, then the entry nearest to the given offset will be returned, 445 * such that the entry's source offset is greater than the offset 446 * passed in (i.e. the "next" mapping entry in the table is returned, if 447 * the offset is missing from the table). If there are no entries whose 448 * source offset is greater than the passed in offset, NULL is returned. 449 */ 450 static vdev_indirect_mapping_entry_phys_t * 451 vdev_indirect_mapping_entry_for_offset(vdev_indirect_mapping_t *vim, 452 uint64_t offset) 453 { 454 ASSERT(vim->vim_phys->vimp_num_entries > 0); 455 456 vdev_indirect_mapping_entry_phys_t *entry; 457 458 uint64_t last = vim->vim_phys->vimp_num_entries - 1; 459 uint64_t base = 0; 460 461 /* 462 * We don't define these inside of the while loop because we use 463 * their value in the case that offset isn't in the mapping. 464 */ 465 uint64_t mid; 466 int result; 467 468 while (last >= base) { 469 mid = base + ((last - base) >> 1); 470 471 entry = vdev_indirect_mapping_entry(vim, mid); 472 if (entry == NULL) 473 break; 474 result = dva_mapping_overlap_compare(&offset, entry); 475 476 if (result == 0) { 477 break; 478 } else if (result < 0) { 479 last = mid - 1; 480 } else { 481 base = mid + 1; 482 } 483 } 484 return (entry); 485 } 486 487 /* 488 * Given an indirect vdev and an extent on that vdev, it duplicates the 489 * physical entries of the indirect mapping that correspond to the extent 490 * to a new array and returns a pointer to it. In addition, copied_entries 491 * is populated with the number of mapping entries that were duplicated. 492 * 493 * Finally, since we are doing an allocation, it is up to the caller to 494 * free the array allocated in this function. 495 */ 496 vdev_indirect_mapping_entry_phys_t * 497 vdev_indirect_mapping_duplicate_adjacent_entries(vdev_t *vd, uint64_t offset, 498 uint64_t asize, uint64_t *copied_entries) 499 { 500 vdev_indirect_mapping_entry_phys_t *duplicate_mappings = NULL; 501 vdev_indirect_mapping_t *vim = vd->v_mapping; 502 uint64_t entries = 0; 503 504 vdev_indirect_mapping_entry_phys_t *first_mapping = 505 vdev_indirect_mapping_entry_for_offset(vim, offset); 506 ASSERT3P(first_mapping, !=, NULL); 507 508 vdev_indirect_mapping_entry_phys_t *m = first_mapping; 509 while (asize > 0) { 510 uint64_t size = DVA_GET_ASIZE(&m->vimep_dst); 511 uint64_t inner_offset = offset - DVA_MAPPING_GET_SRC_OFFSET(m); 512 uint64_t inner_size = MIN(asize, size - inner_offset); 513 514 offset += inner_size; 515 asize -= inner_size; 516 entries++; 517 m++; 518 } 519 520 size_t copy_length = entries * sizeof (*first_mapping); 521 duplicate_mappings = malloc(copy_length); 522 if (duplicate_mappings != NULL) 523 bcopy(first_mapping, duplicate_mappings, copy_length); 524 else 525 entries = 0; 526 527 *copied_entries = entries; 528 529 return (duplicate_mappings); 530 } 531 532 static vdev_t * 533 vdev_lookup_top(spa_t *spa, uint64_t vdev) 534 { 535 vdev_t *rvd; 536 vdev_list_t *vlist; 537 538 vlist = &spa->spa_root_vdev->v_children; 539 STAILQ_FOREACH(rvd, vlist, v_childlink) 540 if (rvd->v_id == vdev) 541 break; 542 543 return (rvd); 544 } 545 546 /* 547 * This is a callback for vdev_indirect_remap() which allocates an 548 * indirect_split_t for each split segment and adds it to iv_splits. 549 */ 550 static void 551 vdev_indirect_gather_splits(uint64_t split_offset, vdev_t *vd, uint64_t offset, 552 uint64_t size, void *arg) 553 { 554 int n = 1; 555 zio_t *zio = arg; 556 indirect_vsd_t *iv = zio->io_vsd; 557 558 if (vd->v_read == vdev_indirect_read) 559 return; 560 561 if (vd->v_read == vdev_mirror_read) 562 n = vd->v_nchildren; 563 564 indirect_split_t *is = 565 malloc(offsetof(indirect_split_t, is_child[n])); 566 if (is == NULL) { 567 zio->io_error = ENOMEM; 568 return; 569 } 570 bzero(is, offsetof(indirect_split_t, is_child[n])); 571 572 is->is_children = n; 573 is->is_size = size; 574 is->is_split_offset = split_offset; 575 is->is_target_offset = offset; 576 is->is_vdev = vd; 577 578 /* 579 * Note that we only consider multiple copies of the data for 580 * *mirror* vdevs. We don't for "replacing" or "spare" vdevs, even 581 * though they use the same ops as mirror, because there's only one 582 * "good" copy under the replacing/spare. 583 */ 584 if (vd->v_read == vdev_mirror_read) { 585 int i = 0; 586 vdev_t *kid; 587 588 STAILQ_FOREACH(kid, &vd->v_children, v_childlink) { 589 is->is_child[i++].ic_vdev = kid; 590 } 591 } else { 592 is->is_child[0].ic_vdev = vd; 593 } 594 595 list_insert_tail(&iv->iv_splits, is); 596 } 597 598 static void 599 vdev_indirect_remap(vdev_t *vd, uint64_t offset, uint64_t asize, void *arg) 600 { 601 list_t stack; 602 spa_t *spa = vd->v_spa; 603 zio_t *zio = arg; 604 remap_segment_t *rs; 605 606 list_create(&stack, sizeof (remap_segment_t), 607 offsetof(remap_segment_t, rs_node)); 608 609 rs = rs_alloc(vd, offset, asize, 0); 610 if (rs == NULL) { 611 printf("vdev_indirect_remap: out of memory.\n"); 612 zio->io_error = ENOMEM; 613 } 614 for (; rs != NULL; rs = list_remove_head(&stack)) { 615 vdev_t *v = rs->rs_vd; 616 uint64_t num_entries = 0; 617 /* vdev_indirect_mapping_t *vim = v->v_mapping; */ 618 vdev_indirect_mapping_entry_phys_t *mapping = 619 vdev_indirect_mapping_duplicate_adjacent_entries(v, 620 rs->rs_offset, rs->rs_asize, &num_entries); 621 622 if (num_entries == 0) 623 zio->io_error = ENOMEM; 624 625 for (uint64_t i = 0; i < num_entries; i++) { 626 vdev_indirect_mapping_entry_phys_t *m = &mapping[i]; 627 uint64_t size = DVA_GET_ASIZE(&m->vimep_dst); 628 uint64_t dst_offset = DVA_GET_OFFSET(&m->vimep_dst); 629 uint64_t dst_vdev = DVA_GET_VDEV(&m->vimep_dst); 630 uint64_t inner_offset = rs->rs_offset - 631 DVA_MAPPING_GET_SRC_OFFSET(m); 632 uint64_t inner_size = 633 MIN(rs->rs_asize, size - inner_offset); 634 vdev_t *dst_v = vdev_lookup_top(spa, dst_vdev); 635 636 if (dst_v->v_read == vdev_indirect_read) { 637 remap_segment_t *o; 638 639 o = rs_alloc(dst_v, dst_offset + inner_offset, 640 inner_size, rs->rs_split_offset); 641 if (o == NULL) { 642 printf("vdev_indirect_remap: " 643 "out of memory.\n"); 644 zio->io_error = ENOMEM; 645 break; 646 } 647 648 list_insert_head(&stack, o); 649 } 650 vdev_indirect_gather_splits(rs->rs_split_offset, dst_v, 651 dst_offset + inner_offset, 652 inner_size, arg); 653 654 /* 655 * vdev_indirect_gather_splits can have memory 656 * allocation error, we can not recover from it. 657 */ 658 if (zio->io_error != 0) 659 break; 660 rs->rs_offset += inner_size; 661 rs->rs_asize -= inner_size; 662 rs->rs_split_offset += inner_size; 663 } 664 665 free(mapping); 666 free(rs); 667 if (zio->io_error != 0) 668 break; 669 } 670 671 list_destroy(&stack); 672 } 673 674 static void 675 vdev_indirect_map_free(zio_t *zio) 676 { 677 indirect_vsd_t *iv = zio->io_vsd; 678 indirect_split_t *is; 679 680 while ((is = list_head(&iv->iv_splits)) != NULL) { 681 for (int c = 0; c < is->is_children; c++) { 682 indirect_child_t *ic = &is->is_child[c]; 683 free(ic->ic_data); 684 } 685 list_remove(&iv->iv_splits, is); 686 free(is); 687 } 688 free(iv); 689 } 690 691 static int 692 vdev_indirect_read(vdev_t *vdev, const blkptr_t *bp, void *buf, 693 off_t offset, size_t bytes) 694 { 695 zio_t zio; 696 spa_t *spa = vdev->v_spa; 697 indirect_vsd_t *iv; 698 indirect_split_t *first; 699 int rc = EIO; 700 701 iv = calloc(1, sizeof(*iv)); 702 if (iv == NULL) 703 return (ENOMEM); 704 705 list_create(&iv->iv_splits, 706 sizeof (indirect_split_t), offsetof(indirect_split_t, is_node)); 707 708 bzero(&zio, sizeof(zio)); 709 zio.io_spa = spa; 710 zio.io_bp = (blkptr_t *)bp; 711 zio.io_data = buf; 712 zio.io_size = bytes; 713 zio.io_offset = offset; 714 zio.io_vd = vdev; 715 zio.io_vsd = iv; 716 717 if (vdev->v_mapping == NULL) { 718 vdev_indirect_config_t *vic; 719 720 vic = &vdev->vdev_indirect_config; 721 vdev->v_mapping = vdev_indirect_mapping_open(spa, 722 spa->spa_mos, vic->vic_mapping_object); 723 } 724 725 vdev_indirect_remap(vdev, offset, bytes, &zio); 726 if (zio.io_error != 0) 727 return (zio.io_error); 728 729 first = list_head(&iv->iv_splits); 730 if (first->is_size == zio.io_size) { 731 /* 732 * This is not a split block; we are pointing to the entire 733 * data, which will checksum the same as the original data. 734 * Pass the BP down so that the child i/o can verify the 735 * checksum, and try a different location if available 736 * (e.g. on a mirror). 737 * 738 * While this special case could be handled the same as the 739 * general (split block) case, doing it this way ensures 740 * that the vast majority of blocks on indirect vdevs 741 * (which are not split) are handled identically to blocks 742 * on non-indirect vdevs. This allows us to be less strict 743 * about performance in the general (but rare) case. 744 */ 745 rc = first->is_vdev->v_read(first->is_vdev, zio.io_bp, 746 zio.io_data, first->is_target_offset, bytes); 747 } else { 748 iv->iv_split_block = B_TRUE; 749 /* 750 * Read one copy of each split segment, from the 751 * top-level vdev. Since we don't know the 752 * checksum of each split individually, the child 753 * zio can't ensure that we get the right data. 754 * E.g. if it's a mirror, it will just read from a 755 * random (healthy) leaf vdev. We have to verify 756 * the checksum in vdev_indirect_io_done(). 757 */ 758 for (indirect_split_t *is = list_head(&iv->iv_splits); 759 is != NULL; is = list_next(&iv->iv_splits, is)) { 760 char *ptr = zio.io_data; 761 762 rc = is->is_vdev->v_read(is->is_vdev, zio.io_bp, 763 ptr + is->is_split_offset, is->is_target_offset, 764 is->is_size); 765 } 766 if (zio_checksum_verify(spa, zio.io_bp, zio.io_data)) 767 rc = ECKSUM; 768 else 769 rc = 0; 770 } 771 772 vdev_indirect_map_free(&zio); 773 if (rc == 0) 774 rc = zio.io_error; 775 776 return (rc); 777 } 778 779 static int 780 vdev_disk_read(vdev_t *vdev, const blkptr_t *bp, void *buf, 781 off_t offset, size_t bytes) 782 { 783 784 return (vdev_read_phys(vdev, bp, buf, 785 offset + VDEV_LABEL_START_SIZE, bytes)); 786 } 787 788 static int 789 vdev_missing_read(vdev_t *vdev __unused, const blkptr_t *bp __unused, 790 void *buf __unused, off_t offset __unused, size_t bytes __unused) 791 { 792 793 return (ENOTSUP); 794 } 795 796 static int 797 vdev_mirror_read(vdev_t *vdev, const blkptr_t *bp, void *buf, 798 off_t offset, size_t bytes) 799 { 800 vdev_t *kid; 801 int rc; 802 803 rc = EIO; 804 STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) { 805 if (kid->v_state != VDEV_STATE_HEALTHY) 806 continue; 807 rc = kid->v_read(kid, bp, buf, offset, bytes); 808 if (!rc) 809 return (0); 810 } 811 812 return (rc); 813 } 814 815 static int 816 vdev_replacing_read(vdev_t *vdev, const blkptr_t *bp, void *buf, 817 off_t offset, size_t bytes) 818 { 819 vdev_t *kid; 820 821 /* 822 * Here we should have two kids: 823 * First one which is the one we are replacing and we can trust 824 * only this one to have valid data, but it might not be present. 825 * Second one is that one we are replacing with. It is most likely 826 * healthy, but we can't trust it has needed data, so we won't use it. 827 */ 828 kid = STAILQ_FIRST(&vdev->v_children); 829 if (kid == NULL) 830 return (EIO); 831 if (kid->v_state != VDEV_STATE_HEALTHY) 832 return (EIO); 833 return (kid->v_read(kid, bp, buf, offset, bytes)); 834 } 835 836 /* 837 * List of vdevs that were fully initialized from their own label, but later a 838 * newer label was found that obsoleted the stale label, freeing its 839 * configuration tree. We keep those vdevs around, since a new configuration 840 * may include them. 841 */ 842 static vdev_list_t orphans = STAILQ_HEAD_INITIALIZER(orphans); 843 844 static vdev_t * 845 vdev_find(vdev_list_t *list, uint64_t guid) 846 { 847 vdev_t *vdev, *safe; 848 849 STAILQ_FOREACH_SAFE(vdev, list, v_childlink, safe) { 850 if (vdev->v_guid == guid) 851 return (vdev); 852 if ((vdev = vdev_find(&vdev->v_children, guid)) != NULL) 853 return (vdev); 854 } 855 856 return (NULL); 857 } 858 859 static vdev_t * 860 vdev_create(uint64_t guid, vdev_read_t *_read) 861 { 862 vdev_t *vdev; 863 vdev_indirect_config_t *vic; 864 865 if ((vdev = vdev_find(&orphans, guid))) { 866 STAILQ_REMOVE(&orphans, vdev, vdev, v_childlink); 867 return (vdev); 868 } 869 870 vdev = calloc(1, sizeof(vdev_t)); 871 if (vdev != NULL) { 872 STAILQ_INIT(&vdev->v_children); 873 vdev->v_guid = guid; 874 vdev->v_read = _read; 875 876 /* 877 * root vdev has no read function, we use this fact to 878 * skip setting up data we do not need for root vdev. 879 * We only point root vdev from spa. 880 */ 881 if (_read != NULL) { 882 vic = &vdev->vdev_indirect_config; 883 vic->vic_prev_indirect_vdev = UINT64_MAX; 884 } 885 } 886 887 return (vdev); 888 } 889 890 static void 891 vdev_set_initial_state(vdev_t *vdev, const nvlist_t *nvlist) 892 { 893 uint64_t is_offline, is_faulted, is_degraded, is_removed, isnt_present; 894 uint64_t is_log; 895 896 is_offline = is_removed = is_faulted = is_degraded = isnt_present = 0; 897 is_log = 0; 898 (void) nvlist_find(nvlist, ZPOOL_CONFIG_OFFLINE, DATA_TYPE_UINT64, NULL, 899 &is_offline, NULL); 900 (void) nvlist_find(nvlist, ZPOOL_CONFIG_REMOVED, DATA_TYPE_UINT64, NULL, 901 &is_removed, NULL); 902 (void) nvlist_find(nvlist, ZPOOL_CONFIG_FAULTED, DATA_TYPE_UINT64, NULL, 903 &is_faulted, NULL); 904 (void) nvlist_find(nvlist, ZPOOL_CONFIG_DEGRADED, DATA_TYPE_UINT64, 905 NULL, &is_degraded, NULL); 906 (void) nvlist_find(nvlist, ZPOOL_CONFIG_NOT_PRESENT, DATA_TYPE_UINT64, 907 NULL, &isnt_present, NULL); 908 (void) nvlist_find(nvlist, ZPOOL_CONFIG_IS_LOG, DATA_TYPE_UINT64, NULL, 909 &is_log, NULL); 910 911 if (is_offline != 0) 912 vdev->v_state = VDEV_STATE_OFFLINE; 913 else if (is_removed != 0) 914 vdev->v_state = VDEV_STATE_REMOVED; 915 else if (is_faulted != 0) 916 vdev->v_state = VDEV_STATE_FAULTED; 917 else if (is_degraded != 0) 918 vdev->v_state = VDEV_STATE_DEGRADED; 919 else if (isnt_present != 0) 920 vdev->v_state = VDEV_STATE_CANT_OPEN; 921 922 vdev->v_islog = is_log != 0; 923 } 924 925 static int 926 vdev_init(uint64_t guid, const nvlist_t *nvlist, vdev_t **vdevp) 927 { 928 uint64_t id, ashift, asize, nparity; 929 const char *path; 930 const char *type; 931 int len, pathlen; 932 char *name; 933 vdev_t *vdev; 934 935 if (nvlist_find(nvlist, ZPOOL_CONFIG_ID, DATA_TYPE_UINT64, NULL, &id, 936 NULL) || 937 nvlist_find(nvlist, ZPOOL_CONFIG_TYPE, DATA_TYPE_STRING, NULL, 938 &type, &len)) { 939 return (ENOENT); 940 } 941 942 if (memcmp(type, VDEV_TYPE_MIRROR, len) != 0 && 943 memcmp(type, VDEV_TYPE_DISK, len) != 0 && 944 #ifdef ZFS_TEST 945 memcmp(type, VDEV_TYPE_FILE, len) != 0 && 946 #endif 947 memcmp(type, VDEV_TYPE_RAIDZ, len) != 0 && 948 memcmp(type, VDEV_TYPE_INDIRECT, len) != 0 && 949 memcmp(type, VDEV_TYPE_REPLACING, len) != 0 && 950 memcmp(type, VDEV_TYPE_HOLE, len) != 0) { 951 printf("ZFS: can only boot from disk, mirror, raidz1, " 952 "raidz2 and raidz3 vdevs, got: %.*s\n", len, type); 953 return (EIO); 954 } 955 956 if (memcmp(type, VDEV_TYPE_MIRROR, len) == 0) 957 vdev = vdev_create(guid, vdev_mirror_read); 958 else if (memcmp(type, VDEV_TYPE_RAIDZ, len) == 0) 959 vdev = vdev_create(guid, vdev_raidz_read); 960 else if (memcmp(type, VDEV_TYPE_REPLACING, len) == 0) 961 vdev = vdev_create(guid, vdev_replacing_read); 962 else if (memcmp(type, VDEV_TYPE_INDIRECT, len) == 0) { 963 vdev_indirect_config_t *vic; 964 965 vdev = vdev_create(guid, vdev_indirect_read); 966 if (vdev != NULL) { 967 vdev->v_state = VDEV_STATE_HEALTHY; 968 vic = &vdev->vdev_indirect_config; 969 970 nvlist_find(nvlist, 971 ZPOOL_CONFIG_INDIRECT_OBJECT, 972 DATA_TYPE_UINT64, 973 NULL, &vic->vic_mapping_object, NULL); 974 nvlist_find(nvlist, 975 ZPOOL_CONFIG_INDIRECT_BIRTHS, 976 DATA_TYPE_UINT64, 977 NULL, &vic->vic_births_object, NULL); 978 nvlist_find(nvlist, 979 ZPOOL_CONFIG_PREV_INDIRECT_VDEV, 980 DATA_TYPE_UINT64, 981 NULL, &vic->vic_prev_indirect_vdev, NULL); 982 } 983 } else if (memcmp(type, VDEV_TYPE_HOLE, len) == 0) { 984 vdev = vdev_create(guid, vdev_missing_read); 985 } else { 986 vdev = vdev_create(guid, vdev_disk_read); 987 } 988 989 if (vdev == NULL) 990 return (ENOMEM); 991 992 vdev_set_initial_state(vdev, nvlist); 993 vdev->v_id = id; 994 if (nvlist_find(nvlist, ZPOOL_CONFIG_ASHIFT, 995 DATA_TYPE_UINT64, NULL, &ashift, NULL) == 0) 996 vdev->v_ashift = ashift; 997 998 if (nvlist_find(nvlist, ZPOOL_CONFIG_ASIZE, 999 DATA_TYPE_UINT64, NULL, &asize, NULL) == 0) { 1000 vdev->v_psize = asize + 1001 VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE; 1002 } 1003 1004 if (nvlist_find(nvlist, ZPOOL_CONFIG_NPARITY, 1005 DATA_TYPE_UINT64, NULL, &nparity, NULL) == 0) 1006 vdev->v_nparity = nparity; 1007 1008 if (nvlist_find(nvlist, ZPOOL_CONFIG_PATH, 1009 DATA_TYPE_STRING, NULL, &path, &pathlen) == 0) { 1010 char prefix[] = "/dev/"; 1011 1012 len = strlen(prefix); 1013 if (len < pathlen && memcmp(path, prefix, len) == 0) { 1014 path += len; 1015 pathlen -= len; 1016 } 1017 name = malloc(pathlen + 1); 1018 bcopy(path, name, pathlen); 1019 name[pathlen] = '\0'; 1020 vdev->v_name = name; 1021 } else { 1022 name = NULL; 1023 if (memcmp(type, VDEV_TYPE_RAIDZ, len) == 0) { 1024 if (vdev->v_nparity < 1 || 1025 vdev->v_nparity > 3) { 1026 printf("ZFS: invalid raidz parity: %d\n", 1027 vdev->v_nparity); 1028 return (EIO); 1029 } 1030 (void) asprintf(&name, "%.*s%d-%" PRIu64, len, type, 1031 vdev->v_nparity, id); 1032 } else { 1033 (void) asprintf(&name, "%.*s-%" PRIu64, len, type, id); 1034 } 1035 vdev->v_name = name; 1036 } 1037 *vdevp = vdev; 1038 return (0); 1039 } 1040 1041 /* 1042 * Find slot for vdev. We return either NULL to signal to use 1043 * STAILQ_INSERT_HEAD, or we return link element to be used with 1044 * STAILQ_INSERT_AFTER. 1045 */ 1046 static vdev_t * 1047 vdev_find_previous(vdev_t *top_vdev, uint64_t id) 1048 { 1049 vdev_t *v, *previous; 1050 1051 previous = NULL; 1052 STAILQ_FOREACH(v, &top_vdev->v_children, v_childlink) { 1053 if (v->v_id > id) 1054 return (previous); 1055 1056 if (v->v_id == id) 1057 return (v); 1058 1059 if (v->v_id < id) 1060 previous = v; 1061 } 1062 return (previous); 1063 } 1064 1065 static size_t 1066 vdev_child_count(vdev_t *vdev) 1067 { 1068 vdev_t *v; 1069 size_t count; 1070 1071 count = 0; 1072 STAILQ_FOREACH(v, &vdev->v_children, v_childlink) { 1073 count++; 1074 } 1075 return (count); 1076 } 1077 1078 /* 1079 * Insert vdev into top_vdev children list. List is ordered by v_id. 1080 */ 1081 static vdev_t * 1082 vdev_insert(vdev_t *top_vdev, vdev_t *vdev) 1083 { 1084 vdev_t *previous; 1085 size_t count; 1086 1087 /* 1088 * The top level vdev can appear in random order, depending how 1089 * the firmware is presenting the disk devices. 1090 * However, we will insert vdev to create list ordered by v_id, 1091 * so we can use either STAILQ_INSERT_HEAD or STAILQ_INSERT_AFTER 1092 * as STAILQ does not have insert before. 1093 */ 1094 previous = vdev_find_previous(top_vdev, vdev->v_id); 1095 1096 if (previous == NULL) { 1097 STAILQ_INSERT_HEAD(&top_vdev->v_children, vdev, v_childlink); 1098 } else if (previous->v_id == vdev->v_id) { 1099 /* 1100 * This vdev was configured from label config, 1101 * do not insert duplicate. 1102 */ 1103 free(vdev); 1104 return (previous); 1105 } else { 1106 STAILQ_INSERT_AFTER(&top_vdev->v_children, previous, vdev, 1107 v_childlink); 1108 } 1109 1110 count = vdev_child_count(top_vdev); 1111 if (top_vdev->v_nchildren < count) 1112 top_vdev->v_nchildren = count; 1113 return (vdev); 1114 } 1115 1116 static int 1117 vdev_from_nvlist(spa_t *spa, uint64_t top_guid, uint64_t label_guid, 1118 uint64_t txg, const nvlist_t *nvlist) 1119 { 1120 vdev_t *top_vdev, *vdev; 1121 nvlist_t **kids = NULL; 1122 int rc, nkids; 1123 1124 /* Get top vdev. */ 1125 top_vdev = vdev_find(&spa->spa_root_vdev->v_children, top_guid); 1126 if (top_vdev == NULL) { 1127 rc = vdev_init(top_guid, nvlist, &top_vdev); 1128 if (rc != 0) 1129 return (rc); 1130 top_vdev->v_spa = spa; 1131 top_vdev->v_top = top_vdev; 1132 top_vdev->v_label = label_guid; 1133 top_vdev->v_txg = txg; 1134 (void )vdev_insert(spa->spa_root_vdev, top_vdev); 1135 } 1136 1137 /* Add children if there are any. */ 1138 rc = nvlist_find(nvlist, ZPOOL_CONFIG_CHILDREN, DATA_TYPE_NVLIST_ARRAY, 1139 &nkids, &kids, NULL); 1140 if (rc == 0) { 1141 for (int i = 0; i < nkids; i++) { 1142 uint64_t guid; 1143 1144 rc = nvlist_find(kids[i], ZPOOL_CONFIG_GUID, 1145 DATA_TYPE_UINT64, NULL, &guid, NULL); 1146 if (rc != 0) 1147 goto done; 1148 1149 rc = vdev_init(guid, kids[i], &vdev); 1150 if (rc != 0) 1151 goto done; 1152 1153 vdev->v_spa = spa; 1154 vdev->v_top = top_vdev; 1155 vdev = vdev_insert(top_vdev, vdev); 1156 } 1157 } else { 1158 /* 1159 * When there are no children, nvlist_find() does return 1160 * error, reset it because leaf devices have no children. 1161 */ 1162 rc = 0; 1163 } 1164 done: 1165 if (kids != NULL) { 1166 for (int i = 0; i < nkids; i++) 1167 nvlist_destroy(kids[i]); 1168 free(kids); 1169 } 1170 1171 return (rc); 1172 } 1173 1174 static void 1175 vdev_set_state(vdev_t *vdev) 1176 { 1177 vdev_t *kid; 1178 int good_kids; 1179 int bad_kids; 1180 1181 STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) { 1182 vdev_set_state(kid); 1183 } 1184 1185 /* 1186 * A mirror or raidz is healthy if all its kids are healthy. A 1187 * mirror is degraded if any of its kids is healthy; a raidz 1188 * is degraded if at most nparity kids are offline. 1189 */ 1190 if (STAILQ_FIRST(&vdev->v_children)) { 1191 good_kids = 0; 1192 bad_kids = 0; 1193 STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) { 1194 if (kid->v_state == VDEV_STATE_HEALTHY) 1195 good_kids++; 1196 else 1197 bad_kids++; 1198 } 1199 if (bad_kids == 0) { 1200 vdev->v_state = VDEV_STATE_HEALTHY; 1201 } else { 1202 if (vdev->v_read == vdev_mirror_read) { 1203 if (good_kids) { 1204 vdev->v_state = VDEV_STATE_DEGRADED; 1205 } else { 1206 vdev->v_state = VDEV_STATE_OFFLINE; 1207 } 1208 } else if (vdev->v_read == vdev_raidz_read) { 1209 if (bad_kids > vdev->v_nparity) { 1210 vdev->v_state = VDEV_STATE_OFFLINE; 1211 } else { 1212 vdev->v_state = VDEV_STATE_DEGRADED; 1213 } 1214 } 1215 } 1216 } 1217 } 1218 1219 static int 1220 vdev_update_from_nvlist(vdev_t *root, uint64_t top_guid, const nvlist_t *nvlist) 1221 { 1222 vdev_t *vdev; 1223 nvlist_t **kids = NULL; 1224 int rc, nkids; 1225 1226 /* Update top vdev. */ 1227 vdev = vdev_find(&root->v_children, top_guid); 1228 if (vdev != NULL) 1229 vdev_set_initial_state(vdev, nvlist); 1230 1231 /* Update children if there are any. */ 1232 rc = nvlist_find(nvlist, ZPOOL_CONFIG_CHILDREN, DATA_TYPE_NVLIST_ARRAY, 1233 &nkids, &kids, NULL); 1234 if (rc == 0) { 1235 for (int i = 0; i < nkids; i++) { 1236 uint64_t guid; 1237 1238 rc = nvlist_find(kids[i], ZPOOL_CONFIG_GUID, 1239 DATA_TYPE_UINT64, NULL, &guid, NULL); 1240 if (rc != 0) 1241 break; 1242 1243 vdev = vdev_find(&root->v_children, guid); 1244 if (vdev != NULL) 1245 vdev_set_initial_state(vdev, kids[i]); 1246 } 1247 } else { 1248 rc = 0; 1249 } 1250 if (kids != NULL) { 1251 for (int i = 0; i < nkids; i++) 1252 nvlist_destroy(kids[i]); 1253 free(kids); 1254 } 1255 1256 return (rc); 1257 } 1258 1259 static void 1260 vdev_free(struct vdev *vdev) 1261 { 1262 struct vdev *kid, *safe; 1263 1264 STAILQ_FOREACH_SAFE(kid, &vdev->v_children, v_childlink, safe) 1265 vdev_free(kid); 1266 if (vdev->v_phys_read != NULL) 1267 STAILQ_INSERT_HEAD(&orphans, vdev, v_childlink); 1268 else 1269 free(vdev); 1270 } 1271 1272 static int 1273 vdev_init_from_nvlist(spa_t *spa, const nvlist_t *nvlist) 1274 { 1275 uint64_t pool_guid, vdev_children; 1276 nvlist_t *vdevs = NULL, **kids = NULL; 1277 int rc, nkids; 1278 1279 if (nvlist_find(nvlist, ZPOOL_CONFIG_POOL_GUID, DATA_TYPE_UINT64, 1280 NULL, &pool_guid, NULL) || 1281 nvlist_find(nvlist, ZPOOL_CONFIG_VDEV_CHILDREN, DATA_TYPE_UINT64, 1282 NULL, &vdev_children, NULL) || 1283 nvlist_find(nvlist, ZPOOL_CONFIG_VDEV_TREE, DATA_TYPE_NVLIST, 1284 NULL, &vdevs, NULL)) { 1285 printf("ZFS: can't find vdev details\n"); 1286 return (ENOENT); 1287 } 1288 1289 /* Wrong guid?! */ 1290 if (spa->spa_guid != pool_guid) { 1291 nvlist_destroy(vdevs); 1292 return (EINVAL); 1293 } 1294 1295 spa->spa_root_vdev->v_nchildren = vdev_children; 1296 1297 rc = nvlist_find(vdevs, ZPOOL_CONFIG_CHILDREN, DATA_TYPE_NVLIST_ARRAY, 1298 &nkids, &kids, NULL); 1299 nvlist_destroy(vdevs); 1300 1301 /* 1302 * MOS config has at least one child for root vdev. 1303 */ 1304 if (rc != 0) 1305 return (rc); 1306 1307 for (int i = 0; i < nkids; i++) { 1308 uint64_t guid; 1309 vdev_t *vdev; 1310 1311 rc = nvlist_find(kids[i], ZPOOL_CONFIG_GUID, DATA_TYPE_UINT64, 1312 NULL, &guid, NULL); 1313 if (rc != 0) 1314 break; 1315 vdev = vdev_find(&spa->spa_root_vdev->v_children, guid); 1316 /* 1317 * Top level vdev is missing, create it. 1318 * XXXGL: how can this happen? 1319 */ 1320 if (vdev == NULL) 1321 rc = vdev_from_nvlist(spa, guid, 0, 0, kids[i]); 1322 else 1323 rc = vdev_update_from_nvlist(spa->spa_root_vdev, guid, 1324 kids[i]); 1325 if (rc != 0) 1326 break; 1327 } 1328 if (kids != NULL) { 1329 for (int i = 0; i < nkids; i++) 1330 nvlist_destroy(kids[i]); 1331 free(kids); 1332 } 1333 1334 /* 1335 * Re-evaluate top-level vdev state. 1336 */ 1337 vdev_set_state(spa->spa_root_vdev); 1338 1339 return (rc); 1340 } 1341 1342 static bool 1343 nvlist_find_child_guid(const nvlist_t *nvlist, uint64_t guid) 1344 { 1345 nvlist_t **kids = NULL; 1346 int nkids, i; 1347 bool rv = false; 1348 1349 if (nvlist_find(nvlist, ZPOOL_CONFIG_CHILDREN, DATA_TYPE_NVLIST_ARRAY, 1350 &nkids, &kids, NULL) != 0) 1351 nkids = 0; 1352 1353 for (i = 0; i < nkids; i++) { 1354 uint64_t kid_guid; 1355 1356 if (nvlist_find(kids[i], ZPOOL_CONFIG_GUID, DATA_TYPE_UINT64, 1357 NULL, &kid_guid, NULL) != 0) 1358 break; 1359 if (kid_guid == guid) 1360 rv = true; 1361 else 1362 rv = nvlist_find_child_guid(kids[i], guid); 1363 if (rv) 1364 break; 1365 } 1366 1367 for (i = 0; i < nkids; i++) 1368 nvlist_destroy(kids[i]); 1369 free(kids); 1370 1371 return (rv); 1372 } 1373 1374 static bool 1375 nvlist_find_vdev_guid(const nvlist_t *nvlist, uint64_t guid) 1376 { 1377 nvlist_t *vdevs; 1378 bool rv; 1379 1380 if (nvlist_find(nvlist, ZPOOL_CONFIG_VDEV_TREE, DATA_TYPE_NVLIST, NULL, 1381 &vdevs, NULL) != 0) 1382 return (false); 1383 rv = nvlist_find_child_guid(vdevs, guid); 1384 nvlist_destroy(vdevs); 1385 1386 return (rv); 1387 } 1388 1389 static spa_t * 1390 spa_find_by_guid(uint64_t guid) 1391 { 1392 spa_t *spa; 1393 1394 STAILQ_FOREACH(spa, &zfs_pools, spa_link) 1395 if (spa->spa_guid == guid) 1396 return (spa); 1397 1398 return (NULL); 1399 } 1400 1401 static spa_t * 1402 spa_find_by_name(const char *name) 1403 { 1404 spa_t *spa; 1405 1406 STAILQ_FOREACH(spa, &zfs_pools, spa_link) 1407 if (strcmp(spa->spa_name, name) == 0) 1408 return (spa); 1409 1410 return (NULL); 1411 } 1412 1413 static spa_t * 1414 spa_create(uint64_t guid, const char *name) 1415 { 1416 spa_t *spa; 1417 1418 if ((spa = calloc(1, sizeof(spa_t))) == NULL) 1419 return (NULL); 1420 if ((spa->spa_name = strdup(name)) == NULL) { 1421 free(spa); 1422 return (NULL); 1423 } 1424 spa->spa_uberblock = &spa->spa_uberblock_master; 1425 spa->spa_mos = &spa->spa_mos_master; 1426 spa->spa_guid = guid; 1427 spa->spa_root_vdev = vdev_create(guid, NULL); 1428 if (spa->spa_root_vdev == NULL) { 1429 free(spa->spa_name); 1430 free(spa); 1431 return (NULL); 1432 } 1433 spa->spa_root_vdev->v_name = spa->spa_name; 1434 STAILQ_INSERT_TAIL(&zfs_pools, spa, spa_link); 1435 1436 return (spa); 1437 } 1438 1439 static const char * 1440 state_name(vdev_state_t state) 1441 { 1442 static const char *names[] = { 1443 "UNKNOWN", 1444 "CLOSED", 1445 "OFFLINE", 1446 "REMOVED", 1447 "CANT_OPEN", 1448 "FAULTED", 1449 "DEGRADED", 1450 "ONLINE" 1451 }; 1452 return (names[state]); 1453 } 1454 1455 #ifdef BOOT2 1456 1457 #define pager_printf printf 1458 1459 #else 1460 1461 static int 1462 pager_printf(const char *fmt, ...) 1463 { 1464 char line[80]; 1465 va_list args; 1466 1467 va_start(args, fmt); 1468 vsnprintf(line, sizeof(line), fmt, args); 1469 va_end(args); 1470 return (pager_output(line)); 1471 } 1472 1473 #endif 1474 1475 #define STATUS_FORMAT " %s %s\n" 1476 1477 static int 1478 print_state(int indent, const char *name, vdev_state_t state) 1479 { 1480 int i; 1481 char buf[512]; 1482 1483 buf[0] = 0; 1484 for (i = 0; i < indent; i++) 1485 strcat(buf, " "); 1486 strcat(buf, name); 1487 return (pager_printf(STATUS_FORMAT, buf, state_name(state))); 1488 } 1489 1490 static int 1491 vdev_status(vdev_t *vdev, int indent) 1492 { 1493 vdev_t *kid; 1494 int ret; 1495 1496 if (vdev->v_islog) { 1497 (void) pager_output(" logs\n"); 1498 indent++; 1499 } 1500 1501 ret = print_state(indent, vdev->v_name, vdev->v_state); 1502 if (ret != 0) 1503 return (ret); 1504 1505 STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) { 1506 ret = vdev_status(kid, indent + 1); 1507 if (ret != 0) 1508 return (ret); 1509 } 1510 return (ret); 1511 } 1512 1513 static int 1514 spa_status(spa_t *spa) 1515 { 1516 static char bootfs[ZFS_MAXNAMELEN]; 1517 uint64_t rootid; 1518 vdev_list_t *vlist; 1519 vdev_t *vdev; 1520 int good_kids, bad_kids, degraded_kids, ret; 1521 vdev_state_t state; 1522 1523 ret = pager_printf(" pool: %s\n", spa->spa_name); 1524 if (ret != 0) 1525 return (ret); 1526 1527 if (zfs_get_root(spa, &rootid) == 0 && 1528 zfs_rlookup(spa, rootid, bootfs) == 0) { 1529 if (bootfs[0] == '\0') 1530 ret = pager_printf("bootfs: %s\n", spa->spa_name); 1531 else 1532 ret = pager_printf("bootfs: %s/%s\n", spa->spa_name, 1533 bootfs); 1534 if (ret != 0) 1535 return (ret); 1536 } 1537 ret = pager_printf("config:\n\n"); 1538 if (ret != 0) 1539 return (ret); 1540 ret = pager_printf(STATUS_FORMAT, "NAME", "STATE"); 1541 if (ret != 0) 1542 return (ret); 1543 1544 good_kids = 0; 1545 degraded_kids = 0; 1546 bad_kids = 0; 1547 vlist = &spa->spa_root_vdev->v_children; 1548 STAILQ_FOREACH(vdev, vlist, v_childlink) { 1549 if (vdev->v_state == VDEV_STATE_HEALTHY) 1550 good_kids++; 1551 else if (vdev->v_state == VDEV_STATE_DEGRADED) 1552 degraded_kids++; 1553 else 1554 bad_kids++; 1555 } 1556 1557 state = VDEV_STATE_CLOSED; 1558 if (good_kids > 0 && (degraded_kids + bad_kids) == 0) 1559 state = VDEV_STATE_HEALTHY; 1560 else if ((good_kids + degraded_kids) > 0) 1561 state = VDEV_STATE_DEGRADED; 1562 1563 ret = print_state(0, spa->spa_name, state); 1564 if (ret != 0) 1565 return (ret); 1566 1567 STAILQ_FOREACH(vdev, vlist, v_childlink) { 1568 ret = vdev_status(vdev, 1); 1569 if (ret != 0) 1570 return (ret); 1571 } 1572 return (ret); 1573 } 1574 1575 static int 1576 spa_all_status(void) 1577 { 1578 spa_t *spa; 1579 int first = 1, ret = 0; 1580 1581 STAILQ_FOREACH(spa, &zfs_pools, spa_link) { 1582 if (!first) { 1583 ret = pager_printf("\n"); 1584 if (ret != 0) 1585 return (ret); 1586 } 1587 first = 0; 1588 ret = spa_status(spa); 1589 if (ret != 0) 1590 return (ret); 1591 } 1592 return (ret); 1593 } 1594 1595 static uint64_t 1596 vdev_label_offset(uint64_t psize, int l, uint64_t offset) 1597 { 1598 uint64_t label_offset; 1599 1600 if (l < VDEV_LABELS / 2) 1601 label_offset = 0; 1602 else 1603 label_offset = psize - VDEV_LABELS * sizeof (vdev_label_t); 1604 1605 return (offset + l * sizeof (vdev_label_t) + label_offset); 1606 } 1607 1608 static int 1609 vdev_uberblock_compare(const uberblock_t *ub1, const uberblock_t *ub2) 1610 { 1611 unsigned int seq1 = 0; 1612 unsigned int seq2 = 0; 1613 int cmp = AVL_CMP(ub1->ub_txg, ub2->ub_txg); 1614 1615 if (cmp != 0) 1616 return (cmp); 1617 1618 cmp = AVL_CMP(ub1->ub_timestamp, ub2->ub_timestamp); 1619 if (cmp != 0) 1620 return (cmp); 1621 1622 if (MMP_VALID(ub1) && MMP_SEQ_VALID(ub1)) 1623 seq1 = MMP_SEQ(ub1); 1624 1625 if (MMP_VALID(ub2) && MMP_SEQ_VALID(ub2)) 1626 seq2 = MMP_SEQ(ub2); 1627 1628 return (AVL_CMP(seq1, seq2)); 1629 } 1630 1631 static int 1632 uberblock_verify(uberblock_t *ub) 1633 { 1634 if (ub->ub_magic == BSWAP_64((uint64_t)UBERBLOCK_MAGIC)) { 1635 byteswap_uint64_array(ub, sizeof (uberblock_t)); 1636 } 1637 1638 if (ub->ub_magic != UBERBLOCK_MAGIC || 1639 !SPA_VERSION_IS_SUPPORTED(ub->ub_version)) 1640 return (EINVAL); 1641 1642 return (0); 1643 } 1644 1645 static int 1646 vdev_label_read(vdev_t *vd, int l, void *buf, uint64_t offset, 1647 size_t size) 1648 { 1649 blkptr_t bp; 1650 off_t off; 1651 1652 off = vdev_label_offset(vd->v_psize, l, offset); 1653 1654 BP_ZERO(&bp); 1655 BP_SET_LSIZE(&bp, size); 1656 BP_SET_PSIZE(&bp, size); 1657 BP_SET_CHECKSUM(&bp, ZIO_CHECKSUM_LABEL); 1658 BP_SET_COMPRESS(&bp, ZIO_COMPRESS_OFF); 1659 DVA_SET_OFFSET(BP_IDENTITY(&bp), off); 1660 ZIO_SET_CHECKSUM(&bp.blk_cksum, off, 0, 0, 0); 1661 1662 return (vdev_read_phys(vd, &bp, buf, off, size)); 1663 } 1664 1665 /* 1666 * We do need to be sure we write to correct location. 1667 * Our vdev label does consist of 4 fields: 1668 * pad1 (8k), reserved. 1669 * bootenv (8k), checksummed, previously reserved, may contian garbage. 1670 * vdev_phys (112k), checksummed 1671 * uberblock ring (128k), checksummed. 1672 * 1673 * Since bootenv area may contain garbage, we can not reliably read it, as 1674 * we can get checksum errors. 1675 * Next best thing is vdev_phys - it is just after bootenv. It still may 1676 * be corrupted, but in such case we will miss this one write. 1677 */ 1678 static int 1679 vdev_label_write_validate(vdev_t *vd, int l, uint64_t offset) 1680 { 1681 uint64_t off, o_phys; 1682 void *buf; 1683 size_t size = VDEV_PHYS_SIZE; 1684 int rc; 1685 1686 o_phys = offsetof(vdev_label_t, vl_vdev_phys); 1687 off = vdev_label_offset(vd->v_psize, l, o_phys); 1688 1689 /* off should be 8K from bootenv */ 1690 if (vdev_label_offset(vd->v_psize, l, offset) + VDEV_PAD_SIZE != off) 1691 return (EINVAL); 1692 1693 buf = malloc(size); 1694 if (buf == NULL) 1695 return (ENOMEM); 1696 1697 /* Read vdev_phys */ 1698 rc = vdev_label_read(vd, l, buf, o_phys, size); 1699 free(buf); 1700 return (rc); 1701 } 1702 1703 static int 1704 vdev_label_write(vdev_t *vd, int l, vdev_boot_envblock_t *be, uint64_t offset) 1705 { 1706 zio_checksum_info_t *ci; 1707 zio_cksum_t cksum; 1708 off_t off; 1709 size_t size = VDEV_PAD_SIZE; 1710 int rc; 1711 1712 if (vd->v_phys_write == NULL) 1713 return (ENOTSUP); 1714 1715 off = vdev_label_offset(vd->v_psize, l, offset); 1716 1717 rc = vdev_label_write_validate(vd, l, offset); 1718 if (rc != 0) { 1719 return (rc); 1720 } 1721 1722 ci = &zio_checksum_table[ZIO_CHECKSUM_LABEL]; 1723 be->vbe_zbt.zec_magic = ZEC_MAGIC; 1724 zio_checksum_label_verifier(&be->vbe_zbt.zec_cksum, off); 1725 ci->ci_func[0](be, size, NULL, &cksum); 1726 be->vbe_zbt.zec_cksum = cksum; 1727 1728 return (vdev_write_phys(vd, be, off, size)); 1729 } 1730 1731 static int 1732 vdev_write_bootenv_impl(vdev_t *vdev, vdev_boot_envblock_t *be) 1733 { 1734 vdev_t *kid; 1735 int rv = 0, err; 1736 1737 STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) { 1738 if (kid->v_state != VDEV_STATE_HEALTHY) 1739 continue; 1740 err = vdev_write_bootenv_impl(kid, be); 1741 if (err != 0) 1742 rv = err; 1743 } 1744 1745 /* 1746 * Non-leaf vdevs do not have v_phys_write. 1747 */ 1748 if (vdev->v_phys_write == NULL) 1749 return (rv); 1750 1751 for (int l = 0; l < VDEV_LABELS; l++) { 1752 err = vdev_label_write(vdev, l, be, 1753 offsetof(vdev_label_t, vl_be)); 1754 if (err != 0) { 1755 printf("failed to write bootenv to %s label %d: %d\n", 1756 vdev->v_name ? vdev->v_name : "unknown", l, err); 1757 rv = err; 1758 } 1759 } 1760 return (rv); 1761 } 1762 1763 int 1764 vdev_write_bootenv(vdev_t *vdev, nvlist_t *nvl) 1765 { 1766 vdev_boot_envblock_t *be; 1767 nvlist_t nv, *nvp; 1768 uint64_t version; 1769 int rv; 1770 1771 if (nvl->nv_size > sizeof(be->vbe_bootenv)) 1772 return (E2BIG); 1773 1774 version = VB_RAW; 1775 nvp = vdev_read_bootenv(vdev); 1776 if (nvp != NULL) { 1777 nvlist_find(nvp, BOOTENV_VERSION, DATA_TYPE_UINT64, NULL, 1778 &version, NULL); 1779 nvlist_destroy(nvp); 1780 } 1781 1782 be = calloc(1, sizeof(*be)); 1783 if (be == NULL) 1784 return (ENOMEM); 1785 1786 be->vbe_version = version; 1787 switch (version) { 1788 case VB_RAW: 1789 /* 1790 * If there is no envmap, we will just wipe bootenv. 1791 */ 1792 nvlist_find(nvl, GRUB_ENVMAP, DATA_TYPE_STRING, NULL, 1793 be->vbe_bootenv, NULL); 1794 rv = 0; 1795 break; 1796 1797 case VB_NVLIST: 1798 nv.nv_header = nvl->nv_header; 1799 nv.nv_asize = nvl->nv_asize; 1800 nv.nv_size = nvl->nv_size; 1801 1802 bcopy(&nv.nv_header, be->vbe_bootenv, sizeof(nv.nv_header)); 1803 nv.nv_data = be->vbe_bootenv + sizeof(nvs_header_t); 1804 bcopy(nvl->nv_data, nv.nv_data, nv.nv_size); 1805 rv = nvlist_export(&nv); 1806 break; 1807 1808 default: 1809 rv = EINVAL; 1810 break; 1811 } 1812 1813 if (rv == 0) { 1814 be->vbe_version = htobe64(be->vbe_version); 1815 rv = vdev_write_bootenv_impl(vdev, be); 1816 } 1817 free(be); 1818 return (rv); 1819 } 1820 1821 /* 1822 * Read the bootenv area from pool label, return the nvlist from it. 1823 * We return from first successful read. 1824 */ 1825 nvlist_t * 1826 vdev_read_bootenv(vdev_t *vdev) 1827 { 1828 vdev_t *kid; 1829 nvlist_t *benv; 1830 vdev_boot_envblock_t *be; 1831 char *command; 1832 bool ok; 1833 int rv; 1834 1835 STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) { 1836 if (kid->v_state != VDEV_STATE_HEALTHY) 1837 continue; 1838 1839 benv = vdev_read_bootenv(kid); 1840 if (benv != NULL) 1841 return (benv); 1842 } 1843 1844 be = malloc(sizeof (*be)); 1845 if (be == NULL) 1846 return (NULL); 1847 1848 rv = 0; 1849 for (int l = 0; l < VDEV_LABELS; l++) { 1850 rv = vdev_label_read(vdev, l, be, 1851 offsetof(vdev_label_t, vl_be), 1852 sizeof (*be)); 1853 if (rv == 0) 1854 break; 1855 } 1856 if (rv != 0) { 1857 free(be); 1858 return (NULL); 1859 } 1860 1861 be->vbe_version = be64toh(be->vbe_version); 1862 switch (be->vbe_version) { 1863 case VB_RAW: 1864 /* 1865 * we have textual data in vbe_bootenv, create nvlist 1866 * with key "envmap". 1867 */ 1868 benv = nvlist_create(NV_UNIQUE_NAME); 1869 if (benv != NULL) { 1870 if (*be->vbe_bootenv == '\0') { 1871 nvlist_add_uint64(benv, BOOTENV_VERSION, 1872 VB_NVLIST); 1873 break; 1874 } 1875 nvlist_add_uint64(benv, BOOTENV_VERSION, VB_RAW); 1876 be->vbe_bootenv[sizeof (be->vbe_bootenv) - 1] = '\0'; 1877 nvlist_add_string(benv, GRUB_ENVMAP, be->vbe_bootenv); 1878 } 1879 break; 1880 1881 case VB_NVLIST: 1882 benv = nvlist_import(be->vbe_bootenv, sizeof(be->vbe_bootenv)); 1883 break; 1884 1885 default: 1886 command = (char *)be; 1887 ok = false; 1888 1889 /* Check for legacy zfsbootcfg command string */ 1890 for (int i = 0; command[i] != '\0'; i++) { 1891 if (iscntrl(command[i])) { 1892 ok = false; 1893 break; 1894 } else { 1895 ok = true; 1896 } 1897 } 1898 benv = nvlist_create(NV_UNIQUE_NAME); 1899 if (benv != NULL) { 1900 if (ok) 1901 nvlist_add_string(benv, FREEBSD_BOOTONCE, 1902 command); 1903 else 1904 nvlist_add_uint64(benv, BOOTENV_VERSION, 1905 VB_NVLIST); 1906 } 1907 break; 1908 } 1909 free(be); 1910 return (benv); 1911 } 1912 1913 static uint64_t 1914 vdev_get_label_asize(nvlist_t *nvl) 1915 { 1916 nvlist_t *vdevs; 1917 uint64_t asize; 1918 const char *type; 1919 int len; 1920 1921 asize = 0; 1922 /* Get vdev tree */ 1923 if (nvlist_find(nvl, ZPOOL_CONFIG_VDEV_TREE, DATA_TYPE_NVLIST, 1924 NULL, &vdevs, NULL) != 0) 1925 return (asize); 1926 1927 /* 1928 * Get vdev type. We will calculate asize for raidz, mirror and disk. 1929 * For raidz, the asize is raw size of all children. 1930 */ 1931 if (nvlist_find(vdevs, ZPOOL_CONFIG_TYPE, DATA_TYPE_STRING, 1932 NULL, &type, &len) != 0) 1933 goto done; 1934 1935 if (memcmp(type, VDEV_TYPE_MIRROR, len) != 0 && 1936 memcmp(type, VDEV_TYPE_DISK, len) != 0 && 1937 memcmp(type, VDEV_TYPE_RAIDZ, len) != 0) 1938 goto done; 1939 1940 if (nvlist_find(vdevs, ZPOOL_CONFIG_ASIZE, DATA_TYPE_UINT64, 1941 NULL, &asize, NULL) != 0) 1942 goto done; 1943 1944 if (memcmp(type, VDEV_TYPE_RAIDZ, len) == 0) { 1945 nvlist_t **kids; 1946 int nkids; 1947 1948 if (nvlist_find(vdevs, ZPOOL_CONFIG_CHILDREN, 1949 DATA_TYPE_NVLIST_ARRAY, &nkids, &kids, NULL) != 0) { 1950 asize = 0; 1951 goto done; 1952 } 1953 1954 asize /= nkids; 1955 for (int i = 0; i < nkids; i++) 1956 nvlist_destroy(kids[i]); 1957 free(kids); 1958 } 1959 1960 asize += VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE; 1961 done: 1962 nvlist_destroy(vdevs); 1963 return (asize); 1964 } 1965 1966 static nvlist_t * 1967 vdev_label_read_config(vdev_t *vd, uint64_t txg) 1968 { 1969 vdev_phys_t *label; 1970 uint64_t best_txg = 0; 1971 uint64_t label_txg = 0; 1972 uint64_t asize; 1973 nvlist_t *nvl = NULL, *tmp; 1974 int error; 1975 1976 label = malloc(sizeof (vdev_phys_t)); 1977 if (label == NULL) 1978 return (NULL); 1979 1980 for (int l = 0; l < VDEV_LABELS; l++) { 1981 if (vdev_label_read(vd, l, label, 1982 offsetof(vdev_label_t, vl_vdev_phys), 1983 sizeof (vdev_phys_t))) 1984 continue; 1985 1986 tmp = nvlist_import(label->vp_nvlist, 1987 sizeof(label->vp_nvlist)); 1988 if (tmp == NULL) 1989 continue; 1990 1991 error = nvlist_find(tmp, ZPOOL_CONFIG_POOL_TXG, 1992 DATA_TYPE_UINT64, NULL, &label_txg, NULL); 1993 if (error != 0 || label_txg == 0) { 1994 nvlist_destroy(nvl); 1995 nvl = tmp; 1996 goto done; 1997 } 1998 1999 if (label_txg <= txg && label_txg > best_txg) { 2000 best_txg = label_txg; 2001 nvlist_destroy(nvl); 2002 nvl = tmp; 2003 tmp = NULL; 2004 2005 /* 2006 * Use asize from pool config. We need this 2007 * because we can get bad value from BIOS. 2008 */ 2009 asize = vdev_get_label_asize(nvl); 2010 if (asize != 0) { 2011 vd->v_psize = asize; 2012 } 2013 } 2014 nvlist_destroy(tmp); 2015 } 2016 2017 if (best_txg == 0) { 2018 nvlist_destroy(nvl); 2019 nvl = NULL; 2020 } 2021 done: 2022 free(label); 2023 return (nvl); 2024 } 2025 2026 static void 2027 vdev_uberblock_load(vdev_t *vd, uberblock_t *ub) 2028 { 2029 uberblock_t *buf; 2030 2031 buf = malloc(VDEV_UBERBLOCK_SIZE(vd)); 2032 if (buf == NULL) 2033 return; 2034 2035 for (int l = 0; l < VDEV_LABELS; l++) { 2036 for (int n = 0; n < VDEV_UBERBLOCK_COUNT(vd); n++) { 2037 if (vdev_label_read(vd, l, buf, 2038 VDEV_UBERBLOCK_OFFSET(vd, n), 2039 VDEV_UBERBLOCK_SIZE(vd))) 2040 continue; 2041 if (uberblock_verify(buf) != 0) 2042 continue; 2043 2044 if (vdev_uberblock_compare(buf, ub) > 0) 2045 *ub = *buf; 2046 } 2047 } 2048 free(buf); 2049 } 2050 2051 static int 2052 vdev_probe(vdev_phys_read_t *_read, vdev_phys_write_t *_write, void *priv, 2053 spa_t **spap) 2054 { 2055 vdev_t vtmp; 2056 spa_t *spa; 2057 vdev_t *vdev, *top; 2058 nvlist_t *nvl, *vdevs; 2059 uint64_t val; 2060 uint64_t guid, pool_guid, top_guid, txg; 2061 const char *pool_name; 2062 int rc, namelen; 2063 2064 /* 2065 * Load the vdev label and figure out which 2066 * uberblock is most current. 2067 */ 2068 memset(&vtmp, 0, sizeof(vtmp)); 2069 vtmp.v_phys_read = _read; 2070 vtmp.v_phys_write = _write; 2071 vtmp.v_priv = priv; 2072 vtmp.v_psize = P2ALIGN(ldi_get_size(priv), 2073 (uint64_t)sizeof (vdev_label_t)); 2074 2075 /* Test for minimum device size. */ 2076 if (vtmp.v_psize < SPA_MINDEVSIZE) 2077 return (EIO); 2078 2079 nvl = vdev_label_read_config(&vtmp, UINT64_MAX); 2080 if (nvl == NULL) 2081 return (EIO); 2082 2083 if (nvlist_find(nvl, ZPOOL_CONFIG_VERSION, DATA_TYPE_UINT64, 2084 NULL, &val, NULL) != 0) { 2085 nvlist_destroy(nvl); 2086 return (EIO); 2087 } 2088 2089 if (!SPA_VERSION_IS_SUPPORTED(val)) { 2090 printf("ZFS: unsupported ZFS version %u (should be %u)\n", 2091 (unsigned)val, (unsigned)SPA_VERSION); 2092 nvlist_destroy(nvl); 2093 return (EIO); 2094 } 2095 2096 /* Check ZFS features for read */ 2097 rc = nvlist_check_features_for_read(nvl); 2098 if (rc != 0) { 2099 nvlist_destroy(nvl); 2100 return (EIO); 2101 } 2102 2103 if (nvlist_find(nvl, ZPOOL_CONFIG_POOL_STATE, DATA_TYPE_UINT64, 2104 NULL, &val, NULL) != 0) { 2105 nvlist_destroy(nvl); 2106 return (EIO); 2107 } 2108 2109 if (val == POOL_STATE_DESTROYED) { 2110 /* We don't boot only from destroyed pools. */ 2111 nvlist_destroy(nvl); 2112 return (EIO); 2113 } 2114 2115 if (nvlist_find(nvl, ZPOOL_CONFIG_POOL_TXG, DATA_TYPE_UINT64, 2116 NULL, &txg, NULL) != 0 || 2117 txg == 0 || 2118 nvlist_find(nvl, ZPOOL_CONFIG_TOP_GUID, DATA_TYPE_UINT64, 2119 NULL, &top_guid, NULL) != 0 || 2120 nvlist_find(nvl, ZPOOL_CONFIG_POOL_GUID, DATA_TYPE_UINT64, 2121 NULL, &pool_guid, NULL) != 0 || 2122 nvlist_find(nvl, ZPOOL_CONFIG_POOL_NAME, DATA_TYPE_STRING, 2123 NULL, &pool_name, &namelen) != 0 || 2124 nvlist_find(nvl, ZPOOL_CONFIG_GUID, DATA_TYPE_UINT64, 2125 NULL, &guid, NULL) != 0) { 2126 /* 2127 * Cache, spare and replaced devices end up here - just ignore 2128 * them. 2129 */ 2130 nvlist_destroy(nvl); 2131 return (EIO); 2132 } 2133 2134 /* 2135 * Create the pool if this is the first time we've seen it. 2136 */ 2137 spa = spa_find_by_guid(pool_guid); 2138 if (spa == NULL) { 2139 char *name; 2140 2141 name = malloc(namelen + 1); 2142 if (name == NULL) { 2143 nvlist_destroy(nvl); 2144 return (ENOMEM); 2145 } 2146 bcopy(pool_name, name, namelen); 2147 name[namelen] = '\0'; 2148 spa = spa_create(pool_guid, name); 2149 free(name); 2150 if (spa == NULL) { 2151 nvlist_destroy(nvl); 2152 return (ENOMEM); 2153 } 2154 } 2155 2156 /* 2157 * Check if configuration is already known. If configuration is known 2158 * and txg numbers don't match, we got 2x2 scenarios here. First, is 2159 * the label being read right now _newer_ than the one read before. 2160 * Second, is the vdev that provided the stale label _present_ in the 2161 * newer configuration. If neither is true, we completely ignore the 2162 * label. 2163 */ 2164 STAILQ_FOREACH(top, &spa->spa_root_vdev->v_children, v_childlink) 2165 if (top->v_guid == top_guid) { 2166 bool newer, present; 2167 2168 if (top->v_txg == txg) 2169 break; 2170 newer = (top->v_txg < txg); 2171 present = newer ? 2172 nvlist_find_vdev_guid(nvl, top->v_label) : 2173 (vdev_find(&top->v_children, guid) != NULL); 2174 printf("ZFS: pool %s vdev %s %s stale label from " 2175 "0x%jx@0x%jx, %s 0x%jx@0x%jx\n", 2176 spa->spa_name, top->v_name, 2177 present ? "using" : "ignoring", 2178 newer ? top->v_label : guid, 2179 newer ? top->v_txg : txg, 2180 present ? "referred by" : "using", 2181 newer ? guid : top->v_label, 2182 newer ? txg : top->v_txg); 2183 if (newer) { 2184 STAILQ_REMOVE(&spa->spa_root_vdev->v_children, 2185 top, vdev, v_childlink); 2186 vdev_free(top); 2187 break; 2188 } else if (present) { 2189 break; 2190 } else { 2191 nvlist_destroy(nvl); 2192 return (EIO); 2193 } 2194 } 2195 2196 /* 2197 * Get the vdev tree and create our in-core copy of it. 2198 * If we already have a vdev with this guid, this must 2199 * be some kind of alias (overlapping slices, dangerously dedicated 2200 * disks etc). 2201 */ 2202 vdev = vdev_find(&spa->spa_root_vdev->v_children, guid); 2203 /* Has this vdev already been inited? */ 2204 if (vdev && vdev->v_phys_read) { 2205 nvlist_destroy(nvl); 2206 return (EIO); 2207 } 2208 2209 if (nvlist_find(nvl, ZPOOL_CONFIG_VDEV_TREE, DATA_TYPE_NVLIST, NULL, 2210 &vdevs, NULL)) { 2211 printf("ZFS: can't find vdev details\n"); 2212 nvlist_destroy(nvl); 2213 return (ENOENT); 2214 } 2215 2216 rc = vdev_from_nvlist(spa, top_guid, guid, txg, vdevs); 2217 nvlist_destroy(vdevs); 2218 nvlist_destroy(nvl); 2219 if (rc != 0) 2220 return (rc); 2221 2222 /* 2223 * We should already have created an incomplete vdev for this 2224 * vdev. Find it and initialise it with our read proc. 2225 */ 2226 vdev = vdev_find(&spa->spa_root_vdev->v_children, guid); 2227 if (vdev != NULL) { 2228 vdev->v_phys_read = _read; 2229 vdev->v_phys_write = _write; 2230 vdev->v_priv = priv; 2231 vdev->v_psize = vtmp.v_psize; 2232 /* 2233 * If no other state is set, mark vdev healthy. 2234 */ 2235 if (vdev->v_state == VDEV_STATE_UNKNOWN) 2236 vdev->v_state = VDEV_STATE_HEALTHY; 2237 } else { 2238 printf("ZFS: inconsistent nvlist contents\n"); 2239 return (EIO); 2240 } 2241 2242 if (vdev->v_islog) 2243 spa->spa_with_log = vdev->v_islog; 2244 2245 /* 2246 * Re-evaluate top-level vdev state. 2247 */ 2248 vdev_set_state(vdev->v_top); 2249 2250 /* 2251 * Ok, we are happy with the pool so far. Lets find 2252 * the best uberblock and then we can actually access 2253 * the contents of the pool. 2254 */ 2255 vdev_uberblock_load(vdev, spa->spa_uberblock); 2256 2257 if (spap != NULL) 2258 *spap = spa; 2259 return (0); 2260 } 2261 2262 static int 2263 ilog2(int n) 2264 { 2265 int v; 2266 2267 for (v = 0; v < 32; v++) 2268 if (n == (1 << v)) 2269 return (v); 2270 return (-1); 2271 } 2272 2273 static int 2274 zio_read_gang(const spa_t *spa, const blkptr_t *bp, void *buf) 2275 { 2276 blkptr_t gbh_bp; 2277 zio_gbh_phys_t zio_gb; 2278 char *pbuf; 2279 int i; 2280 2281 /* Artificial BP for gang block header. */ 2282 gbh_bp = *bp; 2283 BP_SET_PSIZE(&gbh_bp, SPA_GANGBLOCKSIZE); 2284 BP_SET_LSIZE(&gbh_bp, SPA_GANGBLOCKSIZE); 2285 BP_SET_CHECKSUM(&gbh_bp, ZIO_CHECKSUM_GANG_HEADER); 2286 BP_SET_COMPRESS(&gbh_bp, ZIO_COMPRESS_OFF); 2287 for (i = 0; i < SPA_DVAS_PER_BP; i++) 2288 DVA_SET_GANG(&gbh_bp.blk_dva[i], 0); 2289 2290 /* Read gang header block using the artificial BP. */ 2291 if (zio_read(spa, &gbh_bp, &zio_gb)) 2292 return (EIO); 2293 2294 pbuf = buf; 2295 for (i = 0; i < SPA_GBH_NBLKPTRS; i++) { 2296 blkptr_t *gbp = &zio_gb.zg_blkptr[i]; 2297 2298 if (BP_IS_HOLE(gbp)) 2299 continue; 2300 if (zio_read(spa, gbp, pbuf)) 2301 return (EIO); 2302 pbuf += BP_GET_PSIZE(gbp); 2303 } 2304 2305 if (zio_checksum_verify(spa, bp, buf)) 2306 return (EIO); 2307 return (0); 2308 } 2309 2310 static int 2311 zio_read(const spa_t *spa, const blkptr_t *bp, void *buf) 2312 { 2313 int cpfunc = BP_GET_COMPRESS(bp); 2314 uint64_t align, size; 2315 void *pbuf; 2316 int i, error; 2317 2318 /* 2319 * Process data embedded in block pointer 2320 */ 2321 if (BP_IS_EMBEDDED(bp)) { 2322 ASSERT(BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA); 2323 2324 size = BPE_GET_PSIZE(bp); 2325 ASSERT(size <= BPE_PAYLOAD_SIZE); 2326 2327 if (cpfunc != ZIO_COMPRESS_OFF) 2328 pbuf = malloc(size); 2329 else 2330 pbuf = buf; 2331 2332 if (pbuf == NULL) 2333 return (ENOMEM); 2334 2335 decode_embedded_bp_compressed(bp, pbuf); 2336 error = 0; 2337 2338 if (cpfunc != ZIO_COMPRESS_OFF) { 2339 error = zio_decompress_data(cpfunc, pbuf, 2340 size, buf, BP_GET_LSIZE(bp)); 2341 free(pbuf); 2342 } 2343 if (error != 0) 2344 printf("ZFS: i/o error - unable to decompress " 2345 "block pointer data, error %d\n", error); 2346 return (error); 2347 } 2348 2349 error = EIO; 2350 2351 for (i = 0; i < SPA_DVAS_PER_BP; i++) { 2352 const dva_t *dva = &bp->blk_dva[i]; 2353 vdev_t *vdev; 2354 vdev_list_t *vlist; 2355 uint64_t vdevid; 2356 off_t offset; 2357 2358 if (!dva->dva_word[0] && !dva->dva_word[1]) 2359 continue; 2360 2361 vdevid = DVA_GET_VDEV(dva); 2362 offset = DVA_GET_OFFSET(dva); 2363 vlist = &spa->spa_root_vdev->v_children; 2364 STAILQ_FOREACH(vdev, vlist, v_childlink) { 2365 if (vdev->v_id == vdevid) 2366 break; 2367 } 2368 if (!vdev || !vdev->v_read) 2369 continue; 2370 2371 size = BP_GET_PSIZE(bp); 2372 if (vdev->v_read == vdev_raidz_read) { 2373 align = 1ULL << vdev->v_ashift; 2374 if (P2PHASE(size, align) != 0) 2375 size = P2ROUNDUP(size, align); 2376 } 2377 if (size != BP_GET_PSIZE(bp) || cpfunc != ZIO_COMPRESS_OFF) 2378 pbuf = malloc(size); 2379 else 2380 pbuf = buf; 2381 2382 if (pbuf == NULL) { 2383 error = ENOMEM; 2384 break; 2385 } 2386 2387 if (DVA_GET_GANG(dva)) 2388 error = zio_read_gang(spa, bp, pbuf); 2389 else 2390 error = vdev->v_read(vdev, bp, pbuf, offset, size); 2391 if (error == 0) { 2392 if (cpfunc != ZIO_COMPRESS_OFF) 2393 error = zio_decompress_data(cpfunc, pbuf, 2394 BP_GET_PSIZE(bp), buf, BP_GET_LSIZE(bp)); 2395 else if (size != BP_GET_PSIZE(bp)) 2396 bcopy(pbuf, buf, BP_GET_PSIZE(bp)); 2397 } else { 2398 printf("zio_read error: %d\n", error); 2399 } 2400 if (buf != pbuf) 2401 free(pbuf); 2402 if (error == 0) 2403 break; 2404 } 2405 if (error != 0) 2406 printf("ZFS: i/o error - all block copies unavailable\n"); 2407 2408 return (error); 2409 } 2410 2411 static int 2412 dnode_read(const spa_t *spa, const dnode_phys_t *dnode, off_t offset, 2413 void *buf, size_t buflen) 2414 { 2415 int ibshift = dnode->dn_indblkshift - SPA_BLKPTRSHIFT; 2416 int bsize = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT; 2417 int nlevels = dnode->dn_nlevels; 2418 int i, rc; 2419 2420 if (bsize > SPA_MAXBLOCKSIZE) { 2421 printf("ZFS: I/O error - blocks larger than %llu are not " 2422 "supported\n", SPA_MAXBLOCKSIZE); 2423 return (EIO); 2424 } 2425 2426 /* 2427 * Handle odd block sizes, mirrors dmu_read_impl(). Data can't exist 2428 * past the first block, so we'll clip the read to the portion of the 2429 * buffer within bsize and zero out the remainder. 2430 */ 2431 if (dnode->dn_maxblkid == 0) { 2432 size_t newbuflen; 2433 2434 newbuflen = offset > bsize ? 0 : MIN(buflen, bsize - offset); 2435 bzero((char *)buf + newbuflen, buflen - newbuflen); 2436 buflen = newbuflen; 2437 } 2438 2439 /* 2440 * Note: bsize may not be a power of two here so we need to do an 2441 * actual divide rather than a bitshift. 2442 */ 2443 while (buflen > 0) { 2444 uint64_t bn = offset / bsize; 2445 int boff = offset % bsize; 2446 int ibn; 2447 const blkptr_t *indbp; 2448 blkptr_t bp; 2449 2450 if (bn > dnode->dn_maxblkid) 2451 return (EIO); 2452 2453 if (dnode == dnode_cache_obj && bn == dnode_cache_bn) 2454 goto cached; 2455 2456 indbp = dnode->dn_blkptr; 2457 for (i = 0; i < nlevels; i++) { 2458 /* 2459 * Copy the bp from the indirect array so that 2460 * we can re-use the scratch buffer for multi-level 2461 * objects. 2462 */ 2463 ibn = bn >> ((nlevels - i - 1) * ibshift); 2464 ibn &= ((1 << ibshift) - 1); 2465 bp = indbp[ibn]; 2466 if (BP_IS_HOLE(&bp)) { 2467 memset(dnode_cache_buf, 0, bsize); 2468 break; 2469 } 2470 rc = zio_read(spa, &bp, dnode_cache_buf); 2471 if (rc) 2472 return (rc); 2473 indbp = (const blkptr_t *) dnode_cache_buf; 2474 } 2475 dnode_cache_obj = dnode; 2476 dnode_cache_bn = bn; 2477 cached: 2478 2479 /* 2480 * The buffer contains our data block. Copy what we 2481 * need from it and loop. 2482 */ 2483 i = bsize - boff; 2484 if (i > buflen) i = buflen; 2485 memcpy(buf, &dnode_cache_buf[boff], i); 2486 buf = ((char *)buf) + i; 2487 offset += i; 2488 buflen -= i; 2489 } 2490 2491 return (0); 2492 } 2493 2494 /* 2495 * Lookup a value in a microzap directory. 2496 */ 2497 static int 2498 mzap_lookup(const mzap_phys_t *mz, size_t size, const char *name, 2499 uint64_t *value) 2500 { 2501 const mzap_ent_phys_t *mze; 2502 int chunks, i; 2503 2504 /* 2505 * Microzap objects use exactly one block. Read the whole 2506 * thing. 2507 */ 2508 chunks = size / MZAP_ENT_LEN - 1; 2509 for (i = 0; i < chunks; i++) { 2510 mze = &mz->mz_chunk[i]; 2511 if (strcmp(mze->mze_name, name) == 0) { 2512 *value = mze->mze_value; 2513 return (0); 2514 } 2515 } 2516 2517 return (ENOENT); 2518 } 2519 2520 /* 2521 * Compare a name with a zap leaf entry. Return non-zero if the name 2522 * matches. 2523 */ 2524 static int 2525 fzap_name_equal(const zap_leaf_t *zl, const zap_leaf_chunk_t *zc, 2526 const char *name) 2527 { 2528 size_t namelen; 2529 const zap_leaf_chunk_t *nc; 2530 const char *p; 2531 2532 namelen = zc->l_entry.le_name_numints; 2533 2534 nc = &ZAP_LEAF_CHUNK(zl, zc->l_entry.le_name_chunk); 2535 p = name; 2536 while (namelen > 0) { 2537 size_t len; 2538 2539 len = namelen; 2540 if (len > ZAP_LEAF_ARRAY_BYTES) 2541 len = ZAP_LEAF_ARRAY_BYTES; 2542 if (memcmp(p, nc->l_array.la_array, len)) 2543 return (0); 2544 p += len; 2545 namelen -= len; 2546 nc = &ZAP_LEAF_CHUNK(zl, nc->l_array.la_next); 2547 } 2548 2549 return (1); 2550 } 2551 2552 /* 2553 * Extract a uint64_t value from a zap leaf entry. 2554 */ 2555 static uint64_t 2556 fzap_leaf_value(const zap_leaf_t *zl, const zap_leaf_chunk_t *zc) 2557 { 2558 const zap_leaf_chunk_t *vc; 2559 int i; 2560 uint64_t value; 2561 const uint8_t *p; 2562 2563 vc = &ZAP_LEAF_CHUNK(zl, zc->l_entry.le_value_chunk); 2564 for (i = 0, value = 0, p = vc->l_array.la_array; i < 8; i++) { 2565 value = (value << 8) | p[i]; 2566 } 2567 2568 return (value); 2569 } 2570 2571 static void 2572 stv(int len, void *addr, uint64_t value) 2573 { 2574 switch (len) { 2575 case 1: 2576 *(uint8_t *)addr = value; 2577 return; 2578 case 2: 2579 *(uint16_t *)addr = value; 2580 return; 2581 case 4: 2582 *(uint32_t *)addr = value; 2583 return; 2584 case 8: 2585 *(uint64_t *)addr = value; 2586 return; 2587 } 2588 } 2589 2590 /* 2591 * Extract a array from a zap leaf entry. 2592 */ 2593 static void 2594 fzap_leaf_array(const zap_leaf_t *zl, const zap_leaf_chunk_t *zc, 2595 uint64_t integer_size, uint64_t num_integers, void *buf) 2596 { 2597 uint64_t array_int_len = zc->l_entry.le_value_intlen; 2598 uint64_t value = 0; 2599 uint64_t *u64 = buf; 2600 char *p = buf; 2601 int len = MIN(zc->l_entry.le_value_numints, num_integers); 2602 int chunk = zc->l_entry.le_value_chunk; 2603 int byten = 0; 2604 2605 if (integer_size == 8 && len == 1) { 2606 *u64 = fzap_leaf_value(zl, zc); 2607 return; 2608 } 2609 2610 while (len > 0) { 2611 struct zap_leaf_array *la = &ZAP_LEAF_CHUNK(zl, chunk).l_array; 2612 int i; 2613 2614 ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(zl)); 2615 for (i = 0; i < ZAP_LEAF_ARRAY_BYTES && len > 0; i++) { 2616 value = (value << 8) | la->la_array[i]; 2617 byten++; 2618 if (byten == array_int_len) { 2619 stv(integer_size, p, value); 2620 byten = 0; 2621 len--; 2622 if (len == 0) 2623 return; 2624 p += integer_size; 2625 } 2626 } 2627 chunk = la->la_next; 2628 } 2629 } 2630 2631 static int 2632 fzap_check_size(uint64_t integer_size, uint64_t num_integers) 2633 { 2634 2635 switch (integer_size) { 2636 case 1: 2637 case 2: 2638 case 4: 2639 case 8: 2640 break; 2641 default: 2642 return (EINVAL); 2643 } 2644 2645 if (integer_size * num_integers > ZAP_MAXVALUELEN) 2646 return (E2BIG); 2647 2648 return (0); 2649 } 2650 2651 static void 2652 zap_leaf_free(zap_leaf_t *leaf) 2653 { 2654 free(leaf->l_phys); 2655 free(leaf); 2656 } 2657 2658 static int 2659 zap_get_leaf_byblk(fat_zap_t *zap, uint64_t blk, zap_leaf_t **lp) 2660 { 2661 int bs = FZAP_BLOCK_SHIFT(zap); 2662 int err; 2663 2664 *lp = malloc(sizeof(**lp)); 2665 if (*lp == NULL) 2666 return (ENOMEM); 2667 2668 (*lp)->l_bs = bs; 2669 (*lp)->l_phys = malloc(1 << bs); 2670 2671 if ((*lp)->l_phys == NULL) { 2672 free(*lp); 2673 return (ENOMEM); 2674 } 2675 err = dnode_read(zap->zap_spa, zap->zap_dnode, blk << bs, (*lp)->l_phys, 2676 1 << bs); 2677 if (err != 0) { 2678 zap_leaf_free(*lp); 2679 } 2680 return (err); 2681 } 2682 2683 static int 2684 zap_table_load(fat_zap_t *zap, zap_table_phys_t *tbl, uint64_t idx, 2685 uint64_t *valp) 2686 { 2687 int bs = FZAP_BLOCK_SHIFT(zap); 2688 uint64_t blk = idx >> (bs - 3); 2689 uint64_t off = idx & ((1 << (bs - 3)) - 1); 2690 uint64_t *buf; 2691 int rc; 2692 2693 buf = malloc(1 << zap->zap_block_shift); 2694 if (buf == NULL) 2695 return (ENOMEM); 2696 rc = dnode_read(zap->zap_spa, zap->zap_dnode, (tbl->zt_blk + blk) << bs, 2697 buf, 1 << zap->zap_block_shift); 2698 if (rc == 0) 2699 *valp = buf[off]; 2700 free(buf); 2701 return (rc); 2702 } 2703 2704 static int 2705 zap_idx_to_blk(fat_zap_t *zap, uint64_t idx, uint64_t *valp) 2706 { 2707 if (zap->zap_phys->zap_ptrtbl.zt_numblks == 0) { 2708 *valp = ZAP_EMBEDDED_PTRTBL_ENT(zap, idx); 2709 return (0); 2710 } else { 2711 return (zap_table_load(zap, &zap->zap_phys->zap_ptrtbl, 2712 idx, valp)); 2713 } 2714 } 2715 2716 #define ZAP_HASH_IDX(hash, n) (((n) == 0) ? 0 : ((hash) >> (64 - (n)))) 2717 static int 2718 zap_deref_leaf(fat_zap_t *zap, uint64_t h, zap_leaf_t **lp) 2719 { 2720 uint64_t idx, blk; 2721 int err; 2722 2723 idx = ZAP_HASH_IDX(h, zap->zap_phys->zap_ptrtbl.zt_shift); 2724 err = zap_idx_to_blk(zap, idx, &blk); 2725 if (err != 0) 2726 return (err); 2727 return (zap_get_leaf_byblk(zap, blk, lp)); 2728 } 2729 2730 #define CHAIN_END 0xffff /* end of the chunk chain */ 2731 #define LEAF_HASH(l, h) \ 2732 ((ZAP_LEAF_HASH_NUMENTRIES(l)-1) & \ 2733 ((h) >> \ 2734 (64 - ZAP_LEAF_HASH_SHIFT(l) - (l)->l_phys->l_hdr.lh_prefix_len))) 2735 #define LEAF_HASH_ENTPTR(l, h) (&(l)->l_phys->l_hash[LEAF_HASH(l, h)]) 2736 2737 static int 2738 zap_leaf_lookup(zap_leaf_t *zl, uint64_t hash, const char *name, 2739 uint64_t integer_size, uint64_t num_integers, void *value) 2740 { 2741 int rc; 2742 uint16_t *chunkp; 2743 struct zap_leaf_entry *le; 2744 2745 /* 2746 * Make sure this chunk matches our hash. 2747 */ 2748 if (zl->l_phys->l_hdr.lh_prefix_len > 0 && 2749 zl->l_phys->l_hdr.lh_prefix != 2750 hash >> (64 - zl->l_phys->l_hdr.lh_prefix_len)) 2751 return (EIO); 2752 2753 rc = ENOENT; 2754 for (chunkp = LEAF_HASH_ENTPTR(zl, hash); 2755 *chunkp != CHAIN_END; chunkp = &le->le_next) { 2756 zap_leaf_chunk_t *zc; 2757 uint16_t chunk = *chunkp; 2758 2759 le = ZAP_LEAF_ENTRY(zl, chunk); 2760 if (le->le_hash != hash) 2761 continue; 2762 zc = &ZAP_LEAF_CHUNK(zl, chunk); 2763 if (fzap_name_equal(zl, zc, name)) { 2764 if (zc->l_entry.le_value_intlen > integer_size) { 2765 rc = EINVAL; 2766 } else { 2767 fzap_leaf_array(zl, zc, integer_size, 2768 num_integers, value); 2769 rc = 0; 2770 } 2771 break; 2772 } 2773 } 2774 return (rc); 2775 } 2776 2777 /* 2778 * Lookup a value in a fatzap directory. 2779 */ 2780 static int 2781 fzap_lookup(const spa_t *spa, const dnode_phys_t *dnode, zap_phys_t *zh, 2782 const char *name, uint64_t integer_size, uint64_t num_integers, 2783 void *value) 2784 { 2785 int bsize = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT; 2786 fat_zap_t z; 2787 zap_leaf_t *zl; 2788 uint64_t hash; 2789 int rc; 2790 2791 if (zh->zap_magic != ZAP_MAGIC) 2792 return (EIO); 2793 2794 if ((rc = fzap_check_size(integer_size, num_integers)) != 0) { 2795 return (rc); 2796 } 2797 2798 z.zap_block_shift = ilog2(bsize); 2799 z.zap_phys = zh; 2800 z.zap_spa = spa; 2801 z.zap_dnode = dnode; 2802 2803 hash = zap_hash(zh->zap_salt, name); 2804 rc = zap_deref_leaf(&z, hash, &zl); 2805 if (rc != 0) 2806 return (rc); 2807 2808 rc = zap_leaf_lookup(zl, hash, name, integer_size, num_integers, value); 2809 2810 zap_leaf_free(zl); 2811 return (rc); 2812 } 2813 2814 /* 2815 * Lookup a name in a zap object and return its value as a uint64_t. 2816 */ 2817 static int 2818 zap_lookup(const spa_t *spa, const dnode_phys_t *dnode, const char *name, 2819 uint64_t integer_size, uint64_t num_integers, void *value) 2820 { 2821 int rc; 2822 zap_phys_t *zap; 2823 size_t size = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT; 2824 2825 zap = malloc(size); 2826 if (zap == NULL) 2827 return (ENOMEM); 2828 2829 rc = dnode_read(spa, dnode, 0, zap, size); 2830 if (rc) 2831 goto done; 2832 2833 switch (zap->zap_block_type) { 2834 case ZBT_MICRO: 2835 rc = mzap_lookup((const mzap_phys_t *)zap, size, name, value); 2836 break; 2837 case ZBT_HEADER: 2838 rc = fzap_lookup(spa, dnode, zap, name, integer_size, 2839 num_integers, value); 2840 break; 2841 default: 2842 printf("ZFS: invalid zap_type=%" PRIx64 "\n", 2843 zap->zap_block_type); 2844 rc = EIO; 2845 } 2846 done: 2847 free(zap); 2848 return (rc); 2849 } 2850 2851 /* 2852 * List a microzap directory. 2853 */ 2854 static int 2855 mzap_list(const mzap_phys_t *mz, size_t size, 2856 int (*callback)(const char *, uint64_t)) 2857 { 2858 const mzap_ent_phys_t *mze; 2859 int chunks, i, rc; 2860 2861 /* 2862 * Microzap objects use exactly one block. Read the whole 2863 * thing. 2864 */ 2865 rc = 0; 2866 chunks = size / MZAP_ENT_LEN - 1; 2867 for (i = 0; i < chunks; i++) { 2868 mze = &mz->mz_chunk[i]; 2869 if (mze->mze_name[0]) { 2870 rc = callback(mze->mze_name, mze->mze_value); 2871 if (rc != 0) 2872 break; 2873 } 2874 } 2875 2876 return (rc); 2877 } 2878 2879 /* 2880 * List a fatzap directory. 2881 */ 2882 static int 2883 fzap_list(const spa_t *spa, const dnode_phys_t *dnode, zap_phys_t *zh, 2884 int (*callback)(const char *, uint64_t)) 2885 { 2886 int bsize = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT; 2887 fat_zap_t z; 2888 uint64_t i; 2889 int j, rc; 2890 2891 if (zh->zap_magic != ZAP_MAGIC) 2892 return (EIO); 2893 2894 z.zap_block_shift = ilog2(bsize); 2895 z.zap_phys = zh; 2896 2897 /* 2898 * This assumes that the leaf blocks start at block 1. The 2899 * documentation isn't exactly clear on this. 2900 */ 2901 zap_leaf_t zl; 2902 zl.l_bs = z.zap_block_shift; 2903 zl.l_phys = malloc(bsize); 2904 if (zl.l_phys == NULL) 2905 return (ENOMEM); 2906 2907 for (i = 0; i < zh->zap_num_leafs; i++) { 2908 off_t off = ((off_t)(i + 1)) << zl.l_bs; 2909 char name[256], *p; 2910 uint64_t value; 2911 2912 if (dnode_read(spa, dnode, off, zl.l_phys, bsize)) { 2913 free(zl.l_phys); 2914 return (EIO); 2915 } 2916 2917 for (j = 0; j < ZAP_LEAF_NUMCHUNKS(&zl); j++) { 2918 zap_leaf_chunk_t *zc, *nc; 2919 int namelen; 2920 2921 zc = &ZAP_LEAF_CHUNK(&zl, j); 2922 if (zc->l_entry.le_type != ZAP_CHUNK_ENTRY) 2923 continue; 2924 namelen = zc->l_entry.le_name_numints; 2925 if (namelen > sizeof(name)) 2926 namelen = sizeof(name); 2927 2928 /* 2929 * Paste the name back together. 2930 */ 2931 nc = &ZAP_LEAF_CHUNK(&zl, zc->l_entry.le_name_chunk); 2932 p = name; 2933 while (namelen > 0) { 2934 int len; 2935 len = namelen; 2936 if (len > ZAP_LEAF_ARRAY_BYTES) 2937 len = ZAP_LEAF_ARRAY_BYTES; 2938 memcpy(p, nc->l_array.la_array, len); 2939 p += len; 2940 namelen -= len; 2941 nc = &ZAP_LEAF_CHUNK(&zl, nc->l_array.la_next); 2942 } 2943 2944 /* 2945 * Assume the first eight bytes of the value are 2946 * a uint64_t. 2947 */ 2948 value = fzap_leaf_value(&zl, zc); 2949 2950 /* printf("%s 0x%jx\n", name, (uintmax_t)value); */ 2951 rc = callback((const char *)name, value); 2952 if (rc != 0) { 2953 free(zl.l_phys); 2954 return (rc); 2955 } 2956 } 2957 } 2958 2959 free(zl.l_phys); 2960 return (0); 2961 } 2962 2963 static int zfs_printf(const char *name, uint64_t value __unused) 2964 { 2965 2966 printf("%s\n", name); 2967 2968 return (0); 2969 } 2970 2971 /* 2972 * List a zap directory. 2973 */ 2974 static int 2975 zap_list(const spa_t *spa, const dnode_phys_t *dnode) 2976 { 2977 zap_phys_t *zap; 2978 size_t size = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT; 2979 int rc; 2980 2981 zap = malloc(size); 2982 if (zap == NULL) 2983 return (ENOMEM); 2984 2985 rc = dnode_read(spa, dnode, 0, zap, size); 2986 if (rc == 0) { 2987 if (zap->zap_block_type == ZBT_MICRO) 2988 rc = mzap_list((const mzap_phys_t *)zap, size, 2989 zfs_printf); 2990 else 2991 rc = fzap_list(spa, dnode, zap, zfs_printf); 2992 } 2993 free(zap); 2994 return (rc); 2995 } 2996 2997 static int 2998 objset_get_dnode(const spa_t *spa, const objset_phys_t *os, uint64_t objnum, 2999 dnode_phys_t *dnode) 3000 { 3001 off_t offset; 3002 3003 offset = objnum * sizeof(dnode_phys_t); 3004 return dnode_read(spa, &os->os_meta_dnode, offset, 3005 dnode, sizeof(dnode_phys_t)); 3006 } 3007 3008 /* 3009 * Lookup a name in a microzap directory. 3010 */ 3011 static int 3012 mzap_rlookup(const mzap_phys_t *mz, size_t size, char *name, uint64_t value) 3013 { 3014 const mzap_ent_phys_t *mze; 3015 int chunks, i; 3016 3017 /* 3018 * Microzap objects use exactly one block. Read the whole 3019 * thing. 3020 */ 3021 chunks = size / MZAP_ENT_LEN - 1; 3022 for (i = 0; i < chunks; i++) { 3023 mze = &mz->mz_chunk[i]; 3024 if (value == mze->mze_value) { 3025 strcpy(name, mze->mze_name); 3026 return (0); 3027 } 3028 } 3029 3030 return (ENOENT); 3031 } 3032 3033 static void 3034 fzap_name_copy(const zap_leaf_t *zl, const zap_leaf_chunk_t *zc, char *name) 3035 { 3036 size_t namelen; 3037 const zap_leaf_chunk_t *nc; 3038 char *p; 3039 3040 namelen = zc->l_entry.le_name_numints; 3041 3042 nc = &ZAP_LEAF_CHUNK(zl, zc->l_entry.le_name_chunk); 3043 p = name; 3044 while (namelen > 0) { 3045 size_t len; 3046 len = namelen; 3047 if (len > ZAP_LEAF_ARRAY_BYTES) 3048 len = ZAP_LEAF_ARRAY_BYTES; 3049 memcpy(p, nc->l_array.la_array, len); 3050 p += len; 3051 namelen -= len; 3052 nc = &ZAP_LEAF_CHUNK(zl, nc->l_array.la_next); 3053 } 3054 3055 *p = '\0'; 3056 } 3057 3058 static int 3059 fzap_rlookup(const spa_t *spa, const dnode_phys_t *dnode, zap_phys_t *zh, 3060 char *name, uint64_t value) 3061 { 3062 int bsize = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT; 3063 fat_zap_t z; 3064 uint64_t i; 3065 int j, rc; 3066 3067 if (zh->zap_magic != ZAP_MAGIC) 3068 return (EIO); 3069 3070 z.zap_block_shift = ilog2(bsize); 3071 z.zap_phys = zh; 3072 3073 /* 3074 * This assumes that the leaf blocks start at block 1. The 3075 * documentation isn't exactly clear on this. 3076 */ 3077 zap_leaf_t zl; 3078 zl.l_bs = z.zap_block_shift; 3079 zl.l_phys = malloc(bsize); 3080 if (zl.l_phys == NULL) 3081 return (ENOMEM); 3082 3083 for (i = 0; i < zh->zap_num_leafs; i++) { 3084 off_t off = ((off_t)(i + 1)) << zl.l_bs; 3085 3086 rc = dnode_read(spa, dnode, off, zl.l_phys, bsize); 3087 if (rc != 0) 3088 goto done; 3089 3090 for (j = 0; j < ZAP_LEAF_NUMCHUNKS(&zl); j++) { 3091 zap_leaf_chunk_t *zc; 3092 3093 zc = &ZAP_LEAF_CHUNK(&zl, j); 3094 if (zc->l_entry.le_type != ZAP_CHUNK_ENTRY) 3095 continue; 3096 if (zc->l_entry.le_value_intlen != 8 || 3097 zc->l_entry.le_value_numints != 1) 3098 continue; 3099 3100 if (fzap_leaf_value(&zl, zc) == value) { 3101 fzap_name_copy(&zl, zc, name); 3102 goto done; 3103 } 3104 } 3105 } 3106 3107 rc = ENOENT; 3108 done: 3109 free(zl.l_phys); 3110 return (rc); 3111 } 3112 3113 static int 3114 zap_rlookup(const spa_t *spa, const dnode_phys_t *dnode, char *name, 3115 uint64_t value) 3116 { 3117 zap_phys_t *zap; 3118 size_t size = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT; 3119 int rc; 3120 3121 zap = malloc(size); 3122 if (zap == NULL) 3123 return (ENOMEM); 3124 3125 rc = dnode_read(spa, dnode, 0, zap, size); 3126 if (rc == 0) { 3127 if (zap->zap_block_type == ZBT_MICRO) 3128 rc = mzap_rlookup((const mzap_phys_t *)zap, size, 3129 name, value); 3130 else 3131 rc = fzap_rlookup(spa, dnode, zap, name, value); 3132 } 3133 free(zap); 3134 return (rc); 3135 } 3136 3137 static int 3138 zfs_rlookup(const spa_t *spa, uint64_t objnum, char *result) 3139 { 3140 char name[256]; 3141 char component[256]; 3142 uint64_t dir_obj, parent_obj, child_dir_zapobj; 3143 dnode_phys_t child_dir_zap, snapnames_zap, dataset, dir, parent; 3144 dsl_dir_phys_t *dd; 3145 dsl_dataset_phys_t *ds; 3146 char *p; 3147 int len; 3148 boolean_t issnap = B_FALSE; 3149 3150 p = &name[sizeof(name) - 1]; 3151 *p = '\0'; 3152 3153 if (objset_get_dnode(spa, spa->spa_mos, objnum, &dataset)) { 3154 printf("ZFS: can't find dataset %ju\n", (uintmax_t)objnum); 3155 return (EIO); 3156 } 3157 ds = (dsl_dataset_phys_t *)&dataset.dn_bonus; 3158 dir_obj = ds->ds_dir_obj; 3159 if (ds->ds_snapnames_zapobj == 0) 3160 issnap = B_TRUE; 3161 3162 for (;;) { 3163 if (objset_get_dnode(spa, spa->spa_mos, dir_obj, &dir) != 0) 3164 return (EIO); 3165 dd = (dsl_dir_phys_t *)&dir.dn_bonus; 3166 3167 /* Actual loop condition. */ 3168 parent_obj = dd->dd_parent_obj; 3169 if (parent_obj == 0) 3170 break; 3171 3172 if (objset_get_dnode(spa, spa->spa_mos, parent_obj, 3173 &parent) != 0) 3174 return (EIO); 3175 dd = (dsl_dir_phys_t *)&parent.dn_bonus; 3176 if (issnap == B_TRUE) { 3177 /* 3178 * The dataset we are looking up is a snapshot 3179 * the dir_obj is the parent already, we don't want 3180 * the grandparent just yet. Reset to the parent. 3181 */ 3182 dd = (dsl_dir_phys_t *)&dir.dn_bonus; 3183 /* Lookup the dataset to get the snapname ZAP */ 3184 if (objset_get_dnode(spa, spa->spa_mos, 3185 dd->dd_head_dataset_obj, &dataset)) 3186 return (EIO); 3187 ds = (dsl_dataset_phys_t *)&dataset.dn_bonus; 3188 if (objset_get_dnode(spa, spa->spa_mos, 3189 ds->ds_snapnames_zapobj, &snapnames_zap) != 0) 3190 return (EIO); 3191 /* Get the name of the snapshot */ 3192 if (zap_rlookup(spa, &snapnames_zap, component, 3193 objnum) != 0) 3194 return (EIO); 3195 len = strlen(component); 3196 p -= len; 3197 memcpy(p, component, len); 3198 --p; 3199 *p = '@'; 3200 issnap = B_FALSE; 3201 continue; 3202 } 3203 3204 child_dir_zapobj = dd->dd_child_dir_zapobj; 3205 if (objset_get_dnode(spa, spa->spa_mos, child_dir_zapobj, 3206 &child_dir_zap) != 0) 3207 return (EIO); 3208 if (zap_rlookup(spa, &child_dir_zap, component, dir_obj) != 0) 3209 return (EIO); 3210 3211 len = strlen(component); 3212 p -= len; 3213 memcpy(p, component, len); 3214 --p; 3215 *p = '/'; 3216 3217 /* Actual loop iteration. */ 3218 dir_obj = parent_obj; 3219 } 3220 3221 if (*p != '\0') 3222 ++p; 3223 strcpy(result, p); 3224 3225 return (0); 3226 } 3227 3228 static int 3229 zfs_lookup_dataset(const spa_t *spa, const char *name, uint64_t *objnum) 3230 { 3231 char element[256]; 3232 uint64_t dir_obj, child_dir_zapobj; 3233 dnode_phys_t child_dir_zap, snapnames_zap, dir, dataset; 3234 dsl_dir_phys_t *dd; 3235 dsl_dataset_phys_t *ds; 3236 const char *p, *q; 3237 boolean_t issnap = B_FALSE; 3238 3239 if (objset_get_dnode(spa, spa->spa_mos, 3240 DMU_POOL_DIRECTORY_OBJECT, &dir)) 3241 return (EIO); 3242 if (zap_lookup(spa, &dir, DMU_POOL_ROOT_DATASET, sizeof (dir_obj), 3243 1, &dir_obj)) 3244 return (EIO); 3245 3246 p = name; 3247 for (;;) { 3248 if (objset_get_dnode(spa, spa->spa_mos, dir_obj, &dir)) 3249 return (EIO); 3250 dd = (dsl_dir_phys_t *)&dir.dn_bonus; 3251 3252 while (*p == '/') 3253 p++; 3254 /* Actual loop condition #1. */ 3255 if (*p == '\0') 3256 break; 3257 3258 q = strchr(p, '/'); 3259 if (q) { 3260 memcpy(element, p, q - p); 3261 element[q - p] = '\0'; 3262 p = q + 1; 3263 } else { 3264 strcpy(element, p); 3265 p += strlen(p); 3266 } 3267 3268 if (issnap == B_TRUE) { 3269 if (objset_get_dnode(spa, spa->spa_mos, 3270 dd->dd_head_dataset_obj, &dataset)) 3271 return (EIO); 3272 ds = (dsl_dataset_phys_t *)&dataset.dn_bonus; 3273 if (objset_get_dnode(spa, spa->spa_mos, 3274 ds->ds_snapnames_zapobj, &snapnames_zap) != 0) 3275 return (EIO); 3276 /* Actual loop condition #2. */ 3277 if (zap_lookup(spa, &snapnames_zap, element, 3278 sizeof (dir_obj), 1, &dir_obj) != 0) 3279 return (ENOENT); 3280 *objnum = dir_obj; 3281 return (0); 3282 } else if ((q = strchr(element, '@')) != NULL) { 3283 issnap = B_TRUE; 3284 element[q - element] = '\0'; 3285 p = q + 1; 3286 } 3287 child_dir_zapobj = dd->dd_child_dir_zapobj; 3288 if (objset_get_dnode(spa, spa->spa_mos, child_dir_zapobj, 3289 &child_dir_zap) != 0) 3290 return (EIO); 3291 3292 /* Actual loop condition #2. */ 3293 if (zap_lookup(spa, &child_dir_zap, element, sizeof (dir_obj), 3294 1, &dir_obj) != 0) 3295 return (ENOENT); 3296 } 3297 3298 *objnum = dd->dd_head_dataset_obj; 3299 return (0); 3300 } 3301 3302 #ifndef BOOT2 3303 static int 3304 zfs_list_dataset(const spa_t *spa, uint64_t objnum/*, int pos, char *entry*/) 3305 { 3306 uint64_t dir_obj, child_dir_zapobj; 3307 dnode_phys_t child_dir_zap, dir, dataset; 3308 dsl_dataset_phys_t *ds; 3309 dsl_dir_phys_t *dd; 3310 3311 if (objset_get_dnode(spa, spa->spa_mos, objnum, &dataset)) { 3312 printf("ZFS: can't find dataset %ju\n", (uintmax_t)objnum); 3313 return (EIO); 3314 } 3315 ds = (dsl_dataset_phys_t *)&dataset.dn_bonus; 3316 dir_obj = ds->ds_dir_obj; 3317 3318 if (objset_get_dnode(spa, spa->spa_mos, dir_obj, &dir)) { 3319 printf("ZFS: can't find dirobj %ju\n", (uintmax_t)dir_obj); 3320 return (EIO); 3321 } 3322 dd = (dsl_dir_phys_t *)&dir.dn_bonus; 3323 3324 child_dir_zapobj = dd->dd_child_dir_zapobj; 3325 if (objset_get_dnode(spa, spa->spa_mos, child_dir_zapobj, 3326 &child_dir_zap) != 0) { 3327 printf("ZFS: can't find child zap %ju\n", (uintmax_t)dir_obj); 3328 return (EIO); 3329 } 3330 3331 return (zap_list(spa, &child_dir_zap) != 0); 3332 } 3333 3334 int 3335 zfs_callback_dataset(const spa_t *spa, uint64_t objnum, 3336 int (*callback)(const char *, uint64_t)) 3337 { 3338 uint64_t dir_obj, child_dir_zapobj; 3339 dnode_phys_t child_dir_zap, dir, dataset; 3340 dsl_dataset_phys_t *ds; 3341 dsl_dir_phys_t *dd; 3342 zap_phys_t *zap; 3343 size_t size; 3344 int err; 3345 3346 err = objset_get_dnode(spa, spa->spa_mos, objnum, &dataset); 3347 if (err != 0) { 3348 printf("ZFS: can't find dataset %ju\n", (uintmax_t)objnum); 3349 return (err); 3350 } 3351 ds = (dsl_dataset_phys_t *)&dataset.dn_bonus; 3352 dir_obj = ds->ds_dir_obj; 3353 3354 err = objset_get_dnode(spa, spa->spa_mos, dir_obj, &dir); 3355 if (err != 0) { 3356 printf("ZFS: can't find dirobj %ju\n", (uintmax_t)dir_obj); 3357 return (err); 3358 } 3359 dd = (dsl_dir_phys_t *)&dir.dn_bonus; 3360 3361 child_dir_zapobj = dd->dd_child_dir_zapobj; 3362 err = objset_get_dnode(spa, spa->spa_mos, child_dir_zapobj, 3363 &child_dir_zap); 3364 if (err != 0) { 3365 printf("ZFS: can't find child zap %ju\n", (uintmax_t)dir_obj); 3366 return (err); 3367 } 3368 3369 size = child_dir_zap.dn_datablkszsec << SPA_MINBLOCKSHIFT; 3370 zap = malloc(size); 3371 if (zap != NULL) { 3372 err = dnode_read(spa, &child_dir_zap, 0, zap, size); 3373 if (err != 0) 3374 goto done; 3375 3376 if (zap->zap_block_type == ZBT_MICRO) 3377 err = mzap_list((const mzap_phys_t *)zap, size, 3378 callback); 3379 else 3380 err = fzap_list(spa, &child_dir_zap, zap, callback); 3381 } else { 3382 err = ENOMEM; 3383 } 3384 done: 3385 free(zap); 3386 return (err); 3387 } 3388 #endif 3389 3390 /* 3391 * Find the object set given the object number of its dataset object 3392 * and return its details in *objset 3393 */ 3394 static int 3395 zfs_mount_dataset(const spa_t *spa, uint64_t objnum, objset_phys_t *objset) 3396 { 3397 dnode_phys_t dataset; 3398 dsl_dataset_phys_t *ds; 3399 3400 if (objset_get_dnode(spa, spa->spa_mos, objnum, &dataset)) { 3401 printf("ZFS: can't find dataset %ju\n", (uintmax_t)objnum); 3402 return (EIO); 3403 } 3404 3405 ds = (dsl_dataset_phys_t *)&dataset.dn_bonus; 3406 if (zio_read(spa, &ds->ds_bp, objset)) { 3407 printf("ZFS: can't read object set for dataset %ju\n", 3408 (uintmax_t)objnum); 3409 return (EIO); 3410 } 3411 3412 return (0); 3413 } 3414 3415 /* 3416 * Find the object set pointed to by the BOOTFS property or the root 3417 * dataset if there is none and return its details in *objset 3418 */ 3419 static int 3420 zfs_get_root(const spa_t *spa, uint64_t *objid) 3421 { 3422 dnode_phys_t dir, propdir; 3423 uint64_t props, bootfs, root; 3424 3425 *objid = 0; 3426 3427 /* 3428 * Start with the MOS directory object. 3429 */ 3430 if (objset_get_dnode(spa, spa->spa_mos, 3431 DMU_POOL_DIRECTORY_OBJECT, &dir)) { 3432 printf("ZFS: can't read MOS object directory\n"); 3433 return (EIO); 3434 } 3435 3436 /* 3437 * Lookup the pool_props and see if we can find a bootfs. 3438 */ 3439 if (zap_lookup(spa, &dir, DMU_POOL_PROPS, 3440 sizeof(props), 1, &props) == 0 && 3441 objset_get_dnode(spa, spa->spa_mos, props, &propdir) == 0 && 3442 zap_lookup(spa, &propdir, "bootfs", 3443 sizeof(bootfs), 1, &bootfs) == 0 && bootfs != 0) { 3444 *objid = bootfs; 3445 return (0); 3446 } 3447 /* 3448 * Lookup the root dataset directory 3449 */ 3450 if (zap_lookup(spa, &dir, DMU_POOL_ROOT_DATASET, 3451 sizeof(root), 1, &root) || 3452 objset_get_dnode(spa, spa->spa_mos, root, &dir)) { 3453 printf("ZFS: can't find root dsl_dir\n"); 3454 return (EIO); 3455 } 3456 3457 /* 3458 * Use the information from the dataset directory's bonus buffer 3459 * to find the dataset object and from that the object set itself. 3460 */ 3461 dsl_dir_phys_t *dd = (dsl_dir_phys_t *)&dir.dn_bonus; 3462 *objid = dd->dd_head_dataset_obj; 3463 return (0); 3464 } 3465 3466 static int 3467 zfs_mount_impl(const spa_t *spa, uint64_t rootobj, struct zfsmount *mount) 3468 { 3469 3470 mount->spa = spa; 3471 3472 /* 3473 * Find the root object set if not explicitly provided 3474 */ 3475 if (rootobj == 0 && zfs_get_root(spa, &rootobj)) { 3476 printf("ZFS: can't find root filesystem\n"); 3477 return (EIO); 3478 } 3479 3480 if (zfs_mount_dataset(spa, rootobj, &mount->objset)) { 3481 printf("ZFS: can't open root filesystem\n"); 3482 return (EIO); 3483 } 3484 3485 mount->rootobj = rootobj; 3486 3487 return (0); 3488 } 3489 3490 /* 3491 * callback function for feature name checks. 3492 */ 3493 static int 3494 check_feature(const char *name, uint64_t value) 3495 { 3496 int i; 3497 3498 if (value == 0) 3499 return (0); 3500 if (name[0] == '\0') 3501 return (0); 3502 3503 for (i = 0; features_for_read[i] != NULL; i++) { 3504 if (strcmp(name, features_for_read[i]) == 0) 3505 return (0); 3506 } 3507 printf("ZFS: unsupported feature: %s\n", name); 3508 return (EIO); 3509 } 3510 3511 /* 3512 * Checks whether the MOS features that are active are supported. 3513 */ 3514 static int 3515 check_mos_features(const spa_t *spa) 3516 { 3517 dnode_phys_t dir; 3518 zap_phys_t *zap; 3519 uint64_t objnum; 3520 size_t size; 3521 int rc; 3522 3523 if ((rc = objset_get_dnode(spa, spa->spa_mos, DMU_OT_OBJECT_DIRECTORY, 3524 &dir)) != 0) 3525 return (rc); 3526 if ((rc = zap_lookup(spa, &dir, DMU_POOL_FEATURES_FOR_READ, 3527 sizeof (objnum), 1, &objnum)) != 0) { 3528 /* 3529 * It is older pool without features. As we have already 3530 * tested the label, just return without raising the error. 3531 */ 3532 return (0); 3533 } 3534 3535 if ((rc = objset_get_dnode(spa, spa->spa_mos, objnum, &dir)) != 0) 3536 return (rc); 3537 3538 if (dir.dn_type != DMU_OTN_ZAP_METADATA) 3539 return (EIO); 3540 3541 size = dir.dn_datablkszsec << SPA_MINBLOCKSHIFT; 3542 zap = malloc(size); 3543 if (zap == NULL) 3544 return (ENOMEM); 3545 3546 if (dnode_read(spa, &dir, 0, zap, size)) { 3547 free(zap); 3548 return (EIO); 3549 } 3550 3551 if (zap->zap_block_type == ZBT_MICRO) 3552 rc = mzap_list((const mzap_phys_t *)zap, size, check_feature); 3553 else 3554 rc = fzap_list(spa, &dir, zap, check_feature); 3555 3556 free(zap); 3557 return (rc); 3558 } 3559 3560 static int 3561 load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value) 3562 { 3563 dnode_phys_t dir; 3564 size_t size; 3565 int rc; 3566 char *nv; 3567 3568 *value = NULL; 3569 if ((rc = objset_get_dnode(spa, spa->spa_mos, obj, &dir)) != 0) 3570 return (rc); 3571 if (dir.dn_type != DMU_OT_PACKED_NVLIST && 3572 dir.dn_bonustype != DMU_OT_PACKED_NVLIST_SIZE) { 3573 return (EIO); 3574 } 3575 3576 if (dir.dn_bonuslen != sizeof (uint64_t)) 3577 return (EIO); 3578 3579 size = *(uint64_t *)DN_BONUS(&dir); 3580 nv = malloc(size); 3581 if (nv == NULL) 3582 return (ENOMEM); 3583 3584 rc = dnode_read(spa, &dir, 0, nv, size); 3585 if (rc != 0) { 3586 free(nv); 3587 nv = NULL; 3588 return (rc); 3589 } 3590 *value = nvlist_import(nv, size); 3591 free(nv); 3592 return (rc); 3593 } 3594 3595 static int 3596 zfs_spa_init(spa_t *spa) 3597 { 3598 struct uberblock checkpoint; 3599 dnode_phys_t dir; 3600 uint64_t config_object; 3601 nvlist_t *nvlist; 3602 int rc; 3603 3604 if (zio_read(spa, &spa->spa_uberblock->ub_rootbp, spa->spa_mos)) { 3605 printf("ZFS: can't read MOS of pool %s\n", spa->spa_name); 3606 return (EIO); 3607 } 3608 if (spa->spa_mos->os_type != DMU_OST_META) { 3609 printf("ZFS: corrupted MOS of pool %s\n", spa->spa_name); 3610 return (EIO); 3611 } 3612 3613 if (objset_get_dnode(spa, &spa->spa_mos_master, 3614 DMU_POOL_DIRECTORY_OBJECT, &dir)) { 3615 printf("ZFS: failed to read pool %s directory object\n", 3616 spa->spa_name); 3617 return (EIO); 3618 } 3619 /* this is allowed to fail, older pools do not have salt */ 3620 rc = zap_lookup(spa, &dir, DMU_POOL_CHECKSUM_SALT, 1, 3621 sizeof (spa->spa_cksum_salt.zcs_bytes), 3622 spa->spa_cksum_salt.zcs_bytes); 3623 3624 rc = check_mos_features(spa); 3625 if (rc != 0) { 3626 printf("ZFS: pool %s is not supported\n", spa->spa_name); 3627 return (rc); 3628 } 3629 3630 rc = zap_lookup(spa, &dir, DMU_POOL_CONFIG, 3631 sizeof (config_object), 1, &config_object); 3632 if (rc != 0) { 3633 printf("ZFS: can not read MOS %s\n", DMU_POOL_CONFIG); 3634 return (EIO); 3635 } 3636 rc = load_nvlist(spa, config_object, &nvlist); 3637 if (rc != 0) { 3638 printf("ZFS: failed to load pool %s nvlist\n", spa->spa_name); 3639 return (rc); 3640 } 3641 3642 rc = zap_lookup(spa, &dir, DMU_POOL_ZPOOL_CHECKPOINT, 3643 sizeof(uint64_t), sizeof(checkpoint) / sizeof(uint64_t), 3644 &checkpoint); 3645 if (rc == 0 && checkpoint.ub_checkpoint_txg != 0) { 3646 memcpy(&spa->spa_uberblock_checkpoint, &checkpoint, 3647 sizeof(checkpoint)); 3648 if (zio_read(spa, &spa->spa_uberblock_checkpoint.ub_rootbp, 3649 &spa->spa_mos_checkpoint)) { 3650 printf("ZFS: can not read checkpoint data.\n"); 3651 return (EIO); 3652 } 3653 } 3654 3655 /* 3656 * Update vdevs from MOS config. Note, we do skip encoding bytes 3657 * here. See also vdev_label_read_config(). 3658 */ 3659 rc = vdev_init_from_nvlist(spa, nvlist); 3660 nvlist_destroy(nvlist); 3661 return (rc); 3662 } 3663 3664 static int 3665 zfs_dnode_stat(const spa_t *spa, dnode_phys_t *dn, struct stat *sb) 3666 { 3667 3668 if (dn->dn_bonustype != DMU_OT_SA) { 3669 znode_phys_t *zp = (znode_phys_t *)dn->dn_bonus; 3670 3671 sb->st_mode = zp->zp_mode; 3672 sb->st_uid = zp->zp_uid; 3673 sb->st_gid = zp->zp_gid; 3674 sb->st_size = zp->zp_size; 3675 } else { 3676 sa_hdr_phys_t *sahdrp; 3677 int hdrsize; 3678 size_t size = 0; 3679 void *buf = NULL; 3680 3681 if (dn->dn_bonuslen != 0) 3682 sahdrp = (sa_hdr_phys_t *)DN_BONUS(dn); 3683 else { 3684 if ((dn->dn_flags & DNODE_FLAG_SPILL_BLKPTR) != 0) { 3685 blkptr_t *bp = DN_SPILL_BLKPTR(dn); 3686 int error; 3687 3688 size = BP_GET_LSIZE(bp); 3689 buf = malloc(size); 3690 if (buf == NULL) 3691 error = ENOMEM; 3692 else 3693 error = zio_read(spa, bp, buf); 3694 3695 if (error != 0) { 3696 free(buf); 3697 return (error); 3698 } 3699 sahdrp = buf; 3700 } else { 3701 return (EIO); 3702 } 3703 } 3704 hdrsize = SA_HDR_SIZE(sahdrp); 3705 sb->st_mode = *(uint64_t *)((char *)sahdrp + hdrsize + 3706 SA_MODE_OFFSET); 3707 sb->st_uid = *(uint64_t *)((char *)sahdrp + hdrsize + 3708 SA_UID_OFFSET); 3709 sb->st_gid = *(uint64_t *)((char *)sahdrp + hdrsize + 3710 SA_GID_OFFSET); 3711 sb->st_size = *(uint64_t *)((char *)sahdrp + hdrsize + 3712 SA_SIZE_OFFSET); 3713 free(buf); 3714 } 3715 3716 return (0); 3717 } 3718 3719 static int 3720 zfs_dnode_readlink(const spa_t *spa, dnode_phys_t *dn, char *path, size_t psize) 3721 { 3722 int rc = 0; 3723 3724 if (dn->dn_bonustype == DMU_OT_SA) { 3725 sa_hdr_phys_t *sahdrp = NULL; 3726 size_t size = 0; 3727 void *buf = NULL; 3728 int hdrsize; 3729 char *p; 3730 3731 if (dn->dn_bonuslen != 0) { 3732 sahdrp = (sa_hdr_phys_t *)DN_BONUS(dn); 3733 } else { 3734 blkptr_t *bp; 3735 3736 if ((dn->dn_flags & DNODE_FLAG_SPILL_BLKPTR) == 0) 3737 return (EIO); 3738 bp = DN_SPILL_BLKPTR(dn); 3739 3740 size = BP_GET_LSIZE(bp); 3741 buf = malloc(size); 3742 if (buf == NULL) 3743 rc = ENOMEM; 3744 else 3745 rc = zio_read(spa, bp, buf); 3746 if (rc != 0) { 3747 free(buf); 3748 return (rc); 3749 } 3750 sahdrp = buf; 3751 } 3752 hdrsize = SA_HDR_SIZE(sahdrp); 3753 p = (char *)((uintptr_t)sahdrp + hdrsize + SA_SYMLINK_OFFSET); 3754 memcpy(path, p, psize); 3755 free(buf); 3756 return (0); 3757 } 3758 /* 3759 * Second test is purely to silence bogus compiler 3760 * warning about accessing past the end of dn_bonus. 3761 */ 3762 if (psize + sizeof(znode_phys_t) <= dn->dn_bonuslen && 3763 sizeof(znode_phys_t) <= sizeof(dn->dn_bonus)) { 3764 memcpy(path, &dn->dn_bonus[sizeof(znode_phys_t)], psize); 3765 } else { 3766 rc = dnode_read(spa, dn, 0, path, psize); 3767 } 3768 return (rc); 3769 } 3770 3771 struct obj_list { 3772 uint64_t objnum; 3773 STAILQ_ENTRY(obj_list) entry; 3774 }; 3775 3776 /* 3777 * Lookup a file and return its dnode. 3778 */ 3779 static int 3780 zfs_lookup(const struct zfsmount *mount, const char *upath, dnode_phys_t *dnode) 3781 { 3782 int rc; 3783 uint64_t objnum; 3784 const spa_t *spa; 3785 dnode_phys_t dn; 3786 const char *p, *q; 3787 char element[256]; 3788 char path[1024]; 3789 int symlinks_followed = 0; 3790 struct stat sb; 3791 struct obj_list *entry, *tentry; 3792 STAILQ_HEAD(, obj_list) on_cache = STAILQ_HEAD_INITIALIZER(on_cache); 3793 3794 spa = mount->spa; 3795 if (mount->objset.os_type != DMU_OST_ZFS) { 3796 printf("ZFS: unexpected object set type %ju\n", 3797 (uintmax_t)mount->objset.os_type); 3798 return (EIO); 3799 } 3800 3801 if ((entry = malloc(sizeof(struct obj_list))) == NULL) 3802 return (ENOMEM); 3803 3804 /* 3805 * Get the root directory dnode. 3806 */ 3807 rc = objset_get_dnode(spa, &mount->objset, MASTER_NODE_OBJ, &dn); 3808 if (rc) { 3809 free(entry); 3810 return (rc); 3811 } 3812 3813 rc = zap_lookup(spa, &dn, ZFS_ROOT_OBJ, sizeof(objnum), 1, &objnum); 3814 if (rc) { 3815 free(entry); 3816 return (rc); 3817 } 3818 entry->objnum = objnum; 3819 STAILQ_INSERT_HEAD(&on_cache, entry, entry); 3820 3821 rc = objset_get_dnode(spa, &mount->objset, objnum, &dn); 3822 if (rc != 0) 3823 goto done; 3824 3825 p = upath; 3826 while (p && *p) { 3827 rc = objset_get_dnode(spa, &mount->objset, objnum, &dn); 3828 if (rc != 0) 3829 goto done; 3830 3831 while (*p == '/') 3832 p++; 3833 if (*p == '\0') 3834 break; 3835 q = p; 3836 while (*q != '\0' && *q != '/') 3837 q++; 3838 3839 /* skip dot */ 3840 if (p + 1 == q && p[0] == '.') { 3841 p++; 3842 continue; 3843 } 3844 /* double dot */ 3845 if (p + 2 == q && p[0] == '.' && p[1] == '.') { 3846 p += 2; 3847 if (STAILQ_FIRST(&on_cache) == 3848 STAILQ_LAST(&on_cache, obj_list, entry)) { 3849 rc = ENOENT; 3850 goto done; 3851 } 3852 entry = STAILQ_FIRST(&on_cache); 3853 STAILQ_REMOVE_HEAD(&on_cache, entry); 3854 free(entry); 3855 objnum = (STAILQ_FIRST(&on_cache))->objnum; 3856 continue; 3857 } 3858 if (q - p + 1 > sizeof(element)) { 3859 rc = ENAMETOOLONG; 3860 goto done; 3861 } 3862 memcpy(element, p, q - p); 3863 element[q - p] = 0; 3864 p = q; 3865 3866 if ((rc = zfs_dnode_stat(spa, &dn, &sb)) != 0) 3867 goto done; 3868 if (!S_ISDIR(sb.st_mode)) { 3869 rc = ENOTDIR; 3870 goto done; 3871 } 3872 3873 rc = zap_lookup(spa, &dn, element, sizeof (objnum), 1, &objnum); 3874 if (rc) 3875 goto done; 3876 objnum = ZFS_DIRENT_OBJ(objnum); 3877 3878 if ((entry = malloc(sizeof(struct obj_list))) == NULL) { 3879 rc = ENOMEM; 3880 goto done; 3881 } 3882 entry->objnum = objnum; 3883 STAILQ_INSERT_HEAD(&on_cache, entry, entry); 3884 rc = objset_get_dnode(spa, &mount->objset, objnum, &dn); 3885 if (rc) 3886 goto done; 3887 3888 /* 3889 * Check for symlink. 3890 */ 3891 rc = zfs_dnode_stat(spa, &dn, &sb); 3892 if (rc) 3893 goto done; 3894 if (S_ISLNK(sb.st_mode)) { 3895 if (symlinks_followed > 10) { 3896 rc = EMLINK; 3897 goto done; 3898 } 3899 symlinks_followed++; 3900 3901 /* 3902 * Read the link value and copy the tail of our 3903 * current path onto the end. 3904 */ 3905 if (sb.st_size + strlen(p) + 1 > sizeof(path)) { 3906 rc = ENAMETOOLONG; 3907 goto done; 3908 } 3909 strcpy(&path[sb.st_size], p); 3910 3911 rc = zfs_dnode_readlink(spa, &dn, path, sb.st_size); 3912 if (rc != 0) 3913 goto done; 3914 3915 /* 3916 * Restart with the new path, starting either at 3917 * the root or at the parent depending whether or 3918 * not the link is relative. 3919 */ 3920 p = path; 3921 if (*p == '/') { 3922 while (STAILQ_FIRST(&on_cache) != 3923 STAILQ_LAST(&on_cache, obj_list, entry)) { 3924 entry = STAILQ_FIRST(&on_cache); 3925 STAILQ_REMOVE_HEAD(&on_cache, entry); 3926 free(entry); 3927 } 3928 } else { 3929 entry = STAILQ_FIRST(&on_cache); 3930 STAILQ_REMOVE_HEAD(&on_cache, entry); 3931 free(entry); 3932 } 3933 objnum = (STAILQ_FIRST(&on_cache))->objnum; 3934 } 3935 } 3936 3937 *dnode = dn; 3938 done: 3939 STAILQ_FOREACH_SAFE(entry, &on_cache, entry, tentry) 3940 free(entry); 3941 return (rc); 3942 } 3943 3944 /* 3945 * Return either a cached copy of the bootenv, or read each of the vdev children 3946 * looking for the bootenv. Cache what's found and return the results. Returns 0 3947 * when benvp is filled in, and some errno when not. 3948 */ 3949 static int 3950 zfs_get_bootenv_spa(spa_t *spa, nvlist_t **benvp) 3951 { 3952 vdev_t *vd; 3953 nvlist_t *benv = NULL; 3954 3955 if (spa->spa_bootenv == NULL) { 3956 STAILQ_FOREACH(vd, &spa->spa_root_vdev->v_children, 3957 v_childlink) { 3958 benv = vdev_read_bootenv(vd); 3959 3960 if (benv != NULL) 3961 break; 3962 } 3963 spa->spa_bootenv = benv; 3964 } 3965 benv = spa->spa_bootenv; 3966 3967 if (benv == NULL) 3968 return (ENOENT); 3969 3970 *benvp = benv; 3971 return (0); 3972 } 3973 3974 /* 3975 * Store nvlist to pool label bootenv area. Also updates cached pointer in spa. 3976 */ 3977 static int 3978 zfs_set_bootenv_spa(spa_t *spa, nvlist_t *benv) 3979 { 3980 vdev_t *vd; 3981 3982 STAILQ_FOREACH(vd, &spa->spa_root_vdev->v_children, v_childlink) { 3983 vdev_write_bootenv(vd, benv); 3984 } 3985 3986 spa->spa_bootenv = benv; 3987 return (0); 3988 } 3989 3990 /* 3991 * Get bootonce value by key. The bootonce <key, value> pair is removed from the 3992 * bootenv nvlist and the remaining nvlist is committed back to disk. This process 3993 * the bootonce flag since we've reached the point in the boot that we've 'used' 3994 * the BE. For chained boot scenarios, we may reach this point multiple times (but 3995 * only remove it and return 0 the first time). 3996 */ 3997 static int 3998 zfs_get_bootonce_spa(spa_t *spa, const char *key, char *buf, size_t size) 3999 { 4000 nvlist_t *benv; 4001 char *result = NULL; 4002 int result_size, rv; 4003 4004 if ((rv = zfs_get_bootenv_spa(spa, &benv)) != 0) 4005 return (rv); 4006 4007 if ((rv = nvlist_find(benv, key, DATA_TYPE_STRING, NULL, 4008 &result, &result_size)) == 0) { 4009 if (result_size == 0) { 4010 /* ignore empty string */ 4011 rv = ENOENT; 4012 } else if (buf != NULL) { 4013 size = MIN((size_t)result_size + 1, size); 4014 strlcpy(buf, result, size); 4015 } 4016 (void)nvlist_remove(benv, key, DATA_TYPE_STRING); 4017 (void)zfs_set_bootenv_spa(spa, benv); 4018 } 4019 4020 return (rv); 4021 } 4022