/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Pool import support functions.
 *
 * To import a pool, we rely on reading the configuration information from the
 * ZFS label of each device.  If we successfully read the label, then we
 * organize the configuration information in the following hierarchy:
 *
 *	pool guid -> toplevel vdev guid -> label txg
 *
 * Duplicate entries matching this same tuple will be discarded.  Once we have
 * examined every device, we pick the best label txg config for each toplevel
 * vdev.  We then arrange these toplevel vdevs into a complete pool config, and
 * update any paths that have changed.  Finally, we attempt to import the pool
 * using our derived config, and record the results.
 */

#include <devid.h>
#include <dirent.h>
#include <errno.h>
#include <libintl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>

#include <sys/vdev_impl.h>

#include "libzfs.h"
#include "libzfs_impl.h"

/*
 * Intermediate structures used to gather configuration information.
 */
typedef struct config_entry {
	uint64_t		ce_txg;
	nvlist_t		*ce_config;
	struct config_entry	*ce_next;
} config_entry_t;

typedef struct vdev_entry {
	uint64_t		ve_guid;
	config_entry_t		*ve_configs;
	struct vdev_entry	*ve_next;
} vdev_entry_t;

typedef struct pool_entry {
	uint64_t		pe_guid;
	vdev_entry_t		*pe_vdevs;
	struct pool_entry	*pe_next;
} pool_entry_t;

typedef struct name_entry {
	const char		*ne_name;
	uint64_t		ne_guid;
	struct name_entry	*ne_next;
} name_entry_t;

typedef struct pool_list {
	pool_entry_t		*pools;
	name_entry_t		*names;
} pool_list_t;

static char *
get_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}

/*
 * Go through and fix up any path and/or devid information for the given vdev
 * configuration.
 */
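/*
 * For illustration (device names are hypothetical): if a vdev with a given
 * guid was last seen at /dev/dsk/c0t1d0s4, and the name list now offers both
 * /dev/dsk/c1t1d0s4 and /dev/dsk/c1t1d0s2 for that guid, the end-of-string
 * comparison below prefers c1t1d0s4, which shares the longer trailing run
 * ("t1d0s4") with the original path.
 */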
static void
fix_paths(nvlist_t *nv, name_entry_t *names)
{
	nvlist_t **child;
	uint_t c, children;
	uint64_t guid;
	name_entry_t *ne, *best;
	char *path, *devid;
	int matched;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			fix_paths(child[c], names);
		return;
	}

	/*
	 * This is a leaf (file or disk) vdev.  In either case, go through
	 * the name list and see if we find a matching guid.  If so, replace
	 * the path and see if we can calculate a new devid.
	 *
	 * There may be multiple names associated with a particular guid, in
	 * which case we have overlapping slices or multiple paths to the same
	 * disk.  If this is the case, then we want to pick the path that is
	 * the most similar to the original, where "most similar" is the number
	 * of matching characters starting from the end of the path.  This will
	 * preserve slice numbers even if the disks have been reorganized, and
	 * will also catch preferred disk names if multiple paths exist.
	 */
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
		path = NULL;

	matched = 0;
	best = NULL;
	for (ne = names; ne != NULL; ne = ne->ne_next) {
		if (ne->ne_guid == guid) {
			const char *src, *dst;
			int count;

			if (path == NULL) {
				best = ne;
				break;
			}

			src = ne->ne_name + strlen(ne->ne_name) - 1;
			dst = path + strlen(path) - 1;
			for (count = 0; src >= ne->ne_name && dst >= path;
			    src--, dst--, count++)
				if (*src != *dst)
					break;

			/*
			 * At this point, 'count' is the number of characters
			 * matched from the end.
			 */
			if (count > matched || best == NULL) {
				best = ne;
				matched = count;
			}
		}
	}

	if (best == NULL)
		return;

	verify(nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) == 0);

	if ((devid = get_devid(best->ne_name)) == NULL) {
		(void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
	} else {
		verify(nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) == 0);
		devid_str_free(devid);
	}
}

/*
 * Add the given configuration to the list of known devices.
 */
static void
add_config(pool_list_t *pl, const char *path, nvlist_t *config)
{
	uint64_t pool_guid, vdev_guid, top_guid, txg;
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	name_entry_t *ne;

	/*
	 * If we have a valid config but cannot read any of these fields, then
	 * it means we have a half-initialized label.  In vdev_label_init()
	 * we write a label with txg == 0 so that we can identify the device
	 * in case the user refers to the same disk later on.  If we fail to
	 * create the pool, we'll be left with a label in this state
	 * which should not be considered part of a valid pool.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &pool_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
	    &top_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &txg) != 0 || txg == 0) {
		nvlist_free(config);
		return;
	}
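
	/*
	 * A sketch of the structure being built (guids and txgs are made up):
	 * scanning the four slices of a two-disk pool might yield
	 *
	 *	pool 0x1234
	 *	    toplevel vdev 0xaaaa: txg 100, txg 99
	 *	    toplevel vdev 0xbbbb: txg 100
	 *
	 * A label that duplicates an existing (pool, toplevel vdev, txg)
	 * tuple is dropped below, and get_configs() later keeps only the
	 * newest txg for each toplevel vdev.
	 */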

	/*
	 * First, see if we know about this pool.  If not, then add it to the
	 * list of known pools.
	 */
	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		if (pe->pe_guid == pool_guid)
			break;
	}

	if (pe == NULL) {
		pe = zfs_malloc(sizeof (pool_entry_t));
		pe->pe_guid = pool_guid;
		pe->pe_next = pl->pools;
		pl->pools = pe;
	}

	/*
	 * Second, see if we know about this toplevel vdev.  Add it if it's
	 * missing.
	 */
	for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
		if (ve->ve_guid == top_guid)
			break;
	}

	if (ve == NULL) {
		ve = zfs_malloc(sizeof (vdev_entry_t));
		ve->ve_guid = top_guid;
		ve->ve_next = pe->pe_vdevs;
		pe->pe_vdevs = ve;
	}

	/*
	 * Third, see if we have a config with a matching transaction group.
	 * If so, then we do nothing.  Otherwise, add it to the list of known
	 * configs.
	 */
	for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
		if (ce->ce_txg == txg)
			break;
	}

	if (ce == NULL) {
		ce = zfs_malloc(sizeof (config_entry_t));
		ce->ce_txg = txg;
		ce->ce_config = config;
		ce->ce_next = ve->ve_configs;
		ve->ve_configs = ce;
	} else {
		nvlist_free(config);
	}

	/*
	 * At this point we've successfully added our config to the list of
	 * known configs.  The last thing to do is add the vdev guid -> path
	 * mappings so that we can fix up the configuration as necessary before
	 * doing the import.
	 */
	ne = zfs_malloc(sizeof (name_entry_t));

	ne->ne_name = zfs_strdup(path);
	ne->ne_guid = vdev_guid;
	ne->ne_next = pl->names;
	pl->names = ne;
}

/*
 * Convert our list of pools into the definitive set of configurations.  We
 * start by picking the best config for each toplevel vdev.  Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool.  We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 */
static nvlist_t *
get_configs(pool_list_t *pl)
{
	pool_entry_t *pe, *penext;
	vdev_entry_t *ve, *venext;
	config_entry_t *ce, *cenext;
	nvlist_t *ret, *config, *tmp, *nvtop, *nvroot;
	int config_seen;
	uint64_t best_txg;
	char *name;
	zfs_cmd_t zc = { 0 };
	uint64_t guid;
	char *packed;
	size_t len;
	int err;

	verify(nvlist_alloc(&ret, 0, 0) == 0);

	for (pe = pl->pools; pe != NULL; pe = penext) {
		uint_t c;
		uint_t children = 0;
		uint64_t id;
		nvlist_t **child = NULL;

		penext = pe->pe_next;

		verify(nvlist_alloc(&config, NV_UNIQUE_NAME, 0) == 0);
		config_seen = FALSE;

		/*
		 * Iterate over all toplevel vdevs.  Grab the pool
		 * configuration from the first one we find, and then go
		 * through the rest and add them as necessary to the 'vdevs'
		 * member of the config.
		 */
		for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
			venext = ve->ve_next;

			/*
			 * Determine the best configuration for this vdev by
			 * selecting the config with the latest transaction
			 * group.
			 */
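			/*
			 * For example (txgs are hypothetical): if this vdev
			 * has configs for txgs 99, 100, and 98, the scan
			 * below leaves 'tmp' pointing at the txg 100 config.
			 */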
			best_txg = 0;
			for (ce = ve->ve_configs; ce != NULL;
			    ce = ce->ce_next) {
				if (ce->ce_txg > best_txg) {
					tmp = ce->ce_config;
					best_txg = ce->ce_txg;
				}
			}

			if (!config_seen) {
				/*
				 * Copy the relevant pieces of data to the pool
				 * configuration:
				 *
				 *	pool guid
				 *	name
				 *	pool state
				 */
				uint64_t state;

				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
				verify(nvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_GUID, guid) == 0);
				verify(nvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_POOL_NAME, &name) == 0);
				verify(nvlist_add_string(config,
				    ZPOOL_CONFIG_POOL_NAME, name) == 0);
				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_STATE, &state) == 0);
				verify(nvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_STATE, state) == 0);

				config_seen = TRUE;
			}

			/*
			 * Add this top-level vdev to the child array.
			 */
			verify(nvlist_lookup_nvlist(tmp,
			    ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
			verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
			    &id) == 0);
			if (id >= children) {
				nvlist_t **newchild;

				newchild = zfs_malloc((id + 1) *
				    sizeof (nvlist_t *));

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = id + 1;
			}
			verify(nvlist_dup(nvtop, &child[id], 0) == 0);

			/*
			 * Go through and free all config information.
			 */
			for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
				cenext = ce->ce_next;

				nvlist_free(ce->ce_config);
				free(ce);
			}

			/*
			 * Free this vdev entry, since it has now been merged
			 * into the main config.
			 */
			free(ve);
		}

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		/*
		 * Look for any missing top-level vdevs.  If this is the case,
		 * create a faked up 'missing' vdev as a placeholder.  We
		 * cannot simply compress the child array, because the kernel
		 * performs certain checks to make sure the vdev IDs match
		 * their location in the configuration.
		 */
		for (c = 0; c < children; c++)
			if (child[c] == NULL) {
				nvlist_t *missing;
				verify(nvlist_alloc(&missing, NV_UNIQUE_NAME,
				    0) == 0);
				verify(nvlist_add_string(missing,
				    ZPOOL_CONFIG_TYPE, VDEV_TYPE_MISSING) == 0);
				verify(nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_ID, c) == 0);
				verify(nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_GUID, 0ULL) == 0);
				child[c] = missing;
			}

		/*
		 * Put all of this pool's top-level vdevs into a root vdev.
		 */
		verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
		verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) == 0);
		verify(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0);
		verify(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) == 0);
		verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
		    child, children) == 0);

		for (c = 0; c < children; c++)
			nvlist_free(child[c]);
		free(child);

		/*
		 * Go through and fix up any paths and/or devids based on our
		 * known list of vdev GUID -> path mappings.
		 */
		fix_paths(nvroot, pl->names);

		/*
		 * Add the root vdev to this pool's configuration.
		 */
		verify(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    nvroot) == 0);
		nvlist_free(nvroot);
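
		/*
		 * The derived config now has roughly this shape (values are
		 * hypothetical):
		 *
		 *	pool_guid:	0x1234
		 *	name:		'tank'
		 *	state:		POOL_STATE_EXPORTED
		 *	vdev_tree:	type='root', id=0, guid=0x1234
		 *	    children[0]: type='disk', id=0, guid=0xaaaa
		 *	    children[1]: type='missing', id=1, guid=0
		 */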

		/*
		 * Free this pool entry.
		 */
		free(pe);

		/*
		 * Determine if this pool is currently active, in which case we
		 * can't actually import it.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		(void) strlcpy(zc.zc_name, name, sizeof (zc.zc_name));
		if (ioctl(zfs_fd, ZFS_IOC_POOL_GUID, &zc) == 0 &&
		    guid == zc.zc_pool_guid) {
			nvlist_free(config);
			continue;
		}

		/*
		 * Try to do the import in order to get vdev state.
		 */
		if ((err = nvlist_size(config, &len, NV_ENCODE_NATIVE)) != 0)
			zfs_baderror(err);

		packed = zfs_malloc(len);

		if ((err = nvlist_pack(config, &packed, &len,
		    NV_ENCODE_NATIVE, 0)) != 0)
			zfs_baderror(err);

		nvlist_free(config);
		config = NULL;

		zc.zc_config_src_size = len;
		zc.zc_config_src = (uint64_t)(uintptr_t)packed;

		zc.zc_config_dst_size = 2 * len;
		zc.zc_config_dst = (uint64_t)(uintptr_t)
		    zfs_malloc(zc.zc_config_dst_size);

		while ((err = ioctl(zfs_fd, ZFS_IOC_POOL_TRYIMPORT,
		    &zc)) != 0 && errno == ENOMEM) {
			free((void *)(uintptr_t)zc.zc_config_dst);
			zc.zc_config_dst = (uint64_t)(uintptr_t)
			    zfs_malloc(zc.zc_config_dst_size);
		}

		free(packed);

		if (err)
			zfs_baderror(errno);

		verify(nvlist_unpack((void *)(uintptr_t)zc.zc_config_dst,
		    zc.zc_config_dst_size, &config, 0) == 0);

		set_pool_health(config);

		/*
		 * Add this pool to the list of configs.
		 */
		verify(nvlist_add_nvlist(ret, name, config) == 0);

		nvlist_free(config);

		free((void *)(uintptr_t)zc.zc_config_dst);
	}

	return (ret);
}

/*
 * Return the offset of the given label.
 */
static uint64_t
label_offset(size_t size, int l)
{
	return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
	    0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}

/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one.
 */
nvlist_t *
zpool_read_label(int fd)
{
	struct stat64 statbuf;
	int l;
	vdev_label_t *label;
	nvlist_t *config;
	uint64_t version, state, txg;

	if (fstat64(fd, &statbuf) == -1)
		return (NULL);

	label = zfs_malloc(sizeof (vdev_label_t));

	for (l = 0; l < VDEV_LABELS; l++) {
		if (pread(fd, label, sizeof (vdev_label_t),
		    label_offset(statbuf.st_size, l)) != sizeof (vdev_label_t))
			continue;

		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
		    sizeof (label->vl_vdev_phys.vp_nvlist), &config, 0) != 0)
			continue;

		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
		    &version) != 0 || version != UBERBLOCK_VERSION) {
			nvlist_free(config);
			continue;
		}

		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state > POOL_STATE_EXPORTED) {
			nvlist_free(config);
			continue;
		}

		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0) {
			nvlist_free(config);
			continue;
		}

		free(label);
		return (config);
	}

	free(label);
	return (NULL);
}
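
/*
 * A note on layout, following the arithmetic in label_offset() above: the
 * first VDEV_LABELS / 2 labels sit at the front of the device and the rest
 * at the very end, so with four labels of size L on a device of size S the
 * offsets come out to 0, L, S - 2L, and S - L.  Any one intact copy is
 * enough for zpool_read_label() to recover a config.
 */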

/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no args are
 * given (argc is 0), then the default directory (/dev/dsk) is searched.
 */
nvlist_t *
zpool_find_import(int argc, char **argv)
{
	int i;
	DIR *dirp;
	struct dirent64 *dp;
	char path[MAXPATHLEN];
	struct stat64 statbuf;
	nvlist_t *ret, *config;
	static char *default_dir = "/dev/dsk";
	int fd;
	pool_list_t pools = { 0 };

	if (argc == 0) {
		argc = 1;
		argv = &default_dir;
	}

	/*
	 * Go through and read the label configuration information from every
	 * possible device, organizing the information according to pool GUID
	 * and toplevel GUID.
	 */
	for (i = 0; i < argc; i++) {
		if (argv[i][0] != '/') {
			zfs_error(dgettext(TEXT_DOMAIN,
			    "cannot open '%s': must be an absolute path"),
			    argv[i]);
			return (NULL);
		}

		if ((dirp = opendir(argv[i])) == NULL) {
			zfs_error(dgettext(TEXT_DOMAIN,
			    "cannot open '%s': %s"), argv[i],
			    strerror(errno));
			return (NULL);
		}

		/*
		 * This is not MT-safe, but we have no MT consumers of libzfs.
		 */
		while ((dp = readdir64(dirp)) != NULL) {
			(void) snprintf(path, sizeof (path), "%s/%s",
			    argv[i], dp->d_name);

			if (stat64(path, &statbuf) != 0)
				continue;

			/*
			 * Ignore directories (which includes "." and "..").
			 */
			if (S_ISDIR(statbuf.st_mode))
				continue;

			if ((fd = open64(path, O_RDONLY)) < 0)
				continue;

			config = zpool_read_label(fd);

			(void) close(fd);

			if (config != NULL)
				add_config(&pools, path, config);
		}

		(void) closedir(dirp);
	}

	ret = get_configs(&pools);

	return (ret);
}

int
find_guid(nvlist_t *nv, uint64_t guid)
{
	uint64_t tmp;
	nvlist_t **child;
	uint_t c, children;

	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
	if (tmp == guid)
		return (TRUE);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_guid(child[c], guid))
				return (TRUE);
	}

	return (FALSE);
}
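
/*
 * For example (guids are hypothetical): given a root vdev whose only child
 * is a mirror (guid 0xaaaa) of two disks (0xbbbb and 0xcccc), find_guid()
 * returns TRUE for any of those three guids and FALSE for a guid that
 * appears nowhere in the tree.
 */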

/*
 * Determines if the pool is in use.  If so, it returns TRUE and the state of
 * the pool as well as the name of the pool.  The name string is allocated and
 * must be freed by the caller.
 */
int
zpool_in_use(int fd, pool_state_t *state, char **namestr)
{
	nvlist_t *config;
	char *name;
	int ret;
	zfs_cmd_t zc = { 0 };
	uint64_t guid, vdev_guid;
	zpool_handle_t *zhp;
	nvlist_t *pool_config;
	uint64_t stateval;

	if ((config = zpool_read_label(fd)) == NULL)
		return (FALSE);

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &name) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &guid) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) == 0);

	switch (stateval) {
	case POOL_STATE_EXPORTED:
		ret = TRUE;
		break;

	case POOL_STATE_ACTIVE:
		/*
		 * For an active pool, we have to determine if it's really part
		 * of a currently active pool (in which case the pool will
		 * exist and the guid will be the same), or whether it's part
		 * of an active pool that was disconnected without being
		 * explicitly exported.
		 *
		 * We use the direct ioctl() first to avoid triggering an error
		 * message if the pool cannot be opened.
		 */
		(void) strlcpy(zc.zc_name, name, sizeof (zc.zc_name));
		if (ioctl(zfs_fd, ZFS_IOC_POOL_GUID, &zc) == 0 &&
		    guid == zc.zc_pool_guid) {
			/*
			 * Because the device may have been removed while
			 * offlined, we only report it as active if the vdev is
			 * still present in the config.  Otherwise, pretend
			 * like it's not in use.
			 */
			if ((zhp = zpool_open_canfail(name)) != NULL &&
			    (pool_config = zpool_get_config(zhp, NULL))
			    != NULL) {
				nvlist_t *nvroot;

				verify(nvlist_lookup_nvlist(pool_config,
				    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
				ret = find_guid(nvroot, vdev_guid);
			} else {
				ret = FALSE;
			}
		} else {
			stateval = POOL_STATE_POTENTIALLY_ACTIVE;
			ret = TRUE;
		}
		break;

	default:
		ret = FALSE;
	}

	if (ret) {
		*namestr = zfs_strdup(name);
		*state = (pool_state_t)stateval;
	}

	nvlist_free(config);
	return (ret);
}