/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright 2019 Joyent, Inc.
 * Copyright 2024 OmniOS Community Edition (OmniOSce) Association.
 */

/*
 * The ZFS retire agent is responsible for managing hot spares across all pools.
 * When we see a device fault or a device removal, we try to open the associated
 * pool and look for any hot spares. We iterate over any available hot spares
 * and attempt a 'zpool replace' for each one.
 *
 * For vdevs diagnosed as faulty, the agent is also responsible for proactively
 * marking the vdev FAULTY (for I/O errors) or DEGRADED (for checksum errors).
 */

#include <fm/fmd_api.h>
#include <sys/fs/zfs.h>
#include <sys/fm/protocol.h>
#include <sys/fm/fs/zfs.h>
#include <libzfs.h>
#include <fm/libtopo.h>
#include <string.h>

typedef struct zfs_retire_repaired {
	struct zfs_retire_repaired	*zrr_next;
	uint64_t			zrr_pool;
	uint64_t			zrr_vdev;
} zfs_retire_repaired_t;

typedef struct zfs_retire_data {
	libzfs_handle_t			*zrd_hdl;
	zfs_retire_repaired_t		*zrd_repaired;
} zfs_retire_data_t;

static void
zfs_retire_clear_data(fmd_hdl_t *hdl, zfs_retire_data_t *zdp)
{
	zfs_retire_repaired_t *zrp;

	while ((zrp = zdp->zrd_repaired) != NULL) {
		zdp->zrd_repaired = zrp->zrr_next;
		fmd_hdl_free(hdl, zrp, sizeof (zfs_retire_repaired_t));
	}
}

/*
 * Find a pool with a matching GUID.
 */
typedef struct find_cbdata {
	uint64_t	cb_guid;
	const char	*cb_fru;
	zpool_handle_t	*cb_zhp;
	nvlist_t	*cb_vdev;
} find_cbdata_t;

static int
find_pool(zpool_handle_t *zhp, void *data)
{
	find_cbdata_t *cbp = data;

	if (cbp->cb_guid ==
	    zpool_get_prop_int(zhp, ZPOOL_PROP_GUID, NULL)) {
		cbp->cb_zhp = zhp;
		return (1);
	}

	zpool_close(zhp);
	return (0);
}
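
/*
 * Note: zpool_iter() stops iterating as soon as a callback returns non-zero,
 * so a return value of 1 above (and from search_pool() below) means a match
 * was found and the corresponding handle is left open for the caller, while
 * handles for non-matching pools are closed by the callback itself.
 */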

/*
 * Find a vdev within a tree with a matching GUID.
 */
static nvlist_t *
find_vdev(libzfs_handle_t *zhdl, nvlist_t *nv, const char *search_fru,
    uint64_t search_guid)
{
	uint64_t guid;
	nvlist_t **child;
	uint_t c, children;
	nvlist_t *ret;
	char *fru;

	if (search_fru != NULL) {
		if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &fru) == 0 &&
		    libzfs_fru_compare(zhdl, fru, search_fru))
			return (nv);
	} else {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
		    guid == search_guid)
			return (nv);
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = find_vdev(zhdl, child[c], search_fru,
		    search_guid)) != NULL)
			return (ret);
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = find_vdev(zhdl, child[c], search_fru,
			    search_guid)) != NULL)
				return (ret);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = find_vdev(zhdl, child[c], search_fru,
			    search_guid)) != NULL)
				return (ret);
		}
	}

	return (NULL);
}

/*
 * Given a (pool, vdev) GUID pair, find the matching pool and vdev.
 */
static zpool_handle_t *
find_by_guid(libzfs_handle_t *zhdl, uint64_t pool_guid, uint64_t vdev_guid,
    nvlist_t **vdevp)
{
	find_cbdata_t cb;
	zpool_handle_t *zhp;
	nvlist_t *config, *nvroot;

	/*
	 * Find the corresponding pool and make sure the vdev still exists.
	 */
	cb.cb_guid = pool_guid;
	if (zpool_iter(zhdl, find_pool, &cb) != 1)
		return (NULL);

	zhp = cb.cb_zhp;
	config = zpool_get_config(zhp, NULL);
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (vdev_guid != 0) {
		if ((*vdevp = find_vdev(zhdl, nvroot, NULL,
		    vdev_guid)) == NULL) {
			zpool_close(zhp);
			return (NULL);
		}
	}

	return (zhp);
}
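
/*
 * zpool_iter() callback: search the pool's vdev tree for a vdev whose FRU
 * matches cb_fru, recording both the pool handle and the vdev on a match.
 */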
static int
search_pool(zpool_handle_t *zhp, void *data)
{
	find_cbdata_t *cbp = data;
	nvlist_t *config;
	nvlist_t *nvroot;

	config = zpool_get_config(zhp, NULL);
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) != 0) {
		zpool_close(zhp);
		return (0);
	}

	if ((cbp->cb_vdev = find_vdev(zpool_get_handle(zhp), nvroot,
	    cbp->cb_fru, 0)) != NULL) {
		cbp->cb_zhp = zhp;
		return (1);
	}

	zpool_close(zhp);
	return (0);
}

/*
 * Given a FRU FMRI, find the matching pool and vdev.
 */
static zpool_handle_t *
find_by_fru(libzfs_handle_t *zhdl, const char *fru, nvlist_t **vdevp)
{
	find_cbdata_t cb;

	cb.cb_fru = fru;
	cb.cb_zhp = NULL;
	if (zpool_iter(zhdl, search_pool, &cb) != 1)
		return (NULL);

	*vdevp = cb.cb_vdev;
	return (cb.cb_zhp);
}

/*
 * Callback for sorting spares by increasing size.
 */
static int
sort_spares_by_size(const void *ap, const void *bp)
{
	nvlist_t *a = *(nvlist_t **)ap;
	nvlist_t *b = *(nvlist_t **)bp;
	vdev_stat_t *vsa, *vsb;
	vdev_stat_t v0 = { 0 };
	uint_t c;

	if (nvlist_lookup_uint64_array(a, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vsa, &c) != 0) {
		vsa = &v0;
	}

	if (nvlist_lookup_uint64_array(b, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vsb, &c) != 0) {
		vsb = &v0;
	}

	if (vsa->vs_rsize > vsb->vs_rsize)
		return (1);
	if (vsa->vs_rsize < vsb->vs_rsize)
		return (-1);
	return (0);
}

/*
 * Given a vdev, attempt to replace it with every known spare until one
 * succeeds. The spares are first sorted by increasing size so that the
 * smallest possible replacement is used.
 */
static void
replace_with_spare(fmd_hdl_t *hdl, zpool_handle_t *zhp, nvlist_t *vdev)
{
	nvlist_t *config, *nvroot, *replacement;
	nvlist_t **spares, **sorted_spares;
	uint_t s, nspares;
	char *dev_name;
	zprop_source_t source;
	int ashift;
	zfs_retire_data_t *zdp = fmd_hdl_getspecific(hdl);
	libzfs_handle_t *zhdl = zdp->zrd_hdl;

	config = zpool_get_config(zhp, NULL);
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) != 0) {
		return;
	}

	/*
	 * Find out if there are any hot spares available in the pool.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) != 0) {
		return;
	}

	/*
	 * Look up the "ashift" pool property; we may need it for the
	 * replacement.
	 */
	ashift = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &source);

	replacement = fmd_nvl_alloc(hdl, FMD_SLEEP);

	(void) nvlist_add_string(replacement, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_ROOT);

	dev_name = zpool_vdev_name(zhdl, zhp, vdev, B_FALSE);

	/*
	 * Try to replace each spare, starting with the smallest and ending
	 * when we successfully replace it.
	 */
	sorted_spares = fmd_hdl_alloc(hdl, nspares * sizeof (nvlist_t *),
	    FMD_SLEEP);
	for (s = 0; s < nspares; s++)
		sorted_spares[s] = spares[s];
	qsort((void *)sorted_spares, nspares, sizeof (nvlist_t *),
	    sort_spares_by_size);

	for (s = 0; s < nspares; s++) {
		nvlist_t *spare = sorted_spares[s];
		char *spare_name;

		if (nvlist_lookup_string(spare, ZPOOL_CONFIG_PATH,
		    &spare_name) != 0) {
			continue;
		}

		/* if set, add the "ashift" pool property to the spare nvlist */
		if (source != ZPROP_SRC_DEFAULT) {
			(void) nvlist_add_uint64(spare,
			    ZPOOL_CONFIG_ASHIFT, ashift);
		}

		(void) nvlist_add_nvlist_array(replacement,
		    ZPOOL_CONFIG_CHILDREN, &spare, 1);

		if (zpool_vdev_attach(zhp, dev_name, spare_name,
		    replacement, B_TRUE) == 0) {
			break;
		}
	}

	fmd_hdl_free(hdl, sorted_spares, nspares * sizeof (nvlist_t *));
	free(dev_name);
	nvlist_free(replacement);
}
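
/*
 * Note: zpool_vdev_attach() with its final 'replacing' argument set to
 * B_TRUE is the library-level equivalent of
 * 'zpool replace <pool> <device> <spare>'.
 */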

/*
 * Repair this vdev if we had diagnosed a 'fault.fs.zfs.device' and the
 * ASRU is now usable. ZFS has found the device to be present and
 * functioning.
 */
/*ARGSUSED*/
void
zfs_vdev_repair(fmd_hdl_t *hdl, nvlist_t *nvl)
{
	zfs_retire_data_t *zdp = fmd_hdl_getspecific(hdl);
	zfs_retire_repaired_t *zrp;
	uint64_t pool_guid, vdev_guid;
	nvlist_t *asru;

	if (nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_POOL_GUID,
	    &pool_guid) != 0 || nvlist_lookup_uint64(nvl,
	    FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, &vdev_guid) != 0)
		return;

	/*
	 * Before checking the state of the ASRU, go through and see if we've
	 * already made an attempt to repair this ASRU. This list is cleared
	 * whenever we receive any kind of list event, and is designed to
	 * prevent us from generating a feedback loop when we attempt repairs
	 * against a faulted pool. The problem is that checking the unusable
	 * state of the ASRU can involve opening the pool, which can post
	 * statechange events but otherwise leave the pool in the faulted
	 * state. This list allows us to detect when a statechange event is
	 * due to our own request.
	 */
	for (zrp = zdp->zrd_repaired; zrp != NULL; zrp = zrp->zrr_next) {
		if (zrp->zrr_pool == pool_guid &&
		    zrp->zrr_vdev == vdev_guid)
			return;
	}

	asru = fmd_nvl_alloc(hdl, FMD_SLEEP);

	(void) nvlist_add_uint8(asru, FM_VERSION, ZFS_SCHEME_VERSION0);
	(void) nvlist_add_string(asru, FM_FMRI_SCHEME, FM_FMRI_SCHEME_ZFS);
	(void) nvlist_add_uint64(asru, FM_FMRI_ZFS_POOL, pool_guid);
	(void) nvlist_add_uint64(asru, FM_FMRI_ZFS_VDEV, vdev_guid);

	/*
	 * We explicitly check for the unusable state here to make sure we
	 * aren't responding to a transient state change. As part of opening a
	 * vdev, it's possible to see the 'statechange' event, only to be
	 * followed by a vdev failure later. If we don't check the current
	 * state of the vdev (or pool) before marking it repaired, then we risk
	 * generating spurious repair events followed immediately by the same
	 * diagnosis.
	 *
	 * This assumes that the ZFS scheme code associates the unusable (i.e.
	 * isolated) state with its own definition of faulty state. In the
	 * case of a DEGRADED leaf vdev (due to checksum errors), this is not
	 * the case. This works, however, because the transient state change is
	 * not posted in this case. This could be made more explicit by not
	 * relying on the scheme's unusable callback and instead directly
	 * checking the vdev state, where we could correctly account for
	 * DEGRADED state.
	 */
	if (!fmd_nvl_fmri_unusable(hdl, asru) && fmd_nvl_fmri_has_fault(hdl,
	    asru, FMD_HAS_FAULT_ASRU, NULL)) {
		topo_hdl_t *thp;
		char *fmri = NULL;
		int err;

		thp = fmd_hdl_topo_hold(hdl, TOPO_VERSION);
		if (topo_fmri_nvl2str(thp, asru, &fmri, &err) == 0)
			(void) fmd_repair_asru(hdl, fmri);
		fmd_hdl_topo_rele(hdl, thp);

		topo_hdl_strfree(thp, fmri);
	}
	nvlist_free(asru);
	zrp = fmd_hdl_alloc(hdl, sizeof (zfs_retire_repaired_t), FMD_SLEEP);
	zrp->zrr_next = zdp->zrd_repaired;
	zrp->zrr_pool = pool_guid;
	zrp->zrr_vdev = vdev_guid;
	zdp->zrd_repaired = zrp;
}
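
/*
 * Main event handler (fmdo_recv): dispatches device-removal resources,
 * statechange notifications, list.repaired events and fault lists, faulting
 * or degrading the affected vdev and substituting a hot spare where
 * appropriate.
 */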
/*ARGSUSED*/
static void
zfs_retire_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,
    const char *class)
{
	uint64_t pool_guid, vdev_guid;
	zpool_handle_t *zhp;
	nvlist_t *resource, *fault, *fru;
	nvlist_t **faults;
	uint_t f, nfaults;
	zfs_retire_data_t *zdp = fmd_hdl_getspecific(hdl);
	libzfs_handle_t *zhdl = zdp->zrd_hdl;
	boolean_t fault_device, degrade_device;
	boolean_t is_repair;
	char *scheme, *fmri;
	nvlist_t *vdev;
	char *uuid;
	int repair_done = 0;
	boolean_t retire;
	boolean_t is_disk;
	vdev_aux_t aux;
	topo_hdl_t *thp;
	int err;

	/*
	 * If this is a resource notifying us of device removal, then simply
	 * check for an available spare and continue.
	 */
	if (strcmp(class, "resource.fs.zfs.removed") == 0) {
		if (nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_POOL_GUID,
		    &pool_guid) != 0 ||
		    nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID,
		    &vdev_guid) != 0)
			return;

		if ((zhp = find_by_guid(zhdl, pool_guid, vdev_guid,
		    &vdev)) == NULL)
			return;

		if (fmd_prop_get_int32(hdl, "spare_on_remove"))
			replace_with_spare(hdl, zhp, vdev);
		zpool_close(zhp);
		return;
	}

	if (strcmp(class, FM_LIST_RESOLVED_CLASS) == 0)
		return;

	if (strcmp(class, "resource.fs.zfs.statechange") == 0 ||
	    strcmp(class,
	    "resource.sysevent.EC_zfs.ESC_ZFS_vdev_remove") == 0) {
		zfs_vdev_repair(hdl, nvl);
		return;
	}

	zfs_retire_clear_data(hdl, zdp);

	if (strcmp(class, FM_LIST_REPAIRED_CLASS) == 0)
		is_repair = B_TRUE;
	else
		is_repair = B_FALSE;

	/*
	 * We subscribe to zfs faults as well as all repair events.
	 */
	if (nvlist_lookup_nvlist_array(nvl, FM_SUSPECT_FAULT_LIST,
	    &faults, &nfaults) != 0)
		return;

	for (f = 0; f < nfaults; f++) {
		fault = faults[f];

		fault_device = B_FALSE;
		degrade_device = B_FALSE;
		is_disk = B_FALSE;

		if (nvlist_lookup_boolean_value(fault, FM_SUSPECT_RETIRE,
		    &retire) == 0 && retire == 0)
			continue;

		if (fmd_nvl_class_match(hdl, fault,
		    "fault.io.disk.ssm-wearout") &&
		    fmd_prop_get_int32(hdl, "ssm_wearout_skip_retire") ==
		    FMD_B_TRUE) {
			fmd_hdl_debug(hdl, "zfs-retire: ignoring SSM fault");
			continue;
		}

		/*
		 * While we subscribe to fault.fs.zfs.*, we only take action
		 * for faults targeting a specific vdev (open failure or SERD
		 * failure). We also subscribe to fault.io.* events, so that
		 * faulty disks will be faulted in the ZFS configuration.
		 */
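		/*
		 * The mapping from fault class to vdev action is:
		 *
		 *	fault.fs.zfs.vdev.io		fault the vdev
		 *	fault.fs.zfs.vdev.checksum	degrade the vdev
		 *	fault.fs.zfs.device		no state change
		 *	fault.io.*			fault the vdev
		 */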
		if (fmd_nvl_class_match(hdl, fault, "fault.fs.zfs.vdev.io")) {
			fault_device = B_TRUE;
		} else if (fmd_nvl_class_match(hdl, fault,
		    "fault.fs.zfs.vdev.checksum")) {
			degrade_device = B_TRUE;
		} else if (fmd_nvl_class_match(hdl, fault,
		    "fault.fs.zfs.device")) {
			fault_device = B_FALSE;
		} else if (fmd_nvl_class_match(hdl, fault, "fault.io.*")) {
			is_disk = B_TRUE;
			fault_device = B_TRUE;
		} else {
			continue;
		}

		if (is_disk) {
			/*
			 * This is a disk fault. Look up the FRU, convert it
			 * to an FMRI string, and attempt to find a matching
			 * vdev.
			 */
			if (nvlist_lookup_nvlist(fault, FM_FAULT_FRU,
			    &fru) != 0 ||
			    nvlist_lookup_string(fru, FM_FMRI_SCHEME,
			    &scheme) != 0)
				continue;

			if (strcmp(scheme, FM_FMRI_SCHEME_HC) != 0)
				continue;

			thp = fmd_hdl_topo_hold(hdl, TOPO_VERSION);
			if (topo_fmri_nvl2str(thp, fru, &fmri, &err) != 0) {
				fmd_hdl_topo_rele(hdl, thp);
				continue;
			}

			zhp = find_by_fru(zhdl, fmri, &vdev);
			topo_hdl_strfree(thp, fmri);
			fmd_hdl_topo_rele(hdl, thp);

			if (zhp == NULL)
				continue;

			(void) nvlist_lookup_uint64(vdev,
			    ZPOOL_CONFIG_GUID, &vdev_guid);
			aux = VDEV_AUX_EXTERNAL;
		} else {
			/*
			 * This is a ZFS fault. Look up the resource, and
			 * attempt to find the matching vdev.
			 */
			if (nvlist_lookup_nvlist(fault, FM_FAULT_RESOURCE,
			    &resource) != 0 ||
			    nvlist_lookup_string(resource, FM_FMRI_SCHEME,
			    &scheme) != 0)
				continue;

			if (strcmp(scheme, FM_FMRI_SCHEME_ZFS) != 0)
				continue;

			if (nvlist_lookup_uint64(resource, FM_FMRI_ZFS_POOL,
			    &pool_guid) != 0)
				continue;

			if (nvlist_lookup_uint64(resource, FM_FMRI_ZFS_VDEV,
			    &vdev_guid) != 0) {
				if (is_repair)
					vdev_guid = 0;
				else
					continue;
			}

			if ((zhp = find_by_guid(zhdl, pool_guid, vdev_guid,
			    &vdev)) == NULL)
				continue;

			aux = VDEV_AUX_ERR_EXCEEDED;
		}

		if (vdev_guid == 0) {
			/*
			 * For pool-level repair events, clear the entire pool.
			 */
			(void) zpool_clear(zhp, NULL, NULL);
			zpool_close(zhp);
			continue;
		}

		/*
		 * If this is a repair event, then mark the vdev as repaired
		 * and continue.
		 */
		if (is_repair) {
			repair_done = 1;
			(void) zpool_vdev_clear(zhp, vdev_guid);
			zpool_close(zhp);
			continue;
		}

		/*
		 * Actively fault the device if needed.
		 */
		if (fault_device)
			(void) zpool_vdev_fault(zhp, vdev_guid, aux);
		if (degrade_device)
			(void) zpool_vdev_degrade(zhp, vdev_guid, aux);

		/*
		 * Attempt to substitute a hot spare.
		 */
		replace_with_spare(hdl, zhp, vdev);
		zpool_close(zhp);
	}

	if (strcmp(class, FM_LIST_REPAIRED_CLASS) == 0 && repair_done &&
	    nvlist_lookup_string(nvl, FM_SUSPECT_UUID, &uuid) == 0)
		fmd_case_uuresolved(hdl, uuid);
}

static const fmd_hdl_ops_t fmd_ops = {
	zfs_retire_recv,	/* fmdo_recv */
	NULL,			/* fmdo_timeout */
	NULL,			/* fmdo_close */
	NULL,			/* fmdo_stats */
	NULL,			/* fmdo_gc */
};

static const fmd_prop_t fmd_props[] = {
	{ "spare_on_remove", FMD_TYPE_BOOL, "true" },
	{ "ssm_wearout_skip_retire", FMD_TYPE_BOOL, "true"},
	{ NULL, 0, NULL }
};
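
/*
 * Both properties default to true. Like other fmd module properties, they
 * can be overridden with a 'setprop' directive in the module's .conf file
 * in the fmd plugin directory (for example, a zfs-retire.conf alongside
 * zfs-retire.so), e.g.:
 *
 *	setprop spare_on_remove false
 */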

static const fmd_hdl_info_t fmd_info = {
	"ZFS Retire Agent", "1.0", &fmd_ops, fmd_props
};

void
_fmd_init(fmd_hdl_t *hdl)
{
	zfs_retire_data_t *zdp;
	libzfs_handle_t *zhdl;

	if ((zhdl = libzfs_init()) == NULL)
		return;

	if (fmd_hdl_register(hdl, FMD_API_VERSION, &fmd_info) != 0) {
		libzfs_fini(zhdl);
		return;
	}

	zdp = fmd_hdl_zalloc(hdl, sizeof (zfs_retire_data_t), FMD_SLEEP);
	zdp->zrd_hdl = zhdl;

	fmd_hdl_setspecific(hdl, zdp);
}

void
_fmd_fini(fmd_hdl_t *hdl)
{
	zfs_retire_data_t *zdp = fmd_hdl_getspecific(hdl);

	if (zdp != NULL) {
		zfs_retire_clear_data(hdl, zdp);
		libzfs_fini(zdp->zrd_hdl);
		fmd_hdl_free(hdl, zdp, sizeof (zfs_retire_data_t));
	}
}