xref: /freebsd/sys/contrib/openzfs/cmd/zed/agents/zfs_retire.c (revision 61145dc2b94f12f6a47344fb9aac702321880e43)
1 // SPDX-License-Identifier: CDDL-1.0
2 /*
3  * CDDL HEADER START
4  *
5  * The contents of this file are subject to the terms of the
6  * Common Development and Distribution License (the "License").
7  * You may not use this file except in compliance with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or https://opensource.org/licenses/CDDL-1.0.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
24  *
25  * Copyright (c) 2016, Intel Corporation.
26  * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>
27  */
28 
29 /*
30  * The ZFS retire agent is responsible for managing hot spares across all pools.
31  * When we see a device fault or a device removal, we try to open the associated
32  * pool and look for any hot spares.  We iterate over any available hot spares
33  * and attempt a 'zpool replace' for each one.
34  *
35  * For vdevs diagnosed as faulty, the agent is also responsible for proactively
36  * marking the vdev FAULTY (for I/O errors) or DEGRADED (for checksum errors).
37  */
38 
39 #include <sys/fs/zfs.h>
40 #include <sys/fm/protocol.h>
41 #include <sys/fm/fs/zfs.h>
42 #include <libzutil.h>
43 #include <libzfs.h>
44 #include <string.h>
45 #include <libgen.h>
46 
47 #include "zfs_agents.h"
48 #include "fmd_api.h"
49 
50 
51 typedef struct zfs_retire_repaired {
52 	struct zfs_retire_repaired	*zrr_next;
53 	uint64_t			zrr_pool;
54 	uint64_t			zrr_vdev;
55 } zfs_retire_repaired_t;
56 
57 typedef struct zfs_retire_data {
58 	libzfs_handle_t			*zrd_hdl;
59 	zfs_retire_repaired_t		*zrd_repaired;
60 } zfs_retire_data_t;
61 
62 static void
63 zfs_retire_clear_data(fmd_hdl_t *hdl, zfs_retire_data_t *zdp)
64 {
65 	zfs_retire_repaired_t *zrp;
66 
67 	while ((zrp = zdp->zrd_repaired) != NULL) {
68 		zdp->zrd_repaired = zrp->zrr_next;
69 		fmd_hdl_free(hdl, zrp, sizeof (zfs_retire_repaired_t));
70 	}
71 }
72 
73 /*
74  * Find a pool with a matching GUID.
75  */
76 typedef struct find_cbdata {
77 	uint64_t	cb_guid;
78 	zpool_handle_t	*cb_zhp;
79 	nvlist_t	*cb_vdev;
80 	uint64_t	cb_vdev_guid;
81 	uint64_t	cb_num_spares;
82 } find_cbdata_t;
83 
84 static int
85 find_pool(zpool_handle_t *zhp, void *data)
86 {
87 	find_cbdata_t *cbp = data;
88 
89 	if (cbp->cb_guid ==
90 	    zpool_get_prop_int(zhp, ZPOOL_PROP_GUID, NULL)) {
91 		cbp->cb_zhp = zhp;
92 		return (1);
93 	}
94 
95 	zpool_close(zhp);
96 	return (0);
97 }
98 
99 /*
100  * Find a vdev within a tree with a matching GUID.
101  */
102 static nvlist_t *
103 find_vdev(libzfs_handle_t *zhdl, nvlist_t *nv, uint64_t search_guid)
104 {
105 	uint64_t guid;
106 	nvlist_t **child;
107 	uint_t c, children;
108 	nvlist_t *ret;
109 
110 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
111 	    guid == search_guid) {
112 		fmd_hdl_debug(fmd_module_hdl("zfs-retire"),
113 		    "matched vdev %llu", guid);
114 		return (nv);
115 	}
116 
117 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
118 	    &child, &children) != 0)
119 		return (NULL);
120 
121 	for (c = 0; c < children; c++) {
122 		if ((ret = find_vdev(zhdl, child[c], search_guid)) != NULL)
123 			return (ret);
124 	}
125 
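	/*
	 * The matching GUID may also belong to an l2arc or hot spare device,
	 * which are kept in separate lists rather than under
	 * ZPOOL_CONFIG_CHILDREN.
	 */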
126 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
127 	    &child, &children) != 0)
128 		return (NULL);
129 
130 	for (c = 0; c < children; c++) {
131 		if ((ret = find_vdev(zhdl, child[c], search_guid)) != NULL)
132 			return (ret);
133 	}
134 
135 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
136 	    &child, &children) != 0)
137 		return (NULL);
138 
139 	for (c = 0; c < children; c++) {
140 		if ((ret = find_vdev(zhdl, child[c], search_guid)) != NULL)
141 			return (ret);
142 	}
143 
144 	return (NULL);
145 }
146 
147 static int
148 remove_spares(zpool_handle_t *zhp, void *data)
149 {
150 	nvlist_t *config, *nvroot;
151 	nvlist_t **spares;
152 	uint_t nspares;
153 	char *devname;
154 	find_cbdata_t *cbp = data;
155 	uint64_t spareguid = 0;
156 	vdev_stat_t *vs;
157 	unsigned int c;
158 
159 	config = zpool_get_config(zhp, NULL);
160 	if (nvlist_lookup_nvlist(config,
161 	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) != 0) {
162 		zpool_close(zhp);
163 		return (0);
164 	}
165 
166 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
167 	    &spares, &nspares) != 0) {
168 		zpool_close(zhp);
169 		return (0);
170 	}
171 
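	/*
	 * If this pool's spare list contains the removed device, request its
	 * removal unless it is already in the REMOVED state.
	 */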
172 	for (int i = 0; i < nspares; i++) {
173 		if (nvlist_lookup_uint64(spares[i], ZPOOL_CONFIG_GUID,
174 		    &spareguid) == 0 && spareguid == cbp->cb_vdev_guid) {
175 			devname = zpool_vdev_name(NULL, zhp, spares[i],
176 			    B_FALSE);
177 			nvlist_lookup_uint64_array(spares[i],
178 			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &c);
179 			if (vs->vs_state != VDEV_STATE_REMOVED &&
180 			    zpool_vdev_remove_wanted(zhp, devname) == 0)
181 				cbp->cb_num_spares++;
182 			break;
183 		}
184 	}
185 
186 	zpool_close(zhp);
187 	return (0);
188 }
189 
190 /*
191  * Given a hot spare's vdev GUID, remove that spare from every pool using it.
192  */
193 static int
194 find_and_remove_spares(libzfs_handle_t *zhdl, uint64_t vdev_guid)
195 {
196 	find_cbdata_t cb;
197 
198 	cb.cb_num_spares = 0;
199 	cb.cb_vdev_guid = vdev_guid;
200 	zpool_iter(zhdl, remove_spares, &cb);
201 
202 	return (cb.cb_num_spares);
203 }
204 
205 /*
206  * Given a (pool, vdev) GUID pair, find the matching pool and vdev.
207  */
208 static zpool_handle_t *
209 find_by_guid(libzfs_handle_t *zhdl, uint64_t pool_guid, uint64_t vdev_guid,
210     nvlist_t **vdevp)
211 {
212 	find_cbdata_t cb;
213 	zpool_handle_t *zhp;
214 	nvlist_t *config, *nvroot;
215 
216 	/*
217 	 * Find the corresponding pool and make sure the vdev still exists.
218 	 */
219 	cb.cb_guid = pool_guid;
220 	if (zpool_iter(zhdl, find_pool, &cb) != 1)
221 		return (NULL);
222 
223 	zhp = cb.cb_zhp;
224 	config = zpool_get_config(zhp, NULL);
225 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
226 	    &nvroot) != 0) {
227 		zpool_close(zhp);
228 		return (NULL);
229 	}
230 
231 	if (vdev_guid != 0) {
232 		if ((*vdevp = find_vdev(zhdl, nvroot, vdev_guid)) == NULL) {
233 			zpool_close(zhp);
234 			return (NULL);
235 		}
236 	}
237 
238 	return (zhp);
239 }
240 
241 /*
242  * Given a vdev, attempt to replace it with every known spare until one
243  * succeeds or we run out of devices to try.
244  * Return whether we were successful or not in replacing the device.
245  */
246 static boolean_t
247 replace_with_spare(fmd_hdl_t *hdl, zpool_handle_t *zhp, nvlist_t *vdev)
248 {
249 	nvlist_t *config, *nvroot, *replacement;
250 	nvlist_t **spares;
251 	uint_t s, nspares;
252 	char *dev_name;
253 	zprop_source_t source;
254 	int ashift;
255 
256 	config = zpool_get_config(zhp, NULL);
257 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
258 	    &nvroot) != 0)
259 		return (B_FALSE);
260 
261 	/*
262 	 * Find out if there are any hot spares available in the pool.
263 	 */
264 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
265 	    &spares, &nspares) != 0)
266 		return (B_FALSE);
267 
268 	/*
269 	 * Look up the "ashift" pool property; we may need it for the replacement
270 	 */
271 	ashift = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &source);
272 
273 	replacement = fmd_nvl_alloc(hdl, FMD_SLEEP);
274 
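	/*
	 * Build the replacement config: a root vdev whose single child is
	 * filled in with each candidate spare in the loop below.
	 */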
275 	(void) nvlist_add_string(replacement, ZPOOL_CONFIG_TYPE,
276 	    VDEV_TYPE_ROOT);
277 
278 	dev_name = zpool_vdev_name(NULL, zhp, vdev, B_FALSE);
279 
280 	/*
281 	 * Try each spare in turn, stopping as soon as one successfully
282 	 * replaces the device.
283 	 */
284 	for (s = 0; s < nspares; s++) {
285 		boolean_t rebuild = B_FALSE;
286 		const char *spare_name, *type;
287 
288 		if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
289 		    &spare_name) != 0)
290 			continue;
291 
292 		/* prefer sequential resilvering for distributed spares */
293 		if ((nvlist_lookup_string(spares[s], ZPOOL_CONFIG_TYPE,
294 		    &type) == 0) && strcmp(type, VDEV_TYPE_DRAID_SPARE) == 0)
295 			rebuild = B_TRUE;
296 
297 		/* if set, add the "ashift" pool property to the spare nvlist */
298 		if (source != ZPROP_SRC_DEFAULT)
299 			(void) nvlist_add_uint64(spares[s],
300 			    ZPOOL_CONFIG_ASHIFT, ashift);
301 
302 		(void) nvlist_add_nvlist_array(replacement,
303 		    ZPOOL_CONFIG_CHILDREN, (const nvlist_t **)&spares[s], 1);
304 
305 		fmd_hdl_debug(hdl, "zpool_vdev_replace '%s' with spare '%s'",
306 		    dev_name, zfs_basename(spare_name));
307 
308 		if (zpool_vdev_attach(zhp, dev_name, spare_name,
309 		    replacement, B_TRUE, rebuild) == 0) {
310 			free(dev_name);
311 			nvlist_free(replacement);
312 			return (B_TRUE);
313 		}
314 	}
315 
316 	free(dev_name);
317 	nvlist_free(replacement);
318 
319 	return (B_FALSE);
320 }
321 
322 /*
323  * Repair this vdev if we had diagnosed a 'fault.fs.zfs.device' and
324  * the ASRU is now usable.  ZFS has found the device to be present and
325  * functioning.
326  */
327 static void
328 zfs_vdev_repair(fmd_hdl_t *hdl, nvlist_t *nvl)
329 {
330 	zfs_retire_data_t *zdp = fmd_hdl_getspecific(hdl);
331 	zfs_retire_repaired_t *zrp;
332 	uint64_t pool_guid, vdev_guid;
333 	if (nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_POOL_GUID,
334 	    &pool_guid) != 0 || nvlist_lookup_uint64(nvl,
335 	    FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, &vdev_guid) != 0)
336 		return;
337 
338 	/*
339 	 * Before checking the state of the ASRU, go through and see if we've
340 	 * already made an attempt to repair this ASRU.  This list is cleared
341 	 * whenever we receive any kind of list event, and is designed to
342 	 * prevent us from generating a feedback loop when we attempt repairs
343 	 * against a faulted pool.  The problem is that checking the unusable
344 	 * state of the ASRU can involve opening the pool, which can post
345 	 * statechange events but otherwise leave the pool in the faulted
346 	 * state.  This list allows us to detect when a statechange event is
347 	 * due to our own request.
348 	 */
349 	for (zrp = zdp->zrd_repaired; zrp != NULL; zrp = zrp->zrr_next) {
350 		if (zrp->zrr_pool == pool_guid &&
351 		    zrp->zrr_vdev == vdev_guid)
352 			return;
353 	}
354 
355 	zrp = fmd_hdl_alloc(hdl, sizeof (zfs_retire_repaired_t), FMD_SLEEP);
356 	zrp->zrr_next = zdp->zrd_repaired;
357 	zrp->zrr_pool = pool_guid;
358 	zrp->zrr_vdev = vdev_guid;
359 	zdp->zrd_repaired = zrp;
360 
361 	fmd_hdl_debug(hdl, "marking repaired vdev %llu on pool %llu",
362 	    vdev_guid, pool_guid);
363 }
364 
365 static void
366 zfs_retire_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,
367     const char *class)
368 {
369 	(void) ep;
370 	uint64_t pool_guid, vdev_guid;
371 	zpool_handle_t *zhp;
372 	nvlist_t *resource, *fault;
373 	nvlist_t **faults;
374 	uint_t f, nfaults;
375 	zfs_retire_data_t *zdp = fmd_hdl_getspecific(hdl);
376 	libzfs_handle_t *zhdl = zdp->zrd_hdl;
377 	boolean_t fault_device, degrade_device;
378 	boolean_t is_repair;
379 	boolean_t l2arc = B_FALSE;
380 	boolean_t spare = B_FALSE;
381 	const char *scheme;
382 	nvlist_t *vdev = NULL;
383 	const char *uuid;
384 	int repair_done = 0;
385 	boolean_t retire;
386 	boolean_t is_disk;
387 	vdev_aux_t aux;
388 	uint64_t state = 0;
389 	vdev_stat_t *vs;
390 	unsigned int c;
391 
392 	fmd_hdl_debug(hdl, "zfs_retire_recv: '%s'", class);
393 
394 	(void) nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE,
395 	    &state);
396 
397 	/*
398 	 * If this is a resource notifying us of device removal, then simply
399 	 * check for an available spare and continue, unless the device is an
400 	 * l2arc vdev, in which case we just remove it.
401 	 */
402 	if (strcmp(class, "resource.fs.zfs.removed") == 0 ||
403 	    (strcmp(class, "resource.fs.zfs.statechange") == 0 &&
404 	    (state == VDEV_STATE_REMOVED || state == VDEV_STATE_FAULTED))) {
405 		const char *devtype;
406 		char *devname;
407 
408 		if (nvlist_lookup_string(nvl, FM_EREPORT_PAYLOAD_ZFS_VDEV_TYPE,
409 		    &devtype) == 0) {
410 			if (strcmp(devtype, VDEV_TYPE_SPARE) == 0)
411 				spare = B_TRUE;
412 			else if (strcmp(devtype, VDEV_TYPE_L2CACHE) == 0)
413 				l2arc = B_TRUE;
414 		}
415 
416 		if (nvlist_lookup_uint64(nvl,
417 		    FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, &vdev_guid) != 0)
418 			return;
419 
420 		if (vdev_guid == 0) {
421 			fmd_hdl_debug(hdl, "Got a zero GUID");
422 			return;
423 		}
424 
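		/*
		 * A hot spare may be shared by several pools, so remove it
		 * from every pool that has it configured.
		 */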
425 		if (spare) {
426 			int nspares = find_and_remove_spares(zhdl, vdev_guid);
427 			fmd_hdl_debug(hdl, "%d spares removed", nspares);
428 			return;
429 		}
430 
431 		if (nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_POOL_GUID,
432 		    &pool_guid) != 0)
433 			return;
434 
435 		if ((zhp = find_by_guid(zhdl, pool_guid, vdev_guid,
436 		    &vdev)) == NULL)
437 			return;
438 
439 		devname = zpool_vdev_name(NULL, zhp, vdev, B_FALSE);
440 
441 		nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
442 		    (uint64_t **)&vs, &c);
443 
444 		/*
445 		 * If the REMOVED state is requested for an already-removed
446 		 * vdev, it's a loopback event from spa_async_remove().  Just
447 		 * ignore it.
448 		 */
449 		if ((vs->vs_state == VDEV_STATE_REMOVED && state ==
450 		    VDEV_STATE_REMOVED) || vs->vs_state == VDEV_STATE_OFFLINE)
451 			return;
452 
453 		/* Remove the vdev since the device is unplugged */
454 		int remove_status = 0;
455 		if (l2arc || (strcmp(class, "resource.fs.zfs.removed") == 0)) {
456 			remove_status = zpool_vdev_remove_wanted(zhp, devname);
457 			fmd_hdl_debug(hdl, "zpool_vdev_remove_wanted '%s'"
458 			    ", err:%d", devname, libzfs_errno(zhdl));
459 		}
460 
461 		/* Replace the vdev with a spare if it's not an l2arc */
462 		if (!l2arc && !remove_status &&
463 		    (!fmd_prop_get_int32(hdl, "spare_on_remove") ||
464 		    replace_with_spare(hdl, zhp, vdev) == B_FALSE)) {
465 			/* Could not replace it with a spare */
466 			fmd_hdl_debug(hdl, "no spare for '%s'", devname);
467 		}
468 
469 		free(devname);
470 		zpool_close(zhp);
471 		return;
472 	}
473 
474 	if (strcmp(class, FM_LIST_RESOLVED_CLASS) == 0)
475 		return;
476 
477 	/*
478 	 * Note: on Linux, statechange events cover more than just
479 	 * healthy states, so we need to confirm the actual state value.
480 	 */
481 	if (strcmp(class, "resource.fs.zfs.statechange") == 0 &&
482 	    state == VDEV_STATE_HEALTHY) {
483 		zfs_vdev_repair(hdl, nvl);
484 		return;
485 	}
486 	if (strcmp(class, "sysevent.fs.zfs.vdev_remove") == 0) {
487 		zfs_vdev_repair(hdl, nvl);
488 		return;
489 	}
490 
491 	zfs_retire_clear_data(hdl, zdp);
492 
493 	if (strcmp(class, FM_LIST_REPAIRED_CLASS) == 0)
494 		is_repair = B_TRUE;
495 	else
496 		is_repair = B_FALSE;
497 
498 	/*
499 	 * We subscribe to zfs faults as well as all repair events.
500 	 */
501 	if (nvlist_lookup_nvlist_array(nvl, FM_SUSPECT_FAULT_LIST,
502 	    &faults, &nfaults) != 0)
503 		return;
504 
505 	for (f = 0; f < nfaults; f++) {
506 		fault = faults[f];
507 
508 		fault_device = B_FALSE;
509 		degrade_device = B_FALSE;
510 		is_disk = B_FALSE;
511 
512 		if (nvlist_lookup_boolean_value(fault, FM_SUSPECT_RETIRE,
513 		    &retire) == 0 && retire == 0)
514 			continue;
515 
516 		/*
517 		 * While we subscribe to fault.fs.zfs.*, we only take action
518 		 * for faults targeting a specific vdev (open failure or SERD
519 		 * failure).  We also subscribe to fault.io.* events, so that
520 		 * faulty disks will be faulted in the ZFS configuration.
521 		 */
522 		if (fmd_nvl_class_match(hdl, fault, "fault.fs.zfs.vdev.io")) {
523 			fault_device = B_TRUE;
524 		} else if (fmd_nvl_class_match(hdl, fault,
525 		    "fault.fs.zfs.vdev.checksum")) {
526 			degrade_device = B_TRUE;
527 		} else if (fmd_nvl_class_match(hdl, fault,
528 		    "fault.fs.zfs.vdev.slow_io")) {
529 			degrade_device = B_TRUE;
530 		} else if (fmd_nvl_class_match(hdl, fault,
531 		    "fault.fs.zfs.device")) {
532 			fault_device = B_FALSE;
533 		} else if (fmd_nvl_class_match(hdl, fault, "fault.io.*")) {
534 			is_disk = B_TRUE;
535 			fault_device = B_TRUE;
536 		} else {
537 			continue;
538 		}
539 
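		/*
		 * Disk faults (fault.io.*) are not mapped to a vdev here, so
		 * skip them; only ZFS-scheme faults are handled below.
		 */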
540 		if (is_disk) {
541 			continue;
542 		} else {
543 			/*
544 			 * This is a ZFS fault.  Lookup the resource, and
545 			 * attempt to find the matching vdev.
546 			 */
547 			if (nvlist_lookup_nvlist(fault, FM_FAULT_RESOURCE,
548 			    &resource) != 0 ||
549 			    nvlist_lookup_string(resource, FM_FMRI_SCHEME,
550 			    &scheme) != 0)
551 				continue;
552 
553 			if (strcmp(scheme, FM_FMRI_SCHEME_ZFS) != 0)
554 				continue;
555 
556 			if (nvlist_lookup_uint64(resource, FM_FMRI_ZFS_POOL,
557 			    &pool_guid) != 0)
558 				continue;
559 
560 			if (nvlist_lookup_uint64(resource, FM_FMRI_ZFS_VDEV,
561 			    &vdev_guid) != 0) {
562 				if (is_repair)
563 					vdev_guid = 0;
564 				else
565 					continue;
566 			}
567 
568 			if ((zhp = find_by_guid(zhdl, pool_guid, vdev_guid,
569 			    &vdev)) == NULL)
570 				continue;
571 
572 			aux = VDEV_AUX_ERR_EXCEEDED;
573 		}
574 
575 		if (vdev_guid == 0) {
576 			/*
577 			 * For pool-level repair events, clear the entire pool.
578 			 */
579 			fmd_hdl_debug(hdl, "zpool_clear of pool '%s'",
580 			    zpool_get_name(zhp));
581 			(void) zpool_clear(zhp, NULL, NULL);
582 			zpool_close(zhp);
583 			continue;
584 		}
585 
586 		/*
587 		 * If this is a repair event, then mark the vdev as repaired and
588 		 * continue.
589 		 */
590 		if (is_repair) {
591 			repair_done = 1;
592 			fmd_hdl_debug(hdl, "zpool_clear of pool '%s' vdev %llu",
593 			    zpool_get_name(zhp), vdev_guid);
594 			(void) zpool_vdev_clear(zhp, vdev_guid);
595 			zpool_close(zhp);
596 			continue;
597 		}
598 
599 		/*
600 		 * Actively fault the device if needed.
601 		 */
602 		if (fault_device)
603 			(void) zpool_vdev_fault(zhp, vdev_guid, aux);
604 		if (degrade_device)
605 			(void) zpool_vdev_degrade(zhp, vdev_guid, aux);
606 
607 		if (fault_device || degrade_device)
608 			fmd_hdl_debug(hdl, "zpool_vdev_%s: vdev %llu on '%s'",
609 			    fault_device ? "fault" : "degrade", vdev_guid,
610 			    zpool_get_name(zhp));
611 
612 		/*
613 		 * Attempt to substitute a hot spare.
614 		 */
615 		(void) replace_with_spare(hdl, zhp, vdev);
616 
617 		zpool_close(zhp);
618 	}
619 
620 	if (strcmp(class, FM_LIST_REPAIRED_CLASS) == 0 && repair_done &&
621 	    nvlist_lookup_string(nvl, FM_SUSPECT_UUID, &uuid) == 0)
622 		fmd_case_uuresolved(hdl, uuid);
623 }
624 
625 static const fmd_hdl_ops_t fmd_ops = {
626 	zfs_retire_recv,	/* fmdo_recv */
627 	NULL,			/* fmdo_timeout */
628 	NULL,			/* fmdo_close */
629 	NULL,			/* fmdo_stats */
630 	NULL,			/* fmdo_gc */
631 };
632 
633 static const fmd_prop_t fmd_props[] = {
634 	{ "spare_on_remove", FMD_TYPE_BOOL, "true" },
635 	{ NULL, 0, NULL }
636 };
637 
638 static const fmd_hdl_info_t fmd_info = {
639 	"ZFS Retire Agent", "1.0", &fmd_ops, fmd_props
640 };
641 
642 void
643 _zfs_retire_init(fmd_hdl_t *hdl)
644 {
645 	zfs_retire_data_t *zdp;
646 	libzfs_handle_t *zhdl;
647 
648 	if ((zhdl = libzfs_init()) == NULL)
649 		return;
650 
651 	if (fmd_hdl_register(hdl, FMD_API_VERSION, &fmd_info) != 0) {
652 		libzfs_fini(zhdl);
653 		return;
654 	}
655 
656 	zdp = fmd_hdl_zalloc(hdl, sizeof (zfs_retire_data_t), FMD_SLEEP);
657 	zdp->zrd_hdl = zhdl;
658 
659 	fmd_hdl_setspecific(hdl, zdp);
660 }
661 
662 void
663 _zfs_retire_fini(fmd_hdl_t *hdl)
664 {
665 	zfs_retire_data_t *zdp = fmd_hdl_getspecific(hdl);
666 
667 	if (zdp != NULL) {
668 		zfs_retire_clear_data(hdl, zdp);
669 		libzfs_fini(zdp->zrd_hdl);
670 		fmd_hdl_free(hdl, zdp, sizeof (zfs_retire_data_t));
671 	}
672 }
673