/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2016, Intel Corporation.
 * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>
 */

/*
 * The ZFS retire agent is responsible for managing hot spares across all pools.
 * When we see a device fault or a device removal, we try to open the associated
 * pool and look for any hot spares.  We iterate over any available hot spares
 * and attempt a 'zpool replace' for each one.
 *
 * For vdevs diagnosed as faulty, the agent is also responsible for proactively
 * marking the vdev FAULTY (for I/O errors) or DEGRADED (for checksum errors).
 */

#include <sys/fs/zfs.h>
#include <sys/fm/protocol.h>
#include <sys/fm/fs/zfs.h>
#include <libgen.h>
#include <libzfs.h>
#include <string.h>

#include "zfs_agents.h"
#include "fmd_api.h"

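/*
 * List of (pool GUID, vdev GUID) pairs for which a repair attempt has
 * already been recorded; see zfs_vdev_repair() for how this list breaks
 * the statechange feedback loop.
 */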
typedef struct zfs_retire_repaired {
	struct zfs_retire_repaired	*zrr_next;
	uint64_t			zrr_pool;
	uint64_t			zrr_vdev;
} zfs_retire_repaired_t;

typedef struct zfs_retire_data {
	libzfs_handle_t			*zrd_hdl;
	zfs_retire_repaired_t		*zrd_repaired;
} zfs_retire_data_t;

static void
zfs_retire_clear_data(fmd_hdl_t *hdl, zfs_retire_data_t *zdp)
{
	zfs_retire_repaired_t *zrp;

	while ((zrp = zdp->zrd_repaired) != NULL) {
		zdp->zrd_repaired = zrp->zrr_next;
		fmd_hdl_free(hdl, zrp, sizeof (zfs_retire_repaired_t));
	}
}

/*
 * Callback data used to locate a pool with a matching GUID and, optionally,
 * a matching vdev within it.
 */
typedef struct find_cbdata {
	uint64_t	cb_guid;
	zpool_handle_t	*cb_zhp;
	nvlist_t	*cb_vdev;
} find_cbdata_t;

static int
find_pool(zpool_handle_t *zhp, void *data)
{
	find_cbdata_t *cbp = data;

	if (cbp->cb_guid ==
	    zpool_get_prop_int(zhp, ZPOOL_PROP_GUID, NULL)) {
		cbp->cb_zhp = zhp;
		return (1);
	}

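	/*
	 * No match: close this handle.  A matching handle is left open and
	 * handed back to the caller through cb_zhp.
	 */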
	zpool_close(zhp);
	return (0);
}

/*
 * Find a vdev within a tree with a matching GUID.
 */
static nvlist_t *
find_vdev(libzfs_handle_t *zhdl, nvlist_t *nv, uint64_t search_guid)
{
	uint64_t guid;
	nvlist_t **child;
	uint_t c, children;
	nvlist_t *ret;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
	    guid == search_guid) {
		fmd_hdl_debug(fmd_module_hdl("zfs-retire"),
		    "matched vdev %llu", guid);
		return (nv);
	}

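	/*
	 * Recurse depth-first through the regular children, then through the
	 * L2ARC devices, and finally through the spares.
	 */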
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = find_vdev(zhdl, child[c], search_guid)) != NULL)
			return (ret);
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = find_vdev(zhdl, child[c], search_guid)) != NULL)
			return (ret);
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = find_vdev(zhdl, child[c], search_guid)) != NULL)
			return (ret);
	}

	return (NULL);
}

/*
 * Given a (pool, vdev) GUID pair, find the matching pool and vdev.
 */
static zpool_handle_t *
find_by_guid(libzfs_handle_t *zhdl, uint64_t pool_guid, uint64_t vdev_guid,
    nvlist_t **vdevp)
{
	find_cbdata_t cb;
	zpool_handle_t *zhp;
	nvlist_t *config, *nvroot;

	/*
	 * Find the corresponding pool and make sure the vdev still exists.
	 */
	cb.cb_guid = pool_guid;
	if (zpool_iter(zhdl, find_pool, &cb) != 1)
		return (NULL);

	zhp = cb.cb_zhp;
	config = zpool_get_config(zhp, NULL);
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

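	/* A vdev GUID of 0 denotes a pool-level event, so skip the lookup. */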
	if (vdev_guid != 0) {
		if ((*vdevp = find_vdev(zhdl, nvroot, vdev_guid)) == NULL) {
			zpool_close(zhp);
			return (NULL);
		}
	}

	return (zhp);
}

/*
 * Given a vdev, attempt to replace it with every known spare until one
 * succeeds or we run out of devices to try.
 * Returns whether the device was successfully replaced.
 */
static boolean_t
replace_with_spare(fmd_hdl_t *hdl, zpool_handle_t *zhp, nvlist_t *vdev)
{
	nvlist_t *config, *nvroot, *replacement;
	nvlist_t **spares;
	uint_t s, nspares;
	char *dev_name;
	zprop_source_t source;
	uint64_t ashift;

	config = zpool_get_config(zhp, NULL);
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) != 0)
		return (B_FALSE);

	/*
	 * Find out if there are any hot spares available in the pool.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) != 0)
		return (B_FALSE);

	/*
	 * Look up the "ashift" pool property; we may need it for the
	 * replacement.
	 */
	ashift = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &source);

	replacement = fmd_nvl_alloc(hdl, FMD_SLEEP);

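	/*
	 * The replacement config is a root-type vdev; each candidate spare
	 * is attached below it as the sole child.
	 */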
	(void) nvlist_add_string(replacement, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_ROOT);

	dev_name = zpool_vdev_name(NULL, zhp, vdev, B_FALSE);

	/*
	 * Try each spare in turn, stopping after the first successful
	 * replacement.
	 */
	for (s = 0; s < nspares; s++) {
		char *spare_name;

		if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
		    &spare_name) != 0)
			continue;

		/* if set, add the "ashift" pool property to the spare nvlist */
		if (source != ZPROP_SRC_DEFAULT)
			(void) nvlist_add_uint64(spares[s],
			    ZPOOL_CONFIG_ASHIFT, ashift);

		(void) nvlist_add_nvlist_array(replacement,
		    ZPOOL_CONFIG_CHILDREN, &spares[s], 1);

		fmd_hdl_debug(hdl, "zpool_vdev_replace '%s' with spare '%s'",
		    dev_name, basename(spare_name));

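		/*
		 * The first B_TRUE marks this as a replace rather than a
		 * mirror attach; the trailing B_FALSE requests a traditional
		 * (non-sequential) resilver.
		 */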
		if (zpool_vdev_attach(zhp, dev_name, spare_name,
		    replacement, B_TRUE, B_FALSE) == 0) {
			free(dev_name);
			nvlist_free(replacement);
			return (B_TRUE);
		}
	}

	free(dev_name);
	nvlist_free(replacement);

	return (B_FALSE);
}

/*
 * Repair this vdev if we had diagnosed a 'fault.fs.zfs.device' and the
 * ASRU is now usable.  ZFS has found the device to be present and
 * functioning.
 */
/*ARGSUSED*/
static void
zfs_vdev_repair(fmd_hdl_t *hdl, nvlist_t *nvl)
{
	zfs_retire_data_t *zdp = fmd_hdl_getspecific(hdl);
	zfs_retire_repaired_t *zrp;
	uint64_t pool_guid, vdev_guid;

	if (nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_POOL_GUID,
	    &pool_guid) != 0 || nvlist_lookup_uint64(nvl,
	    FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, &vdev_guid) != 0)
		return;

	/*
	 * Before checking the state of the ASRU, go through and see if we've
	 * already made an attempt to repair this ASRU.  This list is cleared
	 * whenever we receive any kind of list event, and is designed to
	 * prevent us from generating a feedback loop when we attempt repairs
	 * against a faulted pool.  The problem is that checking the unusable
	 * state of the ASRU can involve opening the pool, which can post
	 * statechange events but otherwise leave the pool in the faulted
	 * state.  This list allows us to detect when a statechange event is
	 * due to our own request.
	 */
	for (zrp = zdp->zrd_repaired; zrp != NULL; zrp = zrp->zrr_next) {
		if (zrp->zrr_pool == pool_guid &&
		    zrp->zrr_vdev == vdev_guid)
			return;
	}

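	/*
	 * Not seen before: record this (pool, vdev) pair.  The list is
	 * flushed by zfs_retire_clear_data() on the next list event.
	 */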
	zrp = fmd_hdl_alloc(hdl, sizeof (zfs_retire_repaired_t), FMD_SLEEP);
	zrp->zrr_next = zdp->zrd_repaired;
	zrp->zrr_pool = pool_guid;
	zrp->zrr_vdev = vdev_guid;
	zdp->zrd_repaired = zrp;

	fmd_hdl_debug(hdl, "marking repaired vdev %llu on pool %llu",
	    vdev_guid, pool_guid);
}

/*ARGSUSED*/
static void
zfs_retire_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,
    const char *class)
{
	uint64_t pool_guid, vdev_guid;
	zpool_handle_t *zhp;
	nvlist_t *resource, *fault;
	nvlist_t **faults;
	uint_t f, nfaults;
	zfs_retire_data_t *zdp = fmd_hdl_getspecific(hdl);
	libzfs_handle_t *zhdl = zdp->zrd_hdl;
	boolean_t fault_device, degrade_device;
	boolean_t is_repair;
	char *scheme;
	nvlist_t *vdev = NULL;
	char *uuid;
	int repair_done = 0;
	boolean_t retire;
	boolean_t is_disk;
	vdev_aux_t aux;
	uint64_t state = 0;

	fmd_hdl_debug(hdl, "zfs_retire_recv: '%s'", class);

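	/* The state payload is optional; if absent, state stays 0
	 * (VDEV_STATE_UNKNOWN). */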
	nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE, &state);

	/*
	 * If this is a resource notifying us of device removal, check for an
	 * available spare and attempt a replacement, unless the device is an
	 * l2arc vdev, in which case we simply offline it.
	 */
	if (strcmp(class, "resource.fs.zfs.removed") == 0 ||
	    (strcmp(class, "resource.fs.zfs.statechange") == 0 &&
	    state == VDEV_STATE_REMOVED)) {
		char *devtype;
		char *devname;

		if (nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_POOL_GUID,
		    &pool_guid) != 0 ||
		    nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID,
		    &vdev_guid) != 0)
			return;

		if ((zhp = find_by_guid(zhdl, pool_guid, vdev_guid,
		    &vdev)) == NULL)
			return;

		devname = zpool_vdev_name(NULL, zhp, vdev, B_FALSE);

		/* Can't replace l2arc with a spare: offline the device */
		if (nvlist_lookup_string(nvl, FM_EREPORT_PAYLOAD_ZFS_VDEV_TYPE,
		    &devtype) == 0 && strcmp(devtype, VDEV_TYPE_L2CACHE) == 0) {
			fmd_hdl_debug(hdl, "zpool_vdev_offline '%s'", devname);
			zpool_vdev_offline(zhp, devname, B_TRUE);
		} else if (!fmd_prop_get_int32(hdl, "spare_on_remove") ||
		    replace_with_spare(hdl, zhp, vdev) == B_FALSE) {
			/* Could not handle with spare */
			fmd_hdl_debug(hdl, "no spare for '%s'", devname);
		}

		free(devname);
		zpool_close(zhp);
		return;
	}

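	/* A list.resolved event means the case is closed; nothing to do. */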
	if (strcmp(class, FM_LIST_RESOLVED_CLASS) == 0)
		return;

	/*
	 * Note: on zfsonlinux, statechange events cover more than just
	 * transitions to healthy, so we need to confirm the actual state
	 * value.
	 */
	if (strcmp(class, "resource.fs.zfs.statechange") == 0 &&
	    state == VDEV_STATE_HEALTHY) {
		zfs_vdev_repair(hdl, nvl);
		return;
	}
	if (strcmp(class, "sysevent.fs.zfs.vdev_remove") == 0) {
		zfs_vdev_repair(hdl, nvl);
		return;
	}

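	/*
	 * Clear the repaired-vdev bookkeeping before processing a new fault
	 * or repair list.
	 */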
	zfs_retire_clear_data(hdl, zdp);

	if (strcmp(class, FM_LIST_REPAIRED_CLASS) == 0)
		is_repair = B_TRUE;
	else
		is_repair = B_FALSE;

	/*
	 * We subscribe to zfs faults as well as all repair events.
	 */
	if (nvlist_lookup_nvlist_array(nvl, FM_SUSPECT_FAULT_LIST,
	    &faults, &nfaults) != 0)
		return;

	for (f = 0; f < nfaults; f++) {
		fault = faults[f];

		fault_device = B_FALSE;
		degrade_device = B_FALSE;
		is_disk = B_FALSE;

		if (nvlist_lookup_boolean_value(fault, FM_SUSPECT_RETIRE,
		    &retire) == 0 && retire == 0)
			continue;

		/*
		 * While we subscribe to fault.fs.zfs.*, we only take action
		 * for faults targeting a specific vdev (open failure or SERD
		 * failure).  We also subscribe to fault.io.* events, so that
		 * faulty disks will be faulted in the ZFS configuration.
		 */
		if (fmd_nvl_class_match(hdl, fault, "fault.fs.zfs.vdev.io")) {
			fault_device = B_TRUE;
		} else if (fmd_nvl_class_match(hdl, fault,
		    "fault.fs.zfs.vdev.checksum")) {
			degrade_device = B_TRUE;
		} else if (fmd_nvl_class_match(hdl, fault,
		    "fault.fs.zfs.device")) {
			fault_device = B_FALSE;
		} else if (fmd_nvl_class_match(hdl, fault, "fault.io.*")) {
			is_disk = B_TRUE;
			fault_device = B_TRUE;
		} else {
			continue;
		}

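		/*
		 * fault.io.* suspects are skipped here: without a disk
		 * topology there is no way to map the disk back to a vdev.
		 */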
		if (is_disk) {
			continue;
		} else {
			/*
			 * This is a ZFS fault.  Look up the resource, and
			 * attempt to find the matching vdev.
			 */
			if (nvlist_lookup_nvlist(fault, FM_FAULT_RESOURCE,
			    &resource) != 0 ||
			    nvlist_lookup_string(resource, FM_FMRI_SCHEME,
			    &scheme) != 0)
				continue;

			if (strcmp(scheme, FM_FMRI_SCHEME_ZFS) != 0)
				continue;

			if (nvlist_lookup_uint64(resource, FM_FMRI_ZFS_POOL,
			    &pool_guid) != 0)
				continue;

			if (nvlist_lookup_uint64(resource, FM_FMRI_ZFS_VDEV,
			    &vdev_guid) != 0) {
				if (is_repair)
					vdev_guid = 0;
				else
					continue;
			}

			if ((zhp = find_by_guid(zhdl, pool_guid, vdev_guid,
			    &vdev)) == NULL)
				continue;

			aux = VDEV_AUX_ERR_EXCEEDED;
		}

		if (vdev_guid == 0) {
			/*
			 * For pool-level repair events, clear the entire pool.
			 */
			fmd_hdl_debug(hdl, "zpool_clear of pool '%s'",
			    zpool_get_name(zhp));
			(void) zpool_clear(zhp, NULL, NULL);
			zpool_close(zhp);
			continue;
		}

		/*
		 * If this is a repair event, then mark the vdev as repaired and
		 * continue.
		 */
		if (is_repair) {
			repair_done = 1;
			fmd_hdl_debug(hdl, "zpool_clear of pool '%s' vdev %llu",
			    zpool_get_name(zhp), vdev_guid);
			(void) zpool_vdev_clear(zhp, vdev_guid);
			zpool_close(zhp);
			continue;
		}

		/*
		 * Actively fault the device if needed.
		 */
		if (fault_device)
			(void) zpool_vdev_fault(zhp, vdev_guid, aux);
		if (degrade_device)
			(void) zpool_vdev_degrade(zhp, vdev_guid, aux);

		if (fault_device || degrade_device)
			fmd_hdl_debug(hdl, "zpool_vdev_%s: vdev %llu on '%s'",
			    fault_device ? "fault" : "degrade", vdev_guid,
			    zpool_get_name(zhp));

		/*
		 * Attempt to substitute a hot spare.
		 */
		(void) replace_with_spare(hdl, zhp, vdev);
		zpool_close(zhp);
	}

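	/*
	 * If this was a repair list and at least one vdev was cleared, mark
	 * the case resolved so it is not replayed.
	 */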
	if (strcmp(class, FM_LIST_REPAIRED_CLASS) == 0 && repair_done &&
	    nvlist_lookup_string(nvl, FM_SUSPECT_UUID, &uuid) == 0)
		fmd_case_uuresolved(hdl, uuid);
}

static const fmd_hdl_ops_t fmd_ops = {
	zfs_retire_recv,	/* fmdo_recv */
	NULL,			/* fmdo_timeout */
	NULL,			/* fmdo_close */
	NULL,			/* fmdo_stats */
	NULL,			/* fmdo_gc */
};

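/*
 * Module tunables: "spare_on_remove" controls whether a hot spare is
 * swapped in when a device is physically removed (default: true).
 */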
static const fmd_prop_t fmd_props[] = {
	{ "spare_on_remove", FMD_TYPE_BOOL, "true" },
	{ NULL, 0, NULL }
};

static const fmd_hdl_info_t fmd_info = {
	"ZFS Retire Agent", "1.0", &fmd_ops, fmd_props
};

void
_zfs_retire_init(fmd_hdl_t *hdl)
{
	zfs_retire_data_t *zdp;
	libzfs_handle_t *zhdl;

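	/* Without a libzfs handle the agent cannot function; stay inactive. */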
	if ((zhdl = libzfs_init()) == NULL)
		return;

	if (fmd_hdl_register(hdl, FMD_API_VERSION, &fmd_info) != 0) {
		libzfs_fini(zhdl);
		return;
	}

	zdp = fmd_hdl_zalloc(hdl, sizeof (zfs_retire_data_t), FMD_SLEEP);
	zdp->zrd_hdl = zhdl;

	fmd_hdl_setspecific(hdl, zdp);
}

void
_zfs_retire_fini(fmd_hdl_t *hdl)
{
	zfs_retire_data_t *zdp = fmd_hdl_getspecific(hdl);

	if (zdp != NULL) {
		zfs_retire_clear_data(hdl, zdp);
		libzfs_fini(zdp->zrd_hdl);
		fmd_hdl_free(hdl, zdp, sizeof (zfs_retire_data_t));
	}
}