xref: /freebsd/sys/contrib/openzfs/cmd/zed/agents/zfs_mod.c (revision c7a063741720ef81d4caa4613242579d12f1d605)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or https://opensource.org/licenses/CDDL-1.0.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2012 by Delphix. All rights reserved.
24  * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
25  * Copyright (c) 2016, 2017, Intel Corporation.
26  * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
27  */
28 
29 /*
30  * ZFS syseventd module.
31  *
32  * file origin: openzfs/usr/src/cmd/syseventd/modules/zfs_mod/zfs_mod.c
33  *
34  * The purpose of this module is to identify when devices are added to the
35  * system, and appropriately online or replace the affected vdevs.
36  *
37  * When a device is added to the system:
38  *
39  * 	1. Search for any vdevs whose devid matches that of the newly added
40  *	   device.
41  *
42  * 	2. If no vdevs are found, then search for any vdevs whose udev path
43  *	   matches that of the new device.
44  *
45  *	3. If no vdevs match by either method, then ignore the event.
46  *
47  * 	4. Attempt to online the device with a flag to indicate that it should
48  *	   be unspared when resilvering completes.  If this succeeds, then the
49  *	   same device was inserted and we should continue normally.
50  *
51  *	5. If the pool does not have the 'autoreplace' property set, attempt to
52  *	   online the device again without the unspare flag, which will
53  *	   generate a FMA fault.
54  *
55  *	6. If the pool has the 'autoreplace' property set, and the matching vdev
56  *	   is a whole disk, then label the new disk and attempt a 'zpool
57  *	   replace'.
58  *
59  * The module responds to EC_DEV_ADD events.  The special ESC_ZFS_VDEV_CHECK
60  * event indicates that a device failed to open during pool load, but the
61  * autoreplace property was set.  In this case, we deferred the associated
62  * FMA fault until our module had a chance to process the autoreplace logic.
63  * If the device could not be replaced, then the second online attempt will
64  * trigger the FMA fault that we skipped earlier.
65  *
66  * On Linux, udev provides a disk insert event for both the disk and the partition.
67  */
68 
69 #include <ctype.h>
70 #include <fcntl.h>
71 #include <libnvpair.h>
72 #include <libzfs.h>
73 #include <libzutil.h>
74 #include <limits.h>
75 #include <stddef.h>
76 #include <stdlib.h>
77 #include <string.h>
78 #include <syslog.h>
79 #include <sys/list.h>
80 #include <sys/sunddi.h>
81 #include <sys/sysevent/eventdefs.h>
82 #include <sys/sysevent/dev.h>
83 #include <thread_pool.h>
84 #include <pthread.h>
85 #include <unistd.h>
86 #include <errno.h>
87 #include "zfs_agents.h"
88 #include "../zed_log.h"
89 
90 #define	DEV_BYID_PATH	"/dev/disk/by-id/"
91 #define	DEV_BYPATH_PATH	"/dev/disk/by-path/"
92 #define	DEV_BYVDEV_PATH	"/dev/disk/by-vdev/"
93 
94 typedef void (*zfs_process_func_t)(zpool_handle_t *, nvlist_t *, boolean_t);
95 
96 libzfs_handle_t *g_zfshdl;
97 list_t g_pool_list;	/* list of unavailable pools at initialization */
98 list_t g_device_list;	/* list of disks with asynchronous label request */
99 tpool_t *g_tpool;
100 boolean_t g_enumeration_done;
101 pthread_t g_zfs_tid;	/* zfs_enum_pools() thread */
102 
103 typedef struct unavailpool {
104 	zpool_handle_t	*uap_zhp;
105 	list_node_t	uap_node;
106 } unavailpool_t;
107 
108 typedef struct pendingdev {
109 	char		pd_physpath[128];
110 	list_node_t	pd_node;
111 } pendingdev_t;
112 
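/*
 * Return the state of the pool's top-level vdev tree, taken from the
 * ZPOOL_CONFIG_VDEV_STATS array of the root vdev in the pool config.
 */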
113 static int
114 zfs_toplevel_state(zpool_handle_t *zhp)
115 {
116 	nvlist_t *nvroot;
117 	vdev_stat_t *vs;
118 	unsigned int c;
119 
120 	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
121 	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
122 	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
123 	    (uint64_t **)&vs, &c) == 0);
124 	return (vs->vs_state);
125 }
126 
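/*
 * zpool_iter() callback used during startup enumeration: pools whose
 * top-level vdev state is below VDEV_STATE_DEGRADED are queued on the
 * list passed in 'data' so their datasets can be enabled once the pool
 * becomes available again; handles for healthy pools are closed here.
 */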
127 static int
128 zfs_unavail_pool(zpool_handle_t *zhp, void *data)
129 {
130 	zed_log_msg(LOG_INFO, "zfs_unavail_pool: examining '%s' (state %d)",
131 	    zpool_get_name(zhp), (int)zfs_toplevel_state(zhp));
132 
133 	if (zfs_toplevel_state(zhp) < VDEV_STATE_DEGRADED) {
134 		unavailpool_t *uap;
135 		uap = malloc(sizeof (unavailpool_t));
136 		if (uap == NULL) {
137 			perror("malloc");
138 			exit(EXIT_FAILURE);
139 		}
140 
141 		uap->uap_zhp = zhp;
142 		list_insert_tail((list_t *)data, uap);
143 	} else {
144 		zpool_close(zhp);
145 	}
146 	return (0);
147 }
148 
149 /*
150  * Two-stage replace on Linux: since we get disk notifications, we can
151  * wait for the partitioned disk slice to show up.
152  *
153  * The first stage tags the disk, initiates async partitioning, and returns.
154  * The second stage finds the tag and proceeds to the ZFS labeling/replace:
155  *
156  * disk-add --> label-disk + tag-disk --> partition-add --> zpool_vdev_attach
157  *
158  * 1. physical match with no filesystem and no partition:
159  *	tag it, partition the disk
160  *
161  * 2. physical match again: see the partition and the tag, then proceed
162  *    with the ZFS label/replace
163  *
164  */
165 
166 /*
167  * The device associated with the given vdev (either by devid or physical path)
168  * has been added to the system.  If 'isdisk' is set, then we only attempt a
169  * replacement if it's a whole disk.  This also implies that we should label the
170  * disk first.
171  *
172  * First, we attempt to online the device (making sure to undo any spare
173  * operation when finished).  If this succeeds, then we're done.  If it fails,
174  * and the new state is VDEV_CANT_OPEN, it indicates that the device was opened,
175  * but that the label was not what we expected.  If the 'autoreplace' property
176  * is enabled, then we relabel the disk (if specified), and attempt a 'zpool
177  * replace'.  If the online is successful, but the new state is something else
178  * (REMOVED or FAULTED), it indicates that we're out of sync or in some sort of
179  * race, and we should avoid attempting to relabel the disk.
180  *
181  * We can also arrive here from an ESC_ZFS_VDEV_CHECK event.
182  */
183 static void
184 zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
185 {
186 	char *path;
187 	vdev_state_t newstate;
188 	nvlist_t *nvroot, *newvd;
189 	pendingdev_t *device;
190 	uint64_t wholedisk = 0ULL;
191 	uint64_t offline = 0ULL, faulted = 0ULL;
192 	uint64_t guid = 0ULL;
193 	char *physpath = NULL, *new_devid = NULL, *enc_sysfs_path = NULL;
194 	char rawpath[PATH_MAX], fullpath[PATH_MAX];
195 	char devpath[PATH_MAX];
196 	int ret;
197 	boolean_t is_sd = B_FALSE;
198 	boolean_t is_mpath_wholedisk = B_FALSE;
199 	uint_t c;
200 	vdev_stat_t *vs;
201 
202 	if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &path) != 0)
203 		return;
204 
205 	/* Skip healthy disks */
206 	verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
207 	    (uint64_t **)&vs, &c) == 0);
208 	if (vs->vs_state == VDEV_STATE_HEALTHY) {
209 		zed_log_msg(LOG_INFO, "%s: %s is already healthy, skip it.",
210 		    __func__, path);
211 		return;
212 	}
213 
214 	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &physpath);
215 	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
216 	    &enc_sysfs_path);
217 	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
218 	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_OFFLINE, &offline);
219 	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_FAULTED, &faulted);
220 
221 	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_GUID, &guid);
222 
223 	/*
224 	 * Special case:
225 	 *
226 	 * We've seen times where a disk won't have a ZPOOL_CONFIG_PHYS_PATH
227 	 * entry in their config. For example, on this force-faulted disk:
228 	 *
229 	 *	children[0]:
230 	 *	   type: 'disk'
231 	 *	   id: 0
232 	 *	   guid: 14309659774640089719
233 	 *        path: '/dev/disk/by-vdev/L28'
234 	 *        whole_disk: 0
235 	 *        DTL: 654
236 	 *        create_txg: 4
237 	 *        com.delphix:vdev_zap_leaf: 1161
238 	 *        faulted: 1
239 	 *        aux_state: 'external'
240 	 *	children[1]:
241 	 *        type: 'disk'
242 	 *        id: 1
243 	 *        guid: 16002508084177980912
244 	 *        path: '/dev/disk/by-vdev/L29'
245 	 *        devid: 'dm-uuid-mpath-35000c500a61d68a3'
246 	 *        phys_path: 'L29'
247 	 *        vdev_enc_sysfs_path: '/sys/class/enclosure/0:0:1:0/SLOT 30 32'
248 	 *        whole_disk: 0
249 	 *        DTL: 1028
250 	 *        create_txg: 4
251 	 *        com.delphix:vdev_zap_leaf: 131
252 	 *
253 	 * If the disk's path is a /dev/disk/by-vdev/ path, then we can infer
254 	 * the ZPOOL_CONFIG_PHYS_PATH from the by-vdev disk name.
255 	 */
256 	if (physpath == NULL && path != NULL) {
257 		/* If path begins with "/dev/disk/by-vdev/" ... */
258 		if (strncmp(path, DEV_BYVDEV_PATH,
259 		    strlen(DEV_BYVDEV_PATH)) == 0) {
260 			/* Set physpath to the char after "/dev/disk/by-vdev/" */
261 			physpath = &path[strlen(DEV_BYVDEV_PATH)];
262 		}
263 	}
264 
265 	/*
266 	 * We don't want to autoreplace offlined disks.  However, we do want to
267 	 * replace force-faulted disks (`zpool offline -f`).  Force-faulted
268 	 * disks have both offline=1 and faulted=1 in the nvlist.
269 	 */
270 	if (offline && !faulted) {
271 		zed_log_msg(LOG_INFO, "%s: %s is offline, skip autoreplace",
272 		    __func__, path);
273 		return;
274 	}
275 
276 	is_mpath_wholedisk = is_mpath_whole_disk(path);
277 	zed_log_msg(LOG_INFO, "zfs_process_add: pool '%s' vdev '%s', phys '%s'"
278 	    " %s blank disk, %s mpath blank disk, %s labeled, enc sysfs '%s', "
279 	    "(guid %llu)",
280 	    zpool_get_name(zhp), path,
281 	    physpath ? physpath : "NULL",
282 	    wholedisk ? "is" : "not",
283 	    is_mpath_wholedisk? "is" : "not",
284 	    labeled ? "is" : "not",
285 	    enc_sysfs_path,
286 	    (long long unsigned int)guid);
287 
288 	/*
289 	 * The VDEV guid is preferred for identification (gets passed in path)
290 	 */
291 	if (guid != 0) {
292 		(void) snprintf(fullpath, sizeof (fullpath), "%llu",
293 		    (long long unsigned int)guid);
294 	} else {
295 		/*
296 		 * otherwise use path sans partition suffix for whole disks
297 		 */
298 		(void) strlcpy(fullpath, path, sizeof (fullpath));
299 		if (wholedisk) {
300 			char *spath = zfs_strip_partition(fullpath);
301 			if (!spath) {
302 				zed_log_msg(LOG_INFO, "%s: Can't alloc",
303 				    __func__);
304 				return;
305 			}
306 
307 			(void) strlcpy(fullpath, spath, sizeof (fullpath));
308 			free(spath);
309 		}
310 	}
311 
312 	/*
313 	 * Attempt to online the device.
314 	 */
315 	if (zpool_vdev_online(zhp, fullpath,
316 	    ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE, &newstate) == 0 &&
317 	    (newstate == VDEV_STATE_HEALTHY ||
318 	    newstate == VDEV_STATE_DEGRADED)) {
319 		zed_log_msg(LOG_INFO,
320 		    "  zpool_vdev_online: vdev '%s' ('%s') is "
321 		    "%s", fullpath, physpath, (newstate == VDEV_STATE_HEALTHY) ?
322 		    "HEALTHY" : "DEGRADED");
323 		return;
324 	}
325 
326 	/*
327 	 * vdev_id alias rule for using scsi_debug devices (FMA automated
328 	 * testing)
329 	 */
330 	if (physpath != NULL && strcmp("scsidebug", physpath) == 0)
331 		is_sd = B_TRUE;
332 
333 	/*
334 	 * If the pool doesn't have the autoreplace property set, then use
335 	 * vdev online to trigger a FMA fault by posting an ereport.
336 	 */
337 	if (!zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOREPLACE, NULL) ||
338 	    !(wholedisk || is_mpath_wholedisk) || (physpath == NULL)) {
339 		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
340 		    &newstate);
341 		zed_log_msg(LOG_INFO, "Pool's autoreplace is not enabled or "
342 		    "not a blank disk for '%s' ('%s')", fullpath,
343 		    physpath);
344 		return;
345 	}
346 
347 	/*
348 	 * Convert physical path into its current device node.  Rawpath
349 	 * needs to be /dev/disk/by-vdev for a scsi_debug device since
350 	 * /dev/disk/by-path will not be present.
351 	 */
352 	(void) snprintf(rawpath, sizeof (rawpath), "%s%s",
353 	    is_sd ? DEV_BYVDEV_PATH : DEV_BYPATH_PATH, physpath);
354 
355 	if (realpath(rawpath, devpath) == NULL && !is_mpath_wholedisk) {
356 		zed_log_msg(LOG_INFO, "  realpath: %s failed (%s)",
357 		    rawpath, strerror(errno));
358 
359 		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
360 		    &newstate);
361 
362 		zed_log_msg(LOG_INFO, "  zpool_vdev_online: %s FORCEFAULT (%s)",
363 		    fullpath, libzfs_error_description(g_zfshdl));
364 		return;
365 	}
366 
367 	/* Only autoreplace bad disks */
368 	if ((vs->vs_state != VDEV_STATE_DEGRADED) &&
369 	    (vs->vs_state != VDEV_STATE_FAULTED) &&
370 	    (vs->vs_state != VDEV_STATE_CANT_OPEN)) {
371 		zed_log_msg(LOG_INFO, "  not autoreplacing since disk isn't in "
372 		    "a bad state (currently %llu)", vs->vs_state);
373 		return;
374 	}
375 
376 	nvlist_lookup_string(vdev, "new_devid", &new_devid);
377 
378 	if (is_mpath_wholedisk) {
379 		/* Don't label device mapper or multipath disks. */
380 	} else if (!labeled) {
381 		/*
382 		 * we're auto-replacing a raw disk, so label it first
383 		 */
384 		char *leafname;
385 
386 		/*
387 		 * If this is a request to label a whole disk, then attempt to
388 		 * write out the label.  Before we can label the disk, we need
389 		 * to map the physical string that was matched on to the
390 		 * underlying device node.
391 		 *
392 		 * If any part of this process fails, then do a force online
393 		 * to trigger a ZFS fault for the device (and any hot spare
394 		 * replacement).
395 		 */
396 		leafname = strrchr(devpath, '/') + 1;
397 
398 		/*
399 		 * If this is a request to label a whole disk, then attempt to
400 		 * Now attempt to write out the label to the underlying leaf
401 		 * device node resolved above.
402 		if (zpool_label_disk(g_zfshdl, zhp, leafname) != 0) {
403 			zed_log_msg(LOG_INFO, "  zpool_label_disk: could not "
404 			    "label '%s' (%s)", leafname,
405 			    libzfs_error_description(g_zfshdl));
406 
407 			(void) zpool_vdev_online(zhp, fullpath,
408 			    ZFS_ONLINE_FORCEFAULT, &newstate);
409 			return;
410 		}
411 
412 		/*
413 		 * The disk labeling is asynchronous on Linux. Just record
414 		 * this label request and return as there will be another
415 		 * disk add event for the partition after the labeling is
416 		 * completed.
417 		 */
418 		device = malloc(sizeof (pendingdev_t));
419 		if (device == NULL) {
420 			perror("malloc");
421 			exit(EXIT_FAILURE);
422 		}
423 
424 		(void) strlcpy(device->pd_physpath, physpath,
425 		    sizeof (device->pd_physpath));
426 		list_insert_tail(&g_device_list, device);
427 
428 		zed_log_msg(LOG_INFO, "  zpool_label_disk: async '%s' (%llu)",
429 		    leafname, (u_longlong_t)guid);
430 
431 		return;	/* resumes at EC_DEV_ADD.ESC_DISK for partition */
432 
433 	} else /* labeled */ {
434 		boolean_t found = B_FALSE;
435 		/*
436 		 * match up with request above to label the disk
437 		 */
438 		for (device = list_head(&g_device_list); device != NULL;
439 		    device = list_next(&g_device_list, device)) {
440 			if (strcmp(physpath, device->pd_physpath) == 0) {
441 				list_remove(&g_device_list, device);
442 				free(device);
443 				found = B_TRUE;
444 				break;
445 			}
446 			zed_log_msg(LOG_INFO, "zpool_label_disk: %s != %s",
447 			    physpath, device->pd_physpath);
448 		}
449 		if (!found) {
450 			/* unexpected partition slice encountered */
451 			zed_log_msg(LOG_INFO, "labeled disk %s unexpected here",
452 			    fullpath);
453 			(void) zpool_vdev_online(zhp, fullpath,
454 			    ZFS_ONLINE_FORCEFAULT, &newstate);
455 			return;
456 		}
457 
458 		zed_log_msg(LOG_INFO, "  zpool_label_disk: resume '%s' (%llu)",
459 		    physpath, (u_longlong_t)guid);
460 
461 		(void) snprintf(devpath, sizeof (devpath), "%s%s",
462 		    DEV_BYID_PATH, new_devid);
463 	}
464 
465 	/*
466 	 * Construct the root vdev to pass to zpool_vdev_attach().  While adding
467 	 * the entire vdev structure is harmless, we construct a reduced set of
468 	 * path/physpath/wholedisk to keep it simple.
469 	 */
470 	if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0) {
471 		zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
472 		return;
473 	}
474 	if (nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0) {
475 		zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
476 		nvlist_free(nvroot);
477 		return;
478 	}
479 
480 	if (nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
481 	    nvlist_add_string(newvd, ZPOOL_CONFIG_PATH, path) != 0 ||
482 	    nvlist_add_string(newvd, ZPOOL_CONFIG_DEVID, new_devid) != 0 ||
483 	    (physpath != NULL && nvlist_add_string(newvd,
484 	    ZPOOL_CONFIG_PHYS_PATH, physpath) != 0) ||
485 	    (enc_sysfs_path != NULL && nvlist_add_string(newvd,
486 	    ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH, enc_sysfs_path) != 0) ||
487 	    nvlist_add_uint64(newvd, ZPOOL_CONFIG_WHOLE_DISK, wholedisk) != 0 ||
488 	    nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
489 	    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
490 	    (const nvlist_t **)&newvd, 1) != 0) {
491 		zed_log_msg(LOG_WARNING, "zfs_mod: unable to add nvlist pairs");
492 		nvlist_free(newvd);
493 		nvlist_free(nvroot);
494 		return;
495 	}
496 
497 	nvlist_free(newvd);
498 
499 	/*
500 	 * Wait for udev to verify the links exist, then auto-replace
501 	 * the leaf disk at the same physical location.
502 	 */
503 	if (zpool_label_disk_wait(path, 3000) != 0) {
504 		zed_log_msg(LOG_WARNING, "zfs_mod: expected replacement "
505 		    "disk %s is missing", path);
506 		nvlist_free(nvroot);
507 		return;
508 	}
509 
510 	/*
511 	 * Prefer sequential resilvering when supported (mirrors and dRAID),
512 	 * otherwise fall back to a traditional healing resilver.
513 	 */
514 	ret = zpool_vdev_attach(zhp, fullpath, path, nvroot, B_TRUE, B_TRUE);
515 	if (ret != 0) {
516 		ret = zpool_vdev_attach(zhp, fullpath, path, nvroot,
517 		    B_TRUE, B_FALSE);
518 	}
519 
520 	zed_log_msg(LOG_INFO, "  zpool_vdev_replace: %s with %s (%s)",
521 	    fullpath, path, (ret == 0) ? "no errors" :
522 	    libzfs_error_description(g_zfshdl));
523 
524 	nvlist_free(nvroot);
525 }
526 
527 /*
528  * Utility functions to find a vdev matching given criteria.
529  */
530 typedef struct dev_data {
531 	const char		*dd_compare;
532 	const char		*dd_prop;
533 	zfs_process_func_t	dd_func;
534 	boolean_t		dd_found;
535 	boolean_t		dd_islabeled;
536 	uint64_t		dd_pool_guid;
537 	uint64_t		dd_vdev_guid;
538 	uint64_t		dd_new_vdev_guid;
539 	const char		*dd_new_devid;
540 } dev_data_t;
541 
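/*
 * Recursively walk a vdev tree (including any spare and L2ARC vdevs),
 * looking for the vdev described by 'data': matched by vdev GUID when
 * dd_vdev_guid is set, otherwise by comparing dd_compare against the
 * nvlist property named by dd_prop.  On a match, dd_found is set and
 * dd_func is invoked on the matching vdev's nvlist.
 */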
542 static void
543 zfs_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *data)
544 {
545 	dev_data_t *dp = data;
546 	char *path = NULL;
547 	uint_t c, children;
548 	nvlist_t **child;
549 	uint64_t guid = 0;
550 
551 	/*
552 	 * First iterate over any children.
553 	 */
554 	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN,
555 	    &child, &children) == 0) {
556 		for (c = 0; c < children; c++)
557 			zfs_iter_vdev(zhp, child[c], data);
558 	}
559 
560 	/*
561 	 * Iterate over any spares and cache devices
562 	 */
563 	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_SPARES,
564 	    &child, &children) == 0) {
565 		for (c = 0; c < children; c++)
566 			zfs_iter_vdev(zhp, child[c], data);
567 	}
568 	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_L2CACHE,
569 	    &child, &children) == 0) {
570 		for (c = 0; c < children; c++)
571 			zfs_iter_vdev(zhp, child[c], data);
572 	}
573 
574 	/* once a vdev was matched and processed there is nothing left to do */
575 	if (dp->dd_found)
576 		return;
577 	(void) nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_GUID, &guid);
578 
579 	/*
580 	 * Match by GUID if available, otherwise fall back to devid or physical path
581 	 */
582 	if (dp->dd_vdev_guid != 0) {
583 		if (guid != dp->dd_vdev_guid)
584 			return;
585 		zed_log_msg(LOG_INFO, "  zfs_iter_vdev: matched on %llu", guid);
586 		dp->dd_found = B_TRUE;
587 
588 	} else if (dp->dd_compare != NULL) {
589 		/*
590 		 * NOTE: On Linux there is an event for partition, so unlike
591 		 * illumos, substring matching is not required to accommodate
592 		 * the partition suffix. An exact match will be present in
593 		 * the dp->dd_compare value.
594 		 * If the attached disk already contains a vdev GUID, it means
595 		 * the disk is not clean.  In such a scenario, matching on the
596 		 * physical path alone would fault the disk when we try to
597 		 * online it.  So we only want to proceed if either the GUID
598 		 * matches the last attached disk or the disk is in a clean
599 		 * state.
600 		 */
601 		if (nvlist_lookup_string(nvl, dp->dd_prop, &path) != 0 ||
602 		    strcmp(dp->dd_compare, path) != 0) {
603 			zed_log_msg(LOG_INFO, "  %s: no match (%s != vdev %s)",
604 			    __func__, dp->dd_compare, path);
605 			return;
606 		}
607 		if (dp->dd_new_vdev_guid != 0 && dp->dd_new_vdev_guid != guid) {
608 			zed_log_msg(LOG_INFO, "  %s: no match (GUID:%llu"
609 			    " != vdev GUID:%llu)", __func__,
610 			    dp->dd_new_vdev_guid, guid);
611 			return;
612 		}
613 
614 		zed_log_msg(LOG_INFO, "  zfs_iter_vdev: matched %s on %s",
615 		    dp->dd_prop, path);
616 		dp->dd_found = B_TRUE;
617 
618 		/* pass the new devid for use by replacing code */
619 		if (dp->dd_new_devid != NULL) {
620 			(void) nvlist_add_string(nvl, "new_devid",
621 			    dp->dd_new_devid);
622 		}
623 	}
624 
625 	(dp->dd_func)(zhp, nvl, dp->dd_islabeled);
626 }
627 
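/*
 * Thread-pool task: mount and share the datasets of a pool that has
 * transitioned back to an available state, then release the pool handle
 * and free the unavailpool_t entry.
 */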
628 static void
629 zfs_enable_ds(void *arg)
630 {
631 	unavailpool_t *pool = (unavailpool_t *)arg;
632 
633 	(void) zpool_enable_datasets(pool->uap_zhp, NULL, 0);
634 	zpool_close(pool->uap_zhp);
635 	free(pool);
636 }
637 
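/*
 * zpool_iter() callback: search this pool's vdev tree for a device
 * matching the criteria in 'data' and apply dd_func to it.  Once the
 * initial pool enumeration has finished, also check whether a pool from
 * the unavailable list has become at least DEGRADED and, if so, hand it
 * to zfs_enable_ds() on the thread pool.  Returns nonzero (stopping the
 * iteration) once a match has been processed.
 */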
638 static int
639 zfs_iter_pool(zpool_handle_t *zhp, void *data)
640 {
641 	nvlist_t *config, *nvl;
642 	dev_data_t *dp = data;
643 	uint64_t pool_guid;
644 	unavailpool_t *pool;
645 
646 	zed_log_msg(LOG_INFO, "zfs_iter_pool: evaluating vdevs on %s (by %s)",
647 	    zpool_get_name(zhp), dp->dd_vdev_guid ? "GUID" : dp->dd_prop);
648 
649 	/*
650 	 * For each vdev in this pool, look for a match to apply dd_func
651 	 */
652 	if ((config = zpool_get_config(zhp, NULL)) != NULL) {
653 		if (dp->dd_pool_guid == 0 ||
654 		    (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
655 		    &pool_guid) == 0 && pool_guid == dp->dd_pool_guid)) {
656 			(void) nvlist_lookup_nvlist(config,
657 			    ZPOOL_CONFIG_VDEV_TREE, &nvl);
658 			zfs_iter_vdev(zhp, nvl, data);
659 		}
660 	} else {
661 		zed_log_msg(LOG_INFO, "%s: no config\n", __func__);
662 	}
663 
664 	/*
665 	 * if this pool was originally unavailable,
666 	 * then enable its datasets asynchronously
667 	 */
668 	if (g_enumeration_done)  {
669 		for (pool = list_head(&g_pool_list); pool != NULL;
670 		    pool = list_next(&g_pool_list, pool)) {
671 
672 			if (strcmp(zpool_get_name(zhp),
673 			    zpool_get_name(pool->uap_zhp)))
674 				continue;
675 			if (zfs_toplevel_state(zhp) >= VDEV_STATE_DEGRADED) {
676 				list_remove(&g_pool_list, pool);
677 				(void) tpool_dispatch(g_tpool, zfs_enable_ds,
678 				    pool);
679 				break;
680 			}
681 		}
682 	}
683 
684 	zpool_close(zhp);
685 	return (dp->dd_found);	/* cease iteration after a match */
686 }
687 
688 /*
689  * Given a physical device location, iterate over all
690  * (pool, vdev) pairs which correspond to that location.
691  */
692 static boolean_t
693 devphys_iter(const char *physical, const char *devid, zfs_process_func_t func,
694     boolean_t is_slice, uint64_t new_vdev_guid)
695 {
696 	dev_data_t data = { 0 };
697 
698 	data.dd_compare = physical;
699 	data.dd_func = func;
700 	data.dd_prop = ZPOOL_CONFIG_PHYS_PATH;
701 	data.dd_found = B_FALSE;
702 	data.dd_islabeled = is_slice;
703 	data.dd_new_devid = devid;	/* used by auto replace code */
704 	data.dd_new_vdev_guid = new_vdev_guid;
705 
706 	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
707 
708 	return (data.dd_found);
709 }
710 
711 /*
712  * Given a device identifier, find any vdevs with a matching by-vdev
713  * path.  Normally we shouldn't need this as the comparison would be
714  * made earlier in the devphys_iter().  For example, if we were replacing
715  * /dev/disk/by-vdev/L28, normally devphys_iter() would match the
716  * ZPOOL_CONFIG_PHYS_PATH of "L28" from the old disk config to "L28"
717  * of the new disk config.  However, we've seen cases where
718  * ZPOOL_CONFIG_PHYS_PATH was not in the config for the old disk.  Here's
719  * an example of a real 2-disk mirror pool where one disk was force
720  * faulted:
721  *
722  *       com.delphix:vdev_zap_top: 129
723  *           children[0]:
724  *               type: 'disk'
725  *               id: 0
726  *               guid: 14309659774640089719
727  *               path: '/dev/disk/by-vdev/L28'
728  *               whole_disk: 0
729  *               DTL: 654
730  *               create_txg: 4
731  *               com.delphix:vdev_zap_leaf: 1161
732  *               faulted: 1
733  *               aux_state: 'external'
734  *           children[1]:
735  *               type: 'disk'
736  *               id: 1
737  *               guid: 16002508084177980912
738  *               path: '/dev/disk/by-vdev/L29'
739  *               devid: 'dm-uuid-mpath-35000c500a61d68a3'
740  *               phys_path: 'L29'
741  *               vdev_enc_sysfs_path: '/sys/class/enclosure/0:0:1:0/SLOT 30 32'
742  *               whole_disk: 0
743  *               DTL: 1028
744  *               create_txg: 4
745  *               com.delphix:vdev_zap_leaf: 131
746  *
747  * So in the case above, the only thing we could compare is the path.
748  *
749  * We can do this because we assume by-vdev paths are authoritative as physical
750  * paths.  We could not assume this for normal paths like /dev/sda since the
751  * physical location that /dev/sda points to could change over time.
752  */
753 static boolean_t
754 by_vdev_path_iter(const char *by_vdev_path, const char *devid,
755     zfs_process_func_t func, boolean_t is_slice)
756 {
757 	dev_data_t data = { 0 };
758 
759 	data.dd_compare = by_vdev_path;
760 	data.dd_func = func;
761 	data.dd_prop = ZPOOL_CONFIG_PATH;
762 	data.dd_found = B_FALSE;
763 	data.dd_islabeled = is_slice;
764 	data.dd_new_devid = devid;
765 
766 	if (strncmp(by_vdev_path, DEV_BYVDEV_PATH,
767 	    strlen(DEV_BYVDEV_PATH)) != 0) {
768 		/* by_vdev_path doesn't start with "/dev/disk/by-vdev/" */
769 		return (B_FALSE);
770 	}
771 
772 	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
773 
774 	return (data.dd_found);
775 }
776 
777 /*
778  * Given a device identifier, find any vdevs with a matching devid.
779  * On Linux we can match the devid directly, which is always a whole disk.
780  */
781 static boolean_t
782 devid_iter(const char *devid, zfs_process_func_t func, boolean_t is_slice)
783 {
784 	dev_data_t data = { 0 };
785 
786 	data.dd_compare = devid;
787 	data.dd_func = func;
788 	data.dd_prop = ZPOOL_CONFIG_DEVID;
789 	data.dd_found = B_FALSE;
790 	data.dd_islabeled = is_slice;
791 	data.dd_new_devid = devid;
792 
793 	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
794 
795 	return (data.dd_found);
796 }
797 
798 /*
799  * Given a device guid, find any vdevs with a matching guid.
800  */
801 static boolean_t
802 guid_iter(uint64_t pool_guid, uint64_t vdev_guid, const char *devid,
803     zfs_process_func_t func, boolean_t is_slice)
804 {
805 	dev_data_t data = { 0 };
806 
807 	data.dd_func = func;
808 	data.dd_found = B_FALSE;
809 	data.dd_pool_guid = pool_guid;
810 	data.dd_vdev_guid = vdev_guid;
811 	data.dd_islabeled = is_slice;
812 	data.dd_new_devid = devid;
813 
814 	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
815 
816 	return (data.dd_found);
817 }
818 
819 /*
820  * Handle an EC_DEV_ADD.ESC_DISK event.
821  *
822  * illumos
823  *	Expects: DEV_PHYS_PATH string in schema
824  *	Matches: vdev's ZPOOL_CONFIG_PHYS_PATH or ZPOOL_CONFIG_DEVID
825  *
826  *      path: '/dev/dsk/c0t1d0s0' (persistent)
827  *     devid: 'id1,sd@SATA_____Hitachi_HDS72101______JP2940HZ3H74MC/a'
828  * phys_path: '/pci@0,0/pci103c,1609@11/disk@1,0:a'
829  *
830  * linux
831  *	provides: DEV_PHYS_PATH and DEV_IDENTIFIER strings in schema
832  *	Matches: vdev's ZPOOL_CONFIG_PHYS_PATH or ZPOOL_CONFIG_DEVID
833  *
834  *      path: '/dev/sdc1' (not persistent)
835  *     devid: 'ata-SAMSUNG_HD204UI_S2HGJD2Z805891-part1'
836  * phys_path: 'pci-0000:04:00.0-sas-0x4433221106000000-lun-0'
837  */
838 static int
839 zfs_deliver_add(nvlist_t *nvl)
840 {
841 	char *devpath = NULL, *devid = NULL;
842 	uint64_t pool_guid = 0, vdev_guid = 0;
843 	boolean_t is_slice;
844 
845 	/*
846 	 * Expecting a devid string and an optional physical location and guid
847 	 */
848 	if (nvlist_lookup_string(nvl, DEV_IDENTIFIER, &devid) != 0) {
849 		zed_log_msg(LOG_INFO, "%s: no dev identifier\n", __func__);
850 		return (-1);
851 	}
852 
853 	(void) nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devpath);
854 	(void) nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID, &pool_guid);
855 	(void) nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID, &vdev_guid);
856 
857 	is_slice = (nvlist_lookup_boolean(nvl, DEV_IS_PART) == 0);
858 
859 	zed_log_msg(LOG_INFO, "zfs_deliver_add: adding %s (%s) (is_slice %d)",
860 	    devid, devpath ? devpath : "NULL", is_slice);
861 
862 	/*
863 	 * Iterate over all vdevs looking for a match in the following order:
864 	 * 1. ZPOOL_CONFIG_DEVID (identifies the unique disk)
865 	 * 2. ZPOOL_CONFIG_PHYS_PATH (identifies disk physical location).
866 	 * 3. ZPOOL_CONFIG_GUID (identifies unique vdev).
867 	 * 4. ZPOOL_CONFIG_PATH for /dev/disk/by-vdev devices only (since
868 	 *    by-vdev paths represent physical paths).
869 	 */
870 	if (devid_iter(devid, zfs_process_add, is_slice))
871 		return (0);
872 	if (devpath != NULL && devphys_iter(devpath, devid, zfs_process_add,
873 	    is_slice, vdev_guid))
874 		return (0);
875 	if (vdev_guid != 0)
876 		(void) guid_iter(pool_guid, vdev_guid, devid, zfs_process_add,
877 		    is_slice);
878 
879 	if (devpath != NULL) {
880 		/* Can we match a /dev/disk/by-vdev/ path? */
881 		char by_vdev_path[MAXPATHLEN];
882 		snprintf(by_vdev_path, sizeof (by_vdev_path),
883 		    "/dev/disk/by-vdev/%s", devpath);
884 		if (by_vdev_path_iter(by_vdev_path, devid, zfs_process_add,
885 		    is_slice))
886 			return (0);
887 	}
888 
889 	return (0);
890 }
891 
892 /*
893  * Called when we receive a VDEV_CHECK event, which indicates a device could not
894  * be opened during initial pool open, but the autoreplace property was set on
895  * the pool.  In this case, we treat it as if it were an add event.
896  */
897 static int
898 zfs_deliver_check(nvlist_t *nvl)
899 {
900 	dev_data_t data = { 0 };
901 
902 	if (nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID,
903 	    &data.dd_pool_guid) != 0 ||
904 	    nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID,
905 	    &data.dd_vdev_guid) != 0 ||
906 	    data.dd_vdev_guid == 0)
907 		return (0);
908 
909 	zed_log_msg(LOG_INFO, "zfs_deliver_check: pool '%llu', vdev %llu",
910 	    data.dd_pool_guid, data.dd_vdev_guid);
911 
912 	data.dd_func = zfs_process_add;
913 
914 	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
915 
916 	return (0);
917 }
918 
919 /*
920  * Given a path to a vdev, look up the vdev's physical size from its
921  * config nvlist.
922  *
923  * Returns the vdev's physical size in bytes on success, 0 on error.
924  */
925 static uint64_t
926 vdev_size_from_config(zpool_handle_t *zhp, const char *vdev_path)
927 {
928 	nvlist_t *nvl = NULL;
929 	boolean_t avail_spare, l2cache, log;
930 	vdev_stat_t *vs = NULL;
931 	uint_t c;
932 
933 	nvl = zpool_find_vdev(zhp, vdev_path, &avail_spare, &l2cache, &log);
934 	if (!nvl)
935 		return (0);
936 
937 	verify(nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_VDEV_STATS,
938 	    (uint64_t **)&vs, &c) == 0);
939 	if (!vs) {
940 		zed_log_msg(LOG_INFO, "%s: no nvlist for '%s'", __func__,
941 		    vdev_path);
942 		return (0);
943 	}
944 
945 	return (vs->vs_pspace);
946 }
947 
948 /*
949  * Given a path to a vdev, lookup if the vdev is a "whole disk" in the
950  * config nvlist.  "whole disk" means that ZFS was passed a whole disk
951  * at pool creation time, which it partitioned up and has full control over.
952  * Thus a partition with wholedisk=1 set tells us that zfs created the
953  * partition at creation time.  A partition without whole disk set would have
954  * been created by externally (like with fdisk) and passed to ZFS.
955  *
956  * Returns the whole disk value (either 0 or 1).
957  */
958 static uint64_t
959 vdev_whole_disk_from_config(zpool_handle_t *zhp, const char *vdev_path)
960 {
961 	nvlist_t *nvl = NULL;
962 	boolean_t avail_spare, l2cache, log;
963 	uint64_t wholedisk = 0;
964 
965 	nvl = zpool_find_vdev(zhp, vdev_path, &avail_spare, &l2cache, &log);
966 	if (!nvl)
967 		return (0);
968 
969 	(void) nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
970 
971 	return (wholedisk);
972 }
973 
974 /*
975  * If the device size grew more than 1% then return true.
976  */
977 #define	DEVICE_GREW(oldsize, newsize) \
978 		    ((newsize > oldsize) && \
979 		    ((newsize / (newsize - oldsize)) <= 100))
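/*
 * Worked example: a device that grew from 100 GiB to 102 GiB gives
 * newsize / (newsize - oldsize) == 51 <= 100, so DEVICE_GREW() is true,
 * while one that grew from 100 GiB to 100.5 GiB gives 201 > 100 and the
 * sub-1% change is ignored.
 */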
980 
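/*
 * zpool_iter() callback for ESC_DEV_DLE events: look up the vdev named
 * by the event's vdev GUID (or physical path) in this pool.  If found
 * and it is a whole disk, reopen the pool so the kernel picks up the
 * new device size; then, if the 'autoexpand' property is on and the
 * device actually grew, online the vdev to trigger the expansion.
 * Returns 1 when the device was found, which stops the pool iteration.
 */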
981 static int
982 zfsdle_vdev_online(zpool_handle_t *zhp, void *data)
983 {
984 	boolean_t avail_spare, l2cache;
985 	nvlist_t *udev_nvl = data;
986 	nvlist_t *tgt;
987 	int error;
988 
989 	char *tmp_devname, devname[MAXPATHLEN] = "";
990 	uint64_t guid;
991 
992 	if (nvlist_lookup_uint64(udev_nvl, ZFS_EV_VDEV_GUID, &guid) == 0) {
993 		sprintf(devname, "%llu", (u_longlong_t)guid);
994 	} else if (nvlist_lookup_string(udev_nvl, DEV_PHYS_PATH,
995 	    &tmp_devname) == 0) {
996 		strlcpy(devname, tmp_devname, MAXPATHLEN);
997 		zfs_append_partition(devname, MAXPATHLEN);
998 	} else {
999 		zed_log_msg(LOG_INFO, "%s: no guid or physpath", __func__);
1000 	}
1001 
1002 	zed_log_msg(LOG_INFO, "zfsdle_vdev_online: searching for '%s' in '%s'",
1003 	    devname, zpool_get_name(zhp));
1004 
1005 	if ((tgt = zpool_find_vdev_by_physpath(zhp, devname,
1006 	    &avail_spare, &l2cache, NULL)) != NULL) {
1007 		char *path, fullpath[MAXPATHLEN];
1008 		uint64_t wholedisk = 0;
1009 
1010 		error = nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &path);
1011 		if (error) {
1012 			zpool_close(zhp);
1013 			return (0);
1014 		}
1015 
1016 		(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
1017 		    &wholedisk);
1018 
1019 		if (wholedisk) {
1020 			path = strrchr(path, '/');
1021 			if (path != NULL) {
1022 				path = zfs_strip_partition(path + 1);
1023 				if (path == NULL) {
1024 					zpool_close(zhp);
1025 					return (0);
1026 				}
1027 			} else {
1028 				zpool_close(zhp);
1029 				return (0);
1030 			}
1031 
1032 			(void) strlcpy(fullpath, path, sizeof (fullpath));
1033 			free(path);
1034 
1035 			/*
1036 			 * We need to reopen the pool associated with this
1037 			 * device so that the kernel can update the size of
1038 			 * the expanded device.  When expanding there is no
1039 			 * need to restart the scrub from the beginning.
1040 			 */
1041 			boolean_t scrub_restart = B_FALSE;
1042 			(void) zpool_reopen_one(zhp, &scrub_restart);
1043 		} else {
1044 			(void) strlcpy(fullpath, path, sizeof (fullpath));
1045 		}
1046 
1047 		if (zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
1048 			vdev_state_t newstate;
1049 
1050 			if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL) {
1051 				/*
1052 				 * If this disk's size has not changed, then
1053 				 * there's no need to do an autoexpand.  To
1054 				 * check, we look at the disk's size in its
1055 				 * config, and compare it to the disk size
1056 				 * that udev is reporting.
1057 				 */
1058 				uint64_t udev_size = 0, conf_size = 0,
1059 				    wholedisk = 0, udev_parent_size = 0;
1060 
1061 				/*
1062 				 * Get the size of our disk that udev is
1063 				 * reporting.
1064 				 */
1065 				if (nvlist_lookup_uint64(udev_nvl, DEV_SIZE,
1066 				    &udev_size) != 0) {
1067 					udev_size = 0;
1068 				}
1069 
1070 				/*
1071 				 * Get the size of our disk's parent device
1072 				 * from udev (where sda1's parent is sda).
1073 				 */
1074 				if (nvlist_lookup_uint64(udev_nvl,
1075 				    DEV_PARENT_SIZE, &udev_parent_size) != 0) {
1076 					udev_parent_size = 0;
1077 				}
1078 
1079 				conf_size = vdev_size_from_config(zhp,
1080 				    fullpath);
1081 
1082 				wholedisk = vdev_whole_disk_from_config(zhp,
1083 				    fullpath);
1084 
1085 				/*
1086 				 * Only attempt an autoexpand if the vdev size
1087 				 * changed.  There are two different cases
1088 				 * to consider.
1089 				 *
1090 				 * 1. wholedisk=1
1091 				 * If you do a 'zpool create' on a whole disk
1092 				 * (like /dev/sda), then zfs will create
1093 				 * partitions on the disk (like /dev/sda1).  In
1094 				 * that case, wholedisk=1 will be set in the
1095 				 * partition's nvlist config.  So zed will need
1096 				 * to see if your parent device (/dev/sda)
1097 				 * expanded in size, and if so, then attempt
1098 				 * the autoexpand.
1099 				 *
1100 				 * 2. wholedisk=0
1101 				 * If you do a 'zpool create' on an existing
1102 				 * partition, or a device that doesn't allow
1103 				 * partitions, then wholedisk=0, and you will
1104 				 * simply need to check if the device itself
1105 				 * expanded in size.
1106 				 */
1107 				if (DEVICE_GREW(conf_size, udev_size) ||
1108 				    (wholedisk && DEVICE_GREW(conf_size,
1109 				    udev_parent_size))) {
1110 					error = zpool_vdev_online(zhp, fullpath,
1111 					    0, &newstate);
1112 
1113 					zed_log_msg(LOG_INFO,
1114 					    "%s: autoexpanding '%s' from %llu"
1115 					    " to %llu bytes in pool '%s': %d",
1116 					    __func__, fullpath, conf_size,
1117 					    MAX(udev_size, udev_parent_size),
1118 					    zpool_get_name(zhp), error);
1119 				}
1120 			}
1121 		}
1122 		zpool_close(zhp);
1123 		return (1);
1124 	}
1125 	zpool_close(zhp);
1126 	return (0);
1127 }
1128 
1129 /*
1130  * This function handles the ESC_DEV_DLE device change event.  Use the
1131  * provided vdev guid when looking up a disk or partition.  When the guid
1132  * is not present, assume the entire disk is owned by ZFS, append the
1133  * expected -part1 partition suffix, and then look up by physical path.
1134  */
1135 static int
1136 zfs_deliver_dle(nvlist_t *nvl)
1137 {
1138 	char *devname, name[MAXPATHLEN];
1139 	uint64_t guid;
1140 
1141 	if (nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID, &guid) == 0) {
1142 		sprintf(name, "%llu", (u_longlong_t)guid);
1143 	} else if (nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devname) == 0) {
1144 		strlcpy(name, devname, MAXPATHLEN);
1145 		zfs_append_partition(name, MAXPATHLEN);
1146 	} else {
1147 		sprintf(name, "unknown");
1148 		zed_log_msg(LOG_INFO, "zfs_deliver_dle: no guid or physpath");
1149 	}
1150 
1151 	if (zpool_iter(g_zfshdl, zfsdle_vdev_online, nvl) != 1) {
1152 		zed_log_msg(LOG_INFO, "zfs_deliver_dle: device '%s' not "
1153 		    "found", name);
1154 		return (1);
1155 	}
1156 
1157 	return (0);
1158 }
1159 
1160 /*
1161  * syseventd daemon module event handler
1162  *
1163  * Handles the syseventd daemon's ZFS device-related events:
1164  *
1165  *	EC_DEV_ADD.ESC_DISK
1166  *	EC_DEV_STATUS.ESC_DEV_DLE
1167  *	EC_ZFS.ESC_ZFS_VDEV_CHECK
1168  *
1169  * Note: assumes only one thread active at a time (not thread safe)
1170  */
1171 static int
1172 zfs_slm_deliver_event(const char *class, const char *subclass, nvlist_t *nvl)
1173 {
1174 	int ret;
1175 	boolean_t is_check = B_FALSE, is_dle = B_FALSE;
1176 
1177 	if (strcmp(class, EC_DEV_ADD) == 0) {
1178 		/*
1179 		 * We're mainly interested in disk additions, but we also listen
1180 		 * for new loop devices, to allow for simplified testing.
1181 		 */
1182 		if (strcmp(subclass, ESC_DISK) != 0 &&
1183 		    strcmp(subclass, ESC_LOFI) != 0)
1184 			return (0);
1185 
1186 		is_check = B_FALSE;
1187 	} else if (strcmp(class, EC_ZFS) == 0 &&
1188 	    strcmp(subclass, ESC_ZFS_VDEV_CHECK) == 0) {
1189 		/*
1190 		 * This event signifies that a device failed to open
1191 		 * during pool load, but the 'autoreplace' property was
1192 		 * set, so we should pretend it's just been added.
1193 		 */
1194 		is_check = B_TRUE;
1195 	} else if (strcmp(class, EC_DEV_STATUS) == 0 &&
1196 	    strcmp(subclass, ESC_DEV_DLE) == 0) {
1197 		is_dle = B_TRUE;
1198 	} else {
1199 		return (0);
1200 	}
1201 
1202 	if (is_dle)
1203 		ret = zfs_deliver_dle(nvl);
1204 	else if (is_check)
1205 		ret = zfs_deliver_check(nvl);
1206 	else
1207 		ret = zfs_deliver_add(nvl);
1208 
1209 	return (ret);
1210 }
1211 
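/*
 * Startup thread: iterate over all imported pools and queue the ones
 * that are currently unavailable on g_pool_list, then flag that the
 * enumeration is complete.
 */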
1212 static void *
1213 zfs_enum_pools(void *arg)
1214 {
1215 	(void) arg;
1216 
1217 	(void) zpool_iter(g_zfshdl, zfs_unavail_pool, (void *)&g_pool_list);
1218 	/*
1219 	 * Linux - instead of using a thread pool, each list entry
1220 	 * will spawn a thread when an unavailable pool transitions
1221 	 * to available. zfs_slm_fini will wait for these threads.
1222 	 */
1223 	g_enumeration_done = B_TRUE;
1224 	return (NULL);
1225 }
1226 
1227 /*
1228  * Called from the zed daemon at startup.
1229  *
1230  * We are sent messages from zevents or the udev monitor.
1231  *
1232  * For now, each agent has its own libzfs instance.
1233  */
1234 int
1235 zfs_slm_init(void)
1236 {
1237 	if ((g_zfshdl = libzfs_init()) == NULL)
1238 		return (-1);
1239 
1240 	/*
1241 	 * collect a list of unavailable pools (asynchronously,
1242 	 * since this can take a while)
1243 	 */
1244 	list_create(&g_pool_list, sizeof (struct unavailpool),
1245 	    offsetof(struct unavailpool, uap_node));
1246 
1247 	if (pthread_create(&g_zfs_tid, NULL, zfs_enum_pools, NULL) != 0) {
1248 		list_destroy(&g_pool_list);
1249 		libzfs_fini(g_zfshdl);
1250 		return (-1);
1251 	}
1252 
1253 	pthread_setname_np(g_zfs_tid, "enum-pools");
1254 	list_create(&g_device_list, sizeof (struct pendingdev),
1255 	    offsetof(struct pendingdev, pd_node));
1256 
1257 	return (0);
1258 }
1259 
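/*
 * Module teardown: wait for the pool enumeration thread and any
 * outstanding dataset-enable tasks, then release the pool and
 * pending-device lists along with the libzfs handle.
 */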
1260 void
1261 zfs_slm_fini(void)
1262 {
1263 	unavailpool_t *pool;
1264 	pendingdev_t *device;
1265 
1266 	/* wait for zfs_enum_pools thread to complete */
1267 	(void) pthread_join(g_zfs_tid, NULL);
1268 	/* destroy the thread pool */
1269 	if (g_tpool != NULL) {
1270 		tpool_wait(g_tpool);
1271 		tpool_destroy(g_tpool);
1272 	}
1273 
1274 	while ((pool = (list_head(&g_pool_list))) != NULL) {
1275 		list_remove(&g_pool_list, pool);
1276 		zpool_close(pool->uap_zhp);
1277 		free(pool);
1278 	}
1279 	list_destroy(&g_pool_list);
1280 
1281 	while ((device = (list_head(&g_device_list))) != NULL) {
1282 		list_remove(&g_device_list, device);
1283 		free(device);
1284 	}
1285 	list_destroy(&g_device_list);
1286 
1287 	libzfs_fini(g_zfshdl);
1288 }
1289 
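/*
 * Module event entry point: log the class/subclass and hand the event
 * off to zfs_slm_deliver_event() above.
 */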
1290 void
1291 zfs_slm_event(const char *class, const char *subclass, nvlist_t *nvl)
1292 {
1293 	zed_log_msg(LOG_INFO, "zfs_slm_event: %s.%s", class, subclass);
1294 	(void) zfs_slm_deliver_event(class, subclass, nvl);
1295 }
1296