xref: /freebsd/sys/contrib/openzfs/cmd/zed/agents/zfs_mod.c (revision c57c26179033f64c2011a2d2a904ee3fa62e826a)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or https://opensource.org/licenses/CDDL-1.0.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2012 by Delphix. All rights reserved.
24  * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
25  * Copyright (c) 2016, 2017, Intel Corporation.
26  * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
27  * Copyright (c) 2023, Klara Inc.
28  */
29 
30 /*
31  * ZFS syseventd module.
32  *
33  * file origin: openzfs/usr/src/cmd/syseventd/modules/zfs_mod/zfs_mod.c
34  *
35  * The purpose of this module is to identify when devices are added to the
36  * system, and appropriately online or replace the affected vdevs.
37  *
38  * When a device is added to the system:
39  *
40  * 	1. Search for any vdevs whose devid matches that of the newly added
41  *	   device.
42  *
43  * 	2. If no vdevs are found, then search for any vdevs whose udev path
44  *	   matches that of the new device.
45  *
46  *	3. If no vdevs match by either method, then ignore the event.
47  *
48  * 	4. Attempt to online the device with a flag to indicate that it should
49  *	   be unspared when resilvering completes.  If this succeeds, then the
50  *	   same device was inserted and we should continue normally.
51  *
52  *	5. If the pool does not have the 'autoreplace' property set, attempt to
53  *	   online the device again without the unspare flag, which will
54  *	   generate a FMA fault.
55  *
56  *	6. If the pool has the 'autoreplace' property set, and the matching vdev
57  *	   is a whole disk, then label the new disk and attempt a 'zpool
58  *	   replace'.
59  *
60  * The module responds to EC_DEV_ADD events.  The special ESC_ZFS_VDEV_CHECK
61  * event indicates that a device failed to open during pool load, but the
62  * autoreplace property was set.  In this case, we deferred the associated
63  * FMA fault until our module had a chance to process the autoreplace logic.
64  * If the device could not be replaced, then the second online attempt will
65  * trigger the FMA fault that we skipped earlier.
66  *
67  * On Linux, udev provides a disk insert event for both the disk and the partition.
68  */
69 
70 #include <ctype.h>
71 #include <fcntl.h>
72 #include <libnvpair.h>
73 #include <libzfs.h>
74 #include <libzutil.h>
75 #include <limits.h>
76 #include <stddef.h>
77 #include <stdlib.h>
78 #include <string.h>
79 #include <syslog.h>
80 #include <sys/list.h>
81 #include <sys/sunddi.h>
82 #include <sys/sysevent/eventdefs.h>
83 #include <sys/sysevent/dev.h>
84 #include <thread_pool.h>
85 #include <pthread.h>
86 #include <unistd.h>
87 #include <errno.h>
88 #include "zfs_agents.h"
89 #include "../zed_log.h"
90 
91 #define	DEV_BYID_PATH	"/dev/disk/by-id/"
92 #define	DEV_BYPATH_PATH	"/dev/disk/by-path/"
93 #define	DEV_BYVDEV_PATH	"/dev/disk/by-vdev/"
94 
95 typedef void (*zfs_process_func_t)(zpool_handle_t *, nvlist_t *, boolean_t);
96 
97 libzfs_handle_t *g_zfshdl;
98 list_t g_pool_list;	/* list of unavailable pools at initialization */
99 list_t g_device_list;	/* list of disks with asynchronous label request */
100 tpool_t *g_tpool;
101 boolean_t g_enumeration_done;
102 pthread_t g_zfs_tid;	/* zfs_enum_pools() thread */
103 
104 typedef struct unavailpool {
105 	zpool_handle_t	*uap_zhp;
106 	list_node_t	uap_node;
107 } unavailpool_t;
108 
109 typedef struct pendingdev {
110 	char		pd_physpath[128];
111 	list_node_t	pd_node;
112 } pendingdev_t;
113 
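/*
 * Return the aggregate state of the pool's top-level vdev tree, taken from
 * the vdev stats in the pool's current configuration.
 */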
114 static int
115 zfs_toplevel_state(zpool_handle_t *zhp)
116 {
117 	nvlist_t *nvroot;
118 	vdev_stat_t *vs;
119 	unsigned int c;
120 
121 	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
122 	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
123 	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
124 	    (uint64_t **)&vs, &c) == 0);
125 	return (vs->vs_state);
126 }
127 
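/*
 * zpool_iter() callback used during startup enumeration: pools whose
 * top-level vdev state is below DEGRADED (i.e. unavailable) are kept open
 * and queued on the caller's list; all other pools are closed immediately.
 */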
128 static int
129 zfs_unavail_pool(zpool_handle_t *zhp, void *data)
130 {
131 	zed_log_msg(LOG_INFO, "zfs_unavail_pool: examining '%s' (state %d)",
132 	    zpool_get_name(zhp), (int)zfs_toplevel_state(zhp));
133 
134 	if (zfs_toplevel_state(zhp) < VDEV_STATE_DEGRADED) {
135 		unavailpool_t *uap;
136 		uap = malloc(sizeof (unavailpool_t));
137 		if (uap == NULL) {
138 			perror("malloc");
139 			exit(EXIT_FAILURE);
140 		}
141 
142 		uap->uap_zhp = zhp;
143 		list_insert_tail((list_t *)data, uap);
144 	} else {
145 		zpool_close(zhp);
146 	}
147 	return (0);
148 }
149 
150 /*
151  * Write an array of strings to the zed log
152  */
153 static void lines_to_zed_log_msg(char **lines, int lines_cnt)
154 {
155 	int i;
156 	for (i = 0; i < lines_cnt; i++) {
157 		zed_log_msg(LOG_INFO, "%s", lines[i]);
158 	}
159 }
160 
161 /*
162  * Two-stage replace on Linux: since we get separate disk notifications
163  * for the disk and for its partition, we can wait for the partitioned
164  * disk slice to show up.
165  *
166  * The first stage tags the disk, initiates async partitioning, and returns.
167  * The second stage finds the tag and proceeds to the ZFS labeling/replace.
168  *
169  * disk-add --> label-disk + tag-disk --> partition-add --> zpool_vdev_attach
170  *
171  * 1. physical match with no filesystem and no partition:
172  *	tag it, partition the disk
173  *
174  * 2. physical match again, see the partition and the tag:
175  *	proceed with the labeling/replace
176  */
177 
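/*
 * Illustrative example of the two stages (hypothetical device names):
 *
 *   1st event: EC_DEV_ADD for blank disk /dev/sdb; its physical path matches
 *              a faulted vdev in a pool with autoreplace=on, so the disk is
 *              labeled asynchronously and the physical path is recorded on
 *              g_device_list.
 *   2nd event: EC_DEV_ADD for the new partition /dev/sdb1; the recorded
 *              physical path is found on g_device_list, and the vdev is
 *              replaced via zpool_vdev_attach().
 */
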
178 /*
179  * The device associated with the given vdev (either by devid or physical path)
180  * has been added to the system.  If 'labeled' is set, the event is for a
181  * partition (the disk has already been labeled), so we match it against an
182  * earlier label request; otherwise the whole disk must be labeled first.
183  *
184  * First, we attempt to online the device (making sure to undo any spare
185  * operation when finished).  If this succeeds, then we're done.  If it fails,
186  * and the new state is VDEV_CANT_OPEN, it indicates that the device was opened,
187  * but that the label was not what we expected.  If the 'autoreplace' property
188  * is enabled, then we relabel the disk (if specified), and attempt a 'zpool
189  * replace'.  If the online is successful, but the new state is something else
190  * (REMOVED or FAULTED), it indicates that we're out of sync or in some sort of
191  * race, and we should avoid attempting to relabel the disk.
192  *
193  * We can also arrive here from an ESC_ZFS_VDEV_CHECK event.
194  */
195 static void
196 zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
197 {
198 	const char *path;
199 	vdev_state_t newstate;
200 	nvlist_t *nvroot, *newvd;
201 	pendingdev_t *device;
202 	uint64_t wholedisk = 0ULL;
203 	uint64_t offline = 0ULL, faulted = 0ULL;
204 	uint64_t guid = 0ULL;
205 	uint64_t is_spare = 0;
206 	const char *physpath = NULL, *new_devid = NULL, *enc_sysfs_path = NULL;
207 	char rawpath[PATH_MAX], fullpath[PATH_MAX];
208 	char pathbuf[PATH_MAX];
209 	int ret;
210 	int online_flag = ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE;
211 	boolean_t is_sd = B_FALSE;
212 	boolean_t is_mpath_wholedisk = B_FALSE;
213 	uint_t c;
214 	vdev_stat_t *vs;
215 	char **lines = NULL;
216 	int lines_cnt = 0;
217 
218 	/*
219 	 * Get the persistent path, typically under the '/dev/disk/by-id' or
220 	 * '/dev/disk/by-vdev' directories.  Note that this path can change
221 	 * when a vdev is replaced with a new disk.
222 	 */
223 	if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &path) != 0)
224 		return;
225 
226 	/* Skip healthy disks */
227 	verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
228 	    (uint64_t **)&vs, &c) == 0);
229 	if (vs->vs_state == VDEV_STATE_HEALTHY) {
230 		zed_log_msg(LOG_INFO, "%s: %s is already healthy, skip it.",
231 		    __func__, path);
232 		return;
233 	}
234 
235 	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &physpath);
236 
237 	update_vdev_config_dev_sysfs_path(vdev, path,
238 	    ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH);
239 	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
240 	    &enc_sysfs_path);
241 
242 	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
243 	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_OFFLINE, &offline);
244 	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_FAULTED, &faulted);
245 
246 	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_GUID, &guid);
247 	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_IS_SPARE, &is_spare);
248 
249 	/*
250 	 * Special case:
251 	 *
252 	 * We've seen times where a disk won't have a ZPOOL_CONFIG_PHYS_PATH
253 	 * entry in their config. For example, on this force-faulted disk:
254 	 *
255 	 *	children[0]:
256 	 *	   type: 'disk'
257 	 *	   id: 0
258 	 *	   guid: 14309659774640089719
259 	 *        path: '/dev/disk/by-vdev/L28'
260 	 *        whole_disk: 0
261 	 *        DTL: 654
262 	 *        create_txg: 4
263 	 *        com.delphix:vdev_zap_leaf: 1161
264 	 *        faulted: 1
265 	 *        aux_state: 'external'
266 	 *	children[1]:
267 	 *        type: 'disk'
268 	 *        id: 1
269 	 *        guid: 16002508084177980912
270 	 *        path: '/dev/disk/by-vdev/L29'
271 	 *        devid: 'dm-uuid-mpath-35000c500a61d68a3'
272 	 *        phys_path: 'L29'
273 	 *        vdev_enc_sysfs_path: '/sys/class/enclosure/0:0:1:0/SLOT 30 32'
274 	 *        whole_disk: 0
275 	 *        DTL: 1028
276 	 *        create_txg: 4
277 	 *        com.delphix:vdev_zap_leaf: 131
278 	 *
279 	 * If the disk's path is a /dev/disk/by-vdev/ path, then we can infer
280 	 * the ZPOOL_CONFIG_PHYS_PATH from the by-vdev disk name.
281 	 */
282 	if (physpath == NULL && path != NULL) {
283 		/* If path begins with "/dev/disk/by-vdev/" ... */
284 		if (strncmp(path, DEV_BYVDEV_PATH,
285 		    strlen(DEV_BYVDEV_PATH)) == 0) {
286 			/* Set physpath to the char after "/dev/disk/by-vdev" */
287 			physpath = &path[strlen(DEV_BYVDEV_PATH)];
288 		}
289 	}
290 
291 	/*
292 	 * We don't want to autoreplace offlined disks.  However, we do want to
293 	 * replace force-faulted disks (`zpool offline -f`).  Force-faulted
294 	 * disks have both offline=1 and faulted=1 in the nvlist.
295 	 */
296 	if (offline && !faulted) {
297 		zed_log_msg(LOG_INFO, "%s: %s is offline, skip autoreplace",
298 		    __func__, path);
299 		return;
300 	}
301 
302 	is_mpath_wholedisk = is_mpath_whole_disk(path);
303 	zed_log_msg(LOG_INFO, "zfs_process_add: pool '%s' vdev '%s', phys '%s'"
304 	    " %s blank disk, %s mpath blank disk, %s labeled, enc sysfs '%s', "
305 	    "(guid %llu)",
306 	    zpool_get_name(zhp), path,
307 	    physpath ? physpath : "NULL",
308 	    wholedisk ? "is" : "not",
309 	    is_mpath_wholedisk ? "is" : "not",
310 	    labeled ? "is" : "not",
311 	    enc_sysfs_path,
312 	    (long long unsigned int)guid);
313 
314 	/*
315 	 * The VDEV guid is preferred for identification (gets passed in path)
316 	 */
317 	if (guid != 0) {
318 		(void) snprintf(fullpath, sizeof (fullpath), "%llu",
319 		    (long long unsigned int)guid);
320 	} else {
321 		/*
322 		 * otherwise use path sans partition suffix for whole disks
323 		 */
324 		(void) strlcpy(fullpath, path, sizeof (fullpath));
325 		if (wholedisk) {
326 			char *spath = zfs_strip_partition(fullpath);
327 			if (!spath) {
328 				zed_log_msg(LOG_INFO, "%s: Can't alloc",
329 				    __func__);
330 				return;
331 			}
332 
333 			(void) strlcpy(fullpath, spath, sizeof (fullpath));
334 			free(spath);
335 		}
336 	}
337 
338 	if (is_spare)
339 		online_flag |= ZFS_ONLINE_SPARE;
340 
341 	/*
342 	 * Attempt to online the device.
343 	 */
344 	if (zpool_vdev_online(zhp, fullpath, online_flag, &newstate) == 0 &&
345 	    (newstate == VDEV_STATE_HEALTHY ||
346 	    newstate == VDEV_STATE_DEGRADED)) {
347 		zed_log_msg(LOG_INFO,
348 		    "  zpool_vdev_online: vdev '%s' ('%s') is "
349 		    "%s", fullpath, physpath, (newstate == VDEV_STATE_HEALTHY) ?
350 		    "HEALTHY" : "DEGRADED");
351 		return;
352 	}
353 
354 	/*
355 	 * vdev_id alias rule for using scsi_debug devices (FMA automated
356 	 * testing)
357 	 */
358 	if (physpath != NULL && strcmp("scsidebug", physpath) == 0)
359 		is_sd = B_TRUE;
360 
361 	/*
362 	 * If the pool doesn't have the autoreplace property set, then use
363 	 * vdev online to trigger a FMA fault by posting an ereport.
364 	 */
365 	if (!zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOREPLACE, NULL) ||
366 	    !(wholedisk || is_mpath_wholedisk) || (physpath == NULL)) {
367 		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
368 		    &newstate);
369 		zed_log_msg(LOG_INFO, "Pool's autoreplace is not enabled or "
370 		    "not a blank disk for '%s' ('%s')", fullpath,
371 		    physpath);
372 		return;
373 	}
374 
375 	/*
376 	 * Convert physical path into its current device node.  Rawpath
377 	 * needs to be /dev/disk/by-vdev for a scsi_debug device since
378 	 * /dev/disk/by-path will not be present.
379 	 */
380 	(void) snprintf(rawpath, sizeof (rawpath), "%s%s",
381 	    is_sd ? DEV_BYVDEV_PATH : DEV_BYPATH_PATH, physpath);
382 
383 	if (realpath(rawpath, pathbuf) == NULL && !is_mpath_wholedisk) {
384 		zed_log_msg(LOG_INFO, "  realpath: %s failed (%s)",
385 		    rawpath, strerror(errno));
386 
387 		int err = zpool_vdev_online(zhp, fullpath,
388 		    ZFS_ONLINE_FORCEFAULT, &newstate);
389 
390 		zed_log_msg(LOG_INFO, "  zpool_vdev_online: %s FORCEFAULT (%s) "
391 		    "err %d, new state %d",
392 		    fullpath, libzfs_error_description(g_zfshdl), err,
393 		    err ? (int)newstate : 0);
394 		return;
395 	}
396 
397 	/* Only autoreplace bad disks */
398 	if ((vs->vs_state != VDEV_STATE_DEGRADED) &&
399 	    (vs->vs_state != VDEV_STATE_FAULTED) &&
400 	    (vs->vs_state != VDEV_STATE_REMOVED) &&
401 	    (vs->vs_state != VDEV_STATE_CANT_OPEN)) {
402 		zed_log_msg(LOG_INFO, "  not autoreplacing since disk isn't in "
403 		    "a bad state (currently %llu)", (u_longlong_t)vs->vs_state);
404 		return;
405 	}
406 
407 	nvlist_lookup_string(vdev, "new_devid", &new_devid);
408 
409 	if (is_mpath_wholedisk) {
410 		/* Don't label device mapper or multipath disks. */
411 		zed_log_msg(LOG_INFO,
412 		    "  it's a multipath wholedisk, don't label");
413 		if (zpool_prepare_disk(zhp, vdev, "autoreplace", &lines,
414 		    &lines_cnt) != 0) {
415 			zed_log_msg(LOG_INFO,
416 			    "  zpool_prepare_disk: could not "
417 			    "prepare '%s' (%s)", fullpath,
418 			    libzfs_error_description(g_zfshdl));
419 			if (lines_cnt > 0) {
420 				zed_log_msg(LOG_INFO,
421 				    "  zfs_prepare_disk output:");
422 				lines_to_zed_log_msg(lines, lines_cnt);
423 			}
424 			libzfs_free_str_array(lines, lines_cnt);
425 			return;
426 		}
427 	} else if (!labeled) {
428 		/*
429 		 * we're auto-replacing a raw disk, so label it first
430 		 */
431 		char *leafname;
432 
433 		/*
434 		 * If this is a request to label a whole disk, then attempt to
435 		 * write out the label.  Before we can label the disk, we need
436 		 * to map the physical string that was matched on to the
437 		 * underlying device node.
438 		 *
439 		 * If any part of this process fails, then do a force online
440 		 * to trigger a ZFS fault for the device (and any hot spare
441 		 * replacement).
442 		 */
443 		leafname = strrchr(pathbuf, '/') + 1;
444 
445 		/*
446 		 * Prepare the disk and attempt to write out the label.
448 		 */
449 		if (zpool_prepare_and_label_disk(g_zfshdl, zhp, leafname,
450 		    vdev, "autoreplace", &lines, &lines_cnt) != 0) {
451 			zed_log_msg(LOG_WARNING,
452 			    "  zpool_prepare_and_label_disk: could not "
453 			    "label '%s' (%s)", leafname,
454 			    libzfs_error_description(g_zfshdl));
455 			if (lines_cnt > 0) {
456 				zed_log_msg(LOG_INFO,
457 				    "  zfs_prepare_disk output:");
458 				lines_to_zed_log_msg(lines, lines_cnt);
459 			}
460 			libzfs_free_str_array(lines, lines_cnt);
461 
462 			(void) zpool_vdev_online(zhp, fullpath,
463 			    ZFS_ONLINE_FORCEFAULT, &newstate);
464 			return;
465 		}
466 
467 		/*
468 		 * The disk labeling is asynchronous on Linux. Just record
469 		 * this label request and return as there will be another
470 		 * disk add event for the partition after the labeling is
471 		 * completed.
472 		 */
473 		device = malloc(sizeof (pendingdev_t));
474 		if (device == NULL) {
475 			perror("malloc");
476 			exit(EXIT_FAILURE);
477 		}
478 
479 		(void) strlcpy(device->pd_physpath, physpath,
480 		    sizeof (device->pd_physpath));
481 		list_insert_tail(&g_device_list, device);
482 
483 		zed_log_msg(LOG_NOTICE, "  zpool_label_disk: async '%s' (%llu)",
484 		    leafname, (u_longlong_t)guid);
485 
486 		return;	/* resumes at EC_DEV_ADD.ESC_DISK for partition */
487 
488 	} else /* labeled */ {
489 		boolean_t found = B_FALSE;
490 		/*
491 		 * match up with request above to label the disk
492 		 */
493 		for (device = list_head(&g_device_list); device != NULL;
494 		    device = list_next(&g_device_list, device)) {
495 			if (strcmp(physpath, device->pd_physpath) == 0) {
496 				list_remove(&g_device_list, device);
497 				free(device);
498 				found = B_TRUE;
499 				break;
500 			}
501 			zed_log_msg(LOG_INFO, "zpool_label_disk: %s != %s",
502 			    physpath, device->pd_physpath);
503 		}
504 		if (!found) {
505 			/* unexpected partition slice encountered */
506 			zed_log_msg(LOG_WARNING, "labeled disk %s was "
507 			    "unexpected here", fullpath);
508 			(void) zpool_vdev_online(zhp, fullpath,
509 			    ZFS_ONLINE_FORCEFAULT, &newstate);
510 			return;
511 		}
512 
513 		zed_log_msg(LOG_INFO, "  zpool_label_disk: resume '%s' (%llu)",
514 		    physpath, (u_longlong_t)guid);
515 
516 		/*
517 		 * Paths that begin with '/dev/disk/by-id/' will change and so
518 		 * they must be updated before calling zpool_vdev_attach().
519 		 */
520 		if (strncmp(path, DEV_BYID_PATH, strlen(DEV_BYID_PATH)) == 0) {
521 			(void) snprintf(pathbuf, sizeof (pathbuf), "%s%s",
522 			    DEV_BYID_PATH, new_devid);
523 			zed_log_msg(LOG_INFO, "  zpool_label_disk: path '%s' "
524 			    "replaced by '%s'", path, pathbuf);
525 			path = pathbuf;
526 		}
527 	}
528 
529 	libzfs_free_str_array(lines, lines_cnt);
530 
531 	/*
532 	 * Construct the root vdev to pass to zpool_vdev_attach().  While adding
533 	 * the entire vdev structure is harmless, we construct a reduced set of
534 	 * path/physpath/wholedisk to keep it simple.
535 	 */
536 	if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0) {
537 		zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
538 		return;
539 	}
540 	if (nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0) {
541 		zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
542 		nvlist_free(nvroot);
543 		return;
544 	}
545 
546 	if (nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
547 	    nvlist_add_string(newvd, ZPOOL_CONFIG_PATH, path) != 0 ||
548 	    nvlist_add_string(newvd, ZPOOL_CONFIG_DEVID, new_devid) != 0 ||
549 	    (physpath != NULL && nvlist_add_string(newvd,
550 	    ZPOOL_CONFIG_PHYS_PATH, physpath) != 0) ||
551 	    (enc_sysfs_path != NULL && nvlist_add_string(newvd,
552 	    ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH, enc_sysfs_path) != 0) ||
553 	    nvlist_add_uint64(newvd, ZPOOL_CONFIG_WHOLE_DISK, wholedisk) != 0 ||
554 	    nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
555 	    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
556 	    (const nvlist_t **)&newvd, 1) != 0) {
557 		zed_log_msg(LOG_WARNING, "zfs_mod: unable to add nvlist pairs");
558 		nvlist_free(newvd);
559 		nvlist_free(nvroot);
560 		return;
561 	}
562 
563 	nvlist_free(newvd);
564 
565 	/*
566 	 * Wait for udev to verify the links exist, then auto-replace
567 	 * the leaf disk at same physical location.
568 	 */
569 	if (zpool_label_disk_wait(path, DISK_LABEL_WAIT) != 0) {
570 		zed_log_msg(LOG_WARNING, "zfs_mod: pool '%s', after labeling "
571 		    "replacement disk, the expected disk partition link '%s' "
572 		    "is missing after waiting %u ms",
573 		    zpool_get_name(zhp), path, DISK_LABEL_WAIT);
574 		nvlist_free(nvroot);
575 		return;
576 	}
577 
578 	/*
579 	 * Prefer sequential resilvering when supported (mirrors and dRAID),
580 	 * otherwise fallback to a traditional healing resilver.
581 	 */
582 	ret = zpool_vdev_attach(zhp, fullpath, path, nvroot, B_TRUE, B_TRUE);
583 	if (ret != 0) {
584 		ret = zpool_vdev_attach(zhp, fullpath, path, nvroot,
585 		    B_TRUE, B_FALSE);
586 	}
587 
588 	zed_log_msg(LOG_WARNING, "  zpool_vdev_replace: %s with %s (%s)",
589 	    fullpath, path, (ret == 0) ? "no errors" :
590 	    libzfs_error_description(g_zfshdl));
591 
592 	nvlist_free(nvroot);
593 }
594 
595 /*
596  * Utility functions to find a vdev matching given criteria.
597  */
598 typedef struct dev_data {
599 	const char		*dd_compare;
600 	const char		*dd_prop;
601 	zfs_process_func_t	dd_func;
602 	boolean_t		dd_found;
603 	boolean_t		dd_islabeled;
604 	uint64_t		dd_pool_guid;
605 	uint64_t		dd_vdev_guid;
606 	uint64_t		dd_new_vdev_guid;
607 	const char		*dd_new_devid;
608 	uint64_t		dd_num_spares;
609 } dev_data_t;
610 
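/*
 * Recursively walk a vdev tree (including any spares and L2ARC devices),
 * looking for a vdev that matches the criteria in the dev_data_t, and
 * invoke dd_func on each match.
 */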
611 static void
612 zfs_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *data)
613 {
614 	dev_data_t *dp = data;
615 	const char *path = NULL;
616 	uint_t c, children;
617 	nvlist_t **child;
618 	uint64_t guid = 0;
619 	uint64_t isspare = 0;
620 
621 	/*
622 	 * First iterate over any children.
623 	 */
624 	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN,
625 	    &child, &children) == 0) {
626 		for (c = 0; c < children; c++)
627 			zfs_iter_vdev(zhp, child[c], data);
628 	}
629 
630 	/*
631 	 * Iterate over any spares and cache devices
632 	 */
633 	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_SPARES,
634 	    &child, &children) == 0) {
635 		for (c = 0; c < children; c++)
636 			zfs_iter_vdev(zhp, child[c], data);
637 	}
638 	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_L2CACHE,
639 	    &child, &children) == 0) {
640 		for (c = 0; c < children; c++)
641 			zfs_iter_vdev(zhp, child[c], data);
642 	}
643 
644 	/* once a vdev was matched and processed there is nothing left to do */
645 	if (dp->dd_found && dp->dd_num_spares == 0)
646 		return;
647 	(void) nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_GUID, &guid);
648 
649 	/*
650 	 * Match by GUID if available otherwise fallback to devid or physical
651 	 */
652 	if (dp->dd_vdev_guid != 0) {
653 		if (guid != dp->dd_vdev_guid)
654 			return;
655 		zed_log_msg(LOG_INFO, "  zfs_iter_vdev: matched on %llu", guid);
656 		dp->dd_found = B_TRUE;
657 
658 	} else if (dp->dd_compare != NULL) {
659 		/*
660 		 * NOTE: On Linux there is an event for the partition, so unlike
661 		 * illumos, substring matching is not required to accommodate
662 		 * the partition suffix.  An exact match will be present in
663 		 * the dp->dd_compare value.
664 		 *
665 		 * If the attached disk already contains a vdev GUID, it means
666 		 * the disk is not clean.  In that case the physical path would
667 		 * match, but onlining the disk would fault it.  So we only
668 		 * proceed if the GUID matches the newly attached disk or the
669 		 * disk is in a clean state.
670 		 */
671 		if (nvlist_lookup_string(nvl, dp->dd_prop, &path) != 0 ||
672 		    strcmp(dp->dd_compare, path) != 0) {
673 			return;
674 		}
675 		if (dp->dd_new_vdev_guid != 0 && dp->dd_new_vdev_guid != guid) {
676 			zed_log_msg(LOG_INFO, "  %s: no match (GUID:%llu"
677 			    " != vdev GUID:%llu)", __func__,
678 			    dp->dd_new_vdev_guid, guid);
679 			return;
680 		}
681 
682 		zed_log_msg(LOG_INFO, "  zfs_iter_vdev: matched %s on %s",
683 		    dp->dd_prop, path);
684 		dp->dd_found = B_TRUE;
685 
686 		/* pass the new devid for use by auto-replacing code */
687 		if (dp->dd_new_devid != NULL) {
688 			(void) nvlist_add_string(nvl, "new_devid",
689 			    dp->dd_new_devid);
690 		}
691 	}
692 
693 	if (dp->dd_found == B_TRUE && nvlist_lookup_uint64(nvl,
694 	    ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
695 		dp->dd_num_spares++;
696 
697 	(dp->dd_func)(zhp, nvl, dp->dd_islabeled);
698 }
699 
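/*
 * Thread-pool callback: mount and share the datasets of a pool that has
 * become available again, then release the pool handle.
 */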
700 static void
701 zfs_enable_ds(void *arg)
702 {
703 	unavailpool_t *pool = (unavailpool_t *)arg;
704 
705 	(void) zpool_enable_datasets(pool->uap_zhp, NULL, 0, 512);
706 	zpool_close(pool->uap_zhp);
707 	free(pool);
708 }
709 
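/*
 * zpool_iter() callback: search one pool's vdev tree for a match and, once
 * the startup enumeration is done, re-enable the datasets of any queued
 * pool that has transitioned back to an available state.
 */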
710 static int
711 zfs_iter_pool(zpool_handle_t *zhp, void *data)
712 {
713 	nvlist_t *config, *nvl;
714 	dev_data_t *dp = data;
715 	uint64_t pool_guid;
716 	unavailpool_t *pool;
717 
718 	zed_log_msg(LOG_INFO, "zfs_iter_pool: evaluating vdevs on %s (by %s)",
719 	    zpool_get_name(zhp), dp->dd_vdev_guid ? "GUID" : dp->dd_prop);
720 
721 	/*
722 	 * For each vdev in this pool, look for a match to apply dd_func
723 	 */
724 	if ((config = zpool_get_config(zhp, NULL)) != NULL) {
725 		if (dp->dd_pool_guid == 0 ||
726 		    (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
727 		    &pool_guid) == 0 && pool_guid == dp->dd_pool_guid)) {
728 			(void) nvlist_lookup_nvlist(config,
729 			    ZPOOL_CONFIG_VDEV_TREE, &nvl);
730 			zfs_iter_vdev(zhp, nvl, data);
731 		}
732 	} else {
733 		zed_log_msg(LOG_INFO, "%s: no config\n", __func__);
734 	}
735 
736 	/*
737 	 * if this pool was originally unavailable,
738 	 * then enable its datasets asynchronously
739 	 */
740 	if (g_enumeration_done)  {
741 		for (pool = list_head(&g_pool_list); pool != NULL;
742 		    pool = list_next(&g_pool_list, pool)) {
743 
744 			if (strcmp(zpool_get_name(zhp),
745 			    zpool_get_name(pool->uap_zhp)))
746 				continue;
747 			if (zfs_toplevel_state(zhp) >= VDEV_STATE_DEGRADED) {
748 				list_remove(&g_pool_list, pool);
749 				(void) tpool_dispatch(g_tpool, zfs_enable_ds,
750 				    pool);
751 				break;
752 			}
753 		}
754 	}
755 
756 	zpool_close(zhp);
757 
758 	/* cease iteration after a match */
759 	return (dp->dd_found && dp->dd_num_spares == 0);
760 }
761 
762 /*
763  * Given a physical device location, iterate over all
764  * (pool, vdev) pairs which correspond to that location.
765  */
766 static boolean_t
767 devphys_iter(const char *physical, const char *devid, zfs_process_func_t func,
768     boolean_t is_slice, uint64_t new_vdev_guid)
769 {
770 	dev_data_t data = { 0 };
771 
772 	data.dd_compare = physical;
773 	data.dd_func = func;
774 	data.dd_prop = ZPOOL_CONFIG_PHYS_PATH;
775 	data.dd_found = B_FALSE;
776 	data.dd_islabeled = is_slice;
777 	data.dd_new_devid = devid;	/* used by auto replace code */
778 	data.dd_new_vdev_guid = new_vdev_guid;
779 
780 	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
781 
782 	return (data.dd_found);
783 }
784 
785 /*
786  * Given a device identifier, find any vdevs with a matching by-vdev
787  * path.  Normally we shouldn't need this as the comparison would be
788  * made earlier in the devphys_iter().  For example, if we were replacing
789  * /dev/disk/by-vdev/L28, normally devphys_iter() would match the
790  * ZPOOL_CONFIG_PHYS_PATH of "L28" from the old disk config to "L28"
791  * of the new disk config.  However, we've seen cases where
792  * ZPOOL_CONFIG_PHYS_PATH was not in the config for the old disk.  Here's
793  * an example of a real 2-disk mirror pool where one disk was force
794  * faulted:
795  *
796  *       com.delphix:vdev_zap_top: 129
797  *           children[0]:
798  *               type: 'disk'
799  *               id: 0
800  *               guid: 14309659774640089719
801  *               path: '/dev/disk/by-vdev/L28'
802  *               whole_disk: 0
803  *               DTL: 654
804  *               create_txg: 4
805  *               com.delphix:vdev_zap_leaf: 1161
806  *               faulted: 1
807  *               aux_state: 'external'
808  *           children[1]:
809  *               type: 'disk'
810  *               id: 1
811  *               guid: 16002508084177980912
812  *               path: '/dev/disk/by-vdev/L29'
813  *               devid: 'dm-uuid-mpath-35000c500a61d68a3'
814  *               phys_path: 'L29'
815  *               vdev_enc_sysfs_path: '/sys/class/enclosure/0:0:1:0/SLOT 30 32'
816  *               whole_disk: 0
817  *               DTL: 1028
818  *               create_txg: 4
819  *               com.delphix:vdev_zap_leaf: 131
820  *
821  * So in the case above, the only thing we could compare is the path.
822  *
823  * We can do this because we assume by-vdev paths are as authoritative as
824  * physical paths.  We could not assume this for normal paths like /dev/sda,
825  * since the physical location that /dev/sda points to can change over time.
826  */
827 static boolean_t
828 by_vdev_path_iter(const char *by_vdev_path, const char *devid,
829     zfs_process_func_t func, boolean_t is_slice)
830 {
831 	dev_data_t data = { 0 };
832 
833 	data.dd_compare = by_vdev_path;
834 	data.dd_func = func;
835 	data.dd_prop = ZPOOL_CONFIG_PATH;
836 	data.dd_found = B_FALSE;
837 	data.dd_islabeled = is_slice;
838 	data.dd_new_devid = devid;
839 
840 	if (strncmp(by_vdev_path, DEV_BYVDEV_PATH,
841 	    strlen(DEV_BYVDEV_PATH)) != 0) {
842 		/* by_vdev_path doesn't start with "/dev/disk/by-vdev/" */
843 		return (B_FALSE);
844 	}
845 
846 	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
847 
848 	return (data.dd_found);
849 }
850 
851 /*
852  * Given a device identifier, find any vdevs with a matching devid.
853  * On Linux we can match devid directly which is always a whole disk.
854  */
855 static boolean_t
856 devid_iter(const char *devid, zfs_process_func_t func, boolean_t is_slice)
857 {
858 	dev_data_t data = { 0 };
859 
860 	data.dd_compare = devid;
861 	data.dd_func = func;
862 	data.dd_prop = ZPOOL_CONFIG_DEVID;
863 	data.dd_found = B_FALSE;
864 	data.dd_islabeled = is_slice;
865 	data.dd_new_devid = devid;
866 
867 	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
868 
869 	return (data.dd_found);
870 }
871 
872 /*
873  * Given a device guid, find any vdevs with a matching guid.
874  */
875 static boolean_t
876 guid_iter(uint64_t pool_guid, uint64_t vdev_guid, const char *devid,
877     zfs_process_func_t func, boolean_t is_slice)
878 {
879 	dev_data_t data = { 0 };
880 
881 	data.dd_func = func;
882 	data.dd_found = B_FALSE;
883 	data.dd_pool_guid = pool_guid;
884 	data.dd_vdev_guid = vdev_guid;
885 	data.dd_islabeled = is_slice;
886 	data.dd_new_devid = devid;
887 
888 	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
889 
890 	return (data.dd_found);
891 }
892 
893 /*
894  * Handle a EC_DEV_ADD.ESC_DISK event.
895  *
896  * illumos
897  *	Expects: DEV_PHYS_PATH string in schema
898  *	Matches: vdev's ZPOOL_CONFIG_PHYS_PATH or ZPOOL_CONFIG_DEVID
899  *
900  *      path: '/dev/dsk/c0t1d0s0' (persistent)
901  *     devid: 'id1,sd@SATA_____Hitachi_HDS72101______JP2940HZ3H74MC/a'
902  * phys_path: '/pci@0,0/pci103c,1609@11/disk@1,0:a'
903  *
904  * linux
905  *	provides: DEV_PHYS_PATH and DEV_IDENTIFIER strings in schema
906  *	Matches: vdev's ZPOOL_CONFIG_PHYS_PATH or ZPOOL_CONFIG_DEVID
907  *
908  *      path: '/dev/sdc1' (not persistent)
909  *     devid: 'ata-SAMSUNG_HD204UI_S2HGJD2Z805891-part1'
910  * phys_path: 'pci-0000:04:00.0-sas-0x4433221106000000-lun-0'
911  */
912 static int
913 zfs_deliver_add(nvlist_t *nvl)
914 {
915 	const char *devpath = NULL, *devid = NULL;
916 	uint64_t pool_guid = 0, vdev_guid = 0;
917 	boolean_t is_slice;
918 
919 	/*
920 	 * Expecting a devid string and an optional physical location and guid
921 	 */
922 	if (nvlist_lookup_string(nvl, DEV_IDENTIFIER, &devid) != 0) {
923 		zed_log_msg(LOG_INFO, "%s: no dev identifier\n", __func__);
924 		return (-1);
925 	}
926 
927 	(void) nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devpath);
928 	(void) nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID, &pool_guid);
929 	(void) nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID, &vdev_guid);
930 
931 	is_slice = (nvlist_lookup_boolean(nvl, DEV_IS_PART) == 0);
932 
933 	zed_log_msg(LOG_INFO, "zfs_deliver_add: adding %s (%s) (is_slice %d)",
934 	    devid, devpath ? devpath : "NULL", is_slice);
935 
936 	/*
937 	 * Iterate over all vdevs looking for a match in the following order:
938 	 * 1. ZPOOL_CONFIG_DEVID (identifies the unique disk)
939 	 * 2. ZPOOL_CONFIG_PHYS_PATH (identifies disk physical location).
940 	 * 3. ZPOOL_CONFIG_GUID (identifies unique vdev).
941 	 * 4. ZPOOL_CONFIG_PATH for /dev/disk/by-vdev devices only (since
942 	 *    by-vdev paths represent physical paths).
943 	 */
944 	if (devid_iter(devid, zfs_process_add, is_slice))
945 		return (0);
946 	if (devpath != NULL && devphys_iter(devpath, devid, zfs_process_add,
947 	    is_slice, vdev_guid))
948 		return (0);
949 	if (vdev_guid != 0)
950 		(void) guid_iter(pool_guid, vdev_guid, devid, zfs_process_add,
951 		    is_slice);
952 
953 	if (devpath != NULL) {
954 		/* Can we match a /dev/disk/by-vdev/ path? */
955 		char by_vdev_path[MAXPATHLEN];
956 		snprintf(by_vdev_path, sizeof (by_vdev_path),
957 		    "/dev/disk/by-vdev/%s", devpath);
958 		if (by_vdev_path_iter(by_vdev_path, devid, zfs_process_add,
959 		    is_slice))
960 			return (0);
961 	}
962 
963 	return (0);
964 }
965 
966 /*
967  * Called when we receive a VDEV_CHECK event, which indicates a device could not
968  * be opened during initial pool open, but the autoreplace property was set on
969  * the pool.  In this case, we treat it as if it were an add event.
970  */
971 static int
972 zfs_deliver_check(nvlist_t *nvl)
973 {
974 	dev_data_t data = { 0 };
975 
976 	if (nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID,
977 	    &data.dd_pool_guid) != 0 ||
978 	    nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID,
979 	    &data.dd_vdev_guid) != 0 ||
980 	    data.dd_vdev_guid == 0)
981 		return (0);
982 
983 	zed_log_msg(LOG_INFO, "zfs_deliver_check: pool '%llu', vdev %llu",
984 	    data.dd_pool_guid, data.dd_vdev_guid);
985 
986 	data.dd_func = zfs_process_add;
987 
988 	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
989 
990 	return (0);
991 }
992 
993 /*
994  * Given a path to a vdev, lookup the vdev's physical size from its
995  * config nvlist.
996  *
997  * Returns the vdev's physical size in bytes on success, 0 on error.
998  */
999 static uint64_t
1000 vdev_size_from_config(zpool_handle_t *zhp, const char *vdev_path)
1001 {
1002 	nvlist_t *nvl = NULL;
1003 	boolean_t avail_spare, l2cache, log;
1004 	vdev_stat_t *vs = NULL;
1005 	uint_t c;
1006 
1007 	nvl = zpool_find_vdev(zhp, vdev_path, &avail_spare, &l2cache, &log);
1008 	if (!nvl)
1009 		return (0);
1010 
1011 	verify(nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_VDEV_STATS,
1012 	    (uint64_t **)&vs, &c) == 0);
1013 	if (!vs) {
1014 		zed_log_msg(LOG_INFO, "%s: no nvlist for '%s'", __func__,
1015 		    vdev_path);
1016 		return (0);
1017 	}
1018 
1019 	return (vs->vs_pspace);
1020 }
1021 
1022 /*
1023  * Given a path to a vdev, look up whether the vdev is a "whole disk" in the
1024  * config nvlist.  "whole disk" means that ZFS was passed a whole disk
1025  * at pool creation time, which it partitioned up and has full control over.
1026  * Thus a partition with wholedisk=1 set tells us that ZFS created the
1027  * partition at pool creation time.  A partition without wholedisk set would
1028  * have been created externally (for example with fdisk) and passed to ZFS.
1029  *
1030  * Returns the whole disk value (either 0 or 1).
1031  */
1032 static uint64_t
1033 vdev_whole_disk_from_config(zpool_handle_t *zhp, const char *vdev_path)
1034 {
1035 	nvlist_t *nvl = NULL;
1036 	boolean_t avail_spare, l2cache, log;
1037 	uint64_t wholedisk = 0;
1038 
1039 	nvl = zpool_find_vdev(zhp, vdev_path, &avail_spare, &l2cache, &log);
1040 	if (!nvl)
1041 		return (0);
1042 
1043 	(void) nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
1044 
1045 	return (wholedisk);
1046 }
1047 
1048 /*
1049  * Evaluates to true if the device grew by roughly 1% or more of its new size.
1050  */
1051 #define	DEVICE_GREW(oldsize, newsize) \
1052 		    ((newsize > oldsize) && \
1053 		    ((newsize / (newsize - oldsize)) <= 100))
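
/*
 * For example (illustrative numbers only): growing from 995 to 1000 gives
 * 1000 / (1000 - 995) = 200, which is > 100, so DEVICE_GREW() is false
 * (only 0.5% growth); growing from 900 to 1000 gives 1000 / 100 = 10 <= 100,
 * so DEVICE_GREW() is true.
 */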
1054 
1055 static int
1056 zfsdle_vdev_online(zpool_handle_t *zhp, void *data)
1057 {
1058 	boolean_t avail_spare, l2cache;
1059 	nvlist_t *udev_nvl = data;
1060 	nvlist_t *tgt;
1061 	int error;
1062 
1063 	const char *tmp_devname;
1064 	char devname[MAXPATHLEN] = "";
1065 	uint64_t guid;
1066 
1067 	if (nvlist_lookup_uint64(udev_nvl, ZFS_EV_VDEV_GUID, &guid) == 0) {
1068 		sprintf(devname, "%llu", (u_longlong_t)guid);
1069 	} else if (nvlist_lookup_string(udev_nvl, DEV_PHYS_PATH,
1070 	    &tmp_devname) == 0) {
1071 		strlcpy(devname, tmp_devname, MAXPATHLEN);
1072 		zfs_append_partition(devname, MAXPATHLEN);
1073 	} else {
1074 		zed_log_msg(LOG_INFO, "%s: no guid or physpath", __func__);
1075 	}
1076 
1077 	zed_log_msg(LOG_INFO, "zfsdle_vdev_online: searching for '%s' in '%s'",
1078 	    devname, zpool_get_name(zhp));
1079 
1080 	if ((tgt = zpool_find_vdev_by_physpath(zhp, devname,
1081 	    &avail_spare, &l2cache, NULL)) != NULL) {
1082 		const char *path;
1083 		char fullpath[MAXPATHLEN];
1084 		uint64_t wholedisk = 0;
1085 
1086 		error = nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &path);
1087 		if (error) {
1088 			zpool_close(zhp);
1089 			return (0);
1090 		}
1091 
1092 		(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
1093 		    &wholedisk);
1094 
1095 		if (wholedisk) {
1096 			char *tmp;
1097 			path = strrchr(path, '/');
1098 			if (path != NULL) {
1099 				tmp = zfs_strip_partition(path + 1);
1100 				if (tmp == NULL) {
1101 					zpool_close(zhp);
1102 					return (0);
1103 				}
1104 			} else {
1105 				zpool_close(zhp);
1106 				return (0);
1107 			}
1108 
1109 			(void) strlcpy(fullpath, tmp, sizeof (fullpath));
1110 			free(tmp);
1111 
1112 			/*
1113 			 * We need to reopen the pool associated with this
1114 			 * device so that the kernel can update the size of
1115 			 * the expanded device.  When expanding there is no
1116 			 * need to restart the scrub from the beginning.
1117 			 */
1118 			boolean_t scrub_restart = B_FALSE;
1119 			(void) zpool_reopen_one(zhp, &scrub_restart);
1120 		} else {
1121 			(void) strlcpy(fullpath, path, sizeof (fullpath));
1122 		}
1123 
1124 		if (zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
1125 			vdev_state_t newstate;
1126 
1127 			if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL) {
1128 				/*
1129 				 * If this disk size has not changed, then
1130 				 * there's no need to do an autoexpand.  To
1131 				 * check we look at the disk's size in its
1132 				 * config, and compare it to the disk size
1133 				 * that udev is reporting.
1134 				 */
1135 				uint64_t udev_size = 0, conf_size = 0,
1136 				    wholedisk = 0, udev_parent_size = 0;
1137 
1138 				/*
1139 				 * Get the size of our disk that udev is
1140 				 * reporting.
1141 				 */
1142 				if (nvlist_lookup_uint64(udev_nvl, DEV_SIZE,
1143 				    &udev_size) != 0) {
1144 					udev_size = 0;
1145 				}
1146 
1147 				/*
1148 				 * Get the size of our disk's parent device
1149 				 * from udev (where sda1's parent is sda).
1150 				 */
1151 				if (nvlist_lookup_uint64(udev_nvl,
1152 				    DEV_PARENT_SIZE, &udev_parent_size) != 0) {
1153 					udev_parent_size = 0;
1154 				}
1155 
1156 				conf_size = vdev_size_from_config(zhp,
1157 				    fullpath);
1158 
1159 				wholedisk = vdev_whole_disk_from_config(zhp,
1160 				    fullpath);
1161 
1162 				/*
1163 				 * Only attempt an autoexpand if the vdev size
1164 				 * changed.  There are two different cases
1165 				 * to consider.
1166 				 *
1167 				 * 1. wholedisk=1
1168 				 * If you do a 'zpool create' on a whole disk
1169 				 * (like /dev/sda), then zfs will create
1170 				 * partitions on the disk (like /dev/sda1).  In
1171 				 * that case, wholedisk=1 will be set in the
1172 				 * partition's nvlist config.  So zed will need
1173 				 * to see if your parent device (/dev/sda)
1174 				 * expanded in size, and if so, then attempt
1175 				 * the autoexpand.
1176 				 *
1177 				 * 2. wholedisk=0
1178 				 * If you do a 'zpool create' on an existing
1179 				 * partition, or a device that doesn't allow
1180 				 * partitions, then wholedisk=0, and you will
1181 				 * simply need to check if the device itself
1182 				 * expanded in size.
1183 				 */
1184 				if (DEVICE_GREW(conf_size, udev_size) ||
1185 				    (wholedisk && DEVICE_GREW(conf_size,
1186 				    udev_parent_size))) {
1187 					error = zpool_vdev_online(zhp, fullpath,
1188 					    0, &newstate);
1189 
1190 					zed_log_msg(LOG_INFO,
1191 					    "%s: autoexpanding '%s' from %llu"
1192 					    " to %llu bytes in pool '%s': %d",
1193 					    __func__, fullpath, conf_size,
1194 					    MAX(udev_size, udev_parent_size),
1195 					    zpool_get_name(zhp), error);
1196 				}
1197 			}
1198 		}
1199 		zpool_close(zhp);
1200 		return (1);
1201 	}
1202 	zpool_close(zhp);
1203 	return (0);
1204 }
1205 
1206 /*
1207  * This function handles the ESC_DEV_DLE device change event.  Use the
1208  * provided vdev guid when looking up a disk or partition, when the guid
1209  * is not present assume the entire disk is owned by ZFS and append the
1210  * expected -part1 partition information then lookup by physical path.
1211  */
1212 static int
1213 zfs_deliver_dle(nvlist_t *nvl)
1214 {
1215 	const char *devname;
1216 	char name[MAXPATHLEN];
1217 	uint64_t guid;
1218 
1219 	if (nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID, &guid) == 0) {
1220 		sprintf(name, "%llu", (u_longlong_t)guid);
1221 	} else if (nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devname) == 0) {
1222 		strlcpy(name, devname, MAXPATHLEN);
1223 		zfs_append_partition(name, MAXPATHLEN);
1224 	} else {
1225 		sprintf(name, "unknown");
1226 		zed_log_msg(LOG_INFO, "zfs_deliver_dle: no guid or physpath");
1227 	}
1228 
1229 	if (zpool_iter(g_zfshdl, zfsdle_vdev_online, nvl) != 1) {
1230 		zed_log_msg(LOG_INFO, "zfs_deliver_dle: device '%s' not "
1231 		    "found", name);
1232 		return (1);
1233 	}
1234 
1235 	return (0);
1236 }
1237 
1238 /*
1239  * syseventd daemon module event handler
1240  *
1241  * Handles syseventd daemon zfs device related events:
1242  *
1243  *	EC_DEV_ADD.ESC_DISK
1244  *	EC_DEV_STATUS.ESC_DEV_DLE
1245  *	EC_ZFS.ESC_ZFS_VDEV_CHECK
1246  *
1247  * Note: assumes only one thread active at a time (not thread safe)
1248  */
1249 static int
1250 zfs_slm_deliver_event(const char *class, const char *subclass, nvlist_t *nvl)
1251 {
1252 	int ret;
1253 	boolean_t is_check = B_FALSE, is_dle = B_FALSE;
1254 
1255 	if (strcmp(class, EC_DEV_ADD) == 0) {
1256 		/*
1257 		 * We're mainly interested in disk additions, but we also listen
1258 		 * for new loop devices, to allow for simplified testing.
1259 		 */
1260 		if (strcmp(subclass, ESC_DISK) != 0 &&
1261 		    strcmp(subclass, ESC_LOFI) != 0)
1262 			return (0);
1263 
1264 		is_check = B_FALSE;
1265 	} else if (strcmp(class, EC_ZFS) == 0 &&
1266 	    strcmp(subclass, ESC_ZFS_VDEV_CHECK) == 0) {
1267 		/*
1268 		 * This event signifies that a device failed to open
1269 		 * during pool load, but the 'autoreplace' property was
1270 		 * set, so we should pretend it's just been added.
1271 		 */
1272 		is_check = B_TRUE;
1273 	} else if (strcmp(class, EC_DEV_STATUS) == 0 &&
1274 	    strcmp(subclass, ESC_DEV_DLE) == 0) {
1275 		is_dle = B_TRUE;
1276 	} else {
1277 		return (0);
1278 	}
1279 
1280 	if (is_dle)
1281 		ret = zfs_deliver_dle(nvl);
1282 	else if (is_check)
1283 		ret = zfs_deliver_check(nvl);
1284 	else
1285 		ret = zfs_deliver_add(nvl);
1286 
1287 	return (ret);
1288 }
1289 
1290 static void *
1291 zfs_enum_pools(void *arg)
1292 {
1293 	(void) arg;
1294 
1295 	(void) zpool_iter(g_zfshdl, zfs_unavail_pool, (void *)&g_pool_list);
1296 	/*
1297 	 * Linux - instead of using a thread pool, each list entry
1298 	 * will spawn a thread when an unavailable pool transitions
1299 	 * to available. zfs_slm_fini will wait for these threads.
1300 	 */
1301 	g_enumeration_done = B_TRUE;
1302 	return (NULL);
1303 }
1304 
1305 /*
1306  * Called from the zed daemon at startup.
1307  *
1308  * Messages are delivered to us from zevents or from the udev monitor.
1309  *
1310  * For now, each agent has its own libzfs instance.
1311  */
1312 int
1313 zfs_slm_init(void)
1314 {
1315 	if ((g_zfshdl = libzfs_init()) == NULL)
1316 		return (-1);
1317 
1318 	/*
1319 	 * collect a list of unavailable pools (asynchronously,
1320 	 * since this can take a while)
1321 	 */
1322 	list_create(&g_pool_list, sizeof (struct unavailpool),
1323 	    offsetof(struct unavailpool, uap_node));
1324 
1325 	if (pthread_create(&g_zfs_tid, NULL, zfs_enum_pools, NULL) != 0) {
1326 		list_destroy(&g_pool_list);
1327 		libzfs_fini(g_zfshdl);
1328 		return (-1);
1329 	}
1330 
1331 	pthread_setname_np(g_zfs_tid, "enum-pools");
1332 	list_create(&g_device_list, sizeof (struct pendingdev),
1333 	    offsetof(struct pendingdev, pd_node));
1334 
1335 	return (0);
1336 }
1337 
1338 void
1339 zfs_slm_fini(void)
1340 {
1341 	unavailpool_t *pool;
1342 	pendingdev_t *device;
1343 
1344 	/* wait for zfs_enum_pools thread to complete */
1345 	(void) pthread_join(g_zfs_tid, NULL);
1346 	/* destroy the thread pool */
1347 	if (g_tpool != NULL) {
1348 		tpool_wait(g_tpool);
1349 		tpool_destroy(g_tpool);
1350 	}
1351 
1352 	while ((pool = list_remove_head(&g_pool_list)) != NULL) {
1353 		zpool_close(pool->uap_zhp);
1354 		free(pool);
1355 	}
1356 	list_destroy(&g_pool_list);
1357 
1358 	while ((device = list_remove_head(&g_device_list)) != NULL)
1359 		free(device);
1360 	list_destroy(&g_device_list);
1361 
1362 	libzfs_fini(g_zfshdl);
1363 }
1364 
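/*
 * Event delivery entry point: log the event class and subclass, then hand
 * the event to zfs_slm_deliver_event() for processing.
 */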
1365 void
1366 zfs_slm_event(const char *class, const char *subclass, nvlist_t *nvl)
1367 {
1368 	zed_log_msg(LOG_INFO, "zfs_slm_event: %s.%s", class, subclass);
1369 	(void) zfs_slm_deliver_event(class, subclass, nvl);
1370 }
1371