xref: /freebsd/sys/contrib/openzfs/cmd/zed/agents/zfs_mod.c (revision a67cc943273ba7cba2f78e33fc5897e1fbecd462)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2012 by Delphix. All rights reserved.
24  * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
25  * Copyright (c) 2016, 2017, Intel Corporation.
26  * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
27  */
28 
29 /*
30  * ZFS syseventd module.
31  *
32  * file origin: openzfs/usr/src/cmd/syseventd/modules/zfs_mod/zfs_mod.c
33  *
34  * The purpose of this module is to identify when devices are added to the
35  * system, and appropriately online or replace the affected vdevs.
36  *
37  * When a device is added to the system:
38  *
39  * 	1. Search for any vdevs whose devid matches that of the newly added
40  *	   device.
41  *
42  * 	2. If no vdevs are found, then search for any vdevs whose udev path
43  *	   matches that of the new device.
44  *
45  *	3. If no vdevs match by either method, then ignore the event.
46  *
47  * 	4. Attempt to online the device with a flag to indicate that it should
48  *	   be unspared when resilvering completes.  If this succeeds, then the
49  *	   same device was inserted and we should continue normally.
50  *
51  *	5. If the pool does not have the 'autoreplace' property set, attempt to
52  *	   online the device again without the unspare flag, which will
53  *	   generate a FMA fault.
54  *
55  *	6. If the pool has the 'autoreplace' property set, and the matching vdev
56  *	   is a whole disk, then label the new disk and attempt a 'zpool
57  *	   replace'.
58  *
59  * The module responds to EC_DEV_ADD events.  The special ESC_ZFS_VDEV_CHECK
60  * event indicates that a device failed to open during pool load, but the
61  * autoreplace property was set.  In this case, we deferred the associated
62  * FMA fault until our module had a chance to process the autoreplace logic.
63  * If the device could not be replaced, then the second online attempt will
64  * trigger the FMA fault that we skipped earlier.
65  *
66  * ZFS on Linux porting notes:
67  *	Linux udev provides a disk insert event for both the disk and the partition.
68  *
69  */
70 
71 #include <ctype.h>
72 #include <fcntl.h>
73 #include <libnvpair.h>
74 #include <libzfs.h>
75 #include <libzutil.h>
76 #include <limits.h>
77 #include <stddef.h>
78 #include <stdlib.h>
79 #include <string.h>
80 #include <syslog.h>
81 #include <sys/list.h>
82 #include <sys/sunddi.h>
83 #include <sys/sysevent/eventdefs.h>
84 #include <sys/sysevent/dev.h>
85 #include <thread_pool.h>
86 #include <pthread.h>
87 #include <unistd.h>
88 #include <errno.h>
89 #include "zfs_agents.h"
90 #include "../zed_log.h"
91 
92 #define	DEV_BYID_PATH	"/dev/disk/by-id/"
93 #define	DEV_BYPATH_PATH	"/dev/disk/by-path/"
94 #define	DEV_BYVDEV_PATH	"/dev/disk/by-vdev/"
95 
96 typedef void (*zfs_process_func_t)(zpool_handle_t *, nvlist_t *, boolean_t);
97 
98 libzfs_handle_t *g_zfshdl;
99 list_t g_pool_list;	/* list of unavailable pools at initialization */
100 list_t g_device_list;	/* list of disks with asynchronous label request */
101 tpool_t *g_tpool;	/* thread pool for async dataset-enable tasks */
102 boolean_t g_enumeration_done;	/* set once zfs_enum_pools() has finished */
103 pthread_t g_zfs_tid;	/* zfs_enum_pools() thread */
104 
105 typedef struct unavailpool {
106 	zpool_handle_t	*uap_zhp;
107 	list_node_t	uap_node;
108 } unavailpool_t;
109 
110 typedef struct pendingdev {
111 	char		pd_physpath[128];
112 	list_node_t	pd_node;
113 } pendingdev_t;
114 
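/*
 * Return the state of the top-level (root) vdev from the pool's current
 * configuration.
 */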
115 static int
116 zfs_toplevel_state(zpool_handle_t *zhp)
117 {
118 	nvlist_t *nvroot;
119 	vdev_stat_t *vs;
120 	unsigned int c;
121 
122 	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
123 	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
124 	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
125 	    (uint64_t **)&vs, &c) == 0);
126 	return (vs->vs_state);
127 }
128 
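/*
 * zpool_iter() callback used during startup enumeration: pools whose
 * top-level vdev state is below DEGRADED are queued on the passed-in list
 * for later processing; available pools are closed immediately.
 */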
129 static int
130 zfs_unavail_pool(zpool_handle_t *zhp, void *data)
131 {
132 	zed_log_msg(LOG_INFO, "zfs_unavail_pool: examining '%s' (state %d)",
133 	    zpool_get_name(zhp), (int)zfs_toplevel_state(zhp));
134 
135 	if (zfs_toplevel_state(zhp) < VDEV_STATE_DEGRADED) {
136 		unavailpool_t *uap;
137 		uap = malloc(sizeof (unavailpool_t));
138 		uap->uap_zhp = zhp;
139 		list_insert_tail((list_t *)data, uap);
140 	} else {
141 		zpool_close(zhp);
142 	}
143 	return (0);
144 }
145 
146 /*
147  * Two-stage replace on Linux: since we get separate disk notifications for
148  * the disk and for each partition, we can wait for the partitioned disk
149  * slice to show up before proceeding.
150  *
151  * First stage tags the disk, initiates async partitioning, and returns
152  * Second stage finds the tag and proceeds to ZFS labeling/replace
153  *
154  * disk-add --> label-disk + tag-disk --> partition-add --> zpool_vdev_attach
155  *
156  * 1. physical match with no fs, no partition
157  *	tag it top, partition disk
158  *
159  * 2. physical match again, see partition and tag
160  *
161  */
162 
163 /*
164  * The device associated with the given vdev (either by devid or physical path)
165  * has been added to the system.  If 'labeled' is set, the disk was already
166  * labeled in response to an earlier add event, so we proceed directly with the
167  * replacement; otherwise whole disks are labeled first and we resume later.
168  *
169  * First, we attempt to online the device (making sure to undo any spare
170  * operation when finished).  If this succeeds, then we're done.  If it fails,
171  * and the new state is VDEV_CANT_OPEN, it indicates that the device was opened,
172  * but that the label was not what we expected.  If the 'autoreplace' property
173  * is enabled, then we relabel the disk (if specified), and attempt a 'zpool
174  * replace'.  If the online is successful, but the new state is something else
175  * (REMOVED or FAULTED), it indicates that we're out of sync or in some sort of
176  * race, and we should avoid attempting to relabel the disk.
177  *
178  * We can also arrive here from an ESC_ZFS_VDEV_CHECK event.
179  */
180 static void
181 zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
182 {
183 	char *path;
184 	vdev_state_t newstate;
185 	nvlist_t *nvroot, *newvd;
186 	pendingdev_t *device;
187 	uint64_t wholedisk = 0ULL;
188 	uint64_t offline = 0ULL;
189 	uint64_t guid = 0ULL;
190 	char *physpath = NULL, *new_devid = NULL, *enc_sysfs_path = NULL;
191 	char rawpath[PATH_MAX], fullpath[PATH_MAX];
192 	char devpath[PATH_MAX];
193 	int ret;
194 	boolean_t is_dm = B_FALSE;
195 	boolean_t is_sd = B_FALSE;
196 	uint_t c;
197 	vdev_stat_t *vs;
198 
199 	if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &path) != 0)
200 		return;
201 
202 	/* Skip healthy disks */
203 	verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
204 	    (uint64_t **)&vs, &c) == 0);
205 	if (vs->vs_state == VDEV_STATE_HEALTHY) {
206 		zed_log_msg(LOG_INFO, "%s: %s is already healthy, skip it.",
207 		    __func__, path);
208 		return;
209 	}
210 
211 	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &physpath);
212 	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
213 	    &enc_sysfs_path);
214 	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
215 	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_OFFLINE, &offline);
216 	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_GUID, &guid);
217 
218 	if (offline)
219 		return;  /* don't intervene if it was taken offline */
220 
221 	is_dm = zfs_dev_is_dm(path);
222 	zed_log_msg(LOG_INFO, "zfs_process_add: pool '%s' vdev '%s', phys '%s'"
223 	    " wholedisk %d, %s dm (guid %llu)", zpool_get_name(zhp), path,
224 	    physpath ? physpath : "NULL", wholedisk, is_dm ? "is" : "not",
225 	    (long long unsigned int)guid);
226 
227 	/*
228 	 * The vdev GUID is preferred for identification (it gets passed in as the path)
229 	 */
230 	if (guid != 0) {
231 		(void) snprintf(fullpath, sizeof (fullpath), "%llu",
232 		    (long long unsigned int)guid);
233 	} else {
234 		/*
235 		 * otherwise use path sans partition suffix for whole disks
236 		 */
237 		(void) strlcpy(fullpath, path, sizeof (fullpath));
238 		if (wholedisk) {
239 			char *spath = zfs_strip_partition(fullpath);
240 			if (!spath) {
241 				zed_log_msg(LOG_INFO, "%s: Can't alloc",
242 				    __func__);
243 				return;
244 			}
245 
246 			(void) strlcpy(fullpath, spath, sizeof (fullpath));
247 			free(spath);
248 		}
249 	}
250 
251 	/*
252 	 * Attempt to online the device.
253 	 */
254 	if (zpool_vdev_online(zhp, fullpath,
255 	    ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE, &newstate) == 0 &&
256 	    (newstate == VDEV_STATE_HEALTHY ||
257 	    newstate == VDEV_STATE_DEGRADED)) {
258 		zed_log_msg(LOG_INFO, "  zpool_vdev_online: vdev %s is %s",
259 		    fullpath, (newstate == VDEV_STATE_HEALTHY) ?
260 		    "HEALTHY" : "DEGRADED");
261 		return;
262 	}
263 
264 	/*
265 	 * vdev_id alias rule for using scsi_debug devices (FMA automated
266 	 * testing)
267 	 */
268 	if (physpath != NULL && strcmp("scsidebug", physpath) == 0)
269 		is_sd = B_TRUE;
270 
271 	/*
272 	 * If the pool doesn't have the autoreplace property set, then use
273 	 * vdev online to trigger a FMA fault by posting an ereport.
274 	 */
275 	if (!zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOREPLACE, NULL) ||
276 	    !(wholedisk || is_dm) || (physpath == NULL)) {
277 		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
278 		    &newstate);
279 		zed_log_msg(LOG_INFO, "Pool's autoreplace is not enabled or "
280 		    "not a whole disk for '%s'", fullpath);
281 		return;
282 	}
283 
284 	/*
285 	 * Convert physical path into its current device node.  Rawpath
286 	 * needs to be /dev/disk/by-vdev for a scsi_debug device since
287 	 * /dev/disk/by-path will not be present.
288 	 */
289 	(void) snprintf(rawpath, sizeof (rawpath), "%s%s",
290 	    is_sd ? DEV_BYVDEV_PATH : DEV_BYPATH_PATH, physpath);
291 
292 	if (realpath(rawpath, devpath) == NULL && !is_dm) {
293 		zed_log_msg(LOG_INFO, "  realpath: %s failed (%s)",
294 		    rawpath, strerror(errno));
295 
296 		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
297 		    &newstate);
298 
299 		zed_log_msg(LOG_INFO, "  zpool_vdev_online: %s FORCEFAULT (%s)",
300 		    fullpath, libzfs_error_description(g_zfshdl));
301 		return;
302 	}
303 
304 	/* Only autoreplace bad disks */
305 	if ((vs->vs_state != VDEV_STATE_DEGRADED) &&
306 	    (vs->vs_state != VDEV_STATE_FAULTED) &&
307 	    (vs->vs_state != VDEV_STATE_CANT_OPEN)) {
308 		return;
309 	}
310 
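	/*
	 * Retrieve the new devid that zfs_iter_vdev() stashed in this vdev's
	 * nvlist when the match was made; it is used below when constructing
	 * the replacement vdev.
	 */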
311 	nvlist_lookup_string(vdev, "new_devid", &new_devid);
312 
313 	if (is_dm) {
314 		/* Don't label device mapper or multipath disks. */
315 	} else if (!labeled) {
316 		/*
317 		 * we're auto-replacing a raw disk, so label it first
318 		 */
319 		char *leafname;
320 
321 		/*
322 		 * If this is a request to label a whole disk, then attempt to
323 		 * write out the label.  Before we can label the disk, we need
324 		 * to map the physical string that was matched on to the
325 		 * underlying device node.
326 		 *
327 		 * If any part of this process fails, then do a force online
328 		 * to trigger a ZFS fault for the device (and any hot spare
329 		 * replacement).
330 		 */
331 		leafname = strrchr(devpath, '/') + 1;
332 
333 		/*
334 		 * Write out the label; if that fails, force the vdev online
335 		 * to raise a fault instead.
336 		 */
337 		if (zpool_label_disk(g_zfshdl, zhp, leafname) != 0) {
338 			zed_log_msg(LOG_INFO, "  zpool_label_disk: could not "
339 			    "label '%s' (%s)", leafname,
340 			    libzfs_error_description(g_zfshdl));
341 
342 			(void) zpool_vdev_online(zhp, fullpath,
343 			    ZFS_ONLINE_FORCEFAULT, &newstate);
344 			return;
345 		}
346 
347 		/*
348 		 * The disk labeling is asynchronous on Linux. Just record
349 		 * this label request and return as there will be another
350 		 * disk add event for the partition after the labeling is
351 		 * completed.
352 		 */
353 		device = malloc(sizeof (pendingdev_t));
354 		(void) strlcpy(device->pd_physpath, physpath,
355 		    sizeof (device->pd_physpath));
356 		list_insert_tail(&g_device_list, device);
357 
358 		zed_log_msg(LOG_INFO, "  zpool_label_disk: async '%s' (%llu)",
359 		    leafname, (u_longlong_t)guid);
360 
361 		return;	/* resumes at EC_DEV_ADD.ESC_DISK for partition */
362 
363 	} else /* labeled */ {
364 		boolean_t found = B_FALSE;
365 		/*
366 		 * match up with request above to label the disk
367 		 */
368 		for (device = list_head(&g_device_list); device != NULL;
369 		    device = list_next(&g_device_list, device)) {
370 			if (strcmp(physpath, device->pd_physpath) == 0) {
371 				list_remove(&g_device_list, device);
372 				free(device);
373 				found = B_TRUE;
374 				break;
375 			}
376 			zed_log_msg(LOG_INFO, "zpool_label_disk: %s != %s",
377 			    physpath, device->pd_physpath);
378 		}
379 		if (!found) {
380 			/* unexpected partition slice encountered */
381 			zed_log_msg(LOG_INFO, "labeled disk %s unexpected here",
382 			    fullpath);
383 			(void) zpool_vdev_online(zhp, fullpath,
384 			    ZFS_ONLINE_FORCEFAULT, &newstate);
385 			return;
386 		}
387 
388 		zed_log_msg(LOG_INFO, "  zpool_label_disk: resume '%s' (%llu)",
389 		    physpath, (u_longlong_t)guid);
390 
391 		(void) snprintf(devpath, sizeof (devpath), "%s%s",
392 		    DEV_BYID_PATH, new_devid);
393 	}
394 
395 	/*
396 	 * Construct the root vdev to pass to zpool_vdev_attach().  While adding
397 	 * the entire vdev structure is harmless, we construct a reduced set of
398 	 * path/physpath/wholedisk to keep it simple.
399 	 */
400 	if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0) {
401 		zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
402 		return;
403 	}
404 	if (nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0) {
405 		zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
406 		nvlist_free(nvroot);
407 		return;
408 	}
409 
410 	if (nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
411 	    nvlist_add_string(newvd, ZPOOL_CONFIG_PATH, path) != 0 ||
412 	    nvlist_add_string(newvd, ZPOOL_CONFIG_DEVID, new_devid) != 0 ||
413 	    (physpath != NULL && nvlist_add_string(newvd,
414 	    ZPOOL_CONFIG_PHYS_PATH, physpath) != 0) ||
415 	    (enc_sysfs_path != NULL && nvlist_add_string(newvd,
416 	    ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH, enc_sysfs_path) != 0) ||
417 	    nvlist_add_uint64(newvd, ZPOOL_CONFIG_WHOLE_DISK, wholedisk) != 0 ||
418 	    nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
419 	    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &newvd,
420 	    1) != 0) {
421 		zed_log_msg(LOG_WARNING, "zfs_mod: unable to add nvlist pairs");
422 		nvlist_free(newvd);
423 		nvlist_free(nvroot);
424 		return;
425 	}
426 
427 	nvlist_free(newvd);
428 
429 	/*
430 	 * Wait for udev to verify the links exist, then auto-replace
431 	 * the leaf disk at the same physical location.
432 	 */
433 	if (zpool_label_disk_wait(path, 3000) != 0) {
434 		zed_log_msg(LOG_WARNING, "zfs_mod: expected replacement "
435 		    "disk %s is missing", path);
436 		nvlist_free(nvroot);
437 		return;
438 	}
439 
440 	ret = zpool_vdev_attach(zhp, fullpath, path, nvroot, B_TRUE, B_FALSE);
441 
442 	zed_log_msg(LOG_INFO, "  zpool_vdev_replace: %s with %s (%s)",
443 	    fullpath, path, (ret == 0) ? "no errors" :
444 	    libzfs_error_description(g_zfshdl));
445 
446 	nvlist_free(nvroot);
447 }
448 
449 /*
450  * Utility functions to find a vdev matching given criteria.
451  */
452 typedef struct dev_data {
453 	const char		*dd_compare;	/* value to match against dd_prop */
454 	const char		*dd_prop;	/* vdev nvlist property to compare on */
455 	zfs_process_func_t	dd_func;	/* called for each matching vdev */
456 	boolean_t		dd_found;	/* set once a match has been processed */
457 	boolean_t		dd_islabeled;	/* the add event is for a partition */
458 	uint64_t		dd_pool_guid;	/* restrict search to this pool (0 = any) */
459 	uint64_t		dd_vdev_guid;	/* match on vdev GUID when non-zero */
460 	const char		*dd_new_devid;	/* devid stashed for the replace code */
461 } dev_data_t;
462 
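/*
 * Recursively walk a vdev tree (including any spares and L2ARC devices),
 * looking for a vdev that matches either the requested vdev GUID or the
 * configured dd_prop value.  When a match is found, dd_found is set and
 * dd_func is invoked on the matching vdev.
 */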
463 static void
464 zfs_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *data)
465 {
466 	dev_data_t *dp = data;
467 	char *path = NULL;
468 	uint_t c, children;
469 	nvlist_t **child;
470 
471 	/*
472 	 * First iterate over any children.
473 	 */
474 	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN,
475 	    &child, &children) == 0) {
476 		for (c = 0; c < children; c++)
477 			zfs_iter_vdev(zhp, child[c], data);
478 	}
479 
480 	/*
481 	 * Iterate over any spares and cache devices
482 	 */
483 	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_SPARES,
484 	    &child, &children) == 0) {
485 		for (c = 0; c < children; c++)
486 			zfs_iter_vdev(zhp, child[c], data);
487 	}
488 	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_L2CACHE,
489 	    &child, &children) == 0) {
490 		for (c = 0; c < children; c++)
491 			zfs_iter_vdev(zhp, child[c], data);
492 	}
493 
494 	/* once a vdev has been matched and processed there is nothing left to do */
495 	if (dp->dd_found)
496 		return;
497 
498 	/*
499 	 * Match by GUID if available, otherwise fall back to devid or physical path
500 	 */
501 	if (dp->dd_vdev_guid != 0) {
502 		uint64_t guid;
503 
504 		if (nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_GUID,
505 		    &guid) != 0 || guid != dp->dd_vdev_guid) {
506 			return;
507 		}
508 		zed_log_msg(LOG_INFO, "  zfs_iter_vdev: matched on %llu", guid);
509 		dp->dd_found = B_TRUE;
510 
511 	} else if (dp->dd_compare != NULL) {
512 		/*
513 		 * NOTE: On Linux there is an event for partition, so unlike
514 		 * illumos, substring matching is not required to accommodate
515 		 * the partition suffix. An exact match will be present in
516 		 * the dp->dd_compare value.
517 		 */
518 		if (nvlist_lookup_string(nvl, dp->dd_prop, &path) != 0 ||
519 		    strcmp(dp->dd_compare, path) != 0)
520 			return;
521 
522 		zed_log_msg(LOG_INFO, "  zfs_iter_vdev: matched %s on %s",
523 		    dp->dd_prop, path);
524 		dp->dd_found = B_TRUE;
525 
526 		/* pass the new devid for use by replacing code */
527 		if (dp->dd_new_devid != NULL) {
528 			(void) nvlist_add_string(nvl, "new_devid",
529 			    dp->dd_new_devid);
530 		}
531 	}
532 
533 	(dp->dd_func)(zhp, nvl, dp->dd_islabeled);
534 }
535 
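/*
 * Thread-pool task: mount and share the datasets of a pool that has become
 * available again, then release the pool handle.
 */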
536 static void
537 zfs_enable_ds(void *arg)
538 {
539 	unavailpool_t *pool = (unavailpool_t *)arg;
540 
541 	(void) zpool_enable_datasets(pool->uap_zhp, NULL, 0);
542 	zpool_close(pool->uap_zhp);
543 	free(pool);
544 }
545 
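/*
 * zpool_iter() callback: search one pool's vdev tree for the device
 * described by the dev_data_t and, once enumeration is done, dispatch
 * zfs_enable_ds() for any previously unavailable pool that has recovered
 * to at least DEGRADED.
 */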
546 static int
547 zfs_iter_pool(zpool_handle_t *zhp, void *data)
548 {
549 	nvlist_t *config, *nvl;
550 	dev_data_t *dp = data;
551 	uint64_t pool_guid;
552 	unavailpool_t *pool;
553 
554 	zed_log_msg(LOG_INFO, "zfs_iter_pool: evaluating vdevs on %s (by %s)",
555 	    zpool_get_name(zhp), dp->dd_vdev_guid ? "GUID" : dp->dd_prop);
556 
557 	/*
558 	 * For each vdev in this pool, look for a match to apply dd_func
559 	 */
560 	if ((config = zpool_get_config(zhp, NULL)) != NULL) {
561 		if (dp->dd_pool_guid == 0 ||
562 		    (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
563 		    &pool_guid) == 0 && pool_guid == dp->dd_pool_guid)) {
564 			(void) nvlist_lookup_nvlist(config,
565 			    ZPOOL_CONFIG_VDEV_TREE, &nvl);
566 			zfs_iter_vdev(zhp, nvl, data);
567 		}
568 	}
569 
570 	/*
571 	 * if this pool was originally unavailable,
572 	 * then enable its datasets asynchronously
573 	 */
574 	if (g_enumeration_done)  {
575 		for (pool = list_head(&g_pool_list); pool != NULL;
576 		    pool = list_next(&g_pool_list, pool)) {
577 
578 			if (strcmp(zpool_get_name(zhp),
579 			    zpool_get_name(pool->uap_zhp)))
580 				continue;
581 			if (zfs_toplevel_state(zhp) >= VDEV_STATE_DEGRADED) {
582 				list_remove(&g_pool_list, pool);
583 				(void) tpool_dispatch(g_tpool, zfs_enable_ds,
584 				    pool);
585 				break;
586 			}
587 		}
588 	}
589 
590 	zpool_close(zhp);
591 	return (dp->dd_found);	/* cease iteration after a match */
592 }
593 
594 /*
595  * Given a physical device location, iterate over all
596  * (pool, vdev) pairs which correspond to that location.
597  */
598 static boolean_t
599 devphys_iter(const char *physical, const char *devid, zfs_process_func_t func,
600     boolean_t is_slice)
601 {
602 	dev_data_t data = { 0 };
603 
604 	data.dd_compare = physical;
605 	data.dd_func = func;
606 	data.dd_prop = ZPOOL_CONFIG_PHYS_PATH;
607 	data.dd_found = B_FALSE;
608 	data.dd_islabeled = is_slice;
609 	data.dd_new_devid = devid;	/* used by auto replace code */
610 
611 	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
612 
613 	return (data.dd_found);
614 }
615 
616 /*
617  * Given a device identifier, find any vdevs with a matching devid.
618  * On Linux we can match the devid directly, which is always a whole disk.
619  */
620 static boolean_t
621 devid_iter(const char *devid, zfs_process_func_t func, boolean_t is_slice)
622 {
623 	dev_data_t data = { 0 };
624 
625 	data.dd_compare = devid;
626 	data.dd_func = func;
627 	data.dd_prop = ZPOOL_CONFIG_DEVID;
628 	data.dd_found = B_FALSE;
629 	data.dd_islabeled = is_slice;
630 	data.dd_new_devid = devid;
631 
632 	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
633 
634 	return (data.dd_found);
635 }
636 
637 /*
638  * Handle a EC_DEV_ADD.ESC_DISK event.
639  *
640  * illumos
641  *	Expects: DEV_PHYS_PATH string in schema
642  *	Matches: vdev's ZPOOL_CONFIG_PHYS_PATH or ZPOOL_CONFIG_DEVID
643  *
644  *      path: '/dev/dsk/c0t1d0s0' (persistent)
645  *     devid: 'id1,sd@SATA_____Hitachi_HDS72101______JP2940HZ3H74MC/a'
646  * phys_path: '/pci@0,0/pci103c,1609@11/disk@1,0:a'
647  *
648  * linux
649  *	provides: DEV_PHYS_PATH and DEV_IDENTIFIER strings in schema
650  *	Matches: vdev's ZPOOL_CONFIG_PHYS_PATH or ZPOOL_CONFIG_DEVID
651  *
652  *      path: '/dev/sdc1' (not persistent)
653  *     devid: 'ata-SAMSUNG_HD204UI_S2HGJD2Z805891-part1'
654  * phys_path: 'pci-0000:04:00.0-sas-0x4433221106000000-lun-0'
655  */
656 static int
657 zfs_deliver_add(nvlist_t *nvl, boolean_t is_lofi)
658 {
659 	char *devpath = NULL, *devid;
660 	boolean_t is_slice;
661 
662 	/*
663 	 * Expecting a devid string and an optional physical location
664 	 */
665 	if (nvlist_lookup_string(nvl, DEV_IDENTIFIER, &devid) != 0)
666 		return (-1);
667 
668 	(void) nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devpath);
669 
670 	is_slice = (nvlist_lookup_boolean(nvl, DEV_IS_PART) == 0);
671 
672 	zed_log_msg(LOG_INFO, "zfs_deliver_add: adding %s (%s) (is_slice %d)",
673 	    devid, devpath ? devpath : "NULL", is_slice);
674 
675 	/*
676 	 * Iterate over all vdevs looking for a match in the following order:
677 	 * 1. ZPOOL_CONFIG_DEVID (identifies the unique disk)
678 	 * 2. ZPOOL_CONFIG_PHYS_PATH (identifies disk physical location).
679 	 *
680 	 * For disks, we only want to pay attention to vdevs that are marked
681 	 * as whole disks or are multipath devices.
682 	 */
683 	if (!devid_iter(devid, zfs_process_add, is_slice) && devpath != NULL)
684 		(void) devphys_iter(devpath, devid, zfs_process_add, is_slice);
685 
686 	return (0);
687 }
688 
689 /*
690  * Called when we receive a VDEV_CHECK event, which indicates a device could not
691  * be opened during initial pool open, but the autoreplace property was set on
692  * the pool.  In this case, we treat it as if it were an add event.
693  */
694 static int
695 zfs_deliver_check(nvlist_t *nvl)
696 {
697 	dev_data_t data = { 0 };
698 
699 	if (nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID,
700 	    &data.dd_pool_guid) != 0 ||
701 	    nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID,
702 	    &data.dd_vdev_guid) != 0 ||
703 	    data.dd_vdev_guid == 0)
704 		return (0);
705 
706 	zed_log_msg(LOG_INFO, "zfs_deliver_check: pool '%llu', vdev %llu",
707 	    data.dd_pool_guid, data.dd_vdev_guid);
708 
709 	data.dd_func = zfs_process_add;
710 
711 	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
712 
713 	return (0);
714 }
715 
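/*
 * zpool_iter() callback for ESC_DEV_DLE events: locate the vdev matching
 * the name handed in by zfs_deliver_dle(), for whole-disk vdevs reopen the
 * pool so the kernel picks up the expanded size, and online the vdev if the
 * pool has the autoexpand property set.
 */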
716 static int
717 zfsdle_vdev_online(zpool_handle_t *zhp, void *data)
718 {
719 	char *devname = data;
720 	boolean_t avail_spare, l2cache;
721 	nvlist_t *tgt;
722 	int error;
723 
724 	zed_log_msg(LOG_INFO, "zfsdle_vdev_online: searching for '%s' in '%s'",
725 	    devname, zpool_get_name(zhp));
726 
727 	if ((tgt = zpool_find_vdev_by_physpath(zhp, devname,
728 	    &avail_spare, &l2cache, NULL)) != NULL) {
729 		char *path, fullpath[MAXPATHLEN];
730 		uint64_t wholedisk;
731 
732 		error = nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &path);
733 		if (error) {
734 			zpool_close(zhp);
735 			return (0);
736 		}
737 
738 		error = nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
739 		    &wholedisk);
740 		if (error)
741 			wholedisk = 0;
742 
743 		if (wholedisk) {
744 			path = strrchr(path, '/');
745 			if (path != NULL) {
746 				path = zfs_strip_partition(path + 1);
747 				if (path == NULL) {
748 					zpool_close(zhp);
749 					return (0);
750 				}
751 			} else {
752 				zpool_close(zhp);
753 				return (0);
754 			}
755 
756 			(void) strlcpy(fullpath, path, sizeof (fullpath));
757 			free(path);
758 
759 			/*
760 			 * We need to reopen the pool associated with this
761 			 * device so that the kernel can update the size of
762 			 * the expanded device.  When expanding there is no
763 			 * need to restart the scrub from the beginning.
764 			 */
765 			boolean_t scrub_restart = B_FALSE;
766 			(void) zpool_reopen_one(zhp, &scrub_restart);
767 		} else {
768 			(void) strlcpy(fullpath, path, sizeof (fullpath));
769 		}
770 
771 		if (zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
772 			vdev_state_t newstate;
773 
774 			if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL) {
775 				error = zpool_vdev_online(zhp, fullpath, 0,
776 				    &newstate);
777 				zed_log_msg(LOG_INFO, "zfsdle_vdev_online: "
778 				    "setting device '%s' to ONLINE state "
779 				    "in pool '%s': %d", fullpath,
780 				    zpool_get_name(zhp), error);
781 			}
782 		}
783 		zpool_close(zhp);
784 		return (1);
785 	}
786 	zpool_close(zhp);
787 	return (0);
788 }
789 
790 /*
791  * This function handles the ESC_DEV_DLE device change event.  Use the
792  * provided vdev guid when looking up a disk or partition; when the guid
793  * is not present, assume the entire disk is owned by ZFS, append the
794  * expected -part1 partition suffix, and then look up by physical path.
795  */
796 static int
797 zfs_deliver_dle(nvlist_t *nvl)
798 {
799 	char *devname, name[MAXPATHLEN];
800 	uint64_t guid;
801 
802 	if (nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID, &guid) == 0) {
803 		sprintf(name, "%llu", (u_longlong_t)guid);
804 	} else if (nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devname) == 0) {
805 		strlcpy(name, devname, MAXPATHLEN);
806 		zfs_append_partition(name, MAXPATHLEN);
807 	} else {
808 		zed_log_msg(LOG_INFO, "zfs_deliver_dle: no guid or physpath");
		return (-1);	/* nothing to search for; 'name' is uninitialized */
809 	}
810 
811 	if (zpool_iter(g_zfshdl, zfsdle_vdev_online, name) != 1) {
812 		zed_log_msg(LOG_INFO, "zfs_deliver_dle: device '%s' not "
813 		    "found", name);
814 		return (1);
815 	}
816 
817 	return (0);
818 }
819 
820 /*
821  * syseventd daemon module event handler
822  *
823  * Handles the syseventd daemon's ZFS device-related events:
824  *
825  *	EC_DEV_ADD.ESC_DISK
826  *	EC_DEV_STATUS.ESC_DEV_DLE
827  *	EC_ZFS.ESC_ZFS_VDEV_CHECK
828  *
829  * Note: assumes only one thread active at a time (not thread safe)
830  */
831 static int
832 zfs_slm_deliver_event(const char *class, const char *subclass, nvlist_t *nvl)
833 {
834 	int ret;
835 	boolean_t is_lofi = B_FALSE, is_check = B_FALSE, is_dle = B_FALSE;
836 
837 	if (strcmp(class, EC_DEV_ADD) == 0) {
838 		/*
839 		 * We're mainly interested in disk additions, but we also listen
840 		 * for new loop devices, to allow for simplified testing.
841 		 */
842 		if (strcmp(subclass, ESC_DISK) == 0)
843 			is_lofi = B_FALSE;
844 		else if (strcmp(subclass, ESC_LOFI) == 0)
845 			is_lofi = B_TRUE;
846 		else
847 			return (0);
848 
849 		is_check = B_FALSE;
850 	} else if (strcmp(class, EC_ZFS) == 0 &&
851 	    strcmp(subclass, ESC_ZFS_VDEV_CHECK) == 0) {
852 		/*
853 		 * This event signifies that a device failed to open
854 		 * during pool load, but the 'autoreplace' property was
855 		 * set, so we should pretend it's just been added.
856 		 */
857 		is_check = B_TRUE;
858 	} else if (strcmp(class, EC_DEV_STATUS) == 0 &&
859 	    strcmp(subclass, ESC_DEV_DLE) == 0) {
860 		is_dle = B_TRUE;
861 	} else {
862 		return (0);
863 	}
864 
865 	if (is_dle)
866 		ret = zfs_deliver_dle(nvl);
867 	else if (is_check)
868 		ret = zfs_deliver_check(nvl);
869 	else
870 		ret = zfs_deliver_add(nvl, is_lofi);
871 
872 	return (ret);
873 }
874 
875 /*ARGSUSED*/
876 static void *
877 zfs_enum_pools(void *arg)
878 {
879 	(void) zpool_iter(g_zfshdl, zfs_unavail_pool, (void *)&g_pool_list);
880 	/*
881 	 * Linux - instead of using a thread pool, each list entry
882 	 * will spawn a thread when an unavailable pool transitions
883 	 * to available. zfs_slm_fini will wait for these threads.
884 	 */
885 	g_enumeration_done = B_TRUE;
886 	return (NULL);
887 }
888 
889 /*
890  * called from zed daemon at startup
891  *
892  * handles messages sent from zevents or the udev monitor
893  *
894  * For now, each agent has its own libzfs instance
895  */
896 int
897 zfs_slm_init()
898 {
899 	if ((g_zfshdl = libzfs_init()) == NULL)
900 		return (-1);
901 
902 	/*
903 	 * collect a list of unavailable pools (asynchronously,
904 	 * since this can take a while)
905 	 */
906 	list_create(&g_pool_list, sizeof (struct unavailpool),
907 	    offsetof(struct unavailpool, uap_node));
908 
909 	if (pthread_create(&g_zfs_tid, NULL, zfs_enum_pools, NULL) != 0) {
910 		list_destroy(&g_pool_list);
911 		libzfs_fini(g_zfshdl);
912 		return (-1);
913 	}
914 
915 	list_create(&g_device_list, sizeof (struct pendingdev),
916 	    offsetof(struct pendingdev, pd_node));
917 
918 	return (0);
919 }
920 
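/*
 * called from zed daemon at exit
 *
 * waits for the pool enumeration thread and any pending dataset-enable
 * tasks, then frees the pool/device lists and the libzfs handle
 */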
921 void
922 zfs_slm_fini()
923 {
924 	unavailpool_t *pool;
925 	pendingdev_t *device;
926 
927 	/* wait for zfs_enum_pools thread to complete */
928 	(void) pthread_join(g_zfs_tid, NULL);
929 	/* destroy the thread pool */
930 	if (g_tpool != NULL) {
931 		tpool_wait(g_tpool);
932 		tpool_destroy(g_tpool);
933 	}
934 
935 	while ((pool = (list_head(&g_pool_list))) != NULL) {
936 		list_remove(&g_pool_list, pool);
937 		zpool_close(pool->uap_zhp);
938 		free(pool);
939 	}
940 	list_destroy(&g_pool_list);
941 
942 	while ((device = (list_head(&g_device_list))) != NULL) {
943 		list_remove(&g_device_list, device);
944 		free(device);
945 	}
946 	list_destroy(&g_device_list);
947 
948 	libzfs_fini(g_zfshdl);
949 }
950 
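/*
 * Entry point called by zed for each event; logs the class/subclass and
 * hands the event to zfs_slm_deliver_event() for dispatch.
 */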
951 void
952 zfs_slm_event(const char *class, const char *subclass, nvlist_t *nvl)
953 {
954 	zed_log_msg(LOG_INFO, "zfs_slm_event: %s.%s", class, subclass);
955 	(void) zfs_slm_deliver_event(class, subclass, nvl);
956 }
957