/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * ZFS syseventd module.
 *
 * The purpose of this module is to identify when devices are added to the
 * system, and appropriately online or replace the affected vdevs.
 *
 * When a device is added to the system:
 *
 *	1. Search for any vdevs whose devid matches that of the newly added
 *	   device.
 *
 *	2. If no vdevs are found, then search for any vdevs whose devfs path
 *	   matches that of the new device.
 *
 *	3. If no vdevs match by either method, then ignore the event.
 *
 *	4. Attempt to online the device with a flag to indicate that it should
 *	   be unspared when resilvering completes.  If this succeeds, then the
 *	   same device was inserted and we should continue normally.
 *
 *	5. If the pool does not have the 'autoreplace' property set, attempt to
 *	   online the device again without the unspare flag, which will
 *	   generate an FMA fault.
 *
 *	6. If the pool has the 'autoreplace' property set, and the matching vdev
 *	   is a whole disk, then label the new disk and attempt a 'zpool
 *	   replace'.
 *
 * The module responds to EC_DEV_ADD events for both disks and lofi devices,
 * with the latter used for testing.  The special ESC_ZFS_VDEV_CHECK event
 * indicates that a device failed to open during pool load, but the autoreplace
 * property was set.  In this case, the associated FMA fault was deferred until
 * our module had a chance to process the autoreplace logic.  If the device
 * could not be replaced, then the second online attempt will trigger the FMA
 * fault that we skipped earlier.
 */
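/*
 * As an illustration (hypothetical device and pool names), a typical
 * autoreplace sequence for a whole-disk vdev looks like this:
 *
 *	EC_DEV_ADD/ESC_DISK arrives for /pci@0,0/pci1028,1f17@4/sd@1,0
 *	  -> devid_iter() matches vdev c1t1d0s0 in pool 'tank' by devid
 *	  -> zpool_vdev_online(..., ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE)
 *	     does not leave the vdev HEALTHY or DEGRADED (a different disk was
 *	     inserted, so the label is stale)
 *	  -> 'autoreplace' is on and the vdev is a whole disk, so the disk is
 *	     relabeled and zpool_vdev_attach() performs the 'zpool replace'
 */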

#include <alloca.h>
#include <devid.h>
#include <fcntl.h>
#include <libnvpair.h>
#include <libsysevent.h>
#include <libzfs.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <syslog.h>
#include <sys/list.h>
#include <sys/sunddi.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>
#include <thread_pool.h>
#include <unistd.h>
#include "syseventd.h"

#if defined(__i386) || defined(__amd64)
#define	PHYS_PATH	":q"
#define	RAW_SLICE	"p0"
#elif defined(__sparc)
#define	PHYS_PATH	":c"
#define	RAW_SLICE	"s2"
#else
#error Unknown architecture
#endif
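
/*
 * For example (hypothetical path): on x86, the event devpath
 * "/pci@0,0/pci1028,1f17@4/sd@1,0" maps to the minor node
 * "/devices/pci@0,0/pci1028,1f17@4/sd@1,0:q" (see devid_iter()), and
 * RAW_SLICE names the whole-disk raw slice ("p0").  On sparc the equivalents
 * are ":c" and "s2".
 */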

typedef void (*zfs_process_func_t)(zpool_handle_t *, nvlist_t *, boolean_t);

libzfs_handle_t *g_zfshdl;
list_t g_pool_list;
tpool_t *g_tpool;
boolean_t g_enumeration_done;
thread_t g_zfs_tid;

typedef struct unavailpool {
	zpool_handle_t	*uap_zhp;
	list_node_t	uap_node;
} unavailpool_t;

int
zfs_toplevel_state(zpool_handle_t *zhp)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	unsigned int c;

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) == 0);
	return (vs->vs_state);
}

static int
zfs_unavail_pool(zpool_handle_t *zhp, void *data)
{
	if (zfs_toplevel_state(zhp) < VDEV_STATE_DEGRADED) {
		unavailpool_t *uap;

		/* If allocation fails, drop the handle and move on. */
		if ((uap = malloc(sizeof (unavailpool_t))) == NULL) {
			zpool_close(zhp);
			return (0);
		}
		uap->uap_zhp = zhp;
		list_insert_tail((list_t *)data, uap);
	} else {
		zpool_close(zhp);
	}
	return (0);
}

/*
 * The device associated with the given vdev (either by devid or physical path)
 * has been added to the system.  If 'isdisk' is set, then we only attempt a
 * replacement if it's a whole disk.  This also implies that we should label the
 * disk first.
 *
 * First, we attempt to online the device (making sure to undo any spare
 * operation when finished).  If this succeeds, then we're done.  If it fails,
 * and the new state is VDEV_CANT_OPEN, it indicates that the device was opened,
 * but that the label was not what we expected.  If the 'autoreplace' property
 * is set, then we relabel the disk (if specified), and attempt a 'zpool
 * replace'.  If the online is successful, but the new state is something else
 * (REMOVED or FAULTED), it indicates that we're out of sync or in some sort of
 * race, and we should avoid attempting to relabel the disk.
 */
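/*
 * For example (hypothetical names): if vdev c1t1d0s0 in a pool with
 * autoreplace=on is matched, we first try
 * zpool_vdev_online(zhp, "c1t1d0", ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE,
 * &newstate).  Only if that does not leave the vdev HEALTHY or DEGRADED do we
 * fall through to the relabel-and-replace path below.
 */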
static void
zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t isdisk)
{
	char *path;
	vdev_state_t newstate;
	nvlist_t *nvroot, *newvd;
	uint64_t wholedisk = 0ULL;
	char *physpath = NULL;
	char rawpath[PATH_MAX], fullpath[PATH_MAX];
	size_t len;

	if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &path) != 0)
		return;

	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &physpath);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);

	/*
	 * We should have a way to online a device by guid.  With the current
	 * interface, we are forced to chop off the 's0' for whole disks.
	 */
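	/* E.g. (hypothetical): "/dev/dsk/c1t1d0s0" becomes "/dev/dsk/c1t1d0". */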
	(void) strlcpy(fullpath, path, sizeof (fullpath));
	if (wholedisk)
		fullpath[strlen(fullpath) - 2] = '\0';

	/*
	 * Attempt to online the device.  It would be nice to online this by
	 * GUID, but the current interface only supports lookup by path.
	 */
	if (zpool_vdev_online(zhp, fullpath,
	    ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE, &newstate) == 0 &&
	    (newstate == VDEV_STATE_HEALTHY || newstate == VDEV_STATE_DEGRADED))
		return;

	/*
	 * If the pool doesn't have the autoreplace property set, then attempt a
	 * true online (without the unspare flag), which will trigger an FMA
	 * fault.
	 */
	if (!zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOREPLACE, NULL) ||
	    (isdisk && !wholedisk)) {
		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
		    &newstate);
		return;
	}

	if (isdisk) {
		/*
		 * If this is a request to label a whole disk, then attempt to
		 * write out the label.  Before we can label the disk, we need
		 * access to a raw node.  Ideally, we'd like to walk the devinfo
		 * tree and find a raw node from the corresponding parent node.
		 * This is overly complicated, and since we know how we labeled
		 * this device in the first place, we know it's safe to switch
		 * from /dev/dsk to /dev/rdsk and append the backup slice.
		 *
		 * If any part of this process fails, then do a force online to
		 * trigger a ZFS fault for the device (and any hot spare
		 * replacement).
		 */
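		/*
		 * E.g. (hypothetical): path "/dev/dsk/c1t1d0s0" yields
		 * rawpath "c1t1d0", and zpool_label_disk() then operates on
		 * the backup slice under /dev/rdsk, per the comment above.
		 */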
		if (strncmp(path, "/dev/dsk/", 9) != 0) {
			(void) zpool_vdev_online(zhp, fullpath,
			    ZFS_ONLINE_FORCEFAULT, &newstate);
			return;
		}

		(void) strlcpy(rawpath, path + 9, sizeof (rawpath));
		len = strlen(rawpath);
		rawpath[len - 2] = '\0';

		if (zpool_label_disk(g_zfshdl, zhp, rawpath) != 0) {
			(void) zpool_vdev_online(zhp, fullpath,
			    ZFS_ONLINE_FORCEFAULT, &newstate);
			return;
		}
	}

	/*
	 * Construct the root vdev to pass to zpool_vdev_attach().  While adding
	 * the entire vdev structure is harmless, we construct a reduced set of
	 * path/physpath/wholedisk to keep it simple.
	 */
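	/*
	 * The resulting nvlist is shaped roughly like (a sketch):
	 *
	 *	nvroot (ZPOOL_CONFIG_TYPE = VDEV_TYPE_ROOT)
	 *	  children[0] (ZPOOL_CONFIG_TYPE = VDEV_TYPE_DISK)
	 *	    ZPOOL_CONFIG_PATH = path
	 *	    ZPOOL_CONFIG_PHYS_PATH = physpath (if known)
	 *	    ZPOOL_CONFIG_WHOLE_DISK = wholedisk
	 */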
	if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
		return;

	if (nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0) {
		nvlist_free(nvroot);
		return;
	}

	if (nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
	    nvlist_add_string(newvd, ZPOOL_CONFIG_PATH, path) != 0 ||
	    (physpath != NULL && nvlist_add_string(newvd,
	    ZPOOL_CONFIG_PHYS_PATH, physpath) != 0) ||
	    nvlist_add_uint64(newvd, ZPOOL_CONFIG_WHOLE_DISK, wholedisk) != 0 ||
	    nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
	    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &newvd,
	    1) != 0) {
		nvlist_free(newvd);
		nvlist_free(nvroot);
		return;
	}

	nvlist_free(newvd);

	(void) zpool_vdev_attach(zhp, fullpath, path, nvroot, B_TRUE);

	nvlist_free(nvroot);
}

/*
 * Utility functions to find a vdev matching given criteria.
 */
typedef struct dev_data {
	const char		*dd_compare;
	const char		*dd_prop;
	zfs_process_func_t	dd_func;
	boolean_t		dd_found;
	boolean_t		dd_isdisk;
	uint64_t		dd_pool_guid;
	uint64_t		dd_vdev_guid;
} dev_data_t;

static void
zfs_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *data)
{
	dev_data_t *dp = data;
	char *path;
	uint_t c, children;
	nvlist_t **child;
	size_t len;
	uint64_t guid;

	/*
	 * First iterate over any children.
	 */
	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			zfs_iter_vdev(zhp, child[c], data);
		return;
	}

	if (dp->dd_vdev_guid != 0) {
		if (nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_GUID,
		    &guid) != 0 || guid != dp->dd_vdev_guid)
			return;
	} else {
		len = strlen(dp->dd_compare);

		if (nvlist_lookup_string(nvl, dp->dd_prop, &path) != 0 ||
		    strncmp(dp->dd_compare, path, len) != 0)
			return;

		/*
		 * Normally, we want to have an exact match for the comparison
		 * string.  However, we allow substring matches in the following
		 * cases:
		 *
		 *	<path>:		This is a devpath, and the target is one
		 *			of its children.
		 *
		 *	<path/>		This is a devid for a whole disk, and
		 *			the target is one of its children.
		 */
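		/*
		 * E.g. (hypothetical values): the devpath compare string
		 * "/pci@0,0/ide@1f/cmdk@0,0" matches the child physpath
		 * "/pci@0,0/ide@1f/cmdk@0,0:a", and the whole-disk devid
		 * compare string "id1,cmdk@AST3500630AS=9QG0Q0RT/" matches
		 * the per-slice devid "id1,cmdk@AST3500630AS=9QG0Q0RT/a".
		 */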
		if (path[len] != '\0' && path[len] != ':' &&
		    path[len - 1] != '/')
			return;
	}

	/* Record the match so devid_iter()/devpath_iter() can report it. */
	dp->dd_found = B_TRUE;
	(dp->dd_func)(zhp, nvl, dp->dd_isdisk);
}

void
zfs_enable_ds(void *arg)
{
	unavailpool_t *pool = (unavailpool_t *)arg;

	(void) zpool_enable_datasets(pool->uap_zhp, NULL, 0);
	zpool_close(pool->uap_zhp);
	free(pool);
}

static int
zfs_iter_pool(zpool_handle_t *zhp, void *data)
{
	nvlist_t *config, *nvl;
	dev_data_t *dp = data;
	uint64_t pool_guid;
	unavailpool_t *pool;

	if ((config = zpool_get_config(zhp, NULL)) != NULL) {
		if (dp->dd_pool_guid == 0 ||
		    (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &pool_guid) == 0 && pool_guid == dp->dd_pool_guid)) {
			(void) nvlist_lookup_nvlist(config,
			    ZPOOL_CONFIG_VDEV_TREE, &nvl);
			zfs_iter_vdev(zhp, nvl, data);
		}
	}
	if (g_enumeration_done) {
		for (pool = list_head(&g_pool_list); pool != NULL;
		    pool = list_next(&g_pool_list, pool)) {

			if (strcmp(zpool_get_name(zhp),
			    zpool_get_name(pool->uap_zhp)))
				continue;
			if (zfs_toplevel_state(zhp) >= VDEV_STATE_DEGRADED) {
				list_remove(&g_pool_list, pool);
				(void) tpool_dispatch(g_tpool, zfs_enable_ds,
				    pool);
				break;
			}
		}
	}

	zpool_close(zhp);
	return (0);
}

/*
 * Given a physical device path, iterate over all (pool, vdev) pairs which
 * correspond to the given path.
 */
static boolean_t
devpath_iter(const char *devpath, zfs_process_func_t func, boolean_t wholedisk)
{
	dev_data_t data = { 0 };

	data.dd_compare = devpath;
	data.dd_func = func;
	data.dd_prop = ZPOOL_CONFIG_PHYS_PATH;
	data.dd_found = B_FALSE;
	data.dd_isdisk = wholedisk;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (data.dd_found);
}

/*
 * Given a /devices path, lookup the corresponding devid for each minor node,
 * and find any vdevs with matching devids.  Doing this straight up would be
 * rather inefficient, O(minor nodes * vdevs in system), so we take advantage of
 * the fact that each devid ends with "/<minornode>".  Once we find any valid
 * minor node, we chop off the portion after the last slash, and then search for
 * matching vdevs, which is O(vdevs in system).
 */
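/*
 * E.g. (hypothetical devid): devid_get() on the known minor node might
 * encode, with a NULL minor name, to "id1,cmdk@AST3500630AS=9QG0Q0RT";
 * appending '/' gives the compare prefix "id1,cmdk@AST3500630AS=9QG0Q0RT/",
 * which prefix-matches the per-slice devids such as ".../a" on every vdev of
 * that disk.
 */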
static boolean_t
devid_iter(const char *devpath, zfs_process_func_t func, boolean_t wholedisk)
{
	size_t len = strlen(devpath) + sizeof ("/devices") +
	    sizeof (PHYS_PATH) - 1;
	char *fullpath;
	int fd;
	ddi_devid_t devid;
	char *devidstr, *fulldevid;
	dev_data_t data = { 0 };

	/*
	 * Try to open a known minor node.
	 */
	fullpath = alloca(len);
	(void) snprintf(fullpath, len, "/devices%s%s", devpath, PHYS_PATH);
	if ((fd = open(fullpath, O_RDONLY)) < 0)
		return (B_FALSE);

	/*
	 * Determine the devid as a string, with no trailing slash for the minor
	 * node.
	 */
	if (devid_get(fd, &devid) != 0) {
		(void) close(fd);
		return (B_FALSE);
	}
	(void) close(fd);

	if ((devidstr = devid_str_encode(devid, NULL)) == NULL) {
		devid_free(devid);
		return (B_FALSE);
	}

	len = strlen(devidstr) + 2;
	fulldevid = alloca(len);
	(void) snprintf(fulldevid, len, "%s/", devidstr);

	data.dd_compare = fulldevid;
	data.dd_func = func;
	data.dd_prop = ZPOOL_CONFIG_DEVID;
	data.dd_found = B_FALSE;
	data.dd_isdisk = wholedisk;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	devid_str_free(devidstr);
	devid_free(devid);

	return (data.dd_found);
}

/*
 * This function is called when we receive a devfs add event.  This can be
 * either a disk event or a lofi event, and the behavior is slightly different
 * depending on which it is.
 */
static int
zfs_deliver_add(nvlist_t *nvl, boolean_t is_lofi)
{
	char *devpath, *devname;
	char path[PATH_MAX], realpath[PATH_MAX];
	char *colon, *raw;
	int ret;

	/*
	 * The main unit of operation is the physical device path.  For disks,
	 * this is the device node, as all minor nodes are affected.  For lofi
	 * devices, this includes the minor path.  Unfortunately, this isn't
	 * represented in the DEV_PHYS_PATH for various reasons.
	 */
	if (nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devpath) != 0)
		return (-1);

	/*
	 * If this is a lofi device, then also get the minor instance name.
	 * Unfortunately, the current payload doesn't include an easy way to get
	 * this information.  So we cheat by resolving the 'dev_name' (which
	 * refers to the raw device) and taking the portion between the ':' and
	 * the trailing ',raw'.
	 */
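	/*
	 * E.g. (hypothetical): a raw lofi dev_name might resolve to something
	 * like "/devices/pseudo/lofi@1:1,raw"; we extract ":1" and append it
	 * to devpath to form the full minor path.
	 */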
	(void) strlcpy(realpath, devpath, sizeof (realpath));
	if (is_lofi) {
		if (nvlist_lookup_string(nvl, DEV_NAME, &devname) == 0 &&
		    (ret = resolvepath(devname, path, sizeof (path))) > 0) {
			path[ret] = '\0';
			colon = strchr(path, ':');
			if (colon != NULL &&
			    (raw = strstr(colon + 1, ",raw")) != NULL) {
				*raw = '\0';
				(void) snprintf(realpath,
				    sizeof (realpath), "%s%s",
				    devpath, colon);
				*raw = ',';
			}
		}
	}

	/*
	 * Iterate over all vdevs with a matching devid, and then those with a
	 * matching /devices path.  For disks, we only want to pay attention to
	 * vdevs marked as whole disks.  For lofi, we don't care (because we're
	 * matching an exact minor name).
	 */
	if (!devid_iter(realpath, zfs_process_add, !is_lofi))
		(void) devpath_iter(realpath, zfs_process_add, !is_lofi);

	return (0);
}

/*
 * Called when we receive a VDEV_CHECK event, which indicates a device could not
 * be opened during initial pool open, but the autoreplace property was set on
 * the pool.  In this case, we treat it as if it were an add event.
 */
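/*
 * The event payload carries the pool and vdev GUIDs; with dd_vdev_guid set,
 * zfs_iter_vdev() matches on the GUID rather than on a devid or physical
 * path.
 */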
static int
zfs_deliver_check(nvlist_t *nvl)
{
	dev_data_t data = { 0 };

	if (nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID,
	    &data.dd_pool_guid) != 0 ||
	    nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID,
	    &data.dd_vdev_guid) != 0)
		return (0);

	data.dd_isdisk = B_TRUE;
	data.dd_func = zfs_process_add;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (0);
}

#define	DEVICE_PREFIX	"/devices"

static int
zfsdle_vdev_online(zpool_handle_t *zhp, void *data)
{
	char *devname = data;
	boolean_t avail_spare, l2cache;
	vdev_state_t newstate;
	nvlist_t *tgt;

	syseventd_print(9, "zfsdle_vdev_online: searching for %s in pool %s\n",
	    devname, zpool_get_name(zhp));

	if ((tgt = zpool_find_vdev_by_physpath(zhp, devname,
	    &avail_spare, &l2cache, NULL)) != NULL) {
		char *path, fullpath[MAXPATHLEN];
		uint64_t wholedisk = 0ULL;

		verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,
		    &path) == 0);
		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk) == 0);

		(void) strlcpy(fullpath, path, sizeof (fullpath));
		if (wholedisk)
			fullpath[strlen(fullpath) - 2] = '\0';

		if (zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
			syseventd_print(9, "zfsdle_vdev_online: setting "
			    "device %s to ONLINE state in pool %s.\n",
			    fullpath, zpool_get_name(zhp));
			if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL)
				(void) zpool_vdev_online(zhp, fullpath, 0,
				    &newstate);
		}
		zpool_close(zhp);
		return (1);
	}
	zpool_close(zhp);
	return (0);
}

int
zfs_deliver_dle(nvlist_t *nvl)
{
	char *devname;

	if (nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devname) != 0) {
		syseventd_print(9, "zfs_deliver_event: no physpath\n");
		return (-1);
	}
	if (strncmp(devname, DEVICE_PREFIX, strlen(DEVICE_PREFIX)) != 0) {
		syseventd_print(9, "zfs_deliver_event: invalid "
		    "device '%s'\n", devname);
		return (-1);
	}

	/*
	 * We try to find the device using the physical path that has been
	 * supplied.  We need to strip off the /devices prefix before starting
	 * our search.
	 */
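	/*
	 * E.g. (hypothetical): "/devices/pci@0,0/ide@1f/cmdk@0,0:a" becomes
	 * "/pci@0,0/ide@1f/cmdk@0,0:a", which zfsdle_vdev_online() then
	 * matches against each vdev's ZPOOL_CONFIG_PHYS_PATH.
	 */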
	devname += strlen(DEVICE_PREFIX);
	if (zpool_iter(g_zfshdl, zfsdle_vdev_online, devname) != 1) {
		syseventd_print(9, "zfs_deliver_event: device '%s' not"
		    " found\n", devname);
		return (1);
	}
	return (0);
}

/*ARGSUSED*/
static int
zfs_deliver_event(sysevent_t *ev, int unused)
{
	const char *class = sysevent_get_class_name(ev);
	const char *subclass = sysevent_get_subclass_name(ev);
	nvlist_t *nvl;
	int ret;
	boolean_t is_lofi, is_check, is_dle = B_FALSE;

	if (strcmp(class, EC_DEV_ADD) == 0) {
		/*
		 * We're mainly interested in disk additions, but we also listen
		 * for new lofi devices, to allow for simplified testing.
		 */
		if (strcmp(subclass, ESC_DISK) == 0)
			is_lofi = B_FALSE;
		else if (strcmp(subclass, ESC_LOFI) == 0)
			is_lofi = B_TRUE;
		else
			return (0);

		is_check = B_FALSE;
	} else if (strcmp(class, EC_ZFS) == 0 &&
	    strcmp(subclass, ESC_ZFS_VDEV_CHECK) == 0) {
		/*
		 * This event signifies that a device failed to open during pool
		 * load, but the 'autoreplace' property was set, so we should
		 * pretend it's just been added.
		 */
		is_check = B_TRUE;
	} else if (strcmp(class, EC_DEV_STATUS) == 0 &&
	    strcmp(subclass, ESC_DEV_DLE) == 0) {
		is_dle = B_TRUE;
	} else {
		return (0);
	}

	if (sysevent_get_attr_list(ev, &nvl) != 0)
		return (-1);

	if (is_dle)
		ret = zfs_deliver_dle(nvl);
	else if (is_check)
		ret = zfs_deliver_check(nvl);
	else
		ret = zfs_deliver_add(nvl, is_lofi);

	nvlist_free(nvl);
	return (ret);
}

/*ARGSUSED*/
void *
zfs_enum_pools(void *arg)
{
	(void) zpool_iter(g_zfshdl, zfs_unavail_pool, (void *)&g_pool_list);
	if (!list_is_empty(&g_pool_list))
		g_tpool = tpool_create(1, sysconf(_SC_NPROCESSORS_ONLN),
		    0, NULL);
	g_enumeration_done = B_TRUE;
	return (NULL);
}

static struct slm_mod_ops zfs_mod_ops = {
	SE_MAJOR_VERSION, SE_MINOR_VERSION, 10, zfs_deliver_event
};

struct slm_mod_ops *
slm_init()
{
	if ((g_zfshdl = libzfs_init()) == NULL)
		return (NULL);

	/*
	 * Collect a list of unavailable pools (asynchronously,
	 * since this can take a while).
	 */
	list_create(&g_pool_list, sizeof (struct unavailpool),
	    offsetof(struct unavailpool, uap_node));
	if (thr_create(NULL, 0, zfs_enum_pools, NULL, 0, &g_zfs_tid) != 0)
		return (NULL);

	return (&zfs_mod_ops);
}

void
slm_fini()
{
	unavailpool_t *pool;

	if (g_tpool != NULL) {
		tpool_wait(g_tpool);
		tpool_destroy(g_tpool);
	}
	while ((pool = list_head(&g_pool_list)) != NULL) {
		list_remove(&g_pool_list, pool);
		zpool_close(pool->uap_zhp);
		free(pool);
	}
	(void) thr_join(g_zfs_tid, NULL, NULL);
	list_destroy(&g_pool_list);
	libzfs_fini(g_zfshdl);
}