xref: /illumos-gate/usr/src/cmd/syseventd/modules/zfs_mod/zfs_mod.c (revision 69a119caa6570c7077699161b7c28b6ee9f8b0f4)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2012 by Delphix. All rights reserved.
24  * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
25  */
26 
27 /*
28  * ZFS syseventd module.
29  *
30  * The purpose of this module is to identify when devices are added to the
31  * system, and appropriately online or replace the affected vdevs.
32  *
33  * When a device is added to the system:
34  *
35  * 	1. Search for any vdevs whose devid matches that of the newly added
36  *	   device.
37  *
38  * 	2. If no vdevs are found, then search for any vdevs whose devfs path
39  *	   matches that of the new device.
40  *
41  *	3. If no vdevs match by either method, then ignore the event.
42  *
43  * 	4. Attempt to online the device with a flag to indicate that it should
44  *	   be unspared when resilvering completes.  If this succeeds, then the
45  *	   same device was inserted and we should continue normally.
46  *
47  *	5. If the pool does not have the 'autoreplace' property set, attempt to
48  *	   online the device again without the unspare flag, which will
49  *	   generate an FMA fault.
50  *
51  *	6. If the pool has the 'autoreplace' property set, and the matching vdev
52  *	   is a whole disk, then label the new disk and attempt a 'zpool
53  *	   replace'.
54  *
55  * The module responds to EC_DEV_ADD events for both disks and lofi devices,
56  * with the latter used for testing.  The special ESC_ZFS_VDEV_CHECK event
57  * indicates that a device failed to open during pool load, but the autoreplace
58  * property was set.  In this case, we deferred the associated FMA fault until
59  * our module had a chance to process the autoreplace logic.  If the device
60  * could not be replaced, then the second online attempt will trigger the FMA
61  * fault that we skipped earlier.
62  */
63 
64 #include <alloca.h>
65 #include <devid.h>
66 #include <fcntl.h>
67 #include <libnvpair.h>
68 #include <libsysevent.h>
69 #include <libzfs.h>
70 #include <limits.h>
71 #include <stdlib.h>
72 #include <string.h>
73 #include <syslog.h>
74 #include <sys/list.h>
75 #include <sys/sunddi.h>
76 #include <sys/sysevent/eventdefs.h>
77 #include <sys/sysevent/dev.h>
78 #include <thread_pool.h>
79 #include <unistd.h>
80 #include "syseventd.h"
81 
82 #if defined(__i386) || defined(__amd64)
83 #define	PHYS_PATH	":q"
84 #define	RAW_SLICE	"p0"
85 #elif defined(__sparc)
86 #define	PHYS_PATH	":c"
87 #define	RAW_SLICE	"s2"
88 #else
89 #error Unknown architecture
90 #endif
91 
92 typedef void (*zfs_process_func_t)(zpool_handle_t *, nvlist_t *, boolean_t);
93 
94 libzfs_handle_t *g_zfshdl;
95 list_t g_pool_list;
96 tpool_t *g_tpool;
97 boolean_t g_enumeration_done;
98 thread_t g_zfs_tid;
99 
100 typedef struct unavailpool {
101 	zpool_handle_t	*uap_zhp;
102 	list_node_t	uap_node;
103 } unavailpool_t;
104 
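/*
 * Return the current state of the pool's top-level vdev, taken from the vdev
 * statistics in the pool configuration.
 */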
105 int
106 zfs_toplevel_state(zpool_handle_t *zhp)
107 {
108 	nvlist_t *nvroot;
109 	vdev_stat_t *vs;
110 	unsigned int c;
111 
112 	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
113 	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
114 	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
115 	    (uint64_t **)&vs, &c) == 0);
116 	return (vs->vs_state);
117 }
118 
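/*
 * zpool_iter() callback: if the pool's top-level vdev state is below
 * DEGRADED, the pool is unavailable; keep its handle open and queue it on
 * the list passed in 'data'.  Otherwise close the handle.
 */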
119 static int
120 zfs_unavail_pool(zpool_handle_t *zhp, void *data)
121 {
122 	if (zfs_toplevel_state(zhp) < VDEV_STATE_DEGRADED) {
123 		unavailpool_t *uap;
124 		uap = malloc(sizeof (unavailpool_t));
125 		uap->uap_zhp = zhp;
126 		list_insert_tail((list_t *)data, uap);
127 	} else {
128 		zpool_close(zhp);
129 	}
130 	return (0);
131 }
132 
133 /*
134  * The device associated with the given vdev (either by devid or physical path)
135  * has been added to the system.  If 'isdisk' is set, then we only attempt a
136  * replacement if it's a whole disk.  This also implies that we should label the
137  * disk first.
138  *
139  * First, we attempt to online the device (making sure to undo any spare
140  * operation when finished).  If this succeeds, then we're done.  If it fails,
141  * and the new state is VDEV_CANT_OPEN, it indicates that the device was opened,
142  * but that the label was not what we expected.  If the 'autoreplace' property
143  * is set, then we relabel the disk (if specified), and attempt a 'zpool
144  * replace'.  If the online is successful, but the new state is something else
145  * (REMOVED or FAULTED), it indicates that we're out of sync or in some sort of
146  * race, and we should avoid attempting to relabel the disk.
147  */
148 static void
149 zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t isdisk)
150 {
151 	char *path;
152 	vdev_state_t newstate;
153 	nvlist_t *nvroot, *newvd;
154 	uint64_t wholedisk = 0ULL;
155 	char *physpath = NULL;
156 	char rawpath[PATH_MAX], fullpath[PATH_MAX];
157 	size_t len;
158 
159 	if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &path) != 0)
160 		return;
161 
162 	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &physpath);
163 	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
164 
165 	/*
166 	 * We should have a way to online a device by guid.  With the current
167 	 * interface, we are forced to chop off the 's0' for whole disks.
168 	 */
169 	(void) strlcpy(fullpath, path, sizeof (fullpath));
170 	if (wholedisk)
171 		fullpath[strlen(fullpath) - 2] = '\0';
172 
173 	/*
174 	 * Attempt to online the device.  It would be nice to online this by
175 	 * GUID, but the current interface only supports lookup by path.
176 	 */
177 	if (zpool_vdev_online(zhp, fullpath,
178 	    ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE, &newstate) == 0 &&
179 	    (newstate == VDEV_STATE_HEALTHY || newstate == VDEV_STATE_DEGRADED))
180 		return;
181 
182 	/*
183 	 * If the pool doesn't have the autoreplace property set, then attempt a
184 	 * true online (without the unspare flag), which will trigger an FMA
185 	 * fault.
186 	 */
187 	if (!zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOREPLACE, NULL) ||
188 	    (isdisk && !wholedisk)) {
189 		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
190 		    &newstate);
191 		return;
192 	}
193 
194 	if (isdisk) {
195 		/*
196 		 * If this is a request to label a whole disk, then attempt to
197 		 * write out the label.  Before we can label the disk, we need
198 		 * access to a raw node.  Ideally, we'd like to walk the devinfo
199 		 * tree and find a raw node from the corresponding parent node.
200 		 * This is overly complicated, and since we know how we labeled
201 		 * this device in the first place, we know it's safe to switch
202 		 * from /dev/dsk to /dev/rdsk and append the backup slice.
203 		 *
204 		 * If any part of this process fails, then do a force online to
205 		 * trigger a ZFS fault for the device (and any hot spare
206 		 * replacement).
207 		 */
208 		if (strncmp(path, "/dev/dsk/", 9) != 0) {
209 			(void) zpool_vdev_online(zhp, fullpath,
210 			    ZFS_ONLINE_FORCEFAULT, &newstate);
211 			return;
212 		}
213 
214 		(void) strlcpy(rawpath, path + 9, sizeof (rawpath));
215 		len = strlen(rawpath);
216 		rawpath[len - 2] = '\0';
217 
218 		if (zpool_label_disk(g_zfshdl, zhp, rawpath) != 0) {
219 			(void) zpool_vdev_online(zhp, fullpath,
220 			    ZFS_ONLINE_FORCEFAULT, &newstate);
221 			return;
222 		}
223 	}
224 
225 	/*
226 	 * Construct the root vdev to pass to zpool_vdev_attach().  While adding
227 	 * the entire vdev structure is harmless, we construct a reduced set of
228 	 * path/physpath/wholedisk to keep it simple.
229 	 */
230 	if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
231 		return;
232 
233 	if (nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0) {
234 		nvlist_free(nvroot);
235 		return;
236 	}
237 
238 	if (nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
239 	    nvlist_add_string(newvd, ZPOOL_CONFIG_PATH, path) != 0 ||
240 	    (physpath != NULL && nvlist_add_string(newvd,
241 	    ZPOOL_CONFIG_PHYS_PATH, physpath) != 0) ||
242 	    nvlist_add_uint64(newvd, ZPOOL_CONFIG_WHOLE_DISK, wholedisk) != 0 ||
243 	    nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
244 	    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &newvd,
245 	    1) != 0) {
246 		nvlist_free(newvd);
247 		nvlist_free(nvroot);
248 		return;
249 	}
250 
251 	nvlist_free(newvd);
252 
253 	(void) zpool_vdev_attach(zhp, fullpath, path, nvroot, B_TRUE);
254 
255 	nvlist_free(nvroot);
256 
257 }
258 
259 /*
260  * Utility functions to find a vdev matching given criteria.
261  */
262 typedef struct dev_data {
263 	const char		*dd_compare;
264 	const char		*dd_prop;
265 	zfs_process_func_t	dd_func;
266 	boolean_t		dd_found;
267 	boolean_t		dd_isdisk;
268 	uint64_t		dd_pool_guid;
269 	uint64_t		dd_vdev_guid;
270 } dev_data_t;
271 
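/*
 * Recursively walk a vdev tree, applying the search criteria in dev_data_t
 * to each leaf vdev: either an exact vdev guid match, or a prefix match of
 * dd_compare against the property named by dd_prop.  Matching vdevs are
 * handed to dd_func.
 */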
272 static void
273 zfs_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *data)
274 {
275 	dev_data_t *dp = data;
276 	char *path;
277 	uint_t c, children;
278 	nvlist_t **child;
279 	size_t len;
280 	uint64_t guid;
281 
282 	/*
283 	 * First iterate over any children.
284 	 */
285 	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN,
286 	    &child, &children) == 0) {
287 		for (c = 0; c < children; c++)
288 			zfs_iter_vdev(zhp, child[c], data);
289 		return;
290 	}
291 
292 	if (dp->dd_vdev_guid != 0) {
293 		if (nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_GUID,
294 		    &guid) != 0 || guid != dp->dd_vdev_guid)
295 			return;
296 	} else {
297 		len = strlen(dp->dd_compare);
298 
299 		if (nvlist_lookup_string(nvl, dp->dd_prop, &path) != 0 ||
300 		    strncmp(dp->dd_compare, path, len) != 0)
301 			return;
302 
303 		/*
304 		 * Normally, we want to have an exact match for the comparison
305 		 * string.  However, we allow substring matches in the following
306 		 * cases:
307 		 *
308 		 * 	<path>:		This is a devpath, and the target is one
309 		 * 			of its children.
310 		 *
311 		 * 	<path/>		This is a devid for a whole disk, and
312 		 * 			the target is one of its children.
313 		 */
314 		if (path[len] != '\0' && path[len] != ':' &&
315 		    path[len - 1] != '/')
316 			return;
317 	}
318 
319 	(dp->dd_func)(zhp, nvl, dp->dd_isdisk);
320 }
321 
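/*
 * Thread-pool task: mount and share the datasets of a pool that has become
 * available again, then release its handle.
 */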
322 void
323 zfs_enable_ds(void *arg)
324 {
325 	unavailpool_t *pool = (unavailpool_t *)arg;
326 
327 	(void) zpool_enable_datasets(pool->uap_zhp, NULL, 0);
328 	zpool_close(pool->uap_zhp);
329 	free(pool);
330 }
331 
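/*
 * zpool_iter() callback: walk this pool's vdev tree looking for matches
 * against the criteria in 'data' (optionally restricted by pool guid).
 * Also, once the initial enumeration of unavailable pools has finished,
 * check whether this pool has recovered to at least DEGRADED and, if so,
 * dispatch a task to re-enable its datasets.
 */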
332 static int
333 zfs_iter_pool(zpool_handle_t *zhp, void *data)
334 {
335 	nvlist_t *config, *nvl;
336 	dev_data_t *dp = data;
337 	uint64_t pool_guid;
338 	unavailpool_t *pool;
339 
340 	if ((config = zpool_get_config(zhp, NULL)) != NULL) {
341 		if (dp->dd_pool_guid == 0 ||
342 		    (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
343 		    &pool_guid) == 0 && pool_guid == dp->dd_pool_guid)) {
344 			(void) nvlist_lookup_nvlist(config,
345 			    ZPOOL_CONFIG_VDEV_TREE, &nvl);
346 			zfs_iter_vdev(zhp, nvl, data);
347 		}
348 	}
349 	if (g_enumeration_done) {
350 		for (pool = list_head(&g_pool_list); pool != NULL;
351 		    pool = list_next(&g_pool_list, pool)) {
352 
353 			if (strcmp(zpool_get_name(zhp),
354 			    zpool_get_name(pool->uap_zhp)))
355 				continue;
356 			if (zfs_toplevel_state(zhp) >= VDEV_STATE_DEGRADED) {
357 				list_remove(&g_pool_list, pool);
358 				(void) tpool_dispatch(g_tpool, zfs_enable_ds,
359 				    pool);
360 				break;
361 			}
362 		}
363 	}
364 
365 	zpool_close(zhp);
366 	return (0);
367 }
368 
369 /*
370  * Given a physical device path, iterate over all (pool, vdev) pairs which
371  * correspond to the given path.
372  */
373 static boolean_t
374 devpath_iter(const char *devpath, zfs_process_func_t func, boolean_t wholedisk)
375 {
376 	dev_data_t data = { 0 };
377 
378 	data.dd_compare = devpath;
379 	data.dd_func = func;
380 	data.dd_prop = ZPOOL_CONFIG_PHYS_PATH;
381 	data.dd_found = B_FALSE;
382 	data.dd_isdisk = wholedisk;
383 
384 	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
385 
386 	return (data.dd_found);
387 }
388 
389 /*
390  * Given a /devices path, lookup the corresponding devid for each minor node,
391  * and find any vdevs with matching devids.  Doing this straight up would be
392  * rather inefficient, O(minor nodes * vdevs in system), so we take advantage of
393  * the fact that each devid ends with "/<minornode>".  Once we find any valid
394  * minor node, we chop off the portion after the last slash, and then search for
395  * matching vdevs, which is O(vdevs in system).
396  */
397 static boolean_t
398 devid_iter(const char *devpath, zfs_process_func_t func, boolean_t wholedisk)
399 {
400 	size_t len = strlen(devpath) + sizeof ("/devices") +
401 	    sizeof (PHYS_PATH) - 1;
402 	char *fullpath;
403 	int fd;
404 	ddi_devid_t devid;
405 	char *devidstr, *fulldevid;
406 	dev_data_t data = { 0 };
407 
408 	/*
409 	 * Try to open a known minor node.
410 	 */
411 	fullpath = alloca(len);
412 	(void) snprintf(fullpath, len, "/devices%s%s", devpath, PHYS_PATH);
413 	if ((fd = open(fullpath, O_RDONLY)) < 0)
414 		return (B_FALSE);
415 
416 	/*
417 	 * Determine the devid as a string, with no trailing slash for the minor
418 	 * node.
419 	 */
420 	if (devid_get(fd, &devid) != 0) {
421 		(void) close(fd);
422 		return (B_FALSE);
423 	}
424 	(void) close(fd);
425 
426 	if ((devidstr = devid_str_encode(devid, NULL)) == NULL) {
427 		devid_free(devid);
428 		return (B_FALSE);
429 	}
430 
431 	len = strlen(devidstr) + 2;
432 	fulldevid = alloca(len);
433 	(void) snprintf(fulldevid, len, "%s/", devidstr);
434 
435 	data.dd_compare = fulldevid;
436 	data.dd_func = func;
437 	data.dd_prop = ZPOOL_CONFIG_DEVID;
438 	data.dd_found = B_FALSE;
439 	data.dd_isdisk = wholedisk;
440 
441 	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
442 
443 	devid_str_free(devidstr);
444 	devid_free(devid);
445 
446 	return (data.dd_found);
447 }
448 
449 /*
450  * This function is called when we receive a devfs add event.  This can be
451  * either a disk event or a lofi event, and the behavior is slightly different
452  * depending on which it is.
453  */
454 static int
455 zfs_deliver_add(nvlist_t *nvl, boolean_t is_lofi)
456 {
457 	char *devpath, *devname;
458 	char path[PATH_MAX], realpath[PATH_MAX];
459 	char *colon, *raw;
460 	int ret;
461 
462 	/*
463 	 * The main unit of operation is the physical device path.  For disks,
464 	 * this is the device node, as all minor nodes are affected.  For lofi
465 	 * devices, this includes the minor path.  Unfortunately, this isn't
466 	 * represented in the DEV_PHYS_PATH for various reasons.
467 	 */
468 	if (nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devpath) != 0)
469 		return (-1);
470 
471 	/*
472 	 * If this is a lofi device, then also get the minor instance name.
473 	 * Unfortunately, the current payload doesn't include an easy way to get
474 	 * this information.  So we cheat by resolving the 'dev_name' (which
475 	 * refers to the raw device) and taking the portion between ':' and ',raw'.
476 	 */
477 	(void) strlcpy(realpath, devpath, sizeof (realpath));
478 	if (is_lofi) {
479 		if (nvlist_lookup_string(nvl, DEV_NAME,
480 		    &devname) == 0 &&
481 		    (ret = resolvepath(devname, path,
482 		    sizeof (path))) > 0) {
483 			path[ret] = '\0';
484 			colon = strchr(path, ':');
485 			if (colon != NULL)
486 				raw = strstr(colon + 1, ",raw");
487 			if (colon != NULL && raw != NULL) {
488 				*raw = '\0';
489 				(void) snprintf(realpath,
490 				    sizeof (realpath), "%s%s",
491 				    devpath, colon);
492 				*raw = ',';
493 			}
494 		}
495 	}
496 
497 	/*
498 	 * Iterate over all vdevs with a matching devid, and then those with a
499 	 * matching /devices path.  For disks, we only want to pay attention to
500 	 * vdevs marked as whole disks.  For lofi, we don't care (because we're
501 	 * matching an exact minor name).
502 	 */
503 	if (!devid_iter(realpath, zfs_process_add, !is_lofi))
504 		(void) devpath_iter(realpath, zfs_process_add, !is_lofi);
505 
506 	return (0);
507 }
508 
509 /*
510  * Called when we receive a VDEV_CHECK event, which indicates a device could not
511  * be opened during initial pool open, but the autoreplace property was set on
512  * the pool.  In this case, we treat it as if it were an add event.
513  */
514 static int
515 zfs_deliver_check(nvlist_t *nvl)
516 {
517 	dev_data_t data = { 0 };
518 
519 	if (nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID,
520 	    &data.dd_pool_guid) != 0 ||
521 	    nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID,
522 	    &data.dd_vdev_guid) != 0 ||
523 	    data.dd_vdev_guid == 0)
524 		return (0);
525 
526 	data.dd_isdisk = B_TRUE;
527 	data.dd_func = zfs_process_add;
528 
529 	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
530 
531 	return (0);
532 }
533 
534 #define	DEVICE_PREFIX	"/devices"
535 
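/*
 * zpool_iter() callback used for device size-change events: look up the vdev
 * whose physical path matches 'data'.  If it is a whole disk, reopen the
 * pool so the kernel notices the new size, and if 'autoexpand' is set,
 * online the device.  Returns 1 to stop iteration once the vdev is found.
 */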
536 static int
537 zfsdle_vdev_online(zpool_handle_t *zhp, void *data)
538 {
539 	char *devname = data;
540 	boolean_t avail_spare, l2cache;
541 	vdev_state_t newstate;
542 	nvlist_t *tgt;
543 
544 	syseventd_print(9, "zfsdle_vdev_online: searching for %s in pool %s\n",
545 	    devname, zpool_get_name(zhp));
546 
547 	if ((tgt = zpool_find_vdev_by_physpath(zhp, devname,
548 	    &avail_spare, &l2cache, NULL)) != NULL) {
549 		char *path, fullpath[MAXPATHLEN];
550 		uint64_t wholedisk = 0ULL;
551 
552 		verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,
553 		    &path) == 0);
554 		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
555 		    &wholedisk) == 0);
556 
557 		(void) strlcpy(fullpath, path, sizeof (fullpath));
558 		if (wholedisk) {
559 			fullpath[strlen(fullpath) - 2] = '\0';
560 
561 			/*
562 			 * We need to reopen the pool associated with this
563 			 * device so that the kernel can update the size
564 			 * of the expanded device.
565 			 */
566 			(void) zpool_reopen(zhp);
567 		}
568 
569 		if (zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
570 			syseventd_print(9, "zfsdle_vdev_online: setting device"
571 			    " %s to ONLINE state in pool %s.\n",
572 			    fullpath, zpool_get_name(zhp));
573 			if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL)
574 				(void) zpool_vdev_online(zhp, fullpath, 0,
575 				    &newstate);
576 		}
577 		zpool_close(zhp);
578 		return (1);
579 	}
580 	zpool_close(zhp);
581 	return (0);
582 }
583 
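/*
 * Handle an ESC_DEV_DLE event, delivered when a device's size has changed:
 * strip the /devices prefix from the supplied physical path and search all
 * pools for the matching vdev so it can be expanded.
 */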
584 int
585 zfs_deliver_dle(nvlist_t *nvl)
586 {
587 	char *devname;
588 	if (nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devname) != 0) {
589 		syseventd_print(9, "zfs_deliver_event: no physpath\n");
590 		return (-1);
591 	}
592 	if (strncmp(devname, DEVICE_PREFIX, strlen(DEVICE_PREFIX)) != 0) {
593 		syseventd_print(9, "zfs_deliver_event: invalid "
594 		    "device '%s'\n", devname);
595 		return (-1);
596 	}
597 
598 	/*
599 	 * We try to find the device using the physical
600 	 * path that has been supplied. We need to strip off
601 	 * the /devices prefix before starting our search.
602 	 */
603 	devname += strlen(DEVICE_PREFIX);
604 	if (zpool_iter(g_zfshdl, zfsdle_vdev_online, devname) != 1) {
605 		syseventd_print(9, "zfs_deliver_event: device '%s' not"
606 		    " found\n", devname);
607 		return (1);
608 	}
609 	return (0);
610 }
611 
612 
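/*
 * syseventd entry point for this module: classify the incoming event (disk
 * or lofi addition, ZFS vdev check, or device size change) and hand it to
 * the matching delivery routine above.
 */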
613 /*ARGSUSED*/
614 static int
615 zfs_deliver_event(sysevent_t *ev, int unused)
616 {
617 	const char *class = sysevent_get_class_name(ev);
618 	const char *subclass = sysevent_get_subclass_name(ev);
619 	nvlist_t *nvl;
620 	int ret;
621 	boolean_t is_lofi, is_check, is_dle = B_FALSE;
622 
623 	if (strcmp(class, EC_DEV_ADD) == 0) {
624 		/*
625 		 * We're mainly interested in disk additions, but we also listen
626 		 * for new lofi devices, to allow for simplified testing.
627 		 */
628 		if (strcmp(subclass, ESC_DISK) == 0)
629 			is_lofi = B_FALSE;
630 		else if (strcmp(subclass, ESC_LOFI) == 0)
631 			is_lofi = B_TRUE;
632 		else
633 			return (0);
634 
635 		is_check = B_FALSE;
636 	} else if (strcmp(class, EC_ZFS) == 0 &&
637 	    strcmp(subclass, ESC_ZFS_VDEV_CHECK) == 0) {
638 		/*
639 		 * This event signifies that a device failed to open during pool
640 		 * load, but the 'autoreplace' property was set, so we should
641 		 * pretend it's just been added.
642 		 */
643 		is_check = B_TRUE;
644 	} else if (strcmp(class, EC_DEV_STATUS) == 0 &&
645 	    strcmp(subclass, ESC_DEV_DLE) == 0) {
646 		is_dle = B_TRUE;
647 	} else {
648 		return (0);
649 	}
650 
651 	if (sysevent_get_attr_list(ev, &nvl) != 0)
652 		return (-1);
653 
654 	if (is_dle)
655 		ret = zfs_deliver_dle(nvl);
656 	else if (is_check)
657 		ret = zfs_deliver_check(nvl);
658 	else
659 		ret = zfs_deliver_add(nvl, is_lofi);
660 
661 	nvlist_free(nvl);
662 	return (ret);
663 }
664 
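/*
 * Enumeration thread started from slm_init(): collect the list of pools
 * that are currently unavailable, and create the thread pool used later to
 * re-enable their datasets once devices reappear.
 */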
665 /*ARGSUSED*/
666 void *
667 zfs_enum_pools(void *arg)
668 {
669 	(void) zpool_iter(g_zfshdl, zfs_unavail_pool, (void *)&g_pool_list);
670 	if (!list_is_empty(&g_pool_list))
671 		g_tpool = tpool_create(1, sysconf(_SC_NPROCESSORS_ONLN),
672 		    0, NULL);
673 	g_enumeration_done = B_TRUE;
674 	return (NULL);
675 }
676 
677 static struct slm_mod_ops zfs_mod_ops = {
678 	SE_MAJOR_VERSION, SE_MINOR_VERSION, 10, zfs_deliver_event
679 };
680 
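/*
 * Module initialization: open a libzfs handle and start the asynchronous
 * enumeration of unavailable pools.
 */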
681 struct slm_mod_ops *
682 slm_init()
683 {
684 	if ((g_zfshdl = libzfs_init()) == NULL)
685 		return (NULL);
686 	/*
687 	 * collect a list of unavailable pools (asynchronously,
688 	 * since this can take a while)
689 	 */
690 	list_create(&g_pool_list, sizeof (struct unavailpool),
691 	    offsetof(struct unavailpool, uap_node));
692 	if (thr_create(NULL, 0, zfs_enum_pools, NULL, 0, &g_zfs_tid) != 0)
693 		return (NULL);
694 	return (&zfs_mod_ops);
695 }
696 
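/*
 * Module teardown: drain and destroy the thread pool, release any pools
 * still queued as unavailable, join the enumeration thread, and close
 * libzfs.
 */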
697 void
698 slm_fini()
699 {
700 	unavailpool_t *pool;
701 
702 	if (g_tpool != NULL) {
703 		tpool_wait(g_tpool);
704 		tpool_destroy(g_tpool);
705 	}
706 	while ((pool = (list_head(&g_pool_list))) != NULL) {
707 		list_remove(&g_pool_list, pool);
708 		zpool_close(pool->uap_zhp);
709 		free(pool);
710 	}
711 	(void) thr_join(g_zfs_tid, NULL, NULL);
712 	list_destroy(&g_pool_list);
713 	libzfs_fini(g_zfshdl);
714 }
715