xref: /titanic_41/usr/src/cmd/syseventd/modules/zfs_mod/zfs_mod.c (revision b22c0ee92424ab44e9fab259154af68988b7b7e6)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
 */

/*
 * ZFS syseventd module.
 *
 * The purpose of this module is to identify when devices are added to the
 * system, and appropriately online or replace the affected vdevs.
 *
 * When a device is added to the system:
 *
 *	1. Search for any vdevs whose devid matches that of the newly added
 *	   device.
 *
 *	2. If no vdevs are found, then search for any vdevs whose devfs path
 *	   matches that of the new device.
 *
 *	3. If no vdevs match by either method, then ignore the event.
 *
 *	4. Attempt to online the device with a flag to indicate that it should
 *	   be unspared when resilvering completes.  If this succeeds, then the
 *	   same device was inserted and we should continue normally.
 *
 *	5. If the pool does not have the 'autoreplace' property set, attempt to
 *	   online the device again without the unspare flag, which will
 *	   generate a FMA fault.
 *
 *	6. If the pool has the 'autoreplace' property set, and the matching vdev
 *	   is a whole disk, then label the new disk and attempt a 'zpool
 *	   replace'.
 *
 * The module responds to EC_DEV_ADD events for both disks and lofi devices,
 * with the latter used for testing.  The special ESC_ZFS_VDEV_CHECK event
 * indicates that a device failed to open during pool load, but the autoreplace
 * property was set.  In this case, we deferred the associated FMA fault until
 * our module had a chance to process the autoreplace logic.  If the device
 * could not be replaced, then the second online attempt will trigger the FMA
 * fault that we skipped earlier.
 */

#include <alloca.h>
#include <devid.h>
#include <fcntl.h>
#include <libnvpair.h>
#include <libsysevent.h>
#include <libzfs.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <syslog.h>
#include <sys/list.h>
#include <sys/sunddi.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>
#include <thread_pool.h>
#include <unistd.h>
#include "syseventd.h"

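/*
 * Architecture-specific device naming: PHYS_PATH is the minor-node suffix
 * appended to a /devices path when probing for a devid (see devid_iter()),
 * and RAW_SLICE names the raw whole-disk slice ("p0" on x86, "s2" on SPARC).
 */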
#if defined(__i386) || defined(__amd64)
#define	PHYS_PATH	":q"
#define	RAW_SLICE	"p0"
#elif defined(__sparc)
#define	PHYS_PATH	":c"
#define	RAW_SLICE	"s2"
#else
#error Unknown architecture
#endif

typedef void (*zfs_process_func_t)(zpool_handle_t *, nvlist_t *, boolean_t);

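/*
 * Global state: the libzfs handle, the list of pools found to be unavailable
 * during the initial enumeration, the thread pool used to re-enable their
 * datasets, a flag indicating that enumeration has completed, and the id of
 * the enumeration thread.
 */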
libzfs_handle_t *g_zfshdl;
list_t g_pool_list;
tpool_t *g_tpool;
boolean_t g_enumeration_done;
thread_t g_zfs_tid;

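/*
 * List entry describing a pool whose top-level vdev could not be opened;
 * its datasets are enabled later, once the pool becomes available again
 * (see zfs_iter_pool()).
 */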
typedef struct unavailpool {
	zpool_handle_t	*uap_zhp;
	list_node_t	uap_node;
} unavailpool_t;

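/*
 * Return the state of the top-level (root) vdev for the given pool.
 */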
int
zfs_toplevel_state(zpool_handle_t *zhp)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	unsigned int c;

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) == 0);
	return (vs->vs_state);
}

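/*
 * zpool_iter() callback used during initial enumeration: pools whose root
 * vdev is below DEGRADED are remembered on the unavailable-pool list (with
 * their handles left open); all other pool handles are closed immediately.
 */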
static int
zfs_unavail_pool(zpool_handle_t *zhp, void *data)
{
	if (zfs_toplevel_state(zhp) < VDEV_STATE_DEGRADED) {
		unavailpool_t *uap;
		uap = malloc(sizeof (unavailpool_t));
		uap->uap_zhp = zhp;
		list_insert_tail((list_t *)data, uap);
	} else {
		zpool_close(zhp);
	}
	return (0);
}

/*
 * The device associated with the given vdev (either by devid or physical path)
 * has been added to the system.  If 'isdisk' is set, then we only attempt a
 * replacement if it's a whole disk.  This also implies that we should label the
 * disk first.
 *
 * First, we attempt to online the device (making sure to undo any spare
 * operation when finished).  If this succeeds, then we're done.  If it fails,
 * and the new state is VDEV_CANT_OPEN, it indicates that the device was opened,
 * but that the label was not what we expected.  If the 'autoreplace' property
 * is set, then we relabel the disk (if specified), and attempt a 'zpool
 * replace'.  If the online is successful, but the new state is something else
 * (REMOVED or FAULTED), it indicates that we're out of sync or in some sort of
 * race, and we should avoid attempting to relabel the disk.
 */
static void
zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t isdisk)
{
	char *path;
	vdev_state_t newstate;
	nvlist_t *nvroot, *newvd;
	uint64_t wholedisk = 0ULL;
	uint64_t offline = 0ULL;
	char *physpath = NULL;
	char rawpath[PATH_MAX], fullpath[PATH_MAX];
	size_t len;

	if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &path) != 0)
		return;

	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &physpath);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_OFFLINE, &offline);

	/*
	 * We should have a way to online a device by guid.  With the current
	 * interface, we are forced to chop off the 's0' for whole disks.
	 */
	(void) strlcpy(fullpath, path, sizeof (fullpath));
	if (wholedisk)
		fullpath[strlen(fullpath) - 2] = '\0';

	/*
	 * Attempt to online the device.  It would be nice to online this by
	 * GUID, but the current interface only supports lookup by path.
	 */
	if (offline ||
	    (zpool_vdev_online(zhp, fullpath,
	    ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE, &newstate) == 0 &&
	    (newstate == VDEV_STATE_HEALTHY ||
	    newstate == VDEV_STATE_DEGRADED)))
		return;

	/*
	 * If the pool doesn't have the autoreplace property set, then attempt a
	 * true online (without the unspare flag), which will trigger a FMA
	 * fault.
	 */
	if (!zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOREPLACE, NULL) ||
	    (isdisk && !wholedisk)) {
		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
		    &newstate);
		return;
	}

	if (isdisk) {
		/*
		 * If this is a request to label a whole disk, then attempt to
		 * write out the label.  Before we can label the disk, we need
		 * access to a raw node.  Ideally, we'd like to walk the devinfo
		 * tree and find a raw node from the corresponding parent node.
		 * This is overly complicated, and since we know how we labeled
		 * this device in the first place, we know it's safe to switch
		 * from /dev/dsk to /dev/rdsk and append the backup slice.
		 *
		 * If any part of this process fails, then do a force online to
		 * trigger a ZFS fault for the device (and any hot spare
		 * replacement).
		 */
		if (strncmp(path, "/dev/dsk/", 9) != 0) {
			(void) zpool_vdev_online(zhp, fullpath,
			    ZFS_ONLINE_FORCEFAULT, &newstate);
			return;
		}

		(void) strlcpy(rawpath, path + 9, sizeof (rawpath));
		len = strlen(rawpath);
		rawpath[len - 2] = '\0';

		if (zpool_label_disk(g_zfshdl, zhp, rawpath) != 0) {
			(void) zpool_vdev_online(zhp, fullpath,
			    ZFS_ONLINE_FORCEFAULT, &newstate);
			return;
		}
	}

	/*
	 * Construct the root vdev to pass to zpool_vdev_attach().  While adding
	 * the entire vdev structure is harmless, we construct a reduced set of
	 * path/physpath/wholedisk to keep it simple.
	 */
	if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
		return;

	if (nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0) {
		nvlist_free(nvroot);
		return;
	}

	if (nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
	    nvlist_add_string(newvd, ZPOOL_CONFIG_PATH, path) != 0 ||
	    (physpath != NULL && nvlist_add_string(newvd,
	    ZPOOL_CONFIG_PHYS_PATH, physpath) != 0) ||
	    nvlist_add_uint64(newvd, ZPOOL_CONFIG_WHOLE_DISK, wholedisk) != 0 ||
	    nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
	    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &newvd,
	    1) != 0) {
		nvlist_free(newvd);
		nvlist_free(nvroot);
		return;
	}

	nvlist_free(newvd);

	(void) zpool_vdev_attach(zhp, fullpath, path, nvroot, B_TRUE);

	nvlist_free(nvroot);
}

/*
 * Utility functions to find a vdev matching given criteria.
 */
typedef struct dev_data {
	const char		*dd_compare;
	const char		*dd_prop;
	zfs_process_func_t	dd_func;
	boolean_t		dd_found;
	boolean_t		dd_isdisk;
	uint64_t		dd_pool_guid;
	uint64_t		dd_vdev_guid;
} dev_data_t;

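/*
 * Recursively walk a vdev tree, invoking dd_func on each leaf vdev that
 * matches either the requested vdev GUID or the dd_prop/dd_compare pair.
 */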
static void
zfs_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *data)
{
	dev_data_t *dp = data;
	char *path;
	uint_t c, children;
	nvlist_t **child;
	size_t len;
	uint64_t guid;

	/*
	 * First iterate over any children.
	 */
	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			zfs_iter_vdev(zhp, child[c], data);
		return;
	}

	if (dp->dd_vdev_guid != 0) {
		if (nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_GUID,
		    &guid) != 0 || guid != dp->dd_vdev_guid)
			return;
	} else {
		len = strlen(dp->dd_compare);

		if (nvlist_lookup_string(nvl, dp->dd_prop, &path) != 0 ||
		    strncmp(dp->dd_compare, path, len) != 0)
			return;

		/*
		 * Normally, we want to have an exact match for the comparison
		 * string.  However, we allow substring matches in the following
		 * cases:
		 *
		 *	<path>:		This is a devpath, and the target is one
		 *			of its children.
		 *
		 *	<path/>		This is a devid for a whole disk, and
		 *			the target is one of its children.
		 */
		if (path[len] != '\0' && path[len] != ':' &&
		    path[len - 1] != '/')
			return;
	}

	(dp->dd_func)(zhp, nvl, dp->dd_isdisk);
}

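/*
 * Thread pool task: mount and share the datasets of a pool that has become
 * available again, then release the pool handle and free the list entry.
 */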
void
zfs_enable_ds(void *arg)
{
	unavailpool_t *pool = (unavailpool_t *)arg;

	(void) zpool_enable_datasets(pool->uap_zhp, NULL, 0);
	zpool_close(pool->uap_zhp);
	free(pool);
}

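/*
 * zpool_iter() callback: apply the vdev search described by the dev_data_t
 * to this pool (optionally restricted to a specific pool GUID).  Once the
 * initial enumeration has finished, also re-enable datasets for any
 * previously unavailable pool that has since come back online.
 */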
static int
zfs_iter_pool(zpool_handle_t *zhp, void *data)
{
	nvlist_t *config, *nvl;
	dev_data_t *dp = data;
	uint64_t pool_guid;
	unavailpool_t *pool;

	if ((config = zpool_get_config(zhp, NULL)) != NULL) {
		if (dp->dd_pool_guid == 0 ||
		    (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &pool_guid) == 0 && pool_guid == dp->dd_pool_guid)) {
			(void) nvlist_lookup_nvlist(config,
			    ZPOOL_CONFIG_VDEV_TREE, &nvl);
			zfs_iter_vdev(zhp, nvl, data);
		}
	}
	if (g_enumeration_done) {
		for (pool = list_head(&g_pool_list); pool != NULL;
		    pool = list_next(&g_pool_list, pool)) {

			if (strcmp(zpool_get_name(zhp),
			    zpool_get_name(pool->uap_zhp)))
				continue;
			if (zfs_toplevel_state(zhp) >= VDEV_STATE_DEGRADED) {
				list_remove(&g_pool_list, pool);
				(void) tpool_dispatch(g_tpool, zfs_enable_ds,
				    pool);
				break;
			}
		}
	}

	zpool_close(zhp);
	return (0);
}

/*
 * Given a physical device path, iterate over all (pool, vdev) pairs which
 * correspond to the given path.
 */
static boolean_t
devpath_iter(const char *devpath, zfs_process_func_t func, boolean_t wholedisk)
{
	dev_data_t data = { 0 };

	data.dd_compare = devpath;
	data.dd_func = func;
	data.dd_prop = ZPOOL_CONFIG_PHYS_PATH;
	data.dd_found = B_FALSE;
	data.dd_isdisk = wholedisk;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (data.dd_found);
}

/*
 * Given a /devices path, lookup the corresponding devid for each minor node,
 * and find any vdevs with matching devids.  Doing this straight up would be
 * rather inefficient, O(minor nodes * vdevs in system), so we take advantage of
 * the fact that each devid ends with "/<minornode>".  Once we find any valid
 * minor node, we chop off the portion after the last slash, and then search for
 * matching vdevs, which is O(vdevs in system).
 */
static boolean_t
devid_iter(const char *devpath, zfs_process_func_t func, boolean_t wholedisk)
{
	size_t len = strlen(devpath) + sizeof ("/devices") +
	    sizeof (PHYS_PATH) - 1;
	char *fullpath;
	int fd;
	ddi_devid_t devid;
	char *devidstr, *fulldevid;
	dev_data_t data = { 0 };

	/*
	 * Try to open a known minor node.
	 */
	fullpath = alloca(len);
	(void) snprintf(fullpath, len, "/devices%s%s", devpath, PHYS_PATH);
	if ((fd = open(fullpath, O_RDONLY)) < 0)
		return (B_FALSE);

	/*
	 * Determine the devid as a string, with no trailing slash for the minor
	 * node.
	 */
	if (devid_get(fd, &devid) != 0) {
		(void) close(fd);
		return (B_FALSE);
	}
	(void) close(fd);

	if ((devidstr = devid_str_encode(devid, NULL)) == NULL) {
		devid_free(devid);
		return (B_FALSE);
	}

	len = strlen(devidstr) + 2;
	fulldevid = alloca(len);
	(void) snprintf(fulldevid, len, "%s/", devidstr);

	data.dd_compare = fulldevid;
	data.dd_func = func;
	data.dd_prop = ZPOOL_CONFIG_DEVID;
	data.dd_found = B_FALSE;
	data.dd_isdisk = wholedisk;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	devid_str_free(devidstr);
	devid_free(devid);

	return (data.dd_found);
}

/*
 * This function is called when we receive a devfs add event.  This can be
 * either a disk event or a lofi event, and the behavior is slightly different
 * depending on which it is.
 */
static int
zfs_deliver_add(nvlist_t *nvl, boolean_t is_lofi)
{
	char *devpath, *devname;
	char path[PATH_MAX], realpath[PATH_MAX];
	char *colon, *raw;
	int ret;

	/*
	 * The main unit of operation is the physical device path.  For disks,
	 * this is the device node, as all minor nodes are affected.  For lofi
	 * devices, this includes the minor path.  Unfortunately, this isn't
	 * represented in the DEV_PHYS_PATH for various reasons.
	 */
	if (nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devpath) != 0)
		return (-1);

	/*
	 * If this is a lofi device, then also get the minor instance name.
	 * Unfortunately, the current payload doesn't include an easy way to get
	 * this information.  So we cheat by resolving the 'dev_name' (which
	 * refers to the raw device) and taking the portion between ':(*),raw'.
	 */
	(void) strlcpy(realpath, devpath, sizeof (realpath));
	if (is_lofi) {
		if (nvlist_lookup_string(nvl, DEV_NAME,
		    &devname) == 0 &&
		    (ret = resolvepath(devname, path,
		    sizeof (path))) > 0) {
			path[ret] = '\0';
			colon = strchr(path, ':');
			if (colon != NULL)
				raw = strstr(colon + 1, ",raw");
			if (colon != NULL && raw != NULL) {
				*raw = '\0';
				(void) snprintf(realpath,
				    sizeof (realpath), "%s%s",
				    devpath, colon);
				*raw = ',';
			}
		}
	}

	/*
	 * Iterate over all vdevs with a matching devid, and then those with a
	 * matching /devices path.  For disks, we only want to pay attention to
	 * vdevs marked as whole disks.  For lofi, we don't care (because we're
	 * matching an exact minor name).
	 */
	if (!devid_iter(realpath, zfs_process_add, !is_lofi))
		(void) devpath_iter(realpath, zfs_process_add, !is_lofi);

	return (0);
}

/*
 * Called when we receive a VDEV_CHECK event, which indicates a device could not
 * be opened during initial pool open, but the autoreplace property was set on
 * the pool.  In this case, we treat it as if it were an add event.
 */
static int
zfs_deliver_check(nvlist_t *nvl)
{
	dev_data_t data = { 0 };

	if (nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID,
	    &data.dd_pool_guid) != 0 ||
	    nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID,
	    &data.dd_vdev_guid) != 0 ||
	    data.dd_vdev_guid == 0)
		return (0);

	data.dd_isdisk = B_TRUE;
	data.dd_func = zfs_process_add;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (0);
}

#define	DEVICE_PREFIX	"/devices"

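/*
 * zpool_iter() callback for device size-change (DLE) events: locate the vdev
 * by physical path, reopen the pool (for whole disks) so the kernel picks up
 * the new device size, and online the device if 'autoexpand' is set on the
 * pool.  Returns 1 once a matching vdev has been handled so that the
 * iteration stops.
 */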
static int
zfsdle_vdev_online(zpool_handle_t *zhp, void *data)
{
	char *devname = data;
	boolean_t avail_spare, l2cache;
	vdev_state_t newstate;
	nvlist_t *tgt;

	syseventd_print(9, "zfsdle_vdev_online: searching for %s in pool %s\n",
	    devname, zpool_get_name(zhp));

	if ((tgt = zpool_find_vdev_by_physpath(zhp, devname,
	    &avail_spare, &l2cache, NULL)) != NULL) {
		char *path, fullpath[MAXPATHLEN];
		uint64_t wholedisk = 0ULL;

		verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,
		    &path) == 0);
		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk) == 0);

		(void) strlcpy(fullpath, path, sizeof (fullpath));
		if (wholedisk) {
			fullpath[strlen(fullpath) - 2] = '\0';

			/*
			 * We need to reopen the pool associated with this
			 * device so that the kernel can update the size
			 * of the expanded device.
			 */
			(void) zpool_reopen(zhp);
		}

		if (zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
			syseventd_print(9, "zfsdle_vdev_online: setting "
			    "device %s to ONLINE state in pool %s.\n",
			    fullpath, zpool_get_name(zhp));
			if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL)
				(void) zpool_vdev_online(zhp, fullpath, 0,
				    &newstate);
		}
		zpool_close(zhp);
		return (1);
	}
	zpool_close(zhp);
	return (0);
}

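/*
 * Handle an EC_DEV_STATUS/ESC_DEV_DLE event, which reports a change in the
 * size of an existing device.  The device is identified by its physical
 * path, with the leading /devices prefix stripped before the search.
 */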
int
zfs_deliver_dle(nvlist_t *nvl)
{
	char *devname;
	if (nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devname) != 0) {
		syseventd_print(9, "zfs_deliver_event: no physpath\n");
		return (-1);
	}
	if (strncmp(devname, DEVICE_PREFIX, strlen(DEVICE_PREFIX)) != 0) {
		syseventd_print(9, "zfs_deliver_event: invalid "
		    "device '%s'", devname);
		return (-1);
	}

	/*
	 * We try to find the device using the physical
	 * path that has been supplied. We need to strip off
	 * the /devices prefix before starting our search.
	 */
	devname += strlen(DEVICE_PREFIX);
	if (zpool_iter(g_zfshdl, zfsdle_vdev_online, devname) != 1) {
		syseventd_print(9, "zfs_deliver_event: device '%s' not"
		    " found\n", devname);
		return (1);
	}
	return (0);
}


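/*
 * syseventd entry point: decode the event class/subclass and dispatch to the
 * appropriate handler (device add, ZFS vdev check, or device size change).
 */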
/*ARGSUSED*/
static int
zfs_deliver_event(sysevent_t *ev, int unused)
{
	const char *class = sysevent_get_class_name(ev);
	const char *subclass = sysevent_get_subclass_name(ev);
	nvlist_t *nvl;
	int ret;
	boolean_t is_lofi, is_check, is_dle = B_FALSE;

	if (strcmp(class, EC_DEV_ADD) == 0) {
		/*
		 * We're mainly interested in disk additions, but we also listen
		 * for new lofi devices, to allow for simplified testing.
		 */
		if (strcmp(subclass, ESC_DISK) == 0)
			is_lofi = B_FALSE;
		else if (strcmp(subclass, ESC_LOFI) == 0)
			is_lofi = B_TRUE;
		else
			return (0);

		is_check = B_FALSE;
	} else if (strcmp(class, EC_ZFS) == 0 &&
	    strcmp(subclass, ESC_ZFS_VDEV_CHECK) == 0) {
		/*
		 * This event signifies that a device failed to open during pool
		 * load, but the 'autoreplace' property was set, so we should
		 * pretend it's just been added.
		 */
		is_check = B_TRUE;
	} else if (strcmp(class, EC_DEV_STATUS) == 0 &&
	    strcmp(subclass, ESC_DEV_DLE) == 0) {
		is_dle = B_TRUE;
	} else {
		return (0);
	}

	if (sysevent_get_attr_list(ev, &nvl) != 0)
		return (-1);

	if (is_dle)
		ret = zfs_deliver_dle(nvl);
	else if (is_check)
		ret = zfs_deliver_check(nvl);
	else
		ret = zfs_deliver_add(nvl, is_lofi);

	nvlist_free(nvl);
	return (ret);
}

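/*
 * Enumeration thread: walk all imported pools, collecting those that are
 * currently unavailable, and create the thread pool used to re-enable their
 * datasets once enumeration is done.
 */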
/*ARGSUSED*/
void *
zfs_enum_pools(void *arg)
{
	(void) zpool_iter(g_zfshdl, zfs_unavail_pool, (void *)&g_pool_list);
	if (!list_is_empty(&g_pool_list))
		g_tpool = tpool_create(1, sysconf(_SC_NPROCESSORS_ONLN),
		    0, NULL);
	g_enumeration_done = B_TRUE;
	return (NULL);
}

static struct slm_mod_ops zfs_mod_ops = {
	SE_MAJOR_VERSION, SE_MINOR_VERSION, 10, zfs_deliver_event
};

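/*
 * Module load entry point: open the libzfs handle and kick off asynchronous
 * enumeration of unavailable pools before registering the event handler.
 */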
struct slm_mod_ops *
slm_init()
{
	if ((g_zfshdl = libzfs_init()) == NULL)
		return (NULL);
	/*
	 * collect a list of unavailable pools (asynchronously,
	 * since this can take a while)
	 */
	list_create(&g_pool_list, sizeof (struct unavailpool),
	    offsetof(struct unavailpool, uap_node));
	if (thr_create(NULL, 0, zfs_enum_pools, NULL, 0, &g_zfs_tid) != 0)
		return (NULL);
	return (&zfs_mod_ops);
}

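/*
 * Module unload entry point: drain and destroy the thread pool, release any
 * pool handles still on the unavailable list, wait for the enumeration
 * thread, and tear down the libzfs handle.
 */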
void
slm_fini()
{
	unavailpool_t *pool;

	if (g_tpool != NULL) {
		tpool_wait(g_tpool);
		tpool_destroy(g_tpool);
	}
	while ((pool = (list_head(&g_pool_list))) != NULL) {
		list_remove(&g_pool_list, pool);
		zpool_close(pool->uap_zhp);
		free(pool);
	}
	(void) thr_join(g_zfs_tid, NULL, NULL);
	list_destroy(&g_pool_list);
	libzfs_fini(g_zfshdl);
}
719