xref: /titanic_41/usr/src/cmd/rcm_daemon/common/svm_rcm.c (revision 8eea8e29cc4374d1ee24c25a07f45af132db3499)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <stdlib.h>
30 #include <stdio.h>
31 #include <string.h>
32 #include <sys/types.h>
33 #include <errno.h>
34 #include <meta.h>
35 #include <sys/lvm/mdio.h>
36 #include <sys/lvm/md_sp.h>
37 #include <sdssc.h>
38 
39 #include "rcm_module.h"
40 
41 /*
42  * This module is the RCM Module for SVM. The policy adopted by this module
43  * is to block offline requests for any SVM resource that is in use. A
44  * resource is considered to be in use if it contains a metadb or if it is
45  * a non-errored component of a metadevice that is open.
46  *
47  * The module uses the library libmeta to access the current state of the
48  * metadevices. On entry, and when svm_register() is called, the module
49  * builds a cache of all of the SVM resources and their dependencies. Each
50  * metadevice has an entry of type deventry_t which is accessed by a hash
51  * function. When the cache is built each SVM resource is registered with
52  * the RCM framework.  The check_device code path uses meta_invalidate_name to
53  * ensure that the caching in libmeta will not conflict with the cache
54  * we build within this code.
55  *
56  * When an RCM operation occurs that affects a registered SVM resource, the RCM
57  * framework will call the appropriate routine in this module. The cache
58  * entry will be found and if the resource has dependants, a callback will
59  * be made into the RCM framework to pass the request on to the dependants,
60  * which may themselves by SVM resources.
61  *
62  * Locking:
63  *      The cache is protected by a mutex
64  */
65 
66 /*
67  * Private constants
68  */
69 
70 /*
71  * Generic Messages
72  */
73 #define	MSG_UNRECOGNIZED	gettext("SVM: \"%s\" is not a SVM resource")
74 #define	MSG_NODEPS		gettext("SVM: can't find dependents")
75 #define	MSG_OPENERR		gettext("SVM: can't open \"%s\"")
76 #define	MSG_CACHEFAIL		gettext("SVM: can't malloc cache")
77 
78 #define	ERR_UNRECOGNIZED	gettext("unrecognized SVM resource")
79 #define	ERR_NODEPS		gettext("can't find SVM resource dependents")
80 
81 /*
82  * Macros to produce a quoted string containing the value of a preprocessor
83  * macro. For example, if SIZE is defined to be 256, VAL2STR(SIZE) is "256".
84  * This is used to construct format strings for scanf-family functions below.
85  */
86 #define	QUOTE(x)	#x
87 #define	VAL2STR(x)	QUOTE(x)
88 
/*
 * Classification of the SVM resources held in the cache.  The type
 * determines how a device is described in usage strings and how its
 * dependents are handled.
 */
typedef enum {
    SVM_SLICE = 0,	/* physical disk slice */
    SVM_STRIPE,		/* stripe metadevice */
    SVM_CONCAT,		/* concat metadevice */
    SVM_MIRROR,		/* mirror metadevice */
    SVM_RAID,		/* RAID metadevice */
    SVM_TRANS,		/* trans metadevice (log or master) */
    SVM_SOFTPART,	/* soft partition */
    SVM_HS		/* hot spare */
} svm_type_t;
99 
100 /* Hash table parameters */
101 #define	HASH_DEFAULT	251
102 
103 /* Hot spare pool users */
104 typedef struct hspuser {
105 	struct hspuser  *next;		/* next user */
106 	char		*hspusername;	/* name */
107 	dev_t		hspuserkey;	/* key */
108 } hspuser_t;
109 
110 /* Hot spare pool entry */
111 typedef struct hspentry {
112 	struct hspentry *link;		/* link through all hsp entries */
113 	struct hspentry *next;		/* next hsp entry for a slice */
114 	char		*hspname;	/* name */
115 	hspuser_t	*hspuser;	/* first hsp user */
116 } hspentry_t;
117 
118 /* Hash table entry */
119 typedef struct deventry {
120 	struct deventry		*next;		/* next entry with same hash */
121 	svm_type_t		devtype;	/* device type */
122 	dev_t			devkey;		/* key */
123 	char			*devname;	/* name */
124 	struct deventry		*dependent;	/* 1st dependent */
125 	struct deventry		*next_dep;	/* next dependent */
126 	struct deventry		*antecedent;	/* antecedent */
127 	hspentry_t		*hsp_list;	/* list of hot spare pools */
128 	int			flags;		/* flags */
129 } deventry_t;
130 
131 /* flag values */
132 #define	 REMOVED	0x1
133 #define	 IN_HSP		0x2
134 #define	 TRANS_LOG	0x4
135 #define	 CONT_SOFTPART	0x8
136 #define	 CONT_METADB	0x10
137 
138 /*
139  * Device redundancy flags. If the device can be removed from the
140  * metadevice configuration then it is considered a redundant device,
141  * otherwise not.
142  */
143 #define	NOTINDEVICE	-1
144 #define	NOTREDUNDANT	0
145 #define	REDUNDANT	1
146 
147 /* Cache */
/* Cache of all SVM resources, keyed by a hash of the device name */
typedef struct cache {
	deventry_t	**hashline;	/* hash table */
	int32_t		size;		/* size of hash table */
	uint32_t	registered;	/* cache registered with framework */
} cache_t;
153 
154 /*
155  * Forward declarations of private functions
156  */
157 
158 static int svm_register(rcm_handle_t *hd);
159 static int svm_unregister(rcm_handle_t *hd);
160 static deventry_t *cache_dependent(cache_t *cache, char *devname, int devflags,
161     deventry_t *dependents);
162 static deventry_t *cache_device(cache_t *cache, char *devname,
163     svm_type_t devtype, md_dev64_t devkey, int devflags);
164 static hspentry_t *find_hsp(char *hspname);
165 static hspuser_t *add_hsp_user(char *hspname, deventry_t *deventry);
166 static hspentry_t *add_hsp(char *hspname, deventry_t *deventry);
167 static void free_names(mdnamelist_t *nlp);
168 static int cache_all_devices(cache_t *cache);
169 static int cache_hsp(cache_t *cache, mdhspnamelist_t *nlp, md_hsp_t *hsp);
170 static int cache_trans(cache_t *cache, mdnamelist_t *nlp, md_trans_t *trans);
171 static int cache_mirror(cache_t *cache, mdnamelist_t *nlp,
172     md_mirror_t *mirror);
173 static int cache_raid(cache_t *cache, mdnamelist_t *nlp, md_raid_t *raid);
174 static int cache_stripe(cache_t *cache, mdnamelist_t *nlp,
175     md_stripe_t *stripe);
176 static int cache_sp(cache_t *cache, mdnamelist_t *nlp, md_sp_t *soft_part);
177 static int cache_all_devices_in_set(cache_t *cache, mdsetname_t *sp);
178 static cache_t  *create_cache();
179 static deventry_t *create_deventry(char *devname, svm_type_t devtype,
180     md_dev64_t devkey, int devflags);
181 static void cache_remove(cache_t *cache, deventry_t *deventry);
182 static deventry_t *cache_lookup(cache_t *cache, char *devname);
183 static void cache_sync(rcm_handle_t *hd, cache_t **cachep);
184 static char *cache_walk(cache_t *cache, uint32_t *i, deventry_t **hashline);
185 static void free_cache(cache_t **cache);
186 static void free_deventry(deventry_t **deventry);
187 static uint32_t hash(uint32_t h, char *s);
188 static void register_device(rcm_handle_t *hd, char *devname);
189 static int add_dep(int *ndeps, char ***depsp, deventry_t *deventry);
190 static int get_dependents(deventry_t *deventry, char *** dependentsp);
191 char *add_to_usage(char ** usagep, char *string);
192 char *add_to_usage_fmt(char **usagep, char *fmt, char *string);
193 static int is_open(dev_t devkey);
194 static int svm_offline(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
195     char **errorp, rcm_info_t **infop);
196 static int svm_online(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
197     char **errorp, rcm_info_t **infop);
198 static int svm_get_info(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
199     char **usagep, char **errorp, nvlist_t *props, rcm_info_t **infop);
200 static int svm_suspend(rcm_handle_t *hd, char *rsrc, id_t id,
201     timespec_t *interval, uint_t flags, char **errorp,
202     rcm_info_t **infop);
203 static int svm_resume(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
204     char **errorp, rcm_info_t **infop);
205 static int svm_remove(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
206     char **errorp, rcm_info_t **infop);
207 static int check_device(deventry_t *deventry);
208 static int check_mirror(mdsetname_t *sp, mdname_t *np, md_error_t *ep);
209 
210 /*
211  * Module-Private data
212  */
/*
 * Entry points handed back to the RCM framework by rcm_mod_init().
 */
static struct rcm_mod_ops svm_ops =
{
	RCM_MOD_OPS_VERSION,
	svm_register,		/* register all SVM resources */
	svm_unregister,		/* unregister all SVM resources */
	svm_get_info,		/* gather usage information */
	svm_suspend,		/* pass suspend on to dependents */
	svm_resume,		/* pass resume on to dependents */
	svm_offline,		/* offline request */
	svm_online,		/* online notification */
	svm_remove,		/* remove notification */
	NULL,			/* optional entry point, unused by this module */
	NULL,			/* optional entry point, unused by this module */
	NULL			/* optional entry point, unused by this module */
};
228 
229 static cache_t *svm_cache = NULL;
230 static mutex_t svm_cache_lock;
231 static hspentry_t *hsp_head = NULL;
232 
233 /*
234  * Module Interface Routines
235  */
236 
237 /*
238  *      rcm_mod_init()
239  *
240  *      Create a cache, and return the ops structure.
241  *      Input: None
242  *      Return: rcm_mod_ops structure
243  */
244 struct rcm_mod_ops *
245 rcm_mod_init()
246 {
247 	/* initialize the lock mutex */
248 	if (mutex_init(&svm_cache_lock, USYNC_THREAD, NULL)) {
249 		rcm_log_message(RCM_ERROR,
250 		    gettext("SVM: can't init mutex"));
251 		return (NULL);
252 	}
253 
254 	/* need to initialize the cluster library to avoid seg faults */
255 	if (sdssc_bind_library() == SDSSC_ERROR) {
256 		rcm_log_message(RCM_ERROR,
257 			gettext("SVM: Interface error with libsds_sc.so,"
258 			    " aborting."));
259 		return (NULL);
260 	}
261 
262 	/* Create a cache */
263 	if ((svm_cache = create_cache()) == NULL) {
264 		rcm_log_message(RCM_ERROR,
265 			gettext("SVM: module can't function, aborting."));
266 		return (NULL);
267 	}
268 
269 	/* Return the ops vectors */
270 	return (&svm_ops);
271 }
272 
273 /*
274  *	rcm_mod_info()
275  *
276  *	Return a string describing this module.
277  *	Input: None
278  *	Return: String
279  *	Locking: None
280  */
const char *
rcm_mod_info()
{
	/* Static description; %I% is expanded by SCCS at delivery time */
	const char *info = gettext("Solaris Volume Manager module %I%");

	return (info);
}
286 
287 /*
288  *	rcm_mod_fini()
289  *
290  *	Destroy the cache and mutex
291  *	Input: None
292  *	Return: RCM_SUCCESS
293  *	Locking: None
294  */
295 int
296 rcm_mod_fini()
297 {
298 	(void) mutex_lock(&svm_cache_lock);
299 	if (svm_cache) {
300 		free_cache(&svm_cache);
301 	}
302 	(void) mutex_unlock(&svm_cache_lock);
303 	(void) mutex_destroy(&svm_cache_lock);
304 	return (RCM_SUCCESS);
305 }
306 
307 /*
308  *	svm_register()
309  *
310  *	Make sure the cache is properly sync'ed, and its registrations are in
311  *	order.
312  *
313  *	Input:
314  *		rcm_handle_t	*hd
315  *	Return:
316  *		RCM_SUCCESS
317  *      Locking: the cache is locked throughout the execution of this routine
318  *      because it reads and possibly modifies cache links continuously.
319  */
320 static int
321 svm_register(rcm_handle_t *hd)
322 {
323 	uint32_t i = 0;
324 	deventry_t *l = NULL;
325 	char    *devicename;
326 
327 
328 	rcm_log_message(RCM_TRACE1, "SVM: register\n");
329 	/* Guard against bad arguments */
330 	assert(hd != NULL);
331 
332 	/* Lock the cache */
333 	(void) mutex_lock(&svm_cache_lock);
334 
335 	/* If the cache has already been registered, then just sync it.  */
336 	if (svm_cache && svm_cache->registered) {
337 		cache_sync(hd, &svm_cache);
338 		(void) mutex_unlock(&svm_cache_lock);
339 		return (RCM_SUCCESS);
340 	}
341 
342 	/* If not, register the whole cache and mark it as registered. */
343 	while ((devicename = cache_walk(svm_cache, &i, &l)) != NULL) {
344 			register_device(hd, devicename);
345 	}
346 	svm_cache->registered = 1;
347 
348 	/* Unlock the cache */
349 	(void) mutex_unlock(&svm_cache_lock);
350 
351 	return (RCM_SUCCESS);
352 }
353 
354 /*
355  *	svm_unregister()
356  *
357  *	Manually walk through the cache, unregistering all the special files and
358  *	mount points.
359  *
360  *	Input:
361  *		rcm_handle_t	*hd
362  *	Return:
363  *		RCM_SUCCESS
364  *      Locking: the cache is locked throughout the execution of this routine
365  *      because it reads and modifies cache links continuously.
366  */
367 static int
368 svm_unregister(rcm_handle_t *hd)
369 {
370 	deventry_t *l = NULL;
371 	uint32_t i = 0;
372 	char	    *devicename;
373 
374 	rcm_log_message(RCM_TRACE1, "SVM: unregister\n");
375 	/* Guard against bad arguments */
376 	assert(hd != NULL);
377 
378 	/* Walk the cache, unregistering everything */
379 	(void) mutex_lock(&svm_cache_lock);
380 	if (svm_cache != NULL) {
381 		while ((devicename = cache_walk(svm_cache, &i, &l)) != NULL) {
382 			(void) rcm_unregister_interest(hd, devicename, 0);
383 		}
384 		svm_cache->registered = 0;
385 	}
386 	(void) mutex_unlock(&svm_cache_lock);
387 	return (RCM_SUCCESS);
388 }
389 
390 /*
391  *      svm_offline()
392  *
393  *      Determine dependents of the resource being offlined, and offline
394  *      them all.
395  *
396  *      Input:
397  *		rcm_handle_t	*hd		handle
398  *		char*		*rsrc		resource name
399  *		id_t		id		0
400  *		char		**errorp	ptr to error message
401  *		rcm_info_t	**infop		ptr to info string
402  *      Output:
403  *		char		**errorp	pass back error message
404  *      Return:
405  *		int		RCM_SUCCESS or RCM_FAILURE
406  *      Locking: the cache is locked for most of this routine, except while
407  *      processing dependents.
408  */
409 /*ARGSUSED*/
410 static int
411 svm_offline(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags,
412     char **errorp, rcm_info_t **infop)
413 {
414 	int		rv = RCM_SUCCESS;
415 	int		ret;
416 	char		**dependents;
417 	deventry_t	*deventry;
418 	hspentry_t	*hspentry;
419 	hspuser_t	*hspuser;
420 
421 	/* Guard against bad arguments */
422 	assert(hd != NULL);
423 	assert(rsrc != NULL);
424 	assert(id == (id_t)0);
425 	assert(errorp != NULL);
426 
427 	/* Trace */
428 	rcm_log_message(RCM_TRACE1, "SVM: offline(%s), flags(%d)\n",
429 	    rsrc, flags);
430 
431 	/* Lock the cache */
432 	(void) mutex_lock(&svm_cache_lock);
433 
434 	/* Lookup the resource in the cache. */
435 	if ((deventry = cache_lookup(svm_cache, rsrc)) == NULL) {
436 		rcm_log_message(RCM_ERROR, MSG_UNRECOGNIZED);
437 		*errorp = strdup(ERR_UNRECOGNIZED);
438 		(void) mutex_unlock(&svm_cache_lock);
439 		rv = RCM_FAILURE;
440 		rcm_log_message(RCM_TRACE1, "SVM: svm_offline(%s) exit %d\n",
441 		    rsrc, rv);
442 		return (rv);
443 	}
444 	/* If it is a TRANS device, do not allow the offline */
445 	if (deventry->devtype == SVM_TRANS) {
446 		rv = RCM_FAILURE;
447 		(void) mutex_unlock(&svm_cache_lock);
448 		goto exit;
449 	}
450 
451 	if (deventry->flags&IN_HSP) {
452 		/*
453 		 * If this is in a hot spare pool, check to see
454 		 * if any of the hot spare pool users are open
455 		 */
456 		hspentry = deventry->hsp_list;
457 		while (hspentry) {
458 			hspuser = hspentry->hspuser;
459 			while (hspuser) {
460 				/* Check if open */
461 				if (is_open(hspuser->hspuserkey)) {
462 					rv = RCM_FAILURE;
463 					(void) mutex_unlock(&svm_cache_lock);
464 					goto exit;
465 				}
466 				hspuser = hspuser->next;
467 			}
468 			hspentry = hspentry->next;
469 		}
470 	}
471 
472 	/* Fail if the device contains a metadb replica */
473 	if (deventry->flags&CONT_METADB) {
474 		/*
475 		 * The user should delete the replica before continuing,
476 		 * so force the error.
477 		 */
478 		rcm_log_message(RCM_TRACE1, "SVM: %s has a replica\n",
479 		    deventry->devname);
480 		rv = RCM_FAILURE;
481 		(void) mutex_unlock(&svm_cache_lock);
482 		goto exit;
483 	}
484 
485 	/* Get dependents */
486 	if (get_dependents(deventry, &dependents) != 0) {
487 		rcm_log_message(RCM_ERROR, MSG_NODEPS);
488 		rv = RCM_FAILURE;
489 		(void) mutex_unlock(&svm_cache_lock);
490 		goto exit;
491 	}
492 
493 	if (dependents) {
494 		/* Check if the device is broken (needs maintanence). */
495 		if (check_device(deventry) == REDUNDANT) {
496 			/*
497 			 * The device is broken, the offline request should
498 			 * succeed, so ignore any of the dependents.
499 			 */
500 			rcm_log_message(RCM_TRACE1,
501 			    "SVM: ignoring dependents\n");
502 			(void) mutex_unlock(&svm_cache_lock);
503 			free(dependents);
504 			goto exit;
505 		}
506 		(void) mutex_unlock(&svm_cache_lock);
507 		ret = rcm_request_offline_list(hd, dependents, flags, infop);
508 		if (ret != RCM_SUCCESS) {
509 			rv = ret;
510 		}
511 		free(dependents);
512 	} else {
513 		/* If no dependents, check if the metadevice is open */
514 		if ((deventry->devkey) && (is_open(deventry->devkey))) {
515 			rv = RCM_FAILURE;
516 			(void) mutex_unlock(&svm_cache_lock);
517 			goto exit;
518 		}
519 		(void) mutex_unlock(&svm_cache_lock);
520 	}
521 exit:
522 	rcm_log_message(RCM_TRACE1, "SVM: svm_offline(%s) exit %d\n", rsrc, rv);
523 	if (rv != RCM_SUCCESS)
524 		*errorp = strdup(gettext("unable to offline"));
525 	return (rv);
526 }
527 
528 /*
529  *      svm_online()
530  *
531  *      Just pass the online notification on to the dependents of this resource
532  *
533  *      Input:
534  *		rcm_handle_t	*hd		handle
535  *		char*		*rsrc		resource name
536  *		id_t		id		0
537  *		char		**errorp	ptr to error message
538  *		rcm_info_t	**infop		ptr to info string
539  *      Output:
540  *		char		**errorp	pass back error message
541  *      Return:
542  *		int		RCM_SUCCESS or RCM_FAILURE
543  *      Locking: the cache is locked for most of this routine, except while
544  *      processing dependents.
545  */
546 /*ARGSUSED*/
547 static int
548 svm_online(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags, char **errorp,
549     rcm_info_t **infop)
550 {
551 	int		rv = RCM_SUCCESS;
552 	char		**dependents;
553 	deventry_t	*deventry;
554 
555 	/* Guard against bad arguments */
556 	assert(hd != NULL);
557 	assert(rsrc != NULL);
558 	assert(id == (id_t)0);
559 
560 	/* Trace */
561 	rcm_log_message(RCM_TRACE1, "SVM: online(%s)\n", rsrc);
562 
563 	/* Lookup this resource in the cache (cache gets locked) */
564 	(void) mutex_lock(&svm_cache_lock);
565 	deventry = cache_lookup(svm_cache, rsrc);
566 	if (deventry == NULL) {
567 		(void) mutex_unlock(&svm_cache_lock);
568 		rcm_log_message(RCM_ERROR, MSG_UNRECOGNIZED, rsrc);
569 		*errorp = strdup(ERR_UNRECOGNIZED);
570 		return (RCM_FAILURE);
571 	}
572 
573 	/* Get dependents */
574 	if (get_dependents(deventry, &dependents) != 0) {
575 		(void) mutex_unlock(&svm_cache_lock);
576 		rcm_log_message(RCM_ERROR, MSG_NODEPS);
577 		*errorp = strdup(ERR_NODEPS);
578 		return (RCM_FAILURE);
579 	}
580 	(void) mutex_unlock(&svm_cache_lock);
581 
582 	if (dependents) {
583 		rv = rcm_notify_online_list(hd, dependents, flags, infop);
584 		if (rv != RCM_SUCCESS)
585 			*errorp = strdup(gettext("unable to online"));
586 		free(dependents);
587 	}
588 
589 	return (rv);
590 }
591 
592 /*
593  *      svm_get_info()
594  *
595  *      Gather usage information for this resource.
596  *
597  *      Input:
598  *		rcm_handle_t	*hd		handle
599  *		char*		*rsrc		resource name
600  *		id_t		id		0
601  *		char		**errorp	ptr to error message
602  *		nvlist_t	*props		Not used
603  *		rcm_info_t	**infop		ptr to info string
604  *      Output:
605  *		char		**infop		pass back info string
606  *      Return:
607  *		int		RCM_SUCCESS or RCM_FAILURE
608  *      Locking: the cache is locked  throughout the whole function
609  */
/*ARGSUSED*/
static int
svm_get_info(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags, char **usagep,
    char **errorp, nvlist_t *props, rcm_info_t **infop)
{
	int 		rv = RCM_SUCCESS;
	deventry_t	*deventry;
	deventry_t	*dependent;
	hspentry_t	*hspentry;
	char		**dependents;

	/* Guard against bad arguments */
	assert(hd != NULL);
	assert(rsrc != NULL);
	assert(id == (id_t)0);
	assert(usagep != NULL);
	assert(errorp != NULL);

	/* Trace */
	rcm_log_message(RCM_TRACE1, "SVM: get_info(%s)\n", rsrc);

	/* Lookup this resource in the cache (cache gets locked) */
	(void) mutex_lock(&svm_cache_lock);
	deventry = cache_lookup(svm_cache, rsrc);
	if (deventry == NULL) {
		(void) mutex_unlock(&svm_cache_lock);
		rcm_log_message(RCM_ERROR, MSG_UNRECOGNIZED, rsrc);
		*errorp = strdup(ERR_UNRECOGNIZED);
		return (RCM_FAILURE);
	}

	/*
	 * Build the usage string piece by piece; the order of the
	 * add_to_usage calls below determines the order of the text.
	 */
	*usagep = NULL; /* Initialise usage string */
	if (deventry->flags&CONT_METADB) {
		*usagep = add_to_usage(usagep, gettext("contains metadb(s)"));
	}
	if (deventry->flags&CONT_SOFTPART) {
		*usagep = add_to_usage(usagep,
		    gettext("contains soft partition(s)"));
	}
	if (deventry->devtype == SVM_SOFTPART) {
		/* A soft partition reports the device it is built on */
		*usagep = add_to_usage_fmt(usagep,
		    gettext("soft partition based on \"%s\""),
		    deventry->antecedent->devname);
	}

	if (deventry->flags&IN_HSP) {
		/* List every hot spare pool this slice belongs to */
		int	hspflag = 0;
		hspentry = deventry->hsp_list;
		while (hspentry) {
			if (hspflag == 0) {
				/* Emit the heading only once */
				*usagep = add_to_usage(usagep,
				    gettext("member of hot spare pool"));
				hspflag = 1;
			}
			*usagep = add_to_usage_fmt(usagep, "\"%s\"",
			    hspentry->hspname);
			hspentry = hspentry->next;
		}
	} else {
		/* Describe each metadevice built on top of this resource */
		dependent = deventry->dependent;
		while (dependent) {
			/* Resource has dependents */
			switch (dependent->devtype) {
			case SVM_STRIPE:
				*usagep = add_to_usage_fmt(usagep,
				    gettext("component of stripe \"%s\""),
				    dependent->devname);
				break;
			case SVM_CONCAT:
				*usagep = add_to_usage_fmt(usagep,
				    gettext("component of concat \"%s\""),
				    dependent->devname);
				break;
			case SVM_MIRROR:
				*usagep = add_to_usage_fmt(usagep,
				    gettext("submirror of \"%s\""),
				    dependent->devname);
				break;
			case SVM_RAID:
				*usagep = add_to_usage_fmt(usagep,
				    gettext("component of RAID \"%s\""),
				    dependent->devname);
				break;
			case SVM_TRANS:
				/* TRANS_LOG distinguishes log from master */
				if (deventry->flags&TRANS_LOG) {
					*usagep = add_to_usage_fmt(usagep,
					    gettext("trans log for \"%s\""),
					    dependent->devname);
				} else {
					*usagep = add_to_usage_fmt(usagep,
					    gettext("trans master for \"%s\""),
					    dependent->devname);
				}
				break;
			case SVM_SOFTPART:
				/* Contains soft parts, already processed */
				break;
			default:
				rcm_log_message(RCM_ERROR,
				    gettext("Unknown type %d\n"),
				    dependent->devtype);
			}
			dependent = dependent->next_dep;
		}
	}

	/* Get dependents  and recurse if necessary */
	if (get_dependents(deventry, &dependents) != 0) {
		(void) mutex_unlock(&svm_cache_lock);
		rcm_log_message(RCM_ERROR, MSG_NODEPS);
		*errorp = strdup(ERR_NODEPS);
		return (RCM_FAILURE);
	}
	(void) mutex_unlock(&svm_cache_lock);

	if ((flags & RCM_INCLUDE_DEPENDENT) && (dependents != NULL)) {
		rv = rcm_get_info_list(hd, dependents, flags, infop);
		if (rv != RCM_SUCCESS)
			*errorp = strdup(gettext("unable to get info"));
	}
	free(dependents);	/* free(NULL) is a no-op */

	if (*usagep != NULL)
		rcm_log_message(RCM_TRACE1, "SVM: usage = %s\n", *usagep);
	return (rv);
}
736 
737 /*
738  *      svm_suspend()
739  *
740  *      Notify all dependents that the resource is being suspended.
741  *      Since no real operation is involved, QUERY or not doesn't matter.
742  *
743  *      Input:
744  *		rcm_handle_t	*hd		handle
745  *		char*		*rsrc		resource name
746  *		id_t		id		0
747  *		char		**errorp	ptr to error message
748  *		rcm_info_t	**infop		ptr to info string
749  *      Output:
750  *		char		**errorp	pass back error message
751  *      Return:
752  *		int		RCM_SUCCESS or RCM_FAILURE
753  *      Locking: the cache is locked for most of this routine, except while
754  *      processing dependents.
755  */
756 static int
757 svm_suspend(rcm_handle_t *hd, char *rsrc, id_t id, timespec_t *interval,
758     uint_t flags, char **errorp, rcm_info_t **infop)
759 {
760 	int		rv = RCM_SUCCESS;
761 	deventry_t	*deventry;
762 	char		**dependents;
763 
764 	/* Guard against bad arguments */
765 	assert(hd != NULL);
766 	assert(rsrc != NULL);
767 	assert(id == (id_t)0);
768 	assert(interval != NULL);
769 	assert(errorp != NULL);
770 
771 	/* Trace */
772 	rcm_log_message(RCM_TRACE1, "SVM: suspend(%s)\n", rsrc);
773 
774 	/* Lock the cache and extract information about this resource.  */
775 	(void) mutex_lock(&svm_cache_lock);
776 	if ((deventry = cache_lookup(svm_cache, rsrc)) == NULL) {
777 		(void) mutex_unlock(&svm_cache_lock);
778 		rcm_log_message(RCM_ERROR, MSG_UNRECOGNIZED, rsrc);
779 		*errorp = strdup(ERR_UNRECOGNIZED);
780 		return (RCM_SUCCESS);
781 	}
782 
783 	/* Get dependents */
784 	if (get_dependents(deventry, &dependents) != 0) {
785 		(void) mutex_unlock(&svm_cache_lock);
786 		rcm_log_message(RCM_ERROR, MSG_NODEPS);
787 		*errorp = strdup(ERR_NODEPS);
788 		return (RCM_FAILURE);
789 	}
790 	(void) mutex_unlock(&svm_cache_lock);
791 
792 	if (dependents) {
793 		rv = rcm_request_suspend_list(hd, dependents, flags,
794 		    interval, infop);
795 		if (rv != RCM_SUCCESS)
796 			*errorp = strdup(gettext("unable to suspend"));
797 		free(dependents);
798 	}
799 
800 	return (rv);
801 }
802 
803 /*
804  *      svm_resume()
805  *
806  *      Notify all dependents that the resource is being resumed.
807  *
808  *      Input:
809  *		rcm_handle_t	*hd		handle
810  *		char*		*rsrc		resource name
811  *		id_t		id		0
812  *		char		**errorp	ptr to error message
813  *		rcm_info_t	**infop		ptr to info string
814  *      Output:
815  *		char		**errorp	pass back error message
816  *      Return:
817  *		int		RCM_SUCCESS or RCM_FAILURE
818  *      Locking: the cache is locked for most of this routine, except while
819  *      processing dependents.
820  *
821  */
822 static int
823 svm_resume(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags, char **errorp,
824     rcm_info_t **infop)
825 {
826 	int		rv = RCM_SUCCESS;
827 	deventry_t	*deventry;
828 	char		**dependents;
829 
830 	/* Guard against bad arguments */
831 	assert(hd != NULL);
832 	assert(rsrc != NULL);
833 	assert(id == (id_t)0);
834 	assert(errorp != NULL);
835 
836 	/* Trace */
837 	rcm_log_message(RCM_TRACE1, "SVM: resume(%s)\n", rsrc);
838 
839 	/*
840 	 * Lock the cache just long enough to extract information about this
841 	 * resource.
842 	 */
843 	(void) mutex_lock(&svm_cache_lock);
844 	if ((deventry = cache_lookup(svm_cache, rsrc)) == NULL) {
845 		(void) mutex_unlock(&svm_cache_lock);
846 		rcm_log_message(RCM_ERROR, MSG_UNRECOGNIZED, rsrc);
847 		*errorp = strdup(ERR_UNRECOGNIZED);
848 		return (RCM_SUCCESS);
849 	}
850 
851 	/* Get dependents */
852 
853 	if (get_dependents(deventry, &dependents) != 0) {
854 		(void) mutex_unlock(&svm_cache_lock);
855 		rcm_log_message(RCM_ERROR, MSG_NODEPS);
856 		*errorp = strdup(ERR_NODEPS);
857 		return (RCM_FAILURE);
858 	}
859 
860 	(void) mutex_unlock(&svm_cache_lock);
861 	if (dependents) {
862 		rv = rcm_notify_resume_list(hd, dependents, flags, infop);
863 		if (rv != RCM_SUCCESS)
864 			*errorp = strdup(gettext("unable to resume"));
865 		free(dependents);
866 	}
867 
868 	return (rv);
869 }
870 
871 
872 /*
873  *	svm_remove()
874  *
875  *      Remove the resource from the cache and notify all dependents that
876  *      the resource has been removed.
877  *
878  *      Input:
879  *		rcm_handle_t	*hd		handle
880  *		char*		*rsrc		resource name
881  *		id_t		id		0
882  *		char		**errorp	ptr to error message
883  *		rcm_info_t	**infop		ptr to info string
884  *      Output:
885  *		char		**errorp	pass back error message
886  *      Return:
887  *		int		RCM_SUCCESS or RCM_FAILURE
888  *      Locking: the cache is locked for most of this routine, except while
889  *      processing dependents.
890  */
static int
svm_remove(rcm_handle_t *hd, char *rsrc, id_t id, uint_t flags, char **errorp,
    rcm_info_t **infop)
{
	int		rv = RCM_SUCCESS;
	char		**dependents;
	deventry_t	*deventry;

	/* Guard against bad arguments */
	assert(hd != NULL);
	assert(rsrc != NULL);
	assert(id == (id_t)0);

	/* Trace */
	rcm_log_message(RCM_TRACE1, "SVM: svm_remove(%s)\n", rsrc);

	/* Lock the cache while removing resource */
	(void) mutex_lock(&svm_cache_lock);
	if ((deventry = cache_lookup(svm_cache, rsrc)) == NULL) {
		/* An unknown resource is not treated as a removal failure */
		(void) mutex_unlock(&svm_cache_lock);
		return (RCM_SUCCESS);
	}

	/* Get dependents */
	if (get_dependents(deventry, &dependents) != 0) {
		(void) mutex_unlock(&svm_cache_lock);
		rcm_log_message(RCM_ERROR, MSG_NODEPS);
		/*
		 * NOTE(review): this flag update happens after the lock
		 * has been released — confirm that is intended.
		 */
		deventry->flags |= REMOVED;
		*errorp = strdup(ERR_NODEPS);
		return (RCM_FAILURE);
	}

	if (dependents) {
		/*
		 * Drop the lock across the framework callback: the
		 * dependents may themselves be SVM resources whose
		 * handling needs to take the cache lock.
		 */
		(void) mutex_unlock(&svm_cache_lock);
		rv = rcm_notify_remove_list(hd, dependents, flags, infop);
		(void) mutex_lock(&svm_cache_lock);
		if (rv != RCM_SUCCESS)
			*errorp = strdup(gettext("unable to remove"));
		free(dependents);
	}

	/* Mark entry as removed */
	deventry->flags |= REMOVED;

	(void) mutex_unlock(&svm_cache_lock);
	rcm_log_message(RCM_TRACE1, "SVM: exit svm_remove(%s)\n", rsrc);
	/* Clean up and return success (even if a dependent notify failed) */
	return (RCM_SUCCESS);
}
940 
941 /*
942  * Definitions of private functions
943  *
944  */
945 
946 /*
947  *	find_hsp()
948  *
949  *	Find the hot spare entry from the linked list of all hotspare pools
950  *
951  *	Input:
952  *		char		*hspname	name of hot spare pool
953  *	Return:
954  *		hspentry_t	hot spare entry
955  */
956 static hspentry_t *
957 find_hsp(char *hspname)
958 {
959 	hspentry_t	*hspentry = hsp_head;
960 
961 	while (hspentry) {
962 		if (strcmp(hspname, hspentry->hspname) == 0)
963 			return (hspentry);
964 		hspentry = hspentry->link;
965 	}
966 	return (NULL);
967 }
968 
969 /*
970  *      add_hsp_user()
971  *
 *      Add a hot spare pool user to the list for the hsp specified by
973  *	hspname. The memory allocated here will be freed by free_cache()
974  *
975  *      Input:
976  *		char		*hspname	hot spare pool name
977  *		deventry_t	*deventry	specified hsp user
978  *      Return:
979  *		hspuser_t	entry in hsp user list
980  */
981 static hspuser_t *
982 add_hsp_user(char *hspname, deventry_t *deventry)
983 {
984 	hspuser_t	*newhspuser;
985 	char		*newhspusername;
986 	hspuser_t	*previous;
987 	hspentry_t	*hspentry;
988 
989 	hspentry = find_hsp(hspname);
990 	if (hspentry == NULL)
991 		return (NULL);
992 	rcm_log_message(RCM_TRACE1, "SVM: Enter add_hsp_user %s, %x, %x\n",
993 	    hspname, hspentry, hspentry->hspuser);
994 
995 	newhspuser = (hspuser_t *)malloc(sizeof (*newhspuser));
996 	if (newhspuser == NULL) {
997 		rcm_log_message(RCM_ERROR,
998 		    gettext("SVM: can't malloc hspuser"));
999 		return (NULL);
1000 	}
1001 	(void) memset((char *)newhspuser, 0, sizeof (*newhspuser));
1002 
1003 	newhspusername = strdup(deventry->devname);
1004 	if (newhspusername == NULL) {
1005 		rcm_log_message(RCM_ERROR,
1006 		    gettext("SVM: can't malloc hspusername"));
1007 		free(newhspuser);
1008 		return (NULL);
1009 	}
1010 	newhspuser->hspusername = newhspusername;
1011 	newhspuser->hspuserkey = deventry->devkey;
1012 
1013 	if ((previous = hspentry->hspuser) == NULL) {
1014 		hspentry->hspuser = newhspuser;
1015 	} else {
1016 		hspuser_t	*temp = previous->next;
1017 		previous->next = newhspuser;
1018 		newhspuser->next = temp;
1019 	}
1020 	rcm_log_message(RCM_TRACE1, "SVM: Added hsp_user %s (dev %x) to %s\n",
1021 	    newhspusername, newhspuser->hspuserkey, hspname);
1022 	return (newhspuser);
1023 }
1024 
1025 /*
1026  *      add_hsp()
1027  *
1028  *      Add a hot spare pool entry to the list for the slice, deventry.
1029  *      Also add to the linked list of all hsp pools
1030  *	The memory allocated here will be freed by free_cache()
1031  *
1032  *      Input:
1033  *		char		*hspname	name of hsp pool entry
1034  *		deventry_t	*deventry	device entry for the slice
1035  *      Return:
1036  *		hspentry_t	end of hsp list
1037  *      Locking: None
1038  */
1039 static hspentry_t *
1040 add_hsp(char *hspname, deventry_t *deventry)
1041 {
1042 	hspentry_t	*newhspentry;
1043 	hspentry_t	*previous;
1044 	char		*newhspname;
1045 
1046 	rcm_log_message(RCM_TRACE1, "SVM: Enter add_hsp %s\n",
1047 	    hspname);
1048 	newhspentry = (hspentry_t *)malloc(sizeof (*newhspentry));
1049 	if (newhspentry == NULL) {
1050 		rcm_log_message(RCM_ERROR,
1051 		    gettext("SVM: can't malloc hspentry"));
1052 		return (NULL);
1053 	}
1054 	(void) memset((char *)newhspentry, 0, sizeof (*newhspentry));
1055 
1056 	newhspname = strdup(hspname);
1057 	if (newhspname == NULL) {
1058 		rcm_log_message(RCM_ERROR,
1059 		    gettext("SVM: can't malloc hspname"));
1060 		free(newhspentry);
1061 		return (NULL);
1062 	}
1063 	newhspentry->hspname = newhspname;
1064 
1065 	/* Add to linked list of all hotspare pools */
1066 	newhspentry->link = hsp_head;
1067 	hsp_head = newhspentry;
1068 
1069 	/* Add to list of hotspare pools containing this slice */
1070 	if ((previous = deventry->hsp_list) == NULL) {
1071 		deventry->hsp_list = newhspentry;
1072 	} else {
1073 		hspentry_t	*temp = previous->next;
1074 		previous->next = newhspentry;
1075 		newhspentry->next = temp;
1076 	}
1077 	rcm_log_message(RCM_TRACE1, "SVM: Exit add_hsp %s\n",
1078 	    hspname);
1079 	return (newhspentry);
1080 }
1081 
1082 /*
1083  *      cache_dependent()
1084  *
1085  *      Add a dependent for a deventry to the cache and return the cache entry
1086  *	If the name is not in the cache, we assume that it is a SLICE. If it
1087  *	turns out to be any other type of metadevice, when it is processed
1088  *	in cache_all_devices_in_set(), cache_device() will be called to
1089  *	set the type to the actual value.
1090  *
1091  *      Input:
1092  *		cache_t		*cache		cache
1093  *		char		*devname	metadevice name
1094  *		int		devflags	metadevice flags
1095  *		deventry_t	*dependent	dependent of this metadevice
1096  *      Return:
1097  *		deventry_t	metadevice entry added to cache
1098  *      Locking: None
1099  */
1100 static deventry_t *
1101 cache_dependent(cache_t *cache, char *devname, int devflags,
1102     deventry_t *dependent)
1103 {
1104 
1105 	deventry_t	*newdeventry = NULL;
1106 	deventry_t	*hashprev = NULL;
1107 	deventry_t	*deventry = NULL;
1108 	deventry_t	*previous = NULL;
1109 	uint32_t	hash_index;
1110 	int		comp;
1111 
1112 	rcm_log_message(RCM_TRACE1, "SVM: Enter cache_dep %s, %x, %s\n",
1113 	    devname, devflags, dependent->devname);
1114 
1115 	hash_index = hash(cache->size, devname);
1116 	if (hash_index >= cache->size) {
1117 		rcm_log_message(RCM_ERROR,
1118 		    gettext("SVM: can't hash device."));
1119 		return (NULL);
1120 	}
1121 
1122 	deventry = cache->hashline[hash_index];
1123 
1124 	/* if the hash table slot is empty, then this is easy */
1125 	if (deventry == NULL) {
1126 		deventry = create_deventry(devname, SVM_SLICE, 0, devflags);
1127 		cache->hashline[hash_index] = deventry;
1128 	} else {
1129 	/* if the hash table slot isn't empty, find the immediate successor */
1130 		hashprev = NULL;
1131 		while ((comp = strcmp(deventry->devname, devname)) < 0 &&
1132 		    deventry->next != NULL) {
1133 			hashprev = deventry;
1134 			deventry = deventry->next;
1135 		}
1136 
1137 		if (comp == 0) {
1138 			/* if already in cache, just update the flags */
1139 			deventry->flags |= devflags;
1140 		} else {
1141 			/* insert the entry if it's not already there */
1142 			if ((newdeventry = create_deventry(devname,
1143 			    SVM_SLICE, 0, devflags)) == NULL) {
1144 				rcm_log_message(RCM_ERROR,
1145 				    gettext("SVM: can't create hash line."));
1146 				return (NULL);
1147 			}
1148 			if (comp > 0) {
1149 				newdeventry->next = deventry;
1150 				if (hashprev)
1151 					hashprev->next = newdeventry;
1152 				else
1153 					cache->hashline[hash_index] =
1154 					    newdeventry;
1155 			} else if (comp < 0) {
1156 				newdeventry->next = deventry->next;
1157 				deventry->next = newdeventry;
1158 			}
1159 			deventry = newdeventry;
1160 		}
1161 	}
1162 	/* complete deventry by linking the dependent to it */
1163 	dependent->antecedent = deventry;
1164 	if ((previous = deventry->dependent) != NULL) {
1165 		deventry_t *temp = previous->next_dep;
1166 		previous->next_dep = dependent;
1167 		dependent->next_dep = temp;
1168 	} else deventry->dependent = dependent;
1169 	return (deventry);
1170 
1171 }
1172 
1173 /*
1174  *      cache_device()
1175  *
1176  *      Add an entry to the cache for devname
1177  *
1178  *      Input:
1179  *		cache_t		*cache		cache
1180  *		char		*devname	metadevice named
1181  *		svm_type_t	devtype		metadevice type
1182  *		md_dev64_t	devkey		dev_t of device
1183  *		int		devflags	device flags
1184  *      Return:
1185  *		deventry_t	metadevice added to cache
1186  *      Locking: None
1187  */
1188 static deventry_t *
1189 cache_device(cache_t *cache, char *devname, svm_type_t devtype,
1190     md_dev64_t devkey, int devflags)
1191 {
1192 	deventry_t	*newdeventry = NULL;
1193 	deventry_t	*previous = NULL;
1194 	deventry_t	*deventry = NULL;
1195 	uint32_t	hash_index;
1196 	int		comp;
1197 
1198 	rcm_log_message(RCM_TRACE1, "SVM: Enter cache_device %s, %x, %lx, %x\n",
1199 	    devname, devtype, devkey, devflags);
1200 
1201 	hash_index = hash(cache->size, devname);
1202 	if (hash_index >= cache->size) {
1203 		rcm_log_message(RCM_ERROR,
1204 		    gettext("SVM: can't hash device."));
1205 		return (NULL);
1206 	}
1207 
1208 	deventry = cache->hashline[hash_index];
1209 
1210 	/* if the hash table slot is empty, then this is easy */
1211 	if (deventry == NULL) {
1212 		deventry = create_deventry(devname, devtype, devkey,
1213 		    devflags);
1214 		cache->hashline[hash_index] = deventry;
1215 	} else {
1216 	/* if the hash table slot isn't empty, find the immediate successor */
1217 		previous = NULL;
1218 		while ((comp = strcmp(deventry->devname, devname)) < 0 &&
1219 		    deventry->next != NULL) {
1220 			previous = deventry;
1221 			deventry = deventry->next;
1222 		}
1223 
1224 		if (comp == 0) {
1225 			/*
1226 			 * If entry already exists, just set the type, key
1227 			 * and flags
1228 			 */
1229 			deventry->devtype = devtype;
1230 			deventry->devkey = meta_cmpldev(devkey);
1231 			deventry->flags |= devflags;
1232 		} else {
1233 			/* insert the entry if it's not already there */
1234 			if ((newdeventry = create_deventry(devname, devtype,
1235 			    devkey, devflags)) == NULL) {
1236 				rcm_log_message(RCM_ERROR,
1237 				    gettext("SVM: can't create hash line."));
1238 			}
1239 			if (comp > 0) {
1240 				newdeventry->next = deventry;
1241 				if (previous)
1242 					previous->next = newdeventry;
1243 				else
1244 					cache->hashline[hash_index] =
1245 					    newdeventry;
1246 			} else if (comp < 0) {
1247 				newdeventry->next = deventry->next;
1248 				deventry->next = newdeventry;
1249 			}
1250 			deventry = newdeventry;
1251 		}
1252 	}
1253 	return (deventry);
1254 }
1255 /*
1256  *	free_names()
1257  *
1258  *	Free all name list entries
1259  *
1260  *	Input:
1261  *		mdnamelist_t		*np		namelist pointer
1262  *	Return: None
1263  */
1264 
1265 static void
1266 free_names(mdnamelist_t *nlp)
1267 {
1268 	mdnamelist_t *p;
1269 
1270 	for (p = nlp; p != NULL; p = p->next) {
1271 	    meta_invalidate_name(p->namep);
1272 	    p->namep = NULL;
1273 	}
1274 	metafreenamelist(nlp);
1275 }
1276 
1277 /*
1278  * cache_hsp()
1279  *
1280  *	Add an entry to the cache for each slice in the hot spare
1281  *	pool. Call add_hsp() to add the hot spare pool to the list
1282  *	of all hot spare pools.
1283  *
1284  *	Input:
1285  *		cache_t		*cache	cache
1286  *		mdhspnamelist_t	*nlp	pointer to hsp name
1287  *		md_hsp_t	*hsp
1288  *	Return:
1289  *		0 if successful or error code
1290  */
1291 static int
1292 cache_hsp(cache_t *cache, mdhspnamelist_t *nlp, md_hsp_t *hsp)
1293 {
1294 	int		i;
1295 	deventry_t	*deventry;
1296 	md_hs_t		*hs;
1297 
1298 	for (i = 0; i < hsp->hotspares.hotspares_len; i++) {
1299 		hs = &hsp->hotspares.hotspares_val[i];
1300 		if ((deventry = cache_device(cache, hs->hsnamep->bname,
1301 		    SVM_SLICE, hs->hsnamep->dev,
1302 		    IN_HSP)) == NULL) {
1303 			return (ENOMEM);
1304 		}
1305 		if (add_hsp(nlp->hspnamep->hspname, deventry) == NULL) {
1306 			return (ENOMEM);
1307 		}
1308 	}
1309 	return (0);
1310 }
1311 
1312 /*
1313  * cache_trans()
1314  *
1315  *	Add an entry to the cache for trans metadevice, the master
1316  *	and the log. Call cache_dependent() to link that master and
1317  *	the log to the trans metadevice.
1318  *
1319  *	Input:
1320  *		cache_t		*cache	cache
1321  *		mdnamelist_t	*nlp	pointer to trans name
1322  *		md_trans_t	*trans
1323  *	Return:
1324  *		0 if successful or error code
1325  *
1326  */
1327 static int
1328 cache_trans(cache_t *cache, mdnamelist_t *nlp, md_trans_t *trans)
1329 {
1330 	deventry_t	*antecedent;
1331 
1332 	if ((antecedent = cache_device(cache, nlp->namep->bname, SVM_TRANS,
1333 	    nlp->namep->dev, 0)) == NULL) {
1334 		return (ENOMEM);
1335 	}
1336 
1337 	if (cache_device(cache, trans->masternamep->bname, SVM_SLICE,
1338 	    trans->masternamep->dev, 0) == NULL) {
1339 		return (ENOMEM);
1340 	}
1341 
1342 	if (cache_dependent(cache, trans->masternamep->bname, 0,
1343 	    antecedent) == NULL) {
1344 		return (ENOMEM);
1345 	}
1346 
1347 	if (trans->lognamep != NULL) {
1348 		if (cache_device(cache, trans->lognamep->bname, SVM_SLICE,
1349 		    trans->lognamep->dev, TRANS_LOG) == NULL) {
1350 			return (ENOMEM);
1351 		}
1352 
1353 		if (cache_dependent(cache, trans->lognamep->bname, 0,
1354 		    antecedent) == NULL) {
1355 			return (ENOMEM);
1356 		}
1357 	}
1358 	return (0);
1359 }
1360 
1361 /*
1362  * cache_mirror()
1363  *
1364  *	Add an entry to the cache for the mirror. For each
1365  *	submirror, call cache_dependent() to add an entry to the
1366  *	cache and to link it to mirror entry.
1367  *
1368  *	Input:
1369  *		cache_t		*cache	cache
1370  *		mdnamelist_t	*nlp	pointer to mirror name
1371  *		md_mirror_t	*mirror
1372  *	Return:
1373  *		0 if successful or error code
1374  *
1375  */
1376 static int
1377 cache_mirror(cache_t *cache, mdnamelist_t *nlp, md_mirror_t *mirror)
1378 {
1379 	int i;
1380 	deventry_t	*antecedent;
1381 
1382 	if ((antecedent = cache_device(cache, nlp->namep->bname, SVM_MIRROR,
1383 	    nlp->namep->dev, 0)) == NULL) {
1384 		return (ENOMEM);
1385 	}
1386 	for (i = 0; i <  NMIRROR; i++) {
1387 		md_submirror_t	*submirror;
1388 
1389 		submirror = &mirror->submirrors[i];
1390 		if (submirror->state == SMS_UNUSED)
1391 			continue;
1392 
1393 		if (!submirror->submirnamep)
1394 			continue;
1395 
1396 		if (cache_dependent(cache, submirror->submirnamep->bname,
1397 		    0, antecedent) == NULL) {
1398 			return (ENOMEM);
1399 		}
1400 	}
1401 	return (0);
1402 }
1403 
1404 /*
1405  * cache_raid()
1406  *
1407  *	Add an entry to the cache for the RAID metadevice. For
1408  *	each component of the RAID, call cache_dependent() to
1409  *	add it to the cache and to link it to the RAID metadevice.
1410  *
1411  *	Input:
1412  *		cache_t		*cache	cache
1413  *		mdnamelist_t	*nlp	pointer to raid name
1414  *		md_raid_t	*raid	mirror
1415  *	Return:
1416  *		0 if successful or error code
1417  */
1418 static int
1419 cache_raid(cache_t *cache, mdnamelist_t *nlp, md_raid_t *raid)
1420 {
1421 	int i;
1422 	deventry_t	*antecedent;
1423 
1424 	if ((antecedent = cache_device(cache, nlp->namep->bname, SVM_RAID,
1425 	    nlp->namep->dev, 0)) == NULL) {
1426 		return (ENOMEM);
1427 	}
1428 	if (raid->hspnamep) {
1429 		if (add_hsp_user(raid->hspnamep->hspname,
1430 		    antecedent) == NULL) {
1431 			return (ENOMEM);
1432 		}
1433 	}
1434 	for (i = 0; i < raid->cols.cols_len; i++) {
1435 		if (cache_dependent(cache,
1436 		    raid->cols.cols_val[i].colnamep->bname, 0,
1437 		    antecedent) == NULL) {
1438 			return (ENOMEM);
1439 		}
1440 	}
1441 	return (0);
1442 }
1443 
1444 /*
1445  * cache_stripe()
1446  *
1447  *	Add a CONCAT or a STRIPE entry to the cache for the
1448  *	metadevice and call cache_dependent() to add each
1449  *	component to the cache.
1450  *
1451  *	Input:
1452  *		cache_t		*cache	cache
1453  *		mdnamelist_t	*nlp	pointer to stripe name
1454  *		md_stripe_t	*stripe
1455  *	Return:
1456  *		0 if successful or error code
1457  *
1458  */
1459 static int
1460 cache_stripe(cache_t *cache, mdnamelist_t *nlp, md_stripe_t *stripe)
1461 {
1462 	int i;
1463 	deventry_t	*antecedent;
1464 
1465 	if ((antecedent = cache_device(cache, nlp->namep->bname, SVM_CONCAT,
1466 	    nlp->namep->dev, 0)) == NULL) {
1467 		return (ENOMEM);
1468 	}
1469 
1470 	if (stripe->hspnamep) {
1471 		if (add_hsp_user(stripe->hspnamep->hspname,
1472 		    antecedent) == NULL) {
1473 			return (ENOMEM);
1474 		}
1475 	}
1476 	for (i = 0; i < stripe->rows.rows_len; i++) {
1477 		md_row_t	*rowp;
1478 		int		j;
1479 
1480 		rowp = &stripe->rows.rows_val[i];
1481 		if (stripe->rows.rows_len == 1 && rowp->comps.comps_len > 1) {
1482 			if ((void*) cache_device(cache, nlp->namep->bname,
1483 			    SVM_STRIPE, nlp->namep->dev, 0) == NULL)
1484 				return (ENOMEM);
1485 		}
1486 		for (j = 0; j < rowp->comps.comps_len; j++) {
1487 			md_comp_t	*component;
1488 
1489 			component = &rowp->comps.comps_val[j];
1490 			if (cache_dependent(cache,
1491 			    component->compnamep->bname, 0,
1492 			    antecedent) == NULL) {
1493 				return (ENOMEM);
1494 			}
1495 		}
1496 	}
1497 	return (0);
1498 }
1499 
1500 /*
1501  * cache_sp()
1502  *
1503  *	Add an entry to the cache for the softpart and also call
1504  *	cache_dependent() to set the CONT_SOFTPART flag in the
1505  *	cache entry for the metadevice that contains the softpart.
1506  *
1507  *	Input:
1508  *		cache_t		*cache	cache
1509  *		mdnamelist_t	*nlp	pointer to soft part name
1510  *		md_sp_t		*soft_part
1511  *	Return:
1512  *		0 if successful or error code
1513  *
1514  */
1515 static int
1516 cache_sp(cache_t *cache, mdnamelist_t *nlp, md_sp_t *soft_part)
1517 {
1518 	deventry_t	*antecedent;
1519 
1520 	if ((antecedent = cache_device(cache, nlp->namep->bname,
1521 	    SVM_SOFTPART, nlp->namep->dev, 0)) == NULL) {
1522 			    return (ENOMEM);
1523 	}
1524 	if (cache_dependent(cache, soft_part->compnamep->bname,
1525 	    CONT_SOFTPART, antecedent) == NULL) {
1526 		return (ENOMEM);
1527 	}
1528 	return (0);
1529 }
1530 
1531 /*
1532  *      cache_all_devices_in_set()
1533  *
1534  *      Add all of the metadevices and mddb replicas in the set to the
1535  *	cache
1536  *
1537  *      Input:
1538  *		cache_t		*cache		cache
1539  *		mdsetname_t	*sp		setname
1540  *      Return:
1541  *		0 if successful or error code
1542  */
1543 
1544 static int
1545 cache_all_devices_in_set(cache_t *cache, mdsetname_t *sp)
1546 {
1547 	md_error_t		error = mdnullerror;
1548 	md_replicalist_t	*replica_list = NULL;
1549 	md_replicalist_t	*mdbp;
1550 	mdnamelist_t		*nlp;
1551 	mdnamelist_t		*trans_list = NULL;
1552 	mdnamelist_t		*mirror_list = NULL;
1553 	mdnamelist_t		*raid_list = NULL;
1554 	mdnamelist_t		*stripe_list = NULL;
1555 	mdnamelist_t		*sp_list = NULL;
1556 	mdhspnamelist_t		*hsp_list = NULL;
1557 
1558 	rcm_log_message(RCM_TRACE1, "SVM: cache_all_devices_in_set\n");
1559 
1560 	/* Add each mddb replica to the cache */
1561 	if (metareplicalist(sp, MD_BASICNAME_OK, &replica_list, &error) < 0) {
1562 	    /* there are no metadb's; that is ok, no need to check the rest */
1563 	    mdclrerror(&error);
1564 	    return (0);
1565 	}
1566 
1567 	for (mdbp = replica_list; mdbp != NULL; mdbp = mdbp->rl_next) {
1568 		if (cache_device(cache, mdbp->rl_repp->r_namep->bname,
1569 		    SVM_SLICE, mdbp->rl_repp->r_namep->dev,
1570 		    CONT_METADB) == NULL) {
1571 			metafreereplicalist(replica_list);
1572 			return (ENOMEM);
1573 		}
1574 	}
1575 	metafreereplicalist(replica_list);
1576 
1577 	/* Process Hot Spare pools */
1578 	if (meta_get_hsp_names(sp, &hsp_list, 0, &error) >= 0) {
1579 	    mdhspnamelist_t *nlp;
1580 
1581 		for (nlp = hsp_list; nlp != NULL; nlp = nlp->next) {
1582 			md_hsp_t	*hsp;
1583 
1584 			hsp = meta_get_hsp(sp, nlp->hspnamep, &error);
1585 			if (hsp != NULL) {
1586 				if (cache_hsp(cache, nlp, hsp) != 0) {
1587 					metafreehspnamelist(hsp_list);
1588 					return (ENOMEM);
1589 				}
1590 			}
1591 			meta_invalidate_hsp(nlp->hspnamep);
1592 		}
1593 		metafreehspnamelist(hsp_list);
1594 	}
1595 
1596 	/* Process Trans devices */
1597 	if (meta_get_trans_names(sp, &trans_list, 0, &error) >= 0) {
1598 		for (nlp = trans_list; nlp != NULL; nlp = nlp->next) {
1599 			mdname_t	*mdn;
1600 			md_trans_t	*trans;
1601 
1602 			mdn = metaname(&sp, nlp->namep->cname, &error);
1603 			if (mdn == NULL) {
1604 				continue;
1605 			}
1606 
1607 			trans = meta_get_trans(sp, mdn, &error);
1608 
1609 			if (trans != NULL && trans->masternamep != NULL) {
1610 				if (cache_trans(cache, nlp, trans) != NULL) {
1611 					free_names(trans_list);
1612 					return (ENOMEM);
1613 				}
1614 			}
1615 		}
1616 		free_names(trans_list);
1617 	}
1618 
1619 	/* Process Mirrors */
1620 	if (meta_get_mirror_names(sp, &mirror_list, 0, &error) >= 0) {
1621 		for (nlp = mirror_list; nlp != NULL; nlp = nlp->next) {
1622 			mdname_t	*mdn;
1623 			md_mirror_t	*mirror;
1624 
1625 			mdn = metaname(&sp, nlp->namep->cname, &error);
1626 			if (mdn == NULL) {
1627 				continue;
1628 			}
1629 
1630 			mirror = meta_get_mirror(sp, mdn, &error);
1631 
1632 			if (mirror != NULL) {
1633 				if (cache_mirror(cache, nlp, mirror) != 0) {
1634 					free_names(mirror_list);
1635 					return (ENOMEM);
1636 				}
1637 			}
1638 		}
1639 		free_names(mirror_list);
1640 	}
1641 
1642 	/* Process Raid devices */
1643 	if (meta_get_raid_names(sp, &raid_list, 0, &error) >= 0) {
1644 		for (nlp = raid_list; nlp != NULL; nlp = nlp->next) {
1645 			mdname_t	*mdn;
1646 			md_raid_t	*raid;
1647 
1648 			mdn = metaname(&sp, nlp->namep->cname, &error);
1649 			if (mdn == NULL) {
1650 				continue;
1651 			}
1652 
1653 			raid = meta_get_raid(sp, mdn, &error);
1654 
1655 			if (raid != NULL) {
1656 				if (cache_raid(cache, nlp, raid) != 0) {
1657 					free_names(raid_list);
1658 					return (ENOMEM);
1659 				}
1660 			}
1661 		}
1662 		free_names(raid_list);
1663 	}
1664 
1665 	/* Process Slices */
1666 	if (meta_get_stripe_names(sp, &stripe_list, 0, &error) >= 0) {
1667 		for (nlp = stripe_list; nlp != NULL; nlp = nlp->next) {
1668 			mdname_t	*mdn;
1669 			md_stripe_t	*stripe;
1670 
1671 			mdn = metaname(&sp, nlp->namep->cname, &error);
1672 			if (mdn == NULL) {
1673 				continue;
1674 			}
1675 
1676 			stripe = meta_get_stripe(sp, mdn, &error);
1677 
1678 			if (stripe != NULL) {
1679 				if (cache_stripe(cache, nlp, stripe) != 0) {
1680 					free_names(stripe_list);
1681 					return (ENOMEM);
1682 				}
1683 			}
1684 		}
1685 		free_names(stripe_list);
1686 	}
1687 
1688 	/* Process Soft partitions */
1689 	if (meta_get_sp_names(sp, &sp_list, 0, &error) >= 0) {
1690 		for (nlp = sp_list; nlp != NULL; nlp = nlp->next) {
1691 			mdname_t	*mdn;
1692 			md_sp_t		*soft_part;
1693 
1694 			mdn = metaname(&sp, nlp->namep->cname, &error);
1695 			if (mdn == NULL) {
1696 				continue;
1697 			}
1698 
1699 			soft_part = meta_get_sp(sp, mdn, &error);
1700 
1701 			if (soft_part != NULL) {
1702 				if (cache_sp(cache, nlp, soft_part) != 0) {
1703 					free_names(sp_list);
1704 					return (ENOMEM);
1705 				}
1706 			}
1707 		}
1708 		free_names(sp_list);
1709 	}
1710 	mdclrerror(&error);
1711 	return (0);
1712 }
1713 
1714 /*
1715  *      create_all_devices()
1716  *
1717  *      Cache all devices in all sets
1718  *
1719  *      Input:
1720  *		cache_t		cache
1721  *      Return:
1722  *		0 if successful, error code if not
1723  *      Locking: None
1724  */
1725 static int
1726 cache_all_devices(cache_t *cache)
1727 {
1728 	int		max_sets;
1729 	md_error_t	error = mdnullerror;
1730 	int		i;
1731 
1732 	if ((max_sets = get_max_sets(&error)) == 0) {
1733 		return (0);
1734 	}
1735 	if (!mdisok(&error)) {
1736 		mdclrerror(&error);
1737 		return (0);
1738 	}
1739 
1740 	rcm_log_message(RCM_TRACE1,
1741 	    "SVM: cache_all_devices,max sets = %d\n", max_sets);
1742 	/* for each possible set number, see if we really have a diskset */
1743 	for (i = 0; i < max_sets; i++) {
1744 		mdsetname_t	*sp;
1745 
1746 		if ((sp = metasetnosetname(i, &error)) == NULL) {
1747 			rcm_log_message(RCM_TRACE1,
1748 			    "SVM: cache_all_devices no set: setno %d\n", i);
1749 			if (!mdisok(&error) &&
1750 			    ((error.info.errclass == MDEC_RPC) ||
1751 			    (mdiserror(&error, MDE_SMF_NO_SERVICE)))) {
1752 				/*
1753 				 * metad rpc program not available
1754 				 * - no metasets.  metad rpc not available
1755 				 * is indicated either by an RPC error or
1756 				 * the fact that the service is not
1757 				 * enabled.
1758 				 */
1759 				break;
1760 			}
1761 
1762 			continue;
1763 		}
1764 
1765 		if (cache_all_devices_in_set(cache, sp)) {
1766 			metaflushsetname(sp);
1767 			return (ENOMEM);
1768 		}
1769 		metaflushsetname(sp);
1770 	}
1771 	mdclrerror(&error);
1772 	rcm_log_message(RCM_TRACE1, "SVM: exit cache_all_devices\n");
1773 	return (0);
1774 }
1775 
1776 /*
1777  *      create_cache()
1778  *
1779  *      Create an empty cache
1780  *	If the function fails free_cache() will be called to free any
1781  *	allocated memory.
1782  *
1783  *      Input: None
1784  *      Return:
1785  *		cache_t		cache created
1786  *      Locking: None
1787  */
1788 static cache_t *
1789 create_cache()
1790 {
1791 	cache_t		*cache;
1792 	uint32_t	size;
1793 	int		ret;
1794 
1795 	size = HASH_DEFAULT;
1796 	/* try allocating storage for a new, empty cache */
1797 	if ((cache = (cache_t *)malloc(sizeof (cache_t))) == NULL) {
1798 		rcm_log_message(RCM_ERROR, MSG_CACHEFAIL);
1799 		return (NULL);
1800 	}
1801 
1802 	(void) memset((char *)cache, 0, sizeof (*cache));
1803 	cache->hashline = (deventry_t **)calloc(size, sizeof (deventry_t *));
1804 	if (cache->hashline == NULL) {
1805 		rcm_log_message(RCM_ERROR, MSG_CACHEFAIL);
1806 		free(cache);
1807 		return (NULL);
1808 	}
1809 	cache->size = size;
1810 
1811 	/* Initialise linked list of hsp entries */
1812 	hsp_head = NULL;
1813 
1814 	/* add entries to cache */
1815 	ret = cache_all_devices(cache);
1816 	if (ret != 0) {
1817 		free_cache(&cache);
1818 		return (NULL);
1819 	}
1820 
1821 	/* Mark the cache as new */
1822 	cache->registered = 0;
1823 
1824 	/* Finished - return the new cache */
1825 	return (cache);
1826 }
1827 
1828 /*
1829  *      create_deventry()
1830  *
1831  *      Create a new deventry entry for device with name devname
1832  *	The memory allocated here will be freed by free_cache()
1833  *
1834  *      Input:
1835  *		char		*devname	device name
1836  *		svm_type_t	devtype		metadevice type
1837  *		md_dev64_t	devkey		device key
1838  *		int		devflags	device flags
1839  *      Return:
1840  *		deventry_t	New deventry
1841  *      Locking: None
1842  */
1843 static deventry_t *
1844 create_deventry(char *devname, svm_type_t devtype, md_dev64_t devkey,
1845     int devflags)
1846 {
1847 	deventry_t	*newdeventry;
1848 	char		*newdevname;
1849 
1850 	newdeventry = (deventry_t *)malloc(sizeof (*newdeventry));
1851 	if (newdeventry == NULL) {
1852 		rcm_log_message(RCM_ERROR,
1853 		    gettext("SVM: can't malloc deventrys"));
1854 		return (NULL);
1855 	}
1856 	(void) memset((char *)newdeventry, 0, sizeof (*newdeventry));
1857 
1858 	newdevname = strdup(devname);
1859 	if (newdevname == NULL) {
1860 		rcm_log_message(RCM_ERROR,
1861 		    gettext("SVM: can't malloc devname"));
1862 		free(newdeventry);
1863 		return (NULL);
1864 	}
1865 	newdeventry->devname = newdevname;
1866 	newdeventry->devtype = devtype;
1867 	newdeventry->devkey = meta_cmpldev(devkey);
1868 	newdeventry->flags = devflags;
1869 	rcm_log_message(RCM_TRACE1,
1870 	    "SVM created deventry for %s\n", newdeventry->devname);
1871 	return (newdeventry);
1872 }
1873 
1874 /*
1875  *      cache_remove()
1876  *
1877  *      Given a cache and a deventry, the deventry is
1878  *      removed from the cache's tables and memory for the deventry is
1879  *      free'ed.
1880  *
1881  *      Input:
1882  *		cache_t		*cache		cache
1883  *		deventry_t	*deventry	deventry to be removed
1884  *      Return: None
1885  *      Locking: The cache must be locked by the caller prior to calling
1886  *      this routine.
1887  */
static void
cache_remove(cache_t *cache, deventry_t *deventry)
{
	deventry_t	*olddeventry;
	deventry_t	*previous;
	hspentry_t	*hspentry;
	hspentry_t	*oldhspentry;
	hspuser_t	*hspuser;
	hspuser_t	*oldhspuser;
	uint32_t	hash_index;

	/* sanity check */
	if (cache == NULL || deventry == NULL || deventry->devname == NULL)
		return;


	/* If this is in the hash table, remove it from there */
	hash_index = hash(cache->size, deventry->devname);
	if (hash_index >= cache->size) {
		rcm_log_message(RCM_ERROR,
		    gettext("SVM: can't hash device."));
		return;
	}
	/* Walk this slot's chain looking for a matching device name. */
	olddeventry = cache->hashline[hash_index];
	previous = NULL;
	while (olddeventry) {
		if (olddeventry->devname &&
		    strcmp(olddeventry->devname, deventry->devname) == 0) {
			break;
		}
		previous = olddeventry;
		olddeventry = olddeventry->next;
	}
	if (olddeventry) {
		/* Unlink the matched entry from the hash chain. */
		if (previous)
			previous->next = olddeventry->next;
		else
			cache->hashline[hash_index] = olddeventry->next;

		if (olddeventry->flags&IN_HSP) {
			/*
			 * If this is in a hot spare pool, remove the list
			 * of hot spare pools that it is in along with
			 * all of the volumes that are users of the pool
			 *
			 * NOTE(review): hspentry structures are also
			 * linked onto the global hsp_head list (see
			 * add_hsp()); they are freed here without being
			 * unlinked from that list — confirm against
			 * free_cache()/free_deventry() that this cannot
			 * leave hsp_head with dangling pointers.
			 */
			hspentry = olddeventry->hsp_list;
			while (hspentry) {
				oldhspentry = hspentry;
				/* free every user record of this pool */
				hspuser = hspentry->hspuser;
				while (hspuser) {
					oldhspuser = hspuser;
					free(hspuser->hspusername);
					hspuser = hspuser->next;
					free(oldhspuser);
				}
				free(hspentry->hspname);
				hspentry = hspentry->next;
				free(oldhspentry);
			}
		}
		/* finally release the entry's name and the entry itself */
		free(olddeventry->devname);
		free(olddeventry);
	}

}
1953 
1954 /*
1955  *      cache_lookup()
1956  *
1957  *      Return the deventry corresponding to devname from the cache
1958  *      Input:
1959  *		cache_t		cache		cache
1960  *		char		*devname	name to lookup in cache
1961  *      Return:
1962  *		deventry_t	deventry of name, NULL if not found
1963  *      Locking: cache lock held on entry and on exit
1964  */
1965 static deventry_t *
1966 cache_lookup(cache_t *cache, char *devname)
1967 {
1968 	int		comp;
1969 	uint32_t	hash_index;
1970 	deventry_t	*deventry;
1971 
1972 	hash_index = hash(cache->size, devname);
1973 	if (hash_index >= cache->size) {
1974 		rcm_log_message(RCM_ERROR,
1975 		    gettext("SVM: can't hash resource."));
1976 		return (NULL);
1977 	}
1978 
1979 	deventry = cache->hashline[hash_index];
1980 	while (deventry) {
1981 		comp = strcmp(deventry->devname, devname);
1982 		if (comp == 0)
1983 			return (deventry);
1984 		if (comp > 0)
1985 			return (NULL);
1986 		deventry = deventry->next;
1987 	}
1988 	return (NULL);
1989 }
1990 
1991 /*
1992  *      cache_sync()
1993  *
1994  *      Resync cache with the svm database
1995  *
1996  *      Input:
1997  *		rcm_handle_t	*hd		rcm handle
1998  *		cache_t		**cachep	pointer to cache
1999  *      Return:
2000  *		cache_t		**cachep	pointer to new cache
2001  *      Return: None
2002  *      Locking: The cache must be locked prior to entry
2003  */
static void
cache_sync(rcm_handle_t *hd, cache_t **cachep)
{
	char		*devicename;
	deventry_t	*deventry;
	cache_t		*new_cache;
	cache_t		*old_cache = *cachep;
	deventry_t	*hashline = NULL;
	uint32_t	i = 0;

	/* Get a new cache; on failure keep using the old cache as-is. */
	if ((new_cache = create_cache()) == NULL) {
		rcm_log_message(RCM_WARNING,
		    gettext("SVM: WARNING: couldn't re-cache."));
		return;
	}

	/* For every entry in the new cache... */
	while ((devicename = cache_walk(new_cache, &i, &hashline)) != NULL) {
		/* Look for this entry in the old cache */
		deventry = cache_lookup(old_cache, devicename);
		/*
		 * If no entry in old cache, register the resource. If there
		 * is an entry, but it is marked as removed, register it
		 * again and remove it from the old cache
		 */
		if (deventry == NULL) {
			register_device(hd, hashline->devname);
		} else {
			if (deventry->flags&REMOVED)
				register_device(hd, hashline->devname);
			/* matched: drop it so it won't be unregistered below */
			cache_remove(old_cache, deventry);
		}
	}

	/*
	 * For every device left in the old cache, just unregister if
	 * it has not already been removed
	 */
	i = 0;
	hashline = NULL;
	while ((devicename = cache_walk(old_cache, &i, &hashline)) != NULL) {
		if (!(hashline->flags&REMOVED)) {
			/* best-effort: ignore unregister failures */
			(void) rcm_unregister_interest(hd, devicename, 0);
		}
	}

	/* Swap pointers */
	*cachep = new_cache;

	/* Destroy old cache */
	free_cache(&old_cache);

	/* Mark the new cache as registered */
	new_cache-> registered = 1;
}
2060 
2061 /*
2062  * cache_walk()
2063  *
2064  *      Perform one step of a walk through the cache.  The i and hashline
2065  *      parameters are updated to store progress of the walk for future steps.
2066  *      They must all be initialized for the beginning of the walk
2067  *      (i = 0, line = NULL). Initialize variables to these values for these
2068  *      parameters, and then pass in the address of each of the variables
2069  *      along with the cache.  A NULL return value will be given to indicate
2070  *      when there are no more cached items to be returned.
2071  *
2072  *      Input:
2073  *		cache_t		*cache		cache
2074  *		uint32_t	*i		hash table index of prev entry
2075  *		deventry_t	**line		ptr to previous device entry
2076  *      Output:
2077  *		uint32_t	*i		updated hash table index
2078  *		deventry_t	**line		ptr to device entry
2079  *      Return:
2080  *		char*		device name (NULL for end of cache)
2081  *      Locking: The cache must be locked prior to calling this routine.
2082  */
2083 static char *
2084 cache_walk(cache_t *cache, uint32_t *i, deventry_t **line)
2085 {
2086 	uint32_t	j;
2087 
2088 	/* sanity check */
2089 	if (cache == NULL || i == NULL || line == NULL ||
2090 	    *i >= cache->size)
2091 		return (NULL);
2092 
2093 	/* if initial values were given, look for the first entry */
2094 	if (*i == 0 && *line == NULL) {
2095 		for (j = 0; j < cache->size; j++) {
2096 			if (cache->hashline[j]) {
2097 				*i = j;
2098 				*line = cache->hashline[j];
2099 				return ((*line)->devname);
2100 			}
2101 		}
2102 	} else {
2103 		/* otherwise, look for the next entry for this hash value */
2104 		if (*line && (*line)->next) {
2105 			*line = (*line)->next;
2106 			return ((*line)->devname);
2107 		} else {
2108 		/* next look further down in the hash table */
2109 			for (j = (*i) + 1; j < cache->size; j++) {
2110 				if (cache->hashline[j]) {
2111 					*i = j;
2112 					*line = cache->hashline[j];
2113 					return ((*line)->devname);
2114 				}
2115 			}
2116 		}
2117 	}
2118 
2119 	/*
2120 	 * We would have returned somewhere above if there were any more
2121 	 * entries.  So set the sentinel values and return a NULL.
2122 	 */
2123 	*i = cache->size;
2124 	*line = NULL;
2125 	return (NULL);
2126 }
2127 
2128 /*
2129  *      free_cache()
2130  *
2131  *      Given a pointer to a cache structure, this routine will free all
2132  *      of the memory allocated within the cache.
2133  *
2134  *      Input:
2135  *		cache_t		**cache		ptr to cache
2136  *      Return: None
2137  *      Locking: cache lock held on entry
2138  */
2139 static void
2140 free_cache(cache_t **cache)
2141 {
2142 	uint32_t	index;
2143 	cache_t		*realcache;
2144 
2145 	/* sanity check */
2146 	if (cache == NULL || *cache == NULL)
2147 		return;
2148 
2149 	/* de-reference the cache pointer */
2150 	realcache = *cache;
2151 
2152 	/* free the hash table */
2153 	for (index = 0; index < realcache->size; index++) {
2154 		free_deventry(&realcache->hashline[index]);
2155 	}
2156 	free(realcache->hashline);
2157 	realcache->hashline = NULL;
2158 
2159 	free(realcache);
2160 	*cache = NULL;
2161 }
2162 
2163 /*
2164  *      free_deventry()
2165  *
2166  *      This routine frees all of the memory allocated within a node of a
2167  *      deventry.
2168  *
2169  *      Input:
2170  *		deventry_t	**deventry	ptr to deventry
2171  *      Return: None
2172  *      Locking: cache lock held on entry
2173  */
2174 static void
2175 free_deventry(deventry_t **deventry)
2176 {
2177 	deventry_t	*olddeventry;
2178 	hspentry_t	*hspentry;
2179 	hspentry_t	*oldhspentry;
2180 	hspuser_t	*hspuser;
2181 	hspuser_t	*oldhspuser;
2182 
2183 	if (deventry != NULL) {
2184 		while (*deventry != NULL) {
2185 			olddeventry = (*deventry)->next;
2186 			if ((*deventry)->flags&IN_HSP) {
2187 				/*
2188 				 * If this is in a hot spare pool, remove the
2189 				 * memory allocated to hot spare pools and
2190 				 * the users of the pool
2191 				 */
2192 				hspentry = (*deventry)->hsp_list;
2193 				while (hspentry) {
2194 					oldhspentry = hspentry;
2195 					hspuser = hspentry->hspuser;
2196 					while (hspuser) {
2197 						oldhspuser = hspuser;
2198 						free(hspuser->hspusername);
2199 						hspuser = hspuser->next;
2200 						free(oldhspuser);
2201 					}
2202 					free(hspentry->hspname);
2203 					hspentry = hspentry->next;
2204 					free(oldhspentry);
2205 				}
2206 			}
2207 			free((*deventry)->devname);
2208 			free (*deventry);
2209 			*deventry = olddeventry;
2210 		}
2211 	}
2212 }
2213 
2214 /*
2215  *      hash()
2216  *
2217  *	A rotating hashing function that converts a string 's' to an index
2218  *      in a hash table of size 'h'.
2219  *
2220  *      Input:
2221  *		uint32_t	h		hash table size
2222  *		char		*s		string to be hashed
2223  *      Return:
2224  *		uint32_t	hash value
2225  *      Locking: None
2226  */
static uint32_t
hash(uint32_t h, char *s)
{
	size_t		len;
	size_t		i;
	uint32_t	hval;

	/*
	 * Rotating hash seeded with the string length.  All arithmetic
	 * is done in uint32_t: the previous implementation used a signed
	 * int, so the left shift could overflow (undefined behavior) and
	 * a negative value fed to '%' produced a negative remainder,
	 * which became a huge out-of-range index when converted back to
	 * the uint32_t return type.
	 */
	len = strlen(s);
	for (hval = (uint32_t)len, i = 0; i < len; ++i) {
		hval = (hval << 4) ^ (hval >> 28) ^ (uint32_t)s[i];
	}
	return (hval % h);
}
2241 
2242 /*
2243  *      register_device()
2244  *
2245  *      Register a device
2246  *
2247  *      Input:
2248  *		rcm_handle_t	*hd		rcm handle
2249  *		char		*devname	device name
2250  *      Return: None
2251  *      Locking: None
2252  */
2253 static void
2254 register_device(rcm_handle_t *hd, char *devname)
2255 {
2256 	/* Sanity check */
2257 	if (devname == NULL)
2258 		return;
2259 
2260 	rcm_log_message(RCM_TRACE1, "SVM: Registering %s(%d)\n", devname,
2261 		devname);
2262 
2263 	if (rcm_register_interest(hd, devname, 0, NULL) != RCM_SUCCESS) {
2264 		rcm_log_message(RCM_ERROR,
2265 		    gettext("SVM: failed to register \"%s\"\n"), devname);
2266 	}
2267 }
2268 
2269 /*
2270  *      add_dep()
2271  *
2272  *      Add an entry to an array of dependent names for a device. Used to
2273  *      build an array to call the rcm framework with when passing on a
2274  *      DR request.
2275  *
2276  *      Input:
2277  *		int		*ndeps		ptr to current number of deps
2278  *		char		***depsp	ptr to current dependent array
2279  *		deventry_t	*deventry	deventry of device to be added
2280  *      Output:
2281  *		int		*ndeps		ptr to updated no of deps
 *		char		***depsp	ptr to new dependent array
 *      Return:
 *		int		0 if ok, -1 if failed to allocate memory
2285  *      Locking: None
2286  */
2287 static int
2288 add_dep(int *ndeps, char ***depsp, deventry_t *deventry)
2289 {
2290 	char	**deps_new;
2291 
2292 	*ndeps += 1;
2293 	deps_new = realloc(*depsp, ((*ndeps) + 1) * sizeof (char  *));
2294 	if (deps_new == NULL) {
2295 		rcm_log_message(RCM_ERROR,
2296 		    gettext("SVM: cannot allocate dependent array (%s).\n"),
2297 		    strerror(errno));
2298 		return (-1);
2299 	}
2300 	deps_new[(*ndeps-1)] = deventry->devname;
2301 	deps_new[(*ndeps)] = NULL;
2302 	*depsp = deps_new;
2303 	return (0);
2304 }
2305 
2306 
2307 /*
2308  *      get_dependent()
2309  *
2310  *      Create a list of all dependents of a device
2311  *      Do not add dependent if it is marked as removed
2312  *
2313  *      Input:
2314  *		deventry_t	*deventry	device entry
2315  *      Output:
 *		char		***dependentsp	ptr to dependent list
2317  *      Return:
2318  *		int		0, if ok, -1 if failed
2319  *      Locking: None
2320  */
2321 static int
2322 get_dependents(deventry_t *deventry, char *** dependentsp)
2323 {
2324 	int		ndeps = 0;
2325 	deventry_t	*dependent;
2326 	char		**deps = NULL;
2327 
2328 
2329 	dependent = deventry->dependent;
2330 	if (dependent == NULL) {
2331 		*dependentsp = NULL;
2332 		return (0);
2333 	}
2334 	while (dependent != NULL) {
2335 		/*
2336 		 * do not add dependent if we have
2337 		 * already received a remove notifification
2338 		 */
2339 		if (!(dependent->flags&REMOVED))
2340 			if (add_dep(&ndeps, &deps, dependent) < 0)
2341 				return (-1);
2342 		dependent = dependent->next_dep;
2343 	}
2344 	if (ndeps == 0) {
2345 		*dependentsp = NULL;
2346 	} else {
2347 		*dependentsp = deps;
2348 	}
2349 	return (0);
2350 }
2351 
2352 /*
2353  *      add_to_usage()
2354  *      Add string to the usage string pointed at by usagep. Allocate memory
2355  *      for the new usage string and free the memory used by the original
2356  *      usage string
2357  *
2358  *      Input:
2359  *		char	**usagep	ptr to usage string
2360  *		char	*string		string to be added to usage
2361  *      Return:
2362  *		char	ptr to new usage string
2363  *      Locking: None
2364  */
char *
add_to_usage(char ** usagep, char *string)
{
	size_t	len;
	char	*new_usage;

	/*
	 * Size of the combined string: the existing usage text plus a
	 * ", " separator (when there is existing text), plus the new
	 * string and its terminating NUL.
	 */
	len = strlen(string) + 1;
	if (*usagep != NULL)
		len += strlen(*usagep) + 2;	/* room for ", " */

	new_usage = calloc(1, len);
	if (new_usage == NULL)
		return (NULL);

	/* copy the old text (releasing it) and append the separator */
	if (*usagep != NULL) {
		(void) strcpy(new_usage, *usagep);
		free(*usagep);
		(void) strcat(new_usage, ", ");
	}
	(void) strcat(new_usage, string);
	return (new_usage);
}
2387 
2388 /*
2389  *      add_to_usage_fmt()
2390  *
2391  *      Add a formatted string , of the form "blah %s" to the usage string
2392  *      pointed at by usagep. Allocate memory for the new usage string and free
2393  *      the memory used by the original usage string.
2394  *
2395  *      Input:
2396  *		char		**usagep	ptr to current usage string
2397  *		char		*fmt		format string
2398  *		char		*string		string to be added
2399  *      Return:
2400  *		char*		new usage string
2401  *      Locking: None
2402  */
/*PRINTFLIKE2*/
char *
add_to_usage_fmt(char **usagep, char *fmt, char *string)
{
	size_t	len;
	char	*formatted;
	char	*new_usage = NULL;

	/*
	 * Worst-case length: every character of fmt is kept and string
	 * is substituted for its %s conversion (the two bytes of "%s"
	 * provide slack), plus the terminating NUL.
	 */
	len = strlen(fmt) + strlen(string) + 1;
	formatted = calloc(1, len);
	if (formatted != NULL) {
		(void) sprintf(formatted, fmt, string);
		new_usage = add_to_usage(usagep, formatted);
		free(formatted);
	}
	return (new_usage);
}
2420 
2421 /*
2422  *      is_open()
2423  *
2424  *      Make ioctl call to find if a device is open
2425  *
2426  *      Input:
2427  *		dev_t 		devkey	dev_t for device
2428  *      Return:
2429  *		int		0 if not open,  !=0 if open
2430  *      Locking: None
2431  */
2432 static int
2433 is_open(dev_t devkey)
2434 {
2435 	int		fd;
2436 	md_isopen_t	isopen_ioc;
2437 
2438 	/* Open admin device */
2439 	if ((fd = open(ADMSPECIAL, O_RDONLY, 0)) < 0) {
2440 		rcm_log_message(RCM_ERROR, MSG_OPENERR, ADMSPECIAL);
2441 		return (0);
2442 	}
2443 
2444 	(void) memset(&isopen_ioc, 0, sizeof (isopen_ioc));
2445 	isopen_ioc.dev = devkey;
2446 	if (ioctl(fd, MD_IOCISOPEN, &isopen_ioc) < 0) {
2447 		(void) close(fd);
2448 		return (0);
2449 	}
2450 	(void) close(fd);
2451 	return (isopen_ioc.isopen);
2452 }
2453 
2454 /*
2455  *	check_softpart()
2456  *
2457  *	Check the status of the passed in device within the softpartition.
2458  *
2459  *	Input:
2460  *		mdsetname_t *	the name of the set
2461  *		mdname_t *	the softpartition device that is being examined
2462  *		char *		the device which needs to be checked
2463  *		md_error_t *	error pointer (not used)
2464  *	Return:
2465  *		int		REDUNDANT    - device is redundant and can be
2466  *					       removed
2467  *				NOTREDUNDANT - device cannot be removed
2468  *				NOTINDEVICE  - device is not part of this
2469  *					       component
2470  */
2471 static int
2472 check_softpart(mdsetname_t *sp, mdname_t *np, char *uname, md_error_t *ep)
2473 {
2474 	md_sp_t	*softp = NULL;
2475 
2476 	rcm_log_message(RCM_TRACE1, "SVM: softpart checking %s %s\n",
2477 	    np->bname, uname);
2478 
2479 	softp = meta_get_sp(sp, np, ep);
2480 
2481 	/* softp cannot be NULL, if it is then the RCM cache is corrupt */
2482 	assert(softp != NULL);
2483 
2484 	/*
2485 	 * if the softpartition is not a parent then nothing can be done, user
2486 	 * must close the device and then fix the under lying devices.
2487 	 */
2488 	if (!(MD_HAS_PARENT(softp->common.parent))) {
2489 		rcm_log_message(RCM_TRACE1,
2490 		    "SVM: softpart is a top level device\n");
2491 		return (NOTREDUNDANT);
2492 	}
2493 
2494 	if (strcmp(softp->compnamep->bname, uname) != 0) {
2495 		/*
2496 		 * This can occur if this function has been called by the
2497 		 * check_raid5 code as it is cycling through each column
2498 		 * in turn.
2499 		 */
2500 		rcm_log_message(RCM_TRACE1,
2501 		    "SVM: %s is not in softpart (%s)\n",
2502 		    uname, softp->compnamep->bname);
2503 		return (NOTINDEVICE);
2504 	}
2505 
2506 	/*
2507 	 * Check the status of the soft partition this only moves from
2508 	 * an okay state if the underlying devices fails while the soft
2509 	 * partition is open.
2510 	 */
2511 	if (softp->status != MD_SP_OK) {
2512 		rcm_log_message(RCM_TRACE1,
2513 		    "SVM: softpart is broken (state: 0x%x)\n",
2514 		    softp->status);
2515 		return (REDUNDANT);
2516 	}
2517 
2518 	return (NOTREDUNDANT);
2519 }
2520 
2521 /*
2522  *	check_raid5()
2523  *
2524  *	Check the status of the passed in device within the raid5 in question.
2525  *
2526  *	Input:
2527  *		mdsetname_t *	the name of the set
2528  *		mdname_t *	the raid5 device that is being examined
2529  *		char *		the device which needs to be checked
2530  *		md_error_t *	error pointer (not used)
2531  *	Return:
2532  *		int		REDUNDANT    - device is redundant and can be
2533  *					       removed
2534  *				NOTREDUNDANT - device cannot be removed
2535  */
static int
check_raid5(mdsetname_t *sp, mdname_t *np, char *uname, md_error_t *ep)
{
	md_raid_t	*raidp = NULL;
	md_raidcol_t	*colp = NULL;
	int		i;
	int		rval = 0;

	rcm_log_message(RCM_TRACE1, "SVM: raid5 checking %s %s\n",
	    np->bname, uname);

	raidp = meta_get_raid(sp, np, ep);

	/* raidp cannot be NULL, if it is then the RCM cache is corrupt */
	assert(raidp != NULL);

	/*
	 * Now check each column in the device. We cannot rely upon the state
	 * of the device because if a hotspare is in use all the states are
	 * set to Okay, both at the metadevice layer and the column layer.
	 */
	for (i = 0; (i < raidp->cols.cols_len); i++) {
		colp = &raidp->cols.cols_val[i];
		np = colp->colnamep;

		rcm_log_message(RCM_TRACE1,
		    "SVM: raid5 checking %s state %s 0x%x\n",
		    np->bname, raid_col_state_to_name(colp, NULL, 0),
		    colp->state);

		/*
		 * It is possible for the column to be a soft partition,
		 * so need to check the soft partition if this is the
		 * case. It is *not* valid for the column to be a
		 * stripe/concat/mirror, and so no check to see what
		 * type of metadevice is being used.
		 */
		if (metaismeta(np)) {
			/* this is a metadevice ie a soft partition */
			rval = check_softpart(sp, np, uname, ep);
			if (rval == REDUNDANT) {
				/* underlying device already broken */
				rcm_log_message(RCM_TRACE1,
				    "SVM: raid5 %s is broken\n", uname);
				meta_invalidate_name(np);
				return (REDUNDANT);
			} else if (rval == NOTREDUNDANT &&
			    colp->hsnamep != NULL) {
				/* column is intact but a hotspare is active */
				rcm_log_message(RCM_TRACE1,
				    "SVM: raid5 device is broken, hotspared\n");
				meta_invalidate_name(np);
				return (REDUNDANT);
			}
			/*
			 * NOTINDEVICE (uname not in this soft partition)
			 * falls through here: move on to the next column.
			 * Invalidate the libmeta name cache first so the
			 * next lookup sees fresh state.
			 */
			meta_invalidate_name(np);
			continue;
		}
		meta_invalidate_name(np);

		/* skip columns that are not the device being checked */
		if (strcmp(uname, np->bname) != 0)
			continue;

		/*
		 * Found the device. Check if it is broken or hotspared.
		 */
		if (colp->state & RUS_ERRED) {
			rcm_log_message(RCM_TRACE1,
			    "SVM: raid5 column device is broken\n");
			return (REDUNDANT);
		}

		if (colp->hsnamep != NULL) {
			rcm_log_message(RCM_TRACE1,
			    "SVM: raid5 column device is broken, hotspared\n");
			return (REDUNDANT);
		}
	}
	return (NOTREDUNDANT);
}
2613 
2614 /*
2615  *	check_stripe()
2616  *
2617  *	Check the status of the passed in device within the stripe in question.
2618  *
2619  *	Input:
2620  *		mdsetname_t *	the name of the set
2621  *		mdname_t *	the stripe that is being examined
2622  *		char *		the device which needs to be checked
2623  *		md_error_t *	error pointer (not used)
2624  *	Return:
2625  *		int		REDUNDANT    - device is redundant and can be
2626  *					       removed
2627  *				NOTREDUNDANT - device cannot be removed
2628  *				NOTINDEVICE  - device is not part of this
2629  *					       component
2630  */
static int
check_stripe(mdsetname_t *sp, mdname_t *np, char *uname, md_error_t *ep)
{
	md_stripe_t	*stripep = NULL;
	md_row_t	*mrp = NULL;
	md_comp_t	*mcp;
	mdname_t	*pnp;
	char		*miscname;
	int		row;
	int		col;

	rcm_log_message(RCM_TRACE1, "SVM: concat/stripe checking %s %s\n",
	    np->bname, uname);
	stripep = meta_get_stripe(sp, np, ep);

	/* stripep cannot be NULL, if it is then the RCM cache is corrupt */
	assert(stripep != NULL);

	/*
	 * If the stripe is not a parent then nothing can be done, user
	 * must close the device and then fix the devices.
	 */
	if (!(MD_HAS_PARENT(stripep->common.parent))) {
		rcm_log_message(RCM_TRACE1,
		    "SVM: stripe is a top level device\n");
		return (NOTREDUNDANT);
	}

	pnp = metamnumname(&sp, stripep->common.parent, 0, ep);

	if (pnp == NULL) {
		/*
		 * Only NULL when the replicas are in an inconsistent state
		 * ie the device says it is the parent of X but X does not
		 * exist.
		 */
		rcm_log_message(RCM_TRACE1, "SVM: parent is not configured\n");
		return (NOTREDUNDANT);
	}

	/*
	 * Get the type of the parent and make sure that it is a mirror,
	 * if it is then need to find out the number of submirrors, and
	 * if it is not a mirror then this is not a REDUNDANT device.
	 */
	if ((miscname = metagetmiscname(pnp, ep)) == NULL) {
		/*
		 * Again something is wrong with the configuration.
		 */
		rcm_log_message(RCM_TRACE1,
		    "SVM: unable to find the type of %s\n", pnp->cname);
		meta_invalidate_name(pnp);
		return (NOTREDUNDANT);
	}

	/*
	 * Redundancy requires the parent to be a mirror AND that mirror
	 * to have more than one submirror (check_mirror()).
	 */
	if (!(strcmp(miscname, MD_MIRROR) == 0 &&
	    check_mirror(sp, pnp, ep) == REDUNDANT)) {
		rcm_log_message(RCM_TRACE1,
		    "SVM: %s is a %s and not redundant\n",
		    pnp->cname, miscname);
		meta_invalidate_name(pnp);
		return (NOTREDUNDANT);
	}

	/* drop the libmeta cached state for the parent before scanning */
	meta_invalidate_name(pnp);

	/* walk each row of the stripe looking for the component (uname) */
	for (row = 0; row < stripep->rows.rows_len; row++) {
		mrp = &stripep->rows.rows_val[row];

		/* now the components in the row */
		for (col = 0; col < mrp->comps.comps_len; col++) {
			mcp = &mrp->comps.comps_val[col];

			rcm_log_message(RCM_TRACE1,
			    "SVM: stripe comp %s check\n",
			    mcp->compnamep->bname);

			if (strcmp(mcp->compnamep->bname, uname) != 0)
				continue;

			rcm_log_message(RCM_TRACE1,
			    "SVM: component state: %s\n",
			    comp_state_to_name(mcp, NULL, 0));

			if (mcp->hsnamep != NULL) {
				/* device is broken and hotspared */
				rcm_log_message(RCM_TRACE1,
				    "SVM: stripe %s broken, hotspare active\n",
				    uname);
				return (REDUNDANT);
			}

			/*
			 * LAST_ERRED is a special case.  If the state of a
			 * component is CS_LAST_ERRED then this is the last
			 * copy of the data and we need to keep using it, even
			 * though we had errors.  Thus, we must block the DR
			 * request.  If you follow the documented procedure for
			 * fixing each component (fix devs in maintenance
			 * before last erred) then the mirror will
			 * automatically transition Last Erred components to
			 * the Erred state after which they can be DRed out.
			 */
			if (mcp->state == CS_ERRED) {
				/* device is broken */
				rcm_log_message(RCM_TRACE1,
				    "SVM: stripe %s is broken\n", uname);
				return (REDUNDANT);
			}

			/*
			 * Short circuit - if here the component has been
			 * found in the column so no further processing is
			 * required here.
			 */
			return (NOTREDUNDANT);
		}
	}

	/*
	 * Only get to this point if the device (uname) has not been
	 * found in the stripe. This means that there is something
	 * wrong with the device dependency list.
	 */
	rcm_log_message(RCM_TRACE1,
	    "SVM: component %s is not part of %s\n",
	    uname, np->bname);

	return (NOTINDEVICE);
}
2761 
2762 /*
2763  *	check_mirror()
2764  *
 *	Check that the mirror has more than one submirror.
2766  *
2767  *	Input:
2768  *		mdsetname_t *	the name of the set
2769  *		mdname_t *	the stripe that is being examined
2770  *	Return:
2771  *		int		REDUNDANT    - mirror > 1 submirrors
2772  *				NOTREDUNDANT - mirror has 1 submirror
2773  */
2774 static int
2775 check_mirror(mdsetname_t *sp, mdname_t *np, md_error_t *ep)
2776 {
2777 	uint_t		nsm = 0;	/* number of submirrors */
2778 	uint_t		smi = 0;	/* index into submirror array */
2779 	md_mirror_t	*mirrorp = NULL;
2780 
2781 	rcm_log_message(RCM_TRACE1, "SVM: mirror checking %s\n", np->bname);
2782 	mirrorp = meta_get_mirror(sp, np, ep);
2783 
2784 	/* mirrorp cannot be NULL, if it is then the RCM cache is corrupt */
2785 	assert(mirrorp != NULL);
2786 
2787 	/*
2788 	 * Need to check how many submirrors that the mirror has.
2789 	 */
2790 	for (smi = 0, nsm = 0; (smi < NMIRROR); ++smi) {
2791 		md_submirror_t	*mdsp = &mirrorp->submirrors[smi];
2792 		mdname_t	*submirnamep = mdsp->submirnamep;
2793 
2794 		/* Is this submirror being used ?  No, then continue */
2795 		if (submirnamep == NULL)
2796 			continue;
2797 		nsm++;
2798 	}
2799 
2800 	/*
2801 	 * If there is only one submirror then there is no redundancy
2802 	 * in the configuration and the user needs to take some other
2803 	 * action before using cfgadm on the device ie close the metadevice.
2804 	 */
2805 	if (nsm == 1) {
2806 		rcm_log_message(RCM_TRACE1,
2807 		    "SVM: only one submirror unable to allow action\n");
2808 		return (NOTREDUNDANT);
2809 	}
2810 
2811 	return (REDUNDANT);
2812 }
2813 
2814 /*
2815  *	check_device()
2816  *
2817  *	Check the current status of the underlying device.
2818  *
2819  *	Input:
2820  *		deventry_t *	the device that is being checked
2821  *	Return:
2822  *		int		REDUNDANT    - device is redundant and can be
2823  *					       removed
2824  *				NOTREDUNDANT - device cannot be removed
2825  *	Locking:
2826  *		None
2827  *
2828  * The check_device code path (the functions called by check_device) use
2829  * libmeta calls directly to determine if the specified device is
2830  * redundant or not.  The can lead to conflicts between data cached in
2831  * libmeta and data that is being cached by this rcm module.  Since the
2832  * rcm cache is our primary source of information here, we need to make
2833  * sure that we are not getting stale data from the libmeta caches.
2834  * We use meta_invalidate_name throughout this code path to clear the
2835  * cached data in libmeta in order to ensure that we are not using stale data.
2836  */
2837 static int
2838 check_device(deventry_t *deventry)
2839 {
2840 	mdsetname_t	*sp;
2841 	md_error_t	error = mdnullerror;
2842 	char		sname[BUFSIZ+1];
2843 	uint32_t	d;
2844 	mdname_t	*np;
2845 	deventry_t	*dependent;
2846 	int		rval = NOTREDUNDANT;
2847 	int		ret;
2848 
2849 	dependent = deventry->dependent;
2850 
2851 	rcm_log_message(RCM_TRACE1, "SVM: check_device(%s)\n",
2852 	    deventry->devname);
2853 	/*
2854 	 * should not be null because the caller has already figured out
2855 	 * there are dependent devices.
2856 	 */
2857 	assert(dependent != NULL);
2858 
2859 	do {
2860 
2861 		rcm_log_message(RCM_TRACE1, "SVM: check dependent: %s\n",
2862 		    dependent->devname);
2863 
2864 		if (dependent->flags & REMOVED) {
2865 			dependent = dependent->next_dep;
2866 			continue;
2867 		}
2868 
2869 		/*
2870 		 * The device *should* be a metadevice and so need to see if
2871 		 * it contains a setname.
2872 		 */
2873 		ret = sscanf(dependent->devname,
2874 		    "/dev/md/%" VAL2STR(BUFSIZ) "[^/]/dsk/d%u",
2875 		    sname, &d);
2876 
2877 		if (ret != 2)
2878 			(void) strcpy(sname, MD_LOCAL_NAME);
2879 
2880 		if ((sp = metasetname(sname, &error)) == NULL) {
2881 			rcm_log_message(RCM_TRACE1,
2882 			    "SVM: unable to get setname for \"%s\", error %s\n",
2883 			    sname, mde_sperror(&error, ""));
2884 			break;
2885 		}
2886 
2887 		rcm_log_message(RCM_TRACE1, "SVM: processing: %s\n",
2888 		    dependent->devname);
2889 
2890 		np = metaname(&sp, dependent->devname, &error);
2891 
2892 		switch (dependent->devtype) {
2893 		case SVM_TRANS:
2894 			/*
2895 			 * No code to check trans devices because ufs logging
2896 			 * should be being used.
2897 			 */
2898 			rcm_log_message(RCM_TRACE1,
2899 			    "SVM: Use UFS logging instead of trans devices\n");
2900 			break;
2901 		case SVM_SLICE:
2902 		case SVM_STRIPE:
2903 		case SVM_CONCAT:
2904 			rval = check_stripe(sp, np, deventry->devname, &error);
2905 			break;
2906 		case SVM_MIRROR:
2907 			/*
2908 			 * No check here as this is performed by the one
2909 			 * above when the submirror is checked.
2910 			 */
2911 			rcm_log_message(RCM_TRACE1,
2912 			    "SVM: Mirror check is done by the stripe check\n");
2913 			break;
2914 		case SVM_RAID:
2915 			/*
2916 			 * Raid5 devices can be built on soft partitions or
2917 			 * slices and so the check here is for the raid5
2918 			 * device built on top of slices. Note, a raid5 cannot
2919 			 * be built on a stripe/concat.
2920 			 */
2921 			rval = check_raid5(sp, np, deventry->devname, &error);
2922 			break;
2923 		case SVM_SOFTPART:
2924 			/*
2925 			 * Raid5 devices can be built on top of soft partitions
2926 			 * and so they have to be checked.
2927 			 */
2928 			rval = check_softpart(sp, np, deventry->devname,
2929 			    &error);
2930 			break;
2931 		default:
2932 			rcm_log_message(RCM_TRACE1,
2933 			    "SVM: unknown devtype: %d\n", dependent->devtype);
2934 			break;
2935 		}
2936 
2937 		meta_invalidate_name(np);
2938 
2939 		if (rval == REDUNDANT)
2940 			break;
2941 	} while ((dependent = dependent->next_dep) != NULL);
2942 
2943 	rcm_log_message(RCM_TRACE1, "SVM: check_device return %d\n", rval);
2944 	return (rval);
2945 }
2946