xref: /titanic_41/usr/src/cmd/rcm_daemon/common/rcm_subr.c (revision 2449e17f82f6097fd2c665b64723e31ceecbeca6)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  *
22  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include "rcm_impl.h"
29 #include "rcm_module.h"
30 
31 /*
32  * Short-circuits unloading of modules with no registrations, so that
33  * they are present during the next db_sync cycle.
34  */
35 #define	MOD_REFCNT_INIT		2
36 
37 int need_cleanup;	/* flag indicating if clean up is needed */
38 
39 static mutex_t mod_lock;	/* protects module list */
40 static module_t *module_head;	/* linked list of modules */
41 static rsrc_node_t *rsrc_root;	/* root of all resources */
42 
43 /*
44  * Misc help routines
45  */
46 static void rcmd_db_print();
47 static void rcm_handle_free(rcm_handle_t *);
48 static rcm_handle_t *rcm_handle_alloc(module_t *);
49 static void rsrc_clients_free(client_t *);
50 static struct rcm_mod_ops *modops_from_v1(void *);
51 static int call_getinfo(struct rcm_mod_ops *, rcm_handle_t *, char *, id_t,
52     uint_t, char **, char **, nvlist_t *, rcm_info_t **);
53 static int node_action(rsrc_node_t *, void *);
54 
55 extern void start_polling_thread();
56 
57 /*
58  * translate /dev name to a /devices path
59  *
60  * N.B. This routine can be enhanced to understand network names
61  *	and friendly names in the future.
62  */
63 char *
64 resolve_name(char *alias)
65 {
66 	char *tmp;
67 	const char *dev = "/dev/";
68 
69 	if (strlen(alias) == 0)
70 		return (NULL);
71 
72 	if (strncmp(alias, dev, strlen(dev)) == 0) {
73 		/*
74 		 * Treat /dev/... as a symbolic link
75 		 */
76 		tmp = s_malloc(PATH_MAX);
77 		if (realpath(alias, tmp) != NULL) {
78 			return (tmp);
79 		} else {
80 			free(tmp);
81 		}
82 		/* Fail to resolve /dev/ name, use the name as is */
83 	}
84 
85 	return (s_strdup(alias));
86 }
87 
88 /*
89  * Figure out resource type based on "resolved" name
90  *
91  * N.B. This routine does not figure out file system mount points.
 *	This is determined at runtime when the filesys module registers
93  *	with RCM_FILESYS flag.
94  */
95 int
96 rsrc_get_type(const char *resolved_name)
97 {
98 	if (resolved_name[0] != '/')
99 		return (RSRC_TYPE_ABSTRACT);
100 
101 	if (strncmp("/devices/", resolved_name, 9) == 0)
102 		return (RSRC_TYPE_DEVICE);
103 
104 	return (RSRC_TYPE_NORMAL);
105 }
106 
107 /*
108  * Module operations:
109  *	module_load, module_unload, module_info, module_attach, module_detach,
110  *	cli_module_hold, cli_module_rele
111  */
112 
113 #ifdef	ENABLE_MODULE_DETACH
114 /*
115  * call unregister() entry point to allow module to unregister for
116  * resources without getting confused.
117  */
118 static void
119 module_detach(module_t *module)
120 {
121 	struct rcm_mod_ops *ops = module->modops;
122 
123 	rcm_log_message(RCM_TRACE2, "module_detach(name=%s)\n", module->name);
124 
125 	ops->rcmop_unregister(module->rcmhandle);
126 }
127 #endif	/* ENABLE_MODULE_DETACH */
128 
129 /*
130  * call register() entry point to allow module to register for resources
131  */
132 static void
133 module_attach(module_t *module)
134 {
135 	struct rcm_mod_ops *ops = module->modops;
136 
137 	rcm_log_message(RCM_TRACE2, "module_attach(name=%s)\n", module->name);
138 
139 	if (ops->rcmop_register(module->rcmhandle) != RCM_SUCCESS) {
140 		rcm_log_message(RCM_WARNING,
141 		    gettext("module %s register() failed\n"), module->name);
142 	}
143 }
144 
145 struct rcm_mod_ops *
146 module_init(module_t *module)
147 {
148 	if (module->dlhandle)
149 		/* rcm module */
150 		return (module->init());
151 	else
152 		/* rcm script */
153 		return (script_init(module));
154 }
155 
156 /*
157  * call rmc_mod_info() entry of module
158  */
159 static const char *
160 module_info(module_t *module)
161 {
162 	if (module->dlhandle)
163 		/* rcm module */
164 		return (module->info());
165 	else
166 		/* rcm script */
167 		return (script_info(module));
168 }
169 
170 int
171 module_fini(module_t *module)
172 {
173 	if (module->dlhandle)
174 		/* rcm module */
175 		return (module->fini());
176 	else
177 		/* rcm script */
178 		return (script_fini(module));
179 }
180 
181 /*
182  * call rmc_mod_fini() entry of module, dlclose module, and free memory
183  */
184 static void
185 module_unload(module_t *module)
186 {
187 	int version = module->modops->version;
188 
189 	rcm_log_message(RCM_DEBUG, "module_unload(name=%s)\n", module->name);
190 
191 	(void) module_fini(module);
192 
193 	rcm_handle_free(module->rcmhandle);
194 	free(module->name);
195 
196 	switch (version) {
197 	case RCM_MOD_OPS_V1:
198 		/*
199 		 * Free memory associated with converted ops vector
200 		 */
201 		free(module->modops);
202 		break;
203 
204 	case RCM_MOD_OPS_VERSION:
205 	default:
206 		break;
207 	}
208 
209 	if (module->dlhandle)
210 		rcm_module_close(module->dlhandle);
211 
212 	free(module);
213 }
214 
215 /*
216  * Locate the module, execute rcm_mod_init() and check ops vector version
217  */
static module_t *
module_load(char *modname)
{
	module_t *module;

	rcm_log_message(RCM_DEBUG, "module_load(name=%s)\n", modname);

	/*
	 * Allocate and initialize the module structure, then dlopen the
	 * module (scripts have no dlopen handle or entry points).
	 */
	module = s_calloc(1, sizeof (*module));
	module->name = s_strdup(modname);
	module->modops = NULL;
	rcm_init_queue(&module->client_q);

	if (rcm_is_script(modname) == 0) {
		/* rcm module */
		module->dlhandle = rcm_module_open(modname);

		if (module->dlhandle == NULL) {
			rcm_log_message(RCM_NOTICE,
				gettext("cannot open module %s\n"), modname);
			goto fail;
		}

		/*
		 * dlsym rcm_mod_init/fini/info() entry points; all three
		 * are mandatory for a loadable module.
		 */
		module->init = (struct rcm_mod_ops *(*)())dlsym(
					module->dlhandle, "rcm_mod_init");
		module->fini = (int (*)())dlsym(
					module->dlhandle, "rcm_mod_fini");
		module->info = (const char *(*)())dlsym(module->dlhandle,
		    "rcm_mod_info");
		if (module->init == NULL || module->fini == NULL ||
		    module->info == NULL) {
			rcm_log_message(RCM_ERROR,
			    gettext("missing entries in module %s\n"), modname);
			goto fail;
		}

	} else {
		/* rcm script: dispatched via script_init/fini/info instead */
		module->dlhandle = NULL;
		module->init = (struct rcm_mod_ops *(*)()) NULL;
		module->fini = (int (*)()) NULL;
		module->info = (const char *(*)()) NULL;
	}

	if ((module->modops = module_init(module)) == NULL) {
		if (module->dlhandle)
			rcm_log_message(RCM_ERROR,
				gettext("cannot init module %s\n"), modname);
		goto fail;
	}

	/*
	 * Check ops vector version; a v1 vector is converted (allocated)
	 * by modops_from_v1() and is then owned by the daemon.
	 */
	switch (module->modops->version) {
	case RCM_MOD_OPS_V1:
		module->modops = modops_from_v1((void *)module->modops);
		break;

	case RCM_MOD_OPS_VERSION:
		break;

	default:
		rcm_log_message(RCM_ERROR,
		    gettext("module %s rejected: version %d not supported\n"),
		    modname, module->modops->version);
		(void) module_fini(module);
		goto fail;
	}

	/*
	 * Make sure all fields are set
	 */
	if ((module->modops->rcmop_register == NULL) ||
	    (module->modops->rcmop_unregister == NULL) ||
	    (module->modops->rcmop_get_info == NULL) ||
	    (module->modops->rcmop_request_suspend == NULL) ||
	    (module->modops->rcmop_notify_resume == NULL) ||
	    (module->modops->rcmop_request_offline == NULL) ||
	    (module->modops->rcmop_notify_online == NULL) ||
	    (module->modops->rcmop_notify_remove == NULL)) {
		rcm_log_message(RCM_ERROR,
		    gettext("module %s rejected: has NULL ops fields\n"),
		    modname);
		(void) module_fini(module);
		goto fail;
	}

	module->rcmhandle = rcm_handle_alloc(module);
	return (module);

	/*
	 * Unified failure cleanup: free a daemon-owned (converted v1) ops
	 * vector, close the dlopen handle, and release the structure.
	 */
fail:
	if (module->modops && module->modops->version == RCM_MOD_OPS_V1)
		free(module->modops);

	if (module->dlhandle)
		rcm_module_close(module->dlhandle);

	free(module->name);
	free(module);
	return (NULL);
}
325 
326 /*
327  * add one to module hold count. load the module if not loaded
328  */
329 static module_t *
330 cli_module_hold(char *modname)
331 {
332 	module_t *module;
333 
334 	rcm_log_message(RCM_TRACE3, "cli_module_hold(%s)\n", modname);
335 
336 	(void) mutex_lock(&mod_lock);
337 	module = module_head;
338 	while (module) {
339 		if (strcmp(module->name, modname) == 0) {
340 			break;
341 		}
342 		module = module->next;
343 	}
344 
345 	if (module) {
346 		module->ref_count++;
347 		(void) mutex_unlock(&mod_lock);
348 		return (module);
349 	}
350 
351 	/*
352 	 * Module not found, attempt to load it
353 	 */
354 	if ((module = module_load(modname)) == NULL) {
355 		(void) mutex_unlock(&mod_lock);
356 		return (NULL);
357 	}
358 
359 	/*
360 	 * Hold module and link module into module list
361 	 */
362 	module->ref_count = MOD_REFCNT_INIT;
363 	module->next = module_head;
364 	module_head = module;
365 
366 	(void) mutex_unlock(&mod_lock);
367 
368 	return (module);
369 }
370 
371 /*
372  * decrement module hold count. Unload it if no reference
373  */
374 static void
375 cli_module_rele(module_t *module)
376 {
377 	module_t *curr = module_head, *prev = NULL;
378 
379 	rcm_log_message(RCM_TRACE3, "cli_module_rele(name=%s)\n", module->name);
380 
381 	(void) mutex_lock(&mod_lock);
382 	if (--(module->ref_count) != 0) {
383 		(void) mutex_unlock(&mod_lock);
384 		return;
385 	}
386 
387 	rcm_log_message(RCM_TRACE2, "unloading module %s\n", module->name);
388 
389 	/*
390 	 * Unlink the module from list
391 	 */
392 	while (curr && (curr != module)) {
393 		prev = curr;
394 		curr = curr->next;
395 	}
396 	if (curr == NULL) {
397 		rcm_log_message(RCM_ERROR,
398 		    gettext("Unexpected error: module %s not found.\n"),
399 		    module->name);
400 	} else if (prev == NULL) {
401 		module_head = curr->next;
402 	} else {
403 		prev->next = curr->next;
404 	}
405 	(void) mutex_unlock(&mod_lock);
406 
407 	module_unload(module);
408 }
409 
/*
 * Gather usage info to be passed back to requester. Discard info if user
 * does not care (list == NULL).
 *
 * Builds a new rcm_info_t from the given alias/pid/state/etc. and links
 * it at the tail of *list.  Any nvlist failure is fatal to the daemon
 * (rcmd_exit), so on return the entry has always been appended.
 */
void
add_busy_rsrc_to_list(char *alias, pid_t pid, int state, int seq_num,
    char *modname, const char *infostr, const char *errstr,
    nvlist_t *client_props, rcm_info_t **list)
{
	rcm_info_t *info;
	rcm_info_t *tmp;
	char *buf = NULL;
	size_t buflen = 0;

	/* caller is not interested in usage info */
	if (list == NULL) {
		return;
	}

	info = s_calloc(1, sizeof (*info));
	/* NOTE: assignments to errno inside conditions are intentional */
	if (errno = nvlist_alloc(&(info->info), NV_UNIQUE_NAME, 0)) {
		rcm_log_message(RCM_ERROR, "failed (nvlist_alloc=%s).\n",
		    strerror(errno));
		rcmd_exit(errno);
	}

	/*LINTED*/
	if ((errno = nvlist_add_string(info->info, RCM_RSRCNAME, alias)) ||
	    (errno = nvlist_add_int32(info->info, RCM_SEQ_NUM, seq_num)) ||
	    (errno = nvlist_add_int64(info->info, RCM_CLIENT_ID, pid)) ||
	    (errno = nvlist_add_int32(info->info, RCM_RSRCSTATE, state))) {
		rcm_log_message(RCM_ERROR, "failed (nvlist_add=%s).\n",
		    strerror(errno));
		rcmd_exit(errno);
	}

	/*
	 * Daemon calls to add_busy_rsrc_to_list may pass in
	 * error/info. Add these through librcm interfaces.
	 */
	if (errstr) {
		rcm_log_message(RCM_TRACE3, "adding error string: %s\n",
		    errstr);
		if (errno = nvlist_add_string(info->info, RCM_CLIENT_ERROR,
		    (char *)errstr)) {
			rcm_log_message(RCM_ERROR, "failed (nvlist_add=%s).\n",
			    strerror(errno));
			rcmd_exit(errno);
		}
	}

	if (infostr) {
		if (errno = nvlist_add_string(info->info, RCM_CLIENT_INFO,
		    (char *)infostr)) {
			rcm_log_message(RCM_ERROR, "failed (nvlist_add=%s).\n",
			    strerror(errno));
			rcmd_exit(errno);
		}
	}

	if (modname) {
		if (errno = nvlist_add_string(info->info, RCM_CLIENT_MODNAME,
		    modname)) {
			rcm_log_message(RCM_ERROR, "failed (nvlist_add=%s).\n",
			    strerror(errno));
			rcmd_exit(errno);
		}
	}

	/* client_props is packed into a byte array for transport */
	if (client_props) {
		if (errno = nvlist_pack(client_props, &buf, &buflen,
		    NV_ENCODE_NATIVE, 0)) {
			rcm_log_message(RCM_ERROR, "failed (nvlist_pack=%s).\n",
			    strerror(errno));
			rcmd_exit(errno);
		}
		if (errno = nvlist_add_byte_array(info->info,
		    RCM_CLIENT_PROPERTIES, (uchar_t *)buf, buflen)) {
			rcm_log_message(RCM_ERROR, "failed (nvlist_add=%s).\n",
			    strerror(errno));
			rcmd_exit(errno);
		}
		(void) free(buf);
	}


	/* link info at end of list */
	if (*list) {
		tmp = *list;
		while (tmp->next)
			tmp = tmp->next;
		tmp->next = info;
	} else {
		*list = info;
	}
}
505 
506 /*
 * Resource client related operations:
508  *	rsrc_client_alloc, rsrc_client_find, rsrc_client_add,
509  *	rsrc_client_remove, rsrc_client_action,	rsrc_client_action_list
510  */
511 
512 /* Allocate rsrc_client_t structure. Load module if necessary. */
513 /*ARGSUSED*/
514 static client_t *
515 rsrc_client_alloc(char *alias, char *modname, pid_t pid, uint_t flag)
516 {
517 	client_t *client;
518 	module_t *mod;
519 
520 	assert((alias != NULL) && (modname != NULL));
521 
522 	rcm_log_message(RCM_TRACE4, "rsrc_client_alloc(%s, %s, %ld)\n",
523 	    alias, modname, pid);
524 
525 	if ((mod = cli_module_hold(modname)) == NULL) {
526 		return (NULL);
527 	}
528 
529 	client = s_calloc(1, sizeof (client_t));
530 	client->module = mod;
531 	client->pid = pid;
532 	client->alias = s_strdup(alias);
533 	client->prv_flags = 0;
534 	client->state = RCM_STATE_ONLINE;
535 	client->flag = flag;
536 
537 	/* This queue is protected by rcm_req_lock */
538 	rcm_enqueue_tail(&mod->client_q, &client->queue);
539 
540 	return (client);
541 }
542 
543 /* Find client in list matching modname and pid */
544 client_t *
545 rsrc_client_find(char *modname, pid_t pid, client_t **list)
546 {
547 	client_t *client = *list;
548 
549 	rcm_log_message(RCM_TRACE4, "rsrc_client_find(%s, %ld, %p)\n",
550 	    modname, pid, (void *)list);
551 
552 	while (client) {
553 		if ((client->pid == pid) &&
554 		    strcmp(modname, client->module->name) == 0) {
555 			break;
556 		}
557 		client = client->next;
558 	}
559 	return (client);
560 }
561 
562 /* Add a client to client list */
563 static void
564 rsrc_client_add(client_t *client, client_t **list)
565 {
566 	rcm_log_message(RCM_TRACE4, "rsrc_client_add: %s, %s, %ld\n",
567 	    client->alias, client->module->name, client->pid);
568 
569 	client->next = *list;
570 	*list = client;
571 }
572 
573 /* Remove client from list and destroy it */
574 static void
575 rsrc_client_remove(client_t *client, client_t **list)
576 {
577 	client_t *tmp, *prev = NULL;
578 
579 	rcm_log_message(RCM_TRACE4, "rsrc_client_remove: %s, %s, %ld\n",
580 	    client->alias, client->module->name, client->pid);
581 
582 	tmp = *list;
583 	while (tmp) {
584 		if (client != tmp) {
585 			prev = tmp;
586 			tmp = tmp->next;
587 			continue;
588 		}
589 		if (prev) {
590 			prev->next = tmp->next;
591 		} else {
592 			*list = tmp->next;
593 		}
594 		tmp->next = NULL;
595 		rsrc_clients_free(tmp);
596 		return;
597 	}
598 }
599 
600 /* Free a list of clients. Called from cleanup thread only */
601 static void
602 rsrc_clients_free(client_t *list)
603 {
604 	client_t *client = list;
605 
606 	while (client) {
607 
608 		/*
609 		 * Note that the rcm daemon is single threaded while
610 		 * executing this routine. So there is no need to acquire
611 		 * rcm_req_lock here while dequeuing.
612 		 */
613 		rcm_dequeue(&client->queue);
614 
615 		if (client->module) {
616 			cli_module_rele(client->module);
617 		}
618 		list = client->next;
619 		if (client->alias) {
620 			free(client->alias);
621 		}
622 		free(client);
623 		client = list;
624 	}
625 }
626 
/*
 * Invoke a callback into a single client
 * This is the core of rcm_mod_ops interface
 *
 * 'cmd' selects which rcm_mod_ops entry point is called; 'arg' is the
 * tree_walk_arg_t carrying the operation flags, sequence number and the
 * list on which usage/error info is accumulated.  Returns the module's
 * result code; failures are also latched into targ->retcode.
 */
static int
rsrc_client_action(client_t *client, int cmd, void *arg)
{
	int			rval = RCM_SUCCESS;
	char			*dummy_error = NULL;
	char			*error = NULL;
	char			*info = NULL;
	rcm_handle_t		*hdl;
	nvlist_t		*client_props = NULL;
	rcm_info_t		*depend_info = NULL;
	struct rcm_mod_ops	*ops = client->module->modops;
	tree_walk_arg_t		*targ = (tree_walk_arg_t *)arg;

	rcm_log_message(RCM_TRACE4,
	    "rsrc_client_action: %s, %s, cmd=%d, flag=0x%x\n", client->alias,
	    client->module->name, cmd, targ->flag);

	/*
	 * Create a per-operation handle, increment seq_num by 1 so we will
	 * know if a module uses this handle to callback into rcm_daemon.
	 */
	hdl = rcm_handle_alloc(client->module);
	hdl->seq_num = targ->seq_num + 1;

	/*
	 * Filter out operations for which the client didn't register.
	 * Commands not listed below (e.g. CMD_GETINFO) are never filtered.
	 */
	switch (cmd) {
	case CMD_SUSPEND:
	case CMD_RESUME:
	case CMD_OFFLINE:
	case CMD_ONLINE:
	case CMD_REMOVE:
		if ((client->flag & RCM_REGISTER_DR) == 0) {
			rcm_handle_free(hdl);
			return (RCM_SUCCESS);
		}
		break;
	case CMD_REQUEST_CHANGE:
	case CMD_NOTIFY_CHANGE:
		if ((client->flag & RCM_REGISTER_CAPACITY) == 0) {
			rcm_handle_free(hdl);
			return (RCM_SUCCESS);
		}
		break;
	case CMD_EVENT:
		if ((client->flag & RCM_REGISTER_EVENT) == 0) {
			rcm_handle_free(hdl);
			return (RCM_SUCCESS);
		}
		break;
	}

	/*
	 * Create nvlist_t for any client-specific properties.
	 */
	if (errno = nvlist_alloc(&client_props, NV_UNIQUE_NAME, 0)) {
		rcm_log_message(RCM_ERROR,
		    "client action failed (nvlist_alloc=%s)\n",
		    strerror(errno));
		rcmd_exit(errno);
	}

	/*
	 * Process the operation via a callback to the client module.
	 */
	switch (cmd) {
	case CMD_GETINFO:
		rval = call_getinfo(ops, hdl, client->alias, client->pid,
		    targ->flag, &info, &error, client_props, &depend_info);
		break;

	case CMD_SUSPEND:
		/* already suspended and not a cancelled query: nothing to do */
		if (((targ->flag & RCM_QUERY_CANCEL) == 0) &&
		    (client->state == RCM_STATE_SUSPEND)) {
			break;
		}

		if ((targ->flag & RCM_QUERY) == 0) {
			rcm_log_message(RCM_DEBUG, "suspending %s\n",
			    client->alias);
		} else if ((targ->flag & RCM_QUERY_CANCEL) == 0) {
			rcm_log_message(RCM_DEBUG, "suspend query %s\n",
			    client->alias);
		} else {
			rcm_log_message(RCM_DEBUG,
			    "suspend query %s cancelled\n", client->alias);
		}

		/*
		 * Update the client's state before the operation.
		 * If this is a cancelled query, then updating the state is
		 * the only thing that needs to be done, so break afterwards.
		 */
		if ((targ->flag & RCM_QUERY) == 0) {
			client->state = RCM_STATE_SUSPENDING;
		} else if ((targ->flag & RCM_QUERY_CANCEL) == 0) {
			client->state = RCM_STATE_SUSPEND_QUERYING;
		} else {
			client->state = RCM_STATE_ONLINE;
			break;
		}

		rval = ops->rcmop_request_suspend(hdl, client->alias,
		    client->pid, targ->interval, targ->flag, &error,
		    &depend_info);

		/* Update the client's state after the operation. */
		if ((targ->flag & RCM_QUERY) == 0) {
			if (rval == RCM_SUCCESS) {
				client->state = RCM_STATE_SUSPEND;
			} else {
				client->state = RCM_STATE_SUSPEND_FAIL;
			}
		} else {
			if (rval == RCM_SUCCESS) {
				client->state = RCM_STATE_SUSPEND_QUERY;
			} else {
				client->state = RCM_STATE_SUSPEND_QUERY_FAIL;
			}
		}
		break;

	case CMD_RESUME:
		if (client->state == RCM_STATE_ONLINE) {
			break;
		}
		client->state = RCM_STATE_RESUMING;
		rval = ops->rcmop_notify_resume(hdl, client->alias, client->pid,
		    targ->flag, &error, &depend_info);

		/* online state is unconditional */
		client->state = RCM_STATE_ONLINE;
		break;

	case CMD_OFFLINE:
		/* already offline and not a cancelled query: nothing to do */
		if (((targ->flag & RCM_QUERY_CANCEL) == 0) &&
		    (client->state == RCM_STATE_OFFLINE)) {
			break;
		}

		if ((targ->flag & RCM_QUERY) == 0) {
			rcm_log_message(RCM_DEBUG, "offlining %s\n",
			    client->alias);
		} else if ((targ->flag & RCM_QUERY_CANCEL) == 0) {
			rcm_log_message(RCM_DEBUG, "offline query %s\n",
			    client->alias);
		} else {
			rcm_log_message(RCM_DEBUG,
			    "offline query %s cancelled\n", client->alias);
		}

		/*
		 * Update the client's state before the operation.
		 * If this is a cancelled query, then updating the state is
		 * the only thing that needs to be done, so break afterwards.
		 */
		if ((targ->flag & RCM_QUERY) == 0) {
			client->state = RCM_STATE_OFFLINING;
		} else if ((targ->flag & RCM_QUERY_CANCEL) == 0) {
			client->state = RCM_STATE_OFFLINE_QUERYING;
		} else {
			client->state = RCM_STATE_ONLINE;
			break;
		}

		rval = ops->rcmop_request_offline(hdl, client->alias,
		    client->pid, targ->flag, &error, &depend_info);

		/* Update the client's state after the operation. */
		if ((targ->flag & RCM_QUERY) == 0) {
			if (rval == RCM_SUCCESS) {
				client->state = RCM_STATE_OFFLINE;
			} else {
				client->state = RCM_STATE_OFFLINE_FAIL;
			}
		} else {
			if (rval == RCM_SUCCESS) {
				client->state = RCM_STATE_OFFLINE_QUERY;
			} else {
				client->state = RCM_STATE_OFFLINE_QUERY_FAIL;
			}
		}
		break;

	case CMD_ONLINE:
		if (client->state == RCM_STATE_ONLINE) {
			break;
		}

		rcm_log_message(RCM_DEBUG, "onlining %s\n", client->alias);

		client->state = RCM_STATE_ONLINING;
		rval = ops->rcmop_notify_online(hdl, client->alias, client->pid,
		    targ->flag, &error, &depend_info);
		client->state = RCM_STATE_ONLINE;
		break;

	case CMD_REMOVE:
		rcm_log_message(RCM_DEBUG, "removing %s\n", client->alias);
		client->state = RCM_STATE_REMOVING;
		rval = ops->rcmop_notify_remove(hdl, client->alias, client->pid,
		    targ->flag, &error, &depend_info);
		client->state = RCM_STATE_REMOVE;
		break;

	case CMD_REQUEST_CHANGE:
		rcm_log_message(RCM_DEBUG, "requesting state change of %s\n",
		    client->alias);
		/* capacity-change entry points are optional; skip if absent */
		if (ops->rcmop_request_capacity_change)
			rval = ops->rcmop_request_capacity_change(hdl,
			    client->alias, client->pid, targ->flag, targ->nvl,
			    &error, &depend_info);
		break;

	case CMD_NOTIFY_CHANGE:
		rcm_log_message(RCM_DEBUG, "requesting state change of %s\n",
		    client->alias);
		if (ops->rcmop_notify_capacity_change)
			rval = ops->rcmop_notify_capacity_change(hdl,
			    client->alias, client->pid, targ->flag, targ->nvl,
			    &error, &depend_info);
		break;

	case CMD_EVENT:
		rcm_log_message(RCM_DEBUG, "delivering event to %s\n",
		    client->alias);
		/* event entry point is optional; skip if absent */
		if (ops->rcmop_notify_event)
			rval = ops->rcmop_notify_event(hdl, client->alias,
			    client->pid, targ->flag, &error, targ->nvl,
			    &depend_info);
		break;

	default:
		rcm_log_message(RCM_ERROR, gettext("unknown command %d\n"),
		    cmd);
		rval = RCM_FAILURE;
		break;
	}

	/* reset error code to the most significant error */
	if (rval != RCM_SUCCESS)
		targ->retcode = rval;

	/*
	 * On failure, error text, or a scoped operation, collect usage
	 * info from the module; otherwise the props nvlist is discarded.
	 *
	 * XXX - The code below may produce duplicate rcm_info_t's on error?
	 */
	if ((cmd != CMD_GETINFO) &&
	    ((rval != RCM_SUCCESS) ||
	    (error != NULL) ||
	    (targ->flag & RCM_SCOPE))) {
		(void) call_getinfo(ops, hdl, client->alias, client->pid,
		    targ->flag & (~(RCM_INCLUDE_DEPENDENT|RCM_INCLUDE_SUBTREE)),
		    &info, &dummy_error, client_props, &depend_info);
		if (dummy_error)
			(void) free(dummy_error);
	} else if (cmd != CMD_GETINFO) {
		nvlist_free(client_props);
		client_props = NULL;
	}

	if (client_props) {
		add_busy_rsrc_to_list(client->alias, client->pid, client->state,
		    targ->seq_num, client->module->name, info, error,
		    client_props, targ->info);
		nvlist_free(client_props);
	}

	if (info)
		(void) free(info);
	if (error)
		(void) free(error);

	/* hand dependent info up to the caller, or discard it */
	if (depend_info) {
		if (targ->info) {
			(void) rcm_append_info(targ->info, depend_info);
		} else {
			rcm_free_info(depend_info);
		}
	}

	rcm_handle_free(hdl);
	return (rval);
}
915 
916 /*
917  * invoke a callback into a list of clients, return 0 if all success
918  */
919 int
920 rsrc_client_action_list(client_t *list, int cmd, void *arg)
921 {
922 	int error, rval = RCM_SUCCESS;
923 
924 	while (list) {
925 		client_t *client = list;
926 		list = client->next;
927 
928 		if (client->state == RCM_STATE_REMOVE)
929 			continue;
930 
931 		error = rsrc_client_action(client, cmd, arg);
932 		if (error != RCM_SUCCESS) {
933 			rval = error;
934 		}
935 	}
936 
937 	return (rval);
938 }
939 
940 /*
 * Node related operations:
942  *
943  *	rn_alloc, rn_free, rn_find_child,
944  *	rn_get_child, rn_get_sibling,
945  *	rsrc_node_find, rsrc_node_add_user, rsrc_node_remove_user,
946  */
947 
948 /* Allocate node based on a logical or physical name */
949 static rsrc_node_t *
950 rn_alloc(char *name, int type)
951 {
952 	rsrc_node_t *node;
953 
954 	rcm_log_message(RCM_TRACE4, "rn_alloc(%s, %d)\n", name, type);
955 
956 	node = s_calloc(1, sizeof (*node));
957 	node->name = s_strdup(name);
958 	node->type = type;
959 
960 	return (node);
961 }
962 
963 /*
964  * Free node along with its siblings and children
965  */
966 static void
967 rn_free(rsrc_node_t *node)
968 {
969 	if (node == NULL) {
970 		return;
971 	}
972 
973 	if (node->child) {
974 		rn_free(node->child);
975 	}
976 
977 	if (node->sibling) {
978 		rn_free(node->sibling);
979 	}
980 
981 	rsrc_clients_free(node->users);
982 	free(node->name);
983 	free(node);
984 }
985 
/*
 * Return the next sibling of a node, or NULL if it is the last child.
 */
static rsrc_node_t *
rn_get_sibling(rsrc_node_t *node)
{
	return (node->sibling);
}
994 
/*
 * Return the first child of a node, or NULL if it has none.
 */
static rsrc_node_t *
rn_get_child(rsrc_node_t *node)
{
	return (node->child);
}
1003 
1004 /*
1005  * Find child named childname. Create it if flag is RSRC_NODE_CRTEATE
1006  */
1007 static rsrc_node_t *
1008 rn_find_child(rsrc_node_t *parent, char *childname, int flag, int type)
1009 {
1010 	rsrc_node_t *child = parent->child;
1011 	rsrc_node_t *new, *prev = NULL;
1012 
1013 	rcm_log_message(RCM_TRACE4,
1014 	    "rn_find_child(parent=%s, child=%s, 0x%x, %d)\n",
1015 	    parent->name, childname, flag, type);
1016 
1017 	/*
1018 	 * Children are ordered based on strcmp.
1019 	 */
1020 	while (child && (strcmp(child->name, childname) < 0)) {
1021 		prev = child;
1022 		child = child->sibling;
1023 	}
1024 
1025 	if (child && (strcmp(child->name, childname) == 0)) {
1026 		return (child);
1027 	}
1028 
1029 	if (flag != RSRC_NODE_CREATE)
1030 		return (NULL);
1031 
1032 	new = rn_alloc(childname, type);
1033 	new->parent = parent;
1034 	new->sibling = child;
1035 
1036 	/*
1037 	 * Set this linkage last so we don't break ongoing operations.
1038 	 *
1039 	 * N.B. Assume setting a pointer is an atomic operation.
1040 	 */
1041 	if (prev == NULL) {
1042 		parent->child = new;
1043 	} else {
1044 		prev->sibling = new;
1045 	}
1046 
1047 	return (new);
1048 }
1049 
1050 /*
1051  * Pathname related help functions
1052  */
1053 static void
1054 pn_preprocess(char *pathname, int type)
1055 {
1056 	char *tmp;
1057 
1058 	if (type != RSRC_TYPE_DEVICE)
1059 		return;
1060 
1061 	/*
1062 	 * For devices, convert ':' to '/' (treat minor nodes and children)
1063 	 */
1064 	tmp = strchr(pathname, ':');
1065 	if (tmp == NULL)
1066 		return;
1067 
1068 	*tmp = '/';
1069 }
1070 
/*
 * Return the next '/'-separated component of pathname, NUL-terminating
 * it in place.  *lasts is set to the remainder of the string, or NULL
 * when the final component has been returned.  Returns NULL when
 * pathname is NULL or holds no further components.
 */
static char *
pn_getnextcomp(char *pathname, char **lasts)
{
	char *sep;

	if (pathname == NULL)
		return (NULL);

	/* skip leading slashes */
	while (*pathname == '/')
		++pathname;

	if (*pathname == '\0')
		return (NULL);

	sep = strchr(pathname, '/');
	if (sep == NULL) {
		*lasts = NULL;
	} else {
		*sep = '\0';
		*lasts = sep + 1;
	}

	return (pathname);
}
1096 
/*
 * Find a node in tree based on device, which is the physical pathname
 * of the form /sbus@.../esp@.../sd@...
 *
 * On success *nodep is set to the node found (or NULL when the node is
 * absent and RSRC_NODE_CREATE was not requested) and RCM_SUCCESS is
 * returned; EINVAL is returned for an unresolvable or invalid name.
 */
int
rsrc_node_find(char *rsrcname, int flag, rsrc_node_t **nodep)
{
	char *pathname, *nodename, *lasts;
	rsrc_node_t *node;
	int type;

	rcm_log_message(RCM_TRACE4, "rn_node_find(%s, 0x%x)\n", rsrcname, flag);

	/*
	 * For RSRC_TYPE_ABSTRACT, look under /ABSTRACT. For other types,
	 * look under /SYSTEM.
	 */
	pathname = resolve_name(rsrcname);
	if (pathname == NULL)
		return (EINVAL);

	type = rsrc_get_type(pathname);
	switch (type) {
	case RSRC_TYPE_DEVICE:
	case RSRC_TYPE_NORMAL:
		node = rn_find_child(rsrc_root, "SYSTEM", RSRC_NODE_CREATE,
		    RSRC_TYPE_NORMAL);
		break;

	case RSRC_TYPE_ABSTRACT:
		node = rn_find_child(rsrc_root, "ABSTRACT", RSRC_NODE_CREATE,
		    RSRC_TYPE_NORMAL);
		break;

	default:
		/* just to make sure */
		free(pathname);
		return (EINVAL);
	}

	/*
	 * Walk down the tree one pathname component at a time; missing
	 * nodes are created along the way iff flag has RSRC_NODE_CREATE.
	 */
	pn_preprocess(pathname, type);
	lasts = pathname;
	while ((nodename = pn_getnextcomp(lasts, &lasts)) != NULL) {
		rsrc_node_t *parent = node;
		node = rn_find_child(parent, nodename, flag, type);
		if (node == NULL) {
			assert((flag & RSRC_NODE_CREATE) == 0);
			free(pathname);
			*nodep = NULL;
			return (RCM_SUCCESS);
		}
	}
	free(pathname);
	*nodep = node;
	return (RCM_SUCCESS);
}
1157 
/*
 * add a usage client to a node
 *
 * 'flag' carries the registration bits (RCM_REGISTER_MASK) being added.
 * Returns RCM_SUCCESS, or EALREADY when the registration already exists.
 */
/*ARGSUSED*/
int
rsrc_node_add_user(rsrc_node_t *node, char *alias, char *modname, pid_t pid,
    uint_t flag)
{
	client_t *user;

	rcm_log_message(RCM_TRACE3,
	    "rsrc_node_add_user(%s, %s, %s, %ld, 0x%x)\n",
	    node->name, alias, modname, pid, flag);

	user = rsrc_client_find(modname, pid, &node->users);

	/*
	 * If a client_t already exists, add the registration and return
	 * success if it's a valid registration request.
	 *
	 * Return EALREADY if the resource is already registered.
	 * This means either the client_t already has the requested
	 * registration flagged, or that a DR registration was attempted
	 * on a resource already in use in the DR operations state model.
	 */
	if (user != NULL) {

		if (user->flag & (flag & RCM_REGISTER_MASK)) {
			return (EALREADY);
		}

		if ((flag & RCM_REGISTER_DR) &&
		    (user->state != RCM_STATE_REMOVE)) {
			return (EALREADY);
		}

		/* record the new registration bits on the existing client */
		user->flag |= (flag & RCM_REGISTER_MASK);
		/* a DR registration brings a removed client back online */
		if ((flag & RCM_REGISTER_DR) ||
		    (user->state == RCM_STATE_REMOVE)) {
			user->state = RCM_STATE_ONLINE;
		}

		return (RCM_SUCCESS);
	}

	/*
	 * Otherwise create a new client_t and create a new registration.
	 */
	if ((user = rsrc_client_alloc(alias, modname, pid, flag)) != NULL) {
		rsrc_client_add(user, &node->users);
	}
	/* a filesystem registration retypes the node */
	if (flag & RCM_FILESYS)
		node->type = RSRC_TYPE_FILESYS;

	return (RCM_SUCCESS);
}
1214 
1215 /*
1216  * remove a usage client of a node
1217  */
1218 int
1219 rsrc_node_remove_user(rsrc_node_t *node, char *modname, pid_t pid, uint_t flag)
1220 {
1221 	client_t *user;
1222 
1223 	rcm_log_message(RCM_TRACE3,
1224 	    "rsrc_node_remove_user(%s, %s, %ld, 0x%x)\n", node->name, modname,
1225 	    pid, flag);
1226 
1227 	user = rsrc_client_find(modname, pid, &node->users);
1228 	if ((user == NULL) || (user->state == RCM_STATE_REMOVE)) {
1229 		rcm_log_message(RCM_NOTICE, gettext(
1230 		    "client not registered: module=%s, pid=%d, dev=%s\n"),
1231 		    modname, pid, node->name);
1232 		return (ENOENT);
1233 	}
1234 
1235 	/* Strip off the registration being removed (DR, event, capacity) */
1236 	user->flag = user->flag & (~(flag & RCM_REGISTER_MASK));
1237 
1238 	/*
1239 	 * Mark the client as removed if all registrations have been removed
1240 	 */
1241 	if ((user->flag & RCM_REGISTER_MASK) == 0)
1242 		user->state = RCM_STATE_REMOVE;
1243 
1244 	return (RCM_SUCCESS);
1245 }
1246 
1247 /*
1248  * Tree walking function - rsrc_walk
1249  */
1250 
1251 #define	MAX_TREE_DEPTH		32
1252 
1253 #define	RN_WALK_CONTINUE	0
1254 #define	RN_WALK_PRUNESIB	1
1255 #define	RN_WALK_PRUNECHILD	2
1256 #define	RN_WALK_TERMINATE	3
1257 
1258 #define	EMPTY_STACK(sp)		((sp)->depth == 0)
1259 #define	TOP_NODE(sp)		((sp)->node[(sp)->depth - 1])
1260 #define	PRUNE_SIB(sp)		((sp)->prunesib[(sp)->depth - 1])
1261 #define	PRUNE_CHILD(sp)		((sp)->prunechild[(sp)->depth - 1])
1262 #define	POP_STACK(sp)		((sp)->depth)--
1263 #define	PUSH_STACK(sp, rn)	\
1264 	(sp)->node[(sp)->depth] = (rn);	\
1265 	(sp)->prunesib[(sp)->depth] = 0;	\
1266 	(sp)->prunechild[(sp)->depth] = 0;	\
1267 	((sp)->depth)++
1268 
1269 struct rn_stack {
1270 	rsrc_node_t *node[MAX_TREE_DEPTH];
1271 	char	prunesib[MAX_TREE_DEPTH];
1272 	char	prunechild[MAX_TREE_DEPTH];
1273 	int	depth;
1274 };
1275 
/*
 * Visit the node on top of the stack: invoke the callback on it, then
 * update the stack so the next call visits the walk's next node
 * (child first, then siblings), honoring any pruning the callback
 * requested via its RN_WALK_* return value.
 */
/*ARGSUSED*/
static void
walk_one_node(struct rn_stack *sp, void *arg,
    int (*node_callback)(rsrc_node_t *, void *))
{
	int prunesib;
	rsrc_node_t *child, *sibling;
	rsrc_node_t *node = TOP_NODE(sp);

	rcm_log_message(RCM_TRACE4, "walk_one_node(%s)\n", node->name);

	switch (node_callback(node, arg)) {
	case RN_WALK_TERMINATE:
		/* abort the walk: drain the entire stack and return */
		POP_STACK(sp);
		while (!EMPTY_STACK(sp)) {
			node = TOP_NODE(sp);
			POP_STACK(sp);
		}
		return;

	case RN_WALK_PRUNESIB:
		/* callback asked us not to visit this node's siblings */
		PRUNE_SIB(sp) = 1;
		break;

	case RN_WALK_PRUNECHILD:
		/* callback asked us not to descend into children */
		PRUNE_CHILD(sp) = 1;
		break;

	case RN_WALK_CONTINUE:
	default:
		break;
	}

	/*
	 * Push child on the stack
	 */
	if (!PRUNE_CHILD(sp) && (child = rn_get_child(node)) != NULL) {
		PUSH_STACK(sp, child);
		return;
	}

	/*
	 * Pop the stack till a node's sibling can be pushed
	 */
	prunesib = PRUNE_SIB(sp);
	POP_STACK(sp);
	while (!EMPTY_STACK(sp) &&
	    (prunesib || (sibling = rn_get_sibling(node)) == NULL)) {
		node = TOP_NODE(sp);
		prunesib = PRUNE_SIB(sp);
		POP_STACK(sp);
	}

	if (EMPTY_STACK(sp)) {
		/* walk complete: no ancestor had an unvisited sibling */
		return;
	}

	/*
	 * push sibling onto the stack
	 */
	PUSH_STACK(sp, sibling);
}
1339 
1340 /*
1341  * walk tree rooted at root in child-first order
1342  */
1343 static void
1344 rsrc_walk(rsrc_node_t *root, void *arg,
1345     int (*node_callback)(rsrc_node_t *, void *))
1346 {
1347 	struct rn_stack stack;
1348 
1349 	rcm_log_message(RCM_TRACE3, "rsrc_walk(%s)\n", root->name);
1350 
1351 	/*
1352 	 * Push root on stack and walk in child-first order
1353 	 */
1354 	stack.depth = 0;
1355 	PUSH_STACK(&stack, root);
1356 	PRUNE_SIB(&stack) = 1;
1357 
1358 	while (!EMPTY_STACK(&stack)) {
1359 		walk_one_node(&stack, arg, node_callback);
1360 	}
1361 }
1362 
1363 /*
1364  * Callback for a command action on a node
1365  */
1366 static int
1367 node_action(rsrc_node_t *node, void *arg)
1368 {
1369 	tree_walk_arg_t *targ = (tree_walk_arg_t *)arg;
1370 	uint_t flag = targ->flag;
1371 
1372 	rcm_log_message(RCM_TRACE4, "node_action(%s)\n", node->name);
1373 
1374 	/*
1375 	 * If flag indicates operation on a filesystem, we don't callback on
1376 	 * the filesystem root to avoid infinite recursion on filesystem module.
1377 	 *
1378 	 * N.B. Such request should only come from filesystem RCM module.
1379 	 */
1380 	if (flag & RCM_FILESYS) {
1381 		assert(node->type == RSRC_TYPE_FILESYS);
1382 		targ->flag &= ~RCM_FILESYS;
1383 		return (RN_WALK_CONTINUE);
1384 	}
1385 
1386 	/*
1387 	 * Execute state change callback
1388 	 */
1389 	(void) rsrc_client_action_list(node->users, targ->cmd, arg);
1390 
1391 	/*
1392 	 * Upon hitting a filesys root, prune children.
1393 	 * The filesys module should have taken care of
1394 	 * children by now.
1395 	 */
1396 	if (node->type == RSRC_TYPE_FILESYS)
1397 		return (RN_WALK_PRUNECHILD);
1398 
1399 	return (RN_WALK_CONTINUE);
1400 }
1401 
1402 /*
1403  * Execute a command on a subtree under root.
1404  */
1405 int
1406 rsrc_tree_action(rsrc_node_t *root, int cmd, tree_walk_arg_t *arg)
1407 {
1408 	rcm_log_message(RCM_TRACE2, "tree_action(%s, %d)\n", root->name, cmd);
1409 
1410 	arg->cmd = cmd;
1411 	arg->retcode = RCM_SUCCESS;
1412 	rsrc_walk(root, (void *)arg, node_action);
1413 
1414 	return (arg->retcode);
1415 }
1416 
1417 /*
1418  * Get info on current regsitrations
1419  */
1420 int
1421 rsrc_usage_info(char **rsrcnames, uint_t flag, int seq_num, rcm_info_t **info)
1422 {
1423 	rsrc_node_t *node;
1424 	rcm_info_t *result = NULL;
1425 	tree_walk_arg_t arg;
1426 	int initial_req;
1427 	int rv;
1428 	int i;
1429 
1430 	arg.flag = flag;
1431 	arg.info = &result;
1432 	arg.seq_num = seq_num;
1433 
1434 	for (i = 0; rsrcnames[i] != NULL; i++) {
1435 
1436 		rcm_log_message(RCM_TRACE2, "rsrc_usage_info(%s, 0x%x, %d)\n",
1437 		    rsrcnames[i], flag, seq_num);
1438 
1439 		if (flag & RCM_INCLUDE_DEPENDENT) {
1440 			initial_req = ((seq_num & SEQ_NUM_MASK) == 0);
1441 
1442 			/*
1443 			 * if redundant request, skip the operation
1444 			 */
1445 			if (info_req_add(rsrcnames[i], flag, seq_num) != 0) {
1446 				continue;
1447 			}
1448 		}
1449 
1450 		rv = rsrc_node_find(rsrcnames[i], 0, &node);
1451 		if ((rv != RCM_SUCCESS) || (node == NULL)) {
1452 			if ((flag & RCM_INCLUDE_DEPENDENT) && initial_req)
1453 				info_req_remove(seq_num);
1454 			continue;
1455 		}
1456 
1457 		/*
1458 		 * Based on RCM_INCLUDE_SUBTREE flag, query either the subtree
1459 		 * or just the node.
1460 		 */
1461 		if (flag & RCM_INCLUDE_SUBTREE) {
1462 			(void) rsrc_tree_action(node, CMD_GETINFO, &arg);
1463 		} else {
1464 			arg.cmd = CMD_GETINFO;
1465 			(void) node_action(node, (void *)&arg);
1466 		}
1467 
1468 		if ((flag & RCM_INCLUDE_DEPENDENT) && initial_req)
1469 			info_req_remove(seq_num);
1470 	}
1471 
1472 out:
1473 	(void) rcm_append_info(info, result);
1474 	return (rv);
1475 }
1476 
1477 /*
1478  * Get the list of currently loaded module
1479  */
1480 rcm_info_t *
1481 rsrc_mod_info()
1482 {
1483 	module_t *mod;
1484 	rcm_info_t *info = NULL;
1485 
1486 	(void) mutex_lock(&mod_lock);
1487 	mod = module_head;
1488 	while (mod) {
1489 		char *modinfo = s_strdup(module_info(mod));
1490 		add_busy_rsrc_to_list("dummy", 0, 0, 0, mod->name,
1491 		    modinfo, NULL, NULL, &info);
1492 		mod = mod->next;
1493 	}
1494 	(void) mutex_unlock(&mod_lock);
1495 
1496 	return (info);
1497 }
1498 
1499 /*
1500  * Initialize resource map - load all modules
1501  */
1502 void
1503 rcmd_db_init()
1504 {
1505 	char *tmp;
1506 	DIR *mod_dir;
1507 	struct dirent *entp;
1508 	int i;
1509 	char *dir_name;
1510 	int rcm_script;
1511 
1512 	rcm_log_message(RCM_DEBUG, "rcmd_db_init(): initialize database\n");
1513 
1514 	if (script_main_init() == -1)
1515 		rcmd_exit(errno);
1516 
1517 	rsrc_root = rn_alloc("/", RSRC_TYPE_NORMAL);
1518 
1519 	for (i = 0; (dir_name = rcm_dir(i, &rcm_script)) != NULL; i++) {
1520 
1521 		if ((mod_dir = opendir(dir_name)) == NULL) {
1522 			continue;	/* try next directory */
1523 		}
1524 
1525 		rcm_log_message(RCM_TRACE2, "search directory %s\n", dir_name);
1526 
1527 		while ((entp = readdir(mod_dir)) != NULL) {
1528 			module_t *module;
1529 
1530 			if (strcmp(entp->d_name, ".") == 0 ||
1531 				strcmp(entp->d_name, "..") == 0)
1532 				continue;
1533 
1534 			if (rcm_script == 0) {
1535 				/* rcm module */
1536 				if (((tmp = strstr(entp->d_name,
1537 				    RCM_MODULE_SUFFIX)) == NULL) ||
1538 				    (tmp[strlen(RCM_MODULE_SUFFIX)] != '\0')) {
1539 					continue;
1540 				}
1541 			}
1542 
1543 			module = cli_module_hold(entp->d_name);
1544 			if (module == NULL) {
1545 				if (rcm_script == 0)
1546 					rcm_log_message(RCM_ERROR,
1547 					    gettext("%s: failed to load\n"),
1548 					    entp->d_name);
1549 				continue;
1550 			}
1551 
1552 			if (module->ref_count == MOD_REFCNT_INIT) {
1553 				/*
1554 				 * ask module to register for resource 1st time
1555 				 */
1556 				module_attach(module);
1557 			}
1558 			cli_module_rele(module);
1559 		}
1560 		(void) closedir(mod_dir);
1561 	}
1562 
1563 	rcmd_db_print();
1564 }
1565 
1566 /*
1567  * sync resource map - ask all modules to register again
1568  */
1569 void
1570 rcmd_db_sync()
1571 {
1572 	static time_t sync_time = (time_t)-1;
1573 	const time_t interval = 5;	/* resync at most every 5 sec */
1574 
1575 	module_t *mod;
1576 	time_t curr = time(NULL);
1577 
1578 	if ((sync_time != (time_t)-1) && (curr - sync_time < interval))
1579 		return;
1580 
1581 	sync_time = curr;
1582 	(void) mutex_lock(&mod_lock);
1583 	mod = module_head;
1584 	while (mod) {
1585 		/*
1586 		 * Hold module by incrementing ref count and release
1587 		 * mod_lock to avoid deadlock, since rcmop_register()
1588 		 * may callback into the daemon and request mod_lock.
1589 		 */
1590 		mod->ref_count++;
1591 		(void) mutex_unlock(&mod_lock);
1592 
1593 		mod->modops->rcmop_register(mod->rcmhandle);
1594 
1595 		(void) mutex_lock(&mod_lock);
1596 		mod->ref_count--;
1597 		mod = mod->next;
1598 	}
1599 	(void) mutex_unlock(&mod_lock);
1600 }
1601 
1602 /*
1603  * Determine if a process is alive
1604  */
1605 int
1606 proc_exist(pid_t pid)
1607 {
1608 	char path[64];
1609 	const char *procfs = "/proc";
1610 	struct stat sb;
1611 
1612 	if (pid == (pid_t)0) {
1613 		return (1);
1614 	}
1615 
1616 	(void) snprintf(path, sizeof (path), "%s/%ld", procfs, pid);
1617 	return (stat(path, &sb) == 0);
1618 }
1619 
1620 /*
1621  * Cleaup client list
1622  *
1623  * N.B. This routine runs in a single-threaded environment only. It is only
1624  *	called by the cleanup thread, which never runs in parallel with other
1625  *	threads.
1626  */
1627 static void
1628 clean_client_list(client_t **listp)
1629 {
1630 	client_t *client = *listp;
1631 
1632 	/*
1633 	 * Cleanup notification clients for which pid no longer exists
1634 	 */
1635 	while (client) {
1636 		if ((client->state != RCM_STATE_REMOVE) &&
1637 		    proc_exist(client->pid)) {
1638 			listp = &client->next;
1639 			client = *listp;
1640 			continue;
1641 		}
1642 
1643 		/*
1644 		 * Destroy this client_t. rsrc_client_remove updates
1645 		 * listp to point to the next client.
1646 		 */
1647 		rsrc_client_remove(client, listp);
1648 		client = *listp;
1649 	}
1650 }
1651 
1652 /*ARGSUSED*/
1653 static int
1654 clean_node(rsrc_node_t *node, void *arg)
1655 {
1656 	rcm_log_message(RCM_TRACE4, "clean_node(%s)\n", node->name);
1657 
1658 	clean_client_list(&node->users);
1659 
1660 	return (RN_WALK_CONTINUE);
1661 }
1662 
/* walk the entire resource tree, purging stale clients on every node */
static void
clean_rsrc_tree()
{
	rcm_log_message(RCM_TRACE4,
	    "clean_rsrc_tree(): delete stale dr clients\n");

	rsrc_walk(rsrc_root, NULL, clean_node);
}
1671 
/*
 * Body of the detached cleanup thread (see rcmd_db_clean): loops
 * forever, waiting for a cleanup request, then purges stale DR
 * requests and stale resource-tree clients while the daemon is
 * quiesced.
 */
static void
db_clean()
{
	extern barrier_t barrier;
	extern void clean_dr_list();

	for (;;) {
		(void) mutex_lock(&rcm_req_lock);
		start_polling_thread();
		(void) mutex_unlock(&rcm_req_lock);

		/* sleep until another thread flags that cleanup is needed */
		(void) mutex_lock(&barrier.lock);
		while (need_cleanup == 0)
			(void) cond_wait(&barrier.cv, &barrier.lock);
		(void) mutex_unlock(&barrier.lock);

		/*
		 * Make sure all other threads are either blocked or exited.
		 */
		rcmd_set_state(RCMD_CLEANUP);

		need_cleanup = 0;

		/*
		 * clean dr_req_list
		 */
		clean_dr_list();

		/*
		 * clean resource tree
		 */
		clean_rsrc_tree();

		/* resume normal operation */
		rcmd_set_state(RCMD_NORMAL);
	}
}
1708 
1709 void
1710 rcmd_db_clean()
1711 {
1712 	rcm_log_message(RCM_DEBUG,
1713 	    "rcm_db_clean(): launch thread to clean database\n");
1714 
1715 	if (thr_create(NULL, NULL, (void *(*)(void *))db_clean,
1716 	    NULL, THR_DETACHED, NULL) != 0) {
1717 		rcm_log_message(RCM_WARNING,
1718 		    gettext("failed to create cleanup thread %s\n"),
1719 		    strerror(errno));
1720 	}
1721 }
1722 
1723 /*ARGSUSED*/
1724 static int
1725 print_node(rsrc_node_t *node, void *arg)
1726 {
1727 	client_t *user;
1728 
1729 	rcm_log_message(RCM_DEBUG, "rscname: %s, state = 0x%x\n", node->name);
1730 	rcm_log_message(RCM_DEBUG, "	users:\n");
1731 
1732 	if ((user = node->users) == NULL) {
1733 		rcm_log_message(RCM_DEBUG, "    none\n");
1734 		return (RN_WALK_CONTINUE);
1735 	}
1736 
1737 	while (user) {
1738 		rcm_log_message(RCM_DEBUG, "	%s, %d, %s\n",
1739 		    user->module->name, user->pid, user->alias);
1740 		user = user->next;
1741 	}
1742 	return (RN_WALK_CONTINUE);
1743 }
1744 
1745 static void
1746 rcmd_db_print()
1747 {
1748 	module_t *mod;
1749 
1750 	rcm_log_message(RCM_DEBUG, "modules:\n");
1751 	(void) mutex_lock(&mod_lock);
1752 	mod = module_head;
1753 	while (mod) {
1754 		rcm_log_message(RCM_DEBUG, "	%s\n", mod->name);
1755 		mod = mod->next;
1756 	}
1757 	(void) mutex_unlock(&mod_lock);
1758 
1759 	rcm_log_message(RCM_DEBUG, "\nresource tree:\n");
1760 
1761 	rsrc_walk(rsrc_root, NULL, print_node);
1762 
1763 	rcm_log_message(RCM_DEBUG, "\n");
1764 }
1765 
1766 /*
1767  * Allocate handle from calling into each RCM module
1768  */
1769 static rcm_handle_t *
1770 rcm_handle_alloc(module_t *module)
1771 {
1772 	rcm_handle_t *hdl;
1773 
1774 	hdl = s_malloc(sizeof (rcm_handle_t));
1775 
1776 	hdl->modname = module->name;
1777 	hdl->pid = 0;
1778 	hdl->lrcm_ops = &rcm_ops;	/* for callback into daemon directly */
1779 	hdl->module = module;
1780 
1781 	return (hdl);
1782 }
1783 
1784 /*
1785  * Free rcm_handle_t
1786  */
1787 static void
1788 rcm_handle_free(rcm_handle_t *handle)
1789 {
1790 	free(handle);
1791 }
1792 
1793 /*
1794  * help function that exit on memory outage
1795  */
1796 void *
1797 s_malloc(size_t size)
1798 {
1799 	void *buf = malloc(size);
1800 
1801 	if (buf == NULL) {
1802 		rcmd_exit(ENOMEM);
1803 	}
1804 	return (buf);
1805 }
1806 
/* calloc() wrapper; exits the daemon instead of returning NULL */
void *
s_calloc(int n, size_t size)
{
	void *p;

	if ((p = calloc(n, size)) == NULL)
		rcmd_exit(ENOMEM);

	return (p);
}
1817 
/* realloc() wrapper; exits the daemon instead of returning NULL */
void *
s_realloc(void *ptr, size_t size)
{
	void *p;

	if ((p = realloc(ptr, size)) == NULL)
		rcmd_exit(ENOMEM);

	return (p);
}
1828 
/* strdup() wrapper; exits the daemon instead of returning NULL */
char *
s_strdup(const char *str)
{
	char *p;

	if ((p = strdup(str)) == NULL)
		rcmd_exit(ENOMEM);

	return (p);
}
1839 
1840 /*
1841  * Convert a version 1 ops vector to current ops vector
1842  * Fields missing in version 1 are set to NULL.
1843  */
1844 static struct rcm_mod_ops *
1845 modops_from_v1(void *ops_v1)
1846 {
1847 	struct rcm_mod_ops *ops;
1848 
1849 	ops = s_calloc(1, sizeof (struct rcm_mod_ops));
1850 	bcopy(ops_v1, ops, sizeof (struct rcm_mod_ops_v1));
1851 	return (ops);
1852 }
1853 
1854 /* call a module's getinfo routine; detects v1 ops and adjusts the call */
1855 static int
1856 call_getinfo(struct rcm_mod_ops *ops, rcm_handle_t *hdl, char *alias, id_t pid,
1857     uint_t flag, char **info, char **error, nvlist_t *client_props,
1858     rcm_info_t **infop)
1859 {
1860 	int rval;
1861 	struct rcm_mod_ops_v1 *v1_ops;
1862 
1863 	if (ops->version == RCM_MOD_OPS_V1) {
1864 		v1_ops = (struct rcm_mod_ops_v1 *)ops;
1865 		rval = v1_ops->rcmop_get_info(hdl, alias, pid, flag, info,
1866 		    infop);
1867 		if (rval != RCM_SUCCESS && *info != NULL)
1868 			*error = strdup(*info);
1869 		return (rval);
1870 	} else {
1871 		return (ops->rcmop_get_info(hdl, alias, pid, flag, info, error,
1872 		    client_props, infop));
1873 	}
1874 }
1875 
1876 void
1877 rcm_init_queue(rcm_queue_t *head)
1878 {
1879 	head->next = head->prev = head;
1880 }
1881 
/* insert element at the front of the queue */
void
rcm_enqueue_head(rcm_queue_t *head, rcm_queue_t *element)
{
	rcm_enqueue(head, element);
}
1887 
/* insert element at the end of the queue (just before the header) */
void
rcm_enqueue_tail(rcm_queue_t *head, rcm_queue_t *element)
{
	rcm_enqueue(head->prev, element);
}
1893 
1894 void
1895 rcm_enqueue(rcm_queue_t *list_element, rcm_queue_t *element)
1896 {
1897 	element->next = list_element->next;
1898 	element->prev = list_element;
1899 	element->next->prev = element;
1900 	list_element->next = element;
1901 }
1902 
1903 rcm_queue_t *
1904 rcm_dequeue_head(rcm_queue_t *head)
1905 {
1906 	rcm_queue_t	*element = head->next;
1907 	rcm_dequeue(element);
1908 	return (element);
1909 }
1910 
1911 rcm_queue_t *
1912 rcm_dequeue_tail(rcm_queue_t *head)
1913 {
1914 	rcm_queue_t	*element = head->prev;
1915 	rcm_dequeue(element);
1916 	return (element);
1917 }
1918 
1919 void
1920 rcm_dequeue(rcm_queue_t *element)
1921 {
1922 	element->prev->next = element->next;
1923 	element->next->prev = element->prev;
1924 	element->next = element->prev = NULL;
1925 }
1926