1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 *
21 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
22 * Use is subject to license terms.
23 */
24
25 #pragma ident "%Z%%M% %I% %E% SMI"
26
27 #include "rcm_impl.h"
28 #include "rcm_module.h"
29
30 /*
31 * Short-circuits unloading of modules with no registrations, so that
32 * they are present during the next db_sync cycle.
33 */
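/*
 * The initial count of 2 is assigned in cli_module_hold() when a module is
 * first loaded; the extra reference keeps the module resident after the
 * loading caller drops its hold with cli_module_rele().
 */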
34 #define MOD_REFCNT_INIT 2
35
36 int need_cleanup; /* flag indicating if clean up is needed */
37
38 static mutex_t mod_lock; /* protects module list */
39 static module_t *module_head; /* linked list of modules */
40 static rsrc_node_t *rsrc_root; /* root of all resources */
41
42 /*
43 * Misc helper routines
44 */
45 static void rcmd_db_print();
46 static void rcm_handle_free(rcm_handle_t *);
47 static rcm_handle_t *rcm_handle_alloc(module_t *);
48 static void rsrc_clients_free(client_t *);
49 static struct rcm_mod_ops *modops_from_v1(void *);
50 static int call_getinfo(struct rcm_mod_ops *, rcm_handle_t *, char *, id_t,
51 uint_t, char **, char **, nvlist_t *, rcm_info_t **);
52 static int node_action(rsrc_node_t *, void *);
53
54 extern void start_polling_thread();
55
56 /*
57 * translate /dev name to a /devices path
58 *
59 * N.B. This routine can be enhanced to understand network names
60 * and friendly names in the future.
61 */
62 char *
63 resolve_name(char *alias)
64 {
65 char *tmp;
66 const char *dev = "/dev/";
67
68 if (strlen(alias) == 0)
69 return (NULL);
70
71 if (strncmp(alias, dev, strlen(dev)) == 0) {
72 /*
73 * Treat /dev/... as a symbolic link
74 */
75 tmp = s_malloc(PATH_MAX);
76 if (realpath(alias, tmp) != NULL) {
77 return (tmp);
78 } else {
79 free(tmp);
80 }
81 /* Failed to resolve the /dev/ name; use the name as-is */
82 }
83
84 return (s_strdup(alias));
85 }
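/*
 * Illustrative example (hypothetical path): resolve_name("/dev/dsk/c0t0d0s0")
 * follows the /dev symbolic link via realpath() and returns the matching
 * /devices path; the caller is responsible for freeing the returned string.
 */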
86
87 /*
88 * Figure out resource type based on "resolved" name
89 *
90 * N.B. This routine does not figure out file system mount points.
91 * That is determined at runtime when the filesys module registers
92 * with the RCM_FILESYS flag.
93 */
94 int
95 rsrc_get_type(const char *resolved_name)
96 {
97 if (resolved_name[0] != '/')
98 return (RSRC_TYPE_ABSTRACT);
99
100 if (strncmp("/devices/", resolved_name, 9) == 0)
101 return (RSRC_TYPE_DEVICE);
102
103 return (RSRC_TYPE_NORMAL);
104 }
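/*
 * For example, "/devices/pci@0,0/..." (hypothetical path) yields
 * RSRC_TYPE_DEVICE, any other absolute path yields RSRC_TYPE_NORMAL, and a
 * name without a leading '/' yields RSRC_TYPE_ABSTRACT.
 */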
105
106 /*
107 * Module operations:
108 * module_load, module_unload, module_info, module_attach, module_detach,
109 * cli_module_hold, cli_module_rele
110 */
111
112 #ifdef ENABLE_MODULE_DETACH
113 /*
114 * call unregister() entry point to allow module to unregister for
115 * resources without getting confused.
116 */
117 static void
118 module_detach(module_t *module)
119 {
120 struct rcm_mod_ops *ops = module->modops;
121
122 rcm_log_message(RCM_TRACE2, "module_detach(name=%s)\n", module->name);
123
124 ops->rcmop_unregister(module->rcmhandle);
125 }
126 #endif /* ENABLE_MODULE_DETACH */
127
128 /*
129 * call register() entry point to allow module to register for resources
130 */
131 static void
132 module_attach(module_t *module)
133 {
134 struct rcm_mod_ops *ops = module->modops;
135
136 rcm_log_message(RCM_TRACE2, "module_attach(name=%s)\n", module->name);
137
138 if (ops->rcmop_register(module->rcmhandle) != RCM_SUCCESS) {
139 rcm_log_message(RCM_WARNING,
140 gettext("module %s register() failed\n"), module->name);
141 }
142 }
143
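/*
 * Call the module's rcm_mod_init() entry point, or script_init() when the
 * "module" is an RCM script.
 */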
144 struct rcm_mod_ops *
145 module_init(module_t *module)
146 {
147 if (module->dlhandle)
148 /* rcm module */
149 return (module->init());
150 else
151 /* rcm script */
152 return (script_init(module));
153 }
154
155 /*
156 * call rcm_mod_info() entry of module
157 */
158 static const char *
159 module_info(module_t *module)
160 {
161 if (module->dlhandle)
162 /* rcm module */
163 return (module->info());
164 else
165 /* rcm script */
166 return (script_info(module));
167 }
168
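/*
 * Call the module's rcm_mod_fini() entry point, or script_fini() for an
 * RCM script, so it can clean up before being unloaded.
 */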
169 int
170 module_fini(module_t *module)
171 {
172 if (module->dlhandle)
173 /* rcm module */
174 return (module->fini());
175 else
176 /* rcm script */
177 return (script_fini(module));
178 }
179
180 /*
181 * call rcm_mod_fini() entry of module, dlclose module, and free memory
182 */
183 static void
184 module_unload(module_t *module)
185 {
186 int version = module->modops->version;
187
188 rcm_log_message(RCM_DEBUG, "module_unload(name=%s)\n", module->name);
189
190 (void) module_fini(module);
191
192 rcm_handle_free(module->rcmhandle);
193 free(module->name);
194
195 switch (version) {
196 case RCM_MOD_OPS_V1:
197 /*
198 * Free memory associated with converted ops vector
199 */
200 free(module->modops);
201 break;
202
203 case RCM_MOD_OPS_VERSION:
204 default:
205 break;
206 }
207
208 if (module->dlhandle)
209 rcm_module_close(module->dlhandle);
210
211 free(module);
212 }
213
214 /*
215 * Locate the module, execute rcm_mod_init() and check ops vector version
216 */
217 static module_t *
218 module_load(char *modname)
219 {
220 module_t *module;
221
222 rcm_log_message(RCM_DEBUG, "module_load(name=%s)\n", modname);
223
224 /*
225 * dlopen the module
226 */
227 module = s_calloc(1, sizeof (*module));
228 module->name = s_strdup(modname);
229 module->modops = NULL;
230 rcm_init_queue(&module->client_q);
231
232 if (rcm_is_script(modname) == 0) {
233 /* rcm module */
234 module->dlhandle = rcm_module_open(modname);
235
236 if (module->dlhandle == NULL) {
237 rcm_log_message(RCM_NOTICE,
238 gettext("cannot open module %s\n"), modname);
239 goto fail;
240 }
241
242 /*
243 * dlsym rcm_mod_init/fini/info() entry points
244 */
245 module->init = (struct rcm_mod_ops *(*)())dlsym(
246 module->dlhandle, "rcm_mod_init");
247 module->fini = (int (*)())dlsym(
248 module->dlhandle, "rcm_mod_fini");
249 module->info = (const char *(*)())dlsym(module->dlhandle,
250 "rcm_mod_info");
251 if (module->init == NULL || module->fini == NULL ||
252 module->info == NULL) {
253 rcm_log_message(RCM_ERROR,
254 gettext("missing entries in module %s\n"), modname);
255 goto fail;
256 }
257
258 } else {
259 /* rcm script */
260 module->dlhandle = NULL;
261 module->init = (struct rcm_mod_ops *(*)()) NULL;
262 module->fini = (int (*)()) NULL;
263 module->info = (const char *(*)()) NULL;
264 }
265
266 if ((module->modops = module_init(module)) == NULL) {
267 if (module->dlhandle)
268 rcm_log_message(RCM_ERROR,
269 gettext("cannot init module %s\n"), modname);
270 goto fail;
271 }
272
273 /*
274 * Check ops vector version
275 */
276 switch (module->modops->version) {
277 case RCM_MOD_OPS_V1:
278 module->modops = modops_from_v1((void *)module->modops);
279 break;
280
281 case RCM_MOD_OPS_VERSION:
282 break;
283
284 default:
285 rcm_log_message(RCM_ERROR,
286 gettext("module %s rejected: version %d not supported\n"),
287 modname, module->modops->version);
288 (void) module_fini(module);
289 goto fail;
290 }
291
292 /*
293 * Make sure all fields are set
294 */
295 if ((module->modops->rcmop_register == NULL) ||
296 (module->modops->rcmop_unregister == NULL) ||
297 (module->modops->rcmop_get_info == NULL) ||
298 (module->modops->rcmop_request_suspend == NULL) ||
299 (module->modops->rcmop_notify_resume == NULL) ||
300 (module->modops->rcmop_request_offline == NULL) ||
301 (module->modops->rcmop_notify_online == NULL) ||
302 (module->modops->rcmop_notify_remove == NULL)) {
303 rcm_log_message(RCM_ERROR,
304 gettext("module %s rejected: has NULL ops fields\n"),
305 modname);
306 (void) module_fini(module);
307 goto fail;
308 }
309
310 module->rcmhandle = rcm_handle_alloc(module);
311 return (module);
312
313 fail:
314 if (module->modops && module->modops->version == RCM_MOD_OPS_V1)
315 free(module->modops);
316
317 if (module->dlhandle)
318 rcm_module_close(module->dlhandle);
319
320 free(module->name);
321 free(module);
322 return (NULL);
323 }
324
325 /*
326 * Add one to the module hold count; load the module if it is not loaded.
327 */
328 static module_t *
329 cli_module_hold(char *modname)
330 {
331 module_t *module;
332
333 rcm_log_message(RCM_TRACE3, "cli_module_hold(%s)\n", modname);
334
335 (void) mutex_lock(&mod_lock);
336 module = module_head;
337 while (module) {
338 if (strcmp(module->name, modname) == 0) {
339 break;
340 }
341 module = module->next;
342 }
343
344 if (module) {
345 module->ref_count++;
346 (void) mutex_unlock(&mod_lock);
347 return (module);
348 }
349
350 /*
351 * Module not found, attempt to load it
352 */
353 if ((module = module_load(modname)) == NULL) {
354 (void) mutex_unlock(&mod_lock);
355 return (NULL);
356 }
357
358 /*
359 * Hold module and link module into module list
360 */
361 module->ref_count = MOD_REFCNT_INIT;
362 module->next = module_head;
363 module_head = module;
364
365 (void) mutex_unlock(&mod_lock);
366
367 return (module);
368 }
369
370 /*
371 * Decrement the module hold count; unload the module when no references remain.
372 */
373 static void
374 cli_module_rele(module_t *module)
375 {
376 module_t *curr = module_head, *prev = NULL;
377
378 rcm_log_message(RCM_TRACE3, "cli_module_rele(name=%s)\n", module->name);
379
380 (void) mutex_lock(&mod_lock);
381 if (--(module->ref_count) != 0) {
382 (void) mutex_unlock(&mod_lock);
383 return;
384 }
385
386 rcm_log_message(RCM_TRACE2, "unloading module %s\n", module->name);
387
388 /*
389 * Unlink the module from list
390 */
391 while (curr && (curr != module)) {
392 prev = curr;
393 curr = curr->next;
394 }
395 if (curr == NULL) {
396 rcm_log_message(RCM_ERROR,
397 gettext("Unexpected error: module %s not found.\n"),
398 module->name);
399 } else if (prev == NULL) {
400 module_head = curr->next;
401 } else {
402 prev->next = curr->next;
403 }
404 (void) mutex_unlock(&mod_lock);
405
406 module_unload(module);
407 }
408
409 /*
410 * Gather usage info to be passed back to the requester. Discard the info
411 * if the requester does not care (list == NULL).
412 */
413 void
414 add_busy_rsrc_to_list(char *alias, pid_t pid, int state, int seq_num,
415 char *modname, const char *infostr, const char *errstr,
416 nvlist_t *client_props, rcm_info_t **list)
417 {
418 rcm_info_t *info;
419 rcm_info_t *tmp;
420 char *buf = NULL;
421 size_t buflen = 0;
422
423 if (list == NULL) {
424 return;
425 }
426
427 info = s_calloc(1, sizeof (*info));
428 if (errno = nvlist_alloc(&(info->info), NV_UNIQUE_NAME, 0)) {
429 rcm_log_message(RCM_ERROR, "failed (nvlist_alloc=%s).\n",
430 strerror(errno));
431 rcmd_exit(errno);
432 }
433
434 /*LINTED*/
435 if ((errno = nvlist_add_string(info->info, RCM_RSRCNAME, alias)) ||
436 (errno = nvlist_add_int32(info->info, RCM_SEQ_NUM, seq_num)) ||
437 (errno = nvlist_add_int64(info->info, RCM_CLIENT_ID, pid)) ||
438 (errno = nvlist_add_int32(info->info, RCM_RSRCSTATE, state))) {
439 rcm_log_message(RCM_ERROR, "failed (nvlist_add=%s).\n",
440 strerror(errno));
441 rcmd_exit(errno);
442 }
443
444 /*
445 * Daemon calls to add_busy_rsrc_to_list may pass in
446 * error/info. Add these through librcm interfaces.
447 */
448 if (errstr) {
449 rcm_log_message(RCM_TRACE3, "adding error string: %s\n",
450 errstr);
451 if (errno = nvlist_add_string(info->info, RCM_CLIENT_ERROR,
452 (char *)errstr)) {
453 rcm_log_message(RCM_ERROR, "failed (nvlist_add=%s).\n",
454 strerror(errno));
455 rcmd_exit(errno);
456 }
457 }
458
459 if (infostr) {
460 if (errno = nvlist_add_string(info->info, RCM_CLIENT_INFO,
461 (char *)infostr)) {
462 rcm_log_message(RCM_ERROR, "failed (nvlist_add=%s).\n",
463 strerror(errno));
464 rcmd_exit(errno);
465 }
466 }
467
468 if (modname) {
469 if (errno = nvlist_add_string(info->info, RCM_CLIENT_MODNAME,
470 modname)) {
471 rcm_log_message(RCM_ERROR, "failed (nvlist_add=%s).\n",
472 strerror(errno));
473 rcmd_exit(errno);
474 }
475 }
476
477 if (client_props) {
478 if (errno = nvlist_pack(client_props, &buf, &buflen,
479 NV_ENCODE_NATIVE, 0)) {
480 rcm_log_message(RCM_ERROR, "failed (nvlist_pack=%s).\n",
481 strerror(errno));
482 rcmd_exit(errno);
483 }
484 if (errno = nvlist_add_byte_array(info->info,
485 RCM_CLIENT_PROPERTIES, (uchar_t *)buf, buflen)) {
486 rcm_log_message(RCM_ERROR, "failed (nvlist_add=%s).\n",
487 strerror(errno));
488 rcmd_exit(errno);
489 }
490 (void) free(buf);
491 }
492
493
494 /* link info at end of list */
495 if (*list) {
496 tmp = *list;
497 while (tmp->next)
498 tmp = tmp->next;
499 tmp->next = info;
500 } else {
501 *list = info;
502 }
503 }
504
505 /*
506 * Resource client related operations:
507 * rsrc_client_alloc, rsrc_client_find, rsrc_client_add,
508 * rsrc_client_remove, rsrc_client_action, rsrc_client_action_list
509 */
510
511 /* Allocate rsrc_client_t structure. Load module if necessary. */
512 /*ARGSUSED*/
513 static client_t *
514 rsrc_client_alloc(char *alias, char *modname, pid_t pid, uint_t flag)
515 {
516 client_t *client;
517 module_t *mod;
518
519 assert((alias != NULL) && (modname != NULL));
520
521 rcm_log_message(RCM_TRACE4, "rsrc_client_alloc(%s, %s, %ld)\n",
522 alias, modname, pid);
523
524 if ((mod = cli_module_hold(modname)) == NULL) {
525 return (NULL);
526 }
527
528 client = s_calloc(1, sizeof (client_t));
529 client->module = mod;
530 client->pid = pid;
531 client->alias = s_strdup(alias);
532 client->prv_flags = 0;
533 client->state = RCM_STATE_ONLINE;
534 client->flag = flag;
535
536 /* This queue is protected by rcm_req_lock */
537 rcm_enqueue_tail(&mod->client_q, &client->queue);
538
539 return (client);
540 }
541
542 /* Find client in list matching modname and pid */
543 client_t *
544 rsrc_client_find(char *modname, pid_t pid, client_t **list)
545 {
546 client_t *client = *list;
547
548 rcm_log_message(RCM_TRACE4, "rsrc_client_find(%s, %ld, %p)\n",
549 modname, pid, (void *)list);
550
551 while (client) {
552 if ((client->pid == pid) &&
553 strcmp(modname, client->module->name) == 0) {
554 break;
555 }
556 client = client->next;
557 }
558 return (client);
559 }
560
561 /* Add a client to client list */
562 static void
563 rsrc_client_add(client_t *client, client_t **list)
564 {
565 rcm_log_message(RCM_TRACE4, "rsrc_client_add: %s, %s, %ld\n",
566 client->alias, client->module->name, client->pid);
567
568 client->next = *list;
569 *list = client;
570 }
571
572 /* Remove client from list and destroy it */
573 static void
574 rsrc_client_remove(client_t *client, client_t **list)
575 {
576 client_t *tmp, *prev = NULL;
577
578 rcm_log_message(RCM_TRACE4, "rsrc_client_remove: %s, %s, %ld\n",
579 client->alias, client->module->name, client->pid);
580
581 tmp = *list;
582 while (tmp) {
583 if (client != tmp) {
584 prev = tmp;
585 tmp = tmp->next;
586 continue;
587 }
588 if (prev) {
589 prev->next = tmp->next;
590 } else {
591 *list = tmp->next;
592 }
593 tmp->next = NULL;
594 rsrc_clients_free(tmp);
595 return;
596 }
597 }
598
599 /* Free a list of clients. Called from cleanup thread only */
600 static void
601 rsrc_clients_free(client_t *list)
602 {
603 client_t *client = list;
604
605 while (client) {
606
607 /*
608 * Note that the rcm daemon is single threaded while
609 * executing this routine. So there is no need to acquire
610 * rcm_req_lock here while dequeuing.
611 */
612 rcm_dequeue(&client->queue);
613
614 if (client->module) {
615 cli_module_rele(client->module);
616 }
617 list = client->next;
618 if (client->alias) {
619 free(client->alias);
620 }
621 free(client);
622 client = list;
623 }
624 }
625
626 /*
627 * Invoke a callback into a single client.
628 * This is the core of the rcm_mod_ops interface.
629 */
630 static int
631 rsrc_client_action(client_t *client, int cmd, void *arg)
632 {
633 int rval = RCM_SUCCESS;
634 char *dummy_error = NULL;
635 char *error = NULL;
636 char *info = NULL;
637 rcm_handle_t *hdl;
638 nvlist_t *client_props = NULL;
639 rcm_info_t *depend_info = NULL;
640 struct rcm_mod_ops *ops = client->module->modops;
641 tree_walk_arg_t *targ = (tree_walk_arg_t *)arg;
642
643 rcm_log_message(RCM_TRACE4,
644 "rsrc_client_action: %s, %s, cmd=%d, flag=0x%x\n", client->alias,
645 client->module->name, cmd, targ->flag);
646
647 /*
648 * Create a per-operation handle and increment seq_num by 1 so we will
649 * know if a module uses this handle to call back into the rcm_daemon.
650 */
651 hdl = rcm_handle_alloc(client->module);
652 hdl->seq_num = targ->seq_num + 1;
653
654 /*
655 * Filter out operations for which the client didn't register.
656 */
657 switch (cmd) {
658 case CMD_SUSPEND:
659 case CMD_RESUME:
660 case CMD_OFFLINE:
661 case CMD_ONLINE:
662 case CMD_REMOVE:
663 if ((client->flag & RCM_REGISTER_DR) == 0) {
664 rcm_handle_free(hdl);
665 return (RCM_SUCCESS);
666 }
667 break;
668 case CMD_REQUEST_CHANGE:
669 case CMD_NOTIFY_CHANGE:
670 if ((client->flag & RCM_REGISTER_CAPACITY) == 0) {
671 rcm_handle_free(hdl);
672 return (RCM_SUCCESS);
673 }
674 break;
675 case CMD_EVENT:
676 if ((client->flag & RCM_REGISTER_EVENT) == 0) {
677 rcm_handle_free(hdl);
678 return (RCM_SUCCESS);
679 }
680 break;
681 }
682
683 /*
684 * Create nvlist_t for any client-specific properties.
685 */
686 if (errno = nvlist_alloc(&client_props, NV_UNIQUE_NAME, 0)) {
687 rcm_log_message(RCM_ERROR,
688 "client action failed (nvlist_alloc=%s)\n",
689 strerror(errno));
690 rcmd_exit(errno);
691 }
692
693 /*
694 * Process the operation via a callback to the client module.
695 */
696 switch (cmd) {
697 case CMD_GETINFO:
698 rval = call_getinfo(ops, hdl, client->alias, client->pid,
699 targ->flag, &info, &error, client_props, &depend_info);
700 break;
701
702 case CMD_SUSPEND:
703 if (((targ->flag & RCM_QUERY_CANCEL) == 0) &&
704 (client->state == RCM_STATE_SUSPEND)) {
705 break;
706 }
707
708 if ((targ->flag & RCM_QUERY) == 0) {
709 rcm_log_message(RCM_DEBUG, "suspending %s\n",
710 client->alias);
711 } else if ((targ->flag & RCM_QUERY_CANCEL) == 0) {
712 rcm_log_message(RCM_DEBUG, "suspend query %s\n",
713 client->alias);
714 } else {
715 rcm_log_message(RCM_DEBUG,
716 "suspend query %s cancelled\n", client->alias);
717 }
718
719 /*
720 * Update the client's state before the operation.
721 * If this is a cancelled query, then updating the state is
722 * the only thing that needs to be done, so break afterwards.
723 */
724 if ((targ->flag & RCM_QUERY) == 0) {
725 client->state = RCM_STATE_SUSPENDING;
726 } else if ((targ->flag & RCM_QUERY_CANCEL) == 0) {
727 client->state = RCM_STATE_SUSPEND_QUERYING;
728 } else {
729 client->state = RCM_STATE_ONLINE;
730 break;
731 }
732
733 rval = ops->rcmop_request_suspend(hdl, client->alias,
734 client->pid, targ->interval, targ->flag, &error,
735 &depend_info);
736
737 /* Update the client's state after the operation. */
738 if ((targ->flag & RCM_QUERY) == 0) {
739 if (rval == RCM_SUCCESS) {
740 client->state = RCM_STATE_SUSPEND;
741 } else {
742 client->state = RCM_STATE_SUSPEND_FAIL;
743 }
744 } else {
745 if (rval == RCM_SUCCESS) {
746 client->state = RCM_STATE_SUSPEND_QUERY;
747 } else {
748 client->state = RCM_STATE_SUSPEND_QUERY_FAIL;
749 }
750 }
751 break;
752
753 case CMD_RESUME:
754 if (client->state == RCM_STATE_ONLINE) {
755 break;
756 }
757 client->state = RCM_STATE_RESUMING;
758 rval = ops->rcmop_notify_resume(hdl, client->alias, client->pid,
759 targ->flag, &error, &depend_info);
760
761 /* online state is unconditional */
762 client->state = RCM_STATE_ONLINE;
763 break;
764
765 case CMD_OFFLINE:
766 if (((targ->flag & RCM_QUERY_CANCEL) == 0) &&
767 (client->state == RCM_STATE_OFFLINE)) {
768 break;
769 }
770
771 if ((targ->flag & RCM_QUERY) == 0) {
772 rcm_log_message(RCM_DEBUG, "offlining %s\n",
773 client->alias);
774 } else if ((targ->flag & RCM_QUERY_CANCEL) == 0) {
775 rcm_log_message(RCM_DEBUG, "offline query %s\n",
776 client->alias);
777 } else {
778 rcm_log_message(RCM_DEBUG,
779 "offline query %s cancelled\n", client->alias);
780 }
781
782 /*
783 * Update the client's state before the operation.
784 * If this is a cancelled query, then updating the state is
785 * the only thing that needs to be done, so break afterwards.
786 */
787 if ((targ->flag & RCM_QUERY) == 0) {
788 client->state = RCM_STATE_OFFLINING;
789 } else if ((targ->flag & RCM_QUERY_CANCEL) == 0) {
790 client->state = RCM_STATE_OFFLINE_QUERYING;
791 } else {
792 client->state = RCM_STATE_ONLINE;
793 break;
794 }
795
796 rval = ops->rcmop_request_offline(hdl, client->alias,
797 client->pid, targ->flag, &error, &depend_info);
798
799 /*
800 * If this is a retire operation and we managed to call
801 * into at least one client, set retcode to RCM_SUCCESS to
802 * indicate that retire has been subject to constraints
803 * This retcode will be further modified by actual return
804 * code.
805 */
806 if ((targ->flag & RCM_RETIRE_REQUEST) &&
807 (targ->retcode == RCM_NO_CONSTRAINT)) {
808 rcm_log_message(RCM_DEBUG,
809 "at least 1 client, constraint applied: %s\n",
810 client->alias);
811 targ->retcode = RCM_SUCCESS;
812 }
813
814 /* Update the client's state after the operation. */
815 if ((targ->flag & RCM_QUERY) == 0) {
816 if (rval == RCM_SUCCESS) {
817 client->state = RCM_STATE_OFFLINE;
818 } else {
819 client->state = RCM_STATE_OFFLINE_FAIL;
820 }
821 } else {
822 if (rval == RCM_SUCCESS) {
823 client->state = RCM_STATE_OFFLINE_QUERY;
824 } else {
825 client->state = RCM_STATE_OFFLINE_QUERY_FAIL;
826 }
827 }
828 break;
829
830 case CMD_ONLINE:
831 if (client->state == RCM_STATE_ONLINE) {
832 break;
833 }
834
835 rcm_log_message(RCM_DEBUG, "onlining %s\n", client->alias);
836
837 client->state = RCM_STATE_ONLINING;
838 rval = ops->rcmop_notify_online(hdl, client->alias, client->pid,
839 targ->flag, &error, &depend_info);
840 client->state = RCM_STATE_ONLINE;
841 break;
842
843 case CMD_REMOVE:
844 rcm_log_message(RCM_DEBUG, "removing %s\n", client->alias);
845 client->state = RCM_STATE_REMOVING;
846 rval = ops->rcmop_notify_remove(hdl, client->alias, client->pid,
847 targ->flag, &error, &depend_info);
848 client->state = RCM_STATE_REMOVE;
849 break;
850
851 case CMD_REQUEST_CHANGE:
852 rcm_log_message(RCM_DEBUG, "requesting state change of %s\n",
853 client->alias);
854 if (ops->rcmop_request_capacity_change)
855 rval = ops->rcmop_request_capacity_change(hdl,
856 client->alias, client->pid, targ->flag, targ->nvl,
857 &error, &depend_info);
858 break;
859
860 case CMD_NOTIFY_CHANGE:
861 rcm_log_message(RCM_DEBUG, "requesting state change of %s\n",
862 client->alias);
863 if (ops->rcmop_notify_capacity_change)
864 rval = ops->rcmop_notify_capacity_change(hdl,
865 client->alias, client->pid, targ->flag, targ->nvl,
866 &error, &depend_info);
867 break;
868
869 case CMD_EVENT:
870 rcm_log_message(RCM_DEBUG, "delivering event to %s\n",
871 client->alias);
872 if (ops->rcmop_notify_event)
873 rval = ops->rcmop_notify_event(hdl, client->alias,
874 client->pid, targ->flag, &error, targ->nvl,
875 &depend_info);
876 break;
877
878 default:
879 rcm_log_message(RCM_ERROR, gettext("unknown command %d\n"),
880 cmd);
881 rval = RCM_FAILURE;
882 break;
883 }
884
885 /* reset error code to the most significant error */
886 if (rval != RCM_SUCCESS)
887 targ->retcode = rval;
888
889 /*
890 * XXX - The code below may produce duplicate rcm_info_t's on error?
891 */
892 if ((cmd != CMD_GETINFO) &&
893 ((rval != RCM_SUCCESS) ||
894 (error != NULL) ||
895 (targ->flag & RCM_SCOPE))) {
896 (void) call_getinfo(ops, hdl, client->alias, client->pid,
897 targ->flag & (~(RCM_INCLUDE_DEPENDENT|RCM_INCLUDE_SUBTREE)),
898 &info, &dummy_error, client_props, &depend_info);
899 if (dummy_error)
900 (void) free(dummy_error);
901 } else if (cmd != CMD_GETINFO) {
902 nvlist_free(client_props);
903 client_props = NULL;
904 }
905
906 if (client_props) {
907 add_busy_rsrc_to_list(client->alias, client->pid, client->state,
908 targ->seq_num, client->module->name, info, error,
909 client_props, targ->info);
910 nvlist_free(client_props);
911 }
912
913 if (info)
914 (void) free(info);
915 if (error)
916 (void) free(error);
917
918 if (depend_info) {
919 if (targ->info) {
920 (void) rcm_append_info(targ->info, depend_info);
921 } else {
922 rcm_free_info(depend_info);
923 }
924 }
925
926 rcm_handle_free(hdl);
927 return (rval);
928 }
929
930 /*
931 * invoke a callback into a list of clients; return RCM_SUCCESS if all succeed
932 */
933 int
934 rsrc_client_action_list(client_t *list, int cmd, void *arg)
935 {
936 int error, rval = RCM_SUCCESS;
937 tree_walk_arg_t *targ = (tree_walk_arg_t *)arg;
938
939 while (list) {
940 client_t *client = list;
941 list = client->next;
942
943 /*
944 * Make offline idempotent in the retire
945 * case
946 */
947 if ((targ->flag & RCM_RETIRE_REQUEST) &&
948 client->state == RCM_STATE_REMOVE) {
949 client->state = RCM_STATE_ONLINE;
950 rcm_log_message(RCM_DEBUG, "RETIRE: idempotent client "
951 "state: REMOVE -> ONLINE: %s\n", client->alias);
952 }
953
954 if (client->state == RCM_STATE_REMOVE)
955 continue;
956
957 error = rsrc_client_action(client, cmd, arg);
958 if (error != RCM_SUCCESS) {
959 rval = error;
960 }
961 }
962
963 return (rval);
964 }
965
966 /*
967 * Node related operations:
968 *
969 * rn_alloc, rn_free, rn_find_child,
970 * rn_get_child, rn_get_sibling,
971 * rsrc_node_find, rsrc_node_add_user, rsrc_node_remove_user,
972 */
973
974 /* Allocate node based on a logical or physical name */
975 static rsrc_node_t *
976 rn_alloc(char *name, int type)
977 {
978 rsrc_node_t *node;
979
980 rcm_log_message(RCM_TRACE4, "rn_alloc(%s, %d)\n", name, type);
981
982 node = s_calloc(1, sizeof (*node));
983 node->name = s_strdup(name);
984 node->type = type;
985
986 return (node);
987 }
988
989 /*
990 * Free node along with its siblings and children
991 */
992 static void
993 rn_free(rsrc_node_t *node)
994 {
995 if (node == NULL) {
996 return;
997 }
998
999 if (node->child) {
1000 rn_free(node->child);
1001 }
1002
1003 if (node->sibling) {
1004 rn_free(node->sibling);
1005 }
1006
1007 rsrc_clients_free(node->users);
1008 free(node->name);
1009 free(node);
1010 }
1011
1012 /*
1013 * Find next sibling
1014 */
1015 static rsrc_node_t *
1016 rn_get_sibling(rsrc_node_t *node)
1017 {
1018 return (node->sibling);
1019 }
1020
1021 /*
1022 * Find first child
1023 */
1024 static rsrc_node_t *
1025 rn_get_child(rsrc_node_t *node)
1026 {
1027 return (node->child);
1028 }
1029
1030 /*
1031 * Find child named childname. Create it if flag is RSRC_NODE_CREATE
1032 */
1033 static rsrc_node_t *
1034 rn_find_child(rsrc_node_t *parent, char *childname, int flag, int type)
1035 {
1036 rsrc_node_t *child = parent->child;
1037 rsrc_node_t *new, *prev = NULL;
1038
1039 rcm_log_message(RCM_TRACE4,
1040 "rn_find_child(parent=%s, child=%s, 0x%x, %d)\n",
1041 parent->name, childname, flag, type);
1042
1043 /*
1044 * Children are ordered based on strcmp.
1045 */
1046 while (child && (strcmp(child->name, childname) < 0)) {
1047 prev = child;
1048 child = child->sibling;
1049 }
1050
1051 if (child && (strcmp(child->name, childname) == 0)) {
1052 return (child);
1053 }
1054
1055 if (flag != RSRC_NODE_CREATE)
1056 return (NULL);
1057
1058 new = rn_alloc(childname, type);
1059 new->parent = parent;
1060 new->sibling = child;
1061
1062 /*
1063 * Set this linkage last so we don't break ongoing operations.
1064 *
1065 * N.B. Assume setting a pointer is an atomic operation.
1066 */
1067 if (prev == NULL) {
1068 parent->child = new;
1069 } else {
1070 prev->sibling = new;
1071 }
1072
1073 return (new);
1074 }
1075
1076 /*
1077 * Pathname-related helper functions
1078 */
1079 static void
1080 pn_preprocess(char *pathname, int type)
1081 {
1082 char *tmp;
1083
1084 if (type != RSRC_TYPE_DEVICE)
1085 return;
1086
1087 /*
1088 * For devices, convert ':' to '/' (treat minor nodes as children)
1089 */
1090 tmp = strchr(pathname, ':');
1091 if (tmp == NULL)
1092 return;
1093
1094 *tmp = '/';
1095 }
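/*
 * For example (illustrative): "/pci@1f,0/ide@d/dad@0,0:a" becomes
 * "/pci@1f,0/ide@d/dad@0,0/a", so the minor node gets its own tree node.
 */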
1096
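/*
 * Return the next '/'-separated component of pathname, terminating it in
 * place and setting *lasts to the remainder (strtok_r style). Returns NULL
 * when no component remains.
 */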
1097 static char *
1098 pn_getnextcomp(char *pathname, char **lasts)
1099 {
1100 char *slash;
1101
1102 if (pathname == NULL)
1103 return (NULL);
1104
1105 /* skip leading slashes */
1106 while (*pathname == '/')
1107 ++pathname;
1108
1109 if (*pathname == '\0')
1110 return (NULL);
1111
1112 slash = strchr(pathname, '/');
1113 if (slash != NULL) {
1114 *slash = '\0';
1115 *lasts = slash + 1;
1116 } else {
1117 *lasts = NULL;
1118 }
1119
1120 return (pathname);
1121 }
1122
1123 /*
1124 * Find a node in tree based on device, which is the physical pathname
1125 * of the form /sbus@.../esp@.../sd@...
1126 */
1127 int
1128 rsrc_node_find(char *rsrcname, int flag, rsrc_node_t **nodep)
1129 {
1130 char *pathname, *nodename, *lasts;
1131 rsrc_node_t *node;
1132 int type;
1133
1134 rcm_log_message(RCM_TRACE4, "rn_node_find(%s, 0x%x)\n", rsrcname, flag);
1135
1136 /*
1137 * For RSRC_TYPE_ABSTRACT, look under /ABSTRACT. For other types,
1138 * look under /SYSTEM.
1139 */
1140 pathname = resolve_name(rsrcname);
1141 if (pathname == NULL)
1142 return (EINVAL);
1143
1144 type = rsrc_get_type(pathname);
1145 switch (type) {
1146 case RSRC_TYPE_DEVICE:
1147 case RSRC_TYPE_NORMAL:
1148 node = rn_find_child(rsrc_root, "SYSTEM", RSRC_NODE_CREATE,
1149 RSRC_TYPE_NORMAL);
1150 break;
1151
1152 case RSRC_TYPE_ABSTRACT:
1153 node = rn_find_child(rsrc_root, "ABSTRACT", RSRC_NODE_CREATE,
1154 RSRC_TYPE_NORMAL);
1155 break;
1156
1157 default:
1158 /* just to make sure */
1159 free(pathname);
1160 return (EINVAL);
1161 }
1162
1163 /*
1164 * Walk down the tree one path component at a time, looking up (or, with
1165 * RSRC_NODE_CREATE, creating) the node for each component.
1166 */
1167 pn_preprocess(pathname, type);
1168 lasts = pathname;
1169 while ((nodename = pn_getnextcomp(lasts, &lasts)) != NULL) {
1170 rsrc_node_t *parent = node;
1171 node = rn_find_child(parent, nodename, flag, type);
1172 if (node == NULL) {
1173 assert((flag & RSRC_NODE_CREATE) == 0);
1174 free(pathname);
1175 *nodep = NULL;
1176 return (RCM_SUCCESS);
1177 }
1178 }
1179 free(pathname);
1180 *nodep = node;
1181 return (RCM_SUCCESS);
1182 }
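/*
 * For example (hypothetical device), a name that resolves to
 * "/devices/pci@1f,0/ide@d" is filed under the /SYSTEM subtree, one node per
 * path component (SYSTEM -> devices -> pci@1f,0 -> ide@d); abstract names
 * go under /ABSTRACT.
 */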
1183
1184 /*
1185 * add a usage client to a node
1186 */
1187 /*ARGSUSED*/
1188 int
1189 rsrc_node_add_user(rsrc_node_t *node, char *alias, char *modname, pid_t pid,
1190 uint_t flag)
1191 {
1192 client_t *user;
1193
1194 rcm_log_message(RCM_TRACE3,
1195 "rsrc_node_add_user(%s, %s, %s, %ld, 0x%x)\n",
1196 node->name, alias, modname, pid, flag);
1197
1198 user = rsrc_client_find(modname, pid, &node->users);
1199
1200 /*
1201 * If a client_t already exists, add the registration and return
1202 * success if it's a valid registration request.
1203 *
1204 * Return EALREADY if the resource is already registered.
1205 * This means either the client_t already has the requested
1206 * registration flagged, or that a DR registration was attempted
1207 * on a resource already in use in the DR operations state model.
1208 */
1209 if (user != NULL) {
1210
1211 if (user->flag & (flag & RCM_REGISTER_MASK)) {
1212 return (EALREADY);
1213 }
1214
1215 if ((flag & RCM_REGISTER_DR) &&
1216 (user->state != RCM_STATE_REMOVE)) {
1217 return (EALREADY);
1218 }
1219
1220 user->flag |= (flag & RCM_REGISTER_MASK);
1221 if ((flag & RCM_REGISTER_DR) ||
1222 (user->state == RCM_STATE_REMOVE)) {
1223 user->state = RCM_STATE_ONLINE;
1224 }
1225
1226 return (RCM_SUCCESS);
1227 }
1228
1229 /*
1230 * Otherwise create a new client_t and create a new registration.
1231 */
1232 if ((user = rsrc_client_alloc(alias, modname, pid, flag)) != NULL) {
1233 rsrc_client_add(user, &node->users);
1234 }
1235 if (flag & RCM_FILESYS)
1236 node->type = RSRC_TYPE_FILESYS;
1237
1238 return (RCM_SUCCESS);
1239 }
1240
1241 /*
1242 * remove a usage client of a node
1243 */
1244 int
1245 rsrc_node_remove_user(rsrc_node_t *node, char *modname, pid_t pid, uint_t flag)
1246 {
1247 client_t *user;
1248
1249 rcm_log_message(RCM_TRACE3,
1250 "rsrc_node_remove_user(%s, %s, %ld, 0x%x)\n", node->name, modname,
1251 pid, flag);
1252
1253 user = rsrc_client_find(modname, pid, &node->users);
1254 if ((user == NULL) || (user->state == RCM_STATE_REMOVE)) {
1255 rcm_log_message(RCM_NOTICE, gettext(
1256 "client not registered: module=%s, pid=%d, dev=%s\n"),
1257 modname, pid, node->name);
1258 return (ENOENT);
1259 }
1260
1261 /* Strip off the registration being removed (DR, event, capacity) */
1262 user->flag = user->flag & (~(flag & RCM_REGISTER_MASK));
1263
1264 /*
1265 * Mark the client as removed if all registrations have been removed
1266 */
1267 if ((user->flag & RCM_REGISTER_MASK) == 0)
1268 user->state = RCM_STATE_REMOVE;
1269
1270 return (RCM_SUCCESS);
1271 }
1272
1273 /*
1274 * Tree walking function - rsrc_walk
1275 */
1276
1277 #define MAX_TREE_DEPTH 32
1278
1279 #define RN_WALK_CONTINUE 0
1280 #define RN_WALK_PRUNESIB 1
1281 #define RN_WALK_PRUNECHILD 2
1282 #define RN_WALK_TERMINATE 3
1283
1284 #define EMPTY_STACK(sp) ((sp)->depth == 0)
1285 #define TOP_NODE(sp) ((sp)->node[(sp)->depth - 1])
1286 #define PRUNE_SIB(sp) ((sp)->prunesib[(sp)->depth - 1])
1287 #define PRUNE_CHILD(sp) ((sp)->prunechild[(sp)->depth - 1])
1288 #define POP_STACK(sp) ((sp)->depth)--
1289 #define PUSH_STACK(sp, rn) \
1290 (sp)->node[(sp)->depth] = (rn); \
1291 (sp)->prunesib[(sp)->depth] = 0; \
1292 (sp)->prunechild[(sp)->depth] = 0; \
1293 ((sp)->depth)++
1294
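/*
 * Explicit stack used by rsrc_walk() to traverse the resource tree
 * iteratively; the supported tree depth is bounded by MAX_TREE_DEPTH.
 */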
1295 struct rn_stack {
1296 rsrc_node_t *node[MAX_TREE_DEPTH];
1297 char prunesib[MAX_TREE_DEPTH];
1298 char prunechild[MAX_TREE_DEPTH];
1299 int depth;
1300 };
1301
1302 /* walk one node and update the node stack */
1303 /*ARGSUSED*/
1304 static void
1305 walk_one_node(struct rn_stack *sp, void *arg,
1306 int (*node_callback)(rsrc_node_t *, void *))
1307 {
1308 int prunesib;
1309 rsrc_node_t *child, *sibling;
1310 rsrc_node_t *node = TOP_NODE(sp);
1311
1312 rcm_log_message(RCM_TRACE4, "walk_one_node(%s)\n", node->name);
1313
1314 switch (node_callback(node, arg)) {
1315 case RN_WALK_TERMINATE:
1316 POP_STACK(sp);
1317 while (!EMPTY_STACK(sp)) {
1318 node = TOP_NODE(sp);
1319 POP_STACK(sp);
1320 }
1321 return;
1322
1323 case RN_WALK_PRUNESIB:
1324 PRUNE_SIB(sp) = 1;
1325 break;
1326
1327 case RN_WALK_PRUNECHILD:
1328 PRUNE_CHILD(sp) = 1;
1329 break;
1330
1331 case RN_WALK_CONTINUE:
1332 default:
1333 break;
1334 }
1335
1336 /*
1337 * Push child on the stack
1338 */
1339 if (!PRUNE_CHILD(sp) && (child = rn_get_child(node)) != NULL) {
1340 PUSH_STACK(sp, child);
1341 return;
1342 }
1343
1344 /*
1345 * Pop the stack till a node's sibling can be pushed
1346 */
1347 prunesib = PRUNE_SIB(sp);
1348 POP_STACK(sp);
1349 while (!EMPTY_STACK(sp) &&
1350 (prunesib || (sibling = rn_get_sibling(node)) == NULL)) {
1351 node = TOP_NODE(sp);
1352 prunesib = PRUNE_SIB(sp);
1353 POP_STACK(sp);
1354 }
1355
1356 if (EMPTY_STACK(sp)) {
1357 return;
1358 }
1359
1360 /*
1361 * push sibling onto the stack
1362 */
1363 PUSH_STACK(sp, sibling);
1364 }
1365
1366 /*
1367 * walk tree rooted at root in child-first order
1368 */
1369 static void
1370 rsrc_walk(rsrc_node_t *root, void *arg,
1371 int (*node_callback)(rsrc_node_t *, void *))
1372 {
1373 struct rn_stack stack;
1374
1375 rcm_log_message(RCM_TRACE3, "rsrc_walk(%s)\n", root->name);
1376
1377 /*
1378 * Push root on stack and walk in child-first order
1379 */
1380 stack.depth = 0;
1381 PUSH_STACK(&stack, root);
1382 PRUNE_SIB(&stack) = 1;
1383
1384 while (!EMPTY_STACK(&stack)) {
1385 walk_one_node(&stack, arg, node_callback);
1386 }
1387 }
1388
1389 /*
1390 * Callback for a command action on a node
1391 */
1392 static int
1393 node_action(rsrc_node_t *node, void *arg)
1394 {
1395 tree_walk_arg_t *targ = (tree_walk_arg_t *)arg;
1396 uint_t flag = targ->flag;
1397
1398 rcm_log_message(RCM_TRACE4, "node_action(%s)\n", node->name);
1399
1400 /*
1401 * If flag indicates operation on a filesystem, we don't callback on
1402 * the filesystem root to avoid infinite recursion on filesystem module.
1403 *
1404 * N.B. Such request should only come from filesystem RCM module.
1405 */
1406 if (flag & RCM_FILESYS) {
1407 assert(node->type == RSRC_TYPE_FILESYS);
1408 targ->flag &= ~RCM_FILESYS;
1409 return (RN_WALK_CONTINUE);
1410 }
1411
1412 /*
1413 * Execute state change callback
1414 */
1415 (void) rsrc_client_action_list(node->users, targ->cmd, arg);
1416
1417 /*
1418 * Upon hitting a filesys root, prune children.
1419 * The filesys module should have taken care of
1420 * children by now.
1421 */
1422 if (node->type == RSRC_TYPE_FILESYS)
1423 return (RN_WALK_PRUNECHILD);
1424
1425 return (RN_WALK_CONTINUE);
1426 }
1427
1428 /*
1429 * Execute a command on a subtree under root.
1430 */
1431 int
1432 rsrc_tree_action(rsrc_node_t *root, int cmd, tree_walk_arg_t *arg)
1433 {
1434 rcm_log_message(RCM_TRACE2, "tree_action(%s, %d)\n", root->name, cmd);
1435
1436 arg->cmd = cmd;
1437
1438 /*
1439 * If RCM_RETIRE_REQUEST is set, just walk one node and preset
1440 * retcode to NO_CONSTRAINT
1441 */
1442 if (arg->flag & RCM_RETIRE_REQUEST) {
1443 rcm_log_message(RCM_TRACE1, "tree_action: RETIRE_REQ: walking "
1444 "only root node: %s\n", root->name);
1445 arg->retcode = RCM_NO_CONSTRAINT;
1446 (void) node_action(root, arg);
1447 } else {
1448 arg->retcode = RCM_SUCCESS;
1449 rsrc_walk(root, (void *)arg, node_action);
1450 }
1451
1452 return (arg->retcode);
1453 }
1454
1455 /*
1456 * Get info on current registrations
1457 */
1458 int
1459 rsrc_usage_info(char **rsrcnames, uint_t flag, int seq_num, rcm_info_t **info)
1460 {
1461 rsrc_node_t *node;
1462 rcm_info_t *result = NULL;
1463 tree_walk_arg_t arg;
1464 int initial_req;
1465 int rv = RCM_SUCCESS;
1466 int i;
1467
1468 arg.flag = flag;
1469 arg.info = &result;
1470 arg.seq_num = seq_num;
1471
1472 for (i = 0; rsrcnames[i] != NULL; i++) {
1473
1474 rcm_log_message(RCM_TRACE2, "rsrc_usage_info(%s, 0x%x, %d)\n",
1475 rsrcnames[i], flag, seq_num);
1476
1477 if (flag & RCM_INCLUDE_DEPENDENT) {
1478 initial_req = ((seq_num & SEQ_NUM_MASK) == 0);
1479
1480 /*
1481 * if redundant request, skip the operation
1482 */
1483 if (info_req_add(rsrcnames[i], flag, seq_num) != 0) {
1484 continue;
1485 }
1486 }
1487
1488 rv = rsrc_node_find(rsrcnames[i], 0, &node);
1489 if ((rv != RCM_SUCCESS) || (node == NULL)) {
1490 if ((flag & RCM_INCLUDE_DEPENDENT) && initial_req)
1491 info_req_remove(seq_num);
1492 continue;
1493 }
1494
1495 /*
1496 * Based on RCM_INCLUDE_SUBTREE flag, query either the subtree
1497 * or just the node.
1498 */
1499 if (flag & RCM_INCLUDE_SUBTREE) {
1500 (void) rsrc_tree_action(node, CMD_GETINFO, &arg);
1501 } else {
1502 arg.cmd = CMD_GETINFO;
1503 (void) node_action(node, (void *)&arg);
1504 }
1505
1506 if ((flag & RCM_INCLUDE_DEPENDENT) && initial_req)
1507 info_req_remove(seq_num);
1508 }
1509
1510 out:
1511 (void) rcm_append_info(info, result);
1512 return (rv);
1513 }
1514
1515 /*
1516 * Get the list of currently loaded modules
1517 */
1518 rcm_info_t *
1519 rsrc_mod_info()
1520 {
1521 module_t *mod;
1522 rcm_info_t *info = NULL;
1523
1524 (void) mutex_lock(&mod_lock);
1525 mod = module_head;
1526 while (mod) {
1527 char *modinfo = s_strdup(module_info(mod));
1528 add_busy_rsrc_to_list("dummy", 0, 0, 0, mod->name,
1529 modinfo, NULL, NULL, &info);
1530 mod = mod->next;
1531 }
1532 (void) mutex_unlock(&mod_lock);
1533
1534 return (info);
1535 }
1536
1537 /*
1538 * Initialize resource map - load all modules
1539 */
1540 void
1541 rcmd_db_init()
1542 {
1543 char *tmp;
1544 DIR *mod_dir;
1545 struct dirent *entp;
1546 int i;
1547 char *dir_name;
1548 int rcm_script;
1549
1550 rcm_log_message(RCM_DEBUG, "rcmd_db_init(): initialize database\n");
1551
1552 if (script_main_init() == -1)
1553 rcmd_exit(errno);
1554
1555 rsrc_root = rn_alloc("/", RSRC_TYPE_NORMAL);
1556
1557 for (i = 0; (dir_name = rcm_dir(i, &rcm_script)) != NULL; i++) {
1558
1559 if ((mod_dir = opendir(dir_name)) == NULL) {
1560 continue; /* try next directory */
1561 }
1562
1563 rcm_log_message(RCM_TRACE2, "search directory %s\n", dir_name);
1564
1565 while ((entp = readdir(mod_dir)) != NULL) {
1566 module_t *module;
1567
1568 if (strcmp(entp->d_name, ".") == 0 ||
1569 strcmp(entp->d_name, "..") == 0)
1570 continue;
1571
1572 if (rcm_script == 0) {
1573 /* rcm module */
1574 if (((tmp = strstr(entp->d_name,
1575 RCM_MODULE_SUFFIX)) == NULL) ||
1576 (tmp[strlen(RCM_MODULE_SUFFIX)] != '\0')) {
1577 continue;
1578 }
1579 }
1580
1581 module = cli_module_hold(entp->d_name);
1582 if (module == NULL) {
1583 if (rcm_script == 0)
1584 rcm_log_message(RCM_ERROR,
1585 gettext("%s: failed to load\n"),
1586 entp->d_name);
1587 continue;
1588 }
1589
1590 if (module->ref_count == MOD_REFCNT_INIT) {
1591 /*
1592 * ask the module to register for resources the first time
1593 */
1594 module_attach(module);
1595 }
1596 cli_module_rele(module);
1597 }
1598 (void) closedir(mod_dir);
1599 }
1600
1601 rcmd_db_print();
1602 }
1603
1604 /*
1605 * sync resource map - ask all modules to register again
1606 */
1607 void
1608 rcmd_db_sync()
1609 {
1610 static time_t sync_time = (time_t)-1;
1611 const time_t interval = 5; /* resync at most every 5 sec */
1612
1613 module_t *mod;
1614 time_t curr = time(NULL);
1615
1616 if ((sync_time != (time_t)-1) && (curr - sync_time < interval))
1617 return;
1618
1619 sync_time = curr;
1620 (void) mutex_lock(&mod_lock);
1621 mod = module_head;
1622 while (mod) {
1623 /*
1624 * Hold module by incrementing ref count and release
1625 * mod_lock to avoid deadlock, since rcmop_register()
1626 * may callback into the daemon and request mod_lock.
1627 */
1628 mod->ref_count++;
1629 (void) mutex_unlock(&mod_lock);
1630
1631 mod->modops->rcmop_register(mod->rcmhandle);
1632
1633 (void) mutex_lock(&mod_lock);
1634 mod->ref_count--;
1635 mod = mod->next;
1636 }
1637 (void) mutex_unlock(&mod_lock);
1638 }
1639
1640 /*
1641 * Determine if a process is alive
1642 */
1643 int
1644 proc_exist(pid_t pid)
1645 {
1646 char path[64];
1647 const char *procfs = "/proc";
1648 struct stat sb;
1649
1650 if (pid == (pid_t)0) {
1651 return (1);
1652 }
1653
1654 (void) snprintf(path, sizeof (path), "%s/%ld", procfs, pid);
1655 return (stat(path, &sb) == 0);
1656 }
1657
1658 /*
1659 * Clean up the client list
1660 *
1661 * N.B. This routine runs in a single-threaded environment only. It is only
1662 * called by the cleanup thread, which never runs in parallel with other
1663 * threads.
1664 */
1665 static void
1666 clean_client_list(client_t **listp)
1667 {
1668 client_t *client = *listp;
1669
1670 /*
1671 * Cleanup notification clients for which pid no longer exists
1672 */
1673 while (client) {
1674 if ((client->state != RCM_STATE_REMOVE) &&
1675 proc_exist(client->pid)) {
1676 listp = &client->next;
1677 client = *listp;
1678 continue;
1679 }
1680
1681 /*
1682 * Destroy this client_t. rsrc_client_remove updates
1683 * listp to point to the next client.
1684 */
1685 rsrc_client_remove(client, listp);
1686 client = *listp;
1687 }
1688 }
1689
1690 /*ARGSUSED*/
1691 static int
1692 clean_node(rsrc_node_t *node, void *arg)
1693 {
1694 rcm_log_message(RCM_TRACE4, "clean_node(%s)\n", node->name);
1695
1696 clean_client_list(&node->users);
1697
1698 return (RN_WALK_CONTINUE);
1699 }
1700
1701 static void
1702 clean_rsrc_tree()
1703 {
1704 rcm_log_message(RCM_TRACE4,
1705 "clean_rsrc_tree(): delete stale dr clients\n");
1706
1707 rsrc_walk(rsrc_root, NULL, clean_node);
1708 }
1709
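/*
 * Body of the cleanup thread: wait until cleanup is requested, quiesce the
 * daemon, then purge stale DR requests and stale resource-tree clients.
 */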
1710 static void
1711 db_clean()
1712 {
1713 extern barrier_t barrier;
1714 extern void clean_dr_list();
1715
1716 for (;;) {
1717 (void) mutex_lock(&rcm_req_lock);
1718 start_polling_thread();
1719 (void) mutex_unlock(&rcm_req_lock);
1720
1721 (void) mutex_lock(&barrier.lock);
1722 while (need_cleanup == 0)
1723 (void) cond_wait(&barrier.cv, &barrier.lock);
1724 (void) mutex_unlock(&barrier.lock);
1725
1726 /*
1727 * Make sure all other threads are either blocked or exited.
1728 */
1729 rcmd_set_state(RCMD_CLEANUP);
1730
1731 need_cleanup = 0;
1732
1733 /*
1734 * clean dr_req_list
1735 */
1736 clean_dr_list();
1737
1738 /*
1739 * clean resource tree
1740 */
1741 clean_rsrc_tree();
1742
1743 rcmd_set_state(RCMD_NORMAL);
1744 }
1745 }
1746
1747 void
1748 rcmd_db_clean()
1749 {
1750 rcm_log_message(RCM_DEBUG,
1751 "rcm_db_clean(): launch thread to clean database\n");
1752
1753 if (thr_create(NULL, NULL, (void *(*)(void *))db_clean,
1754 NULL, THR_DETACHED, NULL) != 0) {
1755 rcm_log_message(RCM_WARNING,
1756 gettext("failed to create cleanup thread %s\n"),
1757 strerror(errno));
1758 }
1759 }
1760
1761 /*ARGSUSED*/
1762 static int
1763 print_node(rsrc_node_t *node, void *arg)
1764 {
1765 client_t *user;
1766
1767 rcm_log_message(RCM_DEBUG, "rscname: %s, state = 0x%x\n", node->name);
1768 rcm_log_message(RCM_DEBUG, " users:\n");
1769
1770 if ((user = node->users) == NULL) {
1771 rcm_log_message(RCM_DEBUG, " none\n");
1772 return (RN_WALK_CONTINUE);
1773 }
1774
1775 while (user) {
1776 rcm_log_message(RCM_DEBUG, " %s, %d, %s\n",
1777 user->module->name, user->pid, user->alias);
1778 user = user->next;
1779 }
1780 return (RN_WALK_CONTINUE);
1781 }
1782
1783 static void
1784 rcmd_db_print()
1785 {
1786 module_t *mod;
1787
1788 rcm_log_message(RCM_DEBUG, "modules:\n");
1789 (void) mutex_lock(&mod_lock);
1790 mod = module_head;
1791 while (mod) {
1792 rcm_log_message(RCM_DEBUG, " %s\n", mod->name);
1793 mod = mod->next;
1794 }
1795 (void) mutex_unlock(&mod_lock);
1796
1797 rcm_log_message(RCM_DEBUG, "\nresource tree:\n");
1798
1799 rsrc_walk(rsrc_root, NULL, print_node);
1800
1801 rcm_log_message(RCM_DEBUG, "\n");
1802 }
1803
1804 /*
1805 * Allocate a handle for calling into an RCM module
1806 */
1807 static rcm_handle_t *
1808 rcm_handle_alloc(module_t *module)
1809 {
1810 rcm_handle_t *hdl;
1811
1812 hdl = s_malloc(sizeof (rcm_handle_t));
1813
1814 hdl->modname = module->name;
1815 hdl->pid = 0;
1816 hdl->lrcm_ops = &rcm_ops; /* for callback into daemon directly */
1817 hdl->module = module;
1818
1819 return (hdl);
1820 }
1821
1822 /*
1823 * Free rcm_handle_t
1824 */
1825 static void
1826 rcm_handle_free(rcm_handle_t *handle)
1827 {
1828 free(handle);
1829 }
1830
1831 /*
1832 * helper functions that exit the daemon when memory is exhausted
1833 */
1834 void *
1835 s_malloc(size_t size)
1836 {
1837 void *buf = malloc(size);
1838
1839 if (buf == NULL) {
1840 rcmd_exit(ENOMEM);
1841 }
1842 return (buf);
1843 }
1844
1845 void *
1846 s_calloc(int n, size_t size)
1847 {
1848 void *buf = calloc(n, size);
1849
1850 if (buf == NULL) {
1851 rcmd_exit(ENOMEM);
1852 }
1853 return (buf);
1854 }
1855
1856 void *
1857 s_realloc(void *ptr, size_t size)
1858 {
1859 void *new = realloc(ptr, size);
1860
1861 if (new == NULL) {
1862 rcmd_exit(ENOMEM);
1863 }
1864 return (new);
1865 }
1866
1867 char *
1868 s_strdup(const char *str)
1869 {
1870 char *buf = strdup(str);
1871
1872 if (buf == NULL) {
1873 rcmd_exit(ENOMEM);
1874 }
1875 return (buf);
1876 }
1877
1878 /*
1879 * Convert a version 1 ops vector to the current ops vector.
1880 * Fields missing in version 1 are set to NULL.
1881 */
1882 static struct rcm_mod_ops *
1883 modops_from_v1(void *ops_v1)
1884 {
1885 struct rcm_mod_ops *ops;
1886
1887 ops = s_calloc(1, sizeof (struct rcm_mod_ops));
1888 bcopy(ops_v1, ops, sizeof (struct rcm_mod_ops_v1));
1889 return (ops);
1890 }
1891
1892 /* call a module's getinfo routine; detects v1 ops and adjusts the call */
1893 static int
1894 call_getinfo(struct rcm_mod_ops *ops, rcm_handle_t *hdl, char *alias, id_t pid,
1895 uint_t flag, char **info, char **error, nvlist_t *client_props,
1896 rcm_info_t **infop)
1897 {
1898 int rval;
1899 struct rcm_mod_ops_v1 *v1_ops;
1900
1901 if (ops->version == RCM_MOD_OPS_V1) {
1902 v1_ops = (struct rcm_mod_ops_v1 *)ops;
1903 rval = v1_ops->rcmop_get_info(hdl, alias, pid, flag, info,
1904 infop);
1905 if (rval != RCM_SUCCESS && *info != NULL)
1906 *error = strdup(*info);
1907 return (rval);
1908 } else {
1909 return (ops->rcmop_get_info(hdl, alias, pid, flag, info, error,
1910 client_props, infop));
1911 }
1912 }
1913
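/*
 * Simple circular, doubly-linked queue routines. The queue head acts as a
 * sentinel: an empty queue has head->next == head->prev == head.
 */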
1914 void
1915 rcm_init_queue(rcm_queue_t *head)
1916 {
1917 head->next = head->prev = head;
1918 }
1919
1920 void
1921 rcm_enqueue_head(rcm_queue_t *head, rcm_queue_t *element)
1922 {
1923 rcm_enqueue(head, element);
1924 }
1925
1926 void
1927 rcm_enqueue_tail(rcm_queue_t *head, rcm_queue_t *element)
1928 {
1929 rcm_enqueue(head->prev, element);
1930 }
1931
1932 void
1933 rcm_enqueue(rcm_queue_t *list_element, rcm_queue_t *element)
1934 {
1935 element->next = list_element->next;
1936 element->prev = list_element;
1937 element->next->prev = element;
1938 list_element->next = element;
1939 }
1940
1941 rcm_queue_t *
1942 rcm_dequeue_head(rcm_queue_t *head)
1943 {
1944 rcm_queue_t *element = head->next;
1945 rcm_dequeue(element);
1946 return (element);
1947 }
1948
1949 rcm_queue_t *
1950 rcm_dequeue_tail(rcm_queue_t *head)
1951 {
1952 rcm_queue_t *element = head->prev;
1953 rcm_dequeue(element);
1954 return (element);
1955 }
1956
1957 void
1958 rcm_dequeue(rcm_queue_t *element)
1959 {
1960 element->prev->next = element->next;
1961 element->next->prev = element->prev;
1962 element->next = element->prev = NULL;
1963 }
1964