1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 *
21 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
22 * Use is subject to license terms.
23 */
24
25 #include "rcm_impl.h"
26 #include "rcm_module.h"
27
28 /*
29 * Short-circuits unloading of modules with no registrations, so that
30 * they are present during the next db_sync cycle.
31 */
32 #define MOD_REFCNT_INIT 2
33
34 int need_cleanup; /* flag indicating if clean up is needed */
35
36 static mutex_t mod_lock; /* protects module list */
37 static module_t *module_head; /* linked list of modules */
38 static rsrc_node_t *rsrc_root; /* root of all resources */
39
40 /*
41 * Misc help routines
42 */
43 static void rcmd_db_print();
44 static void rcm_handle_free(rcm_handle_t *);
45 static rcm_handle_t *rcm_handle_alloc(module_t *);
46 static void rsrc_clients_free(client_t *);
47 static struct rcm_mod_ops *modops_from_v1(void *);
48 static int call_getinfo(struct rcm_mod_ops *, rcm_handle_t *, char *, id_t,
49 uint_t, char **, char **, nvlist_t *, rcm_info_t **);
50 static int node_action(rsrc_node_t *, void *);
51
52 extern void start_polling_thread();
53
54 /*
55 * translate /dev name to a /devices path
56 *
57 * N.B. This routine can be enhanced to understand network names
58 * and friendly names in the future.
59 */
60 char *
resolve_name(char * alias)61 resolve_name(char *alias)
62 {
63 char *tmp;
64 const char *dev = "/dev/";
65
66 if (strlen(alias) == 0)
67 return (NULL);
68
69 if (strncmp(alias, dev, strlen(dev)) == 0) {
70 /*
71 * Treat /dev/... as a symbolic link
72 */
73 tmp = s_malloc(PATH_MAX);
74 if (realpath(alias, tmp) != NULL) {
75 return (tmp);
76 } else {
77 free(tmp);
78 }
79 /* Fail to resolve /dev/ name, use the name as is */
80 }
81
82 return (s_strdup(alias));
83 }
84
85 /*
86 * Figure out resource type based on "resolved" name
87 *
88 * N.B. This routine does not figure out file system mount points.
89 * This is determined at runtime when filesys module register
90 * with RCM_FILESYS flag.
91 */
92 int
rsrc_get_type(const char * resolved_name)93 rsrc_get_type(const char *resolved_name)
94 {
95 if (resolved_name[0] != '/')
96 return (RSRC_TYPE_ABSTRACT);
97
98 if (strncmp("/devices/", resolved_name, 9) == 0)
99 return (RSRC_TYPE_DEVICE);
100
101 return (RSRC_TYPE_NORMAL);
102 }
103
104 /*
105 * Module operations:
106 * module_load, module_unload, module_info, module_attach, module_detach,
107 * cli_module_hold, cli_module_rele
108 */
109
110 #ifdef ENABLE_MODULE_DETACH
111 /*
112 * call unregister() entry point to allow module to unregister for
113 * resources without getting confused.
114 */
115 static void
module_detach(module_t * module)116 module_detach(module_t *module)
117 {
118 struct rcm_mod_ops *ops = module->modops;
119
120 rcm_log_message(RCM_TRACE2, "module_detach(name=%s)\n", module->name);
121
122 ops->rcmop_unregister(module->rcmhandle);
123 }
124 #endif /* ENABLE_MODULE_DETACH */
125
126 /*
127 * call register() entry point to allow module to register for resources
128 */
129 static void
module_attach(module_t * module)130 module_attach(module_t *module)
131 {
132 struct rcm_mod_ops *ops = module->modops;
133
134 rcm_log_message(RCM_TRACE2, "module_attach(name=%s)\n", module->name);
135
136 if (ops->rcmop_register(module->rcmhandle) != RCM_SUCCESS) {
137 rcm_log_message(RCM_WARNING,
138 gettext("module %s register() failed\n"), module->name);
139 }
140 }
141
142 struct rcm_mod_ops *
module_init(module_t * module)143 module_init(module_t *module)
144 {
145 if (module->dlhandle)
146 /* rcm module */
147 return (module->init());
148 else
149 /* rcm script */
150 return (script_init(module));
151 }
152
/*
 * call rcm_mod_info() entry of module
 */
156 static const char *
module_info(module_t * module)157 module_info(module_t *module)
158 {
159 if (module->dlhandle)
160 /* rcm module */
161 return (module->info());
162 else
163 /* rcm script */
164 return (script_info(module));
165 }
166
167 int
module_fini(module_t * module)168 module_fini(module_t *module)
169 {
170 if (module->dlhandle)
171 /* rcm module */
172 return (module->fini());
173 else
174 /* rcm script */
175 return (script_fini(module));
176 }
177
/*
 * call rcm_mod_fini() entry of module, dlclose module, and free memory
 */
181 static void
module_unload(module_t * module)182 module_unload(module_t *module)
183 {
184 int version = module->modops->version;
185
186 rcm_log_message(RCM_DEBUG, "module_unload(name=%s)\n", module->name);
187
188 (void) module_fini(module);
189
190 rcm_handle_free(module->rcmhandle);
191 free(module->name);
192
193 switch (version) {
194 case RCM_MOD_OPS_V1:
195 /*
196 * Free memory associated with converted ops vector
197 */
198 free(module->modops);
199 break;
200
201 case RCM_MOD_OPS_VERSION:
202 default:
203 break;
204 }
205
206 if (module->dlhandle)
207 rcm_module_close(module->dlhandle);
208
209 free(module);
210 }
211
212 /*
213 * Locate the module, execute rcm_mod_init() and check ops vector version
214 */
215 static module_t *
module_load(char * modname)216 module_load(char *modname)
217 {
218 module_t *module;
219
220 rcm_log_message(RCM_DEBUG, "module_load(name=%s)\n", modname);
221
222 /*
223 * dlopen the module
224 */
225 module = s_calloc(1, sizeof (*module));
226 module->name = s_strdup(modname);
227 module->modops = NULL;
228 rcm_init_queue(&module->client_q);
229
230 if (rcm_is_script(modname) == 0) {
231 /* rcm module */
232 module->dlhandle = rcm_module_open(modname);
233
234 if (module->dlhandle == NULL) {
235 rcm_log_message(RCM_NOTICE,
236 gettext("cannot open module %s\n"), modname);
237 goto fail;
238 }
239
240 /*
241 * dlsym rcm_mod_init/fini/info() entry points
242 */
243 module->init = (struct rcm_mod_ops *(*)())dlsym(
244 module->dlhandle, "rcm_mod_init");
245 module->fini = (int (*)())dlsym(
246 module->dlhandle, "rcm_mod_fini");
247 module->info = (const char *(*)())dlsym(module->dlhandle,
248 "rcm_mod_info");
249 if (module->init == NULL || module->fini == NULL ||
250 module->info == NULL) {
251 rcm_log_message(RCM_ERROR,
252 gettext("missing entries in module %s\n"), modname);
253 goto fail;
254 }
255
256 } else {
257 /* rcm script */
258 module->dlhandle = NULL;
259 module->init = (struct rcm_mod_ops *(*)()) NULL;
260 module->fini = (int (*)()) NULL;
261 module->info = (const char *(*)()) NULL;
262 }
263
264 if ((module->modops = module_init(module)) == NULL) {
265 if (module->dlhandle)
266 rcm_log_message(RCM_ERROR,
267 gettext("cannot init module %s\n"), modname);
268 goto fail;
269 }
270
271 /*
272 * Check ops vector version
273 */
274 switch (module->modops->version) {
275 case RCM_MOD_OPS_V1:
276 module->modops = modops_from_v1((void *)module->modops);
277 break;
278
279 case RCM_MOD_OPS_VERSION:
280 break;
281
282 default:
283 rcm_log_message(RCM_ERROR,
284 gettext("module %s rejected: version %d not supported\n"),
285 modname, module->modops->version);
286 (void) module_fini(module);
287 goto fail;
288 }
289
290 /*
291 * Make sure all fields are set
292 */
293 if ((module->modops->rcmop_register == NULL) ||
294 (module->modops->rcmop_unregister == NULL) ||
295 (module->modops->rcmop_get_info == NULL) ||
296 (module->modops->rcmop_request_suspend == NULL) ||
297 (module->modops->rcmop_notify_resume == NULL) ||
298 (module->modops->rcmop_request_offline == NULL) ||
299 (module->modops->rcmop_notify_online == NULL) ||
300 (module->modops->rcmop_notify_remove == NULL)) {
301 rcm_log_message(RCM_ERROR,
302 gettext("module %s rejected: has NULL ops fields\n"),
303 modname);
304 (void) module_fini(module);
305 goto fail;
306 }
307
308 module->rcmhandle = rcm_handle_alloc(module);
309 return (module);
310
311 fail:
312 if (module->modops && module->modops->version == RCM_MOD_OPS_V1)
313 free(module->modops);
314
315 if (module->dlhandle)
316 rcm_module_close(module->dlhandle);
317
318 free(module->name);
319 free(module);
320 return (NULL);
321 }
322
323 /*
324 * add one to module hold count. load the module if not loaded
325 */
326 static module_t *
cli_module_hold(char * modname)327 cli_module_hold(char *modname)
328 {
329 module_t *module;
330
331 rcm_log_message(RCM_TRACE3, "cli_module_hold(%s)\n", modname);
332
333 (void) mutex_lock(&mod_lock);
334 module = module_head;
335 while (module) {
336 if (strcmp(module->name, modname) == 0) {
337 break;
338 }
339 module = module->next;
340 }
341
342 if (module) {
343 module->ref_count++;
344 (void) mutex_unlock(&mod_lock);
345 return (module);
346 }
347
348 /*
349 * Module not found, attempt to load it
350 */
351 if ((module = module_load(modname)) == NULL) {
352 (void) mutex_unlock(&mod_lock);
353 return (NULL);
354 }
355
356 /*
357 * Hold module and link module into module list
358 */
359 module->ref_count = MOD_REFCNT_INIT;
360 module->next = module_head;
361 module_head = module;
362
363 (void) mutex_unlock(&mod_lock);
364
365 return (module);
366 }
367
368 /*
369 * decrement module hold count. Unload it if no reference
370 */
371 static void
cli_module_rele(module_t * module)372 cli_module_rele(module_t *module)
373 {
374 module_t *curr = module_head, *prev = NULL;
375
376 rcm_log_message(RCM_TRACE3, "cli_module_rele(name=%s)\n", module->name);
377
378 (void) mutex_lock(&mod_lock);
379 if (--(module->ref_count) != 0) {
380 (void) mutex_unlock(&mod_lock);
381 return;
382 }
383
384 rcm_log_message(RCM_TRACE2, "unloading module %s\n", module->name);
385
386 /*
387 * Unlink the module from list
388 */
389 while (curr && (curr != module)) {
390 prev = curr;
391 curr = curr->next;
392 }
393 if (curr == NULL) {
394 rcm_log_message(RCM_ERROR,
395 gettext("Unexpected error: module %s not found.\n"),
396 module->name);
397 } else if (prev == NULL) {
398 module_head = curr->next;
399 } else {
400 prev->next = curr->next;
401 }
402 (void) mutex_unlock(&mod_lock);
403
404 module_unload(module);
405 }
406
407 /*
408 * Gather usage info be passed back to requester. Discard info if user does
409 * not care (list == NULL).
410 */
411 void
add_busy_rsrc_to_list(char * alias,pid_t pid,int state,int seq_num,char * modname,const char * infostr,const char * errstr,nvlist_t * client_props,rcm_info_t ** list)412 add_busy_rsrc_to_list(char *alias, pid_t pid, int state, int seq_num,
413 char *modname, const char *infostr, const char *errstr,
414 nvlist_t *client_props, rcm_info_t **list)
415 {
416 rcm_info_t *info;
417 rcm_info_t *tmp;
418 char *buf = NULL;
419 size_t buflen = 0;
420
421 if (list == NULL) {
422 return;
423 }
424
425 info = s_calloc(1, sizeof (*info));
426 if (errno = nvlist_alloc(&(info->info), NV_UNIQUE_NAME, 0)) {
427 rcm_log_message(RCM_ERROR, "failed (nvlist_alloc=%s).\n",
428 strerror(errno));
429 rcmd_exit(errno);
430 }
431
432 /*LINTED*/
433 if ((errno = nvlist_add_string(info->info, RCM_RSRCNAME, alias)) ||
434 (errno = nvlist_add_int32(info->info, RCM_SEQ_NUM, seq_num)) ||
435 (errno = nvlist_add_int64(info->info, RCM_CLIENT_ID, pid)) ||
436 (errno = nvlist_add_int32(info->info, RCM_RSRCSTATE, state))) {
437 rcm_log_message(RCM_ERROR, "failed (nvlist_add=%s).\n",
438 strerror(errno));
439 rcmd_exit(errno);
440 }
441
442 /*
443 * Daemon calls to add_busy_rsrc_to_list may pass in
444 * error/info. Add these through librcm interfaces.
445 */
446 if (errstr) {
447 rcm_log_message(RCM_TRACE3, "adding error string: %s\n",
448 errstr);
449 if (errno = nvlist_add_string(info->info, RCM_CLIENT_ERROR,
450 (char *)errstr)) {
451 rcm_log_message(RCM_ERROR, "failed (nvlist_add=%s).\n",
452 strerror(errno));
453 rcmd_exit(errno);
454 }
455 }
456
457 if (infostr) {
458 if (errno = nvlist_add_string(info->info, RCM_CLIENT_INFO,
459 (char *)infostr)) {
460 rcm_log_message(RCM_ERROR, "failed (nvlist_add=%s).\n",
461 strerror(errno));
462 rcmd_exit(errno);
463 }
464 }
465
466 if (modname) {
467 if (errno = nvlist_add_string(info->info, RCM_CLIENT_MODNAME,
468 modname)) {
469 rcm_log_message(RCM_ERROR, "failed (nvlist_add=%s).\n",
470 strerror(errno));
471 rcmd_exit(errno);
472 }
473 }
474
475 if (client_props) {
476 if (errno = nvlist_pack(client_props, &buf, &buflen,
477 NV_ENCODE_NATIVE, 0)) {
478 rcm_log_message(RCM_ERROR, "failed (nvlist_pack=%s).\n",
479 strerror(errno));
480 rcmd_exit(errno);
481 }
482 if (errno = nvlist_add_byte_array(info->info,
483 RCM_CLIENT_PROPERTIES, (uchar_t *)buf, buflen)) {
484 rcm_log_message(RCM_ERROR, "failed (nvlist_add=%s).\n",
485 strerror(errno));
486 rcmd_exit(errno);
487 }
488 (void) free(buf);
489 }
490
491
492 /* link info at end of list */
493 if (*list) {
494 tmp = *list;
495 while (tmp->next)
496 tmp = tmp->next;
497 tmp->next = info;
498 } else {
499 *list = info;
500 }
501 }
502
/*
 * Resource client related operations:
 *	rsrc_client_alloc, rsrc_client_find, rsrc_client_add,
 *	rsrc_client_remove, rsrc_client_action, rsrc_client_action_list
 */
508
509 /* Allocate rsrc_client_t structure. Load module if necessary. */
510 /*ARGSUSED*/
511 static client_t *
rsrc_client_alloc(char * alias,char * modname,pid_t pid,uint_t flag)512 rsrc_client_alloc(char *alias, char *modname, pid_t pid, uint_t flag)
513 {
514 client_t *client;
515 module_t *mod;
516
517 assert((alias != NULL) && (modname != NULL));
518
519 rcm_log_message(RCM_TRACE4, "rsrc_client_alloc(%s, %s, %ld)\n",
520 alias, modname, pid);
521
522 if ((mod = cli_module_hold(modname)) == NULL) {
523 return (NULL);
524 }
525
526 client = s_calloc(1, sizeof (client_t));
527 client->module = mod;
528 client->pid = pid;
529 client->alias = s_strdup(alias);
530 client->prv_flags = 0;
531 client->state = RCM_STATE_ONLINE;
532 client->flag = flag;
533
534 /* This queue is protected by rcm_req_lock */
535 rcm_enqueue_tail(&mod->client_q, &client->queue);
536
537 return (client);
538 }
539
540 /* Find client in list matching modname and pid */
541 client_t *
rsrc_client_find(char * modname,pid_t pid,client_t ** list)542 rsrc_client_find(char *modname, pid_t pid, client_t **list)
543 {
544 client_t *client = *list;
545
546 rcm_log_message(RCM_TRACE4, "rsrc_client_find(%s, %ld, %p)\n",
547 modname, pid, (void *)list);
548
549 while (client) {
550 if ((client->pid == pid) &&
551 strcmp(modname, client->module->name) == 0) {
552 break;
553 }
554 client = client->next;
555 }
556 return (client);
557 }
558
559 /* Add a client to client list */
560 static void
rsrc_client_add(client_t * client,client_t ** list)561 rsrc_client_add(client_t *client, client_t **list)
562 {
563 rcm_log_message(RCM_TRACE4, "rsrc_client_add: %s, %s, %ld\n",
564 client->alias, client->module->name, client->pid);
565
566 client->next = *list;
567 *list = client;
568 }
569
570 /* Remove client from list and destroy it */
571 static void
rsrc_client_remove(client_t * client,client_t ** list)572 rsrc_client_remove(client_t *client, client_t **list)
573 {
574 client_t *tmp, *prev = NULL;
575
576 rcm_log_message(RCM_TRACE4, "rsrc_client_remove: %s, %s, %ld\n",
577 client->alias, client->module->name, client->pid);
578
579 tmp = *list;
580 while (tmp) {
581 if (client != tmp) {
582 prev = tmp;
583 tmp = tmp->next;
584 continue;
585 }
586 if (prev) {
587 prev->next = tmp->next;
588 } else {
589 *list = tmp->next;
590 }
591 tmp->next = NULL;
592 rsrc_clients_free(tmp);
593 return;
594 }
595 }
596
597 /* Free a list of clients. Called from cleanup thread only */
598 static void
rsrc_clients_free(client_t * list)599 rsrc_clients_free(client_t *list)
600 {
601 client_t *client = list;
602
603 while (client) {
604
605 /*
606 * Note that the rcm daemon is single threaded while
607 * executing this routine. So there is no need to acquire
608 * rcm_req_lock here while dequeuing.
609 */
610 rcm_dequeue(&client->queue);
611
612 if (client->module) {
613 cli_module_rele(client->module);
614 }
615 list = client->next;
616 if (client->alias) {
617 free(client->alias);
618 }
619 free(client);
620 client = list;
621 }
622 }
623
624 /*
625 * Invoke a callback into a single client
626 * This is the core of rcm_mod_ops interface
627 */
/*
 * arg is a tree_walk_arg_t carrying the operation flags, sequence number
 * and the result-list pointer; returns the module's RCM_* result code.
 */
static int
rsrc_client_action(client_t *client, int cmd, void *arg)
{
	int rval = RCM_SUCCESS;
	char *dummy_error = NULL;	/* discarded error from info-only call */
	char *error = NULL;		/* error string returned by the module */
	char *info = NULL;		/* usage info returned by the module */
	rcm_handle_t *hdl;
	nvlist_t *client_props = NULL;	/* client-specific properties */
	rcm_info_t *depend_info = NULL;	/* info for dependent resources */
	struct rcm_mod_ops *ops = client->module->modops;
	tree_walk_arg_t *targ = (tree_walk_arg_t *)arg;

	rcm_log_message(RCM_TRACE4,
	    "rsrc_client_action: %s, %s, cmd=%d, flag=0x%x\n", client->alias,
	    client->module->name, cmd, targ->flag);

	/*
	 * Create a per-operation handle, increment seq_num by 1 so we will
	 * know if a module uses this handle to callback into rcm_daemon.
	 */
	hdl = rcm_handle_alloc(client->module);
	hdl->seq_num = targ->seq_num + 1;

	/*
	 * Filter out operations for which the client didn't register.
	 */
	switch (cmd) {
	case CMD_SUSPEND:
	case CMD_RESUME:
	case CMD_OFFLINE:
	case CMD_ONLINE:
	case CMD_REMOVE:
		/* DR operations require an RCM_REGISTER_DR registration */
		if ((client->flag & RCM_REGISTER_DR) == 0) {
			rcm_handle_free(hdl);
			return (RCM_SUCCESS);
		}
		break;
	case CMD_REQUEST_CHANGE:
	case CMD_NOTIFY_CHANGE:
		/* capacity operations require RCM_REGISTER_CAPACITY */
		if ((client->flag & RCM_REGISTER_CAPACITY) == 0) {
			rcm_handle_free(hdl);
			return (RCM_SUCCESS);
		}
		break;
	case CMD_EVENT:
		/* event delivery requires RCM_REGISTER_EVENT */
		if ((client->flag & RCM_REGISTER_EVENT) == 0) {
			rcm_handle_free(hdl);
			return (RCM_SUCCESS);
		}
		break;
	}

	/*
	 * Create nvlist_t for any client-specific properties.
	 */
	if (errno = nvlist_alloc(&client_props, NV_UNIQUE_NAME, 0)) {
		rcm_log_message(RCM_ERROR,
		    "client action failed (nvlist_alloc=%s)\n",
		    strerror(errno));
		rcmd_exit(errno);
	}

	/*
	 * Process the operation via a callback to the client module.
	 */
	switch (cmd) {
	case CMD_GETINFO:
		rval = call_getinfo(ops, hdl, client->alias, client->pid,
		    targ->flag, &info, &error, client_props, &depend_info);
		break;

	case CMD_SUSPEND:
		/* already suspended and not cancelling a query: no-op */
		if (((targ->flag & RCM_QUERY_CANCEL) == 0) &&
		    (client->state == RCM_STATE_SUSPEND)) {
			break;
		}

		if ((targ->flag & RCM_QUERY) == 0) {
			rcm_log_message(RCM_DEBUG, "suspending %s\n",
			    client->alias);
		} else if ((targ->flag & RCM_QUERY_CANCEL) == 0) {
			rcm_log_message(RCM_DEBUG, "suspend query %s\n",
			    client->alias);
		} else {
			rcm_log_message(RCM_DEBUG,
			    "suspend query %s cancelled\n", client->alias);
		}

		/*
		 * Update the client's state before the operation.
		 * If this is a cancelled query, then updating the state is
		 * the only thing that needs to be done, so break afterwards.
		 */
		if ((targ->flag & RCM_QUERY) == 0) {
			client->state = RCM_STATE_SUSPENDING;
		} else if ((targ->flag & RCM_QUERY_CANCEL) == 0) {
			client->state = RCM_STATE_SUSPEND_QUERYING;
		} else {
			client->state = RCM_STATE_ONLINE;
			break;
		}

		rval = ops->rcmop_request_suspend(hdl, client->alias,
		    client->pid, targ->interval, targ->flag, &error,
		    &depend_info);

		/* Update the client's state after the operation. */
		if ((targ->flag & RCM_QUERY) == 0) {
			if (rval == RCM_SUCCESS) {
				client->state = RCM_STATE_SUSPEND;
			} else {
				client->state = RCM_STATE_SUSPEND_FAIL;
			}
		} else {
			if (rval == RCM_SUCCESS) {
				client->state = RCM_STATE_SUSPEND_QUERY;
			} else {
				client->state = RCM_STATE_SUSPEND_QUERY_FAIL;
			}
		}
		break;

	case CMD_RESUME:
		if (client->state == RCM_STATE_ONLINE) {
			break;
		}
		client->state = RCM_STATE_RESUMING;
		rval = ops->rcmop_notify_resume(hdl, client->alias, client->pid,
		    targ->flag, &error, &depend_info);

		/* online state is unconditional */
		client->state = RCM_STATE_ONLINE;
		break;

	case CMD_OFFLINE:
		/* already offline and not cancelling a query: no-op */
		if (((targ->flag & RCM_QUERY_CANCEL) == 0) &&
		    (client->state == RCM_STATE_OFFLINE)) {
			break;
		}

		if ((targ->flag & RCM_QUERY) == 0) {
			rcm_log_message(RCM_DEBUG, "offlining %s\n",
			    client->alias);
		} else if ((targ->flag & RCM_QUERY_CANCEL) == 0) {
			rcm_log_message(RCM_DEBUG, "offline query %s\n",
			    client->alias);
		} else {
			rcm_log_message(RCM_DEBUG,
			    "offline query %s cancelled\n", client->alias);
		}

		/*
		 * Update the client's state before the operation.
		 * If this is a cancelled query, then updating the state is
		 * the only thing that needs to be done, so break afterwards.
		 */
		if ((targ->flag & RCM_QUERY) == 0) {
			client->state = RCM_STATE_OFFLINING;
		} else if ((targ->flag & RCM_QUERY_CANCEL) == 0) {
			client->state = RCM_STATE_OFFLINE_QUERYING;
		} else {
			client->state = RCM_STATE_ONLINE;
			break;
		}

		rval = ops->rcmop_request_offline(hdl, client->alias,
		    client->pid, targ->flag, &error, &depend_info);

		/*
		 * If this is a retire operation and we managed to call
		 * into at least one client, set retcode to RCM_SUCCESS to
		 * indicate that retire has been subject to constraints
		 * This retcode will be further modified by actual return
		 * code.
		 */
		if ((targ->flag & RCM_RETIRE_REQUEST) &&
		    (targ->retcode == RCM_NO_CONSTRAINT)) {
			rcm_log_message(RCM_DEBUG,
			    "at least 1 client, constraint applied: %s\n",
			    client->alias);
			targ->retcode = RCM_SUCCESS;
		}

		/* Update the client's state after the operation. */
		if ((targ->flag & RCM_QUERY) == 0) {
			if (rval == RCM_SUCCESS) {
				client->state = RCM_STATE_OFFLINE;
			} else {
				client->state = RCM_STATE_OFFLINE_FAIL;
			}
		} else {
			if (rval == RCM_SUCCESS) {
				client->state = RCM_STATE_OFFLINE_QUERY;
			} else {
				client->state = RCM_STATE_OFFLINE_QUERY_FAIL;
			}
		}
		break;

	case CMD_ONLINE:
		if (client->state == RCM_STATE_ONLINE) {
			break;
		}

		rcm_log_message(RCM_DEBUG, "onlining %s\n", client->alias);

		client->state = RCM_STATE_ONLINING;
		rval = ops->rcmop_notify_online(hdl, client->alias, client->pid,
		    targ->flag, &error, &depend_info);
		client->state = RCM_STATE_ONLINE;
		break;

	case CMD_REMOVE:
		rcm_log_message(RCM_DEBUG, "removing %s\n", client->alias);
		client->state = RCM_STATE_REMOVING;
		rval = ops->rcmop_notify_remove(hdl, client->alias, client->pid,
		    targ->flag, &error, &depend_info);
		client->state = RCM_STATE_REMOVE;
		break;

	case CMD_REQUEST_CHANGE:
		rcm_log_message(RCM_DEBUG, "requesting state change of %s\n",
		    client->alias);
		/* capacity-change callbacks are optional in the ops vector */
		if (ops->rcmop_request_capacity_change)
			rval = ops->rcmop_request_capacity_change(hdl,
			    client->alias, client->pid, targ->flag, targ->nvl,
			    &error, &depend_info);
		break;

	case CMD_NOTIFY_CHANGE:
		rcm_log_message(RCM_DEBUG, "requesting state change of %s\n",
		    client->alias);
		if (ops->rcmop_notify_capacity_change)
			rval = ops->rcmop_notify_capacity_change(hdl,
			    client->alias, client->pid, targ->flag, targ->nvl,
			    &error, &depend_info);
		break;

	case CMD_EVENT:
		rcm_log_message(RCM_DEBUG, "delivering event to %s\n",
		    client->alias);
		if (ops->rcmop_notify_event)
			rval = ops->rcmop_notify_event(hdl, client->alias,
			    client->pid, targ->flag, &error, targ->nvl,
			    &depend_info);
		break;

	default:
		rcm_log_message(RCM_ERROR, gettext("unknown command %d\n"),
		    cmd);
		rval = RCM_FAILURE;
		break;
	}

	/* reset error code to the most significant error */
	if (rval != RCM_SUCCESS)
		targ->retcode = rval;

	/*
	 * XXX - The code below may produce duplicate rcm_info_t's on error?
	 */
	if ((cmd != CMD_GETINFO) &&
	    ((rval != RCM_SUCCESS) ||
	    (error != NULL) ||
	    (targ->flag & RCM_SCOPE))) {
		(void) call_getinfo(ops, hdl, client->alias, client->pid,
		    targ->flag & (~(RCM_INCLUDE_DEPENDENT|RCM_INCLUDE_SUBTREE)),
		    &info, &dummy_error, client_props, &depend_info);
		if (dummy_error)
			(void) free(dummy_error);
	} else if (cmd != CMD_GETINFO) {
		nvlist_free(client_props);
		client_props = NULL;
	}

	/* a surviving client_props means usage info should be reported */
	if (client_props) {
		add_busy_rsrc_to_list(client->alias, client->pid, client->state,
		    targ->seq_num, client->module->name, info, error,
		    client_props, targ->info);
		nvlist_free(client_props);
	}

	if (info)
		(void) free(info);
	if (error)
		(void) free(error);

	/* hand dependent-resource info to the requester, or discard it */
	if (depend_info) {
		if (targ->info) {
			(void) rcm_append_info(targ->info, depend_info);
		} else {
			rcm_free_info(depend_info);
		}
	}

	rcm_handle_free(hdl);
	return (rval);
}
927
928 /*
929 * invoke a callback into a list of clients, return 0 if all success
930 */
931 int
rsrc_client_action_list(client_t * list,int cmd,void * arg)932 rsrc_client_action_list(client_t *list, int cmd, void *arg)
933 {
934 int error, rval = RCM_SUCCESS;
935 tree_walk_arg_t *targ = (tree_walk_arg_t *)arg;
936
937 while (list) {
938 client_t *client = list;
939 list = client->next;
940
941 /*
942 * Make offline idempotent in the retire
943 * case
944 */
945 if ((targ->flag & RCM_RETIRE_REQUEST) &&
946 client->state == RCM_STATE_REMOVE) {
947 client->state = RCM_STATE_ONLINE;
948 rcm_log_message(RCM_DEBUG, "RETIRE: idempotent client "
949 "state: REMOVE -> ONLINE: %s\n", client->alias);
950 }
951
952 if (client->state == RCM_STATE_REMOVE)
953 continue;
954
955 error = rsrc_client_action(client, cmd, arg);
956 if (error != RCM_SUCCESS) {
957 rval = error;
958 }
959 }
960
961 return (rval);
962 }
963
/*
 * Node related operations:
 *
 *	rn_alloc, rn_free, rn_find_child,
 *	rn_get_child, rn_get_sibling,
 *	rsrc_node_find, rsrc_node_add_user, rsrc_node_remove_user,
 */
971
972 /* Allocate node based on a logical or physical name */
973 static rsrc_node_t *
rn_alloc(char * name,int type)974 rn_alloc(char *name, int type)
975 {
976 rsrc_node_t *node;
977
978 rcm_log_message(RCM_TRACE4, "rn_alloc(%s, %d)\n", name, type);
979
980 node = s_calloc(1, sizeof (*node));
981 node->name = s_strdup(name);
982 node->type = type;
983
984 return (node);
985 }
986
987 /*
988 * Free node along with its siblings and children
989 */
990 static void
rn_free(rsrc_node_t * node)991 rn_free(rsrc_node_t *node)
992 {
993 if (node == NULL) {
994 return;
995 }
996
997 if (node->child) {
998 rn_free(node->child);
999 }
1000
1001 if (node->sibling) {
1002 rn_free(node->sibling);
1003 }
1004
1005 rsrc_clients_free(node->users);
1006 free(node->name);
1007 free(node);
1008 }
1009
1010 /*
1011 * Find next sibling
1012 */
1013 static rsrc_node_t *
rn_get_sibling(rsrc_node_t * node)1014 rn_get_sibling(rsrc_node_t *node)
1015 {
1016 return (node->sibling);
1017 }
1018
1019 /*
1020 * Find first child
1021 */
1022 static rsrc_node_t *
rn_get_child(rsrc_node_t * node)1023 rn_get_child(rsrc_node_t *node)
1024 {
1025 return (node->child);
1026 }
1027
/*
 * Find child named childname. Create it if flag is RSRC_NODE_CREATE
 */
1031 static rsrc_node_t *
rn_find_child(rsrc_node_t * parent,char * childname,int flag,int type)1032 rn_find_child(rsrc_node_t *parent, char *childname, int flag, int type)
1033 {
1034 rsrc_node_t *child = parent->child;
1035 rsrc_node_t *new, *prev = NULL;
1036
1037 rcm_log_message(RCM_TRACE4,
1038 "rn_find_child(parent=%s, child=%s, 0x%x, %d)\n",
1039 parent->name, childname, flag, type);
1040
1041 /*
1042 * Children are ordered based on strcmp.
1043 */
1044 while (child && (strcmp(child->name, childname) < 0)) {
1045 prev = child;
1046 child = child->sibling;
1047 }
1048
1049 if (child && (strcmp(child->name, childname) == 0)) {
1050 return (child);
1051 }
1052
1053 if (flag != RSRC_NODE_CREATE)
1054 return (NULL);
1055
1056 new = rn_alloc(childname, type);
1057 new->parent = parent;
1058 new->sibling = child;
1059
1060 /*
1061 * Set this linkage last so we don't break ongoing operations.
1062 *
1063 * N.B. Assume setting a pointer is an atomic operation.
1064 */
1065 if (prev == NULL) {
1066 parent->child = new;
1067 } else {
1068 prev->sibling = new;
1069 }
1070
1071 return (new);
1072 }
1073
1074 /*
1075 * Pathname related help functions
1076 */
1077 static void
pn_preprocess(char * pathname,int type)1078 pn_preprocess(char *pathname, int type)
1079 {
1080 char *tmp;
1081
1082 if (type != RSRC_TYPE_DEVICE)
1083 return;
1084
1085 /*
1086 * For devices, convert ':' to '/' (treat minor nodes and children)
1087 */
1088 tmp = strchr(pathname, ':');
1089 if (tmp == NULL)
1090 return;
1091
1092 *tmp = '/';
1093 }
1094
static char *
pn_getnextcomp(char *pathname, char **lasts)
{
	char *sep;

	if (pathname == NULL)
		return (NULL);

	/* skip any leading '/' separators */
	while (*pathname == '/')
		pathname++;

	if (*pathname == '\0')
		return (NULL);

	/* terminate this component and remember where the rest begins */
	sep = strchr(pathname, '/');
	if (sep == NULL) {
		*lasts = NULL;
	} else {
		*sep = '\0';
		*lasts = sep + 1;
	}

	return (pathname);
}
1120
1121 /*
1122 * Find a node in tree based on device, which is the physical pathname
1123 * of the form /sbus@.../esp@.../sd@...
1124 */
1125 int
rsrc_node_find(char * rsrcname,int flag,rsrc_node_t ** nodep)1126 rsrc_node_find(char *rsrcname, int flag, rsrc_node_t **nodep)
1127 {
1128 char *pathname, *nodename, *lasts;
1129 rsrc_node_t *node;
1130 int type;
1131
1132 rcm_log_message(RCM_TRACE4, "rn_node_find(%s, 0x%x)\n", rsrcname, flag);
1133
1134 /*
1135 * For RSRC_TYPE_ABSTRACT, look under /ABSTRACT. For other types,
1136 * look under /SYSTEM.
1137 */
1138 pathname = resolve_name(rsrcname);
1139 if (pathname == NULL)
1140 return (EINVAL);
1141
1142 type = rsrc_get_type(pathname);
1143 switch (type) {
1144 case RSRC_TYPE_DEVICE:
1145 case RSRC_TYPE_NORMAL:
1146 node = rn_find_child(rsrc_root, "SYSTEM", RSRC_NODE_CREATE,
1147 RSRC_TYPE_NORMAL);
1148 break;
1149
1150 case RSRC_TYPE_ABSTRACT:
1151 node = rn_find_child(rsrc_root, "ABSTRACT", RSRC_NODE_CREATE,
1152 RSRC_TYPE_NORMAL);
1153 break;
1154
1155 default:
1156 /* just to make sure */
1157 free(pathname);
1158 return (EINVAL);
1159 }
1160
1161 /*
1162 * Find position of device within tree. Upon exiting the loop, device
1163 * should be placed between prev and curr.
1164 */
1165 pn_preprocess(pathname, type);
1166 lasts = pathname;
1167 while ((nodename = pn_getnextcomp(lasts, &lasts)) != NULL) {
1168 rsrc_node_t *parent = node;
1169 node = rn_find_child(parent, nodename, flag, type);
1170 if (node == NULL) {
1171 assert((flag & RSRC_NODE_CREATE) == 0);
1172 free(pathname);
1173 *nodep = NULL;
1174 return (RCM_SUCCESS);
1175 }
1176 }
1177 free(pathname);
1178 *nodep = node;
1179 return (RCM_SUCCESS);
1180 }
1181
1182 /*
1183 * add a usage client to a node
1184 */
/*ARGSUSED*/
int
rsrc_node_add_user(rsrc_node_t *node, char *alias, char *modname, pid_t pid,
    uint_t flag)
{
	client_t *user;

	rcm_log_message(RCM_TRACE3,
	    "rsrc_node_add_user(%s, %s, %s, %ld, 0x%x)\n",
	    node->name, alias, modname, pid, flag);

	user = rsrc_client_find(modname, pid, &node->users);

	/*
	 * If a client_t already exists, add the registration and return
	 * success if it's a valid registration request.
	 *
	 * Return EALREADY if the resource is already registered.
	 * This means either the client_t already has the requested
	 * registration flagged, or that a DR registration was attempted
	 * on a resource already in use in the DR operations state model.
	 */
	if (user != NULL) {

		/* the requested registration type is already present */
		if (user->flag & (flag & RCM_REGISTER_MASK)) {
			return (EALREADY);
		}

		/* DR registration only allowed on a removed (stale) client */
		if ((flag & RCM_REGISTER_DR) &&
		    (user->state != RCM_STATE_REMOVE)) {
			return (EALREADY);
		}

		/* record the new registration type on the existing client */
		user->flag |= (flag & RCM_REGISTER_MASK);

		/* resurrect a stale client, or bring a DR client online */
		if ((flag & RCM_REGISTER_DR) ||
		    (user->state == RCM_STATE_REMOVE)) {
			user->state = RCM_STATE_ONLINE;
		}

		return (RCM_SUCCESS);
	}

	/*
	 * Otherwise create a new client_t and create a new registration.
	 * NOTE(review): a NULL return from rsrc_client_alloc() is silently
	 * tolerated here and RCM_SUCCESS is still returned — presumably
	 * allocation failure exits the daemon elsewhere; confirm.
	 */
	if ((user = rsrc_client_alloc(alias, modname, pid, flag)) != NULL) {
		rsrc_client_add(user, &node->users);
	}
	/* a filesystem registration retypes the node itself */
	if (flag & RCM_FILESYS)
		node->type = RSRC_TYPE_FILESYS;

	return (RCM_SUCCESS);
}
1238
1239 /*
1240 * remove a usage client of a node
1241 */
1242 int
rsrc_node_remove_user(rsrc_node_t * node,char * modname,pid_t pid,uint_t flag)1243 rsrc_node_remove_user(rsrc_node_t *node, char *modname, pid_t pid, uint_t flag)
1244 {
1245 client_t *user;
1246
1247 rcm_log_message(RCM_TRACE3,
1248 "rsrc_node_remove_user(%s, %s, %ld, 0x%x)\n", node->name, modname,
1249 pid, flag);
1250
1251 user = rsrc_client_find(modname, pid, &node->users);
1252 if ((user == NULL) || (user->state == RCM_STATE_REMOVE)) {
1253 rcm_log_message(RCM_NOTICE, gettext(
1254 "client not registered: module=%s, pid=%d, dev=%s\n"),
1255 modname, pid, node->name);
1256 return (ENOENT);
1257 }
1258
1259 /* Strip off the registration being removed (DR, event, capacity) */
1260 user->flag = user->flag & (~(flag & RCM_REGISTER_MASK));
1261
1262 /*
1263 * Mark the client as removed if all registrations have been removed
1264 */
1265 if ((user->flag & RCM_REGISTER_MASK) == 0)
1266 user->state = RCM_STATE_REMOVE;
1267
1268 return (RCM_SUCCESS);
1269 }
1270
1271 /*
1272 * Tree walking function - rsrc_walk
1273 */
1274
/* maximum nesting of the resource tree the iterative walker supports */
#define	MAX_TREE_DEPTH		32

/*
 * Return codes for walk callbacks: continue the walk, skip the current
 * node's siblings, skip its children, or stop the walk entirely.
 */
#define	RN_WALK_CONTINUE	0
#define	RN_WALK_PRUNESIB	1
#define	RN_WALK_PRUNECHILD	2
#define	RN_WALK_TERMINATE	3

/* accessors for the top-of-stack entry of a struct rn_stack */
#define	EMPTY_STACK(sp)		((sp)->depth == 0)
#define	TOP_NODE(sp)		((sp)->node[(sp)->depth - 1])
#define	PRUNE_SIB(sp)		((sp)->prunesib[(sp)->depth - 1])
#define	PRUNE_CHILD(sp)		((sp)->prunechild[(sp)->depth - 1])
#define	POP_STACK(sp)		((sp)->depth)--
#define	PUSH_STACK(sp, rn)	\
	(sp)->node[(sp)->depth] = (rn);	\
	(sp)->prunesib[(sp)->depth] = 0;	\
	(sp)->prunechild[(sp)->depth] = 0;	\
	((sp)->depth)++

/*
 * Explicit stack used by rsrc_walk()/walk_one_node() to traverse the
 * resource tree without recursion.  Each level tracks the node plus
 * per-level prune flags set from the callback's return code.
 */
struct rn_stack {
	rsrc_node_t *node[MAX_TREE_DEPTH];
	char prunesib[MAX_TREE_DEPTH];
	char prunechild[MAX_TREE_DEPTH];
	int depth;
};
1299
1300 /* walking one node and update node stack */
/* walking one node and update node stack */
/*ARGSUSED*/
static void
walk_one_node(struct rn_stack *sp, void *arg,
    int (*node_callback)(rsrc_node_t *, void *))
{
	int prunesib;
	rsrc_node_t *child, *sibling;
	rsrc_node_t *node = TOP_NODE(sp);

	rcm_log_message(RCM_TRACE4, "walk_one_node(%s)\n", node->name);

	/*
	 * Visit the node on top of the stack, then advance the stack so
	 * the next call visits the next node in child-first order.  The
	 * callback's return code steers the traversal.
	 */
	switch (node_callback(node, arg)) {
	case RN_WALK_TERMINATE:
		/* drain the entire stack so the walk loop exits */
		POP_STACK(sp);
		while (!EMPTY_STACK(sp)) {
			node = TOP_NODE(sp);
			POP_STACK(sp);
		}
		return;

	case RN_WALK_PRUNESIB:
		PRUNE_SIB(sp) = 1;
		break;

	case RN_WALK_PRUNECHILD:
		PRUNE_CHILD(sp) = 1;
		break;

	case RN_WALK_CONTINUE:
	default:
		break;
	}

	/*
	 * Push child on the stack
	 */
	if (!PRUNE_CHILD(sp) && (child = rn_get_child(node)) != NULL) {
		PUSH_STACK(sp, child);
		return;
	}

	/*
	 * Pop the stack till a node's sibling can be pushed.  Note the
	 * loop can only exit non-empty after assigning sibling, so the
	 * PUSH_STACK below never sees it uninitialized.
	 */
	prunesib = PRUNE_SIB(sp);
	POP_STACK(sp);
	while (!EMPTY_STACK(sp) &&
	    (prunesib || (sibling = rn_get_sibling(node)) == NULL)) {
		node = TOP_NODE(sp);
		prunesib = PRUNE_SIB(sp);
		POP_STACK(sp);
	}

	/* stack drained: traversal is complete */
	if (EMPTY_STACK(sp)) {
		return;
	}

	/*
	 * push sibling onto the stack
	 */
	PUSH_STACK(sp, sibling);
}
1363
1364 /*
1365 * walk tree rooted at root in child-first order
1366 */
1367 static void
rsrc_walk(rsrc_node_t * root,void * arg,int (* node_callback)(rsrc_node_t *,void *))1368 rsrc_walk(rsrc_node_t *root, void *arg,
1369 int (*node_callback)(rsrc_node_t *, void *))
1370 {
1371 struct rn_stack stack;
1372
1373 rcm_log_message(RCM_TRACE3, "rsrc_walk(%s)\n", root->name);
1374
1375 /*
1376 * Push root on stack and walk in child-first order
1377 */
1378 stack.depth = 0;
1379 PUSH_STACK(&stack, root);
1380 PRUNE_SIB(&stack) = 1;
1381
1382 while (!EMPTY_STACK(&stack)) {
1383 walk_one_node(&stack, arg, node_callback);
1384 }
1385 }
1386
1387 /*
1388 * Callback for a command action on a node
1389 */
1390 static int
node_action(rsrc_node_t * node,void * arg)1391 node_action(rsrc_node_t *node, void *arg)
1392 {
1393 tree_walk_arg_t *targ = (tree_walk_arg_t *)arg;
1394 uint_t flag = targ->flag;
1395
1396 rcm_log_message(RCM_TRACE4, "node_action(%s)\n", node->name);
1397
1398 /*
1399 * If flag indicates operation on a filesystem, we don't callback on
1400 * the filesystem root to avoid infinite recursion on filesystem module.
1401 *
1402 * N.B. Such request should only come from filesystem RCM module.
1403 */
1404 if (flag & RCM_FILESYS) {
1405 assert(node->type == RSRC_TYPE_FILESYS);
1406 targ->flag &= ~RCM_FILESYS;
1407 return (RN_WALK_CONTINUE);
1408 }
1409
1410 /*
1411 * Execute state change callback
1412 */
1413 (void) rsrc_client_action_list(node->users, targ->cmd, arg);
1414
1415 /*
1416 * Upon hitting a filesys root, prune children.
1417 * The filesys module should have taken care of
1418 * children by now.
1419 */
1420 if (node->type == RSRC_TYPE_FILESYS)
1421 return (RN_WALK_PRUNECHILD);
1422
1423 return (RN_WALK_CONTINUE);
1424 }
1425
1426 /*
1427 * Execute a command on a subtree under root.
1428 */
1429 int
rsrc_tree_action(rsrc_node_t * root,int cmd,tree_walk_arg_t * arg)1430 rsrc_tree_action(rsrc_node_t *root, int cmd, tree_walk_arg_t *arg)
1431 {
1432 rcm_log_message(RCM_TRACE2, "tree_action(%s, %d)\n", root->name, cmd);
1433
1434 arg->cmd = cmd;
1435
1436 /*
1437 * If RCM_RETIRE_REQUEST is set, just walk one node and preset
1438 * retcode to NO_CONSTRAINT
1439 */
1440 if (arg->flag & RCM_RETIRE_REQUEST) {
1441 rcm_log_message(RCM_TRACE1, "tree_action: RETIRE_REQ: walking "
1442 "only root node: %s\n", root->name);
1443 arg->retcode = RCM_NO_CONSTRAINT;
1444 (void) node_action(root, arg);
1445 } else {
1446 arg->retcode = RCM_SUCCESS;
1447 rsrc_walk(root, (void *)arg, node_action);
1448 }
1449
1450 return (arg->retcode);
1451 }
1452
1453 /*
1454 * Get info on current regsitrations
1455 */
1456 int
rsrc_usage_info(char ** rsrcnames,uint_t flag,int seq_num,rcm_info_t ** info)1457 rsrc_usage_info(char **rsrcnames, uint_t flag, int seq_num, rcm_info_t **info)
1458 {
1459 rsrc_node_t *node;
1460 rcm_info_t *result = NULL;
1461 tree_walk_arg_t arg;
1462 int initial_req;
1463 int rv;
1464 int i;
1465
1466 arg.flag = flag;
1467 arg.info = &result;
1468 arg.seq_num = seq_num;
1469
1470 for (i = 0; rsrcnames[i] != NULL; i++) {
1471
1472 rcm_log_message(RCM_TRACE2, "rsrc_usage_info(%s, 0x%x, %d)\n",
1473 rsrcnames[i], flag, seq_num);
1474
1475 if (flag & RCM_INCLUDE_DEPENDENT) {
1476 initial_req = ((seq_num & SEQ_NUM_MASK) == 0);
1477
1478 /*
1479 * if redundant request, skip the operation
1480 */
1481 if (info_req_add(rsrcnames[i], flag, seq_num) != 0) {
1482 continue;
1483 }
1484 }
1485
1486 rv = rsrc_node_find(rsrcnames[i], 0, &node);
1487 if ((rv != RCM_SUCCESS) || (node == NULL)) {
1488 if ((flag & RCM_INCLUDE_DEPENDENT) && initial_req)
1489 info_req_remove(seq_num);
1490 continue;
1491 }
1492
1493 /*
1494 * Based on RCM_INCLUDE_SUBTREE flag, query either the subtree
1495 * or just the node.
1496 */
1497 if (flag & RCM_INCLUDE_SUBTREE) {
1498 (void) rsrc_tree_action(node, CMD_GETINFO, &arg);
1499 } else {
1500 arg.cmd = CMD_GETINFO;
1501 (void) node_action(node, (void *)&arg);
1502 }
1503
1504 if ((flag & RCM_INCLUDE_DEPENDENT) && initial_req)
1505 info_req_remove(seq_num);
1506 }
1507
1508 out:
1509 (void) rcm_append_info(info, result);
1510 return (rv);
1511 }
1512
1513 /*
1514 * Get the list of currently loaded module
1515 */
1516 rcm_info_t *
rsrc_mod_info()1517 rsrc_mod_info()
1518 {
1519 module_t *mod;
1520 rcm_info_t *info = NULL;
1521
1522 (void) mutex_lock(&mod_lock);
1523 mod = module_head;
1524 while (mod) {
1525 char *modinfo = s_strdup(module_info(mod));
1526 add_busy_rsrc_to_list("dummy", 0, 0, 0, mod->name,
1527 modinfo, NULL, NULL, &info);
1528 mod = mod->next;
1529 }
1530 (void) mutex_unlock(&mod_lock);
1531
1532 return (info);
1533 }
1534
1535 /*
1536 * Initialize resource map - load all modules
1537 */
void
rcmd_db_init()
{
	char *tmp;
	DIR *mod_dir;
	struct dirent *entp;
	int i;
	char *dir_name;
	int rcm_script;

	rcm_log_message(RCM_DEBUG, "rcmd_db_init(): initialize database\n");

	/* the script framework must be usable before modules are loaded */
	if (script_main_init() == -1)
		rcmd_exit(errno);

	/* create the root of the resource tree */
	rsrc_root = rn_alloc("/", RSRC_TYPE_NORMAL);

	/*
	 * Scan every RCM directory; rcm_dir() reports via rcm_script
	 * whether a directory holds scripts or binary modules.
	 */
	for (i = 0; (dir_name = rcm_dir(i, &rcm_script)) != NULL; i++) {

		if ((mod_dir = opendir(dir_name)) == NULL) {
			continue;	/* try next directory */
		}

		rcm_log_message(RCM_TRACE2, "search directory %s\n", dir_name);

		while ((entp = readdir(mod_dir)) != NULL) {
			module_t *module;

			if (strcmp(entp->d_name, ".") == 0 ||
			    strcmp(entp->d_name, "..") == 0)
				continue;

			if (rcm_script == 0) {
				/*
				 * rcm module: accept only names ending in
				 * RCM_MODULE_SUFFIX (the suffix match must
				 * sit at the end of the name)
				 */
				if (((tmp = strstr(entp->d_name,
				    RCM_MODULE_SUFFIX)) == NULL) ||
				    (tmp[strlen(RCM_MODULE_SUFFIX)] != '\0')) {
					continue;
				}
			}

			/* load (or find) the module and hold a reference */
			module = cli_module_hold(entp->d_name);
			if (module == NULL) {
				/* script load failures are silent by design */
				if (rcm_script == 0)
					rcm_log_message(RCM_ERROR,
					    gettext("%s: failed to load\n"),
					    entp->d_name);
				continue;
			}

			if (module->ref_count == MOD_REFCNT_INIT) {
				/*
				 * ask module to register for resource 1st time
				 */
				module_attach(module);
			}
			cli_module_rele(module);
		}
		(void) closedir(mod_dir);
	}

	/* dump the resulting database at debug level */
	rcmd_db_print();
}
1601
1602 /*
1603 * sync resource map - ask all modules to register again
1604 */
void
rcmd_db_sync()
{
	/*
	 * Throttle: remember the last sync time and skip the resync if
	 * one completed within the last `interval' seconds.
	 */
	static time_t sync_time = (time_t)-1;
	const time_t interval = 5;	/* resync at most every 5 sec */

	module_t *mod;
	time_t curr = time(NULL);

	if ((sync_time != (time_t)-1) && (curr - sync_time < interval))
		return;

	sync_time = curr;
	(void) mutex_lock(&mod_lock);
	mod = module_head;
	while (mod) {
		/*
		 * Hold module by incrementing ref count and release
		 * mod_lock to avoid deadlock, since rcmop_register()
		 * may callback into the daemon and request mod_lock.
		 */
		mod->ref_count++;
		(void) mutex_unlock(&mod_lock);

		mod->modops->rcmop_register(mod->rcmhandle);

		(void) mutex_lock(&mod_lock);
		mod->ref_count--;
		mod = mod->next;
	}
	(void) mutex_unlock(&mod_lock);
}
1637
1638 /*
1639 * Determine if a process is alive
1640 */
int
proc_exist(pid_t pid)
{
	char path[64];
	const char *procfs = "/proc";
	struct stat sb;

	/* a pid of 0 is always reported as existing */
	if (pid == (pid_t)0) {
		return (1);
	}

	/*
	 * Cast explicitly to long: pid_t's underlying type varies with
	 * the data model, and %ld requires a long argument; a mismatch
	 * through varargs is undefined behavior.
	 */
	(void) snprintf(path, sizeof (path), "%s/%ld", procfs, (long)pid);
	return (stat(path, &sb) == 0);
}
1655
1656 /*
1657 * Cleaup client list
1658 *
1659 * N.B. This routine runs in a single-threaded environment only. It is only
1660 * called by the cleanup thread, which never runs in parallel with other
1661 * threads.
1662 */
static void
clean_client_list(client_t **listp)
{
	client_t *client = *listp;

	/*
	 * Cleanup notification clients for which pid no longer exists.
	 * listp always points at the link that references the current
	 * client, so removal can splice the list without a separate
	 * "previous" pointer.
	 */
	while (client) {
		/* keep live clients that are not marked for removal */
		if ((client->state != RCM_STATE_REMOVE) &&
		    proc_exist(client->pid)) {
			listp = &client->next;
			client = *listp;
			continue;
		}

		/*
		 * Destroy this client_t. rsrc_client_remove updates
		 * listp to point to the next client.
		 */
		rsrc_client_remove(client, listp);
		client = *listp;
	}
}
1687
/*ARGSUSED*/
static int
clean_node(rsrc_node_t *node, void *arg)
{
	/* per-node walk callback: purge stale clients; never prunes */
	rcm_log_message(RCM_TRACE4, "clean_node(%s)\n", node->name);

	clean_client_list(&node->users);

	return (RN_WALK_CONTINUE);
}
1698
static void
clean_rsrc_tree()
{
	rcm_log_message(RCM_TRACE4,
	    "clean_rsrc_tree(): delete stale dr clients\n");

	/* child-first walk of the whole tree, cleaning each node's users */
	rsrc_walk(rsrc_root, NULL, clean_node);
}
1707
static void *
db_clean(void *arg __unused)
{
	extern barrier_t barrier;
	extern void clean_dr_list();

	/*
	 * Cleanup thread main loop: wait until the daemon flags that
	 * cleanup is needed, quiesce the other threads, then purge stale
	 * DR requests and stale clients from the resource tree.
	 */
	for (;;) {
		(void) mutex_lock(&rcm_req_lock);
		start_polling_thread();
		(void) mutex_unlock(&rcm_req_lock);

		/* sleep until need_cleanup is set and barrier.cv signaled */
		(void) mutex_lock(&barrier.lock);
		while (need_cleanup == 0)
			(void) cond_wait(&barrier.cv, &barrier.lock);
		(void) mutex_unlock(&barrier.lock);

		/*
		 * Make sure all other threads are either blocked or exited.
		 */
		rcmd_set_state(RCMD_CLEANUP);

		need_cleanup = 0;

		/*
		 * clean dr_req_list
		 */
		clean_dr_list();

		/*
		 * clean resource tree
		 */
		clean_rsrc_tree();

		/* resume normal operation */
		rcmd_set_state(RCMD_NORMAL);
	}
	return (NULL);
}
1745
1746 void
rcmd_db_clean(void)1747 rcmd_db_clean(void)
1748 {
1749 rcm_log_message(RCM_DEBUG,
1750 "rcm_db_clean(): launch thread to clean database\n");
1751
1752 if (thr_create(NULL, 0, db_clean, NULL, THR_DETACHED, NULL) != 0) {
1753 rcm_log_message(RCM_WARNING,
1754 gettext("failed to create cleanup thread %s\n"),
1755 strerror(errno));
1756 }
1757 }
1758
1759 /*ARGSUSED*/
1760 static int
print_node(rsrc_node_t * node,void * arg)1761 print_node(rsrc_node_t *node, void *arg)
1762 {
1763 client_t *user;
1764
1765 rcm_log_message(RCM_DEBUG, "rscname: %s, state = 0x%x\n", node->name);
1766 rcm_log_message(RCM_DEBUG, " users:\n");
1767
1768 if ((user = node->users) == NULL) {
1769 rcm_log_message(RCM_DEBUG, " none\n");
1770 return (RN_WALK_CONTINUE);
1771 }
1772
1773 while (user) {
1774 rcm_log_message(RCM_DEBUG, " %s, %d, %s\n",
1775 user->module->name, user->pid, user->alias);
1776 user = user->next;
1777 }
1778 return (RN_WALK_CONTINUE);
1779 }
1780
1781 static void
rcmd_db_print()1782 rcmd_db_print()
1783 {
1784 module_t *mod;
1785
1786 rcm_log_message(RCM_DEBUG, "modules:\n");
1787 (void) mutex_lock(&mod_lock);
1788 mod = module_head;
1789 while (mod) {
1790 rcm_log_message(RCM_DEBUG, " %s\n", mod->name);
1791 mod = mod->next;
1792 }
1793 (void) mutex_unlock(&mod_lock);
1794
1795 rcm_log_message(RCM_DEBUG, "\nresource tree:\n");
1796
1797 rsrc_walk(rsrc_root, NULL, print_node);
1798
1799 rcm_log_message(RCM_DEBUG, "\n");
1800 }
1801
1802 /*
1803 * Allocate handle from calling into each RCM module
1804 */
1805 static rcm_handle_t *
rcm_handle_alloc(module_t * module)1806 rcm_handle_alloc(module_t *module)
1807 {
1808 rcm_handle_t *hdl;
1809
1810 hdl = s_malloc(sizeof (rcm_handle_t));
1811
1812 hdl->modname = module->name;
1813 hdl->pid = 0;
1814 hdl->lrcm_ops = &rcm_ops; /* for callback into daemon directly */
1815 hdl->module = module;
1816
1817 return (hdl);
1818 }
1819
1820 /*
1821 * Free rcm_handle_t
1822 */
static void
rcm_handle_free(rcm_handle_t *handle)
{
	/* modname points into the module and is not owned by the handle */
	free(handle);
}
1828
1829 /*
1830 * help function that exit on memory outage
1831 */
void *
s_malloc(size_t size)
{
	void *p;

	/* allocation failure terminates the daemon rather than returning */
	if ((p = malloc(size)) == NULL) {
		rcmd_exit(ENOMEM);
	}
	return (p);
}
1842
void *
s_calloc(int n, size_t size)
{
	void *p;

	/* allocation failure terminates the daemon rather than returning */
	if ((p = calloc(n, size)) == NULL) {
		rcmd_exit(ENOMEM);
	}
	return (p);
}
1853
void *
s_realloc(void *ptr, size_t size)
{
	void *p;

	/*
	 * On failure the daemon exits, so the "original pointer still
	 * valid" concern with realloc does not apply here.
	 */
	if ((p = realloc(ptr, size)) == NULL) {
		rcmd_exit(ENOMEM);
	}
	return (p);
}
1864
char *
s_strdup(const char *str)
{
	char *copy;

	/* allocation failure terminates the daemon rather than returning */
	if ((copy = strdup(str)) == NULL) {
		rcmd_exit(ENOMEM);
	}
	return (copy);
}
1875
1876 /*
1877 * Convert a version 1 ops vector to current ops vector
1878 * Fields missing in version 1 are set to NULL.
1879 */
static struct rcm_mod_ops *
modops_from_v1(void *ops_v1)
{
	struct rcm_mod_ops *ops;

	/*
	 * Copy the v1 fields into a freshly zeroed current-version ops
	 * vector; fields absent from v1 stay NULL via s_calloc().  The
	 * bcopy() relies on struct rcm_mod_ops_v1 being a layout prefix
	 * of struct rcm_mod_ops.
	 */
	ops = s_calloc(1, sizeof (struct rcm_mod_ops));
	bcopy(ops_v1, ops, sizeof (struct rcm_mod_ops_v1));
	return (ops);
}
1889
1890 /* call a module's getinfo routine; detects v1 ops and adjusts the call */
static int
call_getinfo(struct rcm_mod_ops *ops, rcm_handle_t *hdl, char *alias, id_t pid,
    uint_t flag, char **info, char **error, nvlist_t *client_props,
    rcm_info_t **infop)
{
	int rval;
	struct rcm_mod_ops_v1 *v1_ops;

	if (ops->version == RCM_MOD_OPS_V1) {
		/*
		 * The v1 getinfo entry point has no error-string or
		 * client-properties arguments.  On failure, duplicate
		 * the info string into *error so callers see the same
		 * contract regardless of module version.
		 */
		v1_ops = (struct rcm_mod_ops_v1 *)ops;
		rval = v1_ops->rcmop_get_info(hdl, alias, pid, flag, info,
		    infop);
		if (rval != RCM_SUCCESS && *info != NULL)
			*error = strdup(*info);
		return (rval);
	} else {
		/* current ops vector: pass everything straight through */
		return (ops->rcmop_get_info(hdl, alias, pid, flag, info, error,
		    client_props, infop));
	}
}
1911
1912 void
rcm_init_queue(rcm_queue_t * head)1913 rcm_init_queue(rcm_queue_t *head)
1914 {
1915 head->next = head->prev = head;
1916 }
1917
void
rcm_enqueue_head(rcm_queue_t *head, rcm_queue_t *element)
{
	/* inserting right after the sentinel makes element the first entry */
	rcm_enqueue(head, element);
}
1923
void
rcm_enqueue_tail(rcm_queue_t *head, rcm_queue_t *element)
{
	/* inserting after the last entry makes element the new tail */
	rcm_enqueue(head->prev, element);
}
1929
void
rcm_enqueue(rcm_queue_t *list_element, rcm_queue_t *element)
{
	/*
	 * Splice element into the doubly-linked circular list right
	 * after list_element; the statement order matters.
	 */
	element->next = list_element->next;
	element->prev = list_element;
	element->next->prev = element;
	list_element->next = element;
}
1938
1939 rcm_queue_t *
rcm_dequeue_head(rcm_queue_t * head)1940 rcm_dequeue_head(rcm_queue_t *head)
1941 {
1942 rcm_queue_t *element = head->next;
1943 rcm_dequeue(element);
1944 return (element);
1945 }
1946
1947 rcm_queue_t *
rcm_dequeue_tail(rcm_queue_t * head)1948 rcm_dequeue_tail(rcm_queue_t *head)
1949 {
1950 rcm_queue_t *element = head->prev;
1951 rcm_dequeue(element);
1952 return (element);
1953 }
1954
void
rcm_dequeue(rcm_queue_t *element)
{
	/*
	 * Unlink element from its circular list and poison its links so
	 * a stale reference cannot silently traverse the list.
	 */
	element->prev->next = element->next;
	element->next->prev = element->prev;
	element->next = element->prev = NULL;
}
1962