1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 /* 29 * Implementation of ri_init routine for obtaining mapping 30 * of system board attachment points to physical devices and to 31 * the Reconfiguration Coordination Manager (RCM) client usage 32 * of these devices. 33 */ 34 #include <string.h> 35 #include <stdlib.h> 36 #include <unistd.h> 37 #include <kstat.h> 38 #include <sys/param.h> 39 #include <sys/sbd_ioctl.h> 40 #include "rsrc_info_impl.h" 41 42 /* 43 * Occupant types exported by cfgadm sbd plugin via 44 * config_admin(3CFGADM). 45 */ 46 #define SBD_CM_CPU "cpu" 47 #define SBD_CM_MEM "memory" 48 #define SBD_CM_IO "io" 49 50 /* 51 * RCM abstract resource names. 52 */ 53 #define RCM_MEM_ALL "SUNW_memory" 54 #define RCM_CPU_ALL "SUNW_cpu" 55 #define RCM_CPU RCM_CPU_ALL"/cpu" 56 57 #define KBYTE 1024 58 #define MBYTE 1048576 59 #define USAGE_ALLOC_SIZE 128 60 61 /* 62 * define to allow io_cm_info to return NODE is NULL to ri_init, 63 * in order to skip over nodes w/unattached drivers 64 */ 65 #define RI_NODE_NIL 1 66 67 /* 68 * This code is CMP aware as it parses the 69 * cfgadm info field for individual cpuids. 70 */ 71 #define CPUID_SEP "," 72 #define CPU_INFO_FMT "cpuid=%s speed=%d ecache=%d" 73 74 typedef struct { 75 cfga_list_data_t *cfga_list_data; 76 int nlist; 77 } apd_t; 78 79 typedef struct { 80 long pagesize; 81 long syspages; 82 long sysmb; 83 } mem_stat_t; 84 85 #define ms_syspages m_stat.syspages 86 #define ms_pagesize m_stat.pagesize 87 #define ms_sysmb m_stat.sysmb 88 89 typedef int32_t cpuid_t; 90 91 typedef struct { 92 int cpuid_max; /* maximum cpuid value */ 93 int *ecache_sizes; /* indexed by cpuid */ 94 } ecache_info_t; 95 96 typedef struct { 97 rcm_handle_t *hdl; 98 rcm_info_t *offline_query_info; 99 char **rlist; 100 int nrlist; 101 cpuid_t *cpus; 102 int ncpus; 103 int ndevs; 104 uint_t query_pages; 105 mem_stat_t m_stat; 106 ecache_info_t ecache_info; 107 } rcmd_t; 108 109 typedef struct { 110 const char *rsrc; 111 const char *info; 112 } usage_t; 113 114 /* Lookup table entry for matching IO devices to RCM resource usage */ 115 typedef struct { 116 int index; /* index into the table array */ 117 di_node_t node; /* associated devinfo node */ 118 char *name; /* device full path name */ 119 int n_usage; 120 usage_t *usage; 121 } lookup_entry_t; 122 123 typedef struct { 124 int n_entries; 125 int n_slots; 126 lookup_entry_t *table; 127 } lookup_table_t; 128 129 typedef struct { 130 int err; 131 di_node_t node; 132 char *pathbuf; 133 lookup_table_t *table; 134 di_devlink_handle_t linkhd; 135 } devinfo_arg_t; 136 137 static int dyn_ap_ids(char *, cfga_list_data_t **, int *); 138 static int rcm_init(rcmd_t *, apd_t [], int, int); 139 static void rcm_fini(rcmd_t *); 140 static int rcm_query_init(rcmd_t *, apd_t [], int); 141 static int cap_request(ri_hdl_t *, rcmd_t *); 142 static int syscpus(cpuid_t **, int *); 143 static int cpu_cap_request(ri_hdl_t *, rcmd_t *); 144 static int mem_cap_request(ri_hdl_t *, rcmd_t *); 145 static int (*cm_rcm_qpass_func(cfga_type_t))(cfga_list_data_t *, rcmd_t *); 146 static int cpu_rcm_qpass(cfga_list_data_t *, rcmd_t *); 147 static int mem_rcm_qpass(cfga_list_data_t *, rcmd_t *); 148 static int io_rcm_qpass(cfga_list_data_t *, rcmd_t *); 149 static int (*cm_info_func(cfga_type_t))(ri_ap_t *, cfga_list_data_t *, int, 150 rcmd_t *); 151 static int cpu_cm_info(ri_ap_t *, cfga_list_data_t *, int, rcmd_t *); 152 static int i_cpu_cm_info(processorid_t, int, ri_ap_t *, rcmd_t *); 153 static int mem_cm_info(ri_ap_t *, cfga_list_data_t *, int, rcmd_t *); 154 static int io_cm_info(ri_ap_t *, cfga_list_data_t *, int, rcmd_t *); 155 static int ident_leaf(di_node_t); 156 static int mk_drv_inst(di_node_t, char [], char *); 157 static int devinfo_node_walk(di_node_t, void *); 158 static int devinfo_minor_walk(di_node_t, di_minor_t, void *); 159 static int devinfo_devlink_walk(di_devlink_t, void *); 160 static int add_rcm_clients(ri_client_t **, rcmd_t *, rcm_info_t *, int, int *); 161 static int rcm_ignore(char *, char *); 162 static int add_query_state(rcmd_t *, ri_client_t *, const char *, const char *); 163 static int state2query(int); 164 static void dev_list_append(ri_dev_t **, ri_dev_t *); 165 static void dev_list_cpu_insert(ri_dev_t **, ri_dev_t *, processorid_t); 166 static rcm_info_tuple_t *tuple_lookup(rcmd_t *, const char *, const char *); 167 static ri_ap_t *ri_ap_alloc(char *, ri_hdl_t *); 168 static ri_dev_t *ri_dev_alloc(void); 169 static ri_dev_t *io_dev_alloc(char *); 170 static ri_client_t *ri_client_alloc(char *, char *); 171 static void apd_tbl_free(apd_t [], int); 172 static char *pstate2str(int); 173 static int ecache_info_init(ecache_info_t *); 174 static int find_cpu_nodes(di_node_t, void *); 175 static int prop_lookup_int(di_node_t, di_prom_handle_t, char *, int **); 176 static int add_lookup_entry(lookup_table_t *, const char *, di_node_t); 177 static int table_compare_names(const void *, const void *); 178 static int table_compare_indices(const void *, const void *); 179 static lookup_entry_t *lookup(lookup_table_t *table, const char *); 180 static int add_usage(lookup_entry_t *, const char *, rcm_info_tuple_t *); 181 static void empty_table(lookup_table_t *); 182 183 #ifdef DEBUG 184 static void dump_apd_tbl(FILE *, apd_t *, int); 185 #endif /* DEBUG */ 186 187 static struct { 188 char *type; 189 int (*cm_info)(ri_ap_t *, cfga_list_data_t *, int, rcmd_t *); 190 int (*cm_rcm_qpass)(cfga_list_data_t *, rcmd_t *); 191 } cm_ctl[] = { 192 {SBD_CM_CPU, cpu_cm_info, cpu_rcm_qpass}, 193 {SBD_CM_MEM, mem_cm_info, mem_rcm_qpass}, 194 {SBD_CM_IO, io_cm_info, io_rcm_qpass} 195 }; 196 197 /* 198 * Table of known info string prefixes for RCM modules that do not 199 * represent actual resource usage, but instead provide name translations 200 * or sequencing within the RCM namespace. Since RCM provides no way to 201 * filter these out, we must maintain this hack. 202 */ 203 static char *rcm_info_filter[] = { 204 "Network interface", /* Network naming module */ 205 NULL 206 }; 207 208 209 /* 210 * Allocate snapshot handle. 211 */ 212 int 213 ri_init(int n_apids, char **ap_ids, int flags, ri_hdl_t **hdlp) 214 { 215 int i, j; 216 ri_hdl_t *ri_hdl; 217 ri_ap_t *ap_hdl; 218 rcmd_t *rcm = NULL; 219 cfga_list_data_t *cfga_ldata; 220 apd_t *apd, *apd_tbl = NULL; 221 int (*cm_info)(ri_ap_t *, cfga_list_data_t *, 222 int, rcmd_t *); 223 int rv = RI_SUCCESS; 224 int cm_info_rv; 225 226 if (n_apids <= 0 || ap_ids == NULL || hdlp == NULL) 227 return (RI_INVAL); 228 229 if (flags & ~RI_REQ_MASK) 230 return (RI_NOTSUP); 231 232 *hdlp = NULL; 233 if ((ri_hdl = calloc(1, sizeof (*ri_hdl))) == NULL || 234 (rcm = calloc(1, sizeof (*rcm))) == NULL || 235 (apd_tbl = calloc(n_apids, sizeof (*apd_tbl))) == NULL) { 236 dprintf((stderr, "calloc: %s\n", strerror(errno))); 237 rv = RI_FAILURE; 238 goto out; 239 } 240 241 /* 242 * Create mapping of boards to components. 243 */ 244 for (i = 0, apd = apd_tbl; i < n_apids; i++, apd++) { 245 if (dyn_ap_ids(ap_ids[i], &apd->cfga_list_data, 246 &apd->nlist) == -1) { 247 rv = RI_INVAL; 248 goto out; 249 } 250 } 251 #ifdef DEBUG 252 dump_apd_tbl(stderr, apd_tbl, n_apids); 253 #endif /* DEBUG */ 254 255 if (rcm_init(rcm, apd_tbl, n_apids, flags) != 0) { 256 rv = RI_FAILURE; 257 goto out; 258 } 259 260 /* 261 * Best effort attempt to read cpu ecache sizes from 262 * OBP/Solaris device trees. These are later looked up 263 * in i_cpu_cm_info(). 264 */ 265 (void) ecache_info_init(&rcm->ecache_info); 266 267 for (i = 0, apd = apd_tbl; i < n_apids; i++, apd++) { 268 if ((ap_hdl = ri_ap_alloc(ap_ids[i], ri_hdl)) == NULL) { 269 rv = RI_FAILURE; 270 goto out; 271 } 272 273 /* 274 * Add component info based on occupant type. Note all 275 * passes through the apd table skip over the first 276 * cfgadm_list_data entry, which is the static system board 277 * attachment point. 278 */ 279 for (j = 1, cfga_ldata = &apd->cfga_list_data[1]; 280 j < apd->nlist; j++, cfga_ldata++) { 281 if (cfga_ldata->ap_o_state != CFGA_STAT_CONFIGURED) { 282 continue; 283 } 284 285 if ((cm_info = 286 cm_info_func(cfga_ldata->ap_type)) != NULL) { 287 cm_info_rv = 288 (*cm_info)(ap_hdl, cfga_ldata, flags, rcm); 289 if (cm_info_rv != 0) { 290 /* 291 * If we cannot obtain info for the ap, 292 * skip it and do not fail the entire 293 * operation. This case occurs when the 294 * driver for a device is not attached: 295 * di_init() returns failed back to 296 * io_cm_info(). 297 */ 298 if (cm_info_rv == RI_NODE_NIL) 299 continue; 300 else { 301 rv = RI_FAILURE; 302 goto out; 303 } 304 } 305 } 306 } 307 } 308 309 if ((flags & RI_INCLUDE_QUERY) && cap_request(ri_hdl, rcm) != 0) 310 rv = RI_FAILURE; 311 312 out: 313 if (apd_tbl != NULL) 314 apd_tbl_free(apd_tbl, n_apids); 315 if (rcm != NULL) 316 rcm_fini(rcm); 317 318 if (rv == RI_SUCCESS) 319 *hdlp = ri_hdl; 320 else 321 ri_fini(ri_hdl); 322 323 return (rv); 324 } 325 326 /* 327 * Map static board attachment point to dynamic attachment points (components). 328 */ 329 static int 330 dyn_ap_ids(char *ap_id, cfga_list_data_t **ap_id_list, int *nlist) 331 { 332 cfga_err_t cfga_err; 333 char *errstr; 334 char *opts = "parsable"; 335 char *listops = "class=sbd"; 336 337 cfga_err = config_list_ext(1, &ap_id, ap_id_list, nlist, 338 opts, listops, &errstr, CFGA_FLAG_LIST_ALL); 339 if (cfga_err != CFGA_OK) { 340 dprintf((stderr, "config_list_ext: %s\n", 341 config_strerror(cfga_err))); 342 return (-1); 343 } 344 345 return (0); 346 } 347 348 /* 349 * Initialize rcm handle, memory stats. Cache query result if necessary. 350 */ 351 static int 352 rcm_init(rcmd_t *rcm, apd_t apd_tbl[], int napds, int flags) 353 { 354 longlong_t ii; 355 int rv = 0; 356 357 rcm->offline_query_info = NULL; 358 rcm->rlist = NULL; 359 rcm->cpus = NULL; 360 361 if (rcm_alloc_handle(NULL, RCM_NOPID, NULL, &rcm->hdl) != RCM_SUCCESS) { 362 dprintf((stderr, "rcm_alloc_handle (errno=%d)\n", errno)); 363 return (-1); 364 } 365 366 if ((rcm->ms_pagesize = sysconf(_SC_PAGE_SIZE)) == -1 || 367 (rcm->ms_syspages = sysconf(_SC_PHYS_PAGES)) == -1) { 368 dprintf((stderr, "sysconf: %s\n", strerror(errno))); 369 return (-1); 370 } 371 ii = (longlong_t)rcm->ms_pagesize * rcm->ms_syspages; 372 rcm->ms_sysmb = (int)((ii+MBYTE-1) / MBYTE); 373 374 if (flags & RI_INCLUDE_QUERY) 375 rv = rcm_query_init(rcm, apd_tbl, napds); 376 377 return (rv); 378 } 379 380 static void 381 rcm_fini(rcmd_t *rcm) 382 { 383 char **cpp; 384 385 assert(rcm != NULL); 386 387 if (rcm->offline_query_info != NULL) 388 rcm_free_info(rcm->offline_query_info); 389 if (rcm->hdl != NULL) 390 rcm_free_handle(rcm->hdl); 391 392 if (rcm->rlist != NULL) { 393 for (cpp = rcm->rlist; *cpp != NULL; cpp++) 394 s_free(*cpp); 395 free(rcm->rlist); 396 } 397 398 s_free(rcm->cpus); 399 free(rcm); 400 } 401 402 #define NODENAME_CMP "cmp" 403 #define NODENAME_SSM "ssm" 404 #define PROP_CPUID "cpuid" 405 #define PROP_DEVICE_TYPE "device-type" 406 #define PROP_ECACHE_SIZE "ecache-size" 407 #define PROP_L2_CACHE_SIZE "l2-cache-size" 408 #define PROP_L3_CACHE_SIZE "l3-cache-size" 409 410 typedef struct { 411 di_node_t root; 412 di_prom_handle_t ph; 413 ecache_info_t *ecache_info; 414 } di_arg_t; 415 416 /* 417 * The ecache sizes for individual cpus are read from the 418 * OBP/Solaris device trees. This info cannot be derived 419 * from the cfgadm_sbd cpu attachment point ecache info, 420 * which may be a sum of multiple cores for CMP. 421 */ 422 static int 423 ecache_info_init(ecache_info_t *ec) 424 { 425 di_arg_t di_arg; 426 di_prom_handle_t ph = DI_PROM_HANDLE_NIL; 427 di_node_t root = DI_NODE_NIL; 428 int cpuid_max, rv = 0; 429 430 assert(ec != NULL && ec->cpuid_max == 0 && ec->ecache_sizes == NULL); 431 432 if ((cpuid_max = sysconf(_SC_CPUID_MAX)) == -1) { 433 dprintf((stderr, "sysconf fail: %s\n", strerror(errno))); 434 rv = -1; 435 goto done; 436 } 437 438 if ((root = di_init("/", DINFOCPYALL)) == DI_NODE_NIL) { 439 dprintf((stderr, "di_init fail: %s\n", strerror(errno))); 440 rv = -1; 441 goto done; 442 } 443 444 if ((ph = di_prom_init()) == DI_PROM_HANDLE_NIL) { 445 dprintf((stderr, "di_prom_init fail: %s\n", strerror(errno))); 446 rv = -1; 447 goto done; 448 } 449 450 if ((ec->ecache_sizes = calloc(cpuid_max + 1, sizeof (int))) == NULL) { 451 dprintf((stderr, "calloc fail: %s\n", strerror(errno))); 452 rv = -1; 453 goto done; 454 } 455 ec->cpuid_max = cpuid_max; 456 457 dprintf((stderr, "cpuid_max is set to %d\n", ec->cpuid_max)); 458 459 di_arg.ph = ph; 460 di_arg.root = root; 461 di_arg.ecache_info = ec; 462 463 if (di_walk_node(root, DI_WALK_CLDFIRST, (void *)&di_arg, 464 find_cpu_nodes) != 0) { 465 dprintf((stderr, "di_walk_node fail: %s\n", strerror(errno))); 466 rv = -1; 467 } 468 469 done: 470 if (root != DI_NODE_NIL) 471 di_fini(root); 472 if (ph != DI_PROM_HANDLE_NIL) 473 di_prom_fini(ph); 474 475 return (rv); 476 } 477 478 /* 479 * Libdevinfo node walk callback for reading ecache size 480 * properties for cpu device nodes. Subtrees not containing 481 * cpu nodes are filtered out. 482 */ 483 static int 484 find_cpu_nodes(di_node_t node, void *arg) 485 { 486 char *name; 487 int *cpuid, *ecache; 488 di_arg_t *di_arg = (di_arg_t *)arg; 489 ecache_info_t *ec = di_arg->ecache_info; 490 di_prom_handle_t ph = di_arg->ph; 491 492 if (node == DI_NODE_NIL) { 493 return (DI_WALK_TERMINATE); 494 } 495 496 if (node == di_arg->root) { 497 return (DI_WALK_CONTINUE); 498 } 499 500 if (di_nodeid(node) == DI_PSEUDO_NODEID) { 501 return (DI_WALK_PRUNECHILD); 502 } 503 504 name = di_node_name(node); 505 if (name != NULL) { 506 /* 507 * CMP nodes will be the parent of cpu nodes. On some platforms, 508 * cpu nodes will be under the ssm node. In either case, 509 * continue searching this subtree. 510 */ 511 if (strncmp(name, NODENAME_SSM, strlen(NODENAME_SSM)) == 0 || 512 strncmp(name, NODENAME_CMP, strlen(NODENAME_CMP)) == 0) { 513 return (DI_WALK_CONTINUE); 514 } 515 } 516 517 dprintf((stderr, "find_cpu_nodes: node=%p, name=%s, binding_name=%s\n", 518 node, di_node_name(node), di_binding_name(node))); 519 520 /* 521 * Ecache size property name differs with processor implementation. 522 * Panther has both L2 and L3, so check for L3 first to differentiate 523 * from Jaguar, which has only L2. 524 */ 525 if (prop_lookup_int(node, ph, PROP_CPUID, &cpuid) == 0 && 526 (prop_lookup_int(node, ph, PROP_ECACHE_SIZE, &ecache) == 0 || 527 prop_lookup_int(node, ph, PROP_L3_CACHE_SIZE, &ecache) == 0 || 528 prop_lookup_int(node, ph, PROP_L2_CACHE_SIZE, &ecache) == 0)) { 529 assert(ec != NULL && ec->ecache_sizes != NULL && 530 *cpuid <= ec->cpuid_max); 531 ec->ecache_sizes[*cpuid] = *ecache; 532 } 533 534 return (DI_WALK_PRUNECHILD); 535 } 536 537 /* 538 * Given a di_node_t, call the appropriate int property lookup routine. 539 * Note: This lookup fails if the int property has multiple value entries. 540 */ 541 static int 542 prop_lookup_int(di_node_t node, di_prom_handle_t ph, char *propname, int **ival) 543 { 544 int rv; 545 546 rv = (di_nodeid(node) == DI_PROM_NODEID) ? 547 di_prom_prop_lookup_ints(ph, node, propname, ival) : 548 di_prop_lookup_ints(DDI_DEV_T_ANY, node, propname, ival); 549 550 return (rv == 1 ? 0 : -1); 551 } 552 553 /* 554 * For offline queries, RCM must be given a list of all resources 555 * so modules can have access to the full scope of the operation. 556 * The rcm_get_info calls are made individually in order to map the 557 * returned rcm_info_t's to physical devices. The rcm_request_offline 558 * result is cached so the query state can be looked up as we process 559 * the rcm_get_info calls. This routine also tallies up the amount of 560 * memory going away and creates a list of cpu ids to be used 561 * later for rcm_request_capacity_change. 562 */ 563 static int 564 rcm_query_init(rcmd_t *rcm, apd_t apd_tbl[], int napds) 565 { 566 apd_t *apd; 567 int i, j; 568 cfga_list_data_t *cfga_ldata; 569 int (*cm_rcm_qpass)(cfga_list_data_t *, rcmd_t *); 570 #ifdef DEBUG 571 char **cpp; 572 #endif /* DEBUG */ 573 574 /* 575 * Initial pass to size cpu and resource name arrays needed to 576 * interface with RCM. Attachment point ids for CMP can represent 577 * multiple cpus (and resource names). Instead of parsing the 578 * cfgadm info field here, use the worse case that all component 579 * attachment points are CMP. 580 */ 581 rcm->ndevs = 0; 582 for (i = 0, apd = apd_tbl; i < napds; i++, apd++) { 583 for (j = 1, cfga_ldata = &apd->cfga_list_data[1]; 584 j < apd->nlist; j++, cfga_ldata++) { 585 if (cfga_ldata->ap_o_state != CFGA_STAT_CONFIGURED) { 586 continue; 587 } 588 rcm->ndevs += SBD_MAX_CORES_PER_CMP; 589 } 590 } 591 592 /* account for trailing NULL in rlist */ 593 if (rcm->ndevs > 0 && 594 ((rcm->cpus = calloc(rcm->ndevs, sizeof (cpuid_t))) == NULL || 595 (rcm->rlist = calloc(rcm->ndevs + 1, sizeof (char *))) == NULL)) { 596 dprintf((stderr, "calloc: %s\n", strerror(errno))); 597 return (-1); 598 } 599 600 /* 601 * Second pass to fill in the RCM resource and cpu lists. 602 */ 603 for (i = 0, apd = apd_tbl; i < napds; i++, apd++) { 604 for (j = 1, cfga_ldata = &apd->cfga_list_data[1]; 605 j < apd->nlist; j++, cfga_ldata++) { 606 if (cfga_ldata->ap_o_state != CFGA_STAT_CONFIGURED) { 607 continue; 608 } 609 if ((cm_rcm_qpass = 610 cm_rcm_qpass_func(cfga_ldata->ap_type)) != NULL && 611 (*cm_rcm_qpass)(cfga_ldata, rcm) != 0) { 612 return (-1); 613 } 614 } 615 } 616 617 if (rcm->nrlist == 0) 618 return (0); 619 620 /* 621 * Cache query result. Since we are only interested in the 622 * set of RCM clients processed and not their request status, 623 * the return value is irrelevant. 624 */ 625 (void) rcm_request_offline_list(rcm->hdl, rcm->rlist, 626 RCM_QUERY|RCM_SCOPE, &rcm->offline_query_info); 627 628 #ifdef DEBUG 629 dprintf((stderr, "RCM rlist: nrlist=%d\n", rcm->nrlist)); 630 for (cpp = rcm->rlist, i = 0; *cpp != NULL; cpp++, i++) { 631 dprintf((stderr, "rlist[%d]=%s\n", i, *cpp)); 632 } 633 #endif /* DEBUG */ 634 635 return (0); 636 } 637 638 static int 639 cap_request(ri_hdl_t *ri_hdl, rcmd_t *rcm) 640 { 641 return (((rcm->ncpus > 0 && cpu_cap_request(ri_hdl, rcm) != 0) || 642 (rcm->query_pages > 0 && mem_cap_request(ri_hdl, rcm) != 0)) ? 643 -1 : 0); 644 } 645 646 /* 647 * RCM capacity change request for cpus. 648 */ 649 static int 650 cpu_cap_request(ri_hdl_t *ri_hdl, rcmd_t *rcm) 651 { 652 cpuid_t *syscpuids, *newcpuids; 653 int sysncpus, newncpus; 654 rcm_info_t *rcm_info = NULL; 655 int i, j, k; 656 nvlist_t *nvl; 657 int rv = 0; 658 659 /* get all cpus in the system */ 660 if (syscpus(&syscpuids, &sysncpus) == -1) 661 return (-1); 662 663 newncpus = sysncpus - rcm->ncpus; 664 if ((newcpuids = calloc(newncpus, sizeof (cpuid_t))) == NULL) { 665 dprintf((stderr, "calloc: %s", strerror(errno))); 666 rv = -1; 667 goto out; 668 } 669 670 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) { 671 dprintf((stderr, "nvlist_alloc fail\n")); 672 rv = -1; 673 goto out; 674 } 675 676 /* 677 * Construct the new cpu list. 678 */ 679 for (i = 0, j = 0; i < sysncpus; i++) { 680 for (k = 0; k < rcm->ncpus; k++) { 681 if (rcm->cpus[k] == syscpuids[i]) { 682 break; 683 } 684 } 685 if (k == rcm->ncpus) { 686 newcpuids[j++] = syscpuids[i]; 687 } 688 } 689 690 if (nvlist_add_int32(nvl, "old_total", sysncpus) != 0 || 691 nvlist_add_int32(nvl, "new_total", newncpus) != 0 || 692 nvlist_add_int32_array(nvl, "old_cpu_list", syscpuids, 693 sysncpus) != 0 || 694 nvlist_add_int32_array(nvl, "new_cpu_list", newcpuids, 695 newncpus) != 0) { 696 dprintf((stderr, "nvlist_add fail\n")); 697 rv = -1; 698 goto out; 699 } 700 701 #ifdef DEBUG 702 dprintf((stderr, "old_total=%d\n", sysncpus)); 703 for (i = 0; i < sysncpus; i++) { 704 dprintf((stderr, "old_cpu_list[%d]=%d\n", i, syscpuids[i])); 705 } 706 dprintf((stderr, "new_total=%d\n", newncpus)); 707 for (i = 0; i < newncpus; i++) { 708 dprintf((stderr, "new_cpu_list[%d]=%d\n", i, newcpuids[i])); 709 } 710 #endif /* DEBUG */ 711 712 (void) rcm_request_capacity_change(rcm->hdl, RCM_CPU_ALL, 713 RCM_QUERY|RCM_SCOPE, nvl, &rcm_info); 714 715 rv = add_rcm_clients(&ri_hdl->cpu_cap_clients, rcm, rcm_info, 0, NULL); 716 717 out: 718 s_free(syscpuids); 719 s_free(newcpuids); 720 if (nvl != NULL) 721 nvlist_free(nvl); 722 if (rcm_info != NULL) 723 rcm_free_info(rcm_info); 724 725 return (rv); 726 } 727 728 static int 729 syscpus(cpuid_t **cpuids, int *ncpus) 730 { 731 kstat_t *ksp; 732 kstat_ctl_t *kc; 733 cpuid_t *cp; 734 int i; 735 736 if ((*ncpus = sysconf(_SC_NPROCESSORS_CONF)) == -1) { 737 dprintf((stderr, "sysconf: %s\n", errno)); 738 return (-1); 739 } 740 741 if ((kc = kstat_open()) == NULL) { 742 dprintf((stderr, "kstat_open fail\n")); 743 return (-1); 744 } 745 746 if ((cp = calloc(*ncpus, sizeof (cpuid_t))) == NULL) { 747 dprintf((stderr, "calloc: %s\n", errno)); 748 (void) kstat_close(kc); 749 return (-1); 750 } 751 752 for (i = 0, ksp = kc->kc_chain; ksp != NULL; ksp = ksp->ks_next) { 753 if (strcmp(ksp->ks_module, "cpu_info") == 0) { 754 cp[i++] = ksp->ks_instance; 755 } 756 } 757 758 (void) kstat_close(kc); 759 *cpuids = cp; 760 761 return (0); 762 } 763 764 /* 765 * RCM capacity change request for memory. 766 */ 767 static int 768 mem_cap_request(ri_hdl_t *ri_hdl, rcmd_t *rcm) 769 { 770 nvlist_t *nvl; 771 rcm_info_t *rcm_info = NULL; 772 long newpages; 773 int rv = 0; 774 775 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) { 776 dprintf((stderr, "nvlist_alloc fail\n")); 777 return (-1); 778 } 779 780 newpages = rcm->ms_syspages - rcm->query_pages; 781 if (nvlist_add_int32(nvl, "page_size", rcm->ms_pagesize) != 0 || 782 nvlist_add_int32(nvl, "old_pages", rcm->ms_syspages) != 0 || 783 nvlist_add_int32(nvl, "new_pages", newpages) != 0) { 784 dprintf((stderr, "nvlist_add fail\n")); 785 nvlist_free(nvl); 786 return (-1); 787 } 788 789 dprintf((stderr, "memory capacity change req: " 790 "page_size=%d, old_pages=%d, new_pages=%d\n", 791 rcm->ms_pagesize, rcm->ms_syspages, newpages)); 792 793 (void) rcm_request_capacity_change(rcm->hdl, RCM_MEM_ALL, 794 RCM_QUERY|RCM_SCOPE, nvl, &rcm_info); 795 796 rv = add_rcm_clients(&ri_hdl->mem_cap_clients, rcm, rcm_info, 0, NULL); 797 798 nvlist_free(nvl); 799 if (rcm_info != NULL) 800 rcm_free_info(rcm_info); 801 802 return (rv); 803 } 804 805 static int 806 (*cm_rcm_qpass_func(cfga_type_t ap_type))(cfga_list_data_t *, rcmd_t *) 807 { 808 int i; 809 810 for (i = 0; i < sizeof (cm_ctl) / sizeof (cm_ctl[0]); i++) { 811 if (strcmp(cm_ctl[i].type, ap_type) == 0) { 812 return (cm_ctl[i].cm_rcm_qpass); 813 } 814 } 815 return (NULL); 816 } 817 818 /* 819 * Save cpu ids and RCM abstract resource names. 820 * Cpu ids will be used for the capacity change request. 821 * Resource names will be used for the offline query. 822 */ 823 static int 824 cpu_rcm_qpass(cfga_list_data_t *cfga_ldata, rcmd_t *rcm) 825 { 826 processorid_t cpuid; 827 char *cpustr, *lasts, *rsrcname, rbuf[32]; 828 char cbuf[CFGA_INFO_LEN]; 829 int speed, ecache; 830 831 assert(sscanf(cfga_ldata->ap_info, CPU_INFO_FMT, &cbuf, &speed, 832 &ecache) == 3); 833 834 for (cpustr = (char *)strtok_r(cbuf, CPUID_SEP, &lasts); 835 cpustr != NULL; 836 cpustr = (char *)strtok_r(NULL, CPUID_SEP, &lasts)) { 837 cpuid = atoi(cpustr); 838 839 (void) snprintf(rbuf, sizeof (rbuf), "%s%d", RCM_CPU, cpuid); 840 if ((rsrcname = strdup(rbuf)) == NULL) { 841 dprintf((stderr, "strdup fail\n")); 842 return (-1); 843 } 844 assert(rcm->nrlist < rcm->ndevs && rcm->ncpus < rcm->ndevs); 845 rcm->rlist[rcm->nrlist++] = rsrcname; 846 rcm->cpus[rcm->ncpus++] = (cpuid_t)cpuid; 847 848 dprintf((stderr, "cpu_cm_info: cpuid=%d, rsrcname=%s", 849 cpuid, rsrcname)); 850 } 851 852 return (0); 853 } 854 855 /* 856 * No RCM resource names for individual memory units, so 857 * just add to offline query page count. 858 */ 859 static int 860 mem_rcm_qpass(cfga_list_data_t *cfga, rcmd_t *rcm) 861 { 862 char *cp; 863 uint_t kbytes; 864 longlong_t ii; 865 866 if ((cp = strstr(cfga->ap_info, "size")) == NULL || 867 sscanf(cp, "size=%u", &kbytes) != 1) { 868 dprintf((stderr, "unknown sbd info format: %s\n", cp)); 869 return (-1); 870 } 871 872 ii = (longlong_t)kbytes * KBYTE; 873 rcm->query_pages += (uint_t)(ii / rcm->ms_pagesize); 874 875 dprintf((stderr, "%s: npages=%u\n", cfga->ap_log_id, 876 (uint_t)(ii / rcm->ms_pagesize))); 877 878 return (0); 879 } 880 881 /* 882 * Add physical I/O bus name to RCM resource list. 883 */ 884 static int 885 io_rcm_qpass(cfga_list_data_t *cfga, rcmd_t *rcm) 886 { 887 char path[MAXPATHLEN]; 888 char buf[MAXPATHLEN]; 889 char *rsrcname; 890 891 if (sscanf(cfga->ap_info, "device=%s", path) != 1) { 892 dprintf((stderr, "unknown sbd info format: %s\n", 893 cfga->ap_info)); 894 return (-1); 895 } 896 897 (void) snprintf(buf, sizeof (buf), "/devices%s", path); 898 if ((rsrcname = strdup(buf)) == NULL) { 899 dprintf((stderr, "strdup fail\n")); 900 return (-1); 901 } 902 903 assert(rcm->nrlist < rcm->ndevs); 904 rcm->rlist[rcm->nrlist++] = rsrcname; 905 906 return (0); 907 } 908 909 static int 910 (*cm_info_func(cfga_type_t ap_type))(ri_ap_t *, cfga_list_data_t *, 911 int, rcmd_t *) 912 { 913 int i; 914 915 for (i = 0; i < sizeof (cm_ctl) / sizeof (cm_ctl[0]); i++) { 916 if (strcmp(cm_ctl[i].type, ap_type) == 0) { 917 return (cm_ctl[i].cm_info); 918 } 919 } 920 return (NULL); 921 } 922 923 /* 924 * Create cpu handle, adding properties exported by sbd plugin and 925 * RCM client usage. 926 */ 927 /* ARGSUSED */ 928 static int 929 cpu_cm_info(ri_ap_t *ap, cfga_list_data_t *cfga, int flags, rcmd_t *rcm) 930 { 931 processorid_t cpuid; 932 int speed, ecache, rv = 0; 933 char buf[CFGA_INFO_LEN], *cpustr, *lasts; 934 935 if (sscanf(cfga->ap_info, CPU_INFO_FMT, &buf, &speed, &ecache) != 3) { 936 dprintf((stderr, "unknown sbd info format: %s\n", 937 cfga->ap_info)); 938 return (-1); 939 } 940 941 /* parse cpuids */ 942 for (cpustr = (char *)strtok_r(buf, CPUID_SEP, &lasts); 943 cpustr != NULL; 944 cpustr = (char *)strtok_r(NULL, CPUID_SEP, &lasts)) { 945 cpuid = atoi(cpustr); 946 if ((rv = i_cpu_cm_info(cpuid, speed, ap, rcm)) != 0) { 947 break; 948 } 949 } 950 951 return (rv); 952 } 953 954 static int 955 i_cpu_cm_info(processorid_t cpuid, int speed, ri_ap_t *ap, rcmd_t *rcm) 956 { 957 int ecache = 0; 958 char *state, buf[32]; 959 processor_info_t cpu_info; 960 ri_dev_t *cpu = NULL; 961 rcm_info_t *rcm_info = NULL; 962 963 /* 964 * Could have been unconfigured in the interim, so cannot 965 * count on processor_info recognizing it. 966 */ 967 state = (processor_info(cpuid, &cpu_info) == 0) ? 968 pstate2str(cpu_info.pi_state) : "unknown"; 969 970 if ((cpu = ri_dev_alloc()) == NULL) { 971 dprintf((stderr, "ri_dev_alloc failed\n")); 972 return (-1); 973 } 974 975 if (rcm->ecache_info.ecache_sizes != NULL) { 976 assert(rcm->ecache_info.cpuid_max != 0 && 977 cpuid <= rcm->ecache_info.cpuid_max); 978 ecache = rcm->ecache_info.ecache_sizes[cpuid] / MBYTE; 979 } 980 981 dprintf((stderr, "i_cpu_cm_info: cpu(%d) ecache=%d MB\n", 982 cpuid, ecache)); 983 984 if (nvlist_add_int32(cpu->conf_props, RI_CPU_ID, cpuid) != 0 || 985 nvlist_add_int32(cpu->conf_props, RI_CPU_SPEED, speed) != 0 || 986 nvlist_add_int32(cpu->conf_props, RI_CPU_ECACHE, ecache) != 0 || 987 nvlist_add_string(cpu->conf_props, RI_CPU_STATE, state) != 0) { 988 dprintf((stderr, "nvlist_add fail\n")); 989 ri_dev_free(cpu); 990 return (-1); 991 } 992 993 (void) snprintf(buf, sizeof (buf), "%s%d", RCM_CPU, cpuid); 994 dprintf((stderr, "rcm_get_info(%s)\n", buf)); 995 if (rcm_get_info(rcm->hdl, buf, RCM_INCLUDE_DEPENDENT, 996 &rcm_info) != RCM_SUCCESS) { 997 dprintf((stderr, "rcm_get_info (errno=%d)\n", errno)); 998 ri_dev_free(cpu); 999 if (rcm_info != NULL) 1000 rcm_free_info(rcm_info); 1001 return (-1); 1002 } 1003 1004 dev_list_cpu_insert(&ap->cpus, cpu, cpuid); 1005 1006 return (0); 1007 } 1008 1009 /* 1010 * Create memory handle, adding properties exported by sbd plugin. 1011 * No RCM tuples to be saved unless RCM is modified to export names 1012 * for individual memory units. 1013 */ 1014 /* ARGSUSED */ 1015 static int 1016 mem_cm_info(ri_ap_t *ap, cfga_list_data_t *cfga, int flags, rcmd_t *rcm) 1017 { 1018 ri_dev_t *mem; 1019 char *cp; 1020 char *cpval; 1021 int len; 1022 uint64_t base_addr; /* required */ 1023 int32_t size_kb; /* required */ 1024 int32_t perm_kb = 0; /* optional */ 1025 char target[CFGA_AP_LOG_ID_LEN] = ""; /* optional */ 1026 int32_t del_kb = 0; /* optional */ 1027 int32_t rem_kb = 0; /* optional */ 1028 char source[CFGA_AP_LOG_ID_LEN] = ""; /* optional */ 1029 1030 if (sscanf(cfga->ap_info, "address=0x%llx size=%u", &base_addr, 1031 &size_kb) != 2) { 1032 goto err_fmt; 1033 } 1034 1035 if ((cp = strstr(cfga->ap_info, "permanent")) != NULL && 1036 sscanf(cp, "permanent=%u", &perm_kb) != 1) { 1037 goto err_fmt; 1038 } 1039 1040 if ((cp = strstr(cfga->ap_info, "target")) != NULL) { 1041 if ((cpval = strstr(cp, "=")) == NULL) { 1042 goto err_fmt; 1043 } 1044 for (len = 0; cpval[len] != '\0' && cpval[len] != ' '; len++) { 1045 if (len >= CFGA_AP_LOG_ID_LEN) { 1046 goto err_fmt; 1047 } 1048 } 1049 if (sscanf(cp, "target=%s deleted=%u remaining=%u", &target, 1050 &del_kb, &rem_kb) != 3) { 1051 goto err_fmt; 1052 } 1053 } 1054 1055 if ((cp = strstr(cfga->ap_info, "source")) != NULL) { 1056 if ((cpval = strstr(cp, "=")) == NULL) { 1057 goto err_fmt; 1058 } 1059 for (len = 0; cpval[len] != '\0' && cpval[len] != ' '; len++) { 1060 if (len >= CFGA_AP_LOG_ID_LEN) { 1061 goto err_fmt; 1062 } 1063 } 1064 if (sscanf(cp, "source=%s", &source) != 1) { 1065 goto err_fmt; 1066 } 1067 } 1068 1069 dprintf((stderr, "%s: base=0x%llx, size=%u, permanent=%u\n", 1070 cfga->ap_log_id, base_addr, size_kb, perm_kb)); 1071 1072 if ((mem = ri_dev_alloc()) == NULL) 1073 return (-1); 1074 1075 /* 1076 * Convert memory sizes to MB (truncate). 1077 */ 1078 if (nvlist_add_uint64(mem->conf_props, RI_MEM_ADDR, base_addr) != 0 || 1079 nvlist_add_int32(mem->conf_props, RI_MEM_BRD, size_kb/KBYTE) != 0 || 1080 nvlist_add_int32(mem->conf_props, RI_MEM_PERM, 1081 perm_kb/KBYTE) != 0) { 1082 dprintf((stderr, "nvlist_add failure\n")); 1083 ri_dev_free(mem); 1084 return (-1); 1085 } 1086 1087 if (target[0] != '\0' && 1088 (nvlist_add_string(mem->conf_props, RI_MEM_TARG, target) != 0 || 1089 nvlist_add_int32(mem->conf_props, RI_MEM_DEL, del_kb/KBYTE) != 0 || 1090 nvlist_add_int32(mem->conf_props, RI_MEM_REMAIN, 1091 rem_kb/KBYTE) != 0)) { 1092 dprintf((stderr, "nvlist_add failure\n")); 1093 ri_dev_free(mem); 1094 return (-1); 1095 } 1096 1097 if (source[0] != '\0' && 1098 nvlist_add_string(mem->conf_props, RI_MEM_SRC, source) != 0) { 1099 dprintf((stderr, "nvlist_add failure\n")); 1100 ri_dev_free(mem); 1101 return (-1); 1102 } 1103 1104 /* 1105 * XXX - move this property to attachment point hdl? 1106 */ 1107 if (nvlist_add_int32(mem->conf_props, RI_MEM_DOMAIN, 1108 rcm->ms_sysmb) != 0) { 1109 dprintf((stderr, "nvlist_add failure\n")); 1110 ri_dev_free(mem); 1111 return (-1); 1112 } 1113 1114 dev_list_append(&ap->mems, mem); 1115 return (0); 1116 1117 err_fmt: 1118 dprintf((stderr, "unknown sbd info format: %s\n", cfga->ap_info)); 1119 return (-1); 1120 } 1121 1122 /* 1123 * Initiate a libdevinfo walk on the IO bus path. 1124 * XXX - investigate performance using two threads here: one thread to do the 1125 * libdevinfo snapshot and treewalk; and one thread to get RCM usage info 1126 */ 1127 static int 1128 io_cm_info(ri_ap_t *ap, cfga_list_data_t *cfga, int flags, rcmd_t *rcm) 1129 { 1130 int i; 1131 int j; 1132 int k; 1133 int set_size; 1134 int retval = 0; 1135 int n_usage; 1136 devinfo_arg_t di_arg; 1137 lookup_table_t devicetable; 1138 lookup_entry_t *deventry; 1139 lookup_entry_t *lastdeventry; 1140 ri_dev_t *io = NULL; 1141 ri_client_t *client; 1142 ri_client_t *tmp; 1143 di_devlink_handle_t linkhd = NULL; 1144 di_node_t root = DI_NODE_NIL; 1145 di_node_t node = DI_NODE_NIL; 1146 rcm_info_tuple_t *rcm_tuple; 1147 rcm_info_t *rcm_info = NULL; 1148 const char *rcm_rsrc = NULL; 1149 char drv_inst[MAXPATHLEN]; 1150 char path[MAXPATHLEN]; 1151 char pathbuf[MAXPATHLEN]; 1152 1153 dprintf((stderr, "io_cm_info(%s)\n", cfga->ap_log_id)); 1154 1155 /* Extract devfs path from cfgadm information */ 1156 if (sscanf(cfga->ap_info, "device=%s\n", path) != 1) { 1157 dprintf((stderr, "unknown sbd info format: %s\n", 1158 cfga->ap_info)); 1159 return (-1); 1160 } 1161 1162 /* Initialize empty device lookup table */ 1163 devicetable.n_entries = 0; 1164 devicetable.n_slots = 0; 1165 devicetable.table = NULL; 1166 1167 /* Get libdevinfo snapshot */ 1168 dprintf((stderr, "di_init(%s)\n", path)); 1169 if ((root = di_init(path, DINFOCPYALL)) == DI_NODE_NIL) { 1170 dprintf((stderr, "di_init: %s\n", strerror(errno))); 1171 retval = RI_NODE_NIL; /* tell ri_init to skip this node */ 1172 goto end; 1173 } 1174 1175 /* 1176 * Map in devlinks database. 1177 * XXX - This could be moved to ri_init() for better performance. 1178 */ 1179 dprintf((stderr, "di_devlink_init()\n")); 1180 if ((linkhd = di_devlink_init(NULL, 0)) == NULL) { 1181 dprintf((stderr, "di_devlink_init: %s\n", strerror(errno))); 1182 retval = -1; 1183 goto end; 1184 } 1185 1186 /* Initialize argument for devinfo treewalk */ 1187 di_arg.err = 0; 1188 di_arg.node = DI_NODE_NIL; 1189 di_arg.pathbuf = pathbuf; 1190 di_arg.table = &devicetable; 1191 di_arg.linkhd = linkhd; 1192 1193 /* Use libdevinfo treewalk to build device lookup table */ 1194 if (di_walk_node(root, DI_WALK_CLDFIRST, (void *)&di_arg, 1195 devinfo_node_walk) != 0) { 1196 dprintf((stderr, "di_walk_node: %s\n", strerror(errno))); 1197 retval = -1; 1198 goto end; 1199 } 1200 if (di_arg.err != 0) { 1201 dprintf((stderr, "di_walk_node: device tree walk failed\n")); 1202 retval = -1; 1203 goto end; 1204 } 1205 1206 /* Call RCM to gather usage information */ 1207 (void) snprintf(pathbuf, MAXPATHLEN, "/devices%s", path); 1208 dprintf((stderr, "rcm_get_info(%s)\n", pathbuf)); 1209 if (rcm_get_info(rcm->hdl, pathbuf, 1210 RCM_INCLUDE_SUBTREE|RCM_INCLUDE_DEPENDENT, &rcm_info) != 1211 RCM_SUCCESS) { 1212 dprintf((stderr, "rcm_get_info (errno=%d)\n", errno)); 1213 retval = -1; 1214 goto end; 1215 } 1216 1217 /* Sort the device table by name (proper order for lookups) */ 1218 qsort(devicetable.table, devicetable.n_entries, sizeof (lookup_entry_t), 1219 table_compare_names); 1220 1221 /* Perform mappings of RCM usage segments to device table entries */ 1222 lastdeventry = NULL; 1223 rcm_tuple = NULL; 1224 while ((rcm_tuple = rcm_info_next(rcm_info, rcm_tuple)) != NULL) { 1225 if ((rcm_rsrc = rcm_info_rsrc(rcm_tuple)) == NULL) 1226 continue; 1227 if (deventry = lookup(&devicetable, rcm_rsrc)) { 1228 if (add_usage(deventry, rcm_rsrc, rcm_tuple)) { 1229 retval = -1; 1230 goto end; 1231 } 1232 lastdeventry = deventry; 1233 } else { 1234 if (add_usage(lastdeventry, rcm_rsrc, rcm_tuple)) { 1235 retval = -1; 1236 goto end; 1237 } 1238 } 1239 } 1240 1241 /* Re-sort the device table by index number (original treewalk order) */ 1242 qsort(devicetable.table, devicetable.n_entries, sizeof (lookup_entry_t), 1243 table_compare_indices); 1244 1245 /* 1246 * Use the mapped usage and the device table to construct ri_dev_t's. 1247 * Construct one for each set of entries in the device table with 1248 * matching di_node_t's, if: 1) it has mapped RCM usage, or 2) it is 1249 * a leaf node and the caller has requested that unmanaged nodes be 1250 * included in the output. 1251 */ 1252 i = 0; 1253 while (i < devicetable.n_entries) { 1254 1255 node = devicetable.table[i].node; 1256 1257 /* Count how many usage records are mapped to this node's set */ 1258 n_usage = 0; 1259 set_size = 0; 1260 while (((i + set_size) < devicetable.n_entries) && 1261 (devicetable.table[i + set_size].node == node)) { 1262 n_usage += devicetable.table[i + set_size].n_usage; 1263 set_size += 1; 1264 } 1265 1266 /* 1267 * If there's no usage, then the node is unmanaged. Skip this 1268 * set of devicetable entries unless the node is a leaf node 1269 * and the caller has requested information on unmanaged leaves. 1270 */ 1271 if ((n_usage == 0) && 1272 !((flags & RI_INCLUDE_UNMANAGED) && (ident_leaf(node)))) { 1273 i += set_size; 1274 continue; 1275 } 1276 1277 /* 1278 * The checks above determined that this node is going in. 1279 * So determine its driver/instance name and allocate an 1280 * ri_dev_t for this node. 1281 */ 1282 if (mk_drv_inst(node, drv_inst, devicetable.table[i].name)) { 1283 dprintf((stderr, "mk_drv_inst failed\n")); 1284 retval = -1; 1285 break; 1286 } 1287 if ((io = io_dev_alloc(drv_inst)) == NULL) { 1288 dprintf((stderr, "io_dev_alloc failed\n")); 1289 retval = -1; 1290 break; 1291 } 1292 1293 /* Now add all the RCM usage records (if any) to the ri_dev_t */ 1294 for (j = i; j < (i + set_size); j++) { 1295 for (k = 0; k < devicetable.table[j].n_usage; k++) { 1296 /* Create new ri_client_t for basic usage */ 1297 client = ri_client_alloc( 1298 (char *)devicetable.table[j].usage[k].rsrc, 1299 (char *)devicetable.table[j].usage[k].info); 1300 if (client == NULL) { 1301 dprintf((stderr, 1302 "ri_client_alloc failed\n")); 1303 ri_dev_free(io); 1304 retval = -1; 1305 goto end; 1306 } 1307 1308 /* Add extra query usage to the ri_client_t */ 1309 if ((flags & RI_INCLUDE_QUERY) && 1310 (add_query_state(rcm, client, 1311 devicetable.table[j].usage[k].rsrc, 1312 devicetable.table[j].usage[k].info) != 0)) { 1313 dprintf((stderr, 1314 "add_query_state failed\n")); 1315 ri_dev_free(io); 1316 ri_client_free(client); 1317 retval = -1; 1318 goto end; 1319 } 1320 1321 /* Link new ri_client_t to ri_dev_t */ 1322 if (io->rcm_clients) { 1323 tmp = io->rcm_clients; 1324 while (tmp->next) 1325 tmp = tmp->next; 1326 tmp->next = client; 1327 } else { 1328 io->rcm_clients = client; 1329 } 1330 } 1331 } 1332 1333 /* Link the ri_dev_t into the return value */ 1334 dev_list_append(&ap->ios, io); 1335 1336 /* Advance to the next node set */ 1337 i += set_size; 1338 } 1339 1340 end: 1341 if (rcm_info != NULL) 1342 rcm_free_info(rcm_info); 1343 if (linkhd != NULL) 1344 di_devlink_fini(&linkhd); 1345 if (root != DI_NODE_NIL) 1346 di_fini(root); 1347 empty_table(&devicetable); 1348 1349 dprintf((stderr, "io_cm_info: returning %d\n", retval)); 1350 return (retval); 1351 } 1352 1353 static int 1354 ident_leaf(di_node_t node) 1355 { 1356 di_minor_t minor = DI_MINOR_NIL; 1357 1358 return ((minor = di_minor_next(node, minor)) != DI_MINOR_NIL && 1359 di_child_node(node) == DI_NODE_NIL); 1360 } 1361 1362 /* ARGSUSED */ 1363 static int 1364 mk_drv_inst(di_node_t node, char drv_inst[], char *devfs_path) 1365 { 1366 char *drv; 1367 int inst; 1368 1369 if ((drv = di_driver_name(node)) == NULL) { 1370 dprintf((stderr, "no driver bound to %s\n", 1371 devfs_path)); 1372 return (-1); 1373 } 1374 1375 if ((inst = di_instance(node)) == -1) { 1376 dprintf((stderr, "no instance assigned to %s\n", 1377 devfs_path)); 1378 return (-1); 1379 } 1380 (void) snprintf(drv_inst, MAXPATHLEN, "%s%d", drv, inst); 1381 1382 return (0); 1383 } 1384 1385 /* 1386 * Libdevinfo walker. 1387 * 1388 * During the tree walk of the attached IO devices, for each node 1389 * and all of its associated minors, the following actions are performed: 1390 * - The /devices path of the physical device node or minor 1391 * is stored in a lookup table along with a reference to the 1392 * libdevinfo node it represents via add_lookup_entry(). 1393 * - The device links associated with each device are also 1394 * stored in the same lookup table along with a reference to 1395 * the libdevinfo node it represents via the minor walk callback. 1396 * 1397 */ 1398 static int 1399 devinfo_node_walk(di_node_t node, void *arg) 1400 { 1401 char *devfs_path; 1402 #ifdef DEBUG 1403 char *drv; 1404 #endif /* DEBUG */ 1405 devinfo_arg_t *di_arg = (devinfo_arg_t *)arg; 1406 1407 if (node == DI_NODE_NIL) { 1408 return (DI_WALK_TERMINATE); 1409 } 1410 1411 if (((di_state(node) & DI_DRIVER_DETACHED) == 0) && 1412 ((devfs_path = di_devfs_path(node)) != NULL)) { 1413 1414 /* Use the provided path buffer to create full /devices path */ 1415 (void) snprintf(di_arg->pathbuf, MAXPATHLEN, "/devices%s", 1416 devfs_path); 1417 1418 #ifdef DEBUG 1419 dprintf((stderr, "devinfo_node_walk(%s)\n", di_arg->pathbuf)); 1420 if ((drv = di_driver_name(node)) != NULL) 1421 dprintf((stderr, " driver name %s instance %d\n", drv, 1422 di_instance(node))); 1423 #endif 1424 1425 /* Free the devfs_path */ 1426 di_devfs_path_free(devfs_path); 1427 1428 /* Add an entry to the lookup table for this physical device */ 1429 if (add_lookup_entry(di_arg->table, di_arg->pathbuf, node)) { 1430 dprintf((stderr, "add_lookup_entry: %s\n", 1431 strerror(errno))); 1432 di_arg->err = 1; 1433 return (DI_WALK_TERMINATE); 1434 } 1435 1436 /* Check if this node has minors */ 1437 if ((di_minor_next(node, DI_MINOR_NIL)) != DI_MINOR_NIL) { 1438 /* Walk this node's minors */ 1439 di_arg->node = node; 1440 if (di_walk_minor(node, NULL, DI_CHECK_ALIAS, arg, 1441 devinfo_minor_walk) != 0) { 1442 dprintf((stderr, "di_walk_minor: %s\n", 1443 strerror(errno))); 1444 di_arg->err = 1; 1445 return (DI_WALK_TERMINATE); 1446 } 1447 } 1448 } 1449 1450 return (DI_WALK_CONTINUE); 1451 } 1452 1453 /* 1454 * Use di_devlink_walk to find the /dev link from /devices path for this minor 1455 */ 1456 static int 1457 devinfo_minor_walk(di_node_t node, di_minor_t minor, void *arg) 1458 { 1459 char *name; 1460 char *devfs_path; 1461 devinfo_arg_t *di_arg = (devinfo_arg_t *)arg; 1462 char pathbuf[MAXPATHLEN]; 1463 1464 #ifdef DEBUG 1465 dprintf((stderr, "devinfo_minor_walk(%d) %s\n", minor, 1466 di_arg->pathbuf)); 1467 1468 if ((name = di_minor_name(minor)) != NULL) { 1469 dprintf((stderr, " minor name %s\n", name)); 1470 } 1471 #endif /* DEBUG */ 1472 1473 /* Terminate the walk when the device node changes */ 1474 if (node != di_arg->node) { 1475 return (DI_WALK_TERMINATE); 1476 } 1477 1478 /* Construct full /devices path for this minor */ 1479 if ((name = di_minor_name(minor)) == NULL) { 1480 return (DI_WALK_CONTINUE); 1481 } 1482 (void) snprintf(pathbuf, MAXPATHLEN, "%s:%s", di_arg->pathbuf, name); 1483 1484 /* Add lookup entry for this minor node */ 1485 if (add_lookup_entry(di_arg->table, pathbuf, node)) { 1486 dprintf((stderr, "add_lookup_entry: %s\n", strerror(errno))); 1487 di_arg->err = 1; 1488 return (DI_WALK_TERMINATE); 1489 } 1490 1491 /* 1492 * Walk the associated device links. 1493 * Note that di_devlink_walk() doesn't want "/devices" in its paths. 1494 * Also note that di_devlink_walk() will fail if there are no device 1495 * links, which is fine; so ignore if it fails. Only check for 1496 * internal failures during such a walk. 1497 */ 1498 devfs_path = &pathbuf[strlen("/devices")]; 1499 (void) di_devlink_walk(di_arg->linkhd, NULL, devfs_path, 0, arg, 1500 devinfo_devlink_walk); 1501 if (di_arg->err != 0) { 1502 return (DI_WALK_TERMINATE); 1503 } 1504 1505 return (DI_WALK_CONTINUE); 1506 } 1507 1508 static int 1509 devinfo_devlink_walk(di_devlink_t devlink, void *arg) 1510 { 1511 const char *linkpath; 1512 devinfo_arg_t *di_arg = (devinfo_arg_t *)arg; 1513 1514 /* Get the devlink's path */ 1515 if ((linkpath = di_devlink_path(devlink)) == NULL) { 1516 dprintf((stderr, "di_devlink_path: %s\n", strerror(errno))); 1517 di_arg->err = 1; 1518 return (DI_WALK_TERMINATE); 1519 } 1520 dprintf((stderr, "devinfo_devlink_walk: %s\n", linkpath)); 1521 1522 /* Add lookup entry for this devlink */ 1523 if (add_lookup_entry(di_arg->table, linkpath, di_arg->node)) { 1524 dprintf((stderr, "add_lookup_entry: %s\n", strerror(errno))); 1525 di_arg->err = 1; 1526 return (DI_WALK_TERMINATE); 1527 } 1528 1529 return (DI_WALK_CONTINUE); 1530 } 1531 1532 /* 1533 * Map rcm_info_t's to ri_client_t's, filtering out "uninteresting" (hack) 1534 * RCM clients. The number of "interesting" ri_client_t's is returned 1535 * in cnt if passed non-NULL. 1536 */ 1537 static int 1538 add_rcm_clients(ri_client_t **client_list, rcmd_t *rcm, rcm_info_t *info, 1539 int flags, int *cnt) 1540 { 1541 rcm_info_tuple_t *tuple; 1542 char *rsrc, *usage; 1543 ri_client_t *client, *tmp; 1544 1545 assert(client_list != NULL && rcm != NULL); 1546 1547 if (info == NULL) 1548 return (0); 1549 1550 if (cnt != NULL) 1551 *cnt = 0; 1552 1553 tuple = NULL; 1554 while ((tuple = rcm_info_next(info, tuple)) != NULL) { 1555 if ((rsrc = (char *)rcm_info_rsrc(tuple)) == NULL || 1556 (usage = (char *)rcm_info_info(tuple)) == NULL) { 1557 continue; 1558 } 1559 1560 if (rcm_ignore(rsrc, usage) == 0) 1561 continue; 1562 1563 if ((client = ri_client_alloc(rsrc, usage)) == NULL) 1564 return (-1); 1565 1566 if ((flags & RI_INCLUDE_QUERY) && add_query_state(rcm, client, 1567 rsrc, usage) != 0) { 1568 ri_client_free(client); 1569 return (-1); 1570 } 1571 1572 if (cnt != NULL) 1573 ++*cnt; 1574 1575 /* 1576 * Link in 1577 */ 1578 if ((tmp = *client_list) == NULL) { 1579 *client_list = client; 1580 continue; 1581 } 1582 while (tmp->next != NULL) { 1583 tmp = tmp->next; 1584 } 1585 tmp->next = client; 1586 } 1587 1588 return (0); 1589 } 1590 1591 /* 1592 * Currently only filtering out based on known info string prefixes. 1593 */ 1594 /* ARGSUSED */ 1595 static int 1596 rcm_ignore(char *rsrc, char *infostr) 1597 { 1598 char **cpp; 1599 1600 for (cpp = rcm_info_filter; *cpp != NULL; cpp++) { 1601 if (strncmp(infostr, *cpp, strlen(*cpp)) == 0) { 1602 return (0); 1603 } 1604 } 1605 return (-1); 1606 } 1607 1608 /* 1609 * If this tuple was cached in the offline query pass, add the 1610 * query state and error string to the ri_client_t. 1611 */ 1612 static int 1613 add_query_state(rcmd_t *rcm, ri_client_t *client, const char *rsrc, 1614 const char *info) 1615 { 1616 int qstate = RI_QUERY_UNKNOWN; 1617 char *errstr = NULL; 1618 rcm_info_tuple_t *cached_tuple; 1619 1620 if ((cached_tuple = tuple_lookup(rcm, rsrc, info)) != NULL) { 1621 qstate = state2query(rcm_info_state(cached_tuple)); 1622 errstr = (char *)rcm_info_error(cached_tuple); 1623 } 1624 1625 if (nvlist_add_int32(client->usg_props, RI_QUERY_STATE, qstate) != 0 || 1626 (errstr != NULL && nvlist_add_string(client->usg_props, 1627 RI_QUERY_ERR, errstr) != 0)) { 1628 dprintf((stderr, "nvlist_add fail\n")); 1629 return (-1); 1630 } 1631 1632 return (0); 1633 } 1634 1635 static int 1636 state2query(int rcm_state) 1637 { 1638 int query; 1639 1640 switch (rcm_state) { 1641 case RCM_STATE_OFFLINE_QUERY: 1642 case RCM_STATE_SUSPEND_QUERY: 1643 query = RI_QUERY_OK; 1644 break; 1645 case RCM_STATE_OFFLINE_QUERY_FAIL: 1646 case RCM_STATE_SUSPEND_QUERY_FAIL: 1647 query = RI_QUERY_FAIL; 1648 break; 1649 default: 1650 query = RI_QUERY_UNKNOWN; 1651 break; 1652 } 1653 1654 return (query); 1655 } 1656 1657 static void 1658 dev_list_append(ri_dev_t **head, ri_dev_t *dev) 1659 { 1660 ri_dev_t *tmp; 1661 1662 if ((tmp = *head) == NULL) { 1663 *head = dev; 1664 return; 1665 } 1666 while (tmp->next != NULL) { 1667 tmp = tmp->next; 1668 } 1669 tmp->next = dev; 1670 } 1671 1672 /* 1673 * The cpu list is ordered on cpuid since CMP cpuids will not necessarily 1674 * be discovered in sequence. 1675 */ 1676 static void 1677 dev_list_cpu_insert(ri_dev_t **listp, ri_dev_t *dev, processorid_t newid) 1678 { 1679 ri_dev_t *tmp; 1680 int32_t cpuid; 1681 1682 while ((tmp = *listp) != NULL && 1683 nvlist_lookup_int32(tmp->conf_props, RI_CPU_ID, &cpuid) == 0 && 1684 cpuid < newid) { 1685 listp = &tmp->next; 1686 } 1687 1688 dev->next = tmp; 1689 *listp = dev; 1690 } 1691 1692 /* 1693 * Linear lookup. Should convert to hash tab. 1694 */ 1695 static rcm_info_tuple_t * 1696 tuple_lookup(rcmd_t *rcm, const char *krsrc, const char *kinfo) 1697 { 1698 rcm_info_tuple_t *tuple = NULL; 1699 const char *rsrc, *info; 1700 1701 if ((rcm == NULL) || (krsrc == NULL) || (kinfo == NULL)) { 1702 return (NULL); 1703 } 1704 1705 while ((tuple = rcm_info_next(rcm->offline_query_info, 1706 tuple)) != NULL) { 1707 if ((rsrc = rcm_info_rsrc(tuple)) == NULL || 1708 (info = rcm_info_info(tuple)) == NULL) { 1709 continue; 1710 } 1711 1712 if (strcmp(rsrc, krsrc) == 0 && strcmp(info, kinfo) == 0) { 1713 return (tuple); 1714 } 1715 } 1716 return (NULL); 1717 } 1718 1719 /* 1720 * Create and link attachment point handle. 1721 */ 1722 static ri_ap_t * 1723 ri_ap_alloc(char *ap_id, ri_hdl_t *hdl) 1724 { 1725 ri_ap_t *ap, *tmp; 1726 1727 if ((ap = calloc(1, sizeof (*ap))) == NULL) { 1728 dprintf((stderr, "calloc: %s\n", strerror(errno))); 1729 return (NULL); 1730 } 1731 1732 if (nvlist_alloc(&ap->conf_props, NV_UNIQUE_NAME, 0) != 0 || 1733 nvlist_add_string(ap->conf_props, RI_AP_REQ_ID, ap_id) != 0) { 1734 if (ap->conf_props != NULL) 1735 nvlist_free(ap->conf_props); 1736 free(ap); 1737 return (NULL); 1738 } 1739 1740 if ((tmp = hdl->aps) == NULL) { 1741 hdl->aps = ap; 1742 } else { 1743 while (tmp->next != NULL) { 1744 tmp = tmp->next; 1745 } 1746 tmp->next = ap; 1747 } 1748 1749 return (ap); 1750 } 1751 1752 static ri_dev_t * 1753 ri_dev_alloc(void) 1754 { 1755 ri_dev_t *dev; 1756 1757 if ((dev = calloc(1, sizeof (*dev))) == NULL || 1758 nvlist_alloc(&dev->conf_props, NV_UNIQUE_NAME, 0) != 0) { 1759 s_free(dev); 1760 } 1761 return (dev); 1762 } 1763 1764 static ri_dev_t * 1765 io_dev_alloc(char *drv_inst) 1766 { 1767 ri_dev_t *io; 1768 1769 assert(drv_inst != NULL); 1770 1771 if ((io = ri_dev_alloc()) == NULL) 1772 return (NULL); 1773 1774 if (nvlist_add_string(io->conf_props, RI_IO_DRV_INST, 1775 drv_inst) != 0) { 1776 dprintf((stderr, "nvlist_add_string fail\n")); 1777 ri_dev_free(io); 1778 return (NULL); 1779 } 1780 1781 return (io); 1782 } 1783 1784 static ri_client_t * 1785 ri_client_alloc(char *rsrc, char *usage) 1786 { 1787 ri_client_t *client; 1788 1789 assert(rsrc != NULL && usage != NULL); 1790 1791 if ((client = calloc(1, sizeof (*client))) == NULL) { 1792 dprintf((stderr, "calloc: %s\n", strerror(errno))); 1793 return (NULL); 1794 } 1795 1796 if (nvlist_alloc(&client->usg_props, NV_UNIQUE_NAME, 0) != 0) { 1797 dprintf((stderr, "nvlist_alloc fail\n")); 1798 free(client); 1799 return (NULL); 1800 } 1801 1802 if (nvlist_add_string(client->usg_props, RI_CLIENT_RSRC, rsrc) != 0 || 1803 nvlist_add_string(client->usg_props, RI_CLIENT_USAGE, usage) != 0) { 1804 dprintf((stderr, "nvlist_add_string fail\n")); 1805 ri_client_free(client); 1806 return (NULL); 1807 } 1808 1809 return (client); 1810 } 1811 1812 static void 1813 apd_tbl_free(apd_t apd_tbl[], int napds) 1814 { 1815 int i; 1816 apd_t *apd; 1817 1818 for (i = 0, apd = apd_tbl; i < napds; i++, apd++) 1819 s_free(apd->cfga_list_data); 1820 1821 free(apd_tbl); 1822 } 1823 1824 static char * 1825 pstate2str(int pi_state) 1826 { 1827 char *state; 1828 1829 switch (pi_state) { 1830 case P_OFFLINE: 1831 state = PS_OFFLINE; 1832 break; 1833 case P_ONLINE: 1834 state = PS_ONLINE; 1835 break; 1836 case P_FAULTED: 1837 state = PS_FAULTED; 1838 break; 1839 case P_POWEROFF: 1840 state = PS_POWEROFF; 1841 break; 1842 case P_NOINTR: 1843 state = PS_NOINTR; 1844 break; 1845 case P_SPARE: 1846 state = PS_SPARE; 1847 break; 1848 default: 1849 state = "unknown"; 1850 break; 1851 } 1852 1853 return (state); 1854 } 1855 1856 #ifdef DEBUG 1857 static void 1858 dump_apd_tbl(FILE *fp, apd_t *apds, int n_apds) 1859 { 1860 int i, j; 1861 cfga_list_data_t *cfga_ldata; 1862 1863 for (i = 0; i < n_apds; i++, apds++) { 1864 dprintf((stderr, "apd_tbl[%d].nlist=%d\n", i, apds->nlist)); 1865 for (j = 0, cfga_ldata = apds->cfga_list_data; j < apds->nlist; 1866 j++, cfga_ldata++) { 1867 dprintf((fp, 1868 "apd_tbl[%d].cfga_list_data[%d].ap_log_id=%s\n", 1869 i, j, cfga_ldata->ap_log_id)); 1870 } 1871 } 1872 } 1873 #endif /* DEBUG */ 1874 1875 /* 1876 * The lookup table is a simple array that is grown in chunks 1877 * to optimize memory allocation. 1878 * Indices are assigned to each array entry in-order so that 1879 * the original device tree ordering can be discerned at a later time. 1880 * 1881 * add_lookup_entry is called from the libdevinfo tree traversal callbacks: 1882 * 1) devinfo_node_walk - physical device path for each node in 1883 * the devinfo tree via di_walk_node(), lookup entry name is 1884 * /devices/[di_devfs_path] 1885 * 2) devinfo_minor_walk - physical device path plus minor name for 1886 * each minor associated with a node via di_walk_minor(), lookup entry 1887 * name is /devices/[di_devfs_path:di_minor_name] 1888 * 3) devinfo_devlink_walk - for each minor's /dev link from its /devices 1889 * path via di_devlink_walk(), lookup entry name is di_devlink_path() 1890 */ 1891 static int 1892 add_lookup_entry(lookup_table_t *table, const char *name, di_node_t node) 1893 { 1894 size_t size; 1895 lookup_entry_t *new_table; 1896 1897 1898 /* Grow the lookup table by USAGE_ALLOC_SIZE slots if necessary */ 1899 if (table->n_entries == table->n_slots) { 1900 size = (table->n_slots + USAGE_ALLOC_SIZE) * 1901 sizeof (lookup_entry_t); 1902 new_table = (lookup_entry_t *)realloc(table->table, size); 1903 if (new_table == NULL) { 1904 dprintf((stderr, "add_lookup_entry: alloc failed: %s\n", 1905 strerror(errno))); 1906 errno = ENOMEM; 1907 return (-1); 1908 } 1909 table->table = new_table; 1910 table->n_slots += USAGE_ALLOC_SIZE; 1911 } 1912 1913 dprintf((stderr, "add_lookup_entry[%d]:%s\n", table->n_entries, name)); 1914 1915 /* Add this name to the next slot */ 1916 if ((table->table[table->n_entries].name = strdup(name)) == NULL) { 1917 dprintf((stderr, "add_lookup_entry: strdup failed: %s\n", 1918 strerror(errno))); 1919 errno = ENOMEM; 1920 return (-1); 1921 } 1922 table->table[table->n_entries].index = table->n_entries; 1923 table->table[table->n_entries].node = node; 1924 table->table[table->n_entries].n_usage = 0; 1925 table->table[table->n_entries].usage = NULL; 1926 table->n_entries += 1; 1927 1928 return (0); 1929 } 1930 1931 /* 1932 * lookup table entry names are full pathname strings, all start with / 1933 */ 1934 static int 1935 table_compare_names(const void *a, const void *b) 1936 { 1937 lookup_entry_t *entry1 = (lookup_entry_t *)a; 1938 lookup_entry_t *entry2 = (lookup_entry_t *)b; 1939 1940 return (strcmp(entry1->name, entry2->name)); 1941 } 1942 1943 1944 /* 1945 * Compare two indices and return -1 for less, 1 for greater, 0 for equal 1946 */ 1947 static int 1948 table_compare_indices(const void *a, const void *b) 1949 { 1950 lookup_entry_t *entry1 = (lookup_entry_t *)a; 1951 lookup_entry_t *entry2 = (lookup_entry_t *)b; 1952 1953 if (entry1->index < entry2->index) 1954 return (-1); 1955 if (entry1->index > entry2->index) 1956 return (1); 1957 return (0); 1958 } 1959 1960 /* 1961 * Given a RCM resource name, find the matching entry in the IO device table 1962 */ 1963 static lookup_entry_t * 1964 lookup(lookup_table_t *table, const char *rcm_rsrc) 1965 { 1966 lookup_entry_t *entry; 1967 lookup_entry_t lookup_arg; 1968 1969 dprintf((stderr, "lookup:%s\n", rcm_rsrc)); 1970 lookup_arg.name = (char *)rcm_rsrc; 1971 entry = bsearch(&lookup_arg, table->table, table->n_entries, 1972 sizeof (lookup_entry_t), table_compare_names); 1973 1974 #ifdef DEBUG 1975 if (entry != NULL) { 1976 dprintf((stderr, " found entry:%d\n", entry->index)); 1977 } 1978 #endif /* DEBUG */ 1979 return (entry); 1980 } 1981 1982 /* 1983 * Add RCM usage to the given device table entry. 1984 * Returns -1 on realloc failure. 1985 */ 1986 static int 1987 add_usage(lookup_entry_t *entry, const char *rcm_rsrc, rcm_info_tuple_t *tuple) 1988 { 1989 size_t size; 1990 const char *info; 1991 usage_t *new_usage; 1992 1993 if ((entry == NULL) || 1994 ((info = rcm_info_info(tuple)) == NULL)) 1995 return (0); 1996 1997 if (rcm_ignore((char *)rcm_rsrc, (char *)info) == 0) 1998 return (0); 1999 2000 size = (entry->n_usage + 1) * sizeof (usage_t); 2001 new_usage = (usage_t *)realloc(entry->usage, size); 2002 if (new_usage == NULL) { 2003 dprintf((stderr, "add_usage: alloc failed: %s\n", 2004 strerror(errno))); 2005 return (-1); 2006 } 2007 dprintf((stderr, "add_usage: entry %d rsrc: %s info: %s\n", 2008 entry->index, rcm_rsrc, info)); 2009 2010 entry->usage = new_usage; 2011 entry->usage[entry->n_usage].rsrc = rcm_rsrc; 2012 entry->usage[entry->n_usage].info = info; 2013 entry->n_usage += 1; 2014 return (0); 2015 } 2016 2017 static void 2018 empty_table(lookup_table_t *table) 2019 { 2020 int i; 2021 2022 if (table) { 2023 for (i = 0; i < table->n_entries; i++) { 2024 if (table->table[i].name) 2025 free(table->table[i].name); 2026 /* 2027 * Note: the strings pointed to from within 2028 * usage were freed already by rcm_free_info 2029 */ 2030 if (table->table[i].usage) 2031 free(table->table[i].usage); 2032 } 2033 if (table->table) 2034 free(table->table); 2035 table->table = NULL; 2036 table->n_entries = 0; 2037 table->n_slots = 0; 2038 } 2039 } 2040