xref: /titanic_44/usr/src/cmd/dcs/sparc/sun4u/ri_init.c (revision 1a7c1b724419d3cb5fa6eea75123c6b2060ba31b)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * Implementation of ri_init routine for obtaining mapping
31  * of system board attachment points to physical devices and to
32  * the Reconfiguration Coordination Manager (RCM) client usage
33  * of these devices.
34  */
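/*
 * Illustrative usage sketch (not part of this library); "SB0" is a
 * placeholder attachment point id supplied by the caller:
 *
 *	ri_hdl_t *hdl;
 *	char *ap_ids[] = { "SB0" };
 *
 *	if (ri_init(1, ap_ids, RI_INCLUDE_QUERY, &hdl) == RI_SUCCESS) {
 *		(walk the returned snapshot)
 *		ri_fini(hdl);
 *	}
 */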
35 #include <string.h>
36 #include <stdlib.h>
37 #include <unistd.h>
38 #include <kstat.h>
39 #include <sys/param.h>
40 #include <sys/sbd_ioctl.h>
41 #include "rsrc_info_impl.h"
42 
43 /*
44  * Occupant types exported by cfgadm sbd plugin via
45  * config_admin(3CFGADM).
46  */
47 #define	SBD_CM_CPU	"cpu"
48 #define	SBD_CM_MEM	"memory"
49 #define	SBD_CM_IO	"io"
50 
51 /*
52  * RCM abstract resource names.
53  */
54 #define	RCM_MEM_ALL	"SUNW_memory"
55 #define	RCM_CPU_ALL	"SUNW_cpu"
56 #define	RCM_CPU		RCM_CPU_ALL"/cpu"
57 
58 #define	KBYTE		1024
59 #define	MBYTE		1048576
60 #define	USAGE_ALLOC_SIZE	128
61 
62 /*
63  * Define used by io_cm_info() to tell ri_init() that a node was NULL
64  * (driver not attached), so ri_init() can skip it instead of failing.
65  */
66 #define	RI_NODE_NIL	1
67 
68 /*
69  * This code is CMP aware as it parses the
70  * cfgadm info field for individual cpuids.
71  */
72 #define	CPUID_SEP	","
73 #define	CPU_INFO_FMT	"cpuid=%s speed=%d ecache=%d"
74 
75 typedef struct {
76 	cfga_list_data_t *cfga_list_data;
77 	int		nlist;
78 } apd_t;
79 
80 typedef struct {
81 	long		pagesize;
82 	long		syspages;
83 	long		sysmb;
84 } mem_stat_t;
85 
86 #define	ms_syspages	m_stat.syspages
87 #define	ms_pagesize	m_stat.pagesize
88 #define	ms_sysmb	m_stat.sysmb
89 
90 typedef int32_t		cpuid_t;
91 
92 typedef struct {
93 	int	cpuid_max;	/* maximum cpuid value */
94 	int	*ecache_sizes;	/* indexed by cpuid */
95 } ecache_info_t;
96 
97 typedef struct {
98 	rcm_handle_t	*hdl;
99 	rcm_info_t	*offline_query_info;
100 	char		**rlist;
101 	int		nrlist;
102 	cpuid_t		*cpus;
103 	int		ncpus;
104 	int		ndevs;
105 	uint_t		query_pages;
106 	mem_stat_t	m_stat;
107 	ecache_info_t	ecache_info;
108 } rcmd_t;
109 
110 typedef struct {
111 	const char	*rsrc;
112 	const char	*info;
113 } usage_t;
114 
115 /* Lookup table entry for matching IO devices to RCM resource usage */
116 typedef struct {
117 	int		index;		/* index into the table array */
118 	di_node_t	node;		/* associated devinfo node */
119 	char		*name;		/* device full path name */
120 	int		n_usage;
121 	usage_t		*usage;
122 } lookup_entry_t;
123 
124 typedef struct {
125 	int		n_entries;
126 	int		n_slots;
127 	lookup_entry_t	*table;
128 } lookup_table_t;
129 
130 typedef struct {
131 	int			err;
132 	di_node_t		node;
133 	char			*pathbuf;
134 	lookup_table_t		*table;
135 	di_devlink_handle_t	linkhd;
136 } devinfo_arg_t;
137 
138 static int dyn_ap_ids(char *, cfga_list_data_t **, int *);
139 static int rcm_init(rcmd_t *, apd_t [], int, int);
140 static void rcm_fini(rcmd_t *);
141 static int rcm_query_init(rcmd_t *, apd_t [], int);
142 static int cap_request(ri_hdl_t *, rcmd_t *);
143 static int syscpus(cpuid_t **, int *);
144 static int cpu_cap_request(ri_hdl_t *, rcmd_t *);
145 static int mem_cap_request(ri_hdl_t *, rcmd_t *);
146 static int (*cm_rcm_qpass_func(cfga_type_t))(cfga_list_data_t *, rcmd_t *);
147 static int cpu_rcm_qpass(cfga_list_data_t *, rcmd_t *);
148 static int mem_rcm_qpass(cfga_list_data_t *, rcmd_t *);
149 static int io_rcm_qpass(cfga_list_data_t *, rcmd_t *);
150 static int (*cm_info_func(cfga_type_t))(ri_ap_t *, cfga_list_data_t *, int,
151     rcmd_t *);
152 static int cpu_cm_info(ri_ap_t *, cfga_list_data_t *, int, rcmd_t *);
153 static int i_cpu_cm_info(processorid_t, int, ri_ap_t *, rcmd_t *);
154 static int mem_cm_info(ri_ap_t *, cfga_list_data_t *, int, rcmd_t *);
155 static int io_cm_info(ri_ap_t *, cfga_list_data_t *, int, rcmd_t *);
156 static int ident_leaf(di_node_t);
157 static int mk_drv_inst(di_node_t, char [], char *);
158 static int devinfo_node_walk(di_node_t, void *);
159 static int devinfo_minor_walk(di_node_t, di_minor_t, void *);
160 static int devinfo_devlink_walk(di_devlink_t, void *);
161 static int add_rcm_clients(ri_client_t **, rcmd_t *, rcm_info_t *, int, int *);
162 static int rcm_ignore(char *, char *);
163 static int add_query_state(rcmd_t *, ri_client_t *, const char *, const char *);
164 static int state2query(int);
165 static void dev_list_append(ri_dev_t **, ri_dev_t *);
166 static void dev_list_cpu_insert(ri_dev_t **, ri_dev_t *, processorid_t);
167 static rcm_info_tuple_t *tuple_lookup(rcmd_t *, const char *, const char *);
168 static ri_ap_t *ri_ap_alloc(char *, ri_hdl_t *);
169 static ri_dev_t *ri_dev_alloc(void);
170 static ri_dev_t *io_dev_alloc(char *);
171 static ri_client_t *ri_client_alloc(char *, char *);
172 static void apd_tbl_free(apd_t [], int);
173 static char *pstate2str(int);
174 static int ecache_info_init(ecache_info_t *);
175 static int find_cpu_nodes(di_node_t, void *);
176 static int prop_lookup_int(di_node_t, di_prom_handle_t, char *, int **);
177 static int add_lookup_entry(lookup_table_t *, const char *, di_node_t);
178 static int table_compare_names(const void *, const void *);
179 static int table_compare_indices(const void *, const void *);
180 static lookup_entry_t *lookup(lookup_table_t *table, const char *);
181 static int add_usage(lookup_entry_t *, const char *, rcm_info_tuple_t *);
182 static void empty_table(lookup_table_t *);
183 
184 #ifdef DEBUG
185 static void		dump_apd_tbl(FILE *, apd_t *, int);
186 #endif /* DEBUG */
187 
188 static struct {
189 	char	*type;
190 	int	(*cm_info)(ri_ap_t *, cfga_list_data_t *, int, rcmd_t *);
191 	int	(*cm_rcm_qpass)(cfga_list_data_t *, rcmd_t *);
192 } cm_ctl[] = {
193 	{SBD_CM_CPU,	cpu_cm_info,	cpu_rcm_qpass},
194 	{SBD_CM_MEM,	mem_cm_info,	mem_rcm_qpass},
195 	{SBD_CM_IO,	io_cm_info,	io_rcm_qpass}
196 };
197 
198 /*
199  * Table of known info string prefixes for RCM modules that do not
200  * represent actual resource usage, but instead provide name translations
201  * or sequencing within the RCM namespace. Since RCM provides no way to
202  * filter these out, we must maintain this hack.
203  */
204 static char *rcm_info_filter[] = {
205 	"Network interface",		/* Network naming module */
206 	NULL
207 };
208 
209 
210 /*
211  * Allocate and populate the resource info snapshot handle.
212  */
213 int
214 ri_init(int n_apids, char **ap_ids, int flags, ri_hdl_t **hdlp)
215 {
216 	int			i, j;
217 	ri_hdl_t		*ri_hdl;
218 	ri_ap_t			*ap_hdl;
219 	rcmd_t			*rcm = NULL;
220 	cfga_list_data_t	*cfga_ldata;
221 	apd_t			*apd, *apd_tbl = NULL;
222 	int			(*cm_info)(ri_ap_t *, cfga_list_data_t *,
223 				    int, rcmd_t *);
224 	int			rv = RI_SUCCESS;
225 	int			cm_info_rv;
226 
227 	if (n_apids <= 0 || ap_ids == NULL || hdlp == NULL)
228 		return (RI_INVAL);
229 
230 	if (flags & ~RI_REQ_MASK)
231 		return (RI_NOTSUP);
232 
233 	*hdlp = NULL;
234 	if ((ri_hdl = calloc(1, sizeof (*ri_hdl))) == NULL ||
235 	    (rcm = calloc(1, sizeof (*rcm))) == NULL ||
236 	    (apd_tbl = calloc(n_apids, sizeof (*apd_tbl))) == NULL) {
237 		dprintf((stderr, "calloc: %s\n", strerror(errno)));
238 		rv = RI_FAILURE;
239 		goto out;
240 	}
241 
242 	/*
243 	 * Create mapping of boards to components.
244 	 */
245 	for (i = 0, apd = apd_tbl; i < n_apids; i++, apd++) {
246 		if (dyn_ap_ids(ap_ids[i], &apd->cfga_list_data,
247 		    &apd->nlist) == -1) {
248 			rv = RI_INVAL;
249 			goto out;
250 		}
251 	}
252 #ifdef DEBUG
253 	dump_apd_tbl(stderr, apd_tbl, n_apids);
254 #endif /* DEBUG */
255 
256 	if (rcm_init(rcm, apd_tbl, n_apids, flags) != 0) {
257 		rv = RI_FAILURE;
258 		goto out;
259 	}
260 
261 	/*
262 	 * Best effort attempt to read cpu ecache sizes from
263 	 * OBP/Solaris device trees. These are later looked up
264 	 * in i_cpu_cm_info().
265 	 */
266 	(void) ecache_info_init(&rcm->ecache_info);
267 
268 	for (i = 0, apd = apd_tbl; i < n_apids; i++, apd++) {
269 		if ((ap_hdl = ri_ap_alloc(ap_ids[i], ri_hdl)) == NULL) {
270 			rv = RI_FAILURE;
271 			goto out;
272 		}
273 
274 		/*
275 		 * Add component info based on occupant type. Note all
276 		 * passes through the apd table skip over the first
277 		 * cfga_list_data entry, which is the static system board
278 		 * attachment point.
279 		 */
280 		for (j = 1, cfga_ldata = &apd->cfga_list_data[1];
281 		    j < apd->nlist; j++, cfga_ldata++) {
282 			if (cfga_ldata->ap_o_state != CFGA_STAT_CONFIGURED) {
283 				continue;
284 			}
285 
286 			if ((cm_info =
287 			    cm_info_func(cfga_ldata->ap_type)) != NULL) {
288 				cm_info_rv =
289 				    (*cm_info)(ap_hdl, cfga_ldata, flags, rcm);
290 				if (cm_info_rv != 0) {
291 					/*
292 					 * If we cannot obtain info for the ap,
293 					 * skip it and do not fail the entire
294 					 * operation.  This case occurs when the
295 					 * driver for a device is not attached:
296 					 * di_init() fails in io_cm_info(),
297 					 * which then returns RI_NODE_NIL.
298 					 */
299 					if (cm_info_rv == RI_NODE_NIL)
300 						continue;
301 					else {
302 						rv = RI_FAILURE;
303 						goto out;
304 					}
305 				}
306 			}
307 		}
308 	}
309 
310 	if ((flags & RI_INCLUDE_QUERY) && cap_request(ri_hdl, rcm) != 0)
311 		rv = RI_FAILURE;
312 
313 out:
314 	if (apd_tbl != NULL)
315 		apd_tbl_free(apd_tbl, n_apids);
316 	if (rcm != NULL)
317 		rcm_fini(rcm);
318 
319 	if (rv == RI_SUCCESS)
320 		*hdlp = ri_hdl;
321 	else
322 		ri_fini(ri_hdl);
323 
324 	return (rv);
325 }
326 
327 /*
328  * Map static board attachment point to dynamic attachment points (components).
329  */
330 static int
331 dyn_ap_ids(char *ap_id, cfga_list_data_t **ap_id_list, int *nlist)
332 {
333 	cfga_err_t	cfga_err;
334 	char		*errstr;
335 	char		*opts = "parsable";
336 	char		*listops = "class=sbd";
337 
338 	cfga_err = config_list_ext(1, &ap_id, ap_id_list, nlist,
339 	    opts, listops, &errstr, CFGA_FLAG_LIST_ALL);
340 	if (cfga_err != CFGA_OK) {
341 		dprintf((stderr, "config_list_ext: %s\n",
342 		    config_strerror(cfga_err)));
343 		return (-1);
344 	}
345 
346 	return (0);
347 }
348 
349 /*
350  * Initialize rcm handle, memory stats. Cache query result if necessary.
351  */
352 static int
353 rcm_init(rcmd_t *rcm, apd_t apd_tbl[], int napds, int flags)
354 {
355 	longlong_t	ii;
356 	int		rv = 0;
357 
358 	rcm->offline_query_info = NULL;
359 	rcm->rlist = NULL;
360 	rcm->cpus = NULL;
361 
362 	if (rcm_alloc_handle(NULL, RCM_NOPID, NULL, &rcm->hdl) != RCM_SUCCESS) {
363 		dprintf((stderr, "rcm_alloc_handle (errno=%d)\n", errno));
364 		return (-1);
365 	}
366 
367 	if ((rcm->ms_pagesize = sysconf(_SC_PAGE_SIZE)) == -1 ||
368 	    (rcm->ms_syspages = sysconf(_SC_PHYS_PAGES)) == -1) {
369 		dprintf((stderr, "sysconf: %s\n", strerror(errno)));
370 		return (-1);
371 	}
372 	ii = (longlong_t)rcm->ms_pagesize * rcm->ms_syspages;
373 	rcm->ms_sysmb = (int)((ii+MBYTE-1) / MBYTE);
374 
375 	if (flags & RI_INCLUDE_QUERY)
376 		rv = rcm_query_init(rcm, apd_tbl, napds);
377 
378 	return (rv);
379 }
380 
381 static void
382 rcm_fini(rcmd_t *rcm)
383 {
384 	char	**cpp;
385 
386 	assert(rcm != NULL);
387 
388 	if (rcm->offline_query_info != NULL)
389 		rcm_free_info(rcm->offline_query_info);
390 	if (rcm->hdl != NULL)
391 		rcm_free_handle(rcm->hdl);
392 
393 	if (rcm->rlist != NULL) {
394 		for (cpp = rcm->rlist; *cpp != NULL; cpp++)
395 			s_free(*cpp);
396 		free(rcm->rlist);
397 	}
398 
399 	s_free(rcm->cpus);
400 	free(rcm);
401 }
402 
403 #define	NODENAME_CMP		"cmp"
404 #define	NODENAME_SSM		"ssm"
405 #define	PROP_CPUID		"cpuid"
406 #define	PROP_DEVICE_TYPE	"device-type"
407 #define	PROP_ECACHE_SIZE	"ecache-size"
408 #define	PROP_L2_CACHE_SIZE	"l2-cache-size"
409 #define	PROP_L3_CACHE_SIZE	"l3-cache-size"
410 
411 typedef struct {
412 	di_node_t		root;
413 	di_prom_handle_t	ph;
414 	ecache_info_t		*ecache_info;
415 } di_arg_t;
416 
417 /*
418  * The ecache sizes for individual cpus are read from the
419  * OBP/Solaris device trees. This info cannot be derived
420  * from the cfgadm_sbd cpu attachment point ecache info,
421  * which may be a sum of multiple cores for CMP.
422  */
423 static int
424 ecache_info_init(ecache_info_t *ec)
425 {
426 	di_arg_t	di_arg;
427 	di_prom_handle_t ph = DI_PROM_HANDLE_NIL;
428 	di_node_t	root = DI_NODE_NIL;
429 	int		cpuid_max, rv = 0;
430 
431 	assert(ec != NULL && ec->cpuid_max == 0 && ec->ecache_sizes == NULL);
432 
433 	if ((cpuid_max = sysconf(_SC_CPUID_MAX)) == -1) {
434 		dprintf((stderr, "sysconf fail: %s\n", strerror(errno)));
435 		rv = -1;
436 		goto done;
437 	}
438 
439 	if ((root = di_init("/", DINFOCPYALL)) == DI_NODE_NIL) {
440 		dprintf((stderr, "di_init fail: %s\n", strerror(errno)));
441 		rv = -1;
442 		goto done;
443 	}
444 
445 	if ((ph = di_prom_init()) == DI_PROM_HANDLE_NIL) {
446 		dprintf((stderr, "di_prom_init fail: %s\n", strerror(errno)));
447 		rv = -1;
448 		goto done;
449 	}
450 
451 	if ((ec->ecache_sizes = calloc(cpuid_max + 1, sizeof (int))) == NULL) {
452 		dprintf((stderr, "calloc fail: %s\n", strerror(errno)));
453 		rv = -1;
454 		goto done;
455 	}
456 	ec->cpuid_max = cpuid_max;
457 
458 	dprintf((stderr, "cpuid_max is set to %d\n", ec->cpuid_max));
459 
460 	di_arg.ph = ph;
461 	di_arg.root = root;
462 	di_arg.ecache_info = ec;
463 
464 	if (di_walk_node(root, DI_WALK_CLDFIRST, (void *)&di_arg,
465 	    find_cpu_nodes) != 0) {
466 		dprintf((stderr, "di_walk_node fail: %s\n", strerror(errno)));
467 		rv = -1;
468 	}
469 
470 done:
471 	if (root != DI_NODE_NIL)
472 		di_fini(root);
473 	if (ph != DI_PROM_HANDLE_NIL)
474 		di_prom_fini(ph);
475 
476 	return (rv);
477 }
478 
479 /*
480  * Libdevinfo node walk callback for reading ecache size
481  * properties for cpu device nodes. Subtrees not containing
482  * cpu nodes are filtered out.
483  */
484 static int
485 find_cpu_nodes(di_node_t node, void *arg)
486 {
487 	char			*name;
488 	int			*cpuid, *ecache;
489 	di_arg_t		*di_arg = (di_arg_t *)arg;
490 	ecache_info_t		*ec = di_arg->ecache_info;
491 	di_prom_handle_t	ph = di_arg->ph;
492 
493 	if (node == DI_NODE_NIL) {
494 		return (DI_WALK_TERMINATE);
495 	}
496 
497 	if (node == di_arg->root) {
498 		return (DI_WALK_CONTINUE);
499 	}
500 
501 	if (di_nodeid(node) == DI_PSEUDO_NODEID) {
502 		return (DI_WALK_PRUNECHILD);
503 	}
504 
505 	name = di_node_name(node);
506 	if (name != NULL) {
507 		/*
508 		 * CMP nodes will be the parent of cpu nodes. On some platforms,
509 		 * cpu nodes will be under the ssm node. In either case,
510 		 * continue searching this subtree.
511 		 */
512 		if (strncmp(name, NODENAME_SSM, strlen(NODENAME_SSM)) == 0 ||
513 		    strncmp(name, NODENAME_CMP, strlen(NODENAME_CMP)) == 0) {
514 			return (DI_WALK_CONTINUE);
515 		}
516 	}
517 
518 	dprintf((stderr, "find_cpu_nodes: node=%p, name=%s, binding_name=%s\n",
519 	    node, di_node_name(node), di_binding_name(node)));
520 
521 	/*
522 	 * Ecache size property name differs with processor implementation.
523 	 * Panther has both L2 and L3, so check for L3 first to differentiate
524 	 * from Jaguar, which has only L2.
525 	 */
526 	if (prop_lookup_int(node, ph, PROP_CPUID, &cpuid) == 0 &&
527 	    (prop_lookup_int(node, ph, PROP_ECACHE_SIZE, &ecache) == 0 ||
528 	    prop_lookup_int(node, ph, PROP_L3_CACHE_SIZE, &ecache) == 0 ||
529 	    prop_lookup_int(node, ph, PROP_L2_CACHE_SIZE, &ecache) == 0)) {
530 		assert(ec != NULL && ec->ecache_sizes != NULL &&
531 		    *cpuid <= ec->cpuid_max);
532 		ec->ecache_sizes[*cpuid] = *ecache;
533 	}
534 
535 	return (DI_WALK_PRUNECHILD);
536 }
537 
538 /*
539  * Given a di_node_t, call the appropriate int property lookup routine.
540  * Note: This lookup fails if the int property has multiple value entries.
541  */
542 static int
543 prop_lookup_int(di_node_t node, di_prom_handle_t ph, char *propname, int **ival)
544 {
545 	int rv;
546 
547 	rv = (di_nodeid(node) == DI_PROM_NODEID) ?
548 	    di_prom_prop_lookup_ints(ph, node, propname, ival) :
549 	    di_prop_lookup_ints(DDI_DEV_T_ANY, node, propname, ival);
550 
551 	return (rv == 1 ? 0 : -1);
552 }
553 
554 /*
555  * For offline queries, RCM must be given a list of all resources
556  * so modules can have access to the full scope of the operation.
557  * The rcm_get_info calls are made individually in order to map the
558  * returned rcm_info_t's to physical devices. The rcm_request_offline
559  * result is cached so the query state can be looked up as we process
560  * the rcm_get_info calls. This routine also tallies up the amount of
561  * memory going away and creates a list of cpu ids to be used
562  * later for rcm_request_capacity_change.
563  */
564 static int
565 rcm_query_init(rcmd_t *rcm, apd_t apd_tbl[], int napds)
566 {
567 	apd_t			*apd;
568 	int 			i, j;
569 	cfga_list_data_t	*cfga_ldata;
570 	int			(*cm_rcm_qpass)(cfga_list_data_t *, rcmd_t *);
571 #ifdef DEBUG
572 	char			**cpp;
573 #endif /* DEBUG */
574 
575 	/*
576 	 * Initial pass to size cpu and resource name arrays needed to
577 	 * interface with RCM. Attachment point ids for CMP can represent
578 	 * multiple cpus (and resource names). Instead of parsing the
579  * cfgadm info field here, assume the worst case that all component
580 	 * attachment points are CMP.
581 	 */
582 	rcm->ndevs = 0;
583 	for (i = 0, apd = apd_tbl; i < napds; i++, apd++) {
584 		for (j = 1, cfga_ldata = &apd->cfga_list_data[1];
585 		    j < apd->nlist; j++, cfga_ldata++) {
586 			if (cfga_ldata->ap_o_state != CFGA_STAT_CONFIGURED) {
587 				continue;
588 			}
589 			rcm->ndevs += SBD_MAX_CORES_PER_CMP;
590 		}
591 	}
592 
593 	/* account for trailing NULL in rlist */
594 	if (rcm->ndevs > 0 &&
595 	    ((rcm->cpus = calloc(rcm->ndevs, sizeof (cpuid_t))) == NULL ||
596 	    (rcm->rlist = calloc(rcm->ndevs + 1, sizeof (char *))) == NULL)) {
597 		dprintf((stderr, "calloc: %s\n", strerror(errno)));
598 		return (-1);
599 	}
600 
601 	/*
602 	 * Second pass to fill in the RCM resource and cpu lists.
603 	 */
604 	for (i = 0, apd = apd_tbl; i < napds; i++, apd++) {
605 		for (j = 1, cfga_ldata = &apd->cfga_list_data[1];
606 		    j < apd->nlist; j++, cfga_ldata++) {
607 			if (cfga_ldata->ap_o_state != CFGA_STAT_CONFIGURED) {
608 				continue;
609 			}
610 			if ((cm_rcm_qpass =
611 			    cm_rcm_qpass_func(cfga_ldata->ap_type)) != NULL &&
612 			    (*cm_rcm_qpass)(cfga_ldata, rcm) != 0) {
613 				return (-1);
614 			}
615 		}
616 	}
617 
618 	if (rcm->nrlist == 0)
619 		return (0);
620 
621 	/*
622 	 * Cache query result. Since we are only interested in the
623 	 * set of RCM clients processed and not their request status,
624 	 * the return value is irrelevant.
625 	 */
626 	(void) rcm_request_offline_list(rcm->hdl, rcm->rlist,
627 	    RCM_QUERY|RCM_SCOPE, &rcm->offline_query_info);
628 
629 #ifdef DEBUG
630 	dprintf((stderr, "RCM rlist: nrlist=%d\n", rcm->nrlist));
631 	for (cpp = rcm->rlist, i = 0; *cpp != NULL; cpp++, i++) {
632 		dprintf((stderr, "rlist[%d]=%s\n", i, *cpp));
633 	}
634 #endif /* DEBUG */
635 
636 	return (0);
637 }
638 
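/*
 * Issue the offline query capacity change requests for any cpus and
 * memory pages found to be going away.
 */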
639 static int
640 cap_request(ri_hdl_t *ri_hdl, rcmd_t *rcm)
641 {
642 	return (((rcm->ncpus > 0 && cpu_cap_request(ri_hdl, rcm) != 0) ||
643 	    (rcm->query_pages > 0 && mem_cap_request(ri_hdl, rcm) != 0)) ?
644 	    -1 : 0);
645 }
646 
647 /*
648  * RCM capacity change request for cpus.
649  */
650 static int
651 cpu_cap_request(ri_hdl_t *ri_hdl, rcmd_t *rcm)
652 {
653 	cpuid_t		*syscpuids, *newcpuids;
654 	int		sysncpus, newncpus;
655 	rcm_info_t	*rcm_info = NULL;
656 	int		i, j, k;
657 	nvlist_t	*nvl = NULL;
658 	int		rv = 0;
659 
660 	/* get all cpus in the system */
661 	if (syscpus(&syscpuids, &sysncpus) == -1)
662 		return (-1);
663 
664 	newncpus = sysncpus - rcm->ncpus;
665 	if ((newcpuids = calloc(newncpus, sizeof (cpuid_t))) == NULL) {
666 		dprintf((stderr, "calloc: %s", strerror(errno)));
667 		rv = -1;
668 		goto out;
669 	}
670 
671 	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) {
672 		dprintf((stderr, "nvlist_alloc fail\n"));
673 		rv = -1;
674 		goto out;
675 	}
676 
677 	/*
678 	 * Construct the new cpu list.
679 	 */
680 	for (i = 0, j = 0; i < sysncpus; i++) {
681 		for (k = 0; k < rcm->ncpus; k++) {
682 			if (rcm->cpus[k] == syscpuids[i]) {
683 				break;
684 			}
685 		}
686 		if (k == rcm->ncpus) {
687 			newcpuids[j++] = syscpuids[i];
688 		}
689 	}
690 
691 	if (nvlist_add_int32(nvl, "old_total", sysncpus) != 0 ||
692 	    nvlist_add_int32(nvl, "new_total", newncpus) != 0 ||
693 	    nvlist_add_int32_array(nvl, "old_cpu_list", syscpuids,
694 	    sysncpus) != 0 ||
695 	    nvlist_add_int32_array(nvl, "new_cpu_list", newcpuids,
696 	    newncpus) != 0) {
697 		dprintf((stderr, "nvlist_add fail\n"));
698 		rv = -1;
699 		goto out;
700 	}
701 
702 #ifdef DEBUG
703 	dprintf((stderr, "old_total=%d\n", sysncpus));
704 	for (i = 0; i < sysncpus; i++) {
705 		dprintf((stderr, "old_cpu_list[%d]=%d\n", i, syscpuids[i]));
706 	}
707 	dprintf((stderr, "new_total=%d\n", newncpus));
708 	for (i = 0; i < newncpus; i++) {
709 		dprintf((stderr, "new_cpu_list[%d]=%d\n", i, newcpuids[i]));
710 	}
711 #endif /* DEBUG */
712 
713 	(void) rcm_request_capacity_change(rcm->hdl, RCM_CPU_ALL,
714 	    RCM_QUERY|RCM_SCOPE, nvl, &rcm_info);
715 
716 	rv = add_rcm_clients(&ri_hdl->cpu_cap_clients, rcm, rcm_info, 0, NULL);
717 
718 out:
719 	s_free(syscpuids);
720 	s_free(newcpuids);
721 	if (nvl != NULL)
722 		nvlist_free(nvl);
723 	if (rcm_info != NULL)
724 		rcm_free_info(rcm_info);
725 
726 	return (rv);
727 }
728 
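/*
 * Return the ids of all cpus configured in the system, as reported
 * by the cpu_info kstats.
 */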
729 static int
730 syscpus(cpuid_t **cpuids, int *ncpus)
731 {
732 	kstat_t		*ksp;
733 	kstat_ctl_t	*kc;
734 	cpuid_t		*cp;
735 	int		i;
736 
737 	if ((*ncpus = sysconf(_SC_NPROCESSORS_CONF)) == -1) {
738 		dprintf((stderr, "sysconf: %s\n", errno));
739 		return (-1);
740 	}
741 
742 	if ((kc = kstat_open()) == NULL) {
743 		dprintf((stderr, "kstat_open fail\n"));
744 		return (-1);
745 	}
746 
747 	if ((cp = calloc(*ncpus, sizeof (cpuid_t))) == NULL) {
748 		dprintf((stderr, "calloc: %s\n", errno));
749 		(void) kstat_close(kc);
750 		return (-1);
751 	}
752 
753 	for (i = 0, ksp = kc->kc_chain; ksp != NULL; ksp = ksp->ks_next) {
754 		if (strcmp(ksp->ks_module, "cpu_info") == 0) {
755 			cp[i++] = ksp->ks_instance;
756 		}
757 	}
758 
759 	(void) kstat_close(kc);
760 	*cpuids = cp;
761 
762 	return (0);
763 }
764 
765 /*
766  * RCM capacity change request for memory.
767  */
768 static int
769 mem_cap_request(ri_hdl_t *ri_hdl, rcmd_t *rcm)
770 {
771 	nvlist_t	*nvl;
772 	rcm_info_t	*rcm_info = NULL;
773 	long 		newpages;
774 	int		rv = 0;
775 
776 	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) {
777 		dprintf((stderr, "nvlist_alloc fail\n"));
778 		return (-1);
779 	}
780 
781 	newpages = rcm->ms_syspages - rcm->query_pages;
782 	if (nvlist_add_int32(nvl, "page_size", rcm->ms_pagesize) != 0 ||
783 	    nvlist_add_int32(nvl, "old_pages", rcm->ms_syspages) != 0 ||
784 	    nvlist_add_int32(nvl, "new_pages", newpages) != 0) {
785 		dprintf((stderr, "nvlist_add fail\n"));
786 		nvlist_free(nvl);
787 		return (-1);
788 	}
789 
790 	dprintf((stderr, "memory capacity change req: "
791 	    "page_size=%d, old_pages=%d, new_pages=%d\n",
792 	    rcm->ms_pagesize, rcm->ms_syspages, newpages));
793 
794 	(void) rcm_request_capacity_change(rcm->hdl, RCM_MEM_ALL,
795 	    RCM_QUERY|RCM_SCOPE, nvl, &rcm_info);
796 
797 	rv = add_rcm_clients(&ri_hdl->mem_cap_clients, rcm, rcm_info, 0, NULL);
798 
799 	nvlist_free(nvl);
800 	if (rcm_info != NULL)
801 		rcm_free_info(rcm_info);
802 
803 	return (rv);
804 }
805 
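/*
 * Look up the RCM query-pass handler for the given attachment point
 * type. Returns NULL for types other than cpu, memory and io.
 */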
806 static int
807 (*cm_rcm_qpass_func(cfga_type_t ap_type))(cfga_list_data_t *, rcmd_t *)
808 {
809 	int i;
810 
811 	for (i = 0; i < sizeof (cm_ctl) / sizeof (cm_ctl[0]); i++) {
812 		if (strcmp(cm_ctl[i].type, ap_type) == 0) {
813 			return (cm_ctl[i].cm_rcm_qpass);
814 		}
815 	}
816 	return (NULL);
817 }
818 
819 /*
820  * Save cpu ids and RCM abstract resource names.
821  * Cpu ids will be used for the capacity change request.
822  * Resource names will be used for the offline query.
823  */
824 static int
825 cpu_rcm_qpass(cfga_list_data_t *cfga_ldata, rcmd_t *rcm)
826 {
827 	processorid_t	cpuid;
828 	char		*cpustr, *lasts, *rsrcname, rbuf[32];
829 	char		cbuf[CFGA_INFO_LEN];
830 	int		speed, ecache;
831 
832 	assert(sscanf(cfga_ldata->ap_info, CPU_INFO_FMT, cbuf, &speed,
833 	    &ecache) == 3);
834 
835 	for (cpustr = (char *)strtok_r(cbuf, CPUID_SEP, &lasts);
836 	    cpustr != NULL;
837 	    cpustr = (char *)strtok_r(NULL, CPUID_SEP, &lasts)) {
838 		cpuid = atoi(cpustr);
839 
840 		(void) snprintf(rbuf, sizeof (rbuf), "%s%d", RCM_CPU, cpuid);
841 		if ((rsrcname = strdup(rbuf)) == NULL) {
842 			dprintf((stderr, "strdup fail\n"));
843 			return (-1);
844 		}
845 		assert(rcm->nrlist < rcm->ndevs && rcm->ncpus < rcm->ndevs);
846 		rcm->rlist[rcm->nrlist++] = rsrcname;
847 		rcm->cpus[rcm->ncpus++] = (cpuid_t)cpuid;
848 
849 		dprintf((stderr, "cpu_cm_info: cpuid=%d, rsrcname=%s",
850 		    cpuid, rsrcname));
851 	}
852 
853 	return (0);
854 }
855 
856 /*
857  * No RCM resource names for individual memory units, so
858  * just add to offline query page count.
859  */
860 static int
861 mem_rcm_qpass(cfga_list_data_t *cfga, rcmd_t *rcm)
862 {
863 	char		*cp;
864 	uint_t		kbytes;
865 	longlong_t	ii;
866 
867 	if ((cp = strstr(cfga->ap_info, "size")) == NULL ||
868 	    sscanf(cp, "size=%u", &kbytes) != 1) {
869 		dprintf((stderr, "unknown sbd info format: %s\n", cp));
870 		return (-1);
871 	}
872 
873 	ii = (longlong_t)kbytes * KBYTE;
874 	rcm->query_pages += (uint_t)(ii / rcm->ms_pagesize);
875 
876 	dprintf((stderr, "%s: npages=%u\n", cfga->ap_log_id,
877 	    (uint_t)(ii / rcm->ms_pagesize)));
878 
879 	return (0);
880 }
881 
882 /*
883  * Add physical I/O bus name to RCM resource list.
884  */
885 static int
886 io_rcm_qpass(cfga_list_data_t *cfga, rcmd_t *rcm)
887 {
888 	char		path[MAXPATHLEN];
889 	char		buf[MAXPATHLEN];
890 	char		*rsrcname;
891 
892 	if (sscanf(cfga->ap_info, "device=%s", path) != 1) {
893 		dprintf((stderr, "unknown sbd info format: %s\n",
894 		    cfga->ap_info));
895 		return (-1);
896 	}
897 
898 	(void) snprintf(buf, sizeof (buf), "/devices%s", path);
899 	if ((rsrcname = strdup(buf)) == NULL) {
900 		dprintf((stderr, "strdup fail\n"));
901 		return (-1);
902 	}
903 
904 	assert(rcm->nrlist < rcm->ndevs);
905 	rcm->rlist[rcm->nrlist++] = rsrcname;
906 
907 	return (0);
908 }
909 
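/*
 * Look up the component info handler for the given attachment point
 * type. Returns NULL for types other than cpu, memory and io.
 */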
910 static int
911 (*cm_info_func(cfga_type_t ap_type))(ri_ap_t *, cfga_list_data_t *,
912     int, rcmd_t *)
913 {
914 	int i;
915 
916 	for (i = 0; i < sizeof (cm_ctl) / sizeof (cm_ctl[0]); i++) {
917 		if (strcmp(cm_ctl[i].type, ap_type) == 0) {
918 			return (cm_ctl[i].cm_info);
919 		}
920 	}
921 	return (NULL);
922 }
923 
924 /*
925  * Create cpu handle, adding properties exported by sbd plugin and
926  * RCM client usage.
927  */
928 /* ARGSUSED */
929 static int
930 cpu_cm_info(ri_ap_t *ap, cfga_list_data_t *cfga, int flags, rcmd_t *rcm)
931 {
932 	processorid_t	cpuid;
933 	int		speed, ecache, rv = 0;
934 	char		buf[CFGA_INFO_LEN], *cpustr, *lasts;
935 
936 	if (sscanf(cfga->ap_info, CPU_INFO_FMT, buf, &speed, &ecache) != 3) {
937 		dprintf((stderr, "unknown sbd info format: %s\n",
938 		    cfga->ap_info));
939 		return (-1);
940 	}
941 
942 	/* parse cpuids */
943 	for (cpustr = (char *)strtok_r(buf, CPUID_SEP, &lasts);
944 	    cpustr != NULL;
945 	    cpustr = (char *)strtok_r(NULL, CPUID_SEP, &lasts)) {
946 		cpuid = atoi(cpustr);
947 		if ((rv = i_cpu_cm_info(cpuid, speed, ap, rcm)) != 0) {
948 			break;
949 		}
950 	}
951 
952 	return (rv);
953 }
954 
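/*
 * Create the handle for a single cpu: record its cpuid, speed, ecache
 * size and current state, then insert it into the attachment point's
 * cpu list ordered by cpuid.
 */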
955 static int
956 i_cpu_cm_info(processorid_t cpuid, int speed, ri_ap_t *ap, rcmd_t *rcm)
957 {
958 	int		ecache = 0;
959 	char		*state, buf[32];
960 	processor_info_t cpu_info;
961 	ri_dev_t	*cpu = NULL;
962 	rcm_info_t	*rcm_info = NULL;
963 
964 	/*
965 	 * Could have been unconfigured in the interim, so cannot
966 	 * count on processor_info recognizing it.
967 	 */
968 	state = (processor_info(cpuid, &cpu_info) == 0) ?
969 	    pstate2str(cpu_info.pi_state) : "unknown";
970 
971 	if ((cpu = ri_dev_alloc()) == NULL) {
972 		dprintf((stderr, "ri_dev_alloc failed\n"));
973 		return (-1);
974 	}
975 
976 	if (rcm->ecache_info.ecache_sizes != NULL) {
977 		assert(rcm->ecache_info.cpuid_max != 0 &&
978 		    cpuid <= rcm->ecache_info.cpuid_max);
979 		ecache = rcm->ecache_info.ecache_sizes[cpuid] / MBYTE;
980 	}
981 
982 	dprintf((stderr, "i_cpu_cm_info: cpu(%d) ecache=%d MB\n",
983 	    cpuid, ecache));
984 
985 	if (nvlist_add_int32(cpu->conf_props, RI_CPU_ID, cpuid) != 0 ||
986 	    nvlist_add_int32(cpu->conf_props, RI_CPU_SPEED, speed) != 0 ||
987 	    nvlist_add_int32(cpu->conf_props, RI_CPU_ECACHE, ecache) != 0 ||
988 	    nvlist_add_string(cpu->conf_props, RI_CPU_STATE, state) != 0) {
989 		dprintf((stderr, "nvlist_add fail\n"));
990 		ri_dev_free(cpu);
991 		return (-1);
992 	}
993 
994 	(void) snprintf(buf, sizeof (buf), "%s%d", RCM_CPU, cpuid);
995 	dprintf((stderr, "rcm_get_info(%s)\n", buf));
996 	if (rcm_get_info(rcm->hdl, buf, RCM_INCLUDE_DEPENDENT,
997 	    &rcm_info) != RCM_SUCCESS) {
998 		dprintf((stderr, "rcm_get_info (errno=%d)\n", errno));
999 		ri_dev_free(cpu);
1000 		if (rcm_info != NULL)
1001 			rcm_free_info(rcm_info);
1002 		return (-1);
1003 	}
1004 
1005 	dev_list_cpu_insert(&ap->cpus, cpu, cpuid);
1006 
1007 	return (0);
1008 }
1009 
1010 /*
1011  * Create memory handle, adding properties exported by sbd plugin.
1012  * No RCM tuples to be saved unless RCM is modified to export names
1013  * for individual memory units.
1014  */
1015 /* ARGSUSED */
1016 static int
1017 mem_cm_info(ri_ap_t *ap, cfga_list_data_t *cfga, int flags, rcmd_t *rcm)
1018 {
1019 	ri_dev_t	*mem;
1020 	char		*cp;
1021 	uint64_t	base_addr;		/* required */
1022 	int32_t		size_kb;		/* required */
1023 	int32_t		perm_kb = 0;		/* optional */
1024 	char		target[CFGA_INFO_LEN] = "";	/* optional */
1025 	int32_t		del_kb = 0;		/* optional */
1026 	int32_t		rem_kb = 0;		/* optional */
1027 	char		source[CFGA_INFO_LEN] = "";	/* optional */
1028 
1029 	if (sscanf(cfga->ap_info, "address=0x%llx size=%u", &base_addr,
1030 	    &size_kb) != 2) {
1031 		goto err_fmt;
1032 	}
1033 
1034 	if ((cp = strstr(cfga->ap_info, "permanent")) != NULL &&
1035 	    sscanf(cp, "permanent=%u", &perm_kb) != 1) {
1036 		goto err_fmt;
1037 	}
1038 
1039 	if ((cp = strstr(cfga->ap_info, "target")) != NULL &&
1040 	    sscanf(cp, "target=%s deleted=%u remaining=%u", &target, &del_kb,
1041 	    &rem_kb) != 3) {
1042 		goto err_fmt;
1043 	}
1044 
1045 	if ((cp = strstr(cfga->ap_info, "source")) != NULL &&
1046 	    sscanf(cp, "source=%s", &source) != 1) {
1047 		goto err_fmt;
1048 	}
1049 
1050 	dprintf((stderr, "%s: base=0x%llx, size=%u, permanent=%u\n",
1051 	    cfga->ap_log_id, base_addr, size_kb, perm_kb));
1052 
1053 	if ((mem = ri_dev_alloc()) == NULL)
1054 		return (-1);
1055 
1056 	/*
1057 	 * Convert memory sizes to MB (truncate).
1058 	 */
1059 	if (nvlist_add_uint64(mem->conf_props, RI_MEM_ADDR, base_addr) != 0 ||
1060 	    nvlist_add_int32(mem->conf_props, RI_MEM_BRD, size_kb/KBYTE) != 0 ||
1061 	    nvlist_add_int32(mem->conf_props, RI_MEM_PERM,
1062 	    perm_kb/KBYTE) != 0) {
1063 		dprintf((stderr, "nvlist_add failure\n"));
1064 		ri_dev_free(mem);
1065 		return (-1);
1066 	}
1067 
1068 	if (target[0] != '\0' &&
1069 	    (nvlist_add_string(mem->conf_props, RI_MEM_TARG, target) != 0 ||
1070 	    nvlist_add_int32(mem->conf_props, RI_MEM_DEL, del_kb/KBYTE) != 0 ||
1071 	    nvlist_add_int32(mem->conf_props, RI_MEM_REMAIN,
1072 	    rem_kb/KBYTE) != 0)) {
1073 		dprintf((stderr, "nvlist_add failure\n"));
1074 		ri_dev_free(mem);
1075 		return (-1);
1076 	}
1077 
1078 	if (source[0] != '\0' && nvlist_add_string(mem->conf_props, RI_MEM_SRC,
1079 	    source) != 0) {
1080 		dprintf((stderr, "nvlist_add failure\n"));
1081 		ri_dev_free(mem);
1082 		return (-1);
1083 	}
1084 
1085 	/*
1086 	 * XXX - move this property to attachment point hdl?
1087 	 */
1088 	if (nvlist_add_int32(mem->conf_props, RI_MEM_DOMAIN,
1089 	    rcm->ms_sysmb) != 0) {
1090 		dprintf((stderr, "nvlist_add failure\n"));
1091 		ri_dev_free(mem);
1092 		return (-1);
1093 	}
1094 
1095 	dev_list_append(&ap->mems, mem);
1096 	return (0);
1097 
1098 err_fmt:
1099 	dprintf((stderr, "unknown sbd info format: %s\n", cfga->ap_info));
1100 	return (-1);
1101 }
1102 
1103 /*
1104  * Initiate a libdevinfo walk on the IO bus path.
1105  * XXX - investigate performance using two threads here: one thread to do the
1106  * libdevinfo snapshot and treewalk; and one thread to get RCM usage info
1107  */
1108 static int
1109 io_cm_info(ri_ap_t *ap, cfga_list_data_t *cfga, int flags, rcmd_t *rcm)
1110 {
1111 	int			i;
1112 	int			j;
1113 	int			k;
1114 	int			set_size;
1115 	int			retval = 0;
1116 	int			n_usage;
1117 	devinfo_arg_t		di_arg;
1118 	lookup_table_t		devicetable;
1119 	lookup_entry_t		*deventry;
1120 	lookup_entry_t		*lastdeventry;
1121 	ri_dev_t		*io = NULL;
1122 	ri_client_t		*client;
1123 	ri_client_t		*tmp;
1124 	di_devlink_handle_t	linkhd = NULL;
1125 	di_node_t		root = DI_NODE_NIL;
1126 	di_node_t		node = DI_NODE_NIL;
1127 	rcm_info_tuple_t	*rcm_tuple;
1128 	rcm_info_t		*rcm_info = NULL;
1129 	const char		*rcm_rsrc = NULL;
1130 	char			drv_inst[MAXPATHLEN];
1131 	char			path[MAXPATHLEN];
1132 	char			pathbuf[MAXPATHLEN];
1133 
1134 	dprintf((stderr, "io_cm_info(%s)\n", cfga->ap_log_id));
1135 
1136 	/* Extract devfs path from cfgadm information */
1137 	if (sscanf(cfga->ap_info, "device=%s\n", path) != 1) {
1138 		dprintf((stderr, "unknown sbd info format: %s\n",
1139 		    cfga->ap_info));
1140 		return (-1);
1141 	}
1142 
1143 	/* Initialize empty device lookup table */
1144 	devicetable.n_entries = 0;
1145 	devicetable.n_slots = 0;
1146 	devicetable.table = NULL;
1147 
1148 	/* Get libdevinfo snapshot */
1149 	dprintf((stderr, "di_init(%s)\n", path));
1150 	if ((root = di_init(path, DINFOCPYALL)) == DI_NODE_NIL) {
1151 		dprintf((stderr, "di_init: %s\n", strerror(errno)));
1152 		retval = RI_NODE_NIL; /* tell ri_init to skip this node */
1153 		goto end;
1154 	}
1155 
1156 	/*
1157 	 * Map in devlinks database.
1158 	 * XXX - This could be moved to ri_init() for better performance.
1159 	 */
1160 	dprintf((stderr, "di_devlink_init()\n"));
1161 	if ((linkhd = di_devlink_init(NULL, 0)) == NULL) {
1162 		dprintf((stderr, "di_devlink_init: %s\n", strerror(errno)));
1163 		retval = -1;
1164 		goto end;
1165 	}
1166 
1167 	/* Initialize argument for devinfo treewalk */
1168 	di_arg.err = 0;
1169 	di_arg.node = DI_NODE_NIL;
1170 	di_arg.pathbuf = pathbuf;
1171 	di_arg.table = &devicetable;
1172 	di_arg.linkhd = linkhd;
1173 
1174 	/* Use libdevinfo treewalk to build device lookup table */
1175 	if (di_walk_node(root, DI_WALK_CLDFIRST, (void *)&di_arg,
1176 	    devinfo_node_walk) != 0) {
1177 		dprintf((stderr, "di_walk_node: %s\n", strerror(errno)));
1178 		retval = -1;
1179 		goto end;
1180 	}
1181 	if (di_arg.err != 0) {
1182 		dprintf((stderr, "di_walk_node: device tree walk failed\n"));
1183 		retval = -1;
1184 		goto end;
1185 	}
1186 
1187 	/* Call RCM to gather usage information */
1188 	(void) snprintf(pathbuf, MAXPATHLEN, "/devices%s", path);
1189 	dprintf((stderr, "rcm_get_info(%s)\n", pathbuf));
1190 	if (rcm_get_info(rcm->hdl, pathbuf,
1191 	    RCM_INCLUDE_SUBTREE|RCM_INCLUDE_DEPENDENT, &rcm_info) !=
1192 	    RCM_SUCCESS) {
1193 		dprintf((stderr, "rcm_get_info (errno=%d)\n", errno));
1194 		retval = -1;
1195 		goto end;
1196 	}
1197 
1198 	/* Sort the device table by name (proper order for lookups) */
1199 	qsort(devicetable.table, devicetable.n_entries, sizeof (lookup_entry_t),
1200 	    table_compare_names);
1201 
1202 	/* Perform mappings of RCM usage segments to device table entries */
1203 	lastdeventry = NULL;
1204 	rcm_tuple = NULL;
1205 	while ((rcm_tuple = rcm_info_next(rcm_info, rcm_tuple)) != NULL) {
1206 		if ((rcm_rsrc = rcm_info_rsrc(rcm_tuple)) == NULL)
1207 			continue;
1208 		if ((deventry = lookup(&devicetable, rcm_rsrc)) != NULL) {
1209 			if (add_usage(deventry, rcm_rsrc, rcm_tuple)) {
1210 				retval = -1;
1211 				goto end;
1212 			}
1213 			lastdeventry = deventry;
1214 		} else {
1215 			if (add_usage(lastdeventry, rcm_rsrc, rcm_tuple)) {
1216 				retval = -1;
1217 				goto end;
1218 			}
1219 		}
1220 	}
1221 
1222 	/* Re-sort the device table by index number (original treewalk order) */
1223 	qsort(devicetable.table, devicetable.n_entries, sizeof (lookup_entry_t),
1224 	    table_compare_indices);
1225 
1226 	/*
1227 	 * Use the mapped usage and the device table to construct ri_dev_t's.
1228 	 * Construct one for each set of entries in the device table with
1229 	 * matching di_node_t's, if: 1) it has mapped RCM usage, or 2) it is
1230 	 * a leaf node and the caller has requested that unmanaged nodes be
1231 	 * included in the output.
1232 	 */
1233 	i = 0;
1234 	while (i < devicetable.n_entries) {
1235 
1236 		node = devicetable.table[i].node;
1237 
1238 		/* Count how many usage records are mapped to this node's set */
1239 		n_usage = 0;
1240 		set_size = 0;
1241 		while (((i + set_size) < devicetable.n_entries) &&
1242 		    (devicetable.table[i + set_size].node == node)) {
1243 			n_usage += devicetable.table[i + set_size].n_usage;
1244 			set_size += 1;
1245 		}
1246 
1247 		/*
1248 		 * If there's no usage, then the node is unmanaged.  Skip this
1249 		 * set of devicetable entries unless the node is a leaf node
1250 		 * and the caller has requested information on unmanaged leaves.
1251 		 */
1252 		if ((n_usage == 0) &&
1253 		    !((flags & RI_INCLUDE_UNMANAGED) && (ident_leaf(node)))) {
1254 			i += set_size;
1255 			continue;
1256 		}
1257 
1258 		/*
1259 		 * The checks above determined that this node is going in.
1260 		 * So determine its driver/instance name and allocate an
1261 		 * ri_dev_t for this node.
1262 		 */
1263 		if (mk_drv_inst(node, drv_inst, devicetable.table[i].name)) {
1264 			dprintf((stderr, "mk_drv_inst failed\n"));
1265 			retval = -1;
1266 			break;
1267 		}
1268 		if ((io = io_dev_alloc(drv_inst)) == NULL) {
1269 			dprintf((stderr, "io_dev_alloc failed\n"));
1270 			retval = -1;
1271 			break;
1272 		}
1273 
1274 		/* Now add all the RCM usage records (if any) to the ri_dev_t */
1275 		for (j = i; j < (i + set_size); j++) {
1276 			for (k = 0; k < devicetable.table[j].n_usage; k++) {
1277 				/* Create new ri_client_t for basic usage */
1278 				client = ri_client_alloc(
1279 				    (char *)devicetable.table[j].usage[k].rsrc,
1280 				    (char *)devicetable.table[j].usage[k].info);
1281 				if (client == NULL) {
1282 					dprintf((stderr,
1283 					    "ri_client_alloc failed\n"));
1284 					ri_dev_free(io);
1285 					retval = -1;
1286 					goto end;
1287 				}
1288 
1289 				/* Add extra query usage to the ri_client_t */
1290 				if ((flags & RI_INCLUDE_QUERY) &&
1291 				    (add_query_state(rcm, client,
1292 				    devicetable.table[j].usage[k].rsrc,
1293 				    devicetable.table[j].usage[k].info) != 0)) {
1294 					dprintf((stderr,
1295 					    "add_query_state failed\n"));
1296 					ri_dev_free(io);
1297 					ri_client_free(client);
1298 					retval = -1;
1299 					goto end;
1300 				}
1301 
1302 				/* Link new ri_client_t to ri_dev_t */
1303 				if (io->rcm_clients) {
1304 					tmp = io->rcm_clients;
1305 					while (tmp->next)
1306 						tmp = tmp->next;
1307 					tmp->next = client;
1308 				} else {
1309 					io->rcm_clients = client;
1310 				}
1311 			}
1312 		}
1313 
1314 		/* Link the ri_dev_t into the return value */
1315 		dev_list_append(&ap->ios, io);
1316 
1317 		/* Advance to the next node set */
1318 		i += set_size;
1319 	}
1320 
1321 end:
1322 	if (rcm_info != NULL)
1323 		rcm_free_info(rcm_info);
1324 	if (linkhd != NULL)
1325 		di_devlink_fini(&linkhd);
1326 	if (root != DI_NODE_NIL)
1327 		di_fini(root);
1328 	empty_table(&devicetable);
1329 
1330 	dprintf((stderr, "io_cm_info: returning %d\n", retval));
1331 	return (retval);
1332 }
1333 
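/*
 * A node is considered a leaf if it has at least one minor node and
 * no children.
 */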
1334 static int
1335 ident_leaf(di_node_t node)
1336 {
1337 	di_minor_t	minor = DI_MINOR_NIL;
1338 
1339 	return ((minor = di_minor_next(node, minor)) != DI_MINOR_NIL &&
1340 	    di_child_node(node) == DI_NODE_NIL);
1341 }
1342 
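/*
 * Construct the "<driver><instance>" name for a node that has a
 * driver bound and an instance number assigned.
 */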
1343 /* ARGSUSED */
1344 static int
1345 mk_drv_inst(di_node_t node, char drv_inst[], char *devfs_path)
1346 {
1347 	char	*drv;
1348 	int	inst;
1349 
1350 	if ((drv = di_driver_name(node)) == NULL) {
1351 		dprintf((stderr, "no driver bound to %s\n",
1352 		    devfs_path));
1353 		return (-1);
1354 	}
1355 
1356 	if ((inst = di_instance(node)) == -1) {
1357 		dprintf((stderr, "no instance assigned to %s\n",
1358 		    devfs_path));
1359 		return (-1);
1360 	}
1361 	(void) snprintf(drv_inst, MAXPATHLEN, "%s%d", drv, inst);
1362 
1363 	return (0);
1364 }
1365 
1366 /*
1367  * Libdevinfo walker.
1368  *
1369  * During the tree walk of the attached IO devices, for each node
1370  * and all of its associated minors, the following actions are performed:
1371  *  -  The /devices path of the physical device node or minor
1372  *     is stored in a lookup table along with a reference to the
1373  *     libdevinfo node it represents via add_lookup_entry().
1374  *  -  The device links associated with each device are also
1375  *     stored in the same lookup table along with a reference to
1376  *     the libdevinfo node it represents via the minor walk callback.
1377  *
1378  */
1379 static int
1380 devinfo_node_walk(di_node_t node, void *arg)
1381 {
1382 	char			*devfs_path;
1383 #ifdef DEBUG
1384 	char			*drv;
1385 #endif /* DEBUG */
1386 	devinfo_arg_t		*di_arg = (devinfo_arg_t *)arg;
1387 
1388 	if (node == DI_NODE_NIL) {
1389 		return (DI_WALK_TERMINATE);
1390 	}
1391 
1392 	if (((di_state(node) & DI_DRIVER_DETACHED) == 0) &&
1393 	    ((devfs_path = di_devfs_path(node)) != NULL)) {
1394 
1395 		/* Use the provided path buffer to create full /devices path */
1396 		(void) snprintf(di_arg->pathbuf, MAXPATHLEN, "/devices%s",
1397 		    devfs_path);
1398 
1399 #ifdef DEBUG
1400 		dprintf((stderr, "devinfo_node_walk(%s)\n", di_arg->pathbuf));
1401 		if ((drv = di_driver_name(node)) != NULL)
1402 			dprintf((stderr, " driver name %s instance %d\n", drv,
1403 			    di_instance(node)));
1404 #endif
1405 
1406 		/* Free the devfs_path */
1407 		di_devfs_path_free(devfs_path);
1408 
1409 		/* Add an entry to the lookup table for this physical device */
1410 		if (add_lookup_entry(di_arg->table, di_arg->pathbuf, node)) {
1411 			dprintf((stderr, "add_lookup_entry: %s\n",
1412 			    strerror(errno)));
1413 			di_arg->err = 1;
1414 			return (DI_WALK_TERMINATE);
1415 		}
1416 
1417 		/* Check if this node has minors */
1418 		if ((di_minor_next(node, DI_MINOR_NIL)) != DI_MINOR_NIL) {
1419 			/* Walk this node's minors */
1420 			di_arg->node = node;
1421 			if (di_walk_minor(node, NULL, DI_CHECK_ALIAS, arg,
1422 			    devinfo_minor_walk) != 0) {
1423 				dprintf((stderr, "di_walk_minor: %s\n",
1424 				    strerror(errno)));
1425 				di_arg->err = 1;
1426 				return (DI_WALK_TERMINATE);
1427 			}
1428 		}
1429 	}
1430 
1431 	return (DI_WALK_CONTINUE);
1432 }
1433 
1434 /*
1435  * Use di_devlink_walk to find the /dev link from /devices path for this minor
1436  */
1437 static int
1438 devinfo_minor_walk(di_node_t node, di_minor_t minor, void *arg)
1439 {
1440 	char		*name;
1441 	char		*devfs_path;
1442 	devinfo_arg_t	*di_arg = (devinfo_arg_t *)arg;
1443 	char		pathbuf[MAXPATHLEN];
1444 
1445 #ifdef DEBUG
1446 	dprintf((stderr, "devinfo_minor_walk(%d) %s\n", minor,
1447 	    di_arg->pathbuf));
1448 
1449 	if ((name = di_minor_name(minor)) != NULL) {
1450 		dprintf((stderr, "  minor name %s\n", name));
1451 	}
1452 #endif /* DEBUG */
1453 
1454 	/* Terminate the walk when the device node changes */
1455 	if (node != di_arg->node) {
1456 		return (DI_WALK_TERMINATE);
1457 	}
1458 
1459 	/* Construct full /devices path for this minor */
1460 	if ((name = di_minor_name(minor)) == NULL) {
1461 		return (DI_WALK_CONTINUE);
1462 	}
1463 	(void) snprintf(pathbuf, MAXPATHLEN, "%s:%s", di_arg->pathbuf, name);
1464 
1465 	/* Add lookup entry for this minor node */
1466 	if (add_lookup_entry(di_arg->table, pathbuf, node)) {
1467 		dprintf((stderr, "add_lookup_entry: %s\n", strerror(errno)));
1468 		di_arg->err = 1;
1469 		return (DI_WALK_TERMINATE);
1470 	}
1471 
1472 	/*
1473 	 * Walk the associated device links.
1474 	 * Note that di_devlink_walk() doesn't want "/devices" in its paths.
1475 	 * Also note that di_devlink_walk() will fail if there are no device
1476 	 * links, which is fine; so ignore if it fails.  Only check for
1477 	 * internal failures during such a walk.
1478 	 */
1479 	devfs_path = &pathbuf[strlen("/devices")];
1480 	(void) di_devlink_walk(di_arg->linkhd, NULL, devfs_path, 0, arg,
1481 	    devinfo_devlink_walk);
1482 	if (di_arg->err != 0) {
1483 		return (DI_WALK_TERMINATE);
1484 	}
1485 
1486 	return (DI_WALK_CONTINUE);
1487 }
1488 
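/*
 * Devlink walk callback: add a lookup table entry mapping each /dev
 * link path to the devinfo node currently being walked.
 */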
1489 static int
1490 devinfo_devlink_walk(di_devlink_t devlink, void *arg)
1491 {
1492 	const char	*linkpath;
1493 	devinfo_arg_t	*di_arg = (devinfo_arg_t *)arg;
1494 
1495 	/* Get the devlink's path */
1496 	if ((linkpath = di_devlink_path(devlink)) == NULL) {
1497 		dprintf((stderr, "di_devlink_path: %s\n", strerror(errno)));
1498 		di_arg->err = 1;
1499 		return (DI_WALK_TERMINATE);
1500 	}
1501 	dprintf((stderr, "devinfo_devlink_walk: %s\n", linkpath));
1502 
1503 	/* Add lookup entry for this devlink */
1504 	if (add_lookup_entry(di_arg->table, linkpath, di_arg->node)) {
1505 		dprintf((stderr, "add_lookup_entry: %s\n", strerror(errno)));
1506 		di_arg->err = 1;
1507 		return (DI_WALK_TERMINATE);
1508 	}
1509 
1510 	return (DI_WALK_CONTINUE);
1511 }
1512 
1513 /*
1514  * Map rcm_info_t's to ri_client_t's, filtering out "uninteresting" (hack)
1515  * RCM clients. The number of "interesting" ri_client_t's is returned
1516  * in cnt if passed non-NULL.
1517  */
1518 static int
1519 add_rcm_clients(ri_client_t **client_list, rcmd_t *rcm, rcm_info_t *info,
1520     int flags, int *cnt)
1521 {
1522 	rcm_info_tuple_t	*tuple;
1523 	char			*rsrc, *usage;
1524 	ri_client_t		*client, *tmp;
1525 
1526 	assert(client_list != NULL && rcm != NULL);
1527 
1528 	if (info == NULL)
1529 		return (0);
1530 
1531 	if (cnt != NULL)
1532 		*cnt = 0;
1533 
1534 	tuple = NULL;
1535 	while ((tuple = rcm_info_next(info, tuple)) != NULL) {
1536 		if ((rsrc = (char *)rcm_info_rsrc(tuple)) == NULL ||
1537 		    (usage = (char *)rcm_info_info(tuple)) == NULL) {
1538 			continue;
1539 		}
1540 
1541 		if (rcm_ignore(rsrc, usage) == 0)
1542 			continue;
1543 
1544 		if ((client = ri_client_alloc(rsrc, usage)) == NULL)
1545 			return (-1);
1546 
1547 		if ((flags & RI_INCLUDE_QUERY) && add_query_state(rcm, client,
1548 		    rsrc, usage) != 0) {
1549 			ri_client_free(client);
1550 			return (-1);
1551 		}
1552 
1553 		if (cnt != NULL)
1554 			++*cnt;
1555 
1556 		/*
1557 		 * Link in
1558 		 */
1559 		if ((tmp = *client_list) == NULL) {
1560 			*client_list = client;
1561 			continue;
1562 		}
1563 		while (tmp->next != NULL) {
1564 			tmp = tmp->next;
1565 		}
1566 		tmp->next = client;
1567 	}
1568 
1569 	return (0);
1570 }
1571 
1572 /*
1573  * Currently only filtering out based on known info string prefixes.
1574  */
1575 /* ARGSUSED */
1576 static int
1577 rcm_ignore(char *rsrc, char *infostr)
1578 {
1579 	char	**cpp;
1580 
1581 	for (cpp = rcm_info_filter; *cpp != NULL; cpp++) {
1582 		if (strncmp(infostr, *cpp, strlen(*cpp)) == 0) {
1583 			return (0);
1584 		}
1585 	}
1586 	return (-1);
1587 }
1588 
1589 /*
1590  * If this tuple was cached in the offline query pass, add the
1591  * query state and error string to the ri_client_t.
1592  */
1593 static int
1594 add_query_state(rcmd_t *rcm, ri_client_t *client, const char *rsrc,
1595     const char *info)
1596 {
1597 	int			qstate = RI_QUERY_UNKNOWN;
1598 	char			*errstr = NULL;
1599 	rcm_info_tuple_t	*cached_tuple;
1600 
1601 	if ((cached_tuple = tuple_lookup(rcm, rsrc, info)) != NULL) {
1602 		qstate = state2query(rcm_info_state(cached_tuple));
1603 		errstr = (char *)rcm_info_error(cached_tuple);
1604 	}
1605 
1606 	if (nvlist_add_int32(client->usg_props, RI_QUERY_STATE, qstate) != 0 ||
1607 	    (errstr != NULL && nvlist_add_string(client->usg_props,
1608 	    RI_QUERY_ERR, errstr) != 0)) {
1609 		dprintf((stderr, "nvlist_add fail\n"));
1610 		return (-1);
1611 	}
1612 
1613 	return (0);
1614 }
1615 
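/*
 * Map a cached RCM query state to the corresponding RI_QUERY_* value.
 */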
1616 static int
1617 state2query(int rcm_state)
1618 {
1619 	int	query;
1620 
1621 	switch (rcm_state) {
1622 	case RCM_STATE_OFFLINE_QUERY:
1623 	case RCM_STATE_SUSPEND_QUERY:
1624 		query = RI_QUERY_OK;
1625 		break;
1626 	case RCM_STATE_OFFLINE_QUERY_FAIL:
1627 	case RCM_STATE_SUSPEND_QUERY_FAIL:
1628 		query = RI_QUERY_FAIL;
1629 		break;
1630 	default:
1631 		query = RI_QUERY_UNKNOWN;
1632 		break;
1633 	}
1634 
1635 	return (query);
1636 }
1637 
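/*
 * Append a device handle to the end of the given list.
 */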
1638 static void
1639 dev_list_append(ri_dev_t **head, ri_dev_t *dev)
1640 {
1641 	ri_dev_t	*tmp;
1642 
1643 	if ((tmp = *head) == NULL) {
1644 		*head = dev;
1645 		return;
1646 	}
1647 	while (tmp->next != NULL) {
1648 		tmp = tmp->next;
1649 	}
1650 	tmp->next = dev;
1651 }
1652 
1653 /*
1654  * The cpu list is ordered on cpuid since CMP cpuids will not necessarily
1655  * be discovered in sequence.
1656  */
1657 static void
1658 dev_list_cpu_insert(ri_dev_t **listp, ri_dev_t *dev, processorid_t newid)
1659 {
1660 	ri_dev_t	*tmp;
1661 	int32_t		cpuid;
1662 
1663 	while ((tmp = *listp) != NULL &&
1664 	    nvlist_lookup_int32(tmp->conf_props, RI_CPU_ID, &cpuid) == 0 &&
1665 	    cpuid < newid) {
1666 		listp = &tmp->next;
1667 	}
1668 
1669 	dev->next = tmp;
1670 	*listp = dev;
1671 }
1672 
1673 /*
1674  * Linear lookup. Should convert to hash tab.
1675  */
1676 static rcm_info_tuple_t *
1677 tuple_lookup(rcmd_t *rcm, const char *krsrc, const char *kinfo)
1678 {
1679 	rcm_info_tuple_t	*tuple = NULL;
1680 	const char		*rsrc, *info;
1681 
1682 	if ((rcm == NULL) || (krsrc == NULL) || (kinfo == NULL)) {
1683 		return (NULL);
1684 	}
1685 
1686 	while ((tuple = rcm_info_next(rcm->offline_query_info,
1687 	    tuple)) != NULL) {
1688 		if ((rsrc = rcm_info_rsrc(tuple)) == NULL ||
1689 		    (info = rcm_info_info(tuple)) == NULL) {
1690 			continue;
1691 		}
1692 
1693 		if (strcmp(rsrc, krsrc) == 0 && strcmp(info, kinfo) == 0) {
1694 			return (tuple);
1695 		}
1696 	}
1697 	return (NULL);
1698 }
1699 
1700 /*
1701  * Create and link attachment point handle.
1702  */
1703 static ri_ap_t *
1704 ri_ap_alloc(char *ap_id, ri_hdl_t *hdl)
1705 {
1706 	ri_ap_t		*ap, *tmp;
1707 
1708 	if ((ap = calloc(1, sizeof (*ap))) == NULL) {
1709 		dprintf((stderr, "calloc: %s\n", strerror(errno)));
1710 		return (NULL);
1711 	}
1712 
1713 	if (nvlist_alloc(&ap->conf_props, NV_UNIQUE_NAME, 0) != 0 ||
1714 	    nvlist_add_string(ap->conf_props, RI_AP_REQ_ID, ap_id) != 0) {
1715 		if (ap->conf_props != NULL)
1716 			nvlist_free(ap->conf_props);
1717 		free(ap);
1718 		return (NULL);
1719 	}
1720 
1721 	if ((tmp = hdl->aps) == NULL) {
1722 		hdl->aps = ap;
1723 	} else {
1724 		while (tmp->next != NULL) {
1725 			tmp = tmp->next;
1726 		}
1727 		tmp->next = ap;
1728 	}
1729 
1730 	return (ap);
1731 }
1732 
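/*
 * Allocate a device handle with an empty property list. Returns NULL
 * if either allocation fails.
 */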
1733 static ri_dev_t *
1734 ri_dev_alloc(void)
1735 {
1736 	ri_dev_t	*dev;
1737 
1738 	if ((dev = calloc(1, sizeof (*dev))) == NULL ||
1739 	    nvlist_alloc(&dev->conf_props, NV_UNIQUE_NAME, 0) != 0) {
1740 		s_free(dev);
1741 	}
1742 	return (dev);
1743 }
1744 
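/*
 * Allocate an IO device handle and record its driver/instance name
 * as a configuration property.
 */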
1745 static ri_dev_t *
1746 io_dev_alloc(char *drv_inst)
1747 {
1748 	ri_dev_t	*io;
1749 
1750 	assert(drv_inst != NULL);
1751 
1752 	if ((io = ri_dev_alloc()) == NULL)
1753 		return (NULL);
1754 
1755 	if (nvlist_add_string(io->conf_props, RI_IO_DRV_INST,
1756 	    drv_inst) != 0) {
1757 		dprintf((stderr, "nvlist_add_string fail\n"));
1758 		ri_dev_free(io);
1759 		return (NULL);
1760 	}
1761 
1762 	return (io);
1763 }
1764 
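/*
 * Allocate an RCM client handle and record the resource name and
 * usage description as usage properties.
 */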
1765 static ri_client_t *
1766 ri_client_alloc(char *rsrc, char *usage)
1767 {
1768 	ri_client_t	*client;
1769 
1770 	assert(rsrc != NULL && usage != NULL);
1771 
1772 	if ((client = calloc(1, sizeof (*client))) == NULL) {
1773 		dprintf((stderr, "calloc: %s\n", strerror(errno)));
1774 		return (NULL);
1775 	}
1776 
1777 	if (nvlist_alloc(&client->usg_props, NV_UNIQUE_NAME, 0) != 0) {
1778 		dprintf((stderr, "nvlist_alloc fail\n"));
1779 		free(client);
1780 		return (NULL);
1781 	}
1782 
1783 	if (nvlist_add_string(client->usg_props, RI_CLIENT_RSRC, rsrc) != 0 ||
1784 	    nvlist_add_string(client->usg_props, RI_CLIENT_USAGE, usage) != 0) {
1785 		dprintf((stderr, "nvlist_add_string fail\n"));
1786 		ri_client_free(client);
1787 		return (NULL);
1788 	}
1789 
1790 	return (client);
1791 }
1792 
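/*
 * Free the cfgadm list data cached for each attachment point, then
 * the descriptor table itself.
 */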
1793 static void
1794 apd_tbl_free(apd_t apd_tbl[], int napds)
1795 {
1796 	int	i;
1797 	apd_t	*apd;
1798 
1799 	for (i = 0, apd = apd_tbl; i < napds; i++, apd++)
1800 		s_free(apd->cfga_list_data);
1801 
1802 	free(apd_tbl);
1803 }
1804 
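/*
 * Convert a processor_info(2) cpu state to the string exported in
 * the RI_CPU_STATE property.
 */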
1805 static char *
1806 pstate2str(int pi_state)
1807 {
1808 	char	*state;
1809 
1810 	switch (pi_state) {
1811 	case P_OFFLINE:
1812 		state = PS_OFFLINE;
1813 		break;
1814 	case P_ONLINE:
1815 		state = PS_ONLINE;
1816 		break;
1817 	case P_FAULTED:
1818 		state = PS_FAULTED;
1819 		break;
1820 	case P_POWEROFF:
1821 		state = PS_POWEROFF;
1822 		break;
1823 	case P_NOINTR:
1824 		state = PS_NOINTR;
1825 		break;
1826 	case P_SPARE:
1827 		state = PS_SPARE;
1828 		break;
1829 	default:
1830 		state = "unknown";
1831 		break;
1832 	}
1833 
1834 	return (state);
1835 }
1836 
1837 #ifdef DEBUG
1838 static void
1839 dump_apd_tbl(FILE *fp, apd_t *apds, int n_apds)
1840 {
1841 	int			i, j;
1842 	cfga_list_data_t	*cfga_ldata;
1843 
1844 	for (i = 0; i < n_apds; i++, apds++) {
1845 		dprintf((stderr, "apd_tbl[%d].nlist=%d\n", i, apds->nlist));
1846 		for (j = 0, cfga_ldata = apds->cfga_list_data; j < apds->nlist;
1847 		    j++, cfga_ldata++) {
1848 			dprintf((fp,
1849 			    "apd_tbl[%d].cfga_list_data[%d].ap_log_id=%s\n",
1850 			    i, j, cfga_ldata->ap_log_id));
1851 		}
1852 	}
1853 }
1854 #endif /* DEBUG */
1855 
1856 /*
1857  * The lookup table is a simple array that is grown in chunks
1858  * to optimize memory allocation.
1859  * Indices are assigned to each array entry in-order so that
1860  * the original device tree ordering can be discerned at a later time.
1861  *
1862  * add_lookup_entry is called from the libdevinfo tree traversal callbacks:
1863  * 1) devinfo_node_walk - physical device path for each node in
1864  *    the devinfo tree via di_walk_node(), lookup entry name is
1865  *    /devices/[di_devfs_path]
1866  * 2) devinfo_minor_walk - physical device path plus minor name for
1867  *    each minor associated with a node via di_walk_minor(), lookup entry
1868  *    name is /devices/[di_devfs_path:di_minor_name]
1869  * 3) devinfo_devlink_walk - for each minor's /dev link from its /devices
1870  *    path via di_devlink_walk(), lookup entry name is di_devlink_path()
1871  */
1872 static int
1873 add_lookup_entry(lookup_table_t *table, const char *name, di_node_t node)
1874 {
1875 	size_t		size;
1876 	lookup_entry_t	*new_table;
1877 
1878 
1879 	/* Grow the lookup table by USAGE_ALLOC_SIZE slots if necessary */
1880 	if (table->n_entries == table->n_slots) {
1881 		size = (table->n_slots + USAGE_ALLOC_SIZE) *
1882 		    sizeof (lookup_entry_t);
1883 		new_table = (lookup_entry_t *)realloc(table->table, size);
1884 		if (new_table == NULL) {
1885 			dprintf((stderr, "add_lookup_entry: alloc failed: %s\n",
1886 			    strerror(errno)));
1887 			errno = ENOMEM;
1888 			return (-1);
1889 		}
1890 		table->table = new_table;
1891 		table->n_slots += USAGE_ALLOC_SIZE;
1892 	}
1893 
1894 	dprintf((stderr, "add_lookup_entry[%d]:%s\n", table->n_entries, name));
1895 
1896 	/* Add this name to the next slot */
1897 	if ((table->table[table->n_entries].name = strdup(name)) == NULL) {
1898 		dprintf((stderr, "add_lookup_entry: strdup failed: %s\n",
1899 		    strerror(errno)));
1900 		errno = ENOMEM;
1901 		return (-1);
1902 	}
1903 	table->table[table->n_entries].index = table->n_entries;
1904 	table->table[table->n_entries].node = node;
1905 	table->table[table->n_entries].n_usage = 0;
1906 	table->table[table->n_entries].usage = NULL;
1907 	table->n_entries += 1;
1908 
1909 	return (0);
1910 }
1911 
1912 /*
1913  * lookup table entry names are full pathname strings, all start with /
1914  */
1915 static int
1916 table_compare_names(const void *a, const void *b)
1917 {
1918 	lookup_entry_t *entry1 = (lookup_entry_t *)a;
1919 	lookup_entry_t *entry2 = (lookup_entry_t *)b;
1920 
1921 	return (strcmp(entry1->name, entry2->name));
1922 }
1923 
1924 
1925 /*
1926  * Compare two indices and return -1 for less, 1 for greater, 0 for equal
1927  */
1928 static int
1929 table_compare_indices(const void *a, const void *b)
1930 {
1931 	lookup_entry_t *entry1 = (lookup_entry_t *)a;
1932 	lookup_entry_t *entry2 = (lookup_entry_t *)b;
1933 
1934 	if (entry1->index < entry2->index)
1935 		return (-1);
1936 	if (entry1->index > entry2->index)
1937 		return (1);
1938 	return (0);
1939 }
1940 
1941 /*
1942  * Given a RCM resource name, find the matching entry in the IO device table
1943  */
1944 static lookup_entry_t *
1945 lookup(lookup_table_t *table, const char *rcm_rsrc)
1946 {
1947 	lookup_entry_t	*entry;
1948 	lookup_entry_t	lookup_arg;
1949 
1950 	dprintf((stderr, "lookup:%s\n", rcm_rsrc));
1951 	lookup_arg.name = (char *)rcm_rsrc;
1952 	entry = bsearch(&lookup_arg, table->table, table->n_entries,
1953 	    sizeof (lookup_entry_t), table_compare_names);
1954 
1955 #ifdef DEBUG
1956 	if (entry != NULL) {
1957 		dprintf((stderr, " found entry:%d\n", entry->index));
1958 	}
1959 #endif /* DEBUG */
1960 	return (entry);
1961 }
1962 
1963 /*
1964  * Add RCM usage to the given device table entry.
1965  * Returns -1 on realloc failure.
1966  */
1967 static int
1968 add_usage(lookup_entry_t *entry, const char *rcm_rsrc, rcm_info_tuple_t *tuple)
1969 {
1970 	size_t		size;
1971 	const char	*info;
1972 	usage_t		*new_usage;
1973 
1974 	if ((entry == NULL) ||
1975 	    ((info = rcm_info_info(tuple)) == NULL))
1976 		return (0);
1977 
1978 	if (rcm_ignore((char *)rcm_rsrc, (char *)info) == 0)
1979 		return (0);
1980 
1981 	size = (entry->n_usage + 1) * sizeof (usage_t);
1982 	new_usage = (usage_t *)realloc(entry->usage, size);
1983 	if (new_usage == NULL) {
1984 		dprintf((stderr, "add_usage: alloc failed: %s\n",
1985 		    strerror(errno)));
1986 		return (-1);
1987 	}
1988 	dprintf((stderr, "add_usage: entry %d rsrc: %s info: %s\n",
1989 	    entry->index, rcm_rsrc, info));
1990 
1991 	entry->usage = new_usage;
1992 	entry->usage[entry->n_usage].rsrc = rcm_rsrc;
1993 	entry->usage[entry->n_usage].info = info;
1994 	entry->n_usage += 1;
1995 	return (0);
1996 }
1997 
1998 static void
1999 empty_table(lookup_table_t *table)
2000 {
2001 	int i;
2002 
2003 	if (table) {
2004 		for (i = 0; i < table->n_entries; i++) {
2005 			if (table->table[i].name)
2006 				free(table->table[i].name);
2007 			/*
2008 			 * Note: the strings pointed to from within
2009 			 * usage were freed already by rcm_free_info
2010 			 */
2011 			if (table->table[i].usage)
2012 				free(table->table[i].usage);
2013 		}
2014 		if (table->table)
2015 			free(table->table);
2016 		table->table = NULL;
2017 		table->n_entries = 0;
2018 		table->n_slots = 0;
2019 	}
2020 }
2021