1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * Implementation of ri_init routine for obtaining mapping
28  * of system board attachment points to physical devices and to
29  * the Reconfiguration Coordination Manager (RCM) client usage
30  * of these devices.
31  */
32 #include <string.h>
33 #include <stdlib.h>
34 #include <unistd.h>
35 #include <kstat.h>
36 #include <sys/param.h>
37 #include <sys/sbd_ioctl.h>
38 #include "rsrc_info_impl.h"
39 
40 /*
41  * Occupant types exported by cfgadm sbd plugin via
42  * config_admin(3CFGADM).
43  */
44 #define	SBD_CM_CPU	"cpu"
45 #define	SBD_CM_MEM	"memory"
46 #define	SBD_CM_IO	"io"
47 
48 /*
49  * RCM abstract resource names.
50  */
51 #define	RCM_MEM_ALL	"SUNW_memory"
52 #define	RCM_CPU_ALL	"SUNW_cpu"
53 #define	RCM_CPU		RCM_CPU_ALL"/cpu"
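/*
 * Per-cpu resource names are formed by appending the cpuid to RCM_CPU,
 * e.g. cpu 3 is named "SUNW_cpu/cpu3" (see cpu_rcm_qpass() and
 * i_cpu_cm_info()).
 */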
54 
55 #define	KBYTE		1024
56 #define	MBYTE		1048576
57 #define	USAGE_ALLOC_SIZE	128
58 
59 /*
60  * Define that allows io_cm_info to return RI_NODE_NIL to ri_init
61  * in order to skip over nodes with unattached drivers.
62  */
63 #define	RI_NODE_NIL	1
64 
65 /*
66  * This code is CMP aware as it parses the
67  * cfgadm info field for individual cpuids.
68  */
69 #define	CPUID_SEP	","
70 #define	CPU_INFO_FMT	"cpuid=%s speed=%d ecache=%d"
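/*
 * An illustrative ap_info string (field values are hypothetical):
 *
 *	"cpuid=16,20 speed=1200 ecache=8"
 *
 * CPU_INFO_FMT captures the comma-separated cpuid list as a single
 * string, which is then split on CPUID_SEP, along with the speed and
 * ecache values.
 */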
71 
72 typedef struct {
73 	cfga_list_data_t *cfga_list_data;
74 	int		nlist;
75 } apd_t;
76 
77 typedef struct {
78 	long		pagesize;
79 	long		syspages;
80 	long		sysmb;
81 } mem_stat_t;
82 
83 #define	ms_syspages	m_stat.syspages
84 #define	ms_pagesize	m_stat.pagesize
85 #define	ms_sysmb	m_stat.sysmb
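/*
 * The ms_* macros are shorthand for the mem_stat_t member embedded in
 * rcmd_t below, e.g. rcm->ms_syspages expands to rcm->m_stat.syspages.
 */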
86 
87 typedef int32_t		cpuid_t;
88 
89 typedef struct {
90 	int	cpuid_max;	/* maximum cpuid value */
91 	int	ecache_curr;	/* cached during tree walk */
92 	int	*ecache_sizes;	/* indexed by cpuid */
93 } ecache_info_t;
94 
95 typedef struct {
96 	rcm_handle_t	*hdl;
97 	rcm_info_t	*offline_query_info;
98 	char		**rlist;
99 	int		nrlist;
100 	cpuid_t		*cpus;
101 	int		ncpus;
102 	int		ndevs;
103 	uint_t		query_pages;
104 	mem_stat_t	m_stat;
105 	ecache_info_t	ecache_info;
106 } rcmd_t;
107 
108 typedef struct {
109 	const char	*rsrc;
110 	const char	*info;
111 } usage_t;
112 
113 /* Lookup table entry for matching IO devices to RCM resource usage */
114 typedef struct {
115 	int		index;		/* index into the table array */
116 	di_node_t	node;		/* associated devinfo node */
117 	char		*name;		/* device full path name */
118 	int		n_usage;
119 	usage_t		*usage;
120 } lookup_entry_t;
121 
122 typedef struct {
123 	int		n_entries;
124 	int		n_slots;
125 	lookup_entry_t	*table;
126 } lookup_table_t;
127 
128 typedef struct {
129 	int			err;
130 	di_node_t		node;
131 	char			*pathbuf;
132 	lookup_table_t		*table;
133 	di_devlink_handle_t	linkhd;
134 } devinfo_arg_t;
135 
136 static int dyn_ap_ids(char *, cfga_list_data_t **, int *);
137 static int rcm_init(rcmd_t *, apd_t [], int, int);
138 static void rcm_fini(rcmd_t *);
139 static int rcm_query_init(rcmd_t *, apd_t [], int);
140 static int cap_request(ri_hdl_t *, rcmd_t *);
141 static int syscpus(cpuid_t **, int *);
142 static int cpu_cap_request(ri_hdl_t *, rcmd_t *);
143 static int mem_cap_request(ri_hdl_t *, rcmd_t *);
144 static int (*cm_rcm_qpass_func(cfga_type_t))(cfga_list_data_t *, rcmd_t *);
145 static int cpu_rcm_qpass(cfga_list_data_t *, rcmd_t *);
146 static int mem_rcm_qpass(cfga_list_data_t *, rcmd_t *);
147 static int io_rcm_qpass(cfga_list_data_t *, rcmd_t *);
148 static int (*cm_info_func(cfga_type_t))(ri_ap_t *, cfga_list_data_t *, int,
149     rcmd_t *);
150 static int cpu_cm_info(ri_ap_t *, cfga_list_data_t *, int, rcmd_t *);
151 static int i_cpu_cm_info(processorid_t, int, int, ri_ap_t *, rcmd_t *);
152 static int mem_cm_info(ri_ap_t *, cfga_list_data_t *, int, rcmd_t *);
153 static int io_cm_info(ri_ap_t *, cfga_list_data_t *, int, rcmd_t *);
154 static int ident_leaf(di_node_t);
155 static int mk_drv_inst(di_node_t, char [], char *);
156 static int devinfo_node_walk(di_node_t, void *);
157 static int devinfo_minor_walk(di_node_t, di_minor_t, void *);
158 static int devinfo_devlink_walk(di_devlink_t, void *);
159 static int add_rcm_clients(ri_client_t **, rcmd_t *, rcm_info_t *, int, int *);
160 static int rcm_ignore(char *, char *);
161 static int add_query_state(rcmd_t *, ri_client_t *, const char *, const char *);
162 static int state2query(int);
163 static void dev_list_append(ri_dev_t **, ri_dev_t *);
164 static void dev_list_cpu_insert(ri_dev_t **, ri_dev_t *, processorid_t);
165 static rcm_info_tuple_t *tuple_lookup(rcmd_t *, const char *, const char *);
166 static ri_ap_t *ri_ap_alloc(char *, ri_hdl_t *);
167 static ri_dev_t *ri_dev_alloc(void);
168 static ri_dev_t *io_dev_alloc(char *);
169 static ri_client_t *ri_client_alloc(char *, char *);
170 static void apd_tbl_free(apd_t [], int);
171 static char *pstate2str(int);
172 static int ecache_info_init(ecache_info_t *);
173 static int find_cpu_nodes(di_node_t, void *);
174 static int prop_lookup_int(di_node_t, di_prom_handle_t, char *, int **);
175 static int add_lookup_entry(lookup_table_t *, const char *, di_node_t);
176 static int table_compare_names(const void *, const void *);
177 static int table_compare_indices(const void *, const void *);
178 static lookup_entry_t *lookup(lookup_table_t *table, const char *);
179 static int add_usage(lookup_entry_t *, const char *, rcm_info_tuple_t *);
180 static void empty_table(lookup_table_t *);
181 
182 #ifdef DEBUG
183 static void		dump_apd_tbl(FILE *, apd_t *, int);
184 #endif /* DEBUG */
185 
186 static struct {
187 	char	*type;
188 	int	(*cm_info)(ri_ap_t *, cfga_list_data_t *, int, rcmd_t *);
189 	int	(*cm_rcm_qpass)(cfga_list_data_t *, rcmd_t *);
190 } cm_ctl[] = {
191 	{SBD_CM_CPU,	cpu_cm_info,	cpu_rcm_qpass},
192 	{SBD_CM_MEM,	mem_cm_info,	mem_rcm_qpass},
193 	{SBD_CM_IO,	io_cm_info,	io_rcm_qpass}
194 };
195 
196 /*
197  * Table of known info string prefixes for RCM modules that do not
198  * represent actual resource usage, but instead provide name translations
199  * or sequencing within the RCM namespace. Since RCM provides no way to
200  * filter these out, we must maintain this hack.
201  */
202 static char *rcm_info_filter[] = {
203 	"Network interface",		/* Network naming module */
204 	NULL
205 };
206 
207 
208 /*
209  * Allocate snapshot handle.
210  */
211 int
212 ri_init(int n_apids, char **ap_ids, int flags, ri_hdl_t **hdlp)
213 {
214 	int			i, j;
215 	ri_hdl_t		*ri_hdl;
216 	ri_ap_t			*ap_hdl;
217 	rcmd_t			*rcm = NULL;
218 	cfga_list_data_t	*cfga_ldata;
219 	apd_t			*apd, *apd_tbl = NULL;
220 	int			(*cm_info)(ri_ap_t *, cfga_list_data_t *,
221 				    int, rcmd_t *);
222 	int			rv = RI_SUCCESS;
223 	int			cm_info_rv;
224 
225 	if (n_apids <= 0 || ap_ids == NULL || hdlp == NULL)
226 		return (RI_INVAL);
227 
228 	if (flags & ~RI_REQ_MASK)
229 		return (RI_NOTSUP);
230 
231 	*hdlp = NULL;
232 	if ((ri_hdl = calloc(1, sizeof (*ri_hdl))) == NULL ||
233 	    (rcm = calloc(1, sizeof (*rcm))) == NULL ||
234 	    (apd_tbl = calloc(n_apids, sizeof (*apd_tbl))) == NULL) {
235 		dprintf((stderr, "calloc: %s\n", strerror(errno)));
236 		rv = RI_FAILURE;
237 		goto out;
238 	}
239 
240 	/*
241 	 * Create mapping of boards to components.
242 	 */
243 	for (i = 0, apd = apd_tbl; i < n_apids; i++, apd++) {
244 		if (dyn_ap_ids(ap_ids[i], &apd->cfga_list_data,
245 		    &apd->nlist) == -1) {
246 			rv = RI_INVAL;
247 			goto out;
248 		}
249 	}
250 #ifdef DEBUG
251 	dump_apd_tbl(stderr, apd_tbl, n_apids);
252 #endif /* DEBUG */
253 
254 	if (rcm_init(rcm, apd_tbl, n_apids, flags) != 0) {
255 		rv = RI_FAILURE;
256 		goto out;
257 	}
258 
259 	/*
260 	 * Best effort attempt to read cpu ecache sizes from
261 	 * OBP/Solaris device trees. These are later looked up
262 	 * in i_cpu_cm_info().
263 	 */
264 	(void) ecache_info_init(&rcm->ecache_info);
265 
266 	for (i = 0, apd = apd_tbl; i < n_apids; i++, apd++) {
267 		if ((ap_hdl = ri_ap_alloc(ap_ids[i], ri_hdl)) == NULL) {
268 			rv = RI_FAILURE;
269 			goto out;
270 		}
271 
272 		/*
273 		 * Add component info based on occupant type. Note that all
274 		 * passes through the apd table skip over the first
275 		 * cfga_list_data entry, which is the static system board
276 		 * attachment point.
277 		 */
278 		for (j = 1, cfga_ldata = &apd->cfga_list_data[1];
279 		    j < apd->nlist; j++, cfga_ldata++) {
280 			if (cfga_ldata->ap_o_state != CFGA_STAT_CONFIGURED) {
281 				continue;
282 			}
283 
284 			if ((cm_info =
285 			    cm_info_func(cfga_ldata->ap_type)) != NULL) {
286 				cm_info_rv =
287 				    (*cm_info)(ap_hdl, cfga_ldata, flags, rcm);
288 				if (cm_info_rv != 0) {
289 					/*
290 					 * If we cannot obtain info for the ap,
291 					 * skip it and do not fail the entire
292 					 * operation.  This case occurs when the
293 					 * driver for a device is not attached:
294 					 * di_init() fails in io_cm_info(),
295 					 * which then returns RI_NODE_NIL.
296 					 */
297 					if (cm_info_rv == RI_NODE_NIL)
298 						continue;
299 					else {
300 						rv = RI_FAILURE;
301 						goto out;
302 					}
303 				}
304 			}
305 		}
306 	}
307 
308 	if ((flags & RI_INCLUDE_QUERY) && cap_request(ri_hdl, rcm) != 0)
309 		rv = RI_FAILURE;
310 
311 out:
312 	if (apd_tbl != NULL)
313 		apd_tbl_free(apd_tbl, n_apids);
314 	if (rcm != NULL)
315 		rcm_fini(rcm);
316 
317 	if (rv == RI_SUCCESS)
318 		*hdlp = ri_hdl;
319 	else
320 		ri_fini(ri_hdl);
321 
322 	return (rv);
323 }
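/*
 * A minimal usage sketch (the attachment point name "SB0" is
 * hypothetical):
 *
 *	ri_hdl_t *hdl;
 *	char *ap_ids[] = { "SB0" };
 *
 *	if (ri_init(1, ap_ids, RI_INCLUDE_QUERY, &hdl) == RI_SUCCESS) {
 *		... consume the snapshot ...
 *		ri_fini(hdl);
 *	}
 */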
324 
325 /*
326  * Map static board attachment point to dynamic attachment points (components).
327  */
328 static int
329 dyn_ap_ids(char *ap_id, cfga_list_data_t **ap_id_list, int *nlist)
330 {
331 	cfga_err_t	cfga_err;
332 	char		*errstr;
333 	char		*opts = "parsable";
334 	char		*listops = "class=sbd";
335 
336 	cfga_err = config_list_ext(1, &ap_id, ap_id_list, nlist,
337 	    opts, listops, &errstr, CFGA_FLAG_LIST_ALL);
338 	if (cfga_err != CFGA_OK) {
339 		dprintf((stderr, "config_list_ext: %s\n",
340 		    config_strerror(cfga_err)));
341 		return (-1);
342 	}
343 
344 	return (0);
345 }
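/*
 * For a board attachment point (say "SB0", name illustrative), the
 * "class=sbd" listing returns the static board entry first, followed by
 * one dynamic entry per component; ri_init() and rcm_query_init() rely
 * on that ordering when they skip the first cfga_list_data entry.
 */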
346 
347 /*
348  * Initialize rcm handle, memory stats. Cache query result if necessary.
349  */
350 static int
351 rcm_init(rcmd_t *rcm, apd_t apd_tbl[], int napds, int flags)
352 {
353 	longlong_t	ii;
354 	int		rv = 0;
355 
356 	rcm->offline_query_info = NULL;
357 	rcm->rlist = NULL;
358 	rcm->cpus = NULL;
359 
360 	if (rcm_alloc_handle(NULL, RCM_NOPID, NULL, &rcm->hdl) != RCM_SUCCESS) {
361 		dprintf((stderr, "rcm_alloc_handle (errno=%d)\n", errno));
362 		return (-1);
363 	}
364 
365 	if ((rcm->ms_pagesize = sysconf(_SC_PAGE_SIZE)) == -1 ||
366 	    (rcm->ms_syspages = sysconf(_SC_PHYS_PAGES)) == -1) {
367 		dprintf((stderr, "sysconf: %s\n", strerror(errno)));
368 		return (-1);
369 	}
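	/* Total physical memory, rounded up to whole megabytes. */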
370 	ii = (longlong_t)rcm->ms_pagesize * rcm->ms_syspages;
371 	rcm->ms_sysmb = (int)((ii+MBYTE-1) / MBYTE);
372 
373 	if (flags & RI_INCLUDE_QUERY)
374 		rv = rcm_query_init(rcm, apd_tbl, napds);
375 
376 	return (rv);
377 }
378 
379 static void
380 rcm_fini(rcmd_t *rcm)
381 {
382 	char	**cpp;
383 
384 	assert(rcm != NULL);
385 
386 	if (rcm->offline_query_info != NULL)
387 		rcm_free_info(rcm->offline_query_info);
388 	if (rcm->hdl != NULL)
389 		rcm_free_handle(rcm->hdl);
390 
391 	if (rcm->rlist != NULL) {
392 		for (cpp = rcm->rlist; *cpp != NULL; cpp++)
393 			s_free(*cpp);
394 		free(rcm->rlist);
395 	}
396 
397 	s_free(rcm->cpus);
398 	free(rcm);
399 }
400 
401 #define	NODENAME_CMP		"cmp"
402 #define	NODENAME_SSM		"ssm"
403 #define	PROP_CPUID		"cpuid"
404 #define	PROP_DEVICE_TYPE	"device-type"
405 #define	PROP_ECACHE_SIZE	"ecache-size"
406 #define	PROP_L2_CACHE_SIZE	"l2-cache-size"
407 #define	PROP_L3_CACHE_SIZE	"l3-cache-size"
408 
409 typedef struct {
410 	di_node_t		root;
411 	di_prom_handle_t	ph;
412 	ecache_info_t		*ecache_info;
413 } di_arg_t;
414 
415 /*
416  * The ecache sizes for individual cpus are read from the
417  * OBP/Solaris device trees. This info cannot be derived
418  * from the cfgadm_sbd cpu attachment point ecache info,
419  * which may be a sum of multiple cores for CMP.
420  */
421 static int
422 ecache_info_init(ecache_info_t *ec)
423 {
424 	di_arg_t	di_arg;
425 	di_prom_handle_t ph = DI_PROM_HANDLE_NIL;
426 	di_node_t	root = DI_NODE_NIL;
427 	int		cpuid_max, rv = 0;
428 
429 	assert(ec != NULL && ec->cpuid_max == 0 && ec->ecache_sizes == NULL);
430 
431 	if ((cpuid_max = sysconf(_SC_CPUID_MAX)) == -1) {
432 		dprintf((stderr, "sysconf fail: %s\n", strerror(errno)));
433 		rv = -1;
434 		goto done;
435 	}
436 
437 	if ((root = di_init("/", DINFOCPYALL)) == DI_NODE_NIL) {
438 		dprintf((stderr, "di_init fail: %s\n", strerror(errno)));
439 		rv = -1;
440 		goto done;
441 	}
442 
443 	if ((ph = di_prom_init()) == DI_PROM_HANDLE_NIL) {
444 		dprintf((stderr, "di_prom_init fail: %s\n", strerror(errno)));
445 		rv = -1;
446 		goto done;
447 	}
448 
449 	if ((ec->ecache_sizes = calloc(cpuid_max + 1, sizeof (int))) == NULL) {
450 		dprintf((stderr, "calloc fail: %s\n", strerror(errno)));
451 		rv = -1;
452 		goto done;
453 	}
454 	ec->cpuid_max = cpuid_max;
455 
456 	dprintf((stderr, "cpuid_max is set to %d\n", ec->cpuid_max));
457 
458 	di_arg.ph = ph;
459 	di_arg.root = root;
460 	di_arg.ecache_info = ec;
461 
462 	if (di_walk_node(root, DI_WALK_CLDFIRST, (void *)&di_arg,
463 	    find_cpu_nodes) != 0) {
464 		dprintf((stderr, "di_walk_node fail: %s\n", strerror(errno)));
465 		rv = -1;
466 	}
467 
468 done:
469 	if (root != DI_NODE_NIL)
470 		di_fini(root);
471 	if (ph != DI_PROM_HANDLE_NIL)
472 		di_prom_fini(ph);
473 
474 	return (rv);
475 }
476 
477 /*
478  * Libdevinfo node walk callback for reading ecache size
479  * properties for cpu device nodes. Subtrees not containing
480  * cpu nodes are filtered out.
481  */
482 static int
483 find_cpu_nodes(di_node_t node, void *arg)
484 {
485 	char			*name;
486 	int			*cpuid, *ecache;
487 	di_arg_t		*di_arg = (di_arg_t *)arg;
488 	ecache_info_t		*ec = di_arg->ecache_info;
489 	di_prom_handle_t	ph = di_arg->ph;
490 	int			walk_child = 0;
491 
492 	if (node == DI_NODE_NIL) {
493 		return (DI_WALK_TERMINATE);
494 	}
495 
496 	if (node == di_arg->root) {
497 		return (DI_WALK_CONTINUE);
498 	}
499 
500 	if (di_nodeid(node) == DI_PSEUDO_NODEID) {
501 		return (DI_WALK_PRUNECHILD);
502 	}
503 
504 	name = di_node_name(node);
505 	if (name != NULL) {
506 		/*
507 		 * CMP nodes will be the parent of cpu nodes. On some platforms,
508 		 * cpu nodes will be under the ssm node. In either case,
509 		 * continue searching this subtree.
510 		 */
511 		if (strncmp(name, NODENAME_SSM, strlen(NODENAME_SSM)) == 0 ||
512 		    strncmp(name, NODENAME_CMP, strlen(NODENAME_CMP)) == 0) {
513 			return (DI_WALK_CONTINUE);
514 		}
515 	}
516 
517 	dprintf((stderr, "find_cpu_nodes: node=%p, name=%s, binding_name=%s\n",
518 	    node, di_node_name(node), di_binding_name(node)));
519 
520 	/*
521 	 * Ecache size property name differs with processor implementation.
522 	 * Panther has both L2 and L3, so check for L3 first to differentiate
523 	 * from Jaguar, which has only L2.
524 	 */
525 	if (prop_lookup_int(node, ph, PROP_ECACHE_SIZE, &ecache) == 0 ||
526 	    prop_lookup_int(node, ph, PROP_L3_CACHE_SIZE, &ecache) == 0 ||
527 	    prop_lookup_int(node, ph, PROP_L2_CACHE_SIZE, &ecache) == 0) {
528 		/*
529 		 * On some platforms the cache property is in the core
530 		 * node while the cpuid is in the child cpu node.  It may
531 		 * be needed while processing this node or a child node.
532 		 */
533 		ec->ecache_curr = *ecache;
534 		walk_child = 1;
535 	}
536 
537 	if (prop_lookup_int(node, ph, PROP_CPUID, &cpuid) == 0) {
538 
539 		assert(ec != NULL && ec->ecache_sizes != NULL &&
540 		    *cpuid <= ec->cpuid_max);
541 
542 		if (ec->ecache_curr != 0) {
543 			ec->ecache_sizes[*cpuid] = ec->ecache_curr;
545 		}
546 	}
547 
548 	return (walk_child ? DI_WALK_CONTINUE : DI_WALK_PRUNECHILD);
549 }
550 
551 /*
552  * Given a di_node_t, call the appropriate int property lookup routine.
553  * Note: This lookup fails if the int property has multiple value entries.
554  */
555 static int
556 prop_lookup_int(di_node_t node, di_prom_handle_t ph, char *propname, int **ival)
557 {
558 	int rv;
559 
560 	rv = (di_nodeid(node) == DI_PROM_NODEID) ?
561 	    di_prom_prop_lookup_ints(ph, node, propname, ival) :
562 	    di_prop_lookup_ints(DDI_DEV_T_ANY, node, propname, ival);
563 
564 	return (rv == 1 ? 0 : -1);
565 }
566 
567 /*
568  * For offline queries, RCM must be given a list of all resources
569  * so modules can have access to the full scope of the operation.
570  * The rcm_get_info calls are made individually in order to map the
571  * returned rcm_info_t's to physical devices. The rcm_request_offline
572  * result is cached so the query state can be looked up as we process
573  * the rcm_get_info calls. This routine also tallies up the amount of
574  * memory going away and creates a list of cpu ids to be used
575  * later for rcm_request_capacity_change.
576  */
577 static int
578 rcm_query_init(rcmd_t *rcm, apd_t apd_tbl[], int napds)
579 {
580 	apd_t			*apd;
581 	int 			i, j;
582 	cfga_list_data_t	*cfga_ldata;
583 	int			(*cm_rcm_qpass)(cfga_list_data_t *, rcmd_t *);
584 #ifdef DEBUG
585 	char			**cpp;
586 #endif /* DEBUG */
587 
588 	/*
589 	 * Initial pass to size cpu and resource name arrays needed to
590 	 * interface with RCM. Attachment point ids for CMP can represent
591 	 * multiple cpus (and resource names). Instead of parsing the
592 	 * cfgadm info field here, use the worst case that all component
593 	 * attachment points are CMP.
594 	 */
595 	rcm->ndevs = 0;
596 	for (i = 0, apd = apd_tbl; i < napds; i++, apd++) {
597 		for (j = 1, cfga_ldata = &apd->cfga_list_data[1];
598 		    j < apd->nlist; j++, cfga_ldata++) {
599 			if (cfga_ldata->ap_o_state != CFGA_STAT_CONFIGURED) {
600 				continue;
601 			}
602 			rcm->ndevs += SBD_MAX_CORES_PER_CMP;
603 		}
604 	}
605 
606 	/* account for trailing NULL in rlist */
607 	if (rcm->ndevs > 0 &&
608 	    ((rcm->cpus = calloc(rcm->ndevs, sizeof (cpuid_t))) == NULL ||
609 	    (rcm->rlist = calloc(rcm->ndevs + 1, sizeof (char *))) == NULL)) {
610 		dprintf((stderr, "calloc: %s\n", strerror(errno)));
611 		return (-1);
612 	}
613 
614 	/*
615 	 * Second pass to fill in the RCM resource and cpu lists.
616 	 */
617 	for (i = 0, apd = apd_tbl; i < napds; i++, apd++) {
618 		for (j = 1, cfga_ldata = &apd->cfga_list_data[1];
619 		    j < apd->nlist; j++, cfga_ldata++) {
620 			if (cfga_ldata->ap_o_state != CFGA_STAT_CONFIGURED) {
621 				continue;
622 			}
623 			if ((cm_rcm_qpass =
624 			    cm_rcm_qpass_func(cfga_ldata->ap_type)) != NULL &&
625 			    (*cm_rcm_qpass)(cfga_ldata, rcm) != 0) {
626 				return (-1);
627 			}
628 		}
629 	}
630 
631 	if (rcm->nrlist == 0)
632 		return (0);
633 
634 	/*
635 	 * Cache query result. Since we are only interested in the
636 	 * set of RCM clients processed and not their request status,
637 	 * the return value is irrelevant.
638 	 */
639 	(void) rcm_request_offline_list(rcm->hdl, rcm->rlist,
640 	    RCM_QUERY|RCM_SCOPE, &rcm->offline_query_info);
641 
642 #ifdef DEBUG
643 	dprintf((stderr, "RCM rlist: nrlist=%d\n", rcm->nrlist));
644 	for (cpp = rcm->rlist, i = 0; *cpp != NULL; cpp++, i++) {
645 		dprintf((stderr, "rlist[%d]=%s\n", i, *cpp));
646 	}
647 #endif /* DEBUG */
648 
649 	return (0);
650 }
651 
652 static int
653 cap_request(ri_hdl_t *ri_hdl, rcmd_t *rcm)
654 {
655 	return (((rcm->ncpus > 0 && cpu_cap_request(ri_hdl, rcm) != 0) ||
656 	    (rcm->query_pages > 0 && mem_cap_request(ri_hdl, rcm) != 0)) ?
657 	    -1 : 0);
658 }
659 
660 /*
661  * RCM capacity change request for cpus.
662  */
663 static int
664 cpu_cap_request(ri_hdl_t *ri_hdl, rcmd_t *rcm)
665 {
666 	cpuid_t		*syscpuids, *newcpuids;
667 	int		sysncpus, newncpus;
668 	rcm_info_t	*rcm_info = NULL;
669 	int		i, j, k;
670 	nvlist_t	*nvl = NULL;
671 	int		rv = 0;
672 
673 	/* get all cpus in the system */
674 	if (syscpus(&syscpuids, &sysncpus) == -1)
675 		return (-1);
676 
677 	newncpus = sysncpus - rcm->ncpus;
678 	if ((newcpuids = calloc(newncpus, sizeof (cpuid_t))) == NULL) {
679 		dprintf((stderr, "calloc: %s", strerror(errno)));
680 		rv = -1;
681 		goto out;
682 	}
683 
684 	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) {
685 		dprintf((stderr, "nvlist_alloc fail\n"));
686 		rv = -1;
687 		goto out;
688 	}
689 
690 	/*
691 	 * Construct the new cpu list.
692 	 */
693 	for (i = 0, j = 0; i < sysncpus; i++) {
694 		for (k = 0; k < rcm->ncpus; k++) {
695 			if (rcm->cpus[k] == syscpuids[i]) {
696 				break;
697 			}
698 		}
699 		if (k == rcm->ncpus) {
700 			newcpuids[j++] = syscpuids[i];
701 		}
702 	}
703 
704 	if (nvlist_add_int32(nvl, "old_total", sysncpus) != 0 ||
705 	    nvlist_add_int32(nvl, "new_total", newncpus) != 0 ||
706 	    nvlist_add_int32_array(nvl, "old_cpu_list", syscpuids,
707 	    sysncpus) != 0 ||
708 	    nvlist_add_int32_array(nvl, "new_cpu_list", newcpuids,
709 	    newncpus) != 0) {
710 		dprintf((stderr, "nvlist_add fail\n"));
711 		rv = -1;
712 		goto out;
713 	}
714 
715 #ifdef DEBUG
716 	dprintf((stderr, "old_total=%d\n", sysncpus));
717 	for (i = 0; i < sysncpus; i++) {
718 		dprintf((stderr, "old_cpu_list[%d]=%d\n", i, syscpuids[i]));
719 	}
720 	dprintf((stderr, "new_total=%d\n", newncpus));
721 	for (i = 0; i < newncpus; i++) {
722 		dprintf((stderr, "new_cpu_list[%d]=%d\n", i, newcpuids[i]));
723 	}
724 #endif /* DEBUG */
725 
726 	(void) rcm_request_capacity_change(rcm->hdl, RCM_CPU_ALL,
727 	    RCM_QUERY|RCM_SCOPE, nvl, &rcm_info);
728 
729 	rv = add_rcm_clients(&ri_hdl->cpu_cap_clients, rcm, rcm_info, 0, NULL);
730 
731 out:
732 	s_free(syscpuids);
733 	s_free(newcpuids);
734 	if (nvl != NULL)
735 		nvlist_free(nvl);
736 	if (rcm_info != NULL)
737 		rcm_free_info(rcm_info);
738 
739 	return (rv);
740 }
741 
742 static int
743 syscpus(cpuid_t **cpuids, int *ncpus)
744 {
745 	kstat_t		*ksp;
746 	kstat_ctl_t	*kc;
747 	cpuid_t		*cp;
748 	int		i;
749 
750 	if ((*ncpus = sysconf(_SC_NPROCESSORS_CONF)) == -1) {
751 		dprintf((stderr, "sysconf: %s\n", strerror(errno)));
752 		return (-1);
753 	}
754 
755 	if ((kc = kstat_open()) == NULL) {
756 		dprintf((stderr, "kstat_open fail\n"));
757 		return (-1);
758 	}
759 
760 	if ((cp = calloc(*ncpus, sizeof (cpuid_t))) == NULL) {
761 		dprintf((stderr, "calloc: %s\n", strerror(errno)));
762 		(void) kstat_close(kc);
763 		return (-1);
764 	}
765 
766 	for (i = 0, ksp = kc->kc_chain; ksp != NULL; ksp = ksp->ks_next) {
767 		if (strcmp(ksp->ks_module, "cpu_info") == 0) {
768 			cp[i++] = ksp->ks_instance;
769 		}
770 	}
771 
772 	(void) kstat_close(kc);
773 	*cpuids = cp;
774 
775 	return (0);
776 }
777 
778 /*
779  * RCM capacity change request for memory.
780  */
781 static int
782 mem_cap_request(ri_hdl_t *ri_hdl, rcmd_t *rcm)
783 {
784 	nvlist_t	*nvl;
785 	rcm_info_t	*rcm_info = NULL;
786 	long 		newpages;
787 	int		rv = 0;
788 
789 	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) {
790 		dprintf((stderr, "nvlist_alloc fail\n"));
791 		return (-1);
792 	}
793 
794 	newpages = rcm->ms_syspages - rcm->query_pages;
795 	if (nvlist_add_int32(nvl, "page_size", rcm->ms_pagesize) != 0 ||
796 	    nvlist_add_int32(nvl, "old_pages", rcm->ms_syspages) != 0 ||
797 	    nvlist_add_int32(nvl, "new_pages", newpages) != 0) {
798 		dprintf((stderr, "nvlist_add fail\n"));
799 		nvlist_free(nvl);
800 		return (-1);
801 	}
802 
803 	dprintf((stderr, "memory capacity change req: "
804 	    "page_size=%ld, old_pages=%ld, new_pages=%ld\n",
805 	    rcm->ms_pagesize, rcm->ms_syspages, newpages));
806 
807 	(void) rcm_request_capacity_change(rcm->hdl, RCM_MEM_ALL,
808 	    RCM_QUERY|RCM_SCOPE, nvl, &rcm_info);
809 
810 	rv = add_rcm_clients(&ri_hdl->mem_cap_clients, rcm, rcm_info, 0, NULL);
811 
812 	nvlist_free(nvl);
813 	if (rcm_info != NULL)
814 		rcm_free_info(rcm_info);
815 
816 	return (rv);
817 }
818 
819 static int
820 (*cm_rcm_qpass_func(cfga_type_t ap_type))(cfga_list_data_t *, rcmd_t *)
821 {
822 	int i;
823 
824 	for (i = 0; i < sizeof (cm_ctl) / sizeof (cm_ctl[0]); i++) {
825 		if (strcmp(cm_ctl[i].type, ap_type) == 0) {
826 			return (cm_ctl[i].cm_rcm_qpass);
827 		}
828 	}
829 	return (NULL);
830 }
831 
832 /*
833  * Save cpu ids and RCM abstract resource names.
834  * Cpu ids will be used for the capacity change request.
835  * Resource names will be used for the offline query.
836  */
837 static int
838 cpu_rcm_qpass(cfga_list_data_t *cfga_ldata, rcmd_t *rcm)
839 {
840 	processorid_t	cpuid;
841 	char		*cpustr, *lasts, *rsrcname, rbuf[32];
842 	char		cbuf[CFGA_INFO_LEN];
843 	int		speed, ecache;
844 
845 	if (sscanf(cfga_ldata->ap_info, CPU_INFO_FMT, cbuf, &speed,
846 	    &ecache) != 3)
847 		return (-1);
847 
848 	for (cpustr = (char *)strtok_r(cbuf, CPUID_SEP, &lasts);
849 	    cpustr != NULL;
850 	    cpustr = (char *)strtok_r(NULL, CPUID_SEP, &lasts)) {
851 		cpuid = atoi(cpustr);
852 
853 		(void) snprintf(rbuf, sizeof (rbuf), "%s%d", RCM_CPU, cpuid);
854 		if ((rsrcname = strdup(rbuf)) == NULL) {
855 			dprintf((stderr, "strdup fail\n"));
856 			return (-1);
857 		}
858 		assert(rcm->nrlist < rcm->ndevs && rcm->ncpus < rcm->ndevs);
859 		rcm->rlist[rcm->nrlist++] = rsrcname;
860 		rcm->cpus[rcm->ncpus++] = (cpuid_t)cpuid;
861 
862 		dprintf((stderr, "cpu_rcm_qpass: cpuid=%d, rsrcname=%s\n",
863 		    cpuid, rsrcname));
864 	}
865 
866 	return (0);
867 }
868 
869 /*
870  * No RCM resource names for individual memory units, so
871  * just add to offline query page count.
872  */
873 static int
874 mem_rcm_qpass(cfga_list_data_t *cfga, rcmd_t *rcm)
875 {
876 	char		*cp;
877 	uint_t		kbytes;
878 	longlong_t	ii;
879 
880 	if ((cp = strstr(cfga->ap_info, "size")) == NULL ||
881 	    sscanf(cp, "size=%u", &kbytes) != 1) {
882 		dprintf((stderr, "unknown sbd info format: %s\n",
883 		    cfga->ap_info));
883 		return (-1);
884 	}
885 
886 	ii = (longlong_t)kbytes * KBYTE;
887 	rcm->query_pages += (uint_t)(ii / rcm->ms_pagesize);
888 
889 	dprintf((stderr, "%s: npages=%u\n", cfga->ap_log_id,
890 	    (uint_t)(ii / rcm->ms_pagesize)));
891 
892 	return (0);
893 }
894 
895 /*
896  * Add physical I/O bus name to RCM resource list.
897  */
898 static int
899 io_rcm_qpass(cfga_list_data_t *cfga, rcmd_t *rcm)
900 {
901 	char		path[MAXPATHLEN];
902 	char		buf[MAXPATHLEN];
903 	char		*rsrcname;
904 
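	/*
	 * ap_info is expected to carry the devfs path of the bus, e.g.
	 * "device=/pci@8,700000" (path is illustrative); the "/devices"
	 * prefix is prepended below to form the RCM resource name.
	 */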
905 	if (sscanf(cfga->ap_info, "device=%s", path) != 1) {
906 		dprintf((stderr, "unknown sbd info format: %s\n",
907 		    cfga->ap_info));
908 		return (-1);
909 	}
910 
911 	(void) snprintf(buf, sizeof (buf), "/devices%s", path);
912 	if ((rsrcname = strdup(buf)) == NULL) {
913 		dprintf((stderr, "strdup fail\n"));
914 		return (-1);
915 	}
916 
917 	assert(rcm->nrlist < rcm->ndevs);
918 	rcm->rlist[rcm->nrlist++] = rsrcname;
919 
920 	return (0);
921 }
922 
923 static int
924 (*cm_info_func(cfga_type_t ap_type))(ri_ap_t *, cfga_list_data_t *,
925     int, rcmd_t *)
926 {
927 	int i;
928 
929 	for (i = 0; i < sizeof (cm_ctl) / sizeof (cm_ctl[0]); i++) {
930 		if (strcmp(cm_ctl[i].type, ap_type) == 0) {
931 			return (cm_ctl[i].cm_info);
932 		}
933 	}
934 	return (NULL);
935 }
936 
937 /*
938  * Create cpu handle, adding properties exported by sbd plugin and
939  * RCM client usage.
940  */
941 /* ARGSUSED */
942 static int
943 cpu_cm_info(ri_ap_t *ap, cfga_list_data_t *cfga, int flags, rcmd_t *rcm)
944 {
945 	processorid_t	cpuid;
946 	int		speed, ecache, rv = 0;
947 	char		buf[CFGA_INFO_LEN], *cpustr, *lasts;
948 
949 	if (sscanf(cfga->ap_info, CPU_INFO_FMT, buf, &speed, &ecache) != 3) {
950 		dprintf((stderr, "unknown sbd info format: %s\n",
951 		    cfga->ap_info));
952 		return (-1);
953 	}
954 
955 	/* parse cpuids */
956 	for (cpustr = (char *)strtok_r(buf, CPUID_SEP, &lasts);
957 	    cpustr != NULL;
958 	    cpustr = (char *)strtok_r(NULL, CPUID_SEP, &lasts)) {
959 		cpuid = atoi(cpustr);
960 		if ((rv = i_cpu_cm_info(cpuid, speed, ecache, ap, rcm)) != 0) {
961 			break;
962 		}
963 	}
964 
965 	return (rv);
966 }
967 
968 static int
969 i_cpu_cm_info(processorid_t cpuid, int speed, int ecache_cfga, ri_ap_t *ap,
970     rcmd_t *rcm)
971 {
972 	int		ecache_mb = 0;
973 	int		ecache_kb = 0;
974 	char		*state, buf[32];
975 	processor_info_t cpu_info;
976 	ri_dev_t	*cpu = NULL;
977 	rcm_info_t	*rcm_info = NULL;
978 
979 	/*
980 	 * Could have been unconfigured in the interim, so cannot
981 	 * count on processor_info recognizing it.
982 	 */
983 	state = (processor_info(cpuid, &cpu_info) == 0) ?
984 	    pstate2str(cpu_info.pi_state) : "unknown";
985 
986 	if ((cpu = ri_dev_alloc()) == NULL) {
987 		dprintf((stderr, "ri_dev_alloc failed\n"));
988 		return (-1);
989 	}
990 
991 	/*
992 	 * Assume the ecache_info table has the right e-cache size for
993 	 * this CPU.  Use the value found in cfgadm (ecache_cfga) if not.
994 	 */
995 	if (rcm->ecache_info.ecache_sizes != NULL) {
996 		assert(rcm->ecache_info.cpuid_max != 0 &&
997 		    cpuid <= rcm->ecache_info.cpuid_max);
998 		ecache_mb = rcm->ecache_info.ecache_sizes[cpuid] / MBYTE;
999 		ecache_kb = rcm->ecache_info.ecache_sizes[cpuid] / KBYTE;
1000 	}
1001 
1002 	if (ecache_mb == 0) {
1003 		ecache_mb = ecache_cfga;
1004 	}
1005 
1006 	dprintf((stderr, "i_cpu_cm_info: cpu(%d) ecache=%d MB\n",
1007 	    cpuid, ecache_mb));
1008 
1009 	if (nvlist_add_int32(cpu->conf_props, RI_CPU_ID, cpuid) != 0 ||
1010 	    nvlist_add_int32(cpu->conf_props, RI_CPU_SPEED, speed) != 0 ||
1011 	    nvlist_add_int32(cpu->conf_props, RI_CPU_ECACHE, ecache_mb) != 0 ||
1012 	    nvlist_add_string(cpu->conf_props, RI_CPU_STATE, state) != 0) {
1013 		dprintf((stderr, "nvlist_add fail\n"));
1014 		ri_dev_free(cpu);
1015 		return (-1);
1016 	}
1017 
1018 	/*
1019 	 * Report cache size in kilobyte units if available.  This info is
1020 	 * added to support processors with cache sizes that are non-integer
1021 	 * megabyte multiples.
1022 	 */
1023 	if (ecache_kb != 0) {
1024 		if (nvlist_add_int32(cpu->conf_props, RI_CPU_ECACHE_KBYTE,
1025 		    ecache_kb) != 0)  {
1026 			dprintf((stderr, "nvlist_add fail: %s\n",
1027 			    RI_CPU_ECACHE_KBYTE));
1028 			ri_dev_free(cpu);
1029 			return (-1);
1030 		}
1031 	}
1032 
1033 	(void) snprintf(buf, sizeof (buf), "%s%d", RCM_CPU, cpuid);
1034 	dprintf((stderr, "rcm_get_info(%s)\n", buf));
1035 	if (rcm_get_info(rcm->hdl, buf, RCM_INCLUDE_DEPENDENT,
1036 	    &rcm_info) != RCM_SUCCESS) {
1037 		dprintf((stderr, "rcm_get_info (errno=%d)\n", errno));
1038 		ri_dev_free(cpu);
1039 		if (rcm_info != NULL)
1040 			rcm_free_info(rcm_info);
1041 		return (-1);
1042 	}
1043 
1044 	/* rcm_info is not needed beyond this point; avoid leaking it. */
1045 	if (rcm_info != NULL)
1046 		rcm_free_info(rcm_info);
1047 
1048 	dev_list_cpu_insert(&ap->cpus, cpu, cpuid);
1045 
1046 	return (0);
1047 }
1048 
1049 /*
1050  * Create memory handle, adding properties exported by sbd plugin.
1051  * No RCM tuples to be saved unless RCM is modified to export names
1052  * for individual memory units.
1053  */
1054 /* ARGSUSED */
1055 static int
1056 mem_cm_info(ri_ap_t *ap, cfga_list_data_t *cfga, int flags, rcmd_t *rcm)
1057 {
1058 	ri_dev_t	*mem;
1059 	char		*cp;
1060 	char		*cpval;
1061 	int		len;
1062 	uint64_t	base_addr;				/* required */
1063 	int32_t		size_kb;				/* required */
1064 	int32_t		perm_kb = 0;				/* optional */
1065 	char		target[CFGA_AP_LOG_ID_LEN] = "";	/* optional */
1066 	int32_t		del_kb = 0;				/* optional */
1067 	int32_t		rem_kb = 0;				/* optional */
1068 	char		source[CFGA_AP_LOG_ID_LEN] = "";	/* optional */
1069 
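	/*
	 * Illustrative ap_info (values are hypothetical); only address
	 * and size are required, the remaining fields are optional:
	 *
	 *	"address=0x0 size=4194304 permanent=32768
	 *	    target=SB1::memory deleted=0 remaining=0"
	 */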
1070 	if (sscanf(cfga->ap_info, "address=0x%llx size=%u", &base_addr,
1071 	    &size_kb) != 2) {
1072 		goto err_fmt;
1073 	}
1074 
1075 	if ((cp = strstr(cfga->ap_info, "permanent")) != NULL &&
1076 	    sscanf(cp, "permanent=%u", &perm_kb) != 1) {
1077 		goto err_fmt;
1078 	}
1079 
1080 	if ((cp = strstr(cfga->ap_info, "target")) != NULL) {
1081 		if ((cpval = strstr(cp, "=")) == NULL) {
1082 			goto err_fmt;
1083 		}
1084 		for (len = 0; cpval[len] != '\0' && cpval[len] != ' '; len++) {
1085 			if (len >= CFGA_AP_LOG_ID_LEN) {
1086 				goto err_fmt;
1087 			}
1088 		}
1089 		if (sscanf(cp, "target=%s deleted=%u remaining=%u", target,
1090 		    &del_kb, &rem_kb) != 3) {
1091 			goto err_fmt;
1092 		}
1093 	}
1094 
1095 	if ((cp = strstr(cfga->ap_info, "source")) != NULL) {
1096 		if ((cpval = strstr(cp, "=")) == NULL) {
1097 			goto err_fmt;
1098 		}
1099 		for (len = 0; cpval[len] != '\0' && cpval[len] != ' '; len++) {
1100 			if (len >= CFGA_AP_LOG_ID_LEN) {
1101 				goto err_fmt;
1102 			}
1103 		}
1104 		if (sscanf(cp, "source=%s", source) != 1) {
1105 			goto err_fmt;
1106 		}
1107 	}
1108 
1109 	dprintf((stderr, "%s: base=0x%llx, size=%u, permanent=%u\n",
1110 	    cfga->ap_log_id, base_addr, size_kb, perm_kb));
1111 
1112 	if ((mem = ri_dev_alloc()) == NULL)
1113 		return (-1);
1114 
1115 	/*
1116 	 * Convert memory sizes to MB (truncate).
1117 	 */
1118 	if (nvlist_add_uint64(mem->conf_props, RI_MEM_ADDR, base_addr) != 0 ||
1119 	    nvlist_add_int32(mem->conf_props, RI_MEM_BRD, size_kb/KBYTE) != 0 ||
1120 	    nvlist_add_int32(mem->conf_props, RI_MEM_PERM,
1121 	    perm_kb/KBYTE) != 0) {
1122 		dprintf((stderr, "nvlist_add failure\n"));
1123 		ri_dev_free(mem);
1124 		return (-1);
1125 	}
1126 
1127 	if (target[0] != '\0' &&
1128 	    (nvlist_add_string(mem->conf_props, RI_MEM_TARG, target) != 0 ||
1129 	    nvlist_add_int32(mem->conf_props, RI_MEM_DEL, del_kb/KBYTE) != 0 ||
1130 	    nvlist_add_int32(mem->conf_props, RI_MEM_REMAIN,
1131 	    rem_kb/KBYTE) != 0)) {
1132 		dprintf((stderr, "nvlist_add failure\n"));
1133 		ri_dev_free(mem);
1134 		return (-1);
1135 	}
1136 
1137 	if (source[0] != '\0' &&
1138 	    nvlist_add_string(mem->conf_props, RI_MEM_SRC, source) != 0) {
1139 		dprintf((stderr, "nvlist_add failure\n"));
1140 		ri_dev_free(mem);
1141 		return (-1);
1142 	}
1143 
1144 	/*
1145 	 * XXX - move this property to attachment point hdl?
1146 	 */
1147 	if (nvlist_add_int32(mem->conf_props, RI_MEM_DOMAIN,
1148 	    rcm->ms_sysmb) != 0) {
1149 		dprintf((stderr, "nvlist_add failure\n"));
1150 		ri_dev_free(mem);
1151 		return (-1);
1152 	}
1153 
1154 	dev_list_append(&ap->mems, mem);
1155 	return (0);
1156 
1157 err_fmt:
1158 	dprintf((stderr, "unknown sbd info format: %s\n", cfga->ap_info));
1159 	return (-1);
1160 }
1161 
1162 /*
1163  * Initiate a libdevinfo walk on the IO bus path.
1164  * XXX - investigate performance using two threads here: one thread to do the
1165  * libdevinfo snapshot and treewalk; and one thread to get RCM usage info
1166  */
1167 static int
1168 io_cm_info(ri_ap_t *ap, cfga_list_data_t *cfga, int flags, rcmd_t *rcm)
1169 {
1170 	int			i;
1171 	int			j;
1172 	int			k;
1173 	int			set_size;
1174 	int			retval = 0;
1175 	int			n_usage;
1176 	devinfo_arg_t		di_arg;
1177 	lookup_table_t		devicetable;
1178 	lookup_entry_t		*deventry;
1179 	lookup_entry_t		*lastdeventry;
1180 	ri_dev_t		*io = NULL;
1181 	ri_client_t		*client;
1182 	ri_client_t		*tmp;
1183 	di_devlink_handle_t	linkhd = NULL;
1184 	di_node_t		root = DI_NODE_NIL;
1185 	di_node_t		node = DI_NODE_NIL;
1186 	rcm_info_tuple_t	*rcm_tuple;
1187 	rcm_info_t		*rcm_info = NULL;
1188 	const char		*rcm_rsrc = NULL;
1189 	char			drv_inst[MAXPATHLEN];
1190 	char			path[MAXPATHLEN];
1191 	char			pathbuf[MAXPATHLEN];
1192 
1193 	dprintf((stderr, "io_cm_info(%s)\n", cfga->ap_log_id));
1194 
1195 	/* Extract devfs path from cfgadm information */
1196 	if (sscanf(cfga->ap_info, "device=%s\n", path) != 1) {
1197 		dprintf((stderr, "unknown sbd info format: %s\n",
1198 		    cfga->ap_info));
1199 		return (-1);
1200 	}
1201 
1202 	/* Initialize empty device lookup table */
1203 	devicetable.n_entries = 0;
1204 	devicetable.n_slots = 0;
1205 	devicetable.table = NULL;
1206 
1207 	/* Get libdevinfo snapshot */
1208 	dprintf((stderr, "di_init(%s)\n", path));
1209 	if ((root = di_init(path, DINFOCPYALL)) == DI_NODE_NIL) {
1210 		dprintf((stderr, "di_init: %s\n", strerror(errno)));
1211 		retval = RI_NODE_NIL; /* tell ri_init to skip this node */
1212 		goto end;
1213 	}
1214 
1215 	/*
1216 	 * Map in devlinks database.
1217 	 * XXX - This could be moved to ri_init() for better performance.
1218 	 */
1219 	dprintf((stderr, "di_devlink_init()\n"));
1220 	if ((linkhd = di_devlink_init(NULL, 0)) == NULL) {
1221 		dprintf((stderr, "di_devlink_init: %s\n", strerror(errno)));
1222 		retval = -1;
1223 		goto end;
1224 	}
1225 
1226 	/* Initialize argument for devinfo treewalk */
1227 	di_arg.err = 0;
1228 	di_arg.node = DI_NODE_NIL;
1229 	di_arg.pathbuf = pathbuf;
1230 	di_arg.table = &devicetable;
1231 	di_arg.linkhd = linkhd;
1232 
1233 	/* Use libdevinfo treewalk to build device lookup table */
1234 	if (di_walk_node(root, DI_WALK_CLDFIRST, (void *)&di_arg,
1235 	    devinfo_node_walk) != 0) {
1236 		dprintf((stderr, "di_walk_node: %s\n", strerror(errno)));
1237 		retval = -1;
1238 		goto end;
1239 	}
1240 	if (di_arg.err != 0) {
1241 		dprintf((stderr, "di_walk_node: device tree walk failed\n"));
1242 		retval = -1;
1243 		goto end;
1244 	}
1245 
1246 	/* Call RCM to gather usage information */
1247 	(void) snprintf(pathbuf, MAXPATHLEN, "/devices%s", path);
1248 	dprintf((stderr, "rcm_get_info(%s)\n", pathbuf));
1249 	if (rcm_get_info(rcm->hdl, pathbuf,
1250 	    RCM_INCLUDE_SUBTREE|RCM_INCLUDE_DEPENDENT, &rcm_info) !=
1251 	    RCM_SUCCESS) {
1252 		dprintf((stderr, "rcm_get_info (errno=%d)\n", errno));
1253 		retval = -1;
1254 		goto end;
1255 	}
1256 
1257 	/* Sort the device table by name (proper order for lookups) */
1258 	qsort(devicetable.table, devicetable.n_entries, sizeof (lookup_entry_t),
1259 	    table_compare_names);
1260 
1261 	/* Perform mappings of RCM usage segments to device table entries */
1262 	lastdeventry = NULL;
1263 	rcm_tuple = NULL;
1264 	while ((rcm_tuple = rcm_info_next(rcm_info, rcm_tuple)) != NULL) {
1265 		if ((rcm_rsrc = rcm_info_rsrc(rcm_tuple)) == NULL)
1266 			continue;
1267 		if ((deventry = lookup(&devicetable, rcm_rsrc)) != NULL) {
1268 			if (add_usage(deventry, rcm_rsrc, rcm_tuple)) {
1269 				retval = -1;
1270 				goto end;
1271 			}
1272 			lastdeventry = deventry;
1273 		} else {
1274 			if (add_usage(lastdeventry, rcm_rsrc, rcm_tuple)) {
1275 				retval = -1;
1276 				goto end;
1277 			}
1278 		}
1279 	}
1280 
1281 	/* Re-sort the device table by index number (original treewalk order) */
1282 	qsort(devicetable.table, devicetable.n_entries, sizeof (lookup_entry_t),
1283 	    table_compare_indices);
1284 
1285 	/*
1286 	 * Use the mapped usage and the device table to construct ri_dev_t's.
1287 	 * Construct one for each set of entries in the device table with
1288 	 * matching di_node_t's, if: 1) it has mapped RCM usage, or 2) it is
1289 	 * a leaf node and the caller has requested that unmanaged nodes be
1290 	 * included in the output.
1291 	 */
1292 	i = 0;
1293 	while (i < devicetable.n_entries) {
1294 
1295 		node = devicetable.table[i].node;
1296 
1297 		/* Count how many usage records are mapped to this node's set */
1298 		n_usage = 0;
1299 		set_size = 0;
1300 		while (((i + set_size) < devicetable.n_entries) &&
1301 		    (devicetable.table[i + set_size].node == node)) {
1302 			n_usage += devicetable.table[i + set_size].n_usage;
1303 			set_size += 1;
1304 		}
1305 
1306 		/*
1307 		 * If there's no usage, then the node is unmanaged.  Skip this
1308 		 * set of devicetable entries unless the node is a leaf node
1309 		 * and the caller has requested information on unmanaged leaves.
1310 		 */
1311 		if ((n_usage == 0) &&
1312 		    !((flags & RI_INCLUDE_UNMANAGED) && (ident_leaf(node)))) {
1313 			i += set_size;
1314 			continue;
1315 		}
1316 
1317 		/*
1318 		 * The checks above determined that this node is going in.
1319 		 * So determine its driver/instance name and allocate an
1320 		 * ri_dev_t for this node.
1321 		 */
1322 		if (mk_drv_inst(node, drv_inst, devicetable.table[i].name)) {
1323 			dprintf((stderr, "mk_drv_inst failed\n"));
1324 			retval = -1;
1325 			break;
1326 		}
1327 		if ((io = io_dev_alloc(drv_inst)) == NULL) {
1328 			dprintf((stderr, "io_dev_alloc failed\n"));
1329 			retval = -1;
1330 			break;
1331 		}
1332 
1333 		/* Now add all the RCM usage records (if any) to the ri_dev_t */
1334 		for (j = i; j < (i + set_size); j++) {
1335 			for (k = 0; k < devicetable.table[j].n_usage; k++) {
1336 				/* Create new ri_client_t for basic usage */
1337 				client = ri_client_alloc(
1338 				    (char *)devicetable.table[j].usage[k].rsrc,
1339 				    (char *)devicetable.table[j].usage[k].info);
1340 				if (client == NULL) {
1341 					dprintf((stderr,
1342 					    "ri_client_alloc failed\n"));
1343 					ri_dev_free(io);
1344 					retval = -1;
1345 					goto end;
1346 				}
1347 
1348 				/* Add extra query usage to the ri_client_t */
1349 				if ((flags & RI_INCLUDE_QUERY) &&
1350 				    (add_query_state(rcm, client,
1351 				    devicetable.table[j].usage[k].rsrc,
1352 				    devicetable.table[j].usage[k].info) != 0)) {
1353 					dprintf((stderr,
1354 					    "add_query_state failed\n"));
1355 					ri_dev_free(io);
1356 					ri_client_free(client);
1357 					retval = -1;
1358 					goto end;
1359 				}
1360 
1361 				/* Link new ri_client_t to ri_dev_t */
1362 				if (io->rcm_clients) {
1363 					tmp = io->rcm_clients;
1364 					while (tmp->next)
1365 						tmp = tmp->next;
1366 					tmp->next = client;
1367 				} else {
1368 					io->rcm_clients = client;
1369 				}
1370 			}
1371 		}
1372 
1373 		/* Link the ri_dev_t into the return value */
1374 		dev_list_append(&ap->ios, io);
1375 
1376 		/* Advance to the next node set */
1377 		i += set_size;
1378 	}
1379 
1380 end:
1381 	if (rcm_info != NULL)
1382 		rcm_free_info(rcm_info);
1383 	if (linkhd != NULL)
1384 		di_devlink_fini(&linkhd);
1385 	if (root != DI_NODE_NIL)
1386 		di_fini(root);
1387 	empty_table(&devicetable);
1388 
1389 	dprintf((stderr, "io_cm_info: returning %d\n", retval));
1390 	return (retval);
1391 }
1392 
1393 static int
1394 ident_leaf(di_node_t node)
1395 {
1396 	di_minor_t	minor = DI_MINOR_NIL;
1397 
1398 	return ((minor = di_minor_next(node, minor)) != DI_MINOR_NIL &&
1399 	    di_child_node(node) == DI_NODE_NIL);
1400 }
1401 
1402 /* ARGSUSED */
1403 static int
1404 mk_drv_inst(di_node_t node, char drv_inst[], char *devfs_path)
1405 {
1406 	char	*drv;
1407 	int	inst;
1408 
1409 	if ((drv = di_driver_name(node)) == NULL) {
1410 		dprintf((stderr, "no driver bound to %s\n",
1411 		    devfs_path));
1412 		return (-1);
1413 	}
1414 
1415 	if ((inst = di_instance(node)) == -1) {
1416 		dprintf((stderr, "no instance assigned to %s\n",
1417 		    devfs_path));
1418 		return (-1);
1419 	}
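	/* Compose the driver/instance name, e.g. "sd" + 2 -> "sd2". */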
1420 	(void) snprintf(drv_inst, MAXPATHLEN, "%s%d", drv, inst);
1421 
1422 	return (0);
1423 }
1424 
1425 /*
1426  * Libdevinfo walker.
1427  *
1428  * During the tree walk of the attached IO devices, for each node
1429  * and all of its associated minors, the following actions are performed:
1430  *  -  The /devices path of the physical device node or minor
1431  *     is stored in a lookup table along with a reference to the
1432  *     libdevinfo node it represents via add_lookup_entry().
1433  *  -  The device links associated with each device are also
1434  *     stored in the same lookup table along with a reference to
1435  *     the libdevinfo node it represents via the minor walk callback.
1436  *
1437  */
1438 static int
1439 devinfo_node_walk(di_node_t node, void *arg)
1440 {
1441 	char			*devfs_path;
1442 #ifdef DEBUG
1443 	char			*drv;
1444 #endif /* DEBUG */
1445 	devinfo_arg_t		*di_arg = (devinfo_arg_t *)arg;
1446 
1447 	if (node == DI_NODE_NIL) {
1448 		return (DI_WALK_TERMINATE);
1449 	}
1450 
1451 	if (((di_state(node) & DI_DRIVER_DETACHED) == 0) &&
1452 	    ((devfs_path = di_devfs_path(node)) != NULL)) {
1453 
1454 		/* Use the provided path buffer to create full /devices path */
1455 		(void) snprintf(di_arg->pathbuf, MAXPATHLEN, "/devices%s",
1456 		    devfs_path);
1457 
1458 #ifdef DEBUG
1459 		dprintf((stderr, "devinfo_node_walk(%s)\n", di_arg->pathbuf));
1460 		if ((drv = di_driver_name(node)) != NULL)
1461 			dprintf((stderr, " driver name %s instance %d\n", drv,
1462 			    di_instance(node)));
1463 #endif
1464 
1465 		/* Free the devfs_path */
1466 		di_devfs_path_free(devfs_path);
1467 
1468 		/* Add an entry to the lookup table for this physical device */
1469 		if (add_lookup_entry(di_arg->table, di_arg->pathbuf, node)) {
1470 			dprintf((stderr, "add_lookup_entry: %s\n",
1471 			    strerror(errno)));
1472 			di_arg->err = 1;
1473 			return (DI_WALK_TERMINATE);
1474 		}
1475 
1476 		/* Check if this node has minors */
1477 		if ((di_minor_next(node, DI_MINOR_NIL)) != DI_MINOR_NIL) {
1478 			/* Walk this node's minors */
1479 			di_arg->node = node;
1480 			if (di_walk_minor(node, NULL, DI_CHECK_ALIAS, arg,
1481 			    devinfo_minor_walk) != 0) {
1482 				dprintf((stderr, "di_walk_minor: %s\n",
1483 				    strerror(errno)));
1484 				di_arg->err = 1;
1485 				return (DI_WALK_TERMINATE);
1486 			}
1487 		}
1488 	}
1489 
1490 	return (DI_WALK_CONTINUE);
1491 }
1492 
1493 /*
1494  * Use di_devlink_walk to find the /dev link from /devices path for this minor
1495  */
1496 static int
1497 devinfo_minor_walk(di_node_t node, di_minor_t minor, void *arg)
1498 {
1499 	char		*name;
1500 	char		*devfs_path;
1501 	devinfo_arg_t	*di_arg = (devinfo_arg_t *)arg;
1502 	char		pathbuf[MAXPATHLEN];
1503 
1504 #ifdef DEBUG
1505 	dprintf((stderr, "devinfo_minor_walk(%d) %s\n", minor,
1506 	    di_arg->pathbuf));
1507 
1508 	if ((name = di_minor_name(minor)) != NULL) {
1509 		dprintf((stderr, "  minor name %s\n", name));
1510 	}
1511 #endif /* DEBUG */
1512 
1513 	/* Terminate the walk when the device node changes */
1514 	if (node != di_arg->node) {
1515 		return (DI_WALK_TERMINATE);
1516 	}
1517 
1518 	/* Construct full /devices path for this minor */
1519 	if ((name = di_minor_name(minor)) == NULL) {
1520 		return (DI_WALK_CONTINUE);
1521 	}
1522 	(void) snprintf(pathbuf, MAXPATHLEN, "%s:%s", di_arg->pathbuf, name);
1523 
1524 	/* Add lookup entry for this minor node */
1525 	if (add_lookup_entry(di_arg->table, pathbuf, node)) {
1526 		dprintf((stderr, "add_lookup_entry: %s\n", strerror(errno)));
1527 		di_arg->err = 1;
1528 		return (DI_WALK_TERMINATE);
1529 	}
1530 
1531 	/*
1532 	 * Walk the associated device links.
1533 	 * Note that di_devlink_walk() doesn't want "/devices" in its paths.
1534 	 * Also note that di_devlink_walk() will fail if there are no device
1535 	 * links, which is fine; so ignore if it fails.  Only check for
1536 	 * internal failures during such a walk.
1537 	 */
1538 	devfs_path = &pathbuf[strlen("/devices")];
1539 	(void) di_devlink_walk(di_arg->linkhd, NULL, devfs_path, 0, arg,
1540 	    devinfo_devlink_walk);
1541 	if (di_arg->err != 0) {
1542 		return (DI_WALK_TERMINATE);
1543 	}
1544 
1545 	return (DI_WALK_CONTINUE);
1546 }
1547 
1548 static int
1549 devinfo_devlink_walk(di_devlink_t devlink, void *arg)
1550 {
1551 	const char	*linkpath;
1552 	devinfo_arg_t	*di_arg = (devinfo_arg_t *)arg;
1553 
1554 	/* Get the devlink's path */
1555 	if ((linkpath = di_devlink_path(devlink)) == NULL) {
1556 		dprintf((stderr, "di_devlink_path: %s\n", strerror(errno)));
1557 		di_arg->err = 1;
1558 		return (DI_WALK_TERMINATE);
1559 	}
1560 	dprintf((stderr, "devinfo_devlink_walk: %s\n", linkpath));
1561 
1562 	/* Add lookup entry for this devlink */
1563 	if (add_lookup_entry(di_arg->table, linkpath, di_arg->node)) {
1564 		dprintf((stderr, "add_lookup_entry: %s\n", strerror(errno)));
1565 		di_arg->err = 1;
1566 		return (DI_WALK_TERMINATE);
1567 	}
1568 
1569 	return (DI_WALK_CONTINUE);
1570 }
1571 
1572 /*
1573  * Map rcm_info_t's to ri_client_t's, filtering out "uninteresting" (hack)
1574  * RCM clients. The number of "interesting" ri_client_t's is returned
1575  * in cnt if passed non-NULL.
1576  */
1577 static int
1578 add_rcm_clients(ri_client_t **client_list, rcmd_t *rcm, rcm_info_t *info,
1579     int flags, int *cnt)
1580 {
1581 	rcm_info_tuple_t	*tuple;
1582 	char			*rsrc, *usage;
1583 	ri_client_t		*client, *tmp;
1584 
1585 	assert(client_list != NULL && rcm != NULL);
1586 
1587 	if (info == NULL)
1588 		return (0);
1589 
1590 	if (cnt != NULL)
1591 		*cnt = 0;
1592 
1593 	tuple = NULL;
1594 	while ((tuple = rcm_info_next(info, tuple)) != NULL) {
1595 		if ((rsrc = (char *)rcm_info_rsrc(tuple)) == NULL ||
1596 		    (usage = (char *)rcm_info_info(tuple)) == NULL) {
1597 			continue;
1598 		}
1599 
1600 		if (rcm_ignore(rsrc, usage) == 0)
1601 			continue;
1602 
1603 		if ((client = ri_client_alloc(rsrc, usage)) == NULL)
1604 			return (-1);
1605 
1606 		if ((flags & RI_INCLUDE_QUERY) && add_query_state(rcm, client,
1607 		    rsrc, usage) != 0) {
1608 			ri_client_free(client);
1609 			return (-1);
1610 		}
1611 
1612 		if (cnt != NULL)
1613 			++*cnt;
1614 
1615 		/*
1616 		 * Link in
1617 		 */
1618 		if ((tmp = *client_list) == NULL) {
1619 			*client_list = client;
1620 			continue;
1621 		}
1622 		while (tmp->next != NULL) {
1623 			tmp = tmp->next;
1624 		}
1625 		tmp->next = client;
1626 	}
1627 
1628 	return (0);
1629 }
1630 
1631 /*
1632  * Currently only filtering out based on known info string prefixes.
1633  */
1634 /* ARGSUSED */
1635 static int
1636 rcm_ignore(char *rsrc, char *infostr)
1637 {
1638 	char	**cpp;
1639 
1640 	for (cpp = rcm_info_filter; *cpp != NULL; cpp++) {
1641 		if (strncmp(infostr, *cpp, strlen(*cpp)) == 0) {
1642 			return (0);
1643 		}
1644 	}
1645 	return (-1);
1646 }
1647 
1648 /*
1649  * If this tuple was cached in the offline query pass, add the
1650  * query state and error string to the ri_client_t.
1651  */
1652 static int
1653 add_query_state(rcmd_t *rcm, ri_client_t *client, const char *rsrc,
1654     const char *info)
1655 {
1656 	int			qstate = RI_QUERY_UNKNOWN;
1657 	char			*errstr = NULL;
1658 	rcm_info_tuple_t	*cached_tuple;
1659 
1660 	if ((cached_tuple = tuple_lookup(rcm, rsrc, info)) != NULL) {
1661 		qstate = state2query(rcm_info_state(cached_tuple));
1662 		errstr = (char *)rcm_info_error(cached_tuple);
1663 	}
1664 
1665 	if (nvlist_add_int32(client->usg_props, RI_QUERY_STATE, qstate) != 0 ||
1666 	    (errstr != NULL && nvlist_add_string(client->usg_props,
1667 	    RI_QUERY_ERR, errstr) != 0)) {
1668 		dprintf((stderr, "nvlist_add fail\n"));
1669 		return (-1);
1670 	}
1671 
1672 	return (0);
1673 }
1674 
1675 static int
1676 state2query(int rcm_state)
1677 {
1678 	int	query;
1679 
1680 	switch (rcm_state) {
1681 	case RCM_STATE_OFFLINE_QUERY:
1682 	case RCM_STATE_SUSPEND_QUERY:
1683 		query = RI_QUERY_OK;
1684 		break;
1685 	case RCM_STATE_OFFLINE_QUERY_FAIL:
1686 	case RCM_STATE_SUSPEND_QUERY_FAIL:
1687 		query = RI_QUERY_FAIL;
1688 		break;
1689 	default:
1690 		query = RI_QUERY_UNKNOWN;
1691 		break;
1692 	}
1693 
1694 	return (query);
1695 }
1696 
1697 static void
1698 dev_list_append(ri_dev_t **head, ri_dev_t *dev)
1699 {
1700 	ri_dev_t	*tmp;
1701 
1702 	if ((tmp = *head) == NULL) {
1703 		*head = dev;
1704 		return;
1705 	}
1706 	while (tmp->next != NULL) {
1707 		tmp = tmp->next;
1708 	}
1709 	tmp->next = dev;
1710 }
1711 
1712 /*
1713  * The cpu list is ordered on cpuid since CMP cpuids will not necessarily
1714  * be discovered in sequence.
1715  */
1716 static void
1717 dev_list_cpu_insert(ri_dev_t **listp, ri_dev_t *dev, processorid_t newid)
1718 {
1719 	ri_dev_t	*tmp;
1720 	int32_t		cpuid;
1721 
1722 	while ((tmp = *listp) != NULL &&
1723 	    nvlist_lookup_int32(tmp->conf_props, RI_CPU_ID, &cpuid) == 0 &&
1724 	    cpuid < newid) {
1725 		listp = &tmp->next;
1726 	}
1727 
1728 	dev->next = tmp;
1729 	*listp = dev;
1730 }
1731 
1732 /*
1733  * Linear lookup. Should convert to hash tab.
1734  */
1735 static rcm_info_tuple_t *
1736 tuple_lookup(rcmd_t *rcm, const char *krsrc, const char *kinfo)
1737 {
1738 	rcm_info_tuple_t	*tuple = NULL;
1739 	const char		*rsrc, *info;
1740 
1741 	if ((rcm == NULL) || (krsrc == NULL) || (kinfo == NULL)) {
1742 		return (NULL);
1743 	}
1744 
1745 	while ((tuple = rcm_info_next(rcm->offline_query_info,
1746 	    tuple)) != NULL) {
1747 		if ((rsrc = rcm_info_rsrc(tuple)) == NULL ||
1748 		    (info = rcm_info_info(tuple)) == NULL) {
1749 			continue;
1750 		}
1751 
1752 		if (strcmp(rsrc, krsrc) == 0 && strcmp(info, kinfo) == 0) {
1753 			return (tuple);
1754 		}
1755 	}
1756 	return (NULL);
1757 }
1758 
1759 /*
1760  * Create and link attachment point handle.
1761  */
1762 static ri_ap_t *
1763 ri_ap_alloc(char *ap_id, ri_hdl_t *hdl)
1764 {
1765 	ri_ap_t		*ap, *tmp;
1766 
1767 	if ((ap = calloc(1, sizeof (*ap))) == NULL) {
1768 		dprintf((stderr, "calloc: %s\n", strerror(errno)));
1769 		return (NULL);
1770 	}
1771 
1772 	if (nvlist_alloc(&ap->conf_props, NV_UNIQUE_NAME, 0) != 0 ||
1773 	    nvlist_add_string(ap->conf_props, RI_AP_REQ_ID, ap_id) != 0) {
1774 		if (ap->conf_props != NULL)
1775 			nvlist_free(ap->conf_props);
1776 		free(ap);
1777 		return (NULL);
1778 	}
1779 
1780 	if ((tmp = hdl->aps) == NULL) {
1781 		hdl->aps = ap;
1782 	} else {
1783 		while (tmp->next != NULL) {
1784 			tmp = tmp->next;
1785 		}
1786 		tmp->next = ap;
1787 	}
1788 
1789 	return (ap);
1790 }
1791 
1792 static ri_dev_t *
1793 ri_dev_alloc(void)
1794 {
1795 	ri_dev_t	*dev;
1796 
1797 	if ((dev = calloc(1, sizeof (*dev))) == NULL ||
1798 	    nvlist_alloc(&dev->conf_props, NV_UNIQUE_NAME, 0) != 0) {
1799 		s_free(dev);
1800 	}
1801 	return (dev);
1802 }
1803 
1804 static ri_dev_t *
1805 io_dev_alloc(char *drv_inst)
1806 {
1807 	ri_dev_t	*io;
1808 
1809 	assert(drv_inst != NULL);
1810 
1811 	if ((io = ri_dev_alloc()) == NULL)
1812 		return (NULL);
1813 
1814 	if (nvlist_add_string(io->conf_props, RI_IO_DRV_INST,
1815 	    drv_inst) != 0) {
1816 		dprintf((stderr, "nvlist_add_string fail\n"));
1817 		ri_dev_free(io);
1818 		return (NULL);
1819 	}
1820 
1821 	return (io);
1822 }
1823 
1824 static ri_client_t *
1825 ri_client_alloc(char *rsrc, char *usage)
1826 {
1827 	ri_client_t	*client;
1828 
1829 	assert(rsrc != NULL && usage != NULL);
1830 
1831 	if ((client = calloc(1, sizeof (*client))) == NULL) {
1832 		dprintf((stderr, "calloc: %s\n", strerror(errno)));
1833 		return (NULL);
1834 	}
1835 
1836 	if (nvlist_alloc(&client->usg_props, NV_UNIQUE_NAME, 0) != 0) {
1837 		dprintf((stderr, "nvlist_alloc fail\n"));
1838 		free(client);
1839 		return (NULL);
1840 	}
1841 
1842 	if (nvlist_add_string(client->usg_props, RI_CLIENT_RSRC, rsrc) != 0 ||
1843 	    nvlist_add_string(client->usg_props, RI_CLIENT_USAGE, usage) != 0) {
1844 		dprintf((stderr, "nvlist_add_string fail\n"));
1845 		ri_client_free(client);
1846 		return (NULL);
1847 	}
1848 
1849 	return (client);
1850 }
1851 
1852 static void
1853 apd_tbl_free(apd_t apd_tbl[], int napds)
1854 {
1855 	int	i;
1856 	apd_t	*apd;
1857 
1858 	for (i = 0, apd = apd_tbl; i < napds; i++, apd++)
1859 		s_free(apd->cfga_list_data);
1860 
1861 	free(apd_tbl);
1862 }
1863 
1864 static char *
1865 pstate2str(int pi_state)
1866 {
1867 	char	*state;
1868 
1869 	switch (pi_state) {
1870 	case P_OFFLINE:
1871 		state = PS_OFFLINE;
1872 		break;
1873 	case P_ONLINE:
1874 		state = PS_ONLINE;
1875 		break;
1876 	case P_FAULTED:
1877 		state = PS_FAULTED;
1878 		break;
1879 	case P_POWEROFF:
1880 		state = PS_POWEROFF;
1881 		break;
1882 	case P_NOINTR:
1883 		state = PS_NOINTR;
1884 		break;
1885 	case P_SPARE:
1886 		state = PS_SPARE;
1887 		break;
1888 	default:
1889 		state = "unknown";
1890 		break;
1891 	}
1892 
1893 	return (state);
1894 }
1895 
1896 #ifdef DEBUG
1897 static void
1898 dump_apd_tbl(FILE *fp, apd_t *apds, int n_apds)
1899 {
1900 	int			i, j;
1901 	cfga_list_data_t	*cfga_ldata;
1902 
1903 	for (i = 0; i < n_apds; i++, apds++) {
1904 		dprintf((fp, "apd_tbl[%d].nlist=%d\n", i, apds->nlist));
1905 		for (j = 0, cfga_ldata = apds->cfga_list_data; j < apds->nlist;
1906 		    j++, cfga_ldata++) {
1907 			dprintf((fp,
1908 			    "apd_tbl[%d].cfga_list_data[%d].ap_log_id=%s\n",
1909 			    i, j, cfga_ldata->ap_log_id));
1910 		}
1911 	}
1912 }
1913 #endif /* DEBUG */
1914 
1915 /*
1916  * The lookup table is a simple array that is grown in chunks
1917  * to optimize memory allocation.
1918  * Indices are assigned to each array entry in-order so that
1919  * the original device tree ordering can be discerned at a later time.
1920  *
1921  * add_lookup_entry is called from the libdevinfo tree traversal callbacks:
1922  * 1) devinfo_node_walk - physical device path for each node in
1923  *    the devinfo tree via di_walk_node(), lookup entry name is
1924  *    /devices/[di_devfs_path]
1925  * 2) devinfo_minor_walk - physical device path plus minor name for
1926  *    each minor associated with a node via di_walk_minor(), lookup entry
1927  *    name is /devices/[di_devfs_path:di_minor_name]
1928  * 3) devinfo_devlink_walk - for each minor's /dev link from its /devices
1929  *    path via di_devlink_walk(), lookup entry name is di_devlink_path()
1930  */
1931 static int
1932 add_lookup_entry(lookup_table_t *table, const char *name, di_node_t node)
1933 {
1934 	size_t		size;
1935 	lookup_entry_t	*new_table;
1936 
1938 	/* Grow the lookup table by USAGE_ALLOC_SIZE slots if necessary */
1939 	if (table->n_entries == table->n_slots) {
1940 		size = (table->n_slots + USAGE_ALLOC_SIZE) *
1941 		    sizeof (lookup_entry_t);
1942 		new_table = (lookup_entry_t *)realloc(table->table, size);
1943 		if (new_table == NULL) {
1944 			dprintf((stderr, "add_lookup_entry: alloc failed: %s\n",
1945 			    strerror(errno)));
1946 			errno = ENOMEM;
1947 			return (-1);
1948 		}
1949 		table->table = new_table;
1950 		table->n_slots += USAGE_ALLOC_SIZE;
1951 	}
1952 
1953 	dprintf((stderr, "add_lookup_entry[%d]:%s\n", table->n_entries, name));
1954 
1955 	/* Add this name to the next slot */
1956 	if ((table->table[table->n_entries].name = strdup(name)) == NULL) {
1957 		dprintf((stderr, "add_lookup_entry: strdup failed: %s\n",
1958 		    strerror(errno)));
1959 		errno = ENOMEM;
1960 		return (-1);
1961 	}
1962 	table->table[table->n_entries].index = table->n_entries;
1963 	table->table[table->n_entries].node = node;
1964 	table->table[table->n_entries].n_usage = 0;
1965 	table->table[table->n_entries].usage = NULL;
1966 	table->n_entries += 1;
1967 
1968 	return (0);
1969 }
1970 
1971 /*
1972  * lookup table entry names are full pathname strings, all start with /
1973  */
1974 static int
1975 table_compare_names(const void *a, const void *b)
1976 {
1977 	lookup_entry_t *entry1 = (lookup_entry_t *)a;
1978 	lookup_entry_t *entry2 = (lookup_entry_t *)b;
1979 
1980 	return (strcmp(entry1->name, entry2->name));
1981 }
1982 
1983 
1984 /*
1985  * Compare two indices and return -1 for less, 1 for greater, 0 for equal
1986  */
1987 static int
1988 table_compare_indices(const void *a, const void *b)
1989 {
1990 	lookup_entry_t *entry1 = (lookup_entry_t *)a;
1991 	lookup_entry_t *entry2 = (lookup_entry_t *)b;
1992 
1993 	if (entry1->index < entry2->index)
1994 		return (-1);
1995 	if (entry1->index > entry2->index)
1996 		return (1);
1997 	return (0);
1998 }
1999 
2000 /*
2001  * Given a RCM resource name, find the matching entry in the IO device table
2002  */
2003 static lookup_entry_t *
2004 lookup(lookup_table_t *table, const char *rcm_rsrc)
2005 {
2006 	lookup_entry_t	*entry;
2007 	lookup_entry_t	lookup_arg;
2008 
2009 	dprintf((stderr, "lookup:%s\n", rcm_rsrc));
2010 	lookup_arg.name = (char *)rcm_rsrc;
2011 	entry = bsearch(&lookup_arg, table->table, table->n_entries,
2012 	    sizeof (lookup_entry_t), table_compare_names);
2013 
2014 #ifdef DEBUG
2015 	if (entry != NULL) {
2016 		dprintf((stderr, " found entry:%d\n", entry->index));
2017 	}
2018 #endif /* DEBUG */
2019 	return (entry);
2020 }
2021 
2022 /*
2023  * Add RCM usage to the given device table entry.
2024  * Returns -1 on realloc failure.
2025  */
2026 static int
2027 add_usage(lookup_entry_t *entry, const char *rcm_rsrc, rcm_info_tuple_t *tuple)
2028 {
2029 	size_t		size;
2030 	const char	*info;
2031 	usage_t		*new_usage;
2032 
2033 	if ((entry == NULL) ||
2034 	    ((info = rcm_info_info(tuple)) == NULL))
2035 		return (0);
2036 
2037 	if (rcm_ignore((char *)rcm_rsrc, (char *)info) == 0)
2038 		return (0);
2039 
2040 	size = (entry->n_usage + 1) * sizeof (usage_t);
2041 	new_usage = (usage_t *)realloc(entry->usage, size);
2042 	if (new_usage == NULL) {
2043 		dprintf((stderr, "add_usage: alloc failed: %s\n",
2044 		    strerror(errno)));
2045 		return (-1);
2046 	}
2047 	dprintf((stderr, "add_usage: entry %d rsrc: %s info: %s\n",
2048 	    entry->index, rcm_rsrc, info));
2049 
2050 	entry->usage = new_usage;
2051 	entry->usage[entry->n_usage].rsrc = rcm_rsrc;
2052 	entry->usage[entry->n_usage].info = info;
2053 	entry->n_usage += 1;
2054 	return (0);
2055 }
2056 
2057 static void
2058 empty_table(lookup_table_t *table)
2059 {
2060 	int i;
2061 
2062 	if (table) {
2063 		for (i = 0; i < table->n_entries; i++) {
2064 			if (table->table[i].name)
2065 				free(table->table[i].name);
2066 			/*
2067 			 * Note: the strings pointed to from within
2068 			 * usage were freed already by rcm_free_info
2069 			 */
2070 			if (table->table[i].usage)
2071 				free(table->table[i].usage);
2072 		}
2073 		if (table->table)
2074 			free(table->table);
2075 		table->table = NULL;
2076 		table->n_entries = 0;
2077 		table->n_slots = 0;
2078 	}
2079 }
2080