/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */


/*
 * Logical domain channel devices are devices implemented entirely
 * in software; cnex is the nexus for channel-devices. They use
 * the HV channel interfaces via the LDC transport module to send
 * and receive data and to register callbacks.
 */

#include <sys/types.h>
#include <sys/cmn_err.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/devops.h>
#include <sys/instance.h>
#include <sys/modctl.h>
#include <sys/open.h>
#include <sys/stat.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/systm.h>
#include <sys/mkdev.h>
#include <sys/machsystm.h>
#include <sys/intreg.h>
#include <sys/intr.h>
#include <sys/ddi_intr_impl.h>
#include <sys/ivintr.h>
#include <sys/hypervisor_api.h>
#include <sys/ldc.h>
#include <sys/cnex.h>
#include <sys/mach_descrip.h>
#include <sys/hsvc.h>
#include <sys/sdt.h>

/*
 * Internal functions/information
 */
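/*
 * Each entry in this table maps an LDC device class to the PIL at
 * which its interrupt handlers run and the weight used when the
 * channel's Rx interrupt is distributed across CPUs (see "Channel
 * Interrupt Distribution" below).
 */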
static struct cnex_intr_map cnex_class_to_intr[] = {
	{LDC_DEV_GENERIC,	PIL_3,	 0},
	{LDC_DEV_BLK,		PIL_4,	10},
	{LDC_DEV_BLK_SVC,	PIL_3,	10},
	{LDC_DEV_NT,		PIL_6,	35},
	{LDC_DEV_NT_SVC,	PIL_4,	35},
	{LDC_DEV_SERIAL,	PIL_6,	 0}
};
#define	CNEX_MAX_DEVS (sizeof (cnex_class_to_intr) / \
				sizeof (cnex_class_to_intr[0]))

#define	CNEX_TX_INTR_WEIGHT	0

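/*
 * Extract the sun4v config handle from a 'reg' property physical
 * address: take the upper 32 bits and mask off the top nibble.
 */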
#define	SUN4V_REG_SPEC2CFG_HDL(x)	((x >> 32) & ~(0xfull << 28))

static clock_t cnex_wait_usecs = 1000; /* wait time in usecs */
static int cnex_wait_retries = 3;
static void *cnex_state;

static uint_t cnex_intr_wrapper(caddr_t arg);
static dev_info_t *cnex_find_chan_dip(dev_info_t *dip, uint64_t chan_id,
    md_t *mdp, mde_cookie_t mde);

/*
 * Channel Interrupt Distribution
 *
 * In order to balance interrupts among available CPUs, we use
 * the intr_dist_cpuid_{add,remove}_device_weight() interface to
 * assign weights to channel interrupts. These weights, which are
 * defined in the cnex_intr_map structure, influence which CPU
 * is returned by intr_dist_cpuid() when called via the cnex
 * interrupt redistribution callback cnex_intr_redist().
 * Interrupts for VIO devclass channels are given more weight than
 * other interrupts because they are expected to occur more
 * frequently and have a larger impact on overall performance.
 * Transmit interrupts are given a zero weight because they are
 * not used.
 *
 * The interrupt weights influence the target CPU selection when
 * interrupts are redistributed and when they are added. However,
 * removing interrupts can unbalance the distribution even if they
 * are removed in the reverse of the order in which they were added.
 * This can occur when interrupts are removed after a redistribution
 * has taken place.
 *
 * Channel interrupt weights affect interrupt-CPU distribution
 * relative to other weighted interrupts on the system. For VIO
 * devclass channels, values are chosen to match those used by
 * the PCI express nexus driver for net and storage devices.
 */
static void cnex_intr_redist(void *arg, int32_t weight_max, int32_t weight);
static int cnex_intr_new_cpu(cnex_soft_state_t *ssp, cnex_intr_t *iinfo);
static int cnex_intr_dis_wait(cnex_soft_state_t *ssp, cnex_intr_t *iinfo);
static int32_t cnex_class_weight(ldc_dev_t devclass);

/*
 * Debug info
 */
#ifdef DEBUG

/*
 * Print debug messages
 *
 * Set cnexdbg to 0xf to enable all messages:
 * 0x8 - Errors
 * 0x4 - Warnings
 * 0x2 - All debug messages
 * 0x1 - Minimal debug messages
 */

int cnexdbg = 0x8;

static void
cnexdebug(const char *fmt, ...)
{
	char buf[512];
	va_list ap;

	va_start(ap, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, ap);
	va_end(ap);

	cmn_err(CE_CONT, "%s\n", buf);
}

#define	D1		\
if (cnexdbg & 0x01)	\
	cnexdebug

#define	D2		\
if (cnexdbg & 0x02)	\
	cnexdebug

#define	DWARN		\
if (cnexdbg & 0x04)	\
	cnexdebug

#define	DERR		\
if (cnexdbg & 0x08)	\
	cnexdebug

#else

#define	D1
#define	D2
#define	DWARN
#define	DERR

#endif

/*
 * Config information
 */
static int cnex_attach(dev_info_t *, ddi_attach_cmd_t);
static int cnex_detach(dev_info_t *, ddi_detach_cmd_t);
static int cnex_open(dev_t *, int, int, cred_t *);
static int cnex_close(dev_t, int, int, cred_t *);
static int cnex_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int cnex_ctl(dev_info_t *, dev_info_t *, ddi_ctl_enum_t, void *,
    void *);

static struct bus_ops cnex_bus_ops = {
	BUSO_REV,
	nullbusmap,		/* bus_map */
	NULL,			/* bus_get_intrspec */
	NULL,			/* bus_add_intrspec */
	NULL,			/* bus_remove_intrspec */
	i_ddi_map_fault,	/* bus_map_fault */
	ddi_no_dma_map,		/* bus_dma_map */
	ddi_no_dma_allochdl,	/* bus_dma_allochdl */
	NULL,			/* bus_dma_freehdl */
	NULL,			/* bus_dma_bindhdl */
	NULL,			/* bus_dma_unbindhdl */
	NULL,			/* bus_dma_flush */
	NULL,			/* bus_dma_win */
	NULL,			/* bus_dma_ctl */
	cnex_ctl,		/* bus_ctl */
	ddi_bus_prop_op,	/* bus_prop_op */
	0,			/* bus_get_eventcookie */
	0,			/* bus_add_eventcall */
	0,			/* bus_remove_eventcall	*/
	0,			/* bus_post_event */
	NULL,			/* bus_intr_ctl */
	NULL,			/* bus_config */
	NULL,			/* bus_unconfig */
	NULL,			/* bus_fm_init */
	NULL,			/* bus_fm_fini */
	NULL,			/* bus_fm_access_enter */
	NULL,			/* bus_fm_access_exit */
	NULL,			/* bus_power */
	NULL			/* bus_intr_op */
};

static struct cb_ops cnex_cb_ops = {
	cnex_open,			/* open */
	cnex_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	cnex_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* poll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab  */
	D_MP | D_NEW | D_HOTPLUG	/* Driver compatibility flag */
};

static struct dev_ops cnex_ops = {
	DEVO_REV,		/* devo_rev, */
	0,			/* refcnt  */
	ddi_getinfo_1to1,	/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	cnex_attach,		/* attach */
	cnex_detach,		/* detach */
	nodev,			/* reset */
	&cnex_cb_ops,		/* driver operations */
	&cnex_bus_ops,		/* bus operations */
	nulldev,		/* power */
	ddi_quiesce_not_needed,		/* quiesce */
};

/*
 * Module linkage information for the kernel.
 */
static struct modldrv modldrv = {
	&mod_driverops,
	"sun4v channel-devices nexus",
	&cnex_ops,
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};

int
_init(void)
{
	int err;
	uint64_t majornum;
	uint64_t minornum;

	/*
	 * Check HV intr group API versioning.
	 * Note that cnex assumes interrupt cookies are
	 * supported in version 1.0 of the intr group API.
	 */
	if ((err = hsvc_version(HSVC_GROUP_INTR, &majornum, &minornum)) != 0) {
		cmn_err(CE_WARN, "cnex: failed to get intr api "
		    "group versioning errno=%d", err);
		return (err);
	} else if ((majornum != 1) && (majornum != 2)) {
		cmn_err(CE_WARN, "cnex: unsupported intr api group: "
		    "maj:0x%lx, min:0x%lx", majornum, minornum);
		return (ENOTSUP);
	}

	if ((err = ddi_soft_state_init(&cnex_state,
	    sizeof (cnex_soft_state_t), 0)) != 0) {
		return (err);
	}
	if ((err = mod_install(&modlinkage)) != 0) {
		ddi_soft_state_fini(&cnex_state);
		return (err);
	}
	return (0);
}

int
_fini(void)
{
	int err;

	if ((err = mod_remove(&modlinkage)) != 0)
		return (err);
	ddi_soft_state_fini(&cnex_state);
	return (0);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Callback function invoked by the interrupt redistribution
 * framework. It redirects interrupts to the CPUs that are
 * currently available in the system.
 *
 * Note: any interrupt with a weight greater than or equal to
 * weight_max must be redistributed when this callback is
 * invoked with (weight == weight_max), which happens once per
 * redistribution.
 */
/*ARGSUSED*/
static void
cnex_intr_redist(void *arg, int32_t weight_max, int32_t weight)
{
	cnex_ldc_t		*cldcp;
	cnex_soft_state_t	*cnex_ssp = arg;

	ASSERT(cnex_ssp != NULL);
	mutex_enter(&cnex_ssp->clist_lock);

	cldcp = cnex_ssp->clist;
	while (cldcp != NULL) {

		mutex_enter(&cldcp->lock);

		if (cldcp->tx.hdlr && (cldcp->tx.weight == weight ||
		    (weight_max == weight && cldcp->tx.weight > weight))) {
			(void) cnex_intr_new_cpu(cnex_ssp, &cldcp->tx);
		}

		if (cldcp->rx.hdlr && (cldcp->rx.weight == weight ||
		    (weight_max == weight && cldcp->rx.weight > weight))) {
			(void) cnex_intr_new_cpu(cnex_ssp, &cldcp->rx);
		}

		mutex_exit(&cldcp->lock);

		/* next channel */
		cldcp = cldcp->next;
	}

	mutex_exit(&cnex_ssp->clist_lock);
}

/*
 * Internal function to replace the CPU used by an interrupt
 * during interrupt redistribution.
 */
static int
cnex_intr_new_cpu(cnex_soft_state_t *ssp, cnex_intr_t *iinfo)
{
	int	intr_state;
	int 	rv;

	/* Determine if the interrupt is enabled */
	rv = hvldc_intr_getvalid(ssp->cfghdl, iinfo->ino, &intr_state);
	if (rv) {
		DWARN("cnex_intr_new_cpu: ino=0x%llx, can't get valid\n",
		    iinfo->ino);
		return (rv);
	}

	/* If it is enabled, disable it */
	if (intr_state == HV_INTR_VALID) {
		rv = cnex_intr_dis_wait(ssp, iinfo);
		if (rv) {
			return (rv);
		}
	}

	/* Target the interrupt at a new CPU. */
	iinfo->cpuid = intr_dist_cpuid();
	(void) hvldc_intr_settarget(ssp->cfghdl, iinfo->ino, iinfo->cpuid);
	intr_dist_cpuid_add_device_weight(iinfo->cpuid, iinfo->dip,
	    iinfo->weight);

	/* Re-enable the interrupt if it was enabled */
	if (intr_state == HV_INTR_VALID) {
		(void) hvldc_intr_setvalid(ssp->cfghdl, iinfo->ino,
		    HV_INTR_VALID);
	}

	return (0);
}

/*
 * Internal function to disable an interrupt and wait
 * for any pending interrupts to finish.
 */
static int
cnex_intr_dis_wait(cnex_soft_state_t *ssp, cnex_intr_t *iinfo)
{
	int rv, intr_state, retries;

	/* disable interrupts */
	rv = hvldc_intr_setvalid(ssp->cfghdl, iinfo->ino, HV_INTR_NOTVALID);
	if (rv) {
		DWARN("cnex_intr_dis_wait: ino=0x%llx, can't set valid\n",
		    iinfo->ino);
		return (ENXIO);
	}

	/*
	 * Make a best effort to wait for pending interrupts
	 * to finish. There is not much we can do if we time out.
	 */
	retries = 0;

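	/*
	 * Poll the interrupt state; stop once it is no longer in the
	 * DELIVERED state, the retry limit is reached or the system
	 * is panicking.
	 */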
	do {
		rv = hvldc_intr_getstate(ssp->cfghdl, iinfo->ino, &intr_state);
		if (rv) {
			DWARN("cnex_intr_dis_wait: ino=0x%llx, can't get "
			    "state\n", iinfo->ino);
			return (ENXIO);
		}

		if (intr_state != HV_INTR_DELIVERED_STATE)
			break;

		drv_usecwait(cnex_wait_usecs);

	} while (!panicstr && ++retries <= cnex_wait_retries);

	return (0);
}

/*
 * Returns the interrupt weight to use for the specified devclass.
 */
static int32_t
cnex_class_weight(ldc_dev_t devclass)
{
	int idx;

	for (idx = 0; idx < CNEX_MAX_DEVS; idx++) {
		if (devclass == cnex_class_to_intr[idx].devclass) {
			return (cnex_class_to_intr[idx].weight);
		}
	}

	/*
	 * If this code is reached, the specified devclass is
	 * invalid. New devclasses should be added to
	 * cnex_class_to_intr.
	 */
	ASSERT(0);

	return (0);
}

/*
 * Exported interface to register an LDC endpoint with
 * the channel nexus
 */
static int
cnex_reg_chan(dev_info_t *dip, uint64_t id, ldc_dev_t devclass)
{
	int		idx;
	cnex_ldc_t	*cldcp;
	cnex_ldc_t	*new_cldcp;
	int		listsz, num_nodes, num_channels;
	md_t		*mdp = NULL;
	mde_cookie_t	rootnode, *listp = NULL;
	uint64_t	tmp_id;
	uint64_t	rxino = (uint64_t)-1;
	uint64_t	txino = (uint64_t)-1;
	cnex_soft_state_t *cnex_ssp;
	int		status, instance;
	dev_info_t	*chan_dip = NULL;

	/* Get device instance and structure */
	instance = ddi_get_instance(dip);
	cnex_ssp = ddi_get_soft_state(cnex_state, instance);

	/* Check to see if channel is already registered */
	mutex_enter(&cnex_ssp->clist_lock);
	cldcp = cnex_ssp->clist;
	while (cldcp) {
		if (cldcp->id == id) {
			DWARN("cnex_reg_chan: channel 0x%llx exists\n", id);
			mutex_exit(&cnex_ssp->clist_lock);
			return (EINVAL);
		}
		cldcp = cldcp->next;
	}
	mutex_exit(&cnex_ssp->clist_lock);

	/* Get the Tx/Rx inos from the MD */
	if ((mdp = md_get_handle()) == NULL) {
		DWARN("cnex_reg_chan: cannot init MD\n");
		return (ENXIO);
	}
	num_nodes = md_node_count(mdp);
	ASSERT(num_nodes > 0);

	listsz = num_nodes * sizeof (mde_cookie_t);
	listp = (mde_cookie_t *)kmem_zalloc(listsz, KM_SLEEP);

	rootnode = md_root_node(mdp);

	/* search for all channel_endpoint nodes */
	num_channels = md_scan_dag(mdp, rootnode,
	    md_find_name(mdp, "channel-endpoint"),
	    md_find_name(mdp, "fwd"), listp);
	if (num_channels <= 0) {
		DWARN("cnex_reg_chan: invalid channel id\n");
		kmem_free(listp, listsz);
		(void) md_fini_handle(mdp);
		return (EINVAL);
	}

	for (idx = 0; idx < num_channels; idx++) {

		/* Get the channel ID */
		status = md_get_prop_val(mdp, listp[idx], "id", &tmp_id);
		if (status) {
			DWARN("cnex_reg_chan: cannot read LDC ID\n");
			kmem_free(listp, listsz);
			(void) md_fini_handle(mdp);
			return (ENXIO);
		}
		if (tmp_id != id)
			continue;

		/* Get the Tx and Rx ino */
		status = md_get_prop_val(mdp, listp[idx], "tx-ino", &txino);
		if (status) {
			DWARN("cnex_reg_chan: cannot read Tx ino\n");
			kmem_free(listp, listsz);
			(void) md_fini_handle(mdp);
			return (ENXIO);
		}
		status = md_get_prop_val(mdp, listp[idx], "rx-ino", &rxino);
		if (status) {
			DWARN("cnex_reg_chan: cannot read Rx ino\n");
			kmem_free(listp, listsz);
			(void) md_fini_handle(mdp);
			return (ENXIO);
		}
		chan_dip = cnex_find_chan_dip(dip, id, mdp, listp[idx]);
		ASSERT(chan_dip != NULL);
	}
	kmem_free(listp, listsz);
	(void) md_fini_handle(mdp);

	/*
	 * check to see if we looped through the list of channel IDs without
	 * matching one (i.e. an 'ino' has not been initialised).
	 */
	if ((rxino == -1) || (txino == -1)) {
		DERR("cnex_reg_chan: no ID matching '%llx' in MD\n", id);
		return (ENOENT);
	}

	/* Allocate a new channel structure */
	new_cldcp = kmem_zalloc(sizeof (*new_cldcp), KM_SLEEP);

	/* Initialize the channel */
	mutex_init(&new_cldcp->lock, NULL, MUTEX_DRIVER, NULL);

	new_cldcp->id = id;
	new_cldcp->tx.ino = txino;
	new_cldcp->rx.ino = rxino;
	new_cldcp->devclass = devclass;
	new_cldcp->tx.weight = CNEX_TX_INTR_WEIGHT;
	new_cldcp->rx.weight = cnex_class_weight(devclass);
	new_cldcp->dip = chan_dip;

	/*
	 * Add channel to nexus channel list.
	 * Check again to see if channel is already registered since
	 * clist_lock was dropped above.
	 */
	mutex_enter(&cnex_ssp->clist_lock);
	cldcp = cnex_ssp->clist;
	while (cldcp) {
		if (cldcp->id == id) {
			DWARN("cnex_reg_chan: channel 0x%llx exists\n", id);
			mutex_exit(&cnex_ssp->clist_lock);
			mutex_destroy(&new_cldcp->lock);
			kmem_free(new_cldcp, sizeof (*new_cldcp));
			return (EINVAL);
		}
		cldcp = cldcp->next;
	}
	new_cldcp->next = cnex_ssp->clist;
	cnex_ssp->clist = new_cldcp;
	mutex_exit(&cnex_ssp->clist_lock);

	return (0);
}

/*
 * Add Tx/Rx interrupt handler for the channel
 */
static int
cnex_add_intr(dev_info_t *dip, uint64_t id, cnex_intrtype_t itype,
    uint_t (*hdlr)(), caddr_t arg1, caddr_t arg2)
{
	int		rv, idx, pil;
	cnex_ldc_t	*cldcp;
	cnex_intr_t	*iinfo;
	cnex_soft_state_t *cnex_ssp;
	int		instance;

	/* Get device instance and structure */
	instance = ddi_get_instance(dip);
	cnex_ssp = ddi_get_soft_state(cnex_state, instance);

	/* get channel info */
	mutex_enter(&cnex_ssp->clist_lock);
	cldcp = cnex_ssp->clist;
	while (cldcp) {
		if (cldcp->id == id)
			break;
		cldcp = cldcp->next;
	}
	if (cldcp == NULL) {
		DWARN("cnex_add_intr: channel 0x%llx does not exist\n", id);
		mutex_exit(&cnex_ssp->clist_lock);
		return (EINVAL);
	}
	mutex_exit(&cnex_ssp->clist_lock);

	/* get channel lock */
	mutex_enter(&cldcp->lock);

	/* get interrupt type */
	if (itype == CNEX_TX_INTR) {
		iinfo = &(cldcp->tx);
	} else if (itype == CNEX_RX_INTR) {
		iinfo = &(cldcp->rx);
	} else {
		DWARN("cnex_add_intr: invalid interrupt type\n");
		mutex_exit(&cldcp->lock);
		return (EINVAL);
	}

	/* check if a handler is already added */
	if (iinfo->hdlr != 0) {
		DWARN("cnex_add_intr: interrupt handler exists\n");
		mutex_exit(&cldcp->lock);
		return (EINVAL);
	}

	/* save interrupt handler info */
	iinfo->hdlr = hdlr;
	iinfo->arg1 = arg1;
	iinfo->arg2 = arg2;

	/* save data for the DTrace probes used by intrstat(1M) */
	iinfo->dip = cldcp->dip;
	iinfo->id = cldcp->id;

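	/* derive the system interrupt cookie for this channel's ino */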
	iinfo->icookie = MINVINTR_COOKIE + iinfo->ino;

	/*
	 * Verify that the ino does not generate a cookie which
	 * is outside the (MINVINTR_COOKIE, MAXIVNUM) range of the
	 * system interrupt table.
	 */
	if (iinfo->icookie >= MAXIVNUM || iinfo->icookie < MINVINTR_COOKIE) {
		DWARN("cnex_add_intr: invalid cookie %x ino %x\n",
		    iinfo->icookie, iinfo->ino);
		mutex_exit(&cldcp->lock);
		return (EINVAL);
	}

	D1("cnex_add_intr: add hdlr, cfghdl=0x%llx, ino=0x%llx, "
	    "cookie=0x%llx\n", cnex_ssp->cfghdl, iinfo->ino, iinfo->icookie);

	/* Pick a PIL on the basis of the channel's devclass */
	for (idx = 0, pil = PIL_3; idx < CNEX_MAX_DEVS; idx++) {
		if (cldcp->devclass == cnex_class_to_intr[idx].devclass) {
			pil = cnex_class_to_intr[idx].pil;
			break;
		}
	}

	/* add interrupt to solaris ivec table */
	if (add_ivintr(iinfo->icookie, pil, (intrfunc)cnex_intr_wrapper,
	    (caddr_t)iinfo, NULL, NULL) != 0) {
		DWARN("cnex_add_intr: add_ivintr fail cookie %x ino %x\n",
		    iinfo->icookie, iinfo->ino);
		mutex_exit(&cldcp->lock);
		return (EINVAL);
	}

	/* set the cookie in the HV */
	rv = hvldc_intr_setcookie(cnex_ssp->cfghdl, iinfo->ino, iinfo->icookie);

	/* pick next CPU in the domain for this channel */
	iinfo->cpuid = intr_dist_cpuid();

	/* set the target CPU and then enable interrupts */
	rv = hvldc_intr_settarget(cnex_ssp->cfghdl, iinfo->ino, iinfo->cpuid);
	if (rv) {
		DWARN("cnex_add_intr: ino=0x%llx, cannot set target cpu\n",
		    iinfo->ino);
		goto hv_error;
	}
	rv = hvldc_intr_setstate(cnex_ssp->cfghdl, iinfo->ino,
	    HV_INTR_IDLE_STATE);
	if (rv) {
		DWARN("cnex_add_intr: ino=0x%llx, cannot set state\n",
		    iinfo->ino);
		goto hv_error;
	}
	rv = hvldc_intr_setvalid(cnex_ssp->cfghdl, iinfo->ino, HV_INTR_VALID);
	if (rv) {
		DWARN("cnex_add_intr: ino=0x%llx, cannot set valid\n",
		    iinfo->ino);
		goto hv_error;
	}

	intr_dist_cpuid_add_device_weight(iinfo->cpuid, iinfo->dip,
	    iinfo->weight);

	mutex_exit(&cldcp->lock);
	return (0);

hv_error:
	(void) rem_ivintr(iinfo->icookie, pil);
	mutex_exit(&cldcp->lock);
	return (ENXIO);
}


/*
 * Exported interface to unregister an LDC endpoint from
 * the channel nexus
 */
static int
cnex_unreg_chan(dev_info_t *dip, uint64_t id)
{
	cnex_ldc_t	*cldcp, *prev_cldcp;
	cnex_soft_state_t *cnex_ssp;
	int		instance;

	/* Get device instance and structure */
	instance = ddi_get_instance(dip);
	cnex_ssp = ddi_get_soft_state(cnex_state, instance);

	/* find and remove channel from list */
	mutex_enter(&cnex_ssp->clist_lock);
	prev_cldcp = NULL;
	cldcp = cnex_ssp->clist;
	while (cldcp) {
		if (cldcp->id == id)
			break;
		prev_cldcp = cldcp;
		cldcp = cldcp->next;
	}

	if (cldcp == NULL) {
		DWARN("cnex_unreg_chan: invalid channel 0x%llx\n", id);
		mutex_exit(&cnex_ssp->clist_lock);
		return (EINVAL);
	}

	if (cldcp->tx.hdlr || cldcp->rx.hdlr) {
		DWARN("cnex_unreg_chan: handlers still exist: chan %lx\n", id);
		mutex_exit(&cnex_ssp->clist_lock);
		return (ENXIO);
	}

	if (prev_cldcp)
		prev_cldcp->next = cldcp->next;
	else
		cnex_ssp->clist = cldcp->next;

	mutex_exit(&cnex_ssp->clist_lock);

	/* destroy mutex */
	mutex_destroy(&cldcp->lock);

	/* free channel */
	kmem_free(cldcp, sizeof (*cldcp));

	return (0);
}

/*
 * Remove Tx/Rx interrupt handler for the channel
 */
static int
cnex_rem_intr(dev_info_t *dip, uint64_t id, cnex_intrtype_t itype)
{
	int			rv, idx, pil;
	cnex_ldc_t		*cldcp;
	cnex_intr_t		*iinfo;
	cnex_soft_state_t	*cnex_ssp;
	int			instance, istate;

	/* Get device instance and structure */
	instance = ddi_get_instance(dip);
	cnex_ssp = ddi_get_soft_state(cnex_state, instance);

	/* get channel info */
	mutex_enter(&cnex_ssp->clist_lock);
	cldcp = cnex_ssp->clist;
	while (cldcp) {
		if (cldcp->id == id)
			break;
		cldcp = cldcp->next;
	}
	if (cldcp == NULL) {
		DWARN("cnex_rem_intr: channel 0x%llx does not exist\n", id);
		mutex_exit(&cnex_ssp->clist_lock);
		return (EINVAL);
	}
	mutex_exit(&cnex_ssp->clist_lock);

	/* get rid of the channel intr handler */
	mutex_enter(&cldcp->lock);

	/* get interrupt type */
	if (itype == CNEX_TX_INTR) {
		iinfo = &(cldcp->tx);
	} else if (itype == CNEX_RX_INTR) {
		iinfo = &(cldcp->rx);
	} else {
		DWARN("cnex_rem_intr: invalid interrupt type\n");
		mutex_exit(&cldcp->lock);
		return (EINVAL);
	}

	D1("cnex_rem_intr: interrupt ino=0x%x\n", iinfo->ino);

	/* check if a handler is already added */
	if (iinfo->hdlr == 0) {
		DWARN("cnex_rem_intr: interrupt handler does not exist\n");
		mutex_exit(&cldcp->lock);
		return (EINVAL);
	}

	D1("cnex_rem_intr: set intr to invalid ino=0x%x\n", iinfo->ino);
	rv = hvldc_intr_setvalid(cnex_ssp->cfghdl,
	    iinfo->ino, HV_INTR_NOTVALID);
	if (rv) {
		DWARN("cnex_rem_intr: cannot set valid ino=%x\n", iinfo->ino);
		mutex_exit(&cldcp->lock);
		return (ENXIO);
	}

	/*
	 * Check if there are pending interrupts. If interrupts are
	 * pending return EAGAIN.
	 */
	rv = hvldc_intr_getstate(cnex_ssp->cfghdl, iinfo->ino, &istate);
	if (rv) {
		DWARN("cnex_rem_intr: ino=0x%llx, cannot get state\n",
		    iinfo->ino);
		mutex_exit(&cldcp->lock);
		return (ENXIO);
	}

	/* if the interrupt is still pending, warn and return EAGAIN */
	if (istate != HV_INTR_IDLE_STATE) {
		DWARN("cnex_rem_intr: cannot remove intr busy ino=%x\n",
		    iinfo->ino);
		mutex_exit(&cldcp->lock);
		return (EAGAIN);
	}

	/* Pick a PIL on the basis of the channel's devclass */
	for (idx = 0, pil = PIL_3; idx < CNEX_MAX_DEVS; idx++) {
		if (cldcp->devclass == cnex_class_to_intr[idx].devclass) {
			pil = cnex_class_to_intr[idx].pil;
			break;
		}
	}

	intr_dist_cpuid_rem_device_weight(iinfo->cpuid, iinfo->dip);

	/* remove interrupt */
	(void) rem_ivintr(iinfo->icookie, pil);

	/* clear interrupt info */
	bzero(iinfo, sizeof (*iinfo));

	mutex_exit(&cldcp->lock);

	return (0);
}


/*
 * Clear pending Tx/Rx interrupt
 */
static int
cnex_clr_intr(dev_info_t *dip, uint64_t id, cnex_intrtype_t itype)
{
	int			rv;
	cnex_ldc_t		*cldcp;
	cnex_intr_t		*iinfo;
	cnex_soft_state_t	*cnex_ssp;
	int			instance;

	/* Get device instance and structure */
	instance = ddi_get_instance(dip);
	cnex_ssp = ddi_get_soft_state(cnex_state, instance);

	/* get channel info */
	mutex_enter(&cnex_ssp->clist_lock);
	cldcp = cnex_ssp->clist;
	while (cldcp) {
		if (cldcp->id == id)
			break;
		cldcp = cldcp->next;
	}
	if (cldcp == NULL) {
		DWARN("cnex_clr_intr: channel 0x%llx does not exist\n", id);
		mutex_exit(&cnex_ssp->clist_lock);
		return (EINVAL);
	}
	mutex_exit(&cnex_ssp->clist_lock);

	mutex_enter(&cldcp->lock);

	/* get interrupt type */
	if (itype == CNEX_TX_INTR) {
		iinfo = &(cldcp->tx);
	} else if (itype == CNEX_RX_INTR) {
		iinfo = &(cldcp->rx);
	} else {
		DWARN("cnex_clr_intr: invalid interrupt type\n");
		mutex_exit(&cldcp->lock);
		return (EINVAL);
	}

	D1("%s: interrupt ino=0x%x\n", __func__, iinfo->ino);

	/* check if a handler is already added */
	if (iinfo->hdlr == 0) {
		DWARN("cnex_clr_intr: interrupt handler does not exist\n");
		mutex_exit(&cldcp->lock);
		return (EINVAL);
	}

	rv = hvldc_intr_setstate(cnex_ssp->cfghdl, iinfo->ino,
	    HV_INTR_IDLE_STATE);
	if (rv) {
		DWARN("cnex_clr_intr: cannot clear interrupt state\n");
		mutex_exit(&cldcp->lock);
		return (ENXIO);
	}

	mutex_exit(&cldcp->lock);

	return (0);
}

/*
 * Channel nexus interrupt handler wrapper
 */
static uint_t
cnex_intr_wrapper(caddr_t arg)
{
	int 			res;
	uint_t 			(*handler)();
	caddr_t 		handler_arg1;
	caddr_t 		handler_arg2;
	cnex_intr_t 		*iinfo = (cnex_intr_t *)arg;

	ASSERT(iinfo != NULL);

	handler = iinfo->hdlr;
	handler_arg1 = iinfo->arg1;
	handler_arg2 = iinfo->arg2;

	/*
	 * The 'interrupt__start' and 'interrupt__complete' probes
	 * are provided to support the intrstat(1M) command. These
	 * probes only allow interrupts to be monitored on a per-device
	 * basis. To also allow activity to be monitored on a per-channel
	 * basis, two additional probes ('channelintr__start' and
	 * 'channelintr__complete') are provided here.
	 */
	DTRACE_PROBE4(channelintr__start, uint64_t, iinfo->id,
	    cnex_intr_t *, iinfo, void *, handler, caddr_t, handler_arg1);

	DTRACE_PROBE4(interrupt__start, dev_info_t, iinfo->dip,
	    void *, handler, caddr_t, handler_arg1, caddr_t, handler_arg2);

	D1("cnex_intr_wrapper:ino=0x%llx invoke client handler\n", iinfo->ino);
	res = (*handler)(handler_arg1, handler_arg2);

	DTRACE_PROBE4(interrupt__complete, dev_info_t, iinfo->dip,
	    void *, handler, caddr_t, handler_arg1, int, res);

	DTRACE_PROBE4(channelintr__complete, uint64_t, iinfo->id,
	    cnex_intr_t *, iinfo, void *, handler, caddr_t, handler_arg1);

	return (res);
}

/*ARGSUSED*/
static int
cnex_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	int 		rv, instance, reglen;
	cnex_regspec_t	*reg_p;
	ldc_cnex_t	cinfo;
	cnex_soft_state_t *cnex_ssp;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	/*
	 * Get the instance specific soft state structure.
	 * Save the devi for this instance in the soft_state data.
	 */
	instance = ddi_get_instance(devi);
	if (ddi_soft_state_zalloc(cnex_state, instance) != DDI_SUCCESS)
		return (DDI_FAILURE);
	cnex_ssp = ddi_get_soft_state(cnex_state, instance);

	cnex_ssp->devi = devi;
	cnex_ssp->clist = NULL;

	if (ddi_getlongprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    "reg", (caddr_t)&reg_p, &reglen) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	/* get the sun4v config handle for this device */
	cnex_ssp->cfghdl = SUN4V_REG_SPEC2CFG_HDL(reg_p->physaddr);
	kmem_free(reg_p, reglen);

	D1("cnex_attach: cfghdl=0x%llx\n", cnex_ssp->cfghdl);

	/* init channel list mutex */
	mutex_init(&cnex_ssp->clist_lock, NULL, MUTEX_DRIVER, NULL);

	/* Register with LDC module */
	cinfo.dip = devi;
	cinfo.reg_chan = cnex_reg_chan;
	cinfo.unreg_chan = cnex_unreg_chan;
	cinfo.add_intr = cnex_add_intr;
	cinfo.rem_intr = cnex_rem_intr;
	cinfo.clr_intr = cnex_clr_intr;

	/*
	 * ldc_register() will fail if a nexus instance has already
	 * registered with the LDC framework.
	 */
	rv = ldc_register(&cinfo);
	if (rv) {
		DWARN("cnex_attach: unable to register with LDC\n");
		mutex_destroy(&cnex_ssp->clist_lock);
		ddi_soft_state_free(cnex_state, instance);
		return (DDI_FAILURE);
	}

	if (ddi_create_minor_node(devi, "devctl", S_IFCHR, instance,
	    DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
		ddi_remove_minor_node(devi, NULL);
		mutex_destroy(&cnex_ssp->clist_lock);
		ddi_soft_state_free(cnex_state, instance);
		return (DDI_FAILURE);
	}

	/* Add interrupt redistribution callback. */
	intr_dist_add_weighted(cnex_intr_redist, cnex_ssp);

	ddi_report_dev(devi);
	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static int
cnex_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	int 		instance;
	ldc_cnex_t	cinfo;
	cnex_soft_state_t *cnex_ssp;

	switch (cmd) {
	case DDI_DETACH:
		break;
	case DDI_SUSPEND:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	instance = ddi_get_instance(devi);
	cnex_ssp = ddi_get_soft_state(cnex_state, instance);

	/* check if there are any channels still registered */
	if (cnex_ssp->clist) {
		cmn_err(CE_WARN, "?cnex_detach: instance %d still has "
		    "registered channels\n", ddi_get_instance(devi));
		return (DDI_FAILURE);
	}

	/* Unregister with LDC module */
	cinfo.dip = devi;
	(void) ldc_unregister(&cinfo);

	/* Remove interrupt redistribution callback. */
	intr_dist_rem_weighted(cnex_intr_redist, cnex_ssp);

	/* destroy mutex */
	mutex_destroy(&cnex_ssp->clist_lock);

	/* free soft state structure */
	ddi_soft_state_free(cnex_state, instance);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static int
cnex_open(dev_t *devp, int flags, int otyp, cred_t *credp)
{
	int instance;

	if (otyp != OTYP_CHR)
		return (EINVAL);

	instance = getminor(*devp);
	if (ddi_get_soft_state(cnex_state, instance) == NULL)
		return (ENXIO);

	return (0);
}

/*ARGSUSED*/
static int
cnex_close(dev_t dev, int flags, int otyp, cred_t *credp)
{
	int instance;

	if (otyp != OTYP_CHR)
		return (EINVAL);

	instance = getminor(dev);
	if (ddi_get_soft_state(cnex_state, instance) == NULL)
		return (ENXIO);

	return (0);
}

/*ARGSUSED*/
static int
cnex_ioctl(dev_t dev,
    int cmd, intptr_t arg, int mode, cred_t *cred_p, int *rval_p)
{
	int instance;
	cnex_soft_state_t *cnex_ssp;

	instance = getminor(dev);
	if ((cnex_ssp = ddi_get_soft_state(cnex_state, instance)) == NULL)
		return (ENXIO);
	ASSERT(cnex_ssp->devi);
	return (ndi_devctl_ioctl(cnex_ssp->devi, cmd, arg, mode, 0));
}

static int
cnex_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop,
    void *arg, void *result)
{
	char		name[MAXNAMELEN];
	uint32_t	reglen;
	int		*cnex_regspec;

	switch (ctlop) {
	case DDI_CTLOPS_REPORTDEV:
		if (rdip == NULL)
			return (DDI_FAILURE);
		cmn_err(CE_CONT, "?channel-device: %s%d\n",
		    ddi_driver_name(rdip), ddi_get_instance(rdip));
		return (DDI_SUCCESS);

	case DDI_CTLOPS_INITCHILD:
	{
		dev_info_t *child = (dev_info_t *)arg;

		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, child,
		    DDI_PROP_DONTPASS, "reg",
		    &cnex_regspec, &reglen) != DDI_SUCCESS) {
			return (DDI_FAILURE);
		}

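		/* the child's unit address is its 'reg' value in hex */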
		(void) snprintf(name, sizeof (name), "%x", *cnex_regspec);
		ddi_set_name_addr(child, name);
		ddi_set_parent_data(child, NULL);
		ddi_prop_free(cnex_regspec);
		return (DDI_SUCCESS);
	}

	case DDI_CTLOPS_UNINITCHILD:
	{
		dev_info_t *child = (dev_info_t *)arg;

		NDI_CONFIG_DEBUG((CE_NOTE,
		    "DDI_CTLOPS_UNINITCHILD(%s, instance=%d)",
		    ddi_driver_name(child), DEVI(child)->devi_instance));

		ddi_set_name_addr(child, NULL);

		return (DDI_SUCCESS);
	}

	case DDI_CTLOPS_DMAPMAPC:
	case DDI_CTLOPS_REPORTINT:
	case DDI_CTLOPS_REGSIZE:
	case DDI_CTLOPS_NREGS:
	case DDI_CTLOPS_SIDDEV:
	case DDI_CTLOPS_SLAVEONLY:
	case DDI_CTLOPS_AFFINITY:
	case DDI_CTLOPS_POKE:
	case DDI_CTLOPS_PEEK:
		/*
		 * These ops correspond to functions that "shouldn't" be called
		 * by a channel-device driver.  So we whine when we're called.
		 */
		cmn_err(CE_WARN, "%s%d: invalid op (%d) from %s%d\n",
		    ddi_driver_name(dip), ddi_get_instance(dip), ctlop,
		    ddi_driver_name(rdip), ddi_get_instance(rdip));
		return (DDI_FAILURE);

	case DDI_CTLOPS_ATTACH:
	case DDI_CTLOPS_BTOP:
	case DDI_CTLOPS_BTOPR:
	case DDI_CTLOPS_DETACH:
	case DDI_CTLOPS_DVMAPAGESIZE:
	case DDI_CTLOPS_IOMIN:
	case DDI_CTLOPS_POWER:
	case DDI_CTLOPS_PTOB:
	default:
		/*
		 * Everything else (e.g. PTOB/BTOP/BTOPR requests) we pass up
		 */
		return (ddi_ctlops(dip, rdip, ctlop, arg, result));
	}
}

/*
 * cnex_find_chan_dip -- Find the dip of the device corresponding to
 *	the specified channel. The dip is derived as follows:
 *
 *	- In the MD, the cfg-handle is expected to be unique among
 *	  virtual-device nodes that have the same 'name' property value.
 *	  This value is expected to be the same as that of the 'reg'
 *	  property of the corresponding OBP device node.
 *
 *	- The value of the 'name' property of a virtual-device node
 *	  in the MD is expected to match that of the corresponding
 *	  OBP device node.
 *
 *	- Find the virtual-device node corresponding to a channel-endpoint
 *	  by walking backwards, then obtain the values of its 'name' and
 *	  'cfg-handle' properties.
 *
 *	- Walk all the children of the cnex and find a matching dip that
 *	  has the same 'name' and 'reg' property values.
 *
 *	- Channels that have no corresponding device driver are treated
 *	  as if they belong to the cnex driver itself; that is, the cnex
 *	  dip is returned for them. In effect, cnex acts as an umbrella
 *	  device driver. Note that this is for 'intrstat' statistics
 *	  purposes only; as a result, 'intrstat' shows cnex as the device
 *	  servicing the interrupts for these channels.
 *
 *	  For now, only one such case is known: the channels used by
 *	  the "domain-services".
 */
static dev_info_t *
cnex_find_chan_dip(dev_info_t *dip, uint64_t chan_id,
    md_t *mdp, mde_cookie_t mde)
{
	int listsz;
	int num_nodes;
	int num_devs;
	uint64_t cfghdl;
	char *md_name;
	mde_cookie_t *listp;
	dev_info_t *cdip = NULL;

	num_nodes = md_node_count(mdp);
	ASSERT(num_nodes > 0);
	listsz = num_nodes * sizeof (mde_cookie_t);
	listp = (mde_cookie_t *)kmem_zalloc(listsz, KM_SLEEP);

	num_devs = md_scan_dag(mdp, mde, md_find_name(mdp, "virtual-device"),
	    md_find_name(mdp, "back"), listp);
	ASSERT(num_devs <= 1);
	if (num_devs <= 0) {
		DWARN("cnex_find_chan_dip:channel(0x%llx): "
		    "No virtual-device found\n", chan_id);
		goto fdip_exit;
	}
	if (md_get_prop_str(mdp, listp[0], "name", &md_name) != 0) {
		DWARN("cnex_find_chan_dip:channel(0x%llx): "
		    "name property not found\n", chan_id);
		goto fdip_exit;
	}

	D1("cnex_find_chan_dip: channel(0x%llx): virtual-device "
	    "name property value = %s\n", chan_id, md_name);

	if (md_get_prop_val(mdp, listp[0], "cfg-handle", &cfghdl) != 0) {
		DWARN("cnex_find_chan_dip:channel(0x%llx): virtual-device's "
		    "cfg-handle property not found\n", chan_id);
		goto fdip_exit;
	}

	D1("cnex_find_chan_dip:channel(0x%llx): virtual-device cfg-handle "
	    "property value = 0x%llx\n", chan_id, cfghdl);

	for (cdip = ddi_get_child(dip); cdip != NULL;
	    cdip = ddi_get_next_sibling(cdip)) {

		int *cnex_regspec;
		uint32_t reglen;
		char	*dev_name;

		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS, "name",
		    &dev_name) != DDI_PROP_SUCCESS) {
			DWARN("cnex_find_chan_dip: name property not"
			    " found for dip(0x%p)\n", cdip);
			continue;
		}
		if (strcmp(md_name, dev_name) != 0) {
			ddi_prop_free(dev_name);
			continue;
		}
		ddi_prop_free(dev_name);
		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS, "reg",
		    &cnex_regspec, &reglen) != DDI_SUCCESS) {
			DWARN("cnex_find_chan_dip: reg property not"
			    " found for dip(0x%p)\n", cdip);
			continue;
		}
		if (*cnex_regspec == cfghdl) {
			D1("cnex_find_chan_dip:channel(0x%llx): found "
			    "dip(0x%p) drvname=%s\n", chan_id, cdip,
			    ddi_driver_name(cdip));
			ddi_prop_free(cnex_regspec);
			break;
		}
		ddi_prop_free(cnex_regspec);
	}

fdip_exit:
	if (cdip == NULL) {
		/*
		 * If a virtual-device node exists but no dip found,
		 * then for now print a DEBUG error message only.
		 */
		if (num_devs > 0) {
			DERR("cnex_find_chan_dip:channel(0x%llx): "
			    "No device found\n", chan_id);
		}

		/* If no dip was found, return cnex device's dip. */
		cdip = dip;
	}

	kmem_free(listp, listsz);
	D1("cnex_find_chan_dip:channel(0x%llx): returning dip=0x%p\n",
	    chan_id, cdip);
	return (cdip);
}

/* -------------------------------------------------------------------------- */
