/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Logical domain channel devices are devices implemented entirely
 * in software; cnex is the nexus for channel-devices. They use
 * the HV channel interfaces via the LDC transport module to send
 * and receive data and to register callbacks.
 */

#include <sys/types.h>
#include <sys/cmn_err.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/devops.h>
#include <sys/instance.h>
#include <sys/modctl.h>
#include <sys/open.h>
#include <sys/stat.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/systm.h>
#include <sys/mkdev.h>
#include <sys/machsystm.h>
#include <sys/intr.h>
#include <sys/ddi_intr_impl.h>
#include <sys/ivintr.h>
#include <sys/hypervisor_api.h>
#include <sys/ldc.h>
#include <sys/cnex.h>
#include <sys/mach_descrip.h>

/*
 * Internal functions/information
 */
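/*
 * Table mapping each LDC device class to the PIL at which its Tx/Rx
 * interrupt handlers are registered (see cnex_add_intr/cnex_rem_intr).
 */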
static struct cnex_pil_map cnex_class_to_pil[] = {
	{LDC_DEV_GENERIC,	PIL_3},
	{LDC_DEV_BLK,		PIL_4},
	{LDC_DEV_BLK_SVC,	PIL_3},
	{LDC_DEV_NT,		PIL_6},
	{LDC_DEV_NT_SVC,	PIL_4},
	{LDC_DEV_SERIAL,	PIL_6}
};
#define	CNEX_MAX_DEVS (sizeof (cnex_class_to_pil) / \
				sizeof (cnex_class_to_pil[0]))

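/*
 * Extract the sun4v configuration handle from the upper 32 bits of a
 * "reg" property address, clearing the topmost nibble (bits 28-31 of
 * the shifted value).
 */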
#define	SUN4V_REG_SPEC2CFG_HDL(x)	((x >> 32) & ~(0xfull << 28))

static clock_t cnex_wait_usecs = 1000; /* wait time in usecs */
static int cnex_wait_retries = 3;
static void *cnex_state;

static void cnex_intr_redist(void *arg);
static uint_t cnex_intr_wrapper(caddr_t arg);

/*
 * Debug info
 */
#ifdef DEBUG

/*
 * Print debug messages
 *
 * set cnexdbg to 0xf for enabling all msgs
 * 0x8 - Errors
 * 0x4 - Warnings
 * 0x2 - All debug messages
 * 0x1 - Minimal debug messages
 */

int cnexdbg = 0x8;

static void
cnexdebug(const char *fmt, ...)
{
	char buf[512];
	va_list ap;

	va_start(ap, fmt);
	(void) vsprintf(buf, fmt, ap);
	va_end(ap);

	cmn_err(CE_CONT, "%s\n", buf);
}

#define	D1		\
if (cnexdbg & 0x01)	\
	cnexdebug

#define	D2		\
if (cnexdbg & 0x02)	\
	cnexdebug

#define	DWARN		\
if (cnexdbg & 0x04)	\
	cnexdebug

#define	DERR		\
if (cnexdbg & 0x08)	\
	cnexdebug

#else

#define	D1
#define	D2
#define	DWARN
#define	DERR

#endif

/*
 * Config information
 */
static int cnex_attach(dev_info_t *, ddi_attach_cmd_t);
static int cnex_detach(dev_info_t *, ddi_detach_cmd_t);
static int cnex_open(dev_t *, int, int, cred_t *);
static int cnex_close(dev_t, int, int, cred_t *);
static int cnex_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int cnex_ctl(dev_info_t *, dev_info_t *, ddi_ctl_enum_t, void *,
    void *);

static struct bus_ops cnex_bus_ops = {
	BUSO_REV,
	nullbusmap,		/* bus_map */
	NULL,			/* bus_get_intrspec */
	NULL,			/* bus_add_intrspec */
	NULL,			/* bus_remove_intrspec */
	i_ddi_map_fault,	/* bus_map_fault */
	ddi_no_dma_map,		/* bus_dma_map */
	ddi_no_dma_allochdl,	/* bus_dma_allochdl */
	NULL,			/* bus_dma_freehdl */
	NULL,			/* bus_dma_bindhdl */
	NULL,			/* bus_dma_unbindhdl */
	NULL,			/* bus_dma_flush */
	NULL,			/* bus_dma_win */
	NULL,			/* bus_dma_ctl */
	cnex_ctl,		/* bus_ctl */
	ddi_bus_prop_op,	/* bus_prop_op */
	0,			/* bus_get_eventcookie */
	0,			/* bus_add_eventcall */
	0,			/* bus_remove_eventcall	*/
	0,			/* bus_post_event */
	NULL,			/* bus_intr_ctl */
	NULL,			/* bus_config */
	NULL,			/* bus_unconfig */
	NULL,			/* bus_fm_init */
	NULL,			/* bus_fm_fini */
	NULL,			/* bus_fm_access_enter */
	NULL,			/* bus_fm_access_exit */
	NULL,			/* bus_power */
	NULL			/* bus_intr_op */
};

static struct cb_ops cnex_cb_ops = {
	cnex_open,			/* open */
	cnex_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	cnex_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* poll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab  */
	D_MP | D_NEW | D_HOTPLUG	/* Driver compatibility flag */
};

static struct dev_ops cnex_ops = {
	DEVO_REV,		/* devo_rev, */
	0,			/* refcnt  */
	ddi_getinfo_1to1,	/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	cnex_attach,		/* attach */
	cnex_detach,		/* detach */
	nodev,			/* reset */
	&cnex_cb_ops,		/* driver operations */
	&cnex_bus_ops,		/* bus operations */
	nulldev			/* power */
};

/*
 * Module linkage information for the kernel.
 */
static struct modldrv modldrv = {
	&mod_driverops,
	"sun4v channel-devices nexus %I%",
	&cnex_ops,
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};

int
_init(void)
{
	int err;

	if ((err = ddi_soft_state_init(&cnex_state,
	    sizeof (cnex_soft_state_t), 0)) != 0) {
		return (err);
	}
	if ((err = mod_install(&modlinkage)) != 0) {
		ddi_soft_state_fini(&cnex_state);
		return (err);
	}
	return (0);
}

int
_fini(void)
{
	int err;

	if ((err = mod_remove(&modlinkage)) != 0)
		return (err);
	ddi_soft_state_fini(&cnex_state);
	return (0);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Callback function invoked by the interrupt redistribution
 * framework. It redirects each registered channel's Tx and Rx
 * interrupts to a CPU that is currently available in the system.
 */
static void
cnex_intr_redist(void *arg)
{
	cnex_ldc_t		*cldcp;
	cnex_soft_state_t	*cnex_ssp = arg;
	int			intr_state;
	uint64_t		cpuid;
	int			rv, retries = 0;

	ASSERT(cnex_ssp != NULL);
	mutex_enter(&cnex_ssp->clist_lock);

	cldcp = cnex_ssp->clist;
	while (cldcp != NULL) {

		mutex_enter(&cldcp->lock);

		if (cldcp->tx.hdlr) {
			/*
			 * Don't do anything for disabled interrupts.
			 */
			rv = hvldc_intr_getvalid(cnex_ssp->cfghdl,
			    cldcp->tx.ino, &intr_state);
			if (rv) {
				DWARN("cnex_intr_redist: tx ino=0x%llx, "
				    "can't get valid\n", cldcp->tx.ino);
				mutex_exit(&cldcp->lock);
				mutex_exit(&cnex_ssp->clist_lock);
				return;
			}
			if (intr_state == HV_INTR_NOTVALID) {
				mutex_exit(&cldcp->lock);
				cldcp = cldcp->next;
				continue;
			}

			cpuid = intr_dist_cpuid();

			/* disable interrupts */
			rv = hvldc_intr_setvalid(cnex_ssp->cfghdl,
			    cldcp->tx.ino, HV_INTR_NOTVALID);
			if (rv) {
				DWARN("cnex_intr_redist: tx ino=0x%llx, "
				    "can't set valid\n", cldcp->tx.ino);
				mutex_exit(&cldcp->lock);
				mutex_exit(&cnex_ssp->clist_lock);
				return;
			}

			/*
			 * Make a best effort to wait for pending interrupts
			 * to finish. There is not much we can do if we time out.
			 */
			retries = 0;

			do {
				rv = hvldc_intr_getstate(cnex_ssp->cfghdl,
				    cldcp->tx.ino, &intr_state);
				if (rv) {
					DWARN("cnex_intr_redist: tx ino=0x%llx,"
					    "can't get state\n", cldcp->tx.ino);
					mutex_exit(&cldcp->lock);
					mutex_exit(&cnex_ssp->clist_lock);
					return;
				}

				if (intr_state != HV_INTR_DELIVERED_STATE)
					break;

				drv_usecwait(cnex_wait_usecs);

			} while (!panicstr && ++retries <= cnex_wait_retries);

			(void) hvldc_intr_settarget(cnex_ssp->cfghdl,
			    cldcp->tx.ino, cpuid);
			(void) hvldc_intr_setvalid(cnex_ssp->cfghdl,
			    cldcp->tx.ino, HV_INTR_VALID);
		}

		if (cldcp->rx.hdlr) {
			/*
			 * Don't do anything for disabled interrupts.
			 */
			rv = hvldc_intr_getvalid(cnex_ssp->cfghdl,
			    cldcp->rx.ino, &intr_state);
			if (rv) {
				DWARN("cnex_intr_redist: rx ino=0x%llx, "
				    "can't get valid\n", cldcp->rx.ino);
				mutex_exit(&cldcp->lock);
				mutex_exit(&cnex_ssp->clist_lock);
				return;
			}
			if (intr_state == HV_INTR_NOTVALID) {
				mutex_exit(&cldcp->lock);
				cldcp = cldcp->next;
				continue;
			}

			cpuid = intr_dist_cpuid();

			/* disable interrupts */
			rv = hvldc_intr_setvalid(cnex_ssp->cfghdl,
			    cldcp->rx.ino, HV_INTR_NOTVALID);
			if (rv) {
				DWARN("cnex_intr_redist: rx ino=0x%llx, "
				    "can't set valid\n", cldcp->rx.ino);
				mutex_exit(&cldcp->lock);
				mutex_exit(&cnex_ssp->clist_lock);
				return;
			}

			/*
			 * Make a best effort to wait for pending interrupts
			 * to finish. There is not much we can do if we time out.
			 */
			retries = 0;

			do {
				rv = hvldc_intr_getstate(cnex_ssp->cfghdl,
				    cldcp->rx.ino, &intr_state);
				if (rv) {
					DWARN("cnex_intr_redist: rx ino=0x%llx,"
					    "can't get state\n", cldcp->rx.ino);
					mutex_exit(&cldcp->lock);
					mutex_exit(&cnex_ssp->clist_lock);
					return;
				}

				if (intr_state != HV_INTR_DELIVERED_STATE)
					break;

				drv_usecwait(cnex_wait_usecs);

			} while (!panicstr && ++retries <= cnex_wait_retries);

			(void) hvldc_intr_settarget(cnex_ssp->cfghdl,
			    cldcp->rx.ino, cpuid);
			(void) hvldc_intr_setvalid(cnex_ssp->cfghdl,
			    cldcp->rx.ino, HV_INTR_VALID);
		}

		mutex_exit(&cldcp->lock);

		/* next channel */
		cldcp = cldcp->next;
	}

	mutex_exit(&cnex_ssp->clist_lock);
}

/*
 * Exported interface to register an LDC endpoint with
 * the channel nexus
 */
static int
cnex_reg_chan(dev_info_t *dip, uint64_t id, ldc_dev_t devclass)
{
	int		idx;
	cnex_ldc_t	*cldcp;
	int		listsz, num_nodes, num_channels;
	md_t		*mdp = NULL;
	mde_cookie_t	rootnode, *listp = NULL;
	uint64_t	tmp_id;
	uint64_t	rxino = (uint64_t)-1;
	uint64_t	txino = (uint64_t)-1;
	cnex_soft_state_t *cnex_ssp;
	int		status, instance;

	/* Get device instance and structure */
	instance = ddi_get_instance(dip);
	cnex_ssp = ddi_get_soft_state(cnex_state, instance);

	/* Check to see if channel is already registered */
	mutex_enter(&cnex_ssp->clist_lock);
	cldcp = cnex_ssp->clist;
	while (cldcp) {
		if (cldcp->id == id) {
			DWARN("cnex_reg_chan: channel 0x%llx exists\n", id);
			mutex_exit(&cnex_ssp->clist_lock);
			return (EINVAL);
		}
		cldcp = cldcp->next;
	}

	/* Get the Tx/Rx inos from the MD */
	if ((mdp = md_get_handle()) == NULL) {
		DWARN("cnex_reg_chan: cannot init MD\n");
		mutex_exit(&cnex_ssp->clist_lock);
		return (ENXIO);
	}
	num_nodes = md_node_count(mdp);
	ASSERT(num_nodes > 0);

	listsz = num_nodes * sizeof (mde_cookie_t);
	listp = (mde_cookie_t *)kmem_zalloc(listsz, KM_SLEEP);

	rootnode = md_root_node(mdp);

	/* search for all channel_endpoint nodes */
	num_channels = md_scan_dag(mdp, rootnode,
	    md_find_name(mdp, "channel-endpoint"),
	    md_find_name(mdp, "fwd"), listp);
	if (num_channels <= 0) {
		DWARN("cnex_reg_chan: no channel-endpoint nodes found\n");
		kmem_free(listp, listsz);
		(void) md_fini_handle(mdp);
		mutex_exit(&cnex_ssp->clist_lock);
		return (EINVAL);
	}

	for (idx = 0; idx < num_channels; idx++) {

		/* Get the channel ID */
		status = md_get_prop_val(mdp, listp[idx], "id", &tmp_id);
		if (status) {
			DWARN("cnex_reg_chan: cannot read LDC ID\n");
			kmem_free(listp, listsz);
			(void) md_fini_handle(mdp);
			mutex_exit(&cnex_ssp->clist_lock);
			return (ENXIO);
		}
		if (tmp_id != id)
			continue;

		/* Get the Tx and Rx ino */
		status = md_get_prop_val(mdp, listp[idx], "tx-ino", &txino);
		if (status) {
			DWARN("cnex_reg_chan: cannot read Tx ino\n");
			kmem_free(listp, listsz);
			(void) md_fini_handle(mdp);
			mutex_exit(&cnex_ssp->clist_lock);
			return (ENXIO);
		}
		status = md_get_prop_val(mdp, listp[idx], "rx-ino", &rxino);
		if (status) {
			DWARN("cnex_reg_chan: cannot read Rx ino\n");
			kmem_free(listp, listsz);
			(void) md_fini_handle(mdp);
			mutex_exit(&cnex_ssp->clist_lock);
			return (ENXIO);
		}
	}
	kmem_free(listp, listsz);
	(void) md_fini_handle(mdp);

	/*
	 * check to see if we looped through the list of channel IDs without
	 * matching one (i.e. an 'ino' has not been initialised).
	 */
	if ((rxino == -1) || (txino == -1)) {
		DERR("cnex_reg_chan: no ID matching '%llx' in MD\n", id);
		mutex_exit(&cnex_ssp->clist_lock);
		return (ENOENT);
	}

	/* Allocate a new channel structure */
	cldcp = kmem_zalloc(sizeof (*cldcp), KM_SLEEP);

	/* Initialize the channel */
	mutex_init(&cldcp->lock, NULL, MUTEX_DRIVER, NULL);

	cldcp->id = id;
	cldcp->tx.ino = txino;
	cldcp->rx.ino = rxino;
	cldcp->devclass = devclass;

	/* add channel to nexus channel list */
	cldcp->next = cnex_ssp->clist;
	cnex_ssp->clist = cldcp;

	mutex_exit(&cnex_ssp->clist_lock);

	return (0);
}

/*
 * Add Tx/Rx interrupt handler for the channel
 */
static int
cnex_add_intr(dev_info_t *dip, uint64_t id, cnex_intrtype_t itype,
    uint_t (*hdlr)(), caddr_t arg1, caddr_t arg2)
{
	int		rv, idx, pil;
	cnex_ldc_t	*cldcp;
	cnex_intr_t	*iinfo;
	uint64_t	cpuid;
	cnex_soft_state_t *cnex_ssp;
	int		instance;

	/* Get device instance and structure */
	instance = ddi_get_instance(dip);
	cnex_ssp = ddi_get_soft_state(cnex_state, instance);

	/* get channel info */
	mutex_enter(&cnex_ssp->clist_lock);
	cldcp = cnex_ssp->clist;
	while (cldcp) {
		if (cldcp->id == id)
			break;
		cldcp = cldcp->next;
	}
	if (cldcp == NULL) {
		DWARN("cnex_add_intr: channel 0x%llx does not exist\n", id);
		mutex_exit(&cnex_ssp->clist_lock);
		return (EINVAL);
	}
	mutex_exit(&cnex_ssp->clist_lock);

	/* get channel lock */
	mutex_enter(&cldcp->lock);

	/* get interrupt type */
	if (itype == CNEX_TX_INTR) {
		iinfo = &(cldcp->tx);
	} else if (itype == CNEX_RX_INTR) {
		iinfo = &(cldcp->rx);
	} else {
		DWARN("cnex_add_intr: invalid interrupt type\n");
		mutex_exit(&cldcp->lock);
		return (EINVAL);
	}

	/* check if a handler is already added */
	if (iinfo->hdlr != 0) {
		DWARN("cnex_add_intr: interrupt handler exists\n");
		mutex_exit(&cldcp->lock);
		return (EINVAL);
	}

	/* save interrupt handler info */
	iinfo->hdlr = hdlr;
	iinfo->arg1 = arg1;
	iinfo->arg2 = arg2;

	iinfo->ssp = cnex_ssp;

	/*
	 * FIXME - generate the interrupt cookie
	 * using the interrupt registry
	 */
	iinfo->icookie = cnex_ssp->cfghdl | iinfo->ino;

	D1("cnex_add_intr: add hdlr, cfghdl=0x%llx, ino=0x%llx, "
	    "cookie=0x%llx\n", cnex_ssp->cfghdl, iinfo->ino, iinfo->icookie);

	/* Pick a PIL on the basis of the channel's devclass */
	for (idx = 0, pil = PIL_3; idx < CNEX_MAX_DEVS; idx++) {
		if (cldcp->devclass == cnex_class_to_pil[idx].devclass) {
			pil = cnex_class_to_pil[idx].pil;
			break;
		}
	}

	/* add interrupt to solaris ivec table */
	VERIFY(add_ivintr(iinfo->icookie, pil, (intrfunc)cnex_intr_wrapper,
	    (caddr_t)iinfo, NULL, NULL) == 0);

	/* set the cookie in the HV */
	rv = hvldc_intr_setcookie(cnex_ssp->cfghdl, iinfo->ino, iinfo->icookie);
	if (rv) {
		DWARN("cnex_add_intr: ino=0x%llx, cannot set cookie\n",
		    iinfo->ino);
		goto hv_error;
	}

	/* pick next CPU in the domain for this channel */
	cpuid = intr_dist_cpuid();

	/* set the target CPU and then enable interrupts */
	rv = hvldc_intr_settarget(cnex_ssp->cfghdl, iinfo->ino, cpuid);
	if (rv) {
		DWARN("cnex_add_intr: ino=0x%llx, cannot set target cpu\n",
		    iinfo->ino);
		goto hv_error;
	}
	rv = hvldc_intr_setstate(cnex_ssp->cfghdl, iinfo->ino,
	    HV_INTR_IDLE_STATE);
	if (rv) {
		DWARN("cnex_add_intr: ino=0x%llx, cannot set state\n",
		    iinfo->ino);
		goto hv_error;
	}
	rv = hvldc_intr_setvalid(cnex_ssp->cfghdl, iinfo->ino, HV_INTR_VALID);
	if (rv) {
		DWARN("cnex_add_intr: ino=0x%llx, cannot set valid\n",
		    iinfo->ino);
		goto hv_error;
	}

	mutex_exit(&cldcp->lock);
	return (0);

hv_error:
	(void) rem_ivintr(iinfo->icookie, pil);
	mutex_exit(&cldcp->lock);
	return (ENXIO);
}


/*
 * Exported interface to unregister an LDC endpoint with
 * the channel nexus
 */
static int
cnex_unreg_chan(dev_info_t *dip, uint64_t id)
{
	cnex_ldc_t	*cldcp, *prev_cldcp;
	cnex_soft_state_t *cnex_ssp;
	int		instance;

	/* Get device instance and structure */
	instance = ddi_get_instance(dip);
	cnex_ssp = ddi_get_soft_state(cnex_state, instance);

	/* find and remove channel from list */
	mutex_enter(&cnex_ssp->clist_lock);
	prev_cldcp = NULL;
	cldcp = cnex_ssp->clist;
	while (cldcp) {
		if (cldcp->id == id)
			break;
		prev_cldcp = cldcp;
		cldcp = cldcp->next;
	}

	if (cldcp == 0) {
		DWARN("cnex_unreg_chan: invalid channel 0x%llx\n", id);
		mutex_exit(&cnex_ssp->clist_lock);
		return (EINVAL);
	}

	if (cldcp->tx.hdlr || cldcp->rx.hdlr) {
		DWARN("cnex_unreg_chan: handlers still exist: chan %lx\n", id);
		mutex_exit(&cnex_ssp->clist_lock);
		return (ENXIO);
	}

	if (prev_cldcp)
		prev_cldcp->next = cldcp->next;
	else
		cnex_ssp->clist = cldcp->next;

	mutex_exit(&cnex_ssp->clist_lock);

	/* destroy mutex */
	mutex_destroy(&cldcp->lock);

	/* free channel */
	kmem_free(cldcp, sizeof (*cldcp));

	return (0);
}

/*
 * Remove Tx/Rx interrupt handler for the channel
 */
static int
cnex_rem_intr(dev_info_t *dip, uint64_t id, cnex_intrtype_t itype)
{
	int			rv, idx, pil;
	cnex_ldc_t		*cldcp;
	cnex_intr_t		*iinfo;
	cnex_soft_state_t	*cnex_ssp;
	int			instance, istate;

	/* Get device instance and structure */
	instance = ddi_get_instance(dip);
	cnex_ssp = ddi_get_soft_state(cnex_state, instance);

	/* get channel info */
	mutex_enter(&cnex_ssp->clist_lock);
	cldcp = cnex_ssp->clist;
	while (cldcp) {
		if (cldcp->id == id)
			break;
		cldcp = cldcp->next;
	}
	if (cldcp == NULL) {
		DWARN("cnex_rem_intr: channel 0x%llx does not exist\n", id);
		mutex_exit(&cnex_ssp->clist_lock);
		return (EINVAL);
	}
	mutex_exit(&cnex_ssp->clist_lock);

	/* get rid of the channel intr handler */
	mutex_enter(&cldcp->lock);

	/* get interrupt type */
	if (itype == CNEX_TX_INTR) {
		iinfo = &(cldcp->tx);
	} else if (itype == CNEX_RX_INTR) {
		iinfo = &(cldcp->rx);
	} else {
		DWARN("cnex_rem_intr: invalid interrupt type\n");
		mutex_exit(&cldcp->lock);
		return (EINVAL);
	}

	D1("cnex_rem_intr: interrupt ino=0x%x\n", iinfo->ino);

	/* check if a handler is already added */
	if (iinfo->hdlr == 0) {
		DWARN("cnex_rem_intr: interrupt handler does not exist\n");
		mutex_exit(&cldcp->lock);
		return (EINVAL);
	}

	D1("cnex_rem_intr: set intr to invalid ino=0x%x\n", iinfo->ino);
	rv = hvldc_intr_setvalid(cnex_ssp->cfghdl,
	    iinfo->ino, HV_INTR_NOTVALID);
	if (rv) {
		DWARN("cnex_rem_intr: cannot set valid ino=%x\n", iinfo->ino);
		mutex_exit(&cldcp->lock);
		return (ENXIO);
	}

	/*
	 * Check if there are pending interrupts. If interrupts are
	 * pending return EAGAIN.
	 */
	rv = hvldc_intr_getstate(cnex_ssp->cfghdl, iinfo->ino, &istate);
	if (rv) {
		DWARN("cnex_rem_intr: ino=0x%llx, cannot get state\n",
		    iinfo->ino);
		mutex_exit(&cldcp->lock);
		return (ENXIO);
	}

	/* if interrupts are still pending print warning */
	if (istate != HV_INTR_IDLE_STATE) {
		DWARN("cnex_rem_intr: cannot remove intr busy ino=%x\n",
		    iinfo->ino);
		mutex_exit(&cldcp->lock);
		return (EAGAIN);
	}

	/* Pick a PIL on the basis of the channel's devclass */
	for (idx = 0, pil = PIL_3; idx < CNEX_MAX_DEVS; idx++) {
		if (cldcp->devclass == cnex_class_to_pil[idx].devclass) {
			pil = cnex_class_to_pil[idx].pil;
			break;
		}
	}

	/* remove interrupt */
	(void) rem_ivintr(iinfo->icookie, pil);

	/* clear interrupt info */
	bzero(iinfo, sizeof (*iinfo));

	mutex_exit(&cldcp->lock);

	return (0);
}


/*
 * Clear pending Tx/Rx interrupt
 */
static int
cnex_clr_intr(dev_info_t *dip, uint64_t id, cnex_intrtype_t itype)
{
	int			rv;
	cnex_ldc_t		*cldcp;
	cnex_intr_t		*iinfo;
	cnex_soft_state_t	*cnex_ssp;
	int			instance;

	/* Get device instance and structure */
	instance = ddi_get_instance(dip);
	cnex_ssp = ddi_get_soft_state(cnex_state, instance);

	/* get channel info */
	mutex_enter(&cnex_ssp->clist_lock);
	cldcp = cnex_ssp->clist;
	while (cldcp) {
		if (cldcp->id == id)
			break;
		cldcp = cldcp->next;
	}
	if (cldcp == NULL) {
		DWARN("cnex_clr_intr: channel 0x%llx does not exist\n", id);
		mutex_exit(&cnex_ssp->clist_lock);
		return (EINVAL);
	}
	mutex_exit(&cnex_ssp->clist_lock);

	mutex_enter(&cldcp->lock);

	/* get interrupt type */
	if (itype == CNEX_TX_INTR) {
		iinfo = &(cldcp->tx);
	} else if (itype == CNEX_RX_INTR) {
		iinfo = &(cldcp->rx);
	} else {
		DWARN("cnex_clr_intr: invalid interrupt type\n");
		mutex_exit(&cldcp->lock);
		return (EINVAL);
	}

	D1("cnex_clr_intr: interrupt ino=0x%x\n", iinfo->ino);

	/* check if a handler is already added */
	if (iinfo->hdlr == 0) {
		DWARN("cnex_clr_intr: interrupt handler does not exist\n");
		mutex_exit(&cldcp->lock);
		return (EINVAL);
	}

	rv = hvldc_intr_setstate(cnex_ssp->cfghdl, iinfo->ino,
	    HV_INTR_IDLE_STATE);
	if (rv) {
		DWARN("cnex_clr_intr: cannot clear interrupt state\n");
		mutex_exit(&cldcp->lock);
		return (ENXIO);
	}

	mutex_exit(&cldcp->lock);

	return (0);
}

/*
 * Channel nexus interrupt handler wrapper
 */
static uint_t
cnex_intr_wrapper(caddr_t arg)
{
	int			res;
	uint_t			(*handler)();
	caddr_t			handler_arg1;
	caddr_t			handler_arg2;
	cnex_intr_t		*iinfo = (cnex_intr_t *)arg;

	ASSERT(iinfo != NULL);

	handler = iinfo->hdlr;
	handler_arg1 = iinfo->arg1;
	handler_arg2 = iinfo->arg2;

	D1("cnex_intr_wrapper: ino=0x%llx invoke client handler\n", iinfo->ino);
	res = (*handler)(handler_arg1, handler_arg2);

	return (res);
}

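/*
 * attach(9E) entry point. Allocates the per-instance soft state, derives
 * the HV config handle from the "reg" property, registers the channel ops
 * with the LDC module, creates the devctl minor node and adds the
 * interrupt redistribution callback.
 */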
/*ARGSUSED*/
static int
cnex_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	int		rv, instance, reglen;
	cnex_regspec_t	*reg_p;
	ldc_cnex_t	cinfo;
	cnex_soft_state_t *cnex_ssp;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	/*
	 * Get the instance specific soft state structure.
	 * Save the devi for this instance in the soft_state data.
	 */
	instance = ddi_get_instance(devi);
	if (ddi_soft_state_zalloc(cnex_state, instance) != DDI_SUCCESS)
		return (DDI_FAILURE);
	cnex_ssp = ddi_get_soft_state(cnex_state, instance);

	cnex_ssp->devi = devi;
	cnex_ssp->clist = NULL;

	if (ddi_getlongprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    "reg", (caddr_t)&reg_p, &reglen) != DDI_SUCCESS) {
		ddi_soft_state_free(cnex_state, instance);
		return (DDI_FAILURE);
	}

	/* get the sun4v config handle for this device */
	cnex_ssp->cfghdl = SUN4V_REG_SPEC2CFG_HDL(reg_p->physaddr);
	kmem_free(reg_p, reglen);

	D1("cnex_attach: cfghdl=0x%llx\n", cnex_ssp->cfghdl);

	/* init channel list mutex */
	mutex_init(&cnex_ssp->clist_lock, NULL, MUTEX_DRIVER, NULL);

	/* Register with LDC module */
	cinfo.dip = devi;
	cinfo.reg_chan = cnex_reg_chan;
	cinfo.unreg_chan = cnex_unreg_chan;
	cinfo.add_intr = cnex_add_intr;
	cinfo.rem_intr = cnex_rem_intr;
	cinfo.clr_intr = cnex_clr_intr;

	/*
	 * ldc_register() will fail if another nexus instance has already
	 * registered with the LDC framework.
	 */
	rv = ldc_register(&cinfo);
	if (rv) {
		DWARN("cnex_attach: unable to register with LDC\n");
		mutex_destroy(&cnex_ssp->clist_lock);
		ddi_soft_state_free(cnex_state, instance);
		return (DDI_FAILURE);
	}

	if (ddi_create_minor_node(devi, "devctl", S_IFCHR, instance,
	    DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
		ddi_remove_minor_node(devi, NULL);
		(void) ldc_unregister(&cinfo);
		mutex_destroy(&cnex_ssp->clist_lock);
		ddi_soft_state_free(cnex_state, instance);
		return (DDI_FAILURE);
	}

	/* Add interrupt redistribution callback. */
	intr_dist_add(cnex_intr_redist, cnex_ssp);

	ddi_report_dev(devi);
	return (DDI_SUCCESS);
}

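/*
 * detach(9E) entry point. Fails if any channels are still registered;
 * otherwise unregisters from the LDC module, removes the interrupt
 * redistribution callback and frees the soft state.
 */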
/*ARGSUSED*/
static int
cnex_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	int		instance;
	ldc_cnex_t	cinfo;
	cnex_soft_state_t *cnex_ssp;

	switch (cmd) {
	case DDI_DETACH:
		break;
	case DDI_SUSPEND:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	instance = ddi_get_instance(devi);
	cnex_ssp = ddi_get_soft_state(cnex_state, instance);

	/* check if there are any channels still registered */
	if (cnex_ssp->clist) {
		cmn_err(CE_WARN, "?cnex_detach: instance %d has registered "
		    "channels\n", ddi_get_instance(devi));
		return (DDI_FAILURE);
	}

	/* Unregister with LDC module */
	cinfo.dip = devi;
	(void) ldc_unregister(&cinfo);

	/* Remove interrupt redistribution callback. */
	intr_dist_rem(cnex_intr_redist, cnex_ssp);

	/* destroy mutex */
	mutex_destroy(&cnex_ssp->clist_lock);

	/* free soft state structure */
	ddi_soft_state_free(cnex_state, instance);

	return (DDI_SUCCESS);
}

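/*
 * open(9E) and close(9E) entry points for the devctl minor node.
 * Only character opens of an attached instance are allowed.
 */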
/*ARGSUSED*/
static int
cnex_open(dev_t *devp, int flags, int otyp, cred_t *credp)
{
	int instance;

	if (otyp != OTYP_CHR)
		return (EINVAL);

	instance = getminor(*devp);
	if (ddi_get_soft_state(cnex_state, instance) == NULL)
		return (ENXIO);

	return (0);
}

/*ARGSUSED*/
static int
cnex_close(dev_t dev, int flags, int otyp, cred_t *credp)
{
	int instance;

	if (otyp != OTYP_CHR)
		return (EINVAL);

	instance = getminor(dev);
	if (ddi_get_soft_state(cnex_state, instance) == NULL)
		return (ENXIO);

	return (0);
}

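/*
 * ioctl(9E) entry point. All devctl ioctls are forwarded to the generic
 * NDI devctl handler.
 */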
/*ARGSUSED*/
static int
cnex_ioctl(dev_t dev,
    int cmd, intptr_t arg, int mode, cred_t *cred_p, int *rval_p)
{
	int instance;
	cnex_soft_state_t *cnex_ssp;

	instance = getminor(dev);
	if ((cnex_ssp = ddi_get_soft_state(cnex_state, instance)) == NULL)
		return (ENXIO);
	ASSERT(cnex_ssp->devi);
	return (ndi_devctl_ioctl(cnex_ssp->devi, cmd, arg, mode, 0));
}

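/*
 * bus_ctl entry point. Handles child naming for INITCHILD/UNINITCHILD
 * using the child's "reg" property; most other ops are passed up to the
 * parent nexus.
 */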
static int
cnex_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop,
    void *arg, void *result)
{
	char		name[MAXNAMELEN];
	uint32_t	reglen;
	int		*cnex_regspec;

	switch (ctlop) {
	case DDI_CTLOPS_REPORTDEV:
		if (rdip == NULL)
			return (DDI_FAILURE);
		cmn_err(CE_CONT, "?channel-device: %s%d\n",
		    ddi_driver_name(rdip), ddi_get_instance(rdip));
		return (DDI_SUCCESS);

	case DDI_CTLOPS_INITCHILD:
	{
		dev_info_t *child = (dev_info_t *)arg;

		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, child,
		    DDI_PROP_DONTPASS, "reg",
		    &cnex_regspec, &reglen) != DDI_SUCCESS) {
			return (DDI_FAILURE);
		}

		(void) snprintf(name, sizeof (name), "%x", *cnex_regspec);
		ddi_set_name_addr(child, name);
		ddi_set_parent_data(child, NULL);
		ddi_prop_free(cnex_regspec);
		return (DDI_SUCCESS);
	}

	case DDI_CTLOPS_UNINITCHILD:
	{
		dev_info_t *child = (dev_info_t *)arg;

		NDI_CONFIG_DEBUG((CE_NOTE,
		    "DDI_CTLOPS_UNINITCHILD(%s, instance=%d)",
		    ddi_driver_name(child), DEVI(child)->devi_instance));

		ddi_set_name_addr(child, NULL);

		return (DDI_SUCCESS);
	}

	case DDI_CTLOPS_DMAPMAPC:
	case DDI_CTLOPS_REPORTINT:
	case DDI_CTLOPS_REGSIZE:
	case DDI_CTLOPS_NREGS:
	case DDI_CTLOPS_SIDDEV:
	case DDI_CTLOPS_SLAVEONLY:
	case DDI_CTLOPS_AFFINITY:
	case DDI_CTLOPS_POKE:
	case DDI_CTLOPS_PEEK:
		/*
		 * These ops correspond to functions that "shouldn't" be called
		 * by a channel-device driver.  So we whine when we're called.
		 */
		cmn_err(CE_WARN, "%s%d: invalid op (%d) from %s%d\n",
		    ddi_driver_name(dip), ddi_get_instance(dip), ctlop,
		    ddi_driver_name(rdip), ddi_get_instance(rdip));
		return (DDI_FAILURE);

	case DDI_CTLOPS_ATTACH:
	case DDI_CTLOPS_BTOP:
	case DDI_CTLOPS_BTOPR:
	case DDI_CTLOPS_DETACH:
	case DDI_CTLOPS_DVMAPAGESIZE:
	case DDI_CTLOPS_IOMIN:
	case DDI_CTLOPS_POWER:
	case DDI_CTLOPS_PTOB:
	default:
		/*
		 * Everything else (e.g. PTOB/BTOP/BTOPR requests) we pass up
		 */
		return (ddi_ctlops(dip, rdip, ctlop, arg, result));
	}
}

/* -------------------------------------------------------------------------- */
1153