xref: /illumos-gate/usr/src/uts/sun4v/io/niumx/niumx.c (revision 150d2c5288c645a1c1a7d2bee61199a3729406c7)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  *	Niagara2 Network Interface Unit (NIU) Nexus Driver
30  */
31 
32 #include <sys/conf.h>
33 #include <sys/modctl.h>
34 #include <sys/ddi_impldefs.h>
35 #include <sys/ddi_subrdefs.h>
36 #include <sys/ddi.h>
37 #include <sys/sunndi.h>
38 #include <sys/sunddi.h>
39 #include <sys/open.h>
40 #include <sys/stat.h>
41 #include <sys/file.h>
42 #include <sys/machsystm.h>
43 #include <sys/hsvc.h>
44 #include <sys/sdt.h>
45 #include <sys/hypervisor_api.h>
46 #include "niumx_var.h"
47 
48 
49 static int niumx_intr_ops(dev_info_t *dip, dev_info_t *rdip,
50 	ddi_intr_op_t intr_op, ddi_intr_handle_impl_t *hdlp, void *result);
51 static int niumx_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
52 static int niumx_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
53 static int niumx_set_intr(dev_info_t *dip, dev_info_t *rdip,
54 	ddi_intr_handle_impl_t *hdlp, int valid);
55 static int niumx_add_intr(dev_info_t *dip, dev_info_t *rdip,
56 	ddi_intr_handle_impl_t *hdlp);
57 static int niumx_rem_intr(dev_info_t *dip, dev_info_t *rdip,
58 	ddi_intr_handle_impl_t *hdlp);
59 static uint_t niumx_intr_hdlr(void *arg);
60 static int niumx_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
61 	off_t offset, off_t len, caddr_t *addrp);
62 static int niumx_dma_allochdl(dev_info_t *dip, dev_info_t *rdip,
63 	ddi_dma_attr_t *attrp,
64 	int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep);
65 static int niumx_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
66 	ddi_dma_handle_t handlep);
67 static int niumx_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
68 	ddi_dma_handle_t handle, ddi_dma_req_t *dmareq,
69 	ddi_dma_cookie_t *cookiep, uint_t *ccountp);
70 static int niumx_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
71 	ddi_dma_handle_t handle);
72 static int niumx_ctlops(dev_info_t *dip, dev_info_t *rdip,
73 	ddi_ctl_enum_t op, void *arg, void *result);
74 
/*
 * Bus ops vector: this nexus implements register mapping, the four
 * DMA-handle operations, ctlops and the interrupt op; all other
 * entries are unsupported (0).
 */
static struct bus_ops niumx_bus_ops = {
	BUSO_REV,
	niumx_map,
	0,				/* (*bus_get_intrspec)();	*/
	0,				/* (*bus_add_intrspec)();	*/
	0,				/* (*bus_remove_intrspec)();	*/
	i_ddi_map_fault,
	0,				/* (*bus_dma_map)();		*/
	niumx_dma_allochdl,
	niumx_dma_freehdl,
	niumx_dma_bindhdl,
	niumx_dma_unbindhdl,
	0,				/* (*bus_dma_flush)();		*/
	0,				/* (*bus_dma_win)();		*/
	0,				/* (*bus_dma_ctl)();		*/
	niumx_ctlops,
	ddi_bus_prop_op,
	0,				/* (*bus_get_eventcookie)();    */
	0,				/* (*bus_add_eventcall)();	*/
	0,				/* (*bus_remove_eventcall)();   */
	0,				/* (*bus_post_event)();		*/
	0,				/* (*bus_intr_ctl)();		*/
	0,				/* (*bus_config)(); 		*/
	0,				/* (*bus_unconfig)(); 		*/
	0,				/* (*bus_fm_init)(); 		*/
	0,				/* (*bus_fm_fini)(); 		*/
	0,				/* (*bus_enter)()		*/
	0,				/* (*bus_exit)()		*/
	0,				/* (*bus_power)()		*/
	niumx_intr_ops			/* (*bus_intr_op)(); 		*/
};
106 
/* Device ops vector for this nexus; no cb_ops (not a leaf driver). */
static struct dev_ops niumx_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt  */
	ddi_no_info,		/* info */
	nulldev,		/* identify */
	0,			/* probe */
	niumx_attach,		/* attach */
	niumx_detach,		/* detach */
	nulldev,		/* reset */
	(struct cb_ops *)0,	/* driver operations */
	&niumx_bus_ops,		/* bus operations */
	0			/* power */
};
120 
/* Module linkage information for the kernel. */
static struct modldrv modldrv = {
	&mod_driverops, /* Type of module */
	"NIU Nexus Driver %I%",	/* %I% is replaced by SCCS at build time */
	&niumx_ops,	/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modldrv,	/* single driver linkage element */
	NULL
};
133 
/* per-instance soft state anchor, managed by ddi_soft_state_*() */
static void *niumx_state;
/* fixed, driver-global interrupt slot table indexed by devino */
static niumx_ih_t niumx_ihtable[NIUMX_MAX_INTRS];
136 
137 /*
138  * forward function declarations:
139  */
140 static void niumx_removechild(dev_info_t *);
141 static int niumx_initchild(dev_info_t *child);
142 
143 int
144 _init(void)
145 {
146 	int e;
147 	if ((e = ddi_soft_state_init(&niumx_state, sizeof (niumx_devstate_t),
148 	    1)) == 0 && (e = mod_install(&modlinkage)) != 0)
149 		ddi_soft_state_fini(&niumx_state);
150 	return (e);
151 }
152 
153 int
154 _fini(void)
155 {
156 	int e;
157 	if ((e = mod_remove(&modlinkage)) == 0)
158 		ddi_soft_state_fini(&niumx_state);
159 	return (e);
160 }
161 
/* _info: report module information to the kernel. */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
167 
/*
 * niumx_intr_dist: interrupt redistribution callback, registered via
 * intr_dist_add() in attach.  Walks the non-reserved slots of the
 * global ih table and retargets each valid, in-use interrupt to the
 * cpu chosen by intr_dist_cpuid().  'arg' is the per-instance
 * niumx_mutex, held across the whole walk.
 */
void
niumx_intr_dist(void *arg)
{
	kmutex_t 	*lock_p = (kmutex_t *)arg;
	int		i = NIUMX_RSVD_INTRS;	/* skip reserved slots */
	niumx_ih_t	*ih_p = niumx_ihtable + i;

	DBG(DBG_A_INTX, NULL, "niumx_intr_dist entered\n");
	mutex_enter(lock_p);
	for (; i < NIUMX_MAX_INTRS; i++, ih_p++) {
		sysino_t sysino = ih_p->ih_sysino;
		cpuid_t	cpuid;
		int	intr_state;
		/*
		 * Skip slots that are unused (sysino 0), whose state
		 * can't be read, that are not valid, or that already
		 * target the chosen cpu.  The short-circuit order
		 * matters: cpuid is only assigned on the last test.
		 */
		if (!sysino ||	/* sequence is significant */
		    (hvio_intr_getvalid(sysino, &intr_state) != H_EOK) ||
		    (intr_state == HV_INTR_NOTVALID) ||
		    (cpuid = intr_dist_cpuid()) == ih_p->ih_cpuid)
			continue;

		/* quiesce, retarget, then re-enable the interrupt */
		(void) hvio_intr_setvalid(sysino, HV_INTR_NOTVALID);
		(void) hvio_intr_settarget(sysino, cpuid);
		(void) hvio_intr_setvalid(sysino, HV_INTR_VALID);
		ih_p->ih_cpuid = cpuid;
	}
	mutex_exit(lock_p);
}
194 
/*
 * Hypervisor INTR services information for the NIU nexus driver.
 * niumx_intr_min_ver receives the minor version actually negotiated
 * by hsvc_register() during attach.
 */
static	uint64_t	niumx_intr_min_ver;   /* Neg. API minor version */
static hsvc_info_t niumx_hv_intr = {
	HSVC_REV_1, NULL, HSVC_GROUP_INTR, NIUMX_INTR_MAJOR_VER,
	NIUMX_INTR_MINOR_VER, "NIUMX"
};
203 
/*
 * niumx_attach: DDI attach entry point.
 *
 * For DDI_ATTACH: reads the "reg" property, allocates per-instance
 * soft state, negotiates the hypervisor interrupt API group, derives
 * the HV device handle from the first "reg" entry, and registers the
 * interrupt redistribution callback and FMA capabilities.  Error
 * paths unwind through the cleanup/prop_free/done labels in reverse
 * order of acquisition.  DDI_RESUME falls through and returns the
 * current value of ret (DDI_SUCCESS).
 */
static int
niumx_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int instance = ddi_get_instance(dip);
	niumx_devstate_t *niumxds_p;	/* devstate pointer */
	niu_regspec_t	*reg_p;
	uint_t		reglen;
	int		ret = DDI_SUCCESS;

	switch (cmd) {
	case DDI_ATTACH:
		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
			DDI_PROP_DONTPASS, "reg", (int **)&reg_p, &reglen)
				!= DDI_PROP_SUCCESS) {
			DBG(DBG_ATTACH, dip, "reg lookup failed\n");
			ret = DDI_FAILURE;
			goto done;
		}

		/*
		 * Allocate and get soft state structure.
		 */
		if (ddi_soft_state_zalloc(niumx_state, instance)
			!= DDI_SUCCESS) {
			ret = DDI_FAILURE;
			goto prop_free;
		}
		niumxds_p = (niumx_devstate_t *)ddi_get_soft_state(niumx_state,
							instance);
		niumxds_p->dip = dip;
		mutex_init(&niumxds_p->niumx_mutex, NULL, MUTEX_DRIVER, NULL);

		DBG(DBG_ATTACH, dip, "soft state alloc'd instance = %d, "
			"niumxds_p = %p\n", instance, niumxds_p);

		/*
		 * Negotiate the API version for HV INTR services.
		 */
		if ((ret = hsvc_register(&niumx_hv_intr, &niumx_intr_min_ver))
			!= H_EOK) {
		    cmn_err(CE_WARN, "%s: cannot negotiate hypervisor services "
		    "group: 0x%lx major: 0x%lx minor: 0x%lx errno: %d\n",
		    niumx_hv_intr.hsvc_modname, niumx_hv_intr.hsvc_group,
		    niumx_hv_intr.hsvc_major, niumx_hv_intr.hsvc_minor, ret);
		    ret = DDI_FAILURE;
		    goto cleanup;
		}

		DBG(DBG_ATTACH, dip, "neg. HV API major 0x%lx minor 0x%lx\n",
			niumx_hv_intr.hsvc_major, niumx_intr_min_ver);

		/* hv devhdl: low 28-bit of 1st "reg" entry's addr.hi */
		niumxds_p->niumx_dev_hdl = (devhandle_t)(reg_p->addr_high &
			NIUMX_DEVHDLE_MASK);

		/* add interrupt redistribution callback */
		intr_dist_add(niumx_intr_dist, &niumxds_p->niumx_mutex);

		niumxds_p->niumx_fm_cap = DDI_FM_EREPORT_CAPABLE |
			DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE;

		ddi_fm_init(niumxds_p->dip, &niumxds_p->niumx_fm_cap,
			&niumxds_p->niumx_fm_ibc);

		/* success also exits through prop_free to release "reg" */
		ret = DDI_SUCCESS;
		goto prop_free;
cleanup:
		mutex_destroy(&niumxds_p->niumx_mutex);
		ddi_soft_state_free(niumx_state, ddi_get_instance(dip));
prop_free:
		ddi_prop_free(reg_p);
done:
		return (ret);

	case DDI_RESUME:
	default:
		break;
	}
	return (ret);
}
284 
/*
 * niumx_detach: DDI detach entry point.
 *
 * For DDI_DETACH: unregisters the hypervisor service group, removes
 * the interrupt redistribution callback, tears down FMA and frees
 * the per-instance soft state.  DDI_SUSPEND is not supported and
 * returns DDI_FAILURE.
 */
static int
niumx_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	niumx_devstate_t *niumxds_p;

	switch (cmd) {
	case DDI_DETACH:
		(void) hsvc_unregister(&niumx_hv_intr);

		/*
		 * NOTE(review): the soft state pointer is used without a
		 * NULL check; presumably detach can only be called on a
		 * successfully attached instance — confirm.
		 */
		niumxds_p = (niumx_devstate_t *)
		    ddi_get_soft_state(niumx_state, ddi_get_instance(dip));

		intr_dist_rem(niumx_intr_dist, &niumxds_p->niumx_mutex);
		ddi_fm_fini(dip);
		mutex_destroy(&niumxds_p->niumx_mutex);
		ddi_soft_state_free(niumx_state, ddi_get_instance(dip));
		return (DDI_SUCCESS);

	case DDI_SUSPEND:
	default:
		break;
	}
	return (DDI_FAILURE);
}
309 
/*
 * niumx_map: bus_map entry point.
 *
 * Translates the child "reg" entry selected by mp->map_obj.rnumber
 * through this nexus' "ranges" property into a parent regspec, then
 * forwards the (duplicated) mapping request to the parent with
 * ddi_map().  Both properties are freed on every exit path.
 */
/*ARGSUSED*/
int
niumx_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
	off_t offset, off_t len, caddr_t *vaddrp)
{
	struct regspec p_regspec;
	ddi_map_req_t p_mapreq;
	niu_regspec_t	*reg_p;
	int 	i, rn = mp->map_obj.rnumber, reglen, rnglen, rngnum, ret;
	niumx_ranges_t	*rng_p;

	uint32_t	reg_begin, rng_begin;

	DBG(DBG_MAP, dip, "%s%d: mapping %s%d reg %d\n", NAMEINST(dip),
		NAMEINST(rdip), rn);

	/* fetch the child's "reg" property; rn indexes into it */
	if (ddi_getlongprop(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS,
		"reg", (caddr_t)&reg_p, &reglen) != DDI_SUCCESS)
		return (DDI_FAILURE);

	if (rn < 0 || (rn >= reglen / sizeof (niu_regspec_t))) {
		DBG(DBG_MAP, dip,  "rnumber out of range: %d\n", rn);
		kmem_free(reg_p, reglen);
		return (DDI_ME_RNUMBER_RANGE);
	}

	/* build regspec up for parent */
	p_mapreq = *mp;		/* dup the whole structure */
	p_mapreq.map_type = DDI_MT_REGSPEC;
	p_mapreq.map_obj.rp = &p_regspec;

	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "ranges",
		(caddr_t)&rng_p, &rnglen) != DDI_SUCCESS) {
			DBG(DBG_MAP,  dip, "%s%d: no ranges property\n",
				ddi_driver_name(dip), ddi_get_instance(dip));
			kmem_free(reg_p, reglen);
			return (DDI_FAILURE);
	}

	/*
	 * locate matching ranges record; note reg_p is advanced to the
	 * selected entry and rng_p walks forward — both are backed up
	 * again before being freed at 'err'.
	 */
	rngnum = rnglen / sizeof (niumx_ranges_t);
	for (i = 0, reg_p += rn; i < rngnum; rng_p++, i++) {
		if (reg_p->addr_high == rng_p->child_hi)
			break;
	}

	if (i >= rngnum) {
		DBG(DBG_MAP, dip, "ranges record for reg[%d] not found.\n", rn);
		ret = DDI_ME_REGSPEC_RANGE;
		goto err;
	}

	/*
	 * validate request has matching bus type and within 4G
	 * limit by comparing addr.hi of "ranges" and child "reg".
	 */

	ASSERT(reg_p->size_high == 0);

	rng_begin = rng_p->child_lo;
	reg_begin = reg_p->addr_low;
	/* check to verify reg bounds are within rng bounds */
	if (reg_begin < rng_begin || (reg_begin + (reg_p->size_low - 1)) >
			(rng_begin + (rng_p->size_lo - 1))) {
		DBG(DBG_MAP, dip, "size out of range for reg[%d].\n", rn);
		ret = DDI_ME_REGSPEC_RANGE;
		goto err;
	}

	/* translate child address into the parent's address space */
	p_regspec.regspec_bustype = rng_p->parent_hi;
	p_regspec.regspec_addr = reg_begin - rng_begin + rng_p->parent_lo;
	p_regspec.regspec_size = reg_p->size_low;
	DBG(DBG_MAP, dip, "regspec:bus,addr,size = (%x,%x,%x)\n",
		p_regspec.regspec_bustype, p_regspec.regspec_addr,
		p_regspec.regspec_size);
	ret = ddi_map(dip, &p_mapreq, 0, 0, vaddrp);
	DBG(DBG_MAP, dip, "niumx_map: ret %d.\n", ret);
err:
	/* undo the pointer advances from the search before freeing */
	kmem_free(rng_p - i, rnglen);
	kmem_free(reg_p - rn, reglen);
	return (ret);
}
392 
/*
 * niumx_ctlops: bus_ctl entry point.
 *
 * Handles REPORTDEV, INITCHILD/UNINITCHILD and the REGSIZE/NREGS
 * queries (answered from the child's "reg" property); everything
 * else is passed up to ddi_ctlops().
 */
int
niumx_ctlops(dev_info_t *dip, dev_info_t *rdip,
	ddi_ctl_enum_t ctlop, void *arg, void *result)
{
	niu_regspec_t *reg_p;
	int	reglen, totreg;

	DBG(DBG_CTLOPS, dip, "niumx_ctlops ctlop=%d.\n", ctlop);
	if (rdip == (dev_info_t *)0)
		return (DDI_FAILURE);

	switch (ctlop) {
	case DDI_CTLOPS_REPORTDEV:
		cmn_err(CE_NOTE, "device: %s@%s, %s%d\n",
		    ddi_node_name(rdip), ddi_get_name_addr(rdip),
		    NAMEINST(rdip));
		return (DDI_SUCCESS);

	case DDI_CTLOPS_INITCHILD:
		return (niumx_initchild((dev_info_t *)arg));

	case DDI_CTLOPS_UNINITCHILD:
		niumx_removechild((dev_info_t *)arg);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_REGSIZE:
	case DDI_CTLOPS_NREGS:
		/* fall through */
		break;
	default:
		DBG(DBG_CTLOPS, dip, "just pass to ddi_cltops.\n");
		return (ddi_ctlops(dip, rdip, ctlop, arg, result));
	}

	/* REGSIZE/NREGS */

	*(int *)result = 0;

	/* both queries are answered from the child's "reg" property */
	if (ddi_getlongprop(DDI_DEV_T_NONE, rdip, DDI_PROP_DONTPASS |
		DDI_PROP_CANSLEEP, "reg", (caddr_t)&reg_p, &reglen)
			!= DDI_SUCCESS)
		return (DDI_FAILURE);

	totreg = reglen / sizeof (niu_regspec_t);
	if (ctlop == DDI_CTLOPS_NREGS) {
		DBG(DBG_CTLOPS, (dev_info_t *)dip, "niumx_ctlops NREGS=%d.\n",
				totreg);
		*(int *)result = totreg;
	} else if (ctlop == DDI_CTLOPS_REGSIZE) {
		int	rn;
		rn = *(int *)arg;
		if (rn >= totreg) {
			kmem_free(reg_p, reglen);
			return (DDI_FAILURE);
		}
		*(off_t *)result = (reg_p + rn)->size_low;
		DBG(DBG_CTLOPS, (dev_info_t *)dip, "rn = %d, REGSIZE=%x.\n",
				rn, *(off_t *)result);
	}

	kmem_free(reg_p, reglen);
	return (DDI_SUCCESS);
}
459 
460 static int
461 niumx_initchild(dev_info_t *child)
462 {
463 	char name[MAXNAMELEN];
464 	niu_regspec_t *r;
465 	uint_t n;
466 
467 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
468 	    "reg", (int **)&r, &n) != DDI_SUCCESS) {
469 		return (DDI_FAILURE);
470 	}
471 	(void) snprintf(name, MAXNAMELEN, "%x", (r[0].addr_high &
472 		NIUMX_FUNC_NUM_MASK));
473 	ddi_prop_free(r);
474 	ddi_set_name_addr(child, name);
475 	return (DDI_SUCCESS);
476 }
477 
/*
 * niumx_removechild: undo niumx_initchild — clear the unit address,
 * remove any minor nodes and strip the child's driver properties.
 */
static void
niumx_removechild(dev_info_t *dip)
{
	ddi_set_name_addr(dip, NULL);
	ddi_remove_minor_node(dip, NULL);
	impl_rem_dev_props(dip);
}
485 
486 
487 
488 /*
489  * bus dma alloc handle entry point:
490  */
491 /*ARGSUSED*/
492 int
493 niumx_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attrp,
494 	int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
495 {
496 	ddi_dma_impl_t *mp;
497 	int sleep = (waitfp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
498 
499 	DBG(DBG_DMA_ALLOCH, dip, "rdip=%s%d\n", NAMEINST(rdip));
500 
501 	if (attrp->dma_attr_version != DMA_ATTR_V0) {
502 		DBG(DBG_DMA_ALLOCH, (dev_info_t *)dip, "DDI_DMA_BADATTR\n");
503 		return (DDI_DMA_BADATTR);
504 	}
505 
506 	/* Caution: we don't use zalloc to enhance performance! */
507 	if ((mp = kmem_alloc(sizeof (ddi_dma_impl_t), sleep)) == 0) {
508 		DBG(DBG_DMA_ALLOCH, dip, "can't alloc ddi_dma_impl_t\n");
509 		return (DDI_FAILURE);
510 	}
511 	mp->dmai_rdip = rdip;
512 	mp->dmai_pfnlst = NULL;
513 	mp->dmai_cookie = NULL;
514 	mp->dmai_fault = 0;
515 	mp->dmai_fault_check = NULL;
516 	mp->dmai_fault_notify = NULL;
517 
518 	mp->dmai_attr = *attrp; 	/* set requestors attr info */
519 
520 	DBG(DBG_DMA_ALLOCH, dip, "mp=%p\n", mp);
521 
522 	*handlep = (ddi_dma_handle_t)mp;
523 	return (DDI_SUCCESS);
524 }
525 
526 
527 /*
528  * bus dma free handle entry point:
529  */
530 /*ARGSUSED*/
531 int
532 niumx_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
533 {
534 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
535 
536 	if (mp->dmai_cookie)
537 		kmem_free(mp->dmai_cookie, sizeof (ddi_dma_cookie_t));
538 	kmem_free(mp, sizeof (ddi_dma_impl_t));
539 
540 	return (DDI_SUCCESS);
541 }
542 
543 
544 /*
545  * bus dma bind handle entry point:
546  *
547  *	check/enforce DMA type, setup pfn0 and some other key pieces
548  *	of this dma request.
549  * Note: this only works with DMA_OTYP_VADDR, and makes use of the known
550  *	fact that only contiguous memory blocks will be passed in.
551  *	Therefore only one cookie will ever be returned.
552  *
553  *	return values:
554  *		DDI_DMA_NOMAPPING - can't get valid pfn0, or bad dma type
555  *		DDI_DMA_NORESOURCES
556  *		DDI_SUCCESS
557  *
558  *	dma handle members affected (set on exit):
559  *	mp->dmai_object		- dmareq->dmar_object
560  *	mp->dmai_rflags		- dmareq->dmar_flags
561  *	mp->dmai_pfn0   	- 1st page pfn (if va/size pair and not shadow)
562  *	mp->dmai_roffset 	- initialized to starting page offset
563  *	mp->dmai_size		- # of total pages of entire object
564  *	mp->dmai_cookie		- new cookie alloc'd
565  */
566 /*ARGSUSED*/
567 int
568 niumx_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
569 	ddi_dma_handle_t handle, ddi_dma_req_t *dmareq,
570 	ddi_dma_cookie_t *cookiep, uint_t *ccountp)
571 {
572 	int (*waitfp)(caddr_t) = dmareq->dmar_fp;
573 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
574 	ddi_dma_obj_t *dobj_p = &dmareq->dmar_object;
575 	uint32_t offset;
576 	pfn_t pfn0;
577 	int ret;
578 
579 	DBG(DBG_DMA_BINDH, dip, "rdip=%s%d mp=%p dmareq=%p\n", NAMEINST(rdip),
580 		mp, dmareq);
581 
582 	/* first check dma type */
583 	mp->dmai_rflags = dmareq->dmar_flags & DMP_DDIFLAGS | DMP_NOSYNC;
584 	switch (dobj_p->dmao_type) {
585 	case DMA_OTYP_VADDR: {
586 		caddr_t vaddr = dobj_p->dmao_obj.virt_obj.v_addr;
587 		struct as *as_p = dobj_p->dmao_obj.virt_obj.v_as;
588 		struct hat *hat_p = as_p ? as_p->a_hat : kas.a_hat;
589 		offset = (ulong_t)vaddr & NIUMX_PAGE_OFFSET;
590 		pfn0 = hat_getpfnum(hat_p, vaddr);
591 		}
592 		break;
593 
594 	case DMA_OTYP_BUFVADDR:
595 	case DMA_OTYP_PAGES:
596 	case DMA_OTYP_PADDR:
597 	default:
598 		cmn_err(CE_WARN, "%s%d requested unsupported dma type %x",
599 			NAMEINST(mp->dmai_rdip), dobj_p->dmao_type);
600 		ret = DDI_DMA_NOMAPPING;
601 		goto err;
602 	}
603 	if (pfn0 == PFN_INVALID) {
604 		cmn_err(CE_WARN, "%s%d: invalid pfn0 for DMA object %p",
605 			NAMEINST(dip), (void *)dobj_p);
606 		ret = DDI_DMA_NOMAPPING;
607 		goto err;
608 	}
609 	mp->dmai_object	 = *dobj_p;			/* whole object */
610 	mp->dmai_pfn0	 = (void *)pfn0;		/* cache pfn0   */
611 	mp->dmai_roffset = offset;			/* pg0 offset   */
612 	mp->dmai_mapping = mp->dmai_roffset | NIUMX_PTOB(pfn0);
613 	mp->dmai_size = mp->dmai_object.dmao_size;
614 
615 	DBG(DBG_DMA_BINDH, dip, "check pfn: mp=%p pfn0=%x\n",
616 		mp, mp->dmai_pfn0);
617 	if (!(mp->dmai_cookie = kmem_zalloc(sizeof (ddi_dma_cookie_t),
618 		waitfp == DDI_DMA_SLEEP ? KM_SLEEP : KM_NOSLEEP))) {
619 			ret = DDI_DMA_NORESOURCES;
620 			goto err;
621 		}
622 	mp->dmai_cookie->dmac_laddress = mp->dmai_mapping;
623 	mp->dmai_cookie->dmac_size = mp->dmai_size;
624 	*ccountp = 1;
625 	*cookiep = *mp->dmai_cookie;
626 	DBG(DBG_DMA_BINDH, dip, "cookie %" PRIx64 "+%x, count=%d\n",
627 		cookiep->dmac_address, cookiep->dmac_size, *ccountp);
628 	return (DDI_DMA_MAPPED);
629 
630 err:
631 	DBG(DBG_DMA_BINDH, (dev_info_t *)dip,
632 			"niumx_dma_bindhdl error ret=%d\n", ret);
633 	return (ret);
634 }
635 
636 /*
637  * bus dma unbind handle entry point:
638  */
639 /*ARGSUSED*/
640 int
641 niumx_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
642 {
643 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
644 
645 	DBG(DBG_DMA_UNBINDH, dip, "rdip=%s%d, mp=%p\n",
646 		ddi_driver_name(rdip), ddi_get_instance(rdip), handle);
647 	if (mp->dmai_cookie) {
648 		kmem_free(mp->dmai_cookie, sizeof (ddi_dma_cookie_t));
649 		mp->dmai_cookie = NULL;
650 	}
651 
652 	return (DDI_SUCCESS);
653 }
654 
655 /*ARGSUSED*/
656 int
657 niumx_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
658     ddi_intr_handle_impl_t *hdlp, void *result)
659 {
660 
661 	int	ret = DDI_SUCCESS;
662 
663 	DBG(DBG_INTROPS, dip, "niumx_intr_ops: dip=%p rdip=%p intr_op=%x "
664 	    "handle=%p\n", dip, rdip, intr_op, hdlp);
665 
666 	switch (intr_op) {
667 
668 	case DDI_INTROP_SUPPORTED_TYPES:
669 		*(int *)result = DDI_INTR_TYPE_FIXED;
670 		break;
671 	case DDI_INTROP_GETCAP:
672 		*(int *)result =  DDI_INTR_FLAG_LEVEL;
673 		break;
674 	case DDI_INTROP_SETCAP:
675 		ret = DDI_ENOTSUP;
676 		break;
677 	case DDI_INTROP_ALLOC:
678 		/*  scratch1 = count,  # of intrs from DDI framework */
679 		*(int *)result = hdlp->ih_scratch1;
680 		break;
681 	case DDI_INTROP_FREE:
682 		/* Do we need to do anything here?  */
683 		break;
684 	case DDI_INTROP_GETPRI:
685 		*(int *)result = NIUMX_DEFAULT_PIL;
686 		break;
687 	case DDI_INTROP_SETPRI:
688 		ret = DDI_ENOTSUP;
689 		break;
690 	case DDI_INTROP_ADDISR:
691 		ret = niumx_add_intr(dip, rdip, hdlp);
692 		break;
693 	case DDI_INTROP_REMISR:
694 		ret = niumx_rem_intr(dip, rdip, hdlp);
695 		break;
696 	case DDI_INTROP_ENABLE:
697 		ret = niumx_set_intr(dip, rdip, hdlp, HV_INTR_VALID);
698 		break;
699 	case DDI_INTROP_DISABLE:
700 		ret = niumx_set_intr(dip, rdip, hdlp, HV_INTR_NOTVALID);
701 		break;
702 	case DDI_INTROP_SETMASK:
703 		ret = DDI_ENOTSUP;
704 		break;
705 	case DDI_INTROP_CLRMASK:
706 		ret = DDI_ENOTSUP;
707 		break;
708 	case DDI_INTROP_GETPENDING:
709 		ret = DDI_ENOTSUP;
710 		break;
711 	case DDI_INTROP_NINTRS:
712 	case DDI_INTROP_NAVAIL: {
713 		devino_t	*inos_p;
714 		int		inoslen;
715 		if (ddi_getlongprop(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS,
716 			"interrupts", (caddr_t)&inos_p, &inoslen)
717 			!= DDI_SUCCESS) {
718 				ret = DDI_FAILURE;
719 				break;
720 			}
721 		*(int *)result = inoslen / sizeof (uint32_t);
722 		kmem_free(inos_p, inoslen);
723 		}
724 		break;
725 	default:
726 		ret = DDI_ENOTSUP;
727 		break;
728 	}
729 
730 	DBG(DBG_INTROPS, dip, "niumx_intr_ops: ret=%d\n", ret);
731 	return (ret);
732 }
733 
/*
 * niumx_set_intr: enable or disable an interrupt.
 *
 * Looks up the ih-table slot via the child's "interrupts" property
 * (indexed by hdlp->ih_inum) and sets the sysino's valid state to
 * 'valid' (HV_INTR_VALID / HV_INTR_NOTVALID) via the hypervisor.
 */
int
niumx_set_intr(dev_info_t *dip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp, int valid)
{
	niumx_ih_t	*ih_p;
	devino_t	*inos_p;
	int		inoslen, ret = DDI_SUCCESS;
	uint64_t	hvret;

	DBG(DBG_A_INTX, dip, "niumx_set_intr: rdip=%s%d, valid=%d\n",
		NAMEINST(rdip), valid);

	ASSERT(hdlp->ih_inum < NIUMX_MAX_INTRS);

	/* find the appropriate slot from the fixed table */
	if (ddi_getlongprop(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS,
		"interrupts", (caddr_t)&inos_p, &inoslen) != DDI_SUCCESS) {
		ret = DDI_FAILURE;
		goto fail;
	}
	ih_p = niumx_ihtable + inos_p[hdlp->ih_inum];
	DBG(DBG_A_INTX, dip, "enabling (%x,%x,%x)\n", ih_p->ih_inum,
			ih_p->ih_ino, ih_p->ih_sysino);

	if ((hvret = hvio_intr_setvalid(ih_p->ih_sysino, valid))
		!= H_EOK) {
		DBG(DBG_A_INTX, dip, "hvio_intr_setvalid failed, ret 0x%x\n",
			hvret);
		ret = DDI_FAILURE;
	}
	kmem_free(inos_p, inoslen);
fail:
	return (ret);
}
768 
769 
770 
771 /*
772  * niumx_add_intr:
773  *
774  * This is the leaf/nexus/HV mapping, now read from "interrupts":
775  *
776  * we have a range of 64 to work with:
777  *   [0-15]  - reserved
778  *   [16]    - mac0
779  *   [17]    - MIF
780  *   [18]    - SYSERR
781  *   [19-26] - func0 Rx (qty. 8)
782  *   [27-34] - func0 Tx (qty. 8)
783  *   [35]    - mac1
784  *   [36-43] - func1 Rx (qty. 8)
785  *   [44-51] - func1 Tx (qty. 8)
786  *
787  *   [52] - Error Interrupt hook
788  */
789 int
790 niumx_add_intr(dev_info_t *dip, dev_info_t *rdip,
791     ddi_intr_handle_impl_t *hdlp)
792 {
793 	niumx_ih_t	*ih_p;
794 	int		inoslen, ret = DDI_SUCCESS;
795 	uint64_t	hvret;
796 	devino_t	*inos_p;
797 	sysino_t	sysino;
798 
799 	/* FMA Err handling hook */
800 	if (dip == rdip) {
801 		/*
802 		 * this is not the leaf calling us, so hardwire in the
803 		 * FMA interrupt details.
804 		 */
805 		ih_p = niumx_ihtable + NIUMX_EI_IH;
806 		ih_p->ih_ino = NIUMX_EI_IH;
807 		goto get_sysino;
808 	}
809 
810 	/* get new ino */
811 	if (hdlp->ih_inum >= NIUMX_MAX_INTRS) {
812 		DBG(DBG_INTR, dip, "error: inum %d out of range\n",
813 			hdlp->ih_inum);
814 		ret = DDI_FAILURE;
815 		goto done;
816 	}
817 	if (ddi_getlongprop(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS,
818 		"interrupts", (caddr_t)&inos_p, &inoslen) != DDI_SUCCESS) {
819 		ret = DDI_FAILURE;
820 		goto done;
821 	}
822 	ih_p = niumx_ihtable + inos_p[hdlp->ih_inum];
823 	ih_p->ih_ino = inos_p[hdlp->ih_inum];
824 	kmem_free(inos_p, inoslen);
825 get_sysino:
826 	if ((hvret = hvio_intr_devino_to_sysino(DIP_TO_HANDLE(dip),
827 		ih_p->ih_ino, &sysino)) != H_EOK) {
828 		DBG(DBG_INTR, dip, "hvio_intr_devino_to_sysino failed, "
829 			"ret 0x%x\n", hvret);
830 		ret = DDI_FAILURE;
831 		goto done;
832 	}
833 	ih_p->ih_sysino = sysino;
834 	ih_p->ih_dip = dip;
835 	ih_p->ih_inum = hdlp->ih_inum;
836 	ih_p->ih_hdlr = hdlp->ih_cb_func;
837 	ih_p->ih_arg1 = hdlp->ih_cb_arg1;
838 	ih_p->ih_arg2 = hdlp->ih_cb_arg2;
839 
840 	DBG(DBG_A_INTX, dip, "niumx_add_intr: rdip=%s%d inum=0x%x "
841 		"handler=%p arg1=%p arg2=%p, new ih_p = %p\n", NAMEINST(rdip),
842 		hdlp->ih_inum, hdlp->ih_cb_func, hdlp->ih_cb_arg1,
843 		hdlp->ih_cb_arg2, ih_p);
844 
845 	if (hdlp->ih_pri == 0)
846 		hdlp->ih_pri = NIUMX_DEFAULT_PIL;
847 
848 	/* Save sysino value in hdlp */
849 	hdlp->ih_vector = ih_p->ih_sysino;
850 
851 	/* swap in our handler & arg */
852 	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp, (ddi_intr_handler_t *)niumx_intr_hdlr,
853 			(void *)ih_p, NULL);
854 
855 	DBG(DBG_A_INTX, dip, "adding (%x,%x,%x)\n", ih_p->ih_inum,
856 			ih_p->ih_ino, ih_p->ih_sysino);
857 	ret = i_ddi_add_ivintr(hdlp);
858 
859 	/* Restore orig. interrupt handler & args in handle. */
860 	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp, ih_p->ih_hdlr, ih_p->ih_arg1,
861 		ih_p->ih_arg2);
862 
863 	if (ret != DDI_SUCCESS) {
864 		DBG(DBG_A_INTX, dip, "i_ddi_add_ivintr error ret=%x\n", ret);
865 		goto done;
866 	}
867 
868 	/* select cpu, saving it for removal */
869 	ih_p->ih_cpuid = intr_dist_cpuid();
870 
871 	if ((hvret = hvio_intr_settarget(ih_p->ih_sysino, ih_p->ih_cpuid))
872 		!= H_EOK) {
873 		DBG(DBG_A_INTX, dip, "hvio_intr_settarget failed, ret 0x%x\n",
874 			hvret);
875 		ret = DDI_FAILURE;
876 	}
877 done:
878 	DBG(DBG_A_INTX, dip, "done, ret = %d, ih_p 0x%p, hdlp 0x%p\n", ih_p,
879 		hdlp, ret);
880 	return (ret);
881 }
882 
883 /*
884  * niumx_rem_intr:
885  *
886  * This function is called to unregister interrupts.
887  */
888 int
889 niumx_rem_intr(dev_info_t *dip, dev_info_t *rdip,
890     ddi_intr_handle_impl_t *hdlp)
891 {
892 	niumx_ih_t	*ih_p;
893 	cpuid_t		curr_cpu;
894 	devino_t	*inos_p;
895 	int		inoslen, ret = DDI_SUCCESS;
896 	uint64_t	hvret;
897 
898 	ASSERT(hdlp->ih_inum < NIUMX_MAX_INTRS);
899 
900 	/* find the appropriate slot from the fixed table */
901 	if (ddi_getlongprop(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS,
902 		"interrupts", (caddr_t)&inos_p, &inoslen) != DDI_SUCCESS) {
903 		ret = DDI_FAILURE;
904 		goto fail1;
905 	}
906 	ih_p = niumx_ihtable + inos_p[hdlp->ih_inum];
907 	DBG(DBG_R_INTX, dip, "removing (%x,%x,%x)\n", ih_p->ih_inum,
908 			ih_p->ih_ino, ih_p->ih_sysino);
909 
910 	/* Get the current cpu */
911 	if ((hvret = hvio_intr_gettarget(ih_p->ih_sysino, &curr_cpu))
912 		!= H_EOK) {
913 		DBG(DBG_R_INTX, dip, "hvio_intr_gettarget failed, ret 0x%x\n",
914 			hvret);
915 		ret = DDI_FAILURE;
916 		goto fail2;
917 	}
918 
919 	intr_dist_cpuid_rem_device_weight(ih_p->ih_cpuid, rdip);
920 
921 	hdlp->ih_vector = ih_p->ih_sysino;
922 	if (hdlp->ih_vector !=  NULL) i_ddi_rem_ivintr(hdlp);
923 
924 	/* clear out this entry */
925 	ih_p->ih_ino = NULL;
926 fail2:
927 	kmem_free(inos_p, inoslen);
928 fail1:
929 	return (ret);
930 }
931 
932 /*
933  * niumx_intr_hdlr (our interrupt handler)
934  */
935 uint_t
936 niumx_intr_hdlr(void *arg)
937 {
938 	niumx_ih_t *ih_p = (niumx_ih_t *)arg;
939 	uint_t		r;
940 
941 	DTRACE_PROBE4(interrupt__start, dev_info_t, ih_p->ih_dip, void *,
942 		ih_p->ih_hdlr, caddr_t, ih_p->ih_arg1, caddr_t, ih_p->ih_arg2);
943 
944 	r = (*ih_p->ih_hdlr)(ih_p->ih_arg1, ih_p->ih_arg2);
945 
946 	DTRACE_PROBE4(interrupt__complete, dev_info_t, ih_p->ih_dip, void *,
947 		ih_p->ih_hdlr, caddr_t, ih_p->ih_arg1, int, r);
948 	return (r);
949 }
950 
#ifdef	DEBUG
/* bitmask of enabled debug classes; patchable with a debugger */
uint64_t niumx_debug_flags = 0;

/* class-name prefixes printed by niumx_dbg() */
static char *niumx_debug_sym [] = {	/* same sequence as niumx_debug_bit */
	/*  0 */ "attach",
	/*  1 */ "map",
	/*  2 */ "nex-ctlops",
	/*  3 */ "introps",
	/*  4 */ "intr-add",
	/*  5 */ "intr-rem",
	/*  6 */ "intr",
	/*  7 */ "dma-alloc",
	/*  8 */ "dma-bind",
	/*  9 */ "dma-unbind",
	/* 10 */ "chk-dma-mode"
};
967 
968 /*ARGSUSED*/
969 void
970 niumx_dbg(niumx_debug_bit_t bit, dev_info_t *dip, char *fmt, ...)
971 {
972 	va_list ap;
973 	char msgbuf[1024];
974 
975 	if (!(1ull << bit & niumx_debug_flags))
976 		return;
977 	va_start(ap, fmt);
978 	(void) vsprintf(msgbuf, fmt, ap);
979 	va_end(ap);
980 	cmn_err(CE_NOTE, "%s: %s", niumx_debug_sym[bit], msgbuf);
981 }
982 
983 #endif	/* DEBUG */
984