1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24
25
26 /*
27 * Niagara2 Network Interface Unit (NIU) Nexus Driver
28 */
29
30 #include <sys/conf.h>
31 #include <sys/modctl.h>
32 #include <sys/ddi_impldefs.h>
33 #include <sys/ddi_subrdefs.h>
34 #include <sys/ddi.h>
35 #include <sys/sunndi.h>
36 #include <sys/sunddi.h>
37 #include <sys/open.h>
38 #include <sys/stat.h>
39 #include <sys/file.h>
40 #include <sys/machsystm.h>
41 #include <sys/hsvc.h>
42 #include <sys/sdt.h>
43 #include <sys/hypervisor_api.h>
44 #include <sys/cpuvar.h>
45 #include "niumx_var.h"
46
47 static int niumx_fm_init_child(dev_info_t *, dev_info_t *, int,
48 ddi_iblock_cookie_t *);
49 static int niumx_intr_ops(dev_info_t *dip, dev_info_t *rdip,
50 ddi_intr_op_t intr_op, ddi_intr_handle_impl_t *hdlp, void *result);
51 static int niumx_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
52 static int niumx_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
53 static int niumx_set_intr(dev_info_t *dip, dev_info_t *rdip,
54 ddi_intr_handle_impl_t *hdlp, int valid);
55 static int niumx_add_intr(dev_info_t *dip, dev_info_t *rdip,
56 ddi_intr_handle_impl_t *hdlp);
57 static int niumx_rem_intr(dev_info_t *dip, dev_info_t *rdip,
58 ddi_intr_handle_impl_t *hdlp);
59 static uint_t niumx_intr_hdlr(void *arg);
60 static int niumx_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
61 off_t offset, off_t len, caddr_t *addrp);
62 static int niumx_dma_allochdl(dev_info_t *dip, dev_info_t *rdip,
63 ddi_dma_attr_t *attrp,
64 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep);
65 static int niumx_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
66 ddi_dma_handle_t handlep);
67 static int niumx_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
68 ddi_dma_handle_t handle, ddi_dma_req_t *dmareq,
69 ddi_dma_cookie_t *cookiep, uint_t *ccountp);
70 static int niumx_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
71 ddi_dma_handle_t handle);
72 static int niumx_ctlops(dev_info_t *dip, dev_info_t *rdip,
73 ddi_ctl_enum_t op, void *arg, void *result);
74
75 int niumxtool_init(dev_info_t *dip);
76 void niumxtool_uninit(dev_info_t *dip);
77
78 int niumx_get_intr_target(niumx_devstate_t *niumxds_p, niudevino_t ino,
79 niucpuid_t *cpu_id);
80 int niumx_set_intr_target(niumx_devstate_t *niumxds_p, niudevino_t ino,
81 niucpuid_t cpu_id);
82
/*
 * Bus ops vector for this nexus.  Slots left as 0 (NULL) are operations
 * this nexus does not implement; the DDI framework treats them as
 * unsupported.  Slot meanings follow the standard struct bus_ops layout
 * (see the labelled entries below, which were already annotated).
 */
static struct bus_ops niumx_bus_ops = {
	BUSO_REV,
	niumx_map,			/* (*bus_map)() */
	0,				/* (*bus_get_intrspec)() - obsolete */
	0,				/* (*bus_add_intrspec)() - obsolete */
	0,				/* (*bus_remove_intrspec)() - obsolete */
	i_ddi_map_fault,		/* (*bus_map_fault)() */
	0,				/* (*bus_dma_map)() - obsolete */
	niumx_dma_allochdl,		/* (*bus_dma_allochdl)() */
	niumx_dma_freehdl,		/* (*bus_dma_freehdl)() */
	niumx_dma_bindhdl,		/* (*bus_dma_bindhdl)() */
	niumx_dma_unbindhdl,		/* (*bus_dma_unbindhdl)() */
	0,				/* (*bus_dma_flush)() */
	0,				/* (*bus_dma_win)() */
	0,				/* (*bus_dma_ctl)() */
	niumx_ctlops,			/* (*bus_ctl)() */
	ddi_bus_prop_op,		/* (*bus_prop_op)() */
	0,				/* (*bus_get_eventcookie)(); */
	0,				/* (*bus_add_eventcall)(); */
	0,				/* (*bus_remove_eventcall)(); */
	0,				/* (*bus_post_event)(); */
	0,				/* (*bus_intr_ctl)(); */
	0,				/* (*bus_config)(); */
	0,				/* (*bus_unconfig)(); */
	niumx_fm_init_child,		/* (*bus_fm_init)(); */
	0,				/* (*bus_fm_fini)(); */
	0,				/* (*bus_enter)() */
	0,				/* (*bus_exit)() */
	0,				/* (*bus_power)() */
	niumx_intr_ops			/* (*bus_intr_op)(); */
};
114
115 extern struct cb_ops niumx_cb_ops;
116
/*
 * Device ops vector.  Character device entry points live in niumx_cb_ops
 * (defined in another compilation unit; see the extern above).  Quiesce
 * is not supported, so fast reboot will treat this device conservatively.
 */
static struct dev_ops niumx_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	ddi_no_info,		/* info */
	nulldev,		/* identify */
	0,			/* probe */
	niumx_attach,		/* attach */
	niumx_detach,		/* detach */
	nulldev,		/* reset */
	&niumx_cb_ops,		/* driver operations */
	&niumx_bus_ops,		/* bus operations */
	0,			/* power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};
131
/* Module linkage information for the kernel. */
static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module: device driver */
	"NIU Nexus Driver",	/* Name shown by modinfo(8) */
	&niumx_ops,		/* driver ops */
};
138
/* Single-linkage list handed to mod_install()/mod_remove()/mod_info(). */
static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modldrv,
	NULL
};
144
145 void *niumx_state;
146
147 /*
148 * forward function declarations:
149 */
150 static void niumx_removechild(dev_info_t *);
151 static int niumx_initchild(dev_info_t *child);
152
153 int
_init(void)154 _init(void)
155 {
156 int e;
157 uint64_t mjrnum;
158 uint64_t mnrnum;
159
160 /*
161 * Check HV intr group api versioning.
162 * This driver uses the old interrupt routines which are supported
163 * in old firmware in the CORE API group and in newer firmware in
164 * the INTR API group. Support for these calls will be dropped
165 * once the INTR API group major goes to 2.
166 */
167 if ((hsvc_version(HSVC_GROUP_INTR, &mjrnum, &mnrnum) == 0) &&
168 (mjrnum > NIUMX_INTR_MAJOR_VER)) {
169 cmn_err(CE_WARN, "niumx: unsupported intr api group: "
170 "maj:0x%lx, min:0x%lx", mjrnum, mnrnum);
171 return (ENOTSUP);
172 }
173
174 if ((e = ddi_soft_state_init(&niumx_state, sizeof (niumx_devstate_t),
175 1)) == 0 && (e = mod_install(&modlinkage)) != 0)
176 ddi_soft_state_fini(&niumx_state);
177 return (e);
178 }
179
180 int
_fini(void)181 _fini(void)
182 {
183 int e;
184 if ((e = mod_remove(&modlinkage)) == 0)
185 ddi_soft_state_fini(&niumx_state);
186 return (e);
187 }
188
/*
 * Report module information to the modctl framework.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
194
195
196 hrtime_t niumx_intr_timeout = 2ull * NANOSEC; /* 2 seconds in nanoseconds */
197
/*
 * Interrupt redistribution callback, registered with intr_dist_add() at
 * attach time and invoked by the framework when interrupts should be
 * re-spread across CPUs.  For each mapped ino whose current target
 * differs from the newly selected CPU: disable delivery, wait (bounded
 * by niumx_intr_timeout) for any already-delivered interrupt to be
 * claimed, retarget the sysino, and restore the ino's previous
 * valid/notvalid state.  Runs under the per-instance mutex.
 */
void
niumx_intr_dist(void *arg)
{
	niumx_devstate_t	*niumxds_p = (niumx_devstate_t *)arg;
	kmutex_t	*lock_p = &niumxds_p->niumx_mutex;
	int		i;
	niumx_ih_t	*ih_p = niumxds_p->niumx_ihtable;

	DBG(NIUMX_DBG_A_INTX, NULL, "niumx_intr_dist entered\n");
	mutex_enter(lock_p);
	for (i = 0; i < NIUMX_MAX_INTRS; i++, ih_p++) {
		niusysino_t sysino = ih_p->ih_sysino;
		niucpuid_t cpuid;
		int state;
		hrtime_t start;
		dev_info_t *dip = ih_p->ih_dip;

		/* skip unmapped inos and inos already on the chosen CPU */
		if (!sysino || (cpuid = intr_dist_cpuid()) == ih_p->ih_cpuid)
			continue;

		/* quiesce delivery before moving the target */
		(void) hvio_intr_setvalid(sysino, HV_INTR_NOTVALID);

		/* check for pending interrupts, busy wait if so */
		for (start = gethrtime(); !panicstr &&
		    (hvio_intr_getstate(sysino, &state) == H_EOK) &&
		    (state == HV_INTR_DELIVERED_STATE); /* */) {
			if (gethrtime() - start > niumx_intr_timeout) {
				cmn_err(CE_WARN, "%s%d: niumx_intr_dist: "
				    "pending interrupt (%x,%lx) timedout\n",
				    ddi_driver_name(dip), ddi_get_instance(dip),
				    ih_p->ih_inum, sysino);
				/* force the stuck interrupt back to idle */
				(void) hvio_intr_setstate(sysino,
				    HV_INTR_IDLE_STATE);
				break;
			}
		}
		(void) hvio_intr_settarget(sysino, cpuid);

		/* restore the enable state the ino had before the move */
		if (ih_p->ih_state == HV_INTR_VALID)
			(void) hvio_intr_setvalid(sysino, HV_INTR_VALID);
		else
			(void) hvio_intr_setvalid(sysino, HV_INTR_NOTVALID);

		ih_p->ih_cpuid = cpuid;
	}
	mutex_exit(lock_p);
}
245
246 static int
niumx_attach(dev_info_t * dip,ddi_attach_cmd_t cmd)247 niumx_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
248 {
249 int instance = ddi_get_instance(dip);
250 niumx_devstate_t *niumxds_p; /* devstate pointer */
251 niu_regspec_t *reg_p;
252 niumx_ih_t *ih_p;
253 uint_t reglen;
254 int i, ret = DDI_SUCCESS;
255
256 switch (cmd) {
257 case DDI_ATTACH:
258 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
259 DDI_PROP_DONTPASS, "reg", (int **)®_p, ®len)
260 != DDI_PROP_SUCCESS) {
261 DBG(NIUMX_DBG_ATTACH, dip, "reg lookup failed\n");
262 ret = DDI_FAILURE;
263 goto done;
264 }
265
266 /*
267 * Allocate and get soft state structure.
268 */
269 if (ddi_soft_state_zalloc(niumx_state, instance)
270 != DDI_SUCCESS) {
271 ret = DDI_FAILURE;
272 goto prop_free;
273 }
274 niumxds_p = (niumx_devstate_t *)ddi_get_soft_state(niumx_state,
275 instance);
276 niumxds_p->dip = dip;
277 niumxds_p->niumx_open_count = 0;
278 mutex_init(&niumxds_p->niumx_mutex, NULL, MUTEX_DRIVER, NULL);
279
280 DBG(NIUMX_DBG_ATTACH, dip, "soft state alloc'd instance = %d, "
281 "niumxds_p = %p\n", instance, niumxds_p);
282
283 /* hv devhdl: low 28-bit of 1st "reg" entry's addr.hi */
284 niumxds_p->niumx_dev_hdl = (niudevhandle_t)(reg_p->addr_high &
285 NIUMX_DEVHDLE_MASK);
286
287 ih_p = niumxds_p->niumx_ihtable;
288 for (i = 0; i < NIUMX_MAX_INTRS; i++, ih_p++) {
289 ih_p->ih_sysino = 0;
290 ih_p->ih_state = HV_INTR_NOTVALID;
291 }
292
293 /* add interrupt redistribution callback */
294 intr_dist_add(niumx_intr_dist, niumxds_p);
295
296 niumxds_p->niumx_fm_cap = DDI_FM_EREPORT_CAPABLE;
297
298 ddi_fm_init(niumxds_p->dip, &niumxds_p->niumx_fm_cap,
299 &niumxds_p->niumx_fm_ibc);
300
301 if (niumxtool_init(dip) != DDI_SUCCESS) {
302 ret = DDI_FAILURE;
303 goto cleanup;
304 }
305
306 ret = DDI_SUCCESS;
307 goto prop_free;
308 cleanup:
309 mutex_destroy(&niumxds_p->niumx_mutex);
310 ddi_soft_state_free(niumx_state, ddi_get_instance(dip));
311 prop_free:
312 ddi_prop_free(reg_p);
313 done:
314 return (ret);
315
316 case DDI_RESUME:
317 default:
318 break;
319 }
320 return (ret);
321 }
322
323 static int
niumx_detach(dev_info_t * dip,ddi_detach_cmd_t cmd)324 niumx_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
325 {
326 niumx_devstate_t *niumxds_p;
327
328 switch (cmd) {
329 case DDI_DETACH:
330
331 niumxds_p = (niumx_devstate_t *)
332 ddi_get_soft_state(niumx_state, ddi_get_instance(dip));
333
334 intr_dist_rem(niumx_intr_dist, niumxds_p);
335 ddi_fm_fini(dip);
336 niumxtool_uninit(dip);
337 mutex_destroy(&niumxds_p->niumx_mutex);
338 ddi_soft_state_free(niumx_state, ddi_get_instance(dip));
339 return (DDI_SUCCESS);
340
341 case DDI_SUSPEND:
342 default:
343 break;
344 }
345 return (DDI_FAILURE);
346 }
347
348
349 /*
350 * Function used to initialize FMA for our children nodes. Called
351 * through pci busops when child node calls ddi_fm_init.
352 */
353 /*ARGSUSED*/
354 int
niumx_fm_init_child(dev_info_t * dip,dev_info_t * cdip,int cap,ddi_iblock_cookie_t * ibc_p)355 niumx_fm_init_child(dev_info_t *dip, dev_info_t *cdip, int cap,
356 ddi_iblock_cookie_t *ibc_p)
357 {
358 niumx_devstate_t *niumxds_p = NIUMX_DIP_TO_STATE(dip);
359
360 ASSERT(ibc_p != NULL);
361 *ibc_p = niumxds_p->niumx_fm_ibc;
362
363 return (niumxds_p->niumx_fm_cap);
364 }
365
366
/*
 * bus map entry point: translate a child "reg" entry through this
 * nexus' "ranges" property into a parent regspec, then recurse the
 * mapping request up to the parent via ddi_map().
 */
/*ARGSUSED*/
int
niumx_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
    off_t offset, off_t len, caddr_t *vaddrp)
{
	struct regspec p_regspec;
	ddi_map_req_t p_mapreq;
	niu_regspec_t	*reg_p;
	int 	i, rn = mp->map_obj.rnumber, reglen, rnglen, rngnum, ret;
	niumx_ranges_t *rng_p;

	uint32_t reg_begin, rng_begin;

	DBG(NIUMX_DBG_MAP, dip, "%s%d: mapping %s%d reg %d\n",
	    NIUMX_NAMEINST(dip), NIUMX_NAMEINST(rdip), rn);

	/* child "reg" property; freed (rewound) before every return */
	if (ddi_getlongprop(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS,
	    "reg", (caddr_t)&reg_p, &reglen) != DDI_SUCCESS)
		return (DDI_FAILURE);

	if (rn < 0 || (rn >= reglen / sizeof (niu_regspec_t))) {
		DBG(NIUMX_DBG_MAP, dip, "rnumber out of range: %d\n", rn);
		kmem_free(reg_p, reglen);
		return (DDI_ME_RNUMBER_RANGE);
	}

	/* build regspec up for parent */
	p_mapreq = *mp;		/* dup the whole structure */
	p_mapreq.map_type = DDI_MT_REGSPEC;
	p_mapreq.map_obj.rp = &p_regspec;

	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "ranges",
	    (caddr_t)&rng_p, &rnglen) != DDI_SUCCESS) {
		DBG(NIUMX_DBG_MAP, dip, "%s%d: no ranges property\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		kmem_free(reg_p, reglen);
		return (DDI_FAILURE);
	}

	/*
	 * locate matching ranges record.  NOTE: reg_p is advanced to the
	 * requested entry and rng_p walks the table, so the frees at err:
	 * must rewind both pointers (reg_p - rn, rng_p - i).
	 */
	rngnum = rnglen / sizeof (niumx_ranges_t);
	for (i = 0, reg_p += rn; i < rngnum; rng_p++, i++) {
		if (reg_p->addr_high == rng_p->child_hi)
			break;
	}

	if (i >= rngnum) {
		DBG(NIUMX_DBG_MAP, dip, "ranges record for reg[%d] "
		    "not found.\n", rn);
		ret = DDI_ME_REGSPEC_RANGE;
		goto err;
	}

	/*
	 * validate request has matching bus type and within 4G
	 * limit by comparing addr.hi of "ranges" and child "reg".
	 */

	ASSERT(reg_p->size_high == 0);

	rng_begin = rng_p->child_lo;
	reg_begin = reg_p->addr_low;
	/* check to verify reg bounds are within rng bounds */
	if (reg_begin < rng_begin || (reg_begin + (reg_p->size_low - 1)) >
	    (rng_begin + (rng_p->size_lo - 1))) {
		DBG(NIUMX_DBG_MAP, dip, "size out of range for reg[%d].\n", rn);
		ret = DDI_ME_REGSPEC_RANGE;
		goto err;
	}

	p_regspec.regspec_bustype = rng_p->parent_hi;
	p_regspec.regspec_addr = reg_begin - rng_begin + rng_p->parent_lo;
	p_regspec.regspec_size = reg_p->size_low;
	DBG(NIUMX_DBG_MAP, dip, "regspec:bus,addr,size = (%x,%x,%x)\n",
	    p_regspec.regspec_bustype, p_regspec.regspec_addr,
	    p_regspec.regspec_size);
	ret = ddi_map(dip, &p_mapreq, 0, 0, vaddrp);
	DBG(NIUMX_DBG_MAP, dip, "niumx_map: ret %d.\n", ret);
err:
	/* success path also falls through here; both props always freed */
	kmem_free(rng_p - i, rnglen);
	kmem_free(reg_p - rn, reglen);
	return (ret);
}
450
/*
 * niumx_ctlops: bus_ctl entry point.  Handles REPORTDEV, INITCHILD,
 * UNINITCHILD, REGSIZE and NREGS locally; every other op is passed up
 * to the generic ddi_ctlops().  For REGSIZE/NREGS the child's "reg"
 * property (niu_regspec_t entries) is consulted.
 */
int
niumx_ctlops(dev_info_t *dip, dev_info_t *rdip,
    ddi_ctl_enum_t ctlop, void *arg, void *result)
{
	niu_regspec_t *reg_p;
	int	reglen, totreg;

	DBG(NIUMX_DBG_CTLOPS, dip, "niumx_ctlops ctlop=%d.\n", ctlop);
	if (rdip == (dev_info_t *)0)
		return (DDI_FAILURE);

	switch (ctlop) {
	case DDI_CTLOPS_REPORTDEV:
		cmn_err(CE_NOTE, "device: %s@%s, %s%d\n",
		    ddi_node_name(rdip), ddi_get_name_addr(rdip),
		    NIUMX_NAMEINST(rdip));
		return (DDI_SUCCESS);

	case DDI_CTLOPS_INITCHILD:
		return (niumx_initchild((dev_info_t *)arg));

	case DDI_CTLOPS_UNINITCHILD:
		niumx_removechild((dev_info_t *)arg);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_REGSIZE:
	case DDI_CTLOPS_NREGS:
		/* fall through */
		break;
	default:
		DBG(NIUMX_DBG_CTLOPS, dip, "just pass to ddi_cltops.\n");
		return (ddi_ctlops(dip, rdip, ctlop, arg, result));
	}

	/* REGSIZE/NREGS */

	*(int *)result = 0;

	if (ddi_getlongprop(DDI_DEV_T_NONE, rdip, DDI_PROP_DONTPASS |
	    DDI_PROP_CANSLEEP, "reg", (caddr_t)&reg_p, &reglen) != DDI_SUCCESS)
		return (DDI_FAILURE);

	totreg = reglen / sizeof (niu_regspec_t);
	if (ctlop == DDI_CTLOPS_NREGS) {
		DBG(NIUMX_DBG_CTLOPS, (dev_info_t *)dip,
		    "niumx_ctlops NREGS=%d.\n", totreg);
		*(int *)result = totreg;
	} else if (ctlop == DDI_CTLOPS_REGSIZE) {
		int	rn;
		rn = *(int *)arg;
		/* requested rnumber must exist in the child's "reg" */
		if (rn >= totreg) {
			kmem_free(reg_p, reglen);
			return (DDI_FAILURE);
		}
		*(off_t *)result = (reg_p + rn)->size_low;
		DBG(NIUMX_DBG_CTLOPS, (dev_info_t *)dip,
		    "rn = %d, REGSIZE=%x.\n", rn, *(off_t *)result);
	}

	kmem_free(reg_p, reglen);
	return (DDI_SUCCESS);
}
516
517 /*
518 * niumx_name_child
519 *
520 * This function is called from init_child to name a node. It is
521 * also passed as a callback for node merging functions.
522 *
523 * return value: DDI_SUCCESS, DDI_FAILURE
524 */
525 static int
niumx_name_child(dev_info_t * child,char * name,int namelen)526 niumx_name_child(dev_info_t *child, char *name, int namelen)
527 {
528 niu_regspec_t *r;
529 uint_t n;
530
531 DBG(NIUMX_DBG_CHK_MOD, (dev_info_t *)child, "==> niumx_name_child\n");
532
533 if (ndi_dev_is_persistent_node(child) == 0) {
534 char **unit_addr;
535
536 /* name .conf nodes by "unit-address" property */
537 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, child,
538 DDI_PROP_DONTPASS, "unit-address", &unit_addr, &n) !=
539 DDI_PROP_SUCCESS) {
540 cmn_err(CE_WARN, "cannot name node from %s.conf",
541 ddi_driver_name(child));
542 return (DDI_FAILURE);
543 }
544 if (n != 1 || *unit_addr == NULL || **unit_addr == 0) {
545 cmn_err(CE_WARN, "unit-address property in %s.conf"
546 " not well-formed", ddi_driver_name(child));
547 ddi_prop_free(unit_addr);
548 return (DDI_FAILURE);
549 }
550
551 (void) snprintf(name, namelen, "%s", *unit_addr);
552 ddi_prop_free(unit_addr);
553 return (DDI_SUCCESS);
554 }
555
556 /* name hardware nodes by "reg" property */
557 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
558 "reg", (int **)&r, &n) != DDI_SUCCESS) {
559 cmn_err(CE_WARN, "reg property not well-formed");
560 return (DDI_FAILURE);
561 }
562 (void) snprintf(name, namelen, "%x", (r[0].addr_high));
563 ddi_prop_free(r);
564 return (DDI_SUCCESS);
565 }
566
/*
 * DDI_CTLOPS_INITCHILD handler: assign a unit address to a child node.
 * Prototype (.conf) nodes are merged into their matching hardware node
 * via ndi_merge_node(); a prototype never survives as a node itself, so
 * every prototype path returns a failure code to have it removed.
 */
static int
niumx_initchild(dev_info_t *child)
{
	char name[MAXNAMELEN];

	DBG(NIUMX_DBG_CHK_MOD, (dev_info_t *)child, "==> niumx_initchild\n");
	/*
	 * Non-peristent nodes indicate a prototype node with per-instance
	 * properties to be merged into the real h/w device node.
	 */
	if (ndi_dev_is_persistent_node(child) == 0) {
		niu_regspec_t *r;
		uint_t n;

		/* a prototype node must NOT carry its own "reg" */
		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, child,
		    DDI_PROP_DONTPASS, "reg", (int **)&r, &n) ==
		    DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "cannot merge prototype from %s.conf",
			    ddi_driver_name(child));
			ddi_prop_free(r);
			return (DDI_NOT_WELL_FORMED);
		}

		if (niumx_name_child(child, name, MAXNAMELEN) != DDI_SUCCESS)
			return (DDI_NOT_WELL_FORMED);

		ddi_set_name_addr(child, name);
		ddi_set_parent_data(child, NULL);

		/*
		 * Try to merge the properties from this prototype
		 * node into real h/w nodes.
		 */
		if (ndi_merge_node(child, niumx_name_child) == DDI_SUCCESS) {
			/*
			 * Merged ok - return failure to remove the node.
			 */
			ddi_set_name_addr(child, NULL);
			return (DDI_FAILURE);
		}

		/*
		 * The child was not merged into a h/w node,
		 * but there's not much we can do with it other
		 * than return failure to cause the node to be removed.
		 */
		cmn_err(CE_WARN, "!%s@%s: %s.conf properties not merged",
		    ddi_driver_name(child), ddi_get_name_addr(child),
		    ddi_driver_name(child));
		ddi_set_name_addr(child, NULL);
		return (DDI_NOT_WELL_FORMED);
	}

	/*
	 * Initialize real h/w nodes
	 */
	if (niumx_name_child(child, name, MAXNAMELEN) != DDI_SUCCESS)
		return (DDI_FAILURE);

	ddi_set_name_addr(child, name);
	return (DDI_SUCCESS);
}
630
/*
 * DDI_CTLOPS_UNINITCHILD handler: undo niumx_initchild() — clear the
 * child's unit address, remove any minor nodes, and strip the
 * driver-added properties.
 */
static void
niumx_removechild(dev_info_t *dip)
{
	ddi_set_name_addr(dip, NULL);
	ddi_remove_minor_node(dip, NULL);
	impl_rem_dev_props(dip);
}
638
639
640
641 /*
642 * bus dma alloc handle entry point:
643 */
644 /*ARGSUSED*/
645 int
niumx_dma_allochdl(dev_info_t * dip,dev_info_t * rdip,ddi_dma_attr_t * attrp,int (* waitfp)(caddr_t),caddr_t arg,ddi_dma_handle_t * handlep)646 niumx_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attrp,
647 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
648 {
649 ddi_dma_impl_t *mp;
650 int sleep = (waitfp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
651
652 DBG(NIUMX_DBG_DMA_ALLOCH, dip, "rdip=%s%d\n", NIUMX_NAMEINST(rdip));
653
654 if (attrp->dma_attr_version != DMA_ATTR_V0) {
655 DBG(NIUMX_DBG_DMA_ALLOCH,
656 (dev_info_t *)dip, "DDI_DMA_BADATTR\n");
657 return (DDI_DMA_BADATTR);
658 }
659
660 /* Caution: we don't use zalloc to enhance performance! */
661 if ((mp = kmem_alloc(sizeof (ddi_dma_impl_t), sleep)) == 0) {
662 DBG(NIUMX_DBG_DMA_ALLOCH, dip, "can't alloc ddi_dma_impl_t\n");
663 return (DDI_FAILURE);
664 }
665 mp->dmai_rdip = rdip;
666 mp->dmai_pfnlst = NULL;
667 mp->dmai_cookie = NULL;
668 mp->dmai_ncookies = 0;
669 mp->dmai_curcookie = 0;
670 mp->dmai_fault = 0;
671 mp->dmai_fault_check = NULL;
672 mp->dmai_fault_notify = NULL;
673
674 mp->dmai_attr = *attrp; /* set requestors attr info */
675
676 DBG(NIUMX_DBG_DMA_ALLOCH, dip, "mp=%p\n", mp);
677
678 *handlep = (ddi_dma_handle_t)mp;
679 return (DDI_SUCCESS);
680 }
681
682
683 /*
684 * bus dma free handle entry point:
685 */
686 /*ARGSUSED*/
687 int
niumx_dma_freehdl(dev_info_t * dip,dev_info_t * rdip,ddi_dma_handle_t handle)688 niumx_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
689 {
690 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
691
692 if (mp->dmai_cookie)
693 kmem_free(mp->dmai_cookie, sizeof (ddi_dma_cookie_t));
694 kmem_free(mp, sizeof (ddi_dma_impl_t));
695
696 return (DDI_SUCCESS);
697 }
698
699
/*
 * bus dma bind handle entry point:
 *
 * check/enforce DMA type, setup pfn0 and some other key pieces
 * of this dma request.
 * Note: this only works with DMA_OTYP_VADDR, and makes use of the known
 * fact that only contiguous memory blocks will be passed in.
 * Therefore only one cookie will ever be returned.
 *
 * return values:
 *	DDI_DMA_NOMAPPING - can't get valid pfn0, or bad dma type
 *	DDI_DMA_NORESOURCES
 *	DDI_SUCCESS
 *
 * dma handle members affected (set on exit):
 * mp->dmai_object		- dmareq->dmar_object
 * mp->dmai_rflags		- dmareq->dmar_flags
 * mp->dmai_pfn0		- 1st page pfn (if va/size pair and not shadow)
 * mp->dmai_roffset		- initialized to starting page offset
 * mp->dmai_size		- # of total pages of entire object
 * mp->dmai_cookie		- new cookie alloc'd
 */
/*ARGSUSED*/
int
niumx_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, ddi_dma_req_t *dmareq,
    ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	int (*waitfp)(caddr_t) = dmareq->dmar_fp;
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	ddi_dma_obj_t *dobj_p = &dmareq->dmar_object;
	uint32_t offset;
	pfn_t pfn0;
	int ret;

	DBG(NIUMX_DBG_DMA_BINDH, dip, "rdip=%s%d mp=%p dmareq=%p\n",
	    NIUMX_NAMEINST(rdip), mp, dmareq);

	/* first check dma type */
	/* & binds tighter than |: (flags & DMP_DDIFLAGS) | DMP_NOSYNC */
	mp->dmai_rflags = dmareq->dmar_flags & DMP_DDIFLAGS | DMP_NOSYNC;
	switch (dobj_p->dmao_type) {
	case DMA_OTYP_VADDR: {
		caddr_t vaddr = dobj_p->dmao_obj.virt_obj.v_addr;
		struct as *as_p = dobj_p->dmao_obj.virt_obj.v_as;
		/* NULL address space means kernel virtual addresses */
		struct hat *hat_p = as_p ? as_p->a_hat : kas.a_hat;
		offset = (ulong_t)vaddr & NIUMX_PAGE_OFFSET;
		pfn0 = hat_getpfnum(hat_p, vaddr);
		}
		break;

	case DMA_OTYP_BUFVADDR:
	case DMA_OTYP_PAGES:
	case DMA_OTYP_PADDR:
	default:
		cmn_err(CE_WARN, "%s%d requested unsupported dma type %x",
		    NIUMX_NAMEINST(mp->dmai_rdip), dobj_p->dmao_type);
		ret = DDI_DMA_NOMAPPING;
		goto err;
	}
	if (pfn0 == PFN_INVALID) {
		cmn_err(CE_WARN, "%s%d: invalid pfn0 for DMA object %p",
		    NIUMX_NAMEINST(dip), (void *)dobj_p);
		ret = DDI_DMA_NOMAPPING;
		goto err;
	}
	mp->dmai_object	 = *dobj_p;			/* whole object */
	mp->dmai_pfn0	 = (void *)pfn0;		/* cache pfn0 */
	mp->dmai_roffset = offset;			/* pg0 offset */
	mp->dmai_mapping = mp->dmai_roffset | NIUMX_PTOB(pfn0);
	mp->dmai_size = mp->dmai_object.dmao_size;

	DBG(NIUMX_DBG_DMA_BINDH, dip, "check pfn: mp=%p pfn0=%x\n",
	    mp, mp->dmai_pfn0);
	/* the single cookie covers the whole contiguous object */
	if (!(mp->dmai_cookie = kmem_zalloc(sizeof (ddi_dma_cookie_t),
	    waitfp == DDI_DMA_SLEEP ? KM_SLEEP : KM_NOSLEEP))) {
		ret = DDI_DMA_NORESOURCES;
		goto err;
	}
	mp->dmai_cookie->dmac_laddress = mp->dmai_mapping;
	mp->dmai_cookie->dmac_size = mp->dmai_size;
	mp->dmai_ncookies = 1;
	mp->dmai_curcookie = 0;
	*ccountp = 1;
	*cookiep = *mp->dmai_cookie;
	DBG(NIUMX_DBG_DMA_BINDH, dip, "cookie %" PRIx64 "+%x, count=%d\n",
	    cookiep->dmac_address, cookiep->dmac_size, *ccountp);
	return (DDI_DMA_MAPPED);

err:
	DBG(NIUMX_DBG_DMA_BINDH, (dev_info_t *)dip,
	    "niumx_dma_bindhdl error ret=%d\n", ret);
	return (ret);
}
793
794 /*
795 * bus dma unbind handle entry point:
796 */
797 /*ARGSUSED*/
798 int
niumx_dma_unbindhdl(dev_info_t * dip,dev_info_t * rdip,ddi_dma_handle_t handle)799 niumx_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
800 {
801 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
802
803 DBG(NIUMX_DBG_DMA_UNBINDH, dip, "rdip=%s%d, mp=%p\n",
804 ddi_driver_name(rdip), ddi_get_instance(rdip), handle);
805 if (mp->dmai_cookie) {
806 kmem_free(mp->dmai_cookie, sizeof (ddi_dma_cookie_t));
807 mp->dmai_cookie = NULL;
808 mp->dmai_ncookies = mp->dmai_curcookie = 0;
809 }
810
811 return (DDI_SUCCESS);
812 }
813
/*
 * bus_intr_op entry point: dispatch the DDI interrupt framework's
 * operations.  Only fixed interrupts are supported; unsupported ops
 * return DDI_ENOTSUP.
 */
/*ARGSUSED*/
int
niumx_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
    ddi_intr_handle_impl_t *hdlp, void *result)
{

	int	ret = DDI_SUCCESS;

	DBG(NIUMX_DBG_INTROPS, dip, "niumx_intr_ops: dip=%p rdip=%p intr_op=%x "
	    "handle=%p\n", dip, rdip, intr_op, hdlp);

	switch (intr_op) {

	case DDI_INTROP_SUPPORTED_TYPES:
		/* fixed interrupts only -- no MSI/MSI-X on this nexus */
		*(int *)result = DDI_INTR_TYPE_FIXED;
		break;
	case DDI_INTROP_GETCAP:
		*(int *)result = DDI_INTR_FLAG_LEVEL;
		break;
	case DDI_INTROP_SETCAP:
		ret = DDI_ENOTSUP;
		break;
	case DDI_INTROP_ALLOC:
		/* scratch1 = count,  # of intrs from DDI framework */
		*(int *)result = hdlp->ih_scratch1;
		break;
	case DDI_INTROP_FREE:
		/* Do we need to do anything here?  */
		break;
	case DDI_INTROP_GETPRI:
		*(int *)result = NIUMX_DEFAULT_PIL;
		break;
	case DDI_INTROP_SETPRI:
		ret = DDI_ENOTSUP;
		break;
	case DDI_INTROP_ADDISR:
		ret = niumx_add_intr(dip, rdip, hdlp);
		break;
	case DDI_INTROP_REMISR:
		ret = niumx_rem_intr(dip, rdip, hdlp);
		break;
	case DDI_INTROP_ENABLE:
		ret = niumx_set_intr(dip, rdip, hdlp, HV_INTR_VALID);
		break;
	case DDI_INTROP_DISABLE:
		ret = niumx_set_intr(dip, rdip, hdlp, HV_INTR_NOTVALID);
		break;
	case DDI_INTROP_SETMASK:
		ret = DDI_ENOTSUP;
		break;
	case DDI_INTROP_CLRMASK:
		ret = DDI_ENOTSUP;
		break;
	case DDI_INTROP_GETPENDING:
		ret = DDI_ENOTSUP;
		break;
	case DDI_INTROP_NINTRS:
	case DDI_INTROP_NAVAIL: {
		niudevino_t	*inos_p;
		int	inoslen;

		/* count = number of entries in the child's "interrupts" */
		if (ddi_getlongprop(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS,
		    "interrupts", (caddr_t)&inos_p, &inoslen)
		    != DDI_SUCCESS) {
			ret = DDI_FAILURE;
			break;
		}
		*(int *)result = inoslen / sizeof (uint32_t);
		kmem_free(inos_p, inoslen);
		}
		break;
	case DDI_INTROP_GETTARGET: {
		niumx_devstate_t *niumxds_p;

		niumxds_p = (niumx_devstate_t *)ddi_get_soft_state(niumx_state,
		    ddi_get_instance(dip));

		ret = niumx_get_intr_target(niumxds_p, hdlp->ih_vector,
		    (niucpuid_t *)result);

		}
		break;
	case DDI_INTROP_SETTARGET: {
		niumx_devstate_t *niumxds_p;

		niumxds_p = (niumx_devstate_t *)ddi_get_soft_state(niumx_state,
		    ddi_get_instance(dip));

		/* NOTE: new target CPU id is passed in via *result */
		ret = niumx_set_intr_target(niumxds_p, hdlp->ih_vector,
		    *(niucpuid_t *)result);

		}
		break;
	default:
		ret = DDI_ENOTSUP;
		break;
	}

	DBG(NIUMX_DBG_INTROPS, dip, "niumx_intr_ops: ret=%d\n", ret);
	return (ret);
}
915
/*
 * Enable or disable the interrupt identified by hdlp->ih_vector:
 * sets the sysino's HV valid state to `valid' (HV_INTR_VALID or
 * HV_INTR_NOTVALID) and records that state in the ih table so a later
 * redistribution can restore it.  When enabling, the sysino is first
 * forced back to the idle state so a stale "delivered" state cannot
 * block future interrupts.
 */
int
niumx_set_intr(dev_info_t *dip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp, int valid)
{
	niumx_ih_t	*ih_p;
	int		ret = DDI_SUCCESS;
	uint64_t	hvret;
	niumx_devstate_t	*niumxds_p;	/* devstate pointer */
	int		instance = ddi_get_instance(dip);

	niumxds_p = (niumx_devstate_t *)ddi_get_soft_state(niumx_state,
	    instance);

	ASSERT(hdlp->ih_inum < NIUMX_MAX_INTRS);

	/* table is indexed by devino (ih_vector) */
	ih_p = niumxds_p->niumx_ihtable + hdlp->ih_vector;

	DBG(NIUMX_DBG_A_INTX, dip,
	    "niumx_set_intr: rdip=%s%d, valid=%d %s (%x,%x)\n",
	    NIUMX_NAMEINST(rdip), valid, valid ? "enabling" : "disabling",
	    ih_p->ih_inum, ih_p->ih_sysino);

	if (valid == HV_INTR_VALID)
		(void) hvio_intr_setstate(ih_p->ih_sysino, HV_INTR_IDLE_STATE);
	if ((hvret = hvio_intr_setvalid(ih_p->ih_sysino, valid))
	    != H_EOK) {
		DBG(NIUMX_DBG_A_INTX, dip,
		    "hvio_intr_setvalid failed, ret 0x%x\n", hvret);
		ret = DDI_FAILURE;
	} else
		ih_p->ih_state = valid;

	return (ret);
}
950
951 int
niumx_get_intr_target(niumx_devstate_t * niumxds_p,niudevino_t ino,niucpuid_t * cpu_id)952 niumx_get_intr_target(niumx_devstate_t *niumxds_p, niudevino_t ino,
953 niucpuid_t *cpu_id)
954 {
955 niumx_ih_t *ih_p;
956 niusysino_t sysino;
957 int rval = DDI_SUCCESS;
958
959 ih_p = niumxds_p->niumx_ihtable + ino;
960
961 sysino = ih_p->ih_sysino;
962
963 if (sysino == 0) {
964 rval = EINVAL;
965 goto done;
966 }
967
968 if (hvio_intr_gettarget(sysino, cpu_id) != H_EOK) {
969 rval = EINVAL;
970 goto done;
971 }
972
973 if (ih_p->ih_cpuid != *cpu_id)
974 rval = EIO;
975
976 done:
977 return (rval);
978 }
979
/*
 * Retarget the given ino to cpu_id.  Runs under cpu_lock so the CPU
 * cannot go offline mid-operation.  The target CPU must be a valid,
 * online CPU; pending (delivered) interrupts are drained first, bounded
 * by niumx_intr_timeout, exactly as in niumx_intr_dist().  Returns
 * DDI_SUCCESS, EINVAL (unmapped ino / HV query failure) or DDI_EINVAL
 * (bad or offline target CPU).
 */
int
niumx_set_intr_target(niumx_devstate_t *niumxds_p, niudevino_t ino,
    niucpuid_t cpu_id)
{
	dev_info_t	*dip = niumxds_p->dip;
	niumx_ih_t	*ih_p;
	niucpuid_t	old_cpu_id;
	niusysino_t	sysino;
	int		ret = DDI_SUCCESS;
	int		state;
	hrtime_t	start;
	extern const int _ncpu;
	extern cpu_t	*cpu[];

	mutex_enter(&cpu_lock);

	ih_p = niumxds_p->niumx_ihtable + ino;

	sysino = ih_p->ih_sysino;
	if (sysino == 0) {
		ret = EINVAL;
		goto done;
	}

	if (hvio_intr_gettarget(sysino, &old_cpu_id) != H_EOK) {
		ret = EINVAL;
		goto done;
	}
	if ((cpu_id < _ncpu) && (cpu[cpu_id] && cpu_is_online(cpu[cpu_id]))) {
		/* already there: nothing to do */
		if (cpu_id == old_cpu_id)
			goto done;

		/* check for pending interrupts, busy wait if so */
		for (start = gethrtime(); !panicstr &&
		    (hvio_intr_getstate(sysino, &state) == H_EOK) &&
		    (state == HV_INTR_DELIVERED_STATE); /* */) {
			if (gethrtime() - start > niumx_intr_timeout) {
				cmn_err(CE_WARN, "%s%d: niumx_intr_dist: "
				    "pending interrupt (%x,%lx) timedout\n",
				    ddi_driver_name(dip), ddi_get_instance(dip),
				    ih_p->ih_inum, sysino);
				(void) hvio_intr_setstate(sysino,
				    HV_INTR_IDLE_STATE);
				break;
			}
		}
		(void) hvio_intr_settarget(sysino, cpu_id);
		/* restore the ino's recorded valid/notvalid state */
		if (ih_p->ih_state == HV_INTR_VALID)
			(void) hvio_intr_setvalid(sysino, HV_INTR_VALID);
		else
			(void) hvio_intr_setvalid(sysino, HV_INTR_NOTVALID);
		ih_p->ih_cpuid = cpu_id;
	} else {
		ret = DDI_EINVAL;
	}

done:
	mutex_exit(&cpu_lock);
	return (ret);
}
1040
1041
1042 /*
1043 * niumx_add_intr:
1044 *
1045 * This function is called to register interrupts.
1046 */
1047 int
niumx_add_intr(dev_info_t * dip,dev_info_t * rdip,ddi_intr_handle_impl_t * hdlp)1048 niumx_add_intr(dev_info_t *dip, dev_info_t *rdip,
1049 ddi_intr_handle_impl_t *hdlp)
1050 {
1051 niumx_ih_t *ih_p;
1052 int ret = DDI_SUCCESS;
1053 uint64_t hvret;
1054 niusysino_t sysino;
1055 niumx_devstate_t *niumxds_p; /* devstate pointer */
1056 int instance = ddi_get_instance(dip);
1057
1058 niumxds_p = (niumx_devstate_t *)ddi_get_soft_state(niumx_state,
1059 instance);
1060
1061 /* get new ino */
1062 if (hdlp->ih_inum >= NIUMX_MAX_INTRS) {
1063 DBG(NIUMX_DBG_INTR, dip, "error: inum %d out of range\n",
1064 hdlp->ih_inum);
1065 ret = DDI_FAILURE;
1066 goto done;
1067 }
1068
1069 ih_p = niumxds_p->niumx_ihtable + hdlp->ih_vector;
1070
1071 if ((hvret = hvio_intr_devino_to_sysino(NIUMX_DIP_TO_HANDLE(dip),
1072 hdlp->ih_vector, &sysino)) != H_EOK) {
1073 DBG(NIUMX_DBG_INTR, dip, "hvio_intr_devino_to_sysino failed, "
1074 "ret 0x%x\n", hvret);
1075 ret = DDI_FAILURE;
1076 goto done;
1077 }
1078 ih_p->ih_sysino = sysino;
1079 ih_p->ih_dip = rdip;
1080 ih_p->ih_inum = hdlp->ih_inum;
1081 ih_p->ih_hdlr = hdlp->ih_cb_func;
1082 ih_p->ih_arg1 = hdlp->ih_cb_arg1;
1083 ih_p->ih_arg2 = hdlp->ih_cb_arg2;
1084
1085 DBG(NIUMX_DBG_A_INTX, dip, "niumx_add_intr: rdip=%s%d inum=0x%x "
1086 "handler=%p arg1=%p arg2=%p, new ih_p = %p\n", NIUMX_NAMEINST(rdip),
1087 hdlp->ih_inum, hdlp->ih_cb_func, hdlp->ih_cb_arg1,
1088 hdlp->ih_cb_arg2, ih_p);
1089
1090 if (hdlp->ih_pri == 0)
1091 hdlp->ih_pri = NIUMX_DEFAULT_PIL;
1092
1093 ih_p->ih_pri = hdlp->ih_pri;
1094
1095 DBG(NIUMX_DBG_A_INTX, dip, "for ino %x adding (%x,%x)\n",
1096 hdlp->ih_vector, ih_p->ih_inum, ih_p->ih_sysino);
1097
1098 /* Save sysino value in hdlp */
1099 hdlp->ih_vector = ih_p->ih_sysino;
1100
1101 /* swap in our handler & arg */
1102 DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp, (ddi_intr_handler_t *)niumx_intr_hdlr,
1103 (void *)ih_p, NULL);
1104
1105 ret = i_ddi_add_ivintr(hdlp);
1106
1107 /* Restore orig. interrupt handler & args in handle. */
1108 DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp, ih_p->ih_hdlr, ih_p->ih_arg1,
1109 ih_p->ih_arg2);
1110
1111 if (ret != DDI_SUCCESS) {
1112 DBG(NIUMX_DBG_A_INTX, dip, "i_ddi_add_ivintr error ret=%x\n",
1113 ret);
1114 goto done;
1115 }
1116
1117 /* select cpu, saving it for removal */
1118 ih_p->ih_cpuid = intr_dist_cpuid();
1119
1120 if ((hvret = hvio_intr_settarget(ih_p->ih_sysino, ih_p->ih_cpuid))
1121 != H_EOK) {
1122 DBG(NIUMX_DBG_A_INTX, dip,
1123 "hvio_intr_settarget failed, ret 0x%x\n", hvret);
1124 ret = DDI_FAILURE;
1125 }
1126 done:
1127 DBG(NIUMX_DBG_A_INTX, dip, "done, ret = %d, ih_p 0x%p, hdlp 0x%p\n",
1128 ih_p, hdlp, ret);
1129 return (ret);
1130 }
1131
1132 /*
1133 * niumx_rem_intr:
1134 *
1135 * This function is called to unregister interrupts.
1136 */
1137 /*ARGSUSED*/
1138 int
niumx_rem_intr(dev_info_t * dip,dev_info_t * rdip,ddi_intr_handle_impl_t * hdlp)1139 niumx_rem_intr(dev_info_t *dip, dev_info_t *rdip,
1140 ddi_intr_handle_impl_t *hdlp)
1141 {
1142 niumx_ih_t *ih_p;
1143 int ret = DDI_SUCCESS, state;
1144 hrtime_t start;
1145 niusysino_t sysino;
1146 niumx_devstate_t *niumxds_p; /* devstate pointer */
1147 int instance = ddi_get_instance(dip);
1148
1149 niumxds_p = (niumx_devstate_t *)ddi_get_soft_state(niumx_state,
1150 instance);
1151
1152 ASSERT(hdlp->ih_inum < NIUMX_MAX_INTRS);
1153
1154 ih_p = niumxds_p->niumx_ihtable + hdlp->ih_vector;
1155
1156 sysino = ih_p->ih_sysino;
1157 DBG(NIUMX_DBG_R_INTX, dip, "removing (%x,%x)\n", ih_p->ih_inum, sysino);
1158
1159 (void) hvio_intr_setvalid(sysino, HV_INTR_NOTVALID);
1160
1161 /* check for pending interrupts, busy wait if so */
1162 for (start = gethrtime(); !panicstr &&
1163 (hvio_intr_getstate(sysino, &state) == H_EOK) &&
1164 (state == HV_INTR_DELIVERED_STATE); /* */) {
1165 if (gethrtime() - start > niumx_intr_timeout) {
1166 cmn_err(CE_WARN, "%s%d: niumx_intr_dist: "
1167 "pending interrupt (%x,%lx) timedout\n",
1168 ddi_driver_name(dip), ddi_get_instance(dip),
1169 ih_p->ih_inum, sysino);
1170 ret = DDI_FAILURE;
1171 goto fail;
1172 }
1173 }
1174
1175 ih_p->ih_sysino = 0;
1176
1177 hdlp->ih_vector = (uint32_t)sysino;
1178 if (hdlp->ih_vector != 0)
1179 i_ddi_rem_ivintr(hdlp);
1180
1181 fail:
1182 return (ret);
1183 }
1184
1185 /*
1186 * niumx_intr_hdlr (our interrupt handler)
1187 */
1188 uint_t
niumx_intr_hdlr(void * arg)1189 niumx_intr_hdlr(void *arg)
1190 {
1191 niumx_ih_t *ih_p = (niumx_ih_t *)arg;
1192 uint_t r;
1193
1194 DTRACE_PROBE4(interrupt__start, dev_info_t, ih_p->ih_dip, void *,
1195 ih_p->ih_hdlr, caddr_t, ih_p->ih_arg1, caddr_t, ih_p->ih_arg2);
1196
1197 r = (*ih_p->ih_hdlr)(ih_p->ih_arg1, ih_p->ih_arg2);
1198
1199 DTRACE_PROBE4(interrupt__complete, dev_info_t, ih_p->ih_dip, void *,
1200 ih_p->ih_hdlr, caddr_t, ih_p->ih_arg1, int, r);
1201
1202 (void) hvio_intr_setstate(ih_p->ih_sysino, HV_INTR_IDLE_STATE);
1203 return (r);
1204 }
1205
#ifdef DEBUG
/* Bitmask of enabled debug categories; bit i enables niumx_debug_sym[i]. */
uint64_t niumx_debug_flags = 0;

/* Category labels printed by niumx_dbg(); indexed by niumx_debug_bit_t. */
static char *niumx_debug_sym [] = {	/* same sequence as niumx_debug_bit */
	/*  0 */ "attach",
	/*  1 */ "map",
	/*  2 */ "nex-ctlops",
	/*  3 */ "introps",
	/*  4 */ "intr-add",
	/*  5 */ "intr-rem",
	/*  6 */ "intr",
	/*  7 */ "dma-alloc",
	/*  8 */ "dma-bind",
	/*  9 */ "dma-unbind",
	/* 10 */ "chk-dma-mode"
};
1222
1223 /*ARGSUSED*/
1224 void
niumx_dbg(niumx_debug_bit_t bit,dev_info_t * dip,char * fmt,...)1225 niumx_dbg(niumx_debug_bit_t bit, dev_info_t *dip, char *fmt, ...)
1226 {
1227 va_list ap;
1228 char msgbuf[1024];
1229
1230 if (!(1ull << bit & niumx_debug_flags))
1231 return;
1232 va_start(ap, fmt);
1233 (void) vsprintf(msgbuf, fmt, ap);
1234 va_end(ap);
1235 cmn_err(CE_NOTE, "%s: %s", niumx_debug_sym[bit], msgbuf);
1236 }
1237
1238 #endif /* DEBUG */
1239