/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */


/*
 * Niagara2 Network Interface Unit (NIU) Nexus Driver
 *
 * A "nexus" driver: it does no I/O itself but provides bus services
 * (register mapping, DMA handle management, interrupt plumbing) to the
 * NIU leaf device nodes beneath it, translating DDI bus operations into
 * sun4v hypervisor (hvio_*) calls.
 */

#include <sys/conf.h>
#include <sys/modctl.h>
#include <sys/ddi_impldefs.h>
#include <sys/ddi_subrdefs.h>
#include <sys/ddi.h>
#include <sys/sunndi.h>
#include <sys/sunddi.h>
#include <sys/open.h>
#include <sys/stat.h>
#include <sys/file.h>
#include <sys/machsystm.h>
#include <sys/hsvc.h>
#include <sys/sdt.h>
#include <sys/hypervisor_api.h>
#include <sys/cpuvar.h>
#include "niumx_var.h"

static int niumx_fm_init_child(dev_info_t *, dev_info_t *, int,
    ddi_iblock_cookie_t *);
static int niumx_intr_ops(dev_info_t *dip, dev_info_t *rdip,
    ddi_intr_op_t intr_op, ddi_intr_handle_impl_t *hdlp, void *result);
static int niumx_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int niumx_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int niumx_set_intr(dev_info_t *dip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp, int valid);
static int niumx_add_intr(dev_info_t *dip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp);
static int niumx_rem_intr(dev_info_t *dip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp);
static uint_t niumx_intr_hdlr(void *arg);
static int niumx_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
    off_t offset, off_t len, caddr_t *addrp);
static int niumx_dma_allochdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_attr_t *attrp,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep);
static int niumx_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handlep);
static int niumx_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, ddi_dma_req_t *dmareq,
    ddi_dma_cookie_t *cookiep, uint_t *ccountp);
static int niumx_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle);
static int niumx_ctlops(dev_info_t *dip, dev_info_t *rdip,
    ddi_ctl_enum_t op, void *arg, void *result);

int niumxtool_init(dev_info_t *dip);
void niumxtool_uninit(dev_info_t *dip);

int niumx_get_intr_target(niumx_devstate_t *niumxds_p, niudevino_t ino,
    niucpuid_t *cpu_id);
int niumx_set_intr_target(niumx_devstate_t *niumxds_p, niudevino_t ino,
    niucpuid_t cpu_id);

/* Bus operations vector handed to the DDI; unused slots are 0. */
static struct bus_ops niumx_bus_ops = {
	BUSO_REV,
	niumx_map,
	0,
	0,
	0,
	i_ddi_map_fault,
	0,
	niumx_dma_allochdl,
	niumx_dma_freehdl,
	niumx_dma_bindhdl,
	niumx_dma_unbindhdl,
	0,
	0,
	0,
	niumx_ctlops,
	ddi_bus_prop_op,
	0,			/* (*bus_get_eventcookie)(); */
	0,			/* (*bus_add_eventcall)(); */
	0,			/* (*bus_remove_eventcall)(); */
	0,			/* (*bus_post_event)(); */
	0,			/* (*bus_intr_ctl)(); */
	0,			/* (*bus_config)(); */
	0,			/* (*bus_unconfig)(); */
	niumx_fm_init_child,	/* (*bus_fm_init)(); */
	0,			/* (*bus_fm_fini)(); */
	0,			/* (*bus_enter)() */
	0,			/* (*bus_exit)() */
	0,			/* (*bus_power)() */
	niumx_intr_ops		/* (*bus_intr_op)(); */
};

/* Character device entry points, defined in a sibling file. */
extern struct cb_ops niumx_cb_ops;

static struct dev_ops niumx_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	ddi_no_info,		/* info */
	nulldev,		/* identify */
	0,			/* probe */
	niumx_attach,		/* attach */
	niumx_detach,		/* detach */
	nulldev,		/* reset */
	&niumx_cb_ops,		/* driver operations */
	&niumx_bus_ops,		/* bus operations */
	0,			/* power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};

/* Module linkage information for the kernel. */
static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module */
	"NIU Nexus Driver",
	&niumx_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modldrv,
	NULL
};

/* Opaque handle for per-instance soft state (niumx_devstate_t). */
void *niumx_state;

/*
 * forward function declarations:
 */
static void niumx_removechild(dev_info_t *);
static int niumx_initchild(dev_info_t *child);

/*
 * _init: module load entry point.  Verifies the hypervisor interrupt
 * API version, then sets up soft state and installs the module.
 */
int
_init(void)
{
	int e;
	uint64_t mjrnum;
	uint64_t mnrnum;

	/*
	 * Check HV intr group api versioning.
	 * This driver uses the old interrupt routines which are supported
	 * in old firmware in the CORE API group and in newer firmware in
	 * the INTR API group. Support for these calls will be dropped
	 * once the INTR API group major goes to 2.
	 */
	if ((hsvc_version(HSVC_GROUP_INTR, &mjrnum, &mnrnum) == 0) &&
	    (mjrnum > NIUMX_INTR_MAJOR_VER)) {
		cmn_err(CE_WARN, "niumx: unsupported intr api group: "
		    "maj:0x%lx, min:0x%lx", mjrnum, mnrnum);
		return (ENOTSUP);
	}

	/* If mod_install() fails, undo the soft-state initialization. */
	if ((e = ddi_soft_state_init(&niumx_state, sizeof (niumx_devstate_t),
	    1)) == 0 && (e = mod_install(&modlinkage)) != 0)
		ddi_soft_state_fini(&niumx_state);
	return (e);
}

/* _fini: module unload entry point; tears down soft state on success. */
int
_fini(void)
{
	int e;
	if ((e = mod_remove(&modlinkage)) == 0)
		ddi_soft_state_fini(&niumx_state);
	return (e);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}


/* How long to busy-wait for a DELIVERED interrupt to drain. */
hrtime_t niumx_intr_timeout = 2ull * NANOSEC; /* 2 seconds in nanoseconds */

/*
 * niumx_intr_dist: interrupt redistribution callback (registered via
 * intr_dist_add()).  For every bound sysino whose current target differs
 * from the CPU chosen by intr_dist_cpuid(), quiesce the interrupt, drain
 * any in-flight delivery (bounded busy-wait), retarget it, and restore
 * its previous valid/notvalid state.
 */
void
niumx_intr_dist(void *arg)
{
	niumx_devstate_t *niumxds_p = (niumx_devstate_t *)arg;
	kmutex_t *lock_p = &niumxds_p->niumx_mutex;
	int i;
	niumx_ih_t *ih_p = niumxds_p->niumx_ihtable;

	DBG(NIUMX_DBG_A_INTX, NULL, "niumx_intr_dist entered\n");
	mutex_enter(lock_p);
	for (i = 0; i < NIUMX_MAX_INTRS; i++, ih_p++) {
		niusysino_t sysino = ih_p->ih_sysino;
		niucpuid_t cpuid;
		int state;
		hrtime_t start;
		dev_info_t *dip = ih_p->ih_dip;

		/* skip unbound slots and ones already on the target CPU */
		if (!sysino || (cpuid = intr_dist_cpuid()) == ih_p->ih_cpuid)
			continue;

		(void) hvio_intr_setvalid(sysino, HV_INTR_NOTVALID);

		/* check for pending interrupts, busy wait if so */
		for (start = gethrtime(); !panicstr &&
		    (hvio_intr_getstate(sysino, &state) == H_EOK) &&
		    (state == HV_INTR_DELIVERED_STATE); /* */) {
			if (gethrtime() - start > niumx_intr_timeout) {
				cmn_err(CE_WARN, "%s%d: niumx_intr_dist: "
				    "pending interrupt (%x,%lx) timedout\n",
				    ddi_driver_name(dip), ddi_get_instance(dip),
				    ih_p->ih_inum, sysino);
				/* force idle so retargeting can proceed */
				(void) hvio_intr_setstate(sysino,
				    HV_INTR_IDLE_STATE);
				break;
			}
		}
		(void) hvio_intr_settarget(sysino, cpuid);

		/* restore whatever valid state the handler had */
		if (ih_p->ih_state == HV_INTR_VALID)
			(void) hvio_intr_setvalid(sysino, HV_INTR_VALID);
		else
			(void) hvio_intr_setvalid(sysino, HV_INTR_NOTVALID);

		ih_p->ih_cpuid = cpuid;
	}
	mutex_exit(lock_p);
}

/*
 * niumx_attach: DDI_ATTACH sets up per-instance soft state, derives the
 * hypervisor device handle from the first "reg" entry, initializes the
 * interrupt table, registers for interrupt redistribution, and enables
 * FMA ereport capability.  DDI_RESUME is not supported (falls through
 * to DDI_SUCCESS via `ret`).
 */
static int
niumx_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int instance = ddi_get_instance(dip);
	niumx_devstate_t *niumxds_p;	/* devstate pointer */
	niu_regspec_t	*reg_p;
	niumx_ih_t	*ih_p;
	uint_t		reglen;
	int		i, ret = DDI_SUCCESS;

	switch (cmd) {
	case DDI_ATTACH:
		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, "reg", (int **)&reg_p, &reglen)
		    != DDI_PROP_SUCCESS) {
			DBG(NIUMX_DBG_ATTACH, dip, "reg lookup failed\n");
			ret = DDI_FAILURE;
			goto done;
		}

		/*
		 * Allocate and get soft state structure.
		 */
		if (ddi_soft_state_zalloc(niumx_state, instance)
		    != DDI_SUCCESS) {
			ret = DDI_FAILURE;
			goto prop_free;
		}
		niumxds_p = (niumx_devstate_t *)ddi_get_soft_state(niumx_state,
		    instance);
		niumxds_p->dip = dip;
		niumxds_p->niumx_open_count = 0;
		mutex_init(&niumxds_p->niumx_mutex, NULL, MUTEX_DRIVER, NULL);

		DBG(NIUMX_DBG_ATTACH, dip, "soft state alloc'd instance = %d, "
		    "niumxds_p = %p\n", instance, niumxds_p);

		/* hv devhdl: low 28-bit of 1st "reg" entry's addr.hi */
		niumxds_p->niumx_dev_hdl = (niudevhandle_t)(reg_p->addr_high &
		    NIUMX_DEVHDLE_MASK);

		/* mark every interrupt slot unbound/disabled */
		ih_p = niumxds_p->niumx_ihtable;
		for (i = 0; i < NIUMX_MAX_INTRS; i++, ih_p++) {
			ih_p->ih_sysino = 0;
			ih_p->ih_state = HV_INTR_NOTVALID;
		}

		/* add interrupt redistribution callback */
		intr_dist_add(niumx_intr_dist, niumxds_p);

		niumxds_p->niumx_fm_cap = DDI_FM_EREPORT_CAPABLE;

		ddi_fm_init(niumxds_p->dip, &niumxds_p->niumx_fm_cap,
		    &niumxds_p->niumx_fm_ibc);

		if (niumxtool_init(dip) != DDI_SUCCESS) {
			ret = DDI_FAILURE;
			goto cleanup;
		}

		/* success: still free the "reg" prop before returning */
		ret = DDI_SUCCESS;
		goto prop_free;
cleanup:
		mutex_destroy(&niumxds_p->niumx_mutex);
		ddi_soft_state_free(niumx_state, ddi_get_instance(dip));
prop_free:
		ddi_prop_free(reg_p);
done:
		return (ret);

	case DDI_RESUME:
	default:
		break;
	}
	return (ret);
}

/*
 * niumx_detach: DDI_DETACH unwinds everything attach set up, in reverse
 * order.  DDI_SUSPEND is not supported (returns DDI_FAILURE).
 */
static int
niumx_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	niumx_devstate_t *niumxds_p;

	switch (cmd) {
	case DDI_DETACH:

		niumxds_p = (niumx_devstate_t *)
		    ddi_get_soft_state(niumx_state, ddi_get_instance(dip));

		intr_dist_rem(niumx_intr_dist, niumxds_p);
		ddi_fm_fini(dip);
		niumxtool_uninit(dip);
		mutex_destroy(&niumxds_p->niumx_mutex);
		ddi_soft_state_free(niumx_state, ddi_get_instance(dip));
		return (DDI_SUCCESS);

	case DDI_SUSPEND:
	default:
		break;
	}
	return (DDI_FAILURE);
}


/*
 * Function used to initialize FMA for our children nodes. Called
 * through pci busops when child node calls ddi_fm_init.
 */
/*ARGSUSED*/
int
niumx_fm_init_child(dev_info_t *dip, dev_info_t *cdip, int cap,
    ddi_iblock_cookie_t *ibc_p)
{
	niumx_devstate_t *niumxds_p = NIUMX_DIP_TO_STATE(dip);

	ASSERT(ibc_p != NULL);
	*ibc_p = niumxds_p->niumx_fm_ibc;

	return (niumxds_p->niumx_fm_cap);
}


/*
 * niumx_map: bus_map entry point.  Translates a child's "reg" entry
 * (selected by rnumber) through this nexus's "ranges" property into a
 * parent-format regspec, then recurses upward via ddi_map().
 */
/*ARGSUSED*/
int
niumx_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
    off_t offset, off_t len, caddr_t *vaddrp)
{
	struct regspec p_regspec;
	ddi_map_req_t p_mapreq;
	niu_regspec_t	*reg_p;
	int 	i, rn = mp->map_obj.rnumber, reglen, rnglen, rngnum, ret;
	niumx_ranges_t	*rng_p;

	uint32_t	reg_begin, rng_begin;

	DBG(NIUMX_DBG_MAP, dip, "%s%d: mapping %s%d reg %d\n",
	    NIUMX_NAMEINST(dip), NIUMX_NAMEINST(rdip), rn);

	if (ddi_getlongprop(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS,
	    "reg", (caddr_t)&reg_p, &reglen) != DDI_SUCCESS)
		return (DDI_FAILURE);

	if (rn < 0 || (rn >= reglen / sizeof (niu_regspec_t))) {
		DBG(NIUMX_DBG_MAP, dip,  "rnumber out of range: %d\n", rn);
		kmem_free(reg_p, reglen);
		return (DDI_ME_RNUMBER_RANGE);
	}

	/* build regspec up for parent */
	p_mapreq = *mp;		/* dup the whole structure */
	p_mapreq.map_type = DDI_MT_REGSPEC;
	p_mapreq.map_obj.rp = &p_regspec;

	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "ranges",
	    (caddr_t)&rng_p, &rnglen) != DDI_SUCCESS) {
		DBG(NIUMX_DBG_MAP,  dip, "%s%d: no ranges property\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		kmem_free(reg_p, reglen);
		return (DDI_FAILURE);
	}

	/* locate matching ranges record */
	rngnum = rnglen / sizeof (niumx_ranges_t);
	for (i = 0, reg_p += rn; i < rngnum; rng_p++, i++) {
		if (reg_p->addr_high == rng_p->child_hi)
			break;
	}

	if (i >= rngnum) {
		DBG(NIUMX_DBG_MAP, dip, "ranges record for reg[%d] "
		    "not found.\n", rn);
		ret = DDI_ME_REGSPEC_RANGE;
		goto err;
	}

	/*
	 * validate request has matching bus type and within 4G
	 * limit by comparing addr.hi of "ranges" and child "reg".
	 */

	ASSERT(reg_p->size_high == 0);

	rng_begin = rng_p->child_lo;
	reg_begin = reg_p->addr_low;
	/* check to verify reg bounds are within rng bounds */
	if (reg_begin < rng_begin || (reg_begin + (reg_p->size_low - 1)) >
	    (rng_begin + (rng_p->size_lo - 1))) {
		DBG(NIUMX_DBG_MAP, dip, "size out of range for reg[%d].\n", rn);
		ret = DDI_ME_REGSPEC_RANGE;
		goto err;
	}

	p_regspec.regspec_bustype = rng_p->parent_hi;
	p_regspec.regspec_addr = reg_begin - rng_begin + rng_p->parent_lo;
	p_regspec.regspec_size = reg_p->size_low;
	DBG(NIUMX_DBG_MAP,  dip, "regspec:bus,addr,size = (%x,%x,%x)\n",
	    p_regspec.regspec_bustype, p_regspec.regspec_addr,
	    p_regspec.regspec_size);
	ret = ddi_map(dip, &p_mapreq, 0, 0, vaddrp);
	DBG(NIUMX_DBG_MAP, dip, "niumx_map: ret %d.\n", ret);
err:
	/*
	 * Common exit for both success and failure.  reg_p/rng_p were
	 * advanced during the search, so back them up to the original
	 * allocation bases before freeing.
	 */
	kmem_free(rng_p - i, rnglen);
	kmem_free(reg_p - rn, reglen);
	return (ret);
}

/*
 * niumx_ctlops
 */
int
niumx_ctlops(dev_info_t *dip, dev_info_t *rdip,
    ddi_ctl_enum_t ctlop, void *arg, void *result)
{
	niu_regspec_t *reg_p;
	int	reglen, totreg;

	DBG(NIUMX_DBG_CTLOPS, dip, "niumx_ctlops ctlop=%d.\n", ctlop);
	if (rdip == (dev_info_t *)0)
		return (DDI_FAILURE);

	switch (ctlop) {
	case DDI_CTLOPS_REPORTDEV:
		cmn_err(CE_NOTE, "device: %s@%s, %s%d\n",
		    ddi_node_name(rdip), ddi_get_name_addr(rdip),
		    NIUMX_NAMEINST(rdip));
		return (DDI_SUCCESS);

	case DDI_CTLOPS_INITCHILD:
		return (niumx_initchild((dev_info_t *)arg));

	case DDI_CTLOPS_UNINITCHILD:
		niumx_removechild((dev_info_t *)arg);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_REGSIZE:
	case DDI_CTLOPS_NREGS:
		/* fall through */
		break;
	default:
		/* everything else is delegated to the generic handler */
		DBG(NIUMX_DBG_CTLOPS, dip, "just pass to ddi_cltops.\n");
		return (ddi_ctlops(dip, rdip, ctlop, arg, result));
	}

	/* REGSIZE/NREGS */

	*(int *)result = 0;

	if (ddi_getlongprop(DDI_DEV_T_NONE, rdip, DDI_PROP_DONTPASS |
	    DDI_PROP_CANSLEEP, "reg", (caddr_t)&reg_p, &reglen) != DDI_SUCCESS)
		return (DDI_FAILURE);

	totreg = reglen / sizeof (niu_regspec_t);
	if (ctlop == DDI_CTLOPS_NREGS) {
		DBG(NIUMX_DBG_CTLOPS, (dev_info_t *)dip,
		    "niumx_ctlops NREGS=%d.\n", totreg);
		*(int *)result = totreg;
	} else if (ctlop == DDI_CTLOPS_REGSIZE) {
		int	rn;
		rn = *(int *)arg;
		if (rn >= totreg) {
			kmem_free(reg_p, reglen);
			return (DDI_FAILURE);
		}
		*(off_t *)result = (reg_p + rn)->size_low;
		DBG(NIUMX_DBG_CTLOPS, (dev_info_t *)dip,
		    "rn = %d, REGSIZE=%x.\n", rn, *(off_t *)result);
	}

	kmem_free(reg_p, reglen);
	return (DDI_SUCCESS);
}

/*
 * niumx_name_child
 *
 * This function is called from init_child to name a node. It is
 * also passed as a callback for node merging functions.
 *
 * return value: DDI_SUCCESS, DDI_FAILURE
 */
static int
niumx_name_child(dev_info_t *child, char *name, int namelen)
{
	niu_regspec_t *r;
	uint_t n;

	DBG(NIUMX_DBG_CHK_MOD, (dev_info_t *)child, "==> niumx_name_child\n");

	if (ndi_dev_is_persistent_node(child) == 0) {
		char **unit_addr;

		/* name .conf nodes by "unit-address" property */
		if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, child,
		    DDI_PROP_DONTPASS, "unit-address", &unit_addr, &n) !=
		    DDI_PROP_SUCCESS) {
			cmn_err(CE_WARN, "cannot name node from %s.conf",
			    ddi_driver_name(child));
			return (DDI_FAILURE);
		}
		if (n != 1 || *unit_addr == NULL || **unit_addr == 0) {
			cmn_err(CE_WARN, "unit-address property in %s.conf"
			    " not well-formed", ddi_driver_name(child));
			ddi_prop_free(unit_addr);
			return (DDI_FAILURE);
		}

		(void) snprintf(name, namelen, "%s", *unit_addr);
		ddi_prop_free(unit_addr);
		return (DDI_SUCCESS);
	}

	/* name hardware nodes by "reg" property */
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
	    "reg", (int **)&r, &n) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "reg property not well-formed");
		return (DDI_FAILURE);
	}
	(void) snprintf(name, namelen, "%x", (r[0].addr_high));
	ddi_prop_free(r);
	return (DDI_SUCCESS);
}

/*
 * niumx_initchild: DDI_CTLOPS_INITCHILD handler.  Prototype (.conf)
 * nodes are merged into their h/w counterparts; real h/w nodes just
 * get their unit address set from "reg".
 */
static int
niumx_initchild(dev_info_t *child)
{
	char name[MAXNAMELEN];

	DBG(NIUMX_DBG_CHK_MOD, (dev_info_t *)child, "==> niumx_initchild\n");
	/*
	 * Non-peristent nodes indicate a prototype node with per-instance
	 * properties to be merged into the real h/w device node.
	 */
	if (ndi_dev_is_persistent_node(child) == 0) {
		niu_regspec_t *r;
		uint_t n;

		/* a prototype node must NOT carry its own "reg" */
		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, child,
		    DDI_PROP_DONTPASS, "reg", (int **)&r, &n) ==
		    DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "cannot merge prototype from %s.conf",
			    ddi_driver_name(child));
			ddi_prop_free(r);
			return (DDI_NOT_WELL_FORMED);
		}

		if (niumx_name_child(child, name, MAXNAMELEN) != DDI_SUCCESS)
			return (DDI_NOT_WELL_FORMED);

		ddi_set_name_addr(child, name);
		ddi_set_parent_data(child, NULL);

		/*
		 * Try to merge the properties from this prototype
		 * node into real h/w nodes.
		 */
		if (ndi_merge_node(child, niumx_name_child) == DDI_SUCCESS) {
			/*
			 * Merged ok - return failure to remove the node.
			 */
			ddi_set_name_addr(child, NULL);
			return (DDI_FAILURE);
		}

		/*
		 * The child was not merged into a h/w node,
		 * but there's not much we can do with it other
		 * than return failure to cause the node to be removed.
		 */
		cmn_err(CE_WARN, "!%s@%s: %s.conf properties not merged",
		    ddi_driver_name(child), ddi_get_name_addr(child),
		    ddi_driver_name(child));
		ddi_set_name_addr(child, NULL);
		return (DDI_NOT_WELL_FORMED);
	}

	/*
	 * Initialize real h/w nodes
	 */
	if (niumx_name_child(child, name, MAXNAMELEN) != DDI_SUCCESS)
		return (DDI_FAILURE);

	ddi_set_name_addr(child, name);
	return (DDI_SUCCESS);
}

/* niumx_removechild: DDI_CTLOPS_UNINITCHILD handler — undo initchild. */
static void
niumx_removechild(dev_info_t *dip)
{
	ddi_set_name_addr(dip, NULL);
	ddi_remove_minor_node(dip, NULL);
	impl_rem_dev_props(dip);
}



/*
 * bus dma alloc handle entry point:
 */
/*ARGSUSED*/
int
niumx_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attrp,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	ddi_dma_impl_t *mp;
	int sleep = (waitfp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP;

	DBG(NIUMX_DBG_DMA_ALLOCH, dip, "rdip=%s%d\n", NIUMX_NAMEINST(rdip));

	if (attrp->dma_attr_version != DMA_ATTR_V0) {
		DBG(NIUMX_DBG_DMA_ALLOCH,
		    (dev_info_t *)dip, "DDI_DMA_BADATTR\n");
		return (DDI_DMA_BADATTR);
	}

	/* Caution: we don't use zalloc to enhance performance! */
	if ((mp = kmem_alloc(sizeof (ddi_dma_impl_t), sleep)) == 0) {
		DBG(NIUMX_DBG_DMA_ALLOCH, dip, "can't alloc ddi_dma_impl_t\n");
		return (DDI_FAILURE);
	}
	/* non-zalloc'ed: every field below must be explicitly initialized */
	mp->dmai_rdip = rdip;
	mp->dmai_pfnlst = NULL;
	mp->dmai_cookie = NULL;
	mp->dmai_fault = 0;
	mp->dmai_fault_check = NULL;
	mp->dmai_fault_notify = NULL;

	mp->dmai_attr = *attrp; 	/* set requestors attr info */

	DBG(NIUMX_DBG_DMA_ALLOCH, dip, "mp=%p\n", mp);

	*handlep = (ddi_dma_handle_t)mp;
	return (DDI_SUCCESS);
}


/*
 * bus dma free handle entry point:
 */
/*ARGSUSED*/
int
niumx_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;

	/* free the single cookie if a bind is still outstanding */
	if (mp->dmai_cookie)
		kmem_free(mp->dmai_cookie, sizeof (ddi_dma_cookie_t));
	kmem_free(mp, sizeof (ddi_dma_impl_t));

	return (DDI_SUCCESS);
}


/*
 * bus dma bind handle entry point:
 *
 *	check/enforce DMA type, setup pfn0 and some other key pieces
 *	of this dma request.
 * Note: this only works with DMA_OTYP_VADDR, and makes use of the known
 *	fact that only contiguous memory blocks will be passed in.
 *	Therefore only one cookie will ever be returned.
 *
 *	return values:
 *		DDI_DMA_NOMAPPING - can't get valid pfn0, or bad dma type
 *		DDI_DMA_NORESOURCES
 *		DDI_SUCCESS
 *
 *	dma handle members affected (set on exit):
 *	mp->dmai_object		- dmareq->dmar_object
 *	mp->dmai_rflags		- dmareq->dmar_flags
 *	mp->dmai_pfn0		- 1st page pfn (if va/size pair and not shadow)
 *	mp->dmai_roffset	- initialized to starting page offset
 *	mp->dmai_size		- # of total pages of entire object
 *	mp->dmai_cookie		- new cookie alloc'd
 */
/*ARGSUSED*/
int
niumx_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, ddi_dma_req_t *dmareq,
    ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	int (*waitfp)(caddr_t) = dmareq->dmar_fp;
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	ddi_dma_obj_t *dobj_p = &dmareq->dmar_object;
	uint32_t offset;
	pfn_t pfn0;
	int ret;

	DBG(NIUMX_DBG_DMA_BINDH, dip, "rdip=%s%d mp=%p dmareq=%p\n",
	    NIUMX_NAMEINST(rdip), mp, dmareq);

	/* first check dma type */
	mp->dmai_rflags = dmareq->dmar_flags & DMP_DDIFLAGS | DMP_NOSYNC;
	switch (dobj_p->dmao_type) {
	case DMA_OTYP_VADDR: {
		caddr_t vaddr = dobj_p->dmao_obj.virt_obj.v_addr;
		struct as *as_p = dobj_p->dmao_obj.virt_obj.v_as;
		/* NULL as means kernel address space */
		struct hat *hat_p = as_p ? as_p->a_hat : kas.a_hat;
		offset = (ulong_t)vaddr & NIUMX_PAGE_OFFSET;
		pfn0 = hat_getpfnum(hat_p, vaddr);
		}
		break;

	case DMA_OTYP_BUFVADDR:
	case DMA_OTYP_PAGES:
	case DMA_OTYP_PADDR:
	default:
		cmn_err(CE_WARN, "%s%d requested unsupported dma type %x",
		    NIUMX_NAMEINST(mp->dmai_rdip), dobj_p->dmao_type);
		ret = DDI_DMA_NOMAPPING;
		goto err;
	}
	if (pfn0 == PFN_INVALID) {
		cmn_err(CE_WARN, "%s%d: invalid pfn0 for DMA object %p",
		    NIUMX_NAMEINST(dip), (void *)dobj_p);
		ret = DDI_DMA_NOMAPPING;
		goto err;
	}
	mp->dmai_object	 = *dobj_p;			/* whole object */
	mp->dmai_pfn0	 = (void *)pfn0;		/* cache pfn0 */
	mp->dmai_roffset = offset;			/* pg0 offset */
	mp->dmai_mapping = mp->dmai_roffset | NIUMX_PTOB(pfn0);
	mp->dmai_size = mp->dmai_object.dmao_size;

	DBG(NIUMX_DBG_DMA_BINDH, dip, "check pfn: mp=%p pfn0=%x\n",
	    mp, mp->dmai_pfn0);
	if (!(mp->dmai_cookie = kmem_zalloc(sizeof (ddi_dma_cookie_t),
	    waitfp == DDI_DMA_SLEEP ? KM_SLEEP : KM_NOSLEEP))) {
		ret = DDI_DMA_NORESOURCES;
		goto err;
	}
	mp->dmai_cookie->dmac_laddress = mp->dmai_mapping;
	mp->dmai_cookie->dmac_size = mp->dmai_size;
	*ccountp = 1;
	*cookiep = *mp->dmai_cookie;
	DBG(NIUMX_DBG_DMA_BINDH, dip, "cookie %" PRIx64 "+%x, count=%d\n",
	    cookiep->dmac_address, cookiep->dmac_size, *ccountp);
	return (DDI_DMA_MAPPED);

err:
	DBG(NIUMX_DBG_DMA_BINDH, (dev_info_t *)dip,
	    "niumx_dma_bindhdl error ret=%d\n", ret);
	return (ret);
}

/*
 * bus dma unbind handle entry point:
 */
/*ARGSUSED*/
int
niumx_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;

	DBG(NIUMX_DBG_DMA_UNBINDH, dip, "rdip=%s%d, mp=%p\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), handle);
	if (mp->dmai_cookie) {
		kmem_free(mp->dmai_cookie, sizeof (ddi_dma_cookie_t));
		mp->dmai_cookie = NULL;
	}

	return (DDI_SUCCESS);
}

/*
 * niumx_intr_ops: bus_intr_op entry point.  Dispatches the DDI interrupt
 * framework operations; only FIXED interrupts are supported.
 */
/*ARGSUSED*/
int
niumx_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
    ddi_intr_handle_impl_t *hdlp, void *result)
{

	int	ret = DDI_SUCCESS;

	DBG(NIUMX_DBG_INTROPS, dip, "niumx_intr_ops: dip=%p rdip=%p intr_op=%x "
	    "handle=%p\n", dip, rdip, intr_op, hdlp);

	switch (intr_op) {

	case DDI_INTROP_SUPPORTED_TYPES:
		*(int *)result = DDI_INTR_TYPE_FIXED;
		break;
	case DDI_INTROP_GETCAP:
		*(int *)result = DDI_INTR_FLAG_LEVEL |
		    DDI_INTR_FLAG_RETARGETABLE;
		break;
	case DDI_INTROP_SETCAP:
		ret = DDI_ENOTSUP;
		break;
	case DDI_INTROP_ALLOC:
		/* scratch1 = count,  # of intrs from DDI framework */
		*(int *)result = hdlp->ih_scratch1;
		break;
	case DDI_INTROP_FREE:
		/* Do we need to do anything here?  */
		break;
	case DDI_INTROP_GETPRI:
		*(int *)result = NIUMX_DEFAULT_PIL;
		break;
	case DDI_INTROP_SETPRI:
		ret = DDI_ENOTSUP;
		break;
	case DDI_INTROP_ADDISR:
		ret = niumx_add_intr(dip, rdip, hdlp);
		break;
	case DDI_INTROP_REMISR:
		ret = niumx_rem_intr(dip, rdip, hdlp);
		break;
	case DDI_INTROP_ENABLE:
		ret = niumx_set_intr(dip, rdip, hdlp, HV_INTR_VALID);
		break;
	case DDI_INTROP_DISABLE:
		ret = niumx_set_intr(dip, rdip, hdlp, HV_INTR_NOTVALID);
		break;
	case DDI_INTROP_SETMASK:
		ret = DDI_ENOTSUP;
		break;
	case DDI_INTROP_CLRMASK:
		ret = DDI_ENOTSUP;
		break;
	case DDI_INTROP_GETPENDING:
		ret = DDI_ENOTSUP;
		break;
	case DDI_INTROP_NINTRS:
	case DDI_INTROP_NAVAIL: {
		niudevino_t	*inos_p;
		int	inoslen;

		/* count of intrs = length of "interrupts" prop in cells */
		if (ddi_getlongprop(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS,
		    "interrupts", (caddr_t)&inos_p, &inoslen)
		    != DDI_SUCCESS) {
			ret = DDI_FAILURE;
			break;
		}
		*(int *)result = inoslen / sizeof (uint32_t);
		kmem_free(inos_p, inoslen);
		}
		break;
	case DDI_INTROP_GETTARGET: {
		niumx_devstate_t *niumxds_p;

		niumxds_p = (niumx_devstate_t *)ddi_get_soft_state(niumx_state,
		    ddi_get_instance(dip));

		ret = niumx_get_intr_target(niumxds_p, hdlp->ih_vector,
		    (niucpuid_t *)result);

		}
		break;
	case DDI_INTROP_SETTARGET: {
		niumx_devstate_t *niumxds_p;

		niumxds_p = (niumx_devstate_t *)ddi_get_soft_state(niumx_state,
		    ddi_get_instance(dip));

		ret = niumx_set_intr_target(niumxds_p, hdlp->ih_vector,
		    *(niucpuid_t *)result);

		}
		break;
	default:
		ret = DDI_ENOTSUP;
		break;
	}

	DBG(NIUMX_DBG_INTROPS, dip, "niumx_intr_ops: ret=%d\n", ret);
	return (ret);
}

/*
 * niumx_set_intr: enable (HV_INTR_VALID) or disable (HV_INTR_NOTVALID)
 * the interrupt identified by hdlp->ih_vector via the hypervisor, and
 * record the resulting state so niumx_intr_dist can restore it.
 */
int
niumx_set_intr(dev_info_t *dip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp, int valid)
{
	niumx_ih_t	*ih_p;
	int		ret = DDI_SUCCESS;
	uint64_t	hvret;
	niumx_devstate_t *niumxds_p;	/* devstate pointer */
	int		instance = ddi_get_instance(dip);

	niumxds_p = (niumx_devstate_t *)ddi_get_soft_state(niumx_state,
	    instance);

	ASSERT(hdlp->ih_inum < NIUMX_MAX_INTRS);

	ih_p = niumxds_p->niumx_ihtable + hdlp->ih_vector;

	DBG(NIUMX_DBG_A_INTX, dip,
	    "niumx_set_intr: rdip=%s%d, valid=%d %s (%x,%x)\n",
	    NIUMX_NAMEINST(rdip), valid, valid ? "enabling" : "disabling",
	    ih_p->ih_inum, ih_p->ih_sysino);

	/* start from a clean state when enabling */
	if (valid == HV_INTR_VALID)
		(void) hvio_intr_setstate(ih_p->ih_sysino, HV_INTR_IDLE_STATE);
	if ((hvret = hvio_intr_setvalid(ih_p->ih_sysino, valid))
	    != H_EOK) {
		DBG(NIUMX_DBG_A_INTX, dip,
		    "hvio_intr_setvalid failed, ret 0x%x\n", hvret);
		ret = DDI_FAILURE;
	} else
		ih_p->ih_state = valid;

	return (ret);
}

/*
 * niumx_get_intr_target: report the CPU currently targeted by `ino`.
 * Returns EINVAL for an unbound ino or a failed hypervisor query, and
 * EIO if the hypervisor's answer disagrees with our cached ih_cpuid.
 */
int
niumx_get_intr_target(niumx_devstate_t *niumxds_p, niudevino_t ino,
    niucpuid_t *cpu_id)
{
	niumx_ih_t *ih_p;
	niusysino_t sysino;
	int rval = DDI_SUCCESS;

	ih_p = niumxds_p->niumx_ihtable + ino;

	sysino = ih_p->ih_sysino;

	if (sysino == 0) {
		rval = EINVAL;
		goto done;
	}

	if (hvio_intr_gettarget(sysino, cpu_id) != H_EOK) {
		rval = EINVAL;
		goto done;
	}

	/* HV and our cache should agree; flag it if they don't */
	if (ih_p->ih_cpuid != *cpu_id)
		rval = EIO;

done:
	return (rval);
}

/*
 * niumx_set_intr_target: retarget `ino` to `cpu_id`, draining any
 * in-flight delivery first (same quiesce/drain/retarget sequence as
 * niumx_intr_dist).  Held under cpu_lock so the target CPU cannot go
 * offline mid-operation.
 */
int
niumx_set_intr_target(niumx_devstate_t *niumxds_p, niudevino_t ino,
    niucpuid_t cpu_id)
{
	dev_info_t *dip = niumxds_p->dip;
	niumx_ih_t *ih_p;
	niucpuid_t old_cpu_id;
	niusysino_t sysino;
	int ret = DDI_SUCCESS;
	int state;
	hrtime_t start;
	extern const int _ncpu;
	extern cpu_t	*cpu[];

	mutex_enter(&cpu_lock);

	ih_p = niumxds_p->niumx_ihtable + ino;

	sysino = ih_p->ih_sysino;
	if (sysino == 0) {
		ret = EINVAL;
		goto done;
	}

	if (hvio_intr_gettarget(sysino, &old_cpu_id) != H_EOK) {
		ret = EINVAL;
		goto done;
	}
	/* only retarget to a CPU that exists and is online */
	if ((cpu_id < _ncpu) && (cpu[cpu_id] && cpu_is_online(cpu[cpu_id]))) {
		if (cpu_id == old_cpu_id)
			goto done;

		/* check for pending interrupts, busy wait if so */
		for (start = gethrtime(); !panicstr &&
		    (hvio_intr_getstate(sysino, &state) == H_EOK) &&
		    (state == HV_INTR_DELIVERED_STATE); /* */) {
			if (gethrtime() - start > niumx_intr_timeout) {
				cmn_err(CE_WARN, "%s%d: niumx_intr_dist: "
				    "pending interrupt (%x,%lx) timedout\n",
				    ddi_driver_name(dip), ddi_get_instance(dip),
				    ih_p->ih_inum, sysino);
				(void) hvio_intr_setstate(sysino,
				    HV_INTR_IDLE_STATE);
				break;
			}
		}
		(void) hvio_intr_settarget(sysino, cpu_id);
		if (ih_p->ih_state == HV_INTR_VALID)
			(void) hvio_intr_setvalid(sysino, HV_INTR_VALID);
		else
			(void) hvio_intr_setvalid(sysino, HV_INTR_NOTVALID);
		ih_p->ih_cpuid = cpu_id;
	} else {
		ret = DDI_EINVAL;
	}

done:
	mutex_exit(&cpu_lock);
	return (ret);
}


/*
 * niumx_add_intr:
 *
 * This function is called to register interrupts.
 */
int
niumx_add_intr(dev_info_t *dip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp)
{
	niumx_ih_t	*ih_p;
	int		ret = DDI_SUCCESS;
	uint64_t	hvret;
	niusysino_t	sysino;
	niumx_devstate_t	*niumxds_p;	/* devstate pointer */
	int		instance = ddi_get_instance(dip);

	niumxds_p = (niumx_devstate_t *)ddi_get_soft_state(niumx_state,
	    instance);

	/* get new ino */
	if (hdlp->ih_inum >= NIUMX_MAX_INTRS) {
		DBG(NIUMX_DBG_INTR, dip, "error: inum %d out of range\n",
		    hdlp->ih_inum);
		ret = DDI_FAILURE;
		goto done;
	}

	ih_p = niumxds_p->niumx_ihtable + hdlp->ih_vector;

	/* translate the devino to the hypervisor's system interrupt number */
	if ((hvret = hvio_intr_devino_to_sysino(NIUMX_DIP_TO_HANDLE(dip),
	    hdlp->ih_vector, &sysino)) != H_EOK) {
		DBG(NIUMX_DBG_INTR, dip, "hvio_intr_devino_to_sysino failed, "
		    "ret 0x%x\n", hvret);
		ret = DDI_FAILURE;
		goto done;
	}
	ih_p->ih_sysino = sysino;
	ih_p->ih_dip = rdip;
	ih_p->ih_inum = hdlp->ih_inum;
	ih_p->ih_hdlr = hdlp->ih_cb_func;
	ih_p->ih_arg1 = hdlp->ih_cb_arg1;
	ih_p->ih_arg2 = hdlp->ih_cb_arg2;

	DBG(NIUMX_DBG_A_INTX, dip, "niumx_add_intr: rdip=%s%d inum=0x%x "
	    "handler=%p arg1=%p arg2=%p, new ih_p = %p\n", NIUMX_NAMEINST(rdip),
	    hdlp->ih_inum, hdlp->ih_cb_func, hdlp->ih_cb_arg1,
	    hdlp->ih_cb_arg2, ih_p);

	if (hdlp->ih_pri == 0)
		hdlp->ih_pri = NIUMX_DEFAULT_PIL;

	ih_p->ih_pri = hdlp->ih_pri;

	DBG(NIUMX_DBG_A_INTX, dip, "for ino %x adding (%x,%x)\n",
	    hdlp->ih_vector, ih_p->ih_inum, ih_p->ih_sysino);

	/* Save sysino value in hdlp */
	hdlp->ih_vector = ih_p->ih_sysino;

	/* swap in our handler & arg */
	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp, (ddi_intr_handler_t *)niumx_intr_hdlr,
	    (void *)ih_p, NULL);

	ret = i_ddi_add_ivintr(hdlp);

	/* Restore orig. interrupt handler & args in handle. */
	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp, ih_p->ih_hdlr, ih_p->ih_arg1,
	    ih_p->ih_arg2);

	if (ret != DDI_SUCCESS) {
		DBG(NIUMX_DBG_A_INTX, dip, "i_ddi_add_ivintr error ret=%x\n",
		    ret);
		goto done;
	}

	/* select cpu, saving it for removal */
	ih_p->ih_cpuid = intr_dist_cpuid();

	if ((hvret = hvio_intr_settarget(ih_p->ih_sysino, ih_p->ih_cpuid))
	    != H_EOK) {
		DBG(NIUMX_DBG_A_INTX, dip,
		    "hvio_intr_settarget failed, ret 0x%x\n", hvret);
		ret = DDI_FAILURE;
	}
done:
	/*
	 * NOTE(review): the DBG arguments below don't match the format
	 * string order ("ret" is printed by %d but passed last) — debug
	 * output only, but worth confirming/fixing upstream.
	 */
	DBG(NIUMX_DBG_A_INTX, dip, "done, ret = %d, ih_p 0x%p, hdlp 0x%p\n",
	    ih_p, hdlp, ret);
	return (ret);
}

/*
 * niumx_rem_intr:
 *
 * This function is called to unregister interrupts.
 */
/*ARGSUSED*/
int
niumx_rem_intr(dev_info_t *dip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp)
{
	niumx_ih_t	*ih_p;
	int		ret = DDI_SUCCESS, state;
	hrtime_t	start;
	niusysino_t 	sysino;
	niumx_devstate_t	*niumxds_p;	/* devstate pointer */
	int		instance = ddi_get_instance(dip);

	niumxds_p = (niumx_devstate_t *)ddi_get_soft_state(niumx_state,
	    instance);

	ASSERT(hdlp->ih_inum < NIUMX_MAX_INTRS);

	ih_p = niumxds_p->niumx_ihtable + hdlp->ih_vector;

	sysino = ih_p->ih_sysino;
	DBG(NIUMX_DBG_R_INTX, dip, "removing (%x,%x)\n", ih_p->ih_inum, sysino);

	/* quiesce before tearing down the mapping */
	(void) hvio_intr_setvalid(sysino, HV_INTR_NOTVALID);

	/* check for pending interrupts, busy wait if so */
	for (start = gethrtime(); !panicstr &&
	    (hvio_intr_getstate(sysino, &state) == H_EOK) &&
	    (state == HV_INTR_DELIVERED_STATE); /* */) {
		if (gethrtime() - start > niumx_intr_timeout) {
			cmn_err(CE_WARN, "%s%d: niumx_intr_dist: "
			    "pending interrupt (%x,%lx) timedout\n",
			    ddi_driver_name(dip), ddi_get_instance(dip),
			    ih_p->ih_inum, sysino);
			ret = DDI_FAILURE;
			goto fail;
		}
	}

	ih_p->ih_sysino = 0;

	hdlp->ih_vector = (uint32_t)sysino;
	if (hdlp->ih_vector != NULL) i_ddi_rem_ivintr(hdlp);

fail:
	return (ret);
}

/*
 * niumx_intr_hdlr (our interrupt handler)
 *
 * Wrapper installed via i_ddi_add_ivintr: fires the leaf driver's real
 * handler (with DTrace probes around it), then tells the hypervisor the
 * interrupt is idle so it can be delivered again.
 */
uint_t
niumx_intr_hdlr(void *arg)
{
	niumx_ih_t *ih_p = (niumx_ih_t *)arg;
	uint_t		r;

	DTRACE_PROBE4(interrupt__start, dev_info_t, ih_p->ih_dip, void *,
	    ih_p->ih_hdlr, caddr_t, ih_p->ih_arg1, caddr_t, ih_p->ih_arg2);

	r = (*ih_p->ih_hdlr)(ih_p->ih_arg1, ih_p->ih_arg2);

	DTRACE_PROBE4(interrupt__complete, dev_info_t, ih_p->ih_dip, void *,
	    ih_p->ih_hdlr, caddr_t, ih_p->ih_arg1, int, r);

	(void) hvio_intr_setstate(ih_p->ih_sysino, HV_INTR_IDLE_STATE);
	return (r);
}

#ifdef	DEBUG
/* Bitmask of enabled debug categories; indexes niumx_debug_sym below. */
uint64_t niumx_debug_flags = 0;

static char *niumx_debug_sym [] = {	/* same sequence as niumx_debug_bit */
	/*  0 */ "attach",
	/*  1 */ "map",
	/*  2 */ "nex-ctlops",
	/*  3 */ "introps",
	/*  4 */ "intr-add",
	/*  5 */ "intr-rem",
	/*  6 */ "intr",
	/*  7 */ "dma-alloc",
	/*  8 */ "dma-bind",
	/*  9 */ "dma-unbind",
	/* 10 */ "chk-dma-mode"
};

/* DBG() backend: printf-style, gated per-category by niumx_debug_flags. */
/*ARGSUSED*/
void
niumx_dbg(niumx_debug_bit_t bit, dev_info_t *dip, char *fmt, ...)
{
	va_list ap;
	char msgbuf[1024];

	if (!(1ull << bit & niumx_debug_flags))
		return;
	va_start(ap, fmt);
	(void) vsprintf(msgbuf, fmt, ap);
	va_end(ap);
	cmn_err(CE_NOTE, "%s: %s", niumx_debug_sym[bit], msgbuf);
}

#endif	/* DEBUG */