1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 /* 27 * PCI nexus interrupt handling: 28 * PCI device interrupt handler wrapper 29 * pil lookup routine 30 * PCI device interrupt related initchild code 31 */ 32 33 #include <sys/types.h> 34 #include <sys/kmem.h> 35 #include <sys/async.h> 36 #include <sys/spl.h> 37 #include <sys/sunddi.h> 38 #include <sys/machsystm.h> /* e_ddi_nodeid_to_dip() */ 39 #include <sys/ddi_impldefs.h> 40 #include <sys/pci/pci_obj.h> 41 #include <sys/sdt.h> 42 #include <sys/clock.h> 43 44 #ifdef _STARFIRE 45 #include <sys/starfire.h> 46 #endif /* _STARFIRE */ 47 48 /* 49 * interrupt jabber: 50 * 51 * When an interrupt line is jabbering, every time the state machine for the 52 * associated ino is idled, a new mondo will be sent and the ino will go into 53 * the pending state again. The mondo will cause a new call to 54 * pci_intr_wrapper() which normally idles the ino's state machine which would 55 * precipitate another trip round the loop. 
56 * The loop can be broken by preventing the ino's state machine from being 57 * idled when an interrupt line is jabbering. See the comment at the 58 * beginning of pci_intr_wrapper() explaining how the 'interrupt jabber 59 * protection' code does this. 60 */ 61 62 /*LINTLIBRARY*/ 63 64 #ifdef NOT_DEFINED 65 /* 66 * This array is used to determine the sparc PIL at the which the 67 * handler for a given INO will execute. This table is for onboard 68 * devices only. A different scheme will be used for plug-in cards. 69 */ 70 71 uint_t ino_to_pil[] = { 72 73 /* pil */ /* ino */ 74 75 0, 0, 0, 0, /* 0x00 - 0x03: bus A slot 0 int#A, B, C, D */ 76 0, 0, 0, 0, /* 0x04 - 0x07: bus A slot 1 int#A, B, C, D */ 77 0, 0, 0, 0, /* 0x08 - 0x0B: unused */ 78 0, 0, 0, 0, /* 0x0C - 0x0F: unused */ 79 80 0, 0, 0, 0, /* 0x10 - 0x13: bus B slot 0 int#A, B, C, D */ 81 0, 0, 0, 0, /* 0x14 - 0x17: bus B slot 1 int#A, B, C, D */ 82 0, 0, 0, 0, /* 0x18 - 0x1B: bus B slot 2 int#A, B, C, D */ 83 4, 0, 0, 0, /* 0x1C - 0x1F: bus B slot 3 int#A, B, C, D */ 84 85 4, /* 0x20: SCSI */ 86 6, /* 0x21: ethernet */ 87 3, /* 0x22: parallel port */ 88 9, /* 0x23: audio record */ 89 9, /* 0x24: audio playback */ 90 14, /* 0x25: power fail */ 91 4, /* 0x26: 2nd SCSI */ 92 8, /* 0x27: floppy */ 93 14, /* 0x28: thermal warning */ 94 12, /* 0x29: keyboard */ 95 12, /* 0x2A: mouse */ 96 12, /* 0x2B: serial */ 97 0, /* 0x2C: timer/counter 0 */ 98 0, /* 0x2D: timer/counter 1 */ 99 14, /* 0x2E: uncorrectable ECC errors */ 100 14, /* 0x2F: correctable ECC errors */ 101 14, /* 0x30: PCI bus A error */ 102 14, /* 0x31: PCI bus B error */ 103 14, /* 0x32: power management wakeup */ 104 14, /* 0x33 */ 105 14, /* 0x34 */ 106 14, /* 0x35 */ 107 14, /* 0x36 */ 108 14, /* 0x37 */ 109 14, /* 0x38 */ 110 14, /* 0x39 */ 111 14, /* 0x3a */ 112 14, /* 0x3b */ 113 14, /* 0x3c */ 114 14, /* 0x3d */ 115 14, /* 0x3e */ 116 14, /* 0x3f */ 117 14 /* 0x40 */ 118 }; 119 #endif /* NOT_DEFINED */ 120 121 122 #define PCI_SIMBA_VENID 
0x108e	/* vendor id for simba */
#define	PCI_SIMBA_DEVID		0x5000	/* device id for simba */

/*
 * map_pcidev_cfg_reg - create mapping to pci device configuration registers
 *	if we have a simba AND a pci to pci bridge along the
 *	device path.
 *	Called with corresponding mutexes held!!
 *
 * XXX  XXX  XXX  The purpose of this routine is to overcome a hardware
 *		  defect in Sabre CPU and Simba bridge configuration
 *		  which does not drain DMA write data stalled in
 *		  PCI to PCI bridges (such as the DEC bridge) beyond
 *		  Simba. This routine will setup the data structures
 *		  to allow the pci_intr_wrapper to perform a manual
 *		  drain data operation before passing the control to
 *		  interrupt handlers of device drivers.
 *
 * The walk goes from rdip (the interrupting child) up toward dip (this
 * nexus); pci_dip records the deepest node along that path that carries a
 * "vendor-id" property, i.e. the PCI device closest to the leaf.  A config
 * space mapping to that device is returned through hdl_p only when both a
 * PCI-to-PCI bridge was seen and either a Simba was seen or the chip is
 * pre-Schizo; otherwise *hdl_p is left untouched.
 *
 * return value:
 * DDI_SUCCESS
 * DDI_FAILURE if unable to create mapping
 */
static int
map_pcidev_cfg_reg(dev_info_t *dip, dev_info_t *rdip, ddi_acc_handle_t *hdl_p)
{
	dev_info_t *cdip;
	dev_info_t *pci_dip = NULL;
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	int simba_found = 0, pci_bridge_found = 0;

	for (cdip = rdip; cdip && cdip != dip; cdip = ddi_get_parent(cdip)) {
		ddi_acc_handle_t config_handle;
		uint32_t vendor_id = ddi_getprop(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS, "vendor-id", 0xffff);

		DEBUG4(DBG_A_INTX, pci_p->pci_dip,
		    "map dev cfg reg for %s%d: @%s%d\n",
		    ddi_driver_name(rdip), ddi_get_instance(rdip),
		    ddi_driver_name(cdip), ddi_get_instance(cdip));

		/* node opted out of the manual DMA drain workaround */
		if (ddi_prop_exists(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
		    "no-dma-interrupt-sync"))
			continue;

		/* continue to search up-stream if not a PCI device */
		if (vendor_id == 0xffff)
			continue;

		/* record the deepest pci device */
		if (!pci_dip)
			pci_dip = cdip;

		/* look for simba */
		if (vendor_id == PCI_SIMBA_VENID) {
			uint32_t device_id = ddi_getprop(DDI_DEV_T_ANY,
			    cdip, DDI_PROP_DONTPASS, "device-id", -1);
			if (device_id == PCI_SIMBA_DEVID) {
				simba_found = 1;
				DEBUG0(DBG_A_INTX, pci_p->pci_dip,
				    "\tFound simba\n");
				continue; /* do not check bridge if simba */
			}
		}

		/* look for pci to pci bridge */
		if (pci_config_setup(cdip, &config_handle) != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "%s%d: can't get brdg cfg space for %s%d\n",
			    ddi_driver_name(dip), ddi_get_instance(dip),
			    ddi_driver_name(cdip), ddi_get_instance(cdip));
			return (DDI_FAILURE);
		}
		if (pci_config_get8(config_handle, PCI_CONF_BASCLASS)
		    == PCI_CLASS_BRIDGE) {
			DEBUG0(DBG_A_INTX, pci_p->pci_dip,
			    "\tFound PCI to xBus bridge\n");
			pci_bridge_found = 1;
		}
		pci_config_teardown(&config_handle);
	}

	if (!pci_bridge_found)
		return (DDI_SUCCESS);
	if (!simba_found && (CHIP_TYPE(pci_p) < PCI_CHIP_SCHIZO))
		return (DDI_SUCCESS);

	/*
	 * pci_bridge_found implies at least one PCI node was seen, so
	 * pci_dip is non-NULL here.  Report pci_dip on failure: at loop
	 * exit cdip equals dip (or NULL), so the previous use of cdip in
	 * this message named the nexus twice rather than the device whose
	 * config space could not be mapped.
	 */
	if (pci_config_setup(pci_dip, hdl_p) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: can not get config space for %s%d\n",
		    ddi_driver_name(dip), ddi_get_instance(dip),
		    ddi_driver_name(pci_dip), ddi_get_instance(pci_dip));
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
/*
 * If the unclaimed interrupt count has reached the limit set by
 * pci_unclaimed_intr_max within the time limit, then all interrupts
 * on this ino is blocked by not idling the interrupt state machine.
 *
 * Called from pci_intr_wrapper() when no handler at any PIL claimed the
 * mondo.  Tracks a per-ino unclaimed count and the lbolt timestamp of the
 * first unclaimed interrupt in the current burst:
 *   - while the count is within pci_unclaimed_intr_max, the pending state
 *     is cleared (interrupts keep flowing) and, if pci_spurintr_msgs is
 *     set, a "spurious interrupt" warning is logged;
 *   - once the limit is exceeded within pci_spurintr_duration microseconds,
 *     the ino is reported as blocked and the pending state is deliberately
 *     NOT cleared, which stops further mondos (the jabber protection
 *     described at the top of this file);
 *   - if the limit is exceeded but the burst took longer than
 *     pci_spurintr_duration, the count is reset and the ino stays live.
 *
 * Always returns DDI_INTR_CLAIMED so the framework does not also act on
 * the unclaimed interrupt.
 */
static int
pci_spurintr(ib_ino_pil_t *ipil_p) {
	ib_ino_info_t *ino_p = ipil_p->ipil_ino_p;
	ih_t *ih_p = ipil_p->ipil_ih_start;
	pci_t *pci_p = ino_p->ino_ib_p->ib_pci_p;
	char *err_fmt_str;
	boolean_t blocked = B_FALSE;
	int i;

	/* already blocked: swallow the mondo without idling the ino */
	if (ino_p->ino_unclaimed_intrs > pci_unclaimed_intr_max)
		return (DDI_INTR_CLAIMED);

	/* first unclaimed interrupt of a new burst: start the clock */
	if (!ino_p->ino_unclaimed_intrs)
		ino_p->ino_spurintr_begin = ddi_get_lbolt();

	ino_p->ino_unclaimed_intrs++;

	if (ino_p->ino_unclaimed_intrs <= pci_unclaimed_intr_max)
		goto clear;

	/* limit exceeded, but slowly enough: not jabber, start over */
	if (drv_hztousec(ddi_get_lbolt() - ino_p->ino_spurintr_begin)
	    > pci_spurintr_duration) {
		ino_p->ino_unclaimed_intrs = 0;
		goto clear;
	}
	err_fmt_str = "%s%d: ino 0x%x blocked";
	blocked = B_TRUE;
	goto warn;
clear:
	if (!pci_spurintr_msgs) { /* tomatillo errata #71 spurious mondo */
		/* clear the pending state */
		IB_INO_INTR_CLEAR(ino_p->ino_clr_reg);
		return (DDI_INTR_CLAIMED);
	}

	err_fmt_str = "!%s%d: spurious interrupt from ino 0x%x";
warn:
	cmn_err(CE_WARN, err_fmt_str, NAMEINST(pci_p->pci_dip), ino_p->ino_ino);
	/* list every handler registered at this PIL to aid diagnosis */
	for (i = 0; i < ipil_p->ipil_ih_size; i++, ih_p = ih_p->ih_next)
		cmn_err(CE_CONT, "!%s-%d#%x ", NAMEINST(ih_p->ih_dip),
		    ih_p->ih_inum);
	cmn_err(CE_CONT, "!\n");
	if (blocked == B_FALSE)	/* clear the pending state */
		IB_INO_INTR_CLEAR(ino_p->ino_clr_reg);

	return (DDI_INTR_CLAIMED);
}
/*
 * pci_intr_wrapper
 *
 * This routine is used as wrapper around interrupt handlers installed by child
 * device drivers.  This routine invokes the driver interrupt handlers and
 * examines the return codes.
 * There is a count of unclaimed interrupts kept on a per-ino basis. If at
 * least one handler claims the interrupt then the counter is halved and the
 * interrupt state machine is idled. If no handler claims the interrupt then
 * the counter is incremented by one and the state machine is idled.
 * If the count ever reaches the limit value set by pci_unclaimed_intr_max
 * then the interrupt state machine is not idled thus preventing any further
 * interrupts on that ino. The state machine will only be idled again if a
 * handler is subsequently added or removed.
 *
 * return value: DDI_INTR_CLAIMED if any handlers claimed the interrupt,
 * DDI_INTR_UNCLAIMED otherwise.
 */

extern uint64_t intr_get_time(void);

uint_t
pci_intr_wrapper(caddr_t arg)
{
	ib_ino_pil_t *ipil_p = (ib_ino_pil_t *)arg;
	ib_ino_info_t *ino_p = ipil_p->ipil_ino_p;
	uint_t result = 0, r = DDI_INTR_UNCLAIMED;
	pci_t *pci_p = ino_p->ino_ib_p->ib_pci_p;
	pbm_t *pbm_p = pci_p->pci_pbm_p;
	ih_t *ih_p = ipil_p->ipil_ih_start;
	int i;

	/* walk every handler registered at this ino/PIL */
	for (i = 0; i < ipil_p->ipil_ih_size; i++, ih_p = ih_p->ih_next) {
		dev_info_t *dip = ih_p->ih_dip;
		uint_t (*handler)() = ih_p->ih_handler;
		caddr_t arg1 = ih_p->ih_handler_arg1;
		caddr_t arg2 = ih_p->ih_handler_arg2;
		ddi_acc_handle_t cfg_hdl = ih_p->ih_config_handle;

		/*
		 * Manual DMA drain for the Sabre/Simba defect: a config
		 * space read of the device (set up by map_pcidev_cfg_reg)
		 * followed by a PBM sync flushes stalled DMA writes before
		 * the driver's handler runs.
		 */
		if (pci_intr_dma_sync && cfg_hdl && pbm_p->pbm_sync_reg_pa) {
			(void) pci_config_get16(cfg_hdl, PCI_CONF_VENID);
			pci_pbm_dma_sync(pbm_p, ino_p->ino_ino);
		}

		/* skip handlers whose interrupt is administratively off */
		if (ih_p->ih_intr_state == PCI_INTR_STATE_DISABLE) {
			DEBUG3(DBG_INTR, pci_p->pci_dip,
			    "pci_intr_wrapper: %s%d interrupt %d is disabled\n",
			    ddi_driver_name(dip), ddi_get_instance(dip),
			    ino_p->ino_ino);

			continue;
		}

		DTRACE_PROBE4(interrupt__start, dev_info_t, dip,
		    void *, handler, caddr_t, arg1, caddr_t, arg2);

		r = (*handler)(arg1, arg2);

		/*
		 * Account for time used by this interrupt. Protect against
		 * conflicting writes to ih_ticks from ib_intr_dist_all() by
		 * using atomic ops.
		 */

		if (ipil_p->ipil_pil <= LOCK_LEVEL)
			atomic_add_64(&ih_p->ih_ticks, intr_get_time());

		DTRACE_PROBE4(interrupt__complete, dev_info_t, dip,
		    void *, handler, caddr_t, arg1, int, r);

		result += r;

		/* optionally keep polling all handlers even after a claim */
		if (pci_check_all_handlers)
			continue;
		if (result)
			break;
	}

	/* remember that this PIL claimed the shared ino */
	if (result)
		ino_p->ino_claimed |= (1 << ipil_p->ipil_pil);

	/* Interrupt can only be cleared after all pil levels are handled */
	if (ipil_p->ipil_pil != ino_p->ino_lopil)
		return (DDI_INTR_CLAIMED);

	/* lowest PIL and nobody claimed: run jabber accounting */
	if (!ino_p->ino_claimed)
		return (pci_spurintr(ipil_p));

	ino_p->ino_unclaimed_intrs = 0;
	ino_p->ino_claimed = 0;

	/* Clear the pending state */
	IB_INO_INTR_CLEAR(ino_p->ino_clr_reg);

	return (DDI_INTR_CLAIMED);
}

/*
 * Return the immediate child of nexus dip that is an ancestor of (or is)
 * rdip.  Assumes rdip lies beneath dip in the devinfo tree.
 */
dev_info_t *
get_my_childs_dip(dev_info_t *dip, dev_info_t *rdip)
{
	dev_info_t *cdip = rdip;

	for (; ddi_get_parent(cdip) != dip; cdip = ddi_get_parent(cdip))
		;

	return (cdip);
}

/*
 * Single shared template backing every per-handler "pci_intrs" kstat
 * (KSTAT_FLAG_VIRTUAL); pci_ks_update() fills it under
 * pciintr_ks_template_lock each time a kstat is read.
 */
static struct {
	kstat_named_t pciintr_ks_name;
	kstat_named_t pciintr_ks_type;
	kstat_named_t pciintr_ks_cpu;
	kstat_named_t pciintr_ks_pil;
	kstat_named_t pciintr_ks_time;
	kstat_named_t pciintr_ks_ino;
	kstat_named_t pciintr_ks_cookie;
	kstat_named_t pciintr_ks_devpath;
	kstat_named_t pciintr_ks_buspath;
} pciintr_ks_template = {
	{ "name",	KSTAT_DATA_CHAR },
	{ "type",	KSTAT_DATA_CHAR },
	{ "cpu",	KSTAT_DATA_UINT64 },
	{ "pil",	KSTAT_DATA_UINT64 },
	{ "time",	KSTAT_DATA_UINT64 },
	{ "ino",	KSTAT_DATA_UINT64 },
	{ "cookie",	KSTAT_DATA_UINT64 },
	{ "devpath",	KSTAT_DATA_STRING },
	{ "buspath",	KSTAT_DATA_STRING },
};
static uint32_t pciintr_ks_instance;	/* monotonic kstat instance counter */
static char ih_devpath[MAXPATHLEN];	/* scratch, guarded by template lock */
static char ih_buspath[MAXPATHLEN];	/* scratch, guarded by template lock */

kmutex_t pciintr_ks_template_lock;

/*
 * kstat update callback for a handler's "pci_intrs" kstat.  Refreshes the
 * shared template from the ih_t stored in ks_private.  When the handler is
 * enabled, reports cpu/pil/ino/cookie and accumulated time (nanoseconds,
 * ih_nsec plus tick-converted ih_ticks); when disabled, zeroes the numeric
 * fields and reports type "disabled".  Always returns 0; rw is unused
 * (virtual kstat, read path only uses this).
 */
int
pci_ks_update(kstat_t *ksp, int rw)
{
	ih_t *ih_p = ksp->ks_private;
	int maxlen = sizeof (pciintr_ks_template.pciintr_ks_name.value.c);
	ib_ino_pil_t *ipil_p = ih_p->ih_ipil_p;
	ib_ino_info_t *ino_p = ipil_p->ipil_ino_p;
	ib_t *ib_p = ino_p->ino_ib_p;
	pci_t *pci_p = ib_p->ib_pci_p;
	ib_ino_t ino;

	ino = ino_p->ino_ino;

	(void) snprintf(pciintr_ks_template.pciintr_ks_name.value.c, maxlen,
	    "%s%d", ddi_driver_name(ih_p->ih_dip),
	    ddi_get_instance(ih_p->ih_dip));

	(void) ddi_pathname(ih_p->ih_dip, ih_devpath);
	(void) ddi_pathname(pci_p->pci_dip, ih_buspath);
	kstat_named_setstr(&pciintr_ks_template.pciintr_ks_devpath, ih_devpath);
	kstat_named_setstr(&pciintr_ks_template.pciintr_ks_buspath, ih_buspath);

	if (ih_p->ih_intr_state == PCI_INTR_STATE_ENABLE) {
		(void) strcpy(pciintr_ks_template.pciintr_ks_type.value.c,
		    "fixed");
		pciintr_ks_template.pciintr_ks_cpu.value.ui64 =
		    ino_p->ino_cpuid;
		pciintr_ks_template.pciintr_ks_pil.value.ui64 =
		    ipil_p->ipil_pil;
		pciintr_ks_template.pciintr_ks_time.value.ui64 = ih_p->ih_nsec +
		    (uint64_t)tick2ns((hrtime_t)ih_p->ih_ticks,
		    ino_p->ino_cpuid);
		pciintr_ks_template.pciintr_ks_ino.value.ui64 = ino;
		pciintr_ks_template.pciintr_ks_cookie.value.ui64 =
		    IB_INO_TO_MONDO(ib_p, ino);
	} else {
		(void) strcpy(pciintr_ks_template.pciintr_ks_type.value.c,
		    "disabled");
		pciintr_ks_template.pciintr_ks_cpu.value.ui64 = 0;
		pciintr_ks_template.pciintr_ks_pil.value.ui64 = 0;
		pciintr_ks_template.pciintr_ks_time.value.ui64 = 0;
		pciintr_ks_template.pciintr_ks_ino.value.ui64 = 0;
		pciintr_ks_template.pciintr_ks_cookie.value.ui64 = 0;
	}

	return (0);
}
/*
 * Nexus entry point to add an interrupt handler for child rdip.
 *
 * Two paths:
 *   - Pulse interrupts (PCI_PULSE_INO set in ih_vector): no ino bookkeeping;
 *     the driver's handler is registered directly via i_ddi_add_ivintr and
 *     the mapping register is programmed with a distributor-chosen cpu.
 *   - Normal ino interrupts: under ib_ino_lst_mutex, either join an existing
 *     ino/pil (sharing) or create a new ib_ino_pil_t, register
 *     pci_intr_wrapper as the system handler with the ipil as its argument,
 *     add device weight for interrupt distribution, program/enable the
 *     mapping register, and create a per-handler "pci_intrs" kstat.
 *
 * On the failure paths, fail4..fail1 unwind in reverse order of what had
 * been set up (ipil, config mapping, ih allocation/mutex, nothing).
 *
 * Returns DDI_SUCCESS, DDI_INTR_NOTFOUND for an out-of-range ino, or
 * DDI_FAILURE.
 */
int
pci_add_intr(dev_info_t *dip, dev_info_t *rdip, ddi_intr_handle_impl_t *hdlp)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	ib_t *ib_p = pci_p->pci_ib_p;
	cb_t *cb_p = pci_p->pci_cb_p;
	ih_t *ih_p;
	ib_ino_t ino;
	ib_ino_info_t *ino_p;	/* pulse interrupts have no ino */
	ib_ino_pil_t *ipil_p, *ipil_list;
	ib_mondo_t mondo;
	uint32_t cpu_id;
	int ret;
	int32_t weight;

	ino = IB_MONDO_TO_INO(hdlp->ih_vector);

	DEBUG3(DBG_A_INTX, dip, "pci_add_intr: rdip=%s%d ino=%x\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), ino);

	if (ino > ib_p->ib_max_ino) {
		DEBUG1(DBG_A_INTX, dip, "ino %x is invalid\n", ino);
		return (DDI_INTR_NOTFOUND);
	}

	if (hdlp->ih_vector & PCI_PULSE_INO) {
		volatile uint64_t *map_reg_addr;
		map_reg_addr = ib_intr_map_reg_addr(ib_p, ino);

		mondo = pci_xlate_intr(dip, rdip, ib_p, ino);
		if (mondo == 0)
			goto fail1;

		hdlp->ih_vector = CB_MONDO_TO_XMONDO(cb_p, mondo);

		if (i_ddi_add_ivintr(hdlp) != DDI_SUCCESS)
			goto fail1;

		/*
		 * Select cpu and program.
		 *
		 * Since there is no good way to always derive cpuid in
		 * pci_remove_intr for PCI_PULSE_INO (esp. for STARFIRE), we
		 * don't add (or remove) device weight for pulsed interrupt
		 * sources.
		 */
		mutex_enter(&ib_p->ib_intr_lock);
		cpu_id = intr_dist_cpuid();
		*map_reg_addr = ib_get_map_reg(mondo, cpu_id);
		mutex_exit(&ib_p->ib_intr_lock);
		*map_reg_addr;	/* flush previous write */
		goto done;
	}

	if ((mondo = pci_xlate_intr(dip, rdip, pci_p->pci_ib_p, ino)) == 0)
		goto fail1;

	ino = IB_MONDO_TO_INO(mondo);

	mutex_enter(&ib_p->ib_ino_lst_mutex);
	ih_p = ib_alloc_ih(rdip, hdlp->ih_inum,
	    hdlp->ih_cb_func, hdlp->ih_cb_arg1, hdlp->ih_cb_arg2);
	/* set up the Sabre/Simba DMA-drain config mapping if needed */
	if (map_pcidev_cfg_reg(dip, rdip, &ih_p->ih_config_handle))
		goto fail2;

	ino_p = ib_locate_ino(ib_p, ino);
	/* non-NULL ipil_list means the ino already had at least one PIL */
	ipil_list = ino_p ? ino_p->ino_ipil_p:NULL;

	/* Sharing ino */
	if (ino_p && (ipil_p = ib_ino_locate_ipil(ino_p, hdlp->ih_pri))) {
		if (ib_intr_locate_ih(ipil_p, rdip, hdlp->ih_inum)) {
			DEBUG1(DBG_A_INTX, dip, "dup intr #%d\n",
			    hdlp->ih_inum);
			goto fail3;
		}

		/* add weight to the cpu that we are already targeting */
		cpu_id = ino_p->ino_cpuid;
		weight = pci_class_to_intr_weight(rdip);
		intr_dist_cpuid_add_device_weight(cpu_id, rdip, weight);

		ib_ino_add_intr(pci_p, ipil_p, ih_p);
		goto ino_done;
	}

	/* no PIL supplied by the framework: derive one from device class */
	if (hdlp->ih_pri == 0)
		hdlp->ih_pri = pci_class_to_pil(rdip);

	ipil_p = ib_new_ino_pil(ib_p, ino, hdlp->ih_pri, ih_p);
	ino_p = ipil_p->ipil_ino_p;

	hdlp->ih_vector = CB_MONDO_TO_XMONDO(cb_p, mondo);

	/* Store this global mondo */
	ino_p->ino_mondo = hdlp->ih_vector;

	DEBUG2(DBG_A_INTX, dip, "pci_add_intr: pil=0x%x mondo=0x%x\n",
	    hdlp->ih_pri, hdlp->ih_vector);

	/* register our wrapper so we can drain DMA / track unclaimed intrs */
	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp,
	    (ddi_intr_handler_t *)pci_intr_wrapper, (caddr_t)ipil_p, NULL);

	ret = i_ddi_add_ivintr(hdlp);

	/*
	 * Restore original interrupt handler
	 * and arguments in interrupt handle.
	 */
	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp, ih_p->ih_handler,
	    ih_p->ih_handler_arg1, ih_p->ih_handler_arg2);

	if (ret != DDI_SUCCESS)
		goto fail4;

	/* Save the pil for this ino */
	ipil_p->ipil_pil = hdlp->ih_pri;

	/* clear and enable interrupt */
	IB_INO_INTR_CLEAR(ino_p->ino_clr_reg);

	/*
	 * Select cpu and compute weight, saving both for sharing and removal.
	 */
	if (ipil_list == NULL)
		ino_p->ino_cpuid = pci_intr_dist_cpuid(ib_p, ino_p);

	cpu_id = ino_p->ino_cpuid;
	ino_p->ino_established = 1;
	weight = pci_class_to_intr_weight(rdip);
	intr_dist_cpuid_add_device_weight(cpu_id, rdip, weight);

#ifdef _STARFIRE
	cpu_id = pc_translate_tgtid(cb_p->cb_ittrans_cookie, cpu_id,
	    IB_GET_MAPREG_INO(ino));
#endif /* _STARFIRE */
	/* program the mapping register only for a brand-new ino */
	if (!ipil_list) {
		*ino_p->ino_map_reg = ib_get_map_reg(mondo, cpu_id);
		*ino_p->ino_map_reg;	/* flush previous write */
	}
ino_done:
	ih_p->ih_ipil_p = ipil_p;
	/* per-handler observability kstat; creation failure is non-fatal */
	ih_p->ih_ksp = kstat_create("pci_intrs",
	    atomic_inc_32_nv(&pciintr_ks_instance), "config", "interrupts",
	    KSTAT_TYPE_NAMED,
	    sizeof (pciintr_ks_template) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (ih_p->ih_ksp != NULL) {
		ih_p->ih_ksp->ks_data_size += MAXPATHLEN * 2;
		ih_p->ih_ksp->ks_lock = &pciintr_ks_template_lock;
		ih_p->ih_ksp->ks_data = &pciintr_ks_template;
		ih_p->ih_ksp->ks_private = ih_p;
		ih_p->ih_ksp->ks_update = pci_ks_update;
		kstat_install(ih_p->ih_ksp);
	}
	ib_ino_map_reg_share(ib_p, ino, ino_p);
	mutex_exit(&ib_p->ib_ino_lst_mutex);
done:
	DEBUG2(DBG_A_INTX, dip, "done! Interrupt 0x%x pil=%x\n",
	    hdlp->ih_vector, hdlp->ih_pri);
	return (DDI_SUCCESS);
fail4:
	ib_delete_ino_pil(ib_p, ipil_p);
fail3:
	if (ih_p->ih_config_handle)
		pci_config_teardown(&ih_p->ih_config_handle);
fail2:
	mutex_exit(&ib_p->ib_ino_lst_mutex);
	kmem_free(ih_p, sizeof (ih_t));
fail1:
	DEBUG2(DBG_A_INTX, dip, "Failed! Interrupt 0x%x pil=%x\n",
	    hdlp->ih_vector, hdlp->ih_pri);
	return (DDI_FAILURE);
}
/*
 * Nexus entry point to remove child rdip's interrupt handler.
 *
 * Pulse interrupts: disable via the mapping register and unregister with
 * i_ddi_rem_ivintr (no weight/ino bookkeeping was done at add time).
 *
 * Normal ino interrupts: under ib_ino_lst_mutex, locate the ino/pil/ih,
 * unlink the handler and drop its distribution weight; when the last
 * handler at that PIL goes away, mark the interrupt pending, unregister
 * the wrapper and delete the ipil; finally re-enable the mapping register
 * only if it is still shared, and free the ino_info once no PILs remain.
 * An ino with no ino_info is forwarded to cb_remove_xintr().
 *
 * Returns DDI_SUCCESS or DDI_FAILURE (or cb_remove_xintr's result).
 */
int
pci_remove_intr(dev_info_t *dip, dev_info_t *rdip, ddi_intr_handle_impl_t *hdlp)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	ib_t *ib_p = pci_p->pci_ib_p;
	cb_t *cb_p = pci_p->pci_cb_p;
	ib_ino_t ino;
	ib_mondo_t mondo;
	ib_ino_info_t *ino_p;	/* non-pulse only */
	ib_ino_pil_t *ipil_p;	/* non-pulse only */
	ih_t *ih_p;		/* non-pulse only */

	ino = IB_MONDO_TO_INO(hdlp->ih_vector);

	DEBUG3(DBG_R_INTX, dip, "pci_rem_intr: rdip=%s%d ino=%x\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), ino);

	if (hdlp->ih_vector & PCI_PULSE_INO) { /* pulse interrupt */
		volatile uint64_t *map_reg_addr;

		/*
		 * No weight was added by pci_add_intr for PCI_PULSE_INO
		 * because it is difficult to determine cpuid here.
		 */
		map_reg_addr = ib_intr_map_reg_addr(ib_p, ino);
		IB_INO_INTR_RESET(map_reg_addr);	/* disable intr */
		*map_reg_addr;	/* flush previous write */

		mondo = pci_xlate_intr(dip, rdip, ib_p, ino);
		if (mondo == 0) {
			DEBUG1(DBG_R_INTX, dip,
			    "can't get mondo for ino %x\n", ino);
			return (DDI_FAILURE);
		}

		if (hdlp->ih_pri == 0)
			hdlp->ih_pri = pci_class_to_pil(rdip);

		hdlp->ih_vector = CB_MONDO_TO_XMONDO(cb_p, mondo);

		DEBUG2(DBG_R_INTX, dip, "pci_rem_intr: pil=0x%x mondo=0x%x\n",
		    hdlp->ih_pri, hdlp->ih_vector);

		i_ddi_rem_ivintr(hdlp);

		DEBUG2(DBG_R_INTX, dip, "pulse success mondo=%x reg=%p\n",
		    mondo, map_reg_addr);
		return (DDI_SUCCESS);
	}

	/* Translate the interrupt property */
	mondo = pci_xlate_intr(dip, rdip, pci_p->pci_ib_p, ino);
	if (mondo == 0) {
		DEBUG1(DBG_R_INTX, dip, "can't get mondo for ino %x\n", ino);
		return (DDI_FAILURE);
	}
	ino = IB_MONDO_TO_INO(mondo);

	mutex_enter(&ib_p->ib_ino_lst_mutex);
	ino_p = ib_locate_ino(ib_p, ino);
	if (!ino_p) {
		/* not one of ours: may belong to the control block (cb) */
		int r = cb_remove_xintr(pci_p, dip, rdip, ino, mondo);
		if (r != DDI_SUCCESS)
			cmn_err(CE_WARN, "%s%d-xintr: ino %x is invalid",
			    ddi_driver_name(dip), ddi_get_instance(dip), ino);
		mutex_exit(&ib_p->ib_ino_lst_mutex);
		return (r);
	}

	ipil_p = ib_ino_locate_ipil(ino_p, hdlp->ih_pri);
	ih_p = ib_intr_locate_ih(ipil_p, rdip, hdlp->ih_inum);
	ib_ino_rem_intr(pci_p, ipil_p, ih_p);
	intr_dist_cpuid_rem_device_weight(ino_p->ino_cpuid, rdip);
	/* last handler at this PIL: tear the PIL level down */
	if (ipil_p->ipil_ih_size == 0) {
		IB_INO_INTR_PEND(ib_clear_intr_reg_addr(ib_p, ino));
		hdlp->ih_vector = CB_MONDO_TO_XMONDO(cb_p, mondo);

		i_ddi_rem_ivintr(hdlp);
		ib_delete_ino_pil(ib_p, ipil_p);
	}

	/* re-enable interrupt only if mapping register still shared */
	if (ib_ino_map_reg_unshare(ib_p, ino, ino_p) || ino_p->ino_ipil_size) {
		IB_INO_INTR_ON(ino_p->ino_map_reg);
		*ino_p->ino_map_reg;	/* flush previous write */
	}
	mutex_exit(&ib_p->ib_ino_lst_mutex);

	/* no PIL levels left: the ino_info itself can go */
	if (ino_p->ino_ipil_size == 0)
		kmem_free(ino_p, sizeof (ib_ino_info_t));

	DEBUG1(DBG_R_INTX, dip, "success! mondo=%x\n", mondo);
	return (DDI_SUCCESS);
}

/*
 * free the pci_inos array allocated during pci_intr_setup. the actual
 * interrupts are torn down by their respective block destroy routines:
 * cb_destroy, pbm_destroy, and ib_destroy.
 */
void
pci_intr_teardown(pci_t *pci_p)
{
	kmem_free(pci_p->pci_inos, pci_p->pci_inos_len);
	pci_p->pci_inos = NULL;
	pci_p->pci_inos_len = 0;
}