/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

/*
 * PCI nexus interrupt handling:
 *	PCI device interrupt handler wrapper
 *	pil lookup routine
 *	PCI device interrupt related initchild code
 */

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/async.h>
#include <sys/spl.h>
#include <sys/sunddi.h>
#include <sys/machsystm.h>	/* e_ddi_nodeid_to_dip() */
#include <sys/ddi_impldefs.h>
#include <sys/pci/pci_obj.h>
#include <sys/sdt.h>
#include <sys/clock.h>

#ifdef _STARFIRE
#include <sys/starfire.h>
#endif /* _STARFIRE */

/*
 * interrupt jabber:
 *
 * When an interrupt line is jabbering, every time the state machine for the
 * associated ino is idled, a new mondo will be sent and the ino will go into
 * the pending state again.  The mondo will cause a new call to
 * pci_intr_wrapper(), which normally idles the ino's state machine and thus
 * precipitates another trip around the loop.
 *
 * The loop can be broken by preventing the ino's state machine from being
 * idled when an interrupt line is jabbering.  See the comment at the
 * beginning of pci_intr_wrapper() explaining how the 'interrupt jabber
 * protection' code does this.
 */
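
/*
 * The unclaimed interrupt limit and the time window over which it is
 * measured are given by pci_unclaimed_intr_max and pci_spurintr_duration,
 * both consumed by pci_spurintr() below; pci_spurintr_msgs controls whether
 * spurious-interrupt messages are logged.
 */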

/*LINTLIBRARY*/

#ifdef NOT_DEFINED
/*
 * This array is used to determine the sparc PIL at which the handler
 * for a given INO will execute.  This table is for onboard devices
 * only.  A different scheme will be used for plug-in cards.
 */

uint_t ino_to_pil[] = {

	/* pil */		/* ino */

	0, 0, 0, 0,		/* 0x00 - 0x03: bus A slot 0 int#A, B, C, D */
	0, 0, 0, 0,		/* 0x04 - 0x07: bus A slot 1 int#A, B, C, D */
	0, 0, 0, 0,		/* 0x08 - 0x0B: unused */
	0, 0, 0, 0,		/* 0x0C - 0x0F: unused */

	0, 0, 0, 0,		/* 0x10 - 0x13: bus B slot 0 int#A, B, C, D */
	0, 0, 0, 0,		/* 0x14 - 0x17: bus B slot 1 int#A, B, C, D */
	0, 0, 0, 0,		/* 0x18 - 0x1B: bus B slot 2 int#A, B, C, D */
	4, 0, 0, 0,		/* 0x1C - 0x1F: bus B slot 3 int#A, B, C, D */

	4,			/* 0x20: SCSI */
	6,			/* 0x21: ethernet */
	3,			/* 0x22: parallel port */
	9,			/* 0x23: audio record */
	9,			/* 0x24: audio playback */
	14,			/* 0x25: power fail */
	4,			/* 0x26: 2nd SCSI */
	8,			/* 0x27: floppy */
	14,			/* 0x28: thermal warning */
	12,			/* 0x29: keyboard */
	12,			/* 0x2A: mouse */
	12,			/* 0x2B: serial */
	0,			/* 0x2C: timer/counter 0 */
	0,			/* 0x2D: timer/counter 1 */
	14,			/* 0x2E: uncorrectable ECC errors */
	14,			/* 0x2F: correctable ECC errors */
	14,			/* 0x30: PCI bus A error */
	14,			/* 0x31: PCI bus B error */
	14,			/* 0x32: power management wakeup */
	14,			/* 0x33 */
	14,			/* 0x34 */
	14,			/* 0x35 */
	14,			/* 0x36 */
	14,			/* 0x37 */
	14,			/* 0x38 */
	14,			/* 0x39 */
	14,			/* 0x3a */
	14,			/* 0x3b */
	14,			/* 0x3c */
	14,			/* 0x3d */
	14,			/* 0x3e */
	14,			/* 0x3f */
	14			/* 0x40 */
};
#endif /* NOT_DEFINED */


#define	PCI_SIMBA_VENID		0x108e	/* vendor id for simba */
#define	PCI_SIMBA_DEVID		0x5000	/* device id for simba */

/*
 * map_pcidev_cfg_reg - create a mapping to the pci device configuration
 *			registers if we have a simba AND a pci to pci bridge
 *			along the device path.
 *			Called with the corresponding mutexes held!!
 *
 * XXX XXX XXX	The purpose of this routine is to overcome a hardware
 *		defect in the Sabre CPU and Simba bridge configuration
 *		which does not drain DMA write data stalled in PCI to PCI
 *		bridges (such as the DEC bridge) beyond Simba.  This routine
 *		sets up the data structures that allow pci_intr_wrapper to
 *		perform a manual data drain operation before passing control
 *		to the interrupt handlers of the device drivers.
 *
 * return value:
 *	DDI_SUCCESS
 *	DDI_FAILURE if unable to create mapping
 */
static int
map_pcidev_cfg_reg(dev_info_t *dip, dev_info_t *rdip, ddi_acc_handle_t *hdl_p)
{
	dev_info_t *cdip;
	dev_info_t *pci_dip = NULL;
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	int simba_found = 0, pci_bridge_found = 0;

	for (cdip = rdip; cdip && cdip != dip; cdip = ddi_get_parent(cdip)) {
		ddi_acc_handle_t config_handle;
		uint32_t vendor_id = ddi_getprop(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS, "vendor-id", 0xffff);

		DEBUG4(DBG_A_INTX, pci_p->pci_dip,
		    "map dev cfg reg for %s%d: @%s%d\n",
		    ddi_driver_name(rdip), ddi_get_instance(rdip),
		    ddi_driver_name(cdip), ddi_get_instance(cdip));

		if (ddi_prop_exists(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
		    "no-dma-interrupt-sync"))
			continue;

		/* continue to search up-stream if not a PCI device */
		if (vendor_id == 0xffff)
			continue;

		/* record the deepest pci device */
		if (!pci_dip)
			pci_dip = cdip;

		/* look for simba */
		if (vendor_id == PCI_SIMBA_VENID) {
			uint32_t device_id = ddi_getprop(DDI_DEV_T_ANY,
			    cdip, DDI_PROP_DONTPASS, "device-id", -1);
			if (device_id == PCI_SIMBA_DEVID) {
				simba_found = 1;
				DEBUG0(DBG_A_INTX, pci_p->pci_dip,
				    "\tFound simba\n");
				continue; /* do not check bridge if simba */
			}
		}

		/* look for pci to pci bridge */
		if (pci_config_setup(cdip, &config_handle) != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "%s%d: can't get brdg cfg space for %s%d\n",
			    ddi_driver_name(dip), ddi_get_instance(dip),
			    ddi_driver_name(cdip), ddi_get_instance(cdip));
			return (DDI_FAILURE);
		}
		if (pci_config_get8(config_handle, PCI_CONF_BASCLASS)
		    == PCI_CLASS_BRIDGE) {
			DEBUG0(DBG_A_INTX, pci_p->pci_dip,
			    "\tFound PCI to xBus bridge\n");
			pci_bridge_found = 1;
		}
		pci_config_teardown(&config_handle);
	}

	if (!pci_bridge_found)
		return (DDI_SUCCESS);
	if (!simba_found && (CHIP_TYPE(pci_p) < PCI_CHIP_SCHIZO))
		return (DDI_SUCCESS);
	if (pci_config_setup(pci_dip, hdl_p) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: can not get config space for %s%d\n",
		    ddi_driver_name(dip), ddi_get_instance(dip),
		    ddi_driver_name(cdip), ddi_get_instance(cdip));
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}

/*
 * If the unclaimed interrupt count has reached the limit set by
 * pci_unclaimed_intr_max within the time limit, then all interrupts on
 * this ino are blocked by not idling the interrupt state machine.
 */
static int
pci_spurintr(ib_ino_info_t *ino_p)
{
	int i;
	ih_t *ih_p = ino_p->ino_ih_start;
	pci_t *pci_p = ino_p->ino_ib_p->ib_pci_p;
	char *err_fmt_str;

	if (ino_p->ino_unclaimed > pci_unclaimed_intr_max)
		return (DDI_INTR_CLAIMED);

	if (!ino_p->ino_unclaimed)
		ino_p->ino_spurintr_begin = ddi_get_lbolt();

	ino_p->ino_unclaimed++;

	if (ino_p->ino_unclaimed <= pci_unclaimed_intr_max)
		goto clear;

	if (drv_hztousec(ddi_get_lbolt() - ino_p->ino_spurintr_begin)
	    > pci_spurintr_duration) {
		ino_p->ino_unclaimed = 0;
		goto clear;
	}
	err_fmt_str = "%s%d: ino 0x%x blocked";
	goto warn;
clear:
	IB_INO_INTR_CLEAR(ino_p->ino_clr_reg); /* clear the pending state */
	if (!pci_spurintr_msgs) /* tomatillo errata #71 spurious mondo */
		return (DDI_INTR_CLAIMED);

	err_fmt_str = "!%s%d: spurious interrupt from ino 0x%x";
warn:
	cmn_err(CE_WARN, err_fmt_str, NAMEINST(pci_p->pci_dip), ino_p->ino_ino);
	for (i = 0; i < ino_p->ino_ih_size; i++, ih_p = ih_p->ih_next)
		cmn_err(CE_CONT, "!%s-%d#%x ", NAMEINST(ih_p->ih_dip),
		    ih_p->ih_inum);
	cmn_err(CE_CONT, "!\n");
	return (DDI_INTR_CLAIMED);
}

/*
 * pci_intr_wrapper
 *
 * This routine is used as a wrapper around the interrupt handlers installed
 * by child device drivers.  It invokes the driver interrupt handlers and
 * examines the return codes.
 *
 * There is a count of unclaimed interrupts kept on a per-ino basis.  If at
 * least one handler claims the interrupt, the counter is reset and the
 * interrupt state machine is idled.  If no handler claims the interrupt,
 * the counter is incremented by one and the state machine is idled.
 * If the count ever reaches the limit value set by pci_unclaimed_intr_max,
 * the interrupt state machine is not idled, thus preventing any further
 * interrupts on that ino.  The state machine will only be idled again if a
 * handler is subsequently added or removed.
 *
 * return value: DDI_INTR_CLAIMED if any handlers claimed the interrupt,
 * DDI_INTR_UNCLAIMED otherwise.
 */

extern uint64_t intr_get_time(void);

uint_t
pci_intr_wrapper(caddr_t arg)
{
	ib_ino_info_t *ino_p = (ib_ino_info_t *)arg;
	uint_t result = 0, r;
	pci_t *pci_p = ino_p->ino_ib_p->ib_pci_p;
	pbm_t *pbm_p = pci_p->pci_pbm_p;
	ih_t *ih_p = ino_p->ino_ih_start;
	int i;

	for (i = 0; i < ino_p->ino_ih_size; i++, ih_p = ih_p->ih_next) {
		dev_info_t *dip = ih_p->ih_dip;
		uint_t (*handler)() = ih_p->ih_handler;
		caddr_t arg1 = ih_p->ih_handler_arg1;
		caddr_t arg2 = ih_p->ih_handler_arg2;
		ddi_acc_handle_t cfg_hdl = ih_p->ih_config_handle;

		if (pci_intr_dma_sync && cfg_hdl && pbm_p->pbm_sync_reg_pa) {
			(void) pci_config_get16(cfg_hdl, PCI_CONF_VENID);
			pci_pbm_dma_sync(pbm_p, ino_p->ino_ino);
		}

		if (ih_p->ih_intr_state == PCI_INTR_STATE_DISABLE) {
			DEBUG3(DBG_INTR, pci_p->pci_dip,
			    "pci_intr_wrapper: %s%d interrupt %d is disabled\n",
			    ddi_driver_name(dip), ddi_get_instance(dip),
			    ino_p->ino_ino);

			continue;
		}

		DTRACE_PROBE4(interrupt__start, dev_info_t, dip,
		    void *, handler, caddr_t, arg1, caddr_t, arg2);

		r = (*handler)(arg1, arg2);

		/*
		 * Account for time used by this interrupt.  Protect against
		 * conflicting writes to ih_ticks from ib_intr_dist_all() by
		 * using atomic ops.
		 */

		if (ino_p->ino_pil <= LOCK_LEVEL)
			atomic_add_64(&ih_p->ih_ticks, intr_get_time());

		DTRACE_PROBE4(interrupt__complete, dev_info_t, dip,
		    void *, handler, caddr_t, arg1, int, r);

		result += r;

		if (pci_check_all_handlers)
			continue;
		if (result)
			break;
	}

	if (!result)
		return (pci_spurintr(ino_p));

	ino_p->ino_unclaimed = 0;
	IB_INO_INTR_CLEAR(ino_p->ino_clr_reg); /* clear the pending state */

	return (DDI_INTR_CLAIMED);
}
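
/*
 * get_my_childs_dip: return the child of dip that rdip hangs under
 * (rdip itself if rdip is a direct child of dip).
 */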
dev_info_t *
get_my_childs_dip(dev_info_t *dip, dev_info_t *rdip)
{
	dev_info_t *cdip = rdip;

	for (; ddi_get_parent(cdip) != dip; cdip = ddi_get_parent(cdip))
		;

	return (cdip);
}

/* default class to pil value mapping */
pci_class_val_t pci_default_pil [] = {
	{0x000000, 0xff0000, 0x1},	/* Class code for pre-2.0 devices */
	{0x010000, 0xff0000, 0x4},	/* Mass Storage Controller */
	{0x020000, 0xff0000, 0x6},	/* Network Controller */
	{0x030000, 0xff0000, 0x9},	/* Display Controller */
	{0x040000, 0xff0000, 0x9},	/* Multimedia Controller */
	{0x050000, 0xff0000, 0xb},	/* Memory Controller */
	{0x060000, 0xff0000, 0xb},	/* Bridge Controller */
	{0x0c0000, 0xffff00, 0x9},	/* Serial Bus, FireWire (IEEE 1394) */
	{0x0c0100, 0xffff00, 0x4},	/* Serial Bus, ACCESS.bus */
	{0x0c0200, 0xffff00, 0x4},	/* Serial Bus, SSA */
	{0x0c0300, 0xffff00, 0x9},	/* Serial Bus, Universal Serial Bus */
	{0x0c0400, 0xffff00, 0x6},	/* Serial Bus, Fibre Channel */
	{0x0c0600, 0xffff00, 0x6}	/* Serial Bus, Infiniband */
};

/*
 * Default class to intr_weight value mapping (% of CPU).  A driver.conf
 * entry on or above the pci node like
 *
 *	pci-class-intr-weights= 0x020000, 0xff0000, 30;
 *
 * can be used to augment or override entries in the default table below.
 *
 * NB: The values below give NICs preference on redistribution, and provide
 * NICs some isolation from other interrupt sources.  We need better
 * interfaces that allow a NIC driver to identify a specific NIC instance
 * as high bandwidth, and thus deserving of separation from other
 * low-bandwidth NICs and of additional isolation from other interrupt
 * sources.
 *
 * NB: We treat Infiniband like a NIC.
 */
pci_class_val_t pci_default_intr_weight [] = {
	{0x020000, 0xff0000, 35},	/* Network Controller */
	{0x010000, 0xff0000, 10},	/* Mass Storage Controller */
	{0x0c0400, 0xffff00, 10},	/* Serial Bus, Fibre Channel */
	{0x0c0600, 0xffff00, 50}	/* Serial Bus, Infiniband */
};
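
/*
 * pci_match_class_val: return the value of the first table entry whose
 * masked class code matches key, or default_val if no entry matches.
 */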
static uint32_t
pci_match_class_val(uint32_t key, pci_class_val_t *rec_p, int nrec,
    uint32_t default_val)
{
	int i;

	for (i = 0; i < nrec; rec_p++, i++) {
		if ((rec_p->class_code & rec_p->class_mask) ==
		    (key & rec_p->class_mask))
			return (rec_p->class_val);
	}

	return (default_val);
}

/*
 * Return the configuration value, based on class code and sub class code,
 * from the specified property-based or default pci_class_val_t table.
 */
uint32_t
pci_class_to_val(dev_info_t *rdip, char *property_name, pci_class_val_t *rec_p,
    int nrec, uint32_t default_val)
{
	int property_len;
	uint32_t class_code;
	pci_class_val_t *conf;
	uint32_t val = default_val;

	/*
	 * Use the "class-code" property to get the base and sub class
	 * codes for the requesting device.
	 */
	class_code = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, rdip,
	    DDI_PROP_DONTPASS, "class-code", -1);

	if (class_code == -1)
		return (val);

	/* look up the val from the default table */
	val = pci_match_class_val(class_code, rec_p, nrec, val);

	/* see if there is a more specific property specified value */
	if (ddi_getlongprop(DDI_DEV_T_ANY, rdip, DDI_PROP_NOTPROM,
	    property_name, (caddr_t)&conf, &property_len))
		return (val);

	if ((property_len % sizeof (pci_class_val_t)) == 0)
		val = pci_match_class_val(class_code, conf,
		    property_len / sizeof (pci_class_val_t), val);
	kmem_free(conf, property_len);
	return (val);
}
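
/*
 * pci_class_to_pil() below consults a "pci-class-priorities" property via
 * the same pci_class_to_val() mechanism, so a hypothetical driver.conf
 * entry of the same form as the "pci-class-intr-weights" example above,
 * for instance
 *
 *	pci-class-priorities= 0x020000, 0xff0000, 0x7;
 *
 * could override the default pil of 0x6 for network controllers.  (The
 * value 0x7 is purely illustrative; pci_class_to_pil() treats values of
 * 0xf and above as invalid and falls back to 0.)
 */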
/* pci_class_to_pil: return the pil for a given PCI device. */
uint32_t
pci_class_to_pil(dev_info_t *rdip)
{
	uint32_t pil;

	/* default pil is 0 (uninitialized) */
	pil = pci_class_to_val(rdip,
	    "pci-class-priorities", pci_default_pil,
	    sizeof (pci_default_pil) / sizeof (pci_class_val_t), 0);

	/* range check the result */
	if (pil >= 0xf)
		pil = 0;

	return (pil);
}

/* pci_class_to_intr_weight: return the intr_weight for a given PCI device. */
int32_t
pci_class_to_intr_weight(dev_info_t *rdip)
{
	int32_t intr_weight;

	/* default weight is 0% */
	intr_weight = pci_class_to_val(rdip,
	    "pci-class-intr-weights", pci_default_intr_weight,
	    sizeof (pci_default_intr_weight) / sizeof (pci_class_val_t), 0);

	/* range check the result */
	if (intr_weight < 0)
		intr_weight = 0;
	if (intr_weight > 1000)
		intr_weight = 1000;

	return (intr_weight);
}

static struct {
	kstat_named_t pciintr_ks_name;
	kstat_named_t pciintr_ks_type;
	kstat_named_t pciintr_ks_cpu;
	kstat_named_t pciintr_ks_pil;
	kstat_named_t pciintr_ks_time;
	kstat_named_t pciintr_ks_ino;
	kstat_named_t pciintr_ks_cookie;
	kstat_named_t pciintr_ks_devpath;
	kstat_named_t pciintr_ks_buspath;
} pciintr_ks_template = {
	{ "name",	KSTAT_DATA_CHAR },
	{ "type",	KSTAT_DATA_CHAR },
	{ "cpu",	KSTAT_DATA_UINT64 },
	{ "pil",	KSTAT_DATA_UINT64 },
	{ "time",	KSTAT_DATA_UINT64 },
	{ "ino",	KSTAT_DATA_UINT64 },
	{ "cookie",	KSTAT_DATA_UINT64 },
	{ "devpath",	KSTAT_DATA_STRING },
	{ "buspath",	KSTAT_DATA_STRING },
};
static uint32_t pciintr_ks_instance;

kmutex_t pciintr_ks_template_lock;

int
pci_ks_update(kstat_t *ksp, int rw)
{
	ih_t *ih_p = ksp->ks_private;
	int maxlen = sizeof (pciintr_ks_template.pciintr_ks_name.value.c);
	ib_t *ib_p = ih_p->ih_ino_p->ino_ib_p;
	pci_t *pci_p = ib_p->ib_pci_p;
	ib_ino_t ino;
	char ih_devpath[MAXPATHLEN];
	char ih_buspath[MAXPATHLEN];

	ino = ih_p->ih_ino_p->ino_ino;

	(void) snprintf(pciintr_ks_template.pciintr_ks_name.value.c, maxlen,
	    "%s%d", ddi_driver_name(ih_p->ih_dip),
	    ddi_get_instance(ih_p->ih_dip));
	(void) strcpy(pciintr_ks_template.pciintr_ks_type.value.c, "fixed");
	pciintr_ks_template.pciintr_ks_cpu.value.ui64 =
	    ih_p->ih_ino_p->ino_cpuid;
	pciintr_ks_template.pciintr_ks_pil.value.ui64 =
	    ih_p->ih_ino_p->ino_pil;
	pciintr_ks_template.pciintr_ks_time.value.ui64 =
	    ih_p->ih_nsec + (uint64_t)
	    tick2ns((hrtime_t)ih_p->ih_ticks, ih_p->ih_ino_p->ino_cpuid);
	pciintr_ks_template.pciintr_ks_ino.value.ui64 = ino;
	pciintr_ks_template.pciintr_ks_cookie.value.ui64 =
	    IB_INO_TO_MONDO(ib_p, ino);

	(void) ddi_pathname(ih_p->ih_dip, ih_devpath);
	(void) ddi_pathname(pci_p->pci_dip, ih_buspath);
	kstat_named_setstr(&pciintr_ks_template.pciintr_ks_devpath, ih_devpath);
	kstat_named_setstr(&pciintr_ks_template.pciintr_ks_buspath, ih_buspath);

	return (0);
}
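
/*
 * pci_add_intr: add an interrupt handler for child rdip to the ino given by
 * hdlp->ih_vector.  Pulse interrupts are registered and targeted directly;
 * ordinary ino interrupts are linked onto the ino's handler list (creating
 * and programming the ino if it is not already shared), with
 * pci_intr_wrapper() acting as the common system-level handler, the device's
 * interrupt weight charged to the target cpu, and a "pci_intrs" kstat
 * created for the handler.
 */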
int
pci_add_intr(dev_info_t *dip, dev_info_t *rdip, ddi_intr_handle_impl_t *hdlp)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	ib_t *ib_p = pci_p->pci_ib_p;
	cb_t *cb_p = pci_p->pci_cb_p;
	ih_t *ih_p;
	ib_ino_t ino;
	ib_ino_info_t *ino_p;	/* pulse interrupts have no ino */
	ib_mondo_t mondo;
	uint32_t cpu_id;
	int ret;
	int32_t weight;

	ino = IB_MONDO_TO_INO(hdlp->ih_vector);

	DEBUG3(DBG_A_INTX, dip, "pci_add_intr: rdip=%s%d ino=%x\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), ino);

	if (ino > ib_p->ib_max_ino) {
		DEBUG1(DBG_A_INTX, dip, "ino %x is invalid\n", ino);
		return (DDI_INTR_NOTFOUND);
	}

	if (hdlp->ih_vector & PCI_PULSE_INO) {
		volatile uint64_t *map_reg_addr;
		map_reg_addr = ib_intr_map_reg_addr(ib_p, ino);

		mondo = pci_xlate_intr(dip, rdip, ib_p, ino);
		if (mondo == 0)
			goto fail1;

		hdlp->ih_vector = CB_MONDO_TO_XMONDO(cb_p, mondo);

		if (i_ddi_add_ivintr(hdlp) != DDI_SUCCESS)
			goto fail1;

		/*
		 * Select cpu and program.
		 *
		 * Since there is no good way to always derive cpuid in
		 * pci_remove_intr for PCI_PULSE_INO (esp. for STARFIRE), we
		 * don't add (or remove) device weight for pulsed interrupt
		 * sources.
		 */
		mutex_enter(&ib_p->ib_intr_lock);
		cpu_id = intr_dist_cpuid();
		*map_reg_addr = ib_get_map_reg(mondo, cpu_id);
		mutex_exit(&ib_p->ib_intr_lock);
		*map_reg_addr;	/* flush previous write */
		goto done;
	}

	if ((mondo = pci_xlate_intr(dip, rdip, pci_p->pci_ib_p, ino)) == 0)
		goto fail1;

	ino = IB_MONDO_TO_INO(mondo);

	mutex_enter(&ib_p->ib_ino_lst_mutex);
	ih_p = ib_alloc_ih(rdip, hdlp->ih_inum,
	    hdlp->ih_cb_func, hdlp->ih_cb_arg1, hdlp->ih_cb_arg2);
	if (map_pcidev_cfg_reg(dip, rdip, &ih_p->ih_config_handle))
		goto fail2;

	if (ino_p = ib_locate_ino(ib_p, ino)) {	/* sharing ino */
		uint32_t intr_index = hdlp->ih_inum;
		if (ib_ino_locate_intr(ino_p, rdip, intr_index)) {
			DEBUG1(DBG_A_INTX, dip, "dup intr #%d\n", intr_index);
			goto fail3;
		}

		/* add weight to the cpu that we are already targeting */
		cpu_id = ino_p->ino_cpuid;
		weight = pci_class_to_intr_weight(rdip);
		intr_dist_cpuid_add_device_weight(cpu_id, rdip, weight);

		ib_ino_add_intr(pci_p, ino_p, ih_p);
		goto ino_done;
	}

	ino_p = ib_new_ino(ib_p, ino, ih_p);

	if (hdlp->ih_pri == 0)
		hdlp->ih_pri = pci_class_to_pil(rdip);

	hdlp->ih_vector = CB_MONDO_TO_XMONDO(cb_p, mondo);

	DEBUG2(DBG_A_INTX, dip, "pci_add_intr: pil=0x%x mondo=0x%x\n",
	    hdlp->ih_pri, hdlp->ih_vector);

	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp,
	    (ddi_intr_handler_t *)pci_intr_wrapper, (caddr_t)ino_p, NULL);

	ret = i_ddi_add_ivintr(hdlp);

	/*
	 * Restore original interrupt handler
	 * and arguments in interrupt handle.
	 */
	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp, ih_p->ih_handler,
	    ih_p->ih_handler_arg1, ih_p->ih_handler_arg2);

	if (ret != DDI_SUCCESS)
		goto fail4;

	/* Save the pil for this ino */
	ino_p->ino_pil = hdlp->ih_pri;

	/* clear and enable interrupt */
	IB_INO_INTR_CLEAR(ino_p->ino_clr_reg);

	/* select cpu and compute weight, saving both for sharing and removal */
	cpu_id = pci_intr_dist_cpuid(ib_p, ino_p);
	ino_p->ino_cpuid = cpu_id;
	ino_p->ino_established = 1;
	weight = pci_class_to_intr_weight(rdip);
	intr_dist_cpuid_add_device_weight(cpu_id, rdip, weight);

#ifdef _STARFIRE
	cpu_id = pc_translate_tgtid(cb_p->cb_ittrans_cookie, cpu_id,
	    IB_GET_MAPREG_INO(ino));
#endif /* _STARFIRE */
	*ino_p->ino_map_reg = ib_get_map_reg(mondo, cpu_id);
	*ino_p->ino_map_reg;	/* flush previous write */
ino_done:
	ih_p->ih_ino_p = ino_p;
	ih_p->ih_ksp = kstat_create("pci_intrs",
	    atomic_inc_32_nv(&pciintr_ks_instance), "config", "interrupts",
	    KSTAT_TYPE_NAMED,
	    sizeof (pciintr_ks_template) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (ih_p->ih_ksp != NULL) {
		ih_p->ih_ksp->ks_data_size += MAXPATHLEN * 2;
		ih_p->ih_ksp->ks_lock = &pciintr_ks_template_lock;
		ih_p->ih_ksp->ks_data = &pciintr_ks_template;
		ih_p->ih_ksp->ks_private = ih_p;
		ih_p->ih_ksp->ks_update = pci_ks_update;
		kstat_install(ih_p->ih_ksp);
	}
	ib_ino_map_reg_share(ib_p, ino, ino_p);
	mutex_exit(&ib_p->ib_ino_lst_mutex);
done:
	DEBUG2(DBG_A_INTX, dip, "done! Interrupt 0x%x pil=%x\n",
	    hdlp->ih_vector, hdlp->ih_pri);
	return (DDI_SUCCESS);
fail4:
	ib_delete_ino(ib_p, ino_p);
fail3:
	if (ih_p->ih_config_handle)
		pci_config_teardown(&ih_p->ih_config_handle);
fail2:
	mutex_exit(&ib_p->ib_ino_lst_mutex);
	kmem_free(ih_p, sizeof (ih_t));
fail1:
	DEBUG2(DBG_A_INTX, dip, "Failed! Interrupt 0x%x pil=%x\n",
	    hdlp->ih_vector, hdlp->ih_pri);
	return (DDI_FAILURE);
}
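
/*
 * pci_remove_intr: remove the interrupt handler for child rdip.  Pulse
 * interrupts are disabled and unregistered directly; for ordinary ino
 * interrupts the handler is unlinked from the ino's handler list, the
 * device's interrupt weight is removed from the target cpu, and the ino
 * (along with its ivintr registration) is torn down once its last handler
 * is gone.
 */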
int
pci_remove_intr(dev_info_t *dip, dev_info_t *rdip, ddi_intr_handle_impl_t *hdlp)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	ib_t *ib_p = pci_p->pci_ib_p;
	cb_t *cb_p = pci_p->pci_cb_p;
	ib_ino_t ino;
	ib_mondo_t mondo;
	ib_ino_info_t *ino_p;	/* non-pulse only */
	ih_t *ih_p;		/* non-pulse only */

	ino = IB_MONDO_TO_INO(hdlp->ih_vector);

	DEBUG3(DBG_R_INTX, dip, "pci_rem_intr: rdip=%s%d ino=%x\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), ino);

	if (hdlp->ih_vector & PCI_PULSE_INO) { /* pulse interrupt */
		volatile uint64_t *map_reg_addr;

		/*
		 * No weight was added by pci_add_intr for PCI_PULSE_INO
		 * because it is difficult to determine cpuid here.
		 */
		map_reg_addr = ib_intr_map_reg_addr(ib_p, ino);
		IB_INO_INTR_RESET(map_reg_addr);	/* disable intr */
		*map_reg_addr;

		mondo = pci_xlate_intr(dip, rdip, ib_p, ino);
		if (mondo == 0) {
			DEBUG1(DBG_R_INTX, dip,
			    "can't get mondo for ino %x\n", ino);
			return (DDI_FAILURE);
		}

		if (hdlp->ih_pri == 0)
			hdlp->ih_pri = pci_class_to_pil(rdip);

		hdlp->ih_vector = CB_MONDO_TO_XMONDO(cb_p, mondo);

		DEBUG2(DBG_R_INTX, dip, "pci_rem_intr: pil=0x%x mondo=0x%x\n",
		    hdlp->ih_pri, hdlp->ih_vector);

		i_ddi_rem_ivintr(hdlp);

		DEBUG2(DBG_R_INTX, dip, "pulse success mondo=%x reg=%p\n",
		    mondo, map_reg_addr);
		return (DDI_SUCCESS);
	}

	/* Translate the interrupt property */
	mondo = pci_xlate_intr(dip, rdip, pci_p->pci_ib_p, ino);
	if (mondo == 0) {
		DEBUG1(DBG_R_INTX, dip, "can't get mondo for ino %x\n", ino);
		return (DDI_FAILURE);
	}
	ino = IB_MONDO_TO_INO(mondo);

	mutex_enter(&ib_p->ib_ino_lst_mutex);
	ino_p = ib_locate_ino(ib_p, ino);
	if (!ino_p) {
		int r = cb_remove_xintr(pci_p, dip, rdip, ino, mondo);
		if (r != DDI_SUCCESS)
			cmn_err(CE_WARN, "%s%d-xintr: ino %x is invalid",
			    ddi_driver_name(dip), ddi_get_instance(dip), ino);
		mutex_exit(&ib_p->ib_ino_lst_mutex);
		return (r);
	}

	ih_p = ib_ino_locate_intr(ino_p, rdip, hdlp->ih_inum);
	ib_ino_rem_intr(pci_p, ino_p, ih_p);
	intr_dist_cpuid_rem_device_weight(ino_p->ino_cpuid, rdip);
	if (ino_p->ino_ih_size == 0) {
		IB_INO_INTR_PEND(ib_clear_intr_reg_addr(ib_p, ino));
		hdlp->ih_vector = CB_MONDO_TO_XMONDO(cb_p, mondo);
		if (hdlp->ih_pri == 0)
			hdlp->ih_pri = pci_class_to_pil(rdip);

		i_ddi_rem_ivintr(hdlp);
		ib_delete_ino(ib_p, ino_p);
	}

	/* re-enable interrupt only if mapping register still shared */
	if (ib_ino_map_reg_unshare(ib_p, ino, ino_p)) {
		IB_INO_INTR_ON(ino_p->ino_map_reg);
		*ino_p->ino_map_reg;
	}
	mutex_exit(&ib_p->ib_ino_lst_mutex);

	if (ino_p->ino_ih_size == 0)
		kmem_free(ino_p, sizeof (ib_ino_info_t));

	DEBUG1(DBG_R_INTX, dip, "success! mondo=%x\n", mondo);
	return (DDI_SUCCESS);
}

/*
 * free the pci_inos array allocated during pci_intr_setup. the actual
 * interrupts are torn down by their respective block destroy routines:
 * cb_destroy, pbm_destroy, and ib_destroy.
 */
void
pci_intr_teardown(pci_t *pci_p)
{
	kmem_free(pci_p->pci_inos, pci_p->pci_inos_len);
	pci_p->pci_inos = NULL;
	pci_p->pci_inos_len = 0;
}