/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

/*
 * PCI Interrupt Block (RISCx) implementation
 *	initialization
 *	interrupt enable/disable/clear and mapping register manipulation
 */

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/async.h>
#include <sys/systm.h>		/* panicstr */
#include <sys/spl.h>
#include <sys/sunddi.h>
#include <sys/machsystm.h>	/* intr_dist_add */
#include <sys/ddi_impldefs.h>
#include <sys/clock.h>
#include <sys/cpuvar.h>
#include <sys/pci/pci_obj.h>

#ifdef _STARFIRE
#include <sys/starfire.h>
#endif /* _STARFIRE */

/*LINTLIBRARY*/

static uint_t ib_intr_reset(void *arg);

void
ib_create(pci_t *pci_p)
{
	dev_info_t *dip = pci_p->pci_dip;
	ib_t *ib_p;
	uintptr_t a;
	int i;

	/*
	 * Allocate interrupt block state structure and link it to
	 * the pci state structure.
	 */
	ib_p = kmem_zalloc(sizeof (ib_t), KM_SLEEP);
	pci_p->pci_ib_p = ib_p;
	ib_p->ib_pci_p = pci_p;

	a = pci_ib_setup(ib_p);

	/*
	 * Determine virtual addresses of interrupt mapping, clear and diag
	 * registers that have common offsets.
	 */
	ib_p->ib_slot_clear_intr_regs =
	    a + COMMON_IB_SLOT_CLEAR_INTR_REG_OFFSET;
	ib_p->ib_intr_retry_timer_reg =
	    (uint64_t *)(a + COMMON_IB_INTR_RETRY_TIMER_OFFSET);
	ib_p->ib_slot_intr_state_diag_reg =
	    (uint64_t *)(a + COMMON_IB_SLOT_INTR_STATE_DIAG_REG);
	ib_p->ib_obio_intr_state_diag_reg =
	    (uint64_t *)(a + COMMON_IB_OBIO_INTR_STATE_DIAG_REG);

	if (CHIP_TYPE(pci_p) != PCI_CHIP_XMITS) {
		ib_p->ib_upa_imr[0] = (volatile uint64_t *)
		    (a + COMMON_IB_UPA0_INTR_MAP_REG_OFFSET);
		ib_p->ib_upa_imr[1] = (volatile uint64_t *)
		    (a + COMMON_IB_UPA1_INTR_MAP_REG_OFFSET);
	}

	DEBUG2(DBG_ATTACH, dip, "ib_create: slot_imr=%x, slot_cir=%x\n",
	    ib_p->ib_slot_intr_map_regs, ib_p->ib_slot_clear_intr_regs);
	DEBUG2(DBG_ATTACH, dip, "ib_create: obio_imr=%x, obio_cir=%x\n",
	    ib_p->ib_obio_intr_map_regs, ib_p->ib_obio_clear_intr_regs);
	DEBUG2(DBG_ATTACH, dip, "ib_create: upa0_imr=%x, upa1_imr=%x\n",
	    ib_p->ib_upa_imr[0], ib_p->ib_upa_imr[1]);
	DEBUG3(DBG_ATTACH, dip,
	    "ib_create: retry_timer=%x, obio_diag=%x slot_diag=%x\n",
	    ib_p->ib_intr_retry_timer_reg,
	    ib_p->ib_obio_intr_state_diag_reg,
	    ib_p->ib_slot_intr_state_diag_reg);

	ib_p->ib_ino_lst = (ib_ino_info_t *)NULL;
	mutex_init(&ib_p->ib_intr_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&ib_p->ib_ino_lst_mutex, NULL, MUTEX_DRIVER, NULL);

	DEBUG1(DBG_ATTACH, dip, "ib_create: numproxy=%x\n",
	    pci_p->pci_numproxy);
	for (i = 1; i <= pci_p->pci_numproxy; i++) {
		set_intr_mapping_reg(pci_p->pci_id,
		    (uint64_t *)ib_p->ib_upa_imr[i - 1], i);
	}

	ib_configure(ib_p);
	bus_func_register(BF_TYPE_RESINTR, ib_intr_reset, ib_p);
}

void
ib_destroy(pci_t *pci_p)
{
	ib_t *ib_p = pci_p->pci_ib_p;
	dev_info_t *dip = pci_p->pci_dip;

	DEBUG0(DBG_IB, dip, "ib_destroy\n");
	bus_func_unregister(BF_TYPE_RESINTR, ib_intr_reset, ib_p);

	intr_dist_rem_weighted(ib_intr_dist_all, ib_p);
	mutex_destroy(&ib_p->ib_ino_lst_mutex);
	mutex_destroy(&ib_p->ib_intr_lock);

	ib_free_ino_all(ib_p);

	kmem_free(ib_p, sizeof (ib_t));
	pci_p->pci_ib_p = NULL;
}

void
ib_configure(ib_t *ib_p)
{
	/* XXX could be different between psycho and schizo */
	*ib_p->ib_intr_retry_timer_reg = pci_intr_retry_intv;
}

/*
 * Can only be used for psycho internal interrupts: thermal, power,
 * ue, ce, pbm.
 */
void
ib_intr_enable(pci_t *pci_p, ib_ino_t ino)
{
	ib_t *ib_p = pci_p->pci_ib_p;
	ib_mondo_t mondo = IB_INO_TO_MONDO(ib_p, ino);
	volatile uint64_t *imr_p = ib_intr_map_reg_addr(ib_p, ino);
	uint_t cpu_id;

	/*
	 * Determine the cpu for the interrupt.
	 */
	mutex_enter(&ib_p->ib_intr_lock);
	cpu_id = intr_dist_cpuid();
#ifdef _STARFIRE
	cpu_id = pc_translate_tgtid(IB2CB(ib_p)->cb_ittrans_cookie, cpu_id,
	    IB_GET_MAPREG_INO(ino));
#endif /* _STARFIRE */
	DEBUG2(DBG_IB, pci_p->pci_dip,
	    "ib_intr_enable: ino=%x cpu_id=%x\n", ino, cpu_id);

	*imr_p = ib_get_map_reg(mondo, cpu_id);
	IB_INO_INTR_CLEAR(ib_clear_intr_reg_addr(ib_p, ino));
	mutex_exit(&ib_p->ib_intr_lock);
}
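
/*
 * Illustrative only (the pci_p->pci_inos[] array, the CBNINTR_* indices
 * and IB_INTR_WAIT are assumed from elsewhere in the sun4u pci module):
 * a nexus-internal source such as the CE error interrupt would be
 * enabled, and later disabled with a wait for in-flight handlers, as
 *
 *	ib_intr_enable(pci_p, pci_p->pci_inos[CBNINTR_CE]);
 *	ib_intr_disable(pci_p->pci_ib_p, pci_p->pci_inos[CBNINTR_CE],
 *	    IB_INTR_WAIT);
 */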

/*
 * Disable the interrupt via its interrupt mapping register.
 * Can only be used for internal interrupts: thermal, power, ue, ce, pbm.
 * If called from interrupt context, wait should be set to 0.
 */
void
ib_intr_disable(ib_t *ib_p, ib_ino_t ino, int wait)
{
	volatile uint64_t *imr_p = ib_intr_map_reg_addr(ib_p, ino);
	volatile uint64_t *state_reg_p = IB_INO_INTR_STATE_REG(ib_p, ino);
	hrtime_t start_time;

	/* disable the interrupt */
	mutex_enter(&ib_p->ib_intr_lock);
	IB_INO_INTR_OFF(imr_p);
	*imr_p;	/* flush previous write */
	mutex_exit(&ib_p->ib_intr_lock);

	if (!wait)
		goto wait_done;

	start_time = gethrtime();
	/* busy wait if there is an interrupt being processed */
	while (IB_INO_INTR_PENDING(state_reg_p, ino) && !panicstr) {
		if (gethrtime() - start_time > pci_intrpend_timeout) {
			pbm_t *pbm_p = ib_p->ib_pci_p->pci_pbm_p;
			cmn_err(CE_WARN, "%s:%s: ib_intr_disable timeout %x",
			    pbm_p->pbm_nameinst_str,
			    pbm_p->pbm_nameaddr_str, ino);
			break;
		}
	}
wait_done:
	IB_INO_INTR_PEND(ib_clear_intr_reg_addr(ib_p, ino));
#ifdef _STARFIRE
	pc_ittrans_cleanup(IB2CB(ib_p)->cb_ittrans_cookie,
	    (volatile uint64_t *)(uintptr_t)ino);
#endif /* _STARFIRE */
}

/*
 * Can only be used for psycho internal interrupts: thermal, power,
 * ue, ce, pbm.
 */
void
ib_nintr_clear(ib_t *ib_p, ib_ino_t ino)
{
	uint64_t *clr_reg = ib_clear_intr_reg_addr(ib_p, ino);

	IB_INO_INTR_CLEAR(clr_reg);
}

/*
 * Distribute PBM and UPA interrupts.  ino is set to 0 by the caller if
 * we are dealing with UPA interrupts (without inos).
 */
void
ib_intr_dist_nintr(ib_t *ib_p, ib_ino_t ino, volatile uint64_t *imr_p)
{
	volatile uint64_t imr = *imr_p;
	uint32_t cpu_id;

	if (!IB_INO_INTR_ISON(imr))
		return;

	cpu_id = intr_dist_cpuid();

#ifdef _STARFIRE
	if (ino) {
		cpu_id = pc_translate_tgtid(IB2CB(ib_p)->cb_ittrans_cookie,
		    cpu_id, IB_GET_MAPREG_INO(ino));
	}
#else /* _STARFIRE */
	if (ib_map_reg_get_cpu(*imr_p) == cpu_id)
		return;
#endif /* _STARFIRE */

	*imr_p = ib_get_map_reg(IB_IMR2MONDO(imr), cpu_id);
	imr = *imr_p;	/* flush previous write */
}
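
/*
 * A note on the conversion below (example values hypothetical):
 * ih_ticks accumulates clock ticks on whichever CPU last ran the
 * handler, so it must be scaled by that CPU's clock rate.  For
 * instance, 1,200,000 ticks logged on a 1.2 GHz CPU convert to
 * 1,000,000 ns, while the same count logged on a 600 MHz CPU would be
 * 2,000,000 ns; this is why tick2ns() takes cpu_id.
 */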

/*
 * Convert the ticks logged while on a given CPU into nsec and add them
 * to the handler's ih_nsec count.
 */
/*ARGSUSED*/
void
ib_cpu_ticks_to_ih_nsec(ib_t *ib_p, ih_t *ih_p, uint32_t cpu_id)
{
	extern kmutex_t pciintr_ks_template_lock;
	hrtime_t ticks;

	/*
	 * Because we are updating two fields in ih_t we must lock
	 * pciintr_ks_template_lock to prevent someone from reading the
	 * kstats after we set ih_ticks to 0 and before we increment
	 * ih_nsec to compensate.
	 *
	 * We must also protect against the interrupt arriving and
	 * incrementing ih_ticks between the time we read it and when we
	 * reset it to 0.  To do this we use atomic_swap.
	 */

	ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex));

	mutex_enter(&pciintr_ks_template_lock);
	ticks = atomic_swap_64(&ih_p->ih_ticks, 0);
	ih_p->ih_nsec += (uint64_t)tick2ns(ticks, cpu_id);
	mutex_exit(&pciintr_ks_template_lock);
}

static void
ib_intr_dist(ib_t *ib_p, ib_ino_info_t *ino_p)
{
	uint32_t cpu_id = ino_p->ino_cpuid;
	ib_ino_t ino = ino_p->ino_ino;
	volatile uint64_t imr, *imr_p, *state_reg;
	hrtime_t start_time;

	ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex));
	imr_p = ib_intr_map_reg_addr(ib_p, ino);
	state_reg = IB_INO_INTR_STATE_REG(ib_p, ino);

#ifdef _STARFIRE
	/*
	 * For Starfire it is a pain to check the current target for
	 * the mondo since we have to read the PC asic's ITTR slot
	 * assigned to this mondo.  It is much easier to assume the
	 * current target is always different and reprogram the target
	 * every time.
	 */
	cpu_id = pc_translate_tgtid(IB2CB(ib_p)->cb_ittrans_cookie, cpu_id,
	    IB_GET_MAPREG_INO(ino));
#else
	if (ib_map_reg_get_cpu(*imr_p) == cpu_id) /* same cpu, no reprog */
		return;
#endif /* _STARFIRE */

	/* disable interrupt, this could disrupt devices sharing our slot */
	IB_INO_INTR_OFF(imr_p);
	imr = *imr_p;	/* flush previous write */

	/* busy wait if there is an interrupt being processed */
	start_time = gethrtime();
	while (IB_INO_INTR_PENDING(state_reg, ino) && !panicstr) {
		if (gethrtime() - start_time > pci_intrpend_timeout) {
			pbm_t *pbm_p = ib_p->ib_pci_p->pci_pbm_p;
			cmn_err(CE_WARN, "%s:%s: ib_intr_dist(%p,%x) timeout",
			    pbm_p->pbm_nameinst_str,
			    pbm_p->pbm_nameaddr_str,
			    imr_p, IB_INO_TO_MONDO(ib_p, ino));
			break;
		}
	}
	*imr_p = ib_get_map_reg(IB_IMR2MONDO(imr), cpu_id);
	imr = *imr_p;	/* flush previous write */
}
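
/*
 * Sketch of how the callback below is driven (the registration point is
 * assumed; only the intr_dist_rem_weighted() call in ib_destroy() is
 * visible in this file): the nexus is expected to register with
 * intr_dist_add_weighted(ib_intr_dist_all, ib_p), and a redistribution
 * pass then calls back with decreasing weights, roughly
 *
 *	ib_intr_dist_all(ib_p, weight_max, weight_max);
 *	ib_intr_dist_all(ib_p, weight_max, weight_max - 1);
 *	...
 *	ib_intr_dist_all(ib_p, weight_max, 0);
 */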

/*
 * Redistribute interrupts of the specified weight.  The first call has a
 * weight of weight_max, which can be used to trigger initialization for
 * redistribution.  The inos with weight [weight_max, inf.) should be
 * processed on the "weight == weight_max" call.  This first call is
 * followed by calls of decreasing weights, and inos of that weight should
 * be processed on each such call.  The final call specifies a weight of
 * zero, which can be used to trigger processing of stragglers.
 */
void
ib_intr_dist_all(void *arg, int32_t weight_max, int32_t weight)
{
	ib_t *ib_p = (ib_t *)arg;
	pci_t *pci_p = ib_p->ib_pci_p;
	ib_ino_info_t *ino_p;
	ib_ino_pil_t *ipil_p;
	ih_t *ih_lst;
	int32_t dweight;
	int i;

	if (weight == 0) {
		mutex_enter(&ib_p->ib_intr_lock);
		if (CHIP_TYPE(pci_p) != PCI_CHIP_XMITS) {
			for (i = 0; i < 2; i++)
				ib_intr_dist_nintr(ib_p, 0,
				    ib_p->ib_upa_imr[i]);
		}
		mutex_exit(&ib_p->ib_intr_lock);
	}

	mutex_enter(&ib_p->ib_ino_lst_mutex);

	/* Perform special processing for first call of a redistribution. */
	if (weight == weight_max) {
		for (ino_p = ib_p->ib_ino_lst; ino_p;
		    ino_p = ino_p->ino_next_p) {

			/*
			 * Clear ino_established of each ino on first call.
			 * The ino_established field may be used by a pci
			 * nexus driver's pci_intr_dist_cpuid implementation
			 * when detecting an established pci slot-cpu binding
			 * for multi-function pci cards.
			 */
			ino_p->ino_established = 0;

			/*
			 * Recompute the ino_intr_weight based on the device
			 * weight of all devinfo nodes sharing the ino (this
			 * will allow us to pick up new weights established
			 * by i_ddi_set_intr_weight()).
			 */
			ino_p->ino_intr_weight = 0;

			for (ipil_p = ino_p->ino_ipil_p; ipil_p;
			    ipil_p = ipil_p->ipil_next_p) {
				for (i = 0, ih_lst = ipil_p->ipil_ih_head;
				    i < ipil_p->ipil_ih_size; i++,
				    ih_lst = ih_lst->ih_next) {
					dweight = i_ddi_get_intr_weight
					    (ih_lst->ih_dip);
					if (dweight > 0)
						ino_p->ino_intr_weight +=
						    dweight;
				}
			}
		}
	}

	for (ino_p = ib_p->ib_ino_lst; ino_p; ino_p = ino_p->ino_next_p) {
		uint32_t orig_cpuid;

		/*
		 * Get the weight of the ino and determine if we are going
		 * to process this call.  We wait until an ib_intr_dist_all
		 * call of the proper weight occurs to support redistribution
		 * of all heavy weighted interrupts first (across all nexus
		 * driver instances).  This is done to ensure optimal
		 * INTR_WEIGHTED_DIST behavior.
		 */
		if ((weight == ino_p->ino_intr_weight) ||
		    ((weight >= weight_max) &&
		    (ino_p->ino_intr_weight >= weight_max))) {
			/* select cpuid to target and mark ino established */
			orig_cpuid = ino_p->ino_cpuid;
			if (cpu[orig_cpuid] == NULL)
				orig_cpuid = CPU->cpu_id;
			ino_p->ino_cpuid = pci_intr_dist_cpuid(ib_p, ino_p);
			ino_p->ino_established = 1;

			/* Add device weight of ino devinfos to targeted cpu. */
			for (ipil_p = ino_p->ino_ipil_p; ipil_p;
			    ipil_p = ipil_p->ipil_next_p) {
				for (i = 0, ih_lst = ipil_p->ipil_ih_head;
				    i < ipil_p->ipil_ih_size; i++,
				    ih_lst = ih_lst->ih_next) {

					dweight = i_ddi_get_intr_weight(
					    ih_lst->ih_dip);
					intr_dist_cpuid_add_device_weight(
					    ino_p->ino_cpuid, ih_lst->ih_dip,
					    dweight);

					/*
					 * Different cpus may have different
					 * clock speeds.  To account for this,
					 * whenever an interrupt is moved to a
					 * new CPU, we convert the accumulated
					 * ticks into nsec, based upon the
					 * clock rate of the prior CPU.
					 *
					 * It is possible that the prior CPU
					 * no longer exists.  In this case,
					 * fall back to using this CPU's
					 * clock rate.
					 *
					 * Note that the value in ih_ticks
					 * has already been corrected for any
					 * power savings mode which might
					 * have been in effect.
					 */
					ib_cpu_ticks_to_ih_nsec(ib_p, ih_lst,
					    orig_cpuid);
				}
			}

			/* program the hardware */
			ib_intr_dist(ib_p, ino_p);
		}
	}
	mutex_exit(&ib_p->ib_ino_lst_mutex);
}
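
/*
 * Worked example of the weight matching above (values hypothetical):
 * with weight_max = 10 and inos of ino_intr_weight 12, 5 and 0, the
 * weight == 10 call processes the weight-12 ino (both operands of the
 * second clause are >= weight_max), the weight == 5 call processes the
 * weight-5 ino via the equality clause, and the final weight == 0 call
 * picks up the weight-0 straggler.
 */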

/*
 * Reset interrupts to IDLE.  This function is called during
 * panic handling after redistributing interrupts; it's needed to
 * support dumping to network devices after 'sync' from OBP.
 *
 * N.B.  This routine runs in a context where all other threads
 * are permanently suspended.
 */
static uint_t
ib_intr_reset(void *arg)
{
	ib_t *ib_p = (ib_t *)arg;
	ib_ino_t ino;
	uint64_t *clr_reg;

	/*
	 * Note that we only actually care about interrupts that are
	 * potentially from network devices.
	 */
	for (ino = 0; ino <= ib_p->ib_max_ino; ino++) {
		clr_reg = ib_clear_intr_reg_addr(ib_p, ino);
		IB_INO_INTR_CLEAR(clr_reg);
	}

	return (BF_NONE);
}

void
ib_suspend(ib_t *ib_p)
{
	ib_ino_info_t *ip;
	pci_t *pci_p = ib_p->ib_pci_p;

	/* save ino_lst interrupts' mapping registers content */
	mutex_enter(&ib_p->ib_ino_lst_mutex);
	for (ip = ib_p->ib_ino_lst; ip; ip = ip->ino_next_p)
		ip->ino_map_reg_save = *ip->ino_map_reg;
	mutex_exit(&ib_p->ib_ino_lst_mutex);

	if (CHIP_TYPE(pci_p) != PCI_CHIP_XMITS) {
		ib_p->ib_upa_imr_state[0] = *ib_p->ib_upa_imr[0];
		ib_p->ib_upa_imr_state[1] = *ib_p->ib_upa_imr[1];
	}
}

void
ib_resume(ib_t *ib_p)
{
	ib_ino_info_t *ip;
	pci_t *pci_p = ib_p->ib_pci_p;

	/* restore ino_lst interrupts' mapping registers content */
	mutex_enter(&ib_p->ib_ino_lst_mutex);
	for (ip = ib_p->ib_ino_lst; ip; ip = ip->ino_next_p) {
		IB_INO_INTR_CLEAR(ip->ino_clr_reg);	/* set intr to idle */
		*ip->ino_map_reg = ip->ino_map_reg_save; /* restore IMR */
	}
	mutex_exit(&ib_p->ib_ino_lst_mutex);

	if (CHIP_TYPE(pci_p) != PCI_CHIP_XMITS) {
		*ib_p->ib_upa_imr[0] = ib_p->ib_upa_imr_state[0];
		*ib_p->ib_upa_imr[1] = ib_p->ib_upa_imr_state[1];
	}
}

/*
 * Locate the ino_info structure on ib_p->ib_ino_lst according to ino#;
 * returns NULL if not found.
 */
ib_ino_info_t *
ib_locate_ino(ib_t *ib_p, ib_ino_t ino_num)
{
	ib_ino_info_t *ino_p = ib_p->ib_ino_lst;

	ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex));

	for (; ino_p && ino_p->ino_ino != ino_num; ino_p = ino_p->ino_next_p);
	return (ino_p);
}

#define	IB_INO_TO_SLOT(ino) (IB_IS_OBIO_INO(ino) ? 0xff : ((ino) & 0x1f) >> 2)

ib_ino_pil_t *
ib_new_ino_pil(ib_t *ib_p, ib_ino_t ino_num, uint_t pil, ih_t *ih_p)
{
	ib_ino_pil_t *ipil_p = kmem_zalloc(sizeof (ib_ino_pil_t), KM_SLEEP);
	ib_ino_info_t *ino_p;

	if ((ino_p = ib_locate_ino(ib_p, ino_num)) == NULL) {
		ino_p = kmem_zalloc(sizeof (ib_ino_info_t), KM_SLEEP);

		ino_p->ino_next_p = ib_p->ib_ino_lst;
		ib_p->ib_ino_lst = ino_p;

		ino_p->ino_ino = ino_num;
		ino_p->ino_slot_no = IB_INO_TO_SLOT(ino_num);
		ino_p->ino_ib_p = ib_p;
		ino_p->ino_clr_reg = ib_clear_intr_reg_addr(ib_p, ino_num);
		ino_p->ino_map_reg = ib_intr_map_reg_addr(ib_p, ino_num);
		ino_p->ino_unclaimed_intrs = 0;
		ino_p->ino_lopil = pil;
	}

	ih_p->ih_next = ih_p;
	ipil_p->ipil_pil = pil;
	ipil_p->ipil_ih_head = ih_p;
	ipil_p->ipil_ih_tail = ih_p;
	ipil_p->ipil_ih_start = ih_p;
	ipil_p->ipil_ih_size = 1;
	ipil_p->ipil_ino_p = ino_p;

	ipil_p->ipil_next_p = ino_p->ino_ipil_p;
	ino_p->ino_ipil_p = ipil_p;
	ino_p->ino_ipil_size++;

	if (ino_p->ino_lopil > pil)
		ino_p->ino_lopil = pil;

	return (ipil_p);
}
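
/*
 * Sketch of the bookkeeping built above (shape inferred from this file;
 * exact field definitions live in the pci module headers): each ino
 * carries one ib_ino_pil_t per distinct PIL, and each ipil carries a
 * circular, singly linked list of handlers.  For an ino shared at PILs
 * 6 and 4 by three handlers the lists look roughly like
 *
 *	ino_p->ino_ipil_p --> ipil(pil=6) --> ipil(pil=4) --> NULL
 *	                      ih_a (self)     ih_b --> ih_c --> ih_b ...
 *
 * ino_lopil caches the lowest PIL (4 here) so the delete path below can
 * tell when the minimum must be recomputed.
 */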

void
ib_delete_ino_pil(ib_t *ib_p, ib_ino_pil_t *ipil_p)
{
	ib_ino_info_t *ino_p = ipil_p->ipil_ino_p;
	ib_ino_pil_t *prev, *next;
	ushort_t pil = ipil_p->ipil_pil;

	ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex));

	if (ino_p->ino_ipil_p == ipil_p)
		ino_p->ino_ipil_p = ipil_p->ipil_next_p;
	else {
		for (prev = next = ino_p->ino_ipil_p; next != ipil_p;
		    prev = next, next = next->ipil_next_p);

		if (prev)
			prev->ipil_next_p = ipil_p->ipil_next_p;
	}

	kmem_free(ipil_p, sizeof (ib_ino_pil_t));

	if (ino_p->ino_lopil == pil) {
		/*
		 * Recompute the lowest PIL among the remaining ipils;
		 * pil == 0 means no candidate has been seen yet, so the
		 * first remaining ipil always seeds the minimum.
		 */
		for (pil = 0, next = ino_p->ino_ipil_p; next;
		    next = next->ipil_next_p) {
			if (pil == 0 || pil > next->ipil_pil)
				pil = next->ipil_pil;
		}

		ino_p->ino_lopil = pil;
	}

	if (--ino_p->ino_ipil_size)
		return;

	if (ib_p->ib_ino_lst == ino_p)
		ib_p->ib_ino_lst = ino_p->ino_next_p;
	else {
		ib_ino_info_t *list = ib_p->ib_ino_lst;

		for (; list->ino_next_p != ino_p; list = list->ino_next_p);
		list->ino_next_p = ino_p->ino_next_p;
	}

	kmem_free(ino_p, sizeof (ib_ino_info_t));
}

/* free all inos when we are detaching */
void
ib_free_ino_all(ib_t *ib_p)
{
	ib_ino_info_t *ino_p = ib_p->ib_ino_lst;
	ib_ino_info_t *next = NULL;

	while (ino_p) {
		next = ino_p->ino_next_p;
		kmem_free(ino_p, sizeof (ib_ino_info_t));
		ino_p = next;
	}
}

/*
 * Locate the ib_ino_pil_t structure on ino_p->ino_ipil_p according to pil;
 * returns NULL if not found.
 */
ib_ino_pil_t *
ib_ino_locate_ipil(ib_ino_info_t *ino_p, uint_t pil)
{
	ib_ino_pil_t *ipil_p = ino_p->ino_ipil_p;

	for (; ipil_p && ipil_p->ipil_pil != pil; ipil_p = ipil_p->ipil_next_p);

	return (ipil_p);
}

void
ib_ino_add_intr(pci_t *pci_p, ib_ino_pil_t *ipil_p, ih_t *ih_p)
{
	ib_ino_info_t *ino_p = ipil_p->ipil_ino_p;
	ib_ino_t ino = ino_p->ino_ino;
	ib_t *ib_p = ino_p->ino_ib_p;
	volatile uint64_t *state_reg = IB_INO_INTR_STATE_REG(ib_p, ino);
	hrtime_t start_time;

	ASSERT(ib_p == pci_p->pci_ib_p);
	ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex));

	/* disable interrupt, this could disrupt devices sharing our slot */
	IB_INO_INTR_OFF(ino_p->ino_map_reg);
	*ino_p->ino_map_reg;	/* flush previous write */

	/* do NOT modify the link list until after the busy wait */

	/*
	 * Busy wait if there is an interrupt being processed.
	 * Either the pending state will be cleared by the interrupt wrapper
	 * or the interrupt will be marked as blocked, indicating that it
	 * was jabbering.
	 */
	start_time = gethrtime();
	while ((ino_p->ino_unclaimed_intrs <= pci_unclaimed_intr_max) &&
	    IB_INO_INTR_PENDING(state_reg, ino) && !panicstr) {
		if (gethrtime() - start_time > pci_intrpend_timeout) {
			pbm_t *pbm_p = pci_p->pci_pbm_p;
			cmn_err(CE_WARN, "%s:%s: ib_ino_add_intr %x timeout",
			    pbm_p->pbm_nameinst_str,
			    pbm_p->pbm_nameaddr_str, ino);
			break;
		}
	}

	/* link up ih_t */
	ih_p->ih_next = ipil_p->ipil_ih_head;
	ipil_p->ipil_ih_tail->ih_next = ih_p;
	ipil_p->ipil_ih_tail = ih_p;

	ipil_p->ipil_ih_start = ipil_p->ipil_ih_head;
	ipil_p->ipil_ih_size++;

	/*
	 * If the interrupt was previously blocked (left in pending state)
	 * because of jabber, we need to clear the pending state in case
	 * the jabber has gone away.
	 */
	if (ino_p->ino_unclaimed_intrs > pci_unclaimed_intr_max) {
		cmn_err(CE_WARN,
		    "%s%d: ib_ino_add_intr: ino 0x%x has been unblocked",
		    ddi_driver_name(pci_p->pci_dip),
		    ddi_get_instance(pci_p->pci_dip),
		    ino_p->ino_ino);
		ino_p->ino_unclaimed_intrs = 0;
		IB_INO_INTR_CLEAR(ino_p->ino_clr_reg);
	}

	/* re-enable interrupt */
	IB_INO_INTR_ON(ino_p->ino_map_reg);
	*ino_p->ino_map_reg;	/* flush previous write */
}
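
/*
 * Illustrative add-path sketch (variable names hypothetical; the real
 * caller logic lives in the nexus interrupt-add code, not in this
 * file): a new handler is linked under its (ino, pil) pair, creating
 * the ipil on first use:
 *
 *	ih_p = ib_alloc_ih(rdip, inum, handler, arg1, arg2);
 *	if ((ino_p = ib_locate_ino(ib_p, ino)) != NULL &&
 *	    (ipil_p = ib_ino_locate_ipil(ino_p, pil)) != NULL)
 *		ib_ino_add_intr(pci_p, ipil_p, ih_p);
 *	else
 *		ipil_p = ib_new_ino_pil(ib_p, ino, pil, ih_p);
 */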

/*
 * Removes an ih_t from the ino's link list, using the hardware interrupt
 * disable as a mutex to lock out interrupt threads.
 * Side effect: the interrupt belonging to that ino is turned off on
 * return; if we are sharing a PCI slot with other inos, the caller needs
 * to turn it back on.
 */
void
ib_ino_rem_intr(pci_t *pci_p, ib_ino_pil_t *ipil_p, ih_t *ih_p)
{
	ib_ino_info_t *ino_p = ipil_p->ipil_ino_p;
	int i;
	ib_ino_t ino = ino_p->ino_ino;
	ih_t *ih_lst = ipil_p->ipil_ih_head;
	volatile uint64_t *state_reg =
	    IB_INO_INTR_STATE_REG(ino_p->ino_ib_p, ino);
	hrtime_t start_time;

	ASSERT(MUTEX_HELD(&ino_p->ino_ib_p->ib_ino_lst_mutex));

	/* disable interrupt, this could disrupt devices sharing our slot */
	IB_INO_INTR_OFF(ino_p->ino_map_reg);
	*ino_p->ino_map_reg;	/* flush previous write */

	/* do NOT modify the link list until after the busy wait */

	/*
	 * Busy wait if there is an interrupt being processed.
	 * Either the pending state will be cleared by the interrupt wrapper
	 * or the interrupt will be marked as blocked, indicating that it
	 * was jabbering.
	 */
	start_time = gethrtime();
	while ((ino_p->ino_unclaimed_intrs <= pci_unclaimed_intr_max) &&
	    IB_INO_INTR_PENDING(state_reg, ino) && !panicstr) {
		if (gethrtime() - start_time > pci_intrpend_timeout) {
			pbm_t *pbm_p = pci_p->pci_pbm_p;
			cmn_err(CE_WARN, "%s:%s: ib_ino_rem_intr %x timeout",
			    pbm_p->pbm_nameinst_str,
			    pbm_p->pbm_nameaddr_str, ino);
			break;
		}
	}

	if (ipil_p->ipil_ih_size == 1) {
		if (ih_lst != ih_p)
			goto not_found;
		/* no need to set head/tail as ino_p will be freed */
		goto reset;
	}

	/*
	 * If the interrupt was previously blocked (left in pending state)
	 * because of jabber, we need to clear the pending state in case
	 * the jabber has gone away.
	 */
	if (ino_p->ino_unclaimed_intrs > pci_unclaimed_intr_max) {
		cmn_err(CE_WARN,
		    "%s%d: ib_ino_rem_intr: ino 0x%x has been unblocked",
		    ddi_driver_name(pci_p->pci_dip),
		    ddi_get_instance(pci_p->pci_dip),
		    ino_p->ino_ino);
		ino_p->ino_unclaimed_intrs = 0;
		IB_INO_INTR_CLEAR(ino_p->ino_clr_reg);
	}

	/* search the link list for ih_p */
	for (i = 0;
	    (i < ipil_p->ipil_ih_size) && (ih_lst->ih_next != ih_p);
	    i++, ih_lst = ih_lst->ih_next);
	if (ih_lst->ih_next != ih_p)
		goto not_found;

	/* remove ih_p from the link list and maintain the head/tail */
	ih_lst->ih_next = ih_p->ih_next;
	if (ipil_p->ipil_ih_head == ih_p)
		ipil_p->ipil_ih_head = ih_p->ih_next;
	if (ipil_p->ipil_ih_tail == ih_p)
		ipil_p->ipil_ih_tail = ih_lst;
	ipil_p->ipil_ih_start = ipil_p->ipil_ih_head;
reset:
	if (ih_p->ih_config_handle)
		pci_config_teardown(&ih_p->ih_config_handle);
	if (ih_p->ih_ksp != NULL)
		kstat_delete(ih_p->ih_ksp);
	kmem_free(ih_p, sizeof (ih_t));
	ipil_p->ipil_ih_size--;

	return;
not_found:
	DEBUG2(DBG_R_INTX, ino_p->ino_ib_p->ib_pci_p->pci_dip,
	    "ino_p=%x does not have ih_p=%x\n", ino_p, ih_p);
}

ih_t *
ib_intr_locate_ih(ib_ino_pil_t *ipil_p, dev_info_t *rdip, uint32_t inum)
{
	ih_t *ih_p = ipil_p->ipil_ih_head;
	int i;

	for (i = 0; i < ipil_p->ipil_ih_size; i++, ih_p = ih_p->ih_next) {
		if (ih_p->ih_dip == rdip && ih_p->ih_inum == inum)
			return (ih_p);
	}

	return ((ih_t *)NULL);
}
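
/*
 * Allocation/teardown pairing: ib_alloc_ih() below creates a handler
 * record in the PCI_INTR_STATE_DISABLE state; ib_new_ino_pil() or
 * ib_ino_add_intr() links it in, and ib_ino_rem_intr() unlinks and
 * frees it, also tearing down ih_config_handle and ih_ksp if the
 * caller set them up after allocation.
 */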

ih_t *
ib_alloc_ih(dev_info_t *rdip, uint32_t inum,
    uint_t (*int_handler)(caddr_t int_handler_arg1, caddr_t int_handler_arg2),
    caddr_t int_handler_arg1, caddr_t int_handler_arg2)
{
	ih_t *ih_p;

	ih_p = kmem_alloc(sizeof (ih_t), KM_SLEEP);
	ih_p->ih_dip = rdip;
	ih_p->ih_inum = inum;
	ih_p->ih_intr_state = PCI_INTR_STATE_DISABLE;
	ih_p->ih_handler = int_handler;
	ih_p->ih_handler_arg1 = int_handler_arg1;
	ih_p->ih_handler_arg2 = int_handler_arg2;
	ih_p->ih_config_handle = NULL;
	ih_p->ih_nsec = 0;
	ih_p->ih_ticks = 0;
	ih_p->ih_ksp = NULL;

	return (ih_p);
}

int
ib_update_intr_state(pci_t *pci_p, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp, uint_t new_intr_state)
{
	ib_t *ib_p = pci_p->pci_ib_p;
	ib_ino_info_t *ino_p;
	ib_ino_pil_t *ipil_p;
	ib_mondo_t mondo;
	ih_t *ih_p;
	int ret = DDI_FAILURE;

	/*
	 * For PULSE interrupts, the pci driver does not allocate
	 * ib_ino_info_t and ih_t data structures and does not maintain
	 * any interrupt state information, so just return success.
	 */
	if (hdlp->ih_vector & PCI_PULSE_INO) {
		DEBUG0(DBG_IB, ib_p->ib_pci_p->pci_dip,
		    "ib_update_intr_state: PULSE interrupt, return success\n");

		return (DDI_SUCCESS);
	}

	mutex_enter(&ib_p->ib_ino_lst_mutex);

	if ((mondo = pci_xlate_intr(pci_p->pci_dip, rdip, pci_p->pci_ib_p,
	    IB_MONDO_TO_INO(hdlp->ih_vector))) == 0) {
		mutex_exit(&ib_p->ib_ino_lst_mutex);
		return (ret);
	}

	ino_p = ib_locate_ino(ib_p, IB_MONDO_TO_INO(mondo));
	if (ino_p && (ipil_p = ib_ino_locate_ipil(ino_p, hdlp->ih_pri))) {
		if ((ih_p = ib_intr_locate_ih(ipil_p, rdip,
		    hdlp->ih_inum)) != NULL) {
			ih_p->ih_intr_state = new_intr_state;
			ret = DDI_SUCCESS;
		}
	}

	mutex_exit(&ib_p->ib_ino_lst_mutex);
	return (ret);
}

/*
 * Return the dips associated with a given interrupt block (ino).
 * On entry, *devs_ret holds the size of the devs array; on return it
 * holds the number of entries actually filled in.  The return value is
 * the total number of dips that exist for the given ino, which may
 * exceed *devs_ret if the array was too small.
 */
uint8_t
ib_get_ino_devs(
	ib_t *ib_p, uint32_t ino, uint8_t *devs_ret, pcitool_intr_dev_t *devs)
{
	ib_ino_info_t *ino_p;
	ib_ino_pil_t *ipil_p;
	ih_t *ih_p;
	uint32_t num_devs = 0;
	int i, j;

	mutex_enter(&ib_p->ib_ino_lst_mutex);
	ino_p = ib_locate_ino(ib_p, ino);
	if (ino_p != NULL) {
		for (j = 0, ipil_p = ino_p->ino_ipil_p; ipil_p;
		    ipil_p = ipil_p->ipil_next_p) {
			num_devs += ipil_p->ipil_ih_size;

			/*
			 * Index the output array by j, which accumulates
			 * across ipils; i only walks the current ipil's
			 * handler list.
			 */
			for (i = 0, ih_p = ipil_p->ipil_ih_head;
			    ((i < ipil_p->ipil_ih_size) && (j < *devs_ret));
			    i++, j++, ih_p = ih_p->ih_next) {
				(void) strncpy(devs[j].driver_name,
				    ddi_driver_name(ih_p->ih_dip),
				    MAXMODCONFNAME - 1);
				devs[j].driver_name[MAXMODCONFNAME - 1] = '\0';
				(void) ddi_pathname(ih_p->ih_dip, devs[j].path);
				devs[j].dev_inst =
				    ddi_get_instance(ih_p->ih_dip);
			}
		}
		*devs_ret = j;
	}

	mutex_exit(&ib_p->ib_ino_lst_mutex);

	return (num_devs);
}
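
/*
 * Assumed usage: once an ino has been retargeted to a new CPU (e.g. on
 * behalf of pcitool), ib_log_new_cpu() below records the new binding in
 * the OS state and flushes each handler's residual ih_ticks into
 * ih_nsec at the old CPU's clock rate, so interrupt time is not
 * misattributed across the move.
 */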

void
ib_log_new_cpu(ib_t *ib_p, uint32_t old_cpu_id, uint32_t new_cpu_id,
    uint32_t ino)
{
	ib_ino_info_t *ino_p;
	ib_ino_pil_t *ipil_p;
	ih_t *ih_p;
	int i;

	mutex_enter(&ib_p->ib_ino_lst_mutex);

	ino_p = ib_locate_ino(ib_p, ino);
	if (ino_p != NULL) {

		/* Log in OS data structures the new CPU. */
		ino_p->ino_cpuid = new_cpu_id;

		for (ipil_p = ino_p->ino_ipil_p; ipil_p;
		    ipil_p = ipil_p->ipil_next_p) {
			for (i = 0, ih_p = ipil_p->ipil_ih_head;
			    (i < ipil_p->ipil_ih_size);
			    i++, ih_p = ih_p->ih_next) {
				/*
				 * Account for any residual time
				 * to be logged for the old cpu.
				 */
				ib_cpu_ticks_to_ih_nsec(ib_p, ih_p,
				    old_cpu_id);
			}
		}
	}

	mutex_exit(&ib_p->ib_ino_lst_mutex);
}