/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * PX Interrupt Block implementation
 */

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/async.h>
#include <sys/systm.h>		/* panicstr */
#include <sys/spl.h>
#include <sys/sunddi.h>
#include <sys/machsystm.h>	/* intr_dist_add */
#include <sys/ddi_impldefs.h>
#include <sys/cpuvar.h>
#include "px_obj.h"

/*LINTLIBRARY*/

static void px_ib_intr_redist(void *arg, int32_t weight_max, int32_t weight);
static void px_ib_cpu_ticks_to_ih_nsec(px_ib_t *ib_p, px_ih_t *ih_p,
    uint32_t cpu_id);
static uint_t px_ib_intr_reset(void *arg);
static void px_fill_in_intr_devs(pcitool_intr_dev_t *dev, char *driver_name,
    char *path_name, int instance);

int
px_ib_attach(px_t *px_p)
{
	dev_info_t	*dip = px_p->px_dip;
	px_ib_t		*ib_p;
	sysino_t	sysino;
	px_fault_t	*fault_p = &px_p->px_fault;

	DBG(DBG_IB, dip, "px_ib_attach\n");

	if (px_lib_intr_devino_to_sysino(px_p->px_dip,
	    px_p->px_inos[PX_INTR_PEC], &sysino) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Allocate interrupt block state structure and link it to
	 * the px state structure.
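	 *
	 * ib_ino_lst is a singly-linked list of per-ino state protected by
	 * ib_ino_lst_mutex; ib_intr_lock serializes enabling and disabling
	 * of individual interrupts.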
	 */
	ib_p = kmem_zalloc(sizeof (px_ib_t), KM_SLEEP);
	px_p->px_ib_p = ib_p;
	ib_p->ib_px_p = px_p;
	ib_p->ib_ino_lst = (px_ib_ino_info_t *)NULL;

	mutex_init(&ib_p->ib_intr_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&ib_p->ib_ino_lst_mutex, NULL, MUTEX_DRIVER, NULL);

	bus_func_register(BF_TYPE_RESINTR, px_ib_intr_reset, ib_p);

	intr_dist_add_weighted(px_ib_intr_redist, ib_p);

	/*
	 * Initialize PEC fault data structure
	 */
	fault_p->px_fh_dip = dip;
	fault_p->px_fh_sysino = sysino;
	fault_p->px_err_func = px_err_dmc_pec_intr;
	fault_p->px_intr_ino = px_p->px_inos[PX_INTR_PEC];

	return (DDI_SUCCESS);
}

void
px_ib_detach(px_t *px_p)
{
	px_ib_t		*ib_p = px_p->px_ib_p;
	dev_info_t	*dip = px_p->px_dip;

	DBG(DBG_IB, dip, "px_ib_detach\n");

	bus_func_unregister(BF_TYPE_RESINTR, px_ib_intr_reset, ib_p);
	intr_dist_rem_weighted(px_ib_intr_redist, ib_p);

	mutex_destroy(&ib_p->ib_ino_lst_mutex);
	mutex_destroy(&ib_p->ib_intr_lock);

	px_ib_free_ino_all(ib_p);

	px_p->px_ib_p = NULL;
	kmem_free(ib_p, sizeof (px_ib_t));
}

void
px_ib_intr_enable(px_t *px_p, cpuid_t cpu_id, devino_t ino)
{
	px_ib_t		*ib_p = px_p->px_ib_p;
	sysino_t	sysino;

	/*
	 * Determine the cpu for the interrupt
	 */
	mutex_enter(&ib_p->ib_intr_lock);

	DBG(DBG_IB, px_p->px_dip,
	    "px_ib_intr_enable: ino=%x cpu_id=%x\n", ino, cpu_id);

	if (px_lib_intr_devino_to_sysino(px_p->px_dip, ino,
	    &sysino) != DDI_SUCCESS) {
		DBG(DBG_IB, px_p->px_dip,
		    "px_ib_intr_enable: px_intr_devino_to_sysino() failed\n");

		mutex_exit(&ib_p->ib_intr_lock);
		return;
	}

	PX_INTR_ENABLE(px_p->px_dip, sysino, cpu_id);
	px_lib_intr_setstate(px_p->px_dip, sysino, INTR_IDLE_STATE);

	mutex_exit(&ib_p->ib_intr_lock);
}

/*ARGSUSED*/
void
px_ib_intr_disable(px_ib_t *ib_p, devino_t ino, int wait)
{
	sysino_t	sysino;

	mutex_enter(&ib_p->ib_intr_lock);

	DBG(DBG_IB, ib_p->ib_px_p->px_dip, "px_ib_intr_disable: ino=%x\n", ino);

	/* Disable the interrupt */
	if (px_lib_intr_devino_to_sysino(ib_p->ib_px_p->px_dip, ino,
	    &sysino) != DDI_SUCCESS) {
		DBG(DBG_IB, ib_p->ib_px_p->px_dip,
		    "px_ib_intr_disable: px_intr_devino_to_sysino() failed\n");

		mutex_exit(&ib_p->ib_intr_lock);
		return;
	}

	PX_INTR_DISABLE(ib_p->ib_px_p->px_dip, sysino);

	mutex_exit(&ib_p->ib_intr_lock);
}
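
/*
 * Retarget the interrupt for the given ino onto cpu_id.  When wait_flag is
 * set, the interrupt is first disabled and we busy-wait for any delivered
 * interrupt to be claimed, or for px_intrpend_timeout to expire, before
 * re-enabling it on the new CPU.
 */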
void
px_ib_intr_dist_en(dev_info_t *dip, cpuid_t cpu_id, devino_t ino,
    boolean_t wait_flag)
{
	uint32_t	old_cpu_id;
	sysino_t	sysino;
	intr_valid_state_t	enabled = 0;
	hrtime_t	start_time;
	intr_state_t	intr_state;
	int		e = DDI_SUCCESS;

	DBG(DBG_IB, dip, "px_ib_intr_dist_en: ino=0x%x\n", ino);

	if (px_lib_intr_devino_to_sysino(dip, ino, &sysino) != DDI_SUCCESS) {
		DBG(DBG_IB, dip, "px_ib_intr_dist_en: "
		    "px_intr_devino_to_sysino() failed, ino 0x%x\n", ino);
		return;
	}

	/* Skip enabling disabled interrupts */
	if (px_lib_intr_getvalid(dip, sysino, &enabled) != DDI_SUCCESS) {
		DBG(DBG_IB, dip, "px_ib_intr_dist_en: px_intr_getvalid() "
		    "failed, sysino 0x%x\n", sysino);
		return;
	}
	if (!enabled)
		return;

	/* Done if redistributed onto the same cpuid */
	if (px_lib_intr_gettarget(dip, sysino, &old_cpu_id) != DDI_SUCCESS) {
		DBG(DBG_IB, dip, "px_ib_intr_dist_en: "
		    "px_intr_gettarget() failed\n");
		return;
	}
	if (cpu_id == old_cpu_id)
		return;

	if (!wait_flag)
		goto done;

	/* Busy wait on pending interrupts */
	PX_INTR_DISABLE(dip, sysino);

	for (start_time = gethrtime(); !panicstr &&
	    ((e = px_lib_intr_getstate(dip, sysino, &intr_state)) ==
	    DDI_SUCCESS) &&
	    (intr_state == INTR_DELIVERED_STATE); /* */) {
		if (gethrtime() - start_time > px_intrpend_timeout) {
			cmn_err(CE_WARN,
			    "%s%d: px_ib_intr_dist_en: sysino 0x%lx(ino 0x%x) "
			    "from cpu id 0x%x to 0x%x timeout",
			    ddi_driver_name(dip), ddi_get_instance(dip),
			    sysino, ino, old_cpu_id, cpu_id);

			e = DDI_FAILURE;
			break;
		}
	}

	if (e != DDI_SUCCESS)
		DBG(DBG_IB, dip, "px_ib_intr_dist_en: failed, "
		    "ino 0x%x sysino 0x%x\n", ino, sysino);

done:
	PX_INTR_ENABLE(dip, sysino, cpu_id);
}

static void
px_ib_cpu_ticks_to_ih_nsec(px_ib_t *ib_p, px_ih_t *ih_p, uint32_t cpu_id)
{
	extern kmutex_t	pxintr_ks_template_lock;
	hrtime_t	ticks;

	/*
	 * Because we are updating two fields in ih_t we must lock
	 * pxintr_ks_template_lock to prevent someone from reading the
	 * kstats after we set ih_ticks to 0 and before we increment
	 * ih_nsec to compensate.
	 *
	 * We must also protect against the interrupt arriving and incrementing
	 * ih_ticks between the time we read it and when we reset it to 0.
	 * To do this we use atomic_swap.
	 */

	ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex));

	mutex_enter(&pxintr_ks_template_lock);
	ticks = atomic_swap_64(&ih_p->ih_ticks, 0);
	ih_p->ih_nsec += (uint64_t)tick2ns(ticks, cpu_id);
	mutex_exit(&pxintr_ks_template_lock);
}


/*
 * Redistribute interrupts of the specified weight.  The first call has a
 * weight of weight_max, which can be used to trigger initialization for
 * redistribution.  The inos with weight [weight_max, inf.) should be
 * processed on the "weight == weight_max" call.  This first call is followed
 * by calls of decreasing weights, and inos of each such weight should be
 * processed.  The final call specifies a weight of zero; this can be used
 * to trigger processing of stragglers.
 */
static void
px_ib_intr_redist(void *arg, int32_t weight_max, int32_t weight)
{
	px_ib_t		*ib_p = (px_ib_t *)arg;
	px_t		*px_p = ib_p->ib_px_p;
	dev_info_t	*dip = px_p->px_dip;
	px_ib_ino_info_t	*ino_p;
	px_ih_t		*ih_lst;
	int32_t		dweight = 0;
	int		i;

	/* Redistribute internal interrupts */
	if (weight == 0) {
		devino_t	ino_pec = px_p->px_inos[PX_INTR_PEC];

		mutex_enter(&ib_p->ib_intr_lock);
		px_ib_intr_dist_en(dip, intr_dist_cpuid(), ino_pec, B_FALSE);
		mutex_exit(&ib_p->ib_intr_lock);
	}

	/* Redistribute device interrupts */
	mutex_enter(&ib_p->ib_ino_lst_mutex);

	for (ino_p = ib_p->ib_ino_lst; ino_p; ino_p = ino_p->ino_next) {
		uint32_t orig_cpuid;

		/*
		 * Recompute the sum of interrupt weights of devices that
		 * share the same ino upon the first call, marked by
		 * (weight == weight_max).
		 */
		if (weight == weight_max) {
			ino_p->ino_intr_weight = 0;
			for (i = 0, ih_lst = ino_p->ino_ih_head;
			    i < ino_p->ino_ih_size;
			    i++, ih_lst = ih_lst->ih_next) {
				dweight = i_ddi_get_intr_weight(ih_lst->ih_dip);
				if (dweight > 0)
					ino_p->ino_intr_weight += dweight;
			}
		}

		/*
		 * As part of redistributing weighted interrupts over CPUs,
		 * the nexus redistributes device interrupts and updates the
		 * CPU weight.
		 * The intent is that the most lightly weighted
		 * CPU takes the next interrupt and gains weight; a device
		 * that demands more attention therefore earns more CPU
		 * attention by making its CPU heavier.
		 */
		if ((weight == ino_p->ino_intr_weight) ||
		    ((weight >= weight_max) &&
		    (ino_p->ino_intr_weight >= weight_max))) {
			orig_cpuid = ino_p->ino_cpuid;
			if (cpu[orig_cpuid] == NULL)
				orig_cpuid = CPU->cpu_id;

			/* select cpuid to target and mark ino established */
			ino_p->ino_cpuid = intr_dist_cpuid();

			/* Add device weight to targeted cpu. */
			for (i = 0, ih_lst = ino_p->ino_ih_head;
			    i < ino_p->ino_ih_size;
			    i++, ih_lst = ih_lst->ih_next) {

				dweight = i_ddi_get_intr_weight(ih_lst->ih_dip);
				intr_dist_cpuid_add_device_weight(
				    ino_p->ino_cpuid, ih_lst->ih_dip, dweight);

				/*
				 * Different cpus may have different clock
				 * speeds.  To account for this, whenever an
				 * interrupt is moved to a new CPU, we
				 * convert the accumulated ticks into nsec,
				 * based upon the clock rate of the prior
				 * CPU.
				 *
				 * It is possible that the prior CPU no longer
				 * exists.  In this case, fall back to using
				 * this CPU's clock rate.
				 *
				 * Note that the value in ih_ticks has already
				 * been corrected for any power savings mode
				 * which might have been in effect.
				 */
				px_ib_cpu_ticks_to_ih_nsec(ib_p, ih_lst,
				    orig_cpuid);
			}

			/* enable interrupt on new targeted cpu */
			px_ib_intr_dist_en(dip, ino_p->ino_cpuid,
			    ino_p->ino_ino, B_TRUE);
		}
	}
	mutex_exit(&ib_p->ib_ino_lst_mutex);
}

/*
 * Reset interrupts to IDLE.  This function is called during
 * panic handling after redistributing interrupts; it's needed to
 * support dumping to network devices after 'sync' from OBP.
 *
 * N.B.  This routine runs in a context where all other threads
 * are permanently suspended.
 */
static uint_t
px_ib_intr_reset(void *arg)
{
	px_ib_t	*ib_p = (px_ib_t *)arg;

	DBG(DBG_IB, ib_p->ib_px_p->px_dip, "px_ib_intr_reset\n");

	if (px_lib_intr_reset(ib_p->ib_px_p->px_dip) != DDI_SUCCESS)
		return (BF_FATAL);

	return (BF_NONE);
}

/*
 * Locate the ino_info structure on ib_p->ib_ino_lst according to ino#;
 * returns NULL if not found.
 */
px_ib_ino_info_t *
px_ib_locate_ino(px_ib_t *ib_p, devino_t ino_num)
{
	px_ib_ino_info_t	*ino_p = ib_p->ib_ino_lst;

	ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex));

	for (; ino_p && ino_p->ino_ino != ino_num; ino_p = ino_p->ino_next);

	return (ino_p);
}

px_ib_ino_info_t *
px_ib_new_ino(px_ib_t *ib_p, devino_t ino_num, px_ih_t *ih_p)
{
	px_ib_ino_info_t	*ino_p = kmem_alloc(sizeof (px_ib_ino_info_t),
	    KM_SLEEP);
	sysino_t	sysino;

	ino_p->ino_ino = ino_num;
	ino_p->ino_ib_p = ib_p;
	ino_p->ino_unclaimed = 0;

	if (px_lib_intr_devino_to_sysino(ib_p->ib_px_p->px_dip, ino_p->ino_ino,
	    &sysino) != DDI_SUCCESS) {
		kmem_free(ino_p, sizeof (px_ib_ino_info_t));
		return (NULL);
	}

	ino_p->ino_sysino = sysino;

	/*
	 * Cannot disable interrupt since we might share slot
	 */
	ih_p->ih_next = ih_p;
	ino_p->ino_ih_head = ih_p;
	ino_p->ino_ih_tail = ih_p;
	ino_p->ino_ih_start = ih_p;
	ino_p->ino_ih_size = 1;

	ino_p->ino_next = ib_p->ib_ino_lst;
	ib_p->ib_ino_lst = ino_p;

	return (ino_p);
}

/*
 * The ino_p is retrieved by a previous call to px_ib_locate_ino().
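 * The caller must hold ib_ino_lst_mutex; this routine only unlinks the
 * node from ib_ino_lst, it does not free it.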
 */
void
px_ib_delete_ino(px_ib_t *ib_p, px_ib_ino_info_t *ino_p)
{
	px_ib_ino_info_t	*list = ib_p->ib_ino_lst;

	ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex));

	if (list == ino_p)
		ib_p->ib_ino_lst = list->ino_next;
	else {
		for (; list->ino_next != ino_p; list = list->ino_next);
		list->ino_next = ino_p->ino_next;
	}
}

/*
 * Free all inos when we are detaching.
 */
void
px_ib_free_ino_all(px_ib_t *ib_p)
{
	px_ib_ino_info_t	*tmp = ib_p->ib_ino_lst;
	px_ib_ino_info_t	*next = NULL;

	while (tmp) {
		next = tmp->ino_next;
		kmem_free(tmp, sizeof (px_ib_ino_info_t));
		tmp = next;
	}
}

int
px_ib_ino_add_intr(px_t *px_p, px_ib_ino_info_t *ino_p, px_ih_t *ih_p)
{
	px_ib_t		*ib_p = ino_p->ino_ib_p;
	devino_t	ino = ino_p->ino_ino;
	sysino_t	sysino = ino_p->ino_sysino;
	dev_info_t	*dip = px_p->px_dip;
	cpuid_t		curr_cpu;
	hrtime_t	start_time;
	intr_state_t	intr_state;
	int		ret = DDI_SUCCESS;

	ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex));
	ASSERT(ib_p == px_p->px_ib_p);

	DBG(DBG_IB, dip, "px_ib_ino_add_intr ino=%x\n", ino_p->ino_ino);

	/* Disable the interrupt */
	if ((ret = px_lib_intr_gettarget(dip, sysino,
	    &curr_cpu)) != DDI_SUCCESS) {
		DBG(DBG_IB, dip,
		    "px_ib_ino_add_intr px_intr_gettarget() failed\n");

		return (ret);
	}

	PX_INTR_DISABLE(dip, sysino);

	/* Busy wait on pending interrupt */
	for (start_time = gethrtime(); !panicstr &&
	    ((ret = px_lib_intr_getstate(dip, sysino, &intr_state))
	    == DDI_SUCCESS) && (intr_state == INTR_DELIVERED_STATE); /* */) {
		if (gethrtime() - start_time > px_intrpend_timeout) {
			cmn_err(CE_WARN, "%s%d: px_ib_ino_add_intr: pending "
			    "sysino 0x%lx(ino 0x%x) timeout",
			    ddi_driver_name(dip), ddi_get_instance(dip),
			    sysino, ino);

			ret = DDI_FAILURE;
			break;
		}
	}

	if (ret != DDI_SUCCESS) {
		DBG(DBG_IB, dip, "px_ib_ino_add_intr: failed, "
		    "ino 0x%x sysino 0x%x\n", ino, sysino);

		return (ret);
	}

	/* Link ih_p onto the tail of the ino's circular handler list */
	ih_p->ih_next = ino_p->ino_ih_head;
	ino_p->ino_ih_tail->ih_next = ih_p;
	ino_p->ino_ih_tail = ih_p;

	ino_p->ino_ih_start = ino_p->ino_ih_head;
	ino_p->ino_ih_size++;

	/*
	 * If the interrupt was previously blocked (left in pending state)
	 * because of jabber, we need to clear the pending state in case the
	 * jabber has gone away.
	 */
	if (ino_p->ino_unclaimed > px_unclaimed_intr_max) {
		cmn_err(CE_WARN,
		    "%s%d: px_ib_ino_add_intr: ino 0x%x has been unblocked",
		    ddi_driver_name(dip), ddi_get_instance(dip), ino);

		ino_p->ino_unclaimed = 0;
		if ((ret = px_lib_intr_setstate(dip, sysino,
		    INTR_IDLE_STATE)) != DDI_SUCCESS) {
			DBG(DBG_IB, px_p->px_dip,
			    "px_ib_ino_add_intr px_intr_setstate failed\n");

			return (ret);
		}
	}

	/* Re-enable interrupt */
	PX_INTR_ENABLE(dip, sysino, curr_cpu);

	return (ret);
}

/*
 * Remove a px_ih_t from the ino's linked list of handlers.
 * The interrupt is disabled in hardware to lock out interrupt threads.
 * Side effect: the interrupt belonging to that ino is turned off on return;
 * if we are sharing the PX slot with other inos, the caller needs
 * to turn it back on.
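 *
 * Returns DDI_FAILURE if ih_p is not on the ino's list or if the wait for
 * a pending interrupt times out.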
 */
int
px_ib_ino_rem_intr(px_t *px_p, px_ib_ino_info_t *ino_p, px_ih_t *ih_p)
{
	devino_t	ino = ino_p->ino_ino;
	sysino_t	sysino = ino_p->ino_sysino;
	dev_info_t	*dip = px_p->px_dip;
	px_ih_t		*ih_lst = ino_p->ino_ih_head;
	hrtime_t	start_time;
	intr_state_t	intr_state;
	int		i, ret = DDI_SUCCESS;

	ASSERT(MUTEX_HELD(&ino_p->ino_ib_p->ib_ino_lst_mutex));

	DBG(DBG_IB, px_p->px_dip, "px_ib_ino_rem_intr ino=%x\n",
	    ino_p->ino_ino);

	/* Disable the interrupt */
	PX_INTR_DISABLE(px_p->px_dip, sysino);

	if (ino_p->ino_ih_size == 1) {
		if (ih_lst != ih_p)
			goto not_found;

		/* No need to set head/tail as ino_p will be freed */
		goto reset;
	}

	/* Busy wait on pending interrupt */
	for (start_time = gethrtime(); !panicstr &&
	    ((ret = px_lib_intr_getstate(dip, sysino, &intr_state))
	    == DDI_SUCCESS) && (intr_state == INTR_DELIVERED_STATE); /* */) {
		if (gethrtime() - start_time > px_intrpend_timeout) {
			cmn_err(CE_WARN, "%s%d: px_ib_ino_rem_intr: pending "
			    "sysino 0x%lx(ino 0x%x) timeout",
			    ddi_driver_name(dip), ddi_get_instance(dip),
			    sysino, ino);

			ret = DDI_FAILURE;
			break;
		}
	}

	if (ret != DDI_SUCCESS) {
		DBG(DBG_IB, dip, "px_ib_ino_rem_intr: failed, "
		    "ino 0x%x sysino 0x%x\n", ino, sysino);

		return (ret);
	}

	/*
	 * If the interrupt was previously blocked (left in pending state)
	 * because of jabber, we need to clear the pending state in case the
	 * jabber has gone away.
	 */
	if (ino_p->ino_unclaimed > px_unclaimed_intr_max) {
		cmn_err(CE_WARN, "%s%d: px_ib_ino_rem_intr: "
		    "ino 0x%x has been unblocked",
		    ddi_driver_name(dip), ddi_get_instance(dip), ino);

		ino_p->ino_unclaimed = 0;
		if ((ret = px_lib_intr_setstate(dip, sysino,
		    INTR_IDLE_STATE)) != DDI_SUCCESS) {
			DBG(DBG_IB, px_p->px_dip,
			    "px_ib_ino_rem_intr px_intr_setstate failed\n");

			return (ret);
		}
	}

	/* Search the linked list for ih_p */
	for (i = 0; (i < ino_p->ino_ih_size) &&
	    (ih_lst->ih_next != ih_p); i++, ih_lst = ih_lst->ih_next);

	if (ih_lst->ih_next != ih_p)
		goto not_found;

	/* Remove ih_p from the linked list and maintain the head/tail */
	ih_lst->ih_next = ih_p->ih_next;

	if (ino_p->ino_ih_head == ih_p)
		ino_p->ino_ih_head = ih_p->ih_next;
	if (ino_p->ino_ih_tail == ih_p)
		ino_p->ino_ih_tail = ih_lst;

	ino_p->ino_ih_start = ino_p->ino_ih_head;

reset:
	if (ih_p->ih_config_handle)
		pci_config_teardown(&ih_p->ih_config_handle);
	if (ih_p->ih_ksp != NULL)
		kstat_delete(ih_p->ih_ksp);

	kmem_free(ih_p, sizeof (px_ih_t));
	ino_p->ino_ih_size--;

	return (ret);

not_found:
	DBG(DBG_R_INTX, ino_p->ino_ib_p->ib_px_p->px_dip,
	    "ino_p=%x does not have ih_p=%x\n", ino_p, ih_p);

	return (DDI_FAILURE);
}

px_ih_t *
px_ib_ino_locate_intr(px_ib_ino_info_t *ino_p, dev_info_t *rdip,
    uint32_t inum, msiq_rec_type_t rec_type, msgcode_t msg_code)
{
	px_ih_t	*ih_lst = ino_p->ino_ih_head;
	int	i;

	for (i = 0; i < ino_p->ino_ih_size; i++, ih_lst = ih_lst->ih_next) {
		if ((ih_lst->ih_dip == rdip) && (ih_lst->ih_inum == inum) &&
		    (ih_lst->ih_rec_type == rec_type) &&
		    (ih_lst->ih_msg_code == msg_code))
			return (ih_lst);
	}

	return ((px_ih_t *)NULL);
}
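
/*
 * Allocate and initialize a px_ih_t handler entry for rdip.  The entry is
 * created in the PX_INTR_STATE_DISABLE state and is not linked anywhere;
 * callers link it onto an ino via px_ib_new_ino() or px_ib_ino_add_intr().
 */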
px_ih_t *
px_ib_alloc_ih(dev_info_t *rdip, uint32_t inum,
    uint_t (*int_handler)(caddr_t int_handler_arg1, caddr_t int_handler_arg2),
    caddr_t int_handler_arg1, caddr_t int_handler_arg2,
    msiq_rec_type_t rec_type, msgcode_t msg_code)
{
	px_ih_t	*ih_p;

	ih_p = kmem_alloc(sizeof (px_ih_t), KM_SLEEP);
	ih_p->ih_dip = rdip;
	ih_p->ih_inum = inum;
	ih_p->ih_intr_state = PX_INTR_STATE_DISABLE;
	ih_p->ih_handler = int_handler;
	ih_p->ih_handler_arg1 = int_handler_arg1;
	ih_p->ih_handler_arg2 = int_handler_arg2;
	ih_p->ih_config_handle = NULL;
	ih_p->ih_rec_type = rec_type;
	ih_p->ih_msg_code = msg_code;
	ih_p->ih_nsec = 0;
	ih_p->ih_ticks = 0;
	ih_p->ih_ksp = NULL;

	return (ih_p);
}

int
px_ib_update_intr_state(px_t *px_p, dev_info_t *rdip,
    uint_t inum, devino_t ino, uint_t new_intr_state,
    msiq_rec_type_t rec_type, msgcode_t msg_code)
{
	px_ib_t		*ib_p = px_p->px_ib_p;
	px_ib_ino_info_t	*ino_p;
	px_ih_t		*ih_p;
	int		ret = DDI_FAILURE;

	DBG(DBG_IB, px_p->px_dip, "ib_update_intr_state: %s%d "
	    "inum %x devino %x state %x\n", ddi_driver_name(rdip),
	    ddi_get_instance(rdip), inum, ino, new_intr_state);

	mutex_enter(&ib_p->ib_ino_lst_mutex);

	if (ino_p = px_ib_locate_ino(ib_p, ino)) {
		if (ih_p = px_ib_ino_locate_intr(ino_p, rdip, inum, rec_type,
		    msg_code)) {
			ih_p->ih_intr_state = new_intr_state;
			ret = DDI_SUCCESS;
		}
	}

	mutex_exit(&ib_p->ib_ino_lst_mutex);
	return (ret);
}


static void
px_fill_in_intr_devs(pcitool_intr_dev_t *dev, char *driver_name,
    char *path_name, int instance)
{
	(void) strncpy(dev->driver_name, driver_name, MAXMODCONFNAME-1);
	dev->driver_name[MAXMODCONFNAME-1] = '\0';
	(void) strncpy(dev->path, path_name, MAXPATHLEN-1);
	dev->path[MAXPATHLEN-1] = '\0';
	dev->dev_inst = instance;
}


/*
 * Return the devices associated with a given ino.
 * On entry, *devs_ret holds the capacity of the devs array; on return it
 * holds the number of entries actually filled in.  The function's return
 * value is the total number of devices that exist for the given ino,
 * which may be larger than *devs_ret.
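 *
 * The ino's handler list is walked under ib_ino_lst_mutex, so it cannot
 * change while the devs array is being filled in.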
 *
 * Note: this function assumes an enabled/valid INO, which is why it returns
 * the px node and (Internal) when it finds no other devices (and
 * *devs_ret > 0).
 */
uint8_t
pxtool_ib_get_ino_devs(
    px_t *px_p, uint32_t ino, uint8_t *devs_ret, pcitool_intr_dev_t *devs)
{
	px_ib_t		*ib_p = px_p->px_ib_p;
	px_ib_ino_info_t	*ino_p;
	px_ih_t		*ih_p;
	uint32_t	num_devs = 0;
	char		pathname[MAXPATHLEN];
	int		i;

	mutex_enter(&ib_p->ib_ino_lst_mutex);
	ino_p = px_ib_locate_ino(ib_p, ino);
	if (ino_p != NULL) {
		num_devs = ino_p->ino_ih_size;
		for (i = 0, ih_p = ino_p->ino_ih_head;
		    ((i < ino_p->ino_ih_size) && (i < *devs_ret));
		    i++, ih_p = ih_p->ih_next) {
			(void) ddi_pathname(ih_p->ih_dip, pathname);
			px_fill_in_intr_devs(&devs[i],
			    (char *)ddi_driver_name(ih_p->ih_dip), pathname,
			    ddi_get_instance(ih_p->ih_dip));
		}
		*devs_ret = i;

	} else if (*devs_ret > 0) {
		(void) ddi_pathname(px_p->px_dip, pathname);
		(void) strcat(pathname, " (Internal)");
		px_fill_in_intr_devs(&devs[0],
		    (char *)ddi_driver_name(px_p->px_dip), pathname,
		    ddi_get_instance(px_p->px_dip));
		num_devs = *devs_ret = 1;
	}

	mutex_exit(&ib_p->ib_ino_lst_mutex);

	return (num_devs);
}


void
px_ib_log_new_cpu(px_ib_t *ib_p, uint32_t old_cpu_id, uint32_t new_cpu_id,
    uint32_t ino)
{
	px_ib_ino_info_t	*ino_p;

	mutex_enter(&ib_p->ib_ino_lst_mutex);

	ino_p = px_ib_locate_ino(ib_p, ino);
	if (ino_p != NULL) {

		/* Log in OS data structures the new CPU. */
		ino_p->ino_cpuid = new_cpu_id;

		/* Account for any residual time to be logged for old cpu. */
		px_ib_cpu_ticks_to_ih_nsec(ib_p, ino_p->ino_ih_head,
		    old_cpu_id);
	}

	mutex_exit(&ib_p->ib_ino_lst_mutex);
}