1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 /* 27 * PX Interrupt Block implementation 28 */ 29 30 #include <sys/types.h> 31 #include <sys/kmem.h> 32 #include <sys/async.h> 33 #include <sys/systm.h> /* panicstr */ 34 #include <sys/spl.h> 35 #include <sys/sunddi.h> 36 #include <sys/machsystm.h> /* intr_dist_add */ 37 #include <sys/ddi_impldefs.h> 38 #include <sys/cpuvar.h> 39 #include <sys/time.h> 40 #include "px_obj.h" 41 42 /*LINTLIBRARY*/ 43 44 static void px_ib_intr_redist(void *arg, int32_t weight_max, int32_t weight); 45 static void px_ib_cpu_ticks_to_ih_nsec(px_ib_t *ib_p, px_ih_t *ih_p, 46 uint32_t cpu_id); 47 static uint_t px_ib_intr_reset(void *arg); 48 static void px_fill_in_intr_devs(pcitool_intr_dev_t *dev, char *driver_name, 49 char *path_name, int instance); 50 51 extern uint64_t xc_tick_jump_limit; 52 53 int 54 px_ib_attach(px_t *px_p) 55 { 56 dev_info_t *dip = px_p->px_dip; 57 px_ib_t *ib_p; 58 sysino_t sysino; 59 px_fault_t *fault_p = &px_p->px_fault; 60 61 DBG(DBG_IB, dip, "px_ib_attach\n"); 62 63 if (px_lib_intr_devino_to_sysino(px_p->px_dip, 64 px_p->px_inos[PX_INTR_PEC], &sysino) != DDI_SUCCESS) 65 return (DDI_FAILURE); 66 67 /* 68 * Allocate interrupt block state structure and link it to 69 * the px state structure. 70 */ 71 ib_p = kmem_zalloc(sizeof (px_ib_t), KM_SLEEP); 72 px_p->px_ib_p = ib_p; 73 ib_p->ib_px_p = px_p; 74 ib_p->ib_ino_lst = (px_ino_t *)NULL; 75 76 mutex_init(&ib_p->ib_intr_lock, NULL, MUTEX_DRIVER, NULL); 77 mutex_init(&ib_p->ib_ino_lst_mutex, NULL, MUTEX_DRIVER, NULL); 78 79 bus_func_register(BF_TYPE_RESINTR, px_ib_intr_reset, ib_p); 80 81 intr_dist_add_weighted(px_ib_intr_redist, ib_p); 82 83 /* 84 * Initialize PEC fault data structure 85 */ 86 fault_p->px_fh_dip = dip; 87 fault_p->px_fh_sysino = sysino; 88 fault_p->px_err_func = px_err_dmc_pec_intr; 89 fault_p->px_intr_ino = px_p->px_inos[PX_INTR_PEC]; 90 91 return (DDI_SUCCESS); 92 } 93 94 void 95 px_ib_detach(px_t *px_p) 96 { 97 px_ib_t *ib_p = px_p->px_ib_p; 98 dev_info_t *dip = px_p->px_dip; 99 100 DBG(DBG_IB, dip, "px_ib_detach\n"); 101 102 bus_func_unregister(BF_TYPE_RESINTR, px_ib_intr_reset, ib_p); 103 intr_dist_rem_weighted(px_ib_intr_redist, ib_p); 104 105 mutex_destroy(&ib_p->ib_ino_lst_mutex); 106 mutex_destroy(&ib_p->ib_intr_lock); 107 108 px_ib_free_ino_all(ib_p); 109 110 px_p->px_ib_p = NULL; 111 kmem_free(ib_p, sizeof (px_ib_t)); 112 } 113 114 void 115 px_ib_intr_enable(px_t *px_p, cpuid_t cpu_id, devino_t ino) 116 { 117 px_ib_t *ib_p = px_p->px_ib_p; 118 sysino_t sysino; 119 120 /* 121 * Determine the cpu for the interrupt 122 */ 123 mutex_enter(&ib_p->ib_intr_lock); 124 125 DBG(DBG_IB, px_p->px_dip, 126 "px_ib_intr_enable: ino=%x cpu_id=%x\n", ino, cpu_id); 127 128 if (px_lib_intr_devino_to_sysino(px_p->px_dip, ino, 129 &sysino) != DDI_SUCCESS) { 130 DBG(DBG_IB, px_p->px_dip, 131 "px_ib_intr_enable: px_intr_devino_to_sysino() failed\n"); 132 133 mutex_exit(&ib_p->ib_intr_lock); 134 return; 135 } 136 137 PX_INTR_ENABLE(px_p->px_dip, sysino, cpu_id); 138 px_lib_intr_setstate(px_p->px_dip, sysino, INTR_IDLE_STATE); 139 140 mutex_exit(&ib_p->ib_intr_lock); 141 } 142 143 /*ARGSUSED*/ 144 void 145 px_ib_intr_disable(px_ib_t *ib_p, devino_t ino, int wait) 146 { 147 sysino_t sysino; 148 149 mutex_enter(&ib_p->ib_intr_lock); 150 151 DBG(DBG_IB, ib_p->ib_px_p->px_dip, "px_ib_intr_disable: ino=%x\n", ino); 152 153 /* Disable the interrupt */ 154 if (px_lib_intr_devino_to_sysino(ib_p->ib_px_p->px_dip, ino, 155 &sysino) != DDI_SUCCESS) { 156 DBG(DBG_IB, ib_p->ib_px_p->px_dip, 157 "px_ib_intr_disable: px_intr_devino_to_sysino() failed\n"); 158 159 mutex_exit(&ib_p->ib_intr_lock); 160 return; 161 } 162 163 PX_INTR_DISABLE(ib_p->ib_px_p->px_dip, sysino); 164 165 mutex_exit(&ib_p->ib_intr_lock); 166 } 167 168 169 void 170 px_ib_intr_dist_en(dev_info_t *dip, cpuid_t cpu_id, devino_t ino, 171 boolean_t wait_flag) 172 { 173 uint32_t old_cpu_id; 174 sysino_t sysino; 175 intr_valid_state_t enabled = 0; 176 hrtime_t start_time, prev, curr, interval, jump; 177 hrtime_t intr_timeout; 178 intr_state_t intr_state; 179 int e = DDI_SUCCESS; 180 181 DBG(DBG_IB, dip, "px_ib_intr_dist_en: ino=0x%x\n", ino); 182 183 if (px_lib_intr_devino_to_sysino(dip, ino, &sysino) != DDI_SUCCESS) { 184 DBG(DBG_IB, dip, "px_ib_intr_dist_en: " 185 "px_intr_devino_to_sysino() failed, ino 0x%x\n", ino); 186 return; 187 } 188 189 /* Skip enabling disabled interrupts */ 190 if (px_lib_intr_getvalid(dip, sysino, &enabled) != DDI_SUCCESS) { 191 DBG(DBG_IB, dip, "px_ib_intr_dist_en: px_intr_getvalid() " 192 "failed, sysino 0x%x\n", sysino); 193 return; 194 } 195 if (!enabled) 196 return; 197 198 /* Done if redistributed onto the same cpuid */ 199 if (px_lib_intr_gettarget(dip, sysino, &old_cpu_id) != DDI_SUCCESS) { 200 DBG(DBG_IB, dip, "px_ib_intr_dist_en: " 201 "px_intr_gettarget() failed\n"); 202 return; 203 } 204 if (cpu_id == old_cpu_id) 205 return; 206 207 if (!wait_flag) 208 goto done; 209 210 /* Busy wait on pending interrupts */ 211 PX_INTR_DISABLE(dip, sysino); 212 213 intr_timeout = px_intrpend_timeout; 214 jump = TICK_TO_NSEC(xc_tick_jump_limit); 215 216 for (curr = start_time = gethrtime(); !panicstr && 217 ((e = px_lib_intr_getstate(dip, sysino, &intr_state)) == 218 DDI_SUCCESS) && 219 (intr_state == INTR_DELIVERED_STATE); /* */) { 220 /* 221 * If we have a really large jump in hrtime, it is most 222 * probably because we entered the debugger (or OBP, 223 * in general). So, we adjust the timeout accordingly 224 * to prevent declaring an interrupt timeout. The 225 * master-interrupt mechanism in OBP should deliver 226 * the interrupts properly. 227 */ 228 prev = curr; 229 curr = gethrtime(); 230 interval = curr - prev; 231 if (interval > jump) 232 intr_timeout += interval; 233 if (curr - start_time > intr_timeout) { 234 cmn_err(CE_WARN, 235 "%s%d: px_ib_intr_dist_en: sysino 0x%lx(ino 0x%x) " 236 "from cpu id 0x%x to 0x%x timeout", 237 ddi_driver_name(dip), ddi_get_instance(dip), 238 sysino, ino, old_cpu_id, cpu_id); 239 240 e = DDI_FAILURE; 241 break; 242 } 243 } 244 245 if (e != DDI_SUCCESS) 246 DBG(DBG_IB, dip, "px_ib_intr_dist_en: failed, " 247 "ino 0x%x sysino 0x%x\n", ino, sysino); 248 249 done: 250 PX_INTR_ENABLE(dip, sysino, cpu_id); 251 } 252 253 static void 254 px_ib_cpu_ticks_to_ih_nsec(px_ib_t *ib_p, px_ih_t *ih_p, uint32_t cpu_id) 255 { 256 extern kmutex_t pxintr_ks_template_lock; 257 hrtime_t ticks; 258 259 /* 260 * Because we are updating two fields in ih_t we must lock 261 * pxintr_ks_template_lock to prevent someone from reading the 262 * kstats after we set ih_ticks to 0 and before we increment 263 * ih_nsec to compensate. 264 * 265 * We must also protect against the interrupt arriving and incrementing 266 * ih_ticks between the time we read it and when we reset it to 0. 267 * To do this we use atomic_swap. 268 */ 269 270 ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex)); 271 272 mutex_enter(&pxintr_ks_template_lock); 273 ticks = atomic_swap_64(&ih_p->ih_ticks, 0); 274 ih_p->ih_nsec += (uint64_t)tick2ns(ticks, cpu_id); 275 mutex_exit(&pxintr_ks_template_lock); 276 } 277 278 279 /* 280 * Redistribute interrupts of the specified weight. The first call has a weight 281 * of weight_max, which can be used to trigger initialization for 282 * redistribution. The inos with weight [weight_max, inf.) should be processed 283 * on the "weight == weight_max" call. This first call is followed by calls 284 * of decreasing weights, inos of that weight should be processed. The final 285 * call specifies a weight of zero, this can be used to trigger processing of 286 * stragglers. 287 */ 288 static void 289 px_ib_intr_redist(void *arg, int32_t weight_max, int32_t weight) 290 { 291 px_ib_t *ib_p = (px_ib_t *)arg; 292 px_t *px_p = ib_p->ib_px_p; 293 dev_info_t *dip = px_p->px_dip; 294 px_ino_t *ino_p; 295 px_ino_pil_t *ipil_p; 296 px_ih_t *ih_lst; 297 int32_t dweight = 0; 298 int i; 299 300 /* Redistribute internal interrupts */ 301 if (weight == 0) { 302 mutex_enter(&ib_p->ib_intr_lock); 303 px_ib_intr_dist_en(dip, intr_dist_cpuid(), 304 px_p->px_inos[PX_INTR_PEC], B_FALSE); 305 mutex_exit(&ib_p->ib_intr_lock); 306 307 px_hp_intr_redist(px_p); 308 } 309 310 /* Redistribute device interrupts */ 311 mutex_enter(&ib_p->ib_ino_lst_mutex); 312 px_msiq_redist(px_p); 313 314 for (ino_p = ib_p->ib_ino_lst; ino_p; ino_p = ino_p->ino_next_p) { 315 /* 316 * Recomputes the sum of interrupt weights of devices that 317 * share the same ino upon first call marked by 318 * (weight == weight_max). 319 */ 320 if (weight == weight_max) { 321 ino_p->ino_intr_weight = 0; 322 323 for (ipil_p = ino_p->ino_ipil_p; ipil_p; 324 ipil_p = ipil_p->ipil_next_p) { 325 for (i = 0, ih_lst = ipil_p->ipil_ih_head; 326 i < ipil_p->ipil_ih_size; i++, 327 ih_lst = ih_lst->ih_next) { 328 dweight = i_ddi_get_intr_weight( 329 ih_lst->ih_dip); 330 if (dweight > 0) 331 ino_p->ino_intr_weight += 332 dweight; 333 } 334 } 335 } 336 337 /* 338 * As part of redistributing weighted interrupts over cpus, 339 * nexus redistributes device interrupts and updates 340 * cpu weight. The purpose is for the most light weighted 341 * cpu to take the next interrupt and gain weight, therefore 342 * attention demanding device gains more cpu attention by 343 * making itself heavy. 344 */ 345 if ((weight == ino_p->ino_intr_weight) || 346 ((weight >= weight_max) && 347 (ino_p->ino_intr_weight >= weight_max))) { 348 uint32_t orig_cpuid = ino_p->ino_cpuid; 349 350 if (cpu[orig_cpuid] == NULL) 351 orig_cpuid = CPU->cpu_id; 352 353 DBG(DBG_IB, dip, "px_ib_intr_redist: sysino 0x%llx " 354 "current cpuid 0x%x current default cpuid 0x%x\n", 355 ino_p->ino_sysino, ino_p->ino_cpuid, 356 ino_p->ino_default_cpuid); 357 358 /* select target cpuid and mark ino established */ 359 if (ino_p->ino_default_cpuid == -1) 360 ino_p->ino_cpuid = ino_p->ino_default_cpuid = 361 intr_dist_cpuid(); 362 else if ((ino_p->ino_cpuid != 363 ino_p->ino_default_cpuid) && 364 cpu[ino_p->ino_default_cpuid] && 365 cpu_intr_on(cpu[ino_p->ino_default_cpuid])) 366 ino_p->ino_cpuid = ino_p->ino_default_cpuid; 367 else if (!cpu_intr_on(cpu[ino_p->ino_cpuid])) 368 ino_p->ino_cpuid = intr_dist_cpuid(); 369 370 DBG(DBG_IB, dip, "px_ib_intr_redist: sysino 0x%llx " 371 "new cpuid 0x%x new default cpuid 0x%x\n", 372 ino_p->ino_sysino, ino_p->ino_cpuid, 373 ino_p->ino_default_cpuid); 374 375 /* Add device weight to targeted cpu. */ 376 for (ipil_p = ino_p->ino_ipil_p; ipil_p; 377 ipil_p = ipil_p->ipil_next_p) { 378 for (i = 0, ih_lst = ipil_p->ipil_ih_head; 379 i < ipil_p->ipil_ih_size; i++, 380 ih_lst = ih_lst->ih_next) { 381 382 dweight = i_ddi_get_intr_weight( 383 ih_lst->ih_dip); 384 intr_dist_cpuid_add_device_weight( 385 ino_p->ino_cpuid, ih_lst->ih_dip, 386 dweight); 387 388 /* 389 * Different cpus may have different 390 * clock speeds. to account for this, 391 * whenever an interrupt is moved to a 392 * new CPU, we convert the accumulated 393 * ticks into nsec, based upon the clock 394 * rate of the prior CPU. 395 * 396 * It is possible that the prior CPU no 397 * longer exists. In this case, fall 398 * back to using this CPU's clock rate. 399 * 400 * Note that the value in ih_ticks has 401 * already been corrected for any power 402 * savings mode which might have been 403 * in effect. 404 */ 405 px_ib_cpu_ticks_to_ih_nsec(ib_p, ih_lst, 406 orig_cpuid); 407 } 408 } 409 410 /* enable interrupt on new targeted cpu */ 411 px_ib_intr_dist_en(dip, ino_p->ino_cpuid, 412 ino_p->ino_ino, B_TRUE); 413 } 414 } 415 mutex_exit(&ib_p->ib_ino_lst_mutex); 416 } 417 418 /* 419 * Reset interrupts to IDLE. This function is called during 420 * panic handling after redistributing interrupts; it's needed to 421 * support dumping to network devices after 'sync' from OBP. 422 * 423 * N.B. This routine runs in a context where all other threads 424 * are permanently suspended. 425 */ 426 static uint_t 427 px_ib_intr_reset(void *arg) 428 { 429 px_ib_t *ib_p = (px_ib_t *)arg; 430 431 DBG(DBG_IB, ib_p->ib_px_p->px_dip, "px_ib_intr_reset\n"); 432 433 if (px_lib_intr_reset(ib_p->ib_px_p->px_dip) != DDI_SUCCESS) 434 return (BF_FATAL); 435 436 return (BF_NONE); 437 } 438 439 /* 440 * Locate px_ino_t structure on ib_p->ib_ino_lst according to ino# 441 * returns NULL if not found. 442 */ 443 px_ino_t * 444 px_ib_locate_ino(px_ib_t *ib_p, devino_t ino_num) 445 { 446 px_ino_t *ino_p = ib_p->ib_ino_lst; 447 448 ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex)); 449 450 for (; ino_p && ino_p->ino_ino != ino_num; ino_p = ino_p->ino_next_p) 451 ; 452 453 return (ino_p); 454 } 455 456 px_ino_t * 457 px_ib_alloc_ino(px_ib_t *ib_p, devino_t ino_num) 458 { 459 sysino_t sysino; 460 px_ino_t *ino_p; 461 462 if (px_lib_intr_devino_to_sysino(ib_p->ib_px_p->px_dip, 463 ino_num, &sysino) != DDI_SUCCESS) 464 return (NULL); 465 466 ino_p = kmem_zalloc(sizeof (px_ino_t), KM_SLEEP); 467 468 ino_p->ino_next_p = ib_p->ib_ino_lst; 469 ib_p->ib_ino_lst = ino_p; 470 471 ino_p->ino_ino = ino_num; 472 ino_p->ino_sysino = sysino; 473 ino_p->ino_ib_p = ib_p; 474 ino_p->ino_unclaimed_intrs = 0; 475 ino_p->ino_lopil = 0; 476 ino_p->ino_cpuid = ino_p->ino_default_cpuid = (cpuid_t)-1; 477 478 return (ino_p); 479 } 480 481 px_ino_pil_t * 482 px_ib_new_ino_pil(px_ib_t *ib_p, devino_t ino_num, uint_t pil, px_ih_t *ih_p) 483 { 484 px_ino_pil_t *ipil_p = kmem_zalloc(sizeof (px_ino_pil_t), KM_SLEEP); 485 px_ino_t *ino_p; 486 487 if ((ino_p = px_ib_locate_ino(ib_p, ino_num)) == NULL) 488 ino_p = px_ib_alloc_ino(ib_p, ino_num); 489 490 ASSERT(ino_p != NULL); 491 492 ih_p->ih_next = ih_p; 493 ipil_p->ipil_pil = pil; 494 ipil_p->ipil_ih_head = ih_p; 495 ipil_p->ipil_ih_tail = ih_p; 496 ipil_p->ipil_ih_start = ih_p; 497 ipil_p->ipil_ih_size = 1; 498 ipil_p->ipil_ino_p = ino_p; 499 500 ipil_p->ipil_next_p = ino_p->ino_ipil_p; 501 ino_p->ino_ipil_p = ipil_p; 502 ino_p->ino_ipil_size++; 503 504 if ((ino_p->ino_lopil == 0) || (ino_p->ino_lopil > pil)) 505 ino_p->ino_lopil = pil; 506 507 return (ipil_p); 508 } 509 510 void 511 px_ib_delete_ino_pil(px_ib_t *ib_p, px_ino_pil_t *ipil_p) 512 { 513 px_ino_t *ino_p = ipil_p->ipil_ino_p; 514 ushort_t pil = ipil_p->ipil_pil; 515 px_ino_pil_t *prev, *next; 516 517 ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex)); 518 519 if (ino_p->ino_ipil_p == ipil_p) 520 ino_p->ino_ipil_p = ipil_p->ipil_next_p; 521 else { 522 for (prev = next = ino_p->ino_ipil_p; next != ipil_p; 523 prev = next, next = next->ipil_next_p) 524 ; 525 526 if (prev) 527 prev->ipil_next_p = ipil_p->ipil_next_p; 528 } 529 530 kmem_free(ipil_p, sizeof (px_ino_pil_t)); 531 532 if ((--ino_p->ino_ipil_size) && (ino_p->ino_lopil == pil)) { 533 for (next = ino_p->ino_ipil_p, pil = next->ipil_pil; 534 next; next = next->ipil_next_p) { 535 536 if (pil > next->ipil_pil) 537 pil = next->ipil_pil; 538 } 539 540 /* 541 * Value stored in pil should be the lowest pil. 542 */ 543 ino_p->ino_lopil = pil; 544 } 545 546 if (ino_p->ino_ipil_size) 547 return; 548 549 ino_p->ino_lopil = 0; 550 551 if (ino_p->ino_msiq_p) 552 return; 553 554 if (ib_p->ib_ino_lst == ino_p) 555 ib_p->ib_ino_lst = ino_p->ino_next_p; 556 else { 557 px_ino_t *list = ib_p->ib_ino_lst; 558 559 for (; list->ino_next_p != ino_p; list = list->ino_next_p) 560 ; 561 list->ino_next_p = ino_p->ino_next_p; 562 } 563 } 564 565 /* 566 * Free all ino when we are detaching. 567 */ 568 void 569 px_ib_free_ino_all(px_ib_t *ib_p) 570 { 571 px_ino_t *ino_p = ib_p->ib_ino_lst; 572 px_ino_t *next = NULL; 573 574 while (ino_p) { 575 next = ino_p->ino_next_p; 576 kmem_free(ino_p, sizeof (px_ino_t)); 577 ino_p = next; 578 } 579 } 580 581 /* 582 * Locate px_ino_pil_t structure on ino_p->ino_ipil_p according to ino# 583 * returns NULL if not found. 584 */ 585 px_ino_pil_t * 586 px_ib_ino_locate_ipil(px_ino_t *ino_p, uint_t pil) 587 { 588 px_ino_pil_t *ipil_p = ino_p->ino_ipil_p; 589 590 for (; ipil_p && ipil_p->ipil_pil != pil; ipil_p = ipil_p->ipil_next_p) 591 ; 592 593 return (ipil_p); 594 } 595 596 int 597 px_ib_ino_add_intr(px_t *px_p, px_ino_pil_t *ipil_p, px_ih_t *ih_p) 598 { 599 px_ino_t *ino_p = ipil_p->ipil_ino_p; 600 px_ib_t *ib_p = ino_p->ino_ib_p; 601 devino_t ino = ino_p->ino_ino; 602 sysino_t sysino = ino_p->ino_sysino; 603 dev_info_t *dip = px_p->px_dip; 604 cpuid_t curr_cpu; 605 hrtime_t start_time; 606 intr_state_t intr_state; 607 int ret = DDI_SUCCESS; 608 609 ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex)); 610 ASSERT(ib_p == px_p->px_ib_p); 611 612 DBG(DBG_IB, dip, "px_ib_ino_add_intr ino=%x\n", ino_p->ino_ino); 613 614 /* Disable the interrupt */ 615 if ((ret = px_lib_intr_gettarget(dip, sysino, 616 &curr_cpu)) != DDI_SUCCESS) { 617 DBG(DBG_IB, dip, 618 "px_ib_ino_add_intr px_intr_gettarget() failed\n"); 619 620 return (ret); 621 } 622 623 PX_INTR_DISABLE(dip, sysino); 624 625 /* Busy wait on pending interrupt */ 626 for (start_time = gethrtime(); !panicstr && 627 ((ret = px_lib_intr_getstate(dip, sysino, &intr_state)) 628 == DDI_SUCCESS) && (intr_state == INTR_DELIVERED_STATE); /* */) { 629 if (gethrtime() - start_time > px_intrpend_timeout) { 630 cmn_err(CE_WARN, "%s%d: px_ib_ino_add_intr: pending " 631 "sysino 0x%lx(ino 0x%x) timeout", 632 ddi_driver_name(dip), ddi_get_instance(dip), 633 sysino, ino); 634 635 ret = DDI_FAILURE; 636 break; 637 } 638 } 639 640 /* 641 * If the interrupt was previously blocked (left in pending state) 642 * because of jabber we need to clear the pending state in case the 643 * jabber has gone away. 644 */ 645 if (ino_p->ino_unclaimed_intrs > px_unclaimed_intr_max) { 646 cmn_err(CE_WARN, 647 "%s%d: px_ib_ino_add_intr: ino 0x%x has been unblocked", 648 ddi_driver_name(dip), ddi_get_instance(dip), ino); 649 650 ino_p->ino_unclaimed_intrs = 0; 651 ret = px_lib_intr_setstate(dip, sysino, INTR_IDLE_STATE); 652 } 653 654 if (ret != DDI_SUCCESS) { 655 DBG(DBG_IB, dip, "px_ib_ino_add_intr: failed, " 656 "ino 0x%x sysino 0x%x\n", ino, sysino); 657 658 return (ret); 659 } 660 661 /* Link up px_ih_t */ 662 ih_p->ih_next = ipil_p->ipil_ih_head; 663 ipil_p->ipil_ih_tail->ih_next = ih_p; 664 ipil_p->ipil_ih_tail = ih_p; 665 666 ipil_p->ipil_ih_start = ipil_p->ipil_ih_head; 667 ipil_p->ipil_ih_size++; 668 669 /* Re-enable interrupt */ 670 PX_INTR_ENABLE(dip, sysino, curr_cpu); 671 672 return (ret); 673 } 674 675 /* 676 * Removes px_ih_t from the ino's link list. 677 * uses hardware mutex to lock out interrupt threads. 678 * Side effects: interrupt belongs to that ino is turned off on return. 679 * if we are sharing PX slot with other inos, the caller needs 680 * to turn it back on. 681 */ 682 int 683 px_ib_ino_rem_intr(px_t *px_p, px_ino_pil_t *ipil_p, px_ih_t *ih_p) 684 { 685 px_ino_t *ino_p = ipil_p->ipil_ino_p; 686 devino_t ino = ino_p->ino_ino; 687 sysino_t sysino = ino_p->ino_sysino; 688 dev_info_t *dip = px_p->px_dip; 689 px_ih_t *ih_lst = ipil_p->ipil_ih_head; 690 hrtime_t start_time; 691 intr_state_t intr_state; 692 int i, ret = DDI_SUCCESS; 693 694 ASSERT(MUTEX_HELD(&ino_p->ino_ib_p->ib_ino_lst_mutex)); 695 696 DBG(DBG_IB, px_p->px_dip, "px_ib_ino_rem_intr ino=%x\n", 697 ino_p->ino_ino); 698 699 /* Disable the interrupt */ 700 PX_INTR_DISABLE(px_p->px_dip, sysino); 701 702 /* Busy wait on pending interrupt */ 703 for (start_time = gethrtime(); !panicstr && 704 ((ret = px_lib_intr_getstate(dip, sysino, &intr_state)) 705 == DDI_SUCCESS) && (intr_state == INTR_DELIVERED_STATE); /* */) { 706 if (gethrtime() - start_time > px_intrpend_timeout) { 707 cmn_err(CE_WARN, "%s%d: px_ib_ino_rem_intr: pending " 708 "sysino 0x%lx(ino 0x%x) timeout", 709 ddi_driver_name(dip), ddi_get_instance(dip), 710 sysino, ino); 711 712 ret = DDI_FAILURE; 713 break; 714 } 715 } 716 717 /* 718 * If the interrupt was previously blocked (left in pending state) 719 * because of jabber we need to clear the pending state in case the 720 * jabber has gone away. 721 */ 722 if (ret == DDI_SUCCESS && 723 ino_p->ino_unclaimed_intrs > px_unclaimed_intr_max) { 724 cmn_err(CE_WARN, "%s%d: px_ib_ino_rem_intr: " 725 "ino 0x%x has been unblocked", 726 ddi_driver_name(dip), ddi_get_instance(dip), ino); 727 728 ino_p->ino_unclaimed_intrs = 0; 729 ret = px_lib_intr_setstate(dip, sysino, INTR_IDLE_STATE); 730 } 731 732 if (ret != DDI_SUCCESS) { 733 DBG(DBG_IB, dip, "px_ib_ino_rem_intr: failed, " 734 "ino 0x%x sysino 0x%x\n", ino, sysino); 735 736 return (ret); 737 } 738 739 if (ipil_p->ipil_ih_size == 1) { 740 if (ih_lst != ih_p) 741 goto not_found; 742 743 /* No need to set head/tail as ino_p will be freed */ 744 goto reset; 745 } 746 747 /* Search the link list for ih_p */ 748 for (i = 0; (i < ipil_p->ipil_ih_size) && 749 (ih_lst->ih_next != ih_p); i++, ih_lst = ih_lst->ih_next) 750 ; 751 752 if (ih_lst->ih_next != ih_p) 753 goto not_found; 754 755 /* Remove ih_p from the link list and maintain the head/tail */ 756 ih_lst->ih_next = ih_p->ih_next; 757 758 if (ipil_p->ipil_ih_head == ih_p) 759 ipil_p->ipil_ih_head = ih_p->ih_next; 760 if (ipil_p->ipil_ih_tail == ih_p) 761 ipil_p->ipil_ih_tail = ih_lst; 762 763 ipil_p->ipil_ih_start = ipil_p->ipil_ih_head; 764 765 reset: 766 if (ih_p->ih_config_handle) 767 pci_config_teardown(&ih_p->ih_config_handle); 768 if (ih_p->ih_ksp != NULL) 769 kstat_delete(ih_p->ih_ksp); 770 771 kmem_free(ih_p, sizeof (px_ih_t)); 772 ipil_p->ipil_ih_size--; 773 774 return (ret); 775 776 not_found: 777 DBG(DBG_R_INTX, ino_p->ino_ib_p->ib_px_p->px_dip, 778 "ino_p=%x does not have ih_p=%x\n", ino_p, ih_p); 779 780 return (DDI_FAILURE); 781 } 782 783 px_ih_t * 784 px_ib_intr_locate_ih(px_ino_pil_t *ipil_p, dev_info_t *rdip, 785 uint32_t inum, msiq_rec_type_t rec_type, msgcode_t msg_code) 786 { 787 px_ih_t *ih_p = ipil_p->ipil_ih_head; 788 int i; 789 790 for (i = 0; i < ipil_p->ipil_ih_size; i++, ih_p = ih_p->ih_next) { 791 if ((ih_p->ih_dip == rdip) && (ih_p->ih_inum == inum) && 792 (ih_p->ih_rec_type == rec_type) && 793 (ih_p->ih_msg_code == msg_code)) 794 return (ih_p); 795 } 796 797 return ((px_ih_t *)NULL); 798 } 799 800 px_ih_t * 801 px_ib_alloc_ih(dev_info_t *rdip, uint32_t inum, 802 uint_t (*int_handler)(caddr_t int_handler_arg1, caddr_t int_handler_arg2), 803 caddr_t int_handler_arg1, caddr_t int_handler_arg2, 804 msiq_rec_type_t rec_type, msgcode_t msg_code) 805 { 806 px_ih_t *ih_p; 807 808 ih_p = kmem_alloc(sizeof (px_ih_t), KM_SLEEP); 809 ih_p->ih_dip = rdip; 810 ih_p->ih_inum = inum; 811 ih_p->ih_intr_state = PX_INTR_STATE_DISABLE; 812 ih_p->ih_intr_flags = PX_INTR_IDLE; 813 ih_p->ih_handler = int_handler; 814 ih_p->ih_handler_arg1 = int_handler_arg1; 815 ih_p->ih_handler_arg2 = int_handler_arg2; 816 ih_p->ih_config_handle = NULL; 817 ih_p->ih_rec_type = rec_type; 818 ih_p->ih_msg_code = msg_code; 819 ih_p->ih_nsec = 0; 820 ih_p->ih_ticks = 0; 821 ih_p->ih_ksp = NULL; 822 823 return (ih_p); 824 } 825 826 int 827 px_ib_update_intr_state(px_t *px_p, dev_info_t *rdip, 828 uint_t inum, devino_t ino, uint_t pil, 829 uint_t new_intr_state, msiq_rec_type_t rec_type, 830 msgcode_t msg_code) 831 { 832 px_ib_t *ib_p = px_p->px_ib_p; 833 px_ino_t *ino_p; 834 px_ino_pil_t *ipil_p; 835 px_ih_t *ih_p; 836 int ret = DDI_FAILURE; 837 838 DBG(DBG_IB, px_p->px_dip, "px_ib_update_intr_state: %s%d " 839 "inum %x devino %x pil %x state %x\n", ddi_driver_name(rdip), 840 ddi_get_instance(rdip), inum, ino, pil, new_intr_state); 841 842 mutex_enter(&ib_p->ib_ino_lst_mutex); 843 844 ino_p = px_ib_locate_ino(ib_p, ino); 845 if (ino_p && (ipil_p = px_ib_ino_locate_ipil(ino_p, pil))) { 846 if (ih_p = px_ib_intr_locate_ih(ipil_p, rdip, inum, rec_type, 847 msg_code)) { 848 ih_p->ih_intr_state = new_intr_state; 849 ret = DDI_SUCCESS; 850 } 851 } 852 853 mutex_exit(&ib_p->ib_ino_lst_mutex); 854 return (ret); 855 } 856 857 858 /* 859 * Get interrupt CPU for a given ino. 860 * Return info only for inos which are already mapped to devices. 861 */ 862 /*ARGSUSED*/ 863 int 864 px_ib_get_intr_target(px_t *px_p, devino_t ino, cpuid_t *cpu_id_p) 865 { 866 dev_info_t *dip = px_p->px_dip; 867 sysino_t sysino; 868 int ret; 869 870 DBG(DBG_IB, px_p->px_dip, "px_ib_get_intr_target: devino %x\n", ino); 871 872 /* Convert leaf-wide intr to system-wide intr */ 873 if (px_lib_intr_devino_to_sysino(dip, ino, &sysino) != DDI_SUCCESS) 874 return (DDI_FAILURE); 875 876 ret = px_lib_intr_gettarget(dip, sysino, cpu_id_p); 877 878 DBG(DBG_IB, px_p->px_dip, "px_ib_get_intr_target: cpu_id %x\n", 879 *cpu_id_p); 880 881 return (ret); 882 } 883 884 885 /* 886 * Associate a new CPU with a given ino. 887 * Operate only on INOs which are already mapped to devices. 888 */ 889 int 890 px_ib_set_intr_target(px_t *px_p, devino_t ino, cpuid_t cpu_id) 891 { 892 dev_info_t *dip = px_p->px_dip; 893 cpuid_t old_cpu_id; 894 sysino_t sysino; 895 int ret = DDI_SUCCESS; 896 extern const int _ncpu; 897 extern cpu_t *cpu[]; 898 899 DBG(DBG_IB, px_p->px_dip, "px_ib_set_intr_target: devino %x " 900 "cpu_id %x\n", ino, cpu_id); 901 902 mutex_enter(&cpu_lock); 903 904 /* Convert leaf-wide intr to system-wide intr */ 905 if (px_lib_intr_devino_to_sysino(dip, ino, &sysino) != DDI_SUCCESS) { 906 ret = DDI_FAILURE; 907 goto done; 908 } 909 910 if (px_lib_intr_gettarget(dip, sysino, &old_cpu_id) != DDI_SUCCESS) { 911 ret = DDI_FAILURE; 912 goto done; 913 } 914 915 /* 916 * Get lock, validate cpu and write it. 917 */ 918 if ((cpu_id < _ncpu) && (cpu[cpu_id] && cpu_is_online(cpu[cpu_id]))) { 919 DBG(DBG_IB, dip, "px_ib_set_intr_target: Enabling CPU %d\n", 920 cpu_id); 921 px_ib_intr_dist_en(dip, cpu_id, ino, B_TRUE); 922 px_ib_log_new_cpu(px_p->px_ib_p, old_cpu_id, cpu_id, ino); 923 } else { /* Invalid cpu */ 924 DBG(DBG_IB, dip, "px_ib_set_intr_target: Invalid cpuid %x\n", 925 cpu_id); 926 ret = DDI_EINVAL; 927 } 928 929 done: 930 mutex_exit(&cpu_lock); 931 return (ret); 932 } 933 934 hrtime_t px_ib_msix_retarget_timeout = 120ll * NANOSEC; /* 120 seconds */ 935 936 /* 937 * Associate a new CPU with a given MSI/X. 938 * Operate only on MSI/Xs which are already mapped to devices. 939 */ 940 int 941 px_ib_set_msix_target(px_t *px_p, ddi_intr_handle_impl_t *hdlp, 942 msinum_t msi_num, cpuid_t cpu_id) 943 { 944 px_ib_t *ib_p = px_p->px_ib_p; 945 px_msi_state_t *msi_state_p = &px_p->px_ib_p->ib_msi_state; 946 dev_info_t *dip = px_p->px_dip; 947 dev_info_t *rdip = hdlp->ih_dip; 948 msiqid_t msiq_id, old_msiq_id; 949 pci_msi_state_t msi_state; 950 msiq_rec_type_t msiq_rec_type; 951 msi_type_t msi_type; 952 px_ino_t *ino_p; 953 px_ih_t *ih_p, *old_ih_p; 954 cpuid_t old_cpu_id; 955 hrtime_t start_time, end_time; 956 int ret = DDI_SUCCESS; 957 extern const int _ncpu; 958 extern cpu_t *cpu[]; 959 960 DBG(DBG_IB, dip, "px_ib_set_msix_target: msi_num %x new cpu_id %x\n", 961 msi_num, cpu_id); 962 963 mutex_enter(&cpu_lock); 964 965 /* Check for MSI64 support */ 966 if ((hdlp->ih_cap & DDI_INTR_FLAG_MSI64) && msi_state_p->msi_addr64) { 967 msiq_rec_type = MSI64_REC; 968 msi_type = MSI64_TYPE; 969 } else { 970 msiq_rec_type = MSI32_REC; 971 msi_type = MSI32_TYPE; 972 } 973 974 if ((ret = px_lib_msi_getmsiq(dip, msi_num, 975 &old_msiq_id)) != DDI_SUCCESS) { 976 977 mutex_exit(&cpu_lock); 978 return (ret); 979 } 980 981 DBG(DBG_IB, dip, "px_ib_set_msix_target: current msiq 0x%x\n", 982 old_msiq_id); 983 984 if ((ret = px_ib_get_intr_target(px_p, 985 px_msiqid_to_devino(px_p, old_msiq_id), 986 &old_cpu_id)) != DDI_SUCCESS) { 987 988 mutex_exit(&cpu_lock); 989 return (ret); 990 } 991 992 DBG(DBG_IB, dip, "px_ib_set_msix_target: current cpuid 0x%x\n", 993 old_cpu_id); 994 995 if (cpu_id == old_cpu_id) { 996 997 mutex_exit(&cpu_lock); 998 return (DDI_SUCCESS); 999 } 1000 1001 /* 1002 * Get lock, validate cpu and write it. 1003 */ 1004 if (!((cpu_id < _ncpu) && (cpu[cpu_id] && 1005 cpu_is_online(cpu[cpu_id])))) { 1006 /* Invalid cpu */ 1007 DBG(DBG_IB, dip, "px_ib_set_msix_target: Invalid cpuid %x\n", 1008 cpu_id); 1009 1010 mutex_exit(&cpu_lock); 1011 return (DDI_EINVAL); 1012 } 1013 1014 DBG(DBG_IB, dip, "px_ib_set_msix_target: Enabling CPU %d\n", cpu_id); 1015 1016 if ((ret = px_add_msiq_intr(dip, rdip, hdlp, 1017 msiq_rec_type, msi_num, cpu_id, &msiq_id)) != DDI_SUCCESS) { 1018 DBG(DBG_IB, dip, "px_ib_set_msix_target: Add MSI handler " 1019 "failed, rdip 0x%p msi 0x%x\n", rdip, msi_num); 1020 1021 mutex_exit(&cpu_lock); 1022 return (ret); 1023 } 1024 1025 if ((ret = px_lib_msi_setmsiq(dip, msi_num, 1026 msiq_id, msi_type)) != DDI_SUCCESS) { 1027 mutex_exit(&cpu_lock); 1028 1029 (void) px_rem_msiq_intr(dip, rdip, 1030 hdlp, msiq_rec_type, msi_num, msiq_id); 1031 1032 return (ret); 1033 } 1034 1035 if ((ret = px_ib_update_intr_state(px_p, rdip, hdlp->ih_inum, 1036 px_msiqid_to_devino(px_p, msiq_id), hdlp->ih_pri, 1037 PX_INTR_STATE_ENABLE, msiq_rec_type, msi_num)) != DDI_SUCCESS) { 1038 mutex_exit(&cpu_lock); 1039 1040 (void) px_rem_msiq_intr(dip, rdip, 1041 hdlp, msiq_rec_type, msi_num, msiq_id); 1042 1043 return (ret); 1044 } 1045 1046 mutex_exit(&cpu_lock); 1047 1048 /* 1049 * Remove the old handler, but first ensure it is finished. 1050 * 1051 * Each handler sets its PENDING flag before it clears the MSI state. 1052 * Then it clears that flag when finished. If a re-target occurs while 1053 * the MSI state is DELIVERED, then it is not yet known which of the 1054 * two handlers will take the interrupt. So the re-target operation 1055 * sets a RETARGET flag on both handlers in that case. Monitoring both 1056 * flags on both handlers then determines when the old handler can be 1057 * be safely removed. 1058 */ 1059 mutex_enter(&ib_p->ib_ino_lst_mutex); 1060 1061 ino_p = px_ib_locate_ino(ib_p, px_msiqid_to_devino(px_p, old_msiq_id)); 1062 old_ih_p = px_ib_intr_locate_ih(px_ib_ino_locate_ipil(ino_p, 1063 hdlp->ih_pri), rdip, hdlp->ih_inum, msiq_rec_type, msi_num); 1064 1065 ino_p = px_ib_locate_ino(ib_p, px_msiqid_to_devino(px_p, msiq_id)); 1066 ih_p = px_ib_intr_locate_ih(px_ib_ino_locate_ipil(ino_p, hdlp->ih_pri), 1067 rdip, hdlp->ih_inum, msiq_rec_type, msi_num); 1068 1069 if ((ret = px_lib_msi_getstate(dip, msi_num, 1070 &msi_state)) != DDI_SUCCESS) { 1071 (void) px_rem_msiq_intr(dip, rdip, 1072 hdlp, msiq_rec_type, msi_num, msiq_id); 1073 1074 mutex_exit(&ib_p->ib_ino_lst_mutex); 1075 return (ret); 1076 } 1077 1078 if (msi_state == PCI_MSI_STATE_DELIVERED) { 1079 ih_p->ih_intr_flags |= PX_INTR_RETARGET; 1080 old_ih_p->ih_intr_flags |= PX_INTR_RETARGET; 1081 } 1082 1083 start_time = gethrtime(); 1084 while (((ih_p->ih_intr_flags & PX_INTR_RETARGET) && 1085 (old_ih_p->ih_intr_flags & PX_INTR_RETARGET)) || 1086 (old_ih_p->ih_intr_flags & PX_INTR_PENDING)) { 1087 1088 /* Wait for one second */ 1089 delay(drv_usectohz(1000000)); 1090 1091 end_time = gethrtime() - start_time; 1092 if (end_time > px_ib_msix_retarget_timeout) { 1093 cmn_err(CE_WARN, "MSIX retarget %x is not completed, " 1094 "even after waiting %llx ticks\n", 1095 msi_num, end_time); 1096 break; 1097 } 1098 } 1099 1100 ih_p->ih_intr_flags &= ~(PX_INTR_RETARGET); 1101 1102 mutex_exit(&ib_p->ib_ino_lst_mutex); 1103 1104 ret = px_rem_msiq_intr(dip, rdip, 1105 hdlp, msiq_rec_type, msi_num, old_msiq_id); 1106 1107 return (ret); 1108 } 1109 1110 1111 static void 1112 px_fill_in_intr_devs(pcitool_intr_dev_t *dev, char *driver_name, 1113 char *path_name, int instance) 1114 { 1115 (void) strncpy(dev->driver_name, driver_name, MAXMODCONFNAME-1); 1116 dev->driver_name[MAXMODCONFNAME] = '\0'; 1117 (void) strncpy(dev->path, path_name, MAXPATHLEN-1); 1118 dev->dev_inst = instance; 1119 } 1120 1121 1122 /* 1123 * Return the dips or number of dips associated with a given interrupt block. 1124 * Size of dips array arg is passed in as dips_ret arg. 1125 * Number of dips returned is returned in dips_ret arg. 1126 * Array of dips gets returned in the dips argument. 1127 * Function returns number of dips existing for the given interrupt block. 1128 * 1129 * Note: this function assumes an enabled/valid INO, which is why it returns 1130 * the px node and (Internal) when it finds no other devices (and *devs_ret > 0) 1131 */ 1132 uint8_t 1133 pxtool_ib_get_ino_devs(px_t *px_p, uint32_t ino, uint32_t msi_num, 1134 uint8_t *devs_ret, pcitool_intr_dev_t *devs) 1135 { 1136 px_ib_t *ib_p = px_p->px_ib_p; 1137 px_ino_t *ino_p; 1138 px_ino_pil_t *ipil_p; 1139 px_ih_t *ih_p; 1140 uint32_t num_devs = 0; 1141 char pathname[MAXPATHLEN]; 1142 int i, j; 1143 1144 mutex_enter(&ib_p->ib_ino_lst_mutex); 1145 ino_p = px_ib_locate_ino(ib_p, ino); 1146 if (ino_p != NULL) { 1147 for (j = 0, ipil_p = ino_p->ino_ipil_p; ipil_p; 1148 ipil_p = ipil_p->ipil_next_p) { 1149 num_devs += ipil_p->ipil_ih_size; 1150 1151 for (i = 0, ih_p = ipil_p->ipil_ih_head; 1152 ((i < ipil_p->ipil_ih_size) && (i < *devs_ret)); 1153 i++, j++, ih_p = ih_p->ih_next) { 1154 (void) ddi_pathname(ih_p->ih_dip, pathname); 1155 1156 if (ih_p->ih_msg_code == msi_num) { 1157 num_devs = *devs_ret = 1; 1158 px_fill_in_intr_devs(&devs[0], 1159 (char *)ddi_driver_name( 1160 ih_p->ih_dip), pathname, 1161 ddi_get_instance(ih_p->ih_dip)); 1162 goto done; 1163 } 1164 1165 px_fill_in_intr_devs(&devs[j], 1166 (char *)ddi_driver_name(ih_p->ih_dip), 1167 pathname, ddi_get_instance(ih_p->ih_dip)); 1168 } 1169 } 1170 1171 *devs_ret = j; 1172 } else if (*devs_ret > 0) { 1173 (void) ddi_pathname(px_p->px_dip, pathname); 1174 strcat(pathname, " (Internal)"); 1175 px_fill_in_intr_devs(&devs[0], 1176 (char *)ddi_driver_name(px_p->px_dip), pathname, 1177 ddi_get_instance(px_p->px_dip)); 1178 num_devs = *devs_ret = 1; 1179 } 1180 1181 done: 1182 mutex_exit(&ib_p->ib_ino_lst_mutex); 1183 1184 return (num_devs); 1185 } 1186 1187 1188 int 1189 pxtool_ib_get_msi_info(px_t *px_p, devino_t ino, msinum_t msi_num, 1190 ddi_intr_handle_impl_t *hdlp) 1191 { 1192 px_ib_t *ib_p = px_p->px_ib_p; 1193 px_ino_t *ino_p; 1194 px_ino_pil_t *ipil_p; 1195 px_ih_t *ih_p; 1196 int i; 1197 1198 mutex_enter(&ib_p->ib_ino_lst_mutex); 1199 1200 if ((ino_p = px_ib_locate_ino(ib_p, ino)) == NULL) { 1201 mutex_exit(&ib_p->ib_ino_lst_mutex); 1202 return (DDI_FAILURE); 1203 } 1204 1205 for (ipil_p = ino_p->ino_ipil_p; ipil_p; 1206 ipil_p = ipil_p->ipil_next_p) { 1207 for (i = 0, ih_p = ipil_p->ipil_ih_head; 1208 ((i < ipil_p->ipil_ih_size) && ih_p); 1209 i++, ih_p = ih_p->ih_next) { 1210 1211 if (ih_p->ih_msg_code != msi_num) 1212 continue; 1213 1214 hdlp->ih_dip = ih_p->ih_dip; 1215 hdlp->ih_inum = ih_p->ih_inum; 1216 hdlp->ih_cb_func = ih_p->ih_handler; 1217 hdlp->ih_cb_arg1 = ih_p->ih_handler_arg1; 1218 hdlp->ih_cb_arg2 = ih_p->ih_handler_arg2; 1219 if (ih_p->ih_rec_type == MSI64_REC) 1220 hdlp->ih_cap = DDI_INTR_FLAG_MSI64; 1221 hdlp->ih_pri = ipil_p->ipil_pil; 1222 hdlp->ih_ver = DDI_INTR_VERSION; 1223 1224 mutex_exit(&ib_p->ib_ino_lst_mutex); 1225 return (DDI_SUCCESS); 1226 } 1227 } 1228 1229 mutex_exit(&ib_p->ib_ino_lst_mutex); 1230 return (DDI_FAILURE); 1231 } 1232 1233 void 1234 px_ib_log_new_cpu(px_ib_t *ib_p, cpuid_t old_cpu_id, cpuid_t new_cpu_id, 1235 uint32_t ino) 1236 { 1237 px_ino_t *ino_p; 1238 px_ino_pil_t *ipil_p; 1239 px_ih_t *ih_p; 1240 int i; 1241 1242 mutex_enter(&ib_p->ib_ino_lst_mutex); 1243 1244 /* Log in OS data structures the new CPU. */ 1245 if (ino_p = px_ib_locate_ino(ib_p, ino)) { 1246 1247 /* Log in OS data structures the new CPU. */ 1248 ino_p->ino_cpuid = new_cpu_id; 1249 1250 for (ipil_p = ino_p->ino_ipil_p; ipil_p; 1251 ipil_p = ipil_p->ipil_next_p) { 1252 for (i = 0, ih_p = ipil_p->ipil_ih_head; 1253 (i < ipil_p->ipil_ih_size); 1254 i++, ih_p = ih_p->ih_next) { 1255 /* 1256 * Account for any residual time 1257 * to be logged for old cpu. 1258 */ 1259 px_ib_cpu_ticks_to_ih_nsec(ib_p, 1260 ih_p, old_cpu_id); 1261 } 1262 } 1263 } 1264 1265 mutex_exit(&ib_p->ib_ino_lst_mutex); 1266 } 1267