1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 /* 27 * PX Interrupt Block implementation 28 */ 29 30 #include <sys/types.h> 31 #include <sys/kmem.h> 32 #include <sys/async.h> 33 #include <sys/systm.h> /* panicstr */ 34 #include <sys/spl.h> 35 #include <sys/sunddi.h> 36 #include <sys/machsystm.h> /* intr_dist_add */ 37 #include <sys/ddi_impldefs.h> 38 #include <sys/cpuvar.h> 39 #include <sys/time.h> 40 #include "px_obj.h" 41 42 /*LINTLIBRARY*/ 43 44 static void px_ib_intr_redist(void *arg, int32_t weight_max, int32_t weight); 45 static void px_ib_cpu_ticks_to_ih_nsec(px_ib_t *ib_p, px_ih_t *ih_p, 46 uint32_t cpu_id); 47 static uint_t px_ib_intr_reset(void *arg); 48 static void px_fill_in_intr_devs(pcitool_intr_dev_t *dev, char *driver_name, 49 char *path_name, int instance); 50 51 extern uint64_t xc_tick_jump_limit; 52 53 int 54 px_ib_attach(px_t *px_p) 55 { 56 dev_info_t *dip = px_p->px_dip; 57 px_ib_t *ib_p; 58 sysino_t sysino; 59 px_fault_t *fault_p = &px_p->px_fault; 60 61 DBG(DBG_IB, dip, "px_ib_attach\n"); 62 63 if (px_lib_intr_devino_to_sysino(px_p->px_dip, 64 px_p->px_inos[PX_INTR_PEC], &sysino) != DDI_SUCCESS) 65 return (DDI_FAILURE); 66 67 /* 68 * Allocate interrupt block state structure and link it to 69 * the px state structure. 70 */ 71 ib_p = kmem_zalloc(sizeof (px_ib_t), KM_SLEEP); 72 px_p->px_ib_p = ib_p; 73 ib_p->ib_px_p = px_p; 74 ib_p->ib_ino_lst = (px_ino_t *)NULL; 75 76 mutex_init(&ib_p->ib_intr_lock, NULL, MUTEX_DRIVER, NULL); 77 mutex_init(&ib_p->ib_ino_lst_mutex, NULL, MUTEX_DRIVER, NULL); 78 79 bus_func_register(BF_TYPE_RESINTR, px_ib_intr_reset, ib_p); 80 81 intr_dist_add_weighted(px_ib_intr_redist, ib_p); 82 83 /* 84 * Initialize PEC fault data structure 85 */ 86 fault_p->px_fh_dip = dip; 87 fault_p->px_fh_sysino = sysino; 88 fault_p->px_err_func = px_err_dmc_pec_intr; 89 fault_p->px_intr_ino = px_p->px_inos[PX_INTR_PEC]; 90 91 return (DDI_SUCCESS); 92 } 93 94 void 95 px_ib_detach(px_t *px_p) 96 { 97 px_ib_t *ib_p = px_p->px_ib_p; 98 dev_info_t *dip = px_p->px_dip; 99 100 DBG(DBG_IB, dip, "px_ib_detach\n"); 101 102 bus_func_unregister(BF_TYPE_RESINTR, px_ib_intr_reset, ib_p); 103 intr_dist_rem_weighted(px_ib_intr_redist, ib_p); 104 105 mutex_destroy(&ib_p->ib_ino_lst_mutex); 106 mutex_destroy(&ib_p->ib_intr_lock); 107 108 px_ib_free_ino_all(ib_p); 109 110 px_p->px_ib_p = NULL; 111 kmem_free(ib_p, sizeof (px_ib_t)); 112 } 113 114 void 115 px_ib_intr_enable(px_t *px_p, cpuid_t cpu_id, devino_t ino) 116 { 117 px_ib_t *ib_p = px_p->px_ib_p; 118 sysino_t sysino; 119 120 /* 121 * Determine the cpu for the interrupt 122 */ 123 mutex_enter(&ib_p->ib_intr_lock); 124 125 DBG(DBG_IB, px_p->px_dip, 126 "px_ib_intr_enable: ino=%x cpu_id=%x\n", ino, cpu_id); 127 128 if (px_lib_intr_devino_to_sysino(px_p->px_dip, ino, 129 &sysino) != DDI_SUCCESS) { 130 DBG(DBG_IB, px_p->px_dip, 131 "px_ib_intr_enable: px_intr_devino_to_sysino() failed\n"); 132 133 mutex_exit(&ib_p->ib_intr_lock); 134 return; 135 } 136 137 PX_INTR_ENABLE(px_p->px_dip, sysino, cpu_id); 138 px_lib_intr_setstate(px_p->px_dip, sysino, INTR_IDLE_STATE); 139 140 mutex_exit(&ib_p->ib_intr_lock); 141 } 142 143 /*ARGSUSED*/ 144 void 145 px_ib_intr_disable(px_ib_t *ib_p, devino_t ino, int wait) 146 { 147 sysino_t sysino; 148 149 mutex_enter(&ib_p->ib_intr_lock); 150 151 DBG(DBG_IB, ib_p->ib_px_p->px_dip, "px_ib_intr_disable: ino=%x\n", ino); 152 153 /* Disable the interrupt */ 154 if (px_lib_intr_devino_to_sysino(ib_p->ib_px_p->px_dip, ino, 155 &sysino) != DDI_SUCCESS) { 156 DBG(DBG_IB, ib_p->ib_px_p->px_dip, 157 "px_ib_intr_disable: px_intr_devino_to_sysino() failed\n"); 158 159 mutex_exit(&ib_p->ib_intr_lock); 160 return; 161 } 162 163 PX_INTR_DISABLE(ib_p->ib_px_p->px_dip, sysino); 164 165 mutex_exit(&ib_p->ib_intr_lock); 166 } 167 168 169 void 170 px_ib_intr_dist_en(dev_info_t *dip, cpuid_t cpu_id, devino_t ino, 171 boolean_t wait_flag) 172 { 173 uint32_t old_cpu_id; 174 sysino_t sysino; 175 intr_valid_state_t enabled = 0; 176 hrtime_t start_time, prev, curr, interval, jump; 177 hrtime_t intr_timeout; 178 intr_state_t intr_state; 179 int e = DDI_SUCCESS; 180 181 DBG(DBG_IB, dip, "px_ib_intr_dist_en: ino=0x%x\n", ino); 182 183 if (px_lib_intr_devino_to_sysino(dip, ino, &sysino) != DDI_SUCCESS) { 184 DBG(DBG_IB, dip, "px_ib_intr_dist_en: " 185 "px_intr_devino_to_sysino() failed, ino 0x%x\n", ino); 186 return; 187 } 188 189 /* Skip enabling disabled interrupts */ 190 if (px_lib_intr_getvalid(dip, sysino, &enabled) != DDI_SUCCESS) { 191 DBG(DBG_IB, dip, "px_ib_intr_dist_en: px_intr_getvalid() " 192 "failed, sysino 0x%x\n", sysino); 193 return; 194 } 195 if (!enabled) 196 return; 197 198 /* Done if redistributed onto the same cpuid */ 199 if (px_lib_intr_gettarget(dip, sysino, &old_cpu_id) != DDI_SUCCESS) { 200 DBG(DBG_IB, dip, "px_ib_intr_dist_en: " 201 "px_intr_gettarget() failed\n"); 202 return; 203 } 204 if (cpu_id == old_cpu_id) 205 return; 206 207 if (!wait_flag) 208 goto done; 209 210 /* Busy wait on pending interrupts */ 211 PX_INTR_DISABLE(dip, sysino); 212 213 intr_timeout = px_intrpend_timeout; 214 jump = TICK_TO_NSEC(xc_tick_jump_limit); 215 216 for (curr = start_time = gethrtime(); !panicstr && 217 ((e = px_lib_intr_getstate(dip, sysino, &intr_state)) == 218 DDI_SUCCESS) && 219 (intr_state == INTR_DELIVERED_STATE); /* */) { 220 /* 221 * If we have a really large jump in hrtime, it is most 222 * probably because we entered the debugger (or OBP, 223 * in general). So, we adjust the timeout accordingly 224 * to prevent declaring an interrupt timeout. The 225 * master-interrupt mechanism in OBP should deliver 226 * the interrupts properly. 227 */ 228 prev = curr; 229 curr = gethrtime(); 230 interval = curr - prev; 231 if (interval > jump) 232 intr_timeout += interval; 233 if (curr - start_time > intr_timeout) { 234 cmn_err(CE_WARN, 235 "%s%d: px_ib_intr_dist_en: sysino 0x%lx(ino 0x%x) " 236 "from cpu id 0x%x to 0x%x timeout", 237 ddi_driver_name(dip), ddi_get_instance(dip), 238 sysino, ino, old_cpu_id, cpu_id); 239 240 e = DDI_FAILURE; 241 break; 242 } 243 } 244 245 if (e != DDI_SUCCESS) 246 DBG(DBG_IB, dip, "px_ib_intr_dist_en: failed, " 247 "ino 0x%x sysino 0x%x\n", ino, sysino); 248 249 done: 250 PX_INTR_ENABLE(dip, sysino, cpu_id); 251 } 252 253 static void 254 px_ib_cpu_ticks_to_ih_nsec(px_ib_t *ib_p, px_ih_t *ih_p, uint32_t cpu_id) 255 { 256 extern kmutex_t pxintr_ks_template_lock; 257 hrtime_t ticks; 258 259 /* 260 * Because we are updating two fields in ih_t we must lock 261 * pxintr_ks_template_lock to prevent someone from reading the 262 * kstats after we set ih_ticks to 0 and before we increment 263 * ih_nsec to compensate. 264 * 265 * We must also protect against the interrupt arriving and incrementing 266 * ih_ticks between the time we read it and when we reset it to 0. 267 * To do this we use atomic_swap. 268 */ 269 270 ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex)); 271 272 mutex_enter(&pxintr_ks_template_lock); 273 ticks = atomic_swap_64(&ih_p->ih_ticks, 0); 274 ih_p->ih_nsec += (uint64_t)tick2ns(ticks, cpu_id); 275 mutex_exit(&pxintr_ks_template_lock); 276 } 277 278 279 /* 280 * Redistribute interrupts of the specified weight. The first call has a weight 281 * of weight_max, which can be used to trigger initialization for 282 * redistribution. The inos with weight [weight_max, inf.) should be processed 283 * on the "weight == weight_max" call. This first call is followed by calls 284 * of decreasing weights, inos of that weight should be processed. The final 285 * call specifies a weight of zero, this can be used to trigger processing of 286 * stragglers. 287 */ 288 static void 289 px_ib_intr_redist(void *arg, int32_t weight_max, int32_t weight) 290 { 291 px_ib_t *ib_p = (px_ib_t *)arg; 292 px_t *px_p = ib_p->ib_px_p; 293 dev_info_t *dip = px_p->px_dip; 294 px_ino_t *ino_p; 295 px_ino_pil_t *ipil_p; 296 px_ih_t *ih_lst; 297 int32_t dweight = 0; 298 int i; 299 300 /* Redistribute internal interrupts */ 301 if (weight == 0) { 302 mutex_enter(&ib_p->ib_intr_lock); 303 px_ib_intr_dist_en(dip, intr_dist_cpuid(), 304 px_p->px_inos[PX_INTR_PEC], B_FALSE); 305 mutex_exit(&ib_p->ib_intr_lock); 306 307 px_hp_intr_redist(px_p); 308 } 309 310 /* Redistribute device interrupts */ 311 mutex_enter(&ib_p->ib_ino_lst_mutex); 312 px_msiq_redist(px_p); 313 314 for (ino_p = ib_p->ib_ino_lst; ino_p; ino_p = ino_p->ino_next_p) { 315 /* 316 * Recomputes the sum of interrupt weights of devices that 317 * share the same ino upon first call marked by 318 * (weight == weight_max). 319 */ 320 if (weight == weight_max) { 321 ino_p->ino_intr_weight = 0; 322 323 for (ipil_p = ino_p->ino_ipil_p; ipil_p; 324 ipil_p = ipil_p->ipil_next_p) { 325 for (i = 0, ih_lst = ipil_p->ipil_ih_head; 326 i < ipil_p->ipil_ih_size; i++, 327 ih_lst = ih_lst->ih_next) { 328 dweight = i_ddi_get_intr_weight( 329 ih_lst->ih_dip); 330 if (dweight > 0) 331 ino_p->ino_intr_weight += 332 dweight; 333 } 334 } 335 } 336 337 /* 338 * As part of redistributing weighted interrupts over cpus, 339 * nexus redistributes device interrupts and updates 340 * cpu weight. The purpose is for the most light weighted 341 * cpu to take the next interrupt and gain weight, therefore 342 * attention demanding device gains more cpu attention by 343 * making itself heavy. 344 */ 345 if ((weight == ino_p->ino_intr_weight) || 346 ((weight >= weight_max) && 347 (ino_p->ino_intr_weight >= weight_max))) { 348 uint32_t orig_cpuid = ino_p->ino_cpuid; 349 350 if (cpu[orig_cpuid] == NULL) 351 orig_cpuid = CPU->cpu_id; 352 353 DBG(DBG_IB, dip, "px_ib_intr_redist: sysino 0x%llx " 354 "current cpuid 0x%x current default cpuid 0x%x\n", 355 ino_p->ino_sysino, ino_p->ino_cpuid, 356 ino_p->ino_default_cpuid); 357 358 /* select target cpuid and mark ino established */ 359 if (ino_p->ino_default_cpuid == -1) 360 ino_p->ino_cpuid = ino_p->ino_default_cpuid = 361 intr_dist_cpuid(); 362 else if ((ino_p->ino_cpuid != 363 ino_p->ino_default_cpuid) && 364 cpu[ino_p->ino_default_cpuid] && 365 cpu_intr_on(cpu[ino_p->ino_default_cpuid])) 366 ino_p->ino_cpuid = ino_p->ino_default_cpuid; 367 else if (!cpu_intr_on(cpu[ino_p->ino_cpuid])) 368 ino_p->ino_cpuid = intr_dist_cpuid(); 369 370 DBG(DBG_IB, dip, "px_ib_intr_redist: sysino 0x%llx " 371 "new cpuid 0x%x new default cpuid 0x%x\n", 372 ino_p->ino_sysino, ino_p->ino_cpuid, 373 ino_p->ino_default_cpuid); 374 375 /* Add device weight to targeted cpu. */ 376 for (ipil_p = ino_p->ino_ipil_p; ipil_p; 377 ipil_p = ipil_p->ipil_next_p) { 378 for (i = 0, ih_lst = ipil_p->ipil_ih_head; 379 i < ipil_p->ipil_ih_size; i++, 380 ih_lst = ih_lst->ih_next) { 381 382 dweight = i_ddi_get_intr_weight( 383 ih_lst->ih_dip); 384 intr_dist_cpuid_add_device_weight( 385 ino_p->ino_cpuid, ih_lst->ih_dip, 386 dweight); 387 388 /* 389 * Different cpus may have different 390 * clock speeds. to account for this, 391 * whenever an interrupt is moved to a 392 * new CPU, we convert the accumulated 393 * ticks into nsec, based upon the clock 394 * rate of the prior CPU. 395 * 396 * It is possible that the prior CPU no 397 * longer exists. In this case, fall 398 * back to using this CPU's clock rate. 399 * 400 * Note that the value in ih_ticks has 401 * already been corrected for any power 402 * savings mode which might have been 403 * in effect. 404 */ 405 px_ib_cpu_ticks_to_ih_nsec(ib_p, ih_lst, 406 orig_cpuid); 407 } 408 } 409 410 /* enable interrupt on new targeted cpu */ 411 px_ib_intr_dist_en(dip, ino_p->ino_cpuid, 412 ino_p->ino_ino, B_TRUE); 413 } 414 } 415 mutex_exit(&ib_p->ib_ino_lst_mutex); 416 } 417 418 /* 419 * Reset interrupts to IDLE. This function is called during 420 * panic handling after redistributing interrupts; it's needed to 421 * support dumping to network devices after 'sync' from OBP. 422 * 423 * N.B. This routine runs in a context where all other threads 424 * are permanently suspended. 425 */ 426 static uint_t 427 px_ib_intr_reset(void *arg) 428 { 429 px_ib_t *ib_p = (px_ib_t *)arg; 430 431 DBG(DBG_IB, ib_p->ib_px_p->px_dip, "px_ib_intr_reset\n"); 432 433 if (px_lib_intr_reset(ib_p->ib_px_p->px_dip) != DDI_SUCCESS) 434 return (BF_FATAL); 435 436 return (BF_NONE); 437 } 438 439 /* 440 * Locate px_ino_t structure on ib_p->ib_ino_lst according to ino# 441 * returns NULL if not found. 442 */ 443 px_ino_t * 444 px_ib_locate_ino(px_ib_t *ib_p, devino_t ino_num) 445 { 446 px_ino_t *ino_p = ib_p->ib_ino_lst; 447 448 ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex)); 449 450 for (; ino_p && ino_p->ino_ino != ino_num; ino_p = ino_p->ino_next_p) 451 ; 452 453 return (ino_p); 454 } 455 456 px_ino_t * 457 px_ib_alloc_ino(px_ib_t *ib_p, devino_t ino_num) 458 { 459 sysino_t sysino; 460 px_ino_t *ino_p; 461 462 if (px_lib_intr_devino_to_sysino(ib_p->ib_px_p->px_dip, 463 ino_num, &sysino) != DDI_SUCCESS) 464 return (NULL); 465 466 ino_p = kmem_zalloc(sizeof (px_ino_t), KM_SLEEP); 467 468 ino_p->ino_next_p = ib_p->ib_ino_lst; 469 ib_p->ib_ino_lst = ino_p; 470 471 ino_p->ino_ino = ino_num; 472 ino_p->ino_sysino = sysino; 473 ino_p->ino_ib_p = ib_p; 474 ino_p->ino_unclaimed_intrs = 0; 475 ino_p->ino_lopil = 0; 476 ino_p->ino_cpuid = ino_p->ino_default_cpuid = (cpuid_t)-1; 477 478 return (ino_p); 479 } 480 481 px_ino_pil_t * 482 px_ib_new_ino_pil(px_ib_t *ib_p, devino_t ino_num, uint_t pil, px_ih_t *ih_p) 483 { 484 px_ino_pil_t *ipil_p = kmem_zalloc(sizeof (px_ino_pil_t), KM_SLEEP); 485 px_ino_t *ino_p; 486 487 if ((ino_p = px_ib_locate_ino(ib_p, ino_num)) == NULL) 488 ino_p = px_ib_alloc_ino(ib_p, ino_num); 489 490 ASSERT(ino_p != NULL); 491 492 ih_p->ih_next = ih_p; 493 ipil_p->ipil_pil = pil; 494 ipil_p->ipil_ih_head = ih_p; 495 ipil_p->ipil_ih_tail = ih_p; 496 ipil_p->ipil_ih_start = ih_p; 497 ipil_p->ipil_ih_size = 1; 498 ipil_p->ipil_ino_p = ino_p; 499 500 ipil_p->ipil_next_p = ino_p->ino_ipil_p; 501 ino_p->ino_ipil_p = ipil_p; 502 ino_p->ino_ipil_size++; 503 504 if ((ino_p->ino_lopil == 0) || (ino_p->ino_lopil > pil)) 505 ino_p->ino_lopil = pil; 506 507 return (ipil_p); 508 } 509 510 void 511 px_ib_delete_ino_pil(px_ib_t *ib_p, px_ino_pil_t *ipil_p) 512 { 513 px_ino_t *ino_p = ipil_p->ipil_ino_p; 514 ushort_t pil = ipil_p->ipil_pil; 515 px_ino_pil_t *prev, *next; 516 517 ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex)); 518 519 if (ino_p->ino_ipil_p == ipil_p) 520 ino_p->ino_ipil_p = ipil_p->ipil_next_p; 521 else { 522 for (prev = next = ino_p->ino_ipil_p; next != ipil_p; 523 prev = next, next = next->ipil_next_p) 524 ; 525 526 if (prev) 527 prev->ipil_next_p = ipil_p->ipil_next_p; 528 } 529 530 kmem_free(ipil_p, sizeof (px_ino_pil_t)); 531 532 if ((--ino_p->ino_ipil_size) && (ino_p->ino_lopil == pil)) { 533 for (next = ino_p->ino_ipil_p, pil = next->ipil_pil; 534 next; next = next->ipil_next_p) { 535 536 if (pil > next->ipil_pil) 537 pil = next->ipil_pil; 538 } 539 540 /* 541 * Value stored in pil should be the lowest pil. 542 */ 543 ino_p->ino_lopil = pil; 544 } 545 546 if (ino_p->ino_ipil_size) 547 return; 548 549 ino_p->ino_lopil = 0; 550 551 if (ino_p->ino_msiq_p) 552 return; 553 554 if (ib_p->ib_ino_lst == ino_p) 555 ib_p->ib_ino_lst = ino_p->ino_next_p; 556 else { 557 px_ino_t *list = ib_p->ib_ino_lst; 558 559 for (; list->ino_next_p != ino_p; list = list->ino_next_p) 560 ; 561 list->ino_next_p = ino_p->ino_next_p; 562 } 563 } 564 565 /* 566 * Free all ino when we are detaching. 567 */ 568 void 569 px_ib_free_ino_all(px_ib_t *ib_p) 570 { 571 px_ino_t *ino_p = ib_p->ib_ino_lst; 572 px_ino_t *next = NULL; 573 574 while (ino_p) { 575 next = ino_p->ino_next_p; 576 kmem_free(ino_p, sizeof (px_ino_t)); 577 ino_p = next; 578 } 579 } 580 581 /* 582 * Locate px_ino_pil_t structure on ino_p->ino_ipil_p according to ino# 583 * returns NULL if not found. 584 */ 585 px_ino_pil_t * 586 px_ib_ino_locate_ipil(px_ino_t *ino_p, uint_t pil) 587 { 588 px_ino_pil_t *ipil_p = ino_p->ino_ipil_p; 589 590 for (; ipil_p && ipil_p->ipil_pil != pil; ipil_p = ipil_p->ipil_next_p) 591 ; 592 593 return (ipil_p); 594 } 595 596 int 597 px_ib_ino_add_intr(px_t *px_p, px_ino_pil_t *ipil_p, px_ih_t *ih_p) 598 { 599 px_ino_t *ino_p = ipil_p->ipil_ino_p; 600 px_ib_t *ib_p = ino_p->ino_ib_p; 601 devino_t ino = ino_p->ino_ino; 602 sysino_t sysino = ino_p->ino_sysino; 603 dev_info_t *dip = px_p->px_dip; 604 cpuid_t curr_cpu; 605 hrtime_t start_time; 606 intr_state_t intr_state; 607 int ret = DDI_SUCCESS; 608 609 ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex)); 610 ASSERT(ib_p == px_p->px_ib_p); 611 612 DBG(DBG_IB, dip, "px_ib_ino_add_intr ino=%x\n", ino_p->ino_ino); 613 614 /* Disable the interrupt */ 615 if ((ret = px_lib_intr_gettarget(dip, sysino, 616 &curr_cpu)) != DDI_SUCCESS) { 617 DBG(DBG_IB, dip, 618 "px_ib_ino_add_intr px_intr_gettarget() failed\n"); 619 620 return (ret); 621 } 622 623 PX_INTR_DISABLE(dip, sysino); 624 625 /* Busy wait on pending interrupt */ 626 for (start_time = gethrtime(); !panicstr && 627 ((ret = px_lib_intr_getstate(dip, sysino, &intr_state)) 628 == DDI_SUCCESS) && (intr_state == INTR_DELIVERED_STATE); /* */) { 629 if (gethrtime() - start_time > px_intrpend_timeout) { 630 cmn_err(CE_WARN, "%s%d: px_ib_ino_add_intr: pending " 631 "sysino 0x%lx(ino 0x%x) timeout", 632 ddi_driver_name(dip), ddi_get_instance(dip), 633 sysino, ino); 634 635 ret = DDI_FAILURE; 636 break; 637 } 638 } 639 640 /* 641 * If the interrupt was previously blocked (left in pending state) 642 * because of jabber we need to clear the pending state in case the 643 * jabber has gone away. 644 */ 645 if (ino_p->ino_unclaimed_intrs > px_unclaimed_intr_max) { 646 cmn_err(CE_WARN, 647 "%s%d: px_ib_ino_add_intr: ino 0x%x has been unblocked", 648 ddi_driver_name(dip), ddi_get_instance(dip), ino); 649 650 ino_p->ino_unclaimed_intrs = 0; 651 ret = px_lib_intr_setstate(dip, sysino, INTR_IDLE_STATE); 652 } 653 654 if (ret != DDI_SUCCESS) { 655 DBG(DBG_IB, dip, "px_ib_ino_add_intr: failed, " 656 "ino 0x%x sysino 0x%x\n", ino, sysino); 657 658 return (ret); 659 } 660 661 /* Link up px_ih_t */ 662 ih_p->ih_next = ipil_p->ipil_ih_head; 663 ipil_p->ipil_ih_tail->ih_next = ih_p; 664 ipil_p->ipil_ih_tail = ih_p; 665 666 ipil_p->ipil_ih_start = ipil_p->ipil_ih_head; 667 ipil_p->ipil_ih_size++; 668 669 /* Re-enable interrupt */ 670 PX_INTR_ENABLE(dip, sysino, curr_cpu); 671 672 return (ret); 673 } 674 675 /* 676 * Removes px_ih_t from the ino's link list. 677 * uses hardware mutex to lock out interrupt threads. 678 * Side effects: interrupt belongs to that ino is turned off on return. 679 * if we are sharing PX slot with other inos, the caller needs 680 * to turn it back on. 681 */ 682 int 683 px_ib_ino_rem_intr(px_t *px_p, px_ino_pil_t *ipil_p, px_ih_t *ih_p) 684 { 685 px_ino_t *ino_p = ipil_p->ipil_ino_p; 686 devino_t ino = ino_p->ino_ino; 687 sysino_t sysino = ino_p->ino_sysino; 688 dev_info_t *dip = px_p->px_dip; 689 px_ih_t *ih_lst = ipil_p->ipil_ih_head; 690 hrtime_t start_time; 691 intr_state_t intr_state; 692 int i, ret = DDI_SUCCESS; 693 694 ASSERT(MUTEX_HELD(&ino_p->ino_ib_p->ib_ino_lst_mutex)); 695 696 DBG(DBG_IB, px_p->px_dip, "px_ib_ino_rem_intr ino=%x\n", 697 ino_p->ino_ino); 698 699 /* Disable the interrupt */ 700 PX_INTR_DISABLE(px_p->px_dip, sysino); 701 702 if (ipil_p->ipil_ih_size == 1) { 703 if (ih_lst != ih_p) 704 goto not_found; 705 706 /* No need to set head/tail as ino_p will be freed */ 707 goto reset; 708 } 709 710 /* Busy wait on pending interrupt */ 711 for (start_time = gethrtime(); !panicstr && 712 ((ret = px_lib_intr_getstate(dip, sysino, &intr_state)) 713 == DDI_SUCCESS) && (intr_state == INTR_DELIVERED_STATE); /* */) { 714 if (gethrtime() - start_time > px_intrpend_timeout) { 715 cmn_err(CE_WARN, "%s%d: px_ib_ino_rem_intr: pending " 716 "sysino 0x%lx(ino 0x%x) timeout", 717 ddi_driver_name(dip), ddi_get_instance(dip), 718 sysino, ino); 719 720 ret = DDI_FAILURE; 721 break; 722 } 723 } 724 725 /* 726 * If the interrupt was previously blocked (left in pending state) 727 * because of jabber we need to clear the pending state in case the 728 * jabber has gone away. 729 */ 730 if (ino_p->ino_unclaimed_intrs > px_unclaimed_intr_max) { 731 cmn_err(CE_WARN, "%s%d: px_ib_ino_rem_intr: " 732 "ino 0x%x has been unblocked", 733 ddi_driver_name(dip), ddi_get_instance(dip), ino); 734 735 ino_p->ino_unclaimed_intrs = 0; 736 ret = px_lib_intr_setstate(dip, sysino, INTR_IDLE_STATE); 737 } 738 739 if (ret != DDI_SUCCESS) { 740 DBG(DBG_IB, dip, "px_ib_ino_rem_intr: failed, " 741 "ino 0x%x sysino 0x%x\n", ino, sysino); 742 743 return (ret); 744 } 745 746 /* Search the link list for ih_p */ 747 for (i = 0; (i < ipil_p->ipil_ih_size) && 748 (ih_lst->ih_next != ih_p); i++, ih_lst = ih_lst->ih_next) 749 ; 750 751 if (ih_lst->ih_next != ih_p) 752 goto not_found; 753 754 /* Remove ih_p from the link list and maintain the head/tail */ 755 ih_lst->ih_next = ih_p->ih_next; 756 757 if (ipil_p->ipil_ih_head == ih_p) 758 ipil_p->ipil_ih_head = ih_p->ih_next; 759 if (ipil_p->ipil_ih_tail == ih_p) 760 ipil_p->ipil_ih_tail = ih_lst; 761 762 ipil_p->ipil_ih_start = ipil_p->ipil_ih_head; 763 764 reset: 765 if (ih_p->ih_config_handle) 766 pci_config_teardown(&ih_p->ih_config_handle); 767 if (ih_p->ih_ksp != NULL) 768 kstat_delete(ih_p->ih_ksp); 769 770 kmem_free(ih_p, sizeof (px_ih_t)); 771 ipil_p->ipil_ih_size--; 772 773 return (ret); 774 775 not_found: 776 DBG(DBG_R_INTX, ino_p->ino_ib_p->ib_px_p->px_dip, 777 "ino_p=%x does not have ih_p=%x\n", ino_p, ih_p); 778 779 return (DDI_FAILURE); 780 } 781 782 px_ih_t * 783 px_ib_intr_locate_ih(px_ino_pil_t *ipil_p, dev_info_t *rdip, 784 uint32_t inum, msiq_rec_type_t rec_type, msgcode_t msg_code) 785 { 786 px_ih_t *ih_p = ipil_p->ipil_ih_head; 787 int i; 788 789 for (i = 0; i < ipil_p->ipil_ih_size; i++, ih_p = ih_p->ih_next) { 790 if ((ih_p->ih_dip == rdip) && (ih_p->ih_inum == inum) && 791 (ih_p->ih_rec_type == rec_type) && 792 (ih_p->ih_msg_code == msg_code)) 793 return (ih_p); 794 } 795 796 return ((px_ih_t *)NULL); 797 } 798 799 px_ih_t * 800 px_ib_alloc_ih(dev_info_t *rdip, uint32_t inum, 801 uint_t (*int_handler)(caddr_t int_handler_arg1, caddr_t int_handler_arg2), 802 caddr_t int_handler_arg1, caddr_t int_handler_arg2, 803 msiq_rec_type_t rec_type, msgcode_t msg_code) 804 { 805 px_ih_t *ih_p; 806 807 ih_p = kmem_alloc(sizeof (px_ih_t), KM_SLEEP); 808 ih_p->ih_dip = rdip; 809 ih_p->ih_inum = inum; 810 ih_p->ih_intr_state = PX_INTR_STATE_DISABLE; 811 ih_p->ih_intr_flags = PX_INTR_IDLE; 812 ih_p->ih_handler = int_handler; 813 ih_p->ih_handler_arg1 = int_handler_arg1; 814 ih_p->ih_handler_arg2 = int_handler_arg2; 815 ih_p->ih_config_handle = NULL; 816 ih_p->ih_rec_type = rec_type; 817 ih_p->ih_msg_code = msg_code; 818 ih_p->ih_nsec = 0; 819 ih_p->ih_ticks = 0; 820 ih_p->ih_ksp = NULL; 821 822 return (ih_p); 823 } 824 825 int 826 px_ib_update_intr_state(px_t *px_p, dev_info_t *rdip, 827 uint_t inum, devino_t ino, uint_t pil, 828 uint_t new_intr_state, msiq_rec_type_t rec_type, 829 msgcode_t msg_code) 830 { 831 px_ib_t *ib_p = px_p->px_ib_p; 832 px_ino_t *ino_p; 833 px_ino_pil_t *ipil_p; 834 px_ih_t *ih_p; 835 int ret = DDI_FAILURE; 836 837 DBG(DBG_IB, px_p->px_dip, "px_ib_update_intr_state: %s%d " 838 "inum %x devino %x pil %x state %x\n", ddi_driver_name(rdip), 839 ddi_get_instance(rdip), inum, ino, pil, new_intr_state); 840 841 mutex_enter(&ib_p->ib_ino_lst_mutex); 842 843 ino_p = px_ib_locate_ino(ib_p, ino); 844 if (ino_p && (ipil_p = px_ib_ino_locate_ipil(ino_p, pil))) { 845 if (ih_p = px_ib_intr_locate_ih(ipil_p, rdip, inum, rec_type, 846 msg_code)) { 847 ih_p->ih_intr_state = new_intr_state; 848 ret = DDI_SUCCESS; 849 } 850 } 851 852 mutex_exit(&ib_p->ib_ino_lst_mutex); 853 return (ret); 854 } 855 856 857 /* 858 * Get interrupt CPU for a given ino. 859 * Return info only for inos which are already mapped to devices. 860 */ 861 /*ARGSUSED*/ 862 int 863 px_ib_get_intr_target(px_t *px_p, devino_t ino, cpuid_t *cpu_id_p) 864 { 865 dev_info_t *dip = px_p->px_dip; 866 sysino_t sysino; 867 int ret; 868 869 DBG(DBG_IB, px_p->px_dip, "px_ib_get_intr_target: devino %x\n", ino); 870 871 /* Convert leaf-wide intr to system-wide intr */ 872 if (px_lib_intr_devino_to_sysino(dip, ino, &sysino) != DDI_SUCCESS) 873 return (DDI_FAILURE); 874 875 ret = px_lib_intr_gettarget(dip, sysino, cpu_id_p); 876 877 DBG(DBG_IB, px_p->px_dip, "px_ib_get_intr_target: cpu_id %x\n", 878 *cpu_id_p); 879 880 return (ret); 881 } 882 883 884 /* 885 * Associate a new CPU with a given ino. 886 * Operate only on INOs which are already mapped to devices. 887 */ 888 int 889 px_ib_set_intr_target(px_t *px_p, devino_t ino, cpuid_t cpu_id) 890 { 891 dev_info_t *dip = px_p->px_dip; 892 cpuid_t old_cpu_id; 893 sysino_t sysino; 894 int ret = DDI_SUCCESS; 895 extern const int _ncpu; 896 extern cpu_t *cpu[]; 897 898 DBG(DBG_IB, px_p->px_dip, "px_ib_set_intr_target: devino %x " 899 "cpu_id %x\n", ino, cpu_id); 900 901 mutex_enter(&cpu_lock); 902 903 /* Convert leaf-wide intr to system-wide intr */ 904 if (px_lib_intr_devino_to_sysino(dip, ino, &sysino) != DDI_SUCCESS) { 905 ret = DDI_FAILURE; 906 goto done; 907 } 908 909 if (px_lib_intr_gettarget(dip, sysino, &old_cpu_id) != DDI_SUCCESS) { 910 ret = DDI_FAILURE; 911 goto done; 912 } 913 914 /* 915 * Get lock, validate cpu and write it. 916 */ 917 if ((cpu_id < _ncpu) && (cpu[cpu_id] && cpu_is_online(cpu[cpu_id]))) { 918 DBG(DBG_IB, dip, "px_ib_set_intr_target: Enabling CPU %d\n", 919 cpu_id); 920 px_ib_intr_dist_en(dip, cpu_id, ino, B_TRUE); 921 px_ib_log_new_cpu(px_p->px_ib_p, old_cpu_id, cpu_id, ino); 922 } else { /* Invalid cpu */ 923 DBG(DBG_IB, dip, "px_ib_set_intr_target: Invalid cpuid %x\n", 924 cpu_id); 925 ret = DDI_EINVAL; 926 } 927 928 done: 929 mutex_exit(&cpu_lock); 930 return (ret); 931 } 932 933 hrtime_t px_ib_msix_retarget_timeout = 120ll * NANOSEC; /* 120 seconds */ 934 935 /* 936 * Associate a new CPU with a given MSI/X. 937 * Operate only on MSI/Xs which are already mapped to devices. 938 */ 939 int 940 px_ib_set_msix_target(px_t *px_p, ddi_intr_handle_impl_t *hdlp, 941 msinum_t msi_num, cpuid_t cpu_id) 942 { 943 px_ib_t *ib_p = px_p->px_ib_p; 944 px_msi_state_t *msi_state_p = &px_p->px_ib_p->ib_msi_state; 945 dev_info_t *dip = px_p->px_dip; 946 dev_info_t *rdip = hdlp->ih_dip; 947 msiqid_t msiq_id, old_msiq_id; 948 pci_msi_state_t msi_state; 949 msiq_rec_type_t msiq_rec_type; 950 msi_type_t msi_type; 951 px_ino_t *ino_p; 952 px_ih_t *ih_p, *old_ih_p; 953 cpuid_t old_cpu_id; 954 hrtime_t start_time, end_time; 955 int ret = DDI_SUCCESS; 956 extern const int _ncpu; 957 extern cpu_t *cpu[]; 958 959 DBG(DBG_IB, dip, "px_ib_set_msix_target: msi_num %x new cpu_id %x\n", 960 msi_num, cpu_id); 961 962 mutex_enter(&cpu_lock); 963 964 /* Check for MSI64 support */ 965 if ((hdlp->ih_cap & DDI_INTR_FLAG_MSI64) && msi_state_p->msi_addr64) { 966 msiq_rec_type = MSI64_REC; 967 msi_type = MSI64_TYPE; 968 } else { 969 msiq_rec_type = MSI32_REC; 970 msi_type = MSI32_TYPE; 971 } 972 973 if ((ret = px_lib_msi_getmsiq(dip, msi_num, 974 &old_msiq_id)) != DDI_SUCCESS) { 975 976 mutex_exit(&cpu_lock); 977 return (ret); 978 } 979 980 DBG(DBG_IB, dip, "px_ib_set_msix_target: current msiq 0x%x\n", 981 old_msiq_id); 982 983 if ((ret = px_ib_get_intr_target(px_p, 984 px_msiqid_to_devino(px_p, old_msiq_id), 985 &old_cpu_id)) != DDI_SUCCESS) { 986 987 mutex_exit(&cpu_lock); 988 return (ret); 989 } 990 991 DBG(DBG_IB, dip, "px_ib_set_msix_target: current cpuid 0x%x\n", 992 old_cpu_id); 993 994 if (cpu_id == old_cpu_id) { 995 996 mutex_exit(&cpu_lock); 997 return (DDI_SUCCESS); 998 } 999 1000 /* 1001 * Get lock, validate cpu and write it. 1002 */ 1003 if (!((cpu_id < _ncpu) && (cpu[cpu_id] && 1004 cpu_is_online(cpu[cpu_id])))) { 1005 /* Invalid cpu */ 1006 DBG(DBG_IB, dip, "px_ib_set_msix_target: Invalid cpuid %x\n", 1007 cpu_id); 1008 1009 mutex_exit(&cpu_lock); 1010 return (DDI_EINVAL); 1011 } 1012 1013 DBG(DBG_IB, dip, "px_ib_set_msix_target: Enabling CPU %d\n", cpu_id); 1014 1015 if ((ret = px_add_msiq_intr(dip, rdip, hdlp, 1016 msiq_rec_type, msi_num, cpu_id, &msiq_id)) != DDI_SUCCESS) { 1017 DBG(DBG_IB, dip, "px_ib_set_msix_target: Add MSI handler " 1018 "failed, rdip 0x%p msi 0x%x\n", rdip, msi_num); 1019 1020 mutex_exit(&cpu_lock); 1021 return (ret); 1022 } 1023 1024 if ((ret = px_lib_msi_setmsiq(dip, msi_num, 1025 msiq_id, msi_type)) != DDI_SUCCESS) { 1026 mutex_exit(&cpu_lock); 1027 1028 (void) px_rem_msiq_intr(dip, rdip, 1029 hdlp, msiq_rec_type, msi_num, msiq_id); 1030 1031 return (ret); 1032 } 1033 1034 if ((ret = px_ib_update_intr_state(px_p, rdip, hdlp->ih_inum, 1035 px_msiqid_to_devino(px_p, msiq_id), hdlp->ih_pri, 1036 PX_INTR_STATE_ENABLE, msiq_rec_type, msi_num)) != DDI_SUCCESS) { 1037 mutex_exit(&cpu_lock); 1038 1039 (void) px_rem_msiq_intr(dip, rdip, 1040 hdlp, msiq_rec_type, msi_num, msiq_id); 1041 1042 return (ret); 1043 } 1044 1045 mutex_exit(&cpu_lock); 1046 1047 /* 1048 * Remove the old handler, but first ensure it is finished. 1049 * 1050 * Each handler sets its PENDING flag before it clears the MSI state. 1051 * Then it clears that flag when finished. If a re-target occurs while 1052 * the MSI state is DELIVERED, then it is not yet known which of the 1053 * two handlers will take the interrupt. So the re-target operation 1054 * sets a RETARGET flag on both handlers in that case. Monitoring both 1055 * flags on both handlers then determines when the old handler can be 1056 * be safely removed. 1057 */ 1058 mutex_enter(&ib_p->ib_ino_lst_mutex); 1059 1060 ino_p = px_ib_locate_ino(ib_p, px_msiqid_to_devino(px_p, old_msiq_id)); 1061 old_ih_p = px_ib_intr_locate_ih(px_ib_ino_locate_ipil(ino_p, 1062 hdlp->ih_pri), rdip, hdlp->ih_inum, msiq_rec_type, msi_num); 1063 1064 ino_p = px_ib_locate_ino(ib_p, px_msiqid_to_devino(px_p, msiq_id)); 1065 ih_p = px_ib_intr_locate_ih(px_ib_ino_locate_ipil(ino_p, hdlp->ih_pri), 1066 rdip, hdlp->ih_inum, msiq_rec_type, msi_num); 1067 1068 if ((ret = px_lib_msi_getstate(dip, msi_num, 1069 &msi_state)) != DDI_SUCCESS) { 1070 (void) px_rem_msiq_intr(dip, rdip, 1071 hdlp, msiq_rec_type, msi_num, msiq_id); 1072 1073 mutex_exit(&ib_p->ib_ino_lst_mutex); 1074 return (ret); 1075 } 1076 1077 if (msi_state == PCI_MSI_STATE_DELIVERED) { 1078 ih_p->ih_intr_flags |= PX_INTR_RETARGET; 1079 old_ih_p->ih_intr_flags |= PX_INTR_RETARGET; 1080 } 1081 1082 start_time = gethrtime(); 1083 while (((ih_p->ih_intr_flags & PX_INTR_RETARGET) && 1084 (old_ih_p->ih_intr_flags & PX_INTR_RETARGET)) || 1085 (old_ih_p->ih_intr_flags & PX_INTR_PENDING)) { 1086 1087 /* Wait for one second */ 1088 delay(drv_usectohz(1000000)); 1089 1090 end_time = gethrtime() - start_time; 1091 if (end_time > px_ib_msix_retarget_timeout) { 1092 cmn_err(CE_WARN, "MSIX retarget %x is not completed, " 1093 "even after waiting %llx ticks\n", 1094 msi_num, end_time); 1095 break; 1096 } 1097 } 1098 1099 ih_p->ih_intr_flags &= ~(PX_INTR_RETARGET); 1100 1101 mutex_exit(&ib_p->ib_ino_lst_mutex); 1102 1103 ret = px_rem_msiq_intr(dip, rdip, 1104 hdlp, msiq_rec_type, msi_num, old_msiq_id); 1105 1106 return (ret); 1107 } 1108 1109 1110 static void 1111 px_fill_in_intr_devs(pcitool_intr_dev_t *dev, char *driver_name, 1112 char *path_name, int instance) 1113 { 1114 (void) strncpy(dev->driver_name, driver_name, MAXMODCONFNAME-1); 1115 dev->driver_name[MAXMODCONFNAME] = '\0'; 1116 (void) strncpy(dev->path, path_name, MAXPATHLEN-1); 1117 dev->dev_inst = instance; 1118 } 1119 1120 1121 /* 1122 * Return the dips or number of dips associated with a given interrupt block. 1123 * Size of dips array arg is passed in as dips_ret arg. 1124 * Number of dips returned is returned in dips_ret arg. 1125 * Array of dips gets returned in the dips argument. 1126 * Function returns number of dips existing for the given interrupt block. 1127 * 1128 * Note: this function assumes an enabled/valid INO, which is why it returns 1129 * the px node and (Internal) when it finds no other devices (and *devs_ret > 0) 1130 */ 1131 uint8_t 1132 pxtool_ib_get_ino_devs(px_t *px_p, uint32_t ino, uint32_t msi_num, 1133 uint8_t *devs_ret, pcitool_intr_dev_t *devs) 1134 { 1135 px_ib_t *ib_p = px_p->px_ib_p; 1136 px_ino_t *ino_p; 1137 px_ino_pil_t *ipil_p; 1138 px_ih_t *ih_p; 1139 uint32_t num_devs = 0; 1140 char pathname[MAXPATHLEN]; 1141 int i, j; 1142 1143 mutex_enter(&ib_p->ib_ino_lst_mutex); 1144 ino_p = px_ib_locate_ino(ib_p, ino); 1145 if (ino_p != NULL) { 1146 for (j = 0, ipil_p = ino_p->ino_ipil_p; ipil_p; 1147 ipil_p = ipil_p->ipil_next_p) { 1148 num_devs += ipil_p->ipil_ih_size; 1149 1150 for (i = 0, ih_p = ipil_p->ipil_ih_head; 1151 ((i < ipil_p->ipil_ih_size) && (i < *devs_ret)); 1152 i++, j++, ih_p = ih_p->ih_next) { 1153 (void) ddi_pathname(ih_p->ih_dip, pathname); 1154 1155 if (ih_p->ih_msg_code == msi_num) { 1156 num_devs = *devs_ret = 1; 1157 px_fill_in_intr_devs(&devs[0], 1158 (char *)ddi_driver_name( 1159 ih_p->ih_dip), pathname, 1160 ddi_get_instance(ih_p->ih_dip)); 1161 goto done; 1162 } 1163 1164 px_fill_in_intr_devs(&devs[j], 1165 (char *)ddi_driver_name(ih_p->ih_dip), 1166 pathname, ddi_get_instance(ih_p->ih_dip)); 1167 } 1168 } 1169 1170 *devs_ret = j; 1171 } else if (*devs_ret > 0) { 1172 (void) ddi_pathname(px_p->px_dip, pathname); 1173 strcat(pathname, " (Internal)"); 1174 px_fill_in_intr_devs(&devs[0], 1175 (char *)ddi_driver_name(px_p->px_dip), pathname, 1176 ddi_get_instance(px_p->px_dip)); 1177 num_devs = *devs_ret = 1; 1178 } 1179 1180 done: 1181 mutex_exit(&ib_p->ib_ino_lst_mutex); 1182 1183 return (num_devs); 1184 } 1185 1186 1187 int 1188 pxtool_ib_get_msi_info(px_t *px_p, devino_t ino, msinum_t msi_num, 1189 ddi_intr_handle_impl_t *hdlp) 1190 { 1191 px_ib_t *ib_p = px_p->px_ib_p; 1192 px_ino_t *ino_p; 1193 px_ino_pil_t *ipil_p; 1194 px_ih_t *ih_p; 1195 int i; 1196 1197 mutex_enter(&ib_p->ib_ino_lst_mutex); 1198 1199 if ((ino_p = px_ib_locate_ino(ib_p, ino)) == NULL) { 1200 mutex_exit(&ib_p->ib_ino_lst_mutex); 1201 return (DDI_FAILURE); 1202 } 1203 1204 for (ipil_p = ino_p->ino_ipil_p; ipil_p; 1205 ipil_p = ipil_p->ipil_next_p) { 1206 for (i = 0, ih_p = ipil_p->ipil_ih_head; 1207 ((i < ipil_p->ipil_ih_size) && ih_p); 1208 i++, ih_p = ih_p->ih_next) { 1209 1210 if (ih_p->ih_msg_code != msi_num) 1211 continue; 1212 1213 hdlp->ih_dip = ih_p->ih_dip; 1214 hdlp->ih_inum = ih_p->ih_inum; 1215 hdlp->ih_cb_func = ih_p->ih_handler; 1216 hdlp->ih_cb_arg1 = ih_p->ih_handler_arg1; 1217 hdlp->ih_cb_arg2 = ih_p->ih_handler_arg2; 1218 if (ih_p->ih_rec_type == MSI64_REC) 1219 hdlp->ih_cap = DDI_INTR_FLAG_MSI64; 1220 hdlp->ih_pri = ipil_p->ipil_pil; 1221 hdlp->ih_ver = DDI_INTR_VERSION; 1222 1223 mutex_exit(&ib_p->ib_ino_lst_mutex); 1224 return (DDI_SUCCESS); 1225 } 1226 } 1227 1228 mutex_exit(&ib_p->ib_ino_lst_mutex); 1229 return (DDI_FAILURE); 1230 } 1231 1232 void 1233 px_ib_log_new_cpu(px_ib_t *ib_p, cpuid_t old_cpu_id, cpuid_t new_cpu_id, 1234 uint32_t ino) 1235 { 1236 px_ino_t *ino_p; 1237 px_ino_pil_t *ipil_p; 1238 px_ih_t *ih_p; 1239 int i; 1240 1241 mutex_enter(&ib_p->ib_ino_lst_mutex); 1242 1243 /* Log in OS data structures the new CPU. */ 1244 if (ino_p = px_ib_locate_ino(ib_p, ino)) { 1245 1246 /* Log in OS data structures the new CPU. */ 1247 ino_p->ino_cpuid = new_cpu_id; 1248 1249 for (ipil_p = ino_p->ino_ipil_p; ipil_p; 1250 ipil_p = ipil_p->ipil_next_p) { 1251 for (i = 0, ih_p = ipil_p->ipil_ih_head; 1252 (i < ipil_p->ipil_ih_size); 1253 i++, ih_p = ih_p->ih_next) { 1254 /* 1255 * Account for any residual time 1256 * to be logged for old cpu. 1257 */ 1258 px_ib_cpu_ticks_to_ih_nsec(ib_p, 1259 ih_p, old_cpu_id); 1260 } 1261 } 1262 } 1263 1264 mutex_exit(&ib_p->ib_ino_lst_mutex); 1265 } 1266