1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 /* 27 * PX Interrupt Block implementation 28 */ 29 30 #include <sys/types.h> 31 #include <sys/kmem.h> 32 #include <sys/async.h> 33 #include <sys/systm.h> /* panicstr */ 34 #include <sys/spl.h> 35 #include <sys/sunddi.h> 36 #include <sys/machsystm.h> /* intr_dist_add */ 37 #include <sys/ddi_impldefs.h> 38 #include <sys/cpuvar.h> 39 #include <sys/time.h> 40 #include "px_obj.h" 41 42 /*LINTLIBRARY*/ 43 44 static void px_ib_intr_redist(void *arg, int32_t weight_max, int32_t weight); 45 static void px_ib_cpu_ticks_to_ih_nsec(px_ib_t *ib_p, px_ih_t *ih_p, 46 uint32_t cpu_id); 47 static uint_t px_ib_intr_reset(void *arg); 48 static void px_fill_in_intr_devs(pcitool_intr_dev_t *dev, char *driver_name, 49 char *path_name, int instance); 50 51 extern uint64_t xc_tick_jump_limit; 52 53 int 54 px_ib_attach(px_t *px_p) 55 { 56 dev_info_t *dip = px_p->px_dip; 57 px_ib_t *ib_p; 58 sysino_t sysino; 59 px_fault_t *fault_p = &px_p->px_fault; 60 61 DBG(DBG_IB, dip, "px_ib_attach\n"); 62 63 if (px_lib_intr_devino_to_sysino(px_p->px_dip, 64 px_p->px_inos[PX_INTR_PEC], &sysino) != DDI_SUCCESS) 65 return (DDI_FAILURE); 66 67 /* 68 * Allocate interrupt block state structure and link it to 69 * the px state structure. 70 */ 71 ib_p = kmem_zalloc(sizeof (px_ib_t), KM_SLEEP); 72 px_p->px_ib_p = ib_p; 73 ib_p->ib_px_p = px_p; 74 ib_p->ib_ino_lst = (px_ino_t *)NULL; 75 76 mutex_init(&ib_p->ib_intr_lock, NULL, MUTEX_DRIVER, NULL); 77 mutex_init(&ib_p->ib_ino_lst_mutex, NULL, MUTEX_DRIVER, NULL); 78 79 bus_func_register(BF_TYPE_RESINTR, px_ib_intr_reset, ib_p); 80 81 intr_dist_add_weighted(px_ib_intr_redist, ib_p); 82 83 /* 84 * Initialize PEC fault data structure 85 */ 86 fault_p->px_fh_dip = dip; 87 fault_p->px_fh_sysino = sysino; 88 fault_p->px_err_func = px_err_dmc_pec_intr; 89 fault_p->px_intr_ino = px_p->px_inos[PX_INTR_PEC]; 90 91 return (DDI_SUCCESS); 92 } 93 94 void 95 px_ib_detach(px_t *px_p) 96 { 97 px_ib_t *ib_p = px_p->px_ib_p; 98 dev_info_t *dip = px_p->px_dip; 99 100 DBG(DBG_IB, dip, "px_ib_detach\n"); 101 102 bus_func_unregister(BF_TYPE_RESINTR, px_ib_intr_reset, ib_p); 103 intr_dist_rem_weighted(px_ib_intr_redist, ib_p); 104 105 mutex_destroy(&ib_p->ib_ino_lst_mutex); 106 mutex_destroy(&ib_p->ib_intr_lock); 107 108 px_ib_free_ino_all(ib_p); 109 110 px_p->px_ib_p = NULL; 111 kmem_free(ib_p, sizeof (px_ib_t)); 112 } 113 114 void 115 px_ib_intr_enable(px_t *px_p, cpuid_t cpu_id, devino_t ino) 116 { 117 px_ib_t *ib_p = px_p->px_ib_p; 118 sysino_t sysino; 119 120 /* 121 * Determine the cpu for the interrupt 122 */ 123 mutex_enter(&ib_p->ib_intr_lock); 124 125 DBG(DBG_IB, px_p->px_dip, 126 "px_ib_intr_enable: ino=%x cpu_id=%x\n", ino, cpu_id); 127 128 if (px_lib_intr_devino_to_sysino(px_p->px_dip, ino, 129 &sysino) != DDI_SUCCESS) { 130 DBG(DBG_IB, px_p->px_dip, 131 "px_ib_intr_enable: px_intr_devino_to_sysino() failed\n"); 132 133 mutex_exit(&ib_p->ib_intr_lock); 134 return; 135 } 136 137 PX_INTR_ENABLE(px_p->px_dip, sysino, cpu_id); 138 px_lib_intr_setstate(px_p->px_dip, sysino, INTR_IDLE_STATE); 139 140 mutex_exit(&ib_p->ib_intr_lock); 141 } 142 143 /*ARGSUSED*/ 144 void 145 px_ib_intr_disable(px_ib_t *ib_p, devino_t ino, int wait) 146 { 147 sysino_t sysino; 148 149 mutex_enter(&ib_p->ib_intr_lock); 150 151 DBG(DBG_IB, ib_p->ib_px_p->px_dip, "px_ib_intr_disable: ino=%x\n", ino); 152 153 /* Disable the interrupt */ 154 if (px_lib_intr_devino_to_sysino(ib_p->ib_px_p->px_dip, ino, 155 &sysino) != DDI_SUCCESS) { 156 DBG(DBG_IB, ib_p->ib_px_p->px_dip, 157 "px_ib_intr_disable: px_intr_devino_to_sysino() failed\n"); 158 159 mutex_exit(&ib_p->ib_intr_lock); 160 return; 161 } 162 163 PX_INTR_DISABLE(ib_p->ib_px_p->px_dip, sysino); 164 165 mutex_exit(&ib_p->ib_intr_lock); 166 } 167 168 169 void 170 px_ib_intr_dist_en(dev_info_t *dip, cpuid_t cpu_id, devino_t ino, 171 boolean_t wait_flag) 172 { 173 uint32_t old_cpu_id; 174 sysino_t sysino; 175 intr_valid_state_t enabled = 0; 176 hrtime_t start_time, prev, curr, interval, jump; 177 hrtime_t intr_timeout; 178 intr_state_t intr_state; 179 int e = DDI_SUCCESS; 180 181 DBG(DBG_IB, dip, "px_ib_intr_dist_en: ino=0x%x\n", ino); 182 183 if (px_lib_intr_devino_to_sysino(dip, ino, &sysino) != DDI_SUCCESS) { 184 DBG(DBG_IB, dip, "px_ib_intr_dist_en: " 185 "px_intr_devino_to_sysino() failed, ino 0x%x\n", ino); 186 return; 187 } 188 189 /* Skip enabling disabled interrupts */ 190 if (px_lib_intr_getvalid(dip, sysino, &enabled) != DDI_SUCCESS) { 191 DBG(DBG_IB, dip, "px_ib_intr_dist_en: px_intr_getvalid() " 192 "failed, sysino 0x%x\n", sysino); 193 return; 194 } 195 if (!enabled) 196 return; 197 198 /* Done if redistributed onto the same cpuid */ 199 if (px_lib_intr_gettarget(dip, sysino, &old_cpu_id) != DDI_SUCCESS) { 200 DBG(DBG_IB, dip, "px_ib_intr_dist_en: " 201 "px_intr_gettarget() failed\n"); 202 return; 203 } 204 if (cpu_id == old_cpu_id) 205 return; 206 207 if (!wait_flag) 208 goto done; 209 210 /* Busy wait on pending interrupts */ 211 PX_INTR_DISABLE(dip, sysino); 212 213 intr_timeout = px_intrpend_timeout; 214 jump = TICK_TO_NSEC(xc_tick_jump_limit); 215 216 for (curr = start_time = gethrtime(); !panicstr && 217 ((e = px_lib_intr_getstate(dip, sysino, &intr_state)) == 218 DDI_SUCCESS) && 219 (intr_state == INTR_DELIVERED_STATE); /* */) { 220 /* 221 * If we have a really large jump in hrtime, it is most 222 * probably because we entered the debugger (or OBP, 223 * in general). So, we adjust the timeout accordingly 224 * to prevent declaring an interrupt timeout. The 225 * master-interrupt mechanism in OBP should deliver 226 * the interrupts properly. 227 */ 228 prev = curr; 229 curr = gethrtime(); 230 interval = curr - prev; 231 if (interval > jump) 232 intr_timeout += interval; 233 if (curr - start_time > intr_timeout) { 234 cmn_err(CE_WARN, 235 "%s%d: px_ib_intr_dist_en: sysino 0x%lx(ino 0x%x) " 236 "from cpu id 0x%x to 0x%x timeout", 237 ddi_driver_name(dip), ddi_get_instance(dip), 238 sysino, ino, old_cpu_id, cpu_id); 239 240 e = DDI_FAILURE; 241 break; 242 } 243 } 244 245 if (e != DDI_SUCCESS) 246 DBG(DBG_IB, dip, "px_ib_intr_dist_en: failed, " 247 "ino 0x%x sysino 0x%x\n", ino, sysino); 248 249 done: 250 PX_INTR_ENABLE(dip, sysino, cpu_id); 251 } 252 253 static void 254 px_ib_cpu_ticks_to_ih_nsec(px_ib_t *ib_p, px_ih_t *ih_p, uint32_t cpu_id) 255 { 256 extern kmutex_t pxintr_ks_template_lock; 257 hrtime_t ticks; 258 259 /* 260 * Because we are updating two fields in ih_t we must lock 261 * pxintr_ks_template_lock to prevent someone from reading the 262 * kstats after we set ih_ticks to 0 and before we increment 263 * ih_nsec to compensate. 264 * 265 * We must also protect against the interrupt arriving and incrementing 266 * ih_ticks between the time we read it and when we reset it to 0. 267 * To do this we use atomic_swap. 268 */ 269 270 ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex)); 271 272 mutex_enter(&pxintr_ks_template_lock); 273 ticks = atomic_swap_64(&ih_p->ih_ticks, 0); 274 ih_p->ih_nsec += (uint64_t)tick2ns(ticks, cpu_id); 275 mutex_exit(&pxintr_ks_template_lock); 276 } 277 278 279 /* 280 * Redistribute interrupts of the specified weight. The first call has a weight 281 * of weight_max, which can be used to trigger initialization for 282 * redistribution. The inos with weight [weight_max, inf.) should be processed 283 * on the "weight == weight_max" call. This first call is followed by calls 284 * of decreasing weights, inos of that weight should be processed. The final 285 * call specifies a weight of zero, this can be used to trigger processing of 286 * stragglers. 287 */ 288 static void 289 px_ib_intr_redist(void *arg, int32_t weight_max, int32_t weight) 290 { 291 px_ib_t *ib_p = (px_ib_t *)arg; 292 px_t *px_p = ib_p->ib_px_p; 293 dev_info_t *dip = px_p->px_dip; 294 px_ino_t *ino_p; 295 px_ino_pil_t *ipil_p; 296 px_ih_t *ih_lst; 297 int32_t dweight = 0; 298 int i; 299 300 /* Redistribute internal interrupts */ 301 if (weight == 0) { 302 mutex_enter(&ib_p->ib_intr_lock); 303 px_ib_intr_dist_en(dip, intr_dist_cpuid(), 304 px_p->px_inos[PX_INTR_PEC], B_FALSE); 305 mutex_exit(&ib_p->ib_intr_lock); 306 307 px_hp_intr_redist(px_p); 308 } 309 310 /* Redistribute device interrupts */ 311 mutex_enter(&ib_p->ib_ino_lst_mutex); 312 px_msiq_redist(px_p); 313 314 for (ino_p = ib_p->ib_ino_lst; ino_p; ino_p = ino_p->ino_next_p) { 315 /* 316 * Recomputes the sum of interrupt weights of devices that 317 * share the same ino upon first call marked by 318 * (weight == weight_max). 319 */ 320 if (weight == weight_max) { 321 ino_p->ino_intr_weight = 0; 322 323 for (ipil_p = ino_p->ino_ipil_p; ipil_p; 324 ipil_p = ipil_p->ipil_next_p) { 325 for (i = 0, ih_lst = ipil_p->ipil_ih_head; 326 i < ipil_p->ipil_ih_size; i++, 327 ih_lst = ih_lst->ih_next) { 328 dweight = i_ddi_get_intr_weight( 329 ih_lst->ih_dip); 330 if (dweight > 0) 331 ino_p->ino_intr_weight += 332 dweight; 333 } 334 } 335 } 336 337 /* 338 * As part of redistributing weighted interrupts over cpus, 339 * nexus redistributes device interrupts and updates 340 * cpu weight. The purpose is for the most light weighted 341 * cpu to take the next interrupt and gain weight, therefore 342 * attention demanding device gains more cpu attention by 343 * making itself heavy. 344 */ 345 if ((weight == ino_p->ino_intr_weight) || 346 ((weight >= weight_max) && 347 (ino_p->ino_intr_weight >= weight_max))) { 348 uint32_t orig_cpuid = ino_p->ino_cpuid; 349 350 if (cpu[orig_cpuid] == NULL) 351 orig_cpuid = CPU->cpu_id; 352 353 DBG(DBG_IB, dip, "px_ib_intr_redist: sysino 0x%llx " 354 "current cpuid 0x%x current default cpuid 0x%x\n", 355 ino_p->ino_sysino, ino_p->ino_cpuid, 356 ino_p->ino_default_cpuid); 357 358 /* select target cpuid and mark ino established */ 359 if (ino_p->ino_default_cpuid == -1) 360 ino_p->ino_cpuid = ino_p->ino_default_cpuid = 361 intr_dist_cpuid(); 362 else if ((ino_p->ino_cpuid != 363 ino_p->ino_default_cpuid) && 364 cpu[ino_p->ino_default_cpuid] && 365 cpu_intr_on(cpu[ino_p->ino_default_cpuid])) 366 ino_p->ino_cpuid = ino_p->ino_default_cpuid; 367 else if (!cpu_intr_on(cpu[ino_p->ino_cpuid])) 368 ino_p->ino_cpuid = intr_dist_cpuid(); 369 370 DBG(DBG_IB, dip, "px_ib_intr_redist: sysino 0x%llx " 371 "new cpuid 0x%x new default cpuid 0x%x\n", 372 ino_p->ino_sysino, ino_p->ino_cpuid, 373 ino_p->ino_default_cpuid); 374 375 /* Add device weight to targeted cpu. */ 376 for (ipil_p = ino_p->ino_ipil_p; ipil_p; 377 ipil_p = ipil_p->ipil_next_p) { 378 for (i = 0, ih_lst = ipil_p->ipil_ih_head; 379 i < ipil_p->ipil_ih_size; i++, 380 ih_lst = ih_lst->ih_next) { 381 382 dweight = i_ddi_get_intr_weight( 383 ih_lst->ih_dip); 384 intr_dist_cpuid_add_device_weight( 385 ino_p->ino_cpuid, ih_lst->ih_dip, 386 dweight); 387 388 /* 389 * Different cpus may have different 390 * clock speeds. to account for this, 391 * whenever an interrupt is moved to a 392 * new CPU, we convert the accumulated 393 * ticks into nsec, based upon the clock 394 * rate of the prior CPU. 395 * 396 * It is possible that the prior CPU no 397 * longer exists. In this case, fall 398 * back to using this CPU's clock rate. 399 * 400 * Note that the value in ih_ticks has 401 * already been corrected for any power 402 * savings mode which might have been 403 * in effect. 404 */ 405 px_ib_cpu_ticks_to_ih_nsec(ib_p, ih_lst, 406 orig_cpuid); 407 } 408 } 409 410 /* enable interrupt on new targeted cpu */ 411 px_ib_intr_dist_en(dip, ino_p->ino_cpuid, 412 ino_p->ino_ino, B_TRUE); 413 } 414 } 415 mutex_exit(&ib_p->ib_ino_lst_mutex); 416 } 417 418 /* 419 * Reset interrupts to IDLE. This function is called during 420 * panic handling after redistributing interrupts; it's needed to 421 * support dumping to network devices after 'sync' from OBP. 422 * 423 * N.B. This routine runs in a context where all other threads 424 * are permanently suspended. 425 */ 426 static uint_t 427 px_ib_intr_reset(void *arg) 428 { 429 px_ib_t *ib_p = (px_ib_t *)arg; 430 431 DBG(DBG_IB, ib_p->ib_px_p->px_dip, "px_ib_intr_reset\n"); 432 433 if (px_lib_intr_reset(ib_p->ib_px_p->px_dip) != DDI_SUCCESS) 434 return (BF_FATAL); 435 436 return (BF_NONE); 437 } 438 439 /* 440 * Locate px_ino_t structure on ib_p->ib_ino_lst according to ino# 441 * returns NULL if not found. 442 */ 443 px_ino_t * 444 px_ib_locate_ino(px_ib_t *ib_p, devino_t ino_num) 445 { 446 px_ino_t *ino_p = ib_p->ib_ino_lst; 447 448 ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex)); 449 450 for (; ino_p && ino_p->ino_ino != ino_num; ino_p = ino_p->ino_next_p) 451 ; 452 453 return (ino_p); 454 } 455 456 px_ino_t * 457 px_ib_alloc_ino(px_ib_t *ib_p, devino_t ino_num) 458 { 459 sysino_t sysino; 460 px_ino_t *ino_p; 461 462 if (px_lib_intr_devino_to_sysino(ib_p->ib_px_p->px_dip, 463 ino_num, &sysino) != DDI_SUCCESS) 464 return (NULL); 465 466 ino_p = kmem_zalloc(sizeof (px_ino_t), KM_SLEEP); 467 468 ino_p->ino_next_p = ib_p->ib_ino_lst; 469 ib_p->ib_ino_lst = ino_p; 470 471 ino_p->ino_ino = ino_num; 472 ino_p->ino_sysino = sysino; 473 ino_p->ino_ib_p = ib_p; 474 ino_p->ino_unclaimed_intrs = 0; 475 ino_p->ino_lopil = 0; 476 ino_p->ino_cpuid = ino_p->ino_default_cpuid = (cpuid_t)-1; 477 478 return (ino_p); 479 } 480 481 px_ino_pil_t * 482 px_ib_new_ino_pil(px_ib_t *ib_p, devino_t ino_num, uint_t pil, px_ih_t *ih_p) 483 { 484 px_ino_pil_t *ipil_p = kmem_zalloc(sizeof (px_ino_pil_t), KM_SLEEP); 485 px_ino_t *ino_p; 486 487 if ((ino_p = px_ib_locate_ino(ib_p, ino_num)) == NULL) 488 ino_p = px_ib_alloc_ino(ib_p, ino_num); 489 490 ASSERT(ino_p != NULL); 491 492 ih_p->ih_next = ih_p; 493 ipil_p->ipil_pil = pil; 494 ipil_p->ipil_ih_head = ih_p; 495 ipil_p->ipil_ih_tail = ih_p; 496 ipil_p->ipil_ih_start = ih_p; 497 ipil_p->ipil_ih_size = 1; 498 ipil_p->ipil_ino_p = ino_p; 499 500 ipil_p->ipil_next_p = ino_p->ino_ipil_p; 501 ino_p->ino_ipil_p = ipil_p; 502 ino_p->ino_ipil_size++; 503 504 if ((ino_p->ino_lopil == 0) || (ino_p->ino_lopil > pil)) 505 ino_p->ino_lopil = pil; 506 507 return (ipil_p); 508 } 509 510 void 511 px_ib_delete_ino_pil(px_ib_t *ib_p, px_ino_pil_t *ipil_p) 512 { 513 px_ino_t *ino_p = ipil_p->ipil_ino_p; 514 ushort_t pil = ipil_p->ipil_pil; 515 px_ino_pil_t *prev, *next; 516 517 ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex)); 518 519 if (ino_p->ino_ipil_p == ipil_p) 520 ino_p->ino_ipil_p = ipil_p->ipil_next_p; 521 else { 522 for (prev = next = ino_p->ino_ipil_p; next != ipil_p; 523 prev = next, next = next->ipil_next_p) 524 ; 525 526 if (prev) 527 prev->ipil_next_p = ipil_p->ipil_next_p; 528 } 529 530 kmem_free(ipil_p, sizeof (px_ino_pil_t)); 531 532 if ((--ino_p->ino_ipil_size) && (ino_p->ino_lopil == pil)) { 533 for (next = ino_p->ino_ipil_p, pil = next->ipil_pil; 534 next; next = next->ipil_next_p) { 535 536 if (pil > next->ipil_pil) 537 pil = next->ipil_pil; 538 } 539 540 /* 541 * Value stored in pil should be the lowest pil. 542 */ 543 ino_p->ino_lopil = pil; 544 } 545 546 if (ino_p->ino_ipil_size) 547 return; 548 549 ino_p->ino_lopil = 0; 550 551 if (ino_p->ino_msiq_p) 552 return; 553 554 if (ib_p->ib_ino_lst == ino_p) 555 ib_p->ib_ino_lst = ino_p->ino_next_p; 556 else { 557 px_ino_t *list = ib_p->ib_ino_lst; 558 559 for (; list->ino_next_p != ino_p; list = list->ino_next_p) 560 ; 561 list->ino_next_p = ino_p->ino_next_p; 562 } 563 } 564 565 /* 566 * Free all ino when we are detaching. 567 */ 568 void 569 px_ib_free_ino_all(px_ib_t *ib_p) 570 { 571 px_ino_t *ino_p = ib_p->ib_ino_lst; 572 px_ino_t *next = NULL; 573 574 while (ino_p) { 575 next = ino_p->ino_next_p; 576 kmem_free(ino_p, sizeof (px_ino_t)); 577 ino_p = next; 578 } 579 } 580 581 /* 582 * Locate px_ino_pil_t structure on ino_p->ino_ipil_p according to ino# 583 * returns NULL if not found. 584 */ 585 px_ino_pil_t * 586 px_ib_ino_locate_ipil(px_ino_t *ino_p, uint_t pil) 587 { 588 px_ino_pil_t *ipil_p = ino_p->ino_ipil_p; 589 590 for (; ipil_p && ipil_p->ipil_pil != pil; ipil_p = ipil_p->ipil_next_p) 591 ; 592 593 return (ipil_p); 594 } 595 596 int 597 px_ib_ino_add_intr(px_t *px_p, px_ino_pil_t *ipil_p, px_ih_t *ih_p) 598 { 599 px_ino_t *ino_p = ipil_p->ipil_ino_p; 600 px_ib_t *ib_p = ino_p->ino_ib_p; 601 devino_t ino = ino_p->ino_ino; 602 sysino_t sysino = ino_p->ino_sysino; 603 dev_info_t *dip = px_p->px_dip; 604 cpuid_t curr_cpu; 605 hrtime_t start_time; 606 intr_state_t intr_state; 607 int ret = DDI_SUCCESS; 608 609 ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex)); 610 ASSERT(ib_p == px_p->px_ib_p); 611 612 DBG(DBG_IB, dip, "px_ib_ino_add_intr ino=%x\n", ino_p->ino_ino); 613 614 /* Disable the interrupt */ 615 if ((ret = px_lib_intr_gettarget(dip, sysino, 616 &curr_cpu)) != DDI_SUCCESS) { 617 DBG(DBG_IB, dip, 618 "px_ib_ino_add_intr px_intr_gettarget() failed\n"); 619 620 return (ret); 621 } 622 623 PX_INTR_DISABLE(dip, sysino); 624 625 /* Busy wait on pending interrupt */ 626 for (start_time = gethrtime(); !panicstr && 627 ((ret = px_lib_intr_getstate(dip, sysino, &intr_state)) 628 == DDI_SUCCESS) && (intr_state == INTR_DELIVERED_STATE); /* */) { 629 if (gethrtime() - start_time > px_intrpend_timeout) { 630 cmn_err(CE_WARN, "%s%d: px_ib_ino_add_intr: pending " 631 "sysino 0x%lx(ino 0x%x) timeout", 632 ddi_driver_name(dip), ddi_get_instance(dip), 633 sysino, ino); 634 635 ret = DDI_FAILURE; 636 break; 637 } 638 } 639 640 /* 641 * If the interrupt was previously blocked (left in pending state) 642 * because of jabber we need to clear the pending state in case the 643 * jabber has gone away. 644 */ 645 if (ino_p->ino_unclaimed_intrs > px_unclaimed_intr_max) { 646 cmn_err(CE_WARN, 647 "%s%d: px_ib_ino_add_intr: ino 0x%x has been unblocked", 648 ddi_driver_name(dip), ddi_get_instance(dip), ino); 649 650 ino_p->ino_unclaimed_intrs = 0; 651 ret = px_lib_intr_setstate(dip, sysino, INTR_IDLE_STATE); 652 } 653 654 if (ret != DDI_SUCCESS) { 655 DBG(DBG_IB, dip, "px_ib_ino_add_intr: failed, " 656 "ino 0x%x sysino 0x%x\n", ino, sysino); 657 658 return (ret); 659 } 660 661 /* Link up px_ih_t */ 662 ih_p->ih_next = ipil_p->ipil_ih_head; 663 ipil_p->ipil_ih_tail->ih_next = ih_p; 664 ipil_p->ipil_ih_tail = ih_p; 665 666 ipil_p->ipil_ih_start = ipil_p->ipil_ih_head; 667 ipil_p->ipil_ih_size++; 668 669 /* Re-enable interrupt */ 670 PX_INTR_ENABLE(dip, sysino, curr_cpu); 671 672 return (ret); 673 } 674 675 /* 676 * Removes px_ih_t from the ino's link list. 677 * uses hardware mutex to lock out interrupt threads. 678 * Side effects: interrupt belongs to that ino is turned off on return. 679 * if we are sharing PX slot with other inos, the caller needs 680 * to turn it back on. 681 */ 682 int 683 px_ib_ino_rem_intr(px_t *px_p, px_ino_pil_t *ipil_p, px_ih_t *ih_p) 684 { 685 px_ino_t *ino_p = ipil_p->ipil_ino_p; 686 devino_t ino = ino_p->ino_ino; 687 sysino_t sysino = ino_p->ino_sysino; 688 dev_info_t *dip = px_p->px_dip; 689 px_ih_t *ih_lst = ipil_p->ipil_ih_head; 690 hrtime_t start_time; 691 intr_state_t intr_state; 692 int i, ret = DDI_SUCCESS; 693 694 ASSERT(MUTEX_HELD(&ino_p->ino_ib_p->ib_ino_lst_mutex)); 695 696 DBG(DBG_IB, px_p->px_dip, "px_ib_ino_rem_intr ino=%x\n", 697 ino_p->ino_ino); 698 699 /* Disable the interrupt */ 700 PX_INTR_DISABLE(px_p->px_dip, sysino); 701 702 if (ipil_p->ipil_ih_size == 1) { 703 if (ih_lst != ih_p) 704 goto not_found; 705 706 /* No need to set head/tail as ino_p will be freed */ 707 goto reset; 708 } 709 710 /* Busy wait on pending interrupt */ 711 for (start_time = gethrtime(); !panicstr && 712 ((ret = px_lib_intr_getstate(dip, sysino, &intr_state)) 713 == DDI_SUCCESS) && (intr_state == INTR_DELIVERED_STATE); /* */) { 714 if (gethrtime() - start_time > px_intrpend_timeout) { 715 cmn_err(CE_WARN, "%s%d: px_ib_ino_rem_intr: pending " 716 "sysino 0x%lx(ino 0x%x) timeout", 717 ddi_driver_name(dip), ddi_get_instance(dip), 718 sysino, ino); 719 720 ret = DDI_FAILURE; 721 break; 722 } 723 } 724 725 /* 726 * If the interrupt was previously blocked (left in pending state) 727 * because of jabber we need to clear the pending state in case the 728 * jabber has gone away. 729 */ 730 if (ino_p->ino_unclaimed_intrs > px_unclaimed_intr_max) { 731 cmn_err(CE_WARN, "%s%d: px_ib_ino_rem_intr: " 732 "ino 0x%x has been unblocked", 733 ddi_driver_name(dip), ddi_get_instance(dip), ino); 734 735 ino_p->ino_unclaimed_intrs = 0; 736 ret = px_lib_intr_setstate(dip, sysino, INTR_IDLE_STATE); 737 } 738 739 if (ret != DDI_SUCCESS) { 740 DBG(DBG_IB, dip, "px_ib_ino_rem_intr: failed, " 741 "ino 0x%x sysino 0x%x\n", ino, sysino); 742 743 return (ret); 744 } 745 746 /* Search the link list for ih_p */ 747 for (i = 0; (i < ipil_p->ipil_ih_size) && 748 (ih_lst->ih_next != ih_p); i++, ih_lst = ih_lst->ih_next) 749 ; 750 751 if (ih_lst->ih_next != ih_p) 752 goto not_found; 753 754 /* Remove ih_p from the link list and maintain the head/tail */ 755 ih_lst->ih_next = ih_p->ih_next; 756 757 if (ipil_p->ipil_ih_head == ih_p) 758 ipil_p->ipil_ih_head = ih_p->ih_next; 759 if (ipil_p->ipil_ih_tail == ih_p) 760 ipil_p->ipil_ih_tail = ih_lst; 761 762 ipil_p->ipil_ih_start = ipil_p->ipil_ih_head; 763 764 reset: 765 if (ih_p->ih_config_handle) 766 pci_config_teardown(&ih_p->ih_config_handle); 767 if (ih_p->ih_ksp != NULL) 768 kstat_delete(ih_p->ih_ksp); 769 770 kmem_free(ih_p, sizeof (px_ih_t)); 771 ipil_p->ipil_ih_size--; 772 773 return (ret); 774 775 not_found: 776 DBG(DBG_R_INTX, ino_p->ino_ib_p->ib_px_p->px_dip, 777 "ino_p=%x does not have ih_p=%x\n", ino_p, ih_p); 778 779 return (DDI_FAILURE); 780 } 781 782 px_ih_t * 783 px_ib_intr_locate_ih(px_ino_pil_t *ipil_p, dev_info_t *rdip, 784 uint32_t inum, msiq_rec_type_t rec_type, msgcode_t msg_code) 785 { 786 px_ih_t *ih_p = ipil_p->ipil_ih_head; 787 int i; 788 789 for (i = 0; i < ipil_p->ipil_ih_size; i++, ih_p = ih_p->ih_next) { 790 if ((ih_p->ih_dip == rdip) && (ih_p->ih_inum == inum) && 791 (ih_p->ih_rec_type == rec_type) && 792 (ih_p->ih_msg_code == msg_code)) 793 return (ih_p); 794 } 795 796 return ((px_ih_t *)NULL); 797 } 798 799 px_ih_t * 800 px_ib_alloc_ih(dev_info_t *rdip, uint32_t inum, 801 uint_t (*int_handler)(caddr_t int_handler_arg1, caddr_t int_handler_arg2), 802 caddr_t int_handler_arg1, caddr_t int_handler_arg2, 803 msiq_rec_type_t rec_type, msgcode_t msg_code) 804 { 805 px_ih_t *ih_p; 806 807 ih_p = kmem_alloc(sizeof (px_ih_t), KM_SLEEP); 808 ih_p->ih_dip = rdip; 809 ih_p->ih_inum = inum; 810 ih_p->ih_intr_state = PX_INTR_STATE_DISABLE; 811 ih_p->ih_handler = int_handler; 812 ih_p->ih_handler_arg1 = int_handler_arg1; 813 ih_p->ih_handler_arg2 = int_handler_arg2; 814 ih_p->ih_config_handle = NULL; 815 ih_p->ih_rec_type = rec_type; 816 ih_p->ih_msg_code = msg_code; 817 ih_p->ih_nsec = 0; 818 ih_p->ih_ticks = 0; 819 ih_p->ih_ksp = NULL; 820 821 return (ih_p); 822 } 823 824 int 825 px_ib_update_intr_state(px_t *px_p, dev_info_t *rdip, 826 uint_t inum, devino_t ino, uint_t pil, 827 uint_t new_intr_state, msiq_rec_type_t rec_type, 828 msgcode_t msg_code) 829 { 830 px_ib_t *ib_p = px_p->px_ib_p; 831 px_ino_t *ino_p; 832 px_ino_pil_t *ipil_p; 833 px_ih_t *ih_p; 834 int ret = DDI_FAILURE; 835 836 DBG(DBG_IB, px_p->px_dip, "px_ib_update_intr_state: %s%d " 837 "inum %x devino %x pil %x state %x\n", ddi_driver_name(rdip), 838 ddi_get_instance(rdip), inum, ino, pil, new_intr_state); 839 840 mutex_enter(&ib_p->ib_ino_lst_mutex); 841 842 ino_p = px_ib_locate_ino(ib_p, ino); 843 if (ino_p && (ipil_p = px_ib_ino_locate_ipil(ino_p, pil))) { 844 if (ih_p = px_ib_intr_locate_ih(ipil_p, rdip, inum, rec_type, 845 msg_code)) { 846 ih_p->ih_intr_state = new_intr_state; 847 ret = DDI_SUCCESS; 848 } 849 } 850 851 mutex_exit(&ib_p->ib_ino_lst_mutex); 852 return (ret); 853 } 854 855 856 /* 857 * Get interrupt CPU for a given ino. 858 * Return info only for inos which are already mapped to devices. 859 */ 860 /*ARGSUSED*/ 861 int 862 px_ib_get_intr_target(px_t *px_p, devino_t ino, cpuid_t *cpu_id_p) 863 { 864 dev_info_t *dip = px_p->px_dip; 865 sysino_t sysino; 866 int ret; 867 868 DBG(DBG_IB, px_p->px_dip, "px_ib_get_intr_target: devino %x\n", ino); 869 870 /* Convert leaf-wide intr to system-wide intr */ 871 if (px_lib_intr_devino_to_sysino(dip, ino, &sysino) != DDI_SUCCESS) 872 return (DDI_FAILURE); 873 874 ret = px_lib_intr_gettarget(dip, sysino, cpu_id_p); 875 876 DBG(DBG_IB, px_p->px_dip, "px_ib_get_intr_target: cpu_id %x\n", 877 *cpu_id_p); 878 879 return (ret); 880 } 881 882 883 /* 884 * Associate a new CPU with a given ino. 885 * Operate only on INOs which are already mapped to devices. 886 */ 887 int 888 px_ib_set_intr_target(px_t *px_p, devino_t ino, cpuid_t cpu_id) 889 { 890 dev_info_t *dip = px_p->px_dip; 891 cpuid_t old_cpu_id; 892 sysino_t sysino; 893 int ret = DDI_SUCCESS; 894 extern const int _ncpu; 895 extern cpu_t *cpu[]; 896 897 DBG(DBG_IB, px_p->px_dip, "px_ib_set_intr_target: devino %x " 898 "cpu_id %x\n", ino, cpu_id); 899 900 mutex_enter(&cpu_lock); 901 902 /* Convert leaf-wide intr to system-wide intr */ 903 if (px_lib_intr_devino_to_sysino(dip, ino, &sysino) != DDI_SUCCESS) { 904 ret = DDI_FAILURE; 905 goto done; 906 } 907 908 if (px_lib_intr_gettarget(dip, sysino, &old_cpu_id) != DDI_SUCCESS) { 909 ret = DDI_FAILURE; 910 goto done; 911 } 912 913 /* 914 * Get lock, validate cpu and write it. 915 */ 916 if ((cpu_id < _ncpu) && (cpu[cpu_id] && cpu_is_online(cpu[cpu_id]))) { 917 DBG(DBG_IB, dip, "px_ib_set_intr_target: Enabling CPU %d\n", 918 cpu_id); 919 px_ib_intr_dist_en(dip, cpu_id, ino, B_TRUE); 920 px_ib_log_new_cpu(px_p->px_ib_p, old_cpu_id, cpu_id, ino); 921 } else { /* Invalid cpu */ 922 DBG(DBG_IB, dip, "px_ib_set_intr_target: Invalid cpuid %x\n", 923 cpu_id); 924 ret = DDI_EINVAL; 925 } 926 927 done: 928 mutex_exit(&cpu_lock); 929 return (ret); 930 } 931 932 hrtime_t px_ib_msix_retarget_timeout = 120ll * NANOSEC; /* 120 seconds */ 933 934 /* 935 * Associate a new CPU with a given MSI/X. 936 * Operate only on MSI/Xs which are already mapped to devices. 937 */ 938 int 939 px_ib_set_msix_target(px_t *px_p, ddi_intr_handle_impl_t *hdlp, 940 msinum_t msi_num, cpuid_t cpu_id) 941 { 942 px_ib_t *ib_p = px_p->px_ib_p; 943 px_msi_state_t *msi_state_p = &px_p->px_ib_p->ib_msi_state; 944 dev_info_t *dip = px_p->px_dip; 945 dev_info_t *rdip = hdlp->ih_dip; 946 msiqid_t msiq_id, old_msiq_id; 947 pci_msi_state_t msi_state; 948 msiq_rec_type_t msiq_rec_type; 949 msi_type_t msi_type; 950 px_ino_t *ino_p; 951 px_ih_t *ih_p, *old_ih_p; 952 cpuid_t old_cpu_id; 953 hrtime_t start_time, end_time; 954 int ret = DDI_SUCCESS; 955 extern const int _ncpu; 956 extern cpu_t *cpu[]; 957 958 DBG(DBG_IB, dip, "px_ib_set_msix_target: msi_num %x new cpu_id %x\n", 959 msi_num, cpu_id); 960 961 mutex_enter(&cpu_lock); 962 963 /* Check for MSI64 support */ 964 if ((hdlp->ih_cap & DDI_INTR_FLAG_MSI64) && msi_state_p->msi_addr64) { 965 msiq_rec_type = MSI64_REC; 966 msi_type = MSI64_TYPE; 967 } else { 968 msiq_rec_type = MSI32_REC; 969 msi_type = MSI32_TYPE; 970 } 971 972 if ((ret = px_lib_msi_getmsiq(dip, msi_num, 973 &old_msiq_id)) != DDI_SUCCESS) { 974 975 mutex_exit(&cpu_lock); 976 return (ret); 977 } 978 979 DBG(DBG_IB, dip, "px_ib_set_msix_target: current msiq 0x%x\n", 980 old_msiq_id); 981 982 if ((ret = px_ib_get_intr_target(px_p, 983 px_msiqid_to_devino(px_p, old_msiq_id), 984 &old_cpu_id)) != DDI_SUCCESS) { 985 986 mutex_exit(&cpu_lock); 987 return (ret); 988 } 989 990 DBG(DBG_IB, dip, "px_ib_set_msix_target: current cpuid 0x%x\n", 991 old_cpu_id); 992 993 if (cpu_id == old_cpu_id) { 994 995 mutex_exit(&cpu_lock); 996 return (DDI_SUCCESS); 997 } 998 999 /* 1000 * Get lock, validate cpu and write it. 1001 */ 1002 if (!((cpu_id < _ncpu) && (cpu[cpu_id] && 1003 cpu_is_online(cpu[cpu_id])))) { 1004 /* Invalid cpu */ 1005 DBG(DBG_IB, dip, "px_ib_set_msix_target: Invalid cpuid %x\n", 1006 cpu_id); 1007 1008 mutex_exit(&cpu_lock); 1009 return (DDI_EINVAL); 1010 } 1011 1012 DBG(DBG_IB, dip, "px_ib_set_msix_target: Enabling CPU %d\n", cpu_id); 1013 1014 if ((ret = px_add_msiq_intr(dip, rdip, hdlp, 1015 msiq_rec_type, msi_num, cpu_id, &msiq_id)) != DDI_SUCCESS) { 1016 DBG(DBG_IB, dip, "px_ib_set_msix_target: Add MSI handler " 1017 "failed, rdip 0x%p msi 0x%x\n", rdip, msi_num); 1018 1019 mutex_exit(&cpu_lock); 1020 return (ret); 1021 } 1022 1023 if ((ret = px_lib_msi_setmsiq(dip, msi_num, 1024 msiq_id, msi_type)) != DDI_SUCCESS) { 1025 (void) px_rem_msiq_intr(dip, rdip, 1026 hdlp, msiq_rec_type, msi_num, msiq_id); 1027 1028 mutex_exit(&cpu_lock); 1029 return (ret); 1030 } 1031 1032 if ((ret = px_ib_update_intr_state(px_p, rdip, hdlp->ih_inum, 1033 px_msiqid_to_devino(px_p, msiq_id), hdlp->ih_pri, 1034 PX_INTR_STATE_ENABLE, msiq_rec_type, msi_num)) != DDI_SUCCESS) { 1035 (void) px_rem_msiq_intr(dip, rdip, 1036 hdlp, msiq_rec_type, msi_num, msiq_id); 1037 1038 mutex_exit(&cpu_lock); 1039 return (ret); 1040 } 1041 1042 mutex_exit(&cpu_lock); 1043 mutex_enter(&ib_p->ib_ino_lst_mutex); 1044 1045 ino_p = px_ib_locate_ino(ib_p, px_msiqid_to_devino(px_p, old_msiq_id)); 1046 old_ih_p = px_ib_intr_locate_ih(px_ib_ino_locate_ipil(ino_p, 1047 hdlp->ih_pri), rdip, hdlp->ih_inum, msiq_rec_type, msi_num); 1048 old_ih_p->ih_retarget_flag = B_TRUE; 1049 1050 ino_p = px_ib_locate_ino(ib_p, px_msiqid_to_devino(px_p, msiq_id)); 1051 ih_p = px_ib_intr_locate_ih(px_ib_ino_locate_ipil(ino_p, hdlp->ih_pri), 1052 rdip, hdlp->ih_inum, msiq_rec_type, msi_num); 1053 ih_p->ih_retarget_flag = B_TRUE; 1054 1055 if ((ret = px_lib_msi_getstate(dip, msi_num, 1056 &msi_state)) != DDI_SUCCESS) { 1057 (void) px_rem_msiq_intr(dip, rdip, 1058 hdlp, msiq_rec_type, msi_num, msiq_id); 1059 1060 mutex_exit(&ib_p->ib_ino_lst_mutex); 1061 return (ret); 1062 } 1063 1064 if (msi_state == PCI_MSI_STATE_IDLE) 1065 ih_p->ih_retarget_flag = B_FALSE; 1066 1067 start_time = gethrtime(); 1068 while ((ih_p->ih_retarget_flag == B_TRUE) && 1069 (old_ih_p->ih_retarget_flag == B_TRUE)) { 1070 if ((end_time = (gethrtime() - start_time)) > 1071 px_ib_msix_retarget_timeout) { 1072 cmn_err(CE_WARN, "MSIX retarget %x is not completed, " 1073 "even after waiting %llx ticks\n", 1074 msi_num, end_time); 1075 1076 break; 1077 } 1078 1079 /* Wait for one second */ 1080 delay(drv_usectohz(1000000)); 1081 } 1082 1083 mutex_exit(&ib_p->ib_ino_lst_mutex); 1084 1085 ret = px_rem_msiq_intr(dip, rdip, 1086 hdlp, msiq_rec_type, msi_num, old_msiq_id); 1087 1088 return (ret); 1089 } 1090 1091 1092 static void 1093 px_fill_in_intr_devs(pcitool_intr_dev_t *dev, char *driver_name, 1094 char *path_name, int instance) 1095 { 1096 (void) strncpy(dev->driver_name, driver_name, MAXMODCONFNAME-1); 1097 dev->driver_name[MAXMODCONFNAME] = '\0'; 1098 (void) strncpy(dev->path, path_name, MAXPATHLEN-1); 1099 dev->dev_inst = instance; 1100 } 1101 1102 1103 /* 1104 * Return the dips or number of dips associated with a given interrupt block. 1105 * Size of dips array arg is passed in as dips_ret arg. 1106 * Number of dips returned is returned in dips_ret arg. 1107 * Array of dips gets returned in the dips argument. 1108 * Function returns number of dips existing for the given interrupt block. 1109 * 1110 * Note: this function assumes an enabled/valid INO, which is why it returns 1111 * the px node and (Internal) when it finds no other devices (and *devs_ret > 0) 1112 */ 1113 uint8_t 1114 pxtool_ib_get_ino_devs(px_t *px_p, uint32_t ino, uint32_t msi_num, 1115 uint8_t *devs_ret, pcitool_intr_dev_t *devs) 1116 { 1117 px_ib_t *ib_p = px_p->px_ib_p; 1118 px_ino_t *ino_p; 1119 px_ino_pil_t *ipil_p; 1120 px_ih_t *ih_p; 1121 uint32_t num_devs = 0; 1122 char pathname[MAXPATHLEN]; 1123 int i, j; 1124 1125 mutex_enter(&ib_p->ib_ino_lst_mutex); 1126 ino_p = px_ib_locate_ino(ib_p, ino); 1127 if (ino_p != NULL) { 1128 for (j = 0, ipil_p = ino_p->ino_ipil_p; ipil_p; 1129 ipil_p = ipil_p->ipil_next_p) { 1130 num_devs += ipil_p->ipil_ih_size; 1131 1132 for (i = 0, ih_p = ipil_p->ipil_ih_head; 1133 ((i < ipil_p->ipil_ih_size) && (i < *devs_ret)); 1134 i++, j++, ih_p = ih_p->ih_next) { 1135 (void) ddi_pathname(ih_p->ih_dip, pathname); 1136 1137 if (ih_p->ih_msg_code == msi_num) { 1138 num_devs = *devs_ret = 1; 1139 px_fill_in_intr_devs(&devs[0], 1140 (char *)ddi_driver_name( 1141 ih_p->ih_dip), pathname, 1142 ddi_get_instance(ih_p->ih_dip)); 1143 goto done; 1144 } 1145 1146 px_fill_in_intr_devs(&devs[j], 1147 (char *)ddi_driver_name(ih_p->ih_dip), 1148 pathname, ddi_get_instance(ih_p->ih_dip)); 1149 } 1150 } 1151 1152 *devs_ret = j; 1153 } else if (*devs_ret > 0) { 1154 (void) ddi_pathname(px_p->px_dip, pathname); 1155 strcat(pathname, " (Internal)"); 1156 px_fill_in_intr_devs(&devs[0], 1157 (char *)ddi_driver_name(px_p->px_dip), pathname, 1158 ddi_get_instance(px_p->px_dip)); 1159 num_devs = *devs_ret = 1; 1160 } 1161 1162 done: 1163 mutex_exit(&ib_p->ib_ino_lst_mutex); 1164 1165 return (num_devs); 1166 } 1167 1168 1169 int 1170 pxtool_ib_get_msi_info(px_t *px_p, devino_t ino, msinum_t msi_num, 1171 ddi_intr_handle_impl_t *hdlp) 1172 { 1173 px_ib_t *ib_p = px_p->px_ib_p; 1174 px_ino_t *ino_p; 1175 px_ino_pil_t *ipil_p; 1176 px_ih_t *ih_p; 1177 int i; 1178 1179 mutex_enter(&ib_p->ib_ino_lst_mutex); 1180 1181 if ((ino_p = px_ib_locate_ino(ib_p, ino)) == NULL) { 1182 mutex_exit(&ib_p->ib_ino_lst_mutex); 1183 return (DDI_FAILURE); 1184 } 1185 1186 for (ipil_p = ino_p->ino_ipil_p; ipil_p; 1187 ipil_p = ipil_p->ipil_next_p) { 1188 for (i = 0, ih_p = ipil_p->ipil_ih_head; 1189 ((i < ipil_p->ipil_ih_size) && ih_p); 1190 i++, ih_p = ih_p->ih_next) { 1191 1192 if (ih_p->ih_msg_code != msi_num) 1193 continue; 1194 1195 hdlp->ih_dip = ih_p->ih_dip; 1196 hdlp->ih_inum = ih_p->ih_inum; 1197 hdlp->ih_cb_func = ih_p->ih_handler; 1198 hdlp->ih_cb_arg1 = ih_p->ih_handler_arg1; 1199 hdlp->ih_cb_arg2 = ih_p->ih_handler_arg2; 1200 if (ih_p->ih_rec_type == MSI64_REC) 1201 hdlp->ih_cap = DDI_INTR_FLAG_MSI64; 1202 hdlp->ih_pri = ipil_p->ipil_pil; 1203 hdlp->ih_ver = DDI_INTR_VERSION; 1204 1205 mutex_exit(&ib_p->ib_ino_lst_mutex); 1206 return (DDI_SUCCESS); 1207 } 1208 } 1209 1210 mutex_exit(&ib_p->ib_ino_lst_mutex); 1211 return (DDI_FAILURE); 1212 } 1213 1214 void 1215 px_ib_log_new_cpu(px_ib_t *ib_p, cpuid_t old_cpu_id, cpuid_t new_cpu_id, 1216 uint32_t ino) 1217 { 1218 px_ino_t *ino_p; 1219 px_ino_pil_t *ipil_p; 1220 px_ih_t *ih_p; 1221 int i; 1222 1223 mutex_enter(&ib_p->ib_ino_lst_mutex); 1224 1225 /* Log in OS data structures the new CPU. */ 1226 if (ino_p = px_ib_locate_ino(ib_p, ino)) { 1227 1228 /* Log in OS data structures the new CPU. */ 1229 ino_p->ino_cpuid = new_cpu_id; 1230 1231 for (ipil_p = ino_p->ino_ipil_p; ipil_p; 1232 ipil_p = ipil_p->ipil_next_p) { 1233 for (i = 0, ih_p = ipil_p->ipil_ih_head; 1234 (i < ipil_p->ipil_ih_size); 1235 i++, ih_p = ih_p->ih_next) { 1236 /* 1237 * Account for any residual time 1238 * to be logged for old cpu. 1239 */ 1240 px_ib_cpu_ticks_to_ih_nsec(ib_p, 1241 ih_p, old_cpu_id); 1242 } 1243 } 1244 } 1245 1246 mutex_exit(&ib_p->ib_ino_lst_mutex); 1247 } 1248