1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 /* 27 * PX Interrupt Block implementation 28 */ 29 30 #include <sys/types.h> 31 #include <sys/kmem.h> 32 #include <sys/async.h> 33 #include <sys/systm.h> /* panicstr */ 34 #include <sys/spl.h> 35 #include <sys/sunddi.h> 36 #include <sys/machsystm.h> /* intr_dist_add */ 37 #include <sys/ddi_impldefs.h> 38 #include <sys/cpuvar.h> 39 #include <sys/time.h> 40 #include "px_obj.h" 41 42 /*LINTLIBRARY*/ 43 44 static void px_ib_intr_redist(void *arg, int32_t weight_max, int32_t weight); 45 static void px_ib_cpu_ticks_to_ih_nsec(px_ib_t *ib_p, px_ih_t *ih_p, 46 uint32_t cpu_id); 47 static uint_t px_ib_intr_reset(void *arg); 48 static void px_fill_in_intr_devs(pcitool_intr_dev_t *dev, char *driver_name, 49 char *path_name, int instance); 50 51 extern uint64_t xc_tick_jump_limit; 52 53 int 54 px_ib_attach(px_t *px_p) 55 { 56 dev_info_t *dip = px_p->px_dip; 57 px_ib_t *ib_p; 58 sysino_t sysino; 59 px_fault_t *fault_p = &px_p->px_fault; 60 61 DBG(DBG_IB, dip, "px_ib_attach\n"); 62 63 if (px_lib_intr_devino_to_sysino(px_p->px_dip, 64 px_p->px_inos[PX_INTR_PEC], &sysino) != DDI_SUCCESS) 65 return (DDI_FAILURE); 66 67 /* 68 * Allocate interrupt block state structure and link it to 69 * the px state structure. 70 */ 71 ib_p = kmem_zalloc(sizeof (px_ib_t), KM_SLEEP); 72 px_p->px_ib_p = ib_p; 73 ib_p->ib_px_p = px_p; 74 ib_p->ib_ino_lst = (px_ino_t *)NULL; 75 76 mutex_init(&ib_p->ib_intr_lock, NULL, MUTEX_DRIVER, NULL); 77 mutex_init(&ib_p->ib_ino_lst_mutex, NULL, MUTEX_DRIVER, NULL); 78 79 bus_func_register(BF_TYPE_RESINTR, px_ib_intr_reset, ib_p); 80 81 intr_dist_add_weighted(px_ib_intr_redist, ib_p); 82 83 /* 84 * Initialize PEC fault data structure 85 */ 86 fault_p->px_fh_dip = dip; 87 fault_p->px_fh_sysino = sysino; 88 fault_p->px_err_func = px_err_dmc_pec_intr; 89 fault_p->px_intr_ino = px_p->px_inos[PX_INTR_PEC]; 90 91 return (DDI_SUCCESS); 92 } 93 94 void 95 px_ib_detach(px_t *px_p) 96 { 97 px_ib_t *ib_p = px_p->px_ib_p; 98 dev_info_t *dip = px_p->px_dip; 99 100 DBG(DBG_IB, dip, "px_ib_detach\n"); 101 102 bus_func_unregister(BF_TYPE_RESINTR, px_ib_intr_reset, ib_p); 103 intr_dist_rem_weighted(px_ib_intr_redist, ib_p); 104 105 mutex_destroy(&ib_p->ib_ino_lst_mutex); 106 mutex_destroy(&ib_p->ib_intr_lock); 107 108 px_ib_free_ino_all(ib_p); 109 110 px_p->px_ib_p = NULL; 111 kmem_free(ib_p, sizeof (px_ib_t)); 112 } 113 114 void 115 px_ib_intr_enable(px_t *px_p, cpuid_t cpu_id, devino_t ino) 116 { 117 px_ib_t *ib_p = px_p->px_ib_p; 118 sysino_t sysino; 119 120 /* 121 * Determine the cpu for the interrupt 122 */ 123 mutex_enter(&ib_p->ib_intr_lock); 124 125 DBG(DBG_IB, px_p->px_dip, 126 "px_ib_intr_enable: ino=%x cpu_id=%x\n", ino, cpu_id); 127 128 if (px_lib_intr_devino_to_sysino(px_p->px_dip, ino, 129 &sysino) != DDI_SUCCESS) { 130 DBG(DBG_IB, px_p->px_dip, 131 "px_ib_intr_enable: px_intr_devino_to_sysino() failed\n"); 132 133 mutex_exit(&ib_p->ib_intr_lock); 134 return; 135 } 136 137 PX_INTR_ENABLE(px_p->px_dip, sysino, cpu_id); 138 px_lib_intr_setstate(px_p->px_dip, sysino, INTR_IDLE_STATE); 139 140 mutex_exit(&ib_p->ib_intr_lock); 141 } 142 143 /*ARGSUSED*/ 144 void 145 px_ib_intr_disable(px_ib_t *ib_p, devino_t ino, int wait) 146 { 147 sysino_t sysino; 148 149 mutex_enter(&ib_p->ib_intr_lock); 150 151 DBG(DBG_IB, ib_p->ib_px_p->px_dip, "px_ib_intr_disable: ino=%x\n", ino); 152 153 /* Disable the interrupt */ 154 if (px_lib_intr_devino_to_sysino(ib_p->ib_px_p->px_dip, ino, 155 &sysino) != DDI_SUCCESS) { 156 DBG(DBG_IB, ib_p->ib_px_p->px_dip, 157 "px_ib_intr_disable: px_intr_devino_to_sysino() failed\n"); 158 159 mutex_exit(&ib_p->ib_intr_lock); 160 return; 161 } 162 163 PX_INTR_DISABLE(ib_p->ib_px_p->px_dip, sysino); 164 165 mutex_exit(&ib_p->ib_intr_lock); 166 } 167 168 169 void 170 px_ib_intr_dist_en(dev_info_t *dip, cpuid_t cpu_id, devino_t ino, 171 boolean_t wait_flag) 172 { 173 uint32_t old_cpu_id; 174 sysino_t sysino; 175 intr_valid_state_t enabled = 0; 176 hrtime_t start_time, prev, curr, interval, jump; 177 hrtime_t intr_timeout; 178 intr_state_t intr_state; 179 int e = DDI_SUCCESS; 180 181 DBG(DBG_IB, dip, "px_ib_intr_dist_en: ino=0x%x\n", ino); 182 183 if (px_lib_intr_devino_to_sysino(dip, ino, &sysino) != DDI_SUCCESS) { 184 DBG(DBG_IB, dip, "px_ib_intr_dist_en: " 185 "px_intr_devino_to_sysino() failed, ino 0x%x\n", ino); 186 return; 187 } 188 189 /* Skip enabling disabled interrupts */ 190 if (px_lib_intr_getvalid(dip, sysino, &enabled) != DDI_SUCCESS) { 191 DBG(DBG_IB, dip, "px_ib_intr_dist_en: px_intr_getvalid() " 192 "failed, sysino 0x%x\n", sysino); 193 return; 194 } 195 if (!enabled) 196 return; 197 198 /* Done if redistributed onto the same cpuid */ 199 if (px_lib_intr_gettarget(dip, sysino, &old_cpu_id) != DDI_SUCCESS) { 200 DBG(DBG_IB, dip, "px_ib_intr_dist_en: " 201 "px_intr_gettarget() failed\n"); 202 return; 203 } 204 if (cpu_id == old_cpu_id) 205 return; 206 207 if (!wait_flag) 208 goto done; 209 210 /* Busy wait on pending interrupts */ 211 PX_INTR_DISABLE(dip, sysino); 212 213 intr_timeout = px_intrpend_timeout; 214 jump = TICK_TO_NSEC(xc_tick_jump_limit); 215 216 for (curr = start_time = gethrtime(); !panicstr && 217 ((e = px_lib_intr_getstate(dip, sysino, &intr_state)) == 218 DDI_SUCCESS) && 219 (intr_state == INTR_DELIVERED_STATE); /* */) { 220 /* 221 * If we have a really large jump in hrtime, it is most 222 * probably because we entered the debugger (or OBP, 223 * in general). So, we adjust the timeout accordingly 224 * to prevent declaring an interrupt timeout. The 225 * master-interrupt mechanism in OBP should deliver 226 * the interrupts properly. 227 */ 228 prev = curr; 229 curr = gethrtime(); 230 interval = curr - prev; 231 if (interval > jump) 232 intr_timeout += interval; 233 if (curr - start_time > intr_timeout) { 234 cmn_err(CE_WARN, 235 "%s%d: px_ib_intr_dist_en: sysino 0x%lx(ino 0x%x) " 236 "from cpu id 0x%x to 0x%x timeout", 237 ddi_driver_name(dip), ddi_get_instance(dip), 238 sysino, ino, old_cpu_id, cpu_id); 239 240 e = DDI_FAILURE; 241 break; 242 } 243 } 244 245 if (e != DDI_SUCCESS) 246 DBG(DBG_IB, dip, "px_ib_intr_dist_en: failed, " 247 "ino 0x%x sysino 0x%x\n", ino, sysino); 248 249 done: 250 PX_INTR_ENABLE(dip, sysino, cpu_id); 251 } 252 253 static void 254 px_ib_cpu_ticks_to_ih_nsec(px_ib_t *ib_p, px_ih_t *ih_p, uint32_t cpu_id) 255 { 256 extern kmutex_t pxintr_ks_template_lock; 257 hrtime_t ticks; 258 259 /* 260 * Because we are updating two fields in ih_t we must lock 261 * pxintr_ks_template_lock to prevent someone from reading the 262 * kstats after we set ih_ticks to 0 and before we increment 263 * ih_nsec to compensate. 264 * 265 * We must also protect against the interrupt arriving and incrementing 266 * ih_ticks between the time we read it and when we reset it to 0. 267 * To do this we use atomic_swap. 268 */ 269 270 ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex)); 271 272 mutex_enter(&pxintr_ks_template_lock); 273 ticks = atomic_swap_64(&ih_p->ih_ticks, 0); 274 ih_p->ih_nsec += (uint64_t)tick2ns(ticks, cpu_id); 275 mutex_exit(&pxintr_ks_template_lock); 276 } 277 278 279 /* 280 * Redistribute interrupts of the specified weight. The first call has a weight 281 * of weight_max, which can be used to trigger initialization for 282 * redistribution. The inos with weight [weight_max, inf.) should be processed 283 * on the "weight == weight_max" call. This first call is followed by calls 284 * of decreasing weights, inos of that weight should be processed. The final 285 * call specifies a weight of zero, this can be used to trigger processing of 286 * stragglers. 287 */ 288 static void 289 px_ib_intr_redist(void *arg, int32_t weight_max, int32_t weight) 290 { 291 px_ib_t *ib_p = (px_ib_t *)arg; 292 px_t *px_p = ib_p->ib_px_p; 293 dev_info_t *dip = px_p->px_dip; 294 px_ino_t *ino_p; 295 px_ino_pil_t *ipil_p; 296 px_ih_t *ih_lst; 297 int32_t dweight = 0; 298 int i; 299 300 /* Redistribute internal interrupts */ 301 if (weight == 0) { 302 mutex_enter(&ib_p->ib_intr_lock); 303 px_ib_intr_dist_en(dip, intr_dist_cpuid(), 304 px_p->px_inos[PX_INTR_PEC], B_FALSE); 305 mutex_exit(&ib_p->ib_intr_lock); 306 307 px_hp_intr_redist(px_p); 308 } 309 310 /* Redistribute device interrupts */ 311 mutex_enter(&ib_p->ib_ino_lst_mutex); 312 px_msiq_redist(px_p); 313 314 for (ino_p = ib_p->ib_ino_lst; ino_p; ino_p = ino_p->ino_next_p) { 315 /* 316 * Recomputes the sum of interrupt weights of devices that 317 * share the same ino upon first call marked by 318 * (weight == weight_max). 319 */ 320 if (weight == weight_max) { 321 ino_p->ino_intr_weight = 0; 322 323 for (ipil_p = ino_p->ino_ipil_p; ipil_p; 324 ipil_p = ipil_p->ipil_next_p) { 325 for (i = 0, ih_lst = ipil_p->ipil_ih_head; 326 i < ipil_p->ipil_ih_size; i++, 327 ih_lst = ih_lst->ih_next) { 328 dweight = i_ddi_get_intr_weight( 329 ih_lst->ih_dip); 330 if (dweight > 0) 331 ino_p->ino_intr_weight += 332 dweight; 333 } 334 } 335 } 336 337 /* 338 * As part of redistributing weighted interrupts over cpus, 339 * nexus redistributes device interrupts and updates 340 * cpu weight. The purpose is for the most light weighted 341 * cpu to take the next interrupt and gain weight, therefore 342 * attention demanding device gains more cpu attention by 343 * making itself heavy. 344 */ 345 if ((weight == ino_p->ino_intr_weight) || 346 ((weight >= weight_max) && 347 (ino_p->ino_intr_weight >= weight_max))) { 348 uint32_t orig_cpuid = ino_p->ino_cpuid; 349 350 if (cpu[orig_cpuid] == NULL) 351 orig_cpuid = CPU->cpu_id; 352 353 DBG(DBG_IB, dip, "px_ib_intr_redist: sysino 0x%llx " 354 "current cpuid 0x%x current default cpuid 0x%x\n", 355 ino_p->ino_sysino, ino_p->ino_cpuid, 356 ino_p->ino_default_cpuid); 357 358 /* select target cpuid and mark ino established */ 359 if (ino_p->ino_default_cpuid == -1) 360 ino_p->ino_cpuid = ino_p->ino_default_cpuid = 361 intr_dist_cpuid(); 362 else if ((ino_p->ino_cpuid != 363 ino_p->ino_default_cpuid) && 364 (cpu_intr_on(cpu[ino_p->ino_default_cpuid]))) 365 ino_p->ino_cpuid = ino_p->ino_default_cpuid; 366 else if (!cpu_intr_on(cpu[ino_p->ino_cpuid])) 367 ino_p->ino_cpuid = intr_dist_cpuid(); 368 369 DBG(DBG_IB, dip, "px_ib_intr_redist: sysino 0x%llx " 370 "new cpuid 0x%x new default cpuid 0x%x\n", 371 ino_p->ino_sysino, ino_p->ino_cpuid, 372 ino_p->ino_default_cpuid); 373 374 /* Add device weight to targeted cpu. */ 375 for (ipil_p = ino_p->ino_ipil_p; ipil_p; 376 ipil_p = ipil_p->ipil_next_p) { 377 for (i = 0, ih_lst = ipil_p->ipil_ih_head; 378 i < ipil_p->ipil_ih_size; i++, 379 ih_lst = ih_lst->ih_next) { 380 381 dweight = i_ddi_get_intr_weight( 382 ih_lst->ih_dip); 383 intr_dist_cpuid_add_device_weight( 384 ino_p->ino_cpuid, ih_lst->ih_dip, 385 dweight); 386 387 /* 388 * Different cpus may have different 389 * clock speeds. to account for this, 390 * whenever an interrupt is moved to a 391 * new CPU, we convert the accumulated 392 * ticks into nsec, based upon the clock 393 * rate of the prior CPU. 394 * 395 * It is possible that the prior CPU no 396 * longer exists. In this case, fall 397 * back to using this CPU's clock rate. 398 * 399 * Note that the value in ih_ticks has 400 * already been corrected for any power 401 * savings mode which might have been 402 * in effect. 403 */ 404 px_ib_cpu_ticks_to_ih_nsec(ib_p, ih_lst, 405 orig_cpuid); 406 } 407 } 408 409 /* enable interrupt on new targeted cpu */ 410 px_ib_intr_dist_en(dip, ino_p->ino_cpuid, 411 ino_p->ino_ino, B_TRUE); 412 } 413 } 414 mutex_exit(&ib_p->ib_ino_lst_mutex); 415 } 416 417 /* 418 * Reset interrupts to IDLE. This function is called during 419 * panic handling after redistributing interrupts; it's needed to 420 * support dumping to network devices after 'sync' from OBP. 421 * 422 * N.B. This routine runs in a context where all other threads 423 * are permanently suspended. 424 */ 425 static uint_t 426 px_ib_intr_reset(void *arg) 427 { 428 px_ib_t *ib_p = (px_ib_t *)arg; 429 430 DBG(DBG_IB, ib_p->ib_px_p->px_dip, "px_ib_intr_reset\n"); 431 432 if (px_lib_intr_reset(ib_p->ib_px_p->px_dip) != DDI_SUCCESS) 433 return (BF_FATAL); 434 435 return (BF_NONE); 436 } 437 438 /* 439 * Locate px_ino_t structure on ib_p->ib_ino_lst according to ino# 440 * returns NULL if not found. 441 */ 442 px_ino_t * 443 px_ib_locate_ino(px_ib_t *ib_p, devino_t ino_num) 444 { 445 px_ino_t *ino_p = ib_p->ib_ino_lst; 446 447 ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex)); 448 449 for (; ino_p && ino_p->ino_ino != ino_num; ino_p = ino_p->ino_next_p) 450 ; 451 452 return (ino_p); 453 } 454 455 px_ino_t * 456 px_ib_alloc_ino(px_ib_t *ib_p, devino_t ino_num) 457 { 458 sysino_t sysino; 459 px_ino_t *ino_p; 460 461 if (px_lib_intr_devino_to_sysino(ib_p->ib_px_p->px_dip, 462 ino_num, &sysino) != DDI_SUCCESS) 463 return (NULL); 464 465 ino_p = kmem_zalloc(sizeof (px_ino_t), KM_SLEEP); 466 467 ino_p->ino_next_p = ib_p->ib_ino_lst; 468 ib_p->ib_ino_lst = ino_p; 469 470 ino_p->ino_ino = ino_num; 471 ino_p->ino_sysino = sysino; 472 ino_p->ino_ib_p = ib_p; 473 ino_p->ino_unclaimed_intrs = 0; 474 ino_p->ino_lopil = 0; 475 ino_p->ino_cpuid = ino_p->ino_default_cpuid = (cpuid_t)-1; 476 477 return (ino_p); 478 } 479 480 px_ino_pil_t * 481 px_ib_new_ino_pil(px_ib_t *ib_p, devino_t ino_num, uint_t pil, px_ih_t *ih_p) 482 { 483 px_ino_pil_t *ipil_p = kmem_zalloc(sizeof (px_ino_pil_t), KM_SLEEP); 484 px_ino_t *ino_p; 485 486 if ((ino_p = px_ib_locate_ino(ib_p, ino_num)) == NULL) 487 ino_p = px_ib_alloc_ino(ib_p, ino_num); 488 489 ASSERT(ino_p != NULL); 490 491 ih_p->ih_next = ih_p; 492 ipil_p->ipil_pil = pil; 493 ipil_p->ipil_ih_head = ih_p; 494 ipil_p->ipil_ih_tail = ih_p; 495 ipil_p->ipil_ih_start = ih_p; 496 ipil_p->ipil_ih_size = 1; 497 ipil_p->ipil_ino_p = ino_p; 498 499 ipil_p->ipil_next_p = ino_p->ino_ipil_p; 500 ino_p->ino_ipil_p = ipil_p; 501 ino_p->ino_ipil_size++; 502 503 if ((ino_p->ino_lopil == 0) || (ino_p->ino_lopil > pil)) 504 ino_p->ino_lopil = pil; 505 506 return (ipil_p); 507 } 508 509 void 510 px_ib_delete_ino_pil(px_ib_t *ib_p, px_ino_pil_t *ipil_p) 511 { 512 px_ino_t *ino_p = ipil_p->ipil_ino_p; 513 ushort_t pil = ipil_p->ipil_pil; 514 px_ino_pil_t *prev, *next; 515 516 ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex)); 517 518 if (ino_p->ino_ipil_p == ipil_p) 519 ino_p->ino_ipil_p = ipil_p->ipil_next_p; 520 else { 521 for (prev = next = ino_p->ino_ipil_p; next != ipil_p; 522 prev = next, next = next->ipil_next_p) 523 ; 524 525 if (prev) 526 prev->ipil_next_p = ipil_p->ipil_next_p; 527 } 528 529 kmem_free(ipil_p, sizeof (px_ino_pil_t)); 530 531 if ((--ino_p->ino_ipil_size) && (ino_p->ino_lopil == pil)) { 532 for (next = ino_p->ino_ipil_p, pil = next->ipil_pil; 533 next; next = next->ipil_next_p) { 534 535 if (pil > next->ipil_pil) 536 pil = next->ipil_pil; 537 } 538 539 /* 540 * Value stored in pil should be the lowest pil. 541 */ 542 ino_p->ino_lopil = pil; 543 } 544 545 if (ino_p->ino_ipil_size) 546 return; 547 548 ino_p->ino_lopil = 0; 549 550 if (ino_p->ino_msiq_p) 551 return; 552 553 if (ib_p->ib_ino_lst == ino_p) 554 ib_p->ib_ino_lst = ino_p->ino_next_p; 555 else { 556 px_ino_t *list = ib_p->ib_ino_lst; 557 558 for (; list->ino_next_p != ino_p; list = list->ino_next_p) 559 ; 560 list->ino_next_p = ino_p->ino_next_p; 561 } 562 } 563 564 /* 565 * Free all ino when we are detaching. 566 */ 567 void 568 px_ib_free_ino_all(px_ib_t *ib_p) 569 { 570 px_ino_t *ino_p = ib_p->ib_ino_lst; 571 px_ino_t *next = NULL; 572 573 while (ino_p) { 574 next = ino_p->ino_next_p; 575 kmem_free(ino_p, sizeof (px_ino_t)); 576 ino_p = next; 577 } 578 } 579 580 /* 581 * Locate px_ino_pil_t structure on ino_p->ino_ipil_p according to ino# 582 * returns NULL if not found. 583 */ 584 px_ino_pil_t * 585 px_ib_ino_locate_ipil(px_ino_t *ino_p, uint_t pil) 586 { 587 px_ino_pil_t *ipil_p = ino_p->ino_ipil_p; 588 589 for (; ipil_p && ipil_p->ipil_pil != pil; ipil_p = ipil_p->ipil_next_p) 590 ; 591 592 return (ipil_p); 593 } 594 595 int 596 px_ib_ino_add_intr(px_t *px_p, px_ino_pil_t *ipil_p, px_ih_t *ih_p) 597 { 598 px_ino_t *ino_p = ipil_p->ipil_ino_p; 599 px_ib_t *ib_p = ino_p->ino_ib_p; 600 devino_t ino = ino_p->ino_ino; 601 sysino_t sysino = ino_p->ino_sysino; 602 dev_info_t *dip = px_p->px_dip; 603 cpuid_t curr_cpu; 604 hrtime_t start_time; 605 intr_state_t intr_state; 606 int ret = DDI_SUCCESS; 607 608 ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex)); 609 ASSERT(ib_p == px_p->px_ib_p); 610 611 DBG(DBG_IB, dip, "px_ib_ino_add_intr ino=%x\n", ino_p->ino_ino); 612 613 /* Disable the interrupt */ 614 if ((ret = px_lib_intr_gettarget(dip, sysino, 615 &curr_cpu)) != DDI_SUCCESS) { 616 DBG(DBG_IB, dip, 617 "px_ib_ino_add_intr px_intr_gettarget() failed\n"); 618 619 return (ret); 620 } 621 622 PX_INTR_DISABLE(dip, sysino); 623 624 /* Busy wait on pending interrupt */ 625 for (start_time = gethrtime(); !panicstr && 626 ((ret = px_lib_intr_getstate(dip, sysino, &intr_state)) 627 == DDI_SUCCESS) && (intr_state == INTR_DELIVERED_STATE); /* */) { 628 if (gethrtime() - start_time > px_intrpend_timeout) { 629 cmn_err(CE_WARN, "%s%d: px_ib_ino_add_intr: pending " 630 "sysino 0x%lx(ino 0x%x) timeout", 631 ddi_driver_name(dip), ddi_get_instance(dip), 632 sysino, ino); 633 634 ret = DDI_FAILURE; 635 break; 636 } 637 } 638 639 /* 640 * If the interrupt was previously blocked (left in pending state) 641 * because of jabber we need to clear the pending state in case the 642 * jabber has gone away. 643 */ 644 if (ino_p->ino_unclaimed_intrs > px_unclaimed_intr_max) { 645 cmn_err(CE_WARN, 646 "%s%d: px_ib_ino_add_intr: ino 0x%x has been unblocked", 647 ddi_driver_name(dip), ddi_get_instance(dip), ino); 648 649 ino_p->ino_unclaimed_intrs = 0; 650 ret = px_lib_intr_setstate(dip, sysino, INTR_IDLE_STATE); 651 } 652 653 if (ret != DDI_SUCCESS) { 654 DBG(DBG_IB, dip, "px_ib_ino_add_intr: failed, " 655 "ino 0x%x sysino 0x%x\n", ino, sysino); 656 657 return (ret); 658 } 659 660 /* Link up px_ih_t */ 661 ih_p->ih_next = ipil_p->ipil_ih_head; 662 ipil_p->ipil_ih_tail->ih_next = ih_p; 663 ipil_p->ipil_ih_tail = ih_p; 664 665 ipil_p->ipil_ih_start = ipil_p->ipil_ih_head; 666 ipil_p->ipil_ih_size++; 667 668 /* Re-enable interrupt */ 669 PX_INTR_ENABLE(dip, sysino, curr_cpu); 670 671 return (ret); 672 } 673 674 /* 675 * Removes px_ih_t from the ino's link list. 676 * uses hardware mutex to lock out interrupt threads. 677 * Side effects: interrupt belongs to that ino is turned off on return. 678 * if we are sharing PX slot with other inos, the caller needs 679 * to turn it back on. 680 */ 681 int 682 px_ib_ino_rem_intr(px_t *px_p, px_ino_pil_t *ipil_p, px_ih_t *ih_p) 683 { 684 px_ino_t *ino_p = ipil_p->ipil_ino_p; 685 devino_t ino = ino_p->ino_ino; 686 sysino_t sysino = ino_p->ino_sysino; 687 dev_info_t *dip = px_p->px_dip; 688 px_ih_t *ih_lst = ipil_p->ipil_ih_head; 689 hrtime_t start_time; 690 intr_state_t intr_state; 691 int i, ret = DDI_SUCCESS; 692 693 ASSERT(MUTEX_HELD(&ino_p->ino_ib_p->ib_ino_lst_mutex)); 694 695 DBG(DBG_IB, px_p->px_dip, "px_ib_ino_rem_intr ino=%x\n", 696 ino_p->ino_ino); 697 698 /* Disable the interrupt */ 699 PX_INTR_DISABLE(px_p->px_dip, sysino); 700 701 if (ipil_p->ipil_ih_size == 1) { 702 if (ih_lst != ih_p) 703 goto not_found; 704 705 /* No need to set head/tail as ino_p will be freed */ 706 goto reset; 707 } 708 709 /* Busy wait on pending interrupt */ 710 for (start_time = gethrtime(); !panicstr && 711 ((ret = px_lib_intr_getstate(dip, sysino, &intr_state)) 712 == DDI_SUCCESS) && (intr_state == INTR_DELIVERED_STATE); /* */) { 713 if (gethrtime() - start_time > px_intrpend_timeout) { 714 cmn_err(CE_WARN, "%s%d: px_ib_ino_rem_intr: pending " 715 "sysino 0x%lx(ino 0x%x) timeout", 716 ddi_driver_name(dip), ddi_get_instance(dip), 717 sysino, ino); 718 719 ret = DDI_FAILURE; 720 break; 721 } 722 } 723 724 /* 725 * If the interrupt was previously blocked (left in pending state) 726 * because of jabber we need to clear the pending state in case the 727 * jabber has gone away. 728 */ 729 if (ino_p->ino_unclaimed_intrs > px_unclaimed_intr_max) { 730 cmn_err(CE_WARN, "%s%d: px_ib_ino_rem_intr: " 731 "ino 0x%x has been unblocked", 732 ddi_driver_name(dip), ddi_get_instance(dip), ino); 733 734 ino_p->ino_unclaimed_intrs = 0; 735 ret = px_lib_intr_setstate(dip, sysino, INTR_IDLE_STATE); 736 } 737 738 if (ret != DDI_SUCCESS) { 739 DBG(DBG_IB, dip, "px_ib_ino_rem_intr: failed, " 740 "ino 0x%x sysino 0x%x\n", ino, sysino); 741 742 return (ret); 743 } 744 745 /* Search the link list for ih_p */ 746 for (i = 0; (i < ipil_p->ipil_ih_size) && 747 (ih_lst->ih_next != ih_p); i++, ih_lst = ih_lst->ih_next) 748 ; 749 750 if (ih_lst->ih_next != ih_p) 751 goto not_found; 752 753 /* Remove ih_p from the link list and maintain the head/tail */ 754 ih_lst->ih_next = ih_p->ih_next; 755 756 if (ipil_p->ipil_ih_head == ih_p) 757 ipil_p->ipil_ih_head = ih_p->ih_next; 758 if (ipil_p->ipil_ih_tail == ih_p) 759 ipil_p->ipil_ih_tail = ih_lst; 760 761 ipil_p->ipil_ih_start = ipil_p->ipil_ih_head; 762 763 reset: 764 if (ih_p->ih_config_handle) 765 pci_config_teardown(&ih_p->ih_config_handle); 766 if (ih_p->ih_ksp != NULL) 767 kstat_delete(ih_p->ih_ksp); 768 769 kmem_free(ih_p, sizeof (px_ih_t)); 770 ipil_p->ipil_ih_size--; 771 772 return (ret); 773 774 not_found: 775 DBG(DBG_R_INTX, ino_p->ino_ib_p->ib_px_p->px_dip, 776 "ino_p=%x does not have ih_p=%x\n", ino_p, ih_p); 777 778 return (DDI_FAILURE); 779 } 780 781 px_ih_t * 782 px_ib_intr_locate_ih(px_ino_pil_t *ipil_p, dev_info_t *rdip, 783 uint32_t inum, msiq_rec_type_t rec_type, msgcode_t msg_code) 784 { 785 px_ih_t *ih_p = ipil_p->ipil_ih_head; 786 int i; 787 788 for (i = 0; i < ipil_p->ipil_ih_size; i++, ih_p = ih_p->ih_next) { 789 if ((ih_p->ih_dip == rdip) && (ih_p->ih_inum == inum) && 790 (ih_p->ih_rec_type == rec_type) && 791 (ih_p->ih_msg_code == msg_code)) 792 return (ih_p); 793 } 794 795 return ((px_ih_t *)NULL); 796 } 797 798 px_ih_t * 799 px_ib_alloc_ih(dev_info_t *rdip, uint32_t inum, 800 uint_t (*int_handler)(caddr_t int_handler_arg1, caddr_t int_handler_arg2), 801 caddr_t int_handler_arg1, caddr_t int_handler_arg2, 802 msiq_rec_type_t rec_type, msgcode_t msg_code) 803 { 804 px_ih_t *ih_p; 805 806 ih_p = kmem_alloc(sizeof (px_ih_t), KM_SLEEP); 807 ih_p->ih_dip = rdip; 808 ih_p->ih_inum = inum; 809 ih_p->ih_intr_state = PX_INTR_STATE_DISABLE; 810 ih_p->ih_handler = int_handler; 811 ih_p->ih_handler_arg1 = int_handler_arg1; 812 ih_p->ih_handler_arg2 = int_handler_arg2; 813 ih_p->ih_config_handle = NULL; 814 ih_p->ih_rec_type = rec_type; 815 ih_p->ih_msg_code = msg_code; 816 ih_p->ih_nsec = 0; 817 ih_p->ih_ticks = 0; 818 ih_p->ih_ksp = NULL; 819 820 return (ih_p); 821 } 822 823 int 824 px_ib_update_intr_state(px_t *px_p, dev_info_t *rdip, 825 uint_t inum, devino_t ino, uint_t pil, 826 uint_t new_intr_state, msiq_rec_type_t rec_type, 827 msgcode_t msg_code) 828 { 829 px_ib_t *ib_p = px_p->px_ib_p; 830 px_ino_t *ino_p; 831 px_ino_pil_t *ipil_p; 832 px_ih_t *ih_p; 833 int ret = DDI_FAILURE; 834 835 DBG(DBG_IB, px_p->px_dip, "px_ib_update_intr_state: %s%d " 836 "inum %x devino %x pil %x state %x\n", ddi_driver_name(rdip), 837 ddi_get_instance(rdip), inum, ino, pil, new_intr_state); 838 839 mutex_enter(&ib_p->ib_ino_lst_mutex); 840 841 ino_p = px_ib_locate_ino(ib_p, ino); 842 if (ino_p && (ipil_p = px_ib_ino_locate_ipil(ino_p, pil))) { 843 if (ih_p = px_ib_intr_locate_ih(ipil_p, rdip, inum, rec_type, 844 msg_code)) { 845 ih_p->ih_intr_state = new_intr_state; 846 ret = DDI_SUCCESS; 847 } 848 } 849 850 mutex_exit(&ib_p->ib_ino_lst_mutex); 851 return (ret); 852 } 853 854 855 /* 856 * Get interrupt CPU for a given ino. 857 * Return info only for inos which are already mapped to devices. 858 */ 859 /*ARGSUSED*/ 860 int 861 px_ib_get_intr_target(px_t *px_p, devino_t ino, cpuid_t *cpu_id_p) 862 { 863 dev_info_t *dip = px_p->px_dip; 864 sysino_t sysino; 865 int ret; 866 867 DBG(DBG_IB, px_p->px_dip, "px_ib_get_intr_target: devino %x\n", ino); 868 869 /* Convert leaf-wide intr to system-wide intr */ 870 if (px_lib_intr_devino_to_sysino(dip, ino, &sysino) != DDI_SUCCESS) 871 return (DDI_FAILURE); 872 873 ret = px_lib_intr_gettarget(dip, sysino, cpu_id_p); 874 875 DBG(DBG_IB, px_p->px_dip, "px_ib_get_intr_target: cpu_id %x\n", 876 *cpu_id_p); 877 878 return (ret); 879 } 880 881 882 /* 883 * Associate a new CPU with a given ino. 884 * Operate only on INOs which are already mapped to devices. 885 */ 886 int 887 px_ib_set_intr_target(px_t *px_p, devino_t ino, cpuid_t cpu_id) 888 { 889 dev_info_t *dip = px_p->px_dip; 890 cpuid_t old_cpu_id; 891 sysino_t sysino; 892 int ret = DDI_SUCCESS; 893 extern const int _ncpu; 894 extern cpu_t *cpu[]; 895 896 DBG(DBG_IB, px_p->px_dip, "px_ib_set_intr_target: devino %x " 897 "cpu_id %x\n", ino, cpu_id); 898 899 mutex_enter(&cpu_lock); 900 901 /* Convert leaf-wide intr to system-wide intr */ 902 if (px_lib_intr_devino_to_sysino(dip, ino, &sysino) != DDI_SUCCESS) { 903 ret = DDI_FAILURE; 904 goto done; 905 } 906 907 if (px_lib_intr_gettarget(dip, sysino, &old_cpu_id) != DDI_SUCCESS) { 908 ret = DDI_FAILURE; 909 goto done; 910 } 911 912 /* 913 * Get lock, validate cpu and write it. 914 */ 915 if ((cpu_id < _ncpu) && (cpu[cpu_id] && cpu_is_online(cpu[cpu_id]))) { 916 DBG(DBG_IB, dip, "px_ib_set_intr_target: Enabling CPU %d\n", 917 cpu_id); 918 px_ib_intr_dist_en(dip, cpu_id, ino, B_TRUE); 919 px_ib_log_new_cpu(px_p->px_ib_p, old_cpu_id, cpu_id, ino); 920 } else { /* Invalid cpu */ 921 DBG(DBG_IB, dip, "px_ib_set_intr_target: Invalid cpuid %x\n", 922 cpu_id); 923 ret = DDI_EINVAL; 924 } 925 926 done: 927 mutex_exit(&cpu_lock); 928 return (ret); 929 } 930 931 hrtime_t px_ib_msix_retarget_timeout = 120ll * NANOSEC; /* 120 seconds */ 932 933 /* 934 * Associate a new CPU with a given MSI/X. 935 * Operate only on MSI/Xs which are already mapped to devices. 936 */ 937 int 938 px_ib_set_msix_target(px_t *px_p, ddi_intr_handle_impl_t *hdlp, 939 msinum_t msi_num, cpuid_t cpu_id) 940 { 941 px_ib_t *ib_p = px_p->px_ib_p; 942 px_msi_state_t *msi_state_p = &px_p->px_ib_p->ib_msi_state; 943 dev_info_t *dip = px_p->px_dip; 944 dev_info_t *rdip = hdlp->ih_dip; 945 msiqid_t msiq_id, old_msiq_id; 946 pci_msi_state_t msi_state; 947 msiq_rec_type_t msiq_rec_type; 948 msi_type_t msi_type; 949 px_ino_t *ino_p; 950 px_ih_t *ih_p, *old_ih_p; 951 cpuid_t old_cpu_id; 952 hrtime_t start_time, end_time; 953 int ret = DDI_SUCCESS; 954 extern const int _ncpu; 955 extern cpu_t *cpu[]; 956 957 DBG(DBG_IB, dip, "px_ib_set_msix_target: msi_num %x new cpu_id %x\n", 958 msi_num, cpu_id); 959 960 mutex_enter(&cpu_lock); 961 962 /* Check for MSI64 support */ 963 if ((hdlp->ih_cap & DDI_INTR_FLAG_MSI64) && msi_state_p->msi_addr64) { 964 msiq_rec_type = MSI64_REC; 965 msi_type = MSI64_TYPE; 966 } else { 967 msiq_rec_type = MSI32_REC; 968 msi_type = MSI32_TYPE; 969 } 970 971 if ((ret = px_lib_msi_getmsiq(dip, msi_num, 972 &old_msiq_id)) != DDI_SUCCESS) { 973 974 mutex_exit(&cpu_lock); 975 return (ret); 976 } 977 978 DBG(DBG_IB, dip, "px_ib_set_msix_target: current msiq 0x%x\n", 979 old_msiq_id); 980 981 if ((ret = px_ib_get_intr_target(px_p, 982 px_msiqid_to_devino(px_p, old_msiq_id), 983 &old_cpu_id)) != DDI_SUCCESS) { 984 985 mutex_exit(&cpu_lock); 986 return (ret); 987 } 988 989 DBG(DBG_IB, dip, "px_ib_set_msix_target: current cpuid 0x%x\n", 990 old_cpu_id); 991 992 if (cpu_id == old_cpu_id) { 993 994 mutex_exit(&cpu_lock); 995 return (DDI_SUCCESS); 996 } 997 998 /* 999 * Get lock, validate cpu and write it. 1000 */ 1001 if (!((cpu_id < _ncpu) && (cpu[cpu_id] && 1002 cpu_is_online(cpu[cpu_id])))) { 1003 /* Invalid cpu */ 1004 DBG(DBG_IB, dip, "px_ib_set_msix_target: Invalid cpuid %x\n", 1005 cpu_id); 1006 1007 mutex_exit(&cpu_lock); 1008 return (DDI_EINVAL); 1009 } 1010 1011 DBG(DBG_IB, dip, "px_ib_set_msix_target: Enabling CPU %d\n", cpu_id); 1012 1013 if ((ret = px_add_msiq_intr(dip, rdip, hdlp, 1014 msiq_rec_type, msi_num, cpu_id, &msiq_id)) != DDI_SUCCESS) { 1015 DBG(DBG_IB, dip, "px_ib_set_msix_target: Add MSI handler " 1016 "failed, rdip 0x%p msi 0x%x\n", rdip, msi_num); 1017 1018 mutex_exit(&cpu_lock); 1019 return (ret); 1020 } 1021 1022 if ((ret = px_lib_msi_setmsiq(dip, msi_num, 1023 msiq_id, msi_type)) != DDI_SUCCESS) { 1024 (void) px_rem_msiq_intr(dip, rdip, 1025 hdlp, msiq_rec_type, msi_num, msiq_id); 1026 1027 mutex_exit(&cpu_lock); 1028 return (ret); 1029 } 1030 1031 if ((ret = px_ib_update_intr_state(px_p, rdip, hdlp->ih_inum, 1032 px_msiqid_to_devino(px_p, msiq_id), hdlp->ih_pri, 1033 PX_INTR_STATE_ENABLE, msiq_rec_type, msi_num)) != DDI_SUCCESS) { 1034 (void) px_rem_msiq_intr(dip, rdip, 1035 hdlp, msiq_rec_type, msi_num, msiq_id); 1036 1037 mutex_exit(&cpu_lock); 1038 return (ret); 1039 } 1040 1041 mutex_exit(&cpu_lock); 1042 mutex_enter(&ib_p->ib_ino_lst_mutex); 1043 1044 ino_p = px_ib_locate_ino(ib_p, px_msiqid_to_devino(px_p, old_msiq_id)); 1045 old_ih_p = px_ib_intr_locate_ih(px_ib_ino_locate_ipil(ino_p, 1046 hdlp->ih_pri), rdip, hdlp->ih_inum, msiq_rec_type, msi_num); 1047 old_ih_p->ih_retarget_flag = B_TRUE; 1048 1049 ino_p = px_ib_locate_ino(ib_p, px_msiqid_to_devino(px_p, msiq_id)); 1050 ih_p = px_ib_intr_locate_ih(px_ib_ino_locate_ipil(ino_p, hdlp->ih_pri), 1051 rdip, hdlp->ih_inum, msiq_rec_type, msi_num); 1052 ih_p->ih_retarget_flag = B_TRUE; 1053 1054 if ((ret = px_lib_msi_getstate(dip, msi_num, 1055 &msi_state)) != DDI_SUCCESS) { 1056 (void) px_rem_msiq_intr(dip, rdip, 1057 hdlp, msiq_rec_type, msi_num, msiq_id); 1058 1059 mutex_exit(&ib_p->ib_ino_lst_mutex); 1060 return (ret); 1061 } 1062 1063 if (msi_state == PCI_MSI_STATE_IDLE) 1064 ih_p->ih_retarget_flag = B_FALSE; 1065 1066 start_time = gethrtime(); 1067 while ((ih_p->ih_retarget_flag == B_TRUE) && 1068 (old_ih_p->ih_retarget_flag == B_TRUE)) { 1069 if ((end_time = (gethrtime() - start_time)) > 1070 px_ib_msix_retarget_timeout) { 1071 cmn_err(CE_WARN, "MSIX retarget %x is not completed, " 1072 "even after waiting %llx ticks\n", 1073 msi_num, end_time); 1074 1075 break; 1076 } 1077 1078 /* Wait for one second */ 1079 delay(drv_usectohz(1000000)); 1080 } 1081 1082 mutex_exit(&ib_p->ib_ino_lst_mutex); 1083 1084 ret = px_rem_msiq_intr(dip, rdip, 1085 hdlp, msiq_rec_type, msi_num, old_msiq_id); 1086 1087 return (ret); 1088 } 1089 1090 1091 static void 1092 px_fill_in_intr_devs(pcitool_intr_dev_t *dev, char *driver_name, 1093 char *path_name, int instance) 1094 { 1095 (void) strncpy(dev->driver_name, driver_name, MAXMODCONFNAME-1); 1096 dev->driver_name[MAXMODCONFNAME] = '\0'; 1097 (void) strncpy(dev->path, path_name, MAXPATHLEN-1); 1098 dev->dev_inst = instance; 1099 } 1100 1101 1102 /* 1103 * Return the dips or number of dips associated with a given interrupt block. 1104 * Size of dips array arg is passed in as dips_ret arg. 1105 * Number of dips returned is returned in dips_ret arg. 1106 * Array of dips gets returned in the dips argument. 1107 * Function returns number of dips existing for the given interrupt block. 1108 * 1109 * Note: this function assumes an enabled/valid INO, which is why it returns 1110 * the px node and (Internal) when it finds no other devices (and *devs_ret > 0) 1111 */ 1112 uint8_t 1113 pxtool_ib_get_ino_devs(px_t *px_p, uint32_t ino, uint32_t msi_num, 1114 uint8_t *devs_ret, pcitool_intr_dev_t *devs) 1115 { 1116 px_ib_t *ib_p = px_p->px_ib_p; 1117 px_ino_t *ino_p; 1118 px_ino_pil_t *ipil_p; 1119 px_ih_t *ih_p; 1120 uint32_t num_devs = 0; 1121 char pathname[MAXPATHLEN]; 1122 int i, j; 1123 1124 mutex_enter(&ib_p->ib_ino_lst_mutex); 1125 ino_p = px_ib_locate_ino(ib_p, ino); 1126 if (ino_p != NULL) { 1127 for (j = 0, ipil_p = ino_p->ino_ipil_p; ipil_p; 1128 ipil_p = ipil_p->ipil_next_p) { 1129 num_devs += ipil_p->ipil_ih_size; 1130 1131 for (i = 0, ih_p = ipil_p->ipil_ih_head; 1132 ((i < ipil_p->ipil_ih_size) && (i < *devs_ret)); 1133 i++, j++, ih_p = ih_p->ih_next) { 1134 (void) ddi_pathname(ih_p->ih_dip, pathname); 1135 1136 if (ih_p->ih_msg_code == msi_num) { 1137 num_devs = *devs_ret = 1; 1138 px_fill_in_intr_devs(&devs[0], 1139 (char *)ddi_driver_name( 1140 ih_p->ih_dip), pathname, 1141 ddi_get_instance(ih_p->ih_dip)); 1142 goto done; 1143 } 1144 1145 px_fill_in_intr_devs(&devs[j], 1146 (char *)ddi_driver_name(ih_p->ih_dip), 1147 pathname, ddi_get_instance(ih_p->ih_dip)); 1148 } 1149 } 1150 1151 *devs_ret = j; 1152 } else if (*devs_ret > 0) { 1153 (void) ddi_pathname(px_p->px_dip, pathname); 1154 strcat(pathname, " (Internal)"); 1155 px_fill_in_intr_devs(&devs[0], 1156 (char *)ddi_driver_name(px_p->px_dip), pathname, 1157 ddi_get_instance(px_p->px_dip)); 1158 num_devs = *devs_ret = 1; 1159 } 1160 1161 done: 1162 mutex_exit(&ib_p->ib_ino_lst_mutex); 1163 1164 return (num_devs); 1165 } 1166 1167 1168 int 1169 pxtool_ib_get_msi_info(px_t *px_p, devino_t ino, msinum_t msi_num, 1170 ddi_intr_handle_impl_t *hdlp) 1171 { 1172 px_ib_t *ib_p = px_p->px_ib_p; 1173 px_ino_t *ino_p; 1174 px_ino_pil_t *ipil_p; 1175 px_ih_t *ih_p; 1176 int i; 1177 1178 mutex_enter(&ib_p->ib_ino_lst_mutex); 1179 1180 if ((ino_p = px_ib_locate_ino(ib_p, ino)) == NULL) { 1181 mutex_exit(&ib_p->ib_ino_lst_mutex); 1182 return (DDI_FAILURE); 1183 } 1184 1185 for (ipil_p = ino_p->ino_ipil_p; ipil_p; 1186 ipil_p = ipil_p->ipil_next_p) { 1187 for (i = 0, ih_p = ipil_p->ipil_ih_head; 1188 ((i < ipil_p->ipil_ih_size) && ih_p); 1189 i++, ih_p = ih_p->ih_next) { 1190 1191 if (ih_p->ih_msg_code != msi_num) 1192 continue; 1193 1194 hdlp->ih_dip = ih_p->ih_dip; 1195 hdlp->ih_inum = ih_p->ih_inum; 1196 hdlp->ih_cb_func = ih_p->ih_handler; 1197 hdlp->ih_cb_arg1 = ih_p->ih_handler_arg1; 1198 hdlp->ih_cb_arg2 = ih_p->ih_handler_arg2; 1199 if (ih_p->ih_rec_type == MSI64_REC) 1200 hdlp->ih_cap = DDI_INTR_FLAG_MSI64; 1201 hdlp->ih_pri = ipil_p->ipil_pil; 1202 hdlp->ih_ver = DDI_INTR_VERSION; 1203 1204 mutex_exit(&ib_p->ib_ino_lst_mutex); 1205 return (DDI_SUCCESS); 1206 } 1207 } 1208 1209 mutex_exit(&ib_p->ib_ino_lst_mutex); 1210 return (DDI_FAILURE); 1211 } 1212 1213 void 1214 px_ib_log_new_cpu(px_ib_t *ib_p, cpuid_t old_cpu_id, cpuid_t new_cpu_id, 1215 uint32_t ino) 1216 { 1217 px_ino_t *ino_p; 1218 px_ino_pil_t *ipil_p; 1219 px_ih_t *ih_p; 1220 int i; 1221 1222 mutex_enter(&ib_p->ib_ino_lst_mutex); 1223 1224 /* Log in OS data structures the new CPU. */ 1225 if (ino_p = px_ib_locate_ino(ib_p, ino)) { 1226 1227 /* Log in OS data structures the new CPU. */ 1228 ino_p->ino_cpuid = new_cpu_id; 1229 1230 for (ipil_p = ino_p->ino_ipil_p; ipil_p; 1231 ipil_p = ipil_p->ipil_next_p) { 1232 for (i = 0, ih_p = ipil_p->ipil_ih_head; 1233 (i < ipil_p->ipil_ih_size); 1234 i++, ih_p = ih_p->ih_next) { 1235 /* 1236 * Account for any residual time 1237 * to be logged for old cpu. 1238 */ 1239 px_ib_cpu_ticks_to_ih_nsec(ib_p, 1240 ih_p, old_cpu_id); 1241 } 1242 } 1243 } 1244 1245 mutex_exit(&ib_p->ib_ino_lst_mutex); 1246 } 1247