/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include <sys/sysmacros.h>
#include <sys/stack.h>
#include <sys/cpuvar.h>
#include <sys/ivintr.h>
#include <sys/intreg.h>
#include <sys/membar.h>
#include <sys/kmem.h>
#include <sys/intr.h>
#include <sys/sunndi.h>
#include <sys/cmn_err.h>
#include <sys/privregs.h>
#include <sys/systm.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/x_call.h>
#include <vm/seg_kp.h>
#include <sys/debug.h>
#include <sys/cyclic.h>

#include <sys/cpu_sgnblk_defs.h>

/* Global locks which protect the interrupt distribution lists */
static kmutex_t intr_dist_lock;
static kmutex_t intr_dist_cpu_lock;

/* Head of the interrupt distribution lists */
static struct intr_dist *intr_dist_head = NULL;
static struct intr_dist *intr_dist_whead = NULL;

uint64_t siron_inum;
uint64_t poke_cpu_inum;
uint_t poke_cpu_intr(caddr_t arg1, caddr_t arg2);

/*
 * Note:
 * siron_pending was originally created to prevent a resource over-consumption
 * bug in setsoftint (exhaustion of the interrupt pool free list).
 * Its original purpose is obsolete now that setsoftint uses iv_pending.
 * However, siron_pending stayed around, acting as a second gatekeeper
 * preventing soft interrupts from being queued. In this capacity, it can lead
 * to hangs on MP systems, where due to global visibility issues it can end up
 * set while iv_pending is reset, preventing soft interrupts from ever being
 * processed. In addition to its gatekeeper role, intr_init also uses it to
 * flag the situation where siron() was called before siron_inum had been
 * defined.
 *
 * siron() does not need an extra gatekeeper; any cpu that wishes should be
 * allowed to queue a soft interrupt. It is softint()'s job to ensure
 * correct handling of the queues. Therefore, siron_pending has been
 * stripped of its gatekeeper task, retaining only its intr_init job, where
 * it indicates that there is a pending need to call siron().
 */
int siron_pending;

int intr_policy = INTR_WEIGHTED_DIST;	/* interrupt distribution policy */
int intr_dist_debug = 0;
int32_t intr_dist_weight_max = 1;
int32_t intr_dist_weight_maxmax = 1000;
int intr_dist_weight_maxfactor = 2;
#define	INTR_DEBUG(args) if (intr_dist_debug) cmn_err args

/*
 * intr_init() - Interrupt initialization
 *	Initialize the system's interrupt vector table.
 */
void
intr_init(cpu_t *cp)
{
	extern uint_t softlevel1();

	init_ivintr();
	REGISTER_BBUS_INTR();

	siron_inum = add_softintr(PIL_1, softlevel1, 0, SOFTINT_ST);
	poke_cpu_inum = add_softintr(PIL_13, poke_cpu_intr, 0, SOFTINT_MT);
	cp->cpu_m.poke_cpu_outstanding = B_FALSE;

	mutex_init(&intr_dist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&intr_dist_cpu_lock, NULL, MUTEX_DEFAULT, NULL);

	/*
	 * A soft interrupt may have been requested prior to the initialization
	 * of soft interrupts. Soft interrupts can't be dispatched until after
	 * intr_init(), so we have to wait until now before we can dispatch the
	 * pending soft interrupt (if any).
	 */
	if (siron_pending) {
		siron_pending = 0;
		siron();
	}
}

/*
 * poke_cpu_intr - fall through when poke_cpu calls
 */
/* ARGSUSED */
uint_t
poke_cpu_intr(caddr_t arg1, caddr_t arg2)
{
	CPU->cpu_m.poke_cpu_outstanding = B_FALSE;
	membar_stld_stst();
	return (1);
}

/*
 * siron - primitive for sun/os/softint.c
 */
void
siron(void)
{
	if (siron_inum != 0)
		setsoftint(siron_inum);
	else
		siron_pending = 1;
}

/*
 * no_ivintr()
 *	called by setvecint_tl1() through sys_trap()
 *	vector interrupt received but not valid or not
 *	registered in intr_vec_table
 *	considered as a spurious mondo interrupt
 */
/* ARGSUSED */
void
no_ivintr(struct regs *rp, int inum, int pil)
{
	cmn_err(CE_WARN, "invalid vector intr: number 0x%x, pil 0x%x",
	    inum, pil);

#ifdef DEBUG_VEC_INTR
	prom_enter_mon();
#endif	/* DEBUG_VEC_INTR */
}

void
intr_dequeue_req(uint_t pil, uint64_t inum)
{
	intr_vec_t	*iv, *next, *prev;
	struct machcpu	*mcpu;
	uint32_t	clr;
	processorid_t	cpu_id;
	extern uint_t	getpstate(void);

	ASSERT((getpstate() & PSTATE_IE) == 0);

	mcpu = &CPU->cpu_m;
	cpu_id = CPU->cpu_id;

	iv = (intr_vec_t *)inum;
	prev = NULL;
	next = mcpu->intr_head[pil];

	/* Find a matching entry in the list */
	while (next != NULL) {
		if (next == iv)
			break;
		prev = next;
		next = IV_GET_PIL_NEXT(next, cpu_id);
	}

	if (next != NULL) {
		intr_vec_t	*next_iv = IV_GET_PIL_NEXT(next, cpu_id);

		/* Remove entry from list */
		if (prev != NULL)
			IV_SET_PIL_NEXT(prev, cpu_id, next_iv); /* non-head */
		else
			mcpu->intr_head[pil] = next_iv; /* head */

		if (next_iv == NULL)
			mcpu->intr_tail[pil] = prev; /* tail */
	}

	/* Clear pending interrupts at this level if the list is empty */
	if (mcpu->intr_head[pil] == NULL) {
		clr = 1 << pil;
		if (pil == PIL_14)
			clr |= (TICK_INT_MASK | STICK_INT_MASK);
		wr_clr_softint(clr);
	}
}


/*
 * Send a directed interrupt of specified interrupt number id to a cpu.
 */
void
send_dirint(
	int cpuix,		/* cpu to be interrupted */
	int intr_id)		/* interrupt number id */
{
	xt_one(cpuix, setsoftint_tl1, intr_id, 0);
}

/*
 * Take the specified CPU out of participation in interrupts.
 *	Called by p_online(2) when a processor is being taken off-line.
 *	This allows interrupt threads being handled on the processor to
 *	complete before the processor is idled.
 */
int
cpu_disable_intr(struct cpu *cp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Turn off the CPU_ENABLE flag before calling the redistribution
	 * function, since it checks for this in the cpu flags.
	 */
	cp->cpu_flags &= ~CPU_ENABLE;

	intr_redist_all_cpus();

	return (0);
}

/*
 * Allow the specified CPU to participate in interrupts.
 *	Called by p_online(2) if a processor could not be taken off-line
 *	because of bound threads, in order to resume processing interrupts.
 *	Also called after starting a processor.
 */
void
cpu_enable_intr(struct cpu *cp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));

	cp->cpu_flags |= CPU_ENABLE;

	intr_redist_all_cpus();
}

/*
 * Add function to callback list for intr_redist_all_cpus. We keep two lists,
 * one for weighted callbacks and one for normal callbacks. Weighted callbacks
 * are issued to redirect interrupts of a specified weight, from heavy to
 * light. This allows all the interrupts of a given weight to be redistributed
 * for all weighted nexus drivers prior to those of less weight.
 */
static void
intr_dist_add_list(struct intr_dist **phead, void (*func)(void *), void *arg)
{
	struct intr_dist *new = kmem_alloc(sizeof (*new), KM_SLEEP);
	struct intr_dist *iptr;
	struct intr_dist **pptr;

	ASSERT(func);
	new->func = func;
	new->arg = arg;
	new->next = NULL;

	/* Add to tail so that redistribution occurs in original order. */
	mutex_enter(&intr_dist_lock);
	for (iptr = *phead, pptr = phead; iptr != NULL;
	    pptr = &iptr->next, iptr = iptr->next) {
		/* check for problems as we locate the tail */
		if ((iptr->func == func) && (iptr->arg == arg)) {
			cmn_err(CE_PANIC, "intr_dist_add_list(): duplicate");
			/*NOTREACHED*/
		}
	}
	*pptr = new;

	mutex_exit(&intr_dist_lock);
}

void
intr_dist_add(void (*func)(void *), void *arg)
{
	intr_dist_add_list(&intr_dist_head, (void (*)(void *))func, arg);
}

void
intr_dist_add_weighted(void (*func)(void *, int32_t, int32_t), void *arg)
{
	intr_dist_add_list(&intr_dist_whead, (void (*)(void *))func, arg);
}

/*
 * Search for the interrupt distribution structure with the specified
 * mondo vec reg in the interrupt distribution list. If a match is found,
 * then delete the entry from the list. The caller is responsible for
 * modifying the mondo vector registers.
 */
static void
intr_dist_rem_list(struct intr_dist **headp, void (*func)(void *), void *arg)
{
	struct intr_dist *iptr;
	struct intr_dist **vect;

	mutex_enter(&intr_dist_lock);
	for (iptr = *headp, vect = headp;
	    iptr != NULL; vect = &iptr->next, iptr = iptr->next) {
		if ((iptr->func == func) && (iptr->arg == arg)) {
			*vect = iptr->next;
			kmem_free(iptr, sizeof (struct intr_dist));
			mutex_exit(&intr_dist_lock);
			return;
		}
	}

	if (!panicstr)
		cmn_err(CE_PANIC, "intr_dist_rem_list: not found");
	mutex_exit(&intr_dist_lock);
}

void
intr_dist_rem(void (*func)(void *), void *arg)
{
	intr_dist_rem_list(&intr_dist_head, (void (*)(void *))func, arg);
}

void
intr_dist_rem_weighted(void (*func)(void *, int32_t, int32_t), void *arg)
{
	intr_dist_rem_list(&intr_dist_whead, (void (*)(void *))func, arg);
}

/*
 * Initiate interrupt redistribution.
 * Redistribution improves the isolation associated with interrupt weights by
 * ordering operations from heavy weight to light weight. When a CPU's
 * orientation changes relative to interrupts, there is *always* a
 * redistribution to accommodate this change (call to intr_redist_all_cpus()).
 * As devices (not CPUs) attach/detach, it is possible that a redistribution
 * could improve the quality of an initialization. For example, if you are not
 * using a NIC it may not be attached with s10 (devfs). If you then configure
 * the NIC (ifconfig), this may cause the NIC to attach and plumb interrupts.
 * The CPU assignment for the NIC's interrupts is occurring late, so optimal
 * "isolation" relative to weight is not occurring. The same applies to
 * detach, although in this case doing the redistribution might improve
 * "spread" for medium weight devices since the "isolation" of a higher
 * weight device may no longer be present.
 *
 * NB: We should provide a utility to trigger redistribution (ala "intradm -r").
 *
 * NB: There is risk associated with automatically triggering execution of the
 * redistribution code at arbitrary times. The risk comes from the fact that
 * there is a lot of low-level hardware interaction associated with a
 * redistribution. At some point we may want this code to perform automatic
 * redistribution (redistribution thread; trigger timeout when add/remove
 * weight delta is large enough, and call cv_signal from timeout - causing
 * thread to call i_ddi_intr_redist_all_cpus()) but this is considered too
 * risky at this time.
 */
void
i_ddi_intr_redist_all_cpus()
{
	mutex_enter(&cpu_lock);
	INTR_DEBUG((CE_CONT, "intr_dist: i_ddi_intr_redist_all_cpus\n"));
	intr_redist_all_cpus();
	mutex_exit(&cpu_lock);
}

/*
 * Redistribute all interrupts
 *
 * This function redistributes all interrupting devices, running the
 * parent callback functions for each node.
 */
void
intr_redist_all_cpus(void)
{
	struct cpu *cp;
	struct intr_dist *iptr;
	int32_t weight, max_weight;

	ASSERT(MUTEX_HELD(&cpu_lock));
	mutex_enter(&intr_dist_lock);

	/*
	 * zero cpu_intr_weight on all cpus - it is safe to traverse
	 * cpu_list since we hold cpu_lock.
	 */
	cp = cpu_list;
	do {
		cp->cpu_intr_weight = 0;
	} while ((cp = cp->cpu_next) != cpu_list);

	/*
	 * Assume that this redistribution may encounter a device weight
	 * via driver.conf tuning of "ddi-intr-weight" that is at most
	 * intr_dist_weight_maxfactor times larger.
	 */
	max_weight = intr_dist_weight_max * intr_dist_weight_maxfactor;
	if (max_weight > intr_dist_weight_maxmax)
		max_weight = intr_dist_weight_maxmax;
	intr_dist_weight_max = 1;

	INTR_DEBUG((CE_CONT, "intr_dist: "
	    "intr_redist_all_cpus: %d-0\n", max_weight));

	/*
	 * Redistribute weighted, from heavy to light. The callback that
	 * specifies a weight equal to weight_max should redirect all
	 * interrupts of weight weight_max or greater [weight_max, inf.).
	 * Interrupts of lesser weight should be processed on the call with
	 * the matching weight. This allows all the heavier weight interrupts
	 * on all weighted busses (multiple pci busses) to be redirected prior
	 * to any lesser weight interrupts.
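	 *
	 * For illustration (hypothetical numbers): with a max_weight of 2,
	 * each weighted callback f registered via intr_dist_add_weighted()
	 * is invoked below as f(arg, 2, 2), then f(arg, 2, 1), then
	 * f(arg, 2, 0), so the heaviest interrupts are redirected first.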
	 */
	for (weight = max_weight; weight >= 0; weight--)
		for (iptr = intr_dist_whead; iptr != NULL; iptr = iptr->next)
			((void (*)(void *, int32_t, int32_t))iptr->func)
			    (iptr->arg, max_weight, weight);

	/* redistribute normal (non-weighted) interrupts */
	for (iptr = intr_dist_head; iptr != NULL; iptr = iptr->next)
		((void (*)(void *))iptr->func)(iptr->arg);
	mutex_exit(&intr_dist_lock);
}

void
intr_redist_all_cpus_shutdown(void)
{
	intr_policy = INTR_CURRENT_CPU;
	intr_redist_all_cpus();
}

/*
 * Determine what CPU to target, based on interrupt policy.
 *
 * INTR_FLAT_DIST: hold a current CPU pointer in a static variable and
 *	advance through interrupt enabled cpus (round-robin).
 *
 * INTR_WEIGHTED_DIST: search for an enabled CPU with the lowest
 *	cpu_intr_weight, round robin when all equal.
 *
 * Weighted interrupt distribution provides two things: "spread" of weight
 * (associated with the algorithm itself) and "isolation" (associated with a
 * particular device weight). A redistribution is what provides optimal
 * "isolation" of heavy weight interrupts; optimal "spread" of weight
 * (relative to what came before) is always occurring.
 *
 * An interrupt weight is a subjective number that represents the
 * percentage of a CPU required to service a device's interrupts: the
 * default weight is 0% (however the algorithm still maintains
 * round-robin), a network interface controller (NIC) may have a large
 * weight (35%). Interrupt weight only has meaning relative to the
 * interrupt weight of other devices: a CPU can be weighted more than
 * 100%, and a single device might consume more than 100% of a CPU.
 *
 * A coarse interrupt weight can be defined by the parent nexus driver
 * based on bus specific information, like pci class codes. A nexus
 * driver that supports device interrupt weighting for its children
 * should call intr_dist_cpuid_add/rem_device_weight(), which adds
 * and removes the weight of a device from the CPU that an interrupt
 * is directed at. The quality of initialization improves when the
 * device interrupt weights more accurately reflect actual run-time
 * weights, and as the assignments are ordered from heavy to light.
 *
 * The implementation also supports interrupt weight being specified in
 * driver.conf files via the property "ddi-intr-weight", which takes
 * precedence over the nexus supplied weight (see the illustration at the
 * end of this comment). This support is added to permit possible tweaking
 * in the product in response to customer problems. This is not a formal
 * or committed interface.
 *
 * While a weighted approach chooses the CPU providing the best spread
 * given past weights, less than optimal isolation can result in cases
 * where heavy weight devices show up last. The nexus driver's interrupt
 * redistribution logic should use intr_dist_add/rem_weighted so that
 * interrupts can be redistributed heavy first for optimal isolation.
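 *
 * For illustration only (an assumed value, since this is not a committed
 * interface), a driver.conf tuning that establishes a device interrupt
 * weight might look like:
 *
 *	ddi-intr-weight=35;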
 */
uint32_t
intr_dist_cpuid(void)
{
	static struct cpu	*curr_cpu;
	struct cpu		*start_cpu;
	struct cpu		*new_cpu;
	struct cpu		*cp;
	int			cpuid = -1;

	/* Establish exclusion for curr_cpu and cpu_intr_weight manipulation */
	mutex_enter(&intr_dist_cpu_lock);

	switch (intr_policy) {
	case INTR_CURRENT_CPU:
		cpuid = CPU->cpu_id;
		break;

	case INTR_BOOT_CPU:
		panic("INTR_BOOT_CPU no longer supported.");
		/*NOTREACHED*/

	case INTR_FLAT_DIST:
	case INTR_WEIGHTED_DIST:
	default:
		/*
		 * Ensure that curr_cpu is valid - cpu_next will be NULL if
		 * the cpu has been deleted (cpu structs are never freed).
		 */
		if (curr_cpu == NULL || curr_cpu->cpu_next == NULL)
			curr_cpu = CPU;

		/*
		 * Advance to the online CPU after curr_cpu (round-robin). For
		 * INTR_WEIGHTED_DIST we choose the cpu with the lightest
		 * weight. For a nexus that does not support weight, the
		 * default weight of zero is used and we degrade to
		 * round-robin behavior among equal weights.
		 *
		 * Disable preemption while traversing cpu_next_onln to
		 * ensure the list does not change. This works because
		 * modifiers of this list and other lists in a struct cpu
		 * call pause_cpus() before making changes.
		 */
		kpreempt_disable();
		cp = start_cpu = curr_cpu->cpu_next_onln;
		new_cpu = NULL;
		do {
			/* Skip CPUs with interrupts disabled */
			if ((cp->cpu_flags & CPU_ENABLE) == 0)
				continue;

			if (intr_policy == INTR_FLAT_DIST) {
				/* select CPU */
				new_cpu = cp;
				break;
			} else if ((new_cpu == NULL) ||
			    (cp->cpu_intr_weight < new_cpu->cpu_intr_weight)) {
				/* Choose if lighter weight */
				new_cpu = cp;
			}
		} while ((cp = cp->cpu_next_onln) != start_cpu);
		ASSERT(new_cpu);
		cpuid = new_cpu->cpu_id;

		INTR_DEBUG((CE_CONT, "intr_dist: cpu %2d weight %3d: "
		    "targeted\n", cpuid, new_cpu->cpu_intr_weight));

		/* update static pointer for next round-robin */
		curr_cpu = new_cpu;
		kpreempt_enable();
		break;
	}
	mutex_exit(&intr_dist_cpu_lock);
	return (cpuid);
}

/*
 * Add or remove the weight of a device from a CPU's interrupt weight.
 *
 * We expect nexus drivers to call intr_dist_cpuid_add/rem_device_weight for
 * their children to improve the overall quality of interrupt initialization.
 *
 * If a nexus shares the CPU returned by a single intr_dist_cpuid() call
 * among multiple devices (sharing ino) then the nexus should call
 * intr_dist_cpuid_add/rem_device_weight for each device separately. Devices
 * that share must specify the same cpuid.
 *
 * If a nexus driver is unable to determine the cpu at remove_intr time
 * for some of its interrupts, then it should not call add_device_weight -
 * intr_dist_cpuid will still provide round-robin.
 *
 * An established device weight (from dev_info node) takes precedence over
 * the weight passed in. If a device weight is not already established
 * then the passed in nexus weight is established.
 */
void
intr_dist_cpuid_add_device_weight(uint32_t cpuid,
    dev_info_t *dip, int32_t nweight)
{
	int32_t		eweight;

	/*
	 * For non-weighted policy everything has weight of zero (and we get
	 * round-robin distribution from intr_dist_cpuid).
	 * NB: intr_policy is limited to this file.
	 * A weighted nexus driver calls this routine even if intr_policy
	 * has been patched to INTR_FLAT_DIST.
	 */
	ASSERT(dip);
	if (intr_policy != INTR_WEIGHTED_DIST)
		return;

	eweight = i_ddi_get_intr_weight(dip);
	INTR_DEBUG((CE_CONT, "intr_dist: cpu %2d weight %3d: +%2d/%2d for "
	    "%s#%d/%s#%d\n", cpuid, cpu[cpuid]->cpu_intr_weight,
	    nweight, eweight, ddi_driver_name(ddi_get_parent(dip)),
	    ddi_get_instance(ddi_get_parent(dip)),
	    ddi_driver_name(dip), ddi_get_instance(dip)));

	/* if no established weight, establish the nexus weight */
	if (eweight < 0) {
		if (nweight > 0)
			(void) i_ddi_set_intr_weight(dip, nweight);
		else
			nweight = 0;
	} else
		nweight = eweight;	/* use established weight */

	/* Establish exclusion for cpu_intr_weight manipulation */
	mutex_enter(&intr_dist_cpu_lock);
	cpu[cpuid]->cpu_intr_weight += nweight;

	/* update intr_dist_weight_max */
	if (nweight > intr_dist_weight_max)
		intr_dist_weight_max = nweight;
	mutex_exit(&intr_dist_cpu_lock);
}

void
intr_dist_cpuid_rem_device_weight(uint32_t cpuid, dev_info_t *dip)
{
	struct cpu	*cp;
	int32_t		weight;

	ASSERT(dip);
	if (intr_policy != INTR_WEIGHTED_DIST)
		return;

	/* remove weight of device from cpu */
	weight = i_ddi_get_intr_weight(dip);
	if (weight < 0)
		weight = 0;
	INTR_DEBUG((CE_CONT, "intr_dist: cpu %2d weight %3d: -%2d for "
	    "%s#%d/%s#%d\n", cpuid, cpu[cpuid]->cpu_intr_weight, weight,
	    ddi_driver_name(ddi_get_parent(dip)),
	    ddi_get_instance(ddi_get_parent(dip)),
	    ddi_driver_name(dip), ddi_get_instance(dip)));

	/* Establish exclusion for cpu_intr_weight manipulation */
	mutex_enter(&intr_dist_cpu_lock);
	cp = cpu[cpuid];
	cp->cpu_intr_weight -= weight;
	if (cp->cpu_intr_weight < 0)
		cp->cpu_intr_weight = 0;	/* sanity */
	mutex_exit(&intr_dist_cpu_lock);
}
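
/*
 * Usage sketch (illustrative only; the callback name and handle below are
 * hypothetical, not defined in this file):
 *
 *	static void xx_intr_dist(void *arg, int32_t max_weight,
 *	    int32_t weight);
 *
 * A weighted nexus driver would typically register the callback with
 * intr_dist_add_weighted(xx_intr_dist, xx_softstate) at attach time. On
 * each invocation the callback retargets the interrupts whose device
 * weight matches "weight" (and those of weight max_weight or greater when
 * weight == max_weight), using intr_dist_cpuid() to choose the target CPU
 * and intr_dist_cpuid_add_device_weight() to record the device's weight
 * against that CPU. At detach time the driver removes the callback with
 * intr_dist_rem_weighted(xx_intr_dist, xx_softstate).
 */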