/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#define	PSMI_1_5
#include <sys/smp_impldefs.h>
#include <sys/psm.h>
#include <sys/psm_modctl.h>
#include <sys/pit.h>
#include <sys/cmn_err.h>
#include <sys/strlog.h>
#include <sys/clock.h>
#include <sys/debug.h>
#include <sys/rtc.h>
#include <sys/x86_archext.h>
#include <sys/cpupart.h>
#include <sys/cpuvar.h>
#include <sys/cmt.h>
#include <sys/cpu.h>
#include <sys/disp.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/sysmacros.h>
#include <sys/memlist.h>
#include <sys/param.h>
#include <sys/promif.h>
#if defined(__xpv)
#include <sys/hypervisor.h>
#endif
#include <sys/mach_intr.h>
#include <vm/hat_i86.h>
#include <sys/kdi_machimpl.h>

#define	OFFSETOF(s, m)		(size_t)(&(((s *)0)->m))

/*
 * Local function prototypes
 */
static int mp_disable_intr(processorid_t cpun);
static void mp_enable_intr(processorid_t cpun);
static void mach_init();
static void mach_picinit();
static int machhztomhz(uint64_t cpu_freq_hz);
static uint64_t mach_getcpufreq(void);
static void mach_fixcpufreq(void);
static int mach_clkinit(int, int *);
static void mach_smpinit(void);
static int mach_softlvl_to_vect(int ipl);
static void mach_get_platform(int owner);
static void mach_construct_info();
static int mach_translate_irq(dev_info_t *dip, int irqno);
static int mach_intr_ops(dev_info_t *, ddi_intr_handle_impl_t *,
		psm_intr_op_t, int *);
static void mach_notify_error(int level, char *errmsg);
static hrtime_t dummy_hrtime(void);
static void dummy_scalehrtime(hrtime_t *);
static void cpu_idle(void);
static void cpu_wakeup(cpu_t *, int);
#ifndef __xpv
static void cpu_idle_mwait(void);
static void cpu_wakeup_mwait(cpu_t *, int);
#endif
/*
 * External reference functions
 */
extern void return_instr();
extern uint64_t freq_tsc(uint32_t *);
#if defined(__i386)
extern uint64_t freq_notsc(uint32_t *);
#endif
extern void pc_gethrestime(timestruc_t *);
extern int cpuid_get_coreid(cpu_t *);
extern int cpuid_get_chipid(cpu_t *);

/*
 * PSM functions initialization
 */
void (*psm_shutdownf)(int, int) = (void (*)(int, int))return_instr;
void (*psm_preshutdownf)(int, int) = (void (*)(int, int))return_instr;
void (*psm_notifyf)(int) = (void (*)(int))return_instr;
void (*psm_set_idle_cpuf)(int) = (void (*)(int))return_instr;
void (*psm_unset_idle_cpuf)(int) = (void (*)(int))return_instr;
void (*psminitf)() = mach_init;
void (*picinitf)() = return_instr;
int (*clkinitf)(int, int *) = (int (*)(int, int *))return_instr;
int (*ap_mlsetup)() = (int (*)(void))return_instr;
void (*send_dirintf)() = return_instr;
void (*setspl)(int) = (void (*)(int))return_instr;
int (*addspl)(int, int, int, int) = (int (*)(int, int, int, int))return_instr;
int (*delspl)(int, int, int, int) = (int (*)(int, int, int, int))return_instr;
void (*kdisetsoftint)(int, struct av_softinfo *)=
	(void (*)(int, struct av_softinfo *))return_instr;
void (*setsoftint)(int, struct av_softinfo *)=
	(void (*)(int, struct av_softinfo *))return_instr;
int (*slvltovect)(int) = (int (*)(int))return_instr;
int (*setlvl)(int, int *) = (int (*)(int, int *))return_instr;
void (*setlvlx)(int, int) = (void (*)(int, int))return_instr;
int (*psm_disable_intr)(int) = mp_disable_intr;
void (*psm_enable_intr)(int) = mp_enable_intr;
hrtime_t (*gethrtimef)(void) = dummy_hrtime;
hrtime_t (*gethrtimeunscaledf)(void) = dummy_hrtime;
void (*scalehrtimef)(hrtime_t *) = dummy_scalehrtime;
int (*psm_translate_irq)(dev_info_t *, int) = mach_translate_irq;
void (*gethrestimef)(timestruc_t *) = pc_gethrestime;
void (*psm_notify_error)(int, char *) = (void (*)(int, char *))NULL;
int (*psm_get_clockirq)(int) = NULL;
int (*psm_get_ipivect)(int, int) = NULL;

int (*psm_clkinit)(int) = NULL;
void (*psm_timer_reprogram)(hrtime_t) = NULL;
void (*psm_timer_enable)(void) = NULL;
void (*psm_timer_disable)(void) = NULL;
void (*psm_post_cyclic_setup)(void *arg) = NULL;
int (*psm_intr_ops)(dev_info_t *, ddi_intr_handle_impl_t *, psm_intr_op_t,
	int *) = mach_intr_ops;

void (*notify_error)(int, char *) = (void (*)(int, char *))return_instr;
void (*hrtime_tick)(void) = return_instr;

/*
 * True if the generic TSC code is our source of hrtime, rather than whatever
 * the PSM can provide.
 */
#ifdef __xpv
int tsc_gethrtime_enable = 0;
#else
int tsc_gethrtime_enable = 1;
#endif
int tsc_gethrtime_initted = 0;

/*
 * True if the hrtime implementation is "hires"; namely, better than microdata.
 */
int gethrtime_hires = 0;

/*
 * Local Static Data
 */
static struct psm_ops mach_ops;
static struct psm_ops *mach_set[4] = {&mach_ops, NULL, NULL, NULL};
static ushort_t mach_ver[4] = {0, 0, 0, 0};

/*
 * If non-zero, idle cpus will become "halted" when there's
 * no work to do.
 */
int idle_cpu_use_hlt = 1;

#ifndef __xpv
/*
 * If non-zero, idle cpus will use mwait if available to halt instead of hlt.
 */
int idle_cpu_prefer_mwait = 1;
#endif

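/*
 * Platform-specific processor group (PG) support.  The pg_plat_*() routines
 * below tell the common CMT/dispatcher code which physical hardware sharing
 * relationships (execution pipeline, last-level cache, chip) exist between
 * CPUs, and how to identify instances of each relationship.
 */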
/*ARGSUSED*/
int
pg_plat_hw_shared(cpu_t *cp, pghw_type_t hw)
{
	switch (hw) {
	case PGHW_IPIPE:
		if (x86_feature & (X86_HTT)) {
			/*
			 * Hyper-threading is SMT
			 */
			return (1);
		} else {
			return (0);
		}
	case PGHW_CHIP:
		if (x86_feature & (X86_CMP|X86_HTT))
			return (1);
		else
			return (0);
	case PGHW_CACHE:
		if (cpuid_get_ncpu_sharing_last_cache(cp) > 1)
			return (1);
		else
			return (0);
	default:
		return (0);
	}
}

/*
 * Compare two CPUs and see if they have a pghw_type_t sharing relationship.
 * If pghw_type_t is an unsupported hardware type, then return -1.
 */
int
pg_plat_cpus_share(cpu_t *cpu_a, cpu_t *cpu_b, pghw_type_t hw)
{
	id_t	pgp_a, pgp_b;

	pgp_a = pg_plat_hw_instance_id(cpu_a, hw);
	pgp_b = pg_plat_hw_instance_id(cpu_b, hw);

	if (pgp_a == -1 || pgp_b == -1)
		return (-1);

	return (pgp_a == pgp_b);
}

/*
 * Return a physical instance identifier for known hardware sharing
 * relationships
 */
id_t
pg_plat_hw_instance_id(cpu_t *cpu, pghw_type_t hw)
{
	switch (hw) {
	case PGHW_IPIPE:
		return (cpuid_get_coreid(cpu));
	case PGHW_CACHE:
		return (cpuid_get_last_lvl_cacheid(cpu));
	case PGHW_CHIP:
		return (cpuid_get_chipid(cpu));
	default:
		return (-1);
	}
}

int
pg_plat_hw_level(pghw_type_t hw)
{
	int i;
	static pghw_type_t hw_hier[] = {
		PGHW_IPIPE,
		PGHW_CACHE,
		PGHW_CHIP,
		PGHW_NUM_COMPONENTS
	};

	for (i = 0; hw_hier[i] != PGHW_NUM_COMPONENTS; i++) {
		if (hw_hier[i] == hw)
			return (i);
	}
	return (-1);
}

/*
 * Return 1 if CMT load balancing policies should be
 * implemented across instances of the specified hardware
 * sharing relationship.
 */
int
pg_plat_cmt_load_bal_hw(pghw_type_t hw)
{
	if (hw == PGHW_IPIPE ||
	    hw == PGHW_FPU ||
	    hw == PGHW_CHIP ||
	    hw == PGHW_CACHE)
		return (1);
	else
		return (0);
}


/*
 * Return 1 if thread affinity policies should be implemented
 * for instances of the specified hardware sharing relationship.
 */
int
pg_plat_cmt_affinity_hw(pghw_type_t hw)
{
	if (hw == PGHW_CACHE)
		return (1);
	else
		return (0);
}

id_t
pg_plat_get_core_id(cpu_t *cpu)
{
	return ((id_t)cpuid_get_coreid(cpu));
}

void
cmp_set_nosteal_interval(void)
{
	/* Set the nosteal interval (used by disp_getbest()) to 100us */
	nosteal_nsec = 100000UL;
}

/*
 * Routine to ensure initial callers to hrtime get 0 as the return value.
 */
static hrtime_t
dummy_hrtime(void)
{
	return (0);
}

/* ARGSUSED */
static void
dummy_scalehrtime(hrtime_t *ticks)
{}

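/*
 * cpu_idle() and cpu_wakeup() below are the default idle and wakeup hooks
 * wired up by mach_init() and mach_smpinit() (via idle_cpu and
 * disp_enq_thread) when idle_cpu_use_hlt is set.  They cooperate through the
 * partition's halted CPU set: an idling CPU adds itself before halting, and
 * a CPU enqueueing work clears a halted CPU's bit and then pokes it.
 */
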
/*
 * Idle the present CPU until awoken via an interrupt
 */
static void
cpu_idle(void)
{
	cpu_t		*cpup = CPU;
	processorid_t	cpun = cpup->cpu_id;
	cpupart_t	*cp = cpup->cpu_part;
	int		hset_update = 1;

	/*
	 * If this CPU is online, and there are multiple CPUs
	 * in the system, then we should notate our halting
	 * by adding ourselves to the partition's halted CPU
	 * bitmap. This allows other CPUs to find/awaken us when
	 * work becomes available.
	 */
	if (cpup->cpu_flags & CPU_OFFLINE || ncpus == 1)
		hset_update = 0;

	/*
	 * Add ourselves to the partition's halted CPUs bitmask
	 * and set our HALTED flag, if necessary.
	 *
	 * When a thread becomes runnable, it is placed on the queue
	 * and then the halted cpuset is checked to determine who
	 * (if anyone) should be awoken. We therefore need to first
	 * add ourselves to the halted cpuset, and then check if there
	 * is any work available.
	 *
	 * Note that memory barriers after updating the HALTED flag
	 * are not necessary since an atomic operation (updating the bitmap)
	 * immediately follows. On x86 the atomic operation acts as a
	 * memory barrier for the update of cpu_disp_flags.
	 */
	if (hset_update) {
		cpup->cpu_disp_flags |= CPU_DISP_HALTED;
		CPUSET_ATOMIC_ADD(cp->cp_mach->mc_haltset, cpun);
	}

	/*
	 * Check to make sure there's really nothing to do.
	 * Work destined for this CPU may become available after
	 * this check. We'll be notified through the clearing of our
	 * bit in the halted CPU bitmask, and a poke.
	 */
	if (disp_anywork()) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpun);
		}
		return;
	}

	/*
	 * We're on our way to being halted.
	 *
	 * Disable interrupts now, so that we'll awaken immediately
	 * after halting if someone tries to poke us between now and
	 * the time we actually halt.
	 *
	 * We check for the presence of our bit after disabling interrupts.
	 * If it's cleared, we'll return. If the bit is cleared after
	 * we check then the poke will pop us out of the halted state.
	 *
	 * This means that the ordering of the poke and the clearing
	 * of the bit by cpu_wakeup is important.
	 * cpu_wakeup() must clear, then poke.
	 * cpu_idle() must disable interrupts, then check for the bit.
	 */
	cli();

	if (hset_update && !CPU_IN_SET(cp->cp_mach->mc_haltset, cpun)) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		sti();
		return;
	}

	/*
	 * The check for anything locally runnable is here for performance
	 * and isn't needed for correctness. disp_nrunnable ought to be
	 * in our cache still, so it's inexpensive to check, and if there
	 * is anything runnable we won't have to wait for the poke.
	 */
	if (cpup->cpu_disp->disp_nrunnable != 0) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpun);
		}
		sti();
		return;
	}

	mach_cpu_idle();

	/*
	 * We're no longer halted
	 */
	if (hset_update) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpun);
	}
}


/*
 * If "cpu" is halted, then wake it up clearing its halted bit in advance.
 * Otherwise, see if other CPUs in the cpu partition are halted and need to
 * be woken up so that they can steal the thread we placed on this CPU.
 * This function is only used on MP systems.
 */
static void
cpu_wakeup(cpu_t *cpu, int bound)
{
	uint_t		cpu_found;
	int		result;
	cpupart_t	*cp;

	cp = cpu->cpu_part;
	if (CPU_IN_SET(cp->cp_mach->mc_haltset, cpu->cpu_id)) {
		/*
		 * Clear the halted bit for that CPU since it will be
		 * poked in a moment.
		 */
		CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpu->cpu_id);
		/*
		 * We may find the current CPU present in the halted cpuset
		 * if we're in the context of an interrupt that occurred
		 * before we had a chance to clear our bit in cpu_idle().
		 * Poking ourself is obviously unnecessary, since if
		 * we're here, we're not halted.
		 */
		if (cpu != CPU)
			poke_cpu(cpu->cpu_id);
		return;
	} else {
		/*
		 * This cpu isn't halted, but it's idle or undergoing a
		 * context switch. No need to awaken anyone else.
		 */
		if (cpu->cpu_thread == cpu->cpu_idle_thread ||
		    cpu->cpu_disp_flags & CPU_DISP_DONTSTEAL)
			return;
	}

	/*
	 * No need to wake up other CPUs if the thread we just enqueued
	 * is bound.
	 */
	if (bound)
		return;


	/*
	 * See if there are any other halted CPUs. If there are, then
	 * select one, and awaken it.
	 * It's possible that after we find a CPU, somebody else
	 * will awaken it before we get the chance.
	 * In that case, look again.
	 */
	do {
		CPUSET_FIND(cp->cp_mach->mc_haltset, cpu_found);
		if (cpu_found == CPUSET_NOTINSET)
			return;

		ASSERT(cpu_found >= 0 && cpu_found < NCPU);
		CPUSET_ATOMIC_XDEL(cp->cp_mach->mc_haltset, cpu_found, result);
	} while (result < 0);

	if (cpu_found != CPU->cpu_id)
		poke_cpu(cpu_found);
}

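/*
 * The monitor/mwait based variants below mirror cpu_idle()/cpu_wakeup(),
 * but instead of halting with interrupts disabled and being poked with an
 * IPI, the idling CPU arms a monitor on its mcpu_mwait line and the waking
 * CPU simply writes to that line.
 */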
#ifndef __xpv
/*
 * Idle the present CPU until awoken via touching its monitored line
 */
static void
cpu_idle_mwait(void)
{
	volatile uint32_t	*mcpu_mwait = CPU->cpu_m.mcpu_mwait;
	cpu_t			*cpup = CPU;
	processorid_t		cpun = cpup->cpu_id;
	cpupart_t		*cp = cpup->cpu_part;
	int			hset_update = 1;

	/*
	 * Set our mcpu_mwait here, so we can tell if anyone tries to
	 * wake us between now and when we call mwait. No other cpu will
	 * attempt to set our mcpu_mwait until we add ourself to the haltset.
	 */
	*mcpu_mwait = MWAIT_HALTED;

	/*
	 * If this CPU is online, and there are multiple CPUs
	 * in the system, then we should notate our halting
	 * by adding ourselves to the partition's halted CPU
	 * bitmap. This allows other CPUs to find/awaken us when
	 * work becomes available.
	 */
	if (cpup->cpu_flags & CPU_OFFLINE || ncpus == 1)
		hset_update = 0;

	/*
	 * Add ourselves to the partition's halted CPUs bitmask
	 * and set our HALTED flag, if necessary.
	 *
	 * When a thread becomes runnable, it is placed on the queue
	 * and then the halted cpuset is checked to determine who
	 * (if anyone) should be awoken. We therefore need to first
	 * add ourselves to the halted cpuset, and then check if there
	 * is any work available.
	 *
	 * Note that memory barriers after updating the HALTED flag
	 * are not necessary since an atomic operation (updating the bitmap)
	 * immediately follows. On x86 the atomic operation acts as a
	 * memory barrier for the update of cpu_disp_flags.
	 */
	if (hset_update) {
		cpup->cpu_disp_flags |= CPU_DISP_HALTED;
		CPUSET_ATOMIC_ADD(cp->cp_mach->mc_haltset, cpun);
	}

	/*
	 * Check to make sure there's really nothing to do.
	 * Work destined for this CPU may become available after
	 * this check. We'll be notified through the clearing of our
	 * bit in the halted CPU bitmask, and a write to our mcpu_mwait.
	 *
	 * disp_anywork() checks disp_nrunnable, so we do not have to later.
	 */
	if (disp_anywork()) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpun);
		}
		return;
	}

	/*
	 * We're on our way to being halted.
	 * To avoid a lost wakeup, arm the monitor before checking if another
	 * cpu wrote to mcpu_mwait to wake us up.
	 */
	i86_monitor(mcpu_mwait, 0, 0);
	if (*mcpu_mwait == MWAIT_HALTED) {
		tlb_going_idle();
		i86_mwait(0, 0);
		tlb_service();
	}

	/*
	 * We're no longer halted
	 */
	if (hset_update) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpun);
	}
}

/*
 * If "cpu" is halted in mwait, then wake it up clearing its halted bit in
 * advance. Otherwise, see if other CPUs in the cpu partition are halted and
 * need to be woken up so that they can steal the thread we placed on this CPU.
 * This function is only used on MP systems.
 */
static void
cpu_wakeup_mwait(cpu_t *cp, int bound)
{
	cpupart_t	*cpu_part;
	uint_t		cpu_found;
	int		result;

	cpu_part = cp->cpu_part;

	/*
	 * Clear the halted bit for that CPU since it will be woken up
	 * in a moment.
	 */
	if (CPU_IN_SET(cpu_part->cp_mach->mc_haltset, cp->cpu_id)) {
		/*
		 * Clear the halted bit for that CPU since it will be
		 * poked in a moment.
		 */
		CPUSET_ATOMIC_DEL(cpu_part->cp_mach->mc_haltset, cp->cpu_id);
		/*
		 * We may find the current CPU present in the halted cpuset
		 * if we're in the context of an interrupt that occurred
		 * before we had a chance to clear our bit in cpu_idle().
		 * Waking ourself is obviously unnecessary, since if
		 * we're here, we're not halted.
		 *
		 * monitor/mwait wakeup via writing to our cache line is
		 * harmless and less expensive than always checking if we
		 * are waking ourself, which is an uncommon case.
		 */
		MWAIT_WAKEUP(cp);	/* write to monitored line */
		return;
	} else {
		/*
		 * This cpu isn't halted, but it's idle or undergoing a
		 * context switch. No need to awaken anyone else.
		 */
		if (cp->cpu_thread == cp->cpu_idle_thread ||
		    cp->cpu_disp_flags & CPU_DISP_DONTSTEAL)
			return;
	}

	/*
	 * No need to wake up other CPUs if the thread we just enqueued
	 * is bound.
	 */
	if (bound)
		return;


	/*
	 * See if there are any other halted CPUs. If there are, then
	 * select one, and awaken it.
	 * It's possible that after we find a CPU, somebody else
	 * will awaken it before we get the chance.
	 * In that case, look again.
	 */
	do {
		CPUSET_FIND(cpu_part->cp_mach->mc_haltset, cpu_found);
		if (cpu_found == CPUSET_NOTINSET)
			return;

		ASSERT(cpu_found >= 0 && cpu_found < NCPU);
		CPUSET_ATOMIC_XDEL(cpu_part->cp_mach->mc_haltset, cpu_found,
		    result);
	} while (result < 0);

	/*
	 * Do not check if cpu_found is ourself as monitor/mwait wakeup is
	 * cheap.
	 */
	MWAIT_WAKEUP(cpu[cpu_found]);	/* write to monitored line */
}
#endif

void (*cpu_pause_handler)(volatile char *) = NULL;

static int
mp_disable_intr(int cpun)
{
	/*
	 * switch to the offline cpu
	 */
	affinity_set(cpun);
	/*
	 * raise ipl to just below cross call
	 */
	splx(XC_MED_PIL-1);
	/*
	 * set base spl to prevent the next swtch to idle from
	 * lowering back to ipl 0
	 */
	CPU->cpu_intr_actv |= (1 << (XC_MED_PIL-1));
	set_base_spl();
	affinity_clear();
	return (DDI_SUCCESS);
}

static void
mp_enable_intr(int cpun)
{
	/*
	 * switch to the online cpu
	 */
	affinity_set(cpun);
	/*
	 * clear the interrupt active mask
	 */
	CPU->cpu_intr_actv &= ~(1 << (XC_MED_PIL-1));
	set_base_spl();
	(void) spl0();
	affinity_clear();
}

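/*
 * mach_get_platform() merges the ops vector supplied by the PSM module
 * registered as "owner" into the master mach_ops table, copying only as many
 * entries as that module's PSMI version defines so that older modules are
 * never asked for ops they did not provide.
 */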
static void
mach_get_platform(int owner)
{
	void		**srv_opsp;
	void		**clt_opsp;
	int		i;
	int		total_ops;

	/* fix up psm ops */
	srv_opsp = (void **)mach_set[0];
	clt_opsp = (void **)mach_set[owner];
	if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01)
		total_ops = sizeof (struct psm_ops_ver01) /
		    sizeof (void (*)(void));
	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_1)
		/* no psm_notify_func */
		total_ops = OFFSETOF(struct psm_ops, psm_notify_func) /
		    sizeof (void (*)(void));
	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_2)
		/* no psm_timer funcs */
		total_ops = OFFSETOF(struct psm_ops, psm_timer_reprogram) /
		    sizeof (void (*)(void));
	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_3)
		/* no psm_preshutdown function */
		total_ops = OFFSETOF(struct psm_ops, psm_preshutdown) /
		    sizeof (void (*)(void));
	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_4)
		/* no psm_intr_ops function */
		total_ops = OFFSETOF(struct psm_ops, psm_intr_ops) /
		    sizeof (void (*)(void));
	else
		total_ops = sizeof (struct psm_ops) / sizeof (void (*)(void));

	/*
	 * Save the version of the PSM module, in case we need to
	 * behave differently based on version.
	 */
	mach_ver[0] = mach_ver[owner];

	for (i = 0; i < total_ops; i++)
		if (clt_opsp[i] != NULL)
			srv_opsp[i] = clt_opsp[i];
}

static void
mach_construct_info()
{
	struct psm_sw *swp;
	int	mach_cnt[PSM_OWN_OVERRIDE+1] = {0};
	int	conflict_owner = 0;

	if (psmsw->psw_forw == psmsw)
		panic("No valid PSM modules found");
	mutex_enter(&psmsw_lock);
	for (swp = psmsw->psw_forw; swp != psmsw; swp = swp->psw_forw) {
		if (!(swp->psw_flag & PSM_MOD_IDENTIFY))
			continue;
		mach_set[swp->psw_infop->p_owner] = swp->psw_infop->p_ops;
		mach_ver[swp->psw_infop->p_owner] = swp->psw_infop->p_version;
		mach_cnt[swp->psw_infop->p_owner]++;
	}
	mutex_exit(&psmsw_lock);

	mach_get_platform(PSM_OWN_SYS_DEFAULT);

	/* check to see if there are any conflicts */
	if (mach_cnt[PSM_OWN_EXCLUSIVE] > 1)
		conflict_owner = PSM_OWN_EXCLUSIVE;
	if (mach_cnt[PSM_OWN_OVERRIDE] > 1)
		conflict_owner = PSM_OWN_OVERRIDE;
	if (conflict_owner) {
		/* remove all psm modules except uppc */
		cmn_err(CE_WARN,
		    "Conflicts detected on the following PSM modules:");
		mutex_enter(&psmsw_lock);
		for (swp = psmsw->psw_forw; swp != psmsw; swp = swp->psw_forw) {
			if (swp->psw_infop->p_owner == conflict_owner)
				cmn_err(CE_WARN, "%s ",
				    swp->psw_infop->p_mach_idstring);
		}
		mutex_exit(&psmsw_lock);
		cmn_err(CE_WARN,
		    "Setting the system back to SINGLE processor mode!");
		cmn_err(CE_WARN,
		    "Please edit /etc/mach to remove the invalid PSM module.");
		return;
	}

	if (mach_set[PSM_OWN_EXCLUSIVE])
		mach_get_platform(PSM_OWN_EXCLUSIVE);

	if (mach_set[PSM_OWN_OVERRIDE])
		mach_get_platform(PSM_OWN_OVERRIDE);
}

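/*
 * mach_init() is the PSM bootstrap entry point installed in psminitf.  It
 * selects and merges the PSM ops (mach_construct_info()), wires the chosen
 * ops into the kernel's interrupt and clock hooks, selects the idle routine
 * (hlt- or mwait-based), and then performs the MP-related setup in
 * mach_smpinit().
 */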
static void
mach_init()
{
	struct psm_ops	*pops;

	mach_construct_info();

	pops = mach_set[0];

	/* register the interrupt and clock initialization routines */
	picinitf = mach_picinit;
	clkinitf = mach_clkinit;
	psm_get_clockirq = pops->psm_get_clockirq;

	/* register the interrupt setup code */
	slvltovect = mach_softlvl_to_vect;
	addspl	= pops->psm_addspl;
	delspl	= pops->psm_delspl;

	if (pops->psm_translate_irq)
		psm_translate_irq = pops->psm_translate_irq;
	if (pops->psm_intr_ops)
		psm_intr_ops = pops->psm_intr_ops;

#if defined(PSMI_1_2) || defined(PSMI_1_3) || defined(PSMI_1_4)
	/*
	 * Time-of-day functionality now handled in TOD modules.
	 * (Warn about PSM modules that think that we're going to use
	 * their ops vectors.)
	 */
	if (pops->psm_tod_get)
		cmn_err(CE_WARN, "obsolete psm_tod_get op %p",
		    (void *)pops->psm_tod_get);

	if (pops->psm_tod_set)
		cmn_err(CE_WARN, "obsolete psm_tod_set op %p",
		    (void *)pops->psm_tod_set);
#endif

	if (pops->psm_notify_error) {
		psm_notify_error = mach_notify_error;
		notify_error = pops->psm_notify_error;
	}

	(*pops->psm_softinit)();

	/*
	 * Initialize the dispatcher's function hooks
	 * to enable CPU halting when idle.
	 * Do not use monitor/mwait if idle_cpu_use_hlt is not set (spin idle)
	 * or idle_cpu_prefer_mwait is not set.
	 * Allocate monitor/mwait buffer for cpu0.
	 */
	if (idle_cpu_use_hlt) {
		idle_cpu = cpu_idle;
#ifndef __xpv
		if ((x86_feature & X86_MWAIT) && idle_cpu_prefer_mwait) {
			CPU->cpu_m.mcpu_mwait = cpuid_mwait_alloc(CPU);
			/*
			 * Protect ourself from insane mwait size.
			 */
			if (CPU->cpu_m.mcpu_mwait == NULL) {
#ifdef DEBUG
				cmn_err(CE_NOTE, "Using hlt idle. Cannot "
				    "handle cpu 0 mwait size.");
#endif
				idle_cpu_prefer_mwait = 0;
				idle_cpu = cpu_idle;
			} else {
				idle_cpu = cpu_idle_mwait;
			}
		} else {
			idle_cpu = cpu_idle;
		}
#endif
	}

	mach_smpinit();
}

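/*
 * mach_smpinit() asks the PSM for the set of processor ids to build mp_cpus,
 * wires up the MP-specific PSM entry points (IPIs, shutdown/notify hooks and
 * timer ops), and, when more than one startable CPU is present, installs the
 * dispatcher wakeup hook and registers the cross-call interrupt handlers.
 */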
static void
mach_smpinit(void)
{
	struct psm_ops	*pops;
	processorid_t	cpu_id;
	int		cnt;
	cpuset_t	cpumask;

	pops = mach_set[0];

	cpu_id = -1;
	cpu_id = (*pops->psm_get_next_processorid)(cpu_id);
	for (cnt = 0, CPUSET_ZERO(cpumask); cpu_id != -1; cnt++) {
		CPUSET_ADD(cpumask, cpu_id);
		cpu_id = (*pops->psm_get_next_processorid)(cpu_id);
	}

	mp_cpus = cpumask;

	/* MP related routines */
	ap_mlsetup = pops->psm_post_cpu_start;
	send_dirintf = pops->psm_send_ipi;

	/* optional MP related routines */
	if (pops->psm_shutdown)
		psm_shutdownf = pops->psm_shutdown;
	if (pops->psm_preshutdown)
		psm_preshutdownf = pops->psm_preshutdown;
	if (pops->psm_notify_func)
		psm_notifyf = pops->psm_notify_func;
	if (pops->psm_set_idlecpu)
		psm_set_idle_cpuf = pops->psm_set_idlecpu;
	if (pops->psm_unset_idlecpu)
		psm_unset_idle_cpuf = pops->psm_unset_idlecpu;

	psm_clkinit = pops->psm_clkinit;

	if (pops->psm_timer_reprogram)
		psm_timer_reprogram = pops->psm_timer_reprogram;

	if (pops->psm_timer_enable)
		psm_timer_enable = pops->psm_timer_enable;

	if (pops->psm_timer_disable)
		psm_timer_disable = pops->psm_timer_disable;

	if (pops->psm_post_cyclic_setup)
		psm_post_cyclic_setup = pops->psm_post_cyclic_setup;

	/* check for multiple CPUs */
	if (cnt < 2)
		return;

	/* check for MP platforms */
	if (pops->psm_cpu_start == NULL)
		return;

	/*
	 * Set the dispatcher hook to enable cpu "wake up"
	 * when a thread becomes runnable.
	 */
	if (idle_cpu_use_hlt) {
		disp_enq_thread = cpu_wakeup;
#ifndef __xpv
		if ((x86_feature & X86_MWAIT) && idle_cpu_prefer_mwait)
			disp_enq_thread = cpu_wakeup_mwait;
#endif
	}

	if (pops->psm_disable_intr)
		psm_disable_intr = pops->psm_disable_intr;
	if (pops->psm_enable_intr)
		psm_enable_intr = pops->psm_enable_intr;

	psm_get_ipivect = pops->psm_get_ipivect;

	(void) add_avintr((void *)NULL, XC_HI_PIL, xc_serv, "xc_hi_intr",
	    (*pops->psm_get_ipivect)(XC_HI_PIL, PSM_INTR_IPI_HI),
	    (caddr_t)X_CALL_HIPRI, NULL, NULL, NULL);
	(void) add_avintr((void *)NULL, XC_MED_PIL, xc_serv, "xc_med_intr",
	    (*pops->psm_get_ipivect)(XC_MED_PIL, PSM_INTR_IPI_LO),
	    (caddr_t)X_CALL_MEDPRI, NULL, NULL, NULL);

	(void) (*pops->psm_get_ipivect)(XC_CPUPOKE_PIL, PSM_INTR_POKE);
}

static void
mach_picinit()
{
	struct psm_ops	*pops;

	pops = mach_set[0];

	/* register the interrupt handlers */
	setlvl = pops->psm_intr_enter;
	setlvlx = pops->psm_intr_exit;

	/* initialize the interrupt hardware */
	(*pops->psm_picinit)();

	/* set interrupt mask for current ipl */
	setspl = pops->psm_setspl;
	cli();
	setspl(CPU->cpu_pri);
}

uint_t cpu_freq;	/* MHz */
uint64_t cpu_freq_hz;	/* measured (in hertz) */

#define	MEGA_HZ		1000000

#ifdef __xpv

int xpv_cpufreq_workaround = 1;
int xpv_cpufreq_verbose = 0;

#else	/* __xpv */

static uint64_t
mach_calchz(uint32_t pit_counter, uint64_t *processor_clks)
{
	uint64_t cpu_hz;

	if ((pit_counter == 0) || (*processor_clks == 0) ||
	    (*processor_clks > (((uint64_t)-1) / PIT_HZ)))
		return (0);

	cpu_hz = ((uint64_t)PIT_HZ * *processor_clks) / pit_counter;

	return (cpu_hz);
}

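/*
 * Example of the calculation above (hypothetical numbers): the i8254 PIT
 * runs at PIT_HZ (nominally ~1.193182 MHz).  If freq_tsc() reported that
 * roughly 100,000,000 processor clocks elapsed while the PIT counted down
 * 59,660 ticks (about 50ms), then
 *
 *	cpu_hz = PIT_HZ * processor_clks / pit_counter
 *	       = 1193182 * 100000000 / 59660
 *	       ~ 2.0e9
 *
 * i.e. a processor clock of roughly 2 GHz.
 */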
#endif	/* __xpv */

static uint64_t
mach_getcpufreq(void)
{
#if defined(__xpv)
	vcpu_time_info_t *vti = &CPU->cpu_m.mcpu_vcpu_info->time;
	uint64_t cpu_hz;

	/*
	 * During dom0 bringup, it was noted that on at least one older
	 * Intel HT machine, the hypervisor initially gives a tsc_to_system_mul
	 * value that is quite wrong (the 3.06GHz clock was reported
	 * as 4.77GHz).
	 *
	 * The curious thing is that if you stop the kernel at entry,
	 * breakpoint here and inspect the value with kmdb, the value
	 * is correct - but if you don't stop and simply enable the
	 * printf statement (below), you can see the bad value printed
	 * here. Almost as if something kmdb did caused the hypervisor to
	 * figure it out correctly. And, note that the hypervisor
	 * eventually -does- figure it out correctly ... if you look at
	 * the field later in the life of dom0, it is correct.
	 *
	 * For now, on dom0, we employ a slightly cheesy workaround of
	 * using the DOM0_PHYSINFO hypercall.
	 */
	if (DOMAIN_IS_INITDOMAIN(xen_info) && xpv_cpufreq_workaround) {
		xen_sysctl_t op0, *op = &op0;

		op->cmd = XEN_SYSCTL_physinfo;
		op->interface_version = XEN_SYSCTL_INTERFACE_VERSION;
		if (HYPERVISOR_sysctl(op) != 0)
			panic("physinfo op refused");

		cpu_hz = 1000 * (uint64_t)op->u.physinfo.cpu_khz;
	} else {
		cpu_hz = (UINT64_C(1000000000) << 32) / vti->tsc_to_system_mul;

		if (vti->tsc_shift < 0)
			cpu_hz <<= -vti->tsc_shift;
		else
			cpu_hz >>= vti->tsc_shift;
	}

	if (xpv_cpufreq_verbose)
		printf("mach_getcpufreq: system_mul 0x%x, shift %d, "
		    "cpu_hz %" PRId64 "Hz\n",
		    vti->tsc_to_system_mul, vti->tsc_shift, cpu_hz);

	return (cpu_hz);
#else	/* __xpv */
	uint32_t pit_counter;
	uint64_t processor_clks;

	if (x86_feature & X86_TSC) {
		/*
		 * We have a TSC. freq_tsc() knows how to measure the number
		 * of clock cycles sampled against the PIT.
		 */
		ulong_t flags = clear_int_flag();
		processor_clks = freq_tsc(&pit_counter);
		restore_int_flag(flags);
		return (mach_calchz(pit_counter, &processor_clks));
	} else if (x86_vendor == X86_VENDOR_Cyrix || x86_type == X86_TYPE_P5) {
#if defined(__amd64)
		panic("mach_getcpufreq: no TSC!");
#elif defined(__i386)
		/*
		 * We are a Cyrix based on a 6x86 core or an Intel Pentium
		 * for which freq_notsc() knows how to measure the number of
		 * elapsed clock cycles sampled against the PIT.
		 */
		ulong_t flags = clear_int_flag();
		processor_clks = freq_notsc(&pit_counter);
		restore_int_flag(flags);
		return (mach_calchz(pit_counter, &processor_clks));
#endif	/* __i386 */
	}

	/* We do not know how to calculate cpu frequency for this cpu. */
	return (0);
#endif	/* __xpv */
}

/*
 * If the clock speed of a cpu is found to be reported incorrectly, do not
 * add to this array; instead, improve the accuracy of the algorithm that
 * determines the clock speed of the processor or extend the implementation
 * to support the vendor as appropriate. This is here only to support
 * adjusting the speed on older slower processors that mach_fixcpufreq()
 * would not be able to account for otherwise.
 */
static int x86_cpu_freq[] = { 60, 75, 80, 90, 120, 160, 166, 175, 180, 233 };

/*
 * On fast processors the clock frequency that is measured may be off by
 * a few MHz from the value printed on the part. This is a combination of
 * the factors that for such fast parts being off by this much is within
 * the tolerances for manufacture and because of the difficulties in the
 * measurement that can lead to small error. This function uses some
 * heuristics in order to tweak the value that was measured to match what
 * is most likely printed on the part.
 *
 * Some examples:
 *	AMD Athlon 1000 mhz measured as 998 mhz
 *	Intel Pentium III Xeon 733 mhz measured as 731 mhz
 *	Intel Pentium IV 1500 mhz measured as 1495 mhz
 *
 * If in the future this function is no longer sufficient to correct
 * for the error in the measurement, then the algorithm used to perform
 * the measurement will have to be improved in order to increase accuracy
 * rather than adding horrible and questionable kludges here.
 *
 * This is called after the cyclics subsystem because of the potential
 * that the heuristics within may give a worse estimate of the clock
 * frequency than the value that was measured.
 */
static void
mach_fixcpufreq(void)
{
	uint32_t freq, mul, near66, delta66, near50, delta50, fixed, delta, i;

	freq = (uint32_t)cpu_freq;

	/*
	 * Find the nearest integer multiple of 200/3 (about 66) MHz to the
	 * measured speed taking into account that the 667 MHz parts were
	 * the first to round-up.
	 */
	mul = (uint32_t)((3 * (uint64_t)freq + 100) / 200);
	near66 = (uint32_t)((200 * (uint64_t)mul + ((mul >= 10) ? 1 : 0)) / 3);
	delta66 = (near66 > freq) ? (near66 - freq) : (freq - near66);

	/* Find the nearest integer multiple of 50 MHz to the measured speed */
	mul = (freq + 25) / 50;
	near50 = mul * 50;
	delta50 = (near50 > freq) ? (near50 - freq) : (freq - near50);

	/* Find the closer of the two */
	if (delta66 < delta50) {
		fixed = near66;
		delta = delta66;
	} else {
		fixed = near50;
		delta = delta50;
	}

	if (fixed > INT_MAX)
		return;

	/*
	 * Some older parts have a core clock frequency that is not an
	 * integral multiple of 50 or 66 MHz. Check if one of the old
	 * clock frequencies is closer to the measured value than any
	 * of the integral multiples of 50 and 66, and if so set fixed
	 * and delta appropriately to represent the closest value.
	 */
	i = sizeof (x86_cpu_freq) / sizeof (int);
	while (i > 0) {
		i--;

		if (x86_cpu_freq[i] <= freq) {
			mul = freq - x86_cpu_freq[i];

			if (mul < delta) {
				fixed = x86_cpu_freq[i];
				delta = mul;
			}

			break;
		}

		mul = x86_cpu_freq[i] - freq;

		if (mul < delta) {
			fixed = x86_cpu_freq[i];
			delta = mul;
		}
	}

	/*
	 * Set a reasonable maximum for how much to correct the measured
	 * result by. This check is here to prevent the adjustment made
	 * by this function from being more harm than good. It is entirely
	 * possible that in the future parts will be made that are not
	 * integral multiples of 66 or 50 in clock frequency or that
	 * someone may overclock a part to some odd frequency. If the
	 * measured value is farther from the corrected value than
	 * allowed, then assume the corrected value is in error and use
	 * the measured value.
	 */
	if (6 < delta)
		return;

	cpu_freq = (int)fixed;
}

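/*
 * Worked example of the heuristic above for the Athlon case mentioned in
 * the block comment: with a measured cpu_freq of 998 MHz,
 *
 *	66 MHz multiples: mul = (3 * 998 + 100) / 200 = 15,
 *	    near66 = (200 * 15 + 1) / 3 = 1000, delta66 = 2
 *	50 MHz multiples: mul = (998 + 25) / 50 = 20,
 *	    near50 = 1000, delta50 = 2
 *
 * Both candidates are 1000 with a delta of 2, none of the x86_cpu_freq[]
 * legacy speeds is closer, and delta <= 6, so cpu_freq is corrected to 1000.
 */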
static int
machhztomhz(uint64_t cpu_freq_hz)
{
	uint64_t cpu_mhz;

	/* Round to nearest MHZ */
	cpu_mhz = (cpu_freq_hz + (MEGA_HZ / 2)) / MEGA_HZ;

	if (cpu_mhz > INT_MAX)
		return (0);

	return ((int)cpu_mhz);

}


static int
mach_clkinit(int preferred_mode, int *set_mode)
{
	struct psm_ops	*pops;
	int		resolution;

	pops = mach_set[0];

	cpu_freq_hz = mach_getcpufreq();

	cpu_freq = machhztomhz(cpu_freq_hz);

	if (!(x86_feature & X86_TSC) || (cpu_freq == 0))
		tsc_gethrtime_enable = 0;

#ifndef __xpv
	if (tsc_gethrtime_enable) {
		tsc_hrtimeinit(cpu_freq_hz);
	} else
#endif
	{
		if (pops->psm_hrtimeinit)
			(*pops->psm_hrtimeinit)();
		gethrtimef = pops->psm_gethrtime;
		gethrtimeunscaledf = gethrtimef;
		/* scalehrtimef will remain dummy */
	}

	mach_fixcpufreq();

	if (mach_ver[0] >= PSM_INFO_VER01_3) {
		if (preferred_mode == TIMER_ONESHOT) {

			resolution = (*pops->psm_clkinit)(0);
			if (resolution != 0) {
				*set_mode = TIMER_ONESHOT;
				return (resolution);
			}
		}

		/*
		 * either periodic mode was requested or could not set to
		 * one-shot mode
		 */
		resolution = (*pops->psm_clkinit)(hz);
		/*
		 * psm should be able to do periodic, so we do not check
		 * for return value of psm_clkinit here.
		 */
		*set_mode = TIMER_PERIODIC;
		return (resolution);
	} else {
		/*
		 * PSMI interface prior to PSMI_3 does not define a return
		 * value for psm_clkinit, so the return value is ignored.
		 */
		(void) (*pops->psm_clkinit)(hz);
		*set_mode = TIMER_PERIODIC;
		return (nsec_per_tick);
	}
}


/*ARGSUSED*/
static int
mach_softlvl_to_vect(int ipl)
{
	setsoftint = av_set_softint_pending;
	kdisetsoftint = kdi_av_set_softint_pending;

	return (PSM_SV_SOFTWARE);
}

#ifdef DEBUG
/*
 * This is here to allow us to simulate cpus that refuse to start.
 */
cpuset_t cpufailset;
#endif

int
mach_cpu_start(struct cpu *cp, void *ctx)
{
	struct psm_ops *pops = mach_set[0];
	processorid_t id = cp->cpu_id;

#ifdef DEBUG
	if (CPU_IN_SET(cpufailset, id))
		return (0);
#endif
	return ((*pops->psm_cpu_start)(id, ctx));
}

/*ARGSUSED*/
static int
mach_translate_irq(dev_info_t *dip, int irqno)
{
	return (irqno);	/* default to NO translation */
}

static void
mach_notify_error(int level, char *errmsg)
{
	/*
	 * SL_FATAL is passed in once panicstr is set; deliver it
	 * as CE_PANIC. Also, translate SL_ codes back to CE_
	 * codes for the psmi handler.
	 */
	if (level & SL_FATAL)
		(*notify_error)(CE_PANIC, errmsg);
	else if (level & SL_WARN)
		(*notify_error)(CE_WARN, errmsg);
	else if (level & SL_NOTE)
		(*notify_error)(CE_NOTE, errmsg);
	else if (level & SL_CONSOLE)
		(*notify_error)(CE_CONT, errmsg);
}

/*
 * It provides the default basic intr_ops interface for the new DDI
 * interrupt framework if the PSM doesn't have one.
 *
 * Input:
 * dip     - pointer to the dev_info structure of the requested device
 * hdlp    - pointer to the internal interrupt handle structure for the
 *	     requested interrupt
 * intr_op - opcode for this call
 * result  - pointer to the integer that will hold the result to be
 *	     passed back if return value is PSM_SUCCESS
 *
 * Output:
 * return value is either PSM_SUCCESS or PSM_FAILURE
 */
static int
mach_intr_ops(dev_info_t *dip, ddi_intr_handle_impl_t *hdlp,
    psm_intr_op_t intr_op, int *result)
{
	struct intrspec *ispec;

	switch (intr_op) {
	case PSM_INTR_OP_CHECK_MSI:
		*result = hdlp->ih_type & ~(DDI_INTR_TYPE_MSI |
		    DDI_INTR_TYPE_MSIX);
		break;
	case PSM_INTR_OP_ALLOC_VECTORS:
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			*result = 1;
		else
			*result = 0;
		break;
	case PSM_INTR_OP_FREE_VECTORS:
		break;
	case PSM_INTR_OP_NAVAIL_VECTORS:
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			*result = 1;
		else
			*result = 0;
		break;
	case PSM_INTR_OP_XLATE_VECTOR:
		ispec = ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp;
		*result = psm_translate_irq(dip, ispec->intrspec_vec);
		break;
	case PSM_INTR_OP_GET_CAP:
		*result = 0;
		break;
	case PSM_INTR_OP_GET_PENDING:
	case PSM_INTR_OP_CLEAR_MASK:
	case PSM_INTR_OP_SET_MASK:
	case PSM_INTR_OP_GET_SHARED:
	case PSM_INTR_OP_SET_PRI:
	case PSM_INTR_OP_SET_CAP:
	case PSM_INTR_OP_SET_CPU:
	case PSM_INTR_OP_GET_INTR:
	default:
		return (PSM_FAILURE);
	}
	return (PSM_SUCCESS);
}

/*
 * Return 1 if CMT load balancing policies should be
 * implemented across instances of the specified hardware
 * sharing relationship.
 */
int
pg_cmt_load_bal_hw(pghw_type_t hw)
{
	if (hw == PGHW_IPIPE ||
	    hw == PGHW_FPU ||
	    hw == PGHW_CHIP)
		return (1);
	else
		return (0);
}

/*
 * Return 1 if thread affinity policies should be implemented
 * for instances of the specified hardware sharing relationship.
 */
int
pg_cmt_affinity_hw(pghw_type_t hw)
{
	if (hw == PGHW_CACHE)
		return (1);
	else
		return (0);
}