/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#define	PSMI_1_6
#include <sys/smp_impldefs.h>
#include <sys/psm.h>
#include <sys/psm_modctl.h>
#include <sys/pit.h>
#include <sys/cmn_err.h>
#include <sys/strlog.h>
#include <sys/clock.h>
#include <sys/debug.h>
#include <sys/rtc.h>
#include <sys/x86_archext.h>
#include <sys/cpupart.h>
#include <sys/cpuvar.h>
#include <sys/cmt.h>
#include <sys/cpu.h>
#include <sys/disp.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/sysmacros.h>
#include <sys/memlist.h>
#include <sys/param.h>
#include <sys/promif.h>
#if defined(__xpv)
#include <sys/hypervisor.h>
#endif
#include <sys/mach_intr.h>
#include <vm/hat_i86.h>
#include <sys/kdi_machimpl.h>
#include <sys/sdt.h>

#define	OFFSETOF(s, m)		(size_t)(&(((s *)0)->m))

/*
 * Local function prototypes
 */
static int mp_disable_intr(processorid_t cpun);
static void mp_enable_intr(processorid_t cpun);
static void mach_init();
static void mach_picinit();
static int machhztomhz(uint64_t cpu_freq_hz);
static uint64_t mach_getcpufreq(void);
static void mach_fixcpufreq(void);
static int mach_clkinit(int, int *);
static void mach_smpinit(void);
static int mach_softlvl_to_vect(int ipl);
static void mach_get_platform(int owner);
static void mach_construct_info();
static int mach_translate_irq(dev_info_t *dip, int irqno);
static int mach_intr_ops(dev_info_t *, ddi_intr_handle_impl_t *,
    psm_intr_op_t, int *);
static void mach_notify_error(int level, char *errmsg);
static hrtime_t dummy_hrtime(void);
static void dummy_scalehrtime(hrtime_t *);
static void cpu_idle(void);
static void cpu_wakeup(cpu_t *, int);
#ifndef __xpv
static void cpu_idle_mwait(void);
static void cpu_wakeup_mwait(cpu_t *, int);
#endif
/*
 * External reference functions
 */
extern void return_instr();
extern uint64_t freq_tsc(uint32_t *);
#if defined(__i386)
extern uint64_t freq_notsc(uint32_t *);
#endif
extern void pc_gethrestime(timestruc_t *);
extern int cpuid_get_coreid(cpu_t *);
extern int cpuid_get_chipid(cpu_t *);
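
/*
 * Note: most of the vectors below start out pointing at return_instr (a
 * no-op), a dummy routine, or NULL.  mach_init(), mach_smpinit() and
 * mach_picinit() repoint them at the chosen PSM module's implementations
 * once that module has been selected.
 */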

/*
 *	PSM functions initialization
 */
void (*psm_shutdownf)(int, int) = (void (*)(int, int))return_instr;
void (*psm_preshutdownf)(int, int) = (void (*)(int, int))return_instr;
void (*psm_notifyf)(int) = (void (*)(int))return_instr;
void (*psm_set_idle_cpuf)(int) = (void (*)(int))return_instr;
void (*psm_unset_idle_cpuf)(int) = (void (*)(int))return_instr;
void (*psminitf)() = mach_init;
void (*picinitf)() = return_instr;
int (*clkinitf)(int, int *) = (int (*)(int, int *))return_instr;
int (*ap_mlsetup)() = (int (*)(void))return_instr;
void (*send_dirintf)() = return_instr;
void (*setspl)(int) = (void (*)(int))return_instr;
int (*addspl)(int, int, int, int) = (int (*)(int, int, int, int))return_instr;
int (*delspl)(int, int, int, int) = (int (*)(int, int, int, int))return_instr;
void (*kdisetsoftint)(int, struct av_softinfo *)=
	(void (*)(int, struct av_softinfo *))return_instr;
void (*setsoftint)(int, struct av_softinfo *)=
	(void (*)(int, struct av_softinfo *))return_instr;
int (*slvltovect)(int) = (int (*)(int))return_instr;
int (*setlvl)(int, int *) = (int (*)(int, int *))return_instr;
void (*setlvlx)(int, int) = (void (*)(int, int))return_instr;
int (*psm_disable_intr)(int) = mp_disable_intr;
void (*psm_enable_intr)(int) = mp_enable_intr;
hrtime_t (*gethrtimef)(void) = dummy_hrtime;
hrtime_t (*gethrtimeunscaledf)(void) = dummy_hrtime;
void (*scalehrtimef)(hrtime_t *) = dummy_scalehrtime;
int (*psm_translate_irq)(dev_info_t *, int) = mach_translate_irq;
void (*gethrestimef)(timestruc_t *) = pc_gethrestime;
void (*psm_notify_error)(int, char *) = (void (*)(int, char *))NULL;
int (*psm_get_clockirq)(int) = NULL;
int (*psm_get_ipivect)(int, int) = NULL;

int (*psm_clkinit)(int) = NULL;
void (*psm_timer_reprogram)(hrtime_t) = NULL;
void (*psm_timer_enable)(void) = NULL;
void (*psm_timer_disable)(void) = NULL;
void (*psm_post_cyclic_setup)(void *arg) = NULL;
int (*psm_intr_ops)(dev_info_t *, ddi_intr_handle_impl_t *, psm_intr_op_t,
    int *) = mach_intr_ops;
int (*psm_state)(psm_state_request_t *) = (int (*)(psm_state_request_t *))
    return_instr;

void (*notify_error)(int, char *) = (void (*)(int, char *))return_instr;
void (*hrtime_tick)(void) = return_instr;

/*
 * True if the generic TSC code is our source of hrtime, rather than whatever
 * the PSM can provide.
 */
#ifdef __xpv
int tsc_gethrtime_enable = 0;
#else
int tsc_gethrtime_enable = 1;
#endif
int tsc_gethrtime_initted = 0;

/*
 * True if the hrtime implementation is "hires"; namely, better than microdata.
 */
int gethrtime_hires = 0;

/*
 * Local Static Data
 */
static struct psm_ops mach_ops;
static struct psm_ops *mach_set[4] = {&mach_ops, NULL, NULL, NULL};
static ushort_t mach_ver[4] = {0, 0, 0, 0};

/*
 * If non-zero, idle cpus will become "halted" when there's
 * no work to do.
 */
int idle_cpu_use_hlt = 1;

#ifndef __xpv
/*
 * If non-zero, idle cpus will use mwait if available to halt instead of hlt.
 */
int idle_cpu_prefer_mwait = 1;
#endif

/*ARGSUSED*/
int
pg_plat_hw_shared(cpu_t *cp, pghw_type_t hw)
{
	switch (hw) {
	case PGHW_IPIPE:
		if (x86_feature & (X86_HTT)) {
			/*
			 * Hyper-threading is SMT
			 */
			return (1);
		} else {
			return (0);
		}
	case PGHW_CHIP:
		if (x86_feature & (X86_CMP|X86_HTT))
			return (1);
		else
			return (0);
	case PGHW_CACHE:
		if (cpuid_get_ncpu_sharing_last_cache(cp) > 1)
			return (1);
		else
			return (0);
	default:
		return (0);
	}
}

/*
 * Compare two CPUs and see if they have a pghw_type_t sharing relationship.
 * If pghw_type_t is an unsupported hardware type, then return -1.
 */
int
pg_plat_cpus_share(cpu_t *cpu_a, cpu_t *cpu_b, pghw_type_t hw)
{
	id_t pgp_a, pgp_b;

	pgp_a = pg_plat_hw_instance_id(cpu_a, hw);
	pgp_b = pg_plat_hw_instance_id(cpu_b, hw);

	if (pgp_a == -1 || pgp_b == -1)
		return (-1);

	return (pgp_a == pgp_b);
}

/*
 * Return a physical instance identifier for known hardware sharing
 * relationships
 */
id_t
pg_plat_hw_instance_id(cpu_t *cpu, pghw_type_t hw)
{
	switch (hw) {
	case PGHW_IPIPE:
		return (cpuid_get_coreid(cpu));
	case PGHW_CACHE:
		return (cpuid_get_last_lvl_cacheid(cpu));
	case PGHW_CHIP:
		return (cpuid_get_chipid(cpu));
	default:
		return (-1);
	}
}

int
pg_plat_hw_level(pghw_type_t hw)
{
	int i;
	static pghw_type_t hw_hier[] = {
		PGHW_IPIPE,
		PGHW_CACHE,
		PGHW_CHIP,
		PGHW_NUM_COMPONENTS
	};

	for (i = 0; hw_hier[i] != PGHW_NUM_COMPONENTS; i++) {
		if (hw_hier[i] == hw)
			return (i);
	}
	return (-1);
}

/*
 * Return 1 if CMT load balancing policies should be
 * implemented across instances of the specified hardware
 * sharing relationship.
 */
int
pg_plat_cmt_load_bal_hw(pghw_type_t hw)
{
	if (hw == PGHW_IPIPE ||
	    hw == PGHW_FPU ||
	    hw == PGHW_CHIP ||
	    hw == PGHW_CACHE)
		return (1);
	else
		return (0);
}


/*
 * Return 1 if thread affinity policies should be implemented
 * for instances of the specified hardware sharing relationship.
 */
int
pg_plat_cmt_affinity_hw(pghw_type_t hw)
{
	if (hw == PGHW_CACHE)
		return (1);
	else
		return (0);
}

id_t
pg_plat_get_core_id(cpu_t *cpu)
{
	return ((id_t)cpuid_get_coreid(cpu));
}

void
cmp_set_nosteal_interval(void)
{
	/* Set the nosteal interval (used by disp_getbest()) to 100us */
	nosteal_nsec = 100000UL;
}

/*
 * Routine to ensure initial callers to hrtime gets 0 as return
 */
static hrtime_t
dummy_hrtime(void)
{
	return (0);
}

/* ARGSUSED */
static void
dummy_scalehrtime(hrtime_t *ticks)
{}
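
/*
 * cpu_idle() and cpu_wakeup() below form a pair: an idling CPU marks itself
 * in its partition's mc_haltset before halting, and cpu_wakeup() clears a
 * halted CPU's bit and then pokes it.  The clear-then-poke (waker) and
 * disable-interrupts-then-check (idler) ordering described in the comments
 * below is what prevents a lost wakeup.
 */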

/*
 * Idle the present CPU until awoken via an interrupt
 */
static void
cpu_idle(void)
{
	cpu_t		*cpup = CPU;
	processorid_t	cpun = cpup->cpu_id;
	cpupart_t	*cp = cpup->cpu_part;
	int		hset_update = 1;

	/*
	 * If this CPU is online, and there's multiple CPUs
	 * in the system, then we should notate our halting
	 * by adding ourselves to the partition's halted CPU
	 * bitmap. This allows other CPUs to find/awaken us when
	 * work becomes available.
	 */
	if (cpup->cpu_flags & CPU_OFFLINE || ncpus == 1)
		hset_update = 0;

	/*
	 * Add ourselves to the partition's halted CPUs bitmask
	 * and set our HALTED flag, if necessary.
	 *
	 * When a thread becomes runnable, it is placed on the queue
	 * and then the halted cpuset is checked to determine who
	 * (if anyone) should be awoken. We therefore need to first
	 * add ourselves to the halted cpuset, and then check if there
	 * is any work available.
	 *
	 * Note that memory barriers after updating the HALTED flag
	 * are not necessary since an atomic operation (updating the bitmap)
	 * immediately follows. On x86 the atomic operation acts as a
	 * memory barrier for the update of cpu_disp_flags.
	 */
	if (hset_update) {
		cpup->cpu_disp_flags |= CPU_DISP_HALTED;
		CPUSET_ATOMIC_ADD(cp->cp_mach->mc_haltset, cpun);
	}

	/*
	 * Check to make sure there's really nothing to do.
	 * Work destined for this CPU may become available after
	 * this check. We'll be notified through the clearing of our
	 * bit in the halted CPU bitmask, and a poke.
	 */
	if (disp_anywork()) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpun);
		}
		return;
	}

	/*
	 * We're on our way to being halted.
	 *
	 * Disable interrupts now, so that we'll awaken immediately
	 * after halting if someone tries to poke us between now and
	 * the time we actually halt.
	 *
	 * We check for the presence of our bit after disabling interrupts.
	 * If it's cleared, we'll return. If the bit is cleared after
	 * we check then the poke will pop us out of the halted state.
	 *
	 * This means that the ordering of the poke and the clearing
	 * of the bit by cpu_wakeup is important.
	 * cpu_wakeup() must clear, then poke.
	 * cpu_idle() must disable interrupts, then check for the bit.
	 */
	cli();

	if (hset_update && !CPU_IN_SET(cp->cp_mach->mc_haltset, cpun)) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		sti();
		return;
	}

	/*
	 * The check for anything locally runnable is here for performance
	 * and isn't needed for correctness. disp_nrunnable ought to be
	 * in our cache still, so it's inexpensive to check, and if there
	 * is anything runnable we won't have to wait for the poke.
	 */
	if (cpup->cpu_disp->disp_nrunnable != 0) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpun);
		}
		sti();
		return;
	}

	DTRACE_PROBE1(idle__state__transition, uint_t, IDLE_STATE_C1);

	mach_cpu_idle();

	DTRACE_PROBE1(idle__state__transition, uint_t, IDLE_STATE_C0);

	/*
	 * We're no longer halted
	 */
	if (hset_update) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpun);
	}
}


/*
 * If "cpu" is halted, then wake it up clearing its halted bit in advance.
 * Otherwise, see if other CPUs in the cpu partition are halted and need to
 * be woken up so that they can steal the thread we placed on this CPU.
 * This function is only used on MP systems.
 */
static void
cpu_wakeup(cpu_t *cpu, int bound)
{
	uint_t		cpu_found;
	int		result;
	cpupart_t	*cp;

	cp = cpu->cpu_part;
	if (CPU_IN_SET(cp->cp_mach->mc_haltset, cpu->cpu_id)) {
		/*
		 * Clear the halted bit for that CPU since it will be
		 * poked in a moment.
		 */
		CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpu->cpu_id);
		/*
		 * We may find the current CPU present in the halted cpuset
		 * if we're in the context of an interrupt that occurred
		 * before we had a chance to clear our bit in cpu_idle().
		 * Poking ourself is obviously unnecessary, since if
		 * we're here, we're not halted.
		 */
		if (cpu != CPU)
			poke_cpu(cpu->cpu_id);
		return;
	} else {
		/*
		 * This cpu isn't halted, but it's idle or undergoing a
		 * context switch. No need to awaken anyone else.
		 */
		if (cpu->cpu_thread == cpu->cpu_idle_thread ||
		    cpu->cpu_disp_flags & CPU_DISP_DONTSTEAL)
			return;
	}

	/*
	 * No need to wake up other CPUs if the thread we just enqueued
	 * is bound.
	 */
	if (bound)
		return;


	/*
	 * See if there's any other halted CPUs. If there are, then
	 * select one, and awaken it.
	 * It's possible that after we find a CPU, somebody else
	 * will awaken it before we get the chance.
	 * In that case, look again.
	 */
	do {
		CPUSET_FIND(cp->cp_mach->mc_haltset, cpu_found);
		if (cpu_found == CPUSET_NOTINSET)
			return;

		ASSERT(cpu_found >= 0 && cpu_found < NCPU);
		CPUSET_ATOMIC_XDEL(cp->cp_mach->mc_haltset, cpu_found, result);
	} while (result < 0);

	if (cpu_found != CPU->cpu_id)
		poke_cpu(cpu_found);
}

#ifndef __xpv
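/*
 * The mwait-based idle loop below relies on a simple protocol: the idling
 * CPU stores MWAIT_HALTED to its own mcpu_mwait line, arms the monitor on
 * that line, re-checks the value, and only then executes mwait.  A waker
 * stores a new value to the line (MWAIT_WAKEUP()), which satisfies the
 * monitor and wakes the CPU without needing to send an IPI.
 */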
/*
 * Idle the present CPU until awoken via touching its monitored line
 */
static void
cpu_idle_mwait(void)
{
	volatile uint32_t	*mcpu_mwait = CPU->cpu_m.mcpu_mwait;
	cpu_t			*cpup = CPU;
	processorid_t		cpun = cpup->cpu_id;
	cpupart_t		*cp = cpup->cpu_part;
	int			hset_update = 1;

	/*
	 * Set our mcpu_mwait here, so we can tell if anyone tries to
	 * wake us between now and when we call mwait. No other cpu will
	 * attempt to set our mcpu_mwait until we add ourself to the haltset.
	 */
	*mcpu_mwait = MWAIT_HALTED;

	/*
	 * If this CPU is online, and there's multiple CPUs
	 * in the system, then we should notate our halting
	 * by adding ourselves to the partition's halted CPU
	 * bitmap. This allows other CPUs to find/awaken us when
	 * work becomes available.
	 */
	if (cpup->cpu_flags & CPU_OFFLINE || ncpus == 1)
		hset_update = 0;

	/*
	 * Add ourselves to the partition's halted CPUs bitmask
	 * and set our HALTED flag, if necessary.
	 *
	 * When a thread becomes runnable, it is placed on the queue
	 * and then the halted cpuset is checked to determine who
	 * (if anyone) should be awoken. We therefore need to first
	 * add ourselves to the halted cpuset, and then check if there
	 * is any work available.
	 *
	 * Note that memory barriers after updating the HALTED flag
	 * are not necessary since an atomic operation (updating the bitmap)
	 * immediately follows. On x86 the atomic operation acts as a
	 * memory barrier for the update of cpu_disp_flags.
	 */
	if (hset_update) {
		cpup->cpu_disp_flags |= CPU_DISP_HALTED;
		CPUSET_ATOMIC_ADD(cp->cp_mach->mc_haltset, cpun);
	}

	/*
	 * Check to make sure there's really nothing to do.
	 * Work destined for this CPU may become available after
	 * this check. We'll be notified through the clearing of our
	 * bit in the halted CPU bitmask, and a write to our mcpu_mwait.
	 *
	 * disp_anywork() checks disp_nrunnable, so we do not have to later.
	 */
	if (disp_anywork()) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpun);
		}
		return;
	}

	/*
	 * We're on our way to being halted.
	 * To avoid a lost wakeup, arm the monitor before checking if another
	 * cpu wrote to mcpu_mwait to wake us up.
	 */
	i86_monitor(mcpu_mwait, 0, 0);
	if (*mcpu_mwait == MWAIT_HALTED) {
		DTRACE_PROBE1(idle__state__transition, uint_t, IDLE_STATE_C1);

		tlb_going_idle();
		i86_mwait(0, 0);
		tlb_service();

		DTRACE_PROBE1(idle__state__transition, uint_t, IDLE_STATE_C0);
	}

	/*
	 * We're no longer halted
	 */
	if (hset_update) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpun);
	}
}

/*
 * If "cpu" is halted in mwait, then wake it up clearing its halted bit in
 * advance. Otherwise, see if other CPUs in the cpu partition are halted and
 * need to be woken up so that they can steal the thread we placed on this CPU.
 * This function is only used on MP systems.
 */
static void
cpu_wakeup_mwait(cpu_t *cp, int bound)
{
	cpupart_t	*cpu_part;
	uint_t		cpu_found;
	int		result;

	cpu_part = cp->cpu_part;

	/*
	 * Clear the halted bit for that CPU since it will be woken up
	 * in a moment.
	 */
	if (CPU_IN_SET(cpu_part->cp_mach->mc_haltset, cp->cpu_id)) {
		/*
		 * Clear the halted bit for that CPU since it will be
		 * poked in a moment.
		 */
		CPUSET_ATOMIC_DEL(cpu_part->cp_mach->mc_haltset, cp->cpu_id);
		/*
		 * We may find the current CPU present in the halted cpuset
		 * if we're in the context of an interrupt that occurred
		 * before we had a chance to clear our bit in cpu_idle().
		 * Waking ourself is obviously unnecessary, since if
		 * we're here, we're not halted.
		 *
		 * monitor/mwait wakeup via writing to our cache line is
		 * harmless and less expensive than always checking if we
		 * are waking ourself which is an uncommon case.
		 */
		MWAIT_WAKEUP(cp);	/* write to monitored line */
		return;
	} else {
		/*
		 * This cpu isn't halted, but it's idle or undergoing a
		 * context switch. No need to awaken anyone else.
		 */
		if (cp->cpu_thread == cp->cpu_idle_thread ||
		    cp->cpu_disp_flags & CPU_DISP_DONTSTEAL)
			return;
	}

	/*
	 * No need to wake up other CPUs if the thread we just enqueued
	 * is bound.
	 */
	if (bound)
		return;


	/*
	 * See if there's any other halted CPUs. If there are, then
	 * select one, and awaken it.
	 * It's possible that after we find a CPU, somebody else
	 * will awaken it before we get the chance.
	 * In that case, look again.
	 */
	do {
		CPUSET_FIND(cpu_part->cp_mach->mc_haltset, cpu_found);
		if (cpu_found == CPUSET_NOTINSET)
			return;

		ASSERT(cpu_found >= 0 && cpu_found < NCPU);
		CPUSET_ATOMIC_XDEL(cpu_part->cp_mach->mc_haltset, cpu_found,
		    result);
	} while (result < 0);

	/*
	 * Do not check if cpu_found is ourself as monitor/mwait wakeup is
	 * cheap.
	 */
	MWAIT_WAKEUP(cpu[cpu_found]);	/* write to monitored line */
}
#endif

void (*cpu_pause_handler)(volatile char *) = NULL;

static int
mp_disable_intr(int cpun)
{
	/*
	 * switch to the offline cpu
	 */
	affinity_set(cpun);
	/*
	 * raise ipl to just below cross call
	 */
	splx(XC_MED_PIL-1);
	/*
	 * set base spl to prevent the next swtch to idle from
	 * lowering back to ipl 0
	 */
	CPU->cpu_intr_actv |= (1 << (XC_MED_PIL-1));
	set_base_spl();
	affinity_clear();
	return (DDI_SUCCESS);
}

static void
mp_enable_intr(int cpun)
{
	/*
	 * switch to the online cpu
	 */
	affinity_set(cpun);
	/*
	 * clear the interrupt active mask
	 */
	CPU->cpu_intr_actv &= ~(1 << (XC_MED_PIL-1));
	set_base_spl();
	(void) spl0();
	affinity_clear();
}

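/*
 * mach_get_platform() merges the ops vector published by a PSM module into
 * the system default table (mach_set[0]).  Older PSMI versions define a
 * shorter psm_ops structure, so only the entries that exist for the module's
 * declared version are copied.
 */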
static void
mach_get_platform(int owner)
{
	void		**srv_opsp;
	void		**clt_opsp;
	int		i;
	int		total_ops;

	/* fix up psm ops */
	srv_opsp = (void **)mach_set[0];
	clt_opsp = (void **)mach_set[owner];
	if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01)
		total_ops = sizeof (struct psm_ops_ver01) /
		    sizeof (void (*)(void));
	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_1)
		/* no psm_notify_func */
		total_ops = OFFSETOF(struct psm_ops, psm_notify_func) /
		    sizeof (void (*)(void));
	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_2)
		/* no psm_timer funcs */
		total_ops = OFFSETOF(struct psm_ops, psm_timer_reprogram) /
		    sizeof (void (*)(void));
	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_3)
		/* no psm_preshutdown function */
		total_ops = OFFSETOF(struct psm_ops, psm_preshutdown) /
		    sizeof (void (*)(void));
	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_4)
		/* no psm_intr_ops function */
		total_ops = OFFSETOF(struct psm_ops, psm_intr_ops) /
		    sizeof (void (*)(void));
	else
		total_ops = sizeof (struct psm_ops) / sizeof (void (*)(void));

	/*
	 * Save the version of the PSM module, in case we need to
	 * behave differently based on version.
	 */
	mach_ver[0] = mach_ver[owner];

	for (i = 0; i < total_ops; i++)
		if (clt_opsp[i] != NULL)
			srv_opsp[i] = clt_opsp[i];
}

static void
mach_construct_info()
{
	struct psm_sw *swp;
	int	mach_cnt[PSM_OWN_OVERRIDE+1] = {0};
	int	conflict_owner = 0;

	if (psmsw->psw_forw == psmsw)
		panic("No valid PSM modules found");
	mutex_enter(&psmsw_lock);
	for (swp = psmsw->psw_forw; swp != psmsw; swp = swp->psw_forw) {
		if (!(swp->psw_flag & PSM_MOD_IDENTIFY))
			continue;
		mach_set[swp->psw_infop->p_owner] = swp->psw_infop->p_ops;
		mach_ver[swp->psw_infop->p_owner] = swp->psw_infop->p_version;
		mach_cnt[swp->psw_infop->p_owner]++;
	}
	mutex_exit(&psmsw_lock);

	mach_get_platform(PSM_OWN_SYS_DEFAULT);

	/* check to see if there are any conflicts */
	if (mach_cnt[PSM_OWN_EXCLUSIVE] > 1)
		conflict_owner = PSM_OWN_EXCLUSIVE;
	if (mach_cnt[PSM_OWN_OVERRIDE] > 1)
		conflict_owner = PSM_OWN_OVERRIDE;
	if (conflict_owner) {
		/* remove all psm modules except uppc */
		cmn_err(CE_WARN,
		    "Conflicts detected on the following PSM modules:");
		mutex_enter(&psmsw_lock);
		for (swp = psmsw->psw_forw; swp != psmsw; swp = swp->psw_forw) {
			if (swp->psw_infop->p_owner == conflict_owner)
				cmn_err(CE_WARN, "%s ",
				    swp->psw_infop->p_mach_idstring);
		}
		mutex_exit(&psmsw_lock);
		cmn_err(CE_WARN,
		    "Setting the system back to SINGLE processor mode!");
		cmn_err(CE_WARN,
		    "Please edit /etc/mach to remove the invalid PSM module.");
		return;
	}

	if (mach_set[PSM_OWN_EXCLUSIVE])
		mach_get_platform(PSM_OWN_EXCLUSIVE);

	if (mach_set[PSM_OWN_OVERRIDE])
		mach_get_platform(PSM_OWN_OVERRIDE);
}

static void
mach_init()
{
	struct psm_ops	*pops;

	mach_construct_info();

	pops = mach_set[0];

	/* register the interrupt and clock initialization routines */
	picinitf = mach_picinit;
	clkinitf = mach_clkinit;
	psm_get_clockirq = pops->psm_get_clockirq;

	/* register the interrupt setup code */
	slvltovect = mach_softlvl_to_vect;
	addspl = pops->psm_addspl;
	delspl = pops->psm_delspl;

	if (pops->psm_translate_irq)
		psm_translate_irq = pops->psm_translate_irq;
	if (pops->psm_intr_ops)
		psm_intr_ops = pops->psm_intr_ops;

#if defined(PSMI_1_2) || defined(PSMI_1_3) || defined(PSMI_1_4)
	/*
	 * Time-of-day functionality now handled in TOD modules.
	 * (Warn about PSM modules that think that we're going to use
	 * their ops vectors.)
	 */
	if (pops->psm_tod_get)
		cmn_err(CE_WARN, "obsolete psm_tod_get op %p",
		    (void *)pops->psm_tod_get);

	if (pops->psm_tod_set)
		cmn_err(CE_WARN, "obsolete psm_tod_set op %p",
		    (void *)pops->psm_tod_set);
#endif

	if (pops->psm_notify_error) {
		psm_notify_error = mach_notify_error;
		notify_error = pops->psm_notify_error;
	}

	(*pops->psm_softinit)();

	/*
	 * Initialize the dispatcher's function hooks
	 * to enable CPU halting when idle.
	 * Do not use monitor/mwait if idle_cpu_use_hlt is not set (spin idle)
	 * or idle_cpu_prefer_mwait is not set.
	 * Allocate monitor/mwait buffer for cpu0.
	 */
	if (idle_cpu_use_hlt) {
		idle_cpu = cpu_idle;
#ifndef __xpv
		if ((x86_feature & X86_MWAIT) && idle_cpu_prefer_mwait) {
			CPU->cpu_m.mcpu_mwait = cpuid_mwait_alloc(CPU);
			/*
			 * Protect ourself from insane mwait size.
			 */
			if (CPU->cpu_m.mcpu_mwait == NULL) {
#ifdef DEBUG
				cmn_err(CE_NOTE, "Using hlt idle.  Cannot "
				    "handle cpu 0 mwait size.");
#endif
				idle_cpu_prefer_mwait = 0;
				idle_cpu = cpu_idle;
			} else {
				idle_cpu = cpu_idle_mwait;
			}
		} else {
			idle_cpu = cpu_idle;
		}
#endif
	}

	mach_smpinit();
}

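/*
 * mach_smpinit() asks the PSM module to enumerate the processors it knows
 * about, wires up the MP-related PSM entry points, and registers the
 * cross-call (IPI) interrupt handlers.  On uniprocessor configurations it
 * returns before installing the wakeup hook or the IPI vectors.
 */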
static void
mach_smpinit(void)
{
	struct psm_ops	*pops;
	processorid_t	cpu_id;
	int		cnt;
	cpuset_t	cpumask;

	pops = mach_set[0];
	CPUSET_ZERO(cpumask);

	cpu_id = -1;
	cpu_id = (*pops->psm_get_next_processorid)(cpu_id);
	for (cnt = 0; cpu_id != -1; cnt++) {
		CPUSET_ADD(cpumask, cpu_id);
		cpu_id = (*pops->psm_get_next_processorid)(cpu_id);
	}

	mp_cpus = cpumask;

	/* MP related routines */
	ap_mlsetup = pops->psm_post_cpu_start;
	send_dirintf = pops->psm_send_ipi;

	/* optional MP related routines */
	if (pops->psm_shutdown)
		psm_shutdownf = pops->psm_shutdown;
	if (pops->psm_preshutdown)
		psm_preshutdownf = pops->psm_preshutdown;
	if (pops->psm_notify_func)
		psm_notifyf = pops->psm_notify_func;
	if (pops->psm_set_idlecpu)
		psm_set_idle_cpuf = pops->psm_set_idlecpu;
	if (pops->psm_unset_idlecpu)
		psm_unset_idle_cpuf = pops->psm_unset_idlecpu;

	psm_clkinit = pops->psm_clkinit;

	if (pops->psm_timer_reprogram)
		psm_timer_reprogram = pops->psm_timer_reprogram;

	if (pops->psm_timer_enable)
		psm_timer_enable = pops->psm_timer_enable;

	if (pops->psm_timer_disable)
		psm_timer_disable = pops->psm_timer_disable;

	if (pops->psm_post_cyclic_setup)
		psm_post_cyclic_setup = pops->psm_post_cyclic_setup;

	if (pops->psm_state)
		psm_state = pops->psm_state;

	/*
	 * Set these vectors here so they can be used by Suspend/Resume
	 * on UP machines.
	 */
	if (pops->psm_disable_intr)
		psm_disable_intr = pops->psm_disable_intr;
	if (pops->psm_enable_intr)
		psm_enable_intr = pops->psm_enable_intr;

	/* check for multiple CPUs */
	if (cnt < 2)
		return;

	/* check for MP platforms */
	if (pops->psm_cpu_start == NULL)
		return;

	/*
	 * Set the dispatcher hook to enable cpu "wake up"
	 * when a thread becomes runnable.
	 */
	if (idle_cpu_use_hlt) {
		disp_enq_thread = cpu_wakeup;
#ifndef __xpv
		if ((x86_feature & X86_MWAIT) && idle_cpu_prefer_mwait)
			disp_enq_thread = cpu_wakeup_mwait;
#endif
	}

	psm_get_ipivect = pops->psm_get_ipivect;

	(void) add_avintr((void *)NULL, XC_HI_PIL, xc_serv, "xc_hi_intr",
	    (*pops->psm_get_ipivect)(XC_HI_PIL, PSM_INTR_IPI_HI),
	    (caddr_t)X_CALL_HIPRI, NULL, NULL, NULL);
	(void) add_avintr((void *)NULL, XC_MED_PIL, xc_serv, "xc_med_intr",
	    (*pops->psm_get_ipivect)(XC_MED_PIL, PSM_INTR_IPI_LO),
	    (caddr_t)X_CALL_MEDPRI, NULL, NULL, NULL);

	(void) (*pops->psm_get_ipivect)(XC_CPUPOKE_PIL, PSM_INTR_POKE);
}

static void
mach_picinit()
{
	struct psm_ops	*pops;

	pops = mach_set[0];

	/* register the interrupt handlers */
	setlvl = pops->psm_intr_enter;
	setlvlx = pops->psm_intr_exit;

	/* initialize the interrupt hardware */
	(*pops->psm_picinit)();

	/* set interrupt mask for current ipl */
	setspl = pops->psm_setspl;
	cli();
	setspl(CPU->cpu_pri);
}

uint_t cpu_freq;	/* MHz */
uint64_t cpu_freq_hz;	/* measured (in hertz) */

#define	MEGA_HZ		1000000

#ifdef	__xpv

int xpv_cpufreq_workaround = 1;
int xpv_cpufreq_verbose = 0;

#else	/* __xpv */

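/*
 * A quick illustration of the arithmetic in mach_calchz() (numbers are made
 * up): if freq_tsc() reports 3,000,000 TSC cycles elapsed while the PIT
 * advanced 1,000 ticks, the result is PIT_HZ * 3000000 / 1000, which with a
 * PIT clock of roughly 1.193 MHz works out to about 3.58 GHz.
 */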
static uint64_t
mach_calchz(uint32_t pit_counter, uint64_t *processor_clks)
{
	uint64_t cpu_hz;

	if ((pit_counter == 0) || (*processor_clks == 0) ||
	    (*processor_clks > (((uint64_t)-1) / PIT_HZ)))
		return (0);

	cpu_hz = ((uint64_t)PIT_HZ * *processor_clks) / pit_counter;

	return (cpu_hz);
}

#endif	/* __xpv */

static uint64_t
mach_getcpufreq(void)
{
#if defined(__xpv)
	vcpu_time_info_t *vti = &CPU->cpu_m.mcpu_vcpu_info->time;
	uint64_t cpu_hz;

	/*
	 * During dom0 bringup, it was noted that on at least one older
	 * Intel HT machine, the hypervisor initially gives a tsc_to_system_mul
	 * value that is quite wrong (the 3.06GHz clock was reported
	 * as 4.77GHz).
	 *
	 * The curious thing is, that if you stop the kernel at entry,
	 * breakpoint here and inspect the value with kmdb, the value
	 * is correct - but if you don't stop and simply enable the
	 * printf statement (below), you can see the bad value printed
	 * here.  Almost as if something kmdb did caused the hypervisor to
	 * figure it out correctly.  And, note that the hypervisor
	 * eventually -does- figure it out correctly ... if you look at
	 * the field later in the life of dom0, it is correct.
	 *
	 * For now, on dom0, we employ a slightly cheesy workaround of
	 * using the DOM0_PHYSINFO hypercall.
	 */
	if (DOMAIN_IS_INITDOMAIN(xen_info) && xpv_cpufreq_workaround) {
		xen_sysctl_t op0, *op = &op0;

		op->cmd = XEN_SYSCTL_physinfo;
		op->interface_version = XEN_SYSCTL_INTERFACE_VERSION;
		if (HYPERVISOR_sysctl(op) != 0)
			panic("physinfo op refused");

		cpu_hz = 1000 * (uint64_t)op->u.physinfo.cpu_khz;
	} else {
		cpu_hz = (UINT64_C(1000000000) << 32) / vti->tsc_to_system_mul;

		if (vti->tsc_shift < 0)
			cpu_hz <<= -vti->tsc_shift;
		else
			cpu_hz >>= vti->tsc_shift;
	}

	if (xpv_cpufreq_verbose)
		printf("mach_getcpufreq: system_mul 0x%x, shift %d, "
		    "cpu_hz %" PRId64 "Hz\n",
		    vti->tsc_to_system_mul, vti->tsc_shift, cpu_hz);

	return (cpu_hz);
#else	/* __xpv */
	uint32_t pit_counter;
	uint64_t processor_clks;

	if (x86_feature & X86_TSC) {
		/*
		 * We have a TSC. freq_tsc() knows how to measure the number
		 * of clock cycles sampled against the PIT.
		 */
		ulong_t flags = clear_int_flag();
		processor_clks = freq_tsc(&pit_counter);
		restore_int_flag(flags);
		return (mach_calchz(pit_counter, &processor_clks));
	} else if (x86_vendor == X86_VENDOR_Cyrix || x86_type == X86_TYPE_P5) {
#if defined(__amd64)
		panic("mach_getcpufreq: no TSC!");
#elif defined(__i386)
		/*
		 * We are a Cyrix based on a 6x86 core or an Intel Pentium
		 * for which freq_notsc() knows how to measure the number of
		 * elapsed clock cycles sampled against the PIT
		 */
		ulong_t flags = clear_int_flag();
		processor_clks = freq_notsc(&pit_counter);
		restore_int_flag(flags);
		return (mach_calchz(pit_counter, &processor_clks));
#endif	/* __i386 */
	}

	/* We do not know how to calculate cpu frequency for this cpu. */
	return (0);
#endif	/* __xpv */
}

/*
 * If the clock speed of a cpu is found to be reported incorrectly, do not add
 * to this array, instead improve the accuracy of the algorithm that determines
 * the clock speed of the processor or extend the implementation to support the
 * vendor as appropriate. This is here only to support adjusting the speed on
 * older slower processors that mach_fixcpufreq() would not be able to account
 * for otherwise.
 */
static int x86_cpu_freq[] = { 60, 75, 80, 90, 120, 160, 166, 175, 180, 233 };

/*
 * On fast processors the clock frequency that is measured may be off by
 * a few MHz from the value printed on the part. This is a combination of
 * two factors: for such fast parts, being off by this much is within the
 * manufacturing tolerances, and the measurement itself is difficult enough
 * to introduce small errors. This function uses some heuristics in order
 * to tweak the value that was measured to match what is most likely
 * printed on the part.
 *
 * Some examples:
 *	AMD Athlon 1000 mhz measured as 998 mhz
 *	Intel Pentium III Xeon 733 mhz measured as 731 mhz
 *	Intel Pentium IV 1500 mhz measured as 1495 mhz
 *
 * If in the future this function is no longer sufficient to correct
 * for the error in the measurement, then the algorithm used to perform
 * the measurement will have to be improved in order to increase accuracy
 * rather than adding horrible and questionable kludges here.
 *
 * This is called after the cyclics subsystem because of the potential
 * that the heuristics within may give a worse estimate of the clock
 * frequency than the value that was measured.
 */
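/*
 * Worked example of the rounding below (illustrative numbers): a part that
 * measures 664 MHz gives mul = (3*664 + 100) / 200 = 10 and near66 =
 * (200*10 + 1) / 3 = 667 (the "+1" is the 667 MHz round-up case), versus
 * near50 = 650; 667 is closer, and its delta of 3 MHz is within the limit
 * of 6, so cpu_freq becomes 667.
 */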
static void
mach_fixcpufreq(void)
{
	uint32_t freq, mul, near66, delta66, near50, delta50, fixed, delta, i;

	freq = (uint32_t)cpu_freq;

	/*
	 * Find the nearest integer multiple of 200/3 (about 66) MHz to the
	 * measured speed taking into account that the 667 MHz parts were
	 * the first to round-up.
	 */
	mul = (uint32_t)((3 * (uint64_t)freq + 100) / 200);
	near66 = (uint32_t)((200 * (uint64_t)mul + ((mul >= 10) ? 1 : 0)) / 3);
	delta66 = (near66 > freq) ? (near66 - freq) : (freq - near66);

	/* Find the nearest integer multiple of 50 MHz to the measured speed */
	mul = (freq + 25) / 50;
	near50 = mul * 50;
	delta50 = (near50 > freq) ? (near50 - freq) : (freq - near50);

	/* Find the closer of the two */
	if (delta66 < delta50) {
		fixed = near66;
		delta = delta66;
	} else {
		fixed = near50;
		delta = delta50;
	}

	if (fixed > INT_MAX)
		return;

	/*
	 * Some older parts have a core clock frequency that is not an
	 * integral multiple of 50 or 66 MHz. Check if one of the old
	 * clock frequencies is closer to the measured value than any
	 * of the integral multiples of 50 and 66, and if so set fixed
	 * and delta appropriately to represent the closest value.
	 */
	i = sizeof (x86_cpu_freq) / sizeof (int);
	while (i > 0) {
		i--;

		if (x86_cpu_freq[i] <= freq) {
			mul = freq - x86_cpu_freq[i];

			if (mul < delta) {
				fixed = x86_cpu_freq[i];
				delta = mul;
			}

			break;
		}

		mul = x86_cpu_freq[i] - freq;

		if (mul < delta) {
			fixed = x86_cpu_freq[i];
			delta = mul;
		}
	}

	/*
	 * Set a reasonable maximum for how much to correct the measured
	 * result by. This check is here to prevent the adjustment made
	 * by this function from being more harm than good. It is entirely
	 * possible that in the future parts will be made that are not
	 * integral multiples of 66 or 50 in clock frequency or that
	 * someone may overclock a part to some odd frequency. If the
	 * measured value is farther from the corrected value than
	 * allowed, then assume the corrected value is in error and use
	 * the measured value.
	 */
	if (6 < delta)
		return;

	cpu_freq = (int)fixed;
}


static int
machhztomhz(uint64_t cpu_freq_hz)
{
	uint64_t cpu_mhz;

	/* Round to nearest MHZ */
	cpu_mhz = (cpu_freq_hz + (MEGA_HZ / 2)) / MEGA_HZ;

	if (cpu_mhz > INT_MAX)
		return (0);

	return ((int)cpu_mhz);

}

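/*
 * mach_clkinit() below decides whether high-resolution time comes from the
 * generic TSC code or from the PSM's own hrtime routines, and then asks the
 * PSM to program its clock source: one-shot mode if the caller prefers it
 * and the module implements PSMI_3 or later, otherwise traditional periodic
 * (hz) mode.
 */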
static int
mach_clkinit(int preferred_mode, int *set_mode)
{
	struct psm_ops	*pops;
	int		resolution;

	pops = mach_set[0];

	cpu_freq_hz = mach_getcpufreq();

	cpu_freq = machhztomhz(cpu_freq_hz);

	if (!(x86_feature & X86_TSC) || (cpu_freq == 0))
		tsc_gethrtime_enable = 0;

#ifndef __xpv
	if (tsc_gethrtime_enable) {
		tsc_hrtimeinit(cpu_freq_hz);
	} else
#endif
	{
		if (pops->psm_hrtimeinit)
			(*pops->psm_hrtimeinit)();
		gethrtimef = pops->psm_gethrtime;
		gethrtimeunscaledf = gethrtimef;
		/* scalehrtimef will remain dummy */
	}

	mach_fixcpufreq();

	if (mach_ver[0] >= PSM_INFO_VER01_3) {
		if (preferred_mode == TIMER_ONESHOT) {

			resolution = (*pops->psm_clkinit)(0);
			if (resolution != 0)  {
				*set_mode = TIMER_ONESHOT;
				return (resolution);
			}
		}

		/*
		 * either periodic mode was requested or could not set to
		 * one-shot mode
		 */
		resolution = (*pops->psm_clkinit)(hz);
		/*
		 * psm should be able to do periodic, so we do not check
		 * for return value of psm_clkinit here.
		 */
		*set_mode = TIMER_PERIODIC;
		return (resolution);
	} else {
		/*
		 * PSMI interface prior to PSMI_3 does not define a return
		 * value for psm_clkinit, so the return value is ignored.
		 */
		(void) (*pops->psm_clkinit)(hz);
		*set_mode = TIMER_PERIODIC;
		return (nsec_per_tick);
	}
}


/*ARGSUSED*/
static int
mach_softlvl_to_vect(int ipl)
{
	setsoftint = av_set_softint_pending;
	kdisetsoftint = kdi_av_set_softint_pending;

	return (PSM_SV_SOFTWARE);
}

#ifdef DEBUG
/*
 * This is here to allow us to simulate cpus that refuse to start.
 */
cpuset_t cpufailset;
#endif

int
mach_cpu_start(struct cpu *cp, void *ctx)
{
	struct psm_ops *pops = mach_set[0];
	processorid_t id = cp->cpu_id;

#ifdef DEBUG
	if (CPU_IN_SET(cpufailset, id))
		return (0);
#endif
	return ((*pops->psm_cpu_start)(id, ctx));
}

int
mach_cpuid_start(processorid_t id, void *ctx)
{
	struct psm_ops *pops = mach_set[0];

#ifdef DEBUG
	if (CPU_IN_SET(cpufailset, id))
		return (0);
#endif
	return ((*pops->psm_cpu_start)(id, ctx));
}

/*ARGSUSED*/
static int
mach_translate_irq(dev_info_t *dip, int irqno)
{
	return (irqno);	/* default to NO translation */
}

static void
mach_notify_error(int level, char *errmsg)
{
	/*
	 * SL_FATAL is passed in once panicstr is set; deliver it
	 * as CE_PANIC.  Also, translate SL_ codes back to CE_
	 * codes for the psmi handler.
	 */
	if (level & SL_FATAL)
		(*notify_error)(CE_PANIC, errmsg);
	else if (level & SL_WARN)
		(*notify_error)(CE_WARN, errmsg);
	else if (level & SL_NOTE)
		(*notify_error)(CE_NOTE, errmsg);
	else if (level & SL_CONSOLE)
		(*notify_error)(CE_CONT, errmsg);
}

/*
 * This provides the default basic intr_ops interface for the new DDI
 * interrupt framework if the PSM doesn't have one.
 *
 * Input:
 * dip     - pointer to the dev_info structure of the requested device
 * hdlp    - pointer to the internal interrupt handle structure for the
 *	     requested interrupt
 * intr_op - opcode for this call
 * result  - pointer to the integer that will hold the result to be
 *	     passed back if return value is PSM_SUCCESS
 *
 * Output:
 * return value is either PSM_SUCCESS or PSM_FAILURE
 */
static int
mach_intr_ops(dev_info_t *dip, ddi_intr_handle_impl_t *hdlp,
    psm_intr_op_t intr_op, int *result)
{
	struct intrspec *ispec;

	switch (intr_op) {
	case PSM_INTR_OP_CHECK_MSI:
		*result = hdlp->ih_type & ~(DDI_INTR_TYPE_MSI |
		    DDI_INTR_TYPE_MSIX);
		break;
	case PSM_INTR_OP_ALLOC_VECTORS:
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			*result = 1;
		else
			*result = 0;
		break;
	case PSM_INTR_OP_FREE_VECTORS:
		break;
	case PSM_INTR_OP_NAVAIL_VECTORS:
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			*result = 1;
		else
			*result = 0;
		break;
	case PSM_INTR_OP_XLATE_VECTOR:
		ispec = ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp;
		*result = psm_translate_irq(dip, ispec->intrspec_vec);
		break;
	case PSM_INTR_OP_GET_CAP:
		*result = 0;
		break;
	case PSM_INTR_OP_GET_PENDING:
	case PSM_INTR_OP_CLEAR_MASK:
	case PSM_INTR_OP_SET_MASK:
	case PSM_INTR_OP_GET_SHARED:
	case PSM_INTR_OP_SET_PRI:
	case PSM_INTR_OP_SET_CAP:
	case PSM_INTR_OP_SET_CPU:
	case PSM_INTR_OP_GET_INTR:
	default:
		return (PSM_FAILURE);
	}
	return (PSM_SUCCESS);
}

/*
 * Return 1 if CMT load balancing policies should be
 * implemented across instances of the specified hardware
 * sharing relationship.
 */
int
pg_cmt_load_bal_hw(pghw_type_t hw)
{
	if (hw == PGHW_IPIPE ||
	    hw == PGHW_FPU ||
	    hw == PGHW_CHIP)
		return (1);
	else
		return (0);
}

/*
 * Return 1 if thread affinity policies should be implemented
 * for instances of the specified hardware sharing relationship.
 */
int
pg_cmt_affinity_hw(pghw_type_t hw)
{
	if (hw == PGHW_CACHE)
		return (1);
	else
		return (0);
}