/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#define	PSMI_1_6
#include <sys/smp_impldefs.h>
#include <sys/psm.h>
#include <sys/psm_modctl.h>
#include <sys/pit.h>
#include <sys/cmn_err.h>
#include <sys/strlog.h>
#include <sys/clock.h>
#include <sys/debug.h>
#include <sys/rtc.h>
#include <sys/x86_archext.h>
#include <sys/cpupart.h>
#include <sys/cpuvar.h>
#include <sys/cmt.h>
#include <sys/cpu.h>
#include <sys/disp.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/sysmacros.h>
#include <sys/memlist.h>
#include <sys/param.h>
#include <sys/promif.h>
#if defined(__xpv)
#include <sys/hypervisor.h>
#endif
#include <sys/mach_intr.h>
#include <vm/hat_i86.h>
#include <sys/kdi_machimpl.h>
#include <sys/sdt.h>

#define	OFFSETOF(s, m)		(size_t)(&(((s *)0)->m))

/*
 * Local function prototypes
 */
static int mp_disable_intr(processorid_t cpun);
static void mp_enable_intr(processorid_t cpun);
static void mach_init();
static void mach_picinit();
static int machhztomhz(uint64_t cpu_freq_hz);
static uint64_t mach_getcpufreq(void);
static void mach_fixcpufreq(void);
static int mach_clkinit(int, int *);
static void mach_smpinit(void);
static int mach_softlvl_to_vect(int ipl);
static void mach_get_platform(int owner);
static void mach_construct_info();
static int mach_translate_irq(dev_info_t *dip, int irqno);
static int mach_intr_ops(dev_info_t *, ddi_intr_handle_impl_t *,
    psm_intr_op_t, int *);
static void mach_notify_error(int level, char *errmsg);
static hrtime_t dummy_hrtime(void);
static void dummy_scalehrtime(hrtime_t *);
static void cpu_idle(void);
static void cpu_wakeup(cpu_t *, int);
#ifndef __xpv
static void cpu_idle_mwait(void);
static void cpu_wakeup_mwait(cpu_t *, int);
#endif
/*
 * External reference functions
 */
extern void return_instr();
extern uint64_t freq_tsc(uint32_t *);
#if defined(__i386)
extern uint64_t freq_notsc(uint32_t *);
#endif
extern void pc_gethrestime(timestruc_t *);
extern int cpuid_get_coreid(cpu_t *);
extern int cpuid_get_chipid(cpu_t *);

/*
 * PSM functions initialization
 */
void (*psm_shutdownf)(int, int)	= (void (*)(int, int))return_instr;
void (*psm_preshutdownf)(int, int) = (void (*)(int, int))return_instr;
void (*psm_notifyf)(int) = (void (*)(int))return_instr;
void (*psm_set_idle_cpuf)(int) = (void (*)(int))return_instr;
void (*psm_unset_idle_cpuf)(int) = (void (*)(int))return_instr;
void (*psminitf)() = mach_init;
void (*picinitf)() = return_instr;
int (*clkinitf)(int, int *) = (int (*)(int, int *))return_instr;
int (*ap_mlsetup)() = (int (*)(void))return_instr;
void (*send_dirintf)() = return_instr;
void (*setspl)(int) = (void (*)(int))return_instr;
int (*addspl)(int, int, int, int) = (int (*)(int, int, int, int))return_instr;
int (*delspl)(int, int, int, int) = (int (*)(int, int, int, int))return_instr;
void (*kdisetsoftint)(int, struct av_softinfo *)=
	(void (*)(int, struct av_softinfo *))return_instr;
void (*setsoftint)(int, struct av_softinfo *)=
	(void (*)(int, struct av_softinfo *))return_instr;
int (*slvltovect)(int) = (int (*)(int))return_instr;
int (*setlvl)(int, int *) = (int (*)(int, int *))return_instr;
void (*setlvlx)(int, int) = (void (*)(int, int))return_instr;
int (*psm_disable_intr)(int) = mp_disable_intr;
void (*psm_enable_intr)(int)  = mp_enable_intr;
hrtime_t (*gethrtimef)(void) = dummy_hrtime;
hrtime_t (*gethrtimeunscaledf)(void) = dummy_hrtime;
void (*scalehrtimef)(hrtime_t *) = dummy_scalehrtime;
int (*psm_translate_irq)(dev_info_t *, int) = mach_translate_irq;
void (*gethrestimef)(timestruc_t *) = pc_gethrestime;
void (*psm_notify_error)(int, char *) = (void (*)(int, char *))NULL;
int (*psm_get_clockirq)(int) = NULL;
int (*psm_get_ipivect)(int, int) = NULL;

int (*psm_clkinit)(int) = NULL;
void (*psm_timer_reprogram)(hrtime_t) = NULL;
void (*psm_timer_enable)(void) = NULL;
void (*psm_timer_disable)(void) = NULL;
void (*psm_post_cyclic_setup)(void *arg) = NULL;
int (*psm_intr_ops)(dev_info_t *, ddi_intr_handle_impl_t *, psm_intr_op_t,
	int *) = mach_intr_ops;
int (*psm_state)(psm_state_request_t *) = (int (*)(psm_state_request_t *))
	return_instr;

void (*notify_error)(int, char *) = (void (*)(int, char *))return_instr;
void (*hrtime_tick)(void) = return_instr;

/*
 * True if the generic TSC code is our source of hrtime, rather than whatever
 * the PSM can provide.
 */
#ifdef __xpv
int tsc_gethrtime_enable = 0;
#else
int tsc_gethrtime_enable = 1;
#endif
int tsc_gethrtime_initted = 0;

/*
 * True if the hrtime implementation is "hires"; namely, better than microdata.
 */
int gethrtime_hires = 0;

/*
 * Local Static Data
 */
static struct psm_ops mach_ops;
static struct psm_ops *mach_set[4] = {&mach_ops, NULL, NULL, NULL};
static ushort_t mach_ver[4] = {0, 0, 0, 0};

/*
 * If non-zero, idle cpus will become "halted" when there's
 * no work to do.
 */
int	idle_cpu_use_hlt = 1;

#ifndef __xpv
/*
 * If non-zero, idle cpus will use mwait if available to halt instead of hlt.
 */
int	idle_cpu_prefer_mwait = 1;
/*
 * Set to 0 to avoid MONITOR+CLFLUSH assertion.
 */
int	idle_cpu_assert_cflush_monitor = 1;

#endif
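
/*
 * For illustration only: like most global kernel tunables, the knobs above
 * are normally adjusted from /etc/system rather than by editing this file,
 * e.g.
 *
 *	set idle_cpu_use_hlt = 0
 *	set idle_cpu_prefer_mwait = 0
 *
 * (The mwait-related variables exist only on non-__xpv kernels.)
 */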

/*ARGSUSED*/
int
pg_plat_hw_shared(cpu_t *cp, pghw_type_t hw)
{
	switch (hw) {
	case PGHW_IPIPE:
		if (x86_feature & (X86_HTT)) {
			/*
			 * Hyper-threading is SMT
			 */
			return (1);
		} else {
			return (0);
		}
	case PGHW_CHIP:
		if (x86_feature & (X86_CMP|X86_HTT))
			return (1);
		else
			return (0);
	case PGHW_CACHE:
		if (cpuid_get_ncpu_sharing_last_cache(cp) > 1)
			return (1);
		else
			return (0);
	default:
		return (0);
	}
}

/*
 * Compare two CPUs and see if they have a pghw_type_t sharing relationship.
 * If pghw_type_t is an unsupported hardware type, then return -1.
 */
int
pg_plat_cpus_share(cpu_t *cpu_a, cpu_t *cpu_b, pghw_type_t hw)
{
	id_t pgp_a, pgp_b;

	pgp_a = pg_plat_hw_instance_id(cpu_a, hw);
	pgp_b = pg_plat_hw_instance_id(cpu_b, hw);

	if (pgp_a == -1 || pgp_b == -1)
		return (-1);

	return (pgp_a == pgp_b);
}

/*
 * Return a physical instance identifier for known hardware sharing
 * relationships
 */
id_t
pg_plat_hw_instance_id(cpu_t *cpu, pghw_type_t hw)
{
	switch (hw) {
	case PGHW_IPIPE:
		return (cpuid_get_coreid(cpu));
	case PGHW_CACHE:
		return (cpuid_get_last_lvl_cacheid(cpu));
	case PGHW_CHIP:
		return (cpuid_get_chipid(cpu));
	default:
		return (-1);
	}
}

int
pg_plat_hw_level(pghw_type_t hw)
{
	int i;
	static pghw_type_t hw_hier[] = {
		PGHW_IPIPE,
		PGHW_CACHE,
		PGHW_CHIP,
		PGHW_NUM_COMPONENTS
	};

	for (i = 0; hw_hier[i] != PGHW_NUM_COMPONENTS; i++) {
		if (hw_hier[i] == hw)
			return (i);
	}
	return (-1);
}

/*
 * Return 1 if CMT load balancing policies should be
 * implemented across instances of the specified hardware
 * sharing relationship.
 */
int
pg_plat_cmt_load_bal_hw(pghw_type_t hw)
{
	if (hw == PGHW_IPIPE ||
	    hw == PGHW_FPU ||
	    hw == PGHW_CHIP ||
	    hw == PGHW_CACHE)
		return (1);
	else
		return (0);
}

/*
 * Return 1 if thread affinity policies should be implemented
 * for instances of the specified hardware sharing relationship.
 */
int
pg_plat_cmt_affinity_hw(pghw_type_t hw)
{
	if (hw == PGHW_CACHE)
		return (1);
	else
		return (0);
}

id_t
pg_plat_get_core_id(cpu_t *cpu)
{
	return ((id_t)cpuid_get_coreid(cpu));
}

void
cmp_set_nosteal_interval(void)
{
	/* Set the nosteal interval (used by disp_getbest()) to 100us */
	nosteal_nsec = 100000UL;
}

/*
 * Routine to ensure initial callers to hrtime get 0 as a return value
 */
static hrtime_t
dummy_hrtime(void)
{
	return (0);
}

/* ARGSUSED */
static void
dummy_scalehrtime(hrtime_t *ticks)
{}

/*
 * Idle the present CPU until awoken via an interrupt
 */
static void
cpu_idle(void)
{
	cpu_t		*cpup = CPU;
	processorid_t	cpu_sid = cpup->cpu_seqid;
	cpupart_t	*cp = cpup->cpu_part;
	int		hset_update = 1;

	/*
	 * If this CPU is online, and there's multiple CPUs
	 * in the system, then we should note our halting
	 * by adding ourselves to the partition's halted CPU
	 * bitmap. This allows other CPUs to find/awaken us when
	 * work becomes available.
	 */
	if (cpup->cpu_flags & CPU_OFFLINE || ncpus == 1)
		hset_update = 0;

	/*
	 * Add ourselves to the partition's halted CPUs bitmap
	 * and set our HALTED flag, if necessary.
	 *
	 * When a thread becomes runnable, it is placed on the queue
	 * and then the halted CPU bitmap is checked to determine who
	 * (if anyone) should be awoken. We therefore need to first
	 * add ourselves to the bitmap, and then check if there
	 * is any work available. The order is important to prevent a race
	 * that can lead to work languishing on a run queue somewhere while
	 * this CPU remains halted.
	 *
	 * Either the producing CPU will see we're halted and will awaken us,
	 * or this CPU will see the work available in disp_anywork().
	 *
	 * Note that memory barriers after updating the HALTED flag
	 * are not necessary since an atomic operation (updating the bitset)
	 * immediately follows. On x86 the atomic operation acts as a
	 * memory barrier for the update of cpu_disp_flags.
	 */
	if (hset_update) {
		cpup->cpu_disp_flags |= CPU_DISP_HALTED;
		bitset_atomic_add(&cp->cp_haltset, cpu_sid);
	}

	/*
	 * Check to make sure there's really nothing to do.
	 * Work destined for this CPU may become available after
	 * this check. We'll be notified through the clearing of our
	 * bit in the halted CPU bitmap, and a poke.
	 */
	if (disp_anywork()) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		}
		return;
	}

	/*
	 * We're on our way to being halted.
	 *
	 * Disable interrupts now, so that we'll awaken immediately
	 * after halting if someone tries to poke us between now and
	 * the time we actually halt.
	 *
	 * We check for the presence of our bit after disabling interrupts.
	 * If it's cleared, we'll return. If the bit is cleared after
	 * we check then the poke will pop us out of the halted state.
	 *
	 * This means that the ordering of the poke and the clearing
	 * of the bit by cpu_wakeup is important.
	 * cpu_wakeup() must clear, then poke.
	 * cpu_idle() must disable interrupts, then check for the bit.
	 */
	cli();

	if (hset_update && bitset_in_set(&cp->cp_haltset, cpu_sid) == 0) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		sti();
		return;
	}

	/*
	 * The check for anything locally runnable is here for performance
	 * and isn't needed for correctness. disp_nrunnable ought to be
	 * in our cache still, so it's inexpensive to check, and if there
	 * is anything runnable we won't have to wait for the poke.
	 */
	if (cpup->cpu_disp->disp_nrunnable != 0) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		}
		sti();
		return;
	}

	DTRACE_PROBE1(idle__state__transition, uint_t, IDLE_STATE_C1);

	mach_cpu_idle();

	DTRACE_PROBE1(idle__state__transition, uint_t, IDLE_STATE_C0);

	/*
	 * We're no longer halted
	 */
	if (hset_update) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		bitset_atomic_del(&cp->cp_haltset, cpu_sid);
	}
}
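
/*
 * For illustration, assuming a producer CPU enqueues a thread and then calls
 * the disp_enq_thread hook (cpu_wakeup() below) while this CPU is executing
 * cpu_idle(), the add-to-bitmap-before-checking-for-work ordering above rules
 * out the lost-wakeup interleaving:
 *
 *	safe ordering (used here)		racy ordering (avoided)
 *	-------------------------		-----------------------
 *	idle: add self to cp_haltset		idle: disp_anywork() finds nothing
 *	idle: disp_anywork()?			producer: enqueue thread
 *	producer: enqueue thread		producer: cp_haltset empty, no poke
 *	producer: sees bit, clears it, pokes	idle: add self to cp_haltset
 *	idle: halts, poke wakes it		idle: halts; work languishes
 *
 * In the safe ordering at least one side always observes the other's update:
 * either disp_anywork() sees the newly queued work, or the producer sees our
 * bit in cp_haltset and pokes us out of the halt.
 */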

/*
 * If "cpu" is halted, then wake it up, clearing its halted bit in advance.
 * Otherwise, see if other CPUs in the cpu partition are halted and need to
 * be woken up so that they can steal the thread we placed on this CPU.
 * This function is only used on MP systems.
 */
static void
cpu_wakeup(cpu_t *cpu, int bound)
{
	uint_t		cpu_found;
	processorid_t	cpu_sid;
	cpupart_t	*cp;

	cp = cpu->cpu_part;
	cpu_sid = cpu->cpu_seqid;
	if (bitset_in_set(&cp->cp_haltset, cpu_sid)) {
		/*
		 * Clear the halted bit for that CPU since it will be
		 * poked in a moment.
		 */
		bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		/*
		 * We may find the current CPU present in the halted cpuset
		 * if we're in the context of an interrupt that occurred
		 * before we had a chance to clear our bit in cpu_idle().
		 * Poking ourself is obviously unnecessary, since if
		 * we're here, we're not halted.
		 */
		if (cpu != CPU)
			poke_cpu(cpu->cpu_id);
		return;
	} else {
		/*
		 * This cpu isn't halted, but it's idle or undergoing a
		 * context switch. No need to awaken anyone else.
		 */
		if (cpu->cpu_thread == cpu->cpu_idle_thread ||
		    cpu->cpu_disp_flags & CPU_DISP_DONTSTEAL)
			return;
	}

	/*
	 * No need to wake up other CPUs if this is for a bound thread.
	 */
	if (bound)
		return;

	/*
	 * The CPU specified for wakeup isn't currently halted, so check
	 * to see if there are any other halted CPUs in the partition,
	 * and if there are then awaken one.
	 */
	do {
		cpu_found = bitset_find(&cp->cp_haltset);
		if (cpu_found == (uint_t)-1)
			return;
	} while (bitset_atomic_test_and_del(&cp->cp_haltset, cpu_found) < 0);

	if (cpu_found != CPU->cpu_seqid) {
		poke_cpu(cpu_seq[cpu_found]->cpu_id);
	}
}

#ifndef __xpv
/*
 * Idle the present CPU until awoken via touching its monitored line
 */
static void
cpu_idle_mwait(void)
{
	volatile uint32_t	*mcpu_mwait = CPU->cpu_m.mcpu_mwait;
	cpu_t			*cpup = CPU;
	processorid_t		cpu_sid = cpup->cpu_seqid;
	cpupart_t		*cp = cpup->cpu_part;
	int			hset_update = 1;

	/*
	 * Set our mcpu_mwait here, so we can tell if anyone tries to
	 * wake us between now and when we call mwait. No other cpu will
	 * attempt to set our mcpu_mwait until we add ourselves to the halted
	 * CPU bitmap.
	 */
	*mcpu_mwait = MWAIT_HALTED;

	/*
	 * If this CPU is online, and there's multiple CPUs
	 * in the system, then we should note our halting
	 * by adding ourselves to the partition's halted CPU
	 * bitmap. This allows other CPUs to find/awaken us when
	 * work becomes available.
	 */
	if (cpup->cpu_flags & CPU_OFFLINE || ncpus == 1)
		hset_update = 0;

	/*
	 * Add ourselves to the partition's halted CPUs bitmap
	 * and set our HALTED flag, if necessary.
	 *
	 * When a thread becomes runnable, it is placed on the queue
	 * and then the halted CPU bitmap is checked to determine who
	 * (if anyone) should be awoken. We therefore need to first
	 * add ourselves to the bitmap, and then check if there
	 * is any work available.
	 *
	 * Note that memory barriers after updating the HALTED flag
	 * are not necessary since an atomic operation (updating the bitmap)
	 * immediately follows. On x86 the atomic operation acts as a
	 * memory barrier for the update of cpu_disp_flags.
	 */
	if (hset_update) {
		cpup->cpu_disp_flags |= CPU_DISP_HALTED;
		bitset_atomic_add(&cp->cp_haltset, cpu_sid);
	}

	/*
	 * Check to make sure there's really nothing to do.
	 * Work destined for this CPU may become available after
	 * this check. We'll be notified through the clearing of our
	 * bit in the halted CPU bitmap, and a write to our mcpu_mwait.
	 *
	 * disp_anywork() checks disp_nrunnable, so we do not have to later.
	 */
	if (disp_anywork()) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		}
		return;
	}

	/*
	 * We're on our way to being halted.
	 * To avoid a lost wakeup, arm the monitor before checking if another
	 * cpu wrote to mcpu_mwait to wake us up.
	 */
	i86_monitor(mcpu_mwait, 0, 0);
	if (*mcpu_mwait == MWAIT_HALTED) {
		DTRACE_PROBE1(idle__state__transition, uint_t, IDLE_STATE_C1);

		tlb_going_idle();
		i86_mwait(0, 0);
		tlb_service();

		DTRACE_PROBE1(idle__state__transition, uint_t, IDLE_STATE_C0);
	}

	/*
	 * We're no longer halted
	 */
	if (hset_update) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		bitset_atomic_del(&cp->cp_haltset, cpu_sid);
	}
}
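
/*
 * For illustration, the arm-the-monitor-before-re-checking ordering above
 * closes the same window that the bitmap ordering closes in cpu_idle():
 * once i86_monitor() has armed the monitored line, a wakeup store to
 * *mcpu_mwait (MWAIT_WAKEUP() in cpu_wakeup_mwait() below) either lands
 * before the MWAIT_HALTED re-check, in which case the mwait is skipped
 * entirely, or it lands afterwards, in which case it terminates the
 * i86_mwait(). Checking first and arming the monitor afterwards would leave
 * a window in which the wakeup store could be missed completely.
 */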

/*
 * If "cpu" is halted in mwait, then wake it up, clearing its halted bit in
 * advance. Otherwise, see if other CPUs in the cpu partition are halted and
 * need to be woken up so that they can steal the thread we placed on this CPU.
 * This function is only used on MP systems.
 */
static void
cpu_wakeup_mwait(cpu_t *cp, int bound)
{
	cpupart_t	*cpu_part;
	uint_t		cpu_found;
	processorid_t	cpu_sid;

	cpu_part = cp->cpu_part;
	cpu_sid = cp->cpu_seqid;

	/*
	 * Clear the halted bit for that CPU since it will be woken up
	 * in a moment.
	 */
	if (bitset_in_set(&cpu_part->cp_haltset, cpu_sid)) {
		/*
		 * Clear the halted bit for that CPU since it will be
		 * poked in a moment.
		 */
		bitset_atomic_del(&cpu_part->cp_haltset, cpu_sid);
		/*
		 * We may find the current CPU present in the halted cpuset
		 * if we're in the context of an interrupt that occurred
		 * before we had a chance to clear our bit in cpu_idle().
		 * Waking ourself is obviously unnecessary, since if
		 * we're here, we're not halted.
		 *
		 * monitor/mwait wakeup via writing to our cache line is
		 * harmless and less expensive than always checking if we
		 * are waking ourself which is an uncommon case.
		 */
		MWAIT_WAKEUP(cp);	/* write to monitored line */
		return;
	} else {
		/*
		 * This cpu isn't halted, but it's idle or undergoing a
		 * context switch. No need to awaken anyone else.
		 */
		if (cp->cpu_thread == cp->cpu_idle_thread ||
		    cp->cpu_disp_flags & CPU_DISP_DONTSTEAL)
			return;
	}

	/*
	 * No need to wake up other CPUs if the thread we just enqueued
	 * is bound.
	 */
	if (bound || ncpus == 1)
		return;

	/*
	 * See if there's any other halted CPUs. If there are, then
	 * select one, and awaken it.
	 * It's possible that after we find a CPU, somebody else
	 * will awaken it before we get the chance.
	 * In that case, look again.
	 */
	do {
		cpu_found = bitset_find(&cpu_part->cp_haltset);
		if (cpu_found == (uint_t)-1)
			return;
	} while (bitset_atomic_test_and_del(&cpu_part->cp_haltset,
	    cpu_found) < 0);

	/*
	 * Do not check if cpu_found is ourself as monitor/mwait
	 * wakeup is cheap.
	 */
	MWAIT_WAKEUP(cpu_seq[cpu_found]);	/* write to monitored line */
}

#endif

void (*cpu_pause_handler)(volatile char *) = NULL;

static int
mp_disable_intr(int cpun)
{
	/*
	 * switch to the offline cpu
	 */
	affinity_set(cpun);
	/*
	 * raise ipl to just below cross call
	 */
	splx(XC_MED_PIL-1);
	/*
	 * set base spl to prevent the next swtch to idle from
	 * lowering back to ipl 0
	 */
	CPU->cpu_intr_actv |= (1 << (XC_MED_PIL-1));
	set_base_spl();
	affinity_clear();
	return (DDI_SUCCESS);
}

static void
mp_enable_intr(int cpun)
{
	/*
	 * switch to the online cpu
	 */
	affinity_set(cpun);
	/*
	 * clear the interrupt active mask
	 */
	CPU->cpu_intr_actv &= ~(1 << (XC_MED_PIL-1));
	set_base_spl();
	(void) spl0();
	affinity_clear();
}

static void
mach_get_platform(int owner)
{
	void		**srv_opsp;
	void		**clt_opsp;
	int		i;
	int		total_ops;

	/* fix up psm ops */
	srv_opsp = (void **)mach_set[0];
	clt_opsp = (void **)mach_set[owner];
	if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01)
		total_ops = sizeof (struct psm_ops_ver01) /
		    sizeof (void (*)(void));
	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_1)
		/* no psm_notify_func */
		total_ops = OFFSETOF(struct psm_ops, psm_notify_func) /
		    sizeof (void (*)(void));
	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_2)
		/* no psm_timer funcs */
		total_ops = OFFSETOF(struct psm_ops, psm_timer_reprogram) /
		    sizeof (void (*)(void));
	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_3)
		/* no psm_preshutdown function */
		total_ops = OFFSETOF(struct psm_ops, psm_preshutdown) /
		    sizeof (void (*)(void));
	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_4)
		/* no psm_intr_ops function */
		total_ops = OFFSETOF(struct psm_ops, psm_intr_ops) /
		    sizeof (void (*)(void));
	else
		total_ops = sizeof (struct psm_ops) / sizeof (void (*)(void));

	/*
	 * Save the version of the PSM module, in case we need to
	 * behave differently based on version.
	 */
	mach_ver[0] = mach_ver[owner];

	for (i = 0; i < total_ops; i++)
		if (clt_opsp[i] != NULL)
			srv_opsp[i] = clt_opsp[i];
}
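
/*
 * For illustration: mach_get_platform() works because struct psm_ops is laid
 * out as a flat table of function pointers, so srv_opsp/clt_opsp can walk it
 * as a void * array, and OFFSETOF() of the first op that a given PSMI
 * revision lacks, divided by sizeof (void (*)(void)), is exactly the number
 * of ops that revision does provide.  For example, a PSM built against
 * PSM_INFO_VER01_2 is copied only up to (but not including)
 * psm_timer_reprogram; the newer timer and intr_ops slots keep the defaults
 * already present in mach_ops.
 */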

static void
mach_construct_info()
{
	struct psm_sw *swp;
	int	mach_cnt[PSM_OWN_OVERRIDE+1] = {0};
	int	conflict_owner = 0;

	if (psmsw->psw_forw == psmsw)
		panic("No valid PSM modules found");
	mutex_enter(&psmsw_lock);
	for (swp = psmsw->psw_forw; swp != psmsw; swp = swp->psw_forw) {
		if (!(swp->psw_flag & PSM_MOD_IDENTIFY))
			continue;
		mach_set[swp->psw_infop->p_owner] = swp->psw_infop->p_ops;
		mach_ver[swp->psw_infop->p_owner] = swp->psw_infop->p_version;
		mach_cnt[swp->psw_infop->p_owner]++;
	}
	mutex_exit(&psmsw_lock);

	mach_get_platform(PSM_OWN_SYS_DEFAULT);

	/* check to see if there are any conflicts */
	if (mach_cnt[PSM_OWN_EXCLUSIVE] > 1)
		conflict_owner = PSM_OWN_EXCLUSIVE;
	if (mach_cnt[PSM_OWN_OVERRIDE] > 1)
		conflict_owner = PSM_OWN_OVERRIDE;
	if (conflict_owner) {
		/* remove all psm modules except uppc */
		cmn_err(CE_WARN,
		    "Conflicts detected on the following PSM modules:");
		mutex_enter(&psmsw_lock);
		for (swp = psmsw->psw_forw; swp != psmsw; swp = swp->psw_forw) {
			if (swp->psw_infop->p_owner == conflict_owner)
				cmn_err(CE_WARN, "%s ",
				    swp->psw_infop->p_mach_idstring);
		}
		mutex_exit(&psmsw_lock);
		cmn_err(CE_WARN,
		    "Setting the system back to SINGLE processor mode!");
		cmn_err(CE_WARN,
		    "Please edit /etc/mach to remove the invalid PSM module.");
		return;
	}

	if (mach_set[PSM_OWN_EXCLUSIVE])
		mach_get_platform(PSM_OWN_EXCLUSIVE);

	if (mach_set[PSM_OWN_OVERRIDE])
		mach_get_platform(PSM_OWN_OVERRIDE);
}

static void
mach_init()
{
	struct psm_ops *pops;

	mach_construct_info();

	pops = mach_set[0];

	/* register the interrupt and clock initialization routines */
	picinitf = mach_picinit;
	clkinitf = mach_clkinit;
	psm_get_clockirq = pops->psm_get_clockirq;

	/* register the interrupt setup code */
	slvltovect = mach_softlvl_to_vect;
	addspl = pops->psm_addspl;
	delspl = pops->psm_delspl;

	if (pops->psm_translate_irq)
		psm_translate_irq = pops->psm_translate_irq;
	if (pops->psm_intr_ops)
		psm_intr_ops = pops->psm_intr_ops;

#if defined(PSMI_1_2) || defined(PSMI_1_3) || defined(PSMI_1_4)
	/*
	 * Time-of-day functionality now handled in TOD modules.
	 * (Warn about PSM modules that think that we're going to use
	 * their ops vectors.)
	 */
	if (pops->psm_tod_get)
		cmn_err(CE_WARN, "obsolete psm_tod_get op %p",
		    (void *)pops->psm_tod_get);

	if (pops->psm_tod_set)
		cmn_err(CE_WARN, "obsolete psm_tod_set op %p",
		    (void *)pops->psm_tod_set);
#endif

	if (pops->psm_notify_error) {
		psm_notify_error = mach_notify_error;
		notify_error = pops->psm_notify_error;
	}

	(*pops->psm_softinit)();

	/*
	 * Initialize the dispatcher's function hooks
	 * to enable CPU halting when idle.
	 * Do not use monitor/mwait if idle_cpu_use_hlt is not set (spin idle)
	 * or idle_cpu_prefer_mwait is not set.
	 * Allocate monitor/mwait buffer for cpu0.
	 */
	if (idle_cpu_use_hlt) {
		idle_cpu = cpu_idle;
#ifndef __xpv
		if ((x86_feature & X86_MWAIT) && idle_cpu_prefer_mwait) {
			CPU->cpu_m.mcpu_mwait = cpuid_mwait_alloc(CPU);
			/*
			 * Protect ourselves from insane mwait size.
			 */
			if (CPU->cpu_m.mcpu_mwait == NULL) {
#ifdef DEBUG
				cmn_err(CE_NOTE, "Using hlt idle. Cannot "
				    "handle cpu 0 mwait size.");
#endif
				idle_cpu_prefer_mwait = 0;
				idle_cpu = cpu_idle;
			} else {
				idle_cpu = cpu_idle_mwait;
			}
		} else {
			idle_cpu = cpu_idle;
		}
#endif
	}

	mach_smpinit();
}

static void
mach_smpinit(void)
{
	struct psm_ops	*pops;
	processorid_t	cpu_id;
	int		cnt;
	cpuset_t	cpumask;

	pops = mach_set[0];
	CPUSET_ZERO(cpumask);

	cpu_id = -1;
	cpu_id = (*pops->psm_get_next_processorid)(cpu_id);
	for (cnt = 0; cpu_id != -1; cnt++) {
		CPUSET_ADD(cpumask, cpu_id);
		cpu_id = (*pops->psm_get_next_processorid)(cpu_id);
	}

	mp_cpus = cpumask;

	/* MP related routines */
	ap_mlsetup = pops->psm_post_cpu_start;
	send_dirintf = pops->psm_send_ipi;

	/* optional MP related routines */
	if (pops->psm_shutdown)
		psm_shutdownf = pops->psm_shutdown;
	if (pops->psm_preshutdown)
		psm_preshutdownf = pops->psm_preshutdown;
	if (pops->psm_notify_func)
		psm_notifyf = pops->psm_notify_func;
	if (pops->psm_set_idlecpu)
		psm_set_idle_cpuf = pops->psm_set_idlecpu;
	if (pops->psm_unset_idlecpu)
		psm_unset_idle_cpuf = pops->psm_unset_idlecpu;

	psm_clkinit = pops->psm_clkinit;

	if (pops->psm_timer_reprogram)
		psm_timer_reprogram = pops->psm_timer_reprogram;

	if (pops->psm_timer_enable)
		psm_timer_enable = pops->psm_timer_enable;

	if (pops->psm_timer_disable)
		psm_timer_disable = pops->psm_timer_disable;

	if (pops->psm_post_cyclic_setup)
		psm_post_cyclic_setup = pops->psm_post_cyclic_setup;

	if (pops->psm_state)
		psm_state = pops->psm_state;

	/*
	 * Set these vectors here so they can be used by Suspend/Resume
	 * on UP machines.
	 */
	if (pops->psm_disable_intr)
		psm_disable_intr = pops->psm_disable_intr;
	if (pops->psm_enable_intr)
		psm_enable_intr  = pops->psm_enable_intr;

	/* check for multiple CPUs */
	if (cnt < 2)
		return;

	/* check for MP platforms */
	if (pops->psm_cpu_start == NULL)
		return;

	/*
	 * Set the dispatcher hook to enable cpu "wake up"
	 * when a thread becomes runnable.
	 */
	if (idle_cpu_use_hlt) {
		disp_enq_thread = cpu_wakeup;
#ifndef __xpv
		if ((x86_feature & X86_MWAIT) && idle_cpu_prefer_mwait)
			disp_enq_thread = cpu_wakeup_mwait;
#endif
	}

	psm_get_ipivect = pops->psm_get_ipivect;

	(void) add_avintr((void *)NULL, XC_HI_PIL, xc_serv, "xc_hi_intr",
	    (*pops->psm_get_ipivect)(XC_HI_PIL, PSM_INTR_IPI_HI),
	    (caddr_t)X_CALL_HIPRI, NULL, NULL, NULL);
	(void) add_avintr((void *)NULL, XC_MED_PIL, xc_serv, "xc_med_intr",
	    (*pops->psm_get_ipivect)(XC_MED_PIL, PSM_INTR_IPI_LO),
	    (caddr_t)X_CALL_MEDPRI, NULL, NULL, NULL);

	(void) (*pops->psm_get_ipivect)(XC_CPUPOKE_PIL, PSM_INTR_POKE);
}
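
/*
 * For illustration: the idle routine chosen in mach_init() (cpu_idle vs.
 * cpu_idle_mwait) and the wakeup hook installed above (cpu_wakeup vs.
 * cpu_wakeup_mwait) are selected under the same
 * (x86_feature & X86_MWAIT) && idle_cpu_prefer_mwait condition, so the pair
 * always matches: an mwait-based wakeup merely writes the monitored line,
 * which would never rouse a CPU halted in hlt, while a hlt-based wakeup
 * relies on poke_cpu(), which the mwait idle loop does not need.
 */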

static void
mach_picinit()
{
	struct psm_ops *pops;

	pops = mach_set[0];

	/* register the interrupt handlers */
	setlvl = pops->psm_intr_enter;
	setlvlx = pops->psm_intr_exit;

	/* initialize the interrupt hardware */
	(*pops->psm_picinit)();

	/* set interrupt mask for current ipl */
	setspl = pops->psm_setspl;
	cli();
	setspl(CPU->cpu_pri);
}

uint_t	cpu_freq;	/* MHz */
uint64_t cpu_freq_hz;	/* measured (in hertz) */

#define	MEGA_HZ		1000000

#ifdef	__xpv

int xpv_cpufreq_workaround = 1;
int xpv_cpufreq_verbose = 0;

#else	/* __xpv */

static uint64_t
mach_calchz(uint32_t pit_counter, uint64_t *processor_clks)
{
	uint64_t cpu_hz;

	if ((pit_counter == 0) || (*processor_clks == 0) ||
	    (*processor_clks > (((uint64_t)-1) / PIT_HZ)))
		return (0);

	cpu_hz = ((uint64_t)PIT_HZ * *processor_clks) / pit_counter;

	return (cpu_hz);
}
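
/*
 * For illustration, a worked example of the calculation above (the numbers
 * are hypothetical): if freq_tsc() reports that 2,793,000 processor clocks
 * elapsed while the PIT counted 2,793 ticks, then with PIT_HZ (the i8254
 * input clock, roughly 1193182 Hz) the result is
 *
 *	cpu_hz = (1193182 * 2793000) / 2793 = 1193182000
 *
 * i.e. about a 1.19 GHz part.  The *processor_clks > ((uint64_t)-1) / PIT_HZ
 * guard simply keeps the 64-bit multiplication from overflowing before the
 * division.
 */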

#endif	/* __xpv */

static uint64_t
mach_getcpufreq(void)
{
#if defined(__xpv)
	vcpu_time_info_t *vti = &CPU->cpu_m.mcpu_vcpu_info->time;
	uint64_t cpu_hz;

	/*
	 * During dom0 bringup, it was noted that on at least one older
	 * Intel HT machine, the hypervisor initially gives a tsc_to_system_mul
	 * value that is quite wrong (the 3.06GHz clock was reported
	 * as 4.77GHz)
	 *
	 * The curious thing is, that if you stop the kernel at entry,
	 * breakpoint here and inspect the value with kmdb, the value
	 * is correct - but if you don't stop and simply enable the
	 * printf statement (below), you can see the bad value printed
	 * here. Almost as if something kmdb did caused the hypervisor to
	 * figure it out correctly. And, note that the hypervisor
	 * eventually -does- figure it out correctly ... if you look at
	 * the field later in the life of dom0, it is correct.
	 *
	 * For now, on dom0, we employ a slightly cheesy workaround of
	 * using the DOM0_PHYSINFO hypercall.
	 */
	if (DOMAIN_IS_INITDOMAIN(xen_info) && xpv_cpufreq_workaround) {
		xen_sysctl_t op0, *op = &op0;

		op->cmd = XEN_SYSCTL_physinfo;
		op->interface_version = XEN_SYSCTL_INTERFACE_VERSION;
		if (HYPERVISOR_sysctl(op) != 0)
			panic("physinfo op refused");

		cpu_hz = 1000 * (uint64_t)op->u.physinfo.cpu_khz;
	} else {
		cpu_hz = (UINT64_C(1000000000) << 32) / vti->tsc_to_system_mul;

		if (vti->tsc_shift < 0)
			cpu_hz <<= -vti->tsc_shift;
		else
			cpu_hz >>= vti->tsc_shift;
	}

	if (xpv_cpufreq_verbose)
		printf("mach_getcpufreq: system_mul 0x%x, shift %d, "
		    "cpu_hz %" PRId64 "Hz\n",
		    vti->tsc_to_system_mul, vti->tsc_shift, cpu_hz);

	return (cpu_hz);
#else	/* __xpv */
	uint32_t pit_counter;
	uint64_t processor_clks;

	if (x86_feature & X86_TSC) {
		/*
		 * We have a TSC. freq_tsc() knows how to measure the number
		 * of clock cycles sampled against the PIT.
		 */
		ulong_t flags = clear_int_flag();
		processor_clks = freq_tsc(&pit_counter);
		restore_int_flag(flags);
		return (mach_calchz(pit_counter, &processor_clks));
	} else if (x86_vendor == X86_VENDOR_Cyrix || x86_type == X86_TYPE_P5) {
#if defined(__amd64)
		panic("mach_getcpufreq: no TSC!");
#elif defined(__i386)
		/*
		 * We are a Cyrix based on a 6x86 core or an Intel Pentium
		 * for which freq_notsc() knows how to measure the number of
		 * elapsed clock cycles sampled against the PIT
		 */
		ulong_t flags = clear_int_flag();
		processor_clks = freq_notsc(&pit_counter);
		restore_int_flag(flags);
		return (mach_calchz(pit_counter, &processor_clks));
#endif	/* __i386 */
	}

	/* We do not know how to calculate cpu frequency for this cpu. */
	return (0);
#endif	/* __xpv */
}

/*
 * If the clock speed of a cpu is found to be reported incorrectly, do not
 * add to this array; instead, improve the accuracy of the algorithm that
 * determines the clock speed of the processor or extend the implementation
 * to support the vendor as appropriate. This is here only to support
 * adjusting the speed on older slower processors that mach_fixcpufreq()
 * would not be able to account for otherwise.
 */
static int x86_cpu_freq[] = { 60, 75, 80, 90, 120, 160, 166, 175, 180, 233 };

/*
 * On fast processors the clock frequency that is measured may be off by
 * a few MHz from the value printed on the part. This is a combination of
 * two factors: for such fast parts, being off by this much is within the
 * tolerances for manufacture, and the measurement itself is difficult
 * enough to introduce a small error. This function uses some heuristics
 * in order to tweak the value that was measured to match what is most
 * likely printed on the part.
 *
 * Some examples:
 *	AMD Athlon 1000 mhz measured as 998 mhz
 *	Intel Pentium III Xeon 733 mhz measured as 731 mhz
 *	Intel Pentium IV 1500 mhz measured as 1495 mhz
 *
 * If in the future this function is no longer sufficient to correct
 * for the error in the measurement, then the algorithm used to perform
 * the measurement will have to be improved in order to increase accuracy
 * rather than adding horrible and questionable kludges here.
 *
 * This is called after the cyclics subsystem because of the potential
 * that the heuristics within may give a worse estimate of the clock
 * frequency than the value that was measured.
 */
static void
mach_fixcpufreq(void)
{
	uint32_t freq, mul, near66, delta66, near50, delta50, fixed, delta, i;

	freq = (uint32_t)cpu_freq;

	/*
	 * Find the nearest integer multiple of 200/3 (about 66) MHz to the
	 * measured speed taking into account that the 667 MHz parts were
	 * the first to round-up.
	 */
	mul = (uint32_t)((3 * (uint64_t)freq + 100) / 200);
	near66 = (uint32_t)((200 * (uint64_t)mul + ((mul >= 10) ? 1 : 0)) / 3);
	delta66 = (near66 > freq) ? (near66 - freq) : (freq - near66);

	/* Find the nearest integer multiple of 50 MHz to the measured speed */
	mul = (freq + 25) / 50;
	near50 = mul * 50;
	delta50 = (near50 > freq) ? (near50 - freq) : (freq - near50);

	/* Find the closer of the two */
	if (delta66 < delta50) {
		fixed = near66;
		delta = delta66;
	} else {
		fixed = near50;
		delta = delta50;
	}

	if (fixed > INT_MAX)
		return;

	/*
	 * Some older parts have a core clock frequency that is not an
	 * integral multiple of 50 or 66 MHz. Check if one of the old
	 * clock frequencies is closer to the measured value than any
	 * of the integral multiples of 50 and 66, and if so set fixed
	 * and delta appropriately to represent the closest value.
	 */
	i = sizeof (x86_cpu_freq) / sizeof (int);
	while (i > 0) {
		i--;

		if (x86_cpu_freq[i] <= freq) {
			mul = freq - x86_cpu_freq[i];

			if (mul < delta) {
				fixed = x86_cpu_freq[i];
				delta = mul;
			}

			break;
		}

		mul = x86_cpu_freq[i] - freq;

		if (mul < delta) {
			fixed = x86_cpu_freq[i];
			delta = mul;
		}
	}

	/*
	 * Set a reasonable maximum for how much to correct the measured
	 * result by. This check is here to prevent the adjustment made
	 * by this function from being more harm than good. It is entirely
	 * possible that in the future parts will be made that are not
	 * integral multiples of 66 or 50 in clock frequency or that
	 * someone may overclock a part to some odd frequency. If the
	 * measured value is farther from the corrected value than
	 * allowed, then assume the corrected value is in error and use
	 * the measured value.
	 */
	if (6 < delta)
		return;

	cpu_freq = (int)fixed;
}
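
/*
 * For illustration, a worked example of the heuristic above using the Athlon
 * case mentioned earlier: with a measured cpu_freq of 998,
 *
 *	mul    = (3 * 998 + 100) / 200 = 15
 *	near66 = (200 * 15 + 1) / 3 = 1000, so delta66 = 2
 *	mul    = (998 + 25) / 50 = 20
 *	near50 = 20 * 50 = 1000, so delta50 = 2
 *
 * so fixed == 1000 and delta == 2, which is within the 6 MHz cutoff at the
 * end of the function, and cpu_freq is corrected from 998 to 1000 MHz.
 * A measurement of 990, by contrast, would yield delta == 10 and be left
 * untouched.
 */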

static int
machhztomhz(uint64_t cpu_freq_hz)
{
	uint64_t cpu_mhz;

	/* Round to nearest MHZ */
	cpu_mhz = (cpu_freq_hz + (MEGA_HZ / 2)) / MEGA_HZ;

	if (cpu_mhz > INT_MAX)
		return (0);

	return ((int)cpu_mhz);
}

static int
mach_clkinit(int preferred_mode, int *set_mode)
{
	struct psm_ops *pops;
	int resolution;

	pops = mach_set[0];

	cpu_freq_hz = mach_getcpufreq();

	cpu_freq = machhztomhz(cpu_freq_hz);

	if (!(x86_feature & X86_TSC) || (cpu_freq == 0))
		tsc_gethrtime_enable = 0;

#ifndef __xpv
	if (tsc_gethrtime_enable) {
		tsc_hrtimeinit(cpu_freq_hz);
	} else
#endif
	{
		if (pops->psm_hrtimeinit)
			(*pops->psm_hrtimeinit)();
		gethrtimef = pops->psm_gethrtime;
		gethrtimeunscaledf = gethrtimef;
		/* scalehrtimef will remain dummy */
	}

	mach_fixcpufreq();

	if (mach_ver[0] >= PSM_INFO_VER01_3) {
		if (preferred_mode == TIMER_ONESHOT) {

			resolution = (*pops->psm_clkinit)(0);
			if (resolution != 0) {
				*set_mode = TIMER_ONESHOT;
				return (resolution);
			}
		}

		/*
		 * either periodic mode was requested or could not set to
		 * one-shot mode
		 */
		resolution = (*pops->psm_clkinit)(hz);
		/*
		 * psm should be able to do periodic, so we do not check
		 * for return value of psm_clkinit here.
		 */
		*set_mode = TIMER_PERIODIC;
		return (resolution);
	} else {
		/*
		 * PSMI interface prior to PSMI_3 does not define a return
		 * value for psm_clkinit, so the return value is ignored.
		 */
		(void) (*pops->psm_clkinit)(hz);
		*set_mode = TIMER_PERIODIC;
		return (nsec_per_tick);
	}
}

/*ARGSUSED*/
static int
mach_softlvl_to_vect(int ipl)
{
	setsoftint = av_set_softint_pending;
	kdisetsoftint = kdi_av_set_softint_pending;

	return (PSM_SV_SOFTWARE);
}

#ifdef DEBUG
/*
 * This is here to allow us to simulate cpus that refuse to start.
 */
cpuset_t cpufailset;
#endif

int
mach_cpu_start(struct cpu *cp, void *ctx)
{
	struct psm_ops *pops = mach_set[0];
	processorid_t id = cp->cpu_id;

#ifdef DEBUG
	if (CPU_IN_SET(cpufailset, id))
		return (0);
#endif
	return ((*pops->psm_cpu_start)(id, ctx));
}

int
mach_cpuid_start(processorid_t id, void *ctx)
{
	struct psm_ops *pops = mach_set[0];

#ifdef DEBUG
	if (CPU_IN_SET(cpufailset, id))
		return (0);
#endif
	return ((*pops->psm_cpu_start)(id, ctx));
}

/*ARGSUSED*/
static int
mach_translate_irq(dev_info_t *dip, int irqno)
{
	return (irqno);	/* default to NO translation */
}

static void
mach_notify_error(int level, char *errmsg)
{
	/*
	 * SL_FATAL is passed in once panicstr is set; deliver it
	 * as CE_PANIC.  Also, translate SL_ codes back to CE_
	 * codes for the psmi handler.
	 */
	if (level & SL_FATAL)
		(*notify_error)(CE_PANIC, errmsg);
	else if (level & SL_WARN)
		(*notify_error)(CE_WARN, errmsg);
	else if (level & SL_NOTE)
		(*notify_error)(CE_NOTE, errmsg);
	else if (level & SL_CONSOLE)
		(*notify_error)(CE_CONT, errmsg);
}

/*
 * Provides the default basic intr_ops interface for the new DDI
 * interrupt framework if the PSM doesn't have one.
 *
 * Input:
 * dip     - pointer to the dev_info structure of the requested device
 * hdlp    - pointer to the internal interrupt handle structure for the
 *	     requested interrupt
 * intr_op - opcode for this call
 * result  - pointer to the integer that will hold the result to be
 *	     passed back if return value is PSM_SUCCESS
 *
 * Output:
 * return value is either PSM_SUCCESS or PSM_FAILURE
 */
static int
mach_intr_ops(dev_info_t *dip, ddi_intr_handle_impl_t *hdlp,
    psm_intr_op_t intr_op, int *result)
{
	struct intrspec *ispec;

	switch (intr_op) {
	case PSM_INTR_OP_CHECK_MSI:
		*result = hdlp->ih_type & ~(DDI_INTR_TYPE_MSI |
		    DDI_INTR_TYPE_MSIX);
		break;
	case PSM_INTR_OP_ALLOC_VECTORS:
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			*result = 1;
		else
			*result = 0;
		break;
	case PSM_INTR_OP_FREE_VECTORS:
		break;
	case PSM_INTR_OP_NAVAIL_VECTORS:
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			*result = 1;
		else
			*result = 0;
		break;
	case PSM_INTR_OP_XLATE_VECTOR:
		ispec = ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp;
		*result = psm_translate_irq(dip, ispec->intrspec_vec);
		break;
	case PSM_INTR_OP_GET_CAP:
		*result = 0;
		break;
	case PSM_INTR_OP_GET_PENDING:
	case PSM_INTR_OP_CLEAR_MASK:
	case PSM_INTR_OP_SET_MASK:
	case PSM_INTR_OP_GET_SHARED:
	case PSM_INTR_OP_SET_PRI:
	case PSM_INTR_OP_SET_CAP:
	case PSM_INTR_OP_SET_CPU:
	case PSM_INTR_OP_GET_INTR:
	default:
		return (PSM_FAILURE);
	}
	return (PSM_SUCCESS);
}

/*
 * Return 1 if CMT load balancing policies should be
 * implemented across instances of the specified hardware
 * sharing relationship.
 */
int
pg_cmt_load_bal_hw(pghw_type_t hw)
{
	if (hw == PGHW_IPIPE ||
	    hw == PGHW_FPU ||
	    hw == PGHW_CHIP)
		return (1);
	else
		return (0);
}

/*
 * Return 1 if thread affinity policies should be implemented
 * for instances of the specified hardware sharing relationship.
 */
int
pg_cmt_affinity_hw(pghw_type_t hw)
{
	if (hw == PGHW_CACHE)
		return (1);
	else
		return (0);
}