/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright (c) 2009-2010, Intel Corporation.
 * All rights reserved.
 * Copyright 2018 Joyent, Inc.
 */

#define	PSMI_1_7
#include <sys/smp_impldefs.h>
#include <sys/psm.h>
#include <sys/psm_modctl.h>
#include <sys/pit.h>
#include <sys/cmn_err.h>
#include <sys/strlog.h>
#include <sys/clock.h>
#include <sys/debug.h>
#include <sys/rtc.h>
#include <sys/x86_archext.h>
#include <sys/cpupart.h>
#include <sys/cpuvar.h>
#include <sys/cpu_event.h>
#include <sys/cmt.h>
#include <sys/cpu.h>
#include <sys/disp.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/sysmacros.h>
#include <sys/memlist.h>
#include <sys/param.h>
#include <sys/promif.h>
#include <sys/cpu_pm.h>
#if defined(__xpv)
#include <sys/hypervisor.h>
#endif
#include <sys/mach_intr.h>
#include <vm/hat_i86.h>
#include <sys/kdi_machimpl.h>
#include <sys/sdt.h>
#include <sys/hpet.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/cpc_pcbe.h>

#define	OFFSETOF(s, m)	(size_t)(&(((s *)0)->m))

/*
 * Local function prototypes
 */
static int mp_disable_intr(processorid_t cpun);
static void mp_enable_intr(processorid_t cpun);
static void mach_init();
static void mach_picinit();
static int machhztomhz(uint64_t cpu_freq_hz);
static uint64_t mach_getcpufreq(void);
static void mach_fixcpufreq(void);
static int mach_clkinit(int, int *);
static void mach_smpinit(void);
static int mach_softlvl_to_vect(int ipl);
static void mach_get_platform(int owner);
static void mach_construct_info();
static int mach_translate_irq(dev_info_t *dip, int irqno);
static int mach_intr_ops(dev_info_t *, ddi_intr_handle_impl_t *,
    psm_intr_op_t, int *);
static void mach_notify_error(int level, char *errmsg);
static hrtime_t dummy_hrtime(void);
static void dummy_scalehrtime(hrtime_t *);
static uint64_t dummy_unscalehrtime(hrtime_t);
void cpu_idle(void);
static void cpu_wakeup(cpu_t *, int);
#ifndef __xpv
void cpu_idle_mwait(void);
static void cpu_wakeup_mwait(cpu_t *, int);
#endif
static int mach_cpu_create_devinfo(cpu_t *cp, dev_info_t **dipp);

/*
 * External reference functions
 */
extern void return_instr();
extern uint64_t freq_tsc(uint32_t *);
#if defined(__i386)
extern uint64_t freq_notsc(uint32_t *);
#endif
extern void pc_gethrestime(timestruc_t *);
extern int cpuid_get_coreid(cpu_t *);
extern int cpuid_get_chipid(cpu_t *);
/*
 * PSM functions initialization
 */
void (*psm_shutdownf)(int, int) = (void (*)(int, int))return_instr;
void (*psm_preshutdownf)(int, int) = (void (*)(int, int))return_instr;
void (*psm_notifyf)(int) = (void (*)(int))return_instr;
void (*psm_set_idle_cpuf)(int) = (void (*)(int))return_instr;
void (*psm_unset_idle_cpuf)(int) = (void (*)(int))return_instr;
void (*psminitf)() = mach_init;
void (*picinitf)() = return_instr;
int (*clkinitf)(int, int *) = (int (*)(int, int *))return_instr;
int (*ap_mlsetup)() = (int (*)(void))return_instr;
void (*send_dirintf)() = return_instr;
void (*setspl)(int) = (void (*)(int))return_instr;
int (*addspl)(int, int, int, int) = (int (*)(int, int, int, int))return_instr;
int (*delspl)(int, int, int, int) = (int (*)(int, int, int, int))return_instr;
int (*get_pending_spl)(void) = (int (*)(void))return_instr;
int (*addintr)(void *, int, avfunc, char *, int, caddr_t, caddr_t,
    uint64_t *, dev_info_t *) = NULL;
void (*remintr)(void *, int, avfunc, int) = NULL;
void (*kdisetsoftint)(int, struct av_softinfo *) =
    (void (*)(int, struct av_softinfo *))return_instr;
void (*setsoftint)(int, struct av_softinfo *) =
    (void (*)(int, struct av_softinfo *))return_instr;
int (*slvltovect)(int) = (int (*)(int))return_instr;
int (*setlvl)(int, int *) = (int (*)(int, int *))return_instr;
void (*setlvlx)(int, int) = (void (*)(int, int))return_instr;
int (*psm_disable_intr)(int) = mp_disable_intr;
void (*psm_enable_intr)(int) = mp_enable_intr;
hrtime_t (*gethrtimef)(void) = dummy_hrtime;
hrtime_t (*gethrtimeunscaledf)(void) = dummy_hrtime;
void (*scalehrtimef)(hrtime_t *) = dummy_scalehrtime;
uint64_t (*unscalehrtimef)(hrtime_t) = dummy_unscalehrtime;
int (*psm_translate_irq)(dev_info_t *, int) = mach_translate_irq;
void (*gethrestimef)(timestruc_t *) = pc_gethrestime;
void (*psm_notify_error)(int, char *) = (void (*)(int, char *))NULL;
int (*psm_get_clockirq)(int) = NULL;
int (*psm_get_ipivect)(int, int) = NULL;
uchar_t (*psm_get_ioapicid)(uchar_t) = NULL;
uint32_t (*psm_get_localapicid)(uint32_t) = NULL;
uchar_t (*psm_xlate_vector_by_irq)(uchar_t) = NULL;
int (*psm_get_pir_ipivect)(void) = NULL;
void (*psm_send_pir_ipi)(processorid_t) = NULL;
void (*psm_cmci_setup)(processorid_t, boolean_t) = NULL;

int (*psm_clkinit)(int) = NULL;
void (*psm_timer_reprogram)(hrtime_t) = NULL;
void (*psm_timer_enable)(void) = NULL;
void (*psm_timer_disable)(void) = NULL;
void (*psm_post_cyclic_setup)(void *arg) = NULL;
int (*psm_intr_ops)(dev_info_t *, ddi_intr_handle_impl_t *, psm_intr_op_t,
    int *) = mach_intr_ops;
int (*psm_state)(psm_state_request_t *) = (int (*)(psm_state_request_t *))
    return_instr;

void (*notify_error)(int, char *) = (void (*)(int, char *))return_instr;
void (*hrtime_tick)(void) = return_instr;

int (*psm_cpu_create_devinfo)(cpu_t *, dev_info_t **) = mach_cpu_create_devinfo;
int (*psm_cpu_get_devinfo)(cpu_t *, dev_info_t **) = NULL;

/* global IRM pool for APIX (PSM) module */
ddi_irm_pool_t *apix_irm_pool_p = NULL;
/*
 * True if the generic TSC code is our source of hrtime, rather than whatever
 * the PSM can provide.
 */
#ifdef __xpv
int tsc_gethrtime_enable = 0;
#else
int tsc_gethrtime_enable = 1;
#endif
int tsc_gethrtime_initted = 0;

/*
 * True if the hrtime implementation is "hires"; namely, better than microdata.
 */
int gethrtime_hires = 0;

/*
 * Local Static Data
 */
static struct psm_ops mach_ops;
static struct psm_ops *mach_set[4] = {&mach_ops, NULL, NULL, NULL};
static ushort_t mach_ver[4] = {0, 0, 0, 0};

/*
 * virtualization support for psm
 */
void *psm_vt_ops = NULL;
/*
 * If non-zero, idle cpus will become "halted" when there's
 * no work to do.
 */
int idle_cpu_use_hlt = 1;

#ifndef __xpv
/*
 * If non-zero, idle cpus will use mwait if available to halt instead of hlt.
 */
int idle_cpu_prefer_mwait = 1;
/*
 * Set to 0 to avoid MONITOR+CLFLUSH assertion.
 */
int idle_cpu_assert_cflush_monitor = 1;

/*
 * If non-zero, idle cpus will not use power saving Deep C-States idle loop.
 */
int idle_cpu_no_deep_c = 0;
/*
 * Non-power saving idle loop and wakeup pointers.
 * Allows user to toggle Deep Idle power saving feature on/off.
 */
void (*non_deep_idle_cpu)() = cpu_idle;
void (*non_deep_idle_disp_enq_thread)(cpu_t *, int);

/*
 * Object for the kernel to access the HPET.
 */
hpet_t hpet;

#endif	/* ifndef __xpv */

uint_t cp_haltset_fanout = 0;

/*ARGSUSED*/
int
pg_plat_hw_shared(cpu_t *cp, pghw_type_t hw)
{
	switch (hw) {
	case PGHW_IPIPE:
		if (is_x86_feature(x86_featureset, X86FSET_HTT)) {
			/*
			 * Hyper-threading is SMT
			 */
			return (1);
		} else {
			return (0);
		}
	case PGHW_FPU:
		if (cpuid_get_cores_per_compunit(cp) > 1)
			return (1);
		else
			return (0);
	case PGHW_PROCNODE:
		if (cpuid_get_procnodes_per_pkg(cp) > 1)
			return (1);
		else
			return (0);
	case PGHW_CHIP:
		if (is_x86_feature(x86_featureset, X86FSET_CMP) ||
		    is_x86_feature(x86_featureset, X86FSET_HTT))
			return (1);
		else
			return (0);
	case PGHW_CACHE:
		if (cpuid_get_ncpu_sharing_last_cache(cp) > 1)
			return (1);
		else
			return (0);
	case PGHW_POW_ACTIVE:
		if (cpupm_domain_id(cp, CPUPM_DTYPE_ACTIVE) != (id_t)-1)
			return (1);
		else
			return (0);
	case PGHW_POW_IDLE:
		if (cpupm_domain_id(cp, CPUPM_DTYPE_IDLE) != (id_t)-1)
			return (1);
		else
			return (0);
	default:
		return (0);
	}
}

/*
 * Compare two CPUs and see if they have a pghw_type_t sharing relationship.
 * If pghw_type_t is an unsupported hardware type, then return -1.
 */
int
pg_plat_cpus_share(cpu_t *cpu_a, cpu_t *cpu_b, pghw_type_t hw)
{
	id_t pgp_a, pgp_b;

	pgp_a = pg_plat_hw_instance_id(cpu_a, hw);
	pgp_b = pg_plat_hw_instance_id(cpu_b, hw);

	if (pgp_a == -1 || pgp_b == -1)
		return (-1);

	return (pgp_a == pgp_b);
}

/*
 * Return a physical instance identifier for known hardware sharing
 * relationships
 */
id_t
pg_plat_hw_instance_id(cpu_t *cpu, pghw_type_t hw)
{
	switch (hw) {
	case PGHW_IPIPE:
		return (cpuid_get_coreid(cpu));
	case PGHW_CACHE:
		return (cpuid_get_last_lvl_cacheid(cpu));
	case PGHW_FPU:
		return (cpuid_get_compunitid(cpu));
	case PGHW_PROCNODE:
		return (cpuid_get_procnodeid(cpu));
	case PGHW_CHIP:
		return (cpuid_get_chipid(cpu));
	case PGHW_POW_ACTIVE:
		return (cpupm_domain_id(cpu, CPUPM_DTYPE_ACTIVE));
	case PGHW_POW_IDLE:
		return (cpupm_domain_id(cpu, CPUPM_DTYPE_IDLE));
	default:
		return (-1);
	}
}

/*
 * Express preference for optimizing for sharing relationship
 * hw1 vs hw2
 */
pghw_type_t
pg_plat_hw_rank(pghw_type_t hw1, pghw_type_t hw2)
{
	int i, rank1, rank2;

	static pghw_type_t hw_hier[] = {
		PGHW_IPIPE,
		PGHW_CACHE,
		PGHW_FPU,
		PGHW_PROCNODE,
		PGHW_CHIP,
		PGHW_POW_IDLE,
		PGHW_POW_ACTIVE,
		PGHW_NUM_COMPONENTS
	};

	for (i = 0; hw_hier[i] != PGHW_NUM_COMPONENTS; i++) {
		if (hw_hier[i] == hw1)
			rank1 = i;
		if (hw_hier[i] == hw2)
			rank2 = i;
	}

	if (rank1 > rank2)
		return (hw1);
	else
		return (hw2);
}
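
/*
 * As the loop above implies, entries later in hw_hier[] outrank earlier
 * ones: of the two arguments, pg_plat_hw_rank() returns whichever
 * relationship appears later in the table. For example, given PGHW_IPIPE
 * and PGHW_CHIP it returns PGHW_CHIP.
 */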

/*
 * Override the default CMT dispatcher policy for the specified
 * hardware sharing relationship
 */
pg_cmt_policy_t
pg_plat_cmt_policy(pghw_type_t hw)
{
	/*
	 * For shared caches, also load balance across them to
	 * maximize aggregate cache capacity
	 *
	 * On AMD family 0x15 CPUs, cores come in pairs called
	 * compute units, sharing the FPU and the I$ and L2
	 * caches. Use balancing and cache affinity.
	 */
	switch (hw) {
	case PGHW_FPU:
	case PGHW_CACHE:
		return (CMT_BALANCE|CMT_AFFINITY);
	default:
		return (CMT_NO_POLICY);
	}
}

id_t
pg_plat_get_core_id(cpu_t *cpu)
{
	return ((id_t)cpuid_get_coreid(cpu));
}

void
cmp_set_nosteal_interval(void)
{
	/* Set the nosteal interval (used by disp_getbest()) to 100us */
	nosteal_nsec = 100000UL;
}

/*
 * Routine to ensure initial callers to hrtime get 0 as return
 */
static hrtime_t
dummy_hrtime(void)
{
	return (0);
}

/*ARGSUSED*/
static void
dummy_scalehrtime(hrtime_t *ticks)
{}

static uint64_t
dummy_unscalehrtime(hrtime_t nsecs)
{
	return ((uint64_t)nsecs);
}

/*
 * Supports Deep C-State power saving idle loop.
 */
void
cpu_idle_adaptive(void)
{
	(*CPU->cpu_m.mcpu_idle_cpu)();
}

/*
 * Function called by CPU idle notification framework to check whether CPU
 * has been awakened. It will be called with interrupts disabled.
 * If CPU has been awakened, call cpu_idle_exit() to notify CPU idle
 * notification framework.
 */
/*ARGSUSED*/
static void
cpu_idle_check_wakeup(void *arg)
{
	/*
	 * Toggle interrupt flag to detect pending interrupts.
	 * If interrupt happened, do_interrupt() will notify CPU idle
	 * notification framework so no need to call cpu_idle_exit() here.
	 */
	sti();
	SMT_PAUSE();
	cli();
}

/*
 * Idle the present CPU until awakened via an interrupt
 */
void
cpu_idle(void)
{
	cpu_t *cpup = CPU;
	processorid_t cpu_sid = cpup->cpu_seqid;
	cpupart_t *cp = cpup->cpu_part;
	int hset_update = 1;

	/*
	 * If this CPU is online, and there's multiple CPUs
	 * in the system, then we should note our halting
	 * by adding ourselves to the partition's halted CPU
	 * bitmap. This allows other CPUs to find/awaken us when
	 * work becomes available.
	 */
	if (cpup->cpu_flags & CPU_OFFLINE || ncpus == 1)
		hset_update = 0;

	/*
	 * Add ourselves to the partition's halted CPUs bitmap
	 * and set our HALTED flag, if necessary.
	 *
	 * When a thread becomes runnable, it is placed on the queue
	 * and then the halted CPU bitmap is checked to determine who
	 * (if anyone) should be awakened. We therefore need to first
	 * add ourselves to the bitmap, and then check if there
	 * is any work available. The order is important to prevent a race
	 * that can lead to work languishing on a run queue somewhere while
	 * this CPU remains halted.
	 *
	 * Either the producing CPU will see we're halted and will awaken us,
	 * or this CPU will see the work available in disp_anywork().
	 *
	 * Note that memory barriers after updating the HALTED flag
	 * are not necessary since an atomic operation (updating the bitset)
	 * immediately follows. On x86 the atomic operation acts as a
	 * memory barrier for the update of cpu_disp_flags.
	 */
	if (hset_update) {
		cpup->cpu_disp_flags |= CPU_DISP_HALTED;
		bitset_atomic_add(&cp->cp_haltset, cpu_sid);
	}

	/*
	 * Check to make sure there's really nothing to do.
	 * Work destined for this CPU may become available after
	 * this check. We'll be notified through the clearing of our
	 * bit in the halted CPU bitmap, and a poke.
	 */
	if (disp_anywork()) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		}
		return;
	}

	/*
	 * We're on our way to being halted.
	 *
	 * Disable interrupts now, so that we'll awaken immediately
	 * after halting if someone tries to poke us between now and
	 * the time we actually halt.
	 *
	 * We check for the presence of our bit after disabling interrupts.
	 * If it's cleared, we'll return. If the bit is cleared after
	 * we check then the poke will pop us out of the halted state.
	 *
	 * This means that the ordering of the poke and the clearing
	 * of the bit by cpu_wakeup is important.
	 * cpu_wakeup() must clear, then poke.
	 * cpu_idle() must disable interrupts, then check for the bit.
	 */
	cli();

	if (hset_update && bitset_in_set(&cp->cp_haltset, cpu_sid) == 0) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		sti();
		return;
	}

	/*
	 * The check for anything locally runnable is here for performance
	 * and isn't needed for correctness. disp_nrunnable ought to be
	 * in our cache still, so it's inexpensive to check, and if there
	 * is anything runnable we won't have to wait for the poke.
	 */
	if (cpup->cpu_disp->disp_nrunnable != 0) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		}
		sti();
		return;
	}

	if (cpu_idle_enter(IDLE_STATE_C1, 0,
	    cpu_idle_check_wakeup, NULL) == 0) {
		mach_cpu_idle();
		cpu_idle_exit(CPU_IDLE_CB_FLAG_IDLE);
	}

	/*
	 * We're no longer halted
	 */
	if (hset_update) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		bitset_atomic_del(&cp->cp_haltset, cpu_sid);
	}
}
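
/*
 * To summarize the protocol spelled out in the comments above: an idling
 * CPU must (1) set its bit in cp_haltset, (2) check for work, and only
 * then (3) disable interrupts and re-check its bit before halting, while
 * a waking CPU must clear the target's haltset bit before poking it.
 * Any interleaving of these two sequences either leaves the idler's bit
 * visible to the producer or leaves the new work visible to the idler,
 * so a runnable thread cannot be stranded.
 */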

/*
 * If "cpu" is halted, then wake it up clearing its halted bit in advance.
 * Otherwise, see if other CPUs in the cpu partition are halted and need to
 * be woken up so that they can steal the thread we placed on this CPU.
 * This function is only used on MP systems.
 */
static void
cpu_wakeup(cpu_t *cpu, int bound)
{
	uint_t cpu_found;
	processorid_t cpu_sid;
	cpupart_t *cp;

	cp = cpu->cpu_part;
	cpu_sid = cpu->cpu_seqid;
	if (bitset_in_set(&cp->cp_haltset, cpu_sid)) {
		/*
		 * Clear the halted bit for that CPU since it will be
		 * poked in a moment.
		 */
		bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		/*
		 * We may find the current CPU present in the halted cpuset
		 * if we're in the context of an interrupt that occurred
		 * before we had a chance to clear our bit in cpu_idle().
		 * Poking ourself is obviously unnecessary, since if
		 * we're here, we're not halted.
		 */
		if (cpu != CPU)
			poke_cpu(cpu->cpu_id);
		return;
	} else {
		/*
		 * This cpu isn't halted, but it's idle or undergoing a
		 * context switch. No need to awaken anyone else.
		 */
		if (cpu->cpu_thread == cpu->cpu_idle_thread ||
		    cpu->cpu_disp_flags & CPU_DISP_DONTSTEAL)
			return;
	}

	/*
	 * No need to wake up other CPUs if this is for a bound thread.
	 */
	if (bound)
		return;

	/*
	 * The CPU specified for wakeup isn't currently halted, so check
	 * to see if there are any other halted CPUs in the partition,
	 * and if there are then awaken one.
	 */
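	/*
	 * Note that another CPU may race us here and awaken the CPU we
	 * pick before we can claim it; bitset_atomic_test_and_del()
	 * fails in that case, so keep looking until we claim a halted
	 * CPU or the set is empty (mirroring the comment on the same
	 * loop in cpu_wakeup_mwait() below).
	 */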
	do {
		cpu_found = bitset_find(&cp->cp_haltset);
		if (cpu_found == (uint_t)-1)
			return;
	} while (bitset_atomic_test_and_del(&cp->cp_haltset, cpu_found) < 0);

	if (cpu_found != CPU->cpu_seqid) {
		poke_cpu(cpu_seq[cpu_found]->cpu_id);
	}
}

#ifndef __xpv
/*
 * Function called by CPU idle notification framework to check whether CPU
 * has been awakened. It will be called with interrupts disabled.
 * If CPU has been awakened, call cpu_idle_exit() to notify CPU idle
 * notification framework.
 */
static void
cpu_idle_mwait_check_wakeup(void *arg)
{
	volatile uint32_t *mcpu_mwait = (volatile uint32_t *)arg;

	ASSERT(arg != NULL);
	if (*mcpu_mwait != MWAIT_HALTED) {
		/*
		 * CPU has been awakened, notify CPU idle notification system.
		 */
		cpu_idle_exit(CPU_IDLE_CB_FLAG_IDLE);
	} else {
		/*
		 * Toggle interrupt flag to detect pending interrupts.
		 * If interrupt happened, do_interrupt() will notify CPU idle
		 * notification framework so no need to call cpu_idle_exit()
		 * here.
		 */
		sti();
		SMT_PAUSE();
		cli();
	}
}

/*
 * Idle the present CPU until awakened via touching its monitored line
 */
void
cpu_idle_mwait(void)
{
	volatile uint32_t *mcpu_mwait = CPU->cpu_m.mcpu_mwait;
	cpu_t *cpup = CPU;
	processorid_t cpu_sid = cpup->cpu_seqid;
	cpupart_t *cp = cpup->cpu_part;
	int hset_update = 1;

	/*
	 * Set our mcpu_mwait here, so we can tell if anyone tries to
	 * wake us between now and when we call mwait. No other cpu will
	 * attempt to set our mcpu_mwait until we add ourself to the halted
	 * CPU bitmap.
	 */
	*mcpu_mwait = MWAIT_HALTED;

	/*
	 * If this CPU is online, and there's multiple CPUs
	 * in the system, then we should note our halting
	 * by adding ourselves to the partition's halted CPU
	 * bitmap. This allows other CPUs to find/awaken us when
	 * work becomes available.
	 */
	if (cpup->cpu_flags & CPU_OFFLINE || ncpus == 1)
		hset_update = 0;

	/*
	 * Add ourselves to the partition's halted CPUs bitmap
	 * and set our HALTED flag, if necessary.
	 *
	 * When a thread becomes runnable, it is placed on the queue
	 * and then the halted CPU bitmap is checked to determine who
	 * (if anyone) should be awakened. We therefore need to first
	 * add ourselves to the bitmap, and then check if there
	 * is any work available.
	 *
	 * Note that memory barriers after updating the HALTED flag
	 * are not necessary since an atomic operation (updating the bitmap)
	 * immediately follows. On x86 the atomic operation acts as a
	 * memory barrier for the update of cpu_disp_flags.
	 */
	if (hset_update) {
		cpup->cpu_disp_flags |= CPU_DISP_HALTED;
		bitset_atomic_add(&cp->cp_haltset, cpu_sid);
	}

	/*
	 * Check to make sure there's really nothing to do.
	 * Work destined for this CPU may become available after
	 * this check. We'll be notified through the clearing of our
	 * bit in the halted CPU bitmap, and a write to our mcpu_mwait.
	 *
	 * disp_anywork() checks disp_nrunnable, so we do not have to later.
	 */
	if (disp_anywork()) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		}
		return;
	}

	/*
	 * We're on our way to being halted.
	 * To avoid a lost wakeup, arm the monitor before checking if another
	 * cpu wrote to mcpu_mwait to wake us up.
	 */
	i86_monitor(mcpu_mwait, 0, 0);
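	/*
	 * MWAIT parks the CPU until either the monitored line is written
	 * (MWAIT_WAKEUP() in cpu_wakeup_mwait() does exactly that) or an
	 * interrupt arrives, so re-checking mcpu_mwait between MONITOR
	 * and MWAIT closes the window in which a wakeup write could
	 * otherwise be missed.
	 */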
	if (*mcpu_mwait == MWAIT_HALTED) {
		if (cpu_idle_enter(IDLE_STATE_C1, 0,
		    cpu_idle_mwait_check_wakeup, (void *)mcpu_mwait) == 0) {
			if (*mcpu_mwait == MWAIT_HALTED) {
				i86_mwait(0, 0);
			}
			cpu_idle_exit(CPU_IDLE_CB_FLAG_IDLE);
		}
	}

	/*
	 * We're no longer halted
	 */
	if (hset_update) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		bitset_atomic_del(&cp->cp_haltset, cpu_sid);
	}
}

/*
 * If "cpu" is halted in mwait, then wake it up clearing its halted bit in
 * advance. Otherwise, see if other CPUs in the cpu partition are halted and
 * need to be woken up so that they can steal the thread we placed on this CPU.
 * This function is only used on MP systems.
 */
static void
cpu_wakeup_mwait(cpu_t *cp, int bound)
{
	cpupart_t *cpu_part;
	uint_t cpu_found;
	processorid_t cpu_sid;

	cpu_part = cp->cpu_part;
	cpu_sid = cp->cpu_seqid;

	/*
	 * Clear the halted bit for that CPU since it will be woken up
	 * in a moment.
	 */
	if (bitset_in_set(&cpu_part->cp_haltset, cpu_sid)) {
		/*
		 * Clear the halted bit for that CPU since it will be
		 * poked in a moment.
		 */
		bitset_atomic_del(&cpu_part->cp_haltset, cpu_sid);
		/*
		 * We may find the current CPU present in the halted cpuset
		 * if we're in the context of an interrupt that occurred
		 * before we had a chance to clear our bit in cpu_idle().
		 * Waking ourself is obviously unnecessary, since if
		 * we're here, we're not halted.
		 *
		 * monitor/mwait wakeup via writing to our cache line is
		 * harmless and less expensive than always checking if we
		 * are waking ourself which is an uncommon case.
		 */
		MWAIT_WAKEUP(cp);	/* write to monitored line */
		return;
	} else {
		/*
		 * This cpu isn't halted, but it's idle or undergoing a
		 * context switch. No need to awaken anyone else.
		 */
		if (cp->cpu_thread == cp->cpu_idle_thread ||
		    cp->cpu_disp_flags & CPU_DISP_DONTSTEAL)
			return;
	}

	/*
	 * No need to wake up other CPUs if the thread we just enqueued
	 * is bound.
	 */
	if (bound || ncpus == 1)
		return;

	/*
	 * See if there's any other halted CPUs. If there are, then
	 * select one, and awaken it.
	 * It's possible that after we find a CPU, somebody else
	 * will awaken it before we get the chance.
	 * In that case, look again.
	 */
	do {
		cpu_found = bitset_find(&cpu_part->cp_haltset);
		if (cpu_found == (uint_t)-1)
			return;
	} while (bitset_atomic_test_and_del(&cpu_part->cp_haltset,
	    cpu_found) < 0);

	/*
	 * Do not check if cpu_found is ourself as monitor/mwait
	 * wakeup is cheap.
	 */
	MWAIT_WAKEUP(cpu_seq[cpu_found]);	/* write to monitored line */
}

#endif

void (*cpu_pause_handler)(volatile char *) = NULL;

static int
mp_disable_intr(int cpun)
{
	/*
	 * switch to the offline cpu
	 */
	affinity_set(cpun);
	/*
	 * raise ipl to just below cross call
	 */
	splx(XC_SYS_PIL - 1);
	/*
	 * set base spl to prevent the next swtch to idle from
	 * lowering back to ipl 0
	 */
	CPU->cpu_intr_actv |= (1 << (XC_SYS_PIL - 1));
	set_base_spl();
	affinity_clear();
	return (DDI_SUCCESS);
}

static void
mp_enable_intr(int cpun)
{
	/*
	 * switch to the online cpu
	 */
	affinity_set(cpun);
	/*
	 * clear the interrupt active mask
	 */
	CPU->cpu_intr_actv &= ~(1 << (XC_SYS_PIL - 1));
	set_base_spl();
	(void) spl0();
	affinity_clear();
}
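
/*
 * The PSMI version ladder in mach_get_platform() below relies on each
 * older revision of struct psm_ops being a strict prefix of the current
 * layout: newer entry points are appended, never inserted. It therefore
 * only copies the first total_ops function pointers, where total_ops is
 * derived from the offset of the first member the client's PSMI version
 * does not know about.
 */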
static void
mach_get_platform(int owner)
{
	void **srv_opsp;
	void **clt_opsp;
	int i;
	int total_ops;

	/* fix up psm ops */
	srv_opsp = (void **)mach_set[0];
	clt_opsp = (void **)mach_set[owner];
	if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01)
		total_ops = sizeof (struct psm_ops_ver01) /
		    sizeof (void (*)(void));
	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_1)
		/* no psm_notify_func */
		total_ops = OFFSETOF(struct psm_ops, psm_notify_func) /
		    sizeof (void (*)(void));
	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_2)
		/* no psm_timer funcs */
		total_ops = OFFSETOF(struct psm_ops, psm_timer_reprogram) /
		    sizeof (void (*)(void));
	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_3)
		/* no psm_preshutdown function */
		total_ops = OFFSETOF(struct psm_ops, psm_preshutdown) /
		    sizeof (void (*)(void));
	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_4)
		/* no psm_intr_ops function */
		total_ops = OFFSETOF(struct psm_ops, psm_intr_ops) /
		    sizeof (void (*)(void));
	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_5)
		/* no psm_state function */
		total_ops = OFFSETOF(struct psm_ops, psm_state) /
		    sizeof (void (*)(void));
	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_6)
		/* no psm_cpu_ops function */
		total_ops = OFFSETOF(struct psm_ops, psm_cpu_ops) /
		    sizeof (void (*)(void));
	else
		total_ops = sizeof (struct psm_ops) / sizeof (void (*)(void));

	/*
	 * Save the version of the PSM module, in case we need to
	 * behave differently based on version.
	 */
	mach_ver[0] = mach_ver[owner];

	for (i = 0; i < total_ops; i++)
		if (clt_opsp[i] != NULL)
			srv_opsp[i] = clt_opsp[i];
}

static void
mach_construct_info()
{
	struct psm_sw *swp;
	int mach_cnt[PSM_OWN_OVERRIDE+1] = {0};
	int conflict_owner = 0;

	if (psmsw->psw_forw == psmsw)
		panic("No valid PSM modules found");
	mutex_enter(&psmsw_lock);
	for (swp = psmsw->psw_forw; swp != psmsw; swp = swp->psw_forw) {
		if (!(swp->psw_flag & PSM_MOD_IDENTIFY))
			continue;
		mach_set[swp->psw_infop->p_owner] = swp->psw_infop->p_ops;
		mach_ver[swp->psw_infop->p_owner] = swp->psw_infop->p_version;
		mach_cnt[swp->psw_infop->p_owner]++;
	}
	mutex_exit(&psmsw_lock);

	mach_get_platform(PSM_OWN_SYS_DEFAULT);

	/* check to see if there are any conflicts */
	if (mach_cnt[PSM_OWN_EXCLUSIVE] > 1)
		conflict_owner = PSM_OWN_EXCLUSIVE;
	if (mach_cnt[PSM_OWN_OVERRIDE] > 1)
		conflict_owner = PSM_OWN_OVERRIDE;
	if (conflict_owner) {
		/* remove all psm modules except uppc */
		cmn_err(CE_WARN,
		    "Conflicts detected on the following PSM modules:");
		mutex_enter(&psmsw_lock);
		for (swp = psmsw->psw_forw; swp != psmsw; swp = swp->psw_forw) {
			if (swp->psw_infop->p_owner == conflict_owner)
				cmn_err(CE_WARN, "%s ",
				    swp->psw_infop->p_mach_idstring);
		}
		mutex_exit(&psmsw_lock);
		cmn_err(CE_WARN,
		    "Setting the system back to SINGLE processor mode!");
		cmn_err(CE_WARN,
		    "Please edit /etc/mach to remove the invalid PSM module.");
		return;
	}

	if (mach_set[PSM_OWN_EXCLUSIVE])
		mach_get_platform(PSM_OWN_EXCLUSIVE);

	if (mach_set[PSM_OWN_OVERRIDE])
		mach_get_platform(PSM_OWN_OVERRIDE);
}

static void
mach_init()
{
	struct psm_ops *pops;

	mach_construct_info();

	pops = mach_set[0];

	/* register the interrupt and clock initialization routines */
	picinitf = mach_picinit;
	clkinitf = mach_clkinit;
	psm_get_clockirq = pops->psm_get_clockirq;

	/* register the interrupt setup code */
	slvltovect = mach_softlvl_to_vect;
	addspl = pops->psm_addspl;
	delspl = pops->psm_delspl;

	if (pops->psm_translate_irq)
		psm_translate_irq = pops->psm_translate_irq;
	if (pops->psm_intr_ops)
		psm_intr_ops = pops->psm_intr_ops;

#if defined(PSMI_1_2) || defined(PSMI_1_3) || defined(PSMI_1_4)
	/*
	 * Time-of-day functionality now handled in TOD modules.
	 * (Warn about PSM modules that think that we're going to use
	 * their ops vectors.)
	 */
	if (pops->psm_tod_get)
		cmn_err(CE_WARN, "obsolete psm_tod_get op %p",
		    (void *)pops->psm_tod_get);

	if (pops->psm_tod_set)
		cmn_err(CE_WARN, "obsolete psm_tod_set op %p",
		    (void *)pops->psm_tod_set);
#endif

	if (pops->psm_notify_error) {
		psm_notify_error = mach_notify_error;
		notify_error = pops->psm_notify_error;
	}

	(*pops->psm_softinit)();

	/*
	 * Initialize the dispatcher's function hooks to enable CPU halting
	 * when idle. Set both the deep-idle and non-deep-idle hooks.
	 *
	 * Assume we can use the power saving deep-idle loop
	 * cpu_idle_adaptive. The platform deep-idle driver will reset our
	 * idle loop to non_deep_idle_cpu if the power saving deep-idle
	 * feature is not available.
	 *
	 * Do not use monitor/mwait if idle_cpu_use_hlt is not set
	 * (spin idle) or idle_cpu_prefer_mwait is not set.
	 * Allocate the monitor/mwait buffer for cpu0.
	 */
#ifndef __xpv
	non_deep_idle_disp_enq_thread = disp_enq_thread;
#endif
	if (idle_cpu_use_hlt) {
		idle_cpu = cpu_idle_adaptive;
		CPU->cpu_m.mcpu_idle_cpu = cpu_idle;
#ifndef __xpv
		if (is_x86_feature(x86_featureset, X86FSET_MWAIT) &&
		    idle_cpu_prefer_mwait) {
			CPU->cpu_m.mcpu_mwait = cpuid_mwait_alloc(CPU);
			/*
			 * Protect ourself from insane mwait size.
			 */
			if (CPU->cpu_m.mcpu_mwait == NULL) {
#ifdef DEBUG
				cmn_err(CE_NOTE, "Using hlt idle. Cannot "
				    "handle cpu 0 mwait size.");
#endif
				idle_cpu_prefer_mwait = 0;
				CPU->cpu_m.mcpu_idle_cpu = cpu_idle;
			} else {
				CPU->cpu_m.mcpu_idle_cpu = cpu_idle_mwait;
			}
		} else {
			CPU->cpu_m.mcpu_idle_cpu = cpu_idle;
		}
		non_deep_idle_cpu = CPU->cpu_m.mcpu_idle_cpu;

		/*
		 * Disable power saving deep idle loop?
		 */
		if (idle_cpu_no_deep_c) {
			idle_cpu = non_deep_idle_cpu;
		}
#endif
	}

	mach_smpinit();
}

static void
mach_smpinit(void)
{
	struct psm_ops *pops;
	processorid_t cpu_id;
	int cnt;
	cpuset_t cpumask;

	pops = mach_set[0];
	CPUSET_ZERO(cpumask);

	cpu_id = -1;
	cpu_id = (*pops->psm_get_next_processorid)(cpu_id);
	/*
	 * Only add boot_ncpus CPUs to mp_cpus. Other CPUs will be handled
	 * by CPU DR driver at runtime.
	 */
	for (cnt = 0; cpu_id != -1 && cnt < boot_ncpus; cnt++) {
		CPUSET_ADD(cpumask, cpu_id);
		cpu_id = (*pops->psm_get_next_processorid)(cpu_id);
	}

	mp_cpus = cpumask;

	/* MP related routines */
	ap_mlsetup = pops->psm_post_cpu_start;
	send_dirintf = pops->psm_send_ipi;

	/* optional MP related routines */
	if (pops->psm_shutdown)
		psm_shutdownf = pops->psm_shutdown;
	if (pops->psm_preshutdown)
		psm_preshutdownf = pops->psm_preshutdown;
	if (pops->psm_notify_func)
		psm_notifyf = pops->psm_notify_func;
	if (pops->psm_set_idlecpu)
		psm_set_idle_cpuf = pops->psm_set_idlecpu;
	if (pops->psm_unset_idlecpu)
		psm_unset_idle_cpuf = pops->psm_unset_idlecpu;

	psm_clkinit = pops->psm_clkinit;

	if (pops->psm_timer_reprogram)
		psm_timer_reprogram = pops->psm_timer_reprogram;

	if (pops->psm_timer_enable)
		psm_timer_enable = pops->psm_timer_enable;

	if (pops->psm_timer_disable)
		psm_timer_disable = pops->psm_timer_disable;

	if (pops->psm_post_cyclic_setup)
		psm_post_cyclic_setup = pops->psm_post_cyclic_setup;

	if (pops->psm_state)
		psm_state = pops->psm_state;

	/*
	 * Set these vectors here so they can be used by Suspend/Resume
	 * on UP machines.
	 */
	if (pops->psm_disable_intr)
		psm_disable_intr = pops->psm_disable_intr;
	if (pops->psm_enable_intr)
		psm_enable_intr = pops->psm_enable_intr;

	/*
	 * Set this vector so it can be used by vmbus (for Hyper-V)
	 * Need this even for single-CPU systems. This works for
	 * "pcplusmp" and "apix" platforms, but not "uppc" (because
	 * "Uni-processor PC" does not provide a _get_ipivect).
	 */
	psm_get_ipivect = pops->psm_get_ipivect;

	/* check for multiple CPUs */
	if (cnt < 2 && plat_dr_support_cpu() == B_FALSE)
		return;

	/* check for MP platforms */
	if (pops->psm_cpu_start == NULL)
		return;

	/*
	 * Set the dispatcher hook to enable cpu "wake up"
	 * when a thread becomes runnable.
	 */
	if (idle_cpu_use_hlt) {
		disp_enq_thread = cpu_wakeup;
#ifndef __xpv
		if (is_x86_feature(x86_featureset, X86FSET_MWAIT) &&
		    idle_cpu_prefer_mwait)
			disp_enq_thread = cpu_wakeup_mwait;
		non_deep_idle_disp_enq_thread = disp_enq_thread;
#endif
	}

	psm_get_pir_ipivect = pops->psm_get_pir_ipivect;
	psm_send_pir_ipi = pops->psm_send_pir_ipi;
	psm_cmci_setup = pops->psm_cmci_setup;

	(void) add_avintr((void *)NULL, XC_HI_PIL, xc_serv, "xc_intr",
	    (*pops->psm_get_ipivect)(XC_HI_PIL, PSM_INTR_IPI_HI),
	    NULL, NULL, NULL, NULL);

	(void) (*pops->psm_get_ipivect)(XC_CPUPOKE_PIL, PSM_INTR_POKE);
}

static void
mach_picinit()
{
	struct psm_ops *pops;

	pops = mach_set[0];

	/* register the interrupt handlers */
	setlvl = pops->psm_intr_enter;
	setlvlx = pops->psm_intr_exit;

	/* initialize the interrupt hardware */
	(*pops->psm_picinit)();

	/* set interrupt mask for current ipl */
	setspl = pops->psm_setspl;
	cli();
	setspl(CPU->cpu_pri);
}

uint_t cpu_freq;	/* MHz */
uint64_t cpu_freq_hz;	/* measured (in hertz) */

#define	MEGA_HZ		1000000

#ifdef __xpv

int xpv_cpufreq_workaround = 1;
int xpv_cpufreq_verbose = 0;

#else	/* __xpv */

static uint64_t
mach_calchz(uint32_t pit_counter, uint64_t *processor_clks)
{
	uint64_t cpu_hz;

	if ((pit_counter == 0) || (*processor_clks == 0) ||
	    (*processor_clks > (((uint64_t)-1) / PIT_HZ)))
		return (0);

	cpu_hz = ((uint64_t)PIT_HZ * *processor_clks) / pit_counter;

	return (cpu_hz);
}
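
/*
 * Illustrative numbers for mach_calchz() above: PIT_HZ is the PIT input
 * clock of roughly 1.19 MHz, so if freq_tsc() observes, say, 2500 TSC
 * cycles per PIT tick (processor_clks / pit_counter == 2500), the
 * computed CPU clock is about 2500 * 1.19 MHz, i.e. close to 3 GHz.
 * The guard above rejects inputs that would divide by zero or overflow
 * the 64-bit multiply.
 */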

#endif	/* __xpv */

static uint64_t
mach_getcpufreq(void)
{
#if defined(__xpv)
	vcpu_time_info_t *vti = &CPU->cpu_m.mcpu_vcpu_info->time;
	uint64_t cpu_hz;

	/*
	 * During dom0 bringup, it was noted that on at least one older
	 * Intel HT machine, the hypervisor initially gives a tsc_to_system_mul
	 * value that is quite wrong (the 3.06GHz clock was reported
	 * as 4.77GHz)
	 *
	 * The curious thing is, that if you stop the kernel at entry,
	 * breakpoint here and inspect the value with kmdb, the value
	 * is correct - but if you don't stop and simply enable the
	 * printf statement (below), you can see the bad value printed
	 * here. Almost as if something kmdb did caused the hypervisor to
	 * figure it out correctly. And, note that the hypervisor
	 * eventually -does- figure it out correctly ... if you look at
	 * the field later in the life of dom0, it is correct.
	 *
	 * For now, on dom0, we employ a slightly cheesy workaround of
	 * using the DOM0_PHYSINFO hypercall.
	 */
	if (DOMAIN_IS_INITDOMAIN(xen_info) && xpv_cpufreq_workaround) {
		cpu_hz = 1000 * xpv_cpu_khz();
	} else {
		cpu_hz = (UINT64_C(1000000000) << 32) / vti->tsc_to_system_mul;

		if (vti->tsc_shift < 0)
			cpu_hz <<= -vti->tsc_shift;
		else
			cpu_hz >>= vti->tsc_shift;
	}

	if (xpv_cpufreq_verbose)
		printf("mach_getcpufreq: system_mul 0x%x, shift %d, "
		    "cpu_hz %" PRId64 "Hz\n",
		    vti->tsc_to_system_mul, vti->tsc_shift, cpu_hz);

	return (cpu_hz);
#else	/* __xpv */
	uint32_t pit_counter;
	uint64_t processor_clks;

	if (is_x86_feature(x86_featureset, X86FSET_TSC)) {
		/*
		 * We have a TSC. freq_tsc() knows how to measure the number
		 * of clock cycles sampled against the PIT.
		 */
		ulong_t flags = clear_int_flag();
		processor_clks = freq_tsc(&pit_counter);
		restore_int_flag(flags);
		return (mach_calchz(pit_counter, &processor_clks));
	} else if (x86_vendor == X86_VENDOR_Cyrix || x86_type == X86_TYPE_P5) {
#if defined(__amd64)
		panic("mach_getcpufreq: no TSC!");
#elif defined(__i386)
		/*
		 * We are a Cyrix based on a 6x86 core or an Intel Pentium
		 * for which freq_notsc() knows how to measure the number of
		 * elapsed clock cycles sampled against the PIT
		 */
		ulong_t flags = clear_int_flag();
		processor_clks = freq_notsc(&pit_counter);
		restore_int_flag(flags);
		return (mach_calchz(pit_counter, &processor_clks));
#endif	/* __i386 */
	}

	/* We do not know how to calculate cpu frequency for this cpu. */
	return (0);
#endif	/* __xpv */
}

/*
 * If the clock speed of a cpu is found to be reported incorrectly, do not add
 * to this array, instead improve the accuracy of the algorithm that determines
 * the clock speed of the processor or extend the implementation to support the
 * vendor as appropriate. This is here only to support adjusting the speed on
 * older slower processors that mach_fixcpufreq() would not be able to account
 * for otherwise.
 */
static int x86_cpu_freq[] = { 60, 75, 80, 90, 120, 160, 166, 175, 180, 233 };

/*
 * On fast processors the clock frequency that is measured may be off by
 * a few MHz from the value printed on the part. This is a combination of
 * the factors that for such fast parts being off by this much is within
 * the tolerances for manufacture and because of the difficulties in the
 * measurement that can lead to small error. This function uses some
 * heuristics in order to tweak the value that was measured to match what
 * is most likely printed on the part.
 *
 * Some examples:
 *	AMD Athlon 1000 mhz measured as 998 mhz
 *	Intel Pentium III Xeon 733 mhz measured as 731 mhz
 *	Intel Pentium IV 1500 mhz measured as 1495 mhz
 *
 * If in the future this function is no longer sufficient to correct
 * for the error in the measurement, then the algorithm used to perform
 * the measurement will have to be improved in order to increase accuracy
 * rather than adding horrible and questionable kludges here.
 *
 * This is called after the cyclics subsystem because of the potential
 * that the heuristics within may give a worse estimate of the clock
 * frequency than the value that was measured.
 */
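/*
 * A worked example of the heuristic below, using the 733 MHz part from
 * the list above: with freq = 731, the nearest multiple of 200/3 MHz is
 * mul = (3*731 + 100) / 200 = 11 and near66 = (200*11 + 1) / 3 = 733
 * (the "+1" applies because mul >= 10, i.e. the part is 667 MHz or
 * faster and rounds up), giving delta66 = 2; the nearest multiple of
 * 50 MHz is near50 = 750 with delta50 = 19. near66 wins, and since the
 * correction of 2 MHz is within the limit of 6, cpu_freq becomes 733.
 */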
static void
mach_fixcpufreq(void)
{
	uint32_t freq, mul, near66, delta66, near50, delta50, fixed, delta, i;

	freq = (uint32_t)cpu_freq;

	/*
	 * Find the nearest integer multiple of 200/3 (about 66) MHz to the
	 * measured speed taking into account that the 667 MHz parts were
	 * the first to round-up.
	 */
	mul = (uint32_t)((3 * (uint64_t)freq + 100) / 200);
	near66 = (uint32_t)((200 * (uint64_t)mul + ((mul >= 10) ? 1 : 0)) / 3);
	delta66 = (near66 > freq) ? (near66 - freq) : (freq - near66);

	/* Find the nearest integer multiple of 50 MHz to the measured speed */
	mul = (freq + 25) / 50;
	near50 = mul * 50;
	delta50 = (near50 > freq) ? (near50 - freq) : (freq - near50);

	/* Find the closer of the two */
	if (delta66 < delta50) {
		fixed = near66;
		delta = delta66;
	} else {
		fixed = near50;
		delta = delta50;
	}

	if (fixed > INT_MAX)
		return;

	/*
	 * Some older parts have a core clock frequency that is not an
	 * integral multiple of 50 or 66 MHz. Check if one of the old
	 * clock frequencies is closer to the measured value than any
	 * of the integral multiples of 50 and 66, and if so set fixed
	 * and delta appropriately to represent the closest value.
	 */
	i = sizeof (x86_cpu_freq) / sizeof (int);
	while (i > 0) {
		i--;

		if (x86_cpu_freq[i] <= freq) {
			mul = freq - x86_cpu_freq[i];

			if (mul < delta) {
				fixed = x86_cpu_freq[i];
				delta = mul;
			}

			break;
		}

		mul = x86_cpu_freq[i] - freq;

		if (mul < delta) {
			fixed = x86_cpu_freq[i];
			delta = mul;
		}
	}

	/*
	 * Set a reasonable maximum for how much to correct the measured
	 * result by. This check is here to prevent the adjustment made
	 * by this function from being more harm than good. It is entirely
	 * possible that in the future parts will be made that are not
	 * integral multiples of 66 or 50 in clock frequency or that
	 * someone may overclock a part to some odd frequency. If the
	 * measured value is farther from the corrected value than
	 * allowed, then assume the corrected value is in error and use
	 * the measured value.
	 */
	if (6 < delta)
		return;

	cpu_freq = (int)fixed;
}

static int
machhztomhz(uint64_t cpu_freq_hz)
{
	uint64_t cpu_mhz;

	/* Round to nearest MHz */
	cpu_mhz = (cpu_freq_hz + (MEGA_HZ / 2)) / MEGA_HZ;

	if (cpu_mhz > INT_MAX)
		return (0);

	return ((int)cpu_mhz);
}

static int
mach_clkinit(int preferred_mode, int *set_mode)
{
	struct psm_ops *pops;
	int resolution;

	pops = mach_set[0];

	cpu_freq_hz = mach_getcpufreq();

	cpu_freq = machhztomhz(cpu_freq_hz);

	if (!is_x86_feature(x86_featureset, X86FSET_TSC) || (cpu_freq == 0))
		tsc_gethrtime_enable = 0;

#ifndef __xpv
	if (tsc_gethrtime_enable) {
		tsc_hrtimeinit(cpu_freq_hz);
	} else
#endif
	{
		if (pops->psm_hrtimeinit)
			(*pops->psm_hrtimeinit)();
		gethrtimef = pops->psm_gethrtime;
		gethrtimeunscaledf = gethrtimef;
		/* scalehrtimef will remain dummy */
	}

	mach_fixcpufreq();

	if (mach_ver[0] >= PSM_INFO_VER01_3) {
		if (preferred_mode == TIMER_ONESHOT) {
			resolution = (*pops->psm_clkinit)(0);
			if (resolution != 0) {
				*set_mode = TIMER_ONESHOT;
				return (resolution);
			}
		}

		/*
		 * either periodic mode was requested or could not set to
		 * one-shot mode
		 */
		resolution = (*pops->psm_clkinit)(hz);
		/*
		 * psm should be able to do periodic, so we do not check
		 * for return value of psm_clkinit here.
		 */
		*set_mode = TIMER_PERIODIC;
		return (resolution);
	} else {
		/*
		 * PSMI interface prior to PSMI_3 does not define a return
		 * value for psm_clkinit, so the return value is ignored.
		 */
		(void) (*pops->psm_clkinit)(hz);
		*set_mode = TIMER_PERIODIC;
		return (nsec_per_tick);
	}
}

/*ARGSUSED*/
static int
mach_softlvl_to_vect(int ipl)
{
	setsoftint = av_set_softint_pending;
	kdisetsoftint = kdi_av_set_softint_pending;

	return (PSM_SV_SOFTWARE);
}

#ifdef DEBUG
/*
 * This is here to allow us to simulate cpus that refuse to start.
 */
cpuset_t cpufailset;
#endif

int
mach_cpu_start(struct cpu *cp, void *ctx)
{
	struct psm_ops *pops = mach_set[0];
	processorid_t id = cp->cpu_id;

#ifdef DEBUG
	if (CPU_IN_SET(cpufailset, id))
		return (0);
#endif
	return ((*pops->psm_cpu_start)(id, ctx));
}

int
mach_cpuid_start(processorid_t id, void *ctx)
{
	struct psm_ops *pops = mach_set[0];

#ifdef DEBUG
	if (CPU_IN_SET(cpufailset, id))
		return (0);
#endif
	return ((*pops->psm_cpu_start)(id, ctx));
}

int
mach_cpu_stop(cpu_t *cp, void *ctx)
{
	struct psm_ops *pops = mach_set[0];
	psm_cpu_request_t request;

	if (pops->psm_cpu_ops == NULL) {
		return (ENOTSUP);
	}

	ASSERT(cp->cpu_id != -1);
	request.pcr_cmd = PSM_CPU_STOP;
	request.req.cpu_stop.cpuid = cp->cpu_id;
	request.req.cpu_stop.ctx = ctx;

	return ((*pops->psm_cpu_ops)(&request));
}

int
mach_cpu_add(mach_cpu_add_arg_t *argp, processorid_t *cpuidp)
{
	int rc;
	struct psm_ops *pops = mach_set[0];
	psm_cpu_request_t request;

	if (pops->psm_cpu_ops == NULL) {
		return (ENOTSUP);
	}

	request.pcr_cmd = PSM_CPU_ADD;
	request.req.cpu_add.argp = argp;
	request.req.cpu_add.cpuid = -1;
	rc = (*pops->psm_cpu_ops)(&request);
	if (rc == 0) {
		ASSERT(request.req.cpu_add.cpuid != -1);
		*cpuidp = request.req.cpu_add.cpuid;
	}

	return (rc);
}

int
mach_cpu_remove(processorid_t cpuid)
{
	struct psm_ops *pops = mach_set[0];
	psm_cpu_request_t request;

	if (pops->psm_cpu_ops == NULL) {
		return (ENOTSUP);
	}

	request.pcr_cmd = PSM_CPU_REMOVE;
	request.req.cpu_remove.cpuid = cpuid;

	return ((*pops->psm_cpu_ops)(&request));
}

/*
 * Default handler to create device node for CPU.
 * One reference count will be held on created device node.
 */
static int
mach_cpu_create_devinfo(cpu_t *cp, dev_info_t **dipp)
{
	int rv, circ;
	dev_info_t *dip;
	static kmutex_t cpu_node_lock;
	static dev_info_t *cpu_nex_devi = NULL;

	ASSERT(cp != NULL);
	ASSERT(dipp != NULL);
	*dipp = NULL;

	if (cpu_nex_devi == NULL) {
		mutex_enter(&cpu_node_lock);
		/* First check whether cpus exists. */
		cpu_nex_devi = ddi_find_devinfo("cpus", -1, 0);
		/* Create cpus if it doesn't exist. */
		if (cpu_nex_devi == NULL) {
			ndi_devi_enter(ddi_root_node(), &circ);
			rv = ndi_devi_alloc(ddi_root_node(), "cpus",
			    (pnode_t)DEVI_SID_NODEID, &dip);
			if (rv != NDI_SUCCESS) {
				mutex_exit(&cpu_node_lock);
				cmn_err(CE_CONT,
				    "?failed to create cpu nexus device.\n");
				return (PSM_FAILURE);
			}
			ASSERT(dip != NULL);
			(void) ndi_devi_online(dip, 0);
			ndi_devi_exit(ddi_root_node(), circ);
			cpu_nex_devi = dip;
		}
		mutex_exit(&cpu_node_lock);
	}

	/*
	 * create a child node for cpu identified as 'cpu_id'
	 */
	ndi_devi_enter(cpu_nex_devi, &circ);
	dip = ddi_add_child(cpu_nex_devi, "cpu", DEVI_SID_NODEID, -1);
	if (dip == NULL) {
		cmn_err(CE_CONT,
		    "?failed to create device node for cpu%d.\n", cp->cpu_id);
		rv = PSM_FAILURE;
	} else {
		*dipp = dip;
		(void) ndi_hold_devi(dip);
		rv = PSM_SUCCESS;
	}
	ndi_devi_exit(cpu_nex_devi, circ);

	return (rv);
}

/*
 * Create cpu device node in device tree and online it.
 * Return created dip with reference count held if requested.
 */
int
mach_cpu_create_device_node(struct cpu *cp, dev_info_t **dipp)
{
	int rv;
	dev_info_t *dip = NULL;

	ASSERT(psm_cpu_create_devinfo != NULL);
	rv = psm_cpu_create_devinfo(cp, &dip);
	if (rv == PSM_SUCCESS) {
		cpuid_set_cpu_properties(dip, cp->cpu_id, cp->cpu_m.mcpu_cpi);
		/* Recursively attach driver for parent nexus device. */
		if (i_ddi_attach_node_hierarchy(ddi_get_parent(dip)) ==
		    DDI_SUCCESS) {
			/* Configure cpu itself and descendants. */
			(void) ndi_devi_online(dip,
			    NDI_ONLINE_ATTACH | NDI_CONFIG);
		}
		if (dipp != NULL) {
			*dipp = dip;
		} else {
			(void) ndi_rele_devi(dip);
		}
	}

	return (rv);
}

/*
 * The dipp contains one of the following values on return:
 * - NULL if no device node found
 * - pointer to device node if found
 */
int
mach_cpu_get_device_node(struct cpu *cp, dev_info_t **dipp)
{
	*dipp = NULL;
	if (psm_cpu_get_devinfo != NULL) {
		if (psm_cpu_get_devinfo(cp, dipp) == PSM_SUCCESS) {
			return (PSM_SUCCESS);
		}
	}

	return (PSM_FAILURE);
}

/*ARGSUSED*/
static int
mach_translate_irq(dev_info_t *dip, int irqno)
{
	return (irqno);	/* default to NO translation */
}

static void
mach_notify_error(int level, char *errmsg)
{
	/*
	 * SL_FATAL is passed in once panicstr is set; deliver it
	 * as CE_PANIC. Also, translate SL_ codes back to CE_
	 * codes for the psmi handler.
	 */
	if (level & SL_FATAL)
		(*notify_error)(CE_PANIC, errmsg);
	else if (level & SL_WARN)
		(*notify_error)(CE_WARN, errmsg);
	else if (level & SL_NOTE)
		(*notify_error)(CE_NOTE, errmsg);
	else if (level & SL_CONSOLE)
		(*notify_error)(CE_CONT, errmsg);
}

/*
 * Provides the default basic intr_ops interface for the new DDI
 * interrupt framework if the PSM doesn't have one.
 *
 * Input:
 * dip     - pointer to the dev_info structure of the requested device
 * hdlp    - pointer to the internal interrupt handle structure for the
 *	     requested interrupt
 * intr_op - opcode for this call
 * result  - pointer to the integer that will hold the result to be
 *	     passed back if return value is PSM_SUCCESS
 *
 * Output:
 * return value is either PSM_SUCCESS or PSM_FAILURE
 */
static int
mach_intr_ops(dev_info_t *dip, ddi_intr_handle_impl_t *hdlp,
    psm_intr_op_t intr_op, int *result)
{
	struct intrspec *ispec;

	switch (intr_op) {
	case PSM_INTR_OP_CHECK_MSI:
		*result = hdlp->ih_type & ~(DDI_INTR_TYPE_MSI |
		    DDI_INTR_TYPE_MSIX);
		break;
	case PSM_INTR_OP_ALLOC_VECTORS:
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			*result = 1;
		else
			*result = 0;
		break;
	case PSM_INTR_OP_FREE_VECTORS:
		break;
	case PSM_INTR_OP_NAVAIL_VECTORS:
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			*result = 1;
		else
			*result = 0;
		break;
	case PSM_INTR_OP_XLATE_VECTOR:
		ispec = ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp;
		*result = psm_translate_irq(dip, ispec->intrspec_vec);
		break;
	case PSM_INTR_OP_GET_CAP:
		*result = 0;
		break;
	case PSM_INTR_OP_GET_PENDING:
	case PSM_INTR_OP_CLEAR_MASK:
	case PSM_INTR_OP_SET_MASK:
	case PSM_INTR_OP_GET_SHARED:
	case PSM_INTR_OP_SET_PRI:
	case PSM_INTR_OP_SET_CAP:
	case PSM_INTR_OP_SET_CPU:
	case PSM_INTR_OP_GET_INTR:
	default:
		return (PSM_FAILURE);
	}
	return (PSM_SUCCESS);
}

/*
 * Return 1 if CMT load balancing policies should be
 * implemented across instances of the specified hardware
 * sharing relationship.
 */
int
pg_cmt_load_bal_hw(pghw_type_t hw)
{
	if (hw == PGHW_IPIPE ||
	    hw == PGHW_FPU ||
	    hw == PGHW_PROCNODE ||
	    hw == PGHW_CHIP)
		return (1);
	else
		return (0);
}

/*
 * Return 1 if thread affinity policies should be implemented
 * for instances of the specified hardware sharing relationship.
 */
int
pg_cmt_affinity_hw(pghw_type_t hw)
{
	if (hw == PGHW_CACHE)
		return (1);
	else
		return (0);
}

/*
 * Return number of counter events requested to measure hardware capacity and
 * utilization and set up CPC requests for the specified CPU as needed.
 *
 * May return 0 when platform or processor specific code knows that no CPC
 * events should be programmed on this CPU, or -1 when platform or processor
 * specific code doesn't know which counter events are best to use and common
 * code should decide for itself.
 */
int
/* LINTED E_FUNC_ARG_UNUSED */
cu_plat_cpc_init(cpu_t *cp, kcpc_request_list_t *reqs, int nreqs)
{
	const char *impl_name;

	/*
	 * Return error if pcbe_ops not set
	 */
	if (pcbe_ops == NULL)
		return (-1);

	/*
	 * Return that no CPC events should be programmed on hyperthreaded
	 * Pentium 4 and return error for all other x86 processors to tell
	 * common code to decide what counter events to program on those CPUs
	 * for measuring hardware capacity and utilization
	 */
	impl_name = pcbe_ops->pcbe_impl_name();
	if (impl_name != NULL && strcmp(impl_name, PCBE_IMPL_NAME_P4HT) == 0)
		return (0);
	else
		return (-1);
}