/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2022 Oxide Computer Co.
 */
/*
 * Copyright (c) 2009, Intel Corporation.
 * All Rights Reserved.
 */

#include <sys/x86_archext.h>
#include <sys/machsystm.h>
#include <sys/archsystm.h>
#include <sys/x_call.h>
#include <sys/acpi/acpi.h>
#include <sys/acpica.h>
#include <sys/speedstep.h>
#include <sys/cpu_acpi.h>
#include <sys/cpupm.h>
#include <sys/dtrace.h>
#include <sys/sdt.h>

static int speedstep_init(cpu_t *);
static void speedstep_fini(cpu_t *);
static void speedstep_power(cpuset_t, uint32_t);
static void speedstep_stop(cpu_t *);
static boolean_t speedstep_turbo_supported(void);

/*
 * Interfaces for modules implementing Intel's Enhanced SpeedStep.
 */
cpupm_state_ops_t speedstep_ops = {
	"Enhanced SpeedStep Technology",
	speedstep_init,
	speedstep_fini,
	speedstep_power,
	speedstep_stop
};

/*
 * Error returns
 */
#define	ESS_RET_SUCCESS		0x00
#define	ESS_RET_NO_PM		0x01
#define	ESS_RET_UNSUP_STATE	0x02

/*
 * MSR registers for changing and reading processor power state.
 */
#define	IA32_PERF_STAT_MSR	0x198
#define	IA32_PERF_CTL_MSR	0x199

#define	IA32_CPUID_TSC_CONSTANT	0xF30
#define	IA32_MISC_ENABLE_MSR	0x1A0
#define	IA32_MISC_ENABLE_EST	(1<<16)
#define	IA32_MISC_ENABLE_CXE	(1<<25)

#define	CPUID_TURBO_SUPPORT	(1 << 1)

/*
 * Debugging support
 */
#ifdef	DEBUG
volatile int ess_debug = 0;
#define	ESSDEBUG(arglist) if (ess_debug) printf arglist;
#else
#define	ESSDEBUG(arglist)
#endif

/*
 * Write the ctrl register.  How it is written depends upon the _PCT
 * ACPI object value.
 */
static void
write_ctrl(cpu_acpi_handle_t handle, uint32_t ctrl)
{
	cpu_acpi_pct_t *pct_ctrl;
	uint64_t reg;

	pct_ctrl = CPU_ACPI_PCT_CTRL(handle);

	switch (pct_ctrl->cr_addrspace_id) {
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		/*
		 * Read the current power state so that reserved bits are
		 * preserved, compose the new value, and write it back.
		 */
		reg = rdmsr(IA32_PERF_CTL_MSR);
		reg &= ~((uint64_t)0xFFFF);
		reg |= ctrl;
		wrmsr(IA32_PERF_CTL_MSR, reg);
		break;

	case ACPI_ADR_SPACE_SYSTEM_IO:
		(void) cpu_acpi_write_port(pct_ctrl->cr_address, ctrl,
		    pct_ctrl->cr_width);
		break;

	default:
		DTRACE_PROBE1(ess_ctrl_unsupported_type, uint8_t,
		    pct_ctrl->cr_addrspace_id);
		return;
	}

	DTRACE_PROBE1(ess_ctrl_write, uint32_t, ctrl);
}
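/*
 * Illustrative sketch only (not part of the original driver): the read
 * side of the same _PCT protocol.  A status read would mirror
 * write_ctrl(), using IA32_PERF_STAT_MSR in the fixed-hardware case and
 * cpu_acpi_read_port() (assumed to be available from cpu_acpi.h) in the
 * system-I/O case.  The hypothetical example_read_status() below is
 * compiled out so it cannot affect this module.
 */
#if 0
static uint32_t
example_read_status(cpu_acpi_handle_t handle)
{
	cpu_acpi_pct_t *pct_stat;
	uint64_t reg;
	uint32_t stat = 0;

	pct_stat = CPU_ACPI_PCT_STATUS(handle);

	switch (pct_stat->cr_addrspace_id) {
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		/* The low 16 bits of the MSR hold the current status. */
		reg = rdmsr(IA32_PERF_STAT_MSR);
		stat = reg & 0xFFFF;
		break;

	case ACPI_ADR_SPACE_SYSTEM_IO:
		(void) cpu_acpi_read_port(pct_stat->cr_address, &stat,
		    pct_stat->cr_width);
		break;

	default:
		break;
	}

	return (stat);
}
#endif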
/*
 * Transition the current processor to the requested state.
 */
int
speedstep_pstate_transition(xc_arg_t arg1, xc_arg_t arg2 __unused,
    xc_arg_t arg3 __unused)
{
	uint32_t req_state = (uint32_t)arg1;
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)CPU->cpu_m.mcpu_pm_mach_state;
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
	cpu_acpi_pstate_t *req_pstate;
	uint32_t ctrl;

	req_pstate = (cpu_acpi_pstate_t *)CPU_ACPI_PSTATES(handle);
	req_pstate += req_state;

	DTRACE_PROBE1(ess_transition, uint32_t, CPU_ACPI_FREQ(req_pstate));

	/*
	 * Initiate the processor P-state change.
	 */
	ctrl = CPU_ACPI_PSTATE_CTRL(req_pstate);
	write_ctrl(handle, ctrl);

	if (mach_state->ms_turbo != NULL)
		cpupm_record_turbo_info(mach_state->ms_turbo,
		    mach_state->ms_pstate.cma_state.pstate, req_state);

	mach_state->ms_pstate.cma_state.pstate = req_state;
	cpu_set_curr_clock(((uint64_t)CPU_ACPI_FREQ(req_pstate) * 1000000));
	return (0);
}

static void
speedstep_power(cpuset_t set, uint32_t req_state)
{
	/*
	 * If the thread is already running on a target CPU, just make the
	 * transition request directly.  Otherwise, we'll need to make a
	 * cross-call to the remaining CPUs in the set.
	 */
	kpreempt_disable();
	if (CPU_IN_SET(set, CPU->cpu_id)) {
		(void) speedstep_pstate_transition(req_state, 0, 0);
		CPUSET_DEL(set, CPU->cpu_id);
	}
	if (!CPUSET_ISNULL(set)) {
		xc_call((xc_arg_t)req_state, 0, 0, CPUSET2BV(set),
		    speedstep_pstate_transition);
	}
	kpreempt_enable();
}

/*
 * Validate that this processor supports SpeedStep and, if so,
 * get the P-state data from ACPI and cache it.
 */
static int
speedstep_init(cpu_t *cp)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
	cpu_acpi_pct_t *pct_stat;
	static int logged = 0;

	ESSDEBUG(("speedstep_init: processor %d\n", cp->cpu_id));

	/*
	 * Cache the P-state specific ACPI data.
	 */
	if (cpu_acpi_cache_pstate_data(handle) != 0) {
		if (!logged) {
			cmn_err(CE_NOTE, "!SpeedStep support is being "
			    "disabled due to errors parsing ACPI P-state "
			    "objects exported by BIOS.");
			logged = 1;
		}
		speedstep_fini(cp);
		return (ESS_RET_NO_PM);
	}

	pct_stat = CPU_ACPI_PCT_STATUS(handle);
	switch (pct_stat->cr_addrspace_id) {
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		ESSDEBUG(("Transitions will use fixed hardware\n"));
		break;
	case ACPI_ADR_SPACE_SYSTEM_IO:
		ESSDEBUG(("Transitions will use system IO\n"));
		break;
	default:
		cmn_err(CE_WARN, "!_PCT configured for unsupported "
		    "addrspace = %d.", pct_stat->cr_addrspace_id);
		cmn_err(CE_NOTE, "!CPU power management will not function.");
		speedstep_fini(cp);
		return (ESS_RET_NO_PM);
	}

	cpupm_alloc_domains(cp, CPUPM_P_STATES);

	if (speedstep_turbo_supported())
		mach_state->ms_turbo = cpupm_turbo_init(cp);

	ESSDEBUG(("Processor %d succeeded.\n", cp->cpu_id))
	return (ESS_RET_SUCCESS);
}
/*
 * Free resources allocated by speedstep_init().
 */
static void
speedstep_fini(cpu_t *cp)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state);
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;

	cpupm_free_domains(&cpupm_pstate_domains);
	cpu_acpi_free_pstate_data(handle);

	if (mach_state->ms_turbo != NULL)
		cpupm_turbo_fini(mach_state->ms_turbo);
	mach_state->ms_turbo = NULL;
}

/*
 * Stop P-state management on this processor, releasing its P-state
 * domain and turbo resources.
 */
static void
speedstep_stop(cpu_t *cp)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state);
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;

	cpupm_remove_domains(cp, CPUPM_P_STATES, &cpupm_pstate_domains);
	cpu_acpi_free_pstate_data(handle);

	if (mach_state->ms_turbo != NULL)
		cpupm_turbo_fini(mach_state->ms_turbo);
	mach_state->ms_turbo = NULL;
}

/*
 * Determine whether this family/model supports Enhanced SpeedStep.
 */
boolean_t
speedstep_supported(uint_t family, uint_t model)
{
	struct cpuid_regs cpu_regs;

	/* Required features */
	ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));
	if (!is_x86_feature(x86_featureset, X86FSET_MSR)) {
		return (B_FALSE);
	}

	/*
	 * We only support family/model combinations which
	 * are P-state TSC invariant.
	 */
	if (!((family == 0xf && model >= 0x3) ||
	    (family == 0x6 && model >= 0xe))) {
		return (B_FALSE);
	}

	/*
	 * Enhanced SpeedStep supported?
	 */
	cpu_regs.cp_eax = 0x1;
	(void) __cpuid_insn(&cpu_regs);
	if (!(cpu_regs.cp_ecx & CPUID_INTC_ECX_EST)) {
		return (B_FALSE);
	}

	return (B_TRUE);
}

/*
 * Determine whether the processor advertises turbo mode.
 */
boolean_t
speedstep_turbo_supported(void)
{
	struct cpuid_regs cpu_regs;

	/* Required features */
	ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));
	if (!is_x86_feature(x86_featureset, X86FSET_MSR)) {
		return (B_FALSE);
	}

	/*
	 * Turbo mode supported?
	 */
	cpu_regs.cp_eax = 0x6;
	(void) __cpuid_insn(&cpu_regs);
	if (!(cpu_regs.cp_eax & CPUID_TURBO_SUPPORT)) {
		return (B_FALSE);
	}

	return (B_TRUE);
}
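/*
 * Illustrative sketch only (not part of this driver): how a platform
 * power-management layer might use speedstep_supported() to select the
 * speedstep_ops vector.  The hypothetical example_select_pstate_ops()
 * below is an assumption for illustration; the real dispatch lives in
 * the cpupm machine-dependent code.  It is compiled out so it cannot
 * affect this module.
 */
#if 0
static cpupm_state_ops_t *
example_select_pstate_ops(cpu_t *cp)
{
	/* Only Intel parts with a P-state TSC-invariant family/model. */
	if (cpuid_getvendor(cp) == X86_VENDOR_Intel &&
	    speedstep_supported(cpuid_getfamily(cp), cpuid_getmodel(cp)))
		return (&speedstep_ops);
	return (NULL);
}
#endif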