/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/x86_archext.h>
#include <sys/machsystm.h>
#include <sys/x_call.h>
#include <sys/acpi/acpi.h>
#include <sys/acpica.h>
#include <sys/pwrnow.h>
#include <sys/cpu_acpi.h>
#include <sys/cpupm.h>
#include <sys/dtrace.h>
#include <sys/sdt.h>

static int pwrnow_init(cpu_t *);
static void pwrnow_fini(cpu_t *);
static void pwrnow_power(cpuset_t, uint32_t);
static void pwrnow_stop(cpu_t *);

/*
 * Interfaces for modules implementing AMD's PowerNow!.
 */
cpupm_state_ops_t pwrnow_ops = {
	"PowerNow! Technology",
	pwrnow_init,
	pwrnow_fini,
	pwrnow_power,
	pwrnow_stop
};

/*
 * Error returns
 */
#define	PWRNOW_RET_SUCCESS		0x00
#define	PWRNOW_RET_NO_PM		0x01
#define	PWRNOW_RET_UNSUP_STATE		0x02
#define	PWRNOW_RET_TRANS_INCOMPLETE	0x03

#define	PWRNOW_LATENCY_WAIT		10

/*
 * MSR registers for changing and reading processor power state.
 */
#define	PWRNOW_PERF_CTL_MSR		0xC0010062
#define	PWRNOW_PERF_STATUS_MSR		0xC0010063

#define	AMD_CPUID_PSTATE_HARDWARE	(1<<7)
#define	AMD_CPUID_TSC_CONSTANT		(1<<8)

/*
 * Debugging support
 */
#ifdef	DEBUG
volatile int pwrnow_debug = 0;
#define	PWRNOW_DEBUG(arglist) if (pwrnow_debug) printf arglist;
#else
#define	PWRNOW_DEBUG(arglist)
#endif

/*
 * Write the ctrl register.
 */
static void
write_ctrl(cpu_acpi_handle_t handle, uint32_t ctrl)
{
	cpu_acpi_pct_t *pct_ctrl;
	uint64_t reg;

	pct_ctrl = CPU_ACPI_PCT_CTRL(handle);

	switch (pct_ctrl->cr_addrspace_id) {
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		reg = ctrl;
		wrmsr(PWRNOW_PERF_CTL_MSR, reg);
		break;

	default:
		DTRACE_PROBE1(pwrnow_ctrl_unsupported_type, uint8_t,
		    pct_ctrl->cr_addrspace_id);
		return;
	}

	DTRACE_PROBE1(pwrnow_ctrl_write, uint32_t, ctrl);
}

/*
 * Transition the current processor to the requested state.
 */
static void
pwrnow_pstate_transition(uint32_t req_state)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)CPU->cpu_m.mcpu_pm_mach_state;
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
	cpu_acpi_pstate_t *req_pstate;
	uint32_t ctrl;

	req_pstate = (cpu_acpi_pstate_t *)CPU_ACPI_PSTATES(handle);
	req_pstate += req_state;

	DTRACE_PROBE1(pwrnow_transition_freq, uint32_t,
	    CPU_ACPI_FREQ(req_pstate));

	/*
	 * Initiate the processor p-state change.
	 */
	ctrl = CPU_ACPI_PSTATE_CTRL(req_pstate);
	write_ctrl(handle, ctrl);

	mach_state->ms_pstate.cma_state.pstate = req_state;
	cpu_set_curr_clock((uint64_t)CPU_ACPI_FREQ(req_pstate) * 1000000);
}

/*
 * Set the P-state of every CPU in "set" to req_state.
 */
static void
pwrnow_power(cpuset_t set, uint32_t req_state)
{
	/*
	 * If thread is already running on target CPU then just
	 * make the transition request.  Otherwise, we'll need to
	 * make a cross-call.
	 */
	kpreempt_disable();
	if (CPU_IN_SET(set, CPU->cpu_id)) {
		pwrnow_pstate_transition(req_state);
		CPUSET_DEL(set, CPU->cpu_id);
	}
	if (!CPUSET_ISNULL(set)) {
		xc_call((xc_arg_t)req_state, NULL, NULL,
		    CPUSET2BV(set), (xc_func_t)pwrnow_pstate_transition);
	}
	kpreempt_enable();
}

/*
 * Validate that this processor supports PowerNow! and if so,
 * get the P-state data from ACPI and cache it.
 */
static int
pwrnow_init(cpu_t *cp)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
	cpu_acpi_pct_t *pct_stat;

	PWRNOW_DEBUG(("pwrnow_init: processor %d\n", cp->cpu_id));

	/*
	 * Cache the P-state specific ACPI data.
	 */
	if (cpu_acpi_cache_pstate_data(handle) != 0) {
		cmn_err(CE_NOTE, "!PowerNow! support is being "
		    "disabled due to errors parsing ACPI P-state objects "
		    "exported by BIOS.");
		pwrnow_fini(cp);
		return (PWRNOW_RET_NO_PM);
	}

	pct_stat = CPU_ACPI_PCT_STATUS(handle);
	switch (pct_stat->cr_addrspace_id) {
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		PWRNOW_DEBUG(("Transitions will use fixed hardware\n"));
		break;
	default:
		cmn_err(CE_WARN, "!_PCT configured for unsupported "
		    "addrspace = %d.", pct_stat->cr_addrspace_id);
		cmn_err(CE_NOTE, "!CPU power management will not function.");
		pwrnow_fini(cp);
		return (PWRNOW_RET_NO_PM);
	}

	cpupm_alloc_domains(cp, CPUPM_P_STATES);

	PWRNOW_DEBUG(("Processor %d succeeded.\n", cp->cpu_id));
	return (PWRNOW_RET_SUCCESS);
}

/*
 * Free resources allocated by pwrnow_init().
 */
static void
pwrnow_fini(cpu_t *cp)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state);
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;

	cpupm_free_domains(&cpupm_pstate_domains);
	cpu_acpi_free_pstate_data(handle);
}

/*
 * Determine whether this processor advertises the CPUID features
 * required for PowerNow! support.
 */
boolean_t
pwrnow_supported()
{
	struct cpuid_regs cpu_regs;

	/* Required features */
	if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
	    !is_x86_feature(x86_featureset, X86FSET_MSR)) {
		PWRNOW_DEBUG(("No CPUID or MSR support."));
		return (B_FALSE);
	}

	/*
	 * Get the Advanced Power Management Information.
	 */
	cpu_regs.cp_eax = 0x80000007;
	(void) __cpuid_insn(&cpu_regs);

	/*
	 * We currently only support CPU power management of
	 * processors that are P-state TSC invariant.
	 */
	if (!(cpu_regs.cp_edx & AMD_CPUID_TSC_CONSTANT)) {
		PWRNOW_DEBUG(("No support for CPUs that are not P-state "
		    "TSC invariant.\n"));
		return (B_FALSE);
	}

	/*
	 * We only support the "Fire and Forget" style of PowerNow! (i.e.,
	 * single MSR write to change speed).
	 */
	if (!(cpu_regs.cp_edx & AMD_CPUID_PSTATE_HARDWARE)) {
		PWRNOW_DEBUG(("Hardware P-State control is not supported.\n"));
		return (B_FALSE);
	}
	return (B_TRUE);
}

/*
 * Remove this CPU from its P-state domain and free its cached
 * ACPI P-state data.
 */
static void
pwrnow_stop(cpu_t *cp)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state);
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;

	cpupm_remove_domains(cp, CPUPM_P_STATES, &cpupm_pstate_domains);
	cpu_acpi_free_pstate_data(handle);
}