/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/x86_archext.h>
#include <sys/machsystm.h>
#include <sys/x_call.h>
#include <sys/acpi/acpi.h>
#include <sys/acpica.h>
#include <sys/pwrnow.h>
#include <sys/cpu_acpi.h>
#include <sys/cpupm.h>
#include <sys/dtrace.h>
#include <sys/sdt.h>

static int pwrnow_init(cpu_t *);
static void pwrnow_fini(cpu_t *);
static void pwrnow_power(cpuset_t, uint32_t);

/*
 * Interfaces for modules implementing AMD's PowerNow!.
 */
cpupm_state_ops_t pwrnow_ops = {
	"PowerNow! Technology",
	pwrnow_init,
	pwrnow_fini,
	pwrnow_power
};

/*
 * Error returns
 */
#define	PWRNOW_RET_SUCCESS		0x00
#define	PWRNOW_RET_NO_PM		0x01
#define	PWRNOW_RET_UNSUP_STATE		0x02
#define	PWRNOW_RET_TRANS_INCOMPLETE	0x03

#define	PWRNOW_LATENCY_WAIT		10

/*
 * MSR registers for changing and reading processor power state.
 */
#define	PWRNOW_PERF_CTL_MSR		0xC0010062
#define	PWRNOW_PERF_STATUS_MSR		0xC0010063

#define	AMD_CPUID_PSTATE_HARDWARE	(1<<7)
#define	AMD_CPUID_TSC_CONSTANT		(1<<8)

/*
 * Debugging support
 */
#ifdef	DEBUG
volatile int pwrnow_debug = 0;
#define	PWRNOW_DEBUG(arglist) if (pwrnow_debug) printf arglist;
#else
#define	PWRNOW_DEBUG(arglist)
#endif

/*
 * Write the ctrl register.
 */
static void
write_ctrl(cpu_acpi_handle_t handle, uint32_t ctrl)
{
	cpu_acpi_pct_t *pct_ctrl;
	uint64_t reg;

	pct_ctrl = CPU_ACPI_PCT_CTRL(handle);

	switch (pct_ctrl->cr_addrspace_id) {
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		reg = ctrl;
		wrmsr(PWRNOW_PERF_CTL_MSR, reg);
		break;

	default:
		DTRACE_PROBE1(pwrnow_ctrl_unsupported_type, uint8_t,
		    pct_ctrl->cr_addrspace_id);
		return;
	}

	DTRACE_PROBE1(pwrnow_ctrl_write, uint32_t, ctrl);
}

/*
 * Transition the current processor to the requested state.
 */
static void
pwrnow_pstate_transition(uint32_t req_state)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)CPU->cpu_m.mcpu_pm_mach_state;
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
	cpu_acpi_pstate_t *req_pstate;
	uint32_t ctrl;

	req_pstate = (cpu_acpi_pstate_t *)CPU_ACPI_PSTATES(handle);
	req_pstate += req_state;

	DTRACE_PROBE1(pwrnow_transition_freq, uint32_t,
	    CPU_ACPI_FREQ(req_pstate));

	/*
	 * Initiate the processor p-state change.
	 */
	ctrl = CPU_ACPI_PSTATE_CTRL(req_pstate);
	write_ctrl(handle, ctrl);

	mach_state->ms_pstate.cma_state.pstate = req_state;
	cpu_set_curr_clock((uint64_t)CPU_ACPI_FREQ(req_pstate) * 1000000);
}

static void
pwrnow_power(cpuset_t set, uint32_t req_state)
{
	/*
	 * If thread is already running on target CPU then just
	 * make the transition request. Otherwise, we'll need to
	 * make a cross-call.
	 */
	kpreempt_disable();
	if (CPU_IN_SET(set, CPU->cpu_id)) {
		pwrnow_pstate_transition(req_state);
		CPUSET_DEL(set, CPU->cpu_id);
	}
	if (!CPUSET_ISNULL(set)) {
		xc_call((xc_arg_t)req_state, NULL, NULL, X_CALL_HIPRI,
		    set, (xc_func_t)pwrnow_pstate_transition);
	}
	kpreempt_enable();
}

/*
 * Validate that this processor supports PowerNow! and if so,
 * get the P-state data from ACPI and cache it.
 */
static int
pwrnow_init(cpu_t *cp)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
	cpu_acpi_pct_t *pct_stat;

	PWRNOW_DEBUG(("pwrnow_init: processor %d\n", cp->cpu_id));

	/*
	 * Cache the P-state specific ACPI data.
	 */
	if (cpu_acpi_cache_pstate_data(handle) != 0) {
		PWRNOW_DEBUG(("Failed to cache ACPI data\n"));
		pwrnow_fini(cp);
		return (PWRNOW_RET_NO_PM);
	}

	pct_stat = CPU_ACPI_PCT_STATUS(handle);
	switch (pct_stat->cr_addrspace_id) {
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		PWRNOW_DEBUG(("Transitions will use fixed hardware\n"));
		break;
	default:
		cmn_err(CE_WARN, "!_PCT configured for unsupported "
		    "addrspace = %d.", pct_stat->cr_addrspace_id);
		cmn_err(CE_NOTE, "!CPU power management will not function.");
		pwrnow_fini(cp);
		return (PWRNOW_RET_NO_PM);
	}

	cpupm_alloc_domains(cp, CPUPM_P_STATES);

	PWRNOW_DEBUG(("Processor %d succeeded.\n", cp->cpu_id))
	return (PWRNOW_RET_SUCCESS);
}

/*
 * Free resources allocated by pwrnow_init().
 */
static void
pwrnow_fini(cpu_t *cp)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state);
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;

	cpupm_free_domains(&cpupm_pstate_domains);
	cpu_acpi_free_pstate_data(handle);
}

boolean_t
pwrnow_supported()
{
	struct cpuid_regs cpu_regs;

	/* Required features */
	if (!(x86_feature & X86_CPUID) ||
	    !(x86_feature & X86_MSR)) {
		PWRNOW_DEBUG(("No CPUID or MSR support."));
		return (B_FALSE);
	}

	/*
	 * Get the Advanced Power Management Information.
	 */
	cpu_regs.cp_eax = 0x80000007;
	(void) __cpuid_insn(&cpu_regs);

	/*
	 * We currently only support CPU power management of
	 * processors that are P-state TSC invariant.
	 */
	if (!(cpu_regs.cp_edx & AMD_CPUID_TSC_CONSTANT)) {
		PWRNOW_DEBUG(("No support for CPUs that are not P-state "
		    "TSC invariant.\n"));
		return (B_FALSE);
	}

	/*
	 * We only support the "Fire and Forget" style of PowerNow! (i.e.,
	 * single MSR write to change speed).
	 */
	if (!(cpu_regs.cp_edx & AMD_CPUID_PSTATE_HARDWARE)) {
		PWRNOW_DEBUG(("Hardware P-State control is not supported.\n"));
		return (B_FALSE);
	}
	return (B_TRUE);
}