/*-
 * Copyright (c) 2001 Michael Smith
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_acpi.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bus.h>

#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include "acpi.h"

#include <dev/acpica/acpivar.h>

/*
 * Support for ACPI Processor devices.
 *
 * Note that this only provides ACPI 1.0 support (with the exception of the
 * PSTATE_CNT field).  2.0 support will involve implementing _PTC, _PCT,
 * _PSS and _PPC.
 */

/*
 * Hooks for the ACPI CA debugging infrastructure
 */
#define _COMPONENT	ACPI_PROCESSOR
MODULE_NAME("PROCESSOR")

struct acpi_cpu_softc {
    device_t		cpu_dev;
    ACPI_HANDLE		cpu_handle;

    u_int32_t		cpu_id;

    /* CPU throttling control register */
    struct resource	*cpu_p_blk;
#define CPU_GET_P_CNT(sc)	(bus_space_read_4(rman_get_bustag((sc)->cpu_p_blk),	\
						  rman_get_bushandle((sc)->cpu_p_blk),	\
						  0))
#define CPU_SET_P_CNT(sc, val)	(bus_space_write_4(rman_get_bustag((sc)->cpu_p_blk),	\
						   rman_get_bushandle((sc)->cpu_p_blk),	\
						   0, (val)))
#define CPU_P_CNT_THT_EN	(1<<4)
};

/*
 * Speeds are stored in counts, from 1 - CPU_MAX_SPEED, and
 * reported to the user in tenths of a percent.
 */
static u_int32_t	cpu_duty_offset;
static u_int32_t	cpu_duty_width;
#define CPU_MAX_SPEED		(1 << cpu_duty_width)
#define CPU_SPEED_PERCENT(x)	((1000 * (x)) / CPU_MAX_SPEED)
#define CPU_SPEED_PRINTABLE(x)	(CPU_SPEED_PERCENT(x) / 10),(CPU_SPEED_PERCENT(x) % 10)
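/*
 * Worked example (using a duty width of 3 purely for illustration; the
 * real value comes from the FADT): CPU_MAX_SPEED is 1 << 3 = 8, so a
 * count of 4 gives CPU_SPEED_PERCENT(4) = (1000 * 4) / 8 = 500, which
 * CPU_SPEED_PRINTABLE() renders as "50.0"; the lowest count, 1, renders
 * as "12.5".
 */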

static u_int32_t	cpu_smi_cmd;	/* should be a generic way to do this */
static u_int8_t		cpu_pstate_cnt;

static u_int32_t	cpu_current_state;
static u_int32_t	cpu_performance_state;
static u_int32_t	cpu_economy_state;
static u_int32_t	cpu_max_state;

static device_t		*cpu_devices;
static int		cpu_ndevices;

static struct sysctl_ctx_list	acpi_cpu_sysctl_ctx;
static struct sysctl_oid	*acpi_cpu_sysctl_tree;

static int	acpi_cpu_probe(device_t dev);
static int	acpi_cpu_attach(device_t dev);
static void	acpi_cpu_init_throttling(void *arg);
static void	acpi_cpu_set_speed(u_int32_t speed);
static void	acpi_cpu_powerprofile(void *arg);
static int	acpi_cpu_speed_sysctl(SYSCTL_HANDLER_ARGS);

static device_method_t acpi_cpu_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,	acpi_cpu_probe),
    DEVMETHOD(device_attach,	acpi_cpu_attach),

    {0, 0}
};

static driver_t acpi_cpu_driver = {
    "acpi_cpu",
    acpi_cpu_methods,
    sizeof(struct acpi_cpu_softc),
};

devclass_t acpi_cpu_devclass;
DRIVER_MODULE(acpi_cpu, acpi, acpi_cpu_driver, acpi_cpu_devclass, 0, 0);

static int
acpi_cpu_probe(device_t dev)
{
    if (acpi_get_type(dev) == ACPI_TYPE_PROCESSOR) {
	device_set_desc(dev, "CPU");	/* XXX get more verbose description? */
	return(0);
    }
    return(ENXIO);
}

static int
acpi_cpu_attach(device_t dev)
{
    struct acpi_cpu_softc	*sc;
    struct acpi_softc		*acpi_sc;
    ACPI_OBJECT			processor;
    ACPI_BUFFER			buf;
    ACPI_STATUS			status;
    u_int32_t			p_blk;
    u_int32_t			p_blk_length;
    u_int32_t			duty_end;
    int				rid;

    FUNCTION_TRACE(__func__);

    ACPI_ASSERTLOCK;

    sc = device_get_softc(dev);
    sc->cpu_dev = dev;
    sc->cpu_handle = acpi_get_handle(dev);

    /*
     * Get global parameters from the FADT.
     */
    if (device_get_unit(sc->cpu_dev) == 0) {
	cpu_duty_offset = AcpiGbl_FADT->DutyOffset;
	cpu_duty_width = AcpiGbl_FADT->DutyWidth;
	cpu_smi_cmd = AcpiGbl_FADT->SmiCmd;
	cpu_pstate_cnt = AcpiGbl_FADT->PstateCnt;

	/* validate the offset/width */
	duty_end = cpu_duty_offset + cpu_duty_width - 1;
	/* check that it fits */
	if (duty_end > 31) {
	    printf("acpi_cpu: CLK_VAL field overflows P_CNT register\n");
	    cpu_duty_width = 0;
	}
	/* check for overlap with the THT_EN bit */
	if ((cpu_duty_offset <= 4) && (duty_end >= 4)) {
	    printf("acpi_cpu: CLK_VAL field overlaps THT_EN bit\n");
	    cpu_duty_width = 0;
	}

	/*
	 * Start the throttling process once the probe phase completes, if
	 * we think that it's going to be useful.  If the duty width value
	 * is zero, there are no significant bits in the register and thus
	 * no throttled states.
	 */
	if (cpu_duty_width > 0) {
	    AcpiOsQueueForExecution(OSD_PRIORITY_LO, acpi_cpu_init_throttling, NULL);

	    acpi_sc = acpi_device_get_parent_softc(dev);
	    sysctl_ctx_init(&acpi_cpu_sysctl_ctx);
	    acpi_cpu_sysctl_tree = SYSCTL_ADD_NODE(&acpi_cpu_sysctl_ctx,
						   SYSCTL_CHILDREN(acpi_sc->acpi_sysctl_tree),
						   OID_AUTO, "cpu", CTLFLAG_RD, 0, "");

	    SYSCTL_ADD_INT(&acpi_cpu_sysctl_ctx, SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
			   OID_AUTO, "max_speed", CTLFLAG_RD,
			   &cpu_max_state, 0, "maximum CPU speed");
	    SYSCTL_ADD_INT(&acpi_cpu_sysctl_ctx, SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
			   OID_AUTO, "current_speed", CTLFLAG_RD,
			   &cpu_current_state, 0, "current CPU speed");
	    SYSCTL_ADD_PROC(&acpi_cpu_sysctl_ctx, SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
			    OID_AUTO, "performance_speed", CTLTYPE_INT | CTLFLAG_RW,
			    &cpu_performance_state, 0, acpi_cpu_speed_sysctl, "I", "");
	    SYSCTL_ADD_PROC(&acpi_cpu_sysctl_ctx, SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
			    OID_AUTO, "economy_speed", CTLTYPE_INT | CTLFLAG_RW,
			    &cpu_economy_state, 0, acpi_cpu_speed_sysctl, "I", "");
	}
    }

    /*
     * Get the processor object.
     */
    buf.Pointer = &processor;
    buf.Length = sizeof(processor);
    if (ACPI_FAILURE(status = AcpiEvaluateObject(sc->cpu_handle, NULL, NULL, &buf))) {
	device_printf(sc->cpu_dev, "couldn't get Processor object - %s\n",
		      AcpiFormatException(status));
	return_VALUE(ENXIO);
    }
    if (processor.Type != ACPI_TYPE_PROCESSOR) {
	device_printf(sc->cpu_dev, "Processor object has bad type %d\n", processor.Type);
	return_VALUE(ENXIO);
    }
    sc->cpu_id = processor.Processor.ProcId;

    /*
     * If it looks like we support throttling, find this CPU's P_BLK.
     *
     * Note that some systems seem to duplicate the P_BLK pointer across
     * multiple CPUs, so not getting the resource is not fatal.
     *
     * XXX should support _PTC here as well, once we work out how to parse it.
     *
     * XXX is it valid to assume that the P_BLK must be 6 bytes long?
     */
    if (cpu_duty_width > 0) {
	p_blk = processor.Processor.PblkAddress;
	p_blk_length = processor.Processor.PblkLength;

	/* allocate bus space if possible */
	if ((p_blk > 0) && (p_blk_length == 6)) {
	    rid = 0;
	    bus_set_resource(sc->cpu_dev, SYS_RES_IOPORT, rid, p_blk, p_blk_length);
	    sc->cpu_p_blk = bus_alloc_resource(sc->cpu_dev, SYS_RES_IOPORT, &rid, 0, ~0, 1,
					       RF_ACTIVE);

	    DEBUG_PRINT(TRACE_IO, ("acpi_cpu%d: throttling with P_BLK at 0x%x/%d%s\n",
				   device_get_unit(sc->cpu_dev), p_blk, p_blk_length,
				   sc->cpu_p_blk ? "" : " (shadowed)"));
	}
    }
    return_VALUE(0);
}

/*
 * Call this *after* all CPUs have been attached.
 *
 * Takes the ACPI lock to avoid fighting anyone over the SMI command
 * port.  Could probably lock less code.
 */
static void
acpi_cpu_init_throttling(void *arg)
{

    ACPI_LOCK;

    /* get set of CPU devices */
    devclass_get_devices(acpi_cpu_devclass, &cpu_devices, &cpu_ndevices);

    /* initialise throttling states */
    cpu_max_state = CPU_MAX_SPEED;
    cpu_performance_state = cpu_max_state;
    cpu_economy_state = cpu_performance_state / 2;
    if (cpu_economy_state == 0)		/* 0 is 'reserved' */
	cpu_economy_state++;

    /* register performance profile change handler */
    EVENTHANDLER_REGISTER(powerprofile_change, acpi_cpu_powerprofile, NULL, 0);

    /* if ACPI 2.0+, signal platform that we are taking over throttling */
    if (cpu_pstate_cnt != 0) {
	/* XXX should be a generic interface for this */
	AcpiOsWritePort(cpu_smi_cmd, cpu_pstate_cnt, 8);
    }

    ACPI_UNLOCK;

    /* set initial speed */
    acpi_cpu_powerprofile(NULL);

    printf("acpi_cpu: CPU throttling enabled, %d steps from 100%% to %d.%d%%\n",
	   CPU_MAX_SPEED, CPU_SPEED_PRINTABLE(1));
}

/*
 * Set CPUs to the new state.
 *
 * Must be called with the ACPI lock held.
 */
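/*
 * Illustrative sequence (again assuming a duty width of 3, so
 * CPU_MAX_SPEED is 8, and a duty offset of 0): selecting speed 4 (a 50%
 * duty cycle) clears THT_EN, rewrites the 3-bit CLK_VAL field to 4, and
 * then sets THT_EN again so the new duty cycle takes effect.
 */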
static void
acpi_cpu_set_speed(u_int32_t speed)
{
    struct acpi_cpu_softc	*sc;
    int				i;
    u_int32_t			p_cnt, clk_val;

    ACPI_ASSERTLOCK;

    /* iterate over processors */
    for (i = 0; i < cpu_ndevices; i++) {
	sc = device_get_softc(cpu_devices[i]);
	if (sc->cpu_p_blk == NULL)
	    continue;

	/* get the current P_CNT value and disable throttling */
	p_cnt = CPU_GET_P_CNT(sc);
	p_cnt &= ~CPU_P_CNT_THT_EN;
	CPU_SET_P_CNT(sc, p_cnt);

	/* if we're at maximum speed, that's all */
	if (speed < CPU_MAX_SPEED) {

	    /*
	     * Mask off the old CLK_VAL field ((CPU_MAX_SPEED - 1) covers
	     * every bit of the duty-cycle field) and or-in the new value.
	     */
	    clk_val = (CPU_MAX_SPEED - 1) << cpu_duty_offset;
	    p_cnt &= ~clk_val;
	    p_cnt |= (speed << cpu_duty_offset);

	    /* write the new P_CNT value and then enable throttling */
	    CPU_SET_P_CNT(sc, p_cnt);
	    p_cnt |= CPU_P_CNT_THT_EN;
	    CPU_SET_P_CNT(sc, p_cnt);
	}
	device_printf(sc->cpu_dev, "set speed to %d.%d%%\n", CPU_SPEED_PRINTABLE(speed));
    }
    cpu_current_state = speed;
}

/*
 * Power profile change hook.
 *
 * Uses the ACPI lock to avoid reentrancy.
 */
static void
acpi_cpu_powerprofile(void *arg)
{
    u_int32_t	new;

    ACPI_LOCK;

    new = (powerprofile_get_state() == POWERPROFILE_PERFORMANCE) ?
	cpu_performance_state : cpu_economy_state;
    if (cpu_current_state != new)
	acpi_cpu_set_speed(new);

    ACPI_UNLOCK;
}

/*
 * Handle changes in the performance/economy CPU settings.
 *
 * Does not need the ACPI lock (although setting *argp should
 * probably be atomic).
 */
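/*
 * Sketch of expected usage, assuming the parent ACPI sysctl tree is
 * rooted at hw.acpi (this file only attaches to whatever tree the
 * parent provides): "sysctl hw.acpi.cpu.economy_speed=2" would store 2
 * through this handler after the range check, and
 * acpi_cpu_powerprofile() would apply it immediately if the economy
 * profile is currently active.
 */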
static int
acpi_cpu_speed_sysctl(SYSCTL_HANDLER_ARGS)
{
    u_int32_t	*argp;
    u_int32_t	arg;
    int		error;

    argp = (u_int32_t *)oidp->oid_arg1;
    arg = *argp;
    error = sysctl_handle_int(oidp, &arg, 0, req);

    /* error or no new value */
    if ((error != 0) || (req->newptr == NULL))
	return(error);

    /* range check */
    if ((arg < 1) || (arg > cpu_max_state))
	return(EINVAL);

    /* set new value and possibly switch */
    *argp = arg;
    acpi_cpu_powerprofile(NULL);

    return(0);
}