/*-
 * Copyright (c) 2003-2005 Nate Lawson (SDG)
 * Copyright (c) 2001 Michael Smith
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_acpi.h"
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>

#include <machine/bus.h>

#include <contrib/dev/acpica/include/acpi.h>

#include <dev/acpica/acpivar.h>
#include <dev/pci/pcivar.h>

#include "cpufreq_if.h"

/*
 * Throttling provides relative frequency control.  It involves modulating
 * the clock so that the CPU is active for only a fraction of the normal
 * clock cycle.  It does not change voltage and so is less efficient than
 * other mechanisms.  Since it is relative, it can be used in addition to
 * absolute cpufreq drivers.  We support the ACPI 2.0 specification.
 */

struct acpi_throttle_softc {
	device_t	 cpu_dev;
	ACPI_HANDLE	 cpu_handle;
	uint32_t	 cpu_p_blk;	/* ACPI P_BLK location */
	uint32_t	 cpu_p_blk_len;	/* P_BLK length (must be 6). */
	struct resource	*cpu_p_cnt;	/* Throttling control register */
	int		 cpu_p_type;	/* Resource type for cpu_p_cnt. */
	uint32_t	 cpu_thr_state;	/* Current throttle setting. */
};

#define THR_GET_REG(reg)					\
	(bus_space_read_4(rman_get_bustag((reg)),		\
	    rman_get_bushandle((reg)), 0))
#define THR_SET_REG(reg, val)					\
	(bus_space_write_4(rman_get_bustag((reg)),		\
	    rman_get_bushandle((reg)), 0, (val)))

/*
 * Speeds are stored in counts, from 1 to CPU_MAX_SPEED, and
 * reported to the user in hundredths of a percent.
 */
#define CPU_MAX_SPEED		(1 << cpu_duty_width)
#define CPU_SPEED_PERCENT(x)	((10000 * (x)) / CPU_MAX_SPEED)
#define CPU_SPEED_PRINTABLE(x)	(CPU_SPEED_PERCENT(x) / 10),	\
				(CPU_SPEED_PERCENT(x) % 10)
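/*
 * For example, if the FADT reports a duty width of 3 bits, CPU_MAX_SPEED
 * is 8 and the valid counts are 1 through 8; a count of 6 is reported as
 * CPU_SPEED_PERCENT(6) == 7500, i.e. 75.00% of the nominal frequency.
 */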
#define CPU_P_CNT_THT_EN	(1<<4)
#define CPU_QUIRK_NO_THROTTLE	(1<<1)	/* Throttling is not usable. */

#define PCI_VENDOR_INTEL	0x8086
#define PCI_DEVICE_82371AB_3	0x7113	/* PIIX4 chipset for quirks. */
#define PCI_REVISION_A_STEP	0
#define PCI_REVISION_B_STEP	1

static uint32_t	cpu_duty_offset; /* Offset in P_CNT of throttle val. */
static uint32_t	cpu_duty_width;	 /* Bit width of throttle value. */
static int	thr_rid;	 /* Driver-wide resource id. */
static int	thr_quirks;	 /* Indicate any hardware bugs. */

static void	acpi_throttle_identify(driver_t *driver, device_t parent);
static int	acpi_throttle_probe(device_t dev);
static int	acpi_throttle_attach(device_t dev);
static int	acpi_throttle_evaluate(struct acpi_throttle_softc *sc);
static void	acpi_throttle_quirks(struct acpi_throttle_softc *sc);
static int	acpi_thr_settings(device_t dev, struct cf_setting *sets,
		    int *count);
static int	acpi_thr_set(device_t dev, const struct cf_setting *set);
static int	acpi_thr_get(device_t dev, struct cf_setting *set);
static int	acpi_thr_type(device_t dev, int *type);

static device_method_t acpi_throttle_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify,	acpi_throttle_identify),
	DEVMETHOD(device_probe,		acpi_throttle_probe),
	DEVMETHOD(device_attach,	acpi_throttle_attach),

	/* cpufreq interface */
	DEVMETHOD(cpufreq_drv_set,	acpi_thr_set),
	DEVMETHOD(cpufreq_drv_get,	acpi_thr_get),
	DEVMETHOD(cpufreq_drv_type,	acpi_thr_type),
	DEVMETHOD(cpufreq_drv_settings,	acpi_thr_settings),
	DEVMETHOD_END
};

static driver_t acpi_throttle_driver = {
	"acpi_throttle",
	acpi_throttle_methods,
	sizeof(struct acpi_throttle_softc),
};

static devclass_t acpi_throttle_devclass;
DRIVER_MODULE(acpi_throttle, cpu, acpi_throttle_driver, acpi_throttle_devclass,
    0, 0);

static void
acpi_throttle_identify(driver_t *driver, device_t parent)
{
	ACPI_BUFFER buf;
	ACPI_HANDLE handle;
	ACPI_OBJECT *obj;

	/* Make sure we're not being doubly invoked. */
	if (device_find_child(parent, "acpi_throttle", -1))
		return;

	/* Check for a valid duty width and parent CPU type. */
	handle = acpi_get_handle(parent);
	if (handle == NULL)
		return;
	if (AcpiGbl_FADT.DutyWidth == 0 ||
	    acpi_get_type(parent) != ACPI_TYPE_PROCESSOR)
		return;

	/*
	 * Add a child if there's a non-NULL P_BLK and correct length, or
	 * if the _PTC method is present.
	 */
	buf.Pointer = NULL;
	buf.Length = ACPI_ALLOCATE_BUFFER;
	if (ACPI_FAILURE(AcpiEvaluateObject(handle, NULL, NULL, &buf)))
		return;
	obj = (ACPI_OBJECT *)buf.Pointer;
	if ((obj->Processor.PblkAddress && obj->Processor.PblkLength >= 4) ||
	    ACPI_SUCCESS(AcpiEvaluateObject(handle, "_PTC", NULL, NULL))) {
		if (BUS_ADD_CHILD(parent, 0, "acpi_throttle",
		    device_get_unit(parent)) == NULL)
			device_printf(parent, "add throttle child failed\n");
	}
	AcpiOsFree(obj);
}

static int
acpi_throttle_probe(device_t dev)
{

	if (resource_disabled("acpi_throttle", 0))
		return (ENXIO);

	/*
	 * On i386 platforms at least, ACPI throttling is accomplished by
	 * the chipset modulating the STPCLK# pin based on the duty cycle.
	 * Since p4tcc uses the same mechanism (but internal to the CPU),
	 * we disable acpi_throttle when p4tcc is also present.
	 */
	if (device_find_child(device_get_parent(dev), "p4tcc", -1) &&
	    !resource_disabled("p4tcc", 0))
		return (ENXIO);

	device_set_desc(dev, "ACPI CPU Throttling");
	return (0);
}

static int
acpi_throttle_attach(device_t dev)
{
	struct acpi_throttle_softc *sc;
	struct cf_setting set;
	ACPI_BUFFER buf;
	ACPI_OBJECT *obj;
	ACPI_STATUS status;
	int error;

	sc = device_get_softc(dev);
	sc->cpu_dev = dev;
	sc->cpu_handle = acpi_get_handle(dev);

	buf.Pointer = NULL;
	buf.Length = ACPI_ALLOCATE_BUFFER;
	status = AcpiEvaluateObject(sc->cpu_handle, NULL, NULL, &buf);
	if (ACPI_FAILURE(status)) {
		device_printf(dev, "attach failed to get Processor obj - %s\n",
		    AcpiFormatException(status));
		return (ENXIO);
	}
	obj = (ACPI_OBJECT *)buf.Pointer;
	sc->cpu_p_blk = obj->Processor.PblkAddress;
	sc->cpu_p_blk_len = obj->Processor.PblkLength;
	AcpiOsFree(obj);

	/* If this is the first device probed, check for quirks. */
	if (device_get_unit(dev) == 0)
		acpi_throttle_quirks(sc);

	/* Attempt to attach the actual throttling register. */
	error = acpi_throttle_evaluate(sc);
	if (error)
		return (error);

	/*
	 * Set our initial frequency to the highest since some systems
	 * seem to boot with this at the lowest setting.
	 */
	set.freq = 10000;
	acpi_thr_set(dev, &set);

	/* Everything went ok, register with cpufreq(4). */
	cpufreq_register(dev);
	return (0);
}

static int
acpi_throttle_evaluate(struct acpi_throttle_softc *sc)
{
	uint32_t duty_end;
	ACPI_BUFFER buf;
	ACPI_OBJECT obj;
	ACPI_GENERIC_ADDRESS gas;
	ACPI_STATUS status;

	/* Get throttling parameters from the FADT.  0 means not supported. */
	if (device_get_unit(sc->cpu_dev) == 0) {
		cpu_duty_offset = AcpiGbl_FADT.DutyOffset;
		cpu_duty_width = AcpiGbl_FADT.DutyWidth;
	}
	if (cpu_duty_width == 0 || (thr_quirks & CPU_QUIRK_NO_THROTTLE) != 0)
		return (ENXIO);

	/* Validate the duty offset/width. */
	duty_end = cpu_duty_offset + cpu_duty_width - 1;
	if (duty_end > 31) {
		device_printf(sc->cpu_dev,
		    "CLK_VAL field overflows P_CNT register\n");
		return (ENXIO);
	}
	if (cpu_duty_offset <= 4 && duty_end >= 4) {
		device_printf(sc->cpu_dev,
		    "CLK_VAL field overlaps THT_EN bit\n");
		return (ENXIO);
	}
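
	/*
	 * For reference, a common layout (e.g. on PIIX4-class chipsets) is
	 * DutyOffset = 1 and DutyWidth = 3, which places CLK_VAL in bits 3:1
	 * of P_CNT, just below the THT_EN bit (bit 4) checked above.  These
	 * values are only an example; any layout passing the checks above
	 * is accepted.
	 */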

	/*
	 * Look for the _PTC method first and use it to locate the P_CNT
	 * register.  If it is not present, fall back to using the
	 * processor's P_BLK to find the P_CNT register.
	 *
	 * Note that some systems seem to duplicate the P_BLK pointer
	 * across multiple CPUs, so not getting the resource is not fatal.
	 */
	buf.Pointer = &obj;
	buf.Length = sizeof(obj);
	status = AcpiEvaluateObject(sc->cpu_handle, "_PTC", NULL, &buf);
	if (ACPI_SUCCESS(status)) {
		if (obj.Buffer.Length < sizeof(ACPI_GENERIC_ADDRESS) + 3) {
			device_printf(sc->cpu_dev, "_PTC buffer too small\n");
			return (ENXIO);
		}
		memcpy(&gas, obj.Buffer.Pointer + 3, sizeof(gas));
		acpi_bus_alloc_gas(sc->cpu_dev, &sc->cpu_p_type, &thr_rid,
		    &gas, &sc->cpu_p_cnt, 0);
		if (sc->cpu_p_cnt != NULL && bootverbose) {
			device_printf(sc->cpu_dev, "P_CNT from _PTC %#jx\n",
			    gas.Address);
		}
	}

	/* If _PTC not present or other failure, try the P_BLK. */
	if (sc->cpu_p_cnt == NULL) {
		/*
		 * The spec says P_BLK must be 6 bytes long.  However, some
		 * systems use it to indicate a fractional set of features
		 * present so we take anything >= 4.
		 */
		if (sc->cpu_p_blk_len < 4)
			return (ENXIO);
		gas.Address = sc->cpu_p_blk;
		gas.SpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
		gas.BitWidth = 32;
		acpi_bus_alloc_gas(sc->cpu_dev, &sc->cpu_p_type, &thr_rid,
		    &gas, &sc->cpu_p_cnt, 0);
		if (sc->cpu_p_cnt != NULL) {
			if (bootverbose)
				device_printf(sc->cpu_dev,
				    "P_CNT from P_BLK %#x\n", sc->cpu_p_blk);
		} else {
			device_printf(sc->cpu_dev, "failed to attach P_CNT\n");
			return (ENXIO);
		}
	}
	thr_rid++;

	return (0);
}

static void
acpi_throttle_quirks(struct acpi_throttle_softc *sc)
{
#ifdef __i386__
	device_t acpi_dev;

	/* Look for various quirks of the PIIX4 part. */
	acpi_dev = pci_find_device(PCI_VENDOR_INTEL, PCI_DEVICE_82371AB_3);
	if (acpi_dev) {
		switch (pci_get_revid(acpi_dev)) {
		/*
		 * Disable throttling control on PIIX4 A and B-step.
		 * See specification changes #13 ("Manual Throttle Duty Cycle")
		 * and #14 ("Enabling and Disabling Manual Throttle"), plus
		 * erratum #5 ("STPCLK# Deassertion Time") from the January
		 * 2002 PIIX4 specification update.  Note that few (if any)
		 * mobile systems ever used this part.
		 */
		case PCI_REVISION_A_STEP:
		case PCI_REVISION_B_STEP:
			thr_quirks |= CPU_QUIRK_NO_THROTTLE;
			break;
		default:
			break;
		}
	}
#endif
}
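
/*
 * For example, with a 3-bit duty width the driver exports 8 settings,
 * highest first: 10000, 8750, 7500, 6250, 5000, 3750, 2500, and 1250
 * hundredths of a percent (100%, 87.5%, ..., 12.5%).
 */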
static int
acpi_thr_settings(device_t dev, struct cf_setting *sets, int *count)
{
	int i, speed;

	if (sets == NULL || count == NULL)
		return (EINVAL);
	if (*count < CPU_MAX_SPEED)
		return (E2BIG);

	/* Return a list of valid settings for this driver. */
	memset(sets, CPUFREQ_VAL_UNKNOWN, sizeof(*sets) * CPU_MAX_SPEED);
	for (i = 0, speed = CPU_MAX_SPEED; speed != 0; i++, speed--) {
		sets[i].freq = CPU_SPEED_PERCENT(speed);
		sets[i].dev = dev;
	}
	*count = CPU_MAX_SPEED;

	return (0);
}

static int
acpi_thr_set(device_t dev, const struct cf_setting *set)
{
	struct acpi_throttle_softc *sc;
	uint32_t clk_val, p_cnt, speed;

	if (set == NULL)
		return (EINVAL);
	sc = device_get_softc(dev);

	/*
	 * Validate that the requested state converts to a duty cycle that
	 * is an integer in [1 .. CPU_MAX_SPEED].
	 */
	speed = set->freq * CPU_MAX_SPEED / 10000;
	if (speed * 10000 != set->freq * CPU_MAX_SPEED ||
	    speed < 1 || speed > CPU_MAX_SPEED)
		return (EINVAL);

	/* If we're at this setting, don't bother applying it again. */
	if (speed == sc->cpu_thr_state)
		return (0);

	/* Get the current P_CNT value and disable throttling. */
	p_cnt = THR_GET_REG(sc->cpu_p_cnt);
	p_cnt &= ~CPU_P_CNT_THT_EN;
	THR_SET_REG(sc->cpu_p_cnt, p_cnt);

	/* If we're at maximum speed, that's all. */
	if (speed < CPU_MAX_SPEED) {
		/* Mask the old CLK_VAL off and OR in the new value. */
		clk_val = (CPU_MAX_SPEED - 1) << cpu_duty_offset;
		p_cnt &= ~clk_val;
		p_cnt |= (speed << cpu_duty_offset);

		/* Write the new P_CNT value and then enable throttling. */
		THR_SET_REG(sc->cpu_p_cnt, p_cnt);
		p_cnt |= CPU_P_CNT_THT_EN;
		THR_SET_REG(sc->cpu_p_cnt, p_cnt);
	}
	sc->cpu_thr_state = speed;

	return (0);
}

static int
acpi_thr_get(device_t dev, struct cf_setting *set)
{
	struct acpi_throttle_softc *sc;
	uint32_t p_cnt, clk_val;

	if (set == NULL)
		return (EINVAL);
	sc = device_get_softc(dev);

	/* Get the current throttling setting from P_CNT. */
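	/*
	 * (Example, assuming an illustrative DutyOffset = 1 / DutyWidth = 3
	 * layout: a P_CNT value of 0x1c has THT_EN (bit 4) set and decodes
	 * to clk_val = 6, reported as CPU_SPEED_PERCENT(6) == 7500, i.e.
	 * 75.00%.)
	 */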
	p_cnt = THR_GET_REG(sc->cpu_p_cnt);
	clk_val = (p_cnt >> cpu_duty_offset) & (CPU_MAX_SPEED - 1);
	sc->cpu_thr_state = clk_val;

	memset(set, CPUFREQ_VAL_UNKNOWN, sizeof(*set));
	set->freq = CPU_SPEED_PERCENT(clk_val);
	set->dev = dev;

	return (0);
}

static int
acpi_thr_type(device_t dev, int *type)
{

	if (type == NULL)
		return (EINVAL);

	*type = CPUFREQ_TYPE_RELATIVE;
	return (0);
}