/*-
 * Copyright (c) 2003-2005 Nate Lawson (SDG)
 * Copyright (c) 2001 Michael Smith
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_acpi.h"
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>

#include <machine/bus.h>

#include <contrib/dev/acpica/include/acpi.h>

#include <dev/acpica/acpivar.h>
#include <dev/pci/pcivar.h>

#include "cpufreq_if.h"

/*
 * Throttling provides relative frequency control.  It involves modulating
 * the clock so that the CPU is active for only a fraction of the normal
 * clock cycle.  It does not change voltage and so is less efficient than
 * other mechanisms.  Since it is relative, it can be used in addition to
 * absolute cpufreq drivers.  We support the ACPI 2.0 specification.
 */

struct acpi_throttle_softc {
	device_t	 cpu_dev;
	ACPI_HANDLE	 cpu_handle;
	uint32_t	 cpu_p_blk;	/* ACPI P_BLK location */
	uint32_t	 cpu_p_blk_len;	/* P_BLK length (must be 6). */
	struct resource	*cpu_p_cnt;	/* Throttling control register */
	int		 cpu_p_type;	/* Resource type for cpu_p_cnt. */
	uint32_t	 cpu_thr_state;	/* Current throttle setting. */
};

#define THR_GET_REG(reg)					\
	(bus_space_read_4(rman_get_bustag((reg)),		\
	    rman_get_bushandle((reg)), 0))
#define THR_SET_REG(reg, val)					\
	(bus_space_write_4(rman_get_bustag((reg)),		\
	    rman_get_bushandle((reg)), 0, (val)))

/*
 * Speeds are stored in counts, from 1 to CPU_MAX_SPEED, and
 * reported to the user in hundredths of a percent.
 */
#define CPU_MAX_SPEED		(1 << cpu_duty_width)
#define CPU_SPEED_PERCENT(x)	((10000 * (x)) / CPU_MAX_SPEED)
#define CPU_SPEED_PRINTABLE(x)	(CPU_SPEED_PERCENT(x) / 10),	\
				(CPU_SPEED_PERCENT(x) % 10)
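
/*
 * Worked example of the macros above (illustrative values, not taken from
 * any particular machine): with a FADT duty width of 3, CPU_MAX_SPEED is 8
 * and the valid duty counts are 1..8.  A count of 4 is reported as
 * CPU_SPEED_PERCENT(4) == 5000, i.e. 50.00%, and the full-speed count of 8
 * is reported as 10000 (100.00%).
 */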

#define CPU_P_CNT_THT_EN	(1<<4)
#define CPU_QUIRK_NO_THROTTLE	(1<<1)	/* Throttling is not usable. */

#define PCI_VENDOR_INTEL	0x8086
#define PCI_DEVICE_82371AB_3	0x7113	/* PIIX4 chipset for quirks. */
#define PCI_REVISION_A_STEP	0
#define PCI_REVISION_B_STEP	1

static uint32_t	cpu_duty_offset;	/* Offset in P_CNT of throttle val. */
static uint32_t	cpu_duty_width;		/* Bit width of throttle value. */
static int	thr_rid;		/* Driver-wide resource id. */
static int	thr_quirks;		/* Indicate any hardware bugs. */

static void	acpi_throttle_identify(driver_t *driver, device_t parent);
static int	acpi_throttle_probe(device_t dev);
static int	acpi_throttle_attach(device_t dev);
static int	acpi_throttle_evaluate(struct acpi_throttle_softc *sc);
static int	acpi_throttle_quirks(struct acpi_throttle_softc *sc);
static int	acpi_thr_settings(device_t dev, struct cf_setting *sets,
		    int *count);
static int	acpi_thr_set(device_t dev, const struct cf_setting *set);
static int	acpi_thr_get(device_t dev, struct cf_setting *set);
static int	acpi_thr_type(device_t dev, int *type);

static device_method_t acpi_throttle_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify,	acpi_throttle_identify),
	DEVMETHOD(device_probe,		acpi_throttle_probe),
	DEVMETHOD(device_attach,	acpi_throttle_attach),

	/* cpufreq interface */
	DEVMETHOD(cpufreq_drv_set,	acpi_thr_set),
	DEVMETHOD(cpufreq_drv_get,	acpi_thr_get),
	DEVMETHOD(cpufreq_drv_type,	acpi_thr_type),
	DEVMETHOD(cpufreq_drv_settings,	acpi_thr_settings),
	DEVMETHOD_END
};

static driver_t acpi_throttle_driver = {
	"acpi_throttle",
	acpi_throttle_methods,
	sizeof(struct acpi_throttle_softc),
};

static devclass_t acpi_throttle_devclass;
DRIVER_MODULE(acpi_throttle, cpu, acpi_throttle_driver,
    acpi_throttle_devclass, 0, 0);

static void
acpi_throttle_identify(driver_t *driver, device_t parent)
{
	ACPI_BUFFER buf;
	ACPI_HANDLE handle;
	ACPI_OBJECT *obj;

	/* Make sure we're not being doubly invoked. */
	if (device_find_child(parent, "acpi_throttle", -1))
		return;

	/* Check for a valid duty width and parent CPU type. */
	handle = acpi_get_handle(parent);
	if (handle == NULL)
		return;
	if (AcpiGbl_FADT.DutyWidth == 0 ||
	    acpi_get_type(parent) != ACPI_TYPE_PROCESSOR)
		return;

	/*
	 * Add a child if there's a non-NULL P_BLK and correct length, or
	 * if the _PTC method is present.
	 */
	buf.Pointer = NULL;
	buf.Length = ACPI_ALLOCATE_BUFFER;
	if (ACPI_FAILURE(AcpiEvaluateObject(handle, NULL, NULL, &buf)))
		return;
	obj = (ACPI_OBJECT *)buf.Pointer;
	if ((obj->Processor.PblkAddress && obj->Processor.PblkLength >= 4) ||
	    ACPI_SUCCESS(AcpiEvaluateObject(handle, "_PTC", NULL, NULL))) {
		if (BUS_ADD_CHILD(parent, 0, "acpi_throttle", -1) == NULL)
			device_printf(parent, "add throttle child failed\n");
	}
	AcpiOsFree(obj);
}

static int
acpi_throttle_probe(device_t dev)
{

	if (resource_disabled("acpi_throttle", 0))
		return (ENXIO);

	/*
	 * On i386 platforms at least, ACPI throttling is accomplished by
	 * the chipset modulating the STPCLK# pin based on the duty cycle.
	 * Since p4tcc uses the same mechanism (but internal to the CPU),
	 * we disable acpi_throttle when p4tcc is also present.
	 */
	if (device_find_child(device_get_parent(dev), "p4tcc", -1) &&
	    !resource_disabled("p4tcc", 0))
		return (ENXIO);

	device_set_desc(dev, "ACPI CPU Throttling");
	return (0);
}

static int
acpi_throttle_attach(device_t dev)
{
	struct acpi_throttle_softc *sc;
	struct cf_setting set;
	ACPI_BUFFER buf;
	ACPI_OBJECT *obj;
	ACPI_STATUS status;
	int error;

	sc = device_get_softc(dev);
	sc->cpu_dev = dev;
	sc->cpu_handle = acpi_get_handle(dev);

	buf.Pointer = NULL;
	buf.Length = ACPI_ALLOCATE_BUFFER;
	status = AcpiEvaluateObject(sc->cpu_handle, NULL, NULL, &buf);
	if (ACPI_FAILURE(status)) {
		device_printf(dev, "attach failed to get Processor obj - %s\n",
		    AcpiFormatException(status));
		return (ENXIO);
	}
	obj = (ACPI_OBJECT *)buf.Pointer;
	sc->cpu_p_blk = obj->Processor.PblkAddress;
	sc->cpu_p_blk_len = obj->Processor.PblkLength;
	AcpiOsFree(obj);

	/* If this is the first device probed, check for quirks. */
	if (device_get_unit(dev) == 0)
		acpi_throttle_quirks(sc);

	/* Attempt to attach the actual throttling register. */
	error = acpi_throttle_evaluate(sc);
	if (error)
		return (error);

	/*
	 * Set our initial frequency to the highest since some systems
	 * seem to boot with this at the lowest setting.
	 */
	set.freq = 10000;
	acpi_thr_set(dev, &set);

	/* Everything went ok, register with cpufreq(4). */
	cpufreq_register(dev);
	return (0);
}

static int
acpi_throttle_evaluate(struct acpi_throttle_softc *sc)
{
	uint32_t duty_end;
	ACPI_BUFFER buf;
	ACPI_OBJECT obj;
	ACPI_GENERIC_ADDRESS gas;
	ACPI_STATUS status;

	/* Get throttling parameters from the FADT.  0 means not supported. */
	if (device_get_unit(sc->cpu_dev) == 0) {
		cpu_duty_offset = AcpiGbl_FADT.DutyOffset;
		cpu_duty_width = AcpiGbl_FADT.DutyWidth;
	}
	if (cpu_duty_width == 0 || (thr_quirks & CPU_QUIRK_NO_THROTTLE) != 0)
		return (ENXIO);

	/* Validate the duty offset/width. */
	duty_end = cpu_duty_offset + cpu_duty_width - 1;
	if (duty_end > 31) {
		device_printf(sc->cpu_dev,
		    "CLK_VAL field overflows P_CNT register\n");
		return (ENXIO);
	}
	if (cpu_duty_offset <= 4 && duty_end >= 4) {
		device_printf(sc->cpu_dev,
		    "CLK_VAL field overlaps THT_EN bit\n");
		return (ENXIO);
	}

	/*
	 * Look for a P_CNT register provided by the _PTC method.  If it is
	 * not present, fall back to using the processor's P_BLK to find
	 * the P_CNT register.
	 *
	 * Note that some systems seem to duplicate the P_BLK pointer
	 * across multiple CPUs, so not getting the resource is not fatal.
	 */
	buf.Pointer = &obj;
	buf.Length = sizeof(obj);
	status = AcpiEvaluateObject(sc->cpu_handle, "_PTC", NULL, &buf);
	if (ACPI_SUCCESS(status)) {
		if (obj.Buffer.Length < sizeof(ACPI_GENERIC_ADDRESS) + 3) {
			device_printf(sc->cpu_dev, "_PTC buffer too small\n");
			return (ENXIO);
		}
		/* The GAS follows a 3-byte resource descriptor header. */
		memcpy(&gas, obj.Buffer.Pointer + 3, sizeof(gas));
		acpi_bus_alloc_gas(sc->cpu_dev, &sc->cpu_p_type, &thr_rid,
		    &gas, &sc->cpu_p_cnt, 0);
		if (sc->cpu_p_cnt != NULL && bootverbose) {
			device_printf(sc->cpu_dev, "P_CNT from _PTC %#jx\n",
			    gas.Address);
		}
	}
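
	/*
	 * For reference (hypothetical example, not from any particular
	 * machine): the ASL Processor() declaration supplies the P_BLK
	 * address and length, e.g. Processor (CPU0, 0x01, 0x00000410, 0x06)
	 * declares a 6-byte P_BLK at I/O port 0x410.  P_CNT is the 32-bit
	 * register at the start of P_BLK, which is why the fallback below
	 * uses SystemIO space with a bit width of 32.
	 */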

	/* If _PTC not present or other failure, try the P_BLK. */
	if (sc->cpu_p_cnt == NULL) {
		/*
		 * The spec says P_BLK must be 6 bytes long.  However, some
		 * systems use it to indicate a fractional set of features
		 * present so we take anything >= 4.
		 */
		if (sc->cpu_p_blk_len < 4)
			return (ENXIO);
		gas.Address = sc->cpu_p_blk;
		gas.SpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
		gas.BitWidth = 32;
		acpi_bus_alloc_gas(sc->cpu_dev, &sc->cpu_p_type, &thr_rid,
		    &gas, &sc->cpu_p_cnt, 0);
		if (sc->cpu_p_cnt != NULL) {
			if (bootverbose)
				device_printf(sc->cpu_dev,
				    "P_CNT from P_BLK %#x\n", sc->cpu_p_blk);
		} else {
			device_printf(sc->cpu_dev, "failed to attach P_CNT\n");
			return (ENXIO);
		}
	}
	thr_rid++;

	return (0);
}

static int
acpi_throttle_quirks(struct acpi_throttle_softc *sc)
{
	device_t acpi_dev;

	/* Look for various quirks of the PIIX4 part. */
	acpi_dev = pci_find_device(PCI_VENDOR_INTEL, PCI_DEVICE_82371AB_3);
	if (acpi_dev) {
		switch (pci_get_revid(acpi_dev)) {
		/*
		 * Disable throttling control on PIIX4 A and B-step.
		 * See specification changes #13 ("Manual Throttle Duty Cycle")
		 * and #14 ("Enabling and Disabling Manual Throttle"), plus
		 * erratum #5 ("STPCLK# Deassertion Time") from the January
		 * 2002 PIIX4 specification update.  Note that few (if any)
		 * mobile systems ever used this part.
		 */
		case PCI_REVISION_A_STEP:
		case PCI_REVISION_B_STEP:
			thr_quirks |= CPU_QUIRK_NO_THROTTLE;
			break;
		default:
			break;
		}
	}

	return (0);
}

static int
acpi_thr_settings(device_t dev, struct cf_setting *sets, int *count)
{
	int i, speed;

	if (sets == NULL || count == NULL)
		return (EINVAL);
	if (*count < CPU_MAX_SPEED)
		return (E2BIG);

	/* Return a list of valid settings for this driver. */
	memset(sets, CPUFREQ_VAL_UNKNOWN, sizeof(*sets) * CPU_MAX_SPEED);
	for (i = 0, speed = CPU_MAX_SPEED; speed != 0; i++, speed--) {
		sets[i].freq = CPU_SPEED_PERCENT(speed);
		sets[i].dev = dev;
	}
	*count = CPU_MAX_SPEED;

	return (0);
}

static int
acpi_thr_set(device_t dev, const struct cf_setting *set)
{
	struct acpi_throttle_softc *sc;
	uint32_t clk_val, p_cnt, speed;

	if (set == NULL)
		return (EINVAL);
	sc = device_get_softc(dev);

	/*
	 * Validate that the requested state converts to a duty cycle that
	 * is an integer from [1 .. CPU_MAX_SPEED].
	 */
	speed = set->freq * CPU_MAX_SPEED / 10000;
	if (speed * 10000 != set->freq * CPU_MAX_SPEED ||
	    speed < 1 || speed > CPU_MAX_SPEED)
		return (EINVAL);

	/* If we're at this setting, don't bother applying it again. */
	if (speed == sc->cpu_thr_state)
		return (0);

	/* Get the current P_CNT value and disable throttling. */
	p_cnt = THR_GET_REG(sc->cpu_p_cnt);
	p_cnt &= ~CPU_P_CNT_THT_EN;
	THR_SET_REG(sc->cpu_p_cnt, p_cnt);

	/* If we're at maximum speed, that's all. */
	if (speed < CPU_MAX_SPEED) {
		/* Mask the old CLK_VAL off and OR in the new value. */
		clk_val = (CPU_MAX_SPEED - 1) << cpu_duty_offset;
		p_cnt &= ~clk_val;
		p_cnt |= (speed << cpu_duty_offset);

		/* Write the new P_CNT value and then enable throttling. */
		THR_SET_REG(sc->cpu_p_cnt, p_cnt);
		p_cnt |= CPU_P_CNT_THT_EN;
		THR_SET_REG(sc->cpu_p_cnt, p_cnt);
	}
	sc->cpu_thr_state = speed;

	return (0);
}
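
/*
 * Worked example of the set/get path (illustrative values, assuming a duty
 * width of 3 and a duty offset of 1): a request of 7500 (75.00%) maps to a
 * duty count of 7500 * 8 / 10000 == 6.  acpi_thr_set() clears THT_EN,
 * writes 6 into CLK_VAL (bits 3:1 of P_CNT), and then sets THT_EN (bit 4).
 * acpi_thr_get() reverses this: it extracts CLK_VAL == 6 from P_CNT and
 * reports CPU_SPEED_PERCENT(6) == 7500 back to cpufreq(4).
 */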

static int
acpi_thr_get(device_t dev, struct cf_setting *set)
{
	struct acpi_throttle_softc *sc;
	uint32_t p_cnt, clk_val;

	if (set == NULL)
		return (EINVAL);
	sc = device_get_softc(dev);

	/* Get the current throttling setting from P_CNT. */
	p_cnt = THR_GET_REG(sc->cpu_p_cnt);
	clk_val = (p_cnt >> cpu_duty_offset) & (CPU_MAX_SPEED - 1);
	sc->cpu_thr_state = clk_val;

	memset(set, CPUFREQ_VAL_UNKNOWN, sizeof(*set));
	set->freq = CPU_SPEED_PERCENT(clk_val);
	set->dev = dev;

	return (0);
}

static int
acpi_thr_type(device_t dev, int *type)
{

	if (type == NULL)
		return (EINVAL);

	*type = CPUFREQ_TYPE_RELATIVE;
	return (0);
}