1 /*- 2 * Copyright (c) 2003-2005 Nate Lawson (SDG) 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include "opt_acpi.h" 31 #include <sys/param.h> 32 #include <sys/kernel.h> 33 #include <sys/proc.h> 34 #include <sys/sched.h> 35 #include <sys/bus.h> 36 #include <sys/cpu.h> 37 #include <sys/power.h> 38 #include <sys/malloc.h> 39 #include <sys/module.h> 40 #include <sys/sbuf.h> 41 #include <sys/pcpu.h> 42 43 #include <machine/bus_pio.h> 44 #include <machine/bus.h> 45 #include <machine/resource.h> 46 #include <sys/rman.h> 47 48 #include "acpi.h" 49 #include <dev/acpica/acpivar.h> 50 51 #include "cpufreq_if.h" 52 53 /* 54 * Support for ACPI processor performance states (Px) according to 55 * section 8.3.3 of the ACPI 2.0c specification. 56 */ 57 58 struct acpi_px { 59 uint32_t core_freq; 60 uint32_t power; 61 uint32_t trans_lat; 62 uint32_t bm_lat; 63 uint32_t ctrl_val; 64 uint32_t sts_val; 65 }; 66 67 #define MAX_PX_STATES 16 68 69 struct acpi_perf_softc { 70 device_t dev; 71 ACPI_HANDLE handle; 72 struct resource *perf_ctrl; /* Set new performance state. */ 73 int perf_ctrl_type; /* Resource type for perf_ctrl. */ 74 struct resource *perf_status; /* Check that transition succeeded. */ 75 int perf_sts_type; /* Resource type for perf_status. */ 76 struct acpi_px *px_states; /* ACPI perf states. */ 77 uint32_t px_count; /* Total number of perf states. */ 78 uint32_t px_max_avail; /* Lowest index state available. */ 79 int px_curr_state; /* Active state index. */ 80 int px_rid; 81 int info_only; /* Can we set new states? */ 82 }; 83 84 #define PX_GET_REG(reg) \ 85 (bus_space_read_4(rman_get_bustag((reg)), \ 86 rman_get_bushandle((reg)), 0)) 87 #define PX_SET_REG(reg, val) \ 88 (bus_space_write_4(rman_get_bustag((reg)), \ 89 rman_get_bushandle((reg)), 0, (val))) 90 91 #define ACPI_NOTIFY_PERF_STATES 0x80 /* _PSS changed. */ 92 93 static void acpi_perf_identify(driver_t *driver, device_t parent); 94 static int acpi_perf_probe(device_t dev); 95 static int acpi_perf_attach(device_t dev); 96 static int acpi_perf_detach(device_t dev); 97 static int acpi_perf_evaluate(device_t dev); 98 static int acpi_px_to_set(device_t dev, struct acpi_px *px, 99 struct cf_setting *set); 100 static void acpi_px_available(struct acpi_perf_softc *sc); 101 static void acpi_px_startup(void *arg); 102 static void acpi_px_notify(ACPI_HANDLE h, UINT32 notify, void *context); 103 static int acpi_px_settings(device_t dev, struct cf_setting *sets, 104 int *count, int *type); 105 static int acpi_px_set(device_t dev, const struct cf_setting *set); 106 static int acpi_px_get(device_t dev, struct cf_setting *set); 107 108 static device_method_t acpi_perf_methods[] = { 109 /* Device interface */ 110 DEVMETHOD(device_identify, acpi_perf_identify), 111 DEVMETHOD(device_probe, acpi_perf_probe), 112 DEVMETHOD(device_attach, acpi_perf_attach), 113 DEVMETHOD(device_detach, acpi_perf_detach), 114 115 /* cpufreq interface */ 116 DEVMETHOD(cpufreq_drv_set, acpi_px_set), 117 DEVMETHOD(cpufreq_drv_get, acpi_px_get), 118 DEVMETHOD(cpufreq_drv_settings, acpi_px_settings), 119 {0, 0} 120 }; 121 122 static driver_t acpi_perf_driver = { 123 "acpi_perf", 124 acpi_perf_methods, 125 sizeof(struct acpi_perf_softc), 126 }; 127 128 static devclass_t acpi_perf_devclass; 129 DRIVER_MODULE(acpi_perf, cpu, acpi_perf_driver, acpi_perf_devclass, 0, 0); 130 MODULE_DEPEND(acpi_perf, acpi, 1, 1, 1); 131 132 MALLOC_DEFINE(M_ACPIPERF, "acpi_perf", "ACPI Performance states"); 133 134 static void 135 acpi_perf_identify(driver_t *driver, device_t parent) 136 { 137 ACPI_HANDLE handle; 138 139 /* Make sure we're not being doubly invoked. */ 140 if (device_find_child(parent, "acpi_perf", -1) != NULL) 141 return; 142 143 /* Get the handle for the Processor object and check for perf states. */ 144 handle = acpi_get_handle(parent); 145 if (handle == NULL) 146 return; 147 if (ACPI_FAILURE(AcpiEvaluateObject(handle, "_PSS", NULL, NULL))) 148 return; 149 if (BUS_ADD_CHILD(parent, 0, "acpi_perf", -1) == NULL) 150 device_printf(parent, "add acpi_perf child failed\n"); 151 } 152 153 static int 154 acpi_perf_probe(device_t dev) 155 { 156 ACPI_HANDLE handle; 157 ACPI_OBJECT *pkg; 158 struct resource *res; 159 ACPI_BUFFER buf; 160 int error, rid, type; 161 162 /* 163 * Check the performance state registers. If they are of type 164 * "functional fixed hardware", we attach quietly since we will 165 * only be providing information on settings to other drivers. 166 */ 167 error = ENXIO; 168 handle = acpi_get_handle(dev); 169 buf.Pointer = NULL; 170 buf.Length = ACPI_ALLOCATE_BUFFER; 171 if (ACPI_FAILURE(AcpiEvaluateObject(handle, "_PCT", NULL, &buf))) 172 return (error); 173 pkg = (ACPI_OBJECT *)buf.Pointer; 174 if (ACPI_PKG_VALID(pkg, 2)) { 175 rid = 0; 176 error = acpi_PkgGas(dev, pkg, 0, &type, &rid, &res); 177 switch (error) { 178 case 0: 179 bus_release_resource(dev, type, rid, res); 180 device_set_desc(dev, "ACPI CPU Frequency Control"); 181 break; 182 case EOPNOTSUPP: 183 device_quiet(dev); 184 error = 0; 185 break; 186 } 187 } 188 AcpiOsFree(buf.Pointer); 189 190 return (error); 191 } 192 193 static int 194 acpi_perf_attach(device_t dev) 195 { 196 struct acpi_perf_softc *sc; 197 198 sc = device_get_softc(dev); 199 sc->dev = dev; 200 sc->handle = acpi_get_handle(dev); 201 sc->px_max_avail = 0; 202 sc->px_curr_state = CPUFREQ_VAL_UNKNOWN; 203 if (acpi_perf_evaluate(dev) != 0) 204 return (ENXIO); 205 cpufreq_register(dev); 206 AcpiOsQueueForExecution(OSD_PRIORITY_LO, acpi_px_startup, NULL); 207 208 return (0); 209 } 210 211 static int 212 acpi_perf_detach(device_t dev) 213 { 214 /* TODO: teardown registers, remove notify handler. */ 215 return (ENXIO); 216 } 217 218 /* Probe and setup any valid performance states (Px). */ 219 static int 220 acpi_perf_evaluate(device_t dev) 221 { 222 struct acpi_perf_softc *sc; 223 ACPI_BUFFER buf; 224 ACPI_OBJECT *pkg, *res; 225 ACPI_STATUS status; 226 int error, i, j; 227 uint32_t *p; 228 229 /* Get the control values and parameters for each state. */ 230 error = ENXIO; 231 sc = device_get_softc(dev); 232 buf.Pointer = NULL; 233 buf.Length = ACPI_ALLOCATE_BUFFER; 234 status = AcpiEvaluateObject(sc->handle, "_PSS", NULL, &buf); 235 if (ACPI_FAILURE(status)) 236 return (ENXIO); 237 238 pkg = (ACPI_OBJECT *)buf.Pointer; 239 if (!ACPI_PKG_VALID(pkg, 1)) { 240 device_printf(dev, "invalid top level _PSS package\n"); 241 goto out; 242 } 243 sc->px_count = pkg->Package.Count; 244 245 sc->px_states = malloc(sc->px_count * sizeof(struct acpi_px), 246 M_ACPIPERF, M_WAITOK | M_ZERO); 247 if (sc->px_states == NULL) 248 goto out; 249 250 /* 251 * Each state is a package of {CoreFreq, Power, TransitionLatency, 252 * BusMasterLatency, ControlVal, StatusVal}, sorted from highest 253 * performance to lowest. 254 */ 255 for (i = 0; i < sc->px_count; i++) { 256 res = &pkg->Package.Elements[i]; 257 if (!ACPI_PKG_VALID(res, 6)) { 258 device_printf(dev, "invalid _PSS package\n"); 259 continue; 260 } 261 p = &sc->px_states[i].core_freq; 262 for (j = 0; j < 6; j++, p++) 263 acpi_PkgInt32(res, j, p); 264 } 265 AcpiOsFree(buf.Pointer); 266 267 /* Get the control and status registers (one of each). */ 268 buf.Pointer = NULL; 269 buf.Length = ACPI_ALLOCATE_BUFFER; 270 status = AcpiEvaluateObject(sc->handle, "_PCT", NULL, &buf); 271 if (ACPI_FAILURE(status)) 272 goto out; 273 274 /* Check the package of two registers, each a Buffer in GAS format. */ 275 pkg = (ACPI_OBJECT *)buf.Pointer; 276 if (!ACPI_PKG_VALID(pkg, 2)) { 277 device_printf(dev, "invalid perf register package\n"); 278 goto out; 279 } 280 281 error = acpi_PkgGas(sc->dev, pkg, 0, &sc->perf_ctrl_type, &sc->px_rid, 282 &sc->perf_ctrl); 283 if (error) { 284 /* 285 * If the register is of type FFixedHW, we can only return 286 * info, we can't get or set new settings. 287 */ 288 if (error == EOPNOTSUPP) { 289 sc->info_only = TRUE; 290 error = 0; 291 } else 292 device_printf(dev, "failed in PERF_CTL attach\n"); 293 goto out; 294 } 295 sc->px_rid++; 296 297 error = acpi_PkgGas(sc->dev, pkg, 1, &sc->perf_sts_type, &sc->px_rid, 298 &sc->perf_status); 299 if (error) { 300 if (error == EOPNOTSUPP) { 301 sc->info_only = TRUE; 302 error = 0; 303 } else 304 device_printf(dev, "failed in PERF_STATUS attach\n"); 305 goto out; 306 } 307 sc->px_rid++; 308 309 /* Get our current limit and register for notifies. */ 310 acpi_px_available(sc); 311 AcpiInstallNotifyHandler(sc->handle, ACPI_DEVICE_NOTIFY, 312 acpi_px_notify, sc); 313 error = 0; 314 315 out: 316 if (error) { 317 if (sc->px_states) 318 free(sc->px_states, M_ACPIPERF); 319 sc->px_count = 0; 320 } 321 if (buf.Pointer) 322 AcpiOsFree(buf.Pointer); 323 return (error); 324 } 325 326 static void 327 acpi_px_startup(void *arg) 328 { 329 330 /* Signal to the platform that we are taking over CPU control. */ 331 if (AcpiGbl_FADT->PstateCnt == 0) 332 return; 333 ACPI_LOCK(acpi); 334 AcpiOsWritePort(AcpiGbl_FADT->SmiCmd, AcpiGbl_FADT->PstateCnt, 8); 335 ACPI_UNLOCK(acpi); 336 } 337 338 static void 339 acpi_px_notify(ACPI_HANDLE h, UINT32 notify, void *context) 340 { 341 struct acpi_perf_softc *sc; 342 343 sc = context; 344 if (notify != ACPI_NOTIFY_PERF_STATES) 345 return; 346 347 acpi_px_available(sc); 348 349 /* TODO: Implement notification when frequency changes. */ 350 } 351 352 /* 353 * Find the highest currently-supported performance state. 354 * This can be called at runtime (e.g., due to a docking event) at 355 * the request of a Notify on the processor object. 356 */ 357 static void 358 acpi_px_available(struct acpi_perf_softc *sc) 359 { 360 ACPI_STATUS status; 361 struct cf_setting set; 362 363 status = acpi_GetInteger(sc->handle, "_PPC", &sc->px_max_avail); 364 365 /* If the old state is too high, set current state to the new max. */ 366 if (ACPI_SUCCESS(status)) { 367 if (sc->px_curr_state != CPUFREQ_VAL_UNKNOWN && 368 sc->px_curr_state > sc->px_max_avail) { 369 acpi_px_to_set(sc->dev, 370 &sc->px_states[sc->px_max_avail], &set); 371 acpi_px_set(sc->dev, &set); 372 } 373 } else 374 sc->px_max_avail = 0; 375 } 376 377 static int 378 acpi_px_to_set(device_t dev, struct acpi_px *px, struct cf_setting *set) 379 { 380 381 if (px == NULL || set == NULL) 382 return (EINVAL); 383 384 set->freq = px->core_freq; 385 set->power = px->power; 386 /* XXX Include BM latency too? */ 387 set->lat = px->trans_lat; 388 set->volts = CPUFREQ_VAL_UNKNOWN; 389 set->dev = dev; 390 391 return (0); 392 } 393 394 static int 395 acpi_px_settings(device_t dev, struct cf_setting *sets, int *count, int *type) 396 { 397 struct acpi_perf_softc *sc; 398 int x, y; 399 400 sc = device_get_softc(dev); 401 if (sets == NULL || count == NULL) 402 return (EINVAL); 403 if (*count < sc->px_count - sc->px_max_avail) 404 return (ENOMEM); 405 406 /* Return a list of settings that are currently valid. */ 407 y = 0; 408 for (x = sc->px_max_avail; x < sc->px_count; x++, y++) 409 acpi_px_to_set(dev, &sc->px_states[x], &sets[y]); 410 *count = sc->px_count - sc->px_max_avail; 411 *type = CPUFREQ_TYPE_ABSOLUTE; 412 if (sc->info_only) 413 *type |= CPUFREQ_FLAG_INFO_ONLY; 414 415 return (0); 416 } 417 418 static int 419 acpi_px_set(device_t dev, const struct cf_setting *set) 420 { 421 struct acpi_perf_softc *sc; 422 int i, status, sts_val, tries; 423 424 if (set == NULL) 425 return (EINVAL); 426 sc = device_get_softc(dev); 427 428 /* If we can't set new states, return immediately. */ 429 if (sc->info_only) 430 return (ENXIO); 431 432 /* Look up appropriate state, based on frequency. */ 433 for (i = sc->px_max_avail; i < sc->px_count; i++) { 434 if (CPUFREQ_CMP(set->freq, sc->px_states[i].core_freq)) 435 break; 436 } 437 if (i == sc->px_count) 438 return (EINVAL); 439 440 /* Write the appropriate value to the register. */ 441 PX_SET_REG(sc->perf_ctrl, sc->px_states[i].ctrl_val); 442 443 /* Try for up to 1 ms to verify the desired state was selected. */ 444 sts_val = sc->px_states[i].sts_val; 445 for (tries = 0; tries < 100; tries++) { 446 status = PX_GET_REG(sc->perf_status); 447 if (status == sts_val) 448 break; 449 DELAY(10); 450 } 451 if (tries == 100) { 452 device_printf(dev, "Px transition to %d failed\n", 453 sc->px_states[i].core_freq); 454 return (ENXIO); 455 } 456 sc->px_curr_state = i; 457 458 return (0); 459 } 460 461 static int 462 acpi_px_get(device_t dev, struct cf_setting *set) 463 { 464 struct acpi_perf_softc *sc; 465 uint64_t rate; 466 int i; 467 struct pcpu *pc; 468 469 if (set == NULL) 470 return (EINVAL); 471 sc = device_get_softc(dev); 472 473 /* If we can't get new states, return immediately. */ 474 if (sc->info_only) 475 return (ENXIO); 476 477 /* If we've set the rate before, use the cached value. */ 478 if (sc->px_curr_state != CPUFREQ_VAL_UNKNOWN) { 479 acpi_px_to_set(dev, &sc->px_states[sc->px_curr_state], set); 480 return (0); 481 } 482 483 /* Otherwise, estimate and try to match against our settings. */ 484 pc = cpu_get_pcpu(dev); 485 if (pc == NULL) 486 return (ENXIO); 487 cpu_est_clockrate(pc->pc_cpuid, &rate); 488 rate /= 1000000; 489 for (i = 0; i < sc->px_count; i++) { 490 if (CPUFREQ_CMP(sc->px_states[i].core_freq, rate)) { 491 sc->px_curr_state = i; 492 acpi_px_to_set(dev, &sc->px_states[i], set); 493 break; 494 } 495 } 496 497 /* No match, give up. */ 498 if (i == sc->px_count) { 499 sc->px_curr_state = CPUFREQ_VAL_UNKNOWN; 500 set->freq = CPUFREQ_VAL_UNKNOWN; 501 } 502 503 return (0); 504 } 505