/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/cpu_acpi.h>
#include <sys/cpu_idle.h>

/*
 * List of the processor ACPI object types that are being used.
 */
typedef enum cpu_acpi_obj {
	PDC_OBJ = 0,
	PCT_OBJ,
	PSS_OBJ,
	PSD_OBJ,
	PPC_OBJ,
	PTC_OBJ,
	TSS_OBJ,
	TSD_OBJ,
	TPC_OBJ,
	CSD_OBJ,
} cpu_acpi_obj_t;

/*
 * Container to store object name.
 * Other attributes can be added in the future as necessary.
 */
typedef struct cpu_acpi_obj_attr {
	char *name;
} cpu_acpi_obj_attr_t;

/*
 * List of object attributes.
 * NOTE: Please keep the ordering of the list the same as cpu_acpi_obj_t.
 */
static cpu_acpi_obj_attr_t cpu_acpi_obj_attrs[] = {
	{"_PDC"},
	{"_PCT"},
	{"_PSS"},
	{"_PSD"},
	{"_PPC"},
	{"_PTC"},
	{"_TSS"},
	{"_TSD"},
	{"_TPC"},
	{"_CSD"}
};

/*
 * Cache the ACPI CPU control data objects.
 */
static int
cpu_acpi_cache_ctrl_regs(cpu_acpi_handle_t handle, cpu_acpi_obj_t objtype,
    cpu_acpi_ctrl_regs_t *regs)
{
	ACPI_BUFFER abuf;
	ACPI_OBJECT *obj;
	AML_RESOURCE_GENERIC_REGISTER *greg;
	int ret = -1;
	int i;

	/*
	 * Fetch the control registers (if present) for the CPU node.
	 * Since they are optional, non-existence is not a failure
	 * (we just consider it a fixed hardware case).
	 */
	abuf.Length = ACPI_ALLOCATE_BUFFER;
	abuf.Pointer = NULL;
	if (ACPI_FAILURE(AcpiEvaluateObjectTyped(handle->cs_handle,
	    cpu_acpi_obj_attrs[objtype].name, NULL, &abuf,
	    ACPI_TYPE_PACKAGE))) {
		regs[0].cr_addrspace_id = ACPI_ADR_SPACE_FIXED_HARDWARE;
		regs[1].cr_addrspace_id = ACPI_ADR_SPACE_FIXED_HARDWARE;
		return (1);
	}

	obj = abuf.Pointer;
	if (obj->Package.Count != 2) {
		cmn_err(CE_NOTE, "!cpu_acpi: %s package bad count %d.",
		    cpu_acpi_obj_attrs[objtype].name, obj->Package.Count);
		goto out;
	}

	/*
	 * Does the package look coherent?
	 */
	for (i = 0; i < obj->Package.Count; i++) {
		if (obj->Package.Elements[i].Type != ACPI_TYPE_BUFFER) {
			cmn_err(CE_NOTE, "!cpu_acpi: "
			    "Unexpected data in %s package.",
			    cpu_acpi_obj_attrs[objtype].name);
			goto out;
		}

		greg = (AML_RESOURCE_GENERIC_REGISTER *)
		    obj->Package.Elements[i].Buffer.Pointer;
		if (greg->DescriptorType !=
		    ACPI_RESOURCE_NAME_GENERIC_REGISTER) {
			cmn_err(CE_NOTE, "!cpu_acpi: "
			    "%s package has a format error.",
			    cpu_acpi_obj_attrs[objtype].name);
			goto out;
		}
		if (greg->ResourceLength !=
		    ACPI_AML_SIZE_LARGE(AML_RESOURCE_GENERIC_REGISTER)) {
			cmn_err(CE_NOTE, "!cpu_acpi: "
			    "%s package is not the right size.",
			    cpu_acpi_obj_attrs[objtype].name);
			goto out;
		}
		if (greg->AddressSpaceId != ACPI_ADR_SPACE_FIXED_HARDWARE &&
		    greg->AddressSpaceId != ACPI_ADR_SPACE_SYSTEM_IO) {
			cmn_err(CE_NOTE, "!cpu_acpi: %s contains unsupported "
			    "address space type %x",
			    cpu_acpi_obj_attrs[objtype].name,
			    greg->AddressSpaceId);
			goto out;
		}
	}

	/*
	 * Looks good!
	 */
	for (i = 0; i < obj->Package.Count; i++) {
		greg = (AML_RESOURCE_GENERIC_REGISTER *)
		    obj->Package.Elements[i].Buffer.Pointer;
		regs[i].cr_addrspace_id = greg->AddressSpaceId;
		regs[i].cr_width = greg->BitWidth;
		regs[i].cr_offset = greg->BitOffset;
		regs[i].cr_asize = greg->AccessSize;
		regs[i].cr_address = greg->Address;
	}
	ret = 0;
out:
	AcpiOsFree(abuf.Pointer);
	return (ret);
}

/*
 * Cache the ACPI _PCT data. The _PCT data defines the interface to use
 * when making power level transitions (i.e., system IO ports, fixed
 * hardware port, etc).
 */
static int
cpu_acpi_cache_pct(cpu_acpi_handle_t handle)
{
	cpu_acpi_pct_t *pct;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_PCT_CACHED);
	pct = &CPU_ACPI_PCT(handle)[0];
	if ((ret = cpu_acpi_cache_ctrl_regs(handle, PCT_OBJ, pct)) == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_PCT_CACHED);
	return (ret);
}

/*
 * Cache the ACPI _PTC data. The _PTC data defines the interface to use
 * when making T-state transitions (i.e., system IO ports, fixed
 * hardware port, etc).
 */
static int
cpu_acpi_cache_ptc(cpu_acpi_handle_t handle)
{
	cpu_acpi_ptc_t *ptc;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_PTC_CACHED);
	ptc = &CPU_ACPI_PTC(handle)[0];
	if ((ret = cpu_acpi_cache_ctrl_regs(handle, PTC_OBJ, ptc)) == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_PTC_CACHED);
	return (ret);
}
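/*
 * For reference, _PCT and _PTC are expected to evaluate to a two-element
 * package of Generic Register buffers: element 0 describes the control
 * register and element 1 the status register, each in either fixed-hardware
 * or system IO address space.  After cpu_acpi_cache_ctrl_regs() succeeds,
 * regs[0] holds the control register and regs[1] the status register
 * (cr_addrspace_id, cr_width, cr_offset, cr_asize, cr_address).
 */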
/*
 * Cache the ACPI CPU state dependency data objects.
 */
static int
cpu_acpi_cache_state_dependencies(cpu_acpi_handle_t handle,
    cpu_acpi_obj_t objtype, cpu_acpi_state_dependency_t *sd)
{
	ACPI_BUFFER abuf;
	ACPI_OBJECT *pkg, *elements;
	int number;
	int ret = -1;

	if (objtype == CSD_OBJ) {
		number = 6;
	} else {
		number = 5;
	}

	/*
	 * Fetch the dependencies (if present) for the CPU node.
	 * Since they are optional, non-existence is not a failure
	 * (it's up to the caller to determine how to handle non-existence).
	 */
	abuf.Length = ACPI_ALLOCATE_BUFFER;
	abuf.Pointer = NULL;
	if (ACPI_FAILURE(AcpiEvaluateObjectTyped(handle->cs_handle,
	    cpu_acpi_obj_attrs[objtype].name, NULL, &abuf,
	    ACPI_TYPE_PACKAGE))) {
		return (1);
	}

	pkg = abuf.Pointer;

	if (((objtype != CSD_OBJ) && (pkg->Package.Count != 1)) ||
	    ((objtype == CSD_OBJ) && (pkg->Package.Count != 1) &&
	    (pkg->Package.Count != 2))) {
		cmn_err(CE_NOTE, "!cpu_acpi: %s unsupported package "
		    "count %d.", cpu_acpi_obj_attrs[objtype].name,
		    pkg->Package.Count);
		goto out;
	}

	/*
	 * For the C-state domain, we assume C2 and C3 have the same
	 * domain information.
	 */
	if (pkg->Package.Elements[0].Type != ACPI_TYPE_PACKAGE ||
	    pkg->Package.Elements[0].Package.Count != number) {
		cmn_err(CE_NOTE, "!cpu_acpi: Unexpected data in %s package.",
		    cpu_acpi_obj_attrs[objtype].name);
		goto out;
	}
	elements = pkg->Package.Elements[0].Package.Elements;
	if (elements[0].Integer.Value != number ||
	    elements[1].Integer.Value != 0) {
		cmn_err(CE_NOTE, "!cpu_acpi: Unexpected %s revision.",
		    cpu_acpi_obj_attrs[objtype].name);
		goto out;
	}

	sd->sd_entries = elements[0].Integer.Value;
	sd->sd_revision = elements[1].Integer.Value;
	sd->sd_domain = elements[2].Integer.Value;
	sd->sd_type = elements[3].Integer.Value;
	sd->sd_num = elements[4].Integer.Value;
	if (objtype == CSD_OBJ) {
		sd->sd_index = elements[5].Integer.Value;
	}

	ret = 0;
out:
	AcpiOsFree(abuf.Pointer);
	return (ret);
}

/*
 * Cache the ACPI _PSD data. The _PSD data defines P-state CPU dependencies
 * (think CPU domains).
 */
static int
cpu_acpi_cache_psd(cpu_acpi_handle_t handle)
{
	cpu_acpi_psd_t *psd;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_PSD_CACHED);
	psd = &CPU_ACPI_PSD(handle);
	ret = cpu_acpi_cache_state_dependencies(handle, PSD_OBJ, psd);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_PSD_CACHED);
	return (ret);
}

/*
 * Cache the ACPI _TSD data. The _TSD data defines T-state CPU dependencies
 * (think CPU domains).
 */
static int
cpu_acpi_cache_tsd(cpu_acpi_handle_t handle)
{
	cpu_acpi_tsd_t *tsd;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_TSD_CACHED);
	tsd = &CPU_ACPI_TSD(handle);
	ret = cpu_acpi_cache_state_dependencies(handle, TSD_OBJ, tsd);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_TSD_CACHED);
	return (ret);
}
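/*
 * For reference, each dependency object (_PSD, _TSD, _CSD) evaluates to a
 * package containing one (or, for _CSD, possibly two) sub-packages of the
 * form { NumEntries, Revision, Domain, CoordType, NumProcessors }, with an
 * additional Index element for _CSD.  The fields map directly onto
 * sd_entries, sd_revision, sd_domain, sd_type, sd_num and sd_index above.
 */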
/*
 * Cache the ACPI _CSD data. The _CSD data defines C-state CPU dependencies
 * (think CPU domains).
 */
static int
cpu_acpi_cache_csd(cpu_acpi_handle_t handle)
{
	cpu_acpi_csd_t *csd;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_CSD_CACHED);
	csd = &CPU_ACPI_CSD(handle);
	ret = cpu_acpi_cache_state_dependencies(handle, CSD_OBJ, csd);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_CSD_CACHED);
	return (ret);
}

/*
 * Copy the validated _PSS entries into the handle's cached P-state array,
 * skipping duplicate entries.
 */
static void
cpu_acpi_cache_pstate(cpu_acpi_handle_t handle, ACPI_OBJECT *obj, int cnt)
{
	cpu_acpi_pstate_t *pstate;
	ACPI_OBJECT *q, *l;
	int i, j;

	CPU_ACPI_PSTATES_COUNT(handle) = cnt;
	CPU_ACPI_PSTATES(handle) = kmem_zalloc(CPU_ACPI_PSTATES_SIZE(cnt),
	    KM_SLEEP);
	pstate = (cpu_acpi_pstate_t *)CPU_ACPI_PSTATES(handle);
	for (i = 0, l = NULL; i < obj->Package.Count && cnt > 0; i++, l = q) {
		uint32_t *up;

		q = obj->Package.Elements[i].Package.Elements;

		/*
		 * Skip duplicate entries.
		 */
		if (l != NULL && l[0].Integer.Value == q[0].Integer.Value)
			continue;

		up = (uint32_t *)pstate;
		for (j = 0; j < CPU_ACPI_PSS_CNT; j++)
			up[j] = q[j].Integer.Value;
		pstate++;
		cnt--;
	}
}

/*
 * Copy the validated _TSS entries into the handle's cached T-state array,
 * skipping duplicate entries.
 */
static void
cpu_acpi_cache_tstate(cpu_acpi_handle_t handle, ACPI_OBJECT *obj, int cnt)
{
	cpu_acpi_tstate_t *tstate;
	ACPI_OBJECT *q, *l;
	int i, j;

	CPU_ACPI_TSTATES_COUNT(handle) = cnt;
	CPU_ACPI_TSTATES(handle) = kmem_zalloc(CPU_ACPI_TSTATES_SIZE(cnt),
	    KM_SLEEP);
	tstate = (cpu_acpi_tstate_t *)CPU_ACPI_TSTATES(handle);
	for (i = 0, l = NULL; i < obj->Package.Count && cnt > 0; i++, l = q) {
		uint32_t *up;

		q = obj->Package.Elements[i].Package.Elements;

		/*
		 * Skip duplicate entries.
		 */
		if (l != NULL && l[0].Integer.Value == q[0].Integer.Value)
			continue;

		up = (uint32_t *)tstate;
		for (j = 0; j < CPU_ACPI_TSS_CNT; j++)
			up[j] = q[j].Integer.Value;
		tstate++;
		cnt--;
	}
}
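/*
 * For illustration, each cached P-state record above is just the _PSS
 * package fields stored as consecutive uint32_t values in package order
 * (per the ACPI spec: core frequency in MHz, power in mW, transition
 * latency, bus master latency, control and status values).  A cached
 * table for a CPU with three P-states might look roughly like this
 * (the values are hypothetical):
 *
 *	freq	power	latency	bm-lat	control	status
 *	2400	35000	10	10	0x0	0x0
 *	1800	25000	10	10	0x1	0x1
 *	1200	15000	10	10	0x2	0x2
 *
 * T-state records are cached the same way from _TSS, with a frequency
 * throttle percentage rather than an absolute frequency in the first field.
 */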
/*
 * Cache the _PSS or _TSS data.
 */
static int
cpu_acpi_cache_supported_states(cpu_acpi_handle_t handle,
    cpu_acpi_obj_t objtype, int fcnt)
{
	ACPI_BUFFER abuf;
	ACPI_OBJECT *obj, *q, *l;
	boolean_t eot = B_FALSE;
	int ret = -1;
	int cnt;
	int i, j;

	/*
	 * Fetch the data (if present) for the CPU node.
	 */
	abuf.Length = ACPI_ALLOCATE_BUFFER;
	abuf.Pointer = NULL;
	if (ACPI_FAILURE(AcpiEvaluateObjectTyped(handle->cs_handle,
	    cpu_acpi_obj_attrs[objtype].name, NULL, &abuf,
	    ACPI_TYPE_PACKAGE))) {
		cmn_err(CE_NOTE, "!cpu_acpi: %s package not found.",
		    cpu_acpi_obj_attrs[objtype].name);
		return (1);
	}
	obj = abuf.Pointer;
	if (obj->Package.Count < 2) {
		cmn_err(CE_NOTE, "!cpu_acpi: %s package bad count %d.",
		    cpu_acpi_obj_attrs[objtype].name, obj->Package.Count);
		goto out;
	}

	/*
	 * Does the package look coherent?
	 */
	cnt = 0;
	for (i = 0, l = NULL; i < obj->Package.Count; i++, l = q) {
		if (obj->Package.Elements[i].Type != ACPI_TYPE_PACKAGE ||
		    obj->Package.Elements[i].Package.Count != fcnt) {
			cmn_err(CE_NOTE, "!cpu_acpi: "
			    "Unexpected data in %s package.",
			    cpu_acpi_obj_attrs[objtype].name);
			goto out;
		}

		q = obj->Package.Elements[i].Package.Elements;
		for (j = 0; j < fcnt; j++) {
			if (q[j].Type != ACPI_TYPE_INTEGER) {
				cmn_err(CE_NOTE, "!cpu_acpi: "
				    "%s element invalid (type)",
				    cpu_acpi_obj_attrs[objtype].name);
				goto out;
			}
		}

		/*
		 * Ignore duplicate entries.
		 */
		if (l != NULL && l[0].Integer.Value == q[0].Integer.Value)
			continue;

		/*
		 * Some supported state tables are larger than required
		 * and unused elements are filled with patterns
		 * of 0xff. Simply check here for frequency = 0xffff
		 * and stop counting if found.
		 */
		if (q[0].Integer.Value == 0xffff) {
			eot = B_TRUE;
			continue;
		}

		/*
		 * We should never find a valid entry after we've hit
		 * the end-of-table entry.
		 */
		if (eot) {
			cmn_err(CE_NOTE, "!cpu_acpi: "
			    "Unexpected data in %s package after eot.",
			    cpu_acpi_obj_attrs[objtype].name);
			goto out;
		}

		/*
		 * States must be defined in order from highest to lowest.
		 */
		if (l != NULL && l[0].Integer.Value < q[0].Integer.Value) {
			cmn_err(CE_NOTE, "!cpu_acpi: "
			    "%s package state definitions out of order.",
			    cpu_acpi_obj_attrs[objtype].name);
			goto out;
		}

		/*
		 * This entry passes.
		 */
		cnt++;
	}
	if (cnt == 0)
		goto out;

	/*
	 * Looks good; fill in the structure.
	 */
	ASSERT(objtype == PSS_OBJ || objtype == TSS_OBJ);
	(objtype == PSS_OBJ) ? cpu_acpi_cache_pstate(handle, obj, cnt) :
	    cpu_acpi_cache_tstate(handle, obj, cnt);

	ret = 0;
out:
	AcpiOsFree(abuf.Pointer);
	return (ret);
}

/*
 * Cache the _PSS data. The _PSS data defines the different power levels
 * supported by the CPU and the attributes associated with each power level
 * (i.e., frequency, voltage, etc.). The power levels are numbered from
 * highest to lowest. That is, the highest power level is _PSS entry 0
 * and the lowest power level is the last _PSS entry.
 */
static int
cpu_acpi_cache_pstates(cpu_acpi_handle_t handle)
{
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_PSS_CACHED);
	ret = cpu_acpi_cache_supported_states(handle, PSS_OBJ,
	    CPU_ACPI_PSS_CNT);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_PSS_CACHED);
	return (ret);
}

/*
 * Cache the _TSS data. The _TSS data defines the different frequency
 * throttle levels supported by the CPU and the attributes associated with
 * each throttle level (i.e., frequency throttle percentage, voltage, etc.).
 * The throttle levels are numbered from highest to lowest.
 */
static int
cpu_acpi_cache_tstates(cpu_acpi_handle_t handle)
{
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_TSS_CACHED);
	ret = cpu_acpi_cache_supported_states(handle, TSS_OBJ,
	    CPU_ACPI_TSS_CNT);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_TSS_CACHED);
	return (ret);
}

/*
 * Cache the ACPI CPU present capabilities data objects.
 */
static int
cpu_acpi_cache_present_capabilities(cpu_acpi_handle_t handle,
    cpu_acpi_obj_t objtype, cpu_acpi_present_capabilities_t *pc)
{
	ACPI_BUFFER abuf;
	ACPI_OBJECT *obj;

	/*
	 * Fetch the present capabilities object (if present) for the
	 * CPU node.  Since they are optional, non-existence is not a
	 * failure.
	 */
	abuf.Length = ACPI_ALLOCATE_BUFFER;
	abuf.Pointer = NULL;
	if (ACPI_FAILURE(AcpiEvaluateObject(handle->cs_handle,
	    cpu_acpi_obj_attrs[objtype].name, NULL, &abuf)) ||
	    abuf.Length == 0) {
		*pc = 0;
		return (1);
	}

	obj = (ACPI_OBJECT *)abuf.Pointer;
	*pc = obj->Integer.Value;
	AcpiOsFree(abuf.Pointer);
	return (0);
}
/*
 * Cache the _PPC data. The _PPC simply contains an integer value which
 * represents the highest power level that a CPU should transition to.
 * That is, it's an index into the array of _PSS entries and will be
 * greater than or equal to zero.
 */
void
cpu_acpi_cache_ppc(cpu_acpi_handle_t handle)
{
	cpu_acpi_ppc_t *ppc;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_PPC_CACHED);
	ppc = &CPU_ACPI_PPC(handle);
	ret = cpu_acpi_cache_present_capabilities(handle, PPC_OBJ, ppc);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_PPC_CACHED);
}

/*
 * Cache the _TPC data. The _TPC simply contains an integer value which
 * represents the throttle level that a CPU should transition to.
 * That is, it's an index into the array of _TSS entries and will be
 * greater than or equal to zero.
 */
void
cpu_acpi_cache_tpc(cpu_acpi_handle_t handle)
{
	cpu_acpi_tpc_t *tpc;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_TPC_CACHED);
	tpc = &CPU_ACPI_TPC(handle);
	ret = cpu_acpi_cache_present_capabilities(handle, TPC_OBJ, tpc);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_TPC_CACHED);
}

/*
 * Verify that a _CST entry uses a supported address space.
 */
int
cpu_acpi_verify_cstate(cpu_acpi_cstate_t *cstate)
{
	uint32_t addrspaceid = cstate->cs_addrspace_id;

	if ((addrspaceid != ACPI_ADR_SPACE_FIXED_HARDWARE) &&
	    (addrspaceid != ACPI_ADR_SPACE_SYSTEM_IO)) {
		cmn_err(CE_WARN, "!_CST: unsupported address space id"
		    ":C%d, type: %d\n", cstate->cs_type, addrspaceid);
		return (1);
	}
	return (0);
}
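/*
 * Cache the _CST data.  The _CST object evaluates to a package whose first
 * element is an integer count, followed by one sub-package per C-state of
 * the form { register buffer, type, latency, power }: the register buffer
 * is an AML Generic Register describing how the C-state is entered, type
 * is the C-state number (1 = C1, 2 = C2, 3 = C3), latency is the worst-case
 * entry/exit latency in microseconds, and power is the average power
 * consumption in milliwatts.  Invalid and duplicate entries are dropped
 * while caching.
 */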
int
cpu_acpi_cache_cst(cpu_acpi_handle_t handle)
{
	ACPI_BUFFER abuf;
	ACPI_OBJECT *obj;
	ACPI_INTEGER cnt;
	cpu_acpi_cstate_t *cstate, *p;
	int i, count;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_CST_CACHED);

	abuf.Length = ACPI_ALLOCATE_BUFFER;
	abuf.Pointer = NULL;

	if (ACPI_FAILURE(AcpiEvaluateObject(handle->cs_handle, "_CST",
	    NULL, &abuf))) {
		cmn_err(CE_NOTE, "!cpu_acpi: _CST evaluate failure");
		return (-1);
	}
	obj = (ACPI_OBJECT *)abuf.Pointer;
	if (obj->Package.Count < 2) {
		cmn_err(CE_NOTE, "!cpu_acpi: _CST package bad count %d.",
		    obj->Package.Count);
		AcpiOsFree(abuf.Pointer);
		return (-1);
	}

	/*
	 * Does the package look coherent?
	 */
	cnt = obj->Package.Elements[0].Integer.Value;
	if (cnt < 1 || cnt != obj->Package.Count - 1) {
		cmn_err(CE_NOTE, "!cpu_acpi: _CST invalid element count %d != "
		    "Package count %d\n",
		    (int)cnt, (int)obj->Package.Count - 1);
		AcpiOsFree(abuf.Pointer);
		return (-1);
	}

	CPU_ACPI_CSTATES_COUNT(handle) = (uint32_t)cnt;
	CPU_ACPI_CSTATES(handle) = kmem_zalloc(CPU_ACPI_CSTATES_SIZE(cnt),
	    KM_SLEEP);
	CPU_ACPI_BM_INFO(handle) = 0;
	cstate = (cpu_acpi_cstate_t *)CPU_ACPI_CSTATES(handle);
	p = cstate;

	for (i = 1, count = 1; i <= cnt; i++) {
		ACPI_OBJECT *pkg;
		AML_RESOURCE_GENERIC_REGISTER *reg;
		ACPI_OBJECT *element;

		pkg = &(obj->Package.Elements[i]);
		reg = (AML_RESOURCE_GENERIC_REGISTER *)
		    pkg->Package.Elements[0].Buffer.Pointer;
		cstate->cs_addrspace_id = reg->AddressSpaceId;
		cstate->cs_address = reg->Address;
		element = &(pkg->Package.Elements[1]);
		cstate->cs_type = element->Integer.Value;
		element = &(pkg->Package.Elements[2]);
		cstate->cs_latency = element->Integer.Value;
		element = &(pkg->Package.Elements[3]);
		cstate->cs_power = element->Integer.Value;

		if (cpu_acpi_verify_cstate(cstate)) {
			/*
			 * Ignore this entry if it's not valid.
			 */
			continue;
		}
		if (cstate == p) {
			cstate++;
		} else if (p->cs_type == cstate->cs_type) {
			/*
			 * If there are duplicate entries, keep the last
			 * one.  This handles:
			 * 1) buggy BIOSes that report completely duplicated
			 *    entries, and
			 * 2) the ACPI spec allowing the same C-state entry
			 *    with different power and latency; we use the
			 *    one with greater power savings.
			 */
			(void) memcpy(p, cstate, sizeof (cpu_acpi_cstate_t));
		} else {
			/*
			 * We got a valid entry; cache it in the next
			 * cstate structure.
			 */
			p = cstate++;
			count++;
		}
	}

	if (count < 2) {
		cmn_err(CE_NOTE, "!cpu_acpi: _CST invalid count %d < 2",
		    count);
		AcpiOsFree(abuf.Pointer);
		return (-1);
	}
	cstate = (cpu_acpi_cstate_t *)CPU_ACPI_CSTATES(handle);
	if (cstate[0].cs_type != CPU_ACPI_C1) {
		cmn_err(CE_NOTE, "!cpu_acpi: _CST first element type not C1: "
		    "%d", (int)cstate->cs_type);
		AcpiOsFree(abuf.Pointer);
		return (-1);
	}

	if (count != cnt)
		CPU_ACPI_CSTATES_COUNT(handle) = (uint32_t)count;

	AcpiOsFree(abuf.Pointer);
	CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_CST_CACHED);
	return (0);
}

/*
 * Cache the _PCT, _PSS, _PSD and _PPC data.
 */
int
cpu_acpi_cache_pstate_data(cpu_acpi_handle_t handle)
{
	if (cpu_acpi_cache_pct(handle) < 0) {
		cmn_err(CE_WARN, "!cpu_acpi: error parsing _PCT for "
		    "CPU %d", handle->cs_id);
		return (-1);
	}

	if (cpu_acpi_cache_pstates(handle) != 0) {
		cmn_err(CE_WARN, "!cpu_acpi: error parsing _PSS for "
		    "CPU %d", handle->cs_id);
		return (-1);
	}

	if (cpu_acpi_cache_psd(handle) < 0) {
		cmn_err(CE_WARN, "!cpu_acpi: error parsing _PSD for "
		    "CPU %d", handle->cs_id);
		return (-1);
	}

	cpu_acpi_cache_ppc(handle);

	return (0);
}

void
cpu_acpi_free_pstate_data(cpu_acpi_handle_t handle)
{
	if (handle != NULL) {
		if (CPU_ACPI_PSTATES(handle)) {
			kmem_free(CPU_ACPI_PSTATES(handle),
			    CPU_ACPI_PSTATES_SIZE(
			    CPU_ACPI_PSTATES_COUNT(handle)));
			CPU_ACPI_PSTATES(handle) = NULL;
		}
	}
}
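/*
 * A minimal usage sketch of the P-state interface (assuming the caller,
 * e.g. a CPU power management driver, already holds a valid cpu_t pointer
 * named "cp"; error handling is abbreviated):
 *
 *	cpu_acpi_handle_t hdl = cpu_acpi_init(cp);
 *	if (hdl != NULL && cpu_acpi_cache_pstate_data(hdl) == 0) {
 *		int *speeds;
 *		uint_t nspeeds = cpu_acpi_get_speeds(hdl, &speeds);
 *		... transition among the advertised speeds ...
 *		cpu_acpi_free_speeds(speeds, nspeeds);
 *		cpu_acpi_free_pstate_data(hdl);
 *	}
 *	cpu_acpi_fini(hdl);
 */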
/*
 * Cache the _PTC, _TSS, _TSD and _TPC data.
 */
int
cpu_acpi_cache_tstate_data(cpu_acpi_handle_t handle)
{
	if (cpu_acpi_cache_ptc(handle) < 0) {
		cmn_err(CE_WARN, "!cpu_acpi: error parsing _PTC for "
		    "CPU %d", handle->cs_id);
		return (-1);
	}

	if (cpu_acpi_cache_tstates(handle) != 0) {
		cmn_err(CE_WARN, "!cpu_acpi: error parsing _TSS for "
		    "CPU %d", handle->cs_id);
		return (-1);
	}

	if (cpu_acpi_cache_tsd(handle) < 0) {
		cmn_err(CE_WARN, "!cpu_acpi: error parsing _TSD for "
		    "CPU %d", handle->cs_id);
		return (-1);
	}

	cpu_acpi_cache_tpc(handle);

	return (0);
}

void
cpu_acpi_free_tstate_data(cpu_acpi_handle_t handle)
{
	if (handle != NULL) {
		if (CPU_ACPI_TSTATES(handle)) {
			kmem_free(CPU_ACPI_TSTATES(handle),
			    CPU_ACPI_TSTATES_SIZE(
			    CPU_ACPI_TSTATES_COUNT(handle)));
			CPU_ACPI_TSTATES(handle) = NULL;
		}
	}
}

/*
 * Cache the _CST data.
 */
int
cpu_acpi_cache_cstate_data(cpu_acpi_handle_t handle)
{
	if (cpu_acpi_cache_cst(handle) < 0) {
		cmn_err(CE_WARN, "!cpu_acpi: error parsing _CST for "
		    "CPU %d", handle->cs_id);
		return (-1);
	}

	if (cpu_acpi_cache_csd(handle) < 0) {
		cmn_err(CE_WARN, "!cpu_acpi: error parsing _CSD for "
		    "CPU %d", handle->cs_id);
		return (-1);
	}

	return (0);
}

void
cpu_acpi_free_cstate_data(cpu_acpi_handle_t handle)
{
	if (handle != NULL) {
		if (CPU_ACPI_CSTATES(handle)) {
			kmem_free(CPU_ACPI_CSTATES(handle),
			    CPU_ACPI_CSTATES_SIZE(
			    CPU_ACPI_CSTATES_COUNT(handle)));
			CPU_ACPI_CSTATES(handle) = NULL;
		}
	}
}

/*
 * Register a handler for processor change notifications.
 */
void
cpu_acpi_install_notify_handler(cpu_acpi_handle_t handle,
    ACPI_NOTIFY_HANDLER handler, void *ctx)
{
	if (ACPI_FAILURE(AcpiInstallNotifyHandler(handle->cs_handle,
	    ACPI_DEVICE_NOTIFY, handler, ctx)))
		cmn_err(CE_NOTE, "!cpu_acpi: Unable to register "
		    "notify handler for CPU");
}

/*
 * Remove a handler for processor change notifications.
 */
void
cpu_acpi_remove_notify_handler(cpu_acpi_handle_t handle,
    ACPI_NOTIFY_HANDLER handler)
{
	if (ACPI_FAILURE(AcpiRemoveNotifyHandler(handle->cs_handle,
	    ACPI_DEVICE_NOTIFY, handler)))
		cmn_err(CE_NOTE, "!cpu_acpi: Unable to remove "
		    "notify handler for CPU");
}

/*
 * Write _PDC.  The buffer passed to _PDC consists of the revision, the
 * number of capability words and the capability words themselves.
 */
int
cpu_acpi_write_pdc(cpu_acpi_handle_t handle, uint32_t revision, uint32_t count,
    uint32_t *capabilities)
{
	ACPI_OBJECT obj;
	ACPI_OBJECT_LIST list = { 1, &obj};
	uint32_t *buffer;
	uint32_t *bufptr;
	uint32_t bufsize;
	int i;

	bufsize = (count + 2) * sizeof (uint32_t);
	buffer = kmem_zalloc(bufsize, KM_SLEEP);
	buffer[0] = revision;
	buffer[1] = count;
	bufptr = &buffer[2];
	for (i = 0; i < count; i++)
		*bufptr++ = *capabilities++;

	obj.Type = ACPI_TYPE_BUFFER;
	obj.Buffer.Length = bufsize;
	obj.Buffer.Pointer = (void *)buffer;

	/*
	 * _PDC is optional, so don't log failure.
	 */
	if (ACPI_FAILURE(AcpiEvaluateObject(handle->cs_handle, "_PDC",
	    &list, NULL))) {
		kmem_free(buffer, bufsize);
		return (-1);
	}

	kmem_free(buffer, bufsize);
	return (0);
}
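/*
 * A minimal sketch of a _PDC write (the revision and capability bits below
 * are hypothetical placeholders; real callers pass the platform-specific
 * capability words they intend to advertise):
 *
 *	uint32_t caps[1] = { 0x1 };
 *	(void) cpu_acpi_write_pdc(hdl, 1, 1, caps);
 */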
/*
 * Write to a system IO port.
 */
int
cpu_acpi_write_port(ACPI_IO_ADDRESS address, uint32_t value, uint32_t width)
{
	if (ACPI_FAILURE(AcpiOsWritePort(address, value, width))) {
		cmn_err(CE_NOTE, "cpu_acpi: error writing system IO port "
		    "%lx.", (long)address);
		return (-1);
	}
	return (0);
}

/*
 * Read from a system IO port.
 */
int
cpu_acpi_read_port(ACPI_IO_ADDRESS address, uint32_t *value, uint32_t width)
{
	if (ACPI_FAILURE(AcpiOsReadPort(address, value, width))) {
		cmn_err(CE_NOTE, "cpu_acpi: error reading system IO port "
		    "%lx.", (long)address);
		return (-1);
	}
	return (0);
}

/*
 * Return the supported frequencies.
 */
uint_t
cpu_acpi_get_speeds(cpu_acpi_handle_t handle, int **speeds)
{
	cpu_acpi_pstate_t *pstate;
	int *hspeeds;
	uint_t nspeeds;
	int i;

	nspeeds = CPU_ACPI_PSTATES_COUNT(handle);
	pstate = (cpu_acpi_pstate_t *)CPU_ACPI_PSTATES(handle);
	hspeeds = kmem_zalloc(nspeeds * sizeof (int), KM_SLEEP);
	for (i = 0; i < nspeeds; i++) {
		hspeeds[i] = CPU_ACPI_FREQ(pstate);
		pstate++;
	}
	*speeds = hspeeds;
	return (nspeeds);
}

/*
 * Free resources allocated by cpu_acpi_get_speeds().
 */
void
cpu_acpi_free_speeds(int *speeds, uint_t nspeeds)
{
	kmem_free(speeds, nspeeds * sizeof (int));
}

/*
 * Return the number of cached C-states, or 1 if no _CST data is cached.
 */
uint_t
cpu_acpi_get_max_cstates(cpu_acpi_handle_t handle)
{
	if (CPU_ACPI_CSTATES(handle))
		return (CPU_ACPI_CSTATES_COUNT(handle));
	else
		return (1);
}

/*
 * Set an ACPI bit register.
 */
void
cpu_acpi_set_register(uint32_t bitreg, uint32_t value)
{
	AcpiSetRegister(bitreg, value);
}

/*
 * Get an ACPI bit register.
 */
void
cpu_acpi_get_register(uint32_t bitreg, uint32_t *value)
{
	AcpiGetRegister(bitreg, value);
}

/*
 * Map the CPU to an ACPI handle for its processor object.
 */
cpu_acpi_handle_t
cpu_acpi_init(cpu_t *cp)
{
	cpu_acpi_handle_t handle;

	handle = kmem_zalloc(sizeof (cpu_acpi_state_t), KM_SLEEP);

	if (ACPI_FAILURE(acpica_get_handle_cpu(cp->cpu_id,
	    &handle->cs_handle))) {
		kmem_free(handle, sizeof (cpu_acpi_state_t));
		return (NULL);
	}
	handle->cs_id = cp->cpu_id;
	return (handle);
}

/*
 * Free any resources.
 */
void
cpu_acpi_fini(cpu_acpi_handle_t handle)
{
	if (handle)
		kmem_free(handle, sizeof (cpu_acpi_state_t));
}
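/*
 * A minimal sketch of the C-state path (assuming "hdl" was obtained from
 * cpu_acpi_init() above and error handling is abbreviated):
 *
 *	if (cpu_acpi_cache_cstate_data(hdl) == 0) {
 *		uint_t max = cpu_acpi_get_max_cstates(hdl);
 *		... use the cached CPU_ACPI_CSTATES(hdl) entries to drive
 *		    idle state selection ...
 *		cpu_acpi_free_cstate_data(hdl);
 *	}
 */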