/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/cpu_acpi.h>
#include <sys/cpu_idle.h>
#include <sys/dtrace.h>
#include <sys/sdt.h>

/*
 * List of the processor ACPI object types that are being used.
 */
typedef enum cpu_acpi_obj {
	PDC_OBJ = 0,
	PCT_OBJ,
	PSS_OBJ,
	PSD_OBJ,
	PPC_OBJ,
	PTC_OBJ,
	TSS_OBJ,
	TSD_OBJ,
	TPC_OBJ,
	CSD_OBJ,
} cpu_acpi_obj_t;

/*
 * Container to store object name.
 * Other attributes can be added in the future as necessary.
 */
typedef struct cpu_acpi_obj_attr {
	char *name;
} cpu_acpi_obj_attr_t;

/*
 * List of object attributes.
 * NOTE: Please keep the ordering of the list the same as cpu_acpi_obj_t.
 */
static cpu_acpi_obj_attr_t cpu_acpi_obj_attrs[] = {
	{"_PDC"},
	{"_PCT"},
	{"_PSS"},
	{"_PSD"},
	{"_PPC"},
	{"_PTC"},
	{"_TSS"},
	{"_TSD"},
	{"_TPC"},
	{"_CSD"}
};

/*
 * To avoid user confusion about ACPI T-state related error log messages,
 * most of the T-state related error messages are emitted through DTrace
 * probes rather than the system log.
 */
#define	ERR_MSG_SIZE 128
static char err_msg[ERR_MSG_SIZE];

#define	PRINT_ERR_MSG(err_lvl, msg, obj_type) { \
	switch (obj_type) { \
	case (PTC_OBJ): \
	case (TSS_OBJ): \
	case (TSD_OBJ): \
	case (TPC_OBJ): \
		DTRACE_PROBE1(cpu_ts_err_msg, char *, msg); \
		break; \
	default: \
		cmn_err(err_lvl, "!%s", msg); \
		break; \
	} \
}


/*
 * Cache the ACPI CPU control data objects.
 */
static int
cpu_acpi_cache_ctrl_regs(cpu_acpi_handle_t handle, cpu_acpi_obj_t objtype,
    cpu_acpi_ctrl_regs_t *regs)
{
	ACPI_BUFFER abuf;
	ACPI_OBJECT *obj;
	AML_RESOURCE_GENERIC_REGISTER *greg;
	int ret = -1;
	int i;
	int p_res;

	/*
	 * Fetch the control registers (if present) for the CPU node.
	 * Since they are optional, non-existence is not a failure
	 * (we just consider it a fixed hardware case).
	 */
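	/*
	 * For reference (layout per the ACPI specification, not read from
	 * this system's tables): a conforming _PCT or _PTC object evaluates
	 * to a two-element package of buffers, each holding a Generic
	 * Register descriptor, e.g. in ASL:
	 *
	 *	Name (_PCT, Package () {
	 *		ResourceTemplate () { Register (FFixedHW, 0, 0, 0) },
	 *		ResourceTemplate () { Register (FFixedHW, 0, 0, 0) }
	 *	})
	 *
	 * Element 0 describes the control register and element 1 the status
	 * register; both are decoded into regs[] below.
	 */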
	abuf.Length = ACPI_ALLOCATE_BUFFER;
	abuf.Pointer = NULL;
	if (ACPI_FAILURE(AcpiEvaluateObjectTyped(handle->cs_handle,
	    cpu_acpi_obj_attrs[objtype].name, NULL, &abuf,
	    ACPI_TYPE_PACKAGE))) {
		regs[0].cr_addrspace_id = ACPI_ADR_SPACE_FIXED_HARDWARE;
		regs[1].cr_addrspace_id = ACPI_ADR_SPACE_FIXED_HARDWARE;
		return (1);
	}

	obj = abuf.Pointer;
	if (obj->Package.Count != 2) {
		p_res = snprintf(err_msg, ERR_MSG_SIZE, "cpu_acpi: %s package"
		    " bad count %d.", cpu_acpi_obj_attrs[objtype].name,
		    obj->Package.Count);
		if (p_res >= 0)
			PRINT_ERR_MSG(CE_NOTE, err_msg, objtype);
		goto out;
	}

	/*
	 * Does the package look coherent?
	 */
	for (i = 0; i < obj->Package.Count; i++) {
		if (obj->Package.Elements[i].Type != ACPI_TYPE_BUFFER) {
			p_res = snprintf(err_msg, ERR_MSG_SIZE, "cpu_acpi: "
			    "Unexpected data in %s package.",
			    cpu_acpi_obj_attrs[objtype].name);
			if (p_res >= 0)
				PRINT_ERR_MSG(CE_NOTE, err_msg, objtype);
			goto out;
		}

		greg = (AML_RESOURCE_GENERIC_REGISTER *)
		    obj->Package.Elements[i].Buffer.Pointer;
		if (greg->DescriptorType !=
		    ACPI_RESOURCE_NAME_GENERIC_REGISTER) {
			p_res = snprintf(err_msg, ERR_MSG_SIZE, "cpu_acpi: "
			    "%s package has format error.",
			    cpu_acpi_obj_attrs[objtype].name);
			if (p_res >= 0)
				PRINT_ERR_MSG(CE_NOTE, err_msg, objtype);
			goto out;
		}
		if (greg->ResourceLength !=
		    ACPI_AML_SIZE_LARGE(AML_RESOURCE_GENERIC_REGISTER)) {
			p_res = snprintf(err_msg, ERR_MSG_SIZE, "cpu_acpi: "
			    "%s package not right size.",
			    cpu_acpi_obj_attrs[objtype].name);
			if (p_res >= 0)
				PRINT_ERR_MSG(CE_NOTE, err_msg, objtype);
			goto out;
		}
		if (greg->AddressSpaceId != ACPI_ADR_SPACE_FIXED_HARDWARE &&
		    greg->AddressSpaceId != ACPI_ADR_SPACE_SYSTEM_IO) {
			p_res = snprintf(err_msg, ERR_MSG_SIZE, "cpu_acpi: "
			    "%s contains unsupported address space type %x",
			    cpu_acpi_obj_attrs[objtype].name,
			    greg->AddressSpaceId);
			if (p_res >= 0)
				PRINT_ERR_MSG(CE_NOTE, err_msg, objtype);
			goto out;
		}
	}

	/*
	 * Looks good!
	 */
	for (i = 0; i < obj->Package.Count; i++) {
		greg = (AML_RESOURCE_GENERIC_REGISTER *)
		    obj->Package.Elements[i].Buffer.Pointer;
		regs[i].cr_addrspace_id = greg->AddressSpaceId;
		regs[i].cr_width = greg->BitWidth;
		regs[i].cr_offset = greg->BitOffset;
		regs[i].cr_asize = greg->AccessSize;
		regs[i].cr_address = greg->Address;
	}
	ret = 0;
out:
	AcpiOsFree(abuf.Pointer);
	return (ret);
}

/*
 * Cache the ACPI _PCT data. The _PCT data defines the interface to use
 * when making power level transitions (i.e., system IO ports, fixed
 * hardware port, etc).
 */
static int
cpu_acpi_cache_pct(cpu_acpi_handle_t handle)
{
	cpu_acpi_pct_t *pct;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_PCT_CACHED);
	pct = &CPU_ACPI_PCT(handle)[0];
	if ((ret = cpu_acpi_cache_ctrl_regs(handle, PCT_OBJ, pct)) == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_PCT_CACHED);
	return (ret);
}

/*
 * Cache the ACPI _PTC data. The _PTC data defines the interface to use
 * when making T-state transitions (i.e., system IO ports, fixed
 * hardware port, etc).
 */
static int
cpu_acpi_cache_ptc(cpu_acpi_handle_t handle)
{
	cpu_acpi_ptc_t *ptc;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_PTC_CACHED);
	ptc = &CPU_ACPI_PTC(handle)[0];
	if ((ret = cpu_acpi_cache_ctrl_regs(handle, PTC_OBJ, ptc)) == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_PTC_CACHED);
	return (ret);
}

/*
 * Cache the ACPI CPU state dependency data objects.
 */
static int
cpu_acpi_cache_state_dependencies(cpu_acpi_handle_t handle,
    cpu_acpi_obj_t objtype, cpu_acpi_state_dependency_t *sd)
{
	ACPI_BUFFER abuf;
	ACPI_OBJECT *pkg, *elements;
	int number;
	int ret = -1;
	int p_res;

	if (objtype == CSD_OBJ) {
		number = 6;
	} else {
		number = 5;
	}

	/*
	 * Fetch the dependencies (if present) for the CPU node.
	 * Since they are optional, non-existence is not a failure
	 * (it's up to the caller to determine how to handle non-existence).
	 */
	abuf.Length = ACPI_ALLOCATE_BUFFER;
	abuf.Pointer = NULL;
	if (ACPI_FAILURE(AcpiEvaluateObjectTyped(handle->cs_handle,
	    cpu_acpi_obj_attrs[objtype].name, NULL, &abuf,
	    ACPI_TYPE_PACKAGE))) {
		return (1);
	}

	pkg = abuf.Pointer;

	if (((objtype != CSD_OBJ) && (pkg->Package.Count != 1)) ||
	    ((objtype == CSD_OBJ) && (pkg->Package.Count != 1) &&
	    (pkg->Package.Count != 2))) {
		p_res = snprintf(err_msg, ERR_MSG_SIZE, "cpu_acpi: %s "
		    "unsupported package count %d.",
		    cpu_acpi_obj_attrs[objtype].name, pkg->Package.Count);
		if (p_res >= 0)
			PRINT_ERR_MSG(CE_NOTE, err_msg, objtype);
		goto out;
	}

	/*
	 * For the C-state domain, we assume C2 and C3 have the same
	 * domain information.
	 */
	if (pkg->Package.Elements[0].Type != ACPI_TYPE_PACKAGE ||
	    pkg->Package.Elements[0].Package.Count != number) {
		p_res = snprintf(err_msg, ERR_MSG_SIZE, "cpu_acpi: "
		    "Unexpected data in %s package.",
		    cpu_acpi_obj_attrs[objtype].name);
		if (p_res >= 0)
			PRINT_ERR_MSG(CE_NOTE, err_msg, objtype);
		goto out;
	}

	elements = pkg->Package.Elements[0].Package.Elements;
	if (elements[0].Integer.Value != number ||
	    elements[1].Integer.Value != 0) {
		p_res = snprintf(err_msg, ERR_MSG_SIZE, "cpu_acpi: Unexpected"
		    " %s revision.", cpu_acpi_obj_attrs[objtype].name);
		if (p_res >= 0)
			PRINT_ERR_MSG(CE_NOTE, err_msg, objtype);
		goto out;
	}

	sd->sd_entries = elements[0].Integer.Value;
	sd->sd_revision = elements[1].Integer.Value;
	sd->sd_domain = elements[2].Integer.Value;
	sd->sd_type = elements[3].Integer.Value;
	sd->sd_num = elements[4].Integer.Value;
	if (objtype == CSD_OBJ) {
		sd->sd_index = elements[5].Integer.Value;
	}

	ret = 0;
out:
	AcpiOsFree(abuf.Pointer);
	return (ret);
}

/*
 * Cache the ACPI _PSD data. The _PSD data defines P-state CPU dependencies
 * (think CPU domains).
 */
static int
cpu_acpi_cache_psd(cpu_acpi_handle_t handle)
{
	cpu_acpi_psd_t *psd;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_PSD_CACHED);
	psd = &CPU_ACPI_PSD(handle);
	ret = cpu_acpi_cache_state_dependencies(handle, PSD_OBJ, psd);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_PSD_CACHED);
	return (ret);
}
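
/*
 * For reference (layout per the ACPI specification, not read from this
 * file): a _PSD object evaluates to a package containing one five-element
 * package, e.g. { 5, 0, Domain, CoordType, NumProcessors }, which
 * cpu_acpi_cache_state_dependencies() above copies into sd_entries,
 * sd_revision, sd_domain, sd_type and sd_num. _TSD has the same shape,
 * and _CSD adds a sixth element (the C-state index) stored in sd_index.
 */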

/*
 * Cache the ACPI _TSD data. The _TSD data defines T-state CPU dependencies
 * (think CPU domains).
 */
static int
cpu_acpi_cache_tsd(cpu_acpi_handle_t handle)
{
	cpu_acpi_tsd_t *tsd;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_TSD_CACHED);
	tsd = &CPU_ACPI_TSD(handle);
	ret = cpu_acpi_cache_state_dependencies(handle, TSD_OBJ, tsd);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_TSD_CACHED);
	return (ret);
}

/*
 * Cache the ACPI _CSD data. The _CSD data defines C-state CPU dependencies
 * (think CPU domains).
 */
static int
cpu_acpi_cache_csd(cpu_acpi_handle_t handle)
{
	cpu_acpi_csd_t *csd;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_CSD_CACHED);
	csd = &CPU_ACPI_CSD(handle);
	ret = cpu_acpi_cache_state_dependencies(handle, CSD_OBJ, csd);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_CSD_CACHED);
	return (ret);
}

/*
 * Copy the P-state entries from a validated _PSS package into the handle.
 */
static void
cpu_acpi_cache_pstate(cpu_acpi_handle_t handle, ACPI_OBJECT *obj, int cnt)
{
	cpu_acpi_pstate_t *pstate;
	ACPI_OBJECT *q, *l;
	int i, j;

	CPU_ACPI_PSTATES_COUNT(handle) = cnt;
	CPU_ACPI_PSTATES(handle) = kmem_zalloc(CPU_ACPI_PSTATES_SIZE(cnt),
	    KM_SLEEP);
	pstate = (cpu_acpi_pstate_t *)CPU_ACPI_PSTATES(handle);
	for (i = 0, l = NULL; i < obj->Package.Count && cnt > 0; i++, l = q) {
		uint32_t *up;

		q = obj->Package.Elements[i].Package.Elements;

		/*
		 * Skip duplicate entries.
		 */
		if (l != NULL && l[0].Integer.Value == q[0].Integer.Value)
			continue;

		up = (uint32_t *)pstate;
		for (j = 0; j < CPU_ACPI_PSS_CNT; j++)
			up[j] = q[j].Integer.Value;
		pstate++;
		cnt--;
	}
}

/*
 * Copy the T-state entries from a validated _TSS package into the handle.
 */
static void
cpu_acpi_cache_tstate(cpu_acpi_handle_t handle, ACPI_OBJECT *obj, int cnt)
{
	cpu_acpi_tstate_t *tstate;
	ACPI_OBJECT *q, *l;
	int i, j;

	CPU_ACPI_TSTATES_COUNT(handle) = cnt;
	CPU_ACPI_TSTATES(handle) = kmem_zalloc(CPU_ACPI_TSTATES_SIZE(cnt),
	    KM_SLEEP);
	tstate = (cpu_acpi_tstate_t *)CPU_ACPI_TSTATES(handle);
	for (i = 0, l = NULL; i < obj->Package.Count && cnt > 0; i++, l = q) {
		uint32_t *up;

		q = obj->Package.Elements[i].Package.Elements;

		/*
		 * Skip duplicate entries.
		 */
		if (l != NULL && l[0].Integer.Value == q[0].Integer.Value)
			continue;

		up = (uint32_t *)tstate;
		for (j = 0; j < CPU_ACPI_TSS_CNT; j++)
			up[j] = q[j].Integer.Value;
		tstate++;
		cnt--;
	}
}
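
/*
 * For reference (layout per the ACPI specification, not read from this
 * file): each _PSS entry is a package of six integers,
 * { CoreFrequency (MHz), Power (mW), TransitionLatency (us),
 * BusMasterLatency (us), Control, Status }, and each _TSS entry is a
 * package of five integers, { FreqPercent, Power, TransitionLatency,
 * Control, Status }. CPU_ACPI_PSS_CNT and CPU_ACPI_TSS_CNT are expected to
 * match those widths, which is why the entries above can be copied
 * field-for-field into uint32_t arrays.
 */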

/*
 * Cache the _PSS or _TSS data.
 */
static int
cpu_acpi_cache_supported_states(cpu_acpi_handle_t handle,
    cpu_acpi_obj_t objtype, int fcnt)
{
	ACPI_BUFFER abuf;
	ACPI_OBJECT *obj, *q, *l;
	boolean_t eot = B_FALSE;
	int ret = -1;
	int cnt;
	int i, j;
	int p_res;

	/*
	 * Fetch the data (if present) for the CPU node.
	 */
	abuf.Length = ACPI_ALLOCATE_BUFFER;
	abuf.Pointer = NULL;
	if (ACPI_FAILURE(AcpiEvaluateObjectTyped(handle->cs_handle,
	    cpu_acpi_obj_attrs[objtype].name, NULL, &abuf,
	    ACPI_TYPE_PACKAGE))) {
		p_res = snprintf(err_msg, ERR_MSG_SIZE, "cpu_acpi: %s "
		    "package not found.", cpu_acpi_obj_attrs[objtype].name);
		if (p_res >= 0)
			PRINT_ERR_MSG(CE_NOTE, err_msg, objtype);
		return (1);
	}
	obj = abuf.Pointer;
	if (obj->Package.Count < 2) {
		p_res = snprintf(err_msg, ERR_MSG_SIZE, "cpu_acpi: %s package"
		    " bad count %d.", cpu_acpi_obj_attrs[objtype].name,
		    obj->Package.Count);
		if (p_res >= 0)
			PRINT_ERR_MSG(CE_NOTE, err_msg, objtype);
		goto out;
	}

	/*
	 * Does the package look coherent?
	 */
	cnt = 0;
	for (i = 0, l = NULL; i < obj->Package.Count; i++, l = q) {
		if (obj->Package.Elements[i].Type != ACPI_TYPE_PACKAGE ||
		    obj->Package.Elements[i].Package.Count != fcnt) {
			p_res = snprintf(err_msg, ERR_MSG_SIZE, "cpu_acpi: "
			    "Unexpected data in %s package.",
			    cpu_acpi_obj_attrs[objtype].name);
			if (p_res >= 0)
				PRINT_ERR_MSG(CE_NOTE, err_msg, objtype);
			goto out;
		}

		q = obj->Package.Elements[i].Package.Elements;
		for (j = 0; j < fcnt; j++) {
			if (q[j].Type != ACPI_TYPE_INTEGER) {
				p_res = snprintf(err_msg, ERR_MSG_SIZE,
				    "cpu_acpi: %s element invalid (type)",
				    cpu_acpi_obj_attrs[objtype].name);
				if (p_res >= 0)
					PRINT_ERR_MSG(CE_NOTE, err_msg,
					    objtype);
				goto out;
			}
		}

		/*
		 * Ignore duplicate entries.
		 */
		if (l != NULL && l[0].Integer.Value == q[0].Integer.Value)
			continue;

		/*
		 * Some supported state tables are larger than required
		 * and unused elements are filled with patterns of 0xff.
		 * Simply check here for frequency = 0xffff and stop
		 * counting if found.
		 */
		if (q[0].Integer.Value == 0xffff) {
			eot = B_TRUE;
			continue;
		}

		/*
		 * We should never find a valid entry after we've hit
		 * the end-of-table entry.
		 */
		if (eot) {
			p_res = snprintf(err_msg, ERR_MSG_SIZE, "cpu_acpi: "
			    "Unexpected data in %s package after eot.",
			    cpu_acpi_obj_attrs[objtype].name);
			if (p_res >= 0)
				PRINT_ERR_MSG(CE_NOTE, err_msg, objtype);
			goto out;
		}

		/*
		 * States must be defined in order from highest to lowest.
		 */
		if (l != NULL && l[0].Integer.Value < q[0].Integer.Value) {
			p_res = snprintf(err_msg, ERR_MSG_SIZE, "cpu_acpi: "
			    "%s package state definitions out of order.",
			    cpu_acpi_obj_attrs[objtype].name);
			if (p_res >= 0)
				PRINT_ERR_MSG(CE_NOTE, err_msg, objtype);
			goto out;
		}

		/*
		 * This entry passes.
		 */
		cnt++;
	}
	if (cnt == 0)
		goto out;

	/*
	 * Yes, fill in the structure.
	 */
	ASSERT(objtype == PSS_OBJ || objtype == TSS_OBJ);
	(objtype == PSS_OBJ) ? cpu_acpi_cache_pstate(handle, obj, cnt) :
	    cpu_acpi_cache_tstate(handle, obj, cnt);

	ret = 0;
out:
	AcpiOsFree(abuf.Pointer);
	return (ret);
}
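
/*
 * Worked example (hypothetical table, for illustration only): a _TSS
 * padded to eight entries in which the last three report a frequency
 * percentage of 0xffff passes the checks above with cnt == 5, so only
 * the five real throttle states are cached; an entry duplicating its
 * predecessor's first field is skipped as well.
 */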

/*
 * Cache the _PSS data. The _PSS data defines the different power levels
 * supported by the CPU and the attributes associated with each power level
 * (i.e., frequency, voltage, etc.). The power levels are numbered from
 * highest to lowest. That is, the highest power level is _PSS entry 0
 * and the lowest power level is the last _PSS entry.
 */
static int
cpu_acpi_cache_pstates(cpu_acpi_handle_t handle)
{
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_PSS_CACHED);
	ret = cpu_acpi_cache_supported_states(handle, PSS_OBJ,
	    CPU_ACPI_PSS_CNT);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_PSS_CACHED);
	return (ret);
}

/*
 * Cache the _TSS data. The _TSS data defines the different frequency
 * throttle levels supported by the CPU and the attributes associated with
 * each throttle level (i.e., frequency throttle percentage, voltage, etc.).
 * The throttle levels are numbered from highest to lowest.
 */
static int
cpu_acpi_cache_tstates(cpu_acpi_handle_t handle)
{
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_TSS_CACHED);
	ret = cpu_acpi_cache_supported_states(handle, TSS_OBJ,
	    CPU_ACPI_TSS_CNT);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_TSS_CACHED);
	return (ret);
}

/*
 * Cache the ACPI CPU present capabilities data objects.
 */
static int
cpu_acpi_cache_present_capabilities(cpu_acpi_handle_t handle,
    cpu_acpi_obj_t objtype, cpu_acpi_present_capabilities_t *pc)
{
	ACPI_BUFFER abuf;
	ACPI_OBJECT *obj;

	/*
	 * Fetch the present capabilities object (if present) for the CPU
	 * node. Since it is optional, non-existence is not a failure.
	 */
	abuf.Length = ACPI_ALLOCATE_BUFFER;
	abuf.Pointer = NULL;
	if (ACPI_FAILURE(AcpiEvaluateObject(handle->cs_handle,
	    cpu_acpi_obj_attrs[objtype].name, NULL, &abuf)) ||
	    abuf.Length == 0) {
		*pc = 0;
		return (1);
	}

	obj = (ACPI_OBJECT *)abuf.Pointer;
	*pc = obj->Integer.Value;
	AcpiOsFree(abuf.Pointer);
	return (0);
}

/*
 * Cache the _PPC data. The _PPC simply contains an integer value which
 * represents the highest power level that a CPU should transition to.
 * That is, it's an index into the array of _PSS entries and will be
 * greater than or equal to zero.
 */
void
cpu_acpi_cache_ppc(cpu_acpi_handle_t handle)
{
	cpu_acpi_ppc_t *ppc;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_PPC_CACHED);
	ppc = &CPU_ACPI_PPC(handle);
	ret = cpu_acpi_cache_present_capabilities(handle, PPC_OBJ, ppc);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_PPC_CACHED);
}

/*
 * Cache the _TPC data. The _TPC simply contains an integer value which
 * represents the throttle level that a CPU should transition to.
 * That is, it's an index into the array of _TSS entries and will be
 * greater than or equal to zero.
 */
void
cpu_acpi_cache_tpc(cpu_acpi_handle_t handle)
{
	cpu_acpi_tpc_t *tpc;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_TPC_CACHED);
	tpc = &CPU_ACPI_TPC(handle);
	ret = cpu_acpi_cache_present_capabilities(handle, TPC_OBJ, tpc);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_TPC_CACHED);
}
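
/*
 * For reference (layout per the ACPI specification, not read from this
 * file): _CST evaluates to a package of the form
 * { Count, { Register (buffer), Type, Latency, Power }, ... }, where Type
 * is the C-state number (1 = C1, 2 = C2, 3 = C3), Latency is the
 * worst-case entry/exit latency in microseconds and Power is the average
 * power consumption in milliwatts. cpu_acpi_cache_cst() below validates
 * and caches these entries.
 */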

/*
 * Verify that a C-state entry uses a supported address space.
 */
int
cpu_acpi_verify_cstate(cpu_acpi_cstate_t *cstate)
{
	uint32_t addrspaceid = cstate->cs_addrspace_id;

	if ((addrspaceid != ACPI_ADR_SPACE_FIXED_HARDWARE) &&
	    (addrspaceid != ACPI_ADR_SPACE_SYSTEM_IO)) {
		cmn_err(CE_WARN, "!_CST: unsupported address space id"
		    ":C%d, type: %d\n", cstate->cs_type, addrspaceid);
		return (1);
	}
	return (0);
}

/*
 * Cache the ACPI _CST data (the processor's C-state table).
 */
int
cpu_acpi_cache_cst(cpu_acpi_handle_t handle)
{
	ACPI_BUFFER abuf;
	ACPI_OBJECT *obj;
	ACPI_INTEGER cnt;
	cpu_acpi_cstate_t *cstate, *p;
	size_t alloc_size;
	int i, count;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_CST_CACHED);

	abuf.Length = ACPI_ALLOCATE_BUFFER;
	abuf.Pointer = NULL;

	if (ACPI_FAILURE(AcpiEvaluateObjectTyped(handle->cs_handle, "_CST",
	    NULL, &abuf, ACPI_TYPE_PACKAGE))) {
		cmn_err(CE_NOTE, "!cpu_acpi: _CST evaluate failure");
		return (-1);
	}
	obj = (ACPI_OBJECT *)abuf.Pointer;
	if (obj->Package.Count < 2) {
		cmn_err(CE_NOTE, "!cpu_acpi: _CST package bad count %d.",
		    obj->Package.Count);
		AcpiOsFree(abuf.Pointer);
		return (-1);
	}

	/*
	 * Does the package look coherent?
	 */
	cnt = obj->Package.Elements[0].Integer.Value;
	if (cnt < 1 || cnt != obj->Package.Count - 1) {
		cmn_err(CE_NOTE, "!cpu_acpi: _CST invalid element count %d != "
		    "Package count %d\n",
		    (int)cnt, (int)obj->Package.Count - 1);
		AcpiOsFree(abuf.Pointer);
		return (-1);
	}

	CPU_ACPI_CSTATES_COUNT(handle) = (uint32_t)cnt;
	alloc_size = CPU_ACPI_CSTATES_SIZE(cnt);
	CPU_ACPI_CSTATES(handle) = kmem_zalloc(alloc_size, KM_SLEEP);
	CPU_ACPI_BM_INFO(handle) = 0;
	cstate = (cpu_acpi_cstate_t *)CPU_ACPI_CSTATES(handle);
	p = cstate;

	for (i = 1, count = 1; i <= cnt; i++) {
		ACPI_OBJECT *pkg;
		AML_RESOURCE_GENERIC_REGISTER *reg;
		ACPI_OBJECT *element;

		pkg = &(obj->Package.Elements[i]);
		reg = (AML_RESOURCE_GENERIC_REGISTER *)
		    pkg->Package.Elements[0].Buffer.Pointer;
		cstate->cs_addrspace_id = reg->AddressSpaceId;
		cstate->cs_address = reg->Address;
		element = &(pkg->Package.Elements[1]);
		cstate->cs_type = element->Integer.Value;
		element = &(pkg->Package.Elements[2]);
		cstate->cs_latency = element->Integer.Value;
		element = &(pkg->Package.Elements[3]);
		cstate->cs_power = element->Integer.Value;

		if (cpu_acpi_verify_cstate(cstate)) {
			/*
			 * Ignore this entry if it's not valid.
			 */
			continue;
		}
		if (cstate == p) {
			cstate++;
		} else if (p->cs_type == cstate->cs_type) {
			/*
			 * If there are duplicate entries, we keep the
			 * last one. This fixes:
			 * 1) some buggy BIOSes have exact duplicate entries;
			 * 2) the ACPI spec allows the same C-state entry with
			 *    different power and latency, and we use the one
			 *    with more power saving.
			 */
			(void) memcpy(p, cstate, sizeof (cpu_acpi_cstate_t));
		} else {
			/*
			 * We got a valid entry; cache it to the
			 * cstate structure.
			 */
			p = cstate++;
			count++;
		}
	}

	if (count < 2) {
		cmn_err(CE_NOTE, "!cpu_acpi: _CST invalid count %d < 2",
		    count);
		kmem_free(CPU_ACPI_CSTATES(handle), alloc_size);
		CPU_ACPI_CSTATES(handle) = NULL;
		CPU_ACPI_CSTATES_COUNT(handle) = (uint32_t)0;
		AcpiOsFree(abuf.Pointer);
		return (-1);
	}
	cstate = (cpu_acpi_cstate_t *)CPU_ACPI_CSTATES(handle);
	if (cstate[0].cs_type != CPU_ACPI_C1) {
		cmn_err(CE_NOTE, "!cpu_acpi: _CST first element type not C1: "
		    "%d", (int)cstate->cs_type);
		kmem_free(CPU_ACPI_CSTATES(handle), alloc_size);
		CPU_ACPI_CSTATES(handle) = NULL;
		CPU_ACPI_CSTATES_COUNT(handle) = (uint32_t)0;
		AcpiOsFree(abuf.Pointer);
		return (-1);
	}

	if (count != cnt) {
		void *orig = CPU_ACPI_CSTATES(handle);

		CPU_ACPI_CSTATES_COUNT(handle) = (uint32_t)count;
		CPU_ACPI_CSTATES(handle) = kmem_zalloc(
		    CPU_ACPI_CSTATES_SIZE(count), KM_SLEEP);
		(void) memcpy(CPU_ACPI_CSTATES(handle), orig,
		    CPU_ACPI_CSTATES_SIZE(count));
		kmem_free(orig, alloc_size);
	}

	AcpiOsFree(abuf.Pointer);
	CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_CST_CACHED);
	return (0);
}

/*
 * Cache the _PCT, _PSS, _PSD and _PPC data.
 */
int
cpu_acpi_cache_pstate_data(cpu_acpi_handle_t handle)
{
	if (cpu_acpi_cache_pct(handle) < 0) {
		cmn_err(CE_WARN, "!cpu_acpi: error parsing _PCT for "
		    "CPU %d", handle->cs_id);
		return (-1);
	}

	if (cpu_acpi_cache_pstates(handle) != 0) {
		cmn_err(CE_WARN, "!cpu_acpi: error parsing _PSS for "
		    "CPU %d", handle->cs_id);
		return (-1);
	}

	if (cpu_acpi_cache_psd(handle) < 0) {
		cmn_err(CE_WARN, "!cpu_acpi: error parsing _PSD for "
		    "CPU %d", handle->cs_id);
		return (-1);
	}

	cpu_acpi_cache_ppc(handle);

	return (0);
}

/*
 * Free the cached _PSS data.
 */
void
cpu_acpi_free_pstate_data(cpu_acpi_handle_t handle)
{
	if (handle != NULL) {
		if (CPU_ACPI_PSTATES(handle)) {
			kmem_free(CPU_ACPI_PSTATES(handle),
			    CPU_ACPI_PSTATES_SIZE(
			    CPU_ACPI_PSTATES_COUNT(handle)));
			CPU_ACPI_PSTATES(handle) = NULL;
		}
	}
}

/*
 * Cache the _PTC, _TSS, _TSD and _TPC data.
 */
int
cpu_acpi_cache_tstate_data(cpu_acpi_handle_t handle)
{
	int p_res;

	if (cpu_acpi_cache_ptc(handle) < 0) {
		p_res = snprintf(err_msg, ERR_MSG_SIZE, "cpu_acpi: error "
		    "parsing _PTC for CPU %d", handle->cs_id);
		if (p_res >= 0)
			PRINT_ERR_MSG(CE_NOTE, err_msg, PTC_OBJ);
		return (-1);
	}

	if (cpu_acpi_cache_tstates(handle) != 0) {
		p_res = snprintf(err_msg, ERR_MSG_SIZE, "cpu_acpi: error "
		    "parsing _TSS for CPU %d", handle->cs_id);
		if (p_res >= 0)
			PRINT_ERR_MSG(CE_NOTE, err_msg, TSS_OBJ);
		return (-1);
	}

	if (cpu_acpi_cache_tsd(handle) < 0) {
		p_res = snprintf(err_msg, ERR_MSG_SIZE, "cpu_acpi: error "
		    "parsing _TSD for CPU %d", handle->cs_id);
		if (p_res >= 0)
			PRINT_ERR_MSG(CE_NOTE, err_msg, TSD_OBJ);
		return (-1);
	}

	cpu_acpi_cache_tpc(handle);

	return (0);
}

/*
 * Free the cached _TSS data.
 */
void
cpu_acpi_free_tstate_data(cpu_acpi_handle_t handle)
{
	if (handle != NULL) {
		if (CPU_ACPI_TSTATES(handle)) {
			kmem_free(CPU_ACPI_TSTATES(handle),
			    CPU_ACPI_TSTATES_SIZE(
			    CPU_ACPI_TSTATES_COUNT(handle)));
			CPU_ACPI_TSTATES(handle) = NULL;
		}
	}
}

/*
 * Cache the _CST and _CSD data.
 */
int
cpu_acpi_cache_cstate_data(cpu_acpi_handle_t handle)
{
	if (cpu_acpi_cache_cst(handle) < 0) {
		cmn_err(CE_WARN, "!cpu_acpi: error parsing _CST for "
		    "CPU %d", handle->cs_id);
		return (-1);
	}

	if (cpu_acpi_cache_csd(handle) < 0) {
		cmn_err(CE_WARN, "!cpu_acpi: error parsing _CSD for "
		    "CPU %d", handle->cs_id);
		return (-1);
	}

	return (0);
}

/*
 * Free the cached _CST data.
 */
void
cpu_acpi_free_cstate_data(cpu_acpi_handle_t handle)
{
	if (handle != NULL) {
		if (CPU_ACPI_CSTATES(handle)) {
			kmem_free(CPU_ACPI_CSTATES(handle),
			    CPU_ACPI_CSTATES_SIZE(
			    CPU_ACPI_CSTATES_COUNT(handle)));
			CPU_ACPI_CSTATES(handle) = NULL;
		}
	}
}

/*
 * Register a handler for processor change notifications.
 */
void
cpu_acpi_install_notify_handler(cpu_acpi_handle_t handle,
    ACPI_NOTIFY_HANDLER handler, void *ctx)
{
	if (ACPI_FAILURE(AcpiInstallNotifyHandler(handle->cs_handle,
	    ACPI_DEVICE_NOTIFY, handler, ctx)))
		cmn_err(CE_NOTE, "!cpu_acpi: Unable to register "
		    "notify handler for CPU");
}

/*
 * Remove a handler for processor change notifications.
 */
void
cpu_acpi_remove_notify_handler(cpu_acpi_handle_t handle,
    ACPI_NOTIFY_HANDLER handler)
{
	if (ACPI_FAILURE(AcpiRemoveNotifyHandler(handle->cs_handle,
	    ACPI_DEVICE_NOTIFY, handler)))
		cmn_err(CE_NOTE, "!cpu_acpi: Unable to remove "
		    "notify handler for CPU");
}

/*
 * Write _PDC. The _PDC method passes the OS's processor driver
 * capabilities to the platform as a buffer of
 * { revision, count, capability word(s) }.
 */
int
cpu_acpi_write_pdc(cpu_acpi_handle_t handle, uint32_t revision, uint32_t count,
    uint32_t *capabilities)
{
	ACPI_OBJECT obj;
	ACPI_OBJECT_LIST list = { 1, &obj };
	uint32_t *buffer;
	uint32_t *bufptr;
	uint32_t bufsize;
	int i;

	bufsize = (count + 2) * sizeof (uint32_t);
	buffer = kmem_zalloc(bufsize, KM_SLEEP);
	buffer[0] = revision;
	buffer[1] = count;
	bufptr = &buffer[2];
	for (i = 0; i < count; i++)
		*bufptr++ = *capabilities++;

	obj.Type = ACPI_TYPE_BUFFER;
	obj.Buffer.Length = bufsize;
	obj.Buffer.Pointer = (void *)buffer;

	/*
	 * _PDC is optional, so don't log failure.
	 */
	if (ACPI_FAILURE(AcpiEvaluateObject(handle->cs_handle, "_PDC",
	    &list, NULL))) {
		kmem_free(buffer, bufsize);
		return (-1);
	}

	kmem_free(buffer, bufsize);
	return (0);
}
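
/*
 * Hypothetical usage sketch (values are illustrative only): a CPU driver
 * advertising two capability words might do
 *
 *	uint32_t caps[2] = { 0x1, 0x0 };
 *	(void) cpu_acpi_write_pdc(handle, 1, 2, caps);
 *
 * Since _PDC is optional, a failure return need not be treated as fatal.
 */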

/*
 * Write to a system IO port.
 */
int
cpu_acpi_write_port(ACPI_IO_ADDRESS address, uint32_t value, uint32_t width)
{
	if (ACPI_FAILURE(AcpiOsWritePort(address, value, width))) {
		cmn_err(CE_NOTE, "cpu_acpi: error writing system IO port "
		    "%lx.", (long)address);
		return (-1);
	}
	return (0);
}

/*
 * Read from a system IO port.
 */
int
cpu_acpi_read_port(ACPI_IO_ADDRESS address, uint32_t *value, uint32_t width)
{
	if (ACPI_FAILURE(AcpiOsReadPort(address, value, width))) {
		cmn_err(CE_NOTE, "cpu_acpi: error reading system IO port "
		    "%lx.", (long)address);
		return (-1);
	}
	return (0);
}

/*
 * Return the supported frequencies.
 */
uint_t
cpu_acpi_get_speeds(cpu_acpi_handle_t handle, int **speeds)
{
	cpu_acpi_pstate_t *pstate;
	int *hspeeds;
	uint_t nspeeds;
	int i;

	nspeeds = CPU_ACPI_PSTATES_COUNT(handle);
	pstate = (cpu_acpi_pstate_t *)CPU_ACPI_PSTATES(handle);
	hspeeds = kmem_zalloc(nspeeds * sizeof (int), KM_SLEEP);
	for (i = 0; i < nspeeds; i++) {
		hspeeds[i] = CPU_ACPI_FREQ(pstate);
		pstate++;
	}
	*speeds = hspeeds;
	return (nspeeds);
}

/*
 * Free resources allocated by cpu_acpi_get_speeds().
 */
void
cpu_acpi_free_speeds(int *speeds, uint_t nspeeds)
{
	kmem_free(speeds, nspeeds * sizeof (int));
}

/*
 * Return the number of cached C-states (1 if no _CST data is cached).
 */
uint_t
cpu_acpi_get_max_cstates(cpu_acpi_handle_t handle)
{
	if (CPU_ACPI_CSTATES(handle))
		return (CPU_ACPI_CSTATES_COUNT(handle));
	else
		return (1);
}

/*
 * Set an ACPI bit register.
 */
void
cpu_acpi_set_register(uint32_t bitreg, uint32_t value)
{
	AcpiWriteBitRegister(bitreg, value);
}

/*
 * Read an ACPI bit register.
 */
void
cpu_acpi_get_register(uint32_t bitreg, uint32_t *value)
{
	AcpiReadBitRegister(bitreg, value);
}

/*
 * Map the CPU to an ACPI handle for the device and allocate the
 * per-CPU ACPI state.
 */
cpu_acpi_handle_t
cpu_acpi_init(cpu_t *cp)
{
	cpu_acpi_handle_t handle;

	handle = kmem_zalloc(sizeof (cpu_acpi_state_t), KM_SLEEP);

	if (ACPI_FAILURE(acpica_get_handle_cpu(cp->cpu_id,
	    &handle->cs_handle))) {
		kmem_free(handle, sizeof (cpu_acpi_state_t));
		return (NULL);
	}
	handle->cs_id = cp->cpu_id;
	return (handle);
}

/*
 * Free any resources allocated by cpu_acpi_init().
 */
void
cpu_acpi_fini(cpu_acpi_handle_t handle)
{
	if (handle)
		kmem_free(handle, sizeof (cpu_acpi_state_t));
}
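
/*
 * Hypothetical usage sketch (for illustration only; the real consumers are
 * the CPU power management drivers, which live outside this file):
 *
 *	cpu_acpi_handle_t hdl = cpu_acpi_init(cp);
 *	int *speeds;
 *	uint_t nspeeds;
 *
 *	if (hdl != NULL && cpu_acpi_cache_pstate_data(hdl) == 0) {
 *		nspeeds = cpu_acpi_get_speeds(hdl, &speeds);
 *		(program P-states here using the cached _PCT/_PSS data)
 *		cpu_acpi_free_speeds(speeds, nspeeds);
 *		cpu_acpi_free_pstate_data(hdl);
 *	}
 *	cpu_acpi_fini(hdl);
 */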