/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/cpu_acpi.h>
#include <sys/cpu_idle.h>
#include <sys/dtrace.h>
#include <sys/sdt.h>

/*
 * List of the processor ACPI object types that are being used.
 */
typedef enum cpu_acpi_obj {
	PDC_OBJ = 0,
	PCT_OBJ,
	PSS_OBJ,
	PSD_OBJ,
	PPC_OBJ,
	PTC_OBJ,
	TSS_OBJ,
	TSD_OBJ,
	TPC_OBJ,
	CSD_OBJ,
} cpu_acpi_obj_t;

/*
 * Container to store object name.
 * Other attributes can be added in the future as necessary.
 */
typedef struct cpu_acpi_obj_attr {
	char *name;
} cpu_acpi_obj_attr_t;

/*
 * List of object attributes.
 * NOTE: Please keep the ordering of the list the same as cpu_acpi_obj_t.
 */
static cpu_acpi_obj_attr_t cpu_acpi_obj_attrs[] = {
	{"_PDC"},
	{"_PCT"},
	{"_PSS"},
	{"_PSD"},
	{"_PPC"},
	{"_PTC"},
	{"_TSS"},
	{"_TSD"},
	{"_TPC"},
	{"_CSD"}
};

/*
 * To avoid user confusion about ACPI T-state related error log messages,
 * most of the T-state related error messages are reported only through
 * DTrace probes rather than the console.
 */
#define	ERR_MSG_SIZE 128
static char err_msg[ERR_MSG_SIZE];

#define	PRINT_ERR_MSG(err_lvl, msg, obj_type) { \
	switch (obj_type) {\
	case (PTC_OBJ): \
	case (TSS_OBJ): \
	case (TSD_OBJ): \
	case (TPC_OBJ): \
		DTRACE_PROBE1(cpu_ts_err_msg, char *, msg); \
		break; \
	default: \
		cmn_err(err_lvl, "%s", msg); \
		break; \
	} \
}


/*
 * Cache the ACPI CPU control data objects.
 */
static int
cpu_acpi_cache_ctrl_regs(cpu_acpi_handle_t handle, cpu_acpi_obj_t objtype,
    cpu_acpi_ctrl_regs_t *regs)
{
	ACPI_BUFFER abuf;
	ACPI_OBJECT *obj;
	AML_RESOURCE_GENERIC_REGISTER *greg;
	int ret = -1;
	int i;
	int p_res;

	/*
	 * Fetch the control registers (if present) for the CPU node.
	 * Since they are optional, non-existence is not a failure
	 * (we just consider it a fixed hardware case).
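	 * When the object is absent, both registers are flagged as fixed
	 * hardware; per the ACPI specification, element 0 of the package
	 * describes the control register and element 1 the status
	 * register.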
	 */
	abuf.Length = ACPI_ALLOCATE_BUFFER;
	abuf.Pointer = NULL;
	if (ACPI_FAILURE(AcpiEvaluateObjectTyped(handle->cs_handle,
	    cpu_acpi_obj_attrs[objtype].name, NULL, &abuf,
	    ACPI_TYPE_PACKAGE))) {
		regs[0].cr_addrspace_id = ACPI_ADR_SPACE_FIXED_HARDWARE;
		regs[1].cr_addrspace_id = ACPI_ADR_SPACE_FIXED_HARDWARE;
		return (1);
	}

	obj = abuf.Pointer;
	if (obj->Package.Count != 2) {
		p_res = snprintf(err_msg, ERR_MSG_SIZE, "!cpu_acpi: %s package"
		    " bad count %d.", cpu_acpi_obj_attrs[objtype].name,
		    obj->Package.Count);
		if (p_res >= 0)
			PRINT_ERR_MSG(CE_NOTE, err_msg, objtype);

		goto out;
	}

	/*
	 * Does the package look coherent?
	 */
	for (i = 0; i < obj->Package.Count; i++) {
		if (obj->Package.Elements[i].Type != ACPI_TYPE_BUFFER) {
			p_res = snprintf(err_msg, ERR_MSG_SIZE, "!cpu_acpi: "
			    "Unexpected data in %s package.",
			    cpu_acpi_obj_attrs[objtype].name);
			if (p_res >= 0)
				PRINT_ERR_MSG(CE_NOTE, err_msg, objtype);
			goto out;
		}

		greg = (AML_RESOURCE_GENERIC_REGISTER *)
		    obj->Package.Elements[i].Buffer.Pointer;
		if (greg->DescriptorType !=
		    ACPI_RESOURCE_NAME_GENERIC_REGISTER) {
			p_res = snprintf(err_msg, ERR_MSG_SIZE, "!cpu_acpi: "
			    "%s package has format error.",
			    cpu_acpi_obj_attrs[objtype].name);
			if (p_res >= 0)
				PRINT_ERR_MSG(CE_NOTE, err_msg, objtype);
			goto out;
		}
		if (greg->ResourceLength !=
		    ACPI_AML_SIZE_LARGE(AML_RESOURCE_GENERIC_REGISTER)) {
			p_res = snprintf(err_msg, ERR_MSG_SIZE, "!cpu_acpi: "
			    "%s package not right size.",
			    cpu_acpi_obj_attrs[objtype].name);
			if (p_res >= 0)
				PRINT_ERR_MSG(CE_NOTE, err_msg, objtype);
			goto out;
		}
		if (greg->AddressSpaceId != ACPI_ADR_SPACE_FIXED_HARDWARE &&
		    greg->AddressSpaceId != ACPI_ADR_SPACE_SYSTEM_IO) {
			p_res = snprintf(err_msg, ERR_MSG_SIZE, "!cpu_acpi: "
			    "%s contains unsupported address space type %x",
			    cpu_acpi_obj_attrs[objtype].name,
			    greg->AddressSpaceId);
			if (p_res >= 0)
				PRINT_ERR_MSG(CE_NOTE, err_msg, objtype);
			goto out;
		}
	}

	/*
	 * Looks good!
	 */
	for (i = 0; i < obj->Package.Count; i++) {
		greg = (AML_RESOURCE_GENERIC_REGISTER *)
		    obj->Package.Elements[i].Buffer.Pointer;
		regs[i].cr_addrspace_id = greg->AddressSpaceId;
		regs[i].cr_width = greg->BitWidth;
		regs[i].cr_offset = greg->BitOffset;
		regs[i].cr_asize = greg->AccessSize;
		regs[i].cr_address = greg->Address;
	}
	ret = 0;
out:
	AcpiOsFree(abuf.Pointer);
	return (ret);
}

/*
 * Cache the ACPI _PCT data. The _PCT data defines the interface to use
 * when making power level transitions (i.e., system IO ports, fixed
 * hardware port, etc).
 */
static int
cpu_acpi_cache_pct(cpu_acpi_handle_t handle)
{
	cpu_acpi_pct_t *pct;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_PCT_CACHED);
	pct = &CPU_ACPI_PCT(handle)[0];
	if ((ret = cpu_acpi_cache_ctrl_regs(handle, PCT_OBJ, pct)) == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_PCT_CACHED);
	return (ret);
}

/*
 * Cache the ACPI _PTC data. The _PTC data defines the interface to use
 * when making T-state transitions (i.e., system IO ports, fixed
 * hardware port, etc).
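 *
 * As with _PCT, a missing _PTC object is treated as the fixed hardware
 * case by cpu_acpi_cache_ctrl_regs().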
 */
static int
cpu_acpi_cache_ptc(cpu_acpi_handle_t handle)
{
	cpu_acpi_ptc_t *ptc;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_PTC_CACHED);
	ptc = &CPU_ACPI_PTC(handle)[0];
	if ((ret = cpu_acpi_cache_ctrl_regs(handle, PTC_OBJ, ptc)) == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_PTC_CACHED);
	return (ret);
}

/*
 * Cache the ACPI CPU state dependency data objects.
 */
static int
cpu_acpi_cache_state_dependencies(cpu_acpi_handle_t handle,
    cpu_acpi_obj_t objtype, cpu_acpi_state_dependency_t *sd)
{
	ACPI_BUFFER abuf;
	ACPI_OBJECT *pkg, *elements;
	int number;
	int ret = -1;
	int p_res;

	if (objtype == CSD_OBJ) {
		number = 6;
	} else {
		number = 5;
	}
	/*
	 * Fetch the dependencies (if present) for the CPU node.
	 * Since they are optional, non-existence is not a failure
	 * (it's up to the caller to determine how to handle non-existence).
	 */
	abuf.Length = ACPI_ALLOCATE_BUFFER;
	abuf.Pointer = NULL;
	if (ACPI_FAILURE(AcpiEvaluateObjectTyped(handle->cs_handle,
	    cpu_acpi_obj_attrs[objtype].name, NULL, &abuf,
	    ACPI_TYPE_PACKAGE))) {
		return (1);
	}

	pkg = abuf.Pointer;

	if (((objtype != CSD_OBJ) && (pkg->Package.Count != 1)) ||
	    ((objtype == CSD_OBJ) && (pkg->Package.Count != 1) &&
	    (pkg->Package.Count != 2))) {
		p_res = snprintf(err_msg, ERR_MSG_SIZE, "!cpu_acpi: %s "
		    "unsupported package count %d.",
		    cpu_acpi_obj_attrs[objtype].name, pkg->Package.Count);
		if (p_res >= 0)
			PRINT_ERR_MSG(CE_NOTE, err_msg, objtype);
		goto out;
	}

	/*
	 * For C-state domain, we assume C2 and C3 have the same
	 * domain information.
	 */
	if (pkg->Package.Elements[0].Type != ACPI_TYPE_PACKAGE ||
	    pkg->Package.Elements[0].Package.Count != number) {
		p_res = snprintf(err_msg, ERR_MSG_SIZE, "!cpu_acpi: "
		    "Unexpected data in %s package.",
		    cpu_acpi_obj_attrs[objtype].name);
		if (p_res >= 0)
			PRINT_ERR_MSG(CE_NOTE, err_msg, objtype);
		goto out;
	}
	elements = pkg->Package.Elements[0].Package.Elements;
	if (elements[0].Integer.Value != number ||
	    elements[1].Integer.Value != 0) {
		p_res = snprintf(err_msg, ERR_MSG_SIZE, "!cpu_acpi: Unexpected"
		    " %s revision.", cpu_acpi_obj_attrs[objtype].name);
		if (p_res >= 0)
			PRINT_ERR_MSG(CE_NOTE, err_msg, objtype);
		goto out;
	}

	sd->sd_entries = elements[0].Integer.Value;
	sd->sd_revision = elements[1].Integer.Value;
	sd->sd_domain = elements[2].Integer.Value;
	sd->sd_type = elements[3].Integer.Value;
	sd->sd_num = elements[4].Integer.Value;
	if (objtype == CSD_OBJ) {
		sd->sd_index = elements[5].Integer.Value;
	}

	ret = 0;
out:
	AcpiOsFree(abuf.Pointer);
	return (ret);
}

/*
 * Cache the ACPI _PSD data. The _PSD data defines P-state CPU dependencies
 * (think CPU domains).
 */
static int
cpu_acpi_cache_psd(cpu_acpi_handle_t handle)
{
	cpu_acpi_psd_t *psd;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_PSD_CACHED);
	psd = &CPU_ACPI_PSD(handle);
	ret = cpu_acpi_cache_state_dependencies(handle, PSD_OBJ, psd);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_PSD_CACHED);
	return (ret);
}

/*
 * Cache the ACPI _TSD data. The _TSD data defines T-state CPU dependencies
 * (think CPU domains).
 */
static int
cpu_acpi_cache_tsd(cpu_acpi_handle_t handle)
{
	cpu_acpi_tsd_t *tsd;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_TSD_CACHED);
	tsd = &CPU_ACPI_TSD(handle);
	ret = cpu_acpi_cache_state_dependencies(handle, TSD_OBJ, tsd);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_TSD_CACHED);
	return (ret);
}

/*
 * Cache the ACPI _CSD data. The _CSD data defines C-state CPU dependencies
 * (think CPU domains).
 */
static int
cpu_acpi_cache_csd(cpu_acpi_handle_t handle)
{
	cpu_acpi_csd_t *csd;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_CSD_CACHED);
	csd = &CPU_ACPI_CSD(handle);
	ret = cpu_acpi_cache_state_dependencies(handle, CSD_OBJ, csd);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_CSD_CACHED);
	return (ret);
}

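/*
 * Fill in the cached P-state array from a validated _PSS package,
 * skipping duplicate entries (identical core frequencies).
 */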
static void
cpu_acpi_cache_pstate(cpu_acpi_handle_t handle, ACPI_OBJECT *obj, int cnt)
{
	cpu_acpi_pstate_t *pstate;
	ACPI_OBJECT *q, *l;
	int i, j;

	CPU_ACPI_PSTATES_COUNT(handle) = cnt;
	CPU_ACPI_PSTATES(handle) = kmem_zalloc(CPU_ACPI_PSTATES_SIZE(cnt),
	    KM_SLEEP);
	pstate = (cpu_acpi_pstate_t *)CPU_ACPI_PSTATES(handle);
	for (i = 0, l = NULL; i < obj->Package.Count && cnt > 0; i++, l = q) {
		uint32_t *up;

		q = obj->Package.Elements[i].Package.Elements;

		/*
		 * Skip duplicate entries.
		 */
		if (l != NULL && l[0].Integer.Value == q[0].Integer.Value)
			continue;

		up = (uint32_t *)pstate;
		for (j = 0; j < CPU_ACPI_PSS_CNT; j++)
			up[j] = q[j].Integer.Value;
		pstate++;
		cnt--;
	}
}

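/*
 * Fill in the cached T-state array from a validated _TSS package,
 * skipping duplicate entries (identical throttle values).
 */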
static void
cpu_acpi_cache_tstate(cpu_acpi_handle_t handle, ACPI_OBJECT *obj, int cnt)
{
	cpu_acpi_tstate_t *tstate;
	ACPI_OBJECT *q, *l;
	int i, j;

	CPU_ACPI_TSTATES_COUNT(handle) = cnt;
	CPU_ACPI_TSTATES(handle) = kmem_zalloc(CPU_ACPI_TSTATES_SIZE(cnt),
	    KM_SLEEP);
	tstate = (cpu_acpi_tstate_t *)CPU_ACPI_TSTATES(handle);
	for (i = 0, l = NULL; i < obj->Package.Count && cnt > 0; i++, l = q) {
		uint32_t *up;

		q = obj->Package.Elements[i].Package.Elements;

		/*
		 * Skip duplicate entries.
		 */
		if (l != NULL && l[0].Integer.Value == q[0].Integer.Value)
			continue;

		up = (uint32_t *)tstate;
		for (j = 0; j < CPU_ACPI_TSS_CNT; j++)
			up[j] = q[j].Integer.Value;
		tstate++;
		cnt--;
	}
}

/*
 * Cache the _PSS or _TSS data.
 */
static int
cpu_acpi_cache_supported_states(cpu_acpi_handle_t handle,
    cpu_acpi_obj_t objtype, int fcnt)
{
	ACPI_BUFFER abuf;
	ACPI_OBJECT *obj, *q, *l;
	boolean_t eot = B_FALSE;
	int ret = -1;
	int cnt;
	int i, j;
	int p_res;

	/*
	 * Fetch the data (if present) for the CPU node.
	 */
	abuf.Length = ACPI_ALLOCATE_BUFFER;
	abuf.Pointer = NULL;
	if (ACPI_FAILURE(AcpiEvaluateObjectTyped(handle->cs_handle,
	    cpu_acpi_obj_attrs[objtype].name, NULL, &abuf,
	    ACPI_TYPE_PACKAGE))) {
		p_res = snprintf(err_msg, ERR_MSG_SIZE, "!cpu_acpi: %s "
		    "package not found.", cpu_acpi_obj_attrs[objtype].name);
		if (p_res >= 0)
			PRINT_ERR_MSG(CE_NOTE, err_msg, objtype);
		return (1);
	}
	obj = abuf.Pointer;
	if (obj->Package.Count < 2) {
		p_res = snprintf(err_msg, ERR_MSG_SIZE, "!cpu_acpi: %s package"
		    " bad count %d.", cpu_acpi_obj_attrs[objtype].name,
		    obj->Package.Count);
		if (p_res >= 0)
			PRINT_ERR_MSG(CE_NOTE, err_msg, objtype);
		goto out;
	}

	/*
	 * Does the package look coherent?
	 */
	cnt = 0;
	for (i = 0, l = NULL; i < obj->Package.Count; i++, l = q) {
		if (obj->Package.Elements[i].Type != ACPI_TYPE_PACKAGE ||
		    obj->Package.Elements[i].Package.Count != fcnt) {
			p_res = snprintf(err_msg, ERR_MSG_SIZE, "!cpu_acpi: "
			    "Unexpected data in %s package.",
			    cpu_acpi_obj_attrs[objtype].name);
			if (p_res >= 0)
				PRINT_ERR_MSG(CE_NOTE, err_msg, objtype);
			goto out;
		}

		q = obj->Package.Elements[i].Package.Elements;
		for (j = 0; j < fcnt; j++) {
			if (q[j].Type != ACPI_TYPE_INTEGER) {
				p_res = snprintf(err_msg, ERR_MSG_SIZE,
				    "!cpu_acpi: %s element invalid (type)",
				    cpu_acpi_obj_attrs[objtype].name);
				if (p_res >= 0)
					PRINT_ERR_MSG(CE_NOTE, err_msg,
					    objtype);
				goto out;
			}
		}

		/*
		 * Ignore duplicate entries.
		 */
		if (l != NULL && l[0].Integer.Value == q[0].Integer.Value)
			continue;

		/*
		 * Some supported state tables are larger than required
		 * and unused elements are filled with patterns
		 * of 0xff. Simply check here for frequency = 0xffff
		 * and stop counting if found.
		 */
		if (q[0].Integer.Value == 0xffff) {
			eot = B_TRUE;
			continue;
		}

		/*
		 * We should never find a valid entry after we've hit
		 * the end-of-table entry.
		 */
		if (eot) {
			p_res = snprintf(err_msg, ERR_MSG_SIZE, "!cpu_acpi: "
			    "Unexpected data in %s package after eot.",
			    cpu_acpi_obj_attrs[objtype].name);
			if (p_res >= 0)
				PRINT_ERR_MSG(CE_NOTE, err_msg, objtype);
			goto out;
		}

		/*
		 * States must be defined in order from highest to lowest.
		 */
		if (l != NULL && l[0].Integer.Value < q[0].Integer.Value) {
			p_res = snprintf(err_msg, ERR_MSG_SIZE, "!cpu_acpi: "
			    "%s package state definitions out of order.",
			    cpu_acpi_obj_attrs[objtype].name);
			if (p_res >= 0)
				PRINT_ERR_MSG(CE_NOTE, err_msg, objtype);
			goto out;
		}

		/*
		 * This entry passes.
		 */
		cnt++;
	}
	if (cnt == 0)
		goto out;

	/*
	 * Yes, fill in the structure.
	 */
	ASSERT(objtype == PSS_OBJ || objtype == TSS_OBJ);
	(objtype == PSS_OBJ) ? cpu_acpi_cache_pstate(handle, obj, cnt) :
	    cpu_acpi_cache_tstate(handle, obj, cnt);

	ret = 0;
out:
	AcpiOsFree(abuf.Pointer);
	return (ret);
}

/*
 * Cache the _PSS data. The _PSS data defines the different power levels
 * supported by the CPU and the attributes associated with each power level
 * (i.e., frequency, voltage, etc.). The power levels are numbered from
 * highest to lowest. That is, the highest power level is _PSS entry 0
 * and the lowest power level is the last _PSS entry.
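 *
 * Each _PSS entry is itself a package of CPU_ACPI_PSS_CNT integers
 * (per the ACPI specification: core frequency, power, transition
 * latency, bus master latency, control and status values).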
 */
static int
cpu_acpi_cache_pstates(cpu_acpi_handle_t handle)
{
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_PSS_CACHED);
	ret = cpu_acpi_cache_supported_states(handle, PSS_OBJ,
	    CPU_ACPI_PSS_CNT);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_PSS_CACHED);
	return (ret);
}

/*
 * Cache the _TSS data. The _TSS data defines the different frequency
 * throttle levels supported by the CPU and the attributes associated with
 * each throttle level (i.e., frequency throttle percentage, voltage, etc.).
 * The throttle levels are numbered from highest to lowest.
 */
static int
cpu_acpi_cache_tstates(cpu_acpi_handle_t handle)
{
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_TSS_CACHED);
	ret = cpu_acpi_cache_supported_states(handle, TSS_OBJ,
	    CPU_ACPI_TSS_CNT);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_TSS_CACHED);
	return (ret);
}

/*
 * Cache the ACPI CPU present capabilities data objects.
 */
static int
cpu_acpi_cache_present_capabilities(cpu_acpi_handle_t handle,
    cpu_acpi_obj_t objtype, cpu_acpi_present_capabilities_t *pc)
{
	ACPI_BUFFER abuf;
	ACPI_OBJECT *obj;

	/*
	 * Fetch the present capabilities object (if present) for the CPU
	 * node.  Since it is optional, non-existence is not a failure.
	 */
	abuf.Length = ACPI_ALLOCATE_BUFFER;
	abuf.Pointer = NULL;
	if (ACPI_FAILURE(AcpiEvaluateObject(handle->cs_handle,
	    cpu_acpi_obj_attrs[objtype].name, NULL, &abuf)) ||
	    abuf.Length == 0) {
		*pc = 0;
		return (1);
	}

	obj = (ACPI_OBJECT *)abuf.Pointer;
	*pc = obj->Integer.Value;
	AcpiOsFree(abuf.Pointer);
	return (0);
}

/*
 * Cache the _PPC data. The _PPC simply contains an integer value which
 * represents the highest power level that a CPU should transition to.
 * That is, it's an index into the array of _PSS entries and will be
 * greater than or equal to zero.
 */
void
cpu_acpi_cache_ppc(cpu_acpi_handle_t handle)
{
	cpu_acpi_ppc_t *ppc;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_PPC_CACHED);
	ppc = &CPU_ACPI_PPC(handle);
	ret = cpu_acpi_cache_present_capabilities(handle, PPC_OBJ, ppc);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_PPC_CACHED);
}

/*
 * Cache the _TPC data. The _TPC simply contains an integer value which
 * represents the throttle level that a CPU should transition to.
 * That is, it's an index into the array of _TSS entries and will be
 * greater than or equal to zero.
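 *
 * Note that cpu_acpi_cache_tpc() (like cpu_acpi_cache_ppc()) is not
 * static: the platform can change _TPC at run time, so callers may
 * re-evaluate it after a change notification (see
 * cpu_acpi_install_notify_handler() below).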
 */
void
cpu_acpi_cache_tpc(cpu_acpi_handle_t handle)
{
	cpu_acpi_tpc_t *tpc;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_TPC_CACHED);
	tpc = &CPU_ACPI_TPC(handle);
	ret = cpu_acpi_cache_present_capabilities(handle, TPC_OBJ, tpc);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_TPC_CACHED);
}

/*
 * Verify that a _CST entry uses a supported address space (fixed
 * hardware or system IO).
 */
int
cpu_acpi_verify_cstate(cpu_acpi_cstate_t *cstate)
{
	uint32_t addrspaceid = cstate->cs_addrspace_id;

	if ((addrspaceid != ACPI_ADR_SPACE_FIXED_HARDWARE) &&
	    (addrspaceid != ACPI_ADR_SPACE_SYSTEM_IO)) {
		cmn_err(CE_WARN, "!_CST: unsupported address space id"
		    ":C%d, type: %d\n", cstate->cs_type, addrspaceid);
		return (1);
	}
	return (0);
}

/*
 * Cache the _CST data. The _CST data describes the C-states supported by
 * the CPU (type, latency, power and the register used to enter each state).
 */
int
cpu_acpi_cache_cst(cpu_acpi_handle_t handle)
{
	ACPI_BUFFER abuf;
	ACPI_OBJECT *obj;
	ACPI_INTEGER cnt;
	cpu_acpi_cstate_t *cstate, *p;
	int i, count;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_CST_CACHED);

	abuf.Length = ACPI_ALLOCATE_BUFFER;
	abuf.Pointer = NULL;

	if (ACPI_FAILURE(AcpiEvaluateObject(handle->cs_handle, "_CST",
	    NULL, &abuf))) {
		cmn_err(CE_NOTE, "!cpu_acpi: _CST evaluate failure");
		return (-1);
	}
	obj = (ACPI_OBJECT *)abuf.Pointer;
	if (obj->Package.Count < 2) {
		cmn_err(CE_NOTE, "!cpu_acpi: _CST package bad count %d.",
		    obj->Package.Count);
		AcpiOsFree(abuf.Pointer);
		return (-1);
	}

	/*
	 * Does the package look coherent?
	 */
	cnt = obj->Package.Elements[0].Integer.Value;
	if (cnt < 1 || cnt != obj->Package.Count - 1) {
		cmn_err(CE_NOTE, "!cpu_acpi: _CST invalid element count %d != "
		    "Package count %d\n",
		    (int)cnt, (int)obj->Package.Count - 1);
		AcpiOsFree(abuf.Pointer);
		return (-1);
	}

	CPU_ACPI_CSTATES_COUNT(handle) = (uint32_t)cnt;
	CPU_ACPI_CSTATES(handle) = kmem_zalloc(CPU_ACPI_CSTATES_SIZE(cnt),
	    KM_SLEEP);
	CPU_ACPI_BM_INFO(handle) = 0;
	cstate = (cpu_acpi_cstate_t *)CPU_ACPI_CSTATES(handle);
	p = cstate;

	for (i = 1, count = 1; i <= cnt; i++) {
		ACPI_OBJECT *pkg;
		AML_RESOURCE_GENERIC_REGISTER *reg;
		ACPI_OBJECT *element;

		pkg = &(obj->Package.Elements[i]);
		reg = (AML_RESOURCE_GENERIC_REGISTER *)
		    pkg->Package.Elements[0].Buffer.Pointer;
		cstate->cs_addrspace_id = reg->AddressSpaceId;
		cstate->cs_address = reg->Address;
		element = &(pkg->Package.Elements[1]);
		cstate->cs_type = element->Integer.Value;
		element = &(pkg->Package.Elements[2]);
		cstate->cs_latency = element->Integer.Value;
		element = &(pkg->Package.Elements[3]);
		cstate->cs_power = element->Integer.Value;

		if (cpu_acpi_verify_cstate(cstate)) {
			/*
			 * Ignore this entry if it's not valid.
			 */
			continue;
		}
		if (cstate == p) {
			cstate++;
		} else if (p->cs_type == cstate->cs_type) {
			/*
			 * If there are duplicate entries, keep the last one.
			 * This handles two cases:
			 * 1) some buggy BIOSes report completely duplicated
			 *    entries;
			 * 2) the ACPI spec allows the same C-state entry with
			 *    different power and latency values, and we want
			 *    the one with more power saving.
			 */
			(void) memcpy(p, cstate, sizeof (cpu_acpi_cstate_t));
		} else {
			/*
			 * We got a valid entry; cache it in the next
			 * cstate structure.
			 */
			p = cstate++;
			count++;
		}
	}

	if (count < 2) {
		cmn_err(CE_NOTE, "!cpu_acpi: _CST invalid count %d < 2",
		    count);
		AcpiOsFree(abuf.Pointer);
		return (-1);
	}
	cstate = (cpu_acpi_cstate_t *)CPU_ACPI_CSTATES(handle);
	if (cstate[0].cs_type != CPU_ACPI_C1) {
		cmn_err(CE_NOTE, "!cpu_acpi: _CST first element type not C1: "
		    "%d", (int)cstate->cs_type);
		AcpiOsFree(abuf.Pointer);
		return (-1);
	}

	if (count != cnt)
		CPU_ACPI_CSTATES_COUNT(handle) = (uint32_t)count;

	AcpiOsFree(abuf.Pointer);
	CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_CST_CACHED);
	return (0);
}

/*
 * Cache the _PCT, _PSS, _PSD and _PPC data.
 */
int
cpu_acpi_cache_pstate_data(cpu_acpi_handle_t handle)
{
	if (cpu_acpi_cache_pct(handle) < 0) {
		cmn_err(CE_WARN, "!cpu_acpi: error parsing _PCT for "
		    "CPU %d", handle->cs_id);
		return (-1);
	}

	if (cpu_acpi_cache_pstates(handle) != 0) {
		cmn_err(CE_WARN, "!cpu_acpi: error parsing _PSS for "
		    "CPU %d", handle->cs_id);
		return (-1);
	}

	if (cpu_acpi_cache_psd(handle) < 0) {
		cmn_err(CE_WARN, "!cpu_acpi: error parsing _PSD for "
		    "CPU %d", handle->cs_id);
		return (-1);
	}

	cpu_acpi_cache_ppc(handle);

	return (0);
}

/*
 * Free the cached P-state data, if any.
 */
void
cpu_acpi_free_pstate_data(cpu_acpi_handle_t handle)
{
	if (handle != NULL) {
		if (CPU_ACPI_PSTATES(handle)) {
			kmem_free(CPU_ACPI_PSTATES(handle),
			    CPU_ACPI_PSTATES_SIZE(
			    CPU_ACPI_PSTATES_COUNT(handle)));
			CPU_ACPI_PSTATES(handle) = NULL;
		}
	}
}

/*
 * Cache the _PTC, _TSS, _TSD and _TPC data.
 */
int
cpu_acpi_cache_tstate_data(cpu_acpi_handle_t handle)
{
	int p_res;

	if (cpu_acpi_cache_ptc(handle) < 0) {
		p_res = snprintf(err_msg, ERR_MSG_SIZE, "!cpu_acpi: error "
		    "parsing _PTC for CPU %d", handle->cs_id);
		if (p_res >= 0)
			PRINT_ERR_MSG(CE_NOTE, err_msg, PTC_OBJ);
		return (-1);
	}

	if (cpu_acpi_cache_tstates(handle) != 0) {
		p_res = snprintf(err_msg, ERR_MSG_SIZE, "!cpu_acpi: error "
		    "parsing _TSS for CPU %d", handle->cs_id);
		if (p_res >= 0)
			PRINT_ERR_MSG(CE_NOTE, err_msg, TSS_OBJ);
		return (-1);
	}

	if (cpu_acpi_cache_tsd(handle) < 0) {
		p_res = snprintf(err_msg, ERR_MSG_SIZE, "!cpu_acpi: error "
		    "parsing _TSD for CPU %d", handle->cs_id);
		if (p_res >= 0)
			PRINT_ERR_MSG(CE_NOTE, err_msg, TSD_OBJ);
		return (-1);
	}

	cpu_acpi_cache_tpc(handle);

	return (0);
}

/*
 * Free the cached T-state data, if any.
 */
void
cpu_acpi_free_tstate_data(cpu_acpi_handle_t handle)
{
	if (handle != NULL) {
		if (CPU_ACPI_TSTATES(handle)) {
			kmem_free(CPU_ACPI_TSTATES(handle),
			    CPU_ACPI_TSTATES_SIZE(
			    CPU_ACPI_TSTATES_COUNT(handle)));
			CPU_ACPI_TSTATES(handle) = NULL;
		}
	}
}

/*
 * Cache the _CST and _CSD data.
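 *
 * A missing _CSD is not treated as an error (cpu_acpi_cache_csd()
 * returns 1 in that case); only a malformed _CSD object causes a
 * failure here.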
 */
int
cpu_acpi_cache_cstate_data(cpu_acpi_handle_t handle)
{
	if (cpu_acpi_cache_cst(handle) < 0) {
		cmn_err(CE_WARN, "!cpu_acpi: error parsing _CST for "
		    "CPU %d", handle->cs_id);
		return (-1);
	}

	if (cpu_acpi_cache_csd(handle) < 0) {
		cmn_err(CE_WARN, "!cpu_acpi: error parsing _CSD for "
		    "CPU %d", handle->cs_id);
		return (-1);
	}

	return (0);
}

/*
 * Free the cached C-state data, if any.
 */
void
cpu_acpi_free_cstate_data(cpu_acpi_handle_t handle)
{
	if (handle != NULL) {
		if (CPU_ACPI_CSTATES(handle)) {
			kmem_free(CPU_ACPI_CSTATES(handle),
			    CPU_ACPI_CSTATES_SIZE(
			    CPU_ACPI_CSTATES_COUNT(handle)));
			CPU_ACPI_CSTATES(handle) = NULL;
		}
	}
}

/*
 * Register a handler for processor change notifications.
 */
void
cpu_acpi_install_notify_handler(cpu_acpi_handle_t handle,
    ACPI_NOTIFY_HANDLER handler, void *ctx)
{
	if (ACPI_FAILURE(AcpiInstallNotifyHandler(handle->cs_handle,
	    ACPI_DEVICE_NOTIFY, handler, ctx)))
		cmn_err(CE_NOTE, "!cpu_acpi: Unable to register "
		    "notify handler for CPU");
}

/*
 * Remove a handler for processor change notifications.
 */
void
cpu_acpi_remove_notify_handler(cpu_acpi_handle_t handle,
    ACPI_NOTIFY_HANDLER handler)
{
	if (ACPI_FAILURE(AcpiRemoveNotifyHandler(handle->cs_handle,
	    ACPI_DEVICE_NOTIFY, handler)))
		cmn_err(CE_NOTE, "!cpu_acpi: Unable to remove "
		    "notify handler for CPU");
}

/*
 * Write _PDC.
 */
int
cpu_acpi_write_pdc(cpu_acpi_handle_t handle, uint32_t revision, uint32_t count,
    uint32_t *capabilities)
{
	ACPI_OBJECT obj;
	ACPI_OBJECT_LIST list = { 1, &obj};
	uint32_t *buffer;
	uint32_t *bufptr;
	uint32_t bufsize;
	int i;

	bufsize = (count + 2) * sizeof (uint32_t);
	buffer = kmem_zalloc(bufsize, KM_SLEEP);
	buffer[0] = revision;
	buffer[1] = count;
	bufptr = &buffer[2];
	for (i = 0; i < count; i++)
		*bufptr++ = *capabilities++;

	obj.Type = ACPI_TYPE_BUFFER;
	obj.Buffer.Length = bufsize;
	obj.Buffer.Pointer = (void *)buffer;

	/*
	 * _PDC is optional, so don't log failure.
	 */
	if (ACPI_FAILURE(AcpiEvaluateObject(handle->cs_handle, "_PDC",
	    &list, NULL))) {
		kmem_free(buffer, bufsize);
		return (-1);
	}

	kmem_free(buffer, bufsize);
	return (0);
}

/*
 * Write to a system IO port.
 */
int
cpu_acpi_write_port(ACPI_IO_ADDRESS address, uint32_t value, uint32_t width)
{
	if (ACPI_FAILURE(AcpiOsWritePort(address, value, width))) {
		cmn_err(CE_NOTE, "cpu_acpi: error writing system IO port "
		    "%lx.", (long)address);
		return (-1);
	}
	return (0);
}

/*
 * Read from a system IO port.
 */
int
cpu_acpi_read_port(ACPI_IO_ADDRESS address, uint32_t *value, uint32_t width)
{
	if (ACPI_FAILURE(AcpiOsReadPort(address, value, width))) {
		cmn_err(CE_NOTE, "cpu_acpi: error reading system IO port "
		    "%lx.", (long)address);
		return (-1);
	}
	return (0);
}

/*
 * Return supported frequencies.
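 *
 * The returned array is allocated here; the caller must release it
 * with cpu_acpi_free_speeds().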
 */
uint_t
cpu_acpi_get_speeds(cpu_acpi_handle_t handle, int **speeds)
{
	cpu_acpi_pstate_t *pstate;
	int *hspeeds;
	uint_t nspeeds;
	int i;

	nspeeds = CPU_ACPI_PSTATES_COUNT(handle);
	pstate = (cpu_acpi_pstate_t *)CPU_ACPI_PSTATES(handle);
	hspeeds = kmem_zalloc(nspeeds * sizeof (int), KM_SLEEP);
	for (i = 0; i < nspeeds; i++) {
		hspeeds[i] = CPU_ACPI_FREQ(pstate);
		pstate++;
	}
	*speeds = hspeeds;
	return (nspeeds);
}

/*
 * Free resources allocated by cpu_acpi_get_speeds().
 */
void
cpu_acpi_free_speeds(int *speeds, uint_t nspeeds)
{
	kmem_free(speeds, nspeeds * sizeof (int));
}

/*
 * Return the number of cached C-states, or 1 if no C-state data has
 * been cached.
 */
uint_t
cpu_acpi_get_max_cstates(cpu_acpi_handle_t handle)
{
	if (CPU_ACPI_CSTATES(handle))
		return (CPU_ACPI_CSTATES_COUNT(handle));
	else
		return (1);
}

/*
 * Thin wrappers around the ACPICA bit register accessors.
 */
void
cpu_acpi_set_register(uint32_t bitreg, uint32_t value)
{
	AcpiSetRegister(bitreg, value);
}

void
cpu_acpi_get_register(uint32_t bitreg, uint32_t *value)
{
	AcpiGetRegister(bitreg, value);
}

/*
 * Map the CPU to an ACPI handle and allocate the per-CPU ACPI state.
 */
cpu_acpi_handle_t
cpu_acpi_init(cpu_t *cp)
{
	cpu_acpi_handle_t handle;

	handle = kmem_zalloc(sizeof (cpu_acpi_state_t), KM_SLEEP);

	if (ACPI_FAILURE(acpica_get_handle_cpu(cp->cpu_id,
	    &handle->cs_handle))) {
		kmem_free(handle, sizeof (cpu_acpi_state_t));
		return (NULL);
	}
	handle->cs_id = cp->cpu_id;
	return (handle);
}

/*
 * Free any resources allocated by cpu_acpi_init().
 */
void
cpu_acpi_fini(cpu_acpi_handle_t handle)
{
	if (handle)
		kmem_free(handle, sizeof (cpu_acpi_state_t));
}