/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/cpu_acpi.h>
#include <sys/cpu_idle.h>
#include <sys/dtrace.h>
#include <sys/sdt.h>

/*
 * List of the processor ACPI object types that are being used.
 */
typedef enum cpu_acpi_obj {
	PDC_OBJ = 0,
	PCT_OBJ,
	PSS_OBJ,
	PSD_OBJ,
	PPC_OBJ,
	PTC_OBJ,
	TSS_OBJ,
	TSD_OBJ,
	TPC_OBJ,
	CST_OBJ,
	CSD_OBJ,
} cpu_acpi_obj_t;

/*
 * Container to store object name.
 * Other attributes can be added in the future as necessary.
 */
typedef struct cpu_acpi_obj_attr {
	char *name;
} cpu_acpi_obj_attr_t;

/*
 * List of object attributes.
 * NOTE: Please keep the ordering of the list the same as cpu_acpi_obj_t.
 */
static cpu_acpi_obj_attr_t cpu_acpi_obj_attrs[] = {
	{"_PDC"},
	{"_PCT"},
	{"_PSS"},
	{"_PSD"},
	{"_PPC"},
	{"_PTC"},
	{"_TSS"},
	{"_TSD"},
	{"_TPC"},
	{"_CST"},
	{"_CSD"}
};

/*
 * Cache the ACPI CPU control data objects.
 */
static int
cpu_acpi_cache_ctrl_regs(cpu_acpi_handle_t handle, cpu_acpi_obj_t objtype,
    cpu_acpi_ctrl_regs_t *regs)
{
	ACPI_STATUS astatus;
	ACPI_BUFFER abuf;
	ACPI_OBJECT *obj;
	AML_RESOURCE_GENERIC_REGISTER *greg;
	int ret = -1;
	int i;

	/*
	 * Fetch the control registers (if present) for the CPU node.
	 * Since they are optional, non-existence is not a failure
	 * (we just consider it a fixed hardware case).
	 */
	abuf.Length = ACPI_ALLOCATE_BUFFER;
	abuf.Pointer = NULL;
	astatus = AcpiEvaluateObjectTyped(handle->cs_handle,
	    cpu_acpi_obj_attrs[objtype].name, NULL, &abuf, ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(astatus)) {
		if (astatus == AE_NOT_FOUND) {
			DTRACE_PROBE3(cpu_acpi__eval__err, int, handle->cs_id,
			    int, objtype, int, astatus);
			regs[0].cr_addrspace_id = ACPI_ADR_SPACE_FIXED_HARDWARE;
			regs[1].cr_addrspace_id = ACPI_ADR_SPACE_FIXED_HARDWARE;
			return (1);
		}
		cmn_err(CE_NOTE, "!cpu_acpi: error %d evaluating %s package "
		    "for CPU %d.", astatus, cpu_acpi_obj_attrs[objtype].name,
		    handle->cs_id);
		goto out;
	}

	obj = abuf.Pointer;
	if (obj->Package.Count != 2) {
		cmn_err(CE_NOTE, "!cpu_acpi: %s package bad count %d for "
		    "CPU %d.", cpu_acpi_obj_attrs[objtype].name,
		    obj->Package.Count, handle->cs_id);
		goto out;
	}

	/*
	 * Does the package look coherent?
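	 * Both elements must be Buffers holding Generic Register
	 * descriptors of the expected size, addressed in either
	 * fixed-hardware or system I/O space.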
	 */
	for (i = 0; i < obj->Package.Count; i++) {
		if (obj->Package.Elements[i].Type != ACPI_TYPE_BUFFER) {
			cmn_err(CE_NOTE, "!cpu_acpi: Unexpected data in "
			    "%s package for CPU %d.",
			    cpu_acpi_obj_attrs[objtype].name,
			    handle->cs_id);
			goto out;
		}

		greg = (AML_RESOURCE_GENERIC_REGISTER *)
		    obj->Package.Elements[i].Buffer.Pointer;
		if (greg->DescriptorType !=
		    ACPI_RESOURCE_NAME_GENERIC_REGISTER) {
			cmn_err(CE_NOTE, "!cpu_acpi: %s package has format "
			    "error for CPU %d.",
			    cpu_acpi_obj_attrs[objtype].name,
			    handle->cs_id);
			goto out;
		}
		if (greg->ResourceLength !=
		    ACPI_AML_SIZE_LARGE(AML_RESOURCE_GENERIC_REGISTER)) {
			cmn_err(CE_NOTE, "!cpu_acpi: %s package not right "
			    "size for CPU %d.",
			    cpu_acpi_obj_attrs[objtype].name,
			    handle->cs_id);
			goto out;
		}
		if (greg->AddressSpaceId != ACPI_ADR_SPACE_FIXED_HARDWARE &&
		    greg->AddressSpaceId != ACPI_ADR_SPACE_SYSTEM_IO) {
			cmn_err(CE_NOTE, "!cpu_acpi: %s contains unsupported "
			    "address space type %x for CPU %d.",
			    cpu_acpi_obj_attrs[objtype].name,
			    greg->AddressSpaceId,
			    handle->cs_id);
			goto out;
		}
	}

	/*
	 * Looks good!
	 */
	for (i = 0; i < obj->Package.Count; i++) {
		greg = (AML_RESOURCE_GENERIC_REGISTER *)
		    obj->Package.Elements[i].Buffer.Pointer;
		regs[i].cr_addrspace_id = greg->AddressSpaceId;
		regs[i].cr_width = greg->BitWidth;
		regs[i].cr_offset = greg->BitOffset;
		regs[i].cr_asize = greg->AccessSize;
		regs[i].cr_address = greg->Address;
	}
	ret = 0;
out:
	if (abuf.Pointer != NULL)
		AcpiOsFree(abuf.Pointer);
	return (ret);
}

/*
 * Cache the ACPI _PCT data. The _PCT data defines the interface to use
 * when making power level transitions (i.e., system IO ports, fixed
 * hardware port, etc).
 */
static int
cpu_acpi_cache_pct(cpu_acpi_handle_t handle)
{
	cpu_acpi_pct_t *pct;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_PCT_CACHED);
	pct = &CPU_ACPI_PCT(handle)[0];
	if ((ret = cpu_acpi_cache_ctrl_regs(handle, PCT_OBJ, pct)) == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_PCT_CACHED);
	return (ret);
}

/*
 * Cache the ACPI _PTC data. The _PTC data defines the interface to use
 * when making T-state transitions (i.e., system IO ports, fixed
 * hardware port, etc).
 */
static int
cpu_acpi_cache_ptc(cpu_acpi_handle_t handle)
{
	cpu_acpi_ptc_t *ptc;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_PTC_CACHED);
	ptc = &CPU_ACPI_PTC(handle)[0];
	if ((ret = cpu_acpi_cache_ctrl_regs(handle, PTC_OBJ, ptc)) == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_PTC_CACHED);
	return (ret);
}

/*
 * Cache the ACPI CPU state dependency data objects.
 */
static int
cpu_acpi_cache_state_dependencies(cpu_acpi_handle_t handle,
    cpu_acpi_obj_t objtype, cpu_acpi_state_dependency_t *sd)
{
	ACPI_STATUS astatus;
	ACPI_BUFFER abuf;
	ACPI_OBJECT *pkg, *elements;
	int number;
	int ret = -1;

	if (objtype == CSD_OBJ) {
		number = 6;
	} else {
		number = 5;
	}
	/*
	 * Fetch the dependencies (if present) for the CPU node.
	 * Since they are optional, non-existence is not a failure
	 * (it's up to the caller to determine how to handle non-existence).
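	 * Each dependency entry is a sub-package of the form
	 * {NumEntries, Revision, Domain, CoordType, NumProcessors},
	 * with a trailing Index element for _CSD.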
	 */
	abuf.Length = ACPI_ALLOCATE_BUFFER;
	abuf.Pointer = NULL;
	astatus = AcpiEvaluateObjectTyped(handle->cs_handle,
	    cpu_acpi_obj_attrs[objtype].name, NULL, &abuf, ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(astatus)) {
		if (astatus == AE_NOT_FOUND) {
			DTRACE_PROBE3(cpu_acpi__eval__err, int, handle->cs_id,
			    int, objtype, int, astatus);
			return (1);
		}
		cmn_err(CE_NOTE, "!cpu_acpi: error %d evaluating %s package "
		    "for CPU %d.", astatus, cpu_acpi_obj_attrs[objtype].name,
		    handle->cs_id);
		goto out;
	}

	pkg = abuf.Pointer;

	if (((objtype != CSD_OBJ) && (pkg->Package.Count != 1)) ||
	    ((objtype == CSD_OBJ) && (pkg->Package.Count != 1) &&
	    (pkg->Package.Count != 2))) {
		cmn_err(CE_NOTE, "!cpu_acpi: %s unsupported package count %d "
		    "for CPU %d.", cpu_acpi_obj_attrs[objtype].name,
		    pkg->Package.Count, handle->cs_id);
		goto out;
	}

	/*
	 * For the C-state domain, we assume C2 and C3 have the same
	 * domain information.
	 */
	if (pkg->Package.Elements[0].Type != ACPI_TYPE_PACKAGE ||
	    pkg->Package.Elements[0].Package.Count != number) {
		cmn_err(CE_NOTE, "!cpu_acpi: Unexpected data in %s package "
		    "for CPU %d.", cpu_acpi_obj_attrs[objtype].name,
		    handle->cs_id);
		goto out;
	}
	elements = pkg->Package.Elements[0].Package.Elements;
	if (elements[0].Integer.Value != number ||
	    elements[1].Integer.Value != 0) {
		cmn_err(CE_NOTE, "!cpu_acpi: Unexpected %s revision for "
		    "CPU %d.", cpu_acpi_obj_attrs[objtype].name,
		    handle->cs_id);
		goto out;
	}

	sd->sd_entries = elements[0].Integer.Value;
	sd->sd_revision = elements[1].Integer.Value;
	sd->sd_domain = elements[2].Integer.Value;
	sd->sd_type = elements[3].Integer.Value;
	sd->sd_num = elements[4].Integer.Value;
	if (objtype == CSD_OBJ) {
		sd->sd_index = elements[5].Integer.Value;
	}

	ret = 0;
out:
	if (abuf.Pointer != NULL)
		AcpiOsFree(abuf.Pointer);
	return (ret);
}

/*
 * Cache the ACPI _PSD data. The _PSD data defines P-state CPU dependencies
 * (think CPU domains).
 */
static int
cpu_acpi_cache_psd(cpu_acpi_handle_t handle)
{
	cpu_acpi_psd_t *psd;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_PSD_CACHED);
	psd = &CPU_ACPI_PSD(handle);
	ret = cpu_acpi_cache_state_dependencies(handle, PSD_OBJ, psd);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_PSD_CACHED);
	return (ret);
}

/*
 * Cache the ACPI _TSD data. The _TSD data defines T-state CPU dependencies
 * (think CPU domains).
 */
static int
cpu_acpi_cache_tsd(cpu_acpi_handle_t handle)
{
	cpu_acpi_tsd_t *tsd;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_TSD_CACHED);
	tsd = &CPU_ACPI_TSD(handle);
	ret = cpu_acpi_cache_state_dependencies(handle, TSD_OBJ, tsd);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_TSD_CACHED);
	return (ret);
}

/*
 * Cache the ACPI _CSD data. The _CSD data defines C-state CPU dependencies
 * (think CPU domains).
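 * A _CSD object may contain either one or two dependency packages; only the
 * first is used, on the assumption that C2 and C3 share the same domain
 * information.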
 */
static int
cpu_acpi_cache_csd(cpu_acpi_handle_t handle)
{
	cpu_acpi_csd_t *csd;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_CSD_CACHED);
	csd = &CPU_ACPI_CSD(handle);
	ret = cpu_acpi_cache_state_dependencies(handle, CSD_OBJ, csd);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_CSD_CACHED);
	return (ret);
}

/*
 * Copy the validated _PSS entries into the handle's P-state cache,
 * skipping duplicates.
 */
static void
cpu_acpi_cache_pstate(cpu_acpi_handle_t handle, ACPI_OBJECT *obj, int cnt)
{
	cpu_acpi_pstate_t *pstate;
	ACPI_OBJECT *q, *l;
	int i, j;

	CPU_ACPI_PSTATES_COUNT(handle) = cnt;
	CPU_ACPI_PSTATES(handle) = kmem_zalloc(CPU_ACPI_PSTATES_SIZE(cnt),
	    KM_SLEEP);
	pstate = (cpu_acpi_pstate_t *)CPU_ACPI_PSTATES(handle);
	for (i = 0, l = NULL; i < obj->Package.Count && cnt > 0; i++, l = q) {
		uint32_t *up;

		q = obj->Package.Elements[i].Package.Elements;

		/*
		 * Skip duplicate entries.
		 */
		if (l != NULL && l[0].Integer.Value == q[0].Integer.Value)
			continue;

		up = (uint32_t *)pstate;
		for (j = 0; j < CPU_ACPI_PSS_CNT; j++)
			up[j] = q[j].Integer.Value;
		pstate++;
		cnt--;
	}
}

/*
 * Copy the validated _TSS entries into the handle's T-state cache,
 * skipping duplicates.
 */
static void
cpu_acpi_cache_tstate(cpu_acpi_handle_t handle, ACPI_OBJECT *obj, int cnt)
{
	cpu_acpi_tstate_t *tstate;
	ACPI_OBJECT *q, *l;
	int i, j;

	CPU_ACPI_TSTATES_COUNT(handle) = cnt;
	CPU_ACPI_TSTATES(handle) = kmem_zalloc(CPU_ACPI_TSTATES_SIZE(cnt),
	    KM_SLEEP);
	tstate = (cpu_acpi_tstate_t *)CPU_ACPI_TSTATES(handle);
	for (i = 0, l = NULL; i < obj->Package.Count && cnt > 0; i++, l = q) {
		uint32_t *up;

		q = obj->Package.Elements[i].Package.Elements;

		/*
		 * Skip duplicate entries.
		 */
		if (l != NULL && l[0].Integer.Value == q[0].Integer.Value)
			continue;

		up = (uint32_t *)tstate;
		for (j = 0; j < CPU_ACPI_TSS_CNT; j++)
			up[j] = q[j].Integer.Value;
		tstate++;
		cnt--;
	}
}

/*
 * Cache the _PSS or _TSS data.
 */
static int
cpu_acpi_cache_supported_states(cpu_acpi_handle_t handle,
    cpu_acpi_obj_t objtype, int fcnt)
{
	ACPI_STATUS astatus;
	ACPI_BUFFER abuf;
	ACPI_OBJECT *obj, *q, *l;
	boolean_t eot = B_FALSE;
	int ret = -1;
	int cnt;
	int i, j;

	/*
	 * Fetch the state data (if present) for the CPU node.
	 */
	abuf.Length = ACPI_ALLOCATE_BUFFER;
	abuf.Pointer = NULL;
	astatus = AcpiEvaluateObjectTyped(handle->cs_handle,
	    cpu_acpi_obj_attrs[objtype].name, NULL, &abuf,
	    ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(astatus)) {
		if (astatus == AE_NOT_FOUND) {
			DTRACE_PROBE3(cpu_acpi__eval__err, int, handle->cs_id,
			    int, objtype, int, astatus);
			if (objtype == PSS_OBJ)
				cmn_err(CE_NOTE, "!cpu_acpi: _PSS package "
				    "evaluation failed with status %d for "
				    "CPU %d.", astatus, handle->cs_id);
			return (1);
		}
		cmn_err(CE_NOTE, "!cpu_acpi: error %d evaluating %s package "
		    "for CPU %d.", astatus, cpu_acpi_obj_attrs[objtype].name,
		    handle->cs_id);
		goto out;
	}
	obj = abuf.Pointer;
	if (obj->Package.Count < 2) {
		cmn_err(CE_NOTE, "!cpu_acpi: %s package bad count %d for "
		    "CPU %d.", cpu_acpi_obj_attrs[objtype].name,
		    obj->Package.Count, handle->cs_id);
		goto out;
	}

	/*
	 * Does the package look coherent?
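	 * Each element must be a sub-package of fcnt integers; entries must
	 * be listed from highest to lowest, and a first field of 0xffff
	 * marks the end of the table.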
	 */
	cnt = 0;
	for (i = 0, l = NULL; i < obj->Package.Count; i++, l = q) {
		if (obj->Package.Elements[i].Type != ACPI_TYPE_PACKAGE ||
		    obj->Package.Elements[i].Package.Count != fcnt) {
			cmn_err(CE_NOTE, "!cpu_acpi: Unexpected data in "
			    "%s package for CPU %d.",
			    cpu_acpi_obj_attrs[objtype].name,
			    handle->cs_id);
			goto out;
		}

		q = obj->Package.Elements[i].Package.Elements;
		for (j = 0; j < fcnt; j++) {
			if (q[j].Type != ACPI_TYPE_INTEGER) {
				cmn_err(CE_NOTE, "!cpu_acpi: %s element "
				    "invalid (type) for CPU %d.",
				    cpu_acpi_obj_attrs[objtype].name,
				    handle->cs_id);
				goto out;
			}
		}

		/*
		 * Ignore duplicate entries.
		 */
		if (l != NULL && l[0].Integer.Value == q[0].Integer.Value)
			continue;

		/*
		 * Some supported state tables are larger than required
		 * and unused elements are filled with patterns
		 * of 0xff. Simply check here for frequency = 0xffff
		 * and stop counting if found.
		 */
		if (q[0].Integer.Value == 0xffff) {
			eot = B_TRUE;
			continue;
		}

		/*
		 * We should never find a valid entry after we've hit
		 * the end-of-table entry.
		 */
		if (eot) {
			cmn_err(CE_NOTE, "!cpu_acpi: Unexpected data in %s "
			    "package after eot for CPU %d.",
			    cpu_acpi_obj_attrs[objtype].name,
			    handle->cs_id);
			goto out;
		}

		/*
		 * States must be defined in order from highest to lowest.
		 */
		if (l != NULL && l[0].Integer.Value < q[0].Integer.Value) {
			cmn_err(CE_NOTE, "!cpu_acpi: %s package state "
			    "definitions out of order for CPU %d.",
			    cpu_acpi_obj_attrs[objtype].name,
			    handle->cs_id);
			goto out;
		}

		/*
		 * This entry passes.
		 */
		cnt++;
	}
	if (cnt == 0)
		goto out;

	/*
	 * Yes, fill in the structure.
	 */
	ASSERT(objtype == PSS_OBJ || objtype == TSS_OBJ);
	(objtype == PSS_OBJ) ? cpu_acpi_cache_pstate(handle, obj, cnt) :
	    cpu_acpi_cache_tstate(handle, obj, cnt);

	ret = 0;
out:
	if (abuf.Pointer != NULL)
		AcpiOsFree(abuf.Pointer);
	return (ret);
}

/*
 * Cache the _PSS data. The _PSS data defines the different power levels
 * supported by the CPU and the attributes associated with each power level
 * (i.e., frequency, voltage, etc.). The power levels are numbered from
 * highest to lowest. That is, the highest power level is _PSS entry 0
 * and the lowest power level is the last _PSS entry.
 */
static int
cpu_acpi_cache_pstates(cpu_acpi_handle_t handle)
{
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_PSS_CACHED);
	ret = cpu_acpi_cache_supported_states(handle, PSS_OBJ,
	    CPU_ACPI_PSS_CNT);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_PSS_CACHED);
	return (ret);
}

/*
 * Cache the _TSS data. The _TSS data defines the different freq throttle
 * levels supported by the CPU and the attributes associated with each
 * throttle level (i.e., frequency throttle percentage, voltage, etc.).
 * The throttle levels are numbered from highest to lowest.
 */
static int
cpu_acpi_cache_tstates(cpu_acpi_handle_t handle)
{
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_TSS_CACHED);
	ret = cpu_acpi_cache_supported_states(handle, TSS_OBJ,
	    CPU_ACPI_TSS_CNT);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_TSS_CACHED);
	return (ret);
}

/*
 * Cache the ACPI CPU present capabilities data objects.
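 * These objects (_PPC and _TPC) each evaluate to a single integer.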
 */
static int
cpu_acpi_cache_present_capabilities(cpu_acpi_handle_t handle,
    cpu_acpi_obj_t objtype, cpu_acpi_present_capabilities_t *pc)
{
	ACPI_STATUS astatus;
	ACPI_BUFFER abuf;
	ACPI_OBJECT *obj;
	int ret = -1;

	/*
	 * Fetch the present capabilities object (if present) for the
	 * CPU node.
	 */
	abuf.Length = ACPI_ALLOCATE_BUFFER;
	abuf.Pointer = NULL;
	astatus = AcpiEvaluateObject(handle->cs_handle,
	    cpu_acpi_obj_attrs[objtype].name, NULL, &abuf);
	if (ACPI_FAILURE(astatus) && astatus != AE_NOT_FOUND) {
		cmn_err(CE_NOTE, "!cpu_acpi: error %d evaluating %s "
		    "package for CPU %d.", astatus,
		    cpu_acpi_obj_attrs[objtype].name, handle->cs_id);
		goto out;
	}
	if (astatus == AE_NOT_FOUND || abuf.Length == 0) {
		*pc = 0;
		return (1);
	}

	obj = (ACPI_OBJECT *)abuf.Pointer;
	*pc = obj->Integer.Value;

	ret = 0;
out:
	if (abuf.Pointer != NULL)
		AcpiOsFree(abuf.Pointer);
	return (ret);
}

/*
 * Cache the _PPC data. The _PPC simply contains an integer value which
 * represents the highest power level that a CPU should transition to.
 * That is, it's an index into the array of _PSS entries and will be
 * greater than or equal to zero.
 */
void
cpu_acpi_cache_ppc(cpu_acpi_handle_t handle)
{
	cpu_acpi_ppc_t *ppc;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_PPC_CACHED);
	ppc = &CPU_ACPI_PPC(handle);
	ret = cpu_acpi_cache_present_capabilities(handle, PPC_OBJ, ppc);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_PPC_CACHED);
}

/*
 * Cache the _TPC data. The _TPC simply contains an integer value which
 * represents the throttle level that a CPU should transition to.
 * That is, it's an index into the array of _TSS entries and will be
 * greater than or equal to zero.
 */
void
cpu_acpi_cache_tpc(cpu_acpi_handle_t handle)
{
	cpu_acpi_tpc_t *tpc;
	int ret;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_TPC_CACHED);
	tpc = &CPU_ACPI_TPC(handle);
	ret = cpu_acpi_cache_present_capabilities(handle, TPC_OBJ, tpc);
	if (ret == 0)
		CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_TPC_CACHED);
}

/*
 * Verify that a C-state entry uses a supported address space.
 */
int
cpu_acpi_verify_cstate(cpu_acpi_cstate_t *cstate)
{
	uint32_t addrspaceid = cstate->cs_addrspace_id;

	if ((addrspaceid != ACPI_ADR_SPACE_FIXED_HARDWARE) &&
	    (addrspaceid != ACPI_ADR_SPACE_SYSTEM_IO)) {
		cmn_err(CE_NOTE, "!cpu_acpi: _CST unsupported address space "
		    "id: C%d, type: %d\n", cstate->cs_type, addrspaceid);
		return (1);
	}
	return (0);
}

int
cpu_acpi_cache_cst(cpu_acpi_handle_t handle)
{
	ACPI_STATUS astatus;
	ACPI_BUFFER abuf;
	ACPI_OBJECT *obj;
	ACPI_INTEGER cnt, old_cnt;
	cpu_acpi_cstate_t *cstate, *p;
	size_t alloc_size;
	int i, count;
	int ret = 1;

	CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_CST_CACHED);

	abuf.Length = ACPI_ALLOCATE_BUFFER;
	abuf.Pointer = NULL;

	/*
	 * Fetch the C-state data (if present) for the CPU node.
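	 * A _CST package has the form {Count, CST1, ... CSTn}, where each
	 * CSTn sub-package is {Register, Type, Latency, Power}.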
	 */
	astatus = AcpiEvaluateObjectTyped(handle->cs_handle, "_CST",
	    NULL, &abuf, ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(astatus)) {
		if (astatus == AE_NOT_FOUND) {
			DTRACE_PROBE3(cpu_acpi__eval__err, int, handle->cs_id,
			    int, CST_OBJ, int, astatus);
			return (1);
		}
		cmn_err(CE_NOTE, "!cpu_acpi: error %d evaluating _CST package "
		    "for CPU %d.", astatus, handle->cs_id);
		goto out;
	}
	obj = (ACPI_OBJECT *)abuf.Pointer;
	if (obj->Package.Count < 2) {
		cmn_err(CE_NOTE, "!cpu_acpi: _CST unsupported package "
		    "count %d for CPU %d.", obj->Package.Count, handle->cs_id);
		goto out;
	}

	/*
	 * Does the package look coherent?
	 */
	cnt = obj->Package.Elements[0].Integer.Value;
	if (cnt < 1 || cnt != obj->Package.Count - 1) {
		cmn_err(CE_NOTE, "!cpu_acpi: _CST invalid element "
		    "count %d != Package count %d for CPU %d",
		    (int)cnt, (int)obj->Package.Count - 1, handle->cs_id);
		goto out;
	}

	/*
	 * Reuse the old buffer if the number of C-states is the same.
	 */
	if (CPU_ACPI_CSTATES(handle) &&
	    (old_cnt = CPU_ACPI_CSTATES_COUNT(handle)) != cnt) {
		kmem_free(CPU_ACPI_CSTATES(handle),
		    CPU_ACPI_CSTATES_SIZE(old_cnt));
		CPU_ACPI_CSTATES(handle) = NULL;
	}

	CPU_ACPI_CSTATES_COUNT(handle) = (uint32_t)cnt;
	alloc_size = CPU_ACPI_CSTATES_SIZE(cnt);
	if (CPU_ACPI_CSTATES(handle) == NULL)
		CPU_ACPI_CSTATES(handle) = kmem_zalloc(alloc_size, KM_SLEEP);
	cstate = (cpu_acpi_cstate_t *)CPU_ACPI_CSTATES(handle);
	p = cstate;

	for (i = 1, count = 1; i <= cnt; i++) {
		ACPI_OBJECT *pkg;
		AML_RESOURCE_GENERIC_REGISTER *reg;
		ACPI_OBJECT *element;

		pkg = &(obj->Package.Elements[i]);
		reg = (AML_RESOURCE_GENERIC_REGISTER *)
		    pkg->Package.Elements[0].Buffer.Pointer;
		cstate->cs_addrspace_id = reg->AddressSpaceId;
		cstate->cs_address = reg->Address;
		element = &(pkg->Package.Elements[1]);
		cstate->cs_type = element->Integer.Value;
		element = &(pkg->Package.Elements[2]);
		cstate->cs_latency = element->Integer.Value;
		element = &(pkg->Package.Elements[3]);
		cstate->cs_power = element->Integer.Value;

		if (cpu_acpi_verify_cstate(cstate)) {
			/*
			 * Ignore this entry if it's not valid.
			 */
			continue;
		}
		if (cstate == p) {
			cstate++;
		} else if (p->cs_type == cstate->cs_type) {
			/*
			 * If there are duplicate entries, we keep the
			 * last one. This fixes:
			 * 1) some buggy BIOSes have completely duplicate
			 *    entries;
			 * 2) the ACPI spec allows the same C-state entry
			 *    with different power and latency; we use the
			 *    one with more power savings.
			 */
			(void) memcpy(p, cstate, sizeof (cpu_acpi_cstate_t));
		} else {
			/*
			 * We got a valid entry; cache it in the
			 * cstate structure.
			 */
			p = cstate++;
			count++;
		}
	}

	if (count < 2) {
		cmn_err(CE_NOTE, "!cpu_acpi: _CST invalid count %d < 2 for "
		    "CPU %d", count, handle->cs_id);
		kmem_free(CPU_ACPI_CSTATES(handle), alloc_size);
		CPU_ACPI_CSTATES(handle) = NULL;
		CPU_ACPI_CSTATES_COUNT(handle) = (uint32_t)0;
		goto out;
	}
	cstate = (cpu_acpi_cstate_t *)CPU_ACPI_CSTATES(handle);
	if (cstate[0].cs_type != CPU_ACPI_C1) {
		cmn_err(CE_NOTE, "!cpu_acpi: _CST first element type not "
		    "C1: %d for CPU %d", (int)cstate->cs_type, handle->cs_id);
		kmem_free(CPU_ACPI_CSTATES(handle), alloc_size);
		CPU_ACPI_CSTATES(handle) = NULL;
		CPU_ACPI_CSTATES_COUNT(handle) = (uint32_t)0;
		goto out;
	}

	if (count != cnt) {
		void *orig = CPU_ACPI_CSTATES(handle);

		CPU_ACPI_CSTATES_COUNT(handle) = (uint32_t)count;
		CPU_ACPI_CSTATES(handle) = kmem_zalloc(
		    CPU_ACPI_CSTATES_SIZE(count), KM_SLEEP);
		(void) memcpy(CPU_ACPI_CSTATES(handle), orig,
		    CPU_ACPI_CSTATES_SIZE(count));
		kmem_free(orig, alloc_size);
	}

	CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_CST_CACHED);

	ret = 0;

out:
	if (abuf.Pointer != NULL)
		AcpiOsFree(abuf.Pointer);
	return (ret);
}

/*
 * Cache the _PCT, _PSS, _PSD and _PPC data.
 */
int
cpu_acpi_cache_pstate_data(cpu_acpi_handle_t handle)
{
	if (cpu_acpi_cache_pct(handle) < 0) {
		DTRACE_PROBE2(cpu_acpi__cache__err, int, handle->cs_id,
		    int, PCT_OBJ);
		cmn_err(CE_NOTE, "!cpu_acpi: error parsing _PCT for "
		    "CPU %d", handle->cs_id);
		return (-1);
	}

	if (cpu_acpi_cache_pstates(handle) != 0) {
		DTRACE_PROBE2(cpu_acpi__cache__err, int, handle->cs_id,
		    int, PSS_OBJ);
		cmn_err(CE_NOTE, "!cpu_acpi: error parsing _PSS for "
		    "CPU %d", handle->cs_id);
		return (-1);
	}

	if (cpu_acpi_cache_psd(handle) < 0) {
		DTRACE_PROBE2(cpu_acpi__cache__err, int, handle->cs_id,
		    int, PSD_OBJ);
		cmn_err(CE_NOTE, "!cpu_acpi: error parsing _PSD for "
		    "CPU %d", handle->cs_id);
		return (-1);
	}

	cpu_acpi_cache_ppc(handle);

	return (0);
}

/*
 * Free the cached P-state data.
 */
void
cpu_acpi_free_pstate_data(cpu_acpi_handle_t handle)
{
	if (handle != NULL) {
		if (CPU_ACPI_PSTATES(handle)) {
			kmem_free(CPU_ACPI_PSTATES(handle),
			    CPU_ACPI_PSTATES_SIZE(
			    CPU_ACPI_PSTATES_COUNT(handle)));
			CPU_ACPI_PSTATES(handle) = NULL;
		}
	}
}

/*
 * Cache the _PTC, _TSS, _TSD and _TPC data.
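 * Returns zero on success, a positive value if the optional _TSS object is
 * not present, and -1 on any other failure.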
 */
int
cpu_acpi_cache_tstate_data(cpu_acpi_handle_t handle)
{
	int ret;

	if (cpu_acpi_cache_ptc(handle) < 0) {
		DTRACE_PROBE2(cpu_acpi__cache__err, int, handle->cs_id,
		    int, PTC_OBJ);
		return (-1);
	}

	if ((ret = cpu_acpi_cache_tstates(handle)) != 0) {
		DTRACE_PROBE2(cpu_acpi__cache__err, int, handle->cs_id,
		    int, TSS_OBJ);
		return (ret);
	}

	if (cpu_acpi_cache_tsd(handle) < 0) {
		DTRACE_PROBE2(cpu_acpi__cache__err, int, handle->cs_id,
		    int, TSD_OBJ);
		return (-1);
	}

	cpu_acpi_cache_tpc(handle);

	return (0);
}

/*
 * Free the cached T-state data.
 */
void
cpu_acpi_free_tstate_data(cpu_acpi_handle_t handle)
{
	if (handle != NULL) {
		if (CPU_ACPI_TSTATES(handle)) {
			kmem_free(CPU_ACPI_TSTATES(handle),
			    CPU_ACPI_TSTATES_SIZE(
			    CPU_ACPI_TSTATES_COUNT(handle)));
			CPU_ACPI_TSTATES(handle) = NULL;
		}
	}
}

/*
 * Cache the _CST data.
 */
int
cpu_acpi_cache_cstate_data(cpu_acpi_handle_t handle)
{
	int ret;

	if ((ret = cpu_acpi_cache_cst(handle)) != 0) {
		DTRACE_PROBE2(cpu_acpi__cache__err, int, handle->cs_id,
		    int, CST_OBJ);
		return (ret);
	}

	if (cpu_acpi_cache_csd(handle) < 0) {
		DTRACE_PROBE2(cpu_acpi__cache__err, int, handle->cs_id,
		    int, CSD_OBJ);
		return (-1);
	}

	return (0);
}

/*
 * Free the cached C-state data.
 */
void
cpu_acpi_free_cstate_data(cpu_acpi_handle_t handle)
{
	if (handle != NULL) {
		if (CPU_ACPI_CSTATES(handle)) {
			kmem_free(CPU_ACPI_CSTATES(handle),
			    CPU_ACPI_CSTATES_SIZE(
			    CPU_ACPI_CSTATES_COUNT(handle)));
			CPU_ACPI_CSTATES(handle) = NULL;
		}
	}
}

/*
 * Register a handler for processor change notifications.
 */
void
cpu_acpi_install_notify_handler(cpu_acpi_handle_t handle,
    ACPI_NOTIFY_HANDLER handler, void *ctx)
{
	if (ACPI_FAILURE(AcpiInstallNotifyHandler(handle->cs_handle,
	    ACPI_DEVICE_NOTIFY, handler, ctx)))
		cmn_err(CE_NOTE, "!cpu_acpi: Unable to register "
		    "notify handler for CPU %d.", handle->cs_id);
}

/*
 * Remove a handler for processor change notifications.
 */
void
cpu_acpi_remove_notify_handler(cpu_acpi_handle_t handle,
    ACPI_NOTIFY_HANDLER handler)
{
	if (ACPI_FAILURE(AcpiRemoveNotifyHandler(handle->cs_handle,
	    ACPI_DEVICE_NOTIFY, handler)))
		cmn_err(CE_NOTE, "!cpu_acpi: Unable to remove "
		    "notify handler for CPU %d.", handle->cs_id);
}

/*
 * Write _PDC.
 */
int
cpu_acpi_write_pdc(cpu_acpi_handle_t handle, uint32_t revision, uint32_t count,
    uint32_t *capabilities)
{
	ACPI_STATUS astatus;
	ACPI_OBJECT obj;
	ACPI_OBJECT_LIST list = { 1, &obj};
	uint32_t *buffer;
	uint32_t *bufptr;
	uint32_t bufsize;
	int i;
	int ret = 0;

	bufsize = (count + 2) * sizeof (uint32_t);
	buffer = kmem_zalloc(bufsize, KM_SLEEP);
	buffer[0] = revision;
	buffer[1] = count;
	bufptr = &buffer[2];
	for (i = 0; i < count; i++)
		*bufptr++ = *capabilities++;

	obj.Type = ACPI_TYPE_BUFFER;
	obj.Buffer.Length = bufsize;
	obj.Buffer.Pointer = (void *)buffer;

	/*
	 * Evaluate _PDC (if present) for the CPU node.
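	 * The buffer passed to _PDC is laid out as
	 * {revision, count, capability word 0 .. capability word count-1}.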
	 */
	astatus = AcpiEvaluateObject(handle->cs_handle, "_PDC", &list, NULL);
	if (ACPI_FAILURE(astatus)) {
		if (astatus == AE_NOT_FOUND) {
			DTRACE_PROBE3(cpu_acpi__eval__err, int, handle->cs_id,
			    int, PDC_OBJ, int, astatus);
			ret = 1;
		} else {
			cmn_err(CE_NOTE, "!cpu_acpi: error %d evaluating _PDC "
			    "package for CPU %d.", astatus, handle->cs_id);
			ret = -1;
		}
	}

	kmem_free(buffer, bufsize);
	return (ret);
}

/*
 * Write to a system IO port.
 */
int
cpu_acpi_write_port(ACPI_IO_ADDRESS address, uint32_t value, uint32_t width)
{
	if (ACPI_FAILURE(AcpiOsWritePort(address, value, width))) {
		cmn_err(CE_NOTE, "!cpu_acpi: error writing system IO port "
		    "%lx.", (long)address);
		return (-1);
	}
	return (0);
}

/*
 * Read from a system IO port.
 */
int
cpu_acpi_read_port(ACPI_IO_ADDRESS address, uint32_t *value, uint32_t width)
{
	if (ACPI_FAILURE(AcpiOsReadPort(address, value, width))) {
		cmn_err(CE_NOTE, "!cpu_acpi: error reading system IO port "
		    "%lx.", (long)address);
		return (-1);
	}
	return (0);
}

/*
 * Return the supported frequencies.
 */
uint_t
cpu_acpi_get_speeds(cpu_acpi_handle_t handle, int **speeds)
{
	cpu_acpi_pstate_t *pstate;
	int *hspeeds;
	uint_t nspeeds;
	int i;

	nspeeds = CPU_ACPI_PSTATES_COUNT(handle);
	pstate = (cpu_acpi_pstate_t *)CPU_ACPI_PSTATES(handle);
	hspeeds = kmem_zalloc(nspeeds * sizeof (int), KM_SLEEP);
	for (i = 0; i < nspeeds; i++) {
		hspeeds[i] = CPU_ACPI_FREQ(pstate);
		pstate++;
	}
	*speeds = hspeeds;
	return (nspeeds);
}

/*
 * Free resources allocated by cpu_acpi_get_speeds().
 */
void
cpu_acpi_free_speeds(int *speeds, uint_t nspeeds)
{
	kmem_free(speeds, nspeeds * sizeof (int));
}

/*
 * Return the number of cached C-states, or 1 if no C-state data is cached.
 */
uint_t
cpu_acpi_get_max_cstates(cpu_acpi_handle_t handle)
{
	if (CPU_ACPI_CSTATES(handle))
		return (CPU_ACPI_CSTATES_COUNT(handle));
	else
		return (1);
}

/*
 * Set an ACPI bit register.
 */
void
cpu_acpi_set_register(uint32_t bitreg, uint32_t value)
{
	(void) AcpiWriteBitRegister(bitreg, value);
}

/*
 * Read an ACPI bit register.
 */
void
cpu_acpi_get_register(uint32_t bitreg, uint32_t *value)
{
	(void) AcpiReadBitRegister(bitreg, value);
}

/*
 * Map the CPU to an ACPI handle and allocate the per-CPU ACPI state.
 */
cpu_acpi_handle_t
cpu_acpi_init(cpu_t *cp)
{
	cpu_acpi_handle_t handle;

	handle = kmem_zalloc(sizeof (cpu_acpi_state_t), KM_SLEEP);

	if (ACPI_FAILURE(acpica_get_handle_cpu(cp->cpu_id,
	    &handle->cs_handle))) {
		kmem_free(handle, sizeof (cpu_acpi_state_t));
		return (NULL);
	}
	handle->cs_id = cp->cpu_id;
	return (handle);
}

/*
 * Free any resources allocated by cpu_acpi_init().
 */
void
cpu_acpi_fini(cpu_acpi_handle_t handle)
{
	if (handle)
		kmem_free(handle, sizeof (cpu_acpi_state_t));
}