// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 *
 * CPPC describes a few methods for controlling CPU performance using
 * information from a per CPU table called CPC. This table is described in
 * the ACPI v5.0+ specification. The table consists of a list of
 * registers which may be memory mapped or hardware registers and also may
 * include some static integer values.
 *
 * CPU performance is on an abstract continuous scale as opposed to a
 * discretized P-state scale which is tied to CPU frequency only. In brief,
 * the basic operation involves:
 *
 * - OS makes a CPU performance request. (Can provide min and max bounds)
 *
 * - Platform (such as BMC) is free to optimize request within requested bounds
 *   depending on power/thermal budgets etc.
 *
 * - Platform conveys its decision back to OS
 *
 * The communication between OS and platform occurs through another medium
 * called the Platform Communication Channel (PCC). This is a generic
 * mailbox-like mechanism which includes doorbell semantics to indicate
 * register updates. See drivers/mailbox/pcc.c for details on PCC.
 *
 * Finer details about the PCC and CPPC spec are available in the ACPI v5.1
 * and above specifications.
 */

#define pr_fmt(fmt)	"ACPI CPPC: " fmt

#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/ktime.h>
#include <linux/rwsem.h>
#include <linux/wait.h>
#include <linux/topology.h>
#include <linux/dmi.h>
#include <linux/units.h>
#include <asm/unaligned.h>

#include <acpi/cppc_acpi.h>

struct cppc_pcc_data {
	struct pcc_mbox_chan *pcc_channel;
	void __iomem *pcc_comm_addr;
	bool pcc_channel_acquired;
	unsigned int deadline_us;
	unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;

	bool pending_pcc_write_cmd;	/* Any pending/batched PCC write cmds? */
	bool platform_owns_pcc;		/* Ownership of PCC subspace */
	unsigned int pcc_write_cnt;	/* Running count of PCC write commands */

	/*
	 * Lock to provide controlled access to the PCC channel.
	 *
	 * For performance-critical use cases (currently cppc_set_perf), we
	 * need to take the read_lock and check if the channel belongs to
	 * OSPM before reading or writing to the PCC subspace. We need to
	 * take the write_lock before transferring the channel ownership to
	 * the platform via a doorbell. This allows us to batch a number of
	 * CPPC requests if they happen to originate at about the same time.
	 *
	 * For non-performance-critical use cases (init), take the
	 * write_lock for all purposes, which gives exclusive access.
	 */
	struct rw_semaphore pcc_lock;

	/* Wait queue for CPUs whose requests were batched */
	wait_queue_head_t pcc_write_wait_q;
	ktime_t last_cmd_cmpl_time;
	ktime_t last_mpar_reset;
	int mpar_count;
	int refcount;
};

/* Array to represent the PCC channel per subspace ID */
static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];
/* The cpu_pcc_subspace_idx contains per CPU subspace ID */
static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);

/*
 * The cpc_desc structure contains the ACPI register details
 * as described in the per CPU _CPC tables. The details
 * include the type of register (e.g. PCC, System IO, FFH etc.)
 * and destination addresses which let us READ/WRITE CPU performance
 * information using the appropriate I/O methods.
 */
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
/* pcc mapped address + header size + offset within PCC subspace */
#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
					0x8 + (offs))

/* Check if a CPC register is in PCC */
#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&		\
				(cpc)->cpc_entry.reg.space_id ==	\
				ACPI_ADR_SPACE_PLATFORM_COMM)

/* Check if a CPC register is in SystemMemory */
#define CPC_IN_SYSTEM_MEMORY(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&	\
				(cpc)->cpc_entry.reg.space_id ==	\
				ACPI_ADR_SPACE_SYSTEM_MEMORY)

/* Check if a CPC register is in SystemIo */
#define CPC_IN_SYSTEM_IO(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&	\
				(cpc)->cpc_entry.reg.space_id ==	\
				ACPI_ADR_SPACE_SYSTEM_IO)

/* Evaluates to True if reg is a NULL register descriptor */
#define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \
				(reg)->address == 0 &&			\
				(reg)->bit_width == 0 &&		\
				(reg)->bit_offset == 0 &&		\
				(reg)->access_width == 0)

/* Evaluates to True if an optional cpc field is supported */
#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ?		\
				!!(cpc)->cpc_entry.int_value :		\
				!IS_NULL_REG(&(cpc)->cpc_entry.reg))
/*
 * Arbitrary Retries in case the remote processor is slow to respond
 * to PCC commands. Keeping it high enough to cover emulators where
 * the processors run painfully slow.
 */
#define NUM_RETRIES 500ULL

#define OVER_16BTS_MASK ~0xFFFFULL

#define define_one_cppc_ro(_name)		\
static struct kobj_attribute _name =		\
__ATTR(_name, 0444, show_##_name, NULL)

#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)

#define show_cppc_data(access_fn, struct_name, member_name)		\
	static ssize_t show_##member_name(struct kobject *kobj,		\
					struct kobj_attribute *attr, char *buf) \
	{								\
		struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);		\
		struct struct_name st_name = {0};			\
		int ret;						\
									\
		ret = access_fn(cpc_ptr->cpu_id, &st_name);		\
		if (ret)						\
			return ret;					\
									\
		return sysfs_emit(buf, "%llu\n",			\
				(u64)st_name.member_name);		\
	}								\
	define_one_cppc_ro(member_name)

show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);

show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);

/* Check for valid access_width, otherwise, fallback to using bit_width */
#define GET_BIT_WIDTH(reg) ((reg)->access_width ? \
			    (8 << ((reg)->access_width - 1)) : (reg)->bit_width)

/* Shift and apply the mask for CPC reads/writes */
#define MASK_VAL(reg, val) (((val) >> (reg)->bit_offset) &	\
			    GENMASK(((reg)->bit_width) - 1, 0))
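/*
 * A small worked example of the two helpers above, using hypothetical
 * register values (not taken from any real firmware table): for a
 * SystemMemory register with access_width = 2, bit_width = 8 and
 * bit_offset = 8, GET_BIT_WIDTH() yields 8 << (2 - 1) = 16, so the I/O
 * access is 16 bits wide. If that access reads the raw value 0xABCD,
 * MASK_VAL() computes (0xABCD >> 8) & GENMASK(7, 0) = 0xAB, i.e. the
 * 8-bit field starting at bit 8.
 */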
static ssize_t show_feedback_ctrs(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
	struct cppc_perf_fb_ctrs fb_ctrs = {0};
	int ret;

	ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
	if (ret)
		return ret;

	return sysfs_emit(buf, "ref:%llu del:%llu\n",
			fb_ctrs.reference, fb_ctrs.delivered);
}
define_one_cppc_ro(feedback_ctrs);

static struct attribute *cppc_attrs[] = {
	&feedback_ctrs.attr,
	&reference_perf.attr,
	&wraparound_time.attr,
	&highest_perf.attr,
	&lowest_perf.attr,
	&lowest_nonlinear_perf.attr,
	&nominal_perf.attr,
	&nominal_freq.attr,
	&lowest_freq.attr,
	NULL
};
ATTRIBUTE_GROUPS(cppc);

static const struct kobj_type cppc_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = cppc_groups,
};

static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
{
	int ret, status;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
		pcc_ss_data->pcc_comm_addr;

	if (!pcc_ss_data->platform_owns_pcc)
		return 0;

	/*
	 * Poll PCC status register every 3us(delay_us) for maximum of
	 * deadline_us(timeout_us) until PCC command complete bit is set(cond)
	 */
	ret = readw_relaxed_poll_timeout(&generic_comm_base->status, status,
					status & PCC_CMD_COMPLETE_MASK, 3,
					pcc_ss_data->deadline_us);

	if (likely(!ret)) {
		pcc_ss_data->platform_owns_pcc = false;
		if (chk_err_bit && (status & PCC_ERROR_MASK))
			ret = -EIO;
	}

	if (unlikely(ret))
		pr_err("PCC check channel failed for ss: %d. ret=%d\n",
		       pcc_ss_id, ret);

	return ret;
}
/*
 * This function transfers the ownership of the PCC to the platform,
 * so it must be called while holding write_lock(pcc_lock).
 */
static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
{
	int ret = -EIO, i;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
		pcc_ss_data->pcc_comm_addr;
	unsigned int time_delta;

	/*
	 * For CMD_WRITE we know for a fact the caller should have checked
	 * the channel before writing to PCC space
	 */
	if (cmd == CMD_READ) {
		/*
		 * If there are pending cpc_writes, then we stole the channel
		 * before write completion, so first send a WRITE command to
		 * platform
		 */
		if (pcc_ss_data->pending_pcc_write_cmd)
			send_pcc_cmd(pcc_ss_id, CMD_WRITE);

		ret = check_pcc_chan(pcc_ss_id, false);
		if (ret)
			goto end;
	} else /* CMD_WRITE */
		pcc_ss_data->pending_pcc_write_cmd = FALSE;

	/*
	 * Handle the Minimum Request Turnaround Time(MRTT)
	 * "The minimum amount of time that OSPM must wait after the completion
	 * of a command before issuing the next command, in microseconds"
	 */
	if (pcc_ss_data->pcc_mrtt) {
		time_delta = ktime_us_delta(ktime_get(),
					    pcc_ss_data->last_cmd_cmpl_time);
		if (pcc_ss_data->pcc_mrtt > time_delta)
			udelay(pcc_ss_data->pcc_mrtt - time_delta);
	}

	/*
	 * Handle the non-zero Maximum Periodic Access Rate(MPAR)
	 * "The maximum number of periodic requests that the subspace channel can
	 * support, reported in commands per minute. 0 indicates no limitation."
	 *
	 * This parameter should be ideally zero or large enough so that it can
	 * handle maximum number of requests that all the cores in the system can
	 * collectively generate. If it is not, we will follow the spec and just
	 * not send the request to the platform after hitting the MPAR limit in
	 * any 60s window
	 */
	if (pcc_ss_data->pcc_mpar) {
		if (pcc_ss_data->mpar_count == 0) {
			time_delta = ktime_ms_delta(ktime_get(),
						    pcc_ss_data->last_mpar_reset);
			if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
				pr_debug("PCC cmd for subspace %d not sent due to MPAR limit",
					 pcc_ss_id);
				ret = -EIO;
				goto end;
			}
			pcc_ss_data->last_mpar_reset = ktime_get();
			pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
		}
		pcc_ss_data->mpar_count--;
	}

	/* Write to the shared comm region. */
	writew_relaxed(cmd, &generic_comm_base->command);

	/* Flip CMD COMPLETE bit */
	writew_relaxed(0, &generic_comm_base->status);

	pcc_ss_data->platform_owns_pcc = true;

	/* Ring doorbell */
	ret = mbox_send_message(pcc_ss_data->pcc_channel->mchan, &cmd);
	if (ret < 0) {
		pr_err("Err sending PCC mbox message. ss: %d cmd:%d, ret:%d\n",
		       pcc_ss_id, cmd, ret);
		goto end;
	}

	/* wait for completion and check for PCC error bit */
	ret = check_pcc_chan(pcc_ss_id, true);

	if (pcc_ss_data->pcc_mrtt)
		pcc_ss_data->last_cmd_cmpl_time = ktime_get();

	if (pcc_ss_data->pcc_channel->mchan->mbox->txdone_irq)
		mbox_chan_txdone(pcc_ss_data->pcc_channel->mchan, ret);
	else
		mbox_client_txdone(pcc_ss_data->pcc_channel->mchan, ret);

end:
	if (cmd == CMD_WRITE) {
		if (unlikely(ret)) {
			for_each_possible_cpu(i) {
				struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);

				if (!desc)
					continue;

				if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
					desc->write_cmd_status = ret;
			}
		}
		pcc_ss_data->pcc_write_cnt++;
		wake_up_all(&pcc_ss_data->pcc_write_wait_q);
	}

	return ret;
}
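/*
 * To illustrate the two throttles above with hypothetical PCCT values
 * (not from any real platform): with MRTT = 60 us, a command issued
 * 25 us after the previous completion is delayed by udelay(35) first.
 * With MPAR = 120 commands/minute, mpar_count starts at 120; once 120
 * commands have been sent inside a 60 s window, further commands fail
 * with -EIO until the window expires and the budget is reset.
 */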
static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
{
	if (ret < 0)
		pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
			 *(u16 *)msg, ret);
	else
		pr_debug("TX completed. CMD sent:%x, ret:%d\n",
			 *(u16 *)msg, ret);
}

static struct mbox_client cppc_mbox_cl = {
	.tx_done = cppc_chan_tx_done,
	.knows_txdone = true,
};

static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
{
	int result = -EFAULT;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;
	struct acpi_psd_package *pdomain;

	status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
					    &buffer, ACPI_TYPE_PACKAGE);
	if (status == AE_NOT_FOUND)	/* _PSD is optional */
		return 0;
	if (ACPI_FAILURE(status))
		return -ENODEV;

	psd = buffer.pointer;
	if (!psd || psd->package.count != 1) {
		pr_debug("Invalid _PSD data\n");
		goto end;
	}

	pdomain = &(cpc_ptr->domain_info);

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]),
		&format, &state);
	if (ACPI_FAILURE(status)) {
		pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		pr_debug("Unknown _PSD:revision for CPU: %d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	result = 0;
end:
	kfree(buffer.pointer);
	return result;
}

bool acpi_cpc_valid(void)
{
	struct cpc_desc *cpc_ptr;
	int cpu;

	if (acpi_disabled)
		return false;

	for_each_present_cpu(cpu) {
		cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
		if (!cpc_ptr)
			return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(acpi_cpc_valid);

bool cppc_allow_fast_switch(void)
{
	struct cpc_register_resource *desired_reg;
	struct cpc_desc *cpc_ptr;
	int cpu;

	for_each_possible_cpu(cpu) {
		cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
		desired_reg = &cpc_ptr->cpc_regs[DESIRED_PERF];
		if (!CPC_IN_SYSTEM_MEMORY(desired_reg) &&
		    !CPC_IN_SYSTEM_IO(desired_reg))
			return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(cppc_allow_fast_switch);

/**
 * acpi_get_psd_map - Map the CPUs in the freq domain of a given cpu
 * @cpu: Find all CPUs that share a domain with cpu.
 * @cpu_data: Pointer to CPU specific CPPC data including PSD info.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data)
{
	struct cpc_desc *cpc_ptr, *match_cpc_ptr;
	struct acpi_psd_package *match_pdomain;
	struct acpi_psd_package *pdomain;
	int count_target, i;

	/*
	 * Now that we have _PSD data from all CPUs, let's setup P-state
	 * domain info.
	 */
	cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
	if (!cpc_ptr)
		return -EFAULT;

	pdomain = &(cpc_ptr->domain_info);
	cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
	if (pdomain->num_processors <= 1)
		return 0;

	/* Validate the Domain info */
	count_target = pdomain->num_processors;
	if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ALL;
	else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_HW;
	else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ANY;

	for_each_possible_cpu(i) {
		if (i == cpu)
			continue;

		match_cpc_ptr = per_cpu(cpc_desc_ptr, i);
		if (!match_cpc_ptr)
			goto err_fault;

		match_pdomain = &(match_cpc_ptr->domain_info);
		if (match_pdomain->domain != pdomain->domain)
			continue;

		/* Here i and cpu are in the same domain */
		if (match_pdomain->num_processors != count_target)
			goto err_fault;

		if (pdomain->coord_type != match_pdomain->coord_type)
			goto err_fault;

		cpumask_set_cpu(i, cpu_data->shared_cpu_map);
	}

	return 0;

err_fault:
	/* Assume no coordination on any error parsing domain info */
	cpumask_clear(cpu_data->shared_cpu_map);
	cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
	cpu_data->shared_type = CPUFREQ_SHARED_TYPE_NONE;

	return -EFAULT;
}
EXPORT_SYMBOL_GPL(acpi_get_psd_map);

static int register_pcc_channel(int pcc_ss_idx)
{
	struct pcc_mbox_chan *pcc_chan;
	u64 usecs_lat;

	if (pcc_ss_idx >= 0) {
		pcc_chan = pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);

		if (IS_ERR(pcc_chan)) {
			pr_err("Failed to find PCC channel for subspace %d\n",
			       pcc_ss_idx);
			return -ENODEV;
		}

		pcc_data[pcc_ss_idx]->pcc_channel = pcc_chan;
		/*
		 * cppc_ss->latency is just a Nominal value. In reality
		 * the remote processor could be much slower to reply.
		 * So add an arbitrary amount of wait on top of Nominal.
		 */
		usecs_lat = NUM_RETRIES * pcc_chan->latency;
		pcc_data[pcc_ss_idx]->deadline_us = usecs_lat;
		pcc_data[pcc_ss_idx]->pcc_mrtt = pcc_chan->min_turnaround_time;
		pcc_data[pcc_ss_idx]->pcc_mpar = pcc_chan->max_access_rate;
		pcc_data[pcc_ss_idx]->pcc_nominal = pcc_chan->latency;

		pcc_data[pcc_ss_idx]->pcc_comm_addr =
			acpi_os_ioremap(pcc_chan->shmem_base_addr,
					pcc_chan->shmem_size);
		if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
			pr_err("Failed to ioremap PCC comm region mem for %d\n",
			       pcc_ss_idx);
			return -ENOMEM;
		}

		/* Set flag so that we don't come here for each CPU. */
		pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
	}

	return 0;
}
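/*
 * To make the deadline computation above concrete with a hypothetical
 * PCCT value: if the subspace reports a nominal latency of 100 us,
 * deadline_us = NUM_RETRIES * 100 = 50,000 us, so check_pcc_chan()
 * polls for up to 50 ms before declaring the command failed.
 */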
/**
 * cpc_ffh_supported() - check if FFH reading supported
 *
 * Check if the architecture has support for functional fixed hardware
 * read/write capability.
 *
 * Return: true for supported, false for not supported
 */
bool __weak cpc_ffh_supported(void)
{
	return false;
}

/**
 * cpc_supported_by_cpu() - check if CPPC is supported by CPU
 *
 * Check if the architectural support for CPPC is present even
 * if the _OSC hasn't prescribed it
 *
 * Return: true for supported, false for not supported
 */
bool __weak cpc_supported_by_cpu(void)
{
	return false;
}

/**
 * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
 * @pcc_ss_id: PCC Subspace index as in the PCC client ACPI package.
 *
 * Check and allocate the cppc_pcc_data memory.
 * In some processor configurations it is possible that same subspace
 * is shared between multiple CPUs. This is seen especially in CPUs
 * with hardware multi-threading support.
 *
 * Return: 0 for success, errno for failure
 */
static int pcc_data_alloc(int pcc_ss_id)
{
	if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
		return -EINVAL;

	if (pcc_data[pcc_ss_id]) {
		pcc_data[pcc_ss_id]->refcount++;
	} else {
		pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data),
					      GFP_KERNEL);
		if (!pcc_data[pcc_ss_id])
			return -ENOMEM;
		pcc_data[pcc_ss_id]->refcount++;
	}

	return 0;
}

/*
 * An example CPC table looks like the following.
 *
 * Name (_CPC, Package() {
 *	17,							// NumEntries
 *	1,							// Revision
 *	ResourceTemplate() {Register(PCC, 32, 0, 0x120, 2)},	// Highest Performance
 *	ResourceTemplate() {Register(PCC, 32, 0, 0x124, 2)},	// Nominal Performance
 *	ResourceTemplate() {Register(PCC, 32, 0, 0x128, 2)},	// Lowest Nonlinear Performance
 *	ResourceTemplate() {Register(PCC, 32, 0, 0x12C, 2)},	// Lowest Performance
 *	ResourceTemplate() {Register(PCC, 32, 0, 0x130, 2)},	// Guaranteed Performance Register
 *	ResourceTemplate() {Register(PCC, 32, 0, 0x110, 2)},	// Desired Performance Register
 *	ResourceTemplate() {Register(SystemMemory, 0, 0, 0, 0)},
 *	...
 *	...
 *	...
 * }
 * Each Register() encodes how to access that specific register.
 * e.g. a sample PCC entry has the following encoding:
 *
 * Register (
 *	PCC,	// AddressSpaceKeyword
 *	8,	// RegisterBitWidth
 *	8,	// RegisterBitOffset
 *	0x30,	// RegisterAddress
 *	9,	// AccessSize (subspace ID)
 * )
 */
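/*
 * Tying the sample entry above to the helpers defined earlier (a reading
 * aid, not an additional spec requirement): for that PCC Register(),
 * bit_width = 8, bit_offset = 8, RegisterAddress = 0x30 and
 * AccessSize = 9, so the register lives in PCC subspace 9 and is
 * accessed via GET_PCC_VADDR(0x30, 9), i.e. at offset 0x8 + 0x30 from
 * that subspace's shared memory base.
 */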
#ifndef arch_init_invariance_cppc
static inline void arch_init_invariance_cppc(void) { }
#endif

/**
 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_cppc_processor_probe(struct acpi_processor *pr)
{
	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *out_obj, *cpc_obj;
	struct cpc_desc *cpc_ptr;
	struct cpc_reg *gas_t;
	struct device *cpu_dev;
	acpi_handle handle = pr->handle;
	unsigned int num_ent, i, cpc_rev;
	int pcc_subspace_id = -1;
	acpi_status status;
	int ret = -ENODATA;

	if (!osc_sb_cppc2_support_acked) {
		pr_debug("CPPC v2 _OSC not acked\n");
		if (!cpc_supported_by_cpu()) {
			pr_debug("CPPC is not supported by the CPU\n");
			return -ENODEV;
		}
	}

	/* Parse the ACPI _CPC table for this CPU. */
	status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
			ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status)) {
		ret = -ENODEV;
		goto out_buf_free;
	}

	out_obj = (union acpi_object *) output.pointer;

	cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
	if (!cpc_ptr) {
		ret = -ENOMEM;
		goto out_buf_free;
	}

	/* First entry is NumEntries. */
	cpc_obj = &out_obj->package.elements[0];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		num_ent = cpc_obj->integer.value;
		if (num_ent <= 1) {
			pr_debug("Unexpected _CPC NumEntries value (%d) for CPU:%d\n",
				 num_ent, pr->id);
			goto out_free;
		}
	} else {
		pr_debug("Unexpected _CPC NumEntries entry type (%d) for CPU:%d\n",
			 cpc_obj->type, pr->id);
		goto out_free;
	}

	/* Second entry should be revision. */
	cpc_obj = &out_obj->package.elements[1];
	if (cpc_obj->type == ACPI_TYPE_INTEGER)	{
		cpc_rev = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected _CPC Revision entry type (%d) for CPU:%d\n",
			 cpc_obj->type, pr->id);
		goto out_free;
	}

	if (cpc_rev < CPPC_V2_REV) {
		pr_debug("Unsupported _CPC Revision (%d) for CPU:%d\n", cpc_rev,
			 pr->id);
		goto out_free;
	}

	/*
	 * Disregard _CPC if the number of entries in the return package is not
	 * as expected, but support future revisions being proper supersets of
	 * v3 and only causing more entries to be returned by _CPC.
	 */
	if ((cpc_rev == CPPC_V2_REV && num_ent != CPPC_V2_NUM_ENT) ||
	    (cpc_rev == CPPC_V3_REV && num_ent != CPPC_V3_NUM_ENT) ||
	    (cpc_rev > CPPC_V3_REV && num_ent <= CPPC_V3_NUM_ENT)) {
		pr_debug("Unexpected number of _CPC return package entries (%d) for CPU:%d\n",
			 num_ent, pr->id);
		goto out_free;
	}
	if (cpc_rev > CPPC_V3_REV) {
		num_ent = CPPC_V3_NUM_ENT;
		cpc_rev = CPPC_V3_REV;
	}

	cpc_ptr->num_entries = num_ent;
	cpc_ptr->version = cpc_rev;

	/* Iterate through remaining entries in _CPC */
	for (i = 2; i < num_ent; i++) {
		cpc_obj = &out_obj->package.elements[i];

		if (cpc_obj->type == ACPI_TYPE_INTEGER)	{
			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
			cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
		} else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
			gas_t = (struct cpc_reg *)
				cpc_obj->buffer.pointer;

			/*
			 * The PCC Subspace index is encoded inside
			 * the CPC table entries. The same PCC index
			 * will be used for all the PCC entries,
			 * so extract it only once.
			 */
			if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
				if (pcc_subspace_id < 0) {
					pcc_subspace_id = gas_t->access_width;
					if (pcc_data_alloc(pcc_subspace_id))
						goto out_free;
				} else if (pcc_subspace_id != gas_t->access_width) {
					pr_debug("Mismatched PCC ids in _CPC for CPU:%d\n",
						 pr->id);
					goto out_free;
				}
			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
				if (gas_t->address) {
					void __iomem *addr;
					size_t access_width;

					if (!osc_cpc_flexible_adr_space_confirmed) {
						pr_debug("Flexible address space capability not supported\n");
						if (!cpc_supported_by_cpu())
							goto out_free;
					}

					access_width = GET_BIT_WIDTH(gas_t) / 8;
					addr = ioremap(gas_t->address, access_width);
					if (!addr)
						goto out_free;
					cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
				}
			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
				if (gas_t->access_width < 1 || gas_t->access_width > 3) {
					/*
					 * 1 = 8-bit, 2 = 16-bit, and 3 = 32-bit.
					 * SystemIO doesn't implement 64-bit
					 * registers.
					 */
					pr_debug("Invalid access width %d for SystemIO register in _CPC\n",
						 gas_t->access_width);
					goto out_free;
				}
				if (gas_t->address & OVER_16BTS_MASK) {
					/* SystemIO registers use 16-bit integer addresses */
					pr_debug("Invalid IO port %llu for SystemIO register in _CPC\n",
						 gas_t->address);
					goto out_free;
				}
				if (!osc_cpc_flexible_adr_space_confirmed) {
					pr_debug("Flexible address space capability not supported\n");
					if (!cpc_supported_by_cpu())
						goto out_free;
				}
			} else {
				if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
					/* Support only PCC, SystemMemory, SystemIO, and FFH type regs. */
					pr_debug("Unsupported register type (%d) in _CPC\n",
						 gas_t->space_id);
					goto out_free;
				}
			}

			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
			memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
		} else {
			pr_debug("Invalid entry type at index %d in _CPC for CPU:%d\n",
				 i, pr->id);
			goto out_free;
		}
	}
	per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;

	/*
	 * Initialize the remaining cpc_regs as unsupported.
	 * Example: In case FW exposes CPPC v2, the below loop will initialize
	 * LOWEST_FREQ and NOMINAL_FREQ regs as unsupported
	 */
	for (i = num_ent - 2; i < MAX_CPC_REG_ENT; i++) {
		cpc_ptr->cpc_regs[i].type = ACPI_TYPE_INTEGER;
		cpc_ptr->cpc_regs[i].cpc_entry.int_value = 0;
	}

	/* Store CPU Logical ID */
	cpc_ptr->cpu_id = pr->id;

	/* Parse PSD data for this CPU */
	ret = acpi_get_psd(cpc_ptr, handle);
	if (ret)
		goto out_free;

	/* Register PCC channel once for all PCC subspace ID. */
	if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
		ret = register_pcc_channel(pcc_subspace_id);
		if (ret)
			goto out_free;

		init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
		init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
	}

	/* Everything looks okay */
	pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);

	/* Add per logical CPU nodes for reading its feedback counters. */
	cpu_dev = get_cpu_device(pr->id);
	if (!cpu_dev) {
		ret = -EINVAL;
		goto out_free;
	}

	/* Plug PSD data into this CPU's CPC descriptor. */
	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;

	ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
			"acpi_cppc");
	if (ret) {
		per_cpu(cpc_desc_ptr, pr->id) = NULL;
		kobject_put(&cpc_ptr->kobj);
		goto out_free;
	}

	arch_init_invariance_cppc();

	kfree(output.pointer);
	return 0;

out_free:
	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;

		if (addr)
			iounmap(addr);
	}
	kfree(cpc_ptr);

out_buf_free:
	kfree(output.pointer);
	return ret;
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);

/**
 * acpi_cppc_processor_exit - Cleanup CPC structs.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: Void
 */
void acpi_cppc_processor_exit(struct acpi_processor *pr)
{
	struct cpc_desc *cpc_ptr;
	unsigned int i;
	void __iomem *addr;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);

	if (pcc_ss_id >= 0 && pcc_data[pcc_ss_id]) {
		if (pcc_data[pcc_ss_id]->pcc_channel_acquired) {
			pcc_data[pcc_ss_id]->refcount--;
			if (!pcc_data[pcc_ss_id]->refcount) {
				pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
				kfree(pcc_data[pcc_ss_id]);
				pcc_data[pcc_ss_id] = NULL;
			}
		}
	}

	cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
	if (!cpc_ptr)
		return;

	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
		if (addr)
			iounmap(addr);
	}

	kobject_put(&cpc_ptr->kobj);
	kfree(cpc_ptr);
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);

/**
 * cpc_read_ffh() - Read FFH register
 * @cpunum:	CPU number to read
 * @reg:	cppc register information
 * @val:	place holder for return value
 *
 * Read bit_width bits from a specified address and bit_offset
 *
 * Return: 0 for success and error code
 */
int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
	return -ENOTSUPP;
}

/**
 * cpc_write_ffh() - Write FFH register
 * @cpunum:	CPU number to write
 * @reg:	cppc register information
 * @val:	value to write
 *
 * Write value of bit_width bits to a specified address and bit_offset
 *
 * Return: 0 for success and error code
 */
int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
	return -ENOTSUPP;
}
/*
 * Since cpc_read() and cpc_write() are called while holding pcc_lock,
 * they should be as fast as possible. We have already mapped the PCC
 * subspace during init, so we can directly write to it.
 */

static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
{
	void __iomem *vaddr = NULL;
	int size;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg_res->type == ACPI_TYPE_INTEGER) {
		*val = reg_res->cpc_entry.int_value;
		return 0;
	}

	*val = 0;
	size = GET_BIT_WIDTH(reg);

	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
		u32 val_u32;
		acpi_status status;

		status = acpi_os_read_port((acpi_io_address)reg->address,
					   &val_u32, size);
		if (ACPI_FAILURE(status)) {
			pr_debug("Error: Failed to read SystemIO port %llx\n",
				 reg->address);
			return -EFAULT;
		}

		*val = val_u32;
		return 0;
	} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) {
		/*
		 * For registers in PCC space, the register size is determined
		 * by the bit width field; the access size is used to indicate
		 * the PCC subspace id.
		 */
		size = reg->bit_width;
		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
	}
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_read_ffh(cpu, reg, val);
	else
		return acpi_os_read_memory((acpi_physical_address)reg->address,
					   val, size);

	switch (size) {
	case 8:
		*val = readb_relaxed(vaddr);
		break;
	case 16:
		*val = readw_relaxed(vaddr);
		break;
	case 32:
		*val = readl_relaxed(vaddr);
		break;
	case 64:
		*val = readq_relaxed(vaddr);
		break;
	default:
		if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
			pr_debug("Error: Cannot read %u bit width from system memory: 0x%llx\n",
				 size, reg->address);
		} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
			pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
				 size, pcc_ss_id);
		}
		return -EFAULT;
	}

	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		*val = MASK_VAL(reg, *val);

	return 0;
}

static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{
	int ret_val = 0;
	int size;
	void __iomem *vaddr = NULL;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	size = GET_BIT_WIDTH(reg);

	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
		acpi_status status;

		status = acpi_os_write_port((acpi_io_address)reg->address,
					    (u32)val, size);
		if (ACPI_FAILURE(status)) {
			pr_debug("Error: Failed to write SystemIO port %llx\n",
				 reg->address);
			return -EFAULT;
		}

		return 0;
	} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) {
		/*
		 * For registers in PCC space, the register size is determined
		 * by the bit width field; the access size is used to indicate
		 * the PCC subspace id.
		 */
		size = reg->bit_width;
		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
	}
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_write_ffh(cpu, reg, val);
	else
		return acpi_os_write_memory((acpi_physical_address)reg->address,
					    val, size);

	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		val = MASK_VAL(reg, val);

	switch (size) {
	case 8:
		writeb_relaxed(val, vaddr);
		break;
	case 16:
		writew_relaxed(val, vaddr);
		break;
	case 32:
		writel_relaxed(val, vaddr);
		break;
	case 64:
		writeq_relaxed(val, vaddr);
		break;
	default:
		if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
			pr_debug("Error: Cannot write %u bit width to system memory: 0x%llx\n",
				 size, reg->address);
		} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
			pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
				 size, pcc_ss_id);
		}
		ret_val = -EFAULT;
		break;
	}

	return ret_val;
}

static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *reg;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	reg = &cpc_desc->cpc_regs[reg_idx];

	if (CPC_IN_PCC(reg)) {
		int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
		struct cppc_pcc_data *pcc_ss_data = NULL;
		int ret = 0;

		if (pcc_ss_id < 0)
			return -EIO;

		pcc_ss_data = pcc_data[pcc_ss_id];

		down_write(&pcc_ss_data->pcc_lock);

		if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
			cpc_read(cpunum, reg, perf);
		else
			ret = -EIO;

		up_write(&pcc_ss_data->pcc_lock);

		return ret;
	}

	cpc_read(cpunum, reg, perf);

	return 0;
}

/**
 * cppc_get_desired_perf - Get the desired performance register value.
 * @cpunum: CPU from which to get desired performance.
 * @desired_perf: Return address.
 *
 * Return: 0 for success, -EIO otherwise.
 */
int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
{
	return cppc_get_perf(cpunum, DESIRED_PERF, desired_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_desired_perf);

/**
 * cppc_get_nominal_perf - Get the nominal performance register value.
 * @cpunum: CPU from which to get nominal performance.
 * @nominal_perf: Return address.
 *
 * Return: 0 for success, -EIO otherwise.
 */
int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
{
	return cppc_get_perf(cpunum, NOMINAL_PERF, nominal_perf);
}

/**
 * cppc_get_highest_perf - Get the highest performance register value.
 * @cpunum: CPU from which to get highest performance.
 * @highest_perf: Return address.
 *
 * Return: 0 for success, -EIO otherwise.
 */
int cppc_get_highest_perf(int cpunum, u64 *highest_perf)
{
	return cppc_get_perf(cpunum, HIGHEST_PERF, highest_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_highest_perf);
/**
 * cppc_get_epp_perf - Get the epp register value.
 * @cpunum: CPU from which to get epp preference value.
 * @epp_perf: Return address.
 *
 * Return: 0 for success, -EIO otherwise.
 */
int cppc_get_epp_perf(int cpunum, u64 *epp_perf)
{
	return cppc_get_perf(cpunum, ENERGY_PERF, epp_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_epp_perf);

/**
 * cppc_get_perf_caps - Get a CPU's performance capabilities.
 * @cpunum: CPU from which to get capabilities info.
 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
 *
 * Return: 0 for success with perf_caps populated else -ERRNO.
 */
int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *highest_reg, *lowest_reg,
		*lowest_non_linear_reg, *nominal_reg, *guaranteed_reg,
		*low_freq_reg = NULL, *nom_freq_reg = NULL;
	u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
	lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
	lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
	nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
	low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ];
	nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ];
	guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF];

	/* Are any of the regs PCC ? */
	if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
	    CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) ||
	    CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		regs_in_pcc = 1;
		down_write(&pcc_ss_data->pcc_lock);
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, highest_reg, &high);
	perf_caps->highest_perf = high;

	cpc_read(cpunum, lowest_reg, &low);
	perf_caps->lowest_perf = low;

	cpc_read(cpunum, nominal_reg, &nom);
	perf_caps->nominal_perf = nom;

	if (guaranteed_reg->type != ACPI_TYPE_BUFFER ||
	    IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
		perf_caps->guaranteed_perf = 0;
	} else {
		cpc_read(cpunum, guaranteed_reg, &guaranteed);
		perf_caps->guaranteed_perf = guaranteed;
	}

	cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
	perf_caps->lowest_nonlinear_perf = min_nonlinear;

	if (!high || !low || !nom || !min_nonlinear)
		ret = -EFAULT;

	/* Read optional lowest and nominal frequencies if present */
	if (CPC_SUPPORTED(low_freq_reg))
		cpc_read(cpunum, low_freq_reg, &low_f);

	if (CPC_SUPPORTED(nom_freq_reg))
		cpc_read(cpunum, nom_freq_reg, &nom_f);

	perf_caps->lowest_freq = low_f;
	perf_caps->nominal_freq = nom_f;

out_err:
	if (regs_in_pcc)
		up_write(&pcc_ss_data->pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
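/*
 * A minimal usage sketch (hypothetical driver code, not part of this
 * file): a cpufreq driver would typically query the capabilities once
 * per CPU at policy initialization and derive frequency limits from
 * them:
 *
 *	struct cppc_perf_caps caps;
 *
 *	if (!cppc_get_perf_caps(cpu, &caps))
 *		max_khz = cppc_perf_to_khz(&caps, caps.highest_perf);
 */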
/**
 * cppc_perf_ctrs_in_pcc - Check if any perf counters are in a PCC region.
 *
 * CPPC has flexibility about how CPU performance counters are accessed.
 * One of the choices is PCC regions, which can have a high access latency. This
 * routine allows callers of cppc_get_perf_ctrs() to know this ahead of time.
 *
 * Return: true if any of the counters are in PCC regions, false otherwise
 */
bool cppc_perf_ctrs_in_pcc(void)
{
	int cpu;

	for_each_present_cpu(cpu) {
		struct cpc_register_resource *ref_perf_reg;
		struct cpc_desc *cpc_desc;

		cpc_desc = per_cpu(cpc_desc_ptr, cpu);

		if (CPC_IN_PCC(&cpc_desc->cpc_regs[DELIVERED_CTR]) ||
		    CPC_IN_PCC(&cpc_desc->cpc_regs[REFERENCE_CTR]) ||
		    CPC_IN_PCC(&cpc_desc->cpc_regs[CTR_WRAP_TIME]))
			return true;

		ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];

		/*
		 * If reference perf register is not supported then we should
		 * use the nominal perf value
		 */
		if (!CPC_SUPPORTED(ref_perf_reg))
			ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];

		if (CPC_IN_PCC(ref_perf_reg))
			return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(cppc_perf_ctrs_in_pcc);

/**
 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
 * @cpunum: CPU from which to read counters.
 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
 *
 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
 */
int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *delivered_reg, *reference_reg,
		*ref_perf_reg, *ctr_wrap_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	u64 delivered, reference, ref_perf, ctr_wrap_time;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
	reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
	ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
	ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];

	/*
	 * If reference perf register is not supported then we should
	 * use the nominal perf value
	 */
	if (!CPC_SUPPORTED(ref_perf_reg))
		ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];

	/* Are any of the regs PCC ? */
	if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
	    CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		down_write(&pcc_ss_data->pcc_lock);
		regs_in_pcc = 1;
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, delivered_reg, &delivered);
	cpc_read(cpunum, reference_reg, &reference);
	cpc_read(cpunum, ref_perf_reg, &ref_perf);

	/*
	 * Per spec, if ctr_wrap_time optional register is unsupported, then the
	 * performance counters are assumed to never wrap during the lifetime of
	 * platform
	 */
	ctr_wrap_time = (u64)(~((u64)0));
	if (CPC_SUPPORTED(ctr_wrap_reg))
		cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);

	if (!delivered || !reference || !ref_perf) {
		ret = -EFAULT;
		goto out_err;
	}

	perf_fb_ctrs->delivered = delivered;
	perf_fb_ctrs->reference = reference;
	perf_fb_ctrs->reference_perf = ref_perf;
	perf_fb_ctrs->wraparound_time = ctr_wrap_time;
out_err:
	if (regs_in_pcc)
		up_write(&pcc_ss_data->pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
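/*
 * Consumers typically sample these counters twice and derive the average
 * delivered performance over the interval from the counter deltas. A
 * sketch of the usual computation (hypothetical variable names, t0 and
 * t1 being two cppc_perf_fb_ctrs snapshots):
 *
 *	delta_delivered = t1.delivered - t0.delivered;
 *	delta_reference = t1.reference - t0.reference;
 *	if (delta_reference)
 *		delivered_perf = (t0.reference_perf * delta_delivered) /
 *				 delta_reference;
 */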
/*
 * Set Energy Performance Preference Register value through
 * Performance Controls Interface
 */
int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
{
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_register_resource *epp_set_reg;
	struct cpc_register_resource *auto_sel_reg;
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
	epp_set_reg = &cpc_desc->cpc_regs[ENERGY_PERF];

	if (CPC_IN_PCC(epp_set_reg) || CPC_IN_PCC(auto_sel_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id for CPU:%d\n", cpu);
			return -ENODEV;
		}

		if (CPC_SUPPORTED(auto_sel_reg)) {
			ret = cpc_write(cpu, auto_sel_reg, enable);
			if (ret)
				return ret;
		}

		if (CPC_SUPPORTED(epp_set_reg)) {
			ret = cpc_write(cpu, epp_set_reg, perf_ctrls->energy_perf);
			if (ret)
				return ret;
		}

		pcc_ss_data = pcc_data[pcc_ss_id];

		down_write(&pcc_ss_data->pcc_lock);
		/* after writing CPC, transfer the ownership of PCC to platform */
		ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
		up_write(&pcc_ss_data->pcc_lock);
	} else {
		ret = -ENOTSUPP;
		pr_debug("_CPC in PCC is not supported\n");
	}

	return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_epp_perf);

/**
 * cppc_get_auto_sel_caps - Read autonomous selection register.
 * @cpunum : CPU from which to read register.
 * @perf_caps : struct where autonomous selection register value is updated.
 */
int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *auto_sel_reg;
	u64 auto_sel;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];

	if (!CPC_SUPPORTED(auto_sel_reg))
		pr_warn_once("Autonomous mode is not supported!\n");

	if (CPC_IN_PCC(auto_sel_reg)) {
		int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
		struct cppc_pcc_data *pcc_ss_data = NULL;
		int ret = 0;

		if (pcc_ss_id < 0)
			return -ENODEV;

		pcc_ss_data = pcc_data[pcc_ss_id];

		down_write(&pcc_ss_data->pcc_lock);

		if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0) {
			cpc_read(cpunum, auto_sel_reg, &auto_sel);
			perf_caps->auto_sel = (bool)auto_sel;
		} else {
			ret = -EIO;
		}

		up_write(&pcc_ss_data->pcc_lock);

		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cppc_get_auto_sel_caps);
/**
 * cppc_set_auto_sel - Write autonomous selection register.
 * @cpu    : CPU to which to write register.
 * @enable : the desired value of autonomous selection register to be updated.
 */
int cppc_set_auto_sel(int cpu, bool enable)
{
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_register_resource *auto_sel_reg;
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = -EINVAL;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];

	if (CPC_IN_PCC(auto_sel_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}

		if (CPC_SUPPORTED(auto_sel_reg)) {
			ret = cpc_write(cpu, auto_sel_reg, enable);
			if (ret)
				return ret;
		}

		pcc_ss_data = pcc_data[pcc_ss_id];

		down_write(&pcc_ss_data->pcc_lock);
		/* after writing CPC, transfer the ownership of PCC to platform */
		ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
		up_write(&pcc_ss_data->pcc_lock);
	} else {
		ret = -ENOTSUPP;
		pr_debug("_CPC in PCC is not supported\n");
	}

	return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_auto_sel);

/**
 * cppc_set_enable - Set to enable CPPC on the processor by writing the
 * Continuous Performance Control package EnableRegister field.
 * @cpu: CPU for which to enable CPPC register.
 * @enable: 0 - disable, 1 - enable CPPC feature on the processor.
 *
 * Return: 0 for success, -ERRNO or -EIO otherwise.
 */
int cppc_set_enable(int cpu, bool enable)
{
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_register_resource *enable_reg;
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = -EINVAL;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -EINVAL;
	}

	enable_reg = &cpc_desc->cpc_regs[ENABLE];

	if (CPC_IN_PCC(enable_reg)) {

		if (pcc_ss_id < 0)
			return -EIO;

		ret = cpc_write(cpu, enable_reg, enable);
		if (ret)
			return ret;

		pcc_ss_data = pcc_data[pcc_ss_id];

		down_write(&pcc_ss_data->pcc_lock);
		/* after writing CPC, transfer the ownership of PCC to platform */
		ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
		up_write(&pcc_ss_data->pcc_lock);
		return ret;
	}

	return cpc_write(cpu, enable_reg, enable);
}
EXPORT_SYMBOL_GPL(cppc_set_enable);
/**
 * cppc_set_perf - Set a CPU's performance controls.
 * @cpu: CPU for which to set performance controls.
 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
 *
 * Return: 0 for success, -ERRNO otherwise.
 */
int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cpc_register_resource *desired_reg, *min_perf_reg, *max_perf_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
	min_perf_reg = &cpc_desc->cpc_regs[MIN_PERF];
	max_perf_reg = &cpc_desc->cpc_regs[MAX_PERF];

	/*
	 * This is Phase-I where we want to write to CPC registers
	 * -> We want all CPUs to be able to execute this phase in parallel
	 *
	 * Since read_lock can be acquired by multiple CPUs simultaneously,
	 * we achieve that goal here.
	 */
	if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		down_read(&pcc_ss_data->pcc_lock);	/* BEGIN Phase-I */
		if (pcc_ss_data->platform_owns_pcc) {
			ret = check_pcc_chan(pcc_ss_id, false);
			if (ret) {
				up_read(&pcc_ss_data->pcc_lock);
				return ret;
			}
		}
		/*
		 * Update the pending_write to make sure a PCC CMD_READ will not
		 * arrive and steal the channel during the switch to write lock
		 */
		pcc_ss_data->pending_pcc_write_cmd = true;
		cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
		cpc_desc->write_cmd_status = 0;
	}

	cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);

	/*
	 * Only write if min_perf and max_perf not zero. Some drivers pass zero
	 * value to min and max perf, but they don't mean to set the zero value,
	 * they just don't want to write to those registers.
	 */
	if (perf_ctrls->min_perf)
		cpc_write(cpu, min_perf_reg, perf_ctrls->min_perf);
	if (perf_ctrls->max_perf)
		cpc_write(cpu, max_perf_reg, perf_ctrls->max_perf);

	if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg))
		up_read(&pcc_ss_data->pcc_lock);	/* END Phase-I */
	/*
	 * This is Phase-II where we transfer the ownership of PCC to Platform
	 *
	 * Short Summary: Basically if we think of a group of cppc_set_perf
	 * requests that happened in a short overlapping interval, the last CPU
	 * to come out of Phase-I will enter Phase-II and ring the doorbell.
	 *
	 * We have the following requirements for Phase-II:
	 *     1. We want to execute Phase-II only when there are no CPUs
	 * currently executing in Phase-I
	 *     2. Once we start Phase-II we want to avoid all other CPUs from
	 * entering Phase-I.
	 *     3. We want only one CPU among all those who went through Phase-I
	 * to run Phase-II
	 *
	 * If write_trylock fails to get the lock and doesn't transfer the
	 * PCC ownership to the platform, then one of the following will be TRUE
	 *     1. There is at least one CPU in Phase-I which will later execute
	 * write_trylock, so the CPUs in Phase-I will be responsible for
	 * executing the Phase-II.
	 *     2. Some other CPU has beaten this CPU to successfully execute the
	 * write_trylock and has already acquired the write_lock. We know for a
	 * fact it (other CPU acquiring the write_lock) couldn't have happened
	 * before this CPU's Phase-I as we held the read_lock.
	 *     3. Some other CPU executing pcc CMD_READ has stolen the
	 * down_write, in which case, send_pcc_cmd will check for pending
	 * CMD_WRITE commands by checking the pending_pcc_write_cmd.
	 * So in all cases, this CPU knows that its request will be delivered
	 * by another CPU and can return.
	 *
	 * After getting the down_write we still need to check for
	 * pending_pcc_write_cmd to take care of the following scenario:
	 * The thread running this code could be scheduled out between
	 * Phase-I and Phase-II. Before it is scheduled back on, another CPU
	 * could have delivered the request to Platform by triggering the
	 * doorbell and transferred the ownership of PCC to platform. So this
	 * avoids triggering an unnecessary doorbell and more importantly before
	 * triggering the doorbell it makes sure that the PCC channel ownership
	 * is still with OSPM.
	 * pending_pcc_write_cmd can also be cleared by a different CPU, if
	 * there was a pcc CMD_READ waiting on down_write and it steals the lock
	 * before the pcc CMD_WRITE is completed. send_pcc_cmd checks for this
	 * case during a CMD_READ and if there are pending writes it delivers
	 * the write command before servicing the read command
	 */
	if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg)) {
		if (down_write_trylock(&pcc_ss_data->pcc_lock)) {	/* BEGIN Phase-II */
			/* Update only if there are pending write commands */
			if (pcc_ss_data->pending_pcc_write_cmd)
				send_pcc_cmd(pcc_ss_id, CMD_WRITE);
			up_write(&pcc_ss_data->pcc_lock);		/* END Phase-II */
		} else
			/* Wait until pcc_write_cnt is updated by send_pcc_cmd */
			wait_event(pcc_ss_data->pcc_write_wait_q,
				   cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);

		/* send_pcc_cmd updates the status in case of failure */
		ret = cpc_desc->write_cmd_status;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_perf);
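/*
 * A minimal usage sketch (hypothetical driver code, not part of this
 * file): a cpufreq ->target() callback would convert the requested
 * frequency to a perf level and only populate the fields it wants
 * written:
 *
 *	struct cppc_perf_ctrls ctrls = {0};
 *
 *	ctrls.desired_perf = cppc_khz_to_perf(&caps, target_khz);
 *	ret = cppc_set_perf(cpu, &ctrls);
 *
 * Leaving min_perf/max_perf zero skips those writes, as noted above.
 */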
/**
 * cppc_get_transition_latency - returns frequency transition latency in ns
 * @cpu_num: CPU number for per_cpu().
 *
 * ACPI CPPC does not explicitly specify how a platform can specify the
 * transition latency for performance change requests. The closest we have
 * is the timing information from the PCCT tables which provides the info
 * on the number and frequency of PCC commands the platform can handle.
 *
 * If desired_reg is in the SystemMemory or SystemIo ACPI address space,
 * then assume there is no latency.
 */
unsigned int cppc_get_transition_latency(int cpu_num)
{
	/*
	 * Expected transition latency is based on the PCCT timing values
	 * Below are definition from ACPI spec:
	 * pcc_nominal - Expected latency to process a command, in microseconds
	 * pcc_mpar    - The maximum number of periodic requests that the subspace
	 *               channel can support, reported in commands per minute. 0
	 *               indicates no limitation.
	 * pcc_mrtt    - The minimum amount of time that OSPM must wait after the
	 *               completion of a command before issuing the next command,
	 *               in microseconds.
	 */
	unsigned int latency_ns = 0;
	struct cpc_desc *cpc_desc;
	struct cpc_register_resource *desired_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
	struct cppc_pcc_data *pcc_ss_data;

	cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
	if (!cpc_desc)
		return CPUFREQ_ETERNAL;

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
	if (CPC_IN_SYSTEM_MEMORY(desired_reg) || CPC_IN_SYSTEM_IO(desired_reg))
		return 0;
	else if (!CPC_IN_PCC(desired_reg))
		return CPUFREQ_ETERNAL;

	if (pcc_ss_id < 0)
		return CPUFREQ_ETERNAL;

	pcc_ss_data = pcc_data[pcc_ss_id];
	if (pcc_ss_data->pcc_mpar)
		latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);

	latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
	latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);

	return latency_ns;
}
EXPORT_SYMBOL_GPL(cppc_get_transition_latency);
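/*
 * Worked example with hypothetical PCCT values: for pcc_mpar = 600
 * commands/minute, the MPAR term evaluates to 60 * (10^9 / 600) ns,
 * i.e. about 100 ms per command. If pcc_nominal were 500 us and
 * pcc_mrtt 60 us, the max() folding above would keep the ~100 ms
 * figure, since it dominates both 500,000 ns and 60,000 ns.
 */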
/* Minimum struct length needed for the DMI processor entry we want */
#define DMI_ENTRY_PROCESSOR_MIN_LENGTH	48

/* Offset in the DMI processor structure for the max frequency */
#define DMI_PROCESSOR_MAX_SPEED		0x14

/* Callback function used to retrieve the max frequency from DMI */
static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
{
	const u8 *dmi_data = (const u8 *)dm;
	u16 *mhz = (u16 *)private;

	if (dm->type == DMI_ENTRY_PROCESSOR &&
	    dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
		u16 val = (u16)get_unaligned((const u16 *)
				(dmi_data + DMI_PROCESSOR_MAX_SPEED));
		*mhz = val > *mhz ? val : *mhz;
	}
}

/* Look up the max frequency in DMI */
static u64 cppc_get_dmi_max_khz(void)
{
	u16 mhz = 0;

	dmi_walk(cppc_find_dmi_mhz, &mhz);

	/*
	 * Real stupid fallback value, just in case there is no
	 * actual value set.
	 */
	mhz = mhz ? mhz : 1;

	return KHZ_PER_MHZ * mhz;
}

/*
 * If CPPC lowest_freq and nominal_freq registers are exposed then we can
 * use them to convert perf to freq and vice versa. The conversion is
 * extrapolated as an affine function passing through the 2 points:
 * - (Low perf, Low freq)
 * - (Nominal perf, Nominal freq)
 */
unsigned int cppc_perf_to_khz(struct cppc_perf_caps *caps, unsigned int perf)
{
	s64 retval, offset = 0;
	static u64 max_khz;
	u64 mul, div;

	if (caps->lowest_freq && caps->nominal_freq) {
		mul = caps->nominal_freq - caps->lowest_freq;
		mul *= KHZ_PER_MHZ;
		div = caps->nominal_perf - caps->lowest_perf;
		offset = caps->nominal_freq * KHZ_PER_MHZ -
			 div64_u64(caps->nominal_perf * mul, div);
	} else {
		if (!max_khz)
			max_khz = cppc_get_dmi_max_khz();
		mul = max_khz;
		div = caps->highest_perf;
	}

	retval = offset + div64_u64(perf * mul, div);
	if (retval >= 0)
		return retval;
	return 0;
}
EXPORT_SYMBOL_GPL(cppc_perf_to_khz);

unsigned int cppc_khz_to_perf(struct cppc_perf_caps *caps, unsigned int freq)
{
	s64 retval, offset = 0;
	static u64 max_khz;
	u64 mul, div;

	if (caps->lowest_freq && caps->nominal_freq) {
		mul = caps->nominal_perf - caps->lowest_perf;
		div = caps->nominal_freq - caps->lowest_freq;
		/*
		 * We don't need to convert to kHz for computing offset and can
		 * directly use nominal_freq and lowest_freq as the div64_u64
		 * will remove the frequency unit.
		 */
		offset = caps->nominal_perf -
			 div64_u64(caps->nominal_freq * mul, div);
		/* But we need it for computing the perf level. */
		div *= KHZ_PER_MHZ;
	} else {
		if (!max_khz)
			max_khz = cppc_get_dmi_max_khz();
		mul = caps->highest_perf;
		div = max_khz;
	}

	retval = offset + div64_u64(freq * mul, div);
	if (retval >= 0)
		return retval;
	return 0;
}
EXPORT_SYMBOL_GPL(cppc_khz_to_perf);
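/*
 * Worked example for the conversions above, with hypothetical
 * capabilities: lowest_perf = 25, nominal_perf = 100, lowest_freq = 600
 * (MHz) and nominal_freq = 2400 (MHz). Then mul = (2400 - 600) * 1000 =
 * 1,800,000, div = 75 and offset = 2,400,000 - (100 * 1,800,000) / 75 =
 * 0, so cppc_perf_to_khz() maps perf = 50 to 50 * 1,800,000 / 75 =
 * 1,200,000 kHz (1.2 GHz), and cppc_khz_to_perf() maps that frequency
 * back to perf level 50.
 */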