// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 *
 * CPPC describes a few methods for controlling CPU performance using
 * information from a per CPU table called CPC. This table is described in
 * the ACPI v5.0+ specification. The table consists of a list of
 * registers which may be memory mapped or hardware registers and also may
 * include some static integer values.
 *
 * CPU performance is on an abstract continuous scale, as opposed to a
 * discretized P-state scale which is tied to CPU frequency only. In brief,
 * the basic operation involves:
 *
 * - OS makes a CPU performance request (can provide min and max bounds).
 *
 * - Platform (such as BMC) is free to optimize the request within the
 *   requested bounds depending on power/thermal budgets etc.
 *
 * - Platform conveys its decision back to OS.
 *
 * The communication between OS and platform occurs through another medium
 * called the Platform Communication Channel (PCC). This is a generic
 * mailbox-like mechanism which includes doorbell semantics to indicate
 * register updates. See drivers/mailbox/pcc.c for details on PCC.
 *
 * Finer details about the PCC and CPPC spec are available in the ACPI v5.1
 * and above specifications.
 */
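
/*
 * Illustrative usage sketch (not part of this file): a CPUfreq driver built
 * on top of this API would typically query the capability range once and
 * then request performance levels within it, e.g.:
 *
 *	struct cppc_perf_caps caps;
 *	struct cppc_perf_ctrls ctrls = {0};
 *
 *	if (!cppc_get_perf_caps(cpu, &caps)) {
 *		ctrls.desired_perf = caps.nominal_perf;
 *		cppc_set_perf(cpu, &ctrls);
 *	}
 */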

#define pr_fmt(fmt)	"ACPI CPPC: " fmt

#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/ktime.h>
#include <linux/rwsem.h>
#include <linux/wait.h>
#include <linux/topology.h>
#include <linux/dmi.h>
#include <linux/units.h>
#include <linux/unaligned.h>

#include <acpi/cppc_acpi.h>

struct cppc_pcc_data {
	struct pcc_mbox_chan *pcc_channel;
	bool pcc_channel_acquired;
	unsigned int deadline_us;
	unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;

	bool pending_pcc_write_cmd;	/* Any pending/batched PCC write cmds? */
	bool platform_owns_pcc;		/* Ownership of PCC subspace */
	unsigned int pcc_write_cnt;	/* Running count of PCC write commands */

	/*
	 * Lock to provide controlled access to the PCC channel.
	 *
	 * For performance-critical use cases (currently cppc_set_perf), we
	 * need to take the read_lock and check whether the channel belongs
	 * to the OSPM before reading from or writing to the PCC subspace.
	 * We need to take the write_lock before transferring channel
	 * ownership to the platform via a doorbell. This allows us to batch
	 * a number of CPPC requests if they happen to originate at about
	 * the same time.
	 *
	 * For non-performance-critical use cases (init), take the
	 * write_lock for all purposes, which gives exclusive access.
	 */
	struct rw_semaphore pcc_lock;

	/* Wait queue for CPUs whose requests were batched */
	wait_queue_head_t pcc_write_wait_q;
	ktime_t last_cmd_cmpl_time;
	ktime_t last_mpar_reset;
	int mpar_count;
	int refcount;
};

/* Array to represent the PCC channel per subspace ID */
static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];
/* The cpu_pcc_subspace_idx contains per CPU subspace ID */
static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);

/*
 * The cpc_desc structure contains the ACPI register details
 * as described in the per CPU _CPC tables. The details
 * include the type of register (e.g. PCC, System IO, FFH etc.)
 * and destination addresses which lets us READ/WRITE CPU performance
 * information using the appropriate I/O methods.
 */
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);

/* pcc mapped address + header size + offset within PCC subspace */
#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_channel->shmem + \
					0x8 + (offs))

/* Check if a CPC register is in PCC */
#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&		\
				(cpc)->cpc_entry.reg.space_id ==	\
				ACPI_ADR_SPACE_PLATFORM_COMM)

/* Check if a CPC register is in FFH */
#define CPC_IN_FFH(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&		\
				(cpc)->cpc_entry.reg.space_id ==	\
				ACPI_ADR_SPACE_FIXED_HARDWARE)

/* Check if a CPC register is in SystemMemory */
#define CPC_IN_SYSTEM_MEMORY(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&	\
				(cpc)->cpc_entry.reg.space_id ==	\
				ACPI_ADR_SPACE_SYSTEM_MEMORY)

/* Check if a CPC register is in SystemIo */
#define CPC_IN_SYSTEM_IO(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&	\
				(cpc)->cpc_entry.reg.space_id ==	\
				ACPI_ADR_SPACE_SYSTEM_IO)

/* Evaluates to True if reg is a NULL register descriptor */
#define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \
				(reg)->address == 0 &&			\
				(reg)->bit_width == 0 &&		\
				(reg)->bit_offset == 0 &&		\
				(reg)->access_width == 0)

/* Evaluates to True if an optional cpc field is supported */
#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ?		\
				!!(cpc)->cpc_entry.int_value :		\
				!IS_NULL_REG(&(cpc)->cpc_entry.reg))

/*
 * Each bit indicates the optionality of the register in per-cpu
 * cpc_regs[] with the corresponding index. 0 means mandatory and 1
 * means optional.
 */
#define REG_OPTIONAL (0x1FC7D0)

/*
 * Use the index of the register in per-cpu cpc_regs[] to check if
 * it's an optional one.
 */
#define IS_OPTIONAL_CPC_REG(reg_idx) (REG_OPTIONAL & (1U << (reg_idx)))
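
/*
 * Worked example (illustrative, using the cppc_regs indices from
 * <acpi/cppc_acpi.h>): bit 5 of 0x1FC7D0 is clear, so
 * IS_OPTIONAL_CPC_REG(DESIRED_PERF) evaluates to 0 (mandatory), while
 * bit 4 is set, so IS_OPTIONAL_CPC_REG(GUARANTEED_PERF) is non-zero
 * (optional).
 */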

/*
 * Arbitrary number of retries in case the remote processor is slow to
 * respond to PCC commands. Keeping it high enough to cover emulators
 * where the processors run painfully slow.
 */
#define NUM_RETRIES 500ULL

#define OVER_16BTS_MASK ~0xFFFFULL

#define define_one_cppc_ro(_name)		\
static struct kobj_attribute _name =		\
__ATTR(_name, 0444, show_##_name, NULL)

#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)

#define show_cppc_data(access_fn, struct_name, member_name)		\
	static ssize_t show_##member_name(struct kobject *kobj,	\
					struct kobj_attribute *attr,	\
					char *buf)			\
	{								\
		struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);		\
		struct struct_name st_name = {0};			\
		int ret;						\
									\
		ret = access_fn(cpc_ptr->cpu_id, &st_name);		\
		if (ret)						\
			return ret;					\
									\
		return sysfs_emit(buf, "%llu\n",			\
				(u64)st_name.member_name);		\
	}								\
	define_one_cppc_ro(member_name)

show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, guaranteed_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);

show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);

/* Check for valid access_width, otherwise fall back to using bit_width */
#define GET_BIT_WIDTH(reg) ((reg)->access_width ? (8 << ((reg)->access_width - 1)) : (reg)->bit_width)

/* Shift and apply the mask for CPC reads/writes */
#define MASK_VAL_READ(reg, val) (((val) >> (reg)->bit_offset) &			\
					GENMASK(((reg)->bit_width) - 1, 0))
#define MASK_VAL_WRITE(reg, prev_val, val)					\
	((((val) & GENMASK(((reg)->bit_width) - 1, 0)) << (reg)->bit_offset) |	\
	((prev_val) & ~(GENMASK(((reg)->bit_width) - 1, 0) << (reg)->bit_offset)))
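
/*
 * Worked example (illustrative): for a register with access_width == 2,
 * GET_BIT_WIDTH() yields 8 << (2 - 1) == 16 bits. For a register with
 * bit_offset == 8 and bit_width == 8, MASK_VAL_READ(reg, 0xABCD) is
 * (0xABCD >> 8) & 0xFF == 0xAB, and MASK_VAL_WRITE(reg, 0xABCD, 0x12)
 * is (0x12 << 8) | (0xABCD & ~0xFF00) == 0x12CD.
 */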

static ssize_t show_feedback_ctrs(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
	struct cppc_perf_fb_ctrs fb_ctrs = {0};
	int ret;

	ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
	if (ret)
		return ret;

	return sysfs_emit(buf, "ref:%llu del:%llu\n",
			fb_ctrs.reference, fb_ctrs.delivered);
}
define_one_cppc_ro(feedback_ctrs);

static struct attribute *cppc_attrs[] = {
	&feedback_ctrs.attr,
	&reference_perf.attr,
	&wraparound_time.attr,
	&highest_perf.attr,
	&lowest_perf.attr,
	&lowest_nonlinear_perf.attr,
	&guaranteed_perf.attr,
	&nominal_perf.attr,
	&nominal_freq.attr,
	&lowest_freq.attr,
	NULL
};
ATTRIBUTE_GROUPS(cppc);

static const struct kobj_type cppc_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = cppc_groups,
};

static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
{
	int ret, status;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
		pcc_ss_data->pcc_channel->shmem;

	if (!pcc_ss_data->platform_owns_pcc)
		return 0;

	/*
	 * Poll PCC status register every 3us(delay_us) for maximum of
	 * deadline_us(timeout_us) until PCC command complete bit is set(cond)
	 */
	ret = readw_relaxed_poll_timeout(&generic_comm_base->status, status,
					 status & PCC_CMD_COMPLETE_MASK, 3,
					 pcc_ss_data->deadline_us);

	if (likely(!ret)) {
		pcc_ss_data->platform_owns_pcc = false;
		if (chk_err_bit && (status & PCC_ERROR_MASK))
			ret = -EIO;
	}

	if (unlikely(ret))
		pr_err("PCC check channel failed for ss: %d. ret=%d\n",
		       pcc_ss_id, ret);

	return ret;
}

/*
 * This function transfers the ownership of the PCC to the platform
 * So it must be called while holding write_lock(pcc_lock)
 */
static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
{
	int ret = -EIO, i;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
		pcc_ss_data->pcc_channel->shmem;
	unsigned int time_delta;

	/*
	 * For CMD_WRITE we know for a fact the caller should have checked
	 * the channel before writing to PCC space
	 */
	if (cmd == CMD_READ) {
		/*
		 * If there are pending cpc_writes, then we stole the channel
		 * before write completion, so first send a WRITE command to
		 * platform
		 */
		if (pcc_ss_data->pending_pcc_write_cmd)
			send_pcc_cmd(pcc_ss_id, CMD_WRITE);

		ret = check_pcc_chan(pcc_ss_id, false);
		if (ret)
			goto end;
	} else /* CMD_WRITE */
		pcc_ss_data->pending_pcc_write_cmd = FALSE;

	/*
	 * Handle the Minimum Request Turnaround Time(MRTT)
	 * "The minimum amount of time that OSPM must wait after the completion
	 * of a command before issuing the next command, in microseconds"
	 */
	if (pcc_ss_data->pcc_mrtt) {
		time_delta = ktime_us_delta(ktime_get(),
					    pcc_ss_data->last_cmd_cmpl_time);
		if (pcc_ss_data->pcc_mrtt > time_delta)
			udelay(pcc_ss_data->pcc_mrtt - time_delta);
	}

	/*
	 * Handle the non-zero Maximum Periodic Access Rate(MPAR)
	 * "The maximum number of periodic requests that the subspace channel can
	 * support, reported in commands per minute. 0 indicates no limitation."
	 *
	 * This parameter should be ideally zero or large enough so that it can
	 * handle maximum number of requests that all the cores in the system can
	 * collectively generate. If it is not, we will follow the spec and just
	 * not send the request to the platform after hitting the MPAR limit in
	 * any 60s window
	 */
	if (pcc_ss_data->pcc_mpar) {
		if (pcc_ss_data->mpar_count == 0) {
			time_delta = ktime_ms_delta(ktime_get(),
						    pcc_ss_data->last_mpar_reset);
			if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
				pr_debug("PCC cmd for subspace %d not sent due to MPAR limit",
					 pcc_ss_id);
				ret = -EIO;
				goto end;
			}
			pcc_ss_data->last_mpar_reset = ktime_get();
			pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
		}
		pcc_ss_data->mpar_count--;
	}
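
	/*
	 * Worked example (illustrative): with pcc_mpar == 600, mpar_count is
	 * refilled to 600 at the start of each 60 s window; once those 600
	 * doorbell rings have been consumed within the window, further
	 * commands are rejected with -EIO until the window expires and the
	 * count refills.
	 */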

	/* Write to the shared comm region. */
	writew_relaxed(cmd, &generic_comm_base->command);

	/* Flip CMD COMPLETE bit */
	writew_relaxed(0, &generic_comm_base->status);

	pcc_ss_data->platform_owns_pcc = true;

	/* Ring doorbell */
	ret = mbox_send_message(pcc_ss_data->pcc_channel->mchan, &cmd);
	if (ret < 0) {
		pr_err("Err sending PCC mbox message. ss: %d cmd:%d, ret:%d\n",
		       pcc_ss_id, cmd, ret);
		goto end;
	}

	/* wait for completion and check for PCC error bit */
	ret = check_pcc_chan(pcc_ss_id, true);

	if (pcc_ss_data->pcc_mrtt)
		pcc_ss_data->last_cmd_cmpl_time = ktime_get();

	if (pcc_ss_data->pcc_channel->mchan->mbox->txdone_irq)
		mbox_chan_txdone(pcc_ss_data->pcc_channel->mchan, ret);
	else
		mbox_client_txdone(pcc_ss_data->pcc_channel->mchan, ret);

end:
	if (cmd == CMD_WRITE) {
		if (unlikely(ret)) {
			for_each_online_cpu(i) {
				struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);

				if (!desc)
					continue;

				if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
					desc->write_cmd_status = ret;
			}
		}
		pcc_ss_data->pcc_write_cnt++;
		wake_up_all(&pcc_ss_data->pcc_write_wait_q);
	}

	return ret;
}

static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
{
	if (ret < 0)
		pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
			 *(u16 *)msg, ret);
	else
		pr_debug("TX completed. CMD sent:%x, ret:%d\n",
			 *(u16 *)msg, ret);
}

static struct mbox_client cppc_mbox_cl = {
	.tx_done = cppc_chan_tx_done,
	.knows_txdone = true,
};

static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
{
	int result = -EFAULT;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;
	struct acpi_psd_package *pdomain;

	status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
					    &buffer, ACPI_TYPE_PACKAGE);
	if (status == AE_NOT_FOUND)	/* _PSD is optional */
		return 0;
	if (ACPI_FAILURE(status))
		return -ENODEV;

	psd = buffer.pointer;
	if (!psd || psd->package.count != 1) {
		pr_debug("Invalid _PSD data\n");
		goto end;
	}

	pdomain = &(cpc_ptr->domain_info);

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]),
				      &format, &state);
	if (ACPI_FAILURE(status)) {
		pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		pr_debug("Unknown _PSD:revision for CPU: %d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	result = 0;
end:
	kfree(buffer.pointer);
	return result;
}

bool acpi_cpc_valid(void)
{
	struct cpc_desc *cpc_ptr;
	int cpu;

	if (acpi_disabled)
		return false;

	for_each_online_cpu(cpu) {
		cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
		if (!cpc_ptr)
			return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(acpi_cpc_valid);

bool cppc_allow_fast_switch(void)
{
	struct cpc_register_resource *desired_reg;
	struct cpc_desc *cpc_ptr;
	int cpu;

	for_each_online_cpu(cpu) {
		cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
		desired_reg = &cpc_ptr->cpc_regs[DESIRED_PERF];
		if (!CPC_IN_SYSTEM_MEMORY(desired_reg) &&
		    !CPC_IN_SYSTEM_IO(desired_reg))
			return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(cppc_allow_fast_switch);

/**
 * acpi_get_psd_map - Map the CPUs in the freq domain of a given cpu
 * @cpu: Find all CPUs that share a domain with cpu.
 * @cpu_data: Pointer to CPU specific CPPC data including PSD info.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data)
{
	struct cpc_desc *cpc_ptr, *match_cpc_ptr;
	struct acpi_psd_package *match_pdomain;
	struct acpi_psd_package *pdomain;
	int count_target, i;

	/*
	 * Now that we have _PSD data from all CPUs, let's setup P-state
	 * domain info.
	 */
	cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
	if (!cpc_ptr)
		return -EFAULT;

	pdomain = &(cpc_ptr->domain_info);
	cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
	if (pdomain->num_processors <= 1)
		return 0;

	/* Validate the Domain info */
	count_target = pdomain->num_processors;
	if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ALL;
	else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_HW;
	else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ANY;

	for_each_online_cpu(i) {
		if (i == cpu)
			continue;

		match_cpc_ptr = per_cpu(cpc_desc_ptr, i);
		if (!match_cpc_ptr)
			goto err_fault;

		match_pdomain = &(match_cpc_ptr->domain_info);
		if (match_pdomain->domain != pdomain->domain)
			continue;

		/* Here i and cpu are in the same domain */
		if (match_pdomain->num_processors != count_target)
			goto err_fault;

		if (pdomain->coord_type != match_pdomain->coord_type)
			goto err_fault;

		cpumask_set_cpu(i, cpu_data->shared_cpu_map);
	}

	return 0;

err_fault:
	/* Assume no coordination on any error parsing domain info */
	cpumask_clear(cpu_data->shared_cpu_map);
	cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
	cpu_data->shared_type = CPUFREQ_SHARED_TYPE_NONE;

	return -EFAULT;
}
EXPORT_SYMBOL_GPL(acpi_get_psd_map);
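
/*
 * Illustrative caller sketch (assumes the caller allocates cpu_data and
 * its cpumask, as cpufreq drivers do):
 *
 *	struct cppc_cpudata *cpu_data = kzalloc(sizeof(*cpu_data), GFP_KERNEL);
 *
 *	if (cpu_data && zalloc_cpumask_var(&cpu_data->shared_cpu_map, GFP_KERNEL))
 *		ret = acpi_get_psd_map(cpu, cpu_data);
 */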

static int register_pcc_channel(int pcc_ss_idx)
{
	struct pcc_mbox_chan *pcc_chan;
	u64 usecs_lat;

	if (pcc_ss_idx >= 0) {
		pcc_chan = pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);

		if (IS_ERR(pcc_chan)) {
			pr_err("Failed to find PCC channel for subspace %d\n",
			       pcc_ss_idx);
			return -ENODEV;
		}

		pcc_data[pcc_ss_idx]->pcc_channel = pcc_chan;
		/*
		 * cppc_ss->latency is just a Nominal value. In reality
		 * the remote processor could be much slower to reply.
		 * So add an arbitrary amount of wait on top of Nominal.
		 */
		usecs_lat = NUM_RETRIES * pcc_chan->latency;
		pcc_data[pcc_ss_idx]->deadline_us = usecs_lat;
		pcc_data[pcc_ss_idx]->pcc_mrtt = pcc_chan->min_turnaround_time;
		pcc_data[pcc_ss_idx]->pcc_mpar = pcc_chan->max_access_rate;
		pcc_data[pcc_ss_idx]->pcc_nominal = pcc_chan->latency;

		/* Set flag so that we don't come here for each CPU. */
		pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
	}

	return 0;
}

/**
 * cpc_ffh_supported() - check if FFH reading supported
 *
 * Check if the architecture has support for functional fixed hardware
 * read/write capability.
 *
 * Return: true for supported, false for not supported
 */
bool __weak cpc_ffh_supported(void)
{
	return false;
}

/**
 * cpc_supported_by_cpu() - check if CPPC is supported by CPU
 *
 * Check if the architectural support for CPPC is present even
 * if the _OSC hasn't prescribed it
 *
 * Return: true for supported, false for not supported
 */
bool __weak cpc_supported_by_cpu(void)
{
	return false;
}

/**
 * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
 * @pcc_ss_id: PCC Subspace index as in the PCC client ACPI package.
 *
 * Check and allocate the cppc_pcc_data memory.
 * In some processor configurations it is possible that same subspace
 * is shared between multiple CPUs. This is seen especially in CPUs
 * with hardware multi-threading support.
 *
 * Return: 0 for success, errno for failure
 */
static int pcc_data_alloc(int pcc_ss_id)
{
	if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
		return -EINVAL;

	if (pcc_data[pcc_ss_id]) {
		pcc_data[pcc_ss_id]->refcount++;
	} else {
		pcc_data[pcc_ss_id] = kzalloc_obj(struct cppc_pcc_data);
		if (!pcc_data[pcc_ss_id])
			return -ENOMEM;
		pcc_data[pcc_ss_id]->refcount++;
	}

	return 0;
}

/*
 * An example CPC table looks like the following.
 *
 * Name (_CPC, Package() {
 *	17,							// NumEntries
 *	1,							// Revision
 *	ResourceTemplate() {Register(PCC, 32, 0, 0x120, 2)},	// Highest Performance
 *	ResourceTemplate() {Register(PCC, 32, 0, 0x124, 2)},	// Nominal Performance
 *	ResourceTemplate() {Register(PCC, 32, 0, 0x128, 2)},	// Lowest Nonlinear Performance
 *	ResourceTemplate() {Register(PCC, 32, 0, 0x12C, 2)},	// Lowest Performance
 *	ResourceTemplate() {Register(PCC, 32, 0, 0x130, 2)},	// Guaranteed Performance Register
 *	ResourceTemplate() {Register(PCC, 32, 0, 0x110, 2)},	// Desired Performance Register
 *	ResourceTemplate() {Register(SystemMemory, 0, 0, 0, 0)},
 *	...
 *	...
 *	...
 * }
 * Each Register() encodes how to access that specific register.
 * e.g. a sample PCC entry has the following encoding:
 *
 * Register (
 *	PCC,	// AddressSpaceKeyword
 *	8,	// RegisterBitWidth
 *	8,	// RegisterBitOffset
 *	0x30,	// RegisterAddress
 *	9,	// AccessSize (subspace ID)
 * )
 */

/**
 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_cppc_processor_probe(struct acpi_processor *pr)
{
	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *out_obj, *cpc_obj;
	struct cpc_desc *cpc_ptr;
	struct cpc_reg *gas_t;
	struct device *cpu_dev;
	acpi_handle handle = pr->handle;
	unsigned int num_ent, i, cpc_rev;
	int pcc_subspace_id = -1;
	acpi_status status;
	int ret = -ENODATA;

	if (!osc_sb_cppc2_support_acked) {
		pr_debug("CPPC v2 _OSC not acked\n");
		if (!cpc_supported_by_cpu()) {
			pr_debug("CPPC is not supported by the CPU\n");
			return -ENODEV;
		}
	}

	/* Parse the ACPI _CPC table for this CPU. */
	status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
					    ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status)) {
		ret = -ENODEV;
		goto out_buf_free;
	}

	out_obj = (union acpi_object *) output.pointer;

	cpc_ptr = kzalloc_obj(struct cpc_desc);
	if (!cpc_ptr) {
		ret = -ENOMEM;
		goto out_buf_free;
	}

	/* First entry is NumEntries. */
	cpc_obj = &out_obj->package.elements[0];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		num_ent = cpc_obj->integer.value;
		if (num_ent <= 1) {
			pr_debug("Unexpected _CPC NumEntries value (%d) for CPU:%d\n",
				 num_ent, pr->id);
			goto out_free;
		}
	} else {
		pr_debug("Unexpected _CPC NumEntries entry type (%d) for CPU:%d\n",
			 cpc_obj->type, pr->id);
		goto out_free;
	}

	/* Second entry should be revision. */
	cpc_obj = &out_obj->package.elements[1];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		cpc_rev = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected _CPC Revision entry type (%d) for CPU:%d\n",
			 cpc_obj->type, pr->id);
		goto out_free;
	}

	if (cpc_rev < CPPC_V2_REV) {
		pr_debug("Unsupported _CPC Revision (%d) for CPU:%d\n", cpc_rev,
			 pr->id);
		goto out_free;
	}

	/*
	 * Disregard _CPC if the number of entries in the return package is not
	 * as expected, but support future revisions being proper supersets of
	 * v3 and only causing more entries to be returned by _CPC.
	 */
	if ((cpc_rev == CPPC_V2_REV && num_ent != CPPC_V2_NUM_ENT) ||
	    (cpc_rev == CPPC_V3_REV && num_ent != CPPC_V3_NUM_ENT) ||
	    (cpc_rev > CPPC_V3_REV && num_ent <= CPPC_V3_NUM_ENT)) {
		pr_debug("Unexpected number of _CPC return package entries (%d) for CPU:%d\n",
			 num_ent, pr->id);
		goto out_free;
	}
	if (cpc_rev > CPPC_V3_REV) {
		num_ent = CPPC_V3_NUM_ENT;
		cpc_rev = CPPC_V3_REV;
	}

	cpc_ptr->num_entries = num_ent;
	cpc_ptr->version = cpc_rev;

	/* Iterate through remaining entries in _CPC */
	for (i = 2; i < num_ent; i++) {
		cpc_obj = &out_obj->package.elements[i];

		if (cpc_obj->type == ACPI_TYPE_INTEGER) {
			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
			cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
		} else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
			gas_t = (struct cpc_reg *)
				cpc_obj->buffer.pointer;

			/*
			 * The PCC Subspace index is encoded inside
			 * the CPC table entries. The same PCC index
			 * will be used for all the PCC entries,
			 * so extract it only once.
			 */
			if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
				if (pcc_subspace_id < 0) {
					pcc_subspace_id = gas_t->access_width;
					if (pcc_data_alloc(pcc_subspace_id))
						goto out_free;
				} else if (pcc_subspace_id != gas_t->access_width) {
					pr_debug("Mismatched PCC ids in _CPC for CPU:%d\n",
						 pr->id);
					goto out_free;
				}
			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
				if (gas_t->address) {
					void __iomem *addr;
					size_t access_width;

					if (!osc_cpc_flexible_adr_space_confirmed) {
						pr_debug("Flexible address space capability not supported\n");
						if (!cpc_supported_by_cpu())
							goto out_free;
					}

					access_width = GET_BIT_WIDTH(gas_t) / 8;
					addr = ioremap(gas_t->address, access_width);
					if (!addr)
						goto out_free;
					cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
				}
			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
				if (gas_t->access_width < 1 || gas_t->access_width > 3) {
					/*
					 * 1 = 8-bit, 2 = 16-bit, and 3 = 32-bit.
					 * SystemIO doesn't implement 64-bit
					 * registers.
					 */
					pr_debug("Invalid access width %d for SystemIO register in _CPC\n",
						 gas_t->access_width);
					goto out_free;
				}
				if (gas_t->address & OVER_16BTS_MASK) {
					/* SystemIO registers use 16-bit integer addresses */
					pr_debug("Invalid IO port %llu for SystemIO register in _CPC\n",
						 gas_t->address);
					goto out_free;
				}
				if (!osc_cpc_flexible_adr_space_confirmed) {
					pr_debug("Flexible address space capability not supported\n");
					if (!cpc_supported_by_cpu())
						goto out_free;
				}
			} else {
				if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
					/* Support only PCC, SystemMemory, SystemIO, and FFH type regs. */
					pr_debug("Unsupported register type (%d) in _CPC\n",
						 gas_t->space_id);
					goto out_free;
				}
			}

			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
			memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
		} else {
			pr_debug("Invalid entry type (%d) in _CPC for CPU:%d\n",
				 cpc_obj->type, pr->id);
			goto out_free;
		}
	}
	per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;

	/*
	 * Initialize the remaining cpc_regs as unsupported.
	 * Example: In case FW exposes CPPC v2, the below loop will initialize
	 * LOWEST_FREQ and NOMINAL_FREQ regs as unsupported
	 */
	for (i = num_ent - 2; i < MAX_CPC_REG_ENT; i++) {
		cpc_ptr->cpc_regs[i].type = ACPI_TYPE_INTEGER;
		cpc_ptr->cpc_regs[i].cpc_entry.int_value = 0;
	}

	/* Store CPU Logical ID */
	cpc_ptr->cpu_id = pr->id;
	raw_spin_lock_init(&cpc_ptr->rmw_lock);

	/* Parse PSD data for this CPU */
	ret = acpi_get_psd(cpc_ptr, handle);
	if (ret)
		goto out_free;

	/* Register PCC channel once for all PCC subspace ID. */
	if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
		ret = register_pcc_channel(pcc_subspace_id);
		if (ret)
			goto out_free;

		init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
		init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
	}

	/* Everything looks okay */
	pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);

	/* Add per logical CPU nodes for reading its feedback counters. */
	cpu_dev = get_cpu_device(pr->id);
	if (!cpu_dev) {
		ret = -EINVAL;
		goto out_free;
	}

	/* Plug PSD data into this CPU's CPC descriptor. */
	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;

	ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
				   "acpi_cppc");
	if (ret) {
		per_cpu(cpc_desc_ptr, pr->id) = NULL;
		kobject_put(&cpc_ptr->kobj);
		goto out_free;
	}

	kfree(output.pointer);
	return 0;

out_free:
	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;

		if (addr)
			iounmap(addr);
	}
	kfree(cpc_ptr);

out_buf_free:
	kfree(output.pointer);
	return ret;
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);

/**
 * acpi_cppc_processor_exit - Cleanup CPC structs.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: Void
 */
void acpi_cppc_processor_exit(struct acpi_processor *pr)
{
	struct cpc_desc *cpc_ptr;
	unsigned int i;
	void __iomem *addr;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);

	if (pcc_ss_id >= 0 && pcc_data[pcc_ss_id]) {
		if (pcc_data[pcc_ss_id]->pcc_channel_acquired) {
			pcc_data[pcc_ss_id]->refcount--;
			if (!pcc_data[pcc_ss_id]->refcount) {
				pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
				kfree(pcc_data[pcc_ss_id]);
				pcc_data[pcc_ss_id] = NULL;
			}
		}
	}

	cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
	if (!cpc_ptr)
		return;

	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
		if (addr)
			iounmap(addr);
	}

	kobject_put(&cpc_ptr->kobj);
	kfree(cpc_ptr);
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);

/**
 * cpc_read_ffh() - Read FFH register
 * @cpunum:	CPU number to read
 * @reg:	cppc register information
 * @val:	place holder for return value
 *
 * Read bit_width bits from a specified address and bit_offset
 *
 * Return: 0 for success and error code
 */
int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
	return -ENOTSUPP;
}

/**
 * cpc_write_ffh() - Write FFH register
 * @cpunum:	CPU number to write
 * @reg:	cppc register information
 * @val:	value to write
 *
 * Write value of bit_width bits to a specified address and bit_offset
 *
 * Return: 0 for success and error code
 */
int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
	return -ENOTSUPP;
}

/*
 * Since cpc_read and cpc_write are called while holding pcc_lock, they should
 * be as fast as possible. We have already mapped the PCC subspace during init,
 * so we can directly write to it.
 */

static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
{
	void __iomem *vaddr = NULL;
	int size;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg_res->type == ACPI_TYPE_INTEGER) {
		*val = reg_res->cpc_entry.int_value;
		return 0;
	}

	*val = 0;
	size = GET_BIT_WIDTH(reg);

	if (IS_ENABLED(CONFIG_HAS_IOPORT) &&
	    reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
		u32 val_u32;
		acpi_status status;

		status = acpi_os_read_port((acpi_io_address)reg->address,
					   &val_u32, size);
		if (ACPI_FAILURE(status)) {
			pr_debug("Error: Failed to read SystemIO port %llx\n",
				 reg->address);
			return -EFAULT;
		}

		*val = val_u32;
		return 0;
	} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) {
		/*
		 * For registers in PCC space, the register size is determined
		 * by the bit width field; the access size is used to indicate
		 * the PCC subspace id.
		 */
		size = reg->bit_width;
		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
	}
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_read_ffh(cpu, reg, val);
	else
		return acpi_os_read_memory((acpi_physical_address)reg->address,
					   val, size);

	switch (size) {
	case 8:
		*val = readb_relaxed(vaddr);
		break;
	case 16:
		*val = readw_relaxed(vaddr);
		break;
	case 32:
		*val = readl_relaxed(vaddr);
		break;
	case 64:
		*val = readq_relaxed(vaddr);
		break;
	default:
		if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
			pr_debug("Error: Cannot read %u bit width from system memory: 0x%llx\n",
				 size, reg->address);
		} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
			pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
				 size, pcc_ss_id);
		}
		return -EFAULT;
	}

	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		*val = MASK_VAL_READ(reg, *val);

	return 0;
}

static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{
	int ret_val = 0;
	int size;
	u64 prev_val;
	void __iomem *vaddr = NULL;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;
	struct cpc_desc *cpc_desc;
	unsigned long flags;

	size = GET_BIT_WIDTH(reg);

	if (IS_ENABLED(CONFIG_HAS_IOPORT) &&
	    reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
		acpi_status status;

		status = acpi_os_write_port((acpi_io_address)reg->address,
					    (u32)val, size);
		if (ACPI_FAILURE(status)) {
			pr_debug("Error: Failed to write SystemIO port %llx\n",
				 reg->address);
			return -EFAULT;
		}

		return 0;
	} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) {
		/*
		 * For registers in PCC space, the register size is determined
		 * by the bit width field; the access size is used to indicate
		 * the PCC subspace id.
		 */
		size = reg->bit_width;
		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
	}
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_write_ffh(cpu, reg, val);
	else
		return acpi_os_write_memory((acpi_physical_address)reg->address,
					    val, size);

	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
		cpc_desc = per_cpu(cpc_desc_ptr, cpu);
		if (!cpc_desc) {
			pr_debug("No CPC descriptor for CPU:%d\n", cpu);
			return -ENODEV;
		}

		raw_spin_lock_irqsave(&cpc_desc->rmw_lock, flags);
		switch (size) {
		case 8:
			prev_val = readb_relaxed(vaddr);
			break;
		case 16:
			prev_val = readw_relaxed(vaddr);
			break;
		case 32:
			prev_val = readl_relaxed(vaddr);
			break;
		case 64:
			prev_val = readq_relaxed(vaddr);
			break;
		default:
			raw_spin_unlock_irqrestore(&cpc_desc->rmw_lock, flags);
			return -EFAULT;
		}
		val = MASK_VAL_WRITE(reg, prev_val, val);
	}

	switch (size) {
	case 8:
		writeb_relaxed(val, vaddr);
		break;
	case 16:
		writew_relaxed(val, vaddr);
		break;
	case 32:
		writel_relaxed(val, vaddr);
		break;
	case 64:
		writeq_relaxed(val, vaddr);
		break;
	default:
		if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
			pr_debug("Error: Cannot write %u bit width to system memory: 0x%llx\n",
				 size, reg->address);
		} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
			pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
				 size, pcc_ss_id);
		}
		ret_val = -EFAULT;
		break;
	}

	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		raw_spin_unlock_irqrestore(&cpc_desc->rmw_lock, flags);

	return ret_val;
}

static int cppc_get_reg_val_in_pcc(int cpu, struct cpc_register_resource *reg, u64 *val)
{
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret;

	if (pcc_ss_id < 0) {
		pr_debug("Invalid pcc_ss_id\n");
		return -ENODEV;
	}

	pcc_ss_data = pcc_data[pcc_ss_id];

	down_write(&pcc_ss_data->pcc_lock);

	if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
		ret = cpc_read(cpu, reg, val);
	else
		ret = -EIO;

	up_write(&pcc_ss_data->pcc_lock);

	return ret;
}

static int cppc_get_reg_val(int cpu, enum cppc_regs reg_idx, u64 *val)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cpc_register_resource *reg;

	if (val == NULL)
		return -EINVAL;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	reg = &cpc_desc->cpc_regs[reg_idx];

	if ((reg->type == ACPI_TYPE_INTEGER && IS_OPTIONAL_CPC_REG(reg_idx) &&
	     !reg->cpc_entry.int_value) || (reg->type != ACPI_TYPE_INTEGER &&
	     IS_NULL_REG(&reg->cpc_entry.reg))) {
		pr_debug("CPC register is not supported\n");
		return -EOPNOTSUPP;
	}

	if (CPC_IN_PCC(reg))
		return cppc_get_reg_val_in_pcc(cpu, reg, val);

	return cpc_read(cpu, reg, val);
}

static int cppc_set_reg_val_in_pcc(int cpu, struct cpc_register_resource *reg, u64 val)
{
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret;

	if (pcc_ss_id < 0) {
		pr_debug("Invalid pcc_ss_id\n");
		return -ENODEV;
	}

	ret = cpc_write(cpu, reg, val);
	if (ret)
		return ret;

	pcc_ss_data = pcc_data[pcc_ss_id];

	down_write(&pcc_ss_data->pcc_lock);
	/* after writing CPC, transfer the ownership of PCC to platform */
	ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
	up_write(&pcc_ss_data->pcc_lock);

	return ret;
}

static int cppc_set_reg_val(int cpu, enum cppc_regs reg_idx, u64 val)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cpc_register_resource *reg;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	reg = &cpc_desc->cpc_regs[reg_idx];

	/* if a register is writeable, it must be a buffer and not null */
	if ((reg->type != ACPI_TYPE_BUFFER) || IS_NULL_REG(&reg->cpc_entry.reg)) {
		pr_debug("CPC register is not supported\n");
		return -EOPNOTSUPP;
	}

	if (CPC_IN_PCC(reg))
		return cppc_set_reg_val_in_pcc(cpu, reg, val);

	return cpc_write(cpu, reg, val);
}

/**
 * cppc_get_desired_perf - Get the desired performance register value.
 * @cpunum: CPU from which to get desired performance.
 * @desired_perf: Return address.
 *
 * Return: 0 for success, -EIO otherwise.
 */
int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
{
	return cppc_get_reg_val(cpunum, DESIRED_PERF, desired_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_desired_perf);

/**
 * cppc_get_nominal_perf - Get the nominal performance register value.
 * @cpunum: CPU from which to get nominal performance.
 * @nominal_perf: Return address.
 *
 * Return: 0 for success, -EIO otherwise.
 */
int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
{
	return cppc_get_reg_val(cpunum, NOMINAL_PERF, nominal_perf);
}

/**
 * cppc_get_highest_perf - Get the highest performance register value.
 * @cpunum: CPU from which to get highest performance.
 * @highest_perf: Return address.
 *
 * Return: 0 for success, -EIO otherwise.
 */
int cppc_get_highest_perf(int cpunum, u64 *highest_perf)
{
	return cppc_get_reg_val(cpunum, HIGHEST_PERF, highest_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_highest_perf);

/**
 * cppc_get_epp_perf - Get the epp register value.
 * @cpunum: CPU from which to get epp preference value.
 * @epp_perf: Return address.
 *
 * Return: 0 for success, -EIO otherwise.
 */
int cppc_get_epp_perf(int cpunum, u64 *epp_perf)
{
	return cppc_get_reg_val(cpunum, ENERGY_PERF, epp_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_epp_perf);

/**
 * cppc_get_perf_caps - Get a CPU's performance capabilities.
 * @cpunum: CPU from which to get capabilities info.
 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
 *
 * Return: 0 for success with perf_caps populated else -ERRNO.
 */
int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *highest_reg, *lowest_reg,
		*lowest_non_linear_reg, *nominal_reg, *guaranteed_reg,
		*low_freq_reg = NULL, *nom_freq_reg = NULL;
	u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
	lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
	lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
	nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
	low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ];
	nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ];
	guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF];

	/* Are any of the regs PCC ? */
	if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
	    CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) ||
	    CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg) ||
	    CPC_IN_PCC(guaranteed_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		regs_in_pcc = 1;
		down_write(&pcc_ss_data->pcc_lock);
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, highest_reg, &high);
	perf_caps->highest_perf = high;

	cpc_read(cpunum, lowest_reg, &low);
	perf_caps->lowest_perf = low;

	cpc_read(cpunum, nominal_reg, &nom);
	perf_caps->nominal_perf = nom;

	if (guaranteed_reg->type != ACPI_TYPE_BUFFER ||
	    IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
		perf_caps->guaranteed_perf = 0;
	} else {
		cpc_read(cpunum, guaranteed_reg, &guaranteed);
		perf_caps->guaranteed_perf = guaranteed;
	}

	cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
	perf_caps->lowest_nonlinear_perf = min_nonlinear;

	if (!high || !low || !nom || !min_nonlinear)
		ret = -EFAULT;

	/* Read optional lowest and nominal frequencies if present */
	if (CPC_SUPPORTED(low_freq_reg))
		cpc_read(cpunum, low_freq_reg, &low_f);

	if (CPC_SUPPORTED(nom_freq_reg))
		cpc_read(cpunum, nom_freq_reg, &nom_f);

	perf_caps->lowest_freq = low_f;
	perf_caps->nominal_freq = nom_f;

out_err:
	if (regs_in_pcc)
		up_write(&pcc_ss_data->pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);

/**
 * cppc_perf_ctrs_in_pcc_cpu - Check if any perf counters of a CPU are in PCC.
 * @cpu: CPU on which to check perf counters.
 *
 * Return: true if any of the counters are in PCC regions, false otherwise
 */
bool cppc_perf_ctrs_in_pcc_cpu(unsigned int cpu)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cpc_register_resource *ref_perf_reg;

	/*
	 * If reference perf register is not supported then we should use the
	 * nominal perf value
	 */
	ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
	if (!CPC_SUPPORTED(ref_perf_reg))
		ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];

	return CPC_IN_PCC(&cpc_desc->cpc_regs[DELIVERED_CTR]) ||
	       CPC_IN_PCC(&cpc_desc->cpc_regs[REFERENCE_CTR]) ||
	       CPC_IN_PCC(&cpc_desc->cpc_regs[CTR_WRAP_TIME]) ||
	       CPC_IN_PCC(ref_perf_reg);
}
EXPORT_SYMBOL_GPL(cppc_perf_ctrs_in_pcc_cpu);

/**
 * cppc_perf_ctrs_in_pcc - Check if any perf counters are in a PCC region.
 *
 * CPPC has flexibility about how CPU performance counters are accessed.
 * One of the choices is PCC regions, which can have a high access latency. This
 * routine allows callers of cppc_get_perf_ctrs() to know this ahead of time.
 *
 * Return: true if any of the counters are in PCC regions, false otherwise
 */
bool cppc_perf_ctrs_in_pcc(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (cppc_perf_ctrs_in_pcc_cpu(cpu))
			return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(cppc_perf_ctrs_in_pcc);

/**
 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
 * @cpunum: CPU from which to read counters.
 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
 *
 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
 */
int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *delivered_reg, *reference_reg,
		*ref_perf_reg, *ctr_wrap_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	u64 delivered, reference, ref_perf, ctr_wrap_time;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
	reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
	ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
	ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];

	/*
	 * If reference perf register is not supported then we should
	 * use the nominal perf value
	 */
	if (!CPC_SUPPORTED(ref_perf_reg))
		ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];

	/* Are any of the regs PCC ? */
	if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
	    CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		down_write(&pcc_ss_data->pcc_lock);
		regs_in_pcc = 1;
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, delivered_reg, &delivered);
	cpc_read(cpunum, reference_reg, &reference);
	cpc_read(cpunum, ref_perf_reg, &ref_perf);

	/*
	 * Per spec, if the ctr_wrap_time optional register is unsupported,
	 * then the performance counters are assumed to never wrap during
	 * the lifetime of the platform.
	 */
	ctr_wrap_time = (u64)(~((u64)0));
	if (CPC_SUPPORTED(ctr_wrap_reg))
		cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);

	if (!delivered || !reference || !ref_perf) {
		ret = -EFAULT;
		goto out_err;
	}

	perf_fb_ctrs->delivered = delivered;
	perf_fb_ctrs->reference = reference;
	perf_fb_ctrs->reference_perf = ref_perf;
	perf_fb_ctrs->wraparound_time = ctr_wrap_time;
out_err:
	if (regs_in_pcc)
		up_write(&pcc_ss_data->pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);

/*
 * Set Energy Performance Preference Register value through
 * Performance Controls Interface
 */
int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
{
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_register_resource *epp_set_reg;
	struct cpc_register_resource *auto_sel_reg;
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
	epp_set_reg = &cpc_desc->cpc_regs[ENERGY_PERF];

	if (CPC_IN_PCC(epp_set_reg) || CPC_IN_PCC(auto_sel_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id for CPU:%d\n", cpu);
			return -ENODEV;
		}

		if (CPC_SUPPORTED(auto_sel_reg)) {
			ret = cpc_write(cpu, auto_sel_reg, enable);
			if (ret)
				return ret;
		}

		if (CPC_SUPPORTED(epp_set_reg)) {
			ret = cpc_write(cpu, epp_set_reg, perf_ctrls->energy_perf);
			if (ret)
				return ret;
		}

		pcc_ss_data = pcc_data[pcc_ss_id];

		down_write(&pcc_ss_data->pcc_lock);
		/* after writing CPC, transfer the ownership of PCC to platform */
		ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
		up_write(&pcc_ss_data->pcc_lock);
	} else if (osc_cpc_flexible_adr_space_confirmed &&
		   CPC_SUPPORTED(epp_set_reg) && CPC_IN_FFH(epp_set_reg)) {
		ret = cpc_write(cpu, epp_set_reg, perf_ctrls->energy_perf);
	} else {
		ret = -ENOTSUPP;
		pr_debug("_CPC in PCC and _CPC in FFH are not supported\n");
	}

	return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_epp_perf);

/**
 * cppc_set_epp() - Write the EPP register.
 * @cpu: CPU on which to write register.
 * @epp_val: Value to write to the EPP register.
 */
int cppc_set_epp(int cpu, u64 epp_val)
{
	if (epp_val > CPPC_EPP_ENERGY_EFFICIENCY_PREF)
		return -EINVAL;

	return cppc_set_reg_val(cpu, ENERGY_PERF, epp_val);
}
EXPORT_SYMBOL_GPL(cppc_set_epp);

/**
 * cppc_get_auto_act_window() - Read autonomous activity window register.
 * @cpu: CPU from which to read register.
 * @auto_act_window: Return address.
 *
 * According to ACPI 6.5, s8.4.6.1.6, the value read from the autonomous
 * activity window register consists of two parts: a 7-bit significand and
 * a 3-bit exponent.
 */
int cppc_get_auto_act_window(int cpu, u64 *auto_act_window)
{
	unsigned int exp;
	u64 val, sig;
	int ret;

	if (auto_act_window == NULL)
		return -EINVAL;

	ret = cppc_get_reg_val(cpu, AUTO_ACT_WINDOW, &val);
	if (ret)
		return ret;

	sig = val & CPPC_AUTO_ACT_WINDOW_MAX_SIG;
	exp = (val >> CPPC_AUTO_ACT_WINDOW_SIG_BIT_SIZE) & CPPC_AUTO_ACT_WINDOW_MAX_EXP;
	*auto_act_window = sig * int_pow(10, exp);

	return 0;
}
EXPORT_SYMBOL_GPL(cppc_get_auto_act_window);

/**
 * cppc_set_auto_act_window() - Write autonomous activity window register.
 * @cpu: CPU on which to write register.
 * @auto_act_window: usec value to write to the autonomous activity window register.
 *
 * According to ACPI 6.5, s8.4.6.1.6, the value to write to the autonomous
 * activity window register consists of two parts: a 7-bit significand and
 * a 3-bit exponent.
 */
int cppc_set_auto_act_window(int cpu, u64 auto_act_window)
{
	/* The max value to store is 1270000000 */
	u64 max_val = CPPC_AUTO_ACT_WINDOW_MAX_SIG * int_pow(10, CPPC_AUTO_ACT_WINDOW_MAX_EXP);
	int exp = 0;
	u64 val;

	if (auto_act_window > max_val)
		return -EINVAL;

	/*
	 * The max significand is 127; when auto_act_window is larger than
	 * 129, discard the precision of the last digit and increase the
	 * exponent by 1.
	 */
	while (auto_act_window > CPPC_AUTO_ACT_WINDOW_SIG_CARRY_THRESH) {
		auto_act_window /= 10;
		exp += 1;
	}

	/* For 128 and 129, cut it to 127. */
	if (auto_act_window > CPPC_AUTO_ACT_WINDOW_MAX_SIG)
		auto_act_window = CPPC_AUTO_ACT_WINDOW_MAX_SIG;

	val = (exp << CPPC_AUTO_ACT_WINDOW_SIG_BIT_SIZE) + auto_act_window;

	return cppc_set_reg_val(cpu, AUTO_ACT_WINDOW, val);
}
EXPORT_SYMBOL_GPL(cppc_set_auto_act_window);
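
/*
 * Worked example (illustrative): cppc_set_auto_act_window(cpu, 5000)
 * divides 5000 down to 50 with exp == 2 (50 <= 129), so the encoded
 * value is (2 << 7) + 50 == 306. Decoding in cppc_get_auto_act_window()
 * recovers sig == 50 and exp == 2, i.e. 50 * 10^2 == 5000 usec.
 */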
1692
1693 /**
1694 * cppc_get_auto_sel() - Read autonomous selection register.
1695 * @cpu: CPU from which to read register.
1696 * @enable: Return address.
1697 */
cppc_get_auto_sel(int cpu,bool * enable)1698 int cppc_get_auto_sel(int cpu, bool *enable)
1699 {
1700 u64 auto_sel;
1701 int ret;
1702
1703 if (enable == NULL)
1704 return -EINVAL;
1705
1706 ret = cppc_get_reg_val(cpu, AUTO_SEL_ENABLE, &auto_sel);
1707 if (ret)
1708 return ret;
1709
1710 *enable = (bool)auto_sel;
1711
1712 return 0;
1713 }
1714 EXPORT_SYMBOL_GPL(cppc_get_auto_sel);
1715
1716 /**
1717 * cppc_set_auto_sel - Write autonomous selection register.
1718 * @cpu : CPU to which to write register.
1719 * @enable : the desired value of autonomous selection resiter to be updated.
1720 */
cppc_set_auto_sel(int cpu,bool enable)1721 int cppc_set_auto_sel(int cpu, bool enable)
1722 {
1723 return cppc_set_reg_val(cpu, AUTO_SEL_ENABLE, enable);
1724 }
1725 EXPORT_SYMBOL_GPL(cppc_set_auto_sel);
1726
1727 /**
1728 * cppc_set_enable - Set to enable CPPC on the processor by writing the
1729 * Continuous Performance Control package EnableRegister field.
1730 * @cpu: CPU for which to enable CPPC register.
1731 * @enable: 0 - disable, 1 - enable CPPC feature on the processor.
1732 *
1733 * Return: 0 for success, -ERRNO or -EIO otherwise.
1734 */
cppc_set_enable(int cpu,bool enable)1735 int cppc_set_enable(int cpu, bool enable)
1736 {
1737 return cppc_set_reg_val(cpu, ENABLE, enable);
1738 }
1739 EXPORT_SYMBOL_GPL(cppc_set_enable);

/**
 * cppc_set_perf - Set a CPU's performance controls.
 * @cpu: CPU for which to set performance controls.
 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
 *
 * Return: 0 for success, -ERRNO otherwise.
 */
int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
        struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
        struct cpc_register_resource *desired_reg, *min_perf_reg, *max_perf_reg;
        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
        struct cppc_pcc_data *pcc_ss_data = NULL;
        int ret = 0;

        if (!cpc_desc) {
                pr_debug("No CPC descriptor for CPU:%d\n", cpu);
                return -ENODEV;
        }

        desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
        min_perf_reg = &cpc_desc->cpc_regs[MIN_PERF];
        max_perf_reg = &cpc_desc->cpc_regs[MAX_PERF];

        /*
         * This is Phase-I where we want to write to CPC registers
         * -> We want all CPUs to be able to execute this phase in parallel
         *
         * Since read_lock can be acquired by multiple CPUs simultaneously we
         * achieve that goal here
         */
        if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg)) {
                if (pcc_ss_id < 0) {
                        pr_debug("Invalid pcc_ss_id\n");
                        return -ENODEV;
                }
                pcc_ss_data = pcc_data[pcc_ss_id];
                down_read(&pcc_ss_data->pcc_lock);      /* BEGIN Phase-I */
                if (pcc_ss_data->platform_owns_pcc) {
                        ret = check_pcc_chan(pcc_ss_id, false);
                        if (ret) {
                                up_read(&pcc_ss_data->pcc_lock);
                                return ret;
                        }
                }
                /*
                 * Update the pending_write to make sure a PCC CMD_READ will not
                 * arrive and steal the channel during the switch to write lock
                 */
                pcc_ss_data->pending_pcc_write_cmd = true;
                cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
                cpc_desc->write_cmd_status = 0;
        }

        cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);

        /*
         * Only write min_perf and max_perf if they are non-zero. Some
         * drivers pass zero for min and max perf, but they don't mean to
         * set zero; they simply don't want to write those registers.
         */
        if (perf_ctrls->min_perf)
                cpc_write(cpu, min_perf_reg, perf_ctrls->min_perf);
        if (perf_ctrls->max_perf)
                cpc_write(cpu, max_perf_reg, perf_ctrls->max_perf);

        if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg))
                up_read(&pcc_ss_data->pcc_lock);        /* END Phase-I */
        /*
         * This is Phase-II where we transfer the ownership of PCC to the
         * platform.
         *
         * Short Summary: Consider a group of cppc_set_perf requests that
         * happen in a short overlapping interval. The last CPU to come out
         * of Phase-I will enter Phase-II and ring the doorbell.
         *
         * We have the following requirements for Phase-II:
         * 1. We want to execute Phase-II only when there are no CPUs
         *    currently executing in Phase-I.
         * 2. Once we start Phase-II we want to avoid all other CPUs from
         *    entering Phase-I.
         * 3. We want only one CPU among all those who went through Phase-I
         *    to run Phase-II.
         *
         * If write_trylock fails to get the lock and doesn't transfer the
         * PCC ownership to the platform, then one of the following is true:
         * 1. There is at least one CPU in Phase-I which will later execute
         *    write_trylock, so the CPUs in Phase-I will be responsible for
         *    executing the Phase-II.
         * 2. Some other CPU has beaten this CPU to successfully execute the
         *    write_trylock and has already acquired the write_lock. We know
         *    for a fact it (the other CPU acquiring the write_lock) couldn't
         *    have happened before this CPU's Phase-I as we held the
         *    read_lock.
         * 3. Some other CPU executing a pcc CMD_READ has stolen the
         *    down_write, in which case send_pcc_cmd will check for pending
         *    CMD_WRITE commands by checking pending_pcc_write_cmd.
         * So in all cases, this CPU knows that its request will be delivered
         * by another CPU and can return.
         *
         * After getting the down_write we still need to check
         * pending_pcc_write_cmd to take care of the following scenario: the
         * thread running this code could be scheduled out between Phase-I
         * and Phase-II. Before it is scheduled back in, another CPU could
         * have delivered the request to the platform by triggering the
         * doorbell and transferred the ownership of PCC to the platform.
         * The check avoids triggering an unnecessary doorbell and, more
         * importantly, makes sure that the PCC channel ownership is still
         * with OSPM before the doorbell is rung.
         * pending_pcc_write_cmd can also be cleared by a different CPU if
         * there was a pcc CMD_READ waiting on the down_write and it stole
         * the lock before the pcc CMD_WRITE completed. send_pcc_cmd checks
         * for this case during a CMD_READ and, if there are pending writes,
         * delivers the write command before servicing the read command.
         */
        if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg)) {
                if (down_write_trylock(&pcc_ss_data->pcc_lock)) {       /* BEGIN Phase-II */
                        /* Update only if there are pending write commands */
                        if (pcc_ss_data->pending_pcc_write_cmd)
                                send_pcc_cmd(pcc_ss_id, CMD_WRITE);
                        up_write(&pcc_ss_data->pcc_lock);               /* END Phase-II */
                } else
                        /* Wait until pcc_write_cnt is updated by send_pcc_cmd */
                        wait_event(pcc_ss_data->pcc_write_wait_q,
                                   cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);

                /* send_pcc_cmd updates the status in case of failure */
                ret = cpc_desc->write_cmd_status;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_perf);
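
/*
 * Illustrative call site (hypothetical, not part of this file): a cpufreq
 * driver's target callback would typically fill in only the desired level
 * and leave min/max at zero, so only the desired register is written.
 *
 *	struct cppc_perf_ctrls ctrls = {
 *		.desired_perf = target_perf,
 *	};
 *
 *	ret = cppc_set_perf(policy->cpu, &ctrls);
 *	if (ret)
 *		pr_debug("failed to set perf on CPU%d\n", policy->cpu);
 */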

/**
 * cppc_get_transition_latency - returns frequency transition latency in ns
 * @cpu_num: CPU number for per_cpu().
 *
 * ACPI CPPC does not explicitly specify how a platform can specify the
 * transition latency for performance change requests. The closest we have
 * is the timing information from the PCCT tables which provides the info
 * on the number and frequency of PCC commands the platform can handle.
 *
 * If desired_reg is in the SystemMemory or SystemIo ACPI address space,
 * then assume there is no latency.
 */
int cppc_get_transition_latency(int cpu_num)
{
        /*
         * Expected transition latency is based on the PCCT timing values.
         * Below are the definitions from the ACPI spec:
         * pcc_nominal - Expected latency to process a command, in
         *               microseconds.
         * pcc_mpar    - The maximum number of periodic requests that the
         *               subspace channel can support, reported in commands
         *               per minute. 0 indicates no limitation.
         * pcc_mrtt    - The minimum amount of time that OSPM must wait
         *               after the completion of a command before issuing
         *               the next command, in microseconds.
         */
        struct cpc_desc *cpc_desc;
        struct cpc_register_resource *desired_reg;
        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
        struct cppc_pcc_data *pcc_ss_data;
        int latency_ns = 0;

        cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
        if (!cpc_desc)
                return -ENODATA;

        desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
        if (CPC_IN_SYSTEM_MEMORY(desired_reg) || CPC_IN_SYSTEM_IO(desired_reg))
                return 0;

        if (!CPC_IN_PCC(desired_reg) || pcc_ss_id < 0)
                return -ENODATA;

        pcc_ss_data = pcc_data[pcc_ss_id];
        if (pcc_ss_data->pcc_mpar)
                latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);

        latency_ns = max_t(int, latency_ns, pcc_ss_data->pcc_nominal * 1000);
        latency_ns = max_t(int, latency_ns, pcc_ss_data->pcc_mrtt * 1000);

        return latency_ns;
}
EXPORT_SYMBOL_GPL(cppc_get_transition_latency);
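
/*
 * Worked example of the computation above (values are hypothetical): with
 * pcc_mpar = 1000 commands/minute, the MPAR term is
 * 60 * (1000000000 / 1000) = 60000000 ns. With pcc_nominal = 500 us and
 * pcc_mrtt = 100 us, those terms are 500000 ns and 100000 ns, so the
 * reported transition latency is the maximum of the three: 60000000 ns.
 */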

/* Minimum struct length needed for the DMI processor entry we want */
#define DMI_ENTRY_PROCESSOR_MIN_LENGTH  48

/* Offset in the DMI processor structure for the max frequency */
#define DMI_PROCESSOR_MAX_SPEED         0x14

/* Callback function used to retrieve the max frequency from DMI */
static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
{
        const u8 *dmi_data = (const u8 *)dm;
        u16 *mhz = (u16 *)private;

        if (dm->type == DMI_ENTRY_PROCESSOR &&
            dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
                u16 val = (u16)get_unaligned((const u16 *)
                                (dmi_data + DMI_PROCESSOR_MAX_SPEED));
                *mhz = umax(val, *mhz);
        }
}

/* Look up the max frequency in DMI */
static u64 cppc_get_dmi_max_khz(void)
{
        u16 mhz = 0;

        dmi_walk(cppc_find_dmi_mhz, &mhz);

        /*
         * Real stupid fallback value, just in case there is no
         * actual value set.
         */
        mhz = mhz ? mhz : 1;

        return KHZ_PER_MHZ * mhz;
}

/*
 * If the CPPC lowest_freq and nominal_freq registers are exposed then we
 * can use them to convert perf to freq and vice versa. The conversion is
 * extrapolated as an affine function passing through the two points:
 * - (Low perf, Low freq)
 * - (Nominal perf, Nominal freq)
 */
unsigned int cppc_perf_to_khz(struct cppc_perf_caps *caps, unsigned int perf)
{
        s64 retval, offset = 0;
        static u64 max_khz;
        u64 mul, div;

        if (caps->lowest_freq && caps->nominal_freq) {
                /* Avoid special case when nominal_freq is equal to lowest_freq */
                if (caps->lowest_freq == caps->nominal_freq) {
                        mul = caps->nominal_freq;
                        div = caps->nominal_perf;
                } else {
                        mul = caps->nominal_freq - caps->lowest_freq;
                        div = caps->nominal_perf - caps->lowest_perf;
                }
                mul *= KHZ_PER_MHZ;
                offset = caps->nominal_freq * KHZ_PER_MHZ -
                         div64_u64(caps->nominal_perf * mul, div);
        } else {
                if (!max_khz)
                        max_khz = cppc_get_dmi_max_khz();
                mul = max_khz;
                div = caps->highest_perf;
        }

        retval = offset + div64_u64(perf * mul, div);
        if (retval >= 0)
                return retval;
        return 0;
}
EXPORT_SYMBOL_GPL(cppc_perf_to_khz);
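
/*
 * Worked example of the affine conversion above (values are hypothetical):
 * with lowest_perf = 10 at lowest_freq = 1000 MHz and nominal_perf = 20 at
 * nominal_freq = 2000 MHz, mul = (2000 - 1000) * 1000 = 1000000 kHz and
 * div = 20 - 10 = 10. The offset is 2000 * 1000 - (20 * 1000000 / 10) = 0,
 * so perf = 15 maps to 0 + 15 * 1000000 / 10 = 1500000 kHz, i.e. 1500 MHz,
 * halfway between the two reference points as expected.
 */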

unsigned int cppc_khz_to_perf(struct cppc_perf_caps *caps, unsigned int freq)
{
        s64 retval, offset = 0;
        static u64 max_khz;
        u64 mul, div;

        if (caps->lowest_freq && caps->nominal_freq) {
                /* Avoid special case when nominal_freq is equal to lowest_freq */
                if (caps->lowest_freq == caps->nominal_freq) {
                        mul = caps->nominal_perf;
                        div = caps->nominal_freq;
                } else {
                        mul = caps->nominal_perf - caps->lowest_perf;
                        div = caps->nominal_freq - caps->lowest_freq;
                }
                /*
                 * We don't need to convert to kHz for computing offset and can
                 * directly use nominal_freq and lowest_freq as the div64_u64
                 * will remove the frequency unit.
                 */
                offset = caps->nominal_perf -
                         div64_u64(caps->nominal_freq * mul, div);
                /* But we need it for computing the perf level. */
                div *= KHZ_PER_MHZ;
        } else {
                if (!max_khz)
                        max_khz = cppc_get_dmi_max_khz();
                mul = caps->highest_perf;
                div = max_khz;
        }

        retval = offset + div64_u64(freq * mul, div);
        if (retval >= 0)
                return retval;
        return 0;
}
EXPORT_SYMBOL_GPL(cppc_khz_to_perf);
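
/*
 * Worked example of the inverse conversion above (same hypothetical values
 * as for cppc_perf_to_khz): with lowest_perf = 10 at lowest_freq = 1000 MHz
 * and nominal_perf = 20 at nominal_freq = 2000 MHz, mul = 10 and the offset
 * is 20 - (2000 * 10 / 1000) = 0. After div is scaled to kHz
 * (div = 1000 * 1000), freq = 1500000 kHz maps back to
 * 0 + 1500000 * 10 / 1000000 = 15, recovering the perf value from the
 * previous example.
 */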