1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
4 *
5 * (C) Copyright 2014, 2015 Linaro Ltd.
6 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
7 *
8 * CPPC describes a few methods for controlling CPU performance using
9 * information from a per CPU table called CPC. This table is described in
10 * the ACPI v5.0+ specification. The table consists of a list of
11 * registers which may be memory mapped or hardware registers and also may
12 * include some static integer values.
13 *
14 * CPU performance is expressed on an abstract continuous scale, as opposed
15 * to a discretized P-state scale that is tied only to CPU frequency. In
16 * brief, the basic operation involves:
17 *
18 * - OS makes a CPU performance request. (Can provide min and max bounds)
19 *
20 * - Platform (such as a BMC) is free to optimize the request within the
21 * requested bounds depending on power/thermal budgets etc.
22 *
23 * - Platform conveys its decision back to OS
24 *
25 * The communication between OS and platform occurs through another medium
26 * called (PCC) Platform Communication Channel. This is a generic mailbox like
27 * mechanism which includes doorbell semantics to indicate register updates.
28 * See drivers/mailbox/pcc.c for details on PCC.
29 *
30 * Finer details about the PCC and CPPC spec are available in the ACPI v5.1 and
31 * above specifications.
32 */
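/*
 * Illustrative sketch (not part of this file) of how a cpufreq driver
 * typically consumes this interface; error handling is elided and the
 * cpu variable is assumed to be a valid logical CPU number:
 *
 *	struct cppc_perf_caps caps;
 *	struct cppc_perf_ctrls ctrls = {0};
 *
 *	if (!cppc_get_perf_caps(cpu, &caps)) {
 *		ctrls.desired_perf = caps.nominal_perf;
 *		cppc_set_perf(cpu, &ctrls);
 *	}
 *
 * The platform is free to clamp the request within the advertised caps.
 */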
33
34 #define pr_fmt(fmt) "ACPI CPPC: " fmt
35
36 #include <linux/delay.h>
37 #include <linux/iopoll.h>
38 #include <linux/ktime.h>
39 #include <linux/rwsem.h>
40 #include <linux/wait.h>
41 #include <linux/topology.h>
42 #include <linux/dmi.h>
43 #include <linux/units.h>
44 #include <asm/unaligned.h>
45
46 #include <acpi/cppc_acpi.h>
47
48 struct cppc_pcc_data {
49 struct pcc_mbox_chan *pcc_channel;
50 void __iomem *pcc_comm_addr;
51 bool pcc_channel_acquired;
52 unsigned int deadline_us;
53 unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;
54
55 bool pending_pcc_write_cmd; /* Any pending/batched PCC write cmds? */
56 bool platform_owns_pcc; /* Ownership of PCC subspace */
57 unsigned int pcc_write_cnt; /* Running count of PCC write commands */
58
59 /*
60 * Lock to provide controlled access to the PCC channel.
61 *
62 * For performance-critical use cases (currently cppc_set_perf), we
63 * need to take the read_lock and check if the channel belongs to OSPM
64 * before reading or writing to the PCC subspace, and we need to take
65 * the write_lock before transferring channel ownership to the platform
66 * via a doorbell. This allows us to batch a number of CPPC requests
67 * if they happen to originate at about the same time.
68 *
69 * For non-performance-critical use cases (init), take the write_lock
70 * for all purposes, which gives exclusive access.
71 *
72 */
73 struct rw_semaphore pcc_lock;
74
75 /* Wait queue for CPUs whose requests were batched */
76 wait_queue_head_t pcc_write_wait_q;
77 ktime_t last_cmd_cmpl_time;
78 ktime_t last_mpar_reset;
79 int mpar_count;
80 int refcount;
81 };
82
83 /* Array to represent the PCC channel per subspace ID */
84 static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];
85 /* The cpu_pcc_subspace_idx contains per CPU subspace ID */
86 static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);
87
88 /*
89 * The cpc_desc structure contains the ACPI register details
90 * as described in the per CPU _CPC tables. The details
91 * include the type of register (e.g. PCC, System IO, FFH etc.)
92 * and destination addresses which lets us READ/WRITE CPU performance
93 * information using the appropriate I/O methods.
94 */
95 static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
96
97 /* pcc mapped address + header size + offset within PCC subspace */
98 #define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
99 0x8 + (offs))
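/*
 * Note: the 0x8 skips the generic PCC shared-memory header (the 4-byte
 * signature plus 2-byte command and 2-byte status fields of struct
 * acpi_pcct_shared_memory), so offs is relative to the start of the
 * subspace's register region.
 */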
100
101 /* Check if a CPC register is in PCC */
102 #define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \
103 (cpc)->cpc_entry.reg.space_id == \
104 ACPI_ADR_SPACE_PLATFORM_COMM)
105
106 /* Check if a CPC register is in FFH */
107 #define CPC_IN_FFH(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \
108 (cpc)->cpc_entry.reg.space_id == \
109 ACPI_ADR_SPACE_FIXED_HARDWARE)
110
111 /* Check if a CPC register is in SystemMemory */
112 #define CPC_IN_SYSTEM_MEMORY(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \
113 (cpc)->cpc_entry.reg.space_id == \
114 ACPI_ADR_SPACE_SYSTEM_MEMORY)
115
116 /* Check if a CPC register is in SystemIo */
117 #define CPC_IN_SYSTEM_IO(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \
118 (cpc)->cpc_entry.reg.space_id == \
119 ACPI_ADR_SPACE_SYSTEM_IO)
120
121 /* Evaluates to True if reg is a NULL register descriptor */
122 #define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \
123 (reg)->address == 0 && \
124 (reg)->bit_width == 0 && \
125 (reg)->bit_offset == 0 && \
126 (reg)->access_width == 0)
127
128 /* Evaluates to True if an optional cpc field is supported */
129 #define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ? \
130 !!(cpc)->cpc_entry.int_value : \
131 !IS_NULL_REG(&(cpc)->cpc_entry.reg))
132 /*
133 * Arbitrary number of retries in case the remote processor is slow to
134 * respond to PCC commands. Keep it high enough to cover emulators, where
135 * the processors run painfully slowly.
136 */
137 #define NUM_RETRIES 500ULL
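/*
 * For example, a (hypothetical) PCCT nominal latency of 100 us would give
 * a polling deadline of NUM_RETRIES * 100 us = 50 ms in
 * register_pcc_channel().
 */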
138
139 #define OVER_16BITS_MASK ~0xFFFFULL
140
141 #define define_one_cppc_ro(_name) \
142 static struct kobj_attribute _name = \
143 __ATTR(_name, 0444, show_##_name, NULL)
144
145 #define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)
146
147 #define show_cppc_data(access_fn, struct_name, member_name) \
148 static ssize_t show_##member_name(struct kobject *kobj, \
149 struct kobj_attribute *attr, char *buf) \
150 { \
151 struct cpc_desc *cpc_ptr = to_cpc_desc(kobj); \
152 struct struct_name st_name = {0}; \
153 int ret; \
154 \
155 ret = access_fn(cpc_ptr->cpu_id, &st_name); \
156 if (ret) \
157 return ret; \
158 \
159 return sysfs_emit(buf, "%llu\n", \
160 (u64)st_name.member_name); \
161 } \
162 define_one_cppc_ro(member_name)
163
164 show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
165 show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
166 show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
167 show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
168 show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, guaranteed_perf);
169 show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq);
170 show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);
171
172 show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
173 show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
174
175 /* Check for valid access_width, otherwise, fallback to using bit_width */
176 #define GET_BIT_WIDTH(reg) ((reg)->access_width ? (8 << ((reg)->access_width - 1)) : (reg)->bit_width)
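/*
 * For example, an access_width of 3 encodes a 32-bit access
 * (8 << (3 - 1) == 32), while an access_width of 0 means the field is
 * unused and bit_width is taken as-is.
 */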
177
178 /* Shift and apply the mask for CPC reads/writes */
179 #define MASK_VAL_READ(reg, val) (((val) >> (reg)->bit_offset) & \
180 GENMASK(((reg)->bit_width) - 1, 0))
181 #define MASK_VAL_WRITE(reg, prev_val, val) \
182 ((((val) & GENMASK(((reg)->bit_width) - 1, 0)) << (reg)->bit_offset) | \
183 ((prev_val) & ~(GENMASK(((reg)->bit_width) - 1, 0) << (reg)->bit_offset)))
184
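/*
 * Worked example for a hypothetical register with bit_offset == 8 and
 * bit_width == 8:
 *
 *	MASK_VAL_READ(reg, 0xABCD)        == 0xAB
 *	MASK_VAL_WRITE(reg, 0xABCD, 0x12) == 0x12CD
 *
 * i.e. a read extracts the 8-bit field and a write replaces only that
 * field, preserving the other bits of the previous value.
 */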
185 static ssize_t show_feedback_ctrs(struct kobject *kobj,
186 struct kobj_attribute *attr, char *buf)
187 {
188 struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
189 struct cppc_perf_fb_ctrs fb_ctrs = {0};
190 int ret;
191
192 ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
193 if (ret)
194 return ret;
195
196 return sysfs_emit(buf, "ref:%llu del:%llu\n",
197 fb_ctrs.reference, fb_ctrs.delivered);
198 }
199 define_one_cppc_ro(feedback_ctrs);
200
201 static struct attribute *cppc_attrs[] = {
202 &feedback_ctrs.attr,
203 &reference_perf.attr,
204 &wraparound_time.attr,
205 &highest_perf.attr,
206 &lowest_perf.attr,
207 &lowest_nonlinear_perf.attr,
208 &guaranteed_perf.attr,
209 &nominal_perf.attr,
210 &nominal_freq.attr,
211 &lowest_freq.attr,
212 NULL
213 };
214 ATTRIBUTE_GROUPS(cppc);
215
216 static const struct kobj_type cppc_ktype = {
217 .sysfs_ops = &kobj_sysfs_ops,
218 .default_groups = cppc_groups,
219 };
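/*
 * These attributes are exposed under the per-CPU kobject registered in
 * acpi_cppc_processor_probe(), e.g.:
 *
 *	/sys/devices/system/cpu/cpu0/acpi_cppc/highest_perf
 *	/sys/devices/system/cpu/cpu0/acpi_cppc/feedback_ctrs
 */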
220
221 static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
222 {
223 int ret, status;
224 struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
225 struct acpi_pcct_shared_memory __iomem *generic_comm_base =
226 pcc_ss_data->pcc_comm_addr;
227
228 if (!pcc_ss_data->platform_owns_pcc)
229 return 0;
230
231 /*
232 * Poll the PCC status register every 3 us (delay_us) for a maximum of
233 * deadline_us (timeout_us) until the PCC command complete bit is set (cond).
234 */
235 ret = readw_relaxed_poll_timeout(&generic_comm_base->status, status,
236 status & PCC_CMD_COMPLETE_MASK, 3,
237 pcc_ss_data->deadline_us);
238
239 if (likely(!ret)) {
240 pcc_ss_data->platform_owns_pcc = false;
241 if (chk_err_bit && (status & PCC_ERROR_MASK))
242 ret = -EIO;
243 }
244
245 if (unlikely(ret))
246 pr_err("PCC check channel failed for ss: %d. ret=%d\n",
247 pcc_ss_id, ret);
248
249 return ret;
250 }
251
252 /*
253 * This function transfers the ownership of the PCC channel to the platform,
254 * so it must be called while holding the write_lock (pcc_lock).
255 */
256 static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
257 {
258 int ret = -EIO, i;
259 struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
260 struct acpi_pcct_shared_memory __iomem *generic_comm_base =
261 pcc_ss_data->pcc_comm_addr;
262 unsigned int time_delta;
263
264 /*
265 * For CMD_WRITE, the caller is expected to have checked the channel
266 * before writing to the PCC space.
267 */
268 if (cmd == CMD_READ) {
269 /*
270 * If there are pending cpc_writes, then we stole the channel
271 * before write completion, so first send a WRITE command to
272 * platform
273 */
274 if (pcc_ss_data->pending_pcc_write_cmd)
275 send_pcc_cmd(pcc_ss_id, CMD_WRITE);
276
277 ret = check_pcc_chan(pcc_ss_id, false);
278 if (ret)
279 goto end;
280 } else /* CMD_WRITE */
281 pcc_ss_data->pending_pcc_write_cmd = false;
282
283 /*
284 * Handle the Minimum Request Turnaround Time(MRTT)
285 * "The minimum amount of time that OSPM must wait after the completion
286 * of a command before issuing the next command, in microseconds"
287 */
288 if (pcc_ss_data->pcc_mrtt) {
289 time_delta = ktime_us_delta(ktime_get(),
290 pcc_ss_data->last_cmd_cmpl_time);
291 if (pcc_ss_data->pcc_mrtt > time_delta)
292 udelay(pcc_ss_data->pcc_mrtt - time_delta);
293 }
294
295 /*
296 * Handle the non-zero Maximum Periodic Access Rate(MPAR)
297 * "The maximum number of periodic requests that the subspace channel can
298 * support, reported in commands per minute. 0 indicates no limitation."
299 *
300 * This parameter should ideally be zero or large enough so that it can
301 * handle the maximum number of requests that all the cores in the system
302 * can collectively generate. If it is not, we will follow the spec and
303 * just not send the request to the platform after hitting the MPAR limit
304 * in any 60 s window.
305 */
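	/*
	 * For example, with a (hypothetical) pcc_mpar of 60, at most 60
	 * commands are sent in any 60 s window; further requests fail with
	 * -EIO until the window expires and mpar_count is reloaded below.
	 */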
306 if (pcc_ss_data->pcc_mpar) {
307 if (pcc_ss_data->mpar_count == 0) {
308 time_delta = ktime_ms_delta(ktime_get(),
309 pcc_ss_data->last_mpar_reset);
310 if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
311 pr_debug("PCC cmd for subspace %d not sent due to MPAR limit",
312 pcc_ss_id);
313 ret = -EIO;
314 goto end;
315 }
316 pcc_ss_data->last_mpar_reset = ktime_get();
317 pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
318 }
319 pcc_ss_data->mpar_count--;
320 }
321
322 /* Write to the shared comm region. */
323 writew_relaxed(cmd, &generic_comm_base->command);
324
325 /* Flip CMD COMPLETE bit */
326 writew_relaxed(0, &generic_comm_base->status);
327
328 pcc_ss_data->platform_owns_pcc = true;
329
330 /* Ring doorbell */
331 ret = mbox_send_message(pcc_ss_data->pcc_channel->mchan, &cmd);
332 if (ret < 0) {
333 pr_err("Err sending PCC mbox message. ss: %d cmd:%d, ret:%d\n",
334 pcc_ss_id, cmd, ret);
335 goto end;
336 }
337
338 /* wait for completion and check for PCC error bit */
339 ret = check_pcc_chan(pcc_ss_id, true);
340
341 if (pcc_ss_data->pcc_mrtt)
342 pcc_ss_data->last_cmd_cmpl_time = ktime_get();
343
344 if (pcc_ss_data->pcc_channel->mchan->mbox->txdone_irq)
345 mbox_chan_txdone(pcc_ss_data->pcc_channel->mchan, ret);
346 else
347 mbox_client_txdone(pcc_ss_data->pcc_channel->mchan, ret);
348
349 end:
350 if (cmd == CMD_WRITE) {
351 if (unlikely(ret)) {
352 for_each_possible_cpu(i) {
353 struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);
354
355 if (!desc)
356 continue;
357
358 if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
359 desc->write_cmd_status = ret;
360 }
361 }
362 pcc_ss_data->pcc_write_cnt++;
363 wake_up_all(&pcc_ss_data->pcc_write_wait_q);
364 }
365
366 return ret;
367 }
368
369 static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
370 {
371 if (ret < 0)
372 pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
373 *(u16 *)msg, ret);
374 else
375 pr_debug("TX completed. CMD sent:%x, ret:%d\n",
376 *(u16 *)msg, ret);
377 }
378
379 static struct mbox_client cppc_mbox_cl = {
380 .tx_done = cppc_chan_tx_done,
381 .knows_txdone = true,
382 };
383
384 static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
385 {
386 int result = -EFAULT;
387 acpi_status status = AE_OK;
388 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
389 struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
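	/*
	 * Per the ACPI spec, _PSD returns a package of five integers:
	 * NumEntries, Revision, Domain, CoordType and NumProcessors,
	 * hence the "NNNNN" format string.
	 */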
390 struct acpi_buffer state = {0, NULL};
391 union acpi_object *psd = NULL;
392 struct acpi_psd_package *pdomain;
393
394 status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
395 &buffer, ACPI_TYPE_PACKAGE);
396 if (status == AE_NOT_FOUND) /* _PSD is optional */
397 return 0;
398 if (ACPI_FAILURE(status))
399 return -ENODEV;
400
401 psd = buffer.pointer;
402 if (!psd || psd->package.count != 1) {
403 pr_debug("Invalid _PSD data\n");
404 goto end;
405 }
406
407 pdomain = &(cpc_ptr->domain_info);
408
409 state.length = sizeof(struct acpi_psd_package);
410 state.pointer = pdomain;
411
412 status = acpi_extract_package(&(psd->package.elements[0]),
413 &format, &state);
414 if (ACPI_FAILURE(status)) {
415 pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
416 goto end;
417 }
418
419 if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
420 pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
421 goto end;
422 }
423
424 if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
425 pr_debug("Unknown _PSD:revision for CPU: %d\n", cpc_ptr->cpu_id);
426 goto end;
427 }
428
429 if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
430 pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
431 pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
432 pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
433 goto end;
434 }
435
436 result = 0;
437 end:
438 kfree(buffer.pointer);
439 return result;
440 }
441
442 bool acpi_cpc_valid(void)
443 {
444 struct cpc_desc *cpc_ptr;
445 int cpu;
446
447 if (acpi_disabled)
448 return false;
449
450 for_each_present_cpu(cpu) {
451 cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
452 if (!cpc_ptr)
453 return false;
454 }
455
456 return true;
457 }
458 EXPORT_SYMBOL_GPL(acpi_cpc_valid);
459
460 bool cppc_allow_fast_switch(void)
461 {
462 struct cpc_register_resource *desired_reg;
463 struct cpc_desc *cpc_ptr;
464 int cpu;
465
466 for_each_possible_cpu(cpu) {
467 cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
468 desired_reg = &cpc_ptr->cpc_regs[DESIRED_PERF];
469 if (!CPC_IN_SYSTEM_MEMORY(desired_reg) &&
470 !CPC_IN_SYSTEM_IO(desired_reg))
471 return false;
472 }
473
474 return true;
475 }
476 EXPORT_SYMBOL_GPL(cppc_allow_fast_switch);
477
478 /**
479 * acpi_get_psd_map - Map the CPUs in the freq domain of a given cpu
480 * @cpu: Find all CPUs that share a domain with cpu.
481 * @cpu_data: Pointer to CPU specific CPPC data including PSD info.
482 *
483 * Return: 0 for success or negative value for err.
484 */
485 int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data)
486 {
487 struct cpc_desc *cpc_ptr, *match_cpc_ptr;
488 struct acpi_psd_package *match_pdomain;
489 struct acpi_psd_package *pdomain;
490 int count_target, i;
491
492 /*
493 * Now that we have _PSD data from all CPUs, let's setup P-state
494 * domain info.
495 */
496 cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
497 if (!cpc_ptr)
498 return -EFAULT;
499
500 pdomain = &(cpc_ptr->domain_info);
501 cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
502 if (pdomain->num_processors <= 1)
503 return 0;
504
505 /* Validate the Domain info */
506 count_target = pdomain->num_processors;
507 if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
508 cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ALL;
509 else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
510 cpu_data->shared_type = CPUFREQ_SHARED_TYPE_HW;
511 else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
512 cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ANY;
513
514 for_each_possible_cpu(i) {
515 if (i == cpu)
516 continue;
517
518 match_cpc_ptr = per_cpu(cpc_desc_ptr, i);
519 if (!match_cpc_ptr)
520 goto err_fault;
521
522 match_pdomain = &(match_cpc_ptr->domain_info);
523 if (match_pdomain->domain != pdomain->domain)
524 continue;
525
526 /* Here i and cpu are in the same domain */
527 if (match_pdomain->num_processors != count_target)
528 goto err_fault;
529
530 if (pdomain->coord_type != match_pdomain->coord_type)
531 goto err_fault;
532
533 cpumask_set_cpu(i, cpu_data->shared_cpu_map);
534 }
535
536 return 0;
537
538 err_fault:
539 /* Assume no coordination on any error parsing domain info */
540 cpumask_clear(cpu_data->shared_cpu_map);
541 cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
542 cpu_data->shared_type = CPUFREQ_SHARED_TYPE_NONE;
543
544 return -EFAULT;
545 }
546 EXPORT_SYMBOL_GPL(acpi_get_psd_map);
547
548 static int register_pcc_channel(int pcc_ss_idx)
549 {
550 struct pcc_mbox_chan *pcc_chan;
551 u64 usecs_lat;
552
553 if (pcc_ss_idx >= 0) {
554 pcc_chan = pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);
555
556 if (IS_ERR(pcc_chan)) {
557 pr_err("Failed to find PCC channel for subspace %d\n",
558 pcc_ss_idx);
559 return -ENODEV;
560 }
561
562 pcc_data[pcc_ss_idx]->pcc_channel = pcc_chan;
563 /*
564 * pcc_chan->latency is just a nominal value. In reality
565 * the remote processor could be much slower to reply, so
566 * add an arbitrary amount of wait on top of that nominal latency.
567 */
568 usecs_lat = NUM_RETRIES * pcc_chan->latency;
569 pcc_data[pcc_ss_idx]->deadline_us = usecs_lat;
570 pcc_data[pcc_ss_idx]->pcc_mrtt = pcc_chan->min_turnaround_time;
571 pcc_data[pcc_ss_idx]->pcc_mpar = pcc_chan->max_access_rate;
572 pcc_data[pcc_ss_idx]->pcc_nominal = pcc_chan->latency;
573
574 pcc_data[pcc_ss_idx]->pcc_comm_addr =
575 acpi_os_ioremap(pcc_chan->shmem_base_addr,
576 pcc_chan->shmem_size);
577 if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
578 pr_err("Failed to ioremap PCC comm region mem for %d\n",
579 pcc_ss_idx);
580 return -ENOMEM;
581 }
582
583 /* Set flag so that we don't come here for each CPU. */
584 pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
585 }
586
587 return 0;
588 }
589
590 /**
591 * cpc_ffh_supported() - check if FFH reading supported
592 *
593 * Check if the architecture has support for functional fixed hardware
594 * read/write capability.
595 *
596 * Return: true for supported, false for not supported
597 */
598 bool __weak cpc_ffh_supported(void)
599 {
600 return false;
601 }
602
603 /**
604 * cpc_supported_by_cpu() - check if CPPC is supported by CPU
605 *
606 * Check if the architectural support for CPPC is present even
607 * if the _OSC hasn't prescribed it
608 *
609 * Return: true for supported, false for not supported
610 */
611 bool __weak cpc_supported_by_cpu(void)
612 {
613 return false;
614 }
615
616 /**
617 * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
618 * @pcc_ss_id: PCC Subspace index as in the PCC client ACPI package.
619 *
620 * Check and allocate the cppc_pcc_data memory.
621 * In some processor configurations it is possible that the same subspace
622 * is shared between multiple CPUs. This is seen especially in CPUs
623 * with hardware multi-threading support.
624 *
625 * Return: 0 for success, errno for failure
626 */
627 static int pcc_data_alloc(int pcc_ss_id)
628 {
629 if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
630 return -EINVAL;
631
632 if (pcc_data[pcc_ss_id]) {
633 pcc_data[pcc_ss_id]->refcount++;
634 } else {
635 pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data),
636 GFP_KERNEL);
637 if (!pcc_data[pcc_ss_id])
638 return -ENOMEM;
639 pcc_data[pcc_ss_id]->refcount++;
640 }
641
642 return 0;
643 }
644
645 /*
646 * An example CPC table looks like the following.
647 *
648 * Name (_CPC, Package() {
649 * 17, // NumEntries
650 * 1, // Revision
651 * ResourceTemplate() {Register(PCC, 32, 0, 0x120, 2)}, // Highest Performance
652 * ResourceTemplate() {Register(PCC, 32, 0, 0x124, 2)}, // Nominal Performance
653 * ResourceTemplate() {Register(PCC, 32, 0, 0x128, 2)}, // Lowest Nonlinear Performance
654 * ResourceTemplate() {Register(PCC, 32, 0, 0x12C, 2)}, // Lowest Performance
655 * ResourceTemplate() {Register(PCC, 32, 0, 0x130, 2)}, // Guaranteed Performance Register
656 * ResourceTemplate() {Register(PCC, 32, 0, 0x110, 2)}, // Desired Performance Register
657 * ResourceTemplate() {Register(SystemMemory, 0, 0, 0, 0)},
658 * ...
659 * ...
660 * ...
661 * }
662 * Each Register() encodes how to access that specific register.
663 * e.g. a sample PCC entry has the following encoding:
664 *
665 * Register (
666 * PCC, // AddressSpaceKeyword
667 * 8, // RegisterBitWidth
668 * 8, // RegisterBitOffset
669 * 0x30, // RegisterAddress
670 * 9, // AccessSize (subspace ID)
671 * )
672 */
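/*
 * In the sketch above (values are illustrative only), the sample
 * Register() would be parsed into a struct cpc_reg with space_id ==
 * ACPI_ADR_SPACE_PLATFORM_COMM, bit_width == 8, bit_offset == 8,
 * address == 0x30 and access_width == 9, i.e. PCC subspace ID 9.
 */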
673
674 #ifndef arch_init_invariance_cppc
675 static inline void arch_init_invariance_cppc(void) { }
676 #endif
677
678 /**
679 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
680 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
681 *
682 * Return: 0 for success or negative value for err.
683 */
684 int acpi_cppc_processor_probe(struct acpi_processor *pr)
685 {
686 struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
687 union acpi_object *out_obj, *cpc_obj;
688 struct cpc_desc *cpc_ptr;
689 struct cpc_reg *gas_t;
690 struct device *cpu_dev;
691 acpi_handle handle = pr->handle;
692 unsigned int num_ent, i, cpc_rev;
693 int pcc_subspace_id = -1;
694 acpi_status status;
695 int ret = -ENODATA;
696
697 if (!osc_sb_cppc2_support_acked) {
698 pr_debug("CPPC v2 _OSC not acked\n");
699 if (!cpc_supported_by_cpu()) {
700 pr_debug("CPPC is not supported by the CPU\n");
701 return -ENODEV;
702 }
703 }
704
705 /* Parse the ACPI _CPC table for this CPU. */
706 status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
707 ACPI_TYPE_PACKAGE);
708 if (ACPI_FAILURE(status)) {
709 ret = -ENODEV;
710 goto out_buf_free;
711 }
712
713 out_obj = (union acpi_object *) output.pointer;
714
715 cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
716 if (!cpc_ptr) {
717 ret = -ENOMEM;
718 goto out_buf_free;
719 }
720
721 /* First entry is NumEntries. */
722 cpc_obj = &out_obj->package.elements[0];
723 if (cpc_obj->type == ACPI_TYPE_INTEGER) {
724 num_ent = cpc_obj->integer.value;
725 if (num_ent <= 1) {
726 pr_debug("Unexpected _CPC NumEntries value (%d) for CPU:%d\n",
727 num_ent, pr->id);
728 goto out_free;
729 }
730 } else {
731 pr_debug("Unexpected _CPC NumEntries entry type (%d) for CPU:%d\n",
732 cpc_obj->type, pr->id);
733 goto out_free;
734 }
735
736 /* Second entry should be revision. */
737 cpc_obj = &out_obj->package.elements[1];
738 if (cpc_obj->type == ACPI_TYPE_INTEGER) {
739 cpc_rev = cpc_obj->integer.value;
740 } else {
741 pr_debug("Unexpected _CPC Revision entry type (%d) for CPU:%d\n",
742 cpc_obj->type, pr->id);
743 goto out_free;
744 }
745
746 if (cpc_rev < CPPC_V2_REV) {
747 pr_debug("Unsupported _CPC Revision (%d) for CPU:%d\n", cpc_rev,
748 pr->id);
749 goto out_free;
750 }
751
752 /*
753 * Disregard _CPC if the number of entries in the returned package is not
754 * as expected, but support future revisions being proper supersets of
755 * v3 and only causing more entries to be returned by _CPC.
756 */
757 if ((cpc_rev == CPPC_V2_REV && num_ent != CPPC_V2_NUM_ENT) ||
758 (cpc_rev == CPPC_V3_REV && num_ent != CPPC_V3_NUM_ENT) ||
759 (cpc_rev > CPPC_V3_REV && num_ent <= CPPC_V3_NUM_ENT)) {
760 pr_debug("Unexpected number of _CPC return package entries (%d) for CPU:%d\n",
761 num_ent, pr->id);
762 goto out_free;
763 }
764 if (cpc_rev > CPPC_V3_REV) {
765 num_ent = CPPC_V3_NUM_ENT;
766 cpc_rev = CPPC_V3_REV;
767 }
768
769 cpc_ptr->num_entries = num_ent;
770 cpc_ptr->version = cpc_rev;
771
772 /* Iterate through remaining entries in _CPC */
773 for (i = 2; i < num_ent; i++) {
774 cpc_obj = &out_obj->package.elements[i];
775
776 if (cpc_obj->type == ACPI_TYPE_INTEGER) {
777 cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
778 cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
779 } else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
780 gas_t = (struct cpc_reg *)
781 cpc_obj->buffer.pointer;
782
783 /*
784 * The PCC Subspace index is encoded inside
785 * the CPC table entries. The same PCC index
786 * will be used for all the PCC entries,
787 * so extract it only once.
788 */
789 if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
790 if (pcc_subspace_id < 0) {
791 pcc_subspace_id = gas_t->access_width;
792 if (pcc_data_alloc(pcc_subspace_id))
793 goto out_free;
794 } else if (pcc_subspace_id != gas_t->access_width) {
795 pr_debug("Mismatched PCC ids in _CPC for CPU:%d\n",
796 pr->id);
797 goto out_free;
798 }
799 } else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
800 if (gas_t->address) {
801 void __iomem *addr;
802 size_t access_width;
803
804 if (!osc_cpc_flexible_adr_space_confirmed) {
805 pr_debug("Flexible address space capability not supported\n");
806 if (!cpc_supported_by_cpu())
807 goto out_free;
808 }
809
810 access_width = GET_BIT_WIDTH(gas_t) / 8;
811 addr = ioremap(gas_t->address, access_width);
812 if (!addr)
813 goto out_free;
814 cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
815 }
816 } else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
817 if (gas_t->access_width < 1 || gas_t->access_width > 3) {
818 /*
819 * 1 = 8-bit, 2 = 16-bit, and 3 = 32-bit.
820 * SystemIO doesn't implement 64-bit
821 * registers.
822 */
823 pr_debug("Invalid access width %d for SystemIO register in _CPC\n",
824 gas_t->access_width);
825 goto out_free;
826 }
827 if (gas_t->address & OVER_16BITS_MASK) {
828 /* SystemIO registers use 16-bit integer addresses */
829 pr_debug("Invalid IO port %llu for SystemIO register in _CPC\n",
830 gas_t->address);
831 goto out_free;
832 }
833 if (!osc_cpc_flexible_adr_space_confirmed) {
834 pr_debug("Flexible address space capability not supported\n");
835 if (!cpc_supported_by_cpu())
836 goto out_free;
837 }
838 } else {
839 if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
840 /* Support only PCC, SystemMemory, SystemIO, and FFH type regs. */
841 pr_debug("Unsupported register type (%d) in _CPC\n",
842 gas_t->space_id);
843 goto out_free;
844 }
845 }
846
847 cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
848 memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
849 } else {
850 pr_debug("Invalid entry type (%d) in _CPC for CPU:%d\n",
851 i, pr->id);
852 goto out_free;
853 }
854 }
855 per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;
856
857 /*
858 * Initialize the remaining cpc_regs as unsupported.
859 * Example: In case FW exposes CPPC v2, the below loop will initialize
860 * LOWEST_FREQ and NOMINAL_FREQ regs as unsupported
861 */
862 for (i = num_ent - 2; i < MAX_CPC_REG_ENT; i++) {
863 cpc_ptr->cpc_regs[i].type = ACPI_TYPE_INTEGER;
864 cpc_ptr->cpc_regs[i].cpc_entry.int_value = 0;
865 }
866
868 /* Store CPU Logical ID */
869 cpc_ptr->cpu_id = pr->id;
870 spin_lock_init(&cpc_ptr->rmw_lock);
871
872 /* Parse PSD data for this CPU */
873 ret = acpi_get_psd(cpc_ptr, handle);
874 if (ret)
875 goto out_free;
876
877 /* Register the PCC channel once per PCC subspace ID. */
878 if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
879 ret = register_pcc_channel(pcc_subspace_id);
880 if (ret)
881 goto out_free;
882
883 init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
884 init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
885 }
886
887 /* Everything looks okay */
888 pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);
889
890 /* Add per logical CPU nodes for reading its feedback counters. */
891 cpu_dev = get_cpu_device(pr->id);
892 if (!cpu_dev) {
893 ret = -EINVAL;
894 goto out_free;
895 }
896
897 /* Plug PSD data into this CPU's CPC descriptor. */
898 per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
899
900 ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
901 "acpi_cppc");
902 if (ret) {
903 per_cpu(cpc_desc_ptr, pr->id) = NULL;
904 kobject_put(&cpc_ptr->kobj);
905 goto out_free;
906 }
907
908 arch_init_invariance_cppc();
909
910 kfree(output.pointer);
911 return 0;
912
913 out_free:
914 /* Free all the mapped sys mem areas for this CPU */
915 for (i = 2; i < cpc_ptr->num_entries; i++) {
916 void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
917
918 if (addr)
919 iounmap(addr);
920 }
921 kfree(cpc_ptr);
922
923 out_buf_free:
924 kfree(output.pointer);
925 return ret;
926 }
927 EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);
928
929 /**
930 * acpi_cppc_processor_exit - Cleanup CPC structs.
931 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
932 *
933 * Return: Void
934 */
935 void acpi_cppc_processor_exit(struct acpi_processor *pr)
936 {
937 struct cpc_desc *cpc_ptr;
938 unsigned int i;
939 void __iomem *addr;
940 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);
941
942 if (pcc_ss_id >= 0 && pcc_data[pcc_ss_id]) {
943 if (pcc_data[pcc_ss_id]->pcc_channel_acquired) {
944 pcc_data[pcc_ss_id]->refcount--;
945 if (!pcc_data[pcc_ss_id]->refcount) {
946 pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
947 kfree(pcc_data[pcc_ss_id]);
948 pcc_data[pcc_ss_id] = NULL;
949 }
950 }
951 }
952
953 cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
954 if (!cpc_ptr)
955 return;
956
957 /* Free all the mapped sys mem areas for this CPU */
958 for (i = 2; i < cpc_ptr->num_entries; i++) {
959 addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
960 if (addr)
961 iounmap(addr);
962 }
963
964 kobject_put(&cpc_ptr->kobj);
965 kfree(cpc_ptr);
966 }
967 EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);
968
969 /**
970 * cpc_read_ffh() - Read FFH register
971 * @cpunum: CPU number to read
972 * @reg: cppc register information
973 * @val: place holder for return value
974 *
975 * Read bit_width bits from a specified address and bit_offset
976 *
977 * Return: 0 for success and error code
978 */
979 int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
980 {
981 return -ENOTSUPP;
982 }
983
984 /**
985 * cpc_write_ffh() - Write FFH register
986 * @cpunum: CPU number to write
987 * @reg: cppc register information
988 * @val: value to write
989 *
990 * Write value of bit_width bits to a specified address and bit_offset
991 *
992 * Return: 0 for success and error code
993 */
994 int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
995 {
996 return -ENOTSUPP;
997 }
998
999 /*
1000 * Since cpc_read() and cpc_write() are called while holding pcc_lock, they
1001 * should be as fast as possible. We have already mapped the PCC subspace
1002 * during init, so we can directly write to it.
1003 */
1004
1005 static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
1006 {
1007 void __iomem *vaddr = NULL;
1008 int size;
1009 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
1010 struct cpc_reg *reg = &reg_res->cpc_entry.reg;
1011
1012 if (reg_res->type == ACPI_TYPE_INTEGER) {
1013 *val = reg_res->cpc_entry.int_value;
1014 return 0;
1015 }
1016
1017 *val = 0;
1018 size = GET_BIT_WIDTH(reg);
1019
1020 if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1021 u32 val_u32;
1022 acpi_status status;
1023
1024 status = acpi_os_read_port((acpi_io_address)reg->address,
1025 &val_u32, size);
1026 if (ACPI_FAILURE(status)) {
1027 pr_debug("Error: Failed to read SystemIO port %llx\n",
1028 reg->address);
1029 return -EFAULT;
1030 }
1031
1032 *val = val_u32;
1033 return 0;
1034 } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) {
1035 /*
1036 * For registers in PCC space, the register size is determined
1037 * by the bit width field; the access size is used to indicate
1038 * the PCC subspace id.
1039 */
1040 size = reg->bit_width;
1041 vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
1042 } else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1044 vaddr = reg_res->sys_mem_vaddr;
1045 else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
1046 return cpc_read_ffh(cpu, reg, val);
1047 else
1048 return acpi_os_read_memory((acpi_physical_address)reg->address,
1049 val, size);
1050
1051 switch (size) {
1052 case 8:
1053 *val = readb_relaxed(vaddr);
1054 break;
1055 case 16:
1056 *val = readw_relaxed(vaddr);
1057 break;
1058 case 32:
1059 *val = readl_relaxed(vaddr);
1060 break;
1061 case 64:
1062 *val = readq_relaxed(vaddr);
1063 break;
1064 default:
1065 if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1066 pr_debug("Error: Cannot read %u bit width from system memory: 0x%llx\n",
1067 size, reg->address);
1068 } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
1069 pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
1070 size, pcc_ss_id);
1071 }
1072 return -EFAULT;
1073 }
1074
1075 if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1076 *val = MASK_VAL_READ(reg, *val);
1077
1078 return 0;
1079 }
1080
1081 static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
1082 {
1083 int ret_val = 0;
1084 int size;
1085 u64 prev_val;
1086 void __iomem *vaddr = NULL;
1087 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
1088 struct cpc_reg *reg = &reg_res->cpc_entry.reg;
1089 struct cpc_desc *cpc_desc;
1090
1091 size = GET_BIT_WIDTH(reg);
1092
1093 if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1094 acpi_status status;
1095
1096 status = acpi_os_write_port((acpi_io_address)reg->address,
1097 (u32)val, size);
1098 if (ACPI_FAILURE(status)) {
1099 pr_debug("Error: Failed to write SystemIO port %llx\n",
1100 reg->address);
1101 return -EFAULT;
1102 }
1103
1104 return 0;
1105 } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) {
1106 /*
1107 * For registers in PCC space, the register size is determined
1108 * by the bit width field; the access size is used to indicate
1109 * the PCC subspace id.
1110 */
1111 size = reg->bit_width;
1112 vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
1113 } else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1115 vaddr = reg_res->sys_mem_vaddr;
1116 else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
1117 return cpc_write_ffh(cpu, reg, val);
1118 else
1119 return acpi_os_write_memory((acpi_physical_address)reg->address,
1120 val, size);
1121
1122 if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1123 cpc_desc = per_cpu(cpc_desc_ptr, cpu);
1124 if (!cpc_desc) {
1125 pr_debug("No CPC descriptor for CPU:%d\n", cpu);
1126 return -ENODEV;
1127 }
1128
1129 spin_lock(&cpc_desc->rmw_lock);
1130 switch (size) {
1131 case 8:
1132 prev_val = readb_relaxed(vaddr);
1133 break;
1134 case 16:
1135 prev_val = readw_relaxed(vaddr);
1136 break;
1137 case 32:
1138 prev_val = readl_relaxed(vaddr);
1139 break;
1140 case 64:
1141 prev_val = readq_relaxed(vaddr);
1142 break;
1143 default:
1144 spin_unlock(&cpc_desc->rmw_lock);
1145 return -EFAULT;
1146 }
1147 /* MASK_VAL_WRITE() already merges the bits of prev_val outside the field */
1148 val = MASK_VAL_WRITE(reg, prev_val, val);
1149 }
1150
1151 switch (size) {
1152 case 8:
1153 writeb_relaxed(val, vaddr);
1154 break;
1155 case 16:
1156 writew_relaxed(val, vaddr);
1157 break;
1158 case 32:
1159 writel_relaxed(val, vaddr);
1160 break;
1161 case 64:
1162 writeq_relaxed(val, vaddr);
1163 break;
1164 default:
1165 if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1166 pr_debug("Error: Cannot write %u bit width to system memory: 0x%llx\n",
1167 size, reg->address);
1168 } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
1169 pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
1170 size, pcc_ss_id);
1171 }
1172 ret_val = -EFAULT;
1173 break;
1174 }
1175
1176 if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1177 spin_unlock(&cpc_desc->rmw_lock);
1178
1179 return ret_val;
1180 }
1181
1182 static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
1183 {
1184 struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
1185 struct cpc_register_resource *reg;
1186
1187 if (!cpc_desc) {
1188 pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
1189 return -ENODEV;
1190 }
1191
1192 reg = &cpc_desc->cpc_regs[reg_idx];
1193
1194 if (CPC_IN_PCC(reg)) {
1195 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
1196 struct cppc_pcc_data *pcc_ss_data = NULL;
1197 int ret = 0;
1198
1199 if (pcc_ss_id < 0)
1200 return -EIO;
1201
1202 pcc_ss_data = pcc_data[pcc_ss_id];
1203
1204 down_write(&pcc_ss_data->pcc_lock);
1205
1206 if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
1207 cpc_read(cpunum, reg, perf);
1208 else
1209 ret = -EIO;
1210
1211 up_write(&pcc_ss_data->pcc_lock);
1212
1213 return ret;
1214 }
1215
1216 cpc_read(cpunum, reg, perf);
1217
1218 return 0;
1219 }
1220
1221 /**
1222 * cppc_get_desired_perf - Get the desired performance register value.
1223 * @cpunum: CPU from which to get desired performance.
1224 * @desired_perf: Return address.
1225 *
1226 * Return: 0 for success, -EIO otherwise.
1227 */
1228 int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
1229 {
1230 return cppc_get_perf(cpunum, DESIRED_PERF, desired_perf);
1231 }
1232 EXPORT_SYMBOL_GPL(cppc_get_desired_perf);
1233
1234 /**
1235 * cppc_get_nominal_perf - Get the nominal performance register value.
1236 * @cpunum: CPU from which to get nominal performance.
1237 * @nominal_perf: Return address.
1238 *
1239 * Return: 0 for success, -EIO otherwise.
1240 */
1241 int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
1242 {
1243 return cppc_get_perf(cpunum, NOMINAL_PERF, nominal_perf);
1244 }
1245
1246 /**
1247 * cppc_get_highest_perf - Get the highest performance register value.
1248 * @cpunum: CPU from which to get highest performance.
1249 * @highest_perf: Return address.
1250 *
1251 * Return: 0 for success, -EIO otherwise.
1252 */
1253 int cppc_get_highest_perf(int cpunum, u64 *highest_perf)
1254 {
1255 return cppc_get_perf(cpunum, HIGHEST_PERF, highest_perf);
1256 }
1257 EXPORT_SYMBOL_GPL(cppc_get_highest_perf);
1258
1259 /**
1260 * cppc_get_epp_perf - Get the epp register value.
1261 * @cpunum: CPU from which to get epp preference value.
1262 * @epp_perf: Return address.
1263 *
1264 * Return: 0 for success, -EIO otherwise.
1265 */
1266 int cppc_get_epp_perf(int cpunum, u64 *epp_perf)
1267 {
1268 return cppc_get_perf(cpunum, ENERGY_PERF, epp_perf);
1269 }
1270 EXPORT_SYMBOL_GPL(cppc_get_epp_perf);
1271
1272 /**
1273 * cppc_get_perf_caps - Get a CPU's performance capabilities.
1274 * @cpunum: CPU from which to get capabilities info.
1275 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
1276 *
1277 * Return: 0 for success with perf_caps populated else -ERRNO.
1278 */
1279 int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
1280 {
1281 struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
1282 struct cpc_register_resource *highest_reg, *lowest_reg,
1283 *lowest_non_linear_reg, *nominal_reg, *guaranteed_reg,
1284 *low_freq_reg = NULL, *nom_freq_reg = NULL;
1285 u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0;
1286 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
1287 struct cppc_pcc_data *pcc_ss_data = NULL;
1288 int ret = 0, regs_in_pcc = 0;
1289
1290 if (!cpc_desc) {
1291 pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
1292 return -ENODEV;
1293 }
1294
1295 highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
1296 lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
1297 lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
1298 nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
1299 low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ];
1300 nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ];
1301 guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF];
1302
1303 /* Are any of the regs in PCC? */
1304 if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
1305 CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) ||
1306 CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg)) {
1307 if (pcc_ss_id < 0) {
1308 pr_debug("Invalid pcc_ss_id\n");
1309 return -ENODEV;
1310 }
1311 pcc_ss_data = pcc_data[pcc_ss_id];
1312 regs_in_pcc = 1;
1313 down_write(&pcc_ss_data->pcc_lock);
1314 /* Ring doorbell once to update PCC subspace */
1315 if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
1316 ret = -EIO;
1317 goto out_err;
1318 }
1319 }
1320
1321 cpc_read(cpunum, highest_reg, &high);
1322 perf_caps->highest_perf = high;
1323
1324 cpc_read(cpunum, lowest_reg, &low);
1325 perf_caps->lowest_perf = low;
1326
1327 cpc_read(cpunum, nominal_reg, &nom);
1328 perf_caps->nominal_perf = nom;
1329
1330 if (guaranteed_reg->type != ACPI_TYPE_BUFFER ||
1331 IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
1332 perf_caps->guaranteed_perf = 0;
1333 } else {
1334 cpc_read(cpunum, guaranteed_reg, &guaranteed);
1335 perf_caps->guaranteed_perf = guaranteed;
1336 }
1337
1338 cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
1339 perf_caps->lowest_nonlinear_perf = min_nonlinear;
1340
1341 if (!high || !low || !nom || !min_nonlinear)
1342 ret = -EFAULT;
1343
1344 /* Read optional lowest and nominal frequencies if present */
1345 if (CPC_SUPPORTED(low_freq_reg))
1346 cpc_read(cpunum, low_freq_reg, &low_f);
1347
1348 if (CPC_SUPPORTED(nom_freq_reg))
1349 cpc_read(cpunum, nom_freq_reg, &nom_f);
1350
1351 perf_caps->lowest_freq = low_f;
1352 perf_caps->nominal_freq = nom_f;
1353
1355 out_err:
1356 if (regs_in_pcc)
1357 up_write(&pcc_ss_data->pcc_lock);
1358 return ret;
1359 }
1360 EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
1361
1362 /**
1363 * cppc_perf_ctrs_in_pcc - Check if any perf counters are in a PCC region.
1364 *
1365 * CPPC has flexibility about how CPU performance counters are accessed.
1366 * One of the choices is PCC regions, which can have a high access latency. This
1367 * routine allows callers of cppc_get_perf_ctrs() to know this ahead of time.
1368 *
1369 * Return: true if any of the counters are in PCC regions, false otherwise
1370 */
1371 bool cppc_perf_ctrs_in_pcc(void)
1372 {
1373 int cpu;
1374
1375 for_each_present_cpu(cpu) {
1376 struct cpc_register_resource *ref_perf_reg;
1377 struct cpc_desc *cpc_desc;
1378
1379 cpc_desc = per_cpu(cpc_desc_ptr, cpu);
1380
1381 if (CPC_IN_PCC(&cpc_desc->cpc_regs[DELIVERED_CTR]) ||
1382 CPC_IN_PCC(&cpc_desc->cpc_regs[REFERENCE_CTR]) ||
1383 CPC_IN_PCC(&cpc_desc->cpc_regs[CTR_WRAP_TIME]))
1384 return true;
1385
1387 ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
1388
1389 /*
1390 * If reference perf register is not supported then we should
1391 * use the nominal perf value
1392 */
1393 if (!CPC_SUPPORTED(ref_perf_reg))
1394 ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
1395
1396 if (CPC_IN_PCC(ref_perf_reg))
1397 return true;
1398 }
1399
1400 return false;
1401 }
1402 EXPORT_SYMBOL_GPL(cppc_perf_ctrs_in_pcc);
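/*
 * Hypothetical usage sketch: a caller may avoid sampling the counters
 * from a latency-sensitive path when they sit behind a slow PCC channel
 * (cpu, ret and fb_ctrs are assumed to be declared by the caller):
 *
 *	if (!cppc_perf_ctrs_in_pcc())
 *		ret = cppc_get_perf_ctrs(cpu, &fb_ctrs);
 */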
1403
1404 /**
1405 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
1406 * @cpunum: CPU from which to read counters.
1407 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
1408 *
1409 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
1410 */
1411 int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
1412 {
1413 struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
1414 struct cpc_register_resource *delivered_reg, *reference_reg,
1415 *ref_perf_reg, *ctr_wrap_reg;
1416 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
1417 struct cppc_pcc_data *pcc_ss_data = NULL;
1418 u64 delivered, reference, ref_perf, ctr_wrap_time;
1419 int ret = 0, regs_in_pcc = 0;
1420
1421 if (!cpc_desc) {
1422 pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
1423 return -ENODEV;
1424 }
1425
1426 delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
1427 reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
1428 ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
1429 ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];
1430
1431 /*
1432 * If reference perf register is not supported then we should
1433 * use the nominal perf value
1434 */
1435 if (!CPC_SUPPORTED(ref_perf_reg))
1436 ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
1437
1438 /* Are any of the regs in PCC? */
1439 if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
1440 CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
1441 if (pcc_ss_id < 0) {
1442 pr_debug("Invalid pcc_ss_id\n");
1443 return -ENODEV;
1444 }
1445 pcc_ss_data = pcc_data[pcc_ss_id];
1446 down_write(&pcc_ss_data->pcc_lock);
1447 regs_in_pcc = 1;
1448 /* Ring doorbell once to update PCC subspace */
1449 if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
1450 ret = -EIO;
1451 goto out_err;
1452 }
1453 }
1454
1455 cpc_read(cpunum, delivered_reg, &delivered);
1456 cpc_read(cpunum, reference_reg, &reference);
1457 cpc_read(cpunum, ref_perf_reg, &ref_perf);
1458
1459 /*
1460 * Per the spec, if the optional ctr_wrap_time register is unsupported, then
1461 * the performance counters are assumed to never wrap during the lifetime
1462 * of the platform.
1463 */
1464 ctr_wrap_time = (u64)(~((u64)0));
1465 if (CPC_SUPPORTED(ctr_wrap_reg))
1466 cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);
1467
1468 if (!delivered || !reference || !ref_perf) {
1469 ret = -EFAULT;
1470 goto out_err;
1471 }
1472
1473 perf_fb_ctrs->delivered = delivered;
1474 perf_fb_ctrs->reference = reference;
1475 perf_fb_ctrs->reference_perf = ref_perf;
1476 perf_fb_ctrs->wraparound_time = ctr_wrap_time;
1477 out_err:
1478 if (regs_in_pcc)
1479 up_write(&pcc_ss_data->pcc_lock);
1480 return ret;
1481 }
1482 EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
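/*
 * Sketch of how callers typically derive an average delivered performance
 * level from two counter snapshots t0 and t1 (names are hypothetical; see
 * the cppc_cpufreq driver for the in-tree user of this pattern):
 *
 *	delta_ref = t1.reference - t0.reference;
 *	delta_del = t1.delivered - t0.delivered;
 *	if (delta_ref)
 *		perf = div64_u64(t0.reference_perf * delta_del, delta_ref);
 */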
1483
1484 /*
1485 * Set Energy Performance Preference Register value through
1486 * Performance Controls Interface
1487 */
1488 int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
1489 {
1490 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
1491 struct cpc_register_resource *epp_set_reg;
1492 struct cpc_register_resource *auto_sel_reg;
1493 struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
1494 struct cppc_pcc_data *pcc_ss_data = NULL;
1495 int ret;
1496
1497 if (!cpc_desc) {
1498 pr_debug("No CPC descriptor for CPU:%d\n", cpu);
1499 return -ENODEV;
1500 }
1501
1502 auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
1503 epp_set_reg = &cpc_desc->cpc_regs[ENERGY_PERF];
1504
1505 if (CPC_IN_PCC(epp_set_reg) || CPC_IN_PCC(auto_sel_reg)) {
1506 if (pcc_ss_id < 0) {
1507 pr_debug("Invalid pcc_ss_id for CPU:%d\n", cpu);
1508 return -ENODEV;
1509 }
1510
1511 if (CPC_SUPPORTED(auto_sel_reg)) {
1512 ret = cpc_write(cpu, auto_sel_reg, enable);
1513 if (ret)
1514 return ret;
1515 }
1516
1517 if (CPC_SUPPORTED(epp_set_reg)) {
1518 ret = cpc_write(cpu, epp_set_reg, perf_ctrls->energy_perf);
1519 if (ret)
1520 return ret;
1521 }
1522
1523 pcc_ss_data = pcc_data[pcc_ss_id];
1524
1525 down_write(&pcc_ss_data->pcc_lock);
1526 /* after writing CPC, transfer the ownership of PCC to platform */
1527 ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
1528 up_write(&pcc_ss_data->pcc_lock);
1529 } else if (osc_cpc_flexible_adr_space_confirmed &&
1530 CPC_SUPPORTED(epp_set_reg) && CPC_IN_FFH(epp_set_reg)) {
1531 ret = cpc_write(cpu, epp_set_reg, perf_ctrls->energy_perf);
1532 } else {
1533 ret = -ENOTSUPP;
1534 pr_debug("_CPC in PCC and _CPC in FFH are not supported\n");
1535 }
1536
1537 return ret;
1538 }
1539 EXPORT_SYMBOL_GPL(cppc_set_epp_perf);
1540
1541 /**
1542 * cppc_get_auto_sel_caps - Read autonomous selection register.
1543 * @cpunum : CPU from which to read register.
1544 * @perf_caps : struct where autonomous selection register value is updated.
1545 */
1546 int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps)
1547 {
1548 struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
1549 struct cpc_register_resource *auto_sel_reg;
1550 u64 auto_sel;
1551
1552 if (!cpc_desc) {
1553 pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
1554 return -ENODEV;
1555 }
1556
1557 auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
1558
1559 if (!CPC_SUPPORTED(auto_sel_reg))
1560 pr_warn_once("Autonomous mode is not unsupported!\n");
1561
1562 if (CPC_IN_PCC(auto_sel_reg)) {
1563 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
1564 struct cppc_pcc_data *pcc_ss_data = NULL;
1565 int ret = 0;
1566
1567 if (pcc_ss_id < 0)
1568 return -ENODEV;
1569
1570 pcc_ss_data = pcc_data[pcc_ss_id];
1571
1572 down_write(&pcc_ss_data->pcc_lock);
1573
1574 if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0) {
1575 cpc_read(cpunum, auto_sel_reg, &auto_sel);
1576 perf_caps->auto_sel = (bool)auto_sel;
1577 } else {
1578 ret = -EIO;
1579 }
1580
1581 up_write(&pcc_ss_data->pcc_lock);
1582
1583 return ret;
1584 }
1585
1586 return 0;
1587 }
1588 EXPORT_SYMBOL_GPL(cppc_get_auto_sel_caps);
1589
1590 /**
1591 * cppc_set_auto_sel - Write autonomous selection register.
1592 * @cpu : CPU to which to write register.
1593 * @enable : the desired value of the autonomous selection register to be written.
1594 */
1595 int cppc_set_auto_sel(int cpu, bool enable)
1596 {
1597 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
1598 struct cpc_register_resource *auto_sel_reg;
1599 struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
1600 struct cppc_pcc_data *pcc_ss_data = NULL;
1601 int ret = -EINVAL;
1602
1603 if (!cpc_desc) {
1604 pr_debug("No CPC descriptor for CPU:%d\n", cpu);
1605 return -ENODEV;
1606 }
1607
1608 auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
1609
1610 if (CPC_IN_PCC(auto_sel_reg)) {
1611 if (pcc_ss_id < 0) {
1612 pr_debug("Invalid pcc_ss_id\n");
1613 return -ENODEV;
1614 }
1615
1616 if (CPC_SUPPORTED(auto_sel_reg)) {
1617 ret = cpc_write(cpu, auto_sel_reg, enable);
1618 if (ret)
1619 return ret;
1620 }
1621
1622 pcc_ss_data = pcc_data[pcc_ss_id];
1623
1624 down_write(&pcc_ss_data->pcc_lock);
1625 /* after writing CPC, transfer the ownership of PCC to platform */
1626 ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
1627 up_write(&pcc_ss_data->pcc_lock);
1628 } else {
1629 ret = -ENOTSUPP;
1630 pr_debug("_CPC in PCC is not supported\n");
1631 }
1632
1633 return ret;
1634 }
1635 EXPORT_SYMBOL_GPL(cppc_set_auto_sel);
1636
1637 /**
1638 * cppc_set_enable - Enable or disable CPPC on the processor by writing the
1639 * Continuous Performance Control package EnableRegister field.
1640 * @cpu: CPU for which to enable CPPC register.
1641 * @enable: 0 - disable, 1 - enable CPPC feature on the processor.
1642 *
1643 * Return: 0 for success, -ERRNO or -EIO otherwise.
1644 */
1645 int cppc_set_enable(int cpu, bool enable)
1646 {
1647 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
1648 struct cpc_register_resource *enable_reg;
1649 struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
1650 struct cppc_pcc_data *pcc_ss_data = NULL;
1651 int ret = -EINVAL;
1652
1653 if (!cpc_desc) {
1654 pr_debug("No CPC descriptor for CPU:%d\n", cpu);
1655 return -EINVAL;
1656 }
1657
1658 enable_reg = &cpc_desc->cpc_regs[ENABLE];
1659
1660 if (CPC_IN_PCC(enable_reg)) {
1661
1662 if (pcc_ss_id < 0)
1663 return -EIO;
1664
1665 ret = cpc_write(cpu, enable_reg, enable);
1666 if (ret)
1667 return ret;
1668
1669 pcc_ss_data = pcc_data[pcc_ss_id];
1670
1671 down_write(&pcc_ss_data->pcc_lock);
1672 /* after writing CPC, transfer the ownership of PCC to platform */
1673 ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
1674 up_write(&pcc_ss_data->pcc_lock);
1675 return ret;
1676 }
1677
1678 return cpc_write(cpu, enable_reg, enable);
1679 }
1680 EXPORT_SYMBOL_GPL(cppc_set_enable);
1681
1682 /**
1683 * cppc_set_perf - Set a CPU's performance controls.
1684 * @cpu: CPU for which to set performance controls.
1685 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
1686 *
1687 * Return: 0 for success, -ERRNO otherwise.
1688 */
1689 int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
1690 {
1691 struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
1692 struct cpc_register_resource *desired_reg, *min_perf_reg, *max_perf_reg;
1693 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
1694 struct cppc_pcc_data *pcc_ss_data = NULL;
1695 int ret = 0;
1696
1697 if (!cpc_desc) {
1698 pr_debug("No CPC descriptor for CPU:%d\n", cpu);
1699 return -ENODEV;
1700 }
1701
1702 desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
1703 min_perf_reg = &cpc_desc->cpc_regs[MIN_PERF];
1704 max_perf_reg = &cpc_desc->cpc_regs[MAX_PERF];
1705
1706 /*
1707 * This is Phase-I where we want to write to CPC registers
1708 * -> We want all CPUs to be able to execute this phase in parallel
1709 *
1710 * Since read_lock can be acquired by multiple CPUs simultaneously we
1711 * achieve that goal here
1712 */
1713 if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg)) {
1714 if (pcc_ss_id < 0) {
1715 pr_debug("Invalid pcc_ss_id\n");
1716 return -ENODEV;
1717 }
1718 pcc_ss_data = pcc_data[pcc_ss_id];
1719 down_read(&pcc_ss_data->pcc_lock); /* BEGIN Phase-I */
1720 if (pcc_ss_data->platform_owns_pcc) {
1721 ret = check_pcc_chan(pcc_ss_id, false);
1722 if (ret) {
1723 up_read(&pcc_ss_data->pcc_lock);
1724 return ret;
1725 }
1726 }
1727 /*
1728 * Update the pending_write to make sure a PCC CMD_READ will not
1729 * arrive and steal the channel during the switch to write lock
1730 */
1731 pcc_ss_data->pending_pcc_write_cmd = true;
1732 cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
1733 cpc_desc->write_cmd_status = 0;
1734 }
1735
1736 cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);
1737
1738 /*
1739 * Only write if min_perf and max_perf are not zero. Some drivers pass zero
1740 * value to min and max perf, but they don't mean to set the zero value,
1741 * they just don't want to write to those registers.
1742 */
1743 if (perf_ctrls->min_perf)
1744 cpc_write(cpu, min_perf_reg, perf_ctrls->min_perf);
1745 if (perf_ctrls->max_perf)
1746 cpc_write(cpu, max_perf_reg, perf_ctrls->max_perf);
1747
1748 if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg))
1749 up_read(&pcc_ss_data->pcc_lock); /* END Phase-I */
1750 /*
1751 * This is Phase-II where we transfer the ownership of PCC to Platform
1752 *
1753 * Short summary: if we think of a group of cppc_set_perf requests that
1754 * happen in a short overlapping interval, the last CPU to come out of
1755 * Phase-I will enter Phase-II and ring the doorbell.
1756 *
1757 * We have the following requirements for Phase-II:
1758 * 1. We want to execute Phase-II only when there are no CPUs
1759 * currently executing in Phase-I
1760 * 2. Once we start Phase-II we want to avoid all other CPUs from
1761 * entering Phase-I.
1762 * 3. We want only one CPU among all those who went through Phase-I
1763 * to run phase-II
1764 *
1765 * If write_trylock fails to get the lock and doesn't transfer the
1766 * PCC ownership to the platform, then one of the following will be TRUE
1767 * 1. There is at-least one CPU in Phase-I which will later execute
1768 * write_trylock, so the CPUs in Phase-I will be responsible for
1769 * executing the Phase-II.
1770 * 2. Some other CPU has beaten this CPU to successfully execute the
1771 * write_trylock and has already acquired the write_lock. We know for a
1772 * fact it (other CPU acquiring the write_lock) couldn't have happened
1773 * before this CPU's Phase-I as we held the read_lock.
1774 * 3. Some other CPU executing pcc CMD_READ has stolen the
1775 * down_write, in which case, send_pcc_cmd will check for pending
1776 * CMD_WRITE commands by checking the pending_pcc_write_cmd.
1777 * So this CPU can be certain that its request will be delivered
1778 * So in all cases, this CPU knows that its request will be delivered
1779 * by another CPU and can return
1780 *
1781 * After getting the down_write we still need to check for
1782 * pending_pcc_write_cmd to take care of the following scenario
1783 * The thread running this code could be scheduled out between
1784 * Phase-I and Phase-II. Before it is scheduled back on, another CPU
1785 * could have delivered the request to Platform by triggering the
1786 * doorbell and transferred the ownership of PCC to platform. So this
1787 * avoids triggering an unnecessary doorbell and more importantly before
1788 * triggering the doorbell it makes sure that the PCC channel ownership
1789 * is still with OSPM.
1790 * pending_pcc_write_cmd can also be cleared by a different CPU, if
1791 * there was a pcc CMD_READ waiting on down_write and it steals the lock
1792 * before the pcc CMD_WRITE is completed. send_pcc_cmd checks for this
1793 * case during a CMD_READ and if there are pending writes it delivers
1794 * the write command before servicing the read command
1795 */
	if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg)) {
		if (down_write_trylock(&pcc_ss_data->pcc_lock)) {	/* BEGIN Phase-II */
			/* Update only if there are pending write commands */
			if (pcc_ss_data->pending_pcc_write_cmd)
				send_pcc_cmd(pcc_ss_id, CMD_WRITE);
			up_write(&pcc_ss_data->pcc_lock);	/* END Phase-II */
		} else
			/* Wait until pcc_write_cnt is updated by send_pcc_cmd */
			wait_event(pcc_ss_data->pcc_write_wait_q,
				   cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);

		/* send_pcc_cmd updates the status in case of failure */
		ret = cpc_desc->write_cmd_status;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_perf);
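
/*
 * Usage sketch (illustrative, not part of this file): how a cpufreq
 * driver's frequency-change path might call cppc_set_perf(). The helper
 * name is hypothetical, and the desired_perf value would typically be
 * derived beforehand, e.g. via cppc_khz_to_perf().
 */
static int __maybe_unused cppc_request_perf_example(int cpu, u32 desired_perf)
{
	struct cppc_perf_ctrls perf_ctrls = {
		/* Leaving min_perf/max_perf at zero skips those registers */
		.desired_perf = desired_perf,
	};

	return cppc_set_perf(cpu, &perf_ctrls);
}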

/**
 * cppc_get_transition_latency - returns frequency transition latency in ns
 * @cpu_num: CPU number for per_cpu().
 *
 * ACPI CPPC does not explicitly specify how a platform can specify the
 * transition latency for performance change requests. The closest we have
 * is the timing information from the PCCT tables which provides the info
 * on the number and frequency of PCC commands the platform can handle.
 *
 * If desired_reg is in the SystemMemory or SystemIo ACPI address space,
 * then assume there is no latency.
 */
unsigned int cppc_get_transition_latency(int cpu_num)
{
	/*
	 * Expected transition latency is based on the PCCT timing values.
	 * Below are the definitions from the ACPI spec:
	 * pcc_nominal - Expected latency to process a command, in microseconds
	 * pcc_mpar    - The maximum number of periodic requests that the
	 *               subspace channel can support, reported in commands
	 *               per minute. 0 indicates no limitation.
	 * pcc_mrtt    - The minimum amount of time that OSPM must wait after
	 *               the completion of a command before issuing the next
	 *               command, in microseconds.
	 */
	unsigned int latency_ns = 0;
	struct cpc_desc *cpc_desc;
	struct cpc_register_resource *desired_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
	struct cppc_pcc_data *pcc_ss_data;

	cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
	if (!cpc_desc)
		return CPUFREQ_ETERNAL;

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
	if (CPC_IN_SYSTEM_MEMORY(desired_reg) || CPC_IN_SYSTEM_IO(desired_reg))
		return 0;
	else if (!CPC_IN_PCC(desired_reg))
		return CPUFREQ_ETERNAL;

	if (pcc_ss_id < 0)
		return CPUFREQ_ETERNAL;

	pcc_ss_data = pcc_data[pcc_ss_id];
	if (pcc_ss_data->pcc_mpar)
		latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);

	latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
	latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);

	return latency_ns;
}
EXPORT_SYMBOL_GPL(cppc_get_transition_latency);
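
/*
 * Worked example with hypothetical PCCT values: pcc_mpar = 6000
 * commands/minute gives 60 * (1000000000 / 6000) = 9999960 ns (~10 ms);
 * pcc_nominal = 2000 us and pcc_mrtt = 1000 us contribute 2000000 ns and
 * 1000000 ns respectively. The reported transition latency is the maximum
 * of the three terms, i.e. ~10 ms.
 */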

/* Minimum struct length needed for the DMI processor entry we want */
#define DMI_ENTRY_PROCESSOR_MIN_LENGTH	48

/* Offset in the DMI processor structure for the max frequency */
#define DMI_PROCESSOR_MAX_SPEED		0x14

/* Callback function used to retrieve the max frequency from DMI */
static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
{
	const u8 *dmi_data = (const u8 *)dm;
	u16 *mhz = (u16 *)private;

	if (dm->type == DMI_ENTRY_PROCESSOR &&
	    dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
		u16 val = (u16)get_unaligned((const u16 *)
				(dmi_data + DMI_PROCESSOR_MAX_SPEED));
		*mhz = umax(val, *mhz);
	}
}

/* Look up the max frequency in DMI */
static u64 cppc_get_dmi_max_khz(void)
{
	u16 mhz = 0;

	dmi_walk(cppc_find_dmi_mhz, &mhz);

	/*
	 * Real stupid fallback value, just in case there is no
	 * actual value set.
	 */
	mhz = mhz ? mhz : 1;

	return KHZ_PER_MHZ * mhz;
}

/*
 * If CPPC lowest_freq and nominal_freq registers are exposed then we can
 * use them to convert perf to freq and vice versa. The conversion is
 * extrapolated as an affine function passing through the 2 points:
 * - (Low perf, Low freq)
 * - (Nominal perf, Nominal freq)
 */
unsigned int cppc_perf_to_khz(struct cppc_perf_caps *caps, unsigned int perf)
{
	s64 retval, offset = 0;
	static u64 max_khz;
	u64 mul, div;

	if (caps->lowest_freq && caps->nominal_freq) {
		mul = caps->nominal_freq - caps->lowest_freq;
		mul *= KHZ_PER_MHZ;
		div = caps->nominal_perf - caps->lowest_perf;
		offset = caps->nominal_freq * KHZ_PER_MHZ -
			 div64_u64(caps->nominal_perf * mul, div);
	} else {
		if (!max_khz)
			max_khz = cppc_get_dmi_max_khz();
		mul = max_khz;
		div = caps->highest_perf;
	}

	retval = offset + div64_u64(perf * mul, div);
	if (retval >= 0)
		return retval;
	return 0;
}
EXPORT_SYMBOL_GPL(cppc_perf_to_khz);
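
/*
 * Worked example with hypothetical caps: lowest_perf = 10, nominal_perf = 40,
 * lowest_freq = 800 MHz, nominal_freq = 2000 MHz:
 *   mul    = (2000 - 800) * 1000            = 1200000
 *   div    = 40 - 10                        = 30
 *   offset = 2000 * 1000 - (40 * mul) / div = 2000000 - 1600000 = 400000
 * so perf = 25 maps to 400000 + (25 * 1200000) / 30 = 1400000 kHz (1.4 GHz),
 * and the two anchor points land on 800000 kHz and 2000000 kHz as expected.
 */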

unsigned int cppc_khz_to_perf(struct cppc_perf_caps *caps, unsigned int freq)
{
	s64 retval, offset = 0;
	static u64 max_khz;
	u64 mul, div;

	if (caps->lowest_freq && caps->nominal_freq) {
		mul = caps->nominal_perf - caps->lowest_perf;
		div = caps->nominal_freq - caps->lowest_freq;
		/*
		 * We don't need to convert to kHz for computing offset and can
		 * directly use nominal_freq and lowest_freq as the div64_u64
		 * will remove the frequency unit.
		 */
		offset = caps->nominal_perf -
			 div64_u64(caps->nominal_freq * mul, div);
		/* But we need it for computing the perf level. */
		div *= KHZ_PER_MHZ;
	} else {
		if (!max_khz)
			max_khz = cppc_get_dmi_max_khz();
		mul = caps->highest_perf;
		div = max_khz;
	}

	retval = offset + div64_u64(freq * mul, div);
	if (retval >= 0)
		return retval;
	return 0;
}
EXPORT_SYMBOL_GPL(cppc_khz_to_perf);

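/*
 * Worked example, inverting the cppc_perf_to_khz() numbers above
 * (hypothetical caps: lowest_perf = 10, nominal_perf = 40,
 * lowest_freq = 800 MHz, nominal_freq = 2000 MHz):
 *   mul    = 40 - 10                 = 30
 *   div    = 2000 - 800              = 1200 (MHz units cancel in offset)
 *   offset = 40 - (2000 * 30) / 1200 = 40 - 50 = -10
 *   div   *= 1000                    = 1200000 (back to kHz)
 * so freq = 1400000 kHz maps to -10 + (1400000 * 30) / 1200000 = 25,
 * recovering the perf value from the previous example.
 */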