1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
4  *
5  * (C) Copyright 2014, 2015 Linaro Ltd.
6  * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
7  *
8  * CPPC describes a few methods for controlling CPU performance using
9  * information from a per-CPU table called CPC. This table is described in
10  * the ACPI v5.0+ specification. The table consists of a list of
11  * registers, which may be memory mapped or hardware registers, and may
12  * also include some static integer values.
13  *
14  * CPU performance is expressed on an abstract continuous scale, as opposed
15  * to a discretized P-state scale tied to CPU frequency only. In brief, the basic
16  * operation involves:
17  *
18  * - The OS makes a CPU performance request. (It can provide min and max bounds.)
19  *
20  * - The platform (such as a BMC) is free to optimize the request within the
21  *   requested bounds depending on power/thermal budgets etc.
22  *
23  * - The platform conveys its decision back to the OS.
24  *
25  * The communication between the OS and the platform occurs through a medium
26  * called the Platform Communication Channel (PCC). This is a generic mailbox-like
27  * mechanism which includes doorbell semantics to indicate register updates.
28  * See drivers/mailbox/pcc.c for details on PCC.
29  *
30  * Finer details about the PCC and CPPC spec are available in the ACPI v5.1 and
31  * above specifications.
32  */
33 
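/*
 * Illustrative usage sketch (not part of this file): a CPUfreq driver is
 * expected to drive this API roughly as below. The CPU number and the
 * choice of nominal_perf as the target level are hypothetical.
 *
 *	struct cppc_perf_caps caps;
 *	struct cppc_perf_ctrls ctrls = {0};
 *	int cpu = 0;					// hypothetical CPU
 *
 *	if (!cppc_get_perf_caps(cpu, &caps)) {
 *		ctrls.desired_perf = caps.nominal_perf;	// request nominal level
 *		cppc_set_perf(cpu, &ctrls);
 *	}
 */
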
34 #define pr_fmt(fmt)	"ACPI CPPC: " fmt
35 
36 #include <linux/delay.h>
37 #include <linux/iopoll.h>
38 #include <linux/ktime.h>
39 #include <linux/rwsem.h>
40 #include <linux/wait.h>
41 #include <linux/topology.h>
42 
43 #include <acpi/cppc_acpi.h>
44 
45 struct cppc_pcc_data {
46 	struct pcc_mbox_chan *pcc_channel;
47 	void __iomem *pcc_comm_addr;
48 	bool pcc_channel_acquired;
49 	unsigned int deadline_us;
50 	unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;
51 
52 	bool pending_pcc_write_cmd;	/* Any pending/batched PCC write cmds? */
53 	bool platform_owns_pcc;		/* Ownership of PCC subspace */
54 	unsigned int pcc_write_cnt;	/* Running count of PCC write commands */
55 
56 	/*
57 	 * Lock to provide controlled access to the PCC channel.
58 	 *
59 	 * For performance-critical use cases (currently cppc_set_perf):
60 	 *	Take the read_lock and check whether the channel belongs to the
61 	 * OSPM before reading from or writing to the PCC subspace.
62 	 *	Take the write_lock before transferring channel ownership to
63 	 * the platform via a doorbell.
64 	 *	This allows us to batch a number of CPPC requests if they happen
65 	 * to originate at about the same time.
66 	 *
67 	 * For non-performance-critical use cases (init):
68 	 *	Take the write_lock for all purposes; this gives exclusive access.
69 	 */
70 	struct rw_semaphore pcc_lock;
71 
72 	/* Wait queue for CPUs whose requests were batched */
73 	wait_queue_head_t pcc_write_wait_q;
74 	ktime_t last_cmd_cmpl_time;
75 	ktime_t last_mpar_reset;
76 	int mpar_count;
77 	int refcount;
78 };
79 
80 /* Array to represent the PCC channel per subspace ID */
81 static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];
82 /* The cpu_pcc_subspace_idx contains per CPU subspace ID */
83 static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);
84 
85 /*
86  * The cpc_desc structure contains the ACPI register details
87  * as described in the per CPU _CPC tables. The details
88  * include the type of register (e.g. PCC, System IO, FFH etc.)
89  * and destination addresses which lets us READ/WRITE CPU performance
90  * information using the appropriate I/O methods.
91  */
92 static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
93 
94 /* pcc mapped address + header size + offset within PCC subspace */
95 #define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
96 						0x8 + (offs))
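
/*
 * Example: for subspace 3 and a register at PCC offset 0x10, GET_PCC_VADDR
 * resolves to pcc_data[3]->pcc_comm_addr + 0x8 + 0x10. The fixed 0x8 skips
 * the struct acpi_pcct_shared_memory header (signature, command, status).
 */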
97 
98 /* Check if a CPC register is in PCC */
99 #define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&		\
100 				(cpc)->cpc_entry.reg.space_id ==	\
101 				ACPI_ADR_SPACE_PLATFORM_COMM)
102 
103 /* Check if a CPC register is in SystemMemory */
104 #define CPC_IN_SYSTEM_MEMORY(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&	\
105 				(cpc)->cpc_entry.reg.space_id ==	\
106 				ACPI_ADR_SPACE_SYSTEM_MEMORY)
107 
108 /* Check if a CPC register is in SystemIo */
109 #define CPC_IN_SYSTEM_IO(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&	\
110 				(cpc)->cpc_entry.reg.space_id ==	\
111 				ACPI_ADR_SPACE_SYSTEM_IO)
112 
113 /* Evaluates to True if reg is a NULL register descriptor */
114 #define IS_NULL_REG(reg) ((reg)->space_id ==  ACPI_ADR_SPACE_SYSTEM_MEMORY && \
115 				(reg)->address == 0 &&			\
116 				(reg)->bit_width == 0 &&		\
117 				(reg)->bit_offset == 0 &&		\
118 				(reg)->access_width == 0)
119 
120 /* Evaluates to True if an optional cpc field is supported */
121 #define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ?		\
122 				!!(cpc)->cpc_entry.int_value :		\
123 				!IS_NULL_REG(&(cpc)->cpc_entry.reg))
124 /*
125  * Arbitrary Retries in case the remote processor is slow to respond
126  * to PCC commands. Keeping it high enough to cover emulators where
127  * the processors run painfully slow.
128  */
129 #define NUM_RETRIES 500ULL
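
/*
 * Example: with a PCCT nominal latency of 100 us, register_pcc_channel()
 * below ends up with a polling deadline of 500 * 100 us = 50 ms.
 */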
130 
131 #define OVER_16BTS_MASK ~0xFFFFULL
132 
133 #define define_one_cppc_ro(_name)		\
134 static struct kobj_attribute _name =		\
135 __ATTR(_name, 0444, show_##_name, NULL)
136 
137 #define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)
138 
139 #define show_cppc_data(access_fn, struct_name, member_name)		\
140 	static ssize_t show_##member_name(struct kobject *kobj,		\
141 				struct kobj_attribute *attr, char *buf)	\
142 	{								\
143 		struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);		\
144 		struct struct_name st_name = {0};			\
145 		int ret;						\
146 									\
147 		ret = access_fn(cpc_ptr->cpu_id, &st_name);		\
148 		if (ret)						\
149 			return ret;					\
150 									\
151 		return scnprintf(buf, PAGE_SIZE, "%llu\n",		\
152 				(u64)st_name.member_name);		\
153 	}								\
154 	define_one_cppc_ro(member_name)
155 
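/*
 * For instance, show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf)
 * below expands to a show_highest_perf() sysfs handler that calls
 * cppc_get_perf_caps() and prints the highest_perf member, along with a
 * matching read-only kobj_attribute named highest_perf.
 */
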
156 show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
157 show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
158 show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
159 show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
160 show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq);
161 show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);
162 
163 show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
164 show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
165 
166 static ssize_t show_feedback_ctrs(struct kobject *kobj,
167 		struct kobj_attribute *attr, char *buf)
168 {
169 	struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
170 	struct cppc_perf_fb_ctrs fb_ctrs = {0};
171 	int ret;
172 
173 	ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
174 	if (ret)
175 		return ret;
176 
177 	return scnprintf(buf, PAGE_SIZE, "ref:%llu del:%llu\n",
178 			fb_ctrs.reference, fb_ctrs.delivered);
179 }
180 define_one_cppc_ro(feedback_ctrs);
181 
182 static struct attribute *cppc_attrs[] = {
183 	&feedback_ctrs.attr,
184 	&reference_perf.attr,
185 	&wraparound_time.attr,
186 	&highest_perf.attr,
187 	&lowest_perf.attr,
188 	&lowest_nonlinear_perf.attr,
189 	&nominal_perf.attr,
190 	&nominal_freq.attr,
191 	&lowest_freq.attr,
192 	NULL
193 };
194 ATTRIBUTE_GROUPS(cppc);
195 
196 static struct kobj_type cppc_ktype = {
197 	.sysfs_ops = &kobj_sysfs_ops,
198 	.default_groups = cppc_groups,
199 };
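
/*
 * Once acpi_cppc_processor_probe() below registers this kobject, the
 * attributes above appear under /sys/devices/system/cpu/cpuN/acpi_cppc/.
 */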
200 
201 static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
202 {
203 	int ret, status;
204 	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
205 	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
206 		pcc_ss_data->pcc_comm_addr;
207 
208 	if (!pcc_ss_data->platform_owns_pcc)
209 		return 0;
210 
211 	/*
212 	 * Poll the PCC status register every 3 us (delay_us) for a maximum of
213 	 * deadline_us (timeout_us) until the PCC command complete bit is set (cond).
214 	 */
215 	ret = readw_relaxed_poll_timeout(&generic_comm_base->status, status,
216 					status & PCC_CMD_COMPLETE_MASK, 3,
217 					pcc_ss_data->deadline_us);
218 
219 	if (likely(!ret)) {
220 		pcc_ss_data->platform_owns_pcc = false;
221 		if (chk_err_bit && (status & PCC_ERROR_MASK))
222 			ret = -EIO;
223 	}
224 
225 	if (unlikely(ret))
226 		pr_err("PCC check channel failed for ss: %d. ret=%d\n",
227 		       pcc_ss_id, ret);
228 
229 	return ret;
230 }
231 
232 /*
233  * This function transfers the ownership of the PCC channel to the platform,
234  * so it must be called while holding the write_lock (pcc_lock).
235  */
236 static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
237 {
238 	int ret = -EIO, i;
239 	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
240 	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
241 		pcc_ss_data->pcc_comm_addr;
242 	unsigned int time_delta;
243 
244 	/*
245 	 * For CMD_WRITE, the caller is expected to have checked the channel
246 	 * before writing to the PCC space.
247 	 */
248 	if (cmd == CMD_READ) {
249 		/*
250 		 * If there are pending cpc_writes, then we stole the channel
251 		 * before write completion, so first send a WRITE command to
252 		 * platform
253 		 */
254 		if (pcc_ss_data->pending_pcc_write_cmd)
255 			send_pcc_cmd(pcc_ss_id, CMD_WRITE);
256 
257 		ret = check_pcc_chan(pcc_ss_id, false);
258 		if (ret)
259 			goto end;
260 	} else /* CMD_WRITE */
261 		pcc_ss_data->pending_pcc_write_cmd = FALSE;
262 
263 	/*
264 	 * Handle the Minimum Request Turnaround Time (MRTT):
265 	 * "The minimum amount of time that OSPM must wait after the completion
266 	 * of a command before issuing the next command, in microseconds"
267 	 */
268 	if (pcc_ss_data->pcc_mrtt) {
269 		time_delta = ktime_us_delta(ktime_get(),
270 					    pcc_ss_data->last_cmd_cmpl_time);
271 		if (pcc_ss_data->pcc_mrtt > time_delta)
272 			udelay(pcc_ss_data->pcc_mrtt - time_delta);
273 	}
274 
275 	/*
276 	 * Handle a non-zero Maximum Periodic Access Rate (MPAR):
277 	 * "The maximum number of periodic requests that the subspace channel can
278 	 * support, reported in commands per minute. 0 indicates no limitation."
279 	 *
280 	 * This parameter should ideally be zero or large enough so that it can
281 	 * handle the maximum number of requests that all the cores in the system
282 	 * can collectively generate. If it is not, we will follow the spec and
283 	 * just not send the request to the platform after hitting the MPAR limit
284 	 * in any 60s window.
285 	 */
286 	if (pcc_ss_data->pcc_mpar) {
287 		if (pcc_ss_data->mpar_count == 0) {
288 			time_delta = ktime_ms_delta(ktime_get(),
289 						    pcc_ss_data->last_mpar_reset);
290 			if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
291 				pr_debug("PCC cmd for subspace %d not sent due to MPAR limit\n",
292 					 pcc_ss_id);
293 				ret = -EIO;
294 				goto end;
295 			}
296 			pcc_ss_data->last_mpar_reset = ktime_get();
297 			pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
298 		}
299 		pcc_ss_data->mpar_count--;
300 	}
301 
302 	/* Write to the shared comm region. */
303 	writew_relaxed(cmd, &generic_comm_base->command);
304 
305 	/* Clear the CMD COMPLETE bit; the platform sets it again on completion. */
306 	writew_relaxed(0, &generic_comm_base->status);
307 
308 	pcc_ss_data->platform_owns_pcc = true;
309 
310 	/* Ring doorbell */
311 	ret = mbox_send_message(pcc_ss_data->pcc_channel->mchan, &cmd);
312 	if (ret < 0) {
313 		pr_err("Err sending PCC mbox message. ss: %d cmd:%d, ret:%d\n",
314 		       pcc_ss_id, cmd, ret);
315 		goto end;
316 	}
317 
318 	/* wait for completion and check for PCC error bit */
319 	ret = check_pcc_chan(pcc_ss_id, true);
320 
321 	if (pcc_ss_data->pcc_mrtt)
322 		pcc_ss_data->last_cmd_cmpl_time = ktime_get();
323 
324 	if (pcc_ss_data->pcc_channel->mchan->mbox->txdone_irq)
325 		mbox_chan_txdone(pcc_ss_data->pcc_channel->mchan, ret);
326 	else
327 		mbox_client_txdone(pcc_ss_data->pcc_channel->mchan, ret);
328 
329 end:
330 	if (cmd == CMD_WRITE) {
331 		if (unlikely(ret)) {
332 			for_each_possible_cpu(i) {
333 				struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);
334 
335 				if (!desc)
336 					continue;
337 
338 				if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
339 					desc->write_cmd_status = ret;
340 			}
341 		}
342 		pcc_ss_data->pcc_write_cnt++;
343 		wake_up_all(&pcc_ss_data->pcc_write_wait_q);
344 	}
345 
346 	return ret;
347 }
348 
349 static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
350 {
351 	if (ret < 0)
352 		pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
353 				*(u16 *)msg, ret);
354 	else
355 		pr_debug("TX completed. CMD sent:%x, ret:%d\n",
356 				*(u16 *)msg, ret);
357 }
358 
359 static struct mbox_client cppc_mbox_cl = {
360 	.tx_done = cppc_chan_tx_done,
361 	.knows_txdone = true,
362 };
363 
364 static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
365 {
366 	int result = -EFAULT;
367 	acpi_status status = AE_OK;
368 	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
369 	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
370 	struct acpi_buffer state = {0, NULL};
371 	union acpi_object  *psd = NULL;
372 	struct acpi_psd_package *pdomain;
373 
374 	status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
375 					    &buffer, ACPI_TYPE_PACKAGE);
376 	if (status == AE_NOT_FOUND)	/* _PSD is optional */
377 		return 0;
378 	if (ACPI_FAILURE(status))
379 		return -ENODEV;
380 
381 	psd = buffer.pointer;
382 	if (!psd || psd->package.count != 1) {
383 		pr_debug("Invalid _PSD data\n");
384 		goto end;
385 	}
386 
387 	pdomain = &(cpc_ptr->domain_info);
388 
389 	state.length = sizeof(struct acpi_psd_package);
390 	state.pointer = pdomain;
391 
392 	status = acpi_extract_package(&(psd->package.elements[0]),
393 		&format, &state);
394 	if (ACPI_FAILURE(status)) {
395 		pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
396 		goto end;
397 	}
398 
399 	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
400 		pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
401 		goto end;
402 	}
403 
404 	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
405 		pr_debug("Unknown _PSD:revision for CPU: %d\n", cpc_ptr->cpu_id);
406 		goto end;
407 	}
408 
409 	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
410 	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
411 	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
412 		pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
413 		goto end;
414 	}
415 
416 	result = 0;
417 end:
418 	kfree(buffer.pointer);
419 	return result;
420 }
421 
422 bool acpi_cpc_valid(void)
423 {
424 	struct cpc_desc *cpc_ptr;
425 	int cpu;
426 
427 	for_each_present_cpu(cpu) {
428 		cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
429 		if (!cpc_ptr)
430 			return false;
431 	}
432 
433 	return true;
434 }
435 EXPORT_SYMBOL_GPL(acpi_cpc_valid);
436 
437 bool cppc_allow_fast_switch(void)
438 {
439 	struct cpc_register_resource *desired_reg;
440 	struct cpc_desc *cpc_ptr;
441 	int cpu;
442 
443 	for_each_possible_cpu(cpu) {
444 		cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
445 		desired_reg = &cpc_ptr->cpc_regs[DESIRED_PERF];
446 		if (!CPC_IN_SYSTEM_MEMORY(desired_reg) &&
447 				!CPC_IN_SYSTEM_IO(desired_reg))
448 			return false;
449 	}
450 
451 	return true;
452 }
453 EXPORT_SYMBOL_GPL(cppc_allow_fast_switch);
454 
455 /**
456  * acpi_get_psd_map - Map the CPUs in the freq domain of a given cpu
457  * @cpu: Find all CPUs that share a domain with cpu.
458  * @cpu_data: Pointer to CPU specific CPPC data including PSD info.
459  *
460  *	Return: 0 for success or negative value for err.
461  */
462 int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data)
463 {
464 	struct cpc_desc *cpc_ptr, *match_cpc_ptr;
465 	struct acpi_psd_package *match_pdomain;
466 	struct acpi_psd_package *pdomain;
467 	int count_target, i;
468 
469 	/*
470 	 * Now that we have _PSD data from all CPUs, let's set up the P-state
471 	 * domain info.
472 	 */
473 	cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
474 	if (!cpc_ptr)
475 		return -EFAULT;
476 
477 	pdomain = &(cpc_ptr->domain_info);
478 	cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
479 	if (pdomain->num_processors <= 1)
480 		return 0;
481 
482 	/* Validate the Domain info */
483 	count_target = pdomain->num_processors;
484 	if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
485 		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ALL;
486 	else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
487 		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_HW;
488 	else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
489 		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ANY;
490 
491 	for_each_possible_cpu(i) {
492 		if (i == cpu)
493 			continue;
494 
495 		match_cpc_ptr = per_cpu(cpc_desc_ptr, i);
496 		if (!match_cpc_ptr)
497 			goto err_fault;
498 
499 		match_pdomain = &(match_cpc_ptr->domain_info);
500 		if (match_pdomain->domain != pdomain->domain)
501 			continue;
502 
503 		/* Here i and cpu are in the same domain */
504 		if (match_pdomain->num_processors != count_target)
505 			goto err_fault;
506 
507 		if (pdomain->coord_type != match_pdomain->coord_type)
508 			goto err_fault;
509 
510 		cpumask_set_cpu(i, cpu_data->shared_cpu_map);
511 	}
512 
513 	return 0;
514 
515 err_fault:
516 	/* Assume no coordination on any error parsing domain info */
517 	cpumask_clear(cpu_data->shared_cpu_map);
518 	cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
519 	cpu_data->shared_type = CPUFREQ_SHARED_TYPE_NONE;
520 
521 	return -EFAULT;
522 }
523 EXPORT_SYMBOL_GPL(acpi_get_psd_map);
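
/*
 * Illustrative sketch: a CPUfreq driver typically calls acpi_get_psd_map()
 * from its ->init() path with a preallocated shared_cpu_map. The policy and
 * cpu_data variables below are hypothetical:
 *
 *	if (acpi_get_psd_map(policy->cpu, cpu_data))
 *		pr_debug("Error parsing PSD data for CPU:%d\n", policy->cpu);
 */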
524 
525 static int register_pcc_channel(int pcc_ss_idx)
526 {
527 	struct pcc_mbox_chan *pcc_chan;
528 	u64 usecs_lat;
529 
530 	if (pcc_ss_idx >= 0) {
531 		pcc_chan = pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);
532 
533 		if (IS_ERR(pcc_chan)) {
534 			pr_err("Failed to find PCC channel for subspace %d\n",
535 			       pcc_ss_idx);
536 			return -ENODEV;
537 		}
538 
539 		pcc_data[pcc_ss_idx]->pcc_channel = pcc_chan;
540 		/*
541 		 * pcc_chan->latency is just a nominal value. In reality
542 		 * the remote processor could be much slower to reply.
543 		 * So add an arbitrary amount of wait on top of nominal.
544 		 */
545 		usecs_lat = NUM_RETRIES * pcc_chan->latency;
546 		pcc_data[pcc_ss_idx]->deadline_us = usecs_lat;
547 		pcc_data[pcc_ss_idx]->pcc_mrtt = pcc_chan->min_turnaround_time;
548 		pcc_data[pcc_ss_idx]->pcc_mpar = pcc_chan->max_access_rate;
549 		pcc_data[pcc_ss_idx]->pcc_nominal = pcc_chan->latency;
550 
551 		pcc_data[pcc_ss_idx]->pcc_comm_addr =
552 			acpi_os_ioremap(pcc_chan->shmem_base_addr,
553 					pcc_chan->shmem_size);
554 		if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
555 			pr_err("Failed to ioremap PCC comm region mem for %d\n",
556 			       pcc_ss_idx);
557 			return -ENOMEM;
558 		}
559 
560 		/* Set flag so that we don't come here for each CPU. */
561 		pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
562 	}
563 
564 	return 0;
565 }
566 
567 /**
568  * cpc_ffh_supported() - check if FFH register access is supported
569  *
570  * Check if the architecture has support for functional fixed hardware
571  * read/write capability.
572  *
573  * Return: true for supported, false for not supported
574  */
575 bool __weak cpc_ffh_supported(void)
576 {
577 	return false;
578 }
579 
580 /**
581  * cpc_supported_by_cpu() - check if CPPC is supported by CPU
582  *
583  * Check if the architectural support for CPPC is present even
584  * if the _OSC hasn't prescribed it.
585  *
586  * Return: true for supported, false for not supported
587  */
588 bool __weak cpc_supported_by_cpu(void)
589 {
590 	return false;
591 }
592 
593 /**
594  * pcc_data_alloc() - Allocate the pcc_data memory for a PCC subspace
595  * @pcc_ss_id: PCC subspace index.
596  *
597  * Check and allocate the cppc_pcc_data memory. In some processor
598  * configurations the same subspace may be shared between multiple CPUs;
599  * this is seen especially in CPUs with hardware multi-threading support.
600  *
601  * Return: 0 for success, errno for failure
602  */
603 static int pcc_data_alloc(int pcc_ss_id)
604 {
605 	if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
606 		return -EINVAL;
607 
608 	if (pcc_data[pcc_ss_id]) {
609 		pcc_data[pcc_ss_id]->refcount++;
610 	} else {
611 		pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data),
612 					      GFP_KERNEL);
613 		if (!pcc_data[pcc_ss_id])
614 			return -ENOMEM;
615 		pcc_data[pcc_ss_id]->refcount++;
616 	}
617 
618 	return 0;
619 }
620 
621 /* Check if CPPC revision + num_ent combination is supported */
622 static bool is_cppc_supported(int revision, int num_ent)
623 {
624 	int expected_num_ent;
625 
626 	switch (revision) {
627 	case CPPC_V2_REV:
628 		expected_num_ent = CPPC_V2_NUM_ENT;
629 		break;
630 	case CPPC_V3_REV:
631 		expected_num_ent = CPPC_V3_NUM_ENT;
632 		break;
633 	default:
634 		pr_debug("Firmware exports unsupported CPPC revision: %d\n",
635 			revision);
636 		return false;
637 	}
638 
639 	if (expected_num_ent != num_ent) {
640 		pr_debug("Firmware exports %d entries. Expected: %d for CPPC rev:%d\n",
641 			num_ent, expected_num_ent, revision);
642 		return false;
643 	}
644 
645 	return true;
646 }
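
/*
 * For reference: per cppc_acpi.h, a v2 _CPC package (CPPC_V2_REV) carries
 * 21 entries, while v3 (CPPC_V3_REV) carries 23, adding the optional
 * Lowest Frequency and Nominal Frequency registers.
 */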
647 
648 /*
649  * An example CPC table looks like the following.
650  *
651  *  Name (_CPC, Package() {
652  *      17,							// NumEntries
653  *      1,							// Revision
654  *      ResourceTemplate() {Register(PCC, 32, 0, 0x120, 2)},	// Highest Performance
655  *      ResourceTemplate() {Register(PCC, 32, 0, 0x124, 2)},	// Nominal Performance
656  *      ResourceTemplate() {Register(PCC, 32, 0, 0x128, 2)},	// Lowest Nonlinear Performance
657  *      ResourceTemplate() {Register(PCC, 32, 0, 0x12C, 2)},	// Lowest Performance
658  *      ResourceTemplate() {Register(PCC, 32, 0, 0x130, 2)},	// Guaranteed Performance Register
659  *      ResourceTemplate() {Register(PCC, 32, 0, 0x110, 2)},	// Desired Performance Register
660  *      ResourceTemplate() {Register(SystemMemory, 0, 0, 0, 0)},
661  *      ...
662  *      ...
663  *      ...
664  *  }
665  * Each Register() encodes how to access that specific register.
666  * e.g. a sample PCC entry has the following encoding:
667  *
668  *  Register (
669  *      PCC,	// AddressSpaceKeyword
670  *      8,	// RegisterBitWidth
671  *      8,	// RegisterBitOffset
672  *      0x30,	// RegisterAddress
673  *      9,	// AccessSize (subspace ID)
674  *  )
675  */
676 
677 #ifndef arch_init_invariance_cppc
678 static inline void arch_init_invariance_cppc(void) { }
679 #endif
680 
681 /**
682  * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
683  * @pr: Ptr to acpi_processor containing this CPU's logical ID.
684  *
685  *	Return: 0 for success or negative value for err.
686  */
687 int acpi_cppc_processor_probe(struct acpi_processor *pr)
688 {
689 	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
690 	union acpi_object *out_obj, *cpc_obj;
691 	struct cpc_desc *cpc_ptr;
692 	struct cpc_reg *gas_t;
693 	struct device *cpu_dev;
694 	acpi_handle handle = pr->handle;
695 	unsigned int num_ent, i, cpc_rev;
696 	int pcc_subspace_id = -1;
697 	acpi_status status;
698 	int ret = -ENODATA;
699 
700 	if (!osc_sb_cppc2_support_acked) {
701 		pr_debug("CPPC v2 _OSC not acked\n");
702 		if (!cpc_supported_by_cpu())
703 			return -ENODEV;
704 	}
705 
706 	/* Parse the ACPI _CPC table for this CPU. */
707 	status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
708 			ACPI_TYPE_PACKAGE);
709 	if (ACPI_FAILURE(status)) {
710 		ret = -ENODEV;
711 		goto out_buf_free;
712 	}
713 
714 	out_obj = (union acpi_object *) output.pointer;
715 
716 	cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
717 	if (!cpc_ptr) {
718 		ret = -ENOMEM;
719 		goto out_buf_free;
720 	}
721 
722 	/* First entry is NumEntries. */
723 	cpc_obj = &out_obj->package.elements[0];
724 	if (cpc_obj->type == ACPI_TYPE_INTEGER)	{
725 		num_ent = cpc_obj->integer.value;
726 		if (num_ent <= 1) {
727 			pr_debug("Unexpected _CPC NumEntries value (%d) for CPU:%d\n",
728 				 num_ent, pr->id);
729 			goto out_free;
730 		}
731 	} else {
732 		pr_debug("Unexpected _CPC NumEntries entry type (%d) for CPU:%d\n",
733 			 cpc_obj->type, pr->id);
734 		goto out_free;
735 	}
736 	cpc_ptr->num_entries = num_ent;
737 
738 	/* Second entry should be revision. */
739 	cpc_obj = &out_obj->package.elements[1];
740 	if (cpc_obj->type == ACPI_TYPE_INTEGER)	{
741 		cpc_rev = cpc_obj->integer.value;
742 	} else {
743 		pr_debug("Unexpected _CPC Revision entry type (%d) for CPU:%d\n",
744 			 cpc_obj->type, pr->id);
745 		goto out_free;
746 	}
747 	cpc_ptr->version = cpc_rev;
748 
749 	if (!is_cppc_supported(cpc_rev, num_ent))
750 		goto out_free;
751 
752 	/* Iterate through remaining entries in _CPC */
753 	for (i = 2; i < num_ent; i++) {
754 		cpc_obj = &out_obj->package.elements[i];
755 
756 		if (cpc_obj->type == ACPI_TYPE_INTEGER)	{
757 			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
758 			cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
759 		} else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
760 			gas_t = (struct cpc_reg *)
761 				cpc_obj->buffer.pointer;
762 
763 			/*
764 			 * The PCC Subspace index is encoded inside
765 			 * the CPC table entries. The same PCC index
766 			 * will be used for all the PCC entries,
767 			 * so extract it only once.
768 			 */
769 			if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
770 				if (pcc_subspace_id < 0) {
771 					pcc_subspace_id = gas_t->access_width;
772 					if (pcc_data_alloc(pcc_subspace_id))
773 						goto out_free;
774 				} else if (pcc_subspace_id != gas_t->access_width) {
775 					pr_debug("Mismatched PCC ids in _CPC for CPU:%d\n",
776 						 pr->id);
777 					goto out_free;
778 				}
779 			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
780 				if (gas_t->address) {
781 					void __iomem *addr;
782 
783 					if (!osc_cpc_flexible_adr_space_confirmed) {
784 						pr_debug("Flexible address space capability not supported\n");
785 						goto out_free;
786 					}
787 
788 					addr = ioremap(gas_t->address, gas_t->bit_width/8);
789 					if (!addr)
790 						goto out_free;
791 					cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
792 				}
793 			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
794 				if (gas_t->access_width < 1 || gas_t->access_width > 3) {
795 					/*
796 					 * 1 = 8-bit, 2 = 16-bit, and 3 = 32-bit.
797 					 * SystemIO doesn't implement 64-bit
798 					 * registers.
799 					 */
800 					pr_debug("Invalid access width %d for SystemIO register in _CPC\n",
801 						 gas_t->access_width);
802 					goto out_free;
803 				}
804 				if (gas_t->address & OVER_16BTS_MASK) {
805 					/* SystemIO registers use 16-bit integer addresses */
806 					pr_debug("Invalid IO port %llu for SystemIO register in _CPC\n",
807 						 gas_t->address);
808 					goto out_free;
809 				}
810 				if (!osc_cpc_flexible_adr_space_confirmed) {
811 					pr_debug("Flexible address space capability not supported\n");
812 					goto out_free;
813 				}
814 			} else {
815 				if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
816 					/* Support only PCC, SystemMemory, SystemIO, and FFH type regs. */
817 					pr_debug("Unsupported register type (%d) in _CPC\n",
818 						 gas_t->space_id);
819 					goto out_free;
820 				}
821 			}
822 
823 			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
824 			memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
825 		} else {
826 			pr_debug("Invalid type in _CPC entry %d for CPU:%d\n",
827 				 i, pr->id);
828 			goto out_free;
829 		}
830 	}
831 	per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;
832 
833 	/*
834 	 * Initialize the remaining cpc_regs as unsupported.
835 	 * Example: In case FW exposes CPPC v2, the below loop will initialize
836 	 * LOWEST_FREQ and NOMINAL_FREQ regs as unsupported
837 	 */
838 	for (i = num_ent - 2; i < MAX_CPC_REG_ENT; i++) {
839 		cpc_ptr->cpc_regs[i].type = ACPI_TYPE_INTEGER;
840 		cpc_ptr->cpc_regs[i].cpc_entry.int_value = 0;
841 	}
842 
843 
844 	/* Store CPU Logical ID */
845 	cpc_ptr->cpu_id = pr->id;
846 
847 	/* Parse PSD data for this CPU */
848 	ret = acpi_get_psd(cpc_ptr, handle);
849 	if (ret)
850 		goto out_free;
851 
852 	/* Register the PCC channel only once per PCC subspace ID. */
853 	if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
854 		ret = register_pcc_channel(pcc_subspace_id);
855 		if (ret)
856 			goto out_free;
857 
858 		init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
859 		init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
860 	}
861 
862 	/* Everything looks okay */
863 	pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);
864 
865 	/* Add per logical CPU nodes for reading its feedback counters. */
866 	cpu_dev = get_cpu_device(pr->id);
867 	if (!cpu_dev) {
868 		ret = -EINVAL;
869 		goto out_free;
870 	}
871 
872 	/* Plug PSD data into this CPU's CPC descriptor. */
873 	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
874 
875 	ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
876 			"acpi_cppc");
877 	if (ret) {
878 		per_cpu(cpc_desc_ptr, pr->id) = NULL;
879 		kobject_put(&cpc_ptr->kobj);
880 		goto out_free;
881 	}
882 
883 	arch_init_invariance_cppc();
884 
885 	kfree(output.pointer);
886 	return 0;
887 
888 out_free:
889 	/* Free all the mapped sys mem areas for this CPU */
890 	for (i = 2; i < cpc_ptr->num_entries; i++) {
891 		void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
892 
893 		if (addr)
894 			iounmap(addr);
895 	}
896 	kfree(cpc_ptr);
897 
898 out_buf_free:
899 	kfree(output.pointer);
900 	return ret;
901 }
902 EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);
903 
904 /**
905  * acpi_cppc_processor_exit - Cleanup CPC structs.
906  * @pr: Ptr to acpi_processor containing this CPU's logical ID.
907  *
908  * Return: Void
909  */
910 void acpi_cppc_processor_exit(struct acpi_processor *pr)
911 {
912 	struct cpc_desc *cpc_ptr;
913 	unsigned int i;
914 	void __iomem *addr;
915 	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);
916 
917 	if (pcc_ss_id >= 0 && pcc_data[pcc_ss_id]) {
918 		if (pcc_data[pcc_ss_id]->pcc_channel_acquired) {
919 			pcc_data[pcc_ss_id]->refcount--;
920 			if (!pcc_data[pcc_ss_id]->refcount) {
921 				pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
922 				kfree(pcc_data[pcc_ss_id]);
923 				pcc_data[pcc_ss_id] = NULL;
924 			}
925 		}
926 	}
927 
928 	cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
929 	if (!cpc_ptr)
930 		return;
931 
932 	/* Free all the mapped sys mem areas for this CPU */
933 	for (i = 2; i < cpc_ptr->num_entries; i++) {
934 		addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
935 		if (addr)
936 			iounmap(addr);
937 	}
938 
939 	kobject_put(&cpc_ptr->kobj);
940 	kfree(cpc_ptr);
941 }
942 EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);
943 
944 /**
945  * cpc_read_ffh() - Read FFH register
946  * @cpunum:	CPU number to read
947  * @reg:	cppc register information
948  * @val:	place holder for return value
949  *
950  * Read bit_width bits from a specified address and bit_offset
951  *
952  * Return: 0 for success, or an error code on failure
953  */
954 int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
955 {
956 	return -ENOTSUPP;
957 }
958 
959 /**
960  * cpc_write_ffh() - Write FFH register
961  * @cpunum:	CPU number to write
962  * @reg:	cppc register information
963  * @val:	value to write
964  *
965  * Write value of bit_width bits to a specified address and bit_offset
966  *
967  * Return: 0 for success, or an error code on failure
968  */
969 int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
970 {
971 	return -ENOTSUPP;
972 }
973 
974 /*
975  * Since cpc_read() and cpc_write() are called while holding pcc_lock, they
976  * should be as fast as possible. We have already mapped the PCC subspace
977  * during init, so we can access it directly.
978  */
979 
980 static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
981 {
982 	void __iomem *vaddr = NULL;
983 	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
984 	struct cpc_reg *reg = &reg_res->cpc_entry.reg;
985 
986 	if (reg_res->type == ACPI_TYPE_INTEGER) {
987 		*val = reg_res->cpc_entry.int_value;
988 		return 0;
989 	}
990 
991 	*val = 0;
992 
993 	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
994 		u32 width = 8 << (reg->access_width - 1);
995 		u32 val_u32;
996 		acpi_status status;
997 
998 		status = acpi_os_read_port((acpi_io_address)reg->address,
999 					   &val_u32, width);
1000 		if (ACPI_FAILURE(status)) {
1001 			pr_debug("Error: Failed to read SystemIO port %llx\n",
1002 				 reg->address);
1003 			return -EFAULT;
1004 		}
1005 
1006 		*val = val_u32;
1007 		return 0;
1008 	} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
1009 		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
1010 	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1011 		vaddr = reg_res->sys_mem_vaddr;
1012 	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
1013 		return cpc_read_ffh(cpu, reg, val);
1014 	else
1015 		return acpi_os_read_memory((acpi_physical_address)reg->address,
1016 				val, reg->bit_width);
1017 
1018 	switch (reg->bit_width) {
1019 	case 8:
1020 		*val = readb_relaxed(vaddr);
1021 		break;
1022 	case 16:
1023 		*val = readw_relaxed(vaddr);
1024 		break;
1025 	case 32:
1026 		*val = readl_relaxed(vaddr);
1027 		break;
1028 	case 64:
1029 		*val = readq_relaxed(vaddr);
1030 		break;
1031 	default:
1032 		pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
1033 			 reg->bit_width, pcc_ss_id);
1034 		return -EFAULT;
1035 	}
1036 
1037 	return 0;
1038 }
1039 
1040 static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
1041 {
1042 	int ret_val = 0;
1043 	void __iomem *vaddr = NULL;
1044 	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
1045 	struct cpc_reg *reg = &reg_res->cpc_entry.reg;
1046 
1047 	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1048 		u32 width = 8 << (reg->access_width - 1);
1049 		acpi_status status;
1050 
1051 		status = acpi_os_write_port((acpi_io_address)reg->address,
1052 					    (u32)val, width);
1053 		if (ACPI_FAILURE(status)) {
1054 			pr_debug("Error: Failed to write SystemIO port %llx\n",
1055 				 reg->address);
1056 			return -EFAULT;
1057 		}
1058 
1059 		return 0;
1060 	} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
1061 		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
1062 	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1063 		vaddr = reg_res->sys_mem_vaddr;
1064 	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
1065 		return cpc_write_ffh(cpu, reg, val);
1066 	else
1067 		return acpi_os_write_memory((acpi_physical_address)reg->address,
1068 				val, reg->bit_width);
1069 
1070 	switch (reg->bit_width) {
1071 	case 8:
1072 		writeb_relaxed(val, vaddr);
1073 		break;
1074 	case 16:
1075 		writew_relaxed(val, vaddr);
1076 		break;
1077 	case 32:
1078 		writel_relaxed(val, vaddr);
1079 		break;
1080 	case 64:
1081 		writeq_relaxed(val, vaddr);
1082 		break;
1083 	default:
1084 		pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
1085 			 reg->bit_width, pcc_ss_id);
1086 		ret_val = -EFAULT;
1087 		break;
1088 	}
1089 
1090 	return ret_val;
1091 }
1092 
1093 static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
1094 {
1095 	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
1096 	struct cpc_register_resource *reg;
1097 
1098 	if (!cpc_desc) {
1099 		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
1100 		return -ENODEV;
1101 	}
1102 
1103 	reg = &cpc_desc->cpc_regs[reg_idx];
1104 
1105 	if (CPC_IN_PCC(reg)) {
1106 		int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
1107 		struct cppc_pcc_data *pcc_ss_data = NULL;
1108 		int ret = 0;
1109 
1110 		if (pcc_ss_id < 0)
1111 			return -EIO;
1112 
1113 		pcc_ss_data = pcc_data[pcc_ss_id];
1114 
1115 		down_write(&pcc_ss_data->pcc_lock);
1116 
1117 		if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
1118 			cpc_read(cpunum, reg, perf);
1119 		else
1120 			ret = -EIO;
1121 
1122 		up_write(&pcc_ss_data->pcc_lock);
1123 
1124 		return ret;
1125 	}
1126 
1127 	cpc_read(cpunum, reg, perf);
1128 
1129 	return 0;
1130 }
1131 
1132 /**
1133  * cppc_get_desired_perf - Get the desired performance register value.
1134  * @cpunum: CPU from which to get desired performance.
1135  * @desired_perf: Return address.
1136  *
1137  * Return: 0 for success, -EIO otherwise.
1138  */
1139 int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
1140 {
1141 	return cppc_get_perf(cpunum, DESIRED_PERF, desired_perf);
1142 }
1143 EXPORT_SYMBOL_GPL(cppc_get_desired_perf);
1144 
1145 /**
1146  * cppc_get_nominal_perf - Get the nominal performance register value.
1147  * @cpunum: CPU from which to get nominal performance.
1148  * @nominal_perf: Return address.
1149  *
1150  * Return: 0 for success, -EIO otherwise.
1151  */
1152 int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
1153 {
1154 	return cppc_get_perf(cpunum, NOMINAL_PERF, nominal_perf);
1155 }
1156 
1157 /**
1158  * cppc_get_perf_caps - Get a CPU's performance capabilities.
1159  * @cpunum: CPU from which to get capabilities info.
1160  * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
1161  *
1162  * Return: 0 for success with perf_caps populated else -ERRNO.
1163  */
1164 int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
1165 {
1166 	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
1167 	struct cpc_register_resource *highest_reg, *lowest_reg,
1168 		*lowest_non_linear_reg, *nominal_reg, *guaranteed_reg,
1169 		*low_freq_reg = NULL, *nom_freq_reg = NULL;
1170 	u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0;
1171 	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
1172 	struct cppc_pcc_data *pcc_ss_data = NULL;
1173 	int ret = 0, regs_in_pcc = 0;
1174 
1175 	if (!cpc_desc) {
1176 		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
1177 		return -ENODEV;
1178 	}
1179 
1180 	highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
1181 	lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
1182 	lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
1183 	nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
1184 	low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ];
1185 	nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ];
1186 	guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF];
1187 
1188 	/* Are any of the regs PCC? */
1189 	if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
1190 		CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) ||
1191 		CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg)) {
1192 		if (pcc_ss_id < 0) {
1193 			pr_debug("Invalid pcc_ss_id\n");
1194 			return -ENODEV;
1195 		}
1196 		pcc_ss_data = pcc_data[pcc_ss_id];
1197 		regs_in_pcc = 1;
1198 		down_write(&pcc_ss_data->pcc_lock);
1199 		/* Ring doorbell once to update PCC subspace */
1200 		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
1201 			ret = -EIO;
1202 			goto out_err;
1203 		}
1204 	}
1205 
1206 	cpc_read(cpunum, highest_reg, &high);
1207 	perf_caps->highest_perf = high;
1208 
1209 	cpc_read(cpunum, lowest_reg, &low);
1210 	perf_caps->lowest_perf = low;
1211 
1212 	cpc_read(cpunum, nominal_reg, &nom);
1213 	perf_caps->nominal_perf = nom;
1214 
1215 	if (guaranteed_reg->type != ACPI_TYPE_BUFFER  ||
1216 	    IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
1217 		perf_caps->guaranteed_perf = 0;
1218 	} else {
1219 		cpc_read(cpunum, guaranteed_reg, &guaranteed);
1220 		perf_caps->guaranteed_perf = guaranteed;
1221 	}
1222 
1223 	cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
1224 	perf_caps->lowest_nonlinear_perf = min_nonlinear;
1225 
1226 	if (!high || !low || !nom || !min_nonlinear)
1227 		ret = -EFAULT;
1228 
1229 	/* Read optional lowest and nominal frequencies if present */
1230 	if (CPC_SUPPORTED(low_freq_reg))
1231 		cpc_read(cpunum, low_freq_reg, &low_f);
1232 
1233 	if (CPC_SUPPORTED(nom_freq_reg))
1234 		cpc_read(cpunum, nom_freq_reg, &nom_f);
1235 
1236 	perf_caps->lowest_freq = low_f;
1237 	perf_caps->nominal_freq = nom_f;
1238 
1239 
1240 out_err:
1241 	if (regs_in_pcc)
1242 		up_write(&pcc_ss_data->pcc_lock);
1243 	return ret;
1244 }
1245 EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
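
/*
 * Illustrative sketch: when the optional lowest_freq/nominal_freq values
 * (in MHz) are reported, an abstract perf level can be mapped to a
 * frequency in kHz by linear interpolation, roughly as the cppc_cpufreq
 * driver does. All names below are local to the example:
 *
 *	static u64 perf_to_khz(struct cppc_perf_caps *caps, u64 perf)
 *	{
 *		u64 mul = caps->nominal_freq - caps->lowest_freq;
 *		u64 div = caps->nominal_perf - caps->lowest_perf;
 *
 *		if (!div)
 *			return 0;
 *		return caps->lowest_freq * 1000 +
 *		       (perf - caps->lowest_perf) * mul * 1000 / div;
 *	}
 */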
1246 
1247 /**
1248  * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
1249  * @cpunum: CPU from which to read counters.
1250  * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
1251  *
1252  * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
1253  */
1254 int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
1255 {
1256 	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
1257 	struct cpc_register_resource *delivered_reg, *reference_reg,
1258 		*ref_perf_reg, *ctr_wrap_reg;
1259 	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
1260 	struct cppc_pcc_data *pcc_ss_data = NULL;
1261 	u64 delivered, reference, ref_perf, ctr_wrap_time;
1262 	int ret = 0, regs_in_pcc = 0;
1263 
1264 	if (!cpc_desc) {
1265 		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
1266 		return -ENODEV;
1267 	}
1268 
1269 	delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
1270 	reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
1271 	ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
1272 	ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];
1273 
1274 	/*
1275 	 * If reference perf register is not supported then we should
1276 	 * use the nominal perf value
1277 	 */
1278 	if (!CPC_SUPPORTED(ref_perf_reg))
1279 		ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
1280 
1281 	/* Are any of the regs PCC? */
1282 	if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
1283 		CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
1284 		if (pcc_ss_id < 0) {
1285 			pr_debug("Invalid pcc_ss_id\n");
1286 			return -ENODEV;
1287 		}
1288 		pcc_ss_data = pcc_data[pcc_ss_id];
1289 		down_write(&pcc_ss_data->pcc_lock);
1290 		regs_in_pcc = 1;
1291 		/* Ring doorbell once to update PCC subspace */
1292 		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
1293 			ret = -EIO;
1294 			goto out_err;
1295 		}
1296 	}
1297 
1298 	cpc_read(cpunum, delivered_reg, &delivered);
1299 	cpc_read(cpunum, reference_reg, &reference);
1300 	cpc_read(cpunum, ref_perf_reg, &ref_perf);
1301 
1302 	/*
1303 	 * Per spec, if the optional ctr_wrap_time register is unsupported, then
1304 	 * the performance counters are assumed to never wrap during the lifetime
1305 	 * of the platform.
1306 	 */
1307 	ctr_wrap_time = (u64)(~((u64)0));
1308 	if (CPC_SUPPORTED(ctr_wrap_reg))
1309 		cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);
1310 
1311 	if (!delivered || !reference ||	!ref_perf) {
1312 		ret = -EFAULT;
1313 		goto out_err;
1314 	}
1315 
1316 	perf_fb_ctrs->delivered = delivered;
1317 	perf_fb_ctrs->reference = reference;
1318 	perf_fb_ctrs->reference_perf = ref_perf;
1319 	perf_fb_ctrs->wraparound_time = ctr_wrap_time;
1320 out_err:
1321 	if (regs_in_pcc)
1322 		up_write(&pcc_ss_data->pcc_lock);
1323 	return ret;
1324 }
1325 EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
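
/*
 * Illustrative sketch: the average delivered performance over a sampling
 * window follows from two counter snapshots (names are local to the
 * example):
 *
 *	struct cppc_perf_fb_ctrs t0, t1;
 *	u64 delivered_perf;
 *
 *	cppc_get_perf_ctrs(cpu, &t0);
 *	// ... let some time pass ...
 *	cppc_get_perf_ctrs(cpu, &t1);
 *	delivered_perf = t1.reference_perf *
 *			 (t1.delivered - t0.delivered) /
 *			 (t1.reference - t0.reference);
 */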
1326 
1327 /**
1328  * cppc_set_enable - Enable or disable CPPC on the processor by writing the
1329  * Enable register of its _CPC (Continuous Performance Control) package.
1330  * @cpu: CPU for which to enable CPPC register.
1331  * @enable: 0 - disable, 1 - enable CPPC feature on the processor.
1332  *
1333  * Return: 0 for success, -ERRNO or -EIO otherwise.
1334  */
1335 int cppc_set_enable(int cpu, bool enable)
1336 {
1337 	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
1338 	struct cpc_register_resource *enable_reg;
1339 	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
1340 	struct cppc_pcc_data *pcc_ss_data = NULL;
1341 	int ret = -EINVAL;
1342 
1343 	if (!cpc_desc) {
1344 		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
1345 		return -EINVAL;
1346 	}
1347 
1348 	enable_reg = &cpc_desc->cpc_regs[ENABLE];
1349 
1350 	if (CPC_IN_PCC(enable_reg)) {
1351 
1352 		if (pcc_ss_id < 0)
1353 			return -EIO;
1354 
1355 		ret = cpc_write(cpu, enable_reg, enable);
1356 		if (ret)
1357 			return ret;
1358 
1359 		pcc_ss_data = pcc_data[pcc_ss_id];
1360 
1361 		down_write(&pcc_ss_data->pcc_lock);
1362 		/* After writing the CPC register, transfer PCC ownership to the platform. */
1363 		ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
1364 		up_write(&pcc_ss_data->pcc_lock);
1365 		return ret;
1366 	}
1367 
1368 	return cpc_write(cpu, enable_reg, enable);
1369 }
1370 EXPORT_SYMBOL_GPL(cppc_set_enable);
1371 
1372 /**
1373  * cppc_set_perf - Set a CPU's performance controls.
1374  * @cpu: CPU for which to set performance controls.
1375  * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
1376  *
1377  * Return: 0 for success, -ERRNO otherwise.
1378  */
1379 int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
1380 {
1381 	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
1382 	struct cpc_register_resource *desired_reg;
1383 	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
1384 	struct cppc_pcc_data *pcc_ss_data = NULL;
1385 	int ret = 0;
1386 
1387 	if (!cpc_desc) {
1388 		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
1389 		return -ENODEV;
1390 	}
1391 
1392 	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
1393 
1394 	/*
1395 	 * This is Phase-I where we want to write to CPC registers
1396 	 * -> We want all CPUs to be able to execute this phase in parallel
1397 	 *
1398 	 * Since read_lock can be acquired by multiple CPUs simultaneously we
1399 	 * achieve that goal here
1400 	 */
1401 	if (CPC_IN_PCC(desired_reg)) {
1402 		if (pcc_ss_id < 0) {
1403 			pr_debug("Invalid pcc_ss_id\n");
1404 			return -ENODEV;
1405 		}
1406 		pcc_ss_data = pcc_data[pcc_ss_id];
1407 		down_read(&pcc_ss_data->pcc_lock); /* BEGIN Phase-I */
1408 		if (pcc_ss_data->platform_owns_pcc) {
1409 			ret = check_pcc_chan(pcc_ss_id, false);
1410 			if (ret) {
1411 				up_read(&pcc_ss_data->pcc_lock);
1412 				return ret;
1413 			}
1414 		}
1415 		/*
1416 		 * Update the pending_write to make sure a PCC CMD_READ will not
1417 		 * arrive and steal the channel during the switch to write lock
1418 		 */
1419 		pcc_ss_data->pending_pcc_write_cmd = true;
1420 		cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
1421 		cpc_desc->write_cmd_status = 0;
1422 	}
1423 
1424 	/*
1425 	 * Skip writing MIN/MAX until Linux knows how to come up with
1426 	 * useful values.
1427 	 */
1428 	cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);
1429 
1430 	if (CPC_IN_PCC(desired_reg))
1431 		up_read(&pcc_ss_data->pcc_lock);	/* END Phase-I */
1432 	/*
1433 	 * This is Phase-II where we transfer the ownership of PCC to Platform
1434 	 *
1435 	 * Short summary: think of a group of cppc_set_perf requests that
1436 	 * happen in a short overlapping interval. The last CPU to come out of
1437 	 * Phase-I will enter Phase-II and ring the doorbell.
1438 	 *
1439 	 * We have the following requirements for Phase-II:
1440 	 *     1. We want to execute Phase-II only when there are no CPUs
1441 	 * currently executing in Phase-I
1442 	 *     2. Once we start Phase-II we want to avoid all other CPUs from
1443 	 * entering Phase-I.
1444 	 *     3. We want only one CPU among all those who went through Phase-I
1445 	 * to run Phase-II.
1446 	 *
1447 	 * If write_trylock fails to get the lock and doesn't transfer the
1448 	 * PCC ownership to the platform, then one of the following will be TRUE
1449 	 *     1. There is at-least one CPU in Phase-I which will later execute
1450 	 * write_trylock, so the CPUs in Phase-I will be responsible for
1451 	 * executing the Phase-II.
1452 	 *     2. Some other CPU has beaten this CPU to successfully execute the
1453 	 * write_trylock and has already acquired the write_lock. We know for a
1454 	 * fact it (other CPU acquiring the write_lock) couldn't have happened
1455 	 * before this CPU's Phase-I as we held the read_lock.
1456 	 *     3. Some other CPU executing a PCC CMD_READ has stolen the
1457 	 * down_write, in which case send_pcc_cmd() will check for pending
1458 	 * CMD_WRITE commands by looking at pending_pcc_write_cmd, so this
1459 	 * CPU can be certain that its request will be delivered.
1460 	 *    In all cases, this CPU knows that its request will be delivered
1461 	 * by another CPU and can therefore return.
1462 	 *
1463 	 * After getting the down_write we still need to check for
1464 	 * pending_pcc_write_cmd to take care of the following scenario:
1465 	 *    The thread running this code could be scheduled out between
1466 	 * Phase-I and Phase-II. Before it is scheduled back in, another CPU
1467 	 * could have delivered the request to the platform by triggering the
1468 	 * doorbell and transferred the ownership of PCC to the platform. So
1469 	 * this avoids triggering an unnecessary doorbell and, more importantly,
1470 	 * before triggering the doorbell it makes sure that the PCC channel
1471 	 * ownership is still with the OSPM.
1472 	 *   pending_pcc_write_cmd can also be cleared by a different CPU if
1473 	 * there was a PCC CMD_READ waiting on down_write and it steals the lock
1474 	 * before the PCC CMD_WRITE is completed. send_pcc_cmd() checks for this
1475 	 * case during a CMD_READ and, if there are pending writes, it delivers
1476 	 * the write command before servicing the read command.
1477 	 */
1478 	if (CPC_IN_PCC(desired_reg)) {
1479 		if (down_write_trylock(&pcc_ss_data->pcc_lock)) {/* BEGIN Phase-II */
1480 			/* Update only if there are pending write commands */
1481 			if (pcc_ss_data->pending_pcc_write_cmd)
1482 				send_pcc_cmd(pcc_ss_id, CMD_WRITE);
1483 			up_write(&pcc_ss_data->pcc_lock);	/* END Phase-II */
1484 		} else
1485 			/* Wait until pcc_write_cnt is updated by send_pcc_cmd */
1486 			wait_event(pcc_ss_data->pcc_write_wait_q,
1487 				   cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);
1488 
1489 		/* send_pcc_cmd updates the status in case of failure */
1490 		ret = cpc_desc->write_cmd_status;
1491 	}
1492 	return ret;
1493 }
1494 EXPORT_SYMBOL_GPL(cppc_set_perf);
1495 
1496 /**
1497  * cppc_get_transition_latency - returns frequency transition latency in ns
1498  *
1499  * ACPI CPPC does not explicitly specify how a platform can specify the
1500  * transition latency for performance change requests. The closest we have
1501  * is the timing information from the PCCT tables which provides the info
1502  * on the number and frequency of PCC commands the platform can handle.
1503  *
1504  * If desired_reg is in the SystemMemory or SystemIo ACPI address space,
1505  * then assume there is no latency.
1506  */
1507 unsigned int cppc_get_transition_latency(int cpu_num)
1508 {
1509 	/*
1510 	 * Expected transition latency is based on the PCCT timing values
1511 	 * Below are definitions from the ACPI spec:
1512 	 * pcc_nominal - Expected latency to process a command, in microseconds
1513 	 * pcc_mpar   - The maximum number of periodic requests that the subspace
1514 	 *              channel can support, reported in commands per minute. 0
1515 	 *              indicates no limitation.
1516 	 * pcc_mrtt   - The minimum amount of time that OSPM must wait after the
1517 	 *              completion of a command before issuing the next command,
1518 	 *              in microseconds.
1519 	 */
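	/*
	 * Worked example (illustrative numbers): with pcc_mpar = 6000
	 * commands/min, the MPAR term below is 60 * (10^9 / 6000) ns, i.e.
	 * roughly 10 ms; a pcc_nominal of 1000 us contributes 1 ms, so the
	 * MPAR term dominates and ~10 ms is returned.
	 */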
1520 	unsigned int latency_ns = 0;
1521 	struct cpc_desc *cpc_desc;
1522 	struct cpc_register_resource *desired_reg;
1523 	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
1524 	struct cppc_pcc_data *pcc_ss_data;
1525 
1526 	cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
1527 	if (!cpc_desc)
1528 		return CPUFREQ_ETERNAL;
1529 
1530 	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
1531 	if (CPC_IN_SYSTEM_MEMORY(desired_reg) || CPC_IN_SYSTEM_IO(desired_reg))
1532 		return 0;
1533 	else if (!CPC_IN_PCC(desired_reg))
1534 		return CPUFREQ_ETERNAL;
1535 
1536 	if (pcc_ss_id < 0)
1537 		return CPUFREQ_ETERNAL;
1538 
1539 	pcc_ss_data = pcc_data[pcc_ss_id];
1540 	if (pcc_ss_data->pcc_mpar)
1541 		latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);
1542 
1543 	latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
1544 	latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);
1545 
1546 	return latency_ns;
1547 }
1548 EXPORT_SYMBOL_GPL(cppc_get_transition_latency);
1549