// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * processor_perflib.c - ACPI Processor P-States Library ($Revision: 71 $)
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *  			- Added processor hotplug support
 */

#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#include <asm/msr.h>
#endif

#define ACPI_PROCESSOR_FILE_PERFORMANCE	"performance"

/*
 * _PPC support is implemented as a CPUfreq policy notifier: every time a
 * CPUfreq driver that is also registered with the ACPI core is asked to
 * change its speed policy, the maximum frequency is clamped to the platform
 * limit.
 *
 * In addition, whenever a new platform limit is detected, the CPUfreq policy
 * is adjusted accordingly.
 */

/* ignore_ppc:
 * -1 -> cpufreq low-level drivers not initialized -> _PSS, etc. not called yet
 *       ignore _PPC
 *  0 -> cpufreq low-level drivers initialized -> take _PPC values into account
 *  1 -> ignore _PPC entirely -> forced by the user via a boot parameter
 */
static int ignore_ppc = -1;
module_param(ignore_ppc, int, 0644);
MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly" \
		 " limited by BIOS, this should help");
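
/*
 * Usage sketch (assumes this file is built into the ACPI "processor" driver,
 * which is the usual configuration): the parameter can then be set at boot
 * time as "processor.ignore_ppc=1", or at runtime via
 * /sys/module/processor/parameters/ignore_ppc, to make the kernel disregard
 * BIOS-imposed _PPC limits.
 */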

static bool acpi_processor_ppc_in_use;

static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
	acpi_status status = 0;
	unsigned long long ppc = 0;
	s32 qos_value;
	int index;
	int ret;

	if (!pr)
		return -EINVAL;

	/*
	 * _PPC indicates the maximum state currently supported by the platform
	 * (e.g. 0 = states 0..n; 1 = states 1..n; etc.).
	 */
	status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc);
	if (status != AE_NOT_FOUND) {
		acpi_processor_ppc_in_use = true;

		if (ACPI_FAILURE(status)) {
			acpi_evaluation_failure_warn(pr->handle, "_PPC", status);
			return -ENODEV;
		}
	}

	index = ppc;

	if (pr->performance_platform_limit == index ||
	    ppc >= pr->performance->state_count)
		return 0;

	pr_debug("CPU %d: _PPC is %d - frequency %s limited\n", pr->id,
		 index, index ? "is" : "is not");

	pr->performance_platform_limit = index;

	if (unlikely(!freq_qos_request_active(&pr->perflib_req)))
		return 0;

	/*
	 * If _PPC returns 0, it means that all of the available states can be
	 * used ("no limit").
	 */
	if (index == 0)
		qos_value = FREQ_QOS_MAX_DEFAULT_VALUE;
	else
		qos_value = pr->performance->states[index].core_frequency * 1000;

	ret = freq_qos_update_request(&pr->perflib_req, qos_value);
	if (ret < 0) {
		pr_warn("Failed to update perflib freq constraint: CPU%d (%d)\n",
			pr->id, ret);
	}

	return 0;
}
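
/*
 * Illustrative example (hypothetical values): with a four-entry _PSS of
 * { 2400, 2000, 1600, 1200 } MHz and _PPC returning 1,
 * performance_platform_limit becomes 1 and the FREQ_QOS_MAX request is
 * updated to states[1].core_frequency * 1000 = 2000000 kHz, so cpufreq will
 * not run the CPU above 2.0 GHz.  A subsequent _PPC of 0 restores
 * FREQ_QOS_MAX_DEFAULT_VALUE ("no limit").
 */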

#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE	0x80
/*
 * acpi_processor_ppc_ost: Notify the platform firmware of the _PPC evaluation status
 * @handle: ACPI processor handle
 * @status: the status code of the _PPC evaluation
 *	0: success. OSPM is now using the performance state specified.
 *	1: failure. OSPM has not changed the number of P-states in use.
 */
static void acpi_processor_ppc_ost(acpi_handle handle, int status)
{
	if (acpi_has_method(handle, "_OST"))
		acpi_evaluate_ost(handle, ACPI_PROCESSOR_NOTIFY_PERFORMANCE,
				  status, NULL);
}

void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
{
	int ret;

	if (ignore_ppc || !pr->performance) {
		/*
		 * Evaluate _OST only when this is a notification event;
		 * otherwise skip it.
		 */
		if (event_flag)
			acpi_processor_ppc_ost(pr->handle, 1);
		return;
	}

	ret = acpi_processor_get_platform_limit(pr);
	/*
	 * Evaluate _OST only when this is a notification event; otherwise
	 * skip it.
	 */
	if (event_flag) {
		if (ret < 0)
			acpi_processor_ppc_ost(pr->handle, 1);
		else
			acpi_processor_ppc_ost(pr->handle, 0);
	}
	if (ret >= 0)
		cpufreq_update_limits(pr->id);
}
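
/*
 * Note (based on the ACPI processor driver, not on this file alone): this
 * function is typically invoked from the processor driver's notify handler
 * when the platform issues Notify(processor, 0x80)
 * (ACPI_PROCESSOR_NOTIFY_PERFORMANCE), with event_flag set so that the
 * outcome is reported back to the firmware via _OST.
 */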

int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
{
	struct acpi_processor *pr;

	pr = per_cpu(processors, cpu);
	if (!pr || !pr->performance || !pr->performance->state_count)
		return -ENODEV;

	*limit = pr->performance->states[pr->performance_platform_limit].
		core_frequency * 1000;
	return 0;
}
EXPORT_SYMBOL(acpi_processor_get_bios_limit);
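
/*
 * Note: _PSS CoreFrequency values are expressed in MHz, so the limit
 * reported above is MHz * 1000, i.e. kHz, matching the units cpufreq uses.
 */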

void acpi_processor_ignore_ppc_init(void)
{
	if (ignore_ppc < 0)
		ignore_ppc = 0;
}

void acpi_processor_ppc_init(struct cpufreq_policy *policy)
{
	unsigned int cpu;

	if (ignore_ppc == 1)
		return;

	for_each_cpu(cpu, policy->related_cpus) {
		struct acpi_processor *pr = per_cpu(processors, cpu);
		int ret;

		if (!pr)
			continue;

		/*
		 * Reset performance_platform_limit in case there is a stale
		 * value in it, so as to make it match the "no limit" QoS value
		 * below.
		 */
		pr->performance_platform_limit = 0;

		ret = freq_qos_add_request(&policy->constraints,
					   &pr->perflib_req, FREQ_QOS_MAX,
					   FREQ_QOS_MAX_DEFAULT_VALUE);
		if (ret < 0)
			pr_err("Failed to add freq constraint for CPU%d (%d)\n",
			       cpu, ret);

		if (!pr->performance)
			continue;

		ret = acpi_processor_get_platform_limit(pr);
		if (ret)
			pr_err("Failed to update freq constraint for CPU%d (%d)\n",
			       cpu, ret);
	}
}

void acpi_processor_ppc_exit(struct cpufreq_policy *policy)
{
	unsigned int cpu;

	for_each_cpu(cpu, policy->related_cpus) {
		struct acpi_processor *pr = per_cpu(processors, cpu);

		if (pr)
			freq_qos_remove_request(&pr->perflib_req);
	}
}

#ifdef CONFIG_X86

static DEFINE_MUTEX(performance_mutex);

static int acpi_processor_get_performance_control(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = 0;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *pct = NULL;
	union acpi_object obj = { 0 };

	status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		acpi_evaluation_failure_warn(pr->handle, "_PCT", status);
		return -ENODEV;
	}

	pct = (union acpi_object *)buffer.pointer;
	if (!pct || pct->type != ACPI_TYPE_PACKAGE || pct->package.count != 2) {
		pr_err("Invalid _PCT data\n");
		result = -EFAULT;
		goto end;
	}

	/*
	 * control_register
	 */

	obj = pct->package.elements[0];

	if (!obj.buffer.pointer || obj.type != ACPI_TYPE_BUFFER ||
	    obj.buffer.length < sizeof(struct acpi_pct_register)) {
		pr_err("Invalid _PCT data (control_register)\n");
		result = -EFAULT;
		goto end;
	}
	memcpy(&pr->performance->control_register, obj.buffer.pointer,
	       sizeof(struct acpi_pct_register));

	/*
	 * status_register
	 */

	obj = pct->package.elements[1];

	if (!obj.buffer.pointer || obj.type != ACPI_TYPE_BUFFER ||
	    obj.buffer.length < sizeof(struct acpi_pct_register)) {
		pr_err("Invalid _PCT data (status_register)\n");
		result = -EFAULT;
		goto end;
	}

	memcpy(&pr->performance->status_register, obj.buffer.pointer,
	       sizeof(struct acpi_pct_register));

end:
	kfree(buffer.pointer);

	return result;
}

/*
 * Some AMD processors support 50 MHz frequency steps, but their ACPI data
 * only provides values rounded to 100 MHz. Calculate the real frequencies
 * from the P-state MSRs and fix up the _PSS data.
 */
static void amd_fixup_frequency(struct acpi_processor_px *px, int i)
{
	u32 hi, lo, fid, did;
	int index = px->control & 0x00000007;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return;

	if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10) ||
	    boot_cpu_data.x86 == 0x11) {
		rdmsr(MSR_AMD_PSTATE_DEF_BASE + index, lo, hi);
		/*
		 * MSR C001_0064+:
		 * Bit 63: PstateEn. Read-write. If set, the P-state is valid.
		 * (Bit 63 of the MSR is bit 31 of the high dword read into
		 * "hi", hence the BIT(31) test below.)
		 */
		if (!(hi & BIT(31)))
			return;

		fid = lo & 0x3f;
		did = (lo >> 6) & 7;
		if (boot_cpu_data.x86 == 0x10)
			px->core_frequency = (100 * (fid + 0x10)) >> did;
		else
			px->core_frequency = (100 * (fid + 8)) >> did;
	}
}
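
/*
 * Illustrative arithmetic (hypothetical register value): on family 0x10,
 * fid = 0x0a and did = 0 give core_frequency = (100 * (0x0a + 0x10)) >> 0 =
 * 2600 MHz, while did = 1 would halve that to 1300 MHz, recovering the 50 MHz
 * granularity that a 100 MHz-rounded _PSS entry cannot express.
 */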

static int acpi_processor_get_performance_states(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_buffer format = { sizeof("NNNNNN"), "NNNNNN" };
	struct acpi_buffer state = { 0, NULL };
	union acpi_object *pss = NULL;
	int i;
	int last_invalid = -1;

	status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		acpi_evaluation_failure_warn(pr->handle, "_PSS", status);
		return -ENODEV;
	}

	pss = buffer.pointer;
	if (!pss || pss->type != ACPI_TYPE_PACKAGE) {
		pr_err("Invalid _PSS data\n");
		result = -EFAULT;
		goto end;
	}

	acpi_handle_debug(pr->handle, "Found %d performance states\n",
			  pss->package.count);

	pr->performance->state_count = pss->package.count;
	pr->performance->states =
	    kmalloc_objs(struct acpi_processor_px, pss->package.count);
	if (!pr->performance->states) {
		result = -ENOMEM;
		goto end;
	}

	for (i = 0; i < pr->performance->state_count; i++) {

		struct acpi_processor_px *px = &(pr->performance->states[i]);

		state.length = sizeof(struct acpi_processor_px);
		state.pointer = px;

		acpi_handle_debug(pr->handle, "Extracting state %d\n", i);

		status = acpi_extract_package(&(pss->package.elements[i]),
					      &format, &state);
		if (ACPI_FAILURE(status)) {
			acpi_handle_warn(pr->handle, "Invalid _PSS data: %s\n",
					 acpi_format_exception(status));
			result = -EFAULT;
			kfree(pr->performance->states);
			goto end;
		}

		amd_fixup_frequency(px, i);

		acpi_handle_debug(pr->handle,
				  "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n",
				  i,
				  (u32) px->core_frequency,
				  (u32) px->power,
				  (u32) px->transition_latency,
				  (u32) px->bus_master_latency,
				  (u32) px->control, (u32) px->status);

		/*
		 * Check that ACPI's u64 MHz will be valid as u32 kHz in cpufreq
		 */
		if (!px->core_frequency ||
		    (u32)(px->core_frequency * 1000) != px->core_frequency * 1000) {
			pr_err(FW_BUG
			       "Invalid BIOS _PSS frequency found for processor %d: 0x%llx MHz\n",
			       pr->id, px->core_frequency);
			if (last_invalid == -1)
				last_invalid = i;
		} else {
			if (last_invalid != -1) {
				/*
				 * Copy this valid entry over last_invalid entry
				 */
				memcpy(&(pr->performance->states[last_invalid]),
				       px, sizeof(struct acpi_processor_px));
				++last_invalid;
			}
		}
	}

	if (last_invalid == 0) {
		pr_err(FW_BUG
		       "No valid BIOS _PSS frequency found for processor %d\n", pr->id);
		result = -EFAULT;
		kfree(pr->performance->states);
		pr->performance->states = NULL;
	}

	if (last_invalid > 0)
		pr->performance->state_count = last_invalid;

end:
	kfree(buffer.pointer);

	return result;
}
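
/*
 * For reference, each _PSS package entry is six integers ("NNNNNN" above):
 * { CoreFrequency (MHz), Power (mW), TransitionLatency (us),
 *   BusMasterLatency (us), Control, Status }, which acpi_extract_package()
 * copies field by field into struct acpi_processor_px.
 */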

int acpi_processor_get_performance_info(struct acpi_processor *pr)
{
	int result = 0;

	if (!pr || !pr->performance || !pr->handle)
		return -EINVAL;

	if (!acpi_has_method(pr->handle, "_PCT")) {
		acpi_handle_debug(pr->handle,
				  "ACPI-based processor performance control unavailable\n");
		return -ENODEV;
	}

	result = acpi_processor_get_performance_control(pr);
	if (result)
		goto update_bios;

	result = acpi_processor_get_performance_states(pr);
	if (result)
		goto update_bios;

	/* We need to call _PPC once when cpufreq starts */
	if (ignore_ppc != 1)
		result = acpi_processor_get_platform_limit(pr);

	return result;

	/*
	 * Having _PPC but missing frequencies (_PSS, _PCT) is a very good hint
	 * that the BIOS is older than the CPU and does not know its
	 * frequencies.
	 */
 update_bios:
	if (acpi_has_method(pr->handle, "_PPC")) {
		if (boot_cpu_has(X86_FEATURE_EST))
			pr_warn(FW_BUG "BIOS needs update for CPU frequency support\n");
	}
	return result;
}
EXPORT_SYMBOL_GPL(acpi_processor_get_performance_info);

int acpi_processor_pstate_control(void)
{
	acpi_status status;

	if (!acpi_gbl_FADT.smi_command || !acpi_gbl_FADT.pstate_control)
		return 0;

	pr_debug("Writing pstate_control [0x%x] to smi_command [0x%x]\n",
		 acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command);

	status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
				    (u32)acpi_gbl_FADT.pstate_control, 8);
	if (ACPI_SUCCESS(status))
		return 1;

	pr_warn("Failed to write pstate_control [0x%x] to smi_command [0x%x]: %s\n",
		acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command,
		acpi_format_exception(status));
	return -EIO;
}
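
/*
 * Summary of the return values above: 1 means the FADT-provided pstate
 * control value was written to the SMI command port, 0 means the FADT does
 * not describe such a port (nothing to do), and -EIO means the port write
 * failed.
 */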

int acpi_processor_notify_smm(struct module *calling_module)
{
	static int is_done;
	int result = 0;

	if (!acpi_processor_cpufreq_init)
		return -EBUSY;

	if (!try_module_get(calling_module))
		return -EINVAL;

	/*
	 * is_done is set to a negative value if an error occurs and to 1 if
	 * no error occurs, but SMM has already been notified. This avoids
	 * repeated notifications, which might lead to unexpected results.
	 */
	if (is_done != 0) {
		if (is_done < 0)
			result = is_done;

		goto out_put;
	}

	result = acpi_processor_pstate_control();
	if (result <= 0) {
		if (result) {
			is_done = result;
		} else {
			pr_debug("No SMI port or pstate_control\n");
			is_done = 1;
		}
		goto out_put;
	}

	is_done = 1;
	/*
	 * Success. If there is _PPC, unloading the cpufreq driver would be
	 * risky, so disallow it in that case.
	 */
	if (acpi_processor_ppc_in_use)
		return 0;

out_put:
	module_put(calling_module);
	return result;
}
EXPORT_SYMBOL(acpi_processor_notify_smm);

int acpi_processor_get_psd(acpi_handle handle, struct acpi_psd_package *pdomain)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;

	status = acpi_evaluate_object(handle, "_PSD", NULL, &buffer);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	psd = buffer.pointer;
	if (!psd || psd->type != ACPI_TYPE_PACKAGE) {
		pr_err("Invalid _PSD data\n");
		result = -EFAULT;
		goto end;
	}

	if (psd->package.count != 1) {
		pr_err("Invalid _PSD data\n");
		result = -EFAULT;
		goto end;
	}

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]), &format, &state);
	if (ACPI_FAILURE(status)) {
		pr_err("Invalid _PSD data\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		pr_err("Unknown _PSD:num_entries\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		pr_err("Unknown _PSD:revision\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pr_err("Invalid _PSD:coord_type\n");
		result = -EFAULT;
		goto end;
	}
end:
	kfree(buffer.pointer);
	return result;
}
EXPORT_SYMBOL(acpi_processor_get_psd);
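
/*
 * For reference, the single _PSD package entry is five integers ("NNNNN"
 * above): { NumEntries (5), Revision (0), Domain, CoordType, NumProcessors },
 * where CoordType is 0xFC (SW_ALL), 0xFD (SW_ANY) or 0xFE (HW_ALL) per the
 * ACPI specification.
 */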

int acpi_processor_preregister_performance(
		struct acpi_processor_performance __percpu *performance)
{
	int count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct acpi_processor *pr;
	struct acpi_psd_package *pdomain;
	struct acpi_processor *match_pr;
	struct acpi_psd_package *match_pdomain;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	mutex_lock(&performance_mutex);

	/*
	 * Check if another driver has already registered, and abort before
	 * changing pr->performance if it has. Check input data as well.
	 */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr) {
			/* Look only at processors in ACPI namespace */
			continue;
		}

		if (pr->performance) {
			retval = -EBUSY;
			goto err_out;
		}

		if (!performance || !per_cpu_ptr(performance, i)) {
			retval = -EINVAL;
			goto err_out;
		}
	}

	/* Call _PSD for all CPUs */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		pr->performance = per_cpu_ptr(performance, i);
		pdomain = &(pr->performance->domain_info);
		if (acpi_processor_get_psd(pr->handle, pdomain)) {
			retval = -EINVAL;
			continue;
		}
	}
	if (retval)
		goto err_ret;

	/*
	 * Now that we have _PSD data from all CPUs, let's set up the
	 * P-state domain info.
	 */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		pdomain = &(pr->performance->domain_info);
		cpumask_set_cpu(i, pr->performance->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pdomain = &(match_pr->performance->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain */

			if (match_pdomain->num_processors != count_target) {
				retval = -EINVAL;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EINVAL;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pr->performance->shared_cpu_map);
		}

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pdomain = &(match_pr->performance->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			match_pr->performance->shared_type =
					pr->performance->shared_type;
			cpumask_copy(match_pr->performance->shared_cpu_map,
				     pr->performance->shared_cpu_map);
		}
	}

err_ret:
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr || !pr->performance)
			continue;

		/* Assume no coordination on any error parsing domain info */
		if (retval) {
			cpumask_clear(pr->performance->shared_cpu_map);
			cpumask_set_cpu(i, pr->performance->shared_cpu_map);
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_NONE;
		}
		pr->performance = NULL; /* Will be set for real in register */
	}

err_out:
	mutex_unlock(&performance_mutex);
	free_cpumask_var(covered_cpus);
	return retval;
}
EXPORT_SYMBOL(acpi_processor_preregister_performance);
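
/*
 * In short, the domain pass above works in two stages for each not-yet-covered
 * CPU: first it collects every other CPU whose _PSD reports the same domain
 * number (validating that num_processors and coord_type agree) into the
 * owner's shared_cpu_map, then it copies the resulting shared_cpu_map and
 * shared_type to every member of the domain.  On any inconsistency, all CPUs
 * fall back to CPUFREQ_SHARED_TYPE_NONE with a single-CPU map.
 */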

int acpi_processor_register_performance(struct acpi_processor_performance
					*performance, unsigned int cpu)
{
	struct acpi_processor *pr;

	if (!acpi_processor_cpufreq_init)
		return -EINVAL;

	mutex_lock(&performance_mutex);

	pr = per_cpu(processors, cpu);
	if (!pr) {
		mutex_unlock(&performance_mutex);
		return -ENODEV;
	}

	if (pr->performance) {
		mutex_unlock(&performance_mutex);
		return -EBUSY;
	}

	WARN_ON(!performance);

	pr->performance = performance;

	if (acpi_processor_get_performance_info(pr)) {
		pr->performance = NULL;
		mutex_unlock(&performance_mutex);
		return -EIO;
	}

	mutex_unlock(&performance_mutex);
	return 0;
}
EXPORT_SYMBOL(acpi_processor_register_performance);

void acpi_processor_unregister_performance(unsigned int cpu)
{
	struct acpi_processor *pr;

	mutex_lock(&performance_mutex);

	pr = per_cpu(processors, cpu);
	if (!pr)
		goto unlock;

	if (pr->performance)
		kfree(pr->performance->states);

	pr->performance = NULL;

unlock:
	mutex_unlock(&performance_mutex);
}
EXPORT_SYMBOL(acpi_processor_unregister_performance);
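
/*
 * Typical usage by an x86 cpufreq driver (e.g. acpi-cpufreq), sketched here
 * for orientation rather than taken verbatim from that driver:
 * acpi_processor_preregister_performance() once to resolve _PSD domains,
 * then acpi_processor_register_performance() per CPU to populate the _PCT
 * registers and the _PSS state table, optionally acpi_processor_notify_smm()
 * to hand P-state control to the OS, and finally
 * acpi_processor_unregister_performance() on driver teardown.
 */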
#endif