xref: /linux/drivers/acpi/processor_throttling.c (revision bd361f5de2b338218c276d17a510701a16075deb)
1 /*
2  * processor_throttling.c - Throttling submodule of the ACPI processor driver
3  *
4  *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
5  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
6  *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
7  *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
8  *  			- Added processor hotplug support
9  *
10  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11  *
12  *  This program is free software; you can redistribute it and/or modify
13  *  it under the terms of the GNU General Public License as published by
14  *  the Free Software Foundation; either version 2 of the License, or (at
15  *  your option) any later version.
16  *
17  *  This program is distributed in the hope that it will be useful, but
18  *  WITHOUT ANY WARRANTY; without even the implied warranty of
19  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
20  *  General Public License for more details.
21  *
22  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
23  */
24 
25 #include <linux/kernel.h>
26 #include <linux/module.h>
27 #include <linux/slab.h>
28 #include <linux/init.h>
29 #include <linux/sched.h>
30 #include <linux/cpufreq.h>
31 #include <linux/acpi.h>
32 #include <acpi/processor.h>
33 #include <asm/io.h>
34 #include <linux/uaccess.h>
35 
36 #define PREFIX "ACPI: "
37 
38 #define ACPI_PROCESSOR_CLASS            "processor"
39 #define _COMPONENT              ACPI_PROCESSOR_COMPONENT
40 ACPI_MODULE_NAME("processor_throttling");
41 
42 /* ignore_tpc:
43  *  0 -> acpi processor driver doesn't ignore _TPC values
44  *  1 -> acpi processor driver ignores _TPC values
45  */
46 static int ignore_tpc;
47 module_param(ignore_tpc, int, 0644);
48 MODULE_PARM_DESC(ignore_tpc, "Disable broken BIOS _TPC throttling support");
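/*
 * Usage note (a sketch, assuming this file is built as part of the
 * "processor" module): the parameter can be set at boot time with
 * "processor.ignore_tpc=1" on the kernel command line, or toggled at
 * run time via /sys/module/processor/parameters/ignore_tpc, since
 * module_param() above exposes it with 0644 permissions.
 */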
49 
50 struct throttling_tstate {
51 	unsigned int cpu;		/* cpu nr */
52 	int target_state;		/* target T-state */
53 };
54 
55 struct acpi_processor_throttling_arg {
56 	struct acpi_processor *pr;
57 	int target_state;
58 	bool force;
59 };
60 
61 #define THROTTLING_PRECHANGE       (1)
62 #define THROTTLING_POSTCHANGE      (2)
63 
64 static int acpi_processor_get_throttling(struct acpi_processor *pr);
65 int acpi_processor_set_throttling(struct acpi_processor *pr,
66 						int state, bool force);
67 
68 static int acpi_processor_update_tsd_coord(void)
69 {
70 	int count, count_target;
71 	int retval = 0;
72 	unsigned int i, j;
73 	cpumask_var_t covered_cpus;
74 	struct acpi_processor *pr, *match_pr;
75 	struct acpi_tsd_package *pdomain, *match_pdomain;
76 	struct acpi_processor_throttling *pthrottling, *match_pthrottling;
77 
78 	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
79 		return -ENOMEM;
80 
81 	/*
82 	 * Now that we have _TSD data from all CPUs, let's set up T-state
83 	 * coordination between all CPUs.
84 	 */
85 	for_each_possible_cpu(i) {
86 		pr = per_cpu(processors, i);
87 		if (!pr)
88 			continue;
89 
90 		/* Basic validity check for domain info */
91 		pthrottling = &(pr->throttling);
92 
93 		/*
94 		 * If the _TSD package for any CPU is invalid, the coordination
95 		 * among all CPUs is treated as invalid.
96 		 * This may be overly strict, but it keeps the logic simple.
97 		 */
98 		if (!pthrottling->tsd_valid_flag) {
99 			retval = -EINVAL;
100 			break;
101 		}
102 	}
103 	if (retval)
104 		goto err_ret;
105 
106 	for_each_possible_cpu(i) {
107 		pr = per_cpu(processors, i);
108 		if (!pr)
109 			continue;
110 
111 		if (cpumask_test_cpu(i, covered_cpus))
112 			continue;
113 		pthrottling = &pr->throttling;
114 
115 		pdomain = &(pthrottling->domain_info);
116 		cpumask_set_cpu(i, pthrottling->shared_cpu_map);
117 		cpumask_set_cpu(i, covered_cpus);
118 		/*
119 		 * If the number of processors in the _TSD domain is 1, it is
120 		 * unnecessary to parse the coordination for this CPU.
121 		 */
122 		if (pdomain->num_processors <= 1)
123 			continue;
124 
125 		/* Validate the Domain info */
126 		count_target = pdomain->num_processors;
127 		count = 1;
128 
129 		for_each_possible_cpu(j) {
130 			if (i == j)
131 				continue;
132 
133 			match_pr = per_cpu(processors, j);
134 			if (!match_pr)
135 				continue;
136 
137 			match_pthrottling = &(match_pr->throttling);
138 			match_pdomain = &(match_pthrottling->domain_info);
139 			if (match_pdomain->domain != pdomain->domain)
140 				continue;
141 
142 			/* Here i and j are in the same domain.
143 			 * If two _TSD packages have the same domain, they
144 			 * should have the same num_processors and
145 			 * coordination type. Otherwise the domain info is
146 			 * regarded as invalid.
147 			 */
148 			if (match_pdomain->num_processors != count_target) {
149 				retval = -EINVAL;
150 				goto err_ret;
151 			}
152 
153 			if (pdomain->coord_type != match_pdomain->coord_type) {
154 				retval = -EINVAL;
155 				goto err_ret;
156 			}
157 
158 			cpumask_set_cpu(j, covered_cpus);
159 			cpumask_set_cpu(j, pthrottling->shared_cpu_map);
160 			count++;
161 		}
162 		for_each_possible_cpu(j) {
163 			if (i == j)
164 				continue;
165 
166 			match_pr = per_cpu(processors, j);
167 			if (!match_pr)
168 				continue;
169 
170 			match_pthrottling = &(match_pr->throttling);
171 			match_pdomain = &(match_pthrottling->domain_info);
172 			if (match_pdomain->domain != pdomain->domain)
173 				continue;
174 
175 			/*
176 			 * If several CPUs belong to the same domain, they
177 			 * will have the same shared_cpu_map.
178 			 */
179 			cpumask_copy(match_pthrottling->shared_cpu_map,
180 				     pthrottling->shared_cpu_map);
181 		}
182 	}
183 
184 err_ret:
185 	free_cpumask_var(covered_cpus);
186 
187 	for_each_possible_cpu(i) {
188 		pr = per_cpu(processors, i);
189 		if (!pr)
190 			continue;
191 
192 		/*
193 		 * Assume no coordination on any error parsing domain info.
194 		 * The coordination type will be forced as SW_ALL.
195 		 */
196 		if (retval) {
197 			pthrottling = &(pr->throttling);
198 			cpumask_clear(pthrottling->shared_cpu_map);
199 			cpumask_set_cpu(i, pthrottling->shared_cpu_map);
200 			pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
201 		}
202 	}
203 
204 	return retval;
205 }
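/*
 * Illustrative example of what the matching above produces (made-up
 * firmware values): if CPU0 and CPU1 both expose a _TSD package with
 * Domain = 0, NumProcessors = 2 and CoordType = 0xFC
 * (DOMAIN_COORD_TYPE_SW_ALL), both CPUs end up with
 * shared_cpu_map = {0, 1}.  Any mismatch in NumProcessors or CoordType
 * within a domain invalidates the whole coordination and every CPU
 * falls back to a private shared_cpu_map with SW_ALL.
 */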
206 
207 /*
208  * Update the T-state coordination after the _TSD
209  * data for all CPUs is obtained.
210  */
211 void acpi_processor_throttling_init(void)
212 {
213 	if (acpi_processor_update_tsd_coord()) {
214 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
215 			"Assume no T-state coordination\n"));
216 	}
217 
218 	return;
219 }
220 
221 static int acpi_processor_throttling_notifier(unsigned long event, void *data)
222 {
223 	struct throttling_tstate *p_tstate = data;
224 	struct acpi_processor *pr;
225 	unsigned int cpu;
226 	int target_state;
227 	struct acpi_processor_limit *p_limit;
228 	struct acpi_processor_throttling *p_throttling;
229 
230 	cpu = p_tstate->cpu;
231 	pr = per_cpu(processors, cpu);
232 	if (!pr) {
233 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid pr pointer\n"));
234 		return 0;
235 	}
236 	if (!pr->flags.throttling) {
237 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Throttling control is "
238 				"unsupported on CPU %d\n", cpu));
239 		return 0;
240 	}
241 	target_state = p_tstate->target_state;
242 	p_throttling = &(pr->throttling);
243 	switch (event) {
244 	case THROTTLING_PRECHANGE:
245 		/*
246 		 * The prechange event is used to choose a proper T-state,
247 		 * which meets the limits of thermal, user and _TPC.
248 		 */
249 		p_limit = &pr->limit;
250 		if (p_limit->thermal.tx > target_state)
251 			target_state = p_limit->thermal.tx;
252 		if (p_limit->user.tx > target_state)
253 			target_state = p_limit->user.tx;
254 		if (pr->throttling_platform_limit > target_state)
255 			target_state = pr->throttling_platform_limit;
256 		if (target_state >= p_throttling->state_count) {
257 			printk(KERN_WARNING
258 				"Requested T-state exceeds the limit\n");
259 			target_state = p_throttling->state_count - 1;
260 		}
261 		p_tstate->target_state = target_state;
262 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PreChange Event: "
263 				"target T-state of CPU %d is T%d\n",
264 				cpu, target_state));
265 		break;
266 	case THROTTLING_POSTCHANGE:
267 		/*
268 		 * The postchange event is only used to update the
269 		 * T-state flag of acpi_processor_throttling.
270 		 */
271 		p_throttling->state = target_state;
272 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PostChange Event: "
273 				"CPU %d is switched to T%d\n",
274 				cpu, target_state));
275 		break;
276 	default:
277 		printk(KERN_WARNING
278 			"Unsupported Throttling notifier event\n");
279 		break;
280 	}
281 
282 	return 0;
283 }
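/*
 * Worked example for the PRECHANGE path above (made-up numbers): with a
 * requested target of T1, a thermal limit of T2, a user limit of T0 and
 * a _TPC platform limit of 3, the notifier raises the target to T3,
 * i.e. the most throttled of the four.  If that exceeded
 * state_count - 1, it would be clamped to the deepest available T-state.
 */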
284 
285 /*
286  * _TPC - Throttling Present Capabilities
287  */
288 static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
289 {
290 	acpi_status status = 0;
291 	unsigned long long tpc = 0;
292 
293 	if (!pr)
294 		return -EINVAL;
295 
296 	if (ignore_tpc)
297 		goto end;
298 
299 	status = acpi_evaluate_integer(pr->handle, "_TPC", NULL, &tpc);
300 	if (ACPI_FAILURE(status)) {
301 		if (status != AE_NOT_FOUND) {
302 			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TPC"));
303 		}
304 		return -ENODEV;
305 	}
306 
307 end:
308 	pr->throttling_platform_limit = (int)tpc;
309 	return 0;
310 }
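/*
 * Example of how the limit is applied (a reading of the code below, not
 * a quote from the spec): a _TPC value of 2 means only T2 and
 * higher-numbered (more throttled) states may be used; the FADT and PTC
 * set_throttling helpers below return -EPERM for a request of T0 or T1
 * because state < pr->throttling_platform_limit.
 */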
311 
312 int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
313 {
314 	int result = 0;
315 	int throttling_limit;
316 	int current_state;
317 	struct acpi_processor_limit *limit;
318 	int target_state;
319 
320 	if (ignore_tpc)
321 		return 0;
322 
323 	result = acpi_processor_get_platform_limit(pr);
324 	if (result) {
325 		/* Throttling Limit is unsupported */
326 		return result;
327 	}
328 
329 	throttling_limit = pr->throttling_platform_limit;
330 	if (throttling_limit >= pr->throttling.state_count) {
331 		/* Incorrect throttling limit */
332 		return -EINVAL;
333 	}
334 
335 	current_state = pr->throttling.state;
336 	if (current_state > throttling_limit) {
337 		/*
338 		 * The current state already satisfies the _TPC limit.
339 		 * Still, it is reasonable for OSPM to switch to a lower
340 		 * (less throttled) T-state for better performance.
341 		 * Of course, the thermal and user limits still have to
342 		 * be taken into account.
343 		 */
344 		limit = &pr->limit;
345 		target_state = throttling_limit;
346 		if (limit->thermal.tx > target_state)
347 			target_state = limit->thermal.tx;
348 		if (limit->user.tx > target_state)
349 			target_state = limit->user.tx;
350 	} else if (current_state == throttling_limit) {
351 		/*
352 		 * Unnecessary to change the throttling state
353 		 */
354 		return 0;
355 	} else {
356 		/*
357 		 * If the current state is lower than the _TPC limit, force a
358 		 * switch to the throttling state defined by
359 		 * throttling_platform_limit.
360 		 * Because the previous state already met the thermal and user
361 		 * limits, there is no need to check them again.
362 		 */
363 		target_state = throttling_limit;
364 	}
365 	return acpi_processor_set_throttling(pr, target_state, false);
366 }
367 
368 /*
369  * This function reevaluates whether the T-state is valid after a CPU
370  * is onlined/offlined.
371  * Note that it does not reevaluate the following properties of the
372  * T-state:
373  *	1. the control method
374  *	2. the number of supported T-states
375  *	3. the _TSD domain
376  */
377 void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
378 					bool is_dead)
379 {
380 	int result = 0;
381 
382 	if (is_dead) {
383 		/* When a CPU goes offline, its T-state throttling
384 		 * is invalidated.
385 		 */
386 		pr->flags.throttling = 0;
387 		return;
388 	}
389 	/* The following rechecks whether the T-state is valid for
390 	 * the newly onlined CPU.
391 	 */
392 	if (!pr->throttling.state_count) {
393 		/* If the number of T-states is zero, throttling
394 		 * is invalidated.
395 		 */
396 		pr->flags.throttling = 0;
397 		return;
398 	}
399 	pr->flags.throttling = 1;
400 
401 	/* Disable throttling (if enabled).  We'll let subsequent
402 	 * policy (e.g. thermal) decide to lower performance if it
403 	 * so chooses, but for now we'll crank up the speed.
404 	 */
405 
406 	result = acpi_processor_get_throttling(pr);
407 	if (result)
408 		goto end;
409 
410 	if (pr->throttling.state) {
411 		result = acpi_processor_set_throttling(pr, 0, false);
412 		if (result)
413 			goto end;
414 	}
415 
416 end:
417 	if (result)
418 		pr->flags.throttling = 0;
419 }
420 /*
421  * _PTC - Processor Throttling Control (and status) register location
422  */
423 static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
424 {
425 	int result = 0;
426 	acpi_status status = 0;
427 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
428 	union acpi_object *ptc = NULL;
429 	union acpi_object obj = { 0 };
430 	struct acpi_processor_throttling *throttling;
431 
432 	status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer);
433 	if (ACPI_FAILURE(status)) {
434 		if (status != AE_NOT_FOUND) {
435 			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PTC"));
436 		}
437 		return -ENODEV;
438 	}
439 
440 	ptc = (union acpi_object *)buffer.pointer;
441 	if (!ptc || (ptc->type != ACPI_TYPE_PACKAGE)
442 	    || (ptc->package.count != 2)) {
443 		printk(KERN_ERR PREFIX "Invalid _PTC data\n");
444 		result = -EFAULT;
445 		goto end;
446 	}
447 
448 	/*
449 	 * control_register
450 	 */
451 
452 	obj = ptc->package.elements[0];
453 
454 	if ((obj.type != ACPI_TYPE_BUFFER)
455 	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
456 	    || (obj.buffer.pointer == NULL)) {
457 		printk(KERN_ERR PREFIX
458 		       "Invalid _PTC data (control_register)\n");
459 		result = -EFAULT;
460 		goto end;
461 	}
462 	memcpy(&pr->throttling.control_register, obj.buffer.pointer,
463 	       sizeof(struct acpi_ptc_register));
464 
465 	/*
466 	 * status_register
467 	 */
468 
469 	obj = ptc->package.elements[1];
470 
471 	if ((obj.type != ACPI_TYPE_BUFFER)
472 	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
473 	    || (obj.buffer.pointer == NULL)) {
474 		printk(KERN_ERR PREFIX "Invalid _PTC data (status_register)\n");
475 		result = -EFAULT;
476 		goto end;
477 	}
478 
479 	memcpy(&pr->throttling.status_register, obj.buffer.pointer,
480 	       sizeof(struct acpi_ptc_register));
481 
482 	throttling = &pr->throttling;
483 
484 	if ((throttling->control_register.bit_width +
485 		throttling->control_register.bit_offset) > 32) {
486 		printk(KERN_ERR PREFIX "Invalid _PTC control register\n");
487 		result = -EFAULT;
488 		goto end;
489 	}
490 
491 	if ((throttling->status_register.bit_width +
492 		throttling->status_register.bit_offset) > 32) {
493 		printk(KERN_ERR PREFIX "Invalid _PTC status register\n");
494 		result = -EFAULT;
495 		goto end;
496 	}
497 
498 end:
499 	kfree(buffer.pointer);
500 
501 	return result;
502 }
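/*
 * For reference: _PTC is expected to return a two-element package, each
 * element a Buffer wrapping a generic register descriptor, giving the
 * throttling control register and the throttling status register in
 * that order.  The address spaces this driver then handles are
 * SystemIO and FunctionalFixedHardware (the latter via the MSR helpers
 * further down).
 */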
503 
504 /*
505  * _TSS - Throttling Supported States
506  */
507 static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
508 {
509 	int result = 0;
510 	acpi_status status = AE_OK;
511 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
512 	struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
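	/*
	 * "NNNNN" describes the five integer fields of each _TSS entry:
	 * frequency percentage, power, transition latency, control value
	 * and status value, which acpi_extract_package() copies into
	 * struct acpi_processor_tx_tss below (only freqpercentage and
	 * control are referenced directly in this file).
	 */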
513 	struct acpi_buffer state = { 0, NULL };
514 	union acpi_object *tss = NULL;
515 	int i;
516 
517 	status = acpi_evaluate_object(pr->handle, "_TSS", NULL, &buffer);
518 	if (ACPI_FAILURE(status)) {
519 		if (status != AE_NOT_FOUND) {
520 			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSS"));
521 		}
522 		return -ENODEV;
523 	}
524 
525 	tss = buffer.pointer;
526 	if (!tss || (tss->type != ACPI_TYPE_PACKAGE)) {
527 		printk(KERN_ERR PREFIX "Invalid _TSS data\n");
528 		result = -EFAULT;
529 		goto end;
530 	}
531 
532 	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
533 			  tss->package.count));
534 
535 	pr->throttling.state_count = tss->package.count;
536 	pr->throttling.states_tss =
537 	    kmalloc(sizeof(struct acpi_processor_tx_tss) * tss->package.count,
538 		    GFP_KERNEL);
539 	if (!pr->throttling.states_tss) {
540 		result = -ENOMEM;
541 		goto end;
542 	}
543 
544 	for (i = 0; i < pr->throttling.state_count; i++) {
545 
546 		struct acpi_processor_tx_tss *tx =
547 		    (struct acpi_processor_tx_tss *)&(pr->throttling.
548 						      states_tss[i]);
549 
550 		state.length = sizeof(struct acpi_processor_tx_tss);
551 		state.pointer = tx;
552 
553 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));
554 
555 		status = acpi_extract_package(&(tss->package.elements[i]),
556 					      &format, &state);
557 		if (ACPI_FAILURE(status)) {
558 			ACPI_EXCEPTION((AE_INFO, status, "Invalid _TSS data"));
559 			result = -EFAULT;
560 			kfree(pr->throttling.states_tss);
561 			goto end;
562 		}
563 
564 		if (!tx->freqpercentage) {
565 			printk(KERN_ERR PREFIX
566 			       "Invalid _TSS data: freq is zero\n");
567 			result = -EFAULT;
568 			kfree(pr->throttling.states_tss);
569 			goto end;
570 		}
571 	}
572 
573 end:
574 	kfree(buffer.pointer);
575 
576 	return result;
577 }
578 
579 /*
580  * _TSD - T-State Dependencies
581  */
582 static int acpi_processor_get_tsd(struct acpi_processor *pr)
583 {
584 	int result = 0;
585 	acpi_status status = AE_OK;
586 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
587 	struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
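	/*
	 * Here "NNNNN" covers the five integers of the _TSD entry:
	 * NumEntries, Revision, Domain, CoordType and NumProcessors,
	 * matching the acpi_tsd_package fields validated below.
	 */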
588 	struct acpi_buffer state = { 0, NULL };
589 	union acpi_object *tsd = NULL;
590 	struct acpi_tsd_package *pdomain;
591 	struct acpi_processor_throttling *pthrottling;
592 
593 	pthrottling = &pr->throttling;
594 	pthrottling->tsd_valid_flag = 0;
595 
596 	status = acpi_evaluate_object(pr->handle, "_TSD", NULL, &buffer);
597 	if (ACPI_FAILURE(status)) {
598 		if (status != AE_NOT_FOUND) {
599 			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSD"));
600 		}
601 		return -ENODEV;
602 	}
603 
604 	tsd = buffer.pointer;
605 	if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) {
606 		printk(KERN_ERR PREFIX "Invalid _TSD data\n");
607 		result = -EFAULT;
608 		goto end;
609 	}
610 
611 	if (tsd->package.count != 1) {
612 		printk(KERN_ERR PREFIX "Invalid _TSD data\n");
613 		result = -EFAULT;
614 		goto end;
615 	}
616 
617 	pdomain = &(pr->throttling.domain_info);
618 
619 	state.length = sizeof(struct acpi_tsd_package);
620 	state.pointer = pdomain;
621 
622 	status = acpi_extract_package(&(tsd->package.elements[0]),
623 				      &format, &state);
624 	if (ACPI_FAILURE(status)) {
625 		printk(KERN_ERR PREFIX "Invalid _TSD data\n");
626 		result = -EFAULT;
627 		goto end;
628 	}
629 
630 	if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) {
631 		printk(KERN_ERR PREFIX "Unknown _TSD:num_entries\n");
632 		result = -EFAULT;
633 		goto end;
634 	}
635 
636 	if (pdomain->revision != ACPI_TSD_REV0_REVISION) {
637 		printk(KERN_ERR PREFIX "Unknown _TSD:revision\n");
638 		result = -EFAULT;
639 		goto end;
640 	}
641 
642 	pthrottling = &pr->throttling;
643 	pthrottling->tsd_valid_flag = 1;
644 	pthrottling->shared_type = pdomain->coord_type;
645 	cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
646 	/*
647 	 * If the coordination type is not defined in the ACPI spec,
648 	 * tsd_valid_flag will be cleared and the coordination type
649 	 * will be forced to DOMAIN_COORD_TYPE_SW_ALL.
650 	 */
651 	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
652 		pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
653 		pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
654 		pthrottling->tsd_valid_flag = 0;
655 		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
656 	}
657 
658 end:
659 	kfree(buffer.pointer);
660 	return result;
661 }
662 
663 /* --------------------------------------------------------------------------
664                               Throttling Control
665    -------------------------------------------------------------------------- */
666 static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
667 {
668 	int state = 0;
669 	u32 value = 0;
670 	u32 duty_mask = 0;
671 	u32 duty_value = 0;
672 
673 	if (!pr)
674 		return -EINVAL;
675 
676 	if (!pr->flags.throttling)
677 		return -ENODEV;
678 
679 	/*
680 	 * We don't care about error returns - we just try to mark
681 	 * these reserved so that nobody else is confused into thinking
682 	 * that this region might be unused.
683 	 *
684 	 * (In particular, allocating the IO range for Cardbus)
685 	 */
686 	request_region(pr->throttling.address, 6, "ACPI CPU throttle");
687 
688 	pr->throttling.state = 0;
689 
690 	duty_mask = pr->throttling.state_count - 1;
691 
692 	duty_mask <<= pr->throttling.duty_offset;
693 
694 	local_irq_disable();
695 
696 	value = inl(pr->throttling.address);
697 
698 	/*
699 	 * Compute the current throttling state when throttling is enabled
700 	 * (bit 4 is on).
701 	 */
702 	if (value & 0x10) {
703 		duty_value = value & duty_mask;
704 		duty_value >>= pr->throttling.duty_offset;
705 
706 		if (duty_value)
707 			state = pr->throttling.state_count - duty_value;
708 	}
709 
710 	pr->throttling.state = state;
711 
712 	local_irq_enable();
713 
714 	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
715 			  "Throttling state is T%d (%d%% throttling applied)\n",
716 			  state, pr->throttling.states[state].performance));
717 
718 	return 0;
719 }
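/*
 * Worked example for the FADT path above (made-up numbers): with
 * duty_width = 3, acpi_processor_get_fadt_info() below sets
 * state_count = 8.  If the P_CNT read returns a value with bit 4 set
 * and a duty field of 6 (duty_value = 6 after shifting out
 * duty_offset), the current state is 8 - 6 = T2, i.e. the clock runs
 * for 6 of every 8 intervals.
 */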
720 
721 #ifdef CONFIG_X86
722 static int acpi_throttling_rdmsr(u64 *value)
723 {
724 	u64 msr_high, msr_low;
725 	u64 msr = 0;
726 	int ret = -1;
727 
728 	if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) ||
729 		!this_cpu_has(X86_FEATURE_ACPI)) {
730 		printk(KERN_ERR PREFIX
731 			"HARDWARE addr space, NOT supported yet\n");
732 	} else {
733 		msr_low = 0;
734 		msr_high = 0;
735 		rdmsr_safe(MSR_IA32_THERM_CONTROL,
736 			(u32 *)&msr_low, (u32 *)&msr_high);
737 		msr = (msr_high << 32) | msr_low;
738 		*value = (u64) msr;
739 		ret = 0;
740 	}
741 	return ret;
742 }
743 
744 static int acpi_throttling_wrmsr(u64 value)
745 {
746 	int ret = -1;
747 	u64 msr;
748 
749 	if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) ||
750 		!this_cpu_has(X86_FEATURE_ACPI)) {
751 		printk(KERN_ERR PREFIX
752 			"HARDWARE addr space, NOT supported yet\n");
753 	} else {
754 		msr = value;
755 		wrmsr_safe(MSR_IA32_THERM_CONTROL,
756 			msr & 0xffffffff, msr >> 32);
757 		ret = 0;
758 	}
759 	return ret;
760 }
761 #else
762 static int acpi_throttling_rdmsr(u64 *value)
763 {
764 	printk(KERN_ERR PREFIX
765 		"HARDWARE addr space, NOT supported yet\n");
766 	return -1;
767 }
768 
769 static int acpi_throttling_wrmsr(u64 value)
770 {
771 	printk(KERN_ERR PREFIX
772 		"HARDWARE addr space, NOT supported yet\n");
773 	return -1;
774 }
775 #endif
776 
777 static int acpi_read_throttling_status(struct acpi_processor *pr,
778 					u64 *value)
779 {
780 	u32 bit_width, bit_offset;
781 	u32 ptc_value;
782 	u64 ptc_mask;
783 	struct acpi_processor_throttling *throttling;
784 	int ret = -1;
785 
786 	throttling = &pr->throttling;
787 	switch (throttling->status_register.space_id) {
788 	case ACPI_ADR_SPACE_SYSTEM_IO:
789 		bit_width = throttling->status_register.bit_width;
790 		bit_offset = throttling->status_register.bit_offset;
791 
792 		acpi_os_read_port((acpi_io_address) throttling->status_register.
793 				  address, &ptc_value,
794 				  (u32) (bit_width + bit_offset));
795 		ptc_mask = (1 << bit_width) - 1;
796 		*value = (u64) ((ptc_value >> bit_offset) & ptc_mask);
797 		ret = 0;
798 		break;
799 	case ACPI_ADR_SPACE_FIXED_HARDWARE:
800 		ret = acpi_throttling_rdmsr(value);
801 		break;
802 	default:
803 		printk(KERN_ERR PREFIX "Unknown addr space %d\n",
804 		       (u32) (throttling->status_register.space_id));
805 	}
806 	return ret;
807 }
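/*
 * Bit-twiddling example for the SystemIO case above (hypothetical
 * values): with bit_offset = 1 and bit_width = 4 the port read asks
 * for 5 bits, ptc_mask becomes (1 << 4) - 1 = 0xf, and a raw ptc_value
 * of 0x16 (0b10110) yields *value = (0x16 >> 1) & 0xf = 0xb.
 */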
808 
809 static int acpi_write_throttling_state(struct acpi_processor *pr,
810 				u64 value)
811 {
812 	u32 bit_width, bit_offset;
813 	u64 ptc_value;
814 	u64 ptc_mask;
815 	struct acpi_processor_throttling *throttling;
816 	int ret = -1;
817 
818 	throttling = &pr->throttling;
819 	switch (throttling->control_register.space_id) {
820 	case ACPI_ADR_SPACE_SYSTEM_IO:
821 		bit_width = throttling->control_register.bit_width;
822 		bit_offset = throttling->control_register.bit_offset;
823 		ptc_mask = (1 << bit_width) - 1;
824 		ptc_value = value & ptc_mask;
825 
826 		acpi_os_write_port((acpi_io_address) throttling->
827 					control_register.address,
828 					(u32) (ptc_value << bit_offset),
829 					(u32) (bit_width + bit_offset));
830 		ret = 0;
831 		break;
832 	case ACPI_ADR_SPACE_FIXED_HARDWARE:
833 		ret = acpi_throttling_wrmsr(value);
834 		break;
835 	default:
836 		printk(KERN_ERR PREFIX "Unknown addr space %d\n",
837 		       (u32) (throttling->control_register.space_id));
838 	}
839 	return ret;
840 }
841 
842 static int acpi_get_throttling_state(struct acpi_processor *pr,
843 				u64 value)
844 {
845 	int i;
846 
847 	for (i = 0; i < pr->throttling.state_count; i++) {
848 		struct acpi_processor_tx_tss *tx =
849 		    (struct acpi_processor_tx_tss *)&(pr->throttling.
850 						      states_tss[i]);
851 		if (tx->control == value)
852 			return i;
853 	}
854 	return -1;
855 }
856 
857 static int acpi_get_throttling_value(struct acpi_processor *pr,
858 			int state, u64 *value)
859 {
860 	int ret = -1;
861 
862 	if (state >= 0 && state < pr->throttling.state_count) {
863 		struct acpi_processor_tx_tss *tx =
864 		    (struct acpi_processor_tx_tss *)&(pr->throttling.
865 						      states_tss[state]);
866 		*value = tx->control;
867 		ret = 0;
868 	}
869 	return ret;
870 }
871 
872 static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
873 {
874 	int state = 0;
875 	int ret;
876 	u64 value;
877 
878 	if (!pr)
879 		return -EINVAL;
880 
881 	if (!pr->flags.throttling)
882 		return -ENODEV;
883 
884 	pr->throttling.state = 0;
885 
886 	value = 0;
887 	ret = acpi_read_throttling_status(pr, &value);
888 	if (ret >= 0) {
889 		state = acpi_get_throttling_state(pr, value);
890 		if (state == -1) {
891 			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
892 				"Invalid throttling state, reset\n"));
893 			state = 0;
894 			ret = acpi_processor_set_throttling(pr, state, true);
895 			if (ret)
896 				return ret;
897 		}
898 		pr->throttling.state = state;
899 	}
900 
901 	return 0;
902 }
903 
904 static int acpi_processor_get_throttling(struct acpi_processor *pr)
905 {
906 	cpumask_var_t saved_mask;
907 	int ret;
908 
909 	if (!pr)
910 		return -EINVAL;
911 
912 	if (!pr->flags.throttling)
913 		return -ENODEV;
914 
915 	if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
916 		return -ENOMEM;
917 
918 	/*
919 	 * Migrate the task to the CPU pointed to by pr->id.
920 	 */
921 	cpumask_copy(saved_mask, &current->cpus_allowed);
922 	/* FIXME: use work_on_cpu() */
923 	if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
924 		/* Can't migrate to the target pr->id CPU. Exit */
925 		free_cpumask_var(saved_mask);
926 		return -ENODEV;
927 	}
928 	ret = pr->throttling.acpi_processor_get_throttling(pr);
929 	/* restore the previous state */
930 	set_cpus_allowed_ptr(current, saved_mask);
931 	free_cpumask_var(saved_mask);
932 
933 	return ret;
934 }
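/*
 * A possible shape for the FIXME above, mirroring the work_on_cpu()
 * pattern that acpi_processor_set_throttling() already uses below
 * (hypothetical helper, shown only as a sketch):
 *
 *	static long __acpi_processor_get_throttling(void *data)
 *	{
 *		struct acpi_processor *pr = data;
 *
 *		return pr->throttling.acpi_processor_get_throttling(pr);
 *	}
 *
 *	ret = work_on_cpu(pr->id, __acpi_processor_get_throttling, pr);
 */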
935 
936 static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
937 {
938 	int i, step;
939 
940 	if (!pr->throttling.address) {
941 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n"));
942 		return -EINVAL;
943 	} else if (!pr->throttling.duty_width) {
944 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n"));
945 		return -EINVAL;
946 	}
947 	/* TBD: Support duty_cycle values that span bit 4. */
948 	else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) {
949 		printk(KERN_WARNING PREFIX "duty_cycle spans bit 4\n");
950 		return -EINVAL;
951 	}
952 
953 	pr->throttling.state_count = 1 << acpi_gbl_FADT.duty_width;
954 
955 	/*
956 	 * Compute state values. Note that throttling displays a linear power
957 	 * performance relationship (at 50% performance the CPU will consume
958 	 * 50% power).  Values are in 1/10th of a percent to preserve accuracy.
959 	 */
960 
961 	step = (1000 / pr->throttling.state_count);
962 
963 	for (i = 0; i < pr->throttling.state_count; i++) {
964 		pr->throttling.states[i].performance = 1000 - step * i;
965 		pr->throttling.states[i].power = 1000 - step * i;
966 	}
967 	return 0;
968 }
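/*
 * Example state table produced by the loop above for duty_width = 3
 * (state_count = 8, step = 125, values in 1/10th of a percent):
 *	T0 = 1000 (100%), T1 = 875, T2 = 750, ..., T7 = 125 (12.5%).
 * Power scales the same way because the relationship is assumed linear.
 */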
969 
970 static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
971 					      int state, bool force)
972 {
973 	u32 value = 0;
974 	u32 duty_mask = 0;
975 	u32 duty_value = 0;
976 
977 	if (!pr)
978 		return -EINVAL;
979 
980 	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
981 		return -EINVAL;
982 
983 	if (!pr->flags.throttling)
984 		return -ENODEV;
985 
986 	if (!force && (state == pr->throttling.state))
987 		return 0;
988 
989 	if (state < pr->throttling_platform_limit)
990 		return -EPERM;
991 	/*
992 	 * Calculate the duty_value and duty_mask.
993 	 */
994 	if (state) {
995 		duty_value = pr->throttling.state_count - state;
996 
997 		duty_value <<= pr->throttling.duty_offset;
998 
999 		/* Used to clear all duty_value bits */
1000 		duty_mask = pr->throttling.state_count - 1;
1001 
1002 		duty_mask <<= acpi_gbl_FADT.duty_offset;
1003 		duty_mask = ~duty_mask;
1004 	}
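	/*
	 * Worked example (hypothetical numbers): for state = T2 with
	 * state_count = 8 and duty_offset = 1, duty_value = (8 - 2) << 1 =
	 * 0b1100 and duty_mask = ~(7 << 1) = ~0b1110, so the writes below
	 * first clear the duty field and then program a duty of 6/8.
	 */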
1005 
1006 	local_irq_disable();
1007 
1008 	/*
1009 	 * Disable throttling by writing a 0 to bit 4.  Note that we must
1010 	 * turn it off before the duty_value can be changed.
1011 	 */
1012 	value = inl(pr->throttling.address);
1013 	if (value & 0x10) {
1014 		value &= 0xFFFFFFEF;
1015 		outl(value, pr->throttling.address);
1016 	}
1017 
1018 	/*
1019 	 * Write the new duty_value and then enable throttling.  Note
1020 	 * that a state value of 0 leaves throttling disabled.
1021 	 */
1022 	if (state) {
1023 		value &= duty_mask;
1024 		value |= duty_value;
1025 		outl(value, pr->throttling.address);
1026 
1027 		value |= 0x00000010;
1028 		outl(value, pr->throttling.address);
1029 	}
1030 
1031 	pr->throttling.state = state;
1032 
1033 	local_irq_enable();
1034 
1035 	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1036 			  "Throttling state set to T%d (%d%%)\n", state,
1037 			  (pr->throttling.states[state].performance ? pr->
1038 			   throttling.states[state].performance / 10 : 0)));
1039 
1040 	return 0;
1041 }
1042 
1043 static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
1044 					     int state, bool force)
1045 {
1046 	int ret;
1047 	u64 value;
1048 
1049 	if (!pr)
1050 		return -EINVAL;
1051 
1052 	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
1053 		return -EINVAL;
1054 
1055 	if (!pr->flags.throttling)
1056 		return -ENODEV;
1057 
1058 	if (!force && (state == pr->throttling.state))
1059 		return 0;
1060 
1061 	if (state < pr->throttling_platform_limit)
1062 		return -EPERM;
1063 
1064 	value = 0;
1065 	ret = acpi_get_throttling_value(pr, state, &value);
1066 	if (ret >= 0) {
1067 		acpi_write_throttling_state(pr, value);
1068 		pr->throttling.state = state;
1069 	}
1070 
1071 	return 0;
1072 }
1073 
1074 static long acpi_processor_throttling_fn(void *data)
1075 {
1076 	struct acpi_processor_throttling_arg *arg = data;
1077 	struct acpi_processor *pr = arg->pr;
1078 
1079 	return pr->throttling.acpi_processor_set_throttling(pr,
1080 			arg->target_state, arg->force);
1081 }
1082 
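/*
 * Overall flow (summary of the code below): validate pr and the
 * requested state, notify THROTTLING_PRECHANGE for every online CPU
 * sharing the T-state domain, perform the actual switch via
 * work_on_cpu() (on one CPU for SW_ANY coordination, on every affected
 * CPU for SW_ALL/HW_ALL), and finally notify THROTTLING_POSTCHANGE so
 * each CPU's cached T-state is updated.
 */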
1083 int acpi_processor_set_throttling(struct acpi_processor *pr,
1084 						int state, bool force)
1085 {
1086 	int ret = 0;
1087 	unsigned int i;
1088 	struct acpi_processor *match_pr;
1089 	struct acpi_processor_throttling *p_throttling;
1090 	struct acpi_processor_throttling_arg arg;
1091 	struct throttling_tstate t_state;
1092 
1093 	if (!pr)
1094 		return -EINVAL;
1095 
1096 	if (!pr->flags.throttling)
1097 		return -ENODEV;
1098 
1099 	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
1100 		return -EINVAL;
1101 
1102 	if (cpu_is_offline(pr->id)) {
1103 		/*
1104 		 * The CPU pointed to by pr->id is offline. There is no need
1105 		 * to change the throttling state any more.
1106 		 */
1107 		return -ENODEV;
1108 	}
1109 
1110 	t_state.target_state = state;
1111 	p_throttling = &(pr->throttling);
1112 
1113 	/*
1114 	 * The throttling notifier will be called for every
1115 	 * affected CPU in order to pick a proper T-state.
1116 	 * The notifier event is THROTTLING_PRECHANGE.
1117 	 */
1118 	for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
1119 		t_state.cpu = i;
1120 		acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
1121 							&t_state);
1122 	}
1123 	/*
1124 	 * The throttling.acpi_processor_set_throttling callback will be
1125 	 * called to switch the T-state. If the coordination type is SW_ALL
1126 	 * or HW_ALL, it must be called for every affected CPU. Otherwise it
1127 	 * only needs to be called for the CPU pointed to by pr.
1128 	 */
1129 	if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
1130 		arg.pr = pr;
1131 		arg.target_state = state;
1132 		arg.force = force;
1133 		ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, &arg);
1134 	} else {
1135 		/*
1136 		 * When the T-state coordination is SW_ALL or HW_ALL,
1137 		 * it is necessary to set the T-state for every affected
1138 		 * CPU.
1139 		 */
1140 		for_each_cpu_and(i, cpu_online_mask,
1141 		    p_throttling->shared_cpu_map) {
1142 			match_pr = per_cpu(processors, i);
1143 			/*
1144 			 * If the pointer is invalid, log a debug message
1145 			 * and continue.
1146 			 */
1147 			if (!match_pr) {
1148 				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1149 					"Invalid Pointer for CPU %d\n", i));
1150 				continue;
1151 			}
1152 			/*
1153 			 * If the throttling control is unsupported on CPU i,
1154 			 * log a debug message and continue.
1155 			 */
1156 			if (!match_pr->flags.throttling) {
1157 				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1158 					"Throttling Control is unsupported "
1159 					"on CPU %d\n", i));
1160 				continue;
1161 			}
1162 
1163 			arg.pr = match_pr;
1164 			arg.target_state = state;
1165 			arg.force = force;
1166 			ret = work_on_cpu(pr->id, acpi_processor_throttling_fn,
1167 				&arg);
1168 		}
1169 	}
1170 	/*
1171 	 * After set_throttling has been called, the throttling
1172 	 * notifier is invoked for every affected CPU to update
1173 	 * its cached T-state.
1174 	 * The notifier event is THROTTLING_POSTCHANGE.
1175 	 */
1176 	for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
1177 		t_state.cpu = i;
1178 		acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
1179 							&t_state);
1180 	}
1181 
1182 	return ret;
1183 }
1184 
1185 int acpi_processor_get_throttling_info(struct acpi_processor *pr)
1186 {
1187 	int result = 0;
1188 	struct acpi_processor_throttling *pthrottling;
1189 
1190 	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1191 			  "pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
1192 			  pr->throttling.address,
1193 			  pr->throttling.duty_offset,
1194 			  pr->throttling.duty_width));
1195 
1196 	/*
1197 	 * Evaluate _PTC, _TSS and _TPC
1198 	 * They must all be present or none of them can be used.
1199 	 */
1200 	if (acpi_processor_get_throttling_control(pr) ||
1201 		acpi_processor_get_throttling_states(pr) ||
1202 		acpi_processor_get_platform_limit(pr)) {
1204 		pr->throttling.acpi_processor_get_throttling =
1205 		    &acpi_processor_get_throttling_fadt;
1206 		pr->throttling.acpi_processor_set_throttling =
1207 		    &acpi_processor_set_throttling_fadt;
1208 		if (acpi_processor_get_fadt_info(pr))
1209 			return 0;
1210 	} else {
1211 		pr->throttling.acpi_processor_get_throttling =
1212 		    &acpi_processor_get_throttling_ptc;
1213 		pr->throttling.acpi_processor_set_throttling =
1214 		    &acpi_processor_set_throttling_ptc;
1215 	}
1216 
1217 	/*
1218 	 * If the _TSD package for this CPU can't be parsed successfully,
1219 	 * it means that this CPU has no coordination with other CPUs.
1220 	 */
1221 	if (acpi_processor_get_tsd(pr)) {
1222 		pthrottling = &pr->throttling;
1223 		pthrottling->tsd_valid_flag = 0;
1224 		cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
1225 		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
1226 	}
1227 
1228 	/*
1229 	 * PIIX4 Errata: We don't support throttling on the original PIIX4.
1230 	 * This shouldn't be an issue as few (if any) mobile systems ever
1231 	 * used this part.
1232 	 */
1233 	if (errata.piix4.throttle) {
1234 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1235 				  "Throttling not supported on PIIX4 A- or B-step\n"));
1236 		return 0;
1237 	}
1238 
1239 	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
1240 			  pr->throttling.state_count));
1241 
1242 	pr->flags.throttling = 1;
1243 
1244 	/*
1245 	 * Disable throttling (if enabled).  We'll let subsequent policy (e.g.
1246 	 * thermal) decide to lower performance if it so chooses, but for now
1247 	 * we'll crank up the speed.
1248 	 */
1249 
1250 	result = acpi_processor_get_throttling(pr);
1251 	if (result)
1252 		goto end;
1253 
1254 	if (pr->throttling.state) {
1255 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1256 				  "Disabling throttling (was T%d)\n",
1257 				  pr->throttling.state));
1258 		result = acpi_processor_set_throttling(pr, 0, false);
1259 		if (result)
1260 			goto end;
1261 	}
1262 
1263 end:
1264 	if (result)
1265 		pr->flags.throttling = 0;
1266 
1267 	return result;
1268 }
1269 
1270