xref: /linux/drivers/acpi/processor_throttling.c (revision cc4589ebfae6f8dbb5cf880a0a67eedab3416492)
1 /*
2  * processor_throttling.c - Throttling submodule of the ACPI processor driver
3  *
4  *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
5  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
6  *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
7  *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
8  *  			- Added processor hotplug support
9  *
10  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11  *
12  *  This program is free software; you can redistribute it and/or modify
13  *  it under the terms of the GNU General Public License as published by
14  *  the Free Software Foundation; either version 2 of the License, or (at
15  *  your option) any later version.
16  *
17  *  This program is distributed in the hope that it will be useful, but
18  *  WITHOUT ANY WARRANTY; without even the implied warranty of
19  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
20  *  General Public License for more details.
21  *
22  *  You should have received a copy of the GNU General Public License along
23  *  with this program; if not, write to the Free Software Foundation, Inc.,
24  *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
25  *
26  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
27  */
28 
29 #include <linux/kernel.h>
30 #include <linux/module.h>
31 #include <linux/slab.h>
32 #include <linux/init.h>
33 #include <linux/sched.h>
34 #include <linux/cpufreq.h>
35 #include <linux/proc_fs.h>
36 #include <linux/seq_file.h>
37 
38 #include <asm/io.h>
39 #include <asm/uaccess.h>
40 
41 #include <acpi/acpi_bus.h>
42 #include <acpi/acpi_drivers.h>
43 #include <acpi/processor.h>
44 
45 #define PREFIX "ACPI: "
46 
47 #define ACPI_PROCESSOR_CLASS            "processor"
48 #define _COMPONENT              ACPI_PROCESSOR_COMPONENT
49 ACPI_MODULE_NAME("processor_throttling");
50 
51 /* ignore_tpc:
 *  0 -> the ACPI processor driver honors _TPC values (default)
 *  1 -> the ACPI processor driver ignores _TPC values
54  */
55 static int ignore_tpc;
56 module_param(ignore_tpc, int, 0644);
57 MODULE_PARM_DESC(ignore_tpc, "Disable broken BIOS _TPC throttling support");
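
/*
 * Illustrative usage (assuming the processor driver is built in): boot with
 *
 *	processor.ignore_tpc=1
 *
 * or flip it at run time via
 *
 *	echo 1 > /sys/module/processor/parameters/ignore_tpc
 *
 * to have the driver disregard a broken _TPC limit.
 */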
58 
59 struct throttling_tstate {
60 	unsigned int cpu;		/* cpu nr */
61 	int target_state;		/* target T-state */
62 };
63 
64 #define THROTTLING_PRECHANGE       (1)
65 #define THROTTLING_POSTCHANGE      (2)
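
/*
 * A T-state transition is a two-phase protocol: THROTTLING_PRECHANGE lets
 * the notifier clip the requested state against the thermal, user and _TPC
 * limits, and THROTTLING_POSTCHANGE records the state that was finally set.
 */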
66 
67 static int acpi_processor_get_throttling(struct acpi_processor *pr);
68 int acpi_processor_set_throttling(struct acpi_processor *pr,
69 						int state, bool force);
70 
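/*
 * Example (illustrative): if CPUs 0 and 1 each report a _TSD of
 * { Domain = 0, CoordType = SW_ALL, NumProcessors = 2 }, both end up with
 * shared_cpu_map = 0-1. Any mismatch in NumProcessors or CoordType within
 * a domain invalidates the whole parse, and every CPU falls back to a
 * private SW_ALL domain (see the error path below).
 */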
71 static int acpi_processor_update_tsd_coord(void)
72 {
73 	int count, count_target;
74 	int retval = 0;
75 	unsigned int i, j;
76 	cpumask_var_t covered_cpus;
77 	struct acpi_processor *pr, *match_pr;
78 	struct acpi_tsd_package *pdomain, *match_pdomain;
79 	struct acpi_processor_throttling *pthrottling, *match_pthrottling;
80 
81 	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
82 		return -ENOMEM;
83 
	/*
	 * Now that we have _TSD data from all CPUs, let's set up T-state
	 * coordination among them.
	 */
88 	for_each_possible_cpu(i) {
89 		pr = per_cpu(processors, i);
90 		if (!pr)
91 			continue;
92 
93 		/* Basic validity check for domain info */
94 		pthrottling = &(pr->throttling);
95 
		/*
		 * If the _TSD package for any CPU is invalid, the
		 * coordination among all CPUs is treated as invalid.
		 * Crude, but safe.
		 */
101 		if (!pthrottling->tsd_valid_flag) {
102 			retval = -EINVAL;
103 			break;
104 		}
105 	}
106 	if (retval)
107 		goto err_ret;
108 
109 	for_each_possible_cpu(i) {
110 		pr = per_cpu(processors, i);
111 		if (!pr)
112 			continue;
113 
114 		if (cpumask_test_cpu(i, covered_cpus))
115 			continue;
116 		pthrottling = &pr->throttling;
117 
118 		pdomain = &(pthrottling->domain_info);
119 		cpumask_set_cpu(i, pthrottling->shared_cpu_map);
120 		cpumask_set_cpu(i, covered_cpus);
		/*
		 * If the number of processors in the _TSD domain is 1,
		 * there is no coordination to parse for this CPU.
		 */
125 		if (pdomain->num_processors <= 1)
126 			continue;
127 
128 		/* Validate the Domain info */
129 		count_target = pdomain->num_processors;
130 		count = 1;
131 
132 		for_each_possible_cpu(j) {
133 			if (i == j)
134 				continue;
135 
136 			match_pr = per_cpu(processors, j);
137 			if (!match_pr)
138 				continue;
139 
140 			match_pthrottling = &(match_pr->throttling);
141 			match_pdomain = &(match_pthrottling->domain_info);
142 			if (match_pdomain->domain != pdomain->domain)
143 				continue;
144 
			/*
			 * Here i and j are in the same domain. Two _TSD
			 * packages in the same domain must report the same
			 * num_processors and coordination type; anything
			 * else is treated as invalid.
			 */
151 			if (match_pdomain->num_processors != count_target) {
152 				retval = -EINVAL;
153 				goto err_ret;
154 			}
155 
156 			if (pdomain->coord_type != match_pdomain->coord_type) {
157 				retval = -EINVAL;
158 				goto err_ret;
159 			}
160 
161 			cpumask_set_cpu(j, covered_cpus);
162 			cpumask_set_cpu(j, pthrottling->shared_cpu_map);
163 			count++;
164 		}
165 		for_each_possible_cpu(j) {
166 			if (i == j)
167 				continue;
168 
169 			match_pr = per_cpu(processors, j);
170 			if (!match_pr)
171 				continue;
172 
173 			match_pthrottling = &(match_pr->throttling);
174 			match_pdomain = &(match_pthrottling->domain_info);
175 			if (match_pdomain->domain != pdomain->domain)
176 				continue;
177 
			/*
			 * All CPUs in the same domain get the same
			 * shared_cpu_map.
			 */
182 			cpumask_copy(match_pthrottling->shared_cpu_map,
183 				     pthrottling->shared_cpu_map);
184 		}
185 	}
186 
187 err_ret:
188 	free_cpumask_var(covered_cpus);
189 
190 	for_each_possible_cpu(i) {
191 		pr = per_cpu(processors, i);
192 		if (!pr)
193 			continue;
194 
		/*
		 * Assume no coordination on any error parsing the domain
		 * info; the coordination type is forced to SW_ALL.
		 */
199 		if (retval) {
200 			pthrottling = &(pr->throttling);
201 			cpumask_clear(pthrottling->shared_cpu_map);
202 			cpumask_set_cpu(i, pthrottling->shared_cpu_map);
203 			pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
204 		}
205 	}
206 
207 	return retval;
208 }
209 
210 /*
 * Update the T-state coordination after the _TSD
 * data for all CPUs has been obtained.
213  */
214 void acpi_processor_throttling_init(void)
215 {
216 	if (acpi_processor_update_tsd_coord())
217 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
218 			"Assume no T-state coordination\n"));
221 }
222 
223 static int acpi_processor_throttling_notifier(unsigned long event, void *data)
224 {
225 	struct throttling_tstate *p_tstate = data;
226 	struct acpi_processor *pr;
	unsigned int cpu;
228 	int target_state;
229 	struct acpi_processor_limit *p_limit;
230 	struct acpi_processor_throttling *p_throttling;
231 
232 	cpu = p_tstate->cpu;
233 	pr = per_cpu(processors, cpu);
234 	if (!pr) {
235 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid pr pointer\n"));
236 		return 0;
237 	}
238 	if (!pr->flags.throttling) {
239 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Throttling control is "
240 				"unsupported on CPU %d\n", cpu));
241 		return 0;
242 	}
243 	target_state = p_tstate->target_state;
244 	p_throttling = &(pr->throttling);
245 	switch (event) {
246 	case THROTTLING_PRECHANGE:
		/*
		 * The prechange event selects a target T-state that
		 * satisfies the thermal, user and _TPC limits.
		 */
251 		p_limit = &pr->limit;
252 		if (p_limit->thermal.tx > target_state)
253 			target_state = p_limit->thermal.tx;
254 		if (p_limit->user.tx > target_state)
255 			target_state = p_limit->user.tx;
256 		if (pr->throttling_platform_limit > target_state)
257 			target_state = pr->throttling_platform_limit;
258 		if (target_state >= p_throttling->state_count) {
259 			printk(KERN_WARNING
				"Exceeded the limit of T-states\n");
261 			target_state = p_throttling->state_count - 1;
262 		}
263 		p_tstate->target_state = target_state;
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PreChange Event: "
265 				"target T-state of CPU %d is T%d\n",
266 				cpu, target_state));
267 		break;
268 	case THROTTLING_POSTCHANGE:
		/*
		 * The postchange event only updates the current T-state
		 * recorded in acpi_processor_throttling.
		 */
273 		p_throttling->state = target_state;
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PostChange Event: "
275 				"CPU %d is switched to T%d\n",
276 				cpu, target_state));
277 		break;
278 	default:
279 		printk(KERN_WARNING
280 			"Unsupported Throttling notifier event\n");
281 		break;
282 	}
283 
284 	return 0;
285 }
286 
287 /*
288  * _TPC - Throttling Present Capabilities
289  */
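/*
 * _TPC evaluates to the lowest-numbered T-state OSPM may use; 0 means
 * every T-state is available. E.g. on an 8-state system a _TPC of 2
 * forbids T0 and T1, so OSPM must run at T2 or a deeper throttling state.
 */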
290 static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
291 {
292 	acpi_status status = 0;
293 	unsigned long long tpc = 0;
294 
295 	if (!pr)
296 		return -EINVAL;
297 
298 	if (ignore_tpc)
299 		goto end;
300 
301 	status = acpi_evaluate_integer(pr->handle, "_TPC", NULL, &tpc);
302 	if (ACPI_FAILURE(status)) {
303 		if (status != AE_NOT_FOUND) {
304 			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TPC"));
305 		}
306 		return -ENODEV;
307 	}
308 
309 end:
310 	pr->throttling_platform_limit = (int)tpc;
311 	return 0;
312 }
313 
314 int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
315 {
316 	int result = 0;
317 	int throttling_limit;
318 	int current_state;
319 	struct acpi_processor_limit *limit;
320 	int target_state;
321 
322 	if (ignore_tpc)
323 		return 0;
324 
325 	result = acpi_processor_get_platform_limit(pr);
326 	if (result) {
327 		/* Throttling Limit is unsupported */
328 		return result;
329 	}
330 
331 	throttling_limit = pr->throttling_platform_limit;
332 	if (throttling_limit >= pr->throttling.state_count) {
		/* Invalid throttling limit */
334 		return -EINVAL;
335 	}
336 
337 	current_state = pr->throttling.state;
338 	if (current_state > throttling_limit) {
		/*
		 * The current state already satisfies the _TPC limit,
		 * but OSPM may reasonably move to a lower (less
		 * throttled) T-state for better performance, as long
		 * as the thermal and user limits are still honored.
		 */
346 		limit = &pr->limit;
347 		target_state = throttling_limit;
348 		if (limit->thermal.tx > target_state)
349 			target_state = limit->thermal.tx;
350 		if (limit->user.tx > target_state)
351 			target_state = limit->user.tx;
352 	} else if (current_state == throttling_limit) {
353 		/*
354 		 * Unnecessary to change the throttling state
355 		 */
356 		return 0;
357 	} else {
		/*
		 * If the current state is below the _TPC limit, force a
		 * switch to the state given by throttling_platform_limit.
		 * The previous state already satisfied the thermal and
		 * user limits, so there is no need to check them again.
		 */
365 		target_state = throttling_limit;
366 	}
367 	return acpi_processor_set_throttling(pr, target_state, false);
368 }
369 
370 /*
371  * _PTC - Processor Throttling Control (and status) register location
372  */
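/*
 * _PTC evaluates to a two-element package of buffers, each wrapping a
 * generic register descriptor; in ASL roughly (illustrative only):
 *
 *	Name (_PTC, Package ()
 *	{
 *		ResourceTemplate () { Register (FFixedHW, 0, 0, 0) },
 *		ResourceTemplate () { Register (FFixedHW, 0, 0, 0) }
 *	})
 *
 * FFixedHW selects the MSR access path below; a SystemIO register selects
 * port I/O instead.
 */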
373 static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
374 {
375 	int result = 0;
376 	acpi_status status = 0;
377 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
378 	union acpi_object *ptc = NULL;
379 	union acpi_object obj = { 0 };
380 	struct acpi_processor_throttling *throttling;
381 
382 	status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer);
383 	if (ACPI_FAILURE(status)) {
384 		if (status != AE_NOT_FOUND) {
385 			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PTC"));
386 		}
387 		return -ENODEV;
388 	}
389 
390 	ptc = (union acpi_object *)buffer.pointer;
391 	if (!ptc || (ptc->type != ACPI_TYPE_PACKAGE)
392 	    || (ptc->package.count != 2)) {
393 		printk(KERN_ERR PREFIX "Invalid _PTC data\n");
394 		result = -EFAULT;
395 		goto end;
396 	}
397 
398 	/*
399 	 * control_register
400 	 */
401 
402 	obj = ptc->package.elements[0];
403 
404 	if ((obj.type != ACPI_TYPE_BUFFER)
405 	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
406 	    || (obj.buffer.pointer == NULL)) {
407 		printk(KERN_ERR PREFIX
408 		       "Invalid _PTC data (control_register)\n");
409 		result = -EFAULT;
410 		goto end;
411 	}
412 	memcpy(&pr->throttling.control_register, obj.buffer.pointer,
413 	       sizeof(struct acpi_ptc_register));
414 
415 	/*
416 	 * status_register
417 	 */
418 
419 	obj = ptc->package.elements[1];
420 
421 	if ((obj.type != ACPI_TYPE_BUFFER)
422 	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
423 	    || (obj.buffer.pointer == NULL)) {
424 		printk(KERN_ERR PREFIX "Invalid _PTC data (status_register)\n");
425 		result = -EFAULT;
426 		goto end;
427 	}
428 
429 	memcpy(&pr->throttling.status_register, obj.buffer.pointer,
430 	       sizeof(struct acpi_ptc_register));
431 
432 	throttling = &pr->throttling;
433 
434 	if ((throttling->control_register.bit_width +
435 		throttling->control_register.bit_offset) > 32) {
436 		printk(KERN_ERR PREFIX "Invalid _PTC control register\n");
437 		result = -EFAULT;
438 		goto end;
439 	}
440 
441 	if ((throttling->status_register.bit_width +
442 		throttling->status_register.bit_offset) > 32) {
443 		printk(KERN_ERR PREFIX "Invalid _PTC status register\n");
444 		result = -EFAULT;
445 		goto end;
446 	}
447 
end:
449 	kfree(buffer.pointer);
450 
451 	return result;
452 }
453 
454 /*
455  * _TSS - Throttling Supported States
456  */
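/*
 * Each _TSS entry is a package of five integers, matching the "NNNNN"
 * format used below:
 *
 *	{ FreqPercentage, Power, TransitionLatency, Control, Status }
 *
 * e.g. { 50, 500, 100, 0x10, 0 } (values illustrative) describes a state
 * running at 50% of the nominal frequency.
 */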
457 static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
458 {
459 	int result = 0;
460 	acpi_status status = AE_OK;
461 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
462 	struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
463 	struct acpi_buffer state = { 0, NULL };
464 	union acpi_object *tss = NULL;
465 	int i;
466 
467 	status = acpi_evaluate_object(pr->handle, "_TSS", NULL, &buffer);
468 	if (ACPI_FAILURE(status)) {
469 		if (status != AE_NOT_FOUND) {
470 			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSS"));
471 		}
472 		return -ENODEV;
473 	}
474 
475 	tss = buffer.pointer;
476 	if (!tss || (tss->type != ACPI_TYPE_PACKAGE)) {
477 		printk(KERN_ERR PREFIX "Invalid _TSS data\n");
478 		result = -EFAULT;
479 		goto end;
480 	}
481 
482 	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
483 			  tss->package.count));
484 
485 	pr->throttling.state_count = tss->package.count;
486 	pr->throttling.states_tss =
487 	    kmalloc(sizeof(struct acpi_processor_tx_tss) * tss->package.count,
488 		    GFP_KERNEL);
489 	if (!pr->throttling.states_tss) {
490 		result = -ENOMEM;
491 		goto end;
492 	}
493 
494 	for (i = 0; i < pr->throttling.state_count; i++) {
495 
496 		struct acpi_processor_tx_tss *tx =
497 		    (struct acpi_processor_tx_tss *)&(pr->throttling.
498 						      states_tss[i]);
499 
500 		state.length = sizeof(struct acpi_processor_tx_tss);
501 		state.pointer = tx;
502 
503 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));
504 
505 		status = acpi_extract_package(&(tss->package.elements[i]),
506 					      &format, &state);
507 		if (ACPI_FAILURE(status)) {
508 			ACPI_EXCEPTION((AE_INFO, status, "Invalid _TSS data"));
509 			result = -EFAULT;
510 			kfree(pr->throttling.states_tss);
511 			goto end;
512 		}
513 
514 		if (!tx->freqpercentage) {
515 			printk(KERN_ERR PREFIX
516 			       "Invalid _TSS data: freq is zero\n");
517 			result = -EFAULT;
518 			kfree(pr->throttling.states_tss);
519 			goto end;
520 		}
521 	}
522 
end:
524 	kfree(buffer.pointer);
525 
526 	return result;
527 }
528 
529 /*
530  * _TSD - T-State Dependencies
531  */
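/*
 * _TSD evaluates to a package containing a single five-integer entry:
 *
 *	{ NumEntries (5), Revision (0), Domain, CoordType, NumProcessors }
 *
 * where CoordType is 0xFC (SW_ALL), 0xFD (SW_ANY) or 0xFE (HW_ALL).
 */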
532 static int acpi_processor_get_tsd(struct acpi_processor *pr)
533 {
534 	int result = 0;
535 	acpi_status status = AE_OK;
536 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
537 	struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
538 	struct acpi_buffer state = { 0, NULL };
539 	union acpi_object *tsd = NULL;
540 	struct acpi_tsd_package *pdomain;
541 	struct acpi_processor_throttling *pthrottling;
542 
543 	pthrottling = &pr->throttling;
544 	pthrottling->tsd_valid_flag = 0;
545 
546 	status = acpi_evaluate_object(pr->handle, "_TSD", NULL, &buffer);
547 	if (ACPI_FAILURE(status)) {
548 		if (status != AE_NOT_FOUND) {
549 			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSD"));
550 		}
551 		return -ENODEV;
552 	}
553 
554 	tsd = buffer.pointer;
555 	if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) {
556 		printk(KERN_ERR PREFIX "Invalid _TSD data\n");
557 		result = -EFAULT;
558 		goto end;
559 	}
560 
561 	if (tsd->package.count != 1) {
		printk(KERN_ERR PREFIX "Invalid _TSD data: expected one entry\n");
563 		result = -EFAULT;
564 		goto end;
565 	}
566 
567 	pdomain = &(pr->throttling.domain_info);
568 
569 	state.length = sizeof(struct acpi_tsd_package);
570 	state.pointer = pdomain;
571 
572 	status = acpi_extract_package(&(tsd->package.elements[0]),
573 				      &format, &state);
574 	if (ACPI_FAILURE(status)) {
575 		printk(KERN_ERR PREFIX "Invalid _TSD data\n");
576 		result = -EFAULT;
577 		goto end;
578 	}
579 
580 	if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) {
581 		printk(KERN_ERR PREFIX "Unknown _TSD:num_entries\n");
582 		result = -EFAULT;
583 		goto end;
584 	}
585 
586 	if (pdomain->revision != ACPI_TSD_REV0_REVISION) {
587 		printk(KERN_ERR PREFIX "Unknown _TSD:revision\n");
588 		result = -EFAULT;
589 		goto end;
590 	}
591 
592 	pthrottling = &pr->throttling;
593 	pthrottling->tsd_valid_flag = 1;
594 	pthrottling->shared_type = pdomain->coord_type;
595 	cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
	/*
	 * If the coordination type is not one defined by the ACPI spec,
	 * tsd_valid_flag is cleared and the coordination type is forced
	 * to DOMAIN_COORD_TYPE_SW_ALL.
	 */
601 	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
602 		pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
603 		pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
604 		pthrottling->tsd_valid_flag = 0;
605 		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
606 	}
607 
end:
609 	kfree(buffer.pointer);
610 	return result;
611 }
612 
613 /* --------------------------------------------------------------------------
614                               Throttling Control
615    -------------------------------------------------------------------------- */
616 static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
617 {
618 	int state = 0;
619 	u32 value = 0;
620 	u32 duty_mask = 0;
621 	u32 duty_value = 0;
622 
623 	if (!pr)
624 		return -EINVAL;
625 
626 	if (!pr->flags.throttling)
627 		return -ENODEV;
628 
629 	pr->throttling.state = 0;
630 
631 	duty_mask = pr->throttling.state_count - 1;
632 
633 	duty_mask <<= pr->throttling.duty_offset;
634 
635 	local_irq_disable();
636 
637 	value = inl(pr->throttling.address);
638 
639 	/*
640 	 * Compute the current throttling state when throttling is enabled
641 	 * (bit 4 is on).
642 	 */
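	/*
	 * Worked example (illustrative): duty_width = 3 (8 states),
	 * duty_offset = 1, so duty_mask = 0x0E. A raw value of 0x1C has
	 * bit 4 set and duty_value = (0x1C & 0x0E) >> 1 = 6, giving
	 * state = 8 - 6 = T2.
	 */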
643 	if (value & 0x10) {
644 		duty_value = value & duty_mask;
645 		duty_value >>= pr->throttling.duty_offset;
646 
647 		if (duty_value)
648 			state = pr->throttling.state_count - duty_value;
649 	}
650 
651 	pr->throttling.state = state;
652 
653 	local_irq_enable();
654 
655 	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
656 			  "Throttling state is T%d (%d%% throttling applied)\n",
657 			  state, pr->throttling.states[state].performance));
658 
659 	return 0;
660 }
661 
662 #ifdef CONFIG_X86
663 static int acpi_throttling_rdmsr(struct acpi_processor *pr,
664 					u64 *value)
665 {
666 	struct cpuinfo_x86 *c;
667 	u64 msr_high, msr_low;
668 	unsigned int cpu;
669 	u64 msr = 0;
670 	int ret = -1;
671 
672 	cpu = pr->id;
673 	c = &cpu_data(cpu);
674 
675 	if ((c->x86_vendor != X86_VENDOR_INTEL) ||
676 		!cpu_has(c, X86_FEATURE_ACPI)) {
677 		printk(KERN_ERR PREFIX
			"HARDWARE addr space not supported yet\n");
679 	} else {
680 		msr_low = 0;
681 		msr_high = 0;
682 		rdmsr_safe(MSR_IA32_THERM_CONTROL,
			(u32 *)&msr_low, (u32 *)&msr_high);
684 		msr = (msr_high << 32) | msr_low;
685 		*value = (u64) msr;
686 		ret = 0;
687 	}
688 	return ret;
689 }
690 
691 static int acpi_throttling_wrmsr(struct acpi_processor *pr, u64 value)
692 {
693 	struct cpuinfo_x86 *c;
694 	unsigned int cpu;
695 	int ret = -1;
696 	u64 msr;
697 
698 	cpu = pr->id;
699 	c = &cpu_data(cpu);
700 
701 	if ((c->x86_vendor != X86_VENDOR_INTEL) ||
702 		!cpu_has(c, X86_FEATURE_ACPI)) {
703 		printk(KERN_ERR PREFIX
			"HARDWARE addr space not supported yet\n");
705 	} else {
706 		msr = value;
707 		wrmsr_safe(MSR_IA32_THERM_CONTROL,
708 			msr & 0xffffffff, msr >> 32);
709 		ret = 0;
710 	}
711 	return ret;
712 }
713 #else
714 static int acpi_throttling_rdmsr(struct acpi_processor *pr,
715 				u64 *value)
716 {
717 	printk(KERN_ERR PREFIX
		"HARDWARE addr space not supported yet\n");
719 	return -1;
720 }
721 
722 static int acpi_throttling_wrmsr(struct acpi_processor *pr, u64 value)
723 {
724 	printk(KERN_ERR PREFIX
		"HARDWARE addr space not supported yet\n");
726 	return -1;
727 }
728 #endif
729 
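/*
 * For a SystemIO register the T-state value occupies bit_width bits
 * starting at bit_offset within the port value. E.g. (illustrative)
 * with bit_offset = 1 and bit_width = 4, a raw port value of 0x1A
 * yields (0x1A >> 1) & 0xF = 0xD.
 */
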
730 static int acpi_read_throttling_status(struct acpi_processor *pr,
731 					u64 *value)
732 {
733 	u32 bit_width, bit_offset;
734 	u64 ptc_value;
735 	u64 ptc_mask;
736 	struct acpi_processor_throttling *throttling;
737 	int ret = -1;
738 
739 	throttling = &pr->throttling;
740 	switch (throttling->status_register.space_id) {
741 	case ACPI_ADR_SPACE_SYSTEM_IO:
742 		ptc_value = 0;
743 		bit_width = throttling->status_register.bit_width;
744 		bit_offset = throttling->status_register.bit_offset;
745 
746 		acpi_os_read_port((acpi_io_address) throttling->status_register.
747 				  address, (u32 *) &ptc_value,
748 				  (u32) (bit_width + bit_offset));
		ptc_mask = ((u64)1 << bit_width) - 1;
750 		*value = (u64) ((ptc_value >> bit_offset) & ptc_mask);
751 		ret = 0;
752 		break;
753 	case ACPI_ADR_SPACE_FIXED_HARDWARE:
754 		ret = acpi_throttling_rdmsr(pr, value);
755 		break;
756 	default:
757 		printk(KERN_ERR PREFIX "Unknown addr space %d\n",
758 		       (u32) (throttling->status_register.space_id));
759 	}
760 	return ret;
761 }
762 
763 static int acpi_write_throttling_state(struct acpi_processor *pr,
764 				u64 value)
765 {
766 	u32 bit_width, bit_offset;
767 	u64 ptc_value;
768 	u64 ptc_mask;
769 	struct acpi_processor_throttling *throttling;
770 	int ret = -1;
771 
772 	throttling = &pr->throttling;
773 	switch (throttling->control_register.space_id) {
774 	case ACPI_ADR_SPACE_SYSTEM_IO:
775 		bit_width = throttling->control_register.bit_width;
776 		bit_offset = throttling->control_register.bit_offset;
		ptc_mask = ((u64)1 << bit_width) - 1;
778 		ptc_value = value & ptc_mask;
779 
780 		acpi_os_write_port((acpi_io_address) throttling->
781 					control_register.address,
782 					(u32) (ptc_value << bit_offset),
783 					(u32) (bit_width + bit_offset));
784 		ret = 0;
785 		break;
786 	case ACPI_ADR_SPACE_FIXED_HARDWARE:
787 		ret = acpi_throttling_wrmsr(pr, value);
788 		break;
789 	default:
790 		printk(KERN_ERR PREFIX "Unknown addr space %d\n",
791 		       (u32) (throttling->control_register.space_id));
792 	}
793 	return ret;
794 }
795 
796 static int acpi_get_throttling_state(struct acpi_processor *pr,
797 				u64 value)
798 {
799 	int i;
800 
801 	for (i = 0; i < pr->throttling.state_count; i++) {
802 		struct acpi_processor_tx_tss *tx =
803 		    (struct acpi_processor_tx_tss *)&(pr->throttling.
804 						      states_tss[i]);
805 		if (tx->control == value)
806 			return i;
807 	}
808 	return -1;
809 }
810 
811 static int acpi_get_throttling_value(struct acpi_processor *pr,
812 			int state, u64 *value)
813 {
814 	int ret = -1;
815 
	if (state >= 0 && state < pr->throttling.state_count) {
817 		struct acpi_processor_tx_tss *tx =
818 		    (struct acpi_processor_tx_tss *)&(pr->throttling.
819 						      states_tss[state]);
820 		*value = tx->control;
821 		ret = 0;
822 	}
823 	return ret;
824 }
825 
826 static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
827 {
828 	int state = 0;
829 	int ret;
830 	u64 value;
831 
832 	if (!pr)
833 		return -EINVAL;
834 
835 	if (!pr->flags.throttling)
836 		return -ENODEV;
837 
838 	pr->throttling.state = 0;
839 
840 	value = 0;
841 	ret = acpi_read_throttling_status(pr, &value);
842 	if (ret >= 0) {
843 		state = acpi_get_throttling_state(pr, value);
844 		if (state == -1) {
845 			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
846 				"Invalid throttling state, reset\n"));
847 			state = 0;
848 			ret = acpi_processor_set_throttling(pr, state, true);
849 			if (ret)
850 				return ret;
851 		}
852 		pr->throttling.state = state;
853 	}
854 
855 	return 0;
856 }
857 
858 static int acpi_processor_get_throttling(struct acpi_processor *pr)
859 {
860 	cpumask_var_t saved_mask;
861 	int ret;
862 
863 	if (!pr)
864 		return -EINVAL;
865 
866 	if (!pr->flags.throttling)
867 		return -ENODEV;
868 
869 	if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
870 		return -ENOMEM;
871 
	/*
	 * Migrate the current task to the CPU identified by pr->id.
	 */
875 	cpumask_copy(saved_mask, &current->cpus_allowed);
876 	/* FIXME: use work_on_cpu() */
877 	set_cpus_allowed_ptr(current, cpumask_of(pr->id));
878 	ret = pr->throttling.acpi_processor_get_throttling(pr);
879 	/* restore the previous state */
880 	set_cpus_allowed_ptr(current, saved_mask);
881 	free_cpumask_var(saved_mask);
882 
883 	return ret;
884 }
885 
886 static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
887 {
888 	int i, step;
889 
890 	if (!pr->throttling.address) {
891 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n"));
892 		return -EINVAL;
893 	} else if (!pr->throttling.duty_width) {
894 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n"));
895 		return -EINVAL;
896 	}
897 	/* TBD: Support duty_cycle values that span bit 4. */
898 	else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) {
899 		printk(KERN_WARNING PREFIX "duty_cycle spans bit 4\n");
900 		return -EINVAL;
901 	}
902 
903 	pr->throttling.state_count = 1 << acpi_gbl_FADT.duty_width;
904 
905 	/*
	 * Compute state values. Note that throttling exhibits a linear
	 * power/performance relationship (at 50% performance the CPU
	 * consumes 50% power). Values are in 1/10th of a percent to
	 * preserve accuracy.
909 	 */
910 
911 	step = (1000 / pr->throttling.state_count);
912 
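	/*
	 * E.g. duty_width = 3 gives state_count = 8 and step = 125, so
	 * T0 = 1000 (100%), T1 = 875 (87.5%), ..., T7 = 125 (12.5%).
	 */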
913 	for (i = 0; i < pr->throttling.state_count; i++) {
914 		pr->throttling.states[i].performance = 1000 - step * i;
915 		pr->throttling.states[i].power = 1000 - step * i;
916 	}
917 	return 0;
918 }
919 
920 static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
921 					      int state, bool force)
922 {
923 	u32 value = 0;
924 	u32 duty_mask = 0;
925 	u32 duty_value = 0;
926 
927 	if (!pr)
928 		return -EINVAL;
929 
930 	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
931 		return -EINVAL;
932 
933 	if (!pr->flags.throttling)
934 		return -ENODEV;
935 
936 	if (!force && (state == pr->throttling.state))
937 		return 0;
938 
939 	if (state < pr->throttling_platform_limit)
940 		return -EPERM;
941 	/*
942 	 * Calculate the duty_value and duty_mask.
943 	 */
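	/*
	 * E.g. (illustrative) with 8 states and duty_offset = 1,
	 * requesting T3 gives duty_value = (8 - 3) << 1 = 0x0A and
	 * duty_mask = ~(7 << 1) = ~0x0E.
	 */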
944 	if (state) {
945 		duty_value = pr->throttling.state_count - state;
946 
947 		duty_value <<= pr->throttling.duty_offset;
948 
949 		/* Used to clear all duty_value bits */
950 		duty_mask = pr->throttling.state_count - 1;
951 
952 		duty_mask <<= acpi_gbl_FADT.duty_offset;
953 		duty_mask = ~duty_mask;
954 	}
955 
956 	local_irq_disable();
957 
	/*
	 * Disable throttling by clearing bit 4. Throttling must be
	 * disabled before the duty_value can be changed.
	 */
962 	value = inl(pr->throttling.address);
963 	if (value & 0x10) {
964 		value &= 0xFFFFFFEF;
965 		outl(value, pr->throttling.address);
966 	}
967 
968 	/*
969 	 * Write the new duty_value and then enable throttling.  Note
970 	 * that a state value of 0 leaves throttling disabled.
971 	 */
972 	if (state) {
973 		value &= duty_mask;
974 		value |= duty_value;
975 		outl(value, pr->throttling.address);
976 
977 		value |= 0x00000010;
978 		outl(value, pr->throttling.address);
979 	}
980 
981 	pr->throttling.state = state;
982 
983 	local_irq_enable();
984 
985 	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
986 			  "Throttling state set to T%d (%d%%)\n", state,
987 			  (pr->throttling.states[state].performance ? pr->
988 			   throttling.states[state].performance / 10 : 0)));
989 
990 	return 0;
991 }
992 
993 static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
994 					     int state, bool force)
995 {
996 	int ret;
997 	u64 value;
998 
999 	if (!pr)
1000 		return -EINVAL;
1001 
1002 	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
1003 		return -EINVAL;
1004 
1005 	if (!pr->flags.throttling)
1006 		return -ENODEV;
1007 
1008 	if (!force && (state == pr->throttling.state))
1009 		return 0;
1010 
1011 	if (state < pr->throttling_platform_limit)
1012 		return -EPERM;
1013 
1014 	value = 0;
1015 	ret = acpi_get_throttling_value(pr, state, &value);
1016 	if (ret >= 0) {
1017 		acpi_write_throttling_state(pr, value);
1018 		pr->throttling.state = state;
1019 	}
1020 
1021 	return 0;
1022 }
1023 
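/*
 * With SW_ANY coordination, writing the control value on one CPU of the
 * domain is sufficient; with SW_ALL (and, conservatively, HW_ALL) the
 * value is written on every online CPU that shares the domain.
 */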
1024 int acpi_processor_set_throttling(struct acpi_processor *pr,
1025 						int state, bool force)
1026 {
1027 	cpumask_var_t saved_mask;
1028 	int ret = 0;
1029 	unsigned int i;
1030 	struct acpi_processor *match_pr;
1031 	struct acpi_processor_throttling *p_throttling;
1032 	struct throttling_tstate t_state;
1033 	cpumask_var_t online_throttling_cpus;
1034 
1035 	if (!pr)
1036 		return -EINVAL;
1037 
1038 	if (!pr->flags.throttling)
1039 		return -ENODEV;
1040 
1041 	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
1042 		return -EINVAL;
1043 
1044 	if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
1045 		return -ENOMEM;
1046 
1047 	if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) {
1048 		free_cpumask_var(saved_mask);
1049 		return -ENOMEM;
1050 	}
1051 
1052 	cpumask_copy(saved_mask, &current->cpus_allowed);
1053 	t_state.target_state = state;
1054 	p_throttling = &(pr->throttling);
1055 	cpumask_and(online_throttling_cpus, cpu_online_mask,
1056 		    p_throttling->shared_cpu_map);
	/*
	 * The throttling notifier is called for every affected CPU to
	 * settle on a proper target T-state. The notifier event is
	 * THROTTLING_PRECHANGE.
	 */
1062 	for_each_cpu(i, online_throttling_cpus) {
1063 		t_state.cpu = i;
1064 		acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
1065 							&t_state);
1066 	}
	/*
	 * The per-processor set_throttling handler is called to switch
	 * the T-state. If the coordination type is SW_ALL or HW_ALL, it
	 * must be called for every affected CPU; otherwise calling it
	 * for the CPU identified by pr is enough.
	 */
1073 	if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
1074 		/* FIXME: use work_on_cpu() */
1075 		set_cpus_allowed_ptr(current, cpumask_of(pr->id));
1076 		ret = p_throttling->acpi_processor_set_throttling(pr,
1077 						t_state.target_state, force);
1078 	} else {
		/*
		 * When the T-state coordination is SW_ALL or HW_ALL,
		 * the T-state must be set on every affected CPU.
		 */
1084 		for_each_cpu(i, online_throttling_cpus) {
1085 			match_pr = per_cpu(processors, i);
			/*
			 * If the pointer is invalid, report an error and
			 * continue with the next CPU.
			 */
1090 			if (!match_pr) {
1091 				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1092 					"Invalid Pointer for CPU %d\n", i));
1093 				continue;
1094 			}
			/*
			 * If throttling control is unsupported on CPU i,
			 * report it and continue with the next CPU.
			 */
			if (!match_pr->flags.throttling) {
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"Throttling control is unsupported "
					"on CPU %d\n", i));
1103 				continue;
1104 			}
1105 			t_state.cpu = i;
1106 			/* FIXME: use work_on_cpu() */
1107 			set_cpus_allowed_ptr(current, cpumask_of(i));
1108 			ret = match_pr->throttling.
1109 				acpi_processor_set_throttling(
1110 				match_pr, t_state.target_state, force);
1111 		}
1112 	}
	/*
	 * After set_throttling has been called, the throttling
	 * notifier is invoked for every affected CPU to update its
	 * recorded T-state. The notifier event is
	 * THROTTLING_POSTCHANGE.
	 */
1119 	for_each_cpu(i, online_throttling_cpus) {
1120 		t_state.cpu = i;
1121 		acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
1122 							&t_state);
1123 	}
1124 	/* restore the previous state */
1125 	/* FIXME: use work_on_cpu() */
1126 	set_cpus_allowed_ptr(current, saved_mask);
1127 	free_cpumask_var(online_throttling_cpus);
1128 	free_cpumask_var(saved_mask);
1129 	return ret;
1130 }
1131 
1132 int acpi_processor_get_throttling_info(struct acpi_processor *pr)
1133 {
1134 	int result = 0;
1135 	struct acpi_processor_throttling *pthrottling;
1136 
1137 	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1138 			  "pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
1139 			  pr->throttling.address,
1140 			  pr->throttling.duty_offset,
1141 			  pr->throttling.duty_width));
1142 
	/*
	 * Evaluate _PTC, _TSS and _TPC.
	 * They must all be present; otherwise none of them can be used.
	 */
1147 	if (acpi_processor_get_throttling_control(pr) ||
1148 		acpi_processor_get_throttling_states(pr) ||
		acpi_processor_get_platform_limit(pr)) {
1151 		pr->throttling.acpi_processor_get_throttling =
1152 		    &acpi_processor_get_throttling_fadt;
1153 		pr->throttling.acpi_processor_set_throttling =
1154 		    &acpi_processor_set_throttling_fadt;
1155 		if (acpi_processor_get_fadt_info(pr))
1156 			return 0;
1157 	} else {
1158 		pr->throttling.acpi_processor_get_throttling =
1159 		    &acpi_processor_get_throttling_ptc;
1160 		pr->throttling.acpi_processor_set_throttling =
1161 		    &acpi_processor_set_throttling_ptc;
1162 	}
1163 
	/*
	 * If the _TSD package for a CPU cannot be parsed, that CPU is
	 * assumed to have no coordination with other CPUs.
	 */
1168 	if (acpi_processor_get_tsd(pr)) {
1169 		pthrottling = &pr->throttling;
1170 		pthrottling->tsd_valid_flag = 0;
1171 		cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
1172 		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
1173 	}
1174 
1175 	/*
1176 	 * PIIX4 Errata: We don't support throttling on the original PIIX4.
1177 	 * This shouldn't be an issue as few (if any) mobile systems ever
1178 	 * used this part.
1179 	 */
1180 	if (errata.piix4.throttle) {
1181 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1182 				  "Throttling not supported on PIIX4 A- or B-step\n"));
1183 		return 0;
1184 	}
1185 
1186 	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
1187 			  pr->throttling.state_count));
1188 
1189 	pr->flags.throttling = 1;
1190 
1191 	/*
1192 	 * Disable throttling (if enabled).  We'll let subsequent policy (e.g.
1193 	 * thermal) decide to lower performance if it so chooses, but for now
1194 	 * we'll crank up the speed.
1195 	 */
1196 
1197 	result = acpi_processor_get_throttling(pr);
1198 	if (result)
1199 		goto end;
1200 
1201 	if (pr->throttling.state) {
1202 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1203 				  "Disabling throttling (was T%d)\n",
1204 				  pr->throttling.state));
1205 		result = acpi_processor_set_throttling(pr, 0, false);
1206 		if (result)
1207 			goto end;
1208 	}
1209 
end:
1211 	if (result)
1212 		pr->flags.throttling = 0;
1213 
1214 	return result;
1215 }
1216 
1217 /* proc interface */
1218 #ifdef CONFIG_ACPI_PROCFS
1219 static int acpi_processor_throttling_seq_show(struct seq_file *seq,
1220 					      void *offset)
1221 {
1222 	struct acpi_processor *pr = seq->private;
1223 	int i = 0;
1224 	int result = 0;
1225 
1226 	if (!pr)
1227 		goto end;
1228 
	if (pr->throttling.state_count <= 0) {
1230 		seq_puts(seq, "<not supported>\n");
1231 		goto end;
1232 	}
1233 
1234 	result = acpi_processor_get_throttling(pr);
1235 
1236 	if (result) {
1237 		seq_puts(seq,
1238 			 "Could not determine current throttling state.\n");
1239 		goto end;
1240 	}
1241 
1242 	seq_printf(seq, "state count:             %d\n"
1243 		   "active state:            T%d\n"
1244 		   "state available: T%d to T%d\n",
1245 		   pr->throttling.state_count, pr->throttling.state,
1246 		   pr->throttling_platform_limit,
1247 		   pr->throttling.state_count - 1);
1248 
1249 	seq_puts(seq, "states:\n");
1250 	if (pr->throttling.acpi_processor_get_throttling ==
1251 			acpi_processor_get_throttling_fadt) {
1252 		for (i = 0; i < pr->throttling.state_count; i++)
1253 			seq_printf(seq, "   %cT%d:                  %02d%%\n",
1254 				   (i == pr->throttling.state ? '*' : ' '), i,
1255 				   (pr->throttling.states[i].performance ? pr->
1256 				    throttling.states[i].performance / 10 : 0));
1257 	} else {
1258 		for (i = 0; i < pr->throttling.state_count; i++)
1259 			seq_printf(seq, "   %cT%d:                  %02d%%\n",
1260 				   (i == pr->throttling.state ? '*' : ' '), i,
1261 				   (int)pr->throttling.states_tss[i].
1262 				   freqpercentage);
1263 	}
1264 
end:
1266 	return 0;
1267 }
1268 
1269 static int acpi_processor_throttling_open_fs(struct inode *inode,
1270 					     struct file *file)
1271 {
1272 	return single_open(file, acpi_processor_throttling_seq_show,
1273 			   PDE(inode)->data);
1274 }
1275 
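/*
 * Accepts a T-state written as e.g. "3" or "T3". Assuming the usual proc
 * path, something like:
 *
 *	echo T2 > /proc/acpi/processor/CPU0/throttling
 *
 * selects T2 (subject to the _TPC platform limit).
 */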
1276 static ssize_t acpi_processor_write_throttling(struct file *file,
1277 					       const char __user * buffer,
1278 					       size_t count, loff_t * data)
1279 {
1280 	int result = 0;
1281 	struct seq_file *m = file->private_data;
1282 	struct acpi_processor *pr = m->private;
1283 	char state_string[5] = "";
1284 	char *charp = NULL;
1285 	size_t state_val = 0;
1286 	char tmpbuf[5] = "";
1287 
1288 	if (!pr || (count > sizeof(state_string) - 1))
1289 		return -EINVAL;
1290 
1291 	if (copy_from_user(state_string, buffer, count))
1292 		return -EFAULT;
1293 
1294 	state_string[count] = '\0';
1295 	if ((count > 0) && (state_string[count-1] == '\n'))
1296 		state_string[count-1] = '\0';
1297 
1298 	charp = state_string;
1299 	if ((state_string[0] == 't') || (state_string[0] == 'T'))
1300 		charp++;
1301 
1302 	state_val = simple_strtoul(charp, NULL, 0);
1303 	if (state_val >= pr->throttling.state_count)
1304 		return -EINVAL;
1305 
1306 	snprintf(tmpbuf, 5, "%zu", state_val);
1307 
1308 	if (strcmp(tmpbuf, charp) != 0)
1309 		return -EINVAL;
1310 
1311 	result = acpi_processor_set_throttling(pr, state_val, false);
1312 	if (result)
1313 		return result;
1314 
1315 	return count;
1316 }
1317 
1318 const struct file_operations acpi_processor_throttling_fops = {
1319 	.owner = THIS_MODULE,
1320 	.open = acpi_processor_throttling_open_fs,
1321 	.read = seq_read,
1322 	.write = acpi_processor_write_throttling,
1323 	.llseek = seq_lseek,
1324 	.release = single_release,
1325 };
1326 #endif
1327