xref: /linux/drivers/cpufreq/cpufreq.c (revision f8324e20f8289dffc646d64366332e05eaacab25)
1 /*
2  *  linux/drivers/cpufreq/cpufreq.c
3  *
4  *  Copyright (C) 2001 Russell King
5  *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6  *
7  *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
8  *	Added handling for CPU hotplug
9  *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
10  *	Fix handling for CPU hotplug -- affected CPUs
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License version 2 as
14  * published by the Free Software Foundation.
15  *
16  */
17 
18 #include <linux/kernel.h>
19 #include <linux/module.h>
20 #include <linux/init.h>
21 #include <linux/notifier.h>
22 #include <linux/cpufreq.h>
23 #include <linux/delay.h>
24 #include <linux/interrupt.h>
25 #include <linux/spinlock.h>
26 #include <linux/device.h>
27 #include <linux/slab.h>
28 #include <linux/cpu.h>
29 #include <linux/completion.h>
30 #include <linux/mutex.h>
31 
32 #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \
33 						"cpufreq-core", msg)
34 
35 /**
36  * The "cpufreq driver" - the arch- or hardware-dependent low
37  * level driver of CPUFreq support, and its spinlock. This lock
38  * also protects the cpufreq_cpu_data array.
39  */
40 static struct cpufreq_driver *cpufreq_driver;
41 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
42 #ifdef CONFIG_HOTPLUG_CPU
43 /* This one keeps track of the previously set governor of a removed CPU */
44 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
45 #endif
46 static DEFINE_SPINLOCK(cpufreq_driver_lock);
47 
48 /*
49  * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
50  * all cpufreq/hotplug/workqueue/etc related lock issues.
51  *
52  * The rules for this semaphore:
53  * - Any routine that wants to read from the policy structure will
54  *   do a down_read on this semaphore.
55  * - Any routine that will write to the policy structure and/or may take away
56  *   the policy altogether (eg. CPU hotplug), will hold this lock in write
57  *   mode before doing so.
58  *
59  * Additional rules:
60  * - All holders of the lock should check to make sure that the CPU they
61  *   are concerned with is online after they get the lock.
62  * - Governor routines that can be called in the cpufreq hotplug path should
63  *   not take this sem, as the top-level hotplug notifier handler takes it.
64  * - Lock should not be held across
65  *     __cpufreq_governor(data, CPUFREQ_GOV_STOP);
66  */
67 static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
68 static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
69 
70 #define lock_policy_rwsem(mode, cpu)					\
71 int lock_policy_rwsem_##mode						\
72 (int cpu)								\
73 {									\
74 	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
75 	BUG_ON(policy_cpu == -1);					\
76 	down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
77 	if (unlikely(!cpu_online(cpu))) {				\
78 		up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));	\
79 		return -1;						\
80 	}								\
81 									\
82 	return 0;							\
83 }
84 
85 lock_policy_rwsem(read, cpu);
86 EXPORT_SYMBOL_GPL(lock_policy_rwsem_read);
87 
88 lock_policy_rwsem(write, cpu);
89 EXPORT_SYMBOL_GPL(lock_policy_rwsem_write);
90 
91 void unlock_policy_rwsem_read(int cpu)
92 {
93 	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
94 	BUG_ON(policy_cpu == -1);
95 	up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
96 }
97 EXPORT_SYMBOL_GPL(unlock_policy_rwsem_read);
98 
99 void unlock_policy_rwsem_write(int cpu)
100 {
101 	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
102 	BUG_ON(policy_cpu == -1);
103 	up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
104 }
105 EXPORT_SYMBOL_GPL(unlock_policy_rwsem_write);
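
/*
 * Illustrative sketch (not part of the original file): how a reader of a
 * policy structure is expected to use the helpers above, following the
 * locking rules documented near the top of this file.  The function name is
 * hypothetical and the block is not compiled.
 */
#if 0
static unsigned int example_read_policy_min(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned int min_freq = 0;

	/* returns -1 (with the sem already released) if the CPU went offline */
	if (lock_policy_rwsem_read(cpu) < 0)
		return 0;

	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy)
		min_freq = policy->min;

	unlock_policy_rwsem_read(cpu);
	return min_freq;
}
#endif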
106 
107 
108 /* internal prototypes */
109 static int __cpufreq_governor(struct cpufreq_policy *policy,
110 		unsigned int event);
111 static unsigned int __cpufreq_get(unsigned int cpu);
112 static void handle_update(struct work_struct *work);
113 
114 /**
115  * Two notifier lists: the "policy" list is involved in the
116  * validation process for a new CPU frequency policy; the
117  * "transition" list for kernel code that needs to handle
118  * changes to devices when the CPU clock speed changes.
119  * The mutex locks both lists.
120  */
121 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
122 static struct srcu_notifier_head cpufreq_transition_notifier_list;
123 
124 static bool init_cpufreq_transition_notifier_list_called;
125 static int __init init_cpufreq_transition_notifier_list(void)
126 {
127 	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
128 	init_cpufreq_transition_notifier_list_called = true;
129 	return 0;
130 }
131 pure_initcall(init_cpufreq_transition_notifier_list);
132 
133 static LIST_HEAD(cpufreq_governor_list);
134 static DEFINE_MUTEX(cpufreq_governor_mutex);
135 
136 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
137 {
138 	struct cpufreq_policy *data;
139 	unsigned long flags;
140 
141 	if (cpu >= nr_cpu_ids)
142 		goto err_out;
143 
144 	/* get the cpufreq driver */
145 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
146 
147 	if (!cpufreq_driver)
148 		goto err_out_unlock;
149 
150 	if (!try_module_get(cpufreq_driver->owner))
151 		goto err_out_unlock;
152 
153 
154 	/* get the CPU */
155 	data = per_cpu(cpufreq_cpu_data, cpu);
156 
157 	if (!data)
158 		goto err_out_put_module;
159 
160 	if (!kobject_get(&data->kobj))
161 		goto err_out_put_module;
162 
163 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
164 	return data;
165 
166 err_out_put_module:
167 	module_put(cpufreq_driver->owner);
168 err_out_unlock:
169 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
170 err_out:
171 	return NULL;
172 }
173 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
174 
175 
176 void cpufreq_cpu_put(struct cpufreq_policy *data)
177 {
178 	kobject_put(&data->kobj);
179 	module_put(cpufreq_driver->owner);
180 }
181 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
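
/*
 * Illustrative sketch (not part of the original file): cpufreq_cpu_get()
 * takes a reference on both the policy kobject and the driver module, so
 * every successful get must be paired with cpufreq_cpu_put().  The function
 * name is hypothetical and the block is not compiled.
 */
#if 0
static unsigned int example_read_cur_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int cur;

	if (!policy)
		return 0;	/* no driver loaded or CPU not managed */

	cur = policy->cur;	/* last known frequency in kHz */
	cpufreq_cpu_put(policy);
	return cur;
}
#endif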
182 
183 
184 /*********************************************************************
185  *                     UNIFIED DEBUG HELPERS                         *
186  *********************************************************************/
187 #ifdef CONFIG_CPU_FREQ_DEBUG
188 
189 /* what part(s) of the CPUfreq subsystem are debugged? */
190 static unsigned int debug;
191 
192 /* is the debug output ratelimit'ed using printk_ratelimit? User can
193  * set or modify this value.
194  */
195 static unsigned int debug_ratelimit = 1;
196 
197 /* is the printk_ratelimit'ing enabled? It's enabled after a successful
198  * loading of a cpufreq driver, temporarily disabled when a new policy
199  * is set, and disabled upon cpufreq driver removal
200  */
201 static unsigned int disable_ratelimit = 1;
202 static DEFINE_SPINLOCK(disable_ratelimit_lock);
203 
204 static void cpufreq_debug_enable_ratelimit(void)
205 {
206 	unsigned long flags;
207 
208 	spin_lock_irqsave(&disable_ratelimit_lock, flags);
209 	if (disable_ratelimit)
210 		disable_ratelimit--;
211 	spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
212 }
213 
214 static void cpufreq_debug_disable_ratelimit(void)
215 {
216 	unsigned long flags;
217 
218 	spin_lock_irqsave(&disable_ratelimit_lock, flags);
219 	disable_ratelimit++;
220 	spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
221 }
222 
223 void cpufreq_debug_printk(unsigned int type, const char *prefix,
224 			const char *fmt, ...)
225 {
226 	char s[256];
227 	va_list args;
228 	unsigned int len;
229 	unsigned long flags;
230 
231 	WARN_ON(!prefix);
232 	if (type & debug) {
233 		spin_lock_irqsave(&disable_ratelimit_lock, flags);
234 		if (!disable_ratelimit && debug_ratelimit
235 					&& !printk_ratelimit()) {
236 			spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
237 			return;
238 		}
239 		spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
240 
241 		len = snprintf(s, 256, KERN_DEBUG "%s: ", prefix);
242 
243 		va_start(args, fmt);
244 		len += vsnprintf(&s[len], (256 - len), fmt, args);
245 		va_end(args);
246 
247 		printk(s);
248 
249 		WARN_ON(len < 5);
250 	}
251 }
252 EXPORT_SYMBOL(cpufreq_debug_printk);
253 
254 
255 module_param(debug, uint, 0644);
256 MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core,"
257 			" 2 to debug drivers, and 4 to debug governors.");
258 
259 module_param(debug_ratelimit, uint, 0644);
260 MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging:"
261 					" set to 0 to disable ratelimiting.");
262 
263 #else /* !CONFIG_CPU_FREQ_DEBUG */
264 
265 static inline void cpufreq_debug_enable_ratelimit(void) { return; }
266 static inline void cpufreq_debug_disable_ratelimit(void) { return; }
267 
268 #endif /* CONFIG_CPU_FREQ_DEBUG */
269 
270 
271 /*********************************************************************
272  *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
273  *********************************************************************/
274 
275 /**
276  * adjust_jiffies - adjust the system "loops_per_jiffy"
277  *
278  * This function alters the system "loops_per_jiffy" for the clock
279  * speed change. Note that loops_per_jiffy cannot be updated on SMP
280  * systems as each CPU might be scaled differently. So, use the arch
281  * per-CPU loops_per_jiffy value wherever possible.
282  */
283 #ifndef CONFIG_SMP
284 static unsigned long l_p_j_ref;
285 static unsigned int  l_p_j_ref_freq;
286 
287 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
288 {
289 	if (ci->flags & CPUFREQ_CONST_LOOPS)
290 		return;
291 
292 	if (!l_p_j_ref_freq) {
293 		l_p_j_ref = loops_per_jiffy;
294 		l_p_j_ref_freq = ci->old;
295 		dprintk("saving %lu as reference value for loops_per_jiffy; "
296 			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
297 	}
298 	if ((val == CPUFREQ_PRECHANGE  && ci->old < ci->new) ||
299 	    (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) ||
300 	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
301 		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
302 								ci->new);
303 		dprintk("scaling loops_per_jiffy to %lu "
304 			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
305 	}
306 }
307 #else
308 static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
309 {
310 	return;
311 }
312 #endif
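
/*
 * Worked example (added for illustration, not in the original file):
 * loops_per_jiffy scales linearly with the clock.  With a reference value
 * saved at 800000 kHz, a switch to 400000 kHz gives
 * new_lpj = l_p_j_ref * 400000 / 800000, i.e. half the reference value.
 * The numbers are made up; cpufreq_scale() performs this proportional
 * computation.
 */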
313 
314 
315 /**
316  * cpufreq_notify_transition - call notifier chain and adjust_jiffies
317  * on frequency transition.
318  *
319  * This function calls the transition notifiers and the "adjust_jiffies"
320  * function. It is called twice on all CPU frequency changes that have
321  * external effects.
322  */
323 void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
324 {
325 	struct cpufreq_policy *policy;
326 
327 	BUG_ON(irqs_disabled());
328 
329 	freqs->flags = cpufreq_driver->flags;
330 	dprintk("notification %u of frequency transition to %u kHz\n",
331 		state, freqs->new);
332 
333 	policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
334 	switch (state) {
335 
336 	case CPUFREQ_PRECHANGE:
337 		/* detect if the driver reported a value as "old frequency"
338 		 * which is not equal to what the cpufreq core thinks is
339 		 * "old frequency".
340 		 */
341 		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
342 			if ((policy) && (policy->cpu == freqs->cpu) &&
343 			    (policy->cur) && (policy->cur != freqs->old)) {
344 				dprintk("Warning: CPU frequency is"
345 					" %u kHz, cpufreq assumed %u kHz.\n",
346 					freqs->old, policy->cur);
347 				freqs->old = policy->cur;
348 			}
349 		}
350 		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
351 				CPUFREQ_PRECHANGE, freqs);
352 		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
353 		break;
354 
355 	case CPUFREQ_POSTCHANGE:
356 		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
357 		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
358 				CPUFREQ_POSTCHANGE, freqs);
359 		if (likely(policy) && likely(policy->cpu == freqs->cpu))
360 			policy->cur = freqs->new;
361 		break;
362 	}
363 }
364 EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
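
/*
 * Illustrative sketch (not part of the original file): the expected calling
 * pattern from a scaling driver's ->target() hook, which wraps the actual
 * hardware change with a PRECHANGE/POSTCHANGE pair.  Names such as
 * example_write_freq_to_hw() are hypothetical; the block is not compiled.
 */
#if 0
static int example_target(struct cpufreq_policy *policy,
			  unsigned int target_freq, unsigned int relation)
{
	struct cpufreq_freqs freqs;

	freqs.cpu = policy->cpu;
	freqs.old = policy->cur;
	freqs.new = target_freq;

	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	example_write_freq_to_hw(target_freq);	/* hypothetical hw access */
	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

	return 0;
}
#endif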
365 
366 
367 
368 /*********************************************************************
369  *                          SYSFS INTERFACE                          *
370  *********************************************************************/
371 
372 static struct cpufreq_governor *__find_governor(const char *str_governor)
373 {
374 	struct cpufreq_governor *t;
375 
376 	list_for_each_entry(t, &cpufreq_governor_list, governor_list)
377 		if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
378 			return t;
379 
380 	return NULL;
381 }
382 
383 /**
384  * cpufreq_parse_governor - parse a governor string
385  */
386 static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
387 				struct cpufreq_governor **governor)
388 {
389 	int err = -EINVAL;
390 
391 	if (!cpufreq_driver)
392 		goto out;
393 
394 	if (cpufreq_driver->setpolicy) {
395 		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
396 			*policy = CPUFREQ_POLICY_PERFORMANCE;
397 			err = 0;
398 		} else if (!strnicmp(str_governor, "powersave",
399 						CPUFREQ_NAME_LEN)) {
400 			*policy = CPUFREQ_POLICY_POWERSAVE;
401 			err = 0;
402 		}
403 	} else if (cpufreq_driver->target) {
404 		struct cpufreq_governor *t;
405 
406 		mutex_lock(&cpufreq_governor_mutex);
407 
408 		t = __find_governor(str_governor);
409 
410 		if (t == NULL) {
411 			char *name = kasprintf(GFP_KERNEL, "cpufreq_%s",
412 								str_governor);
413 
414 			if (name) {
415 				int ret;
416 
417 				mutex_unlock(&cpufreq_governor_mutex);
418 				ret = request_module("%s", name);
419 				mutex_lock(&cpufreq_governor_mutex);
420 
421 				if (ret == 0)
422 					t = __find_governor(str_governor);
423 			}
424 
425 			kfree(name);
426 		}
427 
428 		if (t != NULL) {
429 			*governor = t;
430 			err = 0;
431 		}
432 
433 		mutex_unlock(&cpufreq_governor_mutex);
434 	}
435 out:
436 	return err;
437 }
438 
439 
440 /**
441  * cpufreq_per_cpu_attr_read() / show_##file_name() -
442  * print out cpufreq information
443  *
444  * Write out information from cpufreq_driver->policy[cpu]; object must be
445  * "unsigned int".
446  */
447 
448 #define show_one(file_name, object)			\
449 static ssize_t show_##file_name				\
450 (struct cpufreq_policy *policy, char *buf)		\
451 {							\
452 	return sprintf(buf, "%u\n", policy->object);	\
453 }
454 
455 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
456 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
457 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
458 show_one(scaling_min_freq, min);
459 show_one(scaling_max_freq, max);
460 show_one(scaling_cur_freq, cur);
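
/*
 * For illustration (not part of the original file): show_one(scaling_min_freq,
 * min) above expands to roughly the accessor below; cpufreq_freq_attr_rw()
 * later wires it into the sysfs show()/store() dispatch.
 */
#if 0
static ssize_t show_scaling_min_freq(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", policy->min);
}
#endif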
461 
462 static int __cpufreq_set_policy(struct cpufreq_policy *data,
463 				struct cpufreq_policy *policy);
464 
465 /**
466  * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
467  */
468 #define store_one(file_name, object)			\
469 static ssize_t store_##file_name					\
470 (struct cpufreq_policy *policy, const char *buf, size_t count)		\
471 {									\
472 	unsigned int ret = -EINVAL;					\
473 	struct cpufreq_policy new_policy;				\
474 									\
475 	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
476 	if (ret)							\
477 		return -EINVAL;						\
478 									\
479 	ret = sscanf(buf, "%u", &new_policy.object);			\
480 	if (ret != 1)							\
481 		return -EINVAL;						\
482 									\
483 	ret = __cpufreq_set_policy(policy, &new_policy);		\
484 	policy->user_policy.object = policy->object;			\
485 									\
486 	return ret ? ret : count;					\
487 }
488 
489 store_one(scaling_min_freq, min);
490 store_one(scaling_max_freq, max);
491 
492 /**
493  * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
494  */
495 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
496 					char *buf)
497 {
498 	unsigned int cur_freq = __cpufreq_get(policy->cpu);
499 	if (!cur_freq)
500 		return sprintf(buf, "<unknown>\n");
501 	return sprintf(buf, "%u\n", cur_freq);
502 }
503 
504 
505 /**
506  * show_scaling_governor - show the current policy for the specified CPU
507  */
508 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
509 {
510 	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
511 		return sprintf(buf, "powersave\n");
512 	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
513 		return sprintf(buf, "performance\n");
514 	else if (policy->governor)
515 		return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n",
516 				policy->governor->name);
517 	return -EINVAL;
518 }
519 
520 
521 /**
522  * store_scaling_governor - store policy for the specified CPU
523  */
524 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
525 					const char *buf, size_t count)
526 {
527 	unsigned int ret = -EINVAL;
528 	char	str_governor[16];
529 	struct cpufreq_policy new_policy;
530 
531 	ret = cpufreq_get_policy(&new_policy, policy->cpu);
532 	if (ret)
533 		return ret;
534 
535 	ret = sscanf(buf, "%15s", str_governor);
536 	if (ret != 1)
537 		return -EINVAL;
538 
539 	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
540 						&new_policy.governor))
541 		return -EINVAL;
542 
543 	/* Do not use cpufreq_set_policy here or the user_policy.max
544 	   will be wrongly overridden */
545 	ret = __cpufreq_set_policy(policy, &new_policy);
546 
547 	policy->user_policy.policy = policy->policy;
548 	policy->user_policy.governor = policy->governor;
549 
550 	if (ret)
551 		return ret;
552 	else
553 		return count;
554 }
555 
556 /**
557  * show_scaling_driver - show the cpufreq driver currently loaded
558  */
559 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
560 {
561 	return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name);
562 }
563 
564 /**
565  * show_scaling_available_governors - show the available CPUfreq governors
566  */
567 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
568 						char *buf)
569 {
570 	ssize_t i = 0;
571 	struct cpufreq_governor *t;
572 
573 	if (!cpufreq_driver->target) {
574 		i += sprintf(buf, "performance powersave");
575 		goto out;
576 	}
577 
578 	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
579 		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
580 		    - (CPUFREQ_NAME_LEN + 2)))
581 			goto out;
582 		i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name);
583 	}
584 out:
585 	i += sprintf(&buf[i], "\n");
586 	return i;
587 }
588 
589 static ssize_t show_cpus(const struct cpumask *mask, char *buf)
590 {
591 	ssize_t i = 0;
592 	unsigned int cpu;
593 
594 	for_each_cpu(cpu, mask) {
595 		if (i)
596 			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
597 		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
598 		if (i >= (PAGE_SIZE - 5))
599 			break;
600 	}
601 	i += sprintf(&buf[i], "\n");
602 	return i;
603 }
604 
605 /**
606  * show_related_cpus - show the CPUs affected by each transition even if
607  * hw coordination is in use
608  */
609 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
610 {
611 	if (cpumask_empty(policy->related_cpus))
612 		return show_cpus(policy->cpus, buf);
613 	return show_cpus(policy->related_cpus, buf);
614 }
615 
616 /**
617  * show_affected_cpus - show the CPUs affected by each transition
618  */
619 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
620 {
621 	return show_cpus(policy->cpus, buf);
622 }
623 
624 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
625 					const char *buf, size_t count)
626 {
627 	unsigned int freq = 0;
628 	unsigned int ret;
629 
630 	if (!policy->governor || !policy->governor->store_setspeed)
631 		return -EINVAL;
632 
633 	ret = sscanf(buf, "%u", &freq);
634 	if (ret != 1)
635 		return -EINVAL;
636 
637 	policy->governor->store_setspeed(policy, freq);
638 
639 	return count;
640 }
641 
642 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
643 {
644 	if (!policy->governor || !policy->governor->show_setspeed)
645 		return sprintf(buf, "<unsupported>\n");
646 
647 	return policy->governor->show_setspeed(policy, buf);
648 }
649 
650 /**
651  * show_bios_limit - show the current cpufreq HW/BIOS limitation
652  */
653 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
654 {
655 	unsigned int limit;
656 	int ret;
657 	if (cpufreq_driver->bios_limit) {
658 		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
659 		if (!ret)
660 			return sprintf(buf, "%u\n", limit);
661 	}
662 	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
663 }
664 
665 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
666 cpufreq_freq_attr_ro(cpuinfo_min_freq);
667 cpufreq_freq_attr_ro(cpuinfo_max_freq);
668 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
669 cpufreq_freq_attr_ro(scaling_available_governors);
670 cpufreq_freq_attr_ro(scaling_driver);
671 cpufreq_freq_attr_ro(scaling_cur_freq);
672 cpufreq_freq_attr_ro(bios_limit);
673 cpufreq_freq_attr_ro(related_cpus);
674 cpufreq_freq_attr_ro(affected_cpus);
675 cpufreq_freq_attr_rw(scaling_min_freq);
676 cpufreq_freq_attr_rw(scaling_max_freq);
677 cpufreq_freq_attr_rw(scaling_governor);
678 cpufreq_freq_attr_rw(scaling_setspeed);
679 
680 static struct attribute *default_attrs[] = {
681 	&cpuinfo_min_freq.attr,
682 	&cpuinfo_max_freq.attr,
683 	&cpuinfo_transition_latency.attr,
684 	&scaling_min_freq.attr,
685 	&scaling_max_freq.attr,
686 	&affected_cpus.attr,
687 	&related_cpus.attr,
688 	&scaling_governor.attr,
689 	&scaling_driver.attr,
690 	&scaling_available_governors.attr,
691 	&scaling_setspeed.attr,
692 	NULL
693 };
694 
695 struct kobject *cpufreq_global_kobject;
696 EXPORT_SYMBOL(cpufreq_global_kobject);
697 
698 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
699 #define to_attr(a) container_of(a, struct freq_attr, attr)
700 
701 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
702 {
703 	struct cpufreq_policy *policy = to_policy(kobj);
704 	struct freq_attr *fattr = to_attr(attr);
705 	ssize_t ret = -EINVAL;
706 	policy = cpufreq_cpu_get(policy->cpu);
707 	if (!policy)
708 		goto no_policy;
709 
710 	if (lock_policy_rwsem_read(policy->cpu) < 0)
711 		goto fail;
712 
713 	if (fattr->show)
714 		ret = fattr->show(policy, buf);
715 	else
716 		ret = -EIO;
717 
718 	unlock_policy_rwsem_read(policy->cpu);
719 fail:
720 	cpufreq_cpu_put(policy);
721 no_policy:
722 	return ret;
723 }
724 
725 static ssize_t store(struct kobject *kobj, struct attribute *attr,
726 		     const char *buf, size_t count)
727 {
728 	struct cpufreq_policy *policy = to_policy(kobj);
729 	struct freq_attr *fattr = to_attr(attr);
730 	ssize_t ret = -EINVAL;
731 	policy = cpufreq_cpu_get(policy->cpu);
732 	if (!policy)
733 		goto no_policy;
734 
735 	if (lock_policy_rwsem_write(policy->cpu) < 0)
736 		goto fail;
737 
738 	if (fattr->store)
739 		ret = fattr->store(policy, buf, count);
740 	else
741 		ret = -EIO;
742 
743 	unlock_policy_rwsem_write(policy->cpu);
744 fail:
745 	cpufreq_cpu_put(policy);
746 no_policy:
747 	return ret;
748 }
749 
750 static void cpufreq_sysfs_release(struct kobject *kobj)
751 {
752 	struct cpufreq_policy *policy = to_policy(kobj);
753 	dprintk("last reference is dropped\n");
754 	complete(&policy->kobj_unregister);
755 }
756 
757 static const struct sysfs_ops sysfs_ops = {
758 	.show	= show,
759 	.store	= store,
760 };
761 
762 static struct kobj_type ktype_cpufreq = {
763 	.sysfs_ops	= &sysfs_ops,
764 	.default_attrs	= default_attrs,
765 	.release	= cpufreq_sysfs_release,
766 };
767 
768 /*
769  * Returns:
770  *   Negative: Failure
771  *   0:        Success
772  *   Positive: When we have a managed CPU and the sysfs got symlinked
773  */
774 static int cpufreq_add_dev_policy(unsigned int cpu,
775 				  struct cpufreq_policy *policy,
776 				  struct sys_device *sys_dev)
777 {
778 	int ret = 0;
779 #ifdef CONFIG_SMP
780 	unsigned long flags;
781 	unsigned int j;
782 #ifdef CONFIG_HOTPLUG_CPU
783 	struct cpufreq_governor *gov;
784 
785 	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
786 	if (gov) {
787 		policy->governor = gov;
788 		dprintk("Restoring governor %s for cpu %d\n",
789 		       policy->governor->name, cpu);
790 	}
791 #endif
792 
793 	for_each_cpu(j, policy->cpus) {
794 		struct cpufreq_policy *managed_policy;
795 
796 		if (cpu == j)
797 			continue;
798 
799 		/* Check for existing affected CPUs.
800 		 * They may not be aware of it due to CPU Hotplug.
801 		 * cpufreq_cpu_put is called when the device is removed
802 		 * in __cpufreq_remove_dev()
803 		 */
804 		managed_policy = cpufreq_cpu_get(j);
805 		if (unlikely(managed_policy)) {
806 
807 			/* Set proper policy_cpu */
808 			unlock_policy_rwsem_write(cpu);
809 			per_cpu(cpufreq_policy_cpu, cpu) = managed_policy->cpu;
810 
811 			if (lock_policy_rwsem_write(cpu) < 0) {
812 				/* Should not go through policy unlock path */
813 				if (cpufreq_driver->exit)
814 					cpufreq_driver->exit(policy);
815 				cpufreq_cpu_put(managed_policy);
816 				return -EBUSY;
817 			}
818 
819 			spin_lock_irqsave(&cpufreq_driver_lock, flags);
820 			cpumask_copy(managed_policy->cpus, policy->cpus);
821 			per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
822 			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
823 
824 			dprintk("CPU already managed, adding link\n");
825 			ret = sysfs_create_link(&sys_dev->kobj,
826 						&managed_policy->kobj,
827 						"cpufreq");
828 			if (ret)
829 				cpufreq_cpu_put(managed_policy);
830 			/*
831 			 * Success. We only needed to be added to the mask.
832 			 * Call driver->exit() because only the cpu parent of
833 			 * the kobj needed to call init().
834 			 */
835 			if (cpufreq_driver->exit)
836 				cpufreq_driver->exit(policy);
837 
838 			if (!ret)
839 				return 1;
840 			else
841 				return ret;
842 		}
843 	}
844 #endif
845 	return ret;
846 }
847 
848 
849 /* symlink affected CPUs */
850 static int cpufreq_add_dev_symlink(unsigned int cpu,
851 				   struct cpufreq_policy *policy)
852 {
853 	unsigned int j;
854 	int ret = 0;
855 
856 	for_each_cpu(j, policy->cpus) {
857 		struct cpufreq_policy *managed_policy;
858 		struct sys_device *cpu_sys_dev;
859 
860 		if (j == cpu)
861 			continue;
862 		if (!cpu_online(j))
863 			continue;
864 
865 		dprintk("CPU %u already managed, adding link\n", j);
866 		managed_policy = cpufreq_cpu_get(cpu);
867 		cpu_sys_dev = get_cpu_sysdev(j);
868 		ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
869 					"cpufreq");
870 		if (ret) {
871 			cpufreq_cpu_put(managed_policy);
872 			return ret;
873 		}
874 	}
875 	return ret;
876 }
877 
878 static int cpufreq_add_dev_interface(unsigned int cpu,
879 				     struct cpufreq_policy *policy,
880 				     struct sys_device *sys_dev)
881 {
882 	struct cpufreq_policy new_policy;
883 	struct freq_attr **drv_attr;
884 	unsigned long flags;
885 	int ret = 0;
886 	unsigned int j;
887 
888 	/* prepare interface data */
889 	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
890 				   &sys_dev->kobj, "cpufreq");
891 	if (ret)
892 		return ret;
893 
894 	/* set up files for this cpu device */
895 	drv_attr = cpufreq_driver->attr;
896 	while ((drv_attr) && (*drv_attr)) {
897 		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
898 		if (ret)
899 			goto err_out_kobj_put;
900 		drv_attr++;
901 	}
902 	if (cpufreq_driver->get) {
903 		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
904 		if (ret)
905 			goto err_out_kobj_put;
906 	}
907 	if (cpufreq_driver->target) {
908 		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
909 		if (ret)
910 			goto err_out_kobj_put;
911 	}
912 	if (cpufreq_driver->bios_limit) {
913 		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
914 		if (ret)
915 			goto err_out_kobj_put;
916 	}
917 
918 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
919 	for_each_cpu(j, policy->cpus) {
920 		if (!cpu_online(j))
921 			continue;
922 		per_cpu(cpufreq_cpu_data, j) = policy;
923 		per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
924 	}
925 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
926 
927 	ret = cpufreq_add_dev_symlink(cpu, policy);
928 	if (ret)
929 		goto err_out_kobj_put;
930 
931 	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
932 	/* assure that the starting sequence is run in __cpufreq_set_policy */
933 	policy->governor = NULL;
934 
935 	/* set default policy */
936 	ret = __cpufreq_set_policy(policy, &new_policy);
937 	policy->user_policy.policy = policy->policy;
938 	policy->user_policy.governor = policy->governor;
939 
940 	if (ret) {
941 		dprintk("setting policy failed\n");
942 		if (cpufreq_driver->exit)
943 			cpufreq_driver->exit(policy);
944 	}
945 	return ret;
946 
947 err_out_kobj_put:
948 	kobject_put(&policy->kobj);
949 	wait_for_completion(&policy->kobj_unregister);
950 	return ret;
951 }
952 
953 
954 /**
955  * cpufreq_add_dev - add a CPU device
956  *
957  * Adds the cpufreq interface for a CPU device.
958  *
959  * The Oracle says: try running cpufreq registration/unregistration concurrently
960  * with cpu hotplugging and all hell will break loose. Tried to clean this
961  * mess up, but more thorough testing is needed. - Mathieu
962  */
963 static int cpufreq_add_dev(struct sys_device *sys_dev)
964 {
965 	unsigned int cpu = sys_dev->id;
966 	int ret = 0, found = 0;
967 	struct cpufreq_policy *policy;
968 	unsigned long flags;
969 	unsigned int j;
970 #ifdef CONFIG_HOTPLUG_CPU
971 	int sibling;
972 #endif
973 
974 	if (cpu_is_offline(cpu))
975 		return 0;
976 
977 	cpufreq_debug_disable_ratelimit();
978 	dprintk("adding CPU %u\n", cpu);
979 
980 #ifdef CONFIG_SMP
981 	/* check whether a different CPU already registered this
982 	 * CPU because it is in the same boat. */
983 	policy = cpufreq_cpu_get(cpu);
984 	if (unlikely(policy)) {
985 		cpufreq_cpu_put(policy);
986 		cpufreq_debug_enable_ratelimit();
987 		return 0;
988 	}
989 #endif
990 
991 	if (!try_module_get(cpufreq_driver->owner)) {
992 		ret = -EINVAL;
993 		goto module_out;
994 	}
995 
996 	ret = -ENOMEM;
997 	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
998 	if (!policy)
999 		goto nomem_out;
1000 
1001 	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1002 		goto err_free_policy;
1003 
1004 	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1005 		goto err_free_cpumask;
1006 
1007 	policy->cpu = cpu;
1008 	cpumask_copy(policy->cpus, cpumask_of(cpu));
1009 
1010 	/* Initially set CPU itself as the policy_cpu */
1011 	per_cpu(cpufreq_policy_cpu, cpu) = cpu;
1012 	ret = (lock_policy_rwsem_write(cpu) < 0);
1013 	WARN_ON(ret);
1014 
1015 	init_completion(&policy->kobj_unregister);
1016 	INIT_WORK(&policy->update, handle_update);
1017 
1018 	/* Set governor before ->init, so that driver could check it */
1019 #ifdef CONFIG_HOTPLUG_CPU
1020 	for_each_online_cpu(sibling) {
1021 		struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
1022 		if (cp && cp->governor &&
1023 		    (cpumask_test_cpu(cpu, cp->related_cpus))) {
1024 			policy->governor = cp->governor;
1025 			found = 1;
1026 			break;
1027 		}
1028 	}
1029 #endif
1030 	if (!found)
1031 		policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
1032 	/* call driver. From then on the driver must be able
1033 	 * to accept all calls to ->verify and ->setpolicy for this CPU
1034 	 */
1035 	ret = cpufreq_driver->init(policy);
1036 	if (ret) {
1037 		dprintk("initialization failed\n");
1038 		goto err_unlock_policy;
1039 	}
1040 	policy->user_policy.min = policy->min;
1041 	policy->user_policy.max = policy->max;
1042 
1043 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1044 				     CPUFREQ_START, policy);
1045 
1046 	ret = cpufreq_add_dev_policy(cpu, policy, sys_dev);
1047 	if (ret) {
1048 		if (ret > 0)
1049 			/* This is a managed cpu, symlink created,
1050 			   exit with 0 */
1051 			ret = 0;
1052 		goto err_unlock_policy;
1053 	}
1054 
1055 	ret = cpufreq_add_dev_interface(cpu, policy, sys_dev);
1056 	if (ret)
1057 		goto err_out_unregister;
1058 
1059 	unlock_policy_rwsem_write(cpu);
1060 
1061 	kobject_uevent(&policy->kobj, KOBJ_ADD);
1062 	module_put(cpufreq_driver->owner);
1063 	dprintk("initialization complete\n");
1064 	cpufreq_debug_enable_ratelimit();
1065 
1066 	return 0;
1067 
1068 
1069 err_out_unregister:
1070 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
1071 	for_each_cpu(j, policy->cpus)
1072 		per_cpu(cpufreq_cpu_data, j) = NULL;
1073 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1074 
1075 	kobject_put(&policy->kobj);
1076 	wait_for_completion(&policy->kobj_unregister);
1077 
1078 err_unlock_policy:
1079 	unlock_policy_rwsem_write(cpu);
1080 err_free_cpumask:
1081 	free_cpumask_var(policy->cpus);
1082 err_free_policy:
1083 	kfree(policy);
1084 nomem_out:
1085 	module_put(cpufreq_driver->owner);
1086 module_out:
1087 	cpufreq_debug_enable_ratelimit();
1088 	return ret;
1089 }
1090 
1091 
1092 /**
1093  * __cpufreq_remove_dev - remove a CPU device
1094  *
1095  * Removes the cpufreq interface for a CPU device.
1096  * Caller should already have policy_rwsem in write mode for this CPU.
1097  * This routine releases the rwsem before returning.
1098  */
1099 static int __cpufreq_remove_dev(struct sys_device *sys_dev)
1100 {
1101 	unsigned int cpu = sys_dev->id;
1102 	unsigned long flags;
1103 	struct cpufreq_policy *data;
1104 	struct kobject *kobj;
1105 	struct completion *cmp;
1106 #ifdef CONFIG_SMP
1107 	struct sys_device *cpu_sys_dev;
1108 	unsigned int j;
1109 #endif
1110 
1111 	cpufreq_debug_disable_ratelimit();
1112 	dprintk("unregistering CPU %u\n", cpu);
1113 
1114 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
1115 	data = per_cpu(cpufreq_cpu_data, cpu);
1116 
1117 	if (!data) {
1118 		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1119 		cpufreq_debug_enable_ratelimit();
1120 		unlock_policy_rwsem_write(cpu);
1121 		return -EINVAL;
1122 	}
1123 	per_cpu(cpufreq_cpu_data, cpu) = NULL;
1124 
1125 
1126 #ifdef CONFIG_SMP
1127 	/* if this isn't the CPU which is the parent of the kobj, we
1128 	 * only need to unlink, put and exit
1129 	 */
1130 	if (unlikely(cpu != data->cpu)) {
1131 		dprintk("removing link\n");
1132 		cpumask_clear_cpu(cpu, data->cpus);
1133 		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1134 		kobj = &sys_dev->kobj;
1135 		cpufreq_cpu_put(data);
1136 		cpufreq_debug_enable_ratelimit();
1137 		unlock_policy_rwsem_write(cpu);
1138 		sysfs_remove_link(kobj, "cpufreq");
1139 		return 0;
1140 	}
1141 #endif
1142 
1143 #ifdef CONFIG_SMP
1144 
1145 #ifdef CONFIG_HOTPLUG_CPU
1146 	strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name,
1147 			CPUFREQ_NAME_LEN);
1148 #endif
1149 
1150 	/* if we have other CPUs still registered, we need to unlink them,
1151 	 * or else wait_for_completion below will lock up. Clean the
1152 	 * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
1153 	 * the sysfs links afterwards.
1154 	 */
1155 	if (unlikely(cpumask_weight(data->cpus) > 1)) {
1156 		for_each_cpu(j, data->cpus) {
1157 			if (j == cpu)
1158 				continue;
1159 			per_cpu(cpufreq_cpu_data, j) = NULL;
1160 		}
1161 	}
1162 
1163 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1164 
1165 	if (unlikely(cpumask_weight(data->cpus) > 1)) {
1166 		for_each_cpu(j, data->cpus) {
1167 			if (j == cpu)
1168 				continue;
1169 			dprintk("removing link for cpu %u\n", j);
1170 #ifdef CONFIG_HOTPLUG_CPU
1171 			strncpy(per_cpu(cpufreq_cpu_governor, j),
1172 				data->governor->name, CPUFREQ_NAME_LEN);
1173 #endif
1174 			cpu_sys_dev = get_cpu_sysdev(j);
1175 			kobj = &cpu_sys_dev->kobj;
1176 			unlock_policy_rwsem_write(cpu);
1177 			sysfs_remove_link(kobj, "cpufreq");
1178 			lock_policy_rwsem_write(cpu);
1179 			cpufreq_cpu_put(data);
1180 		}
1181 	}
1182 #else
1183 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1184 #endif
1185 
1186 	if (cpufreq_driver->target)
1187 		__cpufreq_governor(data, CPUFREQ_GOV_STOP);
1188 
1189 	kobj = &data->kobj;
1190 	cmp = &data->kobj_unregister;
1191 	unlock_policy_rwsem_write(cpu);
1192 	kobject_put(kobj);
1193 
1194 	/* we need to make sure that the underlying kobj is actually
1195 	 * not referenced anymore by anybody before we proceed with
1196 	 * unloading.
1197 	 */
1198 	dprintk("waiting for dropping of refcount\n");
1199 	wait_for_completion(cmp);
1200 	dprintk("wait complete\n");
1201 
1202 	lock_policy_rwsem_write(cpu);
1203 	if (cpufreq_driver->exit)
1204 		cpufreq_driver->exit(data);
1205 	unlock_policy_rwsem_write(cpu);
1206 
1207 	free_cpumask_var(data->related_cpus);
1208 	free_cpumask_var(data->cpus);
1209 	kfree(data);
1210 	per_cpu(cpufreq_cpu_data, cpu) = NULL;
1211 
1212 	cpufreq_debug_enable_ratelimit();
1213 	return 0;
1214 }
1215 
1216 
1217 static int cpufreq_remove_dev(struct sys_device *sys_dev)
1218 {
1219 	unsigned int cpu = sys_dev->id;
1220 	int retval;
1221 
1222 	if (cpu_is_offline(cpu))
1223 		return 0;
1224 
1225 	if (unlikely(lock_policy_rwsem_write(cpu)))
1226 		BUG();
1227 
1228 	retval = __cpufreq_remove_dev(sys_dev);
1229 	return retval;
1230 }
1231 
1232 
1233 static void handle_update(struct work_struct *work)
1234 {
1235 	struct cpufreq_policy *policy =
1236 		container_of(work, struct cpufreq_policy, update);
1237 	unsigned int cpu = policy->cpu;
1238 	dprintk("handle_update for cpu %u called\n", cpu);
1239 	cpufreq_update_policy(cpu);
1240 }
1241 
1242 /**
1243  *	cpufreq_out_of_sync - If the actual and saved CPU frequencies differ, we're in deep trouble.
1244  *	@cpu: cpu number
1245  *	@old_freq: CPU frequency the kernel thinks the CPU runs at
1246  *	@new_freq: CPU frequency the CPU actually runs at
1247  *
1248  *	We adjust to current frequency first, and need to clean up later.
1249  *	So either call cpufreq_update_policy() or schedule handle_update().
1250  */
1251 static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1252 				unsigned int new_freq)
1253 {
1254 	struct cpufreq_freqs freqs;
1255 
1256 	dprintk("Warning: CPU frequency out of sync: cpufreq and timing "
1257 	       "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1258 
1259 	freqs.cpu = cpu;
1260 	freqs.old = old_freq;
1261 	freqs.new = new_freq;
1262 	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
1263 	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
1264 }
1265 
1266 
1267 /**
1268  * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1269  * @cpu: CPU number
1270  *
1271  * This is the last known freq, without actually getting it from the driver.
1272  * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1273  */
1274 unsigned int cpufreq_quick_get(unsigned int cpu)
1275 {
1276 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1277 	unsigned int ret_freq = 0;
1278 
1279 	if (policy) {
1280 		ret_freq = policy->cur;
1281 		cpufreq_cpu_put(policy);
1282 	}
1283 
1284 	return ret_freq;
1285 }
1286 EXPORT_SYMBOL(cpufreq_quick_get);
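
/*
 * Illustrative sketch (not part of the original file): cpufreq_quick_get()
 * only reports the last known value and never queries the hardware, so it
 * suits purely informational callers.  The function name is hypothetical and
 * the block is not compiled.
 */
#if 0
static void example_report_freq(unsigned int cpu)
{
	unsigned int khz = cpufreq_quick_get(cpu);

	if (khz)
		printk(KERN_INFO "cpu%u last known frequency: %u kHz\n",
		       cpu, khz);
}
#endif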
1287 
1288 
1289 static unsigned int __cpufreq_get(unsigned int cpu)
1290 {
1291 	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1292 	unsigned int ret_freq = 0;
1293 
1294 	if (!cpufreq_driver->get)
1295 		return ret_freq;
1296 
1297 	ret_freq = cpufreq_driver->get(cpu);
1298 
1299 	if (ret_freq && policy->cur &&
1300 		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1301 		/* verify no discrepancy between actual and
1302 					saved value exists */
1303 		if (unlikely(ret_freq != policy->cur)) {
1304 			cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1305 			schedule_work(&policy->update);
1306 		}
1307 	}
1308 
1309 	return ret_freq;
1310 }
1311 
1312 /**
1313  * cpufreq_get - get the current CPU frequency (in kHz)
1314  * @cpu: CPU number
1315  *
1316  * Get the current frequency of the CPU, as reported by the hardware driver.
1317  */
1318 unsigned int cpufreq_get(unsigned int cpu)
1319 {
1320 	unsigned int ret_freq = 0;
1321 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1322 
1323 	if (!policy)
1324 		goto out;
1325 
1326 	if (unlikely(lock_policy_rwsem_read(cpu)))
1327 		goto out_policy;
1328 
1329 	ret_freq = __cpufreq_get(cpu);
1330 
1331 	unlock_policy_rwsem_read(cpu);
1332 
1333 out_policy:
1334 	cpufreq_cpu_put(policy);
1335 out:
1336 	return ret_freq;
1337 }
1338 EXPORT_SYMBOL(cpufreq_get);
1339 
1340 
1341 /**
1342  *	cpufreq_suspend - let the low level driver prepare for suspend
1343  */
1344 
1345 static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
1346 {
1347 	int ret = 0;
1348 
1349 	int cpu = sysdev->id;
1350 	struct cpufreq_policy *cpu_policy;
1351 
1352 	dprintk("suspending cpu %u\n", cpu);
1353 
1354 	if (!cpu_online(cpu))
1355 		return 0;
1356 
1357 	/* we may be lax here as interrupts are off. Nonetheless
1358 	 * we need to grab the correct cpu policy to check
1359 	 * whether we really run on this CPU.
1360 	 */
1361 
1362 	cpu_policy = cpufreq_cpu_get(cpu);
1363 	if (!cpu_policy)
1364 		return -EINVAL;
1365 
1366 	/* only handle each CPU group once */
1367 	if (unlikely(cpu_policy->cpu != cpu))
1368 		goto out;
1369 
1370 	if (cpufreq_driver->suspend) {
1371 		ret = cpufreq_driver->suspend(cpu_policy, pmsg);
1372 		if (ret)
1373 			printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1374 					"step on CPU %u\n", cpu_policy->cpu);
1375 	}
1376 
1377 out:
1378 	cpufreq_cpu_put(cpu_policy);
1379 	return ret;
1380 }
1381 
1382 /**
1383  *	cpufreq_resume -  restore proper CPU frequency handling after resume
1384  *
1385  *	1.) resume CPUfreq hardware support (cpufreq_driver->resume())
1386  *	2.) schedule call cpufreq_update_policy() ASAP as interrupts are
1387  *	    restored. It will verify that the current freq is in sync with
1388  *	    what we believe it to be. This is a bit later than when it
1389  *	    should be, but nonetheless it's better than calling
1390  *	    cpufreq_driver->get() here which might re-enable interrupts...
1391  */
1392 static int cpufreq_resume(struct sys_device *sysdev)
1393 {
1394 	int ret = 0;
1395 
1396 	int cpu = sysdev->id;
1397 	struct cpufreq_policy *cpu_policy;
1398 
1399 	dprintk("resuming cpu %u\n", cpu);
1400 
1401 	if (!cpu_online(cpu))
1402 		return 0;
1403 
1404 	/* we may be lax here as interrupts are off. Nonetheless
1405 	 * we need to grab the correct cpu policy to check
1406 	 * whether we really run on this CPU.
1407 	 */
1408 
1409 	cpu_policy = cpufreq_cpu_get(cpu);
1410 	if (!cpu_policy)
1411 		return -EINVAL;
1412 
1413 	/* only handle each CPU group once */
1414 	if (unlikely(cpu_policy->cpu != cpu))
1415 		goto fail;
1416 
1417 	if (cpufreq_driver->resume) {
1418 		ret = cpufreq_driver->resume(cpu_policy);
1419 		if (ret) {
1420 			printk(KERN_ERR "cpufreq: resume failed in ->resume "
1421 					"step on CPU %u\n", cpu_policy->cpu);
1422 			goto fail;
1423 		}
1424 	}
1425 
1426 	schedule_work(&cpu_policy->update);
1427 
1428 fail:
1429 	cpufreq_cpu_put(cpu_policy);
1430 	return ret;
1431 }
1432 
1433 static struct sysdev_driver cpufreq_sysdev_driver = {
1434 	.add		= cpufreq_add_dev,
1435 	.remove		= cpufreq_remove_dev,
1436 	.suspend	= cpufreq_suspend,
1437 	.resume		= cpufreq_resume,
1438 };
1439 
1440 
1441 /*********************************************************************
1442  *                     NOTIFIER LISTS INTERFACE                      *
1443  *********************************************************************/
1444 
1445 /**
1446  *	cpufreq_register_notifier - register a driver with cpufreq
1447  *	@nb: notifier function to register
1448  *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1449  *
1450  *	Add a driver to one of two lists: either a list of drivers that
1451  *      are notified about clock rate changes (once before and once after
1452  *      the transition), or a list of drivers that are notified about
1453  *      changes in cpufreq policy.
1454  *
1455  *	This function may sleep, and has the same return conditions as
1456  *	blocking_notifier_chain_register.
1457  */
1458 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1459 {
1460 	int ret;
1461 
1462 	WARN_ON(!init_cpufreq_transition_notifier_list_called);
1463 
1464 	switch (list) {
1465 	case CPUFREQ_TRANSITION_NOTIFIER:
1466 		ret = srcu_notifier_chain_register(
1467 				&cpufreq_transition_notifier_list, nb);
1468 		break;
1469 	case CPUFREQ_POLICY_NOTIFIER:
1470 		ret = blocking_notifier_chain_register(
1471 				&cpufreq_policy_notifier_list, nb);
1472 		break;
1473 	default:
1474 		ret = -EINVAL;
1475 	}
1476 
1477 	return ret;
1478 }
1479 EXPORT_SYMBOL(cpufreq_register_notifier);
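
/*
 * Illustrative sketch (not part of the original file): registering a
 * transition notifier.  The callback receives a struct cpufreq_freqs pointer
 * and is invoked once with CPUFREQ_PRECHANGE and once with
 * CPUFREQ_POSTCHANGE per transition.  All "example_" names are hypothetical;
 * the block is not compiled.
 */
#if 0
static int example_transition_cb(struct notifier_block *nb,
				 unsigned long val, void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (val == CPUFREQ_POSTCHANGE)
		printk(KERN_DEBUG "cpu%u: %u kHz -> %u kHz\n",
		       freqs->cpu, freqs->old, freqs->new);
	return NOTIFY_OK;
}

static struct notifier_block example_transition_nb = {
	.notifier_call = example_transition_cb,
};

static int __init example_init(void)
{
	return cpufreq_register_notifier(&example_transition_nb,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
#endif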
1480 
1481 
1482 /**
1483  *	cpufreq_unregister_notifier - unregister a driver with cpufreq
1484  *	@nb: notifier block to be unregistered
1485  *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1486  *
1487  *	Remove a driver from the CPU frequency notifier list.
1488  *
1489  *	This function may sleep, and has the same return conditions as
1490  *	blocking_notifier_chain_unregister.
1491  */
1492 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1493 {
1494 	int ret;
1495 
1496 	switch (list) {
1497 	case CPUFREQ_TRANSITION_NOTIFIER:
1498 		ret = srcu_notifier_chain_unregister(
1499 				&cpufreq_transition_notifier_list, nb);
1500 		break;
1501 	case CPUFREQ_POLICY_NOTIFIER:
1502 		ret = blocking_notifier_chain_unregister(
1503 				&cpufreq_policy_notifier_list, nb);
1504 		break;
1505 	default:
1506 		ret = -EINVAL;
1507 	}
1508 
1509 	return ret;
1510 }
1511 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1512 
1513 
1514 /*********************************************************************
1515  *                              GOVERNORS                            *
1516  *********************************************************************/
1517 
1518 
1519 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1520 			    unsigned int target_freq,
1521 			    unsigned int relation)
1522 {
1523 	int retval = -EINVAL;
1524 
1525 	dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
1526 		target_freq, relation);
1527 	if (cpu_online(policy->cpu) && cpufreq_driver->target)
1528 		retval = cpufreq_driver->target(policy, target_freq, relation);
1529 
1530 	return retval;
1531 }
1532 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1533 
1534 int cpufreq_driver_target(struct cpufreq_policy *policy,
1535 			  unsigned int target_freq,
1536 			  unsigned int relation)
1537 {
1538 	int ret = -EINVAL;
1539 
1540 	policy = cpufreq_cpu_get(policy->cpu);
1541 	if (!policy)
1542 		goto no_policy;
1543 
1544 	if (unlikely(lock_policy_rwsem_write(policy->cpu)))
1545 		goto fail;
1546 
1547 	ret = __cpufreq_driver_target(policy, target_freq, relation);
1548 
1549 	unlock_policy_rwsem_write(policy->cpu);
1550 
1551 fail:
1552 	cpufreq_cpu_put(policy);
1553 no_policy:
1554 	return ret;
1555 }
1556 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
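
/*
 * Illustrative sketch (not part of the original file): cpufreq_driver_target()
 * takes the policy rwsem itself, while __cpufreq_driver_target() is for
 * callers that already handle the locking.  The relation argument hints
 * whether the driver should round the request up or down to a supported
 * step.  The function name is hypothetical; the block is not compiled.
 */
#if 0
static void example_request_freq(struct cpufreq_policy *policy,
				 unsigned int wanted_khz)
{
	/* ask for a supported frequency no lower than wanted_khz */
	cpufreq_driver_target(policy, wanted_khz, CPUFREQ_RELATION_L);
}
#endif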
1557 
1558 int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
1559 {
1560 	int ret = 0;
1561 
1562 	policy = cpufreq_cpu_get(policy->cpu);
1563 	if (!policy)
1564 		return -EINVAL;
1565 
1566 	if (cpu_online(cpu) && cpufreq_driver->getavg)
1567 		ret = cpufreq_driver->getavg(policy, cpu);
1568 
1569 	cpufreq_cpu_put(policy);
1570 	return ret;
1571 }
1572 EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
1573 
1574 /*
1575  * when "event" is CPUFREQ_GOV_LIMITS
1576  */
1577 
1578 static int __cpufreq_governor(struct cpufreq_policy *policy,
1579 					unsigned int event)
1580 {
1581 	int ret;
1582 
1583 	/* A fallback governor only needs to be defined when the default
1584 	   governor is known to have latency restrictions, e.g. conservative
1585 	   or ondemand.  Kconfig already ensures that this is the case.
1586 	*/
1587 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1588 	struct cpufreq_governor *gov = &cpufreq_gov_performance;
1589 #else
1590 	struct cpufreq_governor *gov = NULL;
1591 #endif
1592 
1593 	if (policy->governor->max_transition_latency &&
1594 	    policy->cpuinfo.transition_latency >
1595 	    policy->governor->max_transition_latency) {
1596 		if (!gov)
1597 			return -EINVAL;
1598 		else {
1599 			printk(KERN_WARNING "%s governor failed, too long"
1600 			       " transition latency of HW, fallback"
1601 			       " to %s governor\n",
1602 			       policy->governor->name,
1603 			       gov->name);
1604 			policy->governor = gov;
1605 		}
1606 	}
1607 
1608 	if (!try_module_get(policy->governor->owner))
1609 		return -EINVAL;
1610 
1611 	dprintk("__cpufreq_governor for CPU %u, event %u\n",
1612 						policy->cpu, event);
1613 	ret = policy->governor->governor(policy, event);
1614 
1615 	/* we keep one module reference alive for
1616 			each CPU governed by this governor */
1617 	if ((event != CPUFREQ_GOV_START) || ret)
1618 		module_put(policy->governor->owner);
1619 	if ((event == CPUFREQ_GOV_STOP) && !ret)
1620 		module_put(policy->governor->owner);
1621 
1622 	return ret;
1623 }
1624 
1625 
1626 int cpufreq_register_governor(struct cpufreq_governor *governor)
1627 {
1628 	int err;
1629 
1630 	if (!governor)
1631 		return -EINVAL;
1632 
1633 	mutex_lock(&cpufreq_governor_mutex);
1634 
1635 	err = -EBUSY;
1636 	if (__find_governor(governor->name) == NULL) {
1637 		err = 0;
1638 		list_add(&governor->governor_list, &cpufreq_governor_list);
1639 	}
1640 
1641 	mutex_unlock(&cpufreq_governor_mutex);
1642 	return err;
1643 }
1644 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
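
/*
 * Illustrative sketch (not part of the original file): the minimum a governor
 * module provides before calling cpufreq_register_governor().  The governor
 * callback is driven with CPUFREQ_GOV_START/STOP/LIMITS events by
 * __cpufreq_governor() above.  All "example_" names are hypothetical; the
 * block is not compiled.
 */
#if 0
static int example_governor_fn(struct cpufreq_policy *policy,
			       unsigned int event)
{
	switch (event) {
	case CPUFREQ_GOV_START:
	case CPUFREQ_GOV_LIMITS:
		/* trivial policy: always run at the upper limit */
		__cpufreq_driver_target(policy, policy->max,
					CPUFREQ_RELATION_H);
		break;
	case CPUFREQ_GOV_STOP:
		break;
	}
	return 0;
}

static struct cpufreq_governor example_governor = {
	.name		= "example",
	.governor	= example_governor_fn,
	.owner		= THIS_MODULE,
};

/* registered from module init via cpufreq_register_governor(&example_governor) */
#endif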
1645 
1646 
1647 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1648 {
1649 #ifdef CONFIG_HOTPLUG_CPU
1650 	int cpu;
1651 #endif
1652 
1653 	if (!governor)
1654 		return;
1655 
1656 #ifdef CONFIG_HOTPLUG_CPU
1657 	for_each_present_cpu(cpu) {
1658 		if (cpu_online(cpu))
1659 			continue;
1660 		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1661 			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1662 	}
1663 #endif
1664 
1665 	mutex_lock(&cpufreq_governor_mutex);
1666 	list_del(&governor->governor_list);
1667 	mutex_unlock(&cpufreq_governor_mutex);
1668 	return;
1669 }
1670 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1671 
1672 
1673 
1674 /*********************************************************************
1675  *                          POLICY INTERFACE                         *
1676  *********************************************************************/
1677 
1678 /**
1679  * cpufreq_get_policy - get the current cpufreq_policy
1680  * @policy: struct cpufreq_policy into which the current cpufreq_policy
1681  *	is written
1682  *
1683  * Reads the current cpufreq policy.
1684  */
1685 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1686 {
1687 	struct cpufreq_policy *cpu_policy;
1688 	if (!policy)
1689 		return -EINVAL;
1690 
1691 	cpu_policy = cpufreq_cpu_get(cpu);
1692 	if (!cpu_policy)
1693 		return -EINVAL;
1694 
1695 	memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1696 
1697 	cpufreq_cpu_put(cpu_policy);
1698 	return 0;
1699 }
1700 EXPORT_SYMBOL(cpufreq_get_policy);
1701 
1702 
1703 /*
1704  * data   : current policy.
1705  * policy : policy to be set.
1706  */
1707 static int __cpufreq_set_policy(struct cpufreq_policy *data,
1708 				struct cpufreq_policy *policy)
1709 {
1710 	int ret = 0;
1711 
1712 	cpufreq_debug_disable_ratelimit();
1713 	dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
1714 		policy->min, policy->max);
1715 
1716 	memcpy(&policy->cpuinfo, &data->cpuinfo,
1717 				sizeof(struct cpufreq_cpuinfo));
1718 
1719 	if (policy->min > data->max || policy->max < data->min) {
1720 		ret = -EINVAL;
1721 		goto error_out;
1722 	}
1723 
1724 	/* verify the cpu speed can be set within this limit */
1725 	ret = cpufreq_driver->verify(policy);
1726 	if (ret)
1727 		goto error_out;
1728 
1729 	/* adjust if necessary - all reasons */
1730 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1731 			CPUFREQ_ADJUST, policy);
1732 
1733 	/* adjust if necessary - hardware incompatibility*/
1734 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1735 			CPUFREQ_INCOMPATIBLE, policy);
1736 
1737 	/* verify the cpu speed can be set within this limit,
1738 	   which might be different to the first one */
1739 	   which might be different from the first one */
1740 	if (ret)
1741 		goto error_out;
1742 
1743 	/* notification of the new policy */
1744 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1745 			CPUFREQ_NOTIFY, policy);
1746 
1747 	data->min = policy->min;
1748 	data->max = policy->max;
1749 
1750 	dprintk("new min and max freqs are %u - %u kHz\n",
1751 					data->min, data->max);
1752 
1753 	if (cpufreq_driver->setpolicy) {
1754 		data->policy = policy->policy;
1755 		dprintk("setting range\n");
1756 		ret = cpufreq_driver->setpolicy(policy);
1757 	} else {
1758 		if (policy->governor != data->governor) {
1759 			/* save old, working values */
1760 			struct cpufreq_governor *old_gov = data->governor;
1761 
1762 			dprintk("governor switch\n");
1763 
1764 			/* end old governor */
1765 			if (data->governor) {
1766 				/*
1767 				 * Need to release the rwsem around governor
1768 				 * stop due to lock dependency between
1769 				 * cancel_delayed_work_sync and the read lock
1770 				 * taken in the delayed work handler.
1771 				 */
1772 				unlock_policy_rwsem_write(data->cpu);
1773 				__cpufreq_governor(data, CPUFREQ_GOV_STOP);
1774 				lock_policy_rwsem_write(data->cpu);
1775 			}
1776 
1777 			/* start new governor */
1778 			data->governor = policy->governor;
1779 			if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
1780 				/* new governor failed, so re-start old one */
1781 				dprintk("starting governor %s failed\n",
1782 							data->governor->name);
1783 				if (old_gov) {
1784 					data->governor = old_gov;
1785 					__cpufreq_governor(data,
1786 							   CPUFREQ_GOV_START);
1787 				}
1788 				ret = -EINVAL;
1789 				goto error_out;
1790 			}
1791 			/* might be a policy change, too, so fall through */
1792 		}
1793 		dprintk("governor: change or update limits\n");
1794 		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1795 	}
1796 
1797 error_out:
1798 	cpufreq_debug_enable_ratelimit();
1799 	return ret;
1800 }
1801 
1802 /**
1803  *	cpufreq_update_policy - re-evaluate an existing cpufreq policy
1804  *	@cpu: CPU which shall be re-evaluated
1805  *
1806  *	Useful for policy notifiers which have different necessities
1807  *	at different times.
1808  */
1809 int cpufreq_update_policy(unsigned int cpu)
1810 {
1811 	struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
1812 	struct cpufreq_policy policy;
1813 	int ret;
1814 
1815 	if (!data) {
1816 		ret = -ENODEV;
1817 		goto no_policy;
1818 	}
1819 
1820 	if (unlikely(lock_policy_rwsem_write(cpu))) {
1821 		ret = -EINVAL;
1822 		goto fail;
1823 	}
1824 
1825 	dprintk("updating policy for CPU %u\n", cpu);
1826 	memcpy(&policy, data, sizeof(struct cpufreq_policy));
1827 	policy.min = data->user_policy.min;
1828 	policy.max = data->user_policy.max;
1829 	policy.policy = data->user_policy.policy;
1830 	policy.governor = data->user_policy.governor;
1831 
1832 	/* BIOS might change freq behind our back
1833 	  -> ask driver for current freq and notify governors about a change */
1834 	if (cpufreq_driver->get) {
1835 		policy.cur = cpufreq_driver->get(cpu);
1836 		if (!data->cur) {
1837 			dprintk("Driver did not initialize current freq");
1838 			data->cur = policy.cur;
1839 		} else {
1840 			if (data->cur != policy.cur)
1841 				cpufreq_out_of_sync(cpu, data->cur,
1842 								policy.cur);
1843 		}
1844 	}
1845 
1846 	ret = __cpufreq_set_policy(data, &policy);
1847 
1848 	unlock_policy_rwsem_write(cpu);
1849 
1850 fail:
1851 	cpufreq_cpu_put(data);
1852 no_policy:
1853 	return ret;
1854 }
1855 EXPORT_SYMBOL(cpufreq_update_policy);
1856 
1857 static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
1858 					unsigned long action, void *hcpu)
1859 {
1860 	unsigned int cpu = (unsigned long)hcpu;
1861 	struct sys_device *sys_dev;
1862 
1863 	sys_dev = get_cpu_sysdev(cpu);
1864 	if (sys_dev) {
1865 		switch (action) {
1866 		case CPU_ONLINE:
1867 		case CPU_ONLINE_FROZEN:
1868 			cpufreq_add_dev(sys_dev);
1869 			break;
1870 		case CPU_DOWN_PREPARE:
1871 		case CPU_DOWN_PREPARE_FROZEN:
1872 			if (unlikely(lock_policy_rwsem_write(cpu)))
1873 				BUG();
1874 
1875 			__cpufreq_remove_dev(sys_dev);
1876 			break;
1877 		case CPU_DOWN_FAILED:
1878 		case CPU_DOWN_FAILED_FROZEN:
1879 			cpufreq_add_dev(sys_dev);
1880 			break;
1881 		}
1882 	}
1883 	return NOTIFY_OK;
1884 }
1885 
1886 static struct notifier_block __refdata cpufreq_cpu_notifier =
1887 {
1888 	.notifier_call = cpufreq_cpu_callback,
1889 };
1890 
1891 /*********************************************************************
1892  *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
1893  *********************************************************************/
1894 
1895 /**
1896  * cpufreq_register_driver - register a CPU Frequency driver
1897  * @driver_data: A struct cpufreq_driver containing the values
1898  * submitted by the CPU Frequency driver.
1899  *
1900  *   Registers a CPU Frequency driver to this core code. This code
1901  * returns zero on success, -EBUSY when another driver got here first
1902  * (and isn't unregistered in the meantime).
1903  *
1904  */
1905 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1906 {
1907 	unsigned long flags;
1908 	int ret;
1909 
1910 	if (!driver_data || !driver_data->verify || !driver_data->init ||
1911 	    ((!driver_data->setpolicy) && (!driver_data->target)))
1912 		return -EINVAL;
1913 
1914 	dprintk("trying to register driver %s\n", driver_data->name);
1915 
1916 	if (driver_data->setpolicy)
1917 		driver_data->flags |= CPUFREQ_CONST_LOOPS;
1918 
1919 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
1920 	if (cpufreq_driver) {
1921 		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1922 		return -EBUSY;
1923 	}
1924 	cpufreq_driver = driver_data;
1925 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1926 
1927 	ret = sysdev_driver_register(&cpu_sysdev_class,
1928 					&cpufreq_sysdev_driver);
1929 
1930 	if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) {
1931 		int i;
1932 		ret = -ENODEV;
1933 
1934 		/* check for at least one working CPU */
1935 		for (i = 0; i < nr_cpu_ids; i++)
1936 			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
1937 				ret = 0;
1938 				break;
1939 			}
1940 
1941 		/* if all ->init() calls failed, unregister */
1942 		if (ret) {
1943 			dprintk("no CPU initialized for driver %s\n",
1944 							driver_data->name);
1945 			sysdev_driver_unregister(&cpu_sysdev_class,
1946 						&cpufreq_sysdev_driver);
1947 
1948 			spin_lock_irqsave(&cpufreq_driver_lock, flags);
1949 			cpufreq_driver = NULL;
1950 			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1951 		}
1952 	}
1953 
1954 	if (!ret) {
1955 		register_hotcpu_notifier(&cpufreq_cpu_notifier);
1956 		dprintk("driver %s up and running\n", driver_data->name);
1957 		cpufreq_debug_enable_ratelimit();
1958 	}
1959 
1960 	return ret;
1961 }
1962 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
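
/*
 * Illustrative sketch (not part of the original file): the minimum a scaling
 * driver supplies before calling cpufreq_register_driver().  ->init and
 * ->verify are mandatory, plus either ->target or ->setpolicy, as checked at
 * the top of cpufreq_register_driver().  All "example_" names and the
 * frequency values are hypothetical; the block is not compiled.
 */
#if 0
static int example_cpu_init(struct cpufreq_policy *policy)
{
	/* hypothetical hardware limits, in kHz */
	policy->cpuinfo.min_freq = 200000;
	policy->cpuinfo.max_freq = 800000;
	policy->cpuinfo.transition_latency = 100000;	/* ns */
	policy->min = policy->cpuinfo.min_freq;
	policy->max = policy->cpuinfo.max_freq;
	policy->cur = 800000;
	return 0;
}

static int example_verify(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);
	return 0;
}

static int example_target(struct cpufreq_policy *policy,
			  unsigned int target_freq, unsigned int relation)
{
	/* program the hardware and notify, as sketched near
	 * cpufreq_notify_transition() above */
	return 0;
}

static struct cpufreq_driver example_driver = {
	.name	= "example",
	.init	= example_cpu_init,
	.verify	= example_verify,
	.target	= example_target,
	.owner	= THIS_MODULE,
};

/* registered from module init via cpufreq_register_driver(&example_driver) */
#endif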
1963 
1964 
1965 /**
1966  * cpufreq_unregister_driver - unregister the current CPUFreq driver
1967  *
1968  *    Unregister the current CPUFreq driver. Only call this if you have
1969  * the right to do so, i.e. if you have succeeded in initialising before!
1970  * Returns zero if successful, and -EINVAL if the cpufreq_driver is
1971  * currently not initialised.
1972  */
1973 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1974 {
1975 	unsigned long flags;
1976 
1977 	cpufreq_debug_disable_ratelimit();
1978 
1979 	if (!cpufreq_driver || (driver != cpufreq_driver)) {
1980 		cpufreq_debug_enable_ratelimit();
1981 		return -EINVAL;
1982 	}
1983 
1984 	dprintk("unregistering driver %s\n", driver->name);
1985 
1986 	sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
1987 	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
1988 
1989 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
1990 	cpufreq_driver = NULL;
1991 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1992 
1993 	return 0;
1994 }
1995 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
1996 
1997 static int __init cpufreq_core_init(void)
1998 {
1999 	int cpu;
2000 
2001 	for_each_possible_cpu(cpu) {
2002 		per_cpu(cpufreq_policy_cpu, cpu) = -1;
2003 		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
2004 	}
2005 
2006 	cpufreq_global_kobject = kobject_create_and_add("cpufreq",
2007 						&cpu_sysdev_class.kset.kobj);
2008 	BUG_ON(!cpufreq_global_kobject);
2009 
2010 	return 0;
2011 }
2012 core_initcall(cpufreq_core_init);
2013