xref: /linux/drivers/devfreq/devfreq.c (revision f2ee442115c9b6219083c019939a9cc0c9abb2f8)
1 /*
2  * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
3  *	    for Non-CPU Devices.
4  *
5  * Copyright (C) 2011 Samsung Electronics
6  *	MyungJoo Ham <myungjoo.ham@samsung.com>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  */
12 
13 #include <linux/kernel.h>
14 #include <linux/sched.h>
15 #include <linux/errno.h>
16 #include <linux/err.h>
17 #include <linux/init.h>
18 #include <linux/slab.h>
19 #include <linux/opp.h>
20 #include <linux/devfreq.h>
21 #include <linux/workqueue.h>
22 #include <linux/platform_device.h>
23 #include <linux/list.h>
24 #include <linux/printk.h>
25 #include <linux/hrtimer.h>
26 #include "governor.h"
27 
28 struct class *devfreq_class;
29 
30 /*
31  * devfreq_work periodically monitors every registered device.
32  * The polling interval is determined by the minimum polling period
33  * among all polling devfreq devices; its resolution (and thus the
34  * shortest possible interval) is one jiffy.
35  */
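/*
 * Illustrative example (not part of the original file): with two centrally
 * polled devices whose polling_ms values are 10 and 50, devfreq_monitor()
 * re-queues devfreq_work roughly every msecs_to_jiffies(10) jiffies; the
 * 50 ms device is only reevaluated once its own next_polling budget of
 * msecs_to_jiffies(50) jiffies has elapsed.
 */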
36 static bool polling;
37 static struct workqueue_struct *devfreq_wq;
38 static struct delayed_work devfreq_work;
39 
40 /* Device that devfreq_monitor() is still using; its removal must wait */
41 static struct devfreq *wait_remove_device;
42 
43 /* The list of all device-devfreq */
44 static LIST_HEAD(devfreq_list);
45 static DEFINE_MUTEX(devfreq_list_lock);
46 
47 /**
48  * find_device_devfreq() - find devfreq struct using device pointer
49  * @dev:	device pointer used to lookup device devfreq.
50  *
51  * Search the list of device devfreqs and return the matched device's
52  * devfreq info. devfreq_list_lock should be held by the caller.
53  */
54 static struct devfreq *find_device_devfreq(struct device *dev)
55 {
56 	struct devfreq *tmp_devfreq;
57 
58 	if (unlikely(IS_ERR_OR_NULL(dev))) {
59 		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
60 		return ERR_PTR(-EINVAL);
61 	}
62 	WARN(!mutex_is_locked(&devfreq_list_lock),
63 	     "devfreq_list_lock must be locked.");
64 
65 	list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
66 		if (tmp_devfreq->dev.parent == dev)
67 			return tmp_devfreq;
68 	}
69 
70 	return ERR_PTR(-ENODEV);
71 }
72 
73 /**
74  * update_devfreq() - Reevaluate the device and configure frequency.
75  * @devfreq:	the devfreq instance.
76  *
77  * Note: Lock devfreq->lock before calling update_devfreq
78  *	 This function is exported for governors.
79  */
80 int update_devfreq(struct devfreq *devfreq)
81 {
82 	unsigned long freq;
83 	int err = 0;
84 
85 	if (!mutex_is_locked(&devfreq->lock)) {
86 		WARN(true, "devfreq->lock must be locked by the caller.\n");
87 		return -EINVAL;
88 	}
89 
90 	/* Reevaluate the proper frequency */
91 	err = devfreq->governor->get_target_freq(devfreq, &freq);
92 	if (err)
93 		return err;
94 
95 	err = devfreq->profile->target(devfreq->dev.parent, &freq);
96 	if (err)
97 		return err;
98 
99 	devfreq->previous_freq = freq;
100 	return err;
101 }
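/*
 * Illustrative sketch (not part of the original file): a minimal governor
 * built around update_devfreq().  get_target_freq() simply proposes the
 * profile's initial_freq, and example_governor_event() shows how code
 * reacting to an external event must hold devfreq->lock around the call.
 * All "example_*" names are hypothetical.
 */
#if 0	/* example only */
static int example_get_target_freq(struct devfreq *devfreq,
				   unsigned long *freq)
{
	/* Propose whatever frequency the device started with. */
	*freq = devfreq->profile->initial_freq;
	return 0;
}

const struct devfreq_governor example_governor = {
	.name = "example",
	.get_target_freq = example_get_target_freq,
	.no_central_polling = true,	/* reevaluate only on demand */
};

/* Called by driver/governor code when something relevant changes. */
static void example_governor_event(struct devfreq *devfreq)
{
	mutex_lock(&devfreq->lock);	/* required by update_devfreq() */
	update_devfreq(devfreq);
	mutex_unlock(&devfreq->lock);
}
#endif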
102 
103 /**
104  * devfreq_notifier_call() - Notify that the device frequency requirements
105  *			   have been changed outside of the devfreq framework.
106  * @nb:	the notifier_block (supposed to be devfreq->nb)
107  * @type:	not used
108  * @devp:	not used
109  *
110  * Called by a notifier that uses devfreq->nb.
111  */
112 static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
113 				 void *devp)
114 {
115 	struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
116 	int ret;
117 
118 	mutex_lock(&devfreq->lock);
119 	ret = update_devfreq(devfreq);
120 	mutex_unlock(&devfreq->lock);
121 
122 	return ret;
123 }
124 
125 /**
126  * _remove_devfreq() - Remove devfreq from the device.
127  * @devfreq:	the devfreq struct
128  * @skip:	skip calling device_unregister().
129  *
130  * Note that the caller should lock devfreq->lock before calling
131  * this. _remove_devfreq() will unlock it and free devfreq
132  * internally. devfreq_list_lock should be locked by the caller
133  * as well (not released at return).
134  *
135  * Lock usage:
136  * devfreq->lock: locked before call.
137  *		  unlocked at return (and freed)
138  * devfreq_list_lock: locked before call and kept locked at return;
139  *		      required only if devfreq is centrally polled
140  *		      (i.e., the governor does not set no_central_polling).
141  *
142  * Freed memory:
143  * devfreq
144  */
145 static void _remove_devfreq(struct devfreq *devfreq, bool skip)
146 {
147 	if (!mutex_is_locked(&devfreq->lock)) {
148 		WARN(true, "devfreq->lock must be locked by the caller.\n");
149 		return;
150 	}
151 	if (!devfreq->governor->no_central_polling &&
152 	    !mutex_is_locked(&devfreq_list_lock)) {
153 		WARN(true, "devfreq_list_lock must be locked by the caller.\n");
154 		return;
155 	}
156 
157 	if (devfreq->being_removed)
158 		return;
159 
160 	devfreq->being_removed = true;
161 
162 	if (devfreq->profile->exit)
163 		devfreq->profile->exit(devfreq->dev.parent);
164 
165 	if (devfreq->governor->exit)
166 		devfreq->governor->exit(devfreq);
167 
168 	if (!skip && get_device(&devfreq->dev)) {
169 		device_unregister(&devfreq->dev);
170 		put_device(&devfreq->dev);
171 	}
172 
173 	if (!devfreq->governor->no_central_polling)
174 		list_del(&devfreq->node);
175 
176 	mutex_unlock(&devfreq->lock);
177 	mutex_destroy(&devfreq->lock);
178 
179 	kfree(devfreq);
180 }
181 
182 /**
183  * devfreq_dev_release() - Callback for struct device to release the device.
184  * @dev:	the devfreq device
185  *
186  * This calls _remove_devfreq() if it has not been called already.
187  * Note that devfreq_dev_release() could be called by _remove_devfreq() as
188  * well as by others unregistering the device.
189  */
190 static void devfreq_dev_release(struct device *dev)
191 {
192 	struct devfreq *devfreq = to_devfreq(dev);
193 	bool central_polling = !devfreq->governor->no_central_polling;
194 
195 	/*
196 	 * If devfreq_dev_release() was called by device_unregister() of
197 	 * _remove_devfreq(), we cannot mutex_lock(&devfreq->lock) and
198 	 * being_removed is already set. This also partially checks the case
199 	 * where devfreq_dev_release() is called from a thread other than
200 	 * the one that called _remove_devfreq(); however, that case is
201 	 * dealt with completely by the second being_removed check below.
202 	 *
203 	 * Because being_removed is never cleared,
204 	 * we do not need to worry about race conditions on
205 	 * being_removed.
206 	 */
207 	if (devfreq->being_removed)
208 		return;
209 
210 	if (central_polling)
211 		mutex_lock(&devfreq_list_lock);
212 
213 	mutex_lock(&devfreq->lock);
214 
215 	/*
216 	 * Check being_removed flag again for the case where
217 	 * devfreq_dev_release() was called in a thread other than the one
218 	 * that possibly called _remove_devfreq().
219 	 */
220 	if (devfreq->being_removed) {
221 		mutex_unlock(&devfreq->lock);
222 		goto out;
223 	}
224 
225 	/* devfreq->lock is unlocked and devfreq is freed in _remove_devfreq() */
226 	_remove_devfreq(devfreq, true);
227 
228 out:
229 	if (central_polling)
230 		mutex_unlock(&devfreq_list_lock);
231 }
232 
233 /**
234  * devfreq_monitor() - Periodically poll devfreq objects.
235  * @work: the work struct used to run devfreq_monitor periodically.
236  *
237  */
238 static void devfreq_monitor(struct work_struct *work)
239 {
240 	static unsigned long last_polled_at;
241 	struct devfreq *devfreq, *tmp;
242 	int error;
243 	unsigned long jiffies_passed;
244 	unsigned long next_jiffies = ULONG_MAX, now = jiffies;
245 	struct device *dev;
246 
247 	/* Initially last_polled_at == 0, so every device is polled at bootup */
248 	jiffies_passed = now - last_polled_at;
249 	last_polled_at = now;
250 	if (jiffies_passed == 0)
251 		jiffies_passed = 1;
252 
253 	mutex_lock(&devfreq_list_lock);
254 	list_for_each_entry_safe(devfreq, tmp, &devfreq_list, node) {
255 		mutex_lock(&devfreq->lock);
256 		dev = devfreq->dev.parent;
257 
258 		/* Do not remove tmp for a while */
259 		/* Prevent tmp from being removed while devfreq_list_lock is dropped */
260 
261 		if (devfreq->governor->no_central_polling ||
262 		    devfreq->next_polling == 0) {
263 			mutex_unlock(&devfreq->lock);
264 			continue;
265 		}
266 		mutex_unlock(&devfreq_list_lock);
267 
268 		/*
269 		 * Reduce next_polling further if devfreq_wq took an extra
270 		 * delay (e.g., the CPU has been idle).
271 		 */
272 		if (devfreq->next_polling <= jiffies_passed) {
273 			error = update_devfreq(devfreq);
274 
275 			/* Remove a devfreq with an error. */
276 			if (error && error != -EAGAIN) {
277 
278 				dev_err(dev, "Due to update_devfreq error(%d), devfreq(%s) is removed from the device\n",
279 					error, devfreq->governor->name);
280 
281 				/*
282 				 * Unlock devfreq before locking the list
283 				 * in order to avoid deadlock with
284 				 * find_device_devfreq or others
285 				 */
286 				mutex_unlock(&devfreq->lock);
287 				mutex_lock(&devfreq_list_lock);
288 				/* Check if devfreq is already removed */
289 				if (IS_ERR(find_device_devfreq(dev)))
290 					continue;
291 				mutex_lock(&devfreq->lock);
292 				/* This unlocks devfreq->lock and frees devfreq */
293 				_remove_devfreq(devfreq, false);
294 				continue;
295 			}
296 			devfreq->next_polling = devfreq->polling_jiffies;
297 		} else {
298 			devfreq->next_polling -= jiffies_passed;
299 		}
300 
301 		if (devfreq->next_polling)
302 			next_jiffies = (next_jiffies > devfreq->next_polling) ?
303 					devfreq->next_polling : next_jiffies;
304 
305 		mutex_unlock(&devfreq->lock);
306 		mutex_lock(&devfreq_list_lock);
307 	}
308 	wait_remove_device = NULL;
309 	mutex_unlock(&devfreq_list_lock);
310 
311 	if (next_jiffies > 0 && next_jiffies < ULONG_MAX) {
312 		polling = true;
313 		queue_delayed_work(devfreq_wq, &devfreq_work, next_jiffies);
314 	} else {
315 		polling = false;
316 	}
317 }
318 
319 /**
320  * devfreq_add_device() - Add devfreq feature to the device
321  * @dev:	the device to add the devfreq feature to.
322  * @profile:	device-specific profile to run devfreq.
323  * @governor:	the policy used to choose the frequency.
324  * @data:	private data for the governor. The devfreq framework does not
325  *		touch this value.
326  */
327 struct devfreq *devfreq_add_device(struct device *dev,
328 				   struct devfreq_dev_profile *profile,
329 				   const struct devfreq_governor *governor,
330 				   void *data)
331 {
332 	struct devfreq *devfreq;
333 	int err = 0;
334 
335 	if (!dev || !profile || !governor) {
336 		dev_err(dev, "%s: Invalid parameters.\n", __func__);
337 		return ERR_PTR(-EINVAL);
338 	}
339 
340 
341 	if (!governor->no_central_polling) {
342 		mutex_lock(&devfreq_list_lock);
343 		devfreq = find_device_devfreq(dev);
344 		mutex_unlock(&devfreq_list_lock);
345 		if (!IS_ERR(devfreq)) {
346 			dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
347 			err = -EINVAL;
348 			goto out;
349 		}
350 	}
351 
352 	devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
353 	if (!devfreq) {
354 		dev_err(dev, "%s: Unable to create devfreq for the device\n",
355 			__func__);
356 		err = -ENOMEM;
357 		goto out;
358 	}
359 
360 	mutex_init(&devfreq->lock);
361 	mutex_lock(&devfreq->lock);
362 	devfreq->dev.parent = dev;
363 	devfreq->dev.class = devfreq_class;
364 	devfreq->dev.release = devfreq_dev_release;
365 	devfreq->profile = profile;
366 	devfreq->governor = governor;
367 	devfreq->previous_freq = profile->initial_freq;
368 	devfreq->data = data;
369 	devfreq->next_polling = devfreq->polling_jiffies
370 			      = msecs_to_jiffies(devfreq->profile->polling_ms);
371 	devfreq->nb.notifier_call = devfreq_notifier_call;
372 
373 	dev_set_name(&devfreq->dev, dev_name(dev));
374 	err = device_register(&devfreq->dev);
375 	if (err) {
376 		put_device(&devfreq->dev);
377 		goto err_dev;
378 	}
379 
380 	if (governor->init)
381 		err = governor->init(devfreq);
382 	if (err)
383 		goto err_init;
384 
385 	mutex_unlock(&devfreq->lock);
386 
387 	if (governor->no_central_polling)
388 		goto out;
389 
390 	mutex_lock(&devfreq_list_lock);
391 
392 	list_add(&devfreq->node, &devfreq_list);
393 
394 	if (devfreq_wq && devfreq->next_polling && !polling) {
395 		polling = true;
396 		queue_delayed_work(devfreq_wq, &devfreq_work,
397 				   devfreq->next_polling);
398 	}
399 	mutex_unlock(&devfreq_list_lock);
400 	goto out;
401 err_init:
402 	device_unregister(&devfreq->dev);
403 err_dev:
404 	mutex_unlock(&devfreq->lock);
405 	kfree(devfreq);
406 out:
407 	if (err)
408 		return ERR_PTR(err);
409 	else
410 		return devfreq;
411 }
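/*
 * Illustrative sketch (not part of the original file): how a platform driver
 * might register with devfreq.  The "foo_*" helpers, the 200 MHz initial
 * frequency and the use of the simple_ondemand governor are hypothetical;
 * only the devfreq_add_device()/devfreq_remove_device() calls and the
 * devfreq_dev_profile layout reflect this file's API.
 */
#if 0	/* example only */
static int foo_target(struct device *dev, unsigned long *freq)
{
	/* Program the hardware as close to *freq as it supports. */
	return foo_set_rate(dev, freq);
}

static int foo_get_dev_status(struct device *dev,
			      struct devfreq_dev_status *stat)
{
	/* Report load since the last call (hypothetical counters). */
	stat->current_frequency = foo_get_rate(dev);
	stat->busy_time = foo_read_busy_cycles(dev);
	stat->total_time = foo_read_total_cycles(dev);
	return 0;
}

static struct devfreq_dev_profile foo_profile = {
	.initial_freq	= 200000000,	/* 200 MHz, hypothetical */
	.polling_ms	= 100,		/* centrally polled every 100 ms */
	.target		= foo_target,
	.get_dev_status	= foo_get_dev_status,
};

static int foo_probe(struct platform_device *pdev)
{
	struct devfreq *df;

	df = devfreq_add_device(&pdev->dev, &foo_profile,
				&devfreq_simple_ondemand, NULL);
	if (IS_ERR(df))
		return PTR_ERR(df);

	platform_set_drvdata(pdev, df);
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	return devfreq_remove_device(platform_get_drvdata(pdev));
}
#endif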
412 
413 /**
414  * devfreq_remove_device() - Remove devfreq feature from a device.
415  * @devfreq:	the devfreq instance to be removed
416  */
417 int devfreq_remove_device(struct devfreq *devfreq)
418 {
419 	if (!devfreq)
420 		return -EINVAL;
421 
422 	if (!devfreq->governor->no_central_polling) {
423 		mutex_lock(&devfreq_list_lock);
424 		while (wait_remove_device == devfreq) {
425 			mutex_unlock(&devfreq_list_lock);
426 			schedule();
427 			mutex_lock(&devfreq_list_lock);
428 		}
429 	}
430 
431 	mutex_lock(&devfreq->lock);
432 	_remove_devfreq(devfreq, false); /* it unlocks devfreq->lock */
433 
434 	if (!devfreq->governor->no_central_polling)
435 		mutex_unlock(&devfreq_list_lock);
436 
437 	return 0;
438 }
439 
440 static ssize_t show_governor(struct device *dev,
441 			     struct device_attribute *attr, char *buf)
442 {
443 	return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
444 }
445 
446 static ssize_t show_freq(struct device *dev,
447 			 struct device_attribute *attr, char *buf)
448 {
449 	return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
450 }
451 
452 static ssize_t show_polling_interval(struct device *dev,
453 				     struct device_attribute *attr, char *buf)
454 {
455 	return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
456 }
457 
458 static ssize_t store_polling_interval(struct device *dev,
459 				      struct device_attribute *attr,
460 				      const char *buf, size_t count)
461 {
462 	struct devfreq *df = to_devfreq(dev);
463 	unsigned int value;
464 	int ret;
465 
466 	ret = sscanf(buf, "%u", &value);
467 	if (ret != 1)
468 		goto out;
469 
470 	mutex_lock(&df->lock);
471 	df->profile->polling_ms = value;
472 	df->next_polling = df->polling_jiffies
473 			 = msecs_to_jiffies(value);
474 	mutex_unlock(&df->lock);
475 
476 	ret = count;
477 
478 	if (df->governor->no_central_polling)
479 		goto out;
480 
481 	mutex_lock(&devfreq_list_lock);
482 	if (df->next_polling > 0 && !polling) {
483 		polling = true;
484 		queue_delayed_work(devfreq_wq, &devfreq_work,
485 				   df->next_polling);
486 	}
487 	mutex_unlock(&devfreq_list_lock);
488 out:
489 	return ret;
490 }
491 
492 static ssize_t show_central_polling(struct device *dev,
493 				    struct device_attribute *attr, char *buf)
494 {
495 	return sprintf(buf, "%d\n",
496 		       !to_devfreq(dev)->governor->no_central_polling);
497 }
498 
499 static struct device_attribute devfreq_attrs[] = {
500 	__ATTR(governor, S_IRUGO, show_governor, NULL),
501 	__ATTR(cur_freq, S_IRUGO, show_freq, NULL),
502 	__ATTR(central_polling, S_IRUGO, show_central_polling, NULL),
503 	__ATTR(polling_interval, S_IRUGO | S_IWUSR, show_polling_interval,
504 	       store_polling_interval),
505 	{ },
506 };
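/*
 * Illustrative note (not part of the original file): the attributes above
 * show up under /sys/class/devfreq/<device-name>/ as "governor",
 * "cur_freq", "central_polling" and "polling_interval".  For a hypothetical
 * parent device named "foo", writing a decimal millisecond value to
 * /sys/class/devfreq/foo/polling_interval goes through
 * store_polling_interval() above and updates profile->polling_ms.
 */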
507 
508 /**
509  * devfreq_start_polling() - Initialize data structures for the devfreq framework
510  *			   and start polling registered devfreq devices.
511  */
512 static int __init devfreq_start_polling(void)
513 {
514 	mutex_lock(&devfreq_list_lock);
515 	polling = false;
516 	devfreq_wq = create_freezable_workqueue("devfreq_wq");
517 	INIT_DELAYED_WORK_DEFERRABLE(&devfreq_work, devfreq_monitor);
518 	mutex_unlock(&devfreq_list_lock);
519 
520 	devfreq_monitor(&devfreq_work.work);
521 	return 0;
522 }
523 late_initcall(devfreq_start_polling);
524 
525 static int __init devfreq_init(void)
526 {
527 	devfreq_class = class_create(THIS_MODULE, "devfreq");
528 	if (IS_ERR(devfreq_class)) {
529 		pr_err("%s: couldn't create class\n", __FILE__);
530 		return PTR_ERR(devfreq_class);
531 	}
532 	devfreq_class->dev_attrs = devfreq_attrs;
533 	return 0;
534 }
535 subsys_initcall(devfreq_init);
536 
537 static void __exit devfreq_exit(void)
538 {
539 	class_destroy(devfreq_class);
540 }
541 module_exit(devfreq_exit);
542 
543 /*
544  * The following are helper functions for devfreq user device drivers that
545  * use the OPP framework.
546  */
547 
548 /**
549  * devfreq_recommended_opp() - Helper function to get the proper OPP for the
550  *			     freq value given to the target callback.
551  * @dev:	The devfreq user device (parent of devfreq).
552  * @freq:	The frequency given to the target function.
553  *
554  */
555 struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq)
556 {
557 	struct opp *opp = opp_find_freq_ceil(dev, freq);
558 
559 	if (opp == ERR_PTR(-ENODEV))
560 		opp = opp_find_freq_floor(dev, freq);
561 	return opp;
562 }
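/*
 * Illustrative sketch (not part of the original file): using
 * devfreq_recommended_opp() in a driver's target() callback to map the
 * frequency proposed by the governor onto an available OPP.
 * foo_set_rate_volt() is hypothetical; opp_get_freq()/opp_get_voltage()
 * come from the OPP library (linux/opp.h).
 */
#if 0	/* example only */
static int foo_opp_target(struct device *dev, unsigned long *freq)
{
	struct opp *opp = devfreq_recommended_opp(dev, freq);
	unsigned long volt;

	if (IS_ERR(opp))
		return PTR_ERR(opp);

	volt = opp_get_voltage(opp);
	*freq = opp_get_freq(opp);	/* report the frequency actually set */

	return foo_set_rate_volt(dev, *freq, volt);
}
#endif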
563 
564 /**
565  * devfreq_register_opp_notifier() - Helper function to get devfreq notified
566  *				   of any changes in OPP
567  *				   availability.
568  * @dev:	The devfreq user device (parent of devfreq).
569  * @devfreq:	The devfreq object.
570  */
571 int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
572 {
573 	struct srcu_notifier_head *nh = opp_get_notifier(dev);
574 
575 	if (IS_ERR(nh))
576 		return PTR_ERR(nh);
577 	return srcu_notifier_chain_register(nh, &devfreq->nb);
578 }
579 
580 /**
581  * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
582  *				     notified of any changes in OPP
583  *				     availability.
584  * @dev:	The devfreq user device (parent of devfreq).
585  * @devfreq:	The devfreq object.
586  *
587  * This must be called from the exit() callback of devfreq_dev_profile if
588  * devfreq_recommended_opp is used.
589  */
590 int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
591 {
592 	struct srcu_notifier_head *nh = opp_get_notifier(dev);
593 
594 	if (IS_ERR(nh))
595 		return PTR_ERR(nh);
596 	return srcu_notifier_chain_unregister(nh, &devfreq->nb);
597 }
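/*
 * Illustrative sketch (not part of the original file): registering
 * devfreq->nb on the device's OPP notifier chain so that the device is
 * reevaluated whenever an OPP is enabled or disabled.  The "foo_*" names
 * are hypothetical (foo_profile as in the sketch after devfreq_add_device());
 * the unregister call typically belongs in the devfreq_dev_profile exit()
 * callback.
 */
#if 0	/* example only */
static struct devfreq *foo_devfreq;

static void foo_profile_exit(struct device *dev)
{
	devfreq_unregister_opp_notifier(dev, foo_devfreq);
}

static int foo_probe(struct platform_device *pdev)
{
	foo_devfreq = devfreq_add_device(&pdev->dev, &foo_profile,
					 &devfreq_simple_ondemand, NULL);
	if (IS_ERR(foo_devfreq))
		return PTR_ERR(foo_devfreq);

	/* Reevaluate whenever OPP availability changes. */
	return devfreq_register_opp_notifier(&pdev->dev, foo_devfreq);
}
#endif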
598 
599 MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
600 MODULE_DESCRIPTION("devfreq class support");
601 MODULE_LICENSE("GPL");
602