/*
 * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
 *	    for Non-CPU Devices.
 *
 * Copyright (C) 2011 Samsung Electronics
 *	MyungJoo Ham <myungjoo.ham@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/opp.h>
#include <linux/devfreq.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include "governor.h"

static struct class *devfreq_class;

/*
 * devfreq_work periodically monitors every registered devfreq device.
 * The work interval is the minimum of the polling periods of all polling
 * devfreq devices; its resolution (and minimum value) is one jiffy.
 */
static bool polling;
static struct workqueue_struct *devfreq_wq;
static struct delayed_work devfreq_work;

/* devfreq_remove_device() waits while devfreq_monitor() refers to this */
static struct devfreq *wait_remove_device;

/* The list of all device-devfreq */
static LIST_HEAD(devfreq_list);
static DEFINE_MUTEX(devfreq_list_lock);

/**
 * find_device_devfreq() - find devfreq struct using device pointer
 * @dev:	device pointer used to lookup device devfreq.
 *
 * Search the list of device devfreqs and return the matched device's
 * devfreq info. devfreq_list_lock should be held by the caller.
 */
static struct devfreq *find_device_devfreq(struct device *dev)
{
	struct devfreq *tmp_devfreq;

	if (unlikely(IS_ERR_OR_NULL(dev))) {
		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	WARN(!mutex_is_locked(&devfreq_list_lock),
	     "devfreq_list_lock must be locked.");

	list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
		if (tmp_devfreq->dev.parent == dev)
			return tmp_devfreq;
	}

	return ERR_PTR(-ENODEV);
}

/**
 * update_devfreq() - Reevaluate the device and configure frequency.
 * @devfreq:	the devfreq instance.
 *
 * Note: Lock devfreq->lock before calling update_devfreq().
 *	 This function is exported for governors.
 */
int update_devfreq(struct devfreq *devfreq)
{
	unsigned long freq;
	int err = 0;

	if (!mutex_is_locked(&devfreq->lock)) {
		WARN(true, "devfreq->lock must be locked by the caller.\n");
		return -EINVAL;
	}

	/* Reevaluate the proper frequency */
	err = devfreq->governor->get_target_freq(devfreq, &freq);
	if (err)
		return err;

	err = devfreq->profile->target(devfreq->dev.parent, &freq);
	if (err)
		return err;

	devfreq->previous_freq = freq;
	return err;
}
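
/*
 * Illustrative sketch (not part of this file): a governor or other caller
 * that wants to force a re-evaluation is expected to take devfreq->lock
 * around update_devfreq(), as devfreq_notifier_call() below does. The
 * helper name my_governor_kick() is hypothetical.
 *
 *	static void my_governor_kick(struct devfreq *devfreq)
 *	{
 *		mutex_lock(&devfreq->lock);
 *		update_devfreq(devfreq);
 *		mutex_unlock(&devfreq->lock);
 *	}
 */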

/**
 * devfreq_notifier_call() - Notify that the device frequency requirements
 *			     have been changed outside of the devfreq framework.
 * @nb:		the notifier_block (supposed to be devfreq->nb)
 * @type:	not used
 * @devp:	not used
 *
 * Called by a notifier that uses devfreq->nb.
 */
static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
				 void *devp)
{
	struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
	int ret;

	mutex_lock(&devfreq->lock);
	ret = update_devfreq(devfreq);
	mutex_unlock(&devfreq->lock);

	return ret;
}

/**
 * _remove_devfreq() - Remove devfreq from the device.
 * @devfreq:	the devfreq struct
 * @skip:	skip calling device_unregister().
 *
 * Note that the caller should lock devfreq->lock before calling
 * this. _remove_devfreq() will unlock it and free devfreq
 * internally. devfreq_list_lock should be locked by the caller
 * as well (not released at return).
 *
 * Lock usage:
 * devfreq->lock: locked before call.
 *		  unlocked at return (and freed)
 * devfreq_list_lock: locked before call.
 *		      kept locked at return.
 *		      if devfreq is centrally polled.
 *
 * Freed memory:
 * devfreq
 */
static void _remove_devfreq(struct devfreq *devfreq, bool skip)
{
	if (!mutex_is_locked(&devfreq->lock)) {
		WARN(true, "devfreq->lock must be locked by the caller.\n");
		return;
	}
	if (!devfreq->governor->no_central_polling &&
	    !mutex_is_locked(&devfreq_list_lock)) {
		WARN(true, "devfreq_list_lock must be locked by the caller.\n");
		return;
	}

	if (devfreq->being_removed)
		return;

	devfreq->being_removed = true;

	if (devfreq->profile->exit)
		devfreq->profile->exit(devfreq->dev.parent);

	if (devfreq->governor->exit)
		devfreq->governor->exit(devfreq);

	if (!skip && get_device(&devfreq->dev)) {
		device_unregister(&devfreq->dev);
		put_device(&devfreq->dev);
	}

	if (!devfreq->governor->no_central_polling)
		list_del(&devfreq->node);

	mutex_unlock(&devfreq->lock);
	mutex_destroy(&devfreq->lock);

	kfree(devfreq);
}

/**
 * devfreq_dev_release() - Callback for struct device to release the device.
 * @dev:	the devfreq device
 *
 * This calls _remove_devfreq() if it has not been called already.
 * Note that devfreq_dev_release() could be called by _remove_devfreq() as
 * well as by others unregistering the device.
 */
static void devfreq_dev_release(struct device *dev)
{
	struct devfreq *devfreq = to_devfreq(dev);
	bool central_polling = !devfreq->governor->no_central_polling;

	/*
	 * If devfreq_dev_release() was called by device_unregister() of
	 * _remove_devfreq(), we cannot mutex_lock(&devfreq->lock) and
	 * being_removed is already set. This also partially checks the case
	 * where devfreq_dev_release() is called from a thread other than
	 * the one that called _remove_devfreq(); however, that case is
	 * handled completely by the second being_removed check below.
	 *
	 * Because being_removed is never unset, we do not need to worry
	 * about race conditions on being_removed.
	 */
	if (devfreq->being_removed)
		return;

	if (central_polling)
		mutex_lock(&devfreq_list_lock);

	mutex_lock(&devfreq->lock);

	/*
	 * Check the being_removed flag again for the case where
	 * devfreq_dev_release() was called in a thread other than the one
	 * that possibly called _remove_devfreq().
	 */
	if (devfreq->being_removed) {
		mutex_unlock(&devfreq->lock);
		goto out;
	}

	/* devfreq->lock is unlocked and devfreq is freed in _remove_devfreq() */
	_remove_devfreq(devfreq, true);

out:
	if (central_polling)
		mutex_unlock(&devfreq_list_lock);
}

/**
 * devfreq_monitor() - Periodically poll devfreq objects.
 * @work:	the work struct used to run devfreq_monitor periodically.
 */
static void devfreq_monitor(struct work_struct *work)
{
	static unsigned long last_polled_at;
	struct devfreq *devfreq, *tmp;
	int error;
	unsigned long jiffies_passed;
	unsigned long next_jiffies = ULONG_MAX, now = jiffies;
	struct device *dev;

	/* Initially last_polled_at = 0, polling every device at bootup */
	jiffies_passed = now - last_polled_at;
	last_polled_at = now;
	if (jiffies_passed == 0)
		jiffies_passed = 1;

	mutex_lock(&devfreq_list_lock);
	list_for_each_entry_safe(devfreq, tmp, &devfreq_list, node) {
		mutex_lock(&devfreq->lock);
		dev = devfreq->dev.parent;

		/* Do not remove tmp for a while */
		wait_remove_device = tmp;

		if (devfreq->governor->no_central_polling ||
		    devfreq->next_polling == 0) {
			mutex_unlock(&devfreq->lock);
			continue;
		}
		mutex_unlock(&devfreq_list_lock);

		/*
		 * Reduce next_polling further if devfreq_wq took an extra
		 * delay (e.g., because the CPU has been idle).
		 */
		if (devfreq->next_polling <= jiffies_passed) {
			error = update_devfreq(devfreq);

			/* Remove a devfreq with an error. */
			if (error && error != -EAGAIN) {

				dev_err(dev, "Due to update_devfreq error(%d), devfreq(%s) is removed from the device\n",
					error, devfreq->governor->name);

				/*
				 * Unlock devfreq before locking the list
				 * in order to avoid deadlock with
				 * find_device_devfreq or others
				 */
				mutex_unlock(&devfreq->lock);
				mutex_lock(&devfreq_list_lock);
				/* Check if devfreq is already removed */
				if (IS_ERR(find_device_devfreq(dev)))
					continue;
				mutex_lock(&devfreq->lock);
				/* This unlocks devfreq->lock and frees it */
				_remove_devfreq(devfreq, false);
				continue;
			}
			devfreq->next_polling = devfreq->polling_jiffies;
		} else {
			devfreq->next_polling -= jiffies_passed;
		}

		if (devfreq->next_polling)
			next_jiffies = (next_jiffies > devfreq->next_polling) ?
					devfreq->next_polling : next_jiffies;

		mutex_unlock(&devfreq->lock);
		mutex_lock(&devfreq_list_lock);
	}
	wait_remove_device = NULL;
	mutex_unlock(&devfreq_list_lock);

	if (next_jiffies > 0 && next_jiffies < ULONG_MAX) {
		polling = true;
		queue_delayed_work(devfreq_wq, &devfreq_work, next_jiffies);
	} else {
		polling = false;
	}
}

/**
 * devfreq_add_device() - Add devfreq feature to the device
 * @dev:	the device to add devfreq feature.
 * @profile:	device-specific profile to run devfreq.
 * @governor:	the policy to choose frequency.
 * @data:	private data for the governor. The devfreq framework does not
 *		touch this value.
 */
struct devfreq *devfreq_add_device(struct device *dev,
				   struct devfreq_dev_profile *profile,
				   const struct devfreq_governor *governor,
				   void *data)
{
	struct devfreq *devfreq;
	int err = 0;

	if (!dev || !profile || !governor) {
		dev_err(dev, "%s: Invalid parameters.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	if (!governor->no_central_polling) {
		mutex_lock(&devfreq_list_lock);
		devfreq = find_device_devfreq(dev);
		mutex_unlock(&devfreq_list_lock);
		if (!IS_ERR(devfreq)) {
			dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
			err = -EINVAL;
			goto err_out;
		}
	}

	devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
	if (!devfreq) {
		dev_err(dev, "%s: Unable to create devfreq for the device\n",
			__func__);
		err = -ENOMEM;
		goto err_out;
	}

	mutex_init(&devfreq->lock);
	mutex_lock(&devfreq->lock);
	devfreq->dev.parent = dev;
	devfreq->dev.class = devfreq_class;
	devfreq->dev.release = devfreq_dev_release;
	devfreq->profile = profile;
	devfreq->governor = governor;
	devfreq->previous_freq = profile->initial_freq;
	devfreq->data = data;
	devfreq->next_polling = devfreq->polling_jiffies
			      = msecs_to_jiffies(devfreq->profile->polling_ms);
	devfreq->nb.notifier_call = devfreq_notifier_call;

	dev_set_name(&devfreq->dev, "%s", dev_name(dev));
	err = device_register(&devfreq->dev);
	if (err) {
		put_device(&devfreq->dev);
		goto err_dev;
	}

	if (governor->init)
		err = governor->init(devfreq);
	if (err)
		goto err_init;

	mutex_unlock(&devfreq->lock);

	if (governor->no_central_polling)
		goto out;

	mutex_lock(&devfreq_list_lock);

	list_add(&devfreq->node, &devfreq_list);

	if (devfreq_wq && devfreq->next_polling && !polling) {
		polling = true;
		queue_delayed_work(devfreq_wq, &devfreq_work,
				   devfreq->next_polling);
	}
	mutex_unlock(&devfreq_list_lock);
out:
	return devfreq;

err_init:
	device_unregister(&devfreq->dev);
err_dev:
	mutex_unlock(&devfreq->lock);
	kfree(devfreq);
err_out:
	return ERR_PTR(err);
}
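
/*
 * Illustrative sketch (not part of this file): a device driver would
 * typically register with devfreq roughly as follows. The names foo_target
 * and foo_profile and the frequencies are hypothetical, and the governor
 * shown is only an example; some governors require additional profile
 * callbacks (e.g. get_dev_status).
 *
 *	static int foo_target(struct device *dev, unsigned long *freq)
 *	{
 *		struct opp *opp = devfreq_recommended_opp(dev, freq);
 *
 *		if (IS_ERR(opp))
 *			return PTR_ERR(opp);
 *		... apply *freq with driver-specific clock/regulator calls ...
 *		return 0;
 *	}
 *
 *	static struct devfreq_dev_profile foo_profile = {
 *		.initial_freq	= 200000000,
 *		.polling_ms	= 100,
 *		.target		= foo_target,
 *	};
 *
 *	devfreq = devfreq_add_device(dev, &foo_profile,
 *				     &devfreq_simple_ondemand, NULL);
 */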

/**
 * devfreq_remove_device() - Remove devfreq feature from a device.
 * @devfreq:	the devfreq instance to be removed
 */
int devfreq_remove_device(struct devfreq *devfreq)
{
	bool central_polling;

	if (!devfreq)
		return -EINVAL;

	central_polling = !devfreq->governor->no_central_polling;

	if (central_polling) {
		mutex_lock(&devfreq_list_lock);
		while (wait_remove_device == devfreq) {
			mutex_unlock(&devfreq_list_lock);
			schedule();
			mutex_lock(&devfreq_list_lock);
		}
	}

	mutex_lock(&devfreq->lock);
	_remove_devfreq(devfreq, false); /* it unlocks devfreq->lock */

	if (central_polling)
		mutex_unlock(&devfreq_list_lock);

	return 0;
}

static ssize_t show_governor(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
}

static ssize_t show_freq(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
}

static ssize_t show_polling_interval(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
}

static ssize_t store_polling_interval(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned int value;
	int ret;

	ret = sscanf(buf, "%u", &value);
	if (ret != 1) {
		ret = -EINVAL;
		goto out;
	}

	mutex_lock(&df->lock);
	df->profile->polling_ms = value;
	df->next_polling = df->polling_jiffies
			 = msecs_to_jiffies(value);
	mutex_unlock(&df->lock);

	ret = count;

	if (df->governor->no_central_polling)
		goto out;

	mutex_lock(&devfreq_list_lock);
	if (df->next_polling > 0 && !polling) {
		polling = true;
		queue_delayed_work(devfreq_wq, &devfreq_work,
				   df->next_polling);
	}
	mutex_unlock(&devfreq_list_lock);
out:
	return ret;
}

static ssize_t show_central_polling(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n",
		       !to_devfreq(dev)->governor->no_central_polling);
}

static struct device_attribute devfreq_attrs[] = {
	__ATTR(governor, S_IRUGO, show_governor, NULL),
	__ATTR(cur_freq, S_IRUGO, show_freq, NULL),
	__ATTR(central_polling, S_IRUGO, show_central_polling, NULL),
	__ATTR(polling_interval, S_IRUGO | S_IWUSR, show_polling_interval,
	       store_polling_interval),
	{ },
};
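
/*
 * With the class attributes above, each registered devfreq device appears
 * under /sys/class/devfreq/<parent-device-name>/ with, e.g. (illustrative
 * paths):
 *
 *	governor		(read-only, current governor name)
 *	cur_freq		(read-only, last frequency set by devfreq)
 *	central_polling		(read-only, 1 if centrally polled)
 *	polling_interval	(read-write, polling period in ms)
 */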

/**
 * devfreq_start_polling() - Initialize data structures for the devfreq
 *			     framework and start polling registered devfreq
 *			     devices.
 */
static int __init devfreq_start_polling(void)
{
	mutex_lock(&devfreq_list_lock);
	polling = false;
	devfreq_wq = create_freezable_workqueue("devfreq_wq");
	INIT_DELAYED_WORK_DEFERRABLE(&devfreq_work, devfreq_monitor);
	mutex_unlock(&devfreq_list_lock);

	devfreq_monitor(&devfreq_work.work);
	return 0;
}
late_initcall(devfreq_start_polling);

static int __init devfreq_init(void)
{
	devfreq_class = class_create(THIS_MODULE, "devfreq");
	if (IS_ERR(devfreq_class)) {
		pr_err("%s: couldn't create class\n", __FILE__);
		return PTR_ERR(devfreq_class);
	}
	devfreq_class->dev_attrs = devfreq_attrs;
	return 0;
}
subsys_initcall(devfreq_init);

static void __exit devfreq_exit(void)
{
	class_destroy(devfreq_class);
}
module_exit(devfreq_exit);

/*
 * The following are helper functions for devfreq user device drivers with
 * the OPP framework.
 */

/**
 * devfreq_recommended_opp() - Helper function to get the proper OPP for the
 *			       freq value given to the target callback.
 * @dev:	The devfreq user device. (parent of devfreq)
 * @freq:	The frequency given to the target function
 */
struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq)
{
	struct opp *opp = opp_find_freq_ceil(dev, freq);

	if (opp == ERR_PTR(-ENODEV))
		opp = opp_find_freq_floor(dev, freq);
	return opp;
}

/**
 * devfreq_register_opp_notifier() - Helper function to get devfreq notified
 *				     of any changes in OPP availability.
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 */
int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	struct srcu_notifier_head *nh = opp_get_notifier(dev);

	if (IS_ERR(nh))
		return PTR_ERR(nh);
	return srcu_notifier_chain_register(nh, &devfreq->nb);
}

/**
 * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
 *				       notified of changes in OPP availability.
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 *
 * This must be called in the exit() callback of devfreq_dev_profile if
 * devfreq_recommended_opp() is used.
 */
int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	struct srcu_notifier_head *nh = opp_get_notifier(dev);

	if (IS_ERR(nh))
		return PTR_ERR(nh);
	return srcu_notifier_chain_unregister(nh, &devfreq->nb);
}

MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
MODULE_DESCRIPTION("devfreq class support");
MODULE_LICENSE("GPL");
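
/*
 * Illustrative pairing (not part of this file; foo_devfreq is a hypothetical
 * per-driver pointer): a driver whose target() callback relies on
 * devfreq_recommended_opp() would typically do
 *
 *	after devfreq_add_device() succeeds:
 *		devfreq_register_opp_notifier(dev, foo_devfreq);
 *
 *	in the devfreq_dev_profile exit() callback:
 *		devfreq_unregister_opp_notifier(dev, foo_devfreq);
 *
 * so that OPP availability changes trigger a frequency re-evaluation.
 */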