/*
 * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
 *	    for Non-CPU Devices.
 *
 * Copyright (C) 2011 Samsung Electronics
 *	MyungJoo Ham <myungjoo.ham@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/pm_opp.h>
#include <linux/devfreq.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include <linux/of.h>
#include "governor.h"

static struct class *devfreq_class;

/*
 * devfreq core provides delayed work based load monitoring helper
 * functions. Governors can use these or can implement their own
 * monitoring mechanism.
 */
static struct workqueue_struct *devfreq_wq;

/* The list of all device-devfreq governors */
static LIST_HEAD(devfreq_governor_list);
/* The list of all device-devfreq */
static LIST_HEAD(devfreq_list);
static DEFINE_MUTEX(devfreq_list_lock);

/**
 * find_device_devfreq() - find devfreq struct using device pointer
 * @dev:	device pointer used to lookup device devfreq.
 *
 * Search the list of device devfreqs and return the matched device's
 * devfreq info. devfreq_list_lock should be held by the caller.
 */
static struct devfreq *find_device_devfreq(struct device *dev)
{
	struct devfreq *tmp_devfreq;

	if (IS_ERR_OR_NULL(dev)) {
		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	WARN(!mutex_is_locked(&devfreq_list_lock),
	     "devfreq_list_lock must be locked.");

	list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
		if (tmp_devfreq->dev.parent == dev)
			return tmp_devfreq;
	}

	return ERR_PTR(-ENODEV);
}

/**
 * devfreq_get_freq_level() - Lookup freq_table for the frequency
 * @devfreq:	the devfreq instance
 * @freq:	the target frequency
 */
static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
{
	int lev;

	for (lev = 0; lev < devfreq->profile->max_state; lev++)
		if (freq == devfreq->profile->freq_table[lev])
			return lev;

	return -EINVAL;
}

/**
 * devfreq_set_freq_table() - Initialize freq_table for the frequency
 * @devfreq:	the devfreq instance
 */
static void devfreq_set_freq_table(struct devfreq *devfreq)
{
	struct devfreq_dev_profile *profile = devfreq->profile;
	struct dev_pm_opp *opp;
	unsigned long freq;
	int i, count;

	/* Initialize the freq_table from OPP table */
	count = dev_pm_opp_get_opp_count(devfreq->dev.parent);
	if (count <= 0)
		return;

	profile->max_state = count;
	profile->freq_table = devm_kcalloc(devfreq->dev.parent,
					   profile->max_state,
					   sizeof(*profile->freq_table),
					   GFP_KERNEL);
	if (!profile->freq_table) {
		profile->max_state = 0;
		return;
	}

	rcu_read_lock();
	for (i = 0, freq = 0; i < profile->max_state; i++, freq++) {
		opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &freq);
		if (IS_ERR(opp)) {
			devm_kfree(devfreq->dev.parent, profile->freq_table);
			profile->max_state = 0;
			rcu_read_unlock();
			return;
		}
		profile->freq_table[i] = freq;
	}
	rcu_read_unlock();
}

/**
 * devfreq_update_status() - Update statistics of devfreq behavior
 * @devfreq:	the devfreq instance
 * @freq:	the update target frequency
 */
static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
{
	int lev, prev_lev, ret = 0;
	unsigned long cur_time;

	cur_time = jiffies;

	prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq);
	if (prev_lev < 0) {
		ret = prev_lev;
		goto out;
	}

	devfreq->time_in_state[prev_lev] +=
			 cur_time - devfreq->last_stat_updated;

	lev = devfreq_get_freq_level(devfreq, freq);
	if (lev < 0) {
		ret = lev;
		goto out;
	}

	if (lev != prev_lev) {
		devfreq->trans_table[(prev_lev *
				devfreq->profile->max_state) + lev]++;
		devfreq->total_trans++;
	}

out:
	devfreq->last_stat_updated = cur_time;
	return ret;
}

/**
 * find_devfreq_governor() - find devfreq governor from name
 * @name:	name of the governor
 *
 * Search the list of devfreq governors and return the matched
 * governor's pointer. devfreq_list_lock should be held by the caller.
 */
static struct devfreq_governor *find_devfreq_governor(const char *name)
{
	struct devfreq_governor *tmp_governor;

	if (IS_ERR_OR_NULL(name)) {
		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	WARN(!mutex_is_locked(&devfreq_list_lock),
	     "devfreq_list_lock must be locked.");

	list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
		if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN))
			return tmp_governor;
	}

	return ERR_PTR(-ENODEV);
}

static int devfreq_notify_transition(struct devfreq *devfreq,
		struct devfreq_freqs *freqs, unsigned int state)
{
	if (!devfreq)
		return -EINVAL;

	switch (state) {
	case DEVFREQ_PRECHANGE:
		srcu_notifier_call_chain(&devfreq->transition_notifier_list,
				DEVFREQ_PRECHANGE, freqs);
		break;

	case DEVFREQ_POSTCHANGE:
		srcu_notifier_call_chain(&devfreq->transition_notifier_list,
				DEVFREQ_POSTCHANGE, freqs);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
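
/*
 * Example: a minimal sketch of a DEVFREQ_TRANSITION_NOTIFIER receiver fed by
 * the chain above.  The callback gets DEVFREQ_PRECHANGE or DEVFREQ_POSTCHANGE
 * as the action and a struct devfreq_freqs pointer as data.  The names below
 * (foo_devfreq_notifier, foo_nb) are hypothetical; register the block with
 * devfreq_register_notifier() using DEVFREQ_TRANSITION_NOTIFIER.
 *
 *	static int foo_devfreq_notifier(struct notifier_block *nb,
 *					unsigned long action, void *data)
 *	{
 *		struct devfreq_freqs *freqs = data;
 *
 *		if (action == DEVFREQ_PRECHANGE)
 *			pr_debug("about to change: %lu -> %lu\n",
 *				 freqs->old, freqs->new);
 *		else if (action == DEVFREQ_POSTCHANGE)
 *			pr_debug("changed: %lu -> %lu\n",
 *				 freqs->old, freqs->new);
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_devfreq_notifier,
 *	};
 */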

/* Load monitoring helper functions for governors use */

/**
 * update_devfreq() - Reevaluate the device and configure frequency.
 * @devfreq:	the devfreq instance.
 *
 * Note: Lock devfreq->lock before calling update_devfreq
 *	 This function is exported for governors.
 */
int update_devfreq(struct devfreq *devfreq)
{
	struct devfreq_freqs freqs;
	unsigned long freq, cur_freq;
	int err = 0;
	u32 flags = 0;

	if (!mutex_is_locked(&devfreq->lock)) {
		WARN(true, "devfreq->lock must be locked by the caller.\n");
		return -EINVAL;
	}

	if (!devfreq->governor)
		return -EINVAL;

	/* Reevaluate the proper frequency */
	err = devfreq->governor->get_target_freq(devfreq, &freq);
	if (err)
		return err;

	/*
	 * Adjust the frequency with user freq and QoS.
	 *
	 * List from the highest priority
	 * max_freq
	 * min_freq
	 */

	if (devfreq->min_freq && freq < devfreq->min_freq) {
		freq = devfreq->min_freq;
		flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
	}
	if (devfreq->max_freq && freq > devfreq->max_freq) {
		freq = devfreq->max_freq;
		flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
	}

	if (devfreq->profile->get_cur_freq)
		devfreq->profile->get_cur_freq(devfreq->dev.parent, &cur_freq);
	else
		cur_freq = devfreq->previous_freq;

	freqs.old = cur_freq;
	freqs.new = freq;
	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);

	err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
	if (err) {
		freqs.new = cur_freq;
		devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
		return err;
	}

	freqs.new = freq;
	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);

	if (devfreq->profile->freq_table)
		if (devfreq_update_status(devfreq, freq))
			dev_err(&devfreq->dev,
				"Couldn't update frequency transition information.\n");

	devfreq->previous_freq = freq;
	return err;
}
EXPORT_SYMBOL(update_devfreq);

/**
 * devfreq_monitor() - Periodically poll devfreq objects.
 * @work:	the work struct used to run devfreq_monitor periodically.
 */
static void devfreq_monitor(struct work_struct *work)
{
	int err;
	struct devfreq *devfreq = container_of(work,
					struct devfreq, work.work);

	mutex_lock(&devfreq->lock);
	err = update_devfreq(devfreq);
	if (err)
		dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);

	queue_delayed_work(devfreq_wq, &devfreq->work,
				msecs_to_jiffies(devfreq->profile->polling_ms));
	mutex_unlock(&devfreq->lock);
}

/**
 * devfreq_monitor_start() - Start load monitoring of devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function for starting devfreq device load monitoring. By
 * default delayed work based monitoring is supported. Function
 * to be called from governor in response to DEVFREQ_GOV_START
 * event when device is added to devfreq framework.
 */
void devfreq_monitor_start(struct devfreq *devfreq)
{
	INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
	if (devfreq->profile->polling_ms)
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
}
EXPORT_SYMBOL(devfreq_monitor_start);

/**
 * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function to stop devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_STOP
 * event when device is removed from devfreq framework.
 */
void devfreq_monitor_stop(struct devfreq *devfreq)
{
	cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_stop);

/**
 * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function to suspend devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_SUSPEND
 * event or when polling interval is set to zero.
 *
 * Note: Though this function is same as devfreq_monitor_stop(),
 * intentionally kept separate to provide hooks for collecting
 * transition statistics.
 */
void devfreq_monitor_suspend(struct devfreq *devfreq)
{
	mutex_lock(&devfreq->lock);
	if (devfreq->stop_polling) {
		mutex_unlock(&devfreq->lock);
		return;
	}

	devfreq_update_status(devfreq, devfreq->previous_freq);
	devfreq->stop_polling = true;
	mutex_unlock(&devfreq->lock);
	cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_suspend);

/**
 * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function to resume devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_RESUME
 * event or when polling interval is set to non-zero.
 */
void devfreq_monitor_resume(struct devfreq *devfreq)
{
	unsigned long freq;

	mutex_lock(&devfreq->lock);
	if (!devfreq->stop_polling)
		goto out;

	if (!delayed_work_pending(&devfreq->work) &&
			devfreq->profile->polling_ms)
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));

	devfreq->last_stat_updated = jiffies;
	devfreq->stop_polling = false;

	if (devfreq->profile->get_cur_freq &&
		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
		devfreq->previous_freq = freq;

out:
	mutex_unlock(&devfreq->lock);
}
EXPORT_SYMBOL(devfreq_monitor_resume);

/**
 * devfreq_interval_update() - Update device devfreq monitoring interval
 * @devfreq:	the devfreq instance.
 * @delay:	new polling interval to be set.
 *
 * Helper function to set new load monitoring polling interval. Function
 * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
 */
void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
{
	unsigned int cur_delay = devfreq->profile->polling_ms;
	unsigned int new_delay = *delay;

	mutex_lock(&devfreq->lock);
	devfreq->profile->polling_ms = new_delay;

	if (devfreq->stop_polling)
		goto out;

	/* if new delay is zero, stop polling */
	if (!new_delay) {
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		return;
	}

	/* if current delay is zero, start polling with new delay */
	if (!cur_delay) {
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
		goto out;
	}

	/* if current delay is greater than new delay, restart polling */
	if (cur_delay > new_delay) {
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		mutex_lock(&devfreq->lock);
		if (!devfreq->stop_polling)
			queue_delayed_work(devfreq_wq, &devfreq->work,
			      msecs_to_jiffies(devfreq->profile->polling_ms));
	}
out:
	mutex_unlock(&devfreq->lock);
}
EXPORT_SYMBOL(devfreq_interval_update);
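
/*
 * Example: a minimal sketch of how a governor's event_handler typically maps
 * DEVFREQ_GOV_* events onto the monitoring helpers above (this mirrors what
 * the in-tree simple_ondemand governor does; the name foo_handler below is a
 * hypothetical placeholder):
 *
 *	static int foo_handler(struct devfreq *devfreq, unsigned int event,
 *			       void *data)
 *	{
 *		switch (event) {
 *		case DEVFREQ_GOV_START:
 *			devfreq_monitor_start(devfreq);
 *			break;
 *		case DEVFREQ_GOV_STOP:
 *			devfreq_monitor_stop(devfreq);
 *			break;
 *		case DEVFREQ_GOV_INTERVAL:
 *			devfreq_interval_update(devfreq, (unsigned int *)data);
 *			break;
 *		case DEVFREQ_GOV_SUSPEND:
 *			devfreq_monitor_suspend(devfreq);
 *			break;
 *		case DEVFREQ_GOV_RESUME:
 *			devfreq_monitor_resume(devfreq);
 *			break;
 *		}
 *
 *		return 0;
 *	}
 */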

/**
 * devfreq_notifier_call() - Notify that the device frequency requirements
 *			     have been changed out of devfreq framework.
 * @nb:		the notifier_block (supposed to be devfreq->nb)
 * @type:	not used
 * @devp:	not used
 *
 * Called by a notifier that uses devfreq->nb.
 */
static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
				 void *devp)
{
	struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
	int ret;

	mutex_lock(&devfreq->lock);
	ret = update_devfreq(devfreq);
	mutex_unlock(&devfreq->lock);

	return ret;
}

/**
 * _remove_devfreq() - Remove devfreq from the list and release its resources.
 * @devfreq:	the devfreq struct
 */
static void _remove_devfreq(struct devfreq *devfreq)
{
	mutex_lock(&devfreq_list_lock);
	if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
		mutex_unlock(&devfreq_list_lock);
		dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n");
		return;
	}
	list_del(&devfreq->node);
	mutex_unlock(&devfreq_list_lock);

	if (devfreq->governor)
		devfreq->governor->event_handler(devfreq,
						 DEVFREQ_GOV_STOP, NULL);

	if (devfreq->profile->exit)
		devfreq->profile->exit(devfreq->dev.parent);

	mutex_destroy(&devfreq->lock);
	kfree(devfreq);
}

/**
 * devfreq_dev_release() - Callback for struct device to release the device.
 * @dev:	the devfreq device
 *
 * This calls _remove_devfreq() if _remove_devfreq() is not called.
 */
static void devfreq_dev_release(struct device *dev)
{
	struct devfreq *devfreq = to_devfreq(dev);

	_remove_devfreq(devfreq);
}

/**
 * devfreq_add_device() - Add devfreq feature to the device
 * @dev:	the device to add devfreq feature.
 * @profile:	device-specific profile to run devfreq.
 * @governor_name:	name of the policy to choose frequency.
 * @data:	private data for the governor. The devfreq framework does not
 *		touch this value.
 */
struct devfreq *devfreq_add_device(struct device *dev,
				   struct devfreq_dev_profile *profile,
				   const char *governor_name,
				   void *data)
{
	struct devfreq *devfreq;
	struct devfreq_governor *governor;
	int err = 0;

	if (!dev || !profile || !governor_name) {
		dev_err(dev, "%s: Invalid parameters.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&devfreq_list_lock);
	devfreq = find_device_devfreq(dev);
	mutex_unlock(&devfreq_list_lock);
	if (!IS_ERR(devfreq)) {
		dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
		err = -EINVAL;
		goto err_out;
	}

	devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
	if (!devfreq) {
		dev_err(dev, "%s: Unable to create devfreq for the device\n",
			__func__);
		err = -ENOMEM;
		goto err_out;
	}

	mutex_init(&devfreq->lock);
	mutex_lock(&devfreq->lock);
	devfreq->dev.parent = dev;
	devfreq->dev.class = devfreq_class;
	devfreq->dev.release = devfreq_dev_release;
	devfreq->profile = profile;
	strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
	devfreq->previous_freq = profile->initial_freq;
	devfreq->last_status.current_frequency = profile->initial_freq;
	devfreq->data = data;
	devfreq->nb.notifier_call = devfreq_notifier_call;

	if (!devfreq->profile->max_state && !devfreq->profile->freq_table) {
		mutex_unlock(&devfreq->lock);
		devfreq_set_freq_table(devfreq);
		mutex_lock(&devfreq->lock);
	}

	dev_set_name(&devfreq->dev, "%s", dev_name(dev));
	err = device_register(&devfreq->dev);
	if (err) {
		mutex_unlock(&devfreq->lock);
		goto err_out;
	}

	devfreq->trans_table = devm_kzalloc(&devfreq->dev, sizeof(unsigned int) *
						devfreq->profile->max_state *
						devfreq->profile->max_state,
						GFP_KERNEL);
	devfreq->time_in_state = devm_kzalloc(&devfreq->dev, sizeof(unsigned long) *
						devfreq->profile->max_state,
						GFP_KERNEL);
	devfreq->last_stat_updated = jiffies;

	srcu_init_notifier_head(&devfreq->transition_notifier_list);

	mutex_unlock(&devfreq->lock);

	mutex_lock(&devfreq_list_lock);
	list_add(&devfreq->node, &devfreq_list);

	governor = find_devfreq_governor(devfreq->governor_name);
	if (!IS_ERR(governor))
		devfreq->governor = governor;
	if (devfreq->governor)
		err = devfreq->governor->event_handler(devfreq,
					DEVFREQ_GOV_START, NULL);
	mutex_unlock(&devfreq_list_lock);
	if (err) {
		dev_err(dev, "%s: Unable to start governor for the device\n",
			__func__);
		goto err_init;
	}

	return devfreq;

err_init:
	list_del(&devfreq->node);
	device_unregister(&devfreq->dev);
err_out:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(devfreq_add_device);
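
/*
 * Example: a minimal sketch of how a driver would typically wire this up
 * from probe().  The callbacks and names (foo_target, foo_get_dev_status,
 * foo_probe, foo_profile) are hypothetical placeholders; "simple_ondemand"
 * refers to the in-tree governor of that name.
 *
 *	static struct devfreq_dev_profile foo_profile = {
 *		.initial_freq	= 200000000,
 *		.polling_ms	= 100,
 *		.target		= foo_target,
 *		.get_dev_status	= foo_get_dev_status,
 *	};
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct devfreq *df;
 *
 *		df = devm_devfreq_add_device(&pdev->dev, &foo_profile,
 *					     "simple_ondemand", NULL);
 *		if (IS_ERR(df))
 *			return PTR_ERR(df);
 *
 *		return 0;
 *	}
 */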

/**
 * devfreq_remove_device() - Remove devfreq feature from a device.
 * @devfreq:	the devfreq instance to be removed
 *
 * The opposite of devfreq_add_device().
 */
int devfreq_remove_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	device_unregister(&devfreq->dev);

	return 0;
}
EXPORT_SYMBOL(devfreq_remove_device);

static int devm_devfreq_dev_match(struct device *dev, void *res, void *data)
{
	struct devfreq **r = res;

	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}

static void devm_devfreq_dev_release(struct device *dev, void *res)
{
	devfreq_remove_device(*(struct devfreq **)res);
}

/**
 * devm_devfreq_add_device() - Resource-managed devfreq_add_device()
 * @dev:	the device to add devfreq feature.
 * @profile:	device-specific profile to run devfreq.
 * @governor_name:	name of the policy to choose frequency.
 * @data:	private data for the governor. The devfreq framework does not
 *		touch this value.
 *
 * This function automatically manages the memory of the devfreq device using
 * device resource management and simplifies the free operation for the memory
 * of the devfreq device.
 */
struct devfreq *devm_devfreq_add_device(struct device *dev,
					struct devfreq_dev_profile *profile,
					const char *governor_name,
					void *data)
{
	struct devfreq **ptr, *devfreq;

	ptr = devres_alloc(devm_devfreq_dev_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	devfreq = devfreq_add_device(dev, profile, governor_name, data);
	if (IS_ERR(devfreq)) {
		devres_free(ptr);
		/* propagate the real error instead of forcing -ENOMEM */
		return devfreq;
	}

	*ptr = devfreq;
	devres_add(dev, ptr);

	return devfreq;
}
EXPORT_SYMBOL(devm_devfreq_add_device);

#ifdef CONFIG_OF
/*
 * devfreq_get_devfreq_by_phandle - Get the devfreq device from devicetree
 * @dev - instance to the given device
 * @index - index into list of devfreq
 *
 * return the instance of devfreq device
 */
struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
{
	struct device_node *node;
	struct devfreq *devfreq;

	if (!dev)
		return ERR_PTR(-EINVAL);

	if (!dev->of_node)
		return ERR_PTR(-EINVAL);

	node = of_parse_phandle(dev->of_node, "devfreq", index);
	if (!node)
		return ERR_PTR(-ENODEV);

	mutex_lock(&devfreq_list_lock);
	list_for_each_entry(devfreq, &devfreq_list, node) {
		if (devfreq->dev.parent
			&& devfreq->dev.parent->of_node == node) {
			mutex_unlock(&devfreq_list_lock);
			of_node_put(node);
			return devfreq;
		}
	}
	mutex_unlock(&devfreq_list_lock);
	of_node_put(node);

	return ERR_PTR(-EPROBE_DEFER);
}
#else
struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
{
	return ERR_PTR(-ENODEV);
}
#endif /* CONFIG_OF */
EXPORT_SYMBOL_GPL(devfreq_get_devfreq_by_phandle);

/**
 * devm_devfreq_remove_device() - Resource-managed devfreq_remove_device()
 * @dev:	the device from which to remove devfreq feature.
 * @devfreq:	the devfreq instance to be removed
 */
void devm_devfreq_remove_device(struct device *dev, struct devfreq *devfreq)
{
	WARN_ON(devres_release(dev, devm_devfreq_dev_release,
			       devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_remove_device);

/**
 * devfreq_suspend_device() - Suspend devfreq of a device.
 * @devfreq:	the devfreq instance to be suspended
 *
 * This function is intended to be called by the pm callbacks
 * (e.g., runtime_suspend, suspend) of the device driver that
 * holds the devfreq.
 */
int devfreq_suspend_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	if (!devfreq->governor)
		return 0;

	return devfreq->governor->event_handler(devfreq,
				DEVFREQ_GOV_SUSPEND, NULL);
}
EXPORT_SYMBOL(devfreq_suspend_device);

/**
 * devfreq_resume_device() - Resume devfreq of a device.
 * @devfreq:	the devfreq instance to be resumed
 *
 * This function is intended to be called by the pm callbacks
 * (e.g., runtime_resume, resume) of the device driver that
 * holds the devfreq.
 */
int devfreq_resume_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	if (!devfreq->governor)
		return 0;

	return devfreq->governor->event_handler(devfreq,
				DEVFREQ_GOV_RESUME, NULL);
}
EXPORT_SYMBOL(devfreq_resume_device);
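
/*
 * Example: a minimal sketch of calling the suspend/resume helpers above from
 * a driver's runtime PM callbacks, as the kernel-doc suggests.  The names
 * foo_runtime_suspend, foo_runtime_resume, struct foo and foo->devfreq are
 * hypothetical driver-private placeholders.
 *
 *	static int foo_runtime_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		return devfreq_suspend_device(foo->devfreq);
 *	}
 *
 *	static int foo_runtime_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		return devfreq_resume_device(foo->devfreq);
 *	}
 */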

/**
 * devfreq_add_governor() - Add devfreq governor
 * @governor:	the devfreq governor to be added
 */
int devfreq_add_governor(struct devfreq_governor *governor)
{
	struct devfreq_governor *g;
	struct devfreq *devfreq;
	int err = 0;

	if (!governor) {
		pr_err("%s: Invalid parameters.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&devfreq_list_lock);
	g = find_devfreq_governor(governor->name);
	if (!IS_ERR(g)) {
		pr_err("%s: governor %s already registered\n", __func__,
		       g->name);
		err = -EINVAL;
		goto err_out;
	}

	list_add(&governor->node, &devfreq_governor_list);

	list_for_each_entry(devfreq, &devfreq_list, node) {
		int ret = 0;
		struct device *dev = devfreq->dev.parent;

		if (!strncmp(devfreq->governor_name, governor->name,
			     DEVFREQ_NAME_LEN)) {
			/* The following should never occur */
			if (devfreq->governor) {
				dev_warn(dev,
					 "%s: Governor %s already present\n",
					 __func__, devfreq->governor->name);
				ret = devfreq->governor->event_handler(devfreq,
							DEVFREQ_GOV_STOP, NULL);
				if (ret) {
					dev_warn(dev,
						 "%s: Governor %s stop = %d\n",
						 __func__,
						 devfreq->governor->name, ret);
				}
				/* Fall through */
			}
			devfreq->governor = governor;
			ret = devfreq->governor->event_handler(devfreq,
						DEVFREQ_GOV_START, NULL);
			if (ret) {
				dev_warn(dev, "%s: Governor %s start=%d\n",
					 __func__, devfreq->governor->name,
					 ret);
			}
		}
	}

err_out:
	mutex_unlock(&devfreq_list_lock);

	return err;
}
EXPORT_SYMBOL(devfreq_add_governor);

/**
 * devfreq_remove_governor() - Remove devfreq governor
 * @governor:	the devfreq governor to be removed
 */
int devfreq_remove_governor(struct devfreq_governor *governor)
{
	struct devfreq_governor *g;
	struct devfreq *devfreq;
	int err = 0;

	if (!governor) {
		pr_err("%s: Invalid parameters.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&devfreq_list_lock);
	g = find_devfreq_governor(governor->name);
	if (IS_ERR(g)) {
		pr_err("%s: governor %s not registered\n", __func__,
		       governor->name);
		err = PTR_ERR(g);
		goto err_out;
	}
	list_for_each_entry(devfreq, &devfreq_list, node) {
		int ret;
		struct device *dev = devfreq->dev.parent;

		if (!strncmp(devfreq->governor_name, governor->name,
			     DEVFREQ_NAME_LEN)) {
			/* we should have a devfreq governor! */
			if (!devfreq->governor) {
				dev_warn(dev, "%s: Governor %s NOT present\n",
					 __func__, governor->name);
				continue;
			}
			ret = devfreq->governor->event_handler(devfreq,
						DEVFREQ_GOV_STOP, NULL);
			if (ret) {
				dev_warn(dev, "%s: Governor %s stop=%d\n",
					 __func__, devfreq->governor->name,
					 ret);
			}
			devfreq->governor = NULL;
		}
	}

	list_del(&governor->node);
err_out:
	mutex_unlock(&devfreq_list_lock);

	return err;
}
EXPORT_SYMBOL(devfreq_remove_governor);

static ssize_t governor_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	if (!to_devfreq(dev)->governor)
		return -EINVAL;

	return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
}

static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	int ret;
	char str_governor[DEVFREQ_NAME_LEN + 1];
	struct devfreq_governor *governor;

	ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&devfreq_list_lock);
	governor = find_devfreq_governor(str_governor);
	if (IS_ERR(governor)) {
		ret = PTR_ERR(governor);
		goto out;
	}
	if (df->governor == governor) {
		ret = 0;
		goto out;
	}

	if (df->governor) {
		ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
		if (ret) {
			dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
				 __func__, df->governor->name, ret);
			goto out;
		}
	}
	df->governor = governor;
	strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
	ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
	if (ret)
		dev_warn(dev, "%s: Governor %s not started(%d)\n",
			 __func__, df->governor->name, ret);
out:
	mutex_unlock(&devfreq_list_lock);

	if (!ret)
		ret = count;
	return ret;
}
static DEVICE_ATTR_RW(governor);

static ssize_t available_governors_show(struct device *d,
					struct device_attribute *attr,
					char *buf)
{
	struct devfreq_governor *tmp_governor;
	ssize_t count = 0;

	mutex_lock(&devfreq_list_lock);
	list_for_each_entry(tmp_governor, &devfreq_governor_list, node)
		count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
				   "%s ", tmp_governor->name);
	mutex_unlock(&devfreq_list_lock);

	/* Truncate the trailing space */
	if (count)
		count--;

	count += sprintf(&buf[count], "\n");

	return count;
}
static DEVICE_ATTR_RO(available_governors);

static ssize_t cur_freq_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	unsigned long freq;
	struct devfreq *devfreq = to_devfreq(dev);

	if (devfreq->profile->get_cur_freq &&
		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
		return sprintf(buf, "%lu\n", freq);

	return sprintf(buf, "%lu\n", devfreq->previous_freq);
}
static DEVICE_ATTR_RO(cur_freq);

static ssize_t target_freq_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
}
static DEVICE_ATTR_RO(target_freq);

static ssize_t polling_interval_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
}

static ssize_t polling_interval_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned int value;
	int ret;

	if (!df->governor)
		return -EINVAL;

	ret = sscanf(buf, "%u", &value);
	if (ret != 1)
		return -EINVAL;

	df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
	ret = count;

	return ret;
}
static DEVICE_ATTR_RW(polling_interval);

static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned long value;
	int ret;
	unsigned long max;

	ret = sscanf(buf, "%lu", &value);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&df->lock);
	max = df->max_freq;
	if (value && max && value > max) {
		ret = -EINVAL;
		goto unlock;
	}

	df->min_freq = value;
	update_devfreq(df);
	ret = count;
unlock:
	mutex_unlock(&df->lock);
	return ret;
}

static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned long value;
	int ret;
	unsigned long min;

	ret = sscanf(buf, "%lu", &value);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&df->lock);
	min = df->min_freq;
	if (value && min && value < min) {
		ret = -EINVAL;
		goto unlock;
	}

	df->max_freq = value;
	update_devfreq(df);
	ret = count;
unlock:
	mutex_unlock(&df->lock);
	return ret;
}

#define show_one(name)						\
static ssize_t name##_show					\
(struct device *dev, struct device_attribute *attr, char *buf)	\
{								\
	return sprintf(buf, "%lu\n", to_devfreq(dev)->name);	\
}
show_one(min_freq);
show_one(max_freq);

static DEVICE_ATTR_RW(min_freq);
static DEVICE_ATTR_RW(max_freq);

static ssize_t available_frequencies_show(struct device *d,
					  struct device_attribute *attr,
					  char *buf)
{
	struct devfreq *df = to_devfreq(d);
	struct device *dev = df->dev.parent;
	struct dev_pm_opp *opp;
	ssize_t count = 0;
	unsigned long freq = 0;

	rcu_read_lock();
	do {
		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
		if (IS_ERR(opp))
			break;

		count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
				   "%lu ", freq);
		freq++;
	} while (1);
	rcu_read_unlock();

	/* Truncate the trailing space */
	if (count)
		count--;

	count += sprintf(&buf[count], "\n");

	return count;
}
static DEVICE_ATTR_RO(available_frequencies);

static ssize_t trans_stat_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct devfreq *devfreq = to_devfreq(dev);
	ssize_t len;
	int i, j;
	unsigned int max_state = devfreq->profile->max_state;

	if (!devfreq->stop_polling &&
			devfreq_update_status(devfreq, devfreq->previous_freq))
		return 0;
	if (max_state == 0)
		return sprintf(buf, "Not Supported.\n");

	len = sprintf(buf, "     From  :   To\n");
	len += sprintf(buf + len, "           :");
	for (i = 0; i < max_state; i++)
		len += sprintf(buf + len, "%10lu",
				devfreq->profile->freq_table[i]);

	len += sprintf(buf + len, "   time(ms)\n");

	for (i = 0; i < max_state; i++) {
		if (devfreq->profile->freq_table[i]
					== devfreq->previous_freq) {
			len += sprintf(buf + len, "*");
		} else {
			len += sprintf(buf + len, " ");
		}
		len += sprintf(buf + len, "%10lu:",
				devfreq->profile->freq_table[i]);
		for (j = 0; j < max_state; j++)
			len += sprintf(buf + len, "%10u",
				devfreq->trans_table[(i * max_state) + j]);
		len += sprintf(buf + len, "%10u\n",
			jiffies_to_msecs(devfreq->time_in_state[i]));
	}

	len += sprintf(buf + len, "Total transition : %u\n",
					devfreq->total_trans);
	return len;
}
static DEVICE_ATTR_RO(trans_stat);

static struct attribute *devfreq_attrs[] = {
	&dev_attr_governor.attr,
	&dev_attr_available_governors.attr,
	&dev_attr_cur_freq.attr,
	&dev_attr_available_frequencies.attr,
	&dev_attr_target_freq.attr,
	&dev_attr_polling_interval.attr,
	&dev_attr_min_freq.attr,
	&dev_attr_max_freq.attr,
	&dev_attr_trans_stat.attr,
	NULL,
};
ATTRIBUTE_GROUPS(devfreq);

static int __init devfreq_init(void)
{
	devfreq_class = class_create(THIS_MODULE, "devfreq");
	if (IS_ERR(devfreq_class)) {
		pr_err("%s: couldn't create class\n", __FILE__);
		return PTR_ERR(devfreq_class);
	}

	devfreq_wq = create_freezable_workqueue("devfreq_wq");
	if (!devfreq_wq) {
		class_destroy(devfreq_class);
		pr_err("%s: couldn't create workqueue\n", __FILE__);
		return -ENOMEM;
	}
	devfreq_class->dev_groups = devfreq_groups;

	return 0;
}
subsys_initcall(devfreq_init);

static void __exit devfreq_exit(void)
{
	class_destroy(devfreq_class);
	destroy_workqueue(devfreq_wq);
}
module_exit(devfreq_exit);

/*
 * The following are helper functions for devfreq user device drivers with
 * OPP framework.
 */

/**
 * devfreq_recommended_opp() - Helper function to get proper OPP for the
 *			       freq value given to target callback.
 * @dev:	The devfreq user device. (parent of devfreq)
 * @freq:	The frequency given to target function
 * @flags:	Flags handed from devfreq framework.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
					   unsigned long *freq,
					   u32 flags)
{
	struct dev_pm_opp *opp;

	if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
		/* The freq is an upper bound. opp should be lower */
		opp = dev_pm_opp_find_freq_floor(dev, freq);

		/* If not available, use the closest opp */
		if (opp == ERR_PTR(-ERANGE))
			opp = dev_pm_opp_find_freq_ceil(dev, freq);
	} else {
		/* The freq is a lower bound. opp should be higher */
		opp = dev_pm_opp_find_freq_ceil(dev, freq);

		/* If not available, use the closest opp */
		if (opp == ERR_PTR(-ERANGE))
			opp = dev_pm_opp_find_freq_floor(dev, freq);
	}

	return opp;
}
EXPORT_SYMBOL(devfreq_recommended_opp);
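
/*
 * Example: a minimal sketch of a driver's target() callback using
 * devfreq_recommended_opp() under the RCU locking rules described above.
 * foo_target() and foo_set_rate() are hypothetical; the OPP must be read
 * (e.g., via dev_pm_opp_get_voltage()) before rcu_read_unlock().
 *
 *	static int foo_target(struct device *dev, unsigned long *freq, u32 flags)
 *	{
 *		struct dev_pm_opp *opp;
 *		unsigned long volt;
 *
 *		rcu_read_lock();
 *		opp = devfreq_recommended_opp(dev, freq, flags);
 *		if (IS_ERR(opp)) {
 *			rcu_read_unlock();
 *			return PTR_ERR(opp);
 *		}
 *		volt = dev_pm_opp_get_voltage(opp);
 *		rcu_read_unlock();
 *
 *		return foo_set_rate(dev, *freq, volt);
 *	}
 */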

/**
 * devfreq_register_opp_notifier() - Helper function to get devfreq notified
 *				     for any changes in the OPP availability
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 */
int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	struct srcu_notifier_head *nh;
	int ret = 0;

	rcu_read_lock();
	nh = dev_pm_opp_get_notifier(dev);
	if (IS_ERR(nh))
		ret = PTR_ERR(nh);
	rcu_read_unlock();
	if (!ret)
		ret = srcu_notifier_chain_register(nh, &devfreq->nb);

	return ret;
}
EXPORT_SYMBOL(devfreq_register_opp_notifier);

/**
 * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
 *				       notified for any changes in the OPP
 *				       availability
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 *
 * At exit() callback of devfreq_dev_profile, this must be included if
 * devfreq_recommended_opp is used.
 */
int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	struct srcu_notifier_head *nh;
	int ret = 0;

	rcu_read_lock();
	nh = dev_pm_opp_get_notifier(dev);
	if (IS_ERR(nh))
		ret = PTR_ERR(nh);
	rcu_read_unlock();
	if (!ret)
		ret = srcu_notifier_chain_unregister(nh, &devfreq->nb);

	return ret;
}
EXPORT_SYMBOL(devfreq_unregister_opp_notifier);

static void devm_devfreq_opp_release(struct device *dev, void *res)
{
	devfreq_unregister_opp_notifier(dev, *(struct devfreq **)res);
}

/**
 * devm_devfreq_register_opp_notifier() - Resource-managed
 *					   devfreq_register_opp_notifier()
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 */
int devm_devfreq_register_opp_notifier(struct device *dev,
					struct devfreq *devfreq)
{
	struct devfreq **ptr;
	int ret;

	ptr = devres_alloc(devm_devfreq_opp_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = devfreq_register_opp_notifier(dev, devfreq);
	if (ret) {
		devres_free(ptr);
		return ret;
	}

	*ptr = devfreq;
	devres_add(dev, ptr);

	return 0;
}
EXPORT_SYMBOL(devm_devfreq_register_opp_notifier);

/**
 * devm_devfreq_unregister_opp_notifier() - Resource-managed
 *					     devfreq_unregister_opp_notifier()
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 */
void devm_devfreq_unregister_opp_notifier(struct device *dev,
					  struct devfreq *devfreq)
{
	WARN_ON(devres_release(dev, devm_devfreq_opp_release,
			       devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_unregister_opp_notifier);

/**
 * devfreq_register_notifier() - Register a driver with devfreq
 * @devfreq:	The devfreq object.
 * @nb:		The notifier block to register.
 * @list:	DEVFREQ_TRANSITION_NOTIFIER.
 */
int devfreq_register_notifier(struct devfreq *devfreq,
				struct notifier_block *nb,
				unsigned int list)
{
	int ret = 0;

	if (!devfreq)
		return -EINVAL;

	switch (list) {
	case DEVFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_register(
				&devfreq->transition_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(devfreq_register_notifier);

/*
 * devfreq_unregister_notifier() - Unregister a driver with devfreq
 * @devfreq:	The devfreq object.
 * @nb:		The notifier block to be unregistered.
 * @list:	DEVFREQ_TRANSITION_NOTIFIER.
 */
int devfreq_unregister_notifier(struct devfreq *devfreq,
				struct notifier_block *nb,
				unsigned int list)
{
	int ret = 0;

	if (!devfreq)
		return -EINVAL;

	switch (list) {
	case DEVFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_unregister(
				&devfreq->transition_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(devfreq_unregister_notifier);

struct devfreq_notifier_devres {
	struct devfreq *devfreq;
	struct notifier_block *nb;
	unsigned int list;
};

static void devm_devfreq_notifier_release(struct device *dev, void *res)
{
	struct devfreq_notifier_devres *this = res;

	devfreq_unregister_notifier(this->devfreq, this->nb, this->list);
}

/**
 * devm_devfreq_register_notifier()
 *	- Resource-managed devfreq_register_notifier()
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 * @nb:		The notifier block to be registered.
 * @list:	DEVFREQ_TRANSITION_NOTIFIER.
 */
int devm_devfreq_register_notifier(struct device *dev,
				struct devfreq *devfreq,
				struct notifier_block *nb,
				unsigned int list)
{
	struct devfreq_notifier_devres *ptr;
	int ret;

	ptr = devres_alloc(devm_devfreq_notifier_release, sizeof(*ptr),
				GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = devfreq_register_notifier(devfreq, nb, list);
	if (ret) {
		devres_free(ptr);
		return ret;
	}

	ptr->devfreq = devfreq;
	ptr->nb = nb;
	ptr->list = list;
	devres_add(dev, ptr);

	return 0;
}
EXPORT_SYMBOL(devm_devfreq_register_notifier);

/**
 * devm_devfreq_unregister_notifier()
 *	- Resource-managed devfreq_unregister_notifier()
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 * @nb:		The notifier block to be unregistered.
 * @list:	DEVFREQ_TRANSITION_NOTIFIER.
 */
void devm_devfreq_unregister_notifier(struct device *dev,
				      struct devfreq *devfreq,
				      struct notifier_block *nb,
				      unsigned int list)
{
	WARN_ON(devres_release(dev, devm_devfreq_notifier_release,
			       devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_unregister_notifier);

MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
MODULE_DESCRIPTION("devfreq class support");
MODULE_LICENSE("GPL");