// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/drivers/thermal/cpufreq_cooling.c
 *
 *  Copyright (C) 2012	Samsung Electronics Co., Ltd(http://www.samsung.com)
 *
 *  Copyright (C) 2012-2018 Linaro Limited.
 *
 *  Authors:	Amit Daniel <amit.kachhap@linaro.org>
 *		Viresh Kumar <viresh.kumar@linaro.org>
 *
 */
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpu_cooling.h>
#include <linux/device.h>
#include <linux/energy_model.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/pm_opp.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/thermal.h>
#include <linux/units.h>

#include "thermal_trace.h"

/*
 * Cooling state <-> CPUFreq frequency
 *
 * Cooling states are translated to frequencies throughout this driver and this
 * is the relation between them.
 *
 * Highest cooling state corresponds to lowest possible frequency.
 *
 * i.e.
 *	level 0 --> 1st Max Freq
 *	level 1 --> 2nd Max Freq
 *	...
 */

/**
 * struct time_in_idle - Idle time stats
 * @time: previous reading of the absolute time that this cpu was idle
 * @timestamp: wall time of the last invocation of get_cpu_idle_time_us()
 */
struct time_in_idle {
	u64 time;
	u64 timestamp;
};

/**
 * struct cpufreq_cooling_device - data for cooling device with cpufreq
 * @last_load: load measured by the latest call to cpufreq_get_requested_power()
 * @cpufreq_state: integer value representing the current state of cpufreq
 *	cooling devices.
 * @max_level: maximum cooling level. One less than total number of valid
 *	cpufreq frequencies.
 * @em: Reference on the Energy Model of the device
 * @policy: cpufreq policy.
 * @cooling_ops: cpufreq callbacks to thermal cooling device ops
 * @idle_time: idle time stats
 * @qos_req: PM QoS constraint to apply
 *
 * This structure is required for keeping information of each registered
 * cpufreq_cooling_device.
 */
struct cpufreq_cooling_device {
	u32 last_load;
	unsigned int cpufreq_state;
	unsigned int max_level;
	struct em_perf_domain *em;
	struct cpufreq_policy *policy;
	struct thermal_cooling_device_ops cooling_ops;
#ifndef CONFIG_SMP
	struct time_in_idle *idle_time;
#endif
	struct freq_qos_request qos_req;
};

#ifdef CONFIG_THERMAL_GOV_POWER_ALLOCATOR
/**
 * get_level() - Find the level for a particular frequency
 * @cpufreq_cdev: cpufreq_cdev for which the property is required
 * @freq: Frequency
 *
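 * For illustration (hypothetical values): with an ascending EM table of
 * 500 MHz, 1.0 GHz, 1.5 GHz and 2.0 GHz, max_level is 3; a @freq of
 * 2.0 GHz maps to level 0, 1.5 GHz to level 1 and 500 MHz to level 3.
 *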
 * Return: level corresponding to the frequency.
 */
static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_cdev,
			       unsigned int freq)
{
	struct em_perf_state *table;
	int i;

	rcu_read_lock();
	table = em_perf_state_from_pd(cpufreq_cdev->em);
	for (i = cpufreq_cdev->max_level - 1; i >= 0; i--) {
		if (freq > table[i].frequency)
			break;
	}
	rcu_read_unlock();

	return cpufreq_cdev->max_level - i - 1;
}

static u32 cpu_freq_to_power(struct cpufreq_cooling_device *cpufreq_cdev,
			     u32 freq)
{
	struct em_perf_state *table;
	unsigned long power_mw;
	int i;

	rcu_read_lock();
	table = em_perf_state_from_pd(cpufreq_cdev->em);
	for (i = cpufreq_cdev->max_level - 1; i >= 0; i--) {
		if (freq > table[i].frequency)
			break;
	}

	power_mw = table[i + 1].power;
	power_mw /= MICROWATT_PER_MILLIWATT;
	rcu_read_unlock();

	return power_mw;
}

static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev,
			     u32 power)
{
	struct em_perf_state *table;
	unsigned long em_power_mw;
	u32 freq;
	int i;

	rcu_read_lock();
	table = em_perf_state_from_pd(cpufreq_cdev->em);
	for (i = cpufreq_cdev->max_level; i > 0; i--) {
		/* Convert EM power to milli-Watts to make safe comparison */
		em_power_mw = table[i].power;
		em_power_mw /= MICROWATT_PER_MILLIWATT;
		if (power >= em_power_mw)
			break;
	}
	freq = table[i].frequency;
	rcu_read_unlock();

	return freq;
}

/**
 * get_load() - get load for a cpu
 * @cpufreq_cdev: struct cpufreq_cooling_device for the cpu
 * @cpu: cpu number
 * @cpu_idx: index of the cpu in time_in_idle array
 *
 * Return: The average load of cpu @cpu in percentage since this
 * function was last called.
 */
#ifdef CONFIG_SMP
static u32 get_load(struct cpufreq_cooling_device *cpufreq_cdev, int cpu,
		    int cpu_idx)
{
	unsigned long util = sched_cpu_util(cpu);

	return (util * 100) / arch_scale_cpu_capacity(cpu);
}
#else /* !CONFIG_SMP */
static u32 get_load(struct cpufreq_cooling_device *cpufreq_cdev, int cpu,
		    int cpu_idx)
{
	u32 load;
	u64 now, now_idle, delta_time, delta_idle;
	struct time_in_idle *idle_time = &cpufreq_cdev->idle_time[cpu_idx];

	now_idle = get_cpu_idle_time(cpu, &now, 0);
	delta_idle = now_idle - idle_time->time;
	delta_time = now - idle_time->timestamp;

	if (delta_time <= delta_idle)
		load = 0;
	else
		load = div64_u64(100 * (delta_time - delta_idle), delta_time);

	idle_time->time = now_idle;
	idle_time->timestamp = now;

	return load;
}
#endif /* CONFIG_SMP */

/**
 * get_dynamic_power() - calculate the dynamic power
 * @cpufreq_cdev: &cpufreq_cooling_device for this cdev
 * @freq: current frequency
 *
 * Return: the dynamic power consumed by the cpus described by
 * @cpufreq_cdev.
 */
static u32 get_dynamic_power(struct cpufreq_cooling_device *cpufreq_cdev,
			     unsigned long freq)
{
	u32 raw_cpu_power;

	raw_cpu_power = cpu_freq_to_power(cpufreq_cdev, freq);
	return (raw_cpu_power * cpufreq_cdev->last_load) / 100;
}

/**
 * cpufreq_get_requested_power() - get the current power
 * @cdev: &thermal_cooling_device pointer
 * @power: pointer in which to store the resulting power
 *
 * Calculate the current power consumption of the cpus in milliwatts
 * and store it in @power.
 * This function should actually calculate the requested power, but
 * it's hard to get the frequency that cpufreq would have assigned if
 * there were no thermal limits.  Instead, we calculate the current
 * power on the assumption that the immediate future will look like
 * the immediate past.
 *
 * We use the current frequency and the average load since this
 * function was last called.  In reality, there could have been
 * multiple OPPs since this function was last called and that affects
 * the load calculation.  While it's not perfectly accurate, this
 * simplification is good enough and works.  REVISIT this, as more
 * complex code may be needed if experiments show that it's not
 * accurate enough.
 *
 * Return: 0 on success, this function doesn't fail.
 */
static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
				       u32 *power)
{
	unsigned long freq;
	int i = 0, cpu;
	u32 total_load = 0;
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
	struct cpufreq_policy *policy = cpufreq_cdev->policy;

	freq = cpufreq_quick_get(policy->cpu);

	for_each_cpu(cpu, policy->related_cpus) {
		u32 load;

		if (cpu_online(cpu))
			load = get_load(cpufreq_cdev, cpu, i);
		else
			load = 0;

		total_load += load;
	}

	cpufreq_cdev->last_load = total_load;

	*power = get_dynamic_power(cpufreq_cdev, freq);

	trace_thermal_power_cpu_get_power_simple(policy->cpu, *power);

	return 0;
}

/**
 * cpufreq_state2power() - convert a cpu cdev state to power consumed
 * @cdev: &thermal_cooling_device pointer
 * @state: cooling device state to be converted
 * @power: pointer in which to store the resulting power
 *
 * Convert cooling device state @state into power consumption in
 * milliwatts assuming 100% load.  Store the calculated power in
 * @power.
 *
 * Return: 0 on success, -EINVAL if the cooling device state is bigger
 * than maximum allowed.
 */
static int cpufreq_state2power(struct thermal_cooling_device *cdev,
			       unsigned long state, u32 *power)
{
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
	unsigned int freq, num_cpus, idx;
	struct em_perf_state *table;

	/* Requested state must not exceed max_level */
	if (state > cpufreq_cdev->max_level)
		return -EINVAL;

	num_cpus = cpumask_weight(cpufreq_cdev->policy->cpus);

	idx = cpufreq_cdev->max_level - state;

	rcu_read_lock();
	table = em_perf_state_from_pd(cpufreq_cdev->em);
	freq = table[idx].frequency;
	rcu_read_unlock();

	*power = cpu_freq_to_power(cpufreq_cdev, freq) * num_cpus;

	return 0;
}

/**
 * cpufreq_power2state() - convert power to a cooling device state
 * @cdev: &thermal_cooling_device pointer
 * @power: power in milliwatts to be converted
 * @state: pointer in which to store the resulting state
 *
 * Calculate a cooling device state for the cpus described by @cdev
 * that would allow them to consume at most @power mW and store it in
 * @state.  Note that this calculation depends on external factors
 * such as the CPUs' load.  Calling this function with the same power
 * as input can yield different cooling device states depending on
 * those external factors.
 *
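 * For illustration (hypothetical numbers): with @power = 600 mW and a
 * last observed total load of 150 (e.g. two CPUs at 75% load each),
 * the budget is normalised to 600 * 100 / 150 = 400 mW, and the
 * highest EM performance state whose per-CPU full-load power does not
 * exceed 400 mW provides the target frequency and thus the state.
 *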
 * Return: 0 on success, this function doesn't fail.
 */
static int cpufreq_power2state(struct thermal_cooling_device *cdev,
			       u32 power, unsigned long *state)
{
	unsigned int target_freq;
	u32 last_load, normalised_power;
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
	struct cpufreq_policy *policy = cpufreq_cdev->policy;

	last_load = cpufreq_cdev->last_load ?: 1;
	normalised_power = (power * 100) / last_load;
	target_freq = cpu_power_to_freq(cpufreq_cdev, normalised_power);

	*state = get_level(cpufreq_cdev, target_freq);
	trace_thermal_power_cpu_limit(policy->related_cpus, target_freq, *state,
				      power);
	return 0;
}

static inline bool em_is_sane(struct cpufreq_cooling_device *cpufreq_cdev,
			      struct em_perf_domain *em)
{
	struct cpufreq_policy *policy;
	unsigned int nr_levels;

	if (!em || em_is_artificial(em))
		return false;

	policy = cpufreq_cdev->policy;
	if (!cpumask_equal(policy->related_cpus, em_span_cpus(em))) {
		pr_err("The span of pd %*pbl is misaligned with cpufreq policy %*pbl\n",
		       cpumask_pr_args(em_span_cpus(em)),
		       cpumask_pr_args(policy->related_cpus));
		return false;
	}

	nr_levels = cpufreq_cdev->max_level + 1;
	if (em_pd_nr_perf_states(em) != nr_levels) {
		pr_err("The number of performance states in pd %*pbl (%u) doesn't match the number of cooling levels (%u)\n",
		       cpumask_pr_args(em_span_cpus(em)),
		       em_pd_nr_perf_states(em), nr_levels);
		return false;
	}

	return true;
}
#endif /* CONFIG_THERMAL_GOV_POWER_ALLOCATOR */

#ifdef CONFIG_SMP
static inline int allocate_idle_time(struct cpufreq_cooling_device *cpufreq_cdev)
{
	return 0;
}

static inline void free_idle_time(struct cpufreq_cooling_device *cpufreq_cdev)
{
}
#else
static int allocate_idle_time(struct cpufreq_cooling_device *cpufreq_cdev)
{
	unsigned int num_cpus = cpumask_weight(cpufreq_cdev->policy->related_cpus);

	cpufreq_cdev->idle_time = kcalloc(num_cpus,
					  sizeof(*cpufreq_cdev->idle_time),
					  GFP_KERNEL);
	if (!cpufreq_cdev->idle_time)
		return -ENOMEM;

	return 0;
}

static void free_idle_time(struct cpufreq_cooling_device *cpufreq_cdev)
{
	kfree(cpufreq_cdev->idle_time);
	cpufreq_cdev->idle_time = NULL;
}
#endif /* CONFIG_SMP */

static unsigned int get_state_freq(struct cpufreq_cooling_device *cpufreq_cdev,
				   unsigned long state)
{
	struct cpufreq_policy *policy;
	unsigned long idx;

#ifdef CONFIG_THERMAL_GOV_POWER_ALLOCATOR
	/* Use the Energy Model table if available */
	if (cpufreq_cdev->em) {
		struct em_perf_state *table;
		unsigned int freq;

		idx = cpufreq_cdev->max_level - state;

		rcu_read_lock();
		table = em_perf_state_from_pd(cpufreq_cdev->em);
		freq = table[idx].frequency;
		rcu_read_unlock();

		return freq;
	}
#endif

	/* Otherwise, fall back on the CPUFreq table */
	policy = cpufreq_cdev->policy;
	if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
		idx = cpufreq_cdev->max_level - state;
	else
		idx = state;

	return policy->freq_table[idx].frequency;
}

/* cpufreq cooling device callback functions are defined below */

/**
 * cpufreq_get_max_state - callback function to get the max cooling state.
 * @cdev: thermal cooling device pointer.
 * @state: fill this variable with the max cooling state.
 *
 * Callback for the thermal cooling device to return the cpufreq
 * max cooling state.
 *
 * Return: 0 on success, this function doesn't fail.
 */
static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
				 unsigned long *state)
{
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;

	*state = cpufreq_cdev->max_level;
	return 0;
}

/**
 * cpufreq_get_cur_state - callback function to get the current cooling state.
 * @cdev: thermal cooling device pointer.
 * @state: fill this variable with the current cooling state.
 *
 * Callback for the thermal cooling device to return the cpufreq
 * current cooling state.
 *
 * Return: 0 on success, this function doesn't fail.
 */
static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
				 unsigned long *state)
{
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;

	*state = cpufreq_cdev->cpufreq_state;

	return 0;
}

/**
 * cpufreq_set_cur_state - callback function to set the current cooling state.
 * @cdev: thermal cooling device pointer.
 * @state: set this variable to the current cooling state.
 *
 * Callback for the thermal cooling device to change the cpufreq
 * current cooling state.
 *
 * Return: 0 on success, an error code otherwise.
 */
static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
				 unsigned long state)
{
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
	unsigned int frequency;
	int ret;

	/* Requested state must not exceed max_level */
	if (state > cpufreq_cdev->max_level)
		return -EINVAL;

	/* Check if the old cooling action is same as new cooling action */
	if (cpufreq_cdev->cpufreq_state == state)
		return 0;

	frequency = get_state_freq(cpufreq_cdev, state);

	ret = freq_qos_update_request(&cpufreq_cdev->qos_req, frequency);
	if (ret >= 0) {
		cpufreq_cdev->cpufreq_state = state;
		ret = 0;
	}

	return ret;
}

/**
 * __cpufreq_cooling_register - helper function to create cpufreq cooling device
 * @np: a valid struct device_node to the cooling device tree node
 * @policy: cpufreq policy
 * @em: Energy Model of the cpufreq policy
 *
 * This interface function registers the cpufreq cooling device with the name
 * "cpufreq-%s". This API can support multiple instances of cpufreq
 * cooling devices. It also gives the opportunity to link the cooling device
 * with a device tree node, in order to bind it via the thermal DT code.
 *
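 * Throttling is implemented by adding a FREQ_QOS_MAX frequency constraint
 * to the policy: cooling state 0 corresponds to the highest available
 * frequency (no throttling) and each deeper state caps the policy at the
 * next lower frequency.
 *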
 * Return: a valid struct thermal_cooling_device pointer on success,
 * or a corresponding ERR_PTR() on failure.
 */
static struct thermal_cooling_device *
__cpufreq_cooling_register(struct device_node *np,
			   struct cpufreq_policy *policy,
			   struct em_perf_domain *em)
{
	struct thermal_cooling_device *cdev;
	struct cpufreq_cooling_device *cpufreq_cdev;
	unsigned int i;
	struct device *dev;
	int ret;
	struct thermal_cooling_device_ops *cooling_ops;
	char *name;

	if (IS_ERR_OR_NULL(policy)) {
		pr_err("%s: cpufreq policy isn't valid: %p\n", __func__, policy);
		return ERR_PTR(-EINVAL);
	}

	dev = get_cpu_device(policy->cpu);
	if (unlikely(!dev)) {
		pr_warn("No cpu device for cpu %d\n", policy->cpu);
		return ERR_PTR(-ENODEV);
	}

	i = cpufreq_table_count_valid_entries(policy);
	if (!i) {
		pr_debug("%s: CPUFreq table not found or has no valid entries\n",
			 __func__);
		return ERR_PTR(-ENODEV);
	}

	cpufreq_cdev = kzalloc(sizeof(*cpufreq_cdev), GFP_KERNEL);
	if (!cpufreq_cdev)
		return ERR_PTR(-ENOMEM);

	cpufreq_cdev->policy = policy;

	ret = allocate_idle_time(cpufreq_cdev);
	if (ret) {
		cdev = ERR_PTR(ret);
		goto free_cdev;
	}

	/* max_level is an index, not a counter */
	cpufreq_cdev->max_level = i - 1;

	cooling_ops = &cpufreq_cdev->cooling_ops;
	cooling_ops->get_max_state = cpufreq_get_max_state;
	cooling_ops->get_cur_state = cpufreq_get_cur_state;
	cooling_ops->set_cur_state = cpufreq_set_cur_state;

#ifdef CONFIG_THERMAL_GOV_POWER_ALLOCATOR
	if (em_is_sane(cpufreq_cdev, em)) {
		cpufreq_cdev->em = em;
		cooling_ops->get_requested_power = cpufreq_get_requested_power;
		cooling_ops->state2power = cpufreq_state2power;
		cooling_ops->power2state = cpufreq_power2state;
	} else
#endif
	if (policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED) {
		pr_err("%s: unsorted frequency tables are not supported\n",
		       __func__);
		cdev = ERR_PTR(-EINVAL);
		goto free_idle_time;
	}

	ret = freq_qos_add_request(&policy->constraints,
				   &cpufreq_cdev->qos_req, FREQ_QOS_MAX,
				   get_state_freq(cpufreq_cdev, 0));
	if (ret < 0) {
		pr_err("%s: Failed to add freq constraint (%d)\n", __func__,
		       ret);
		cdev = ERR_PTR(ret);
		goto free_idle_time;
	}

	cdev = ERR_PTR(-ENOMEM);
	name = kasprintf(GFP_KERNEL, "cpufreq-%s", dev_name(dev));
	if (!name)
		goto remove_qos_req;

	cdev = thermal_of_cooling_device_register(np, name, cpufreq_cdev,
						  cooling_ops);
	kfree(name);

	if (IS_ERR(cdev))
		goto remove_qos_req;

	return cdev;

remove_qos_req:
	freq_qos_remove_request(&cpufreq_cdev->qos_req);
free_idle_time:
	free_idle_time(cpufreq_cdev);
free_cdev:
	kfree(cpufreq_cdev);
	return cdev;
}

/**
 * cpufreq_cooling_register - function to create cpufreq cooling device.
 * @policy: cpufreq policy
 *
 * This interface function registers the cpufreq cooling device with the name
 * "cpufreq-%s". This API can support multiple instances of cpufreq cooling
 * devices.
 *
 * Return: a valid struct thermal_cooling_device pointer on success,
 * or a corresponding ERR_PTR() on failure.
 */
struct thermal_cooling_device *
cpufreq_cooling_register(struct cpufreq_policy *policy)
{
	return __cpufreq_cooling_register(NULL, policy, NULL);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_register);

/**
 * of_cpufreq_cooling_register - function to create cpufreq cooling device.
 * @policy: cpufreq policy
 *
 * This interface function registers the cpufreq cooling device with the name
 * "cpufreq-%s". This API can support multiple instances of cpufreq cooling
 * devices. Using this API, the cpufreq cooling device will be linked to the
 * device tree node provided.
 *
 * Using this function, the cooling device will implement the power
 * extensions by using the Energy Model (if present). The cpus must have
 * registered their OPPs using the OPP library.
 *
 * Return: a valid struct thermal_cooling_device pointer on success,
 * and NULL on failure.
 */
struct thermal_cooling_device *
of_cpufreq_cooling_register(struct cpufreq_policy *policy)
{
	struct device_node *np = of_get_cpu_node(policy->cpu, NULL);
	struct thermal_cooling_device *cdev = NULL;

	if (!np) {
		pr_err("cpufreq_cooling: OF node not available for cpu%d\n",
		       policy->cpu);
		return NULL;
	}

	if (of_property_present(np, "#cooling-cells")) {
		struct em_perf_domain *em = em_cpu_get(policy->cpu);

		cdev = __cpufreq_cooling_register(np, policy, em);
		if (IS_ERR(cdev)) {
			pr_err("cpufreq_cooling: cpu%d failed to register as cooling device: %ld\n",
			       policy->cpu, PTR_ERR(cdev));
			cdev = NULL;
		}
	}

	of_node_put(np);
	return cdev;
}
EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);

/**
 * cpufreq_cooling_unregister - function to remove cpufreq cooling device.
 * @cdev: thermal cooling device pointer.
 *
 * This interface function unregisters the "cpufreq-%s" cooling device.
 */
void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
	struct cpufreq_cooling_device *cpufreq_cdev;

	if (!cdev)
		return;

	cpufreq_cdev = cdev->devdata;

	thermal_cooling_device_unregister(cdev);
	freq_qos_remove_request(&cpufreq_cdev->qos_req);
	free_idle_time(cpufreq_cdev);
	kfree(cpufreq_cdev);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_unregister);
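
/*
 * Example usage (illustrative sketch, not part of this driver): a cpufreq
 * driver that manages its own cooling device would typically register it
 * once the policy is ready and unregister it on teardown, e.g.:
 *
 *	struct thermal_cooling_device *cdev;
 *
 *	cdev = of_cpufreq_cooling_register(policy);
 *	...
 *	cpufreq_cooling_unregister(cdev);
 *
 * Drivers that set the CPUFREQ_IS_COOLING_DEV flag get equivalent handling
 * from the cpufreq core instead.
 */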