// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic OPP Interface
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/pm_domain.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/xarray.h>

#include "opp.h"

/*
 * The root of the list of all opp-tables. All opp_table structures branch off
 * from here, with each opp_table containing the list of opps it supports in
 * various states of availability.
 */
LIST_HEAD(opp_tables);

/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(opp_table_lock);
/* Flag indicating that opp_tables list is being updated at the moment */
static bool opp_tables_busy;

/* OPP ID allocator */
static DEFINE_XARRAY_ALLOC1(opp_configs);

static bool _find_opp_dev(const struct device *dev, struct opp_table *opp_table)
{
	struct opp_device *opp_dev;
	bool found = false;

	mutex_lock(&opp_table->lock);
	list_for_each_entry(opp_dev, &opp_table->dev_list, node)
		if (opp_dev->dev == dev) {
			found = true;
			break;
		}

	mutex_unlock(&opp_table->lock);
	return found;
}

static struct opp_table *_find_opp_table_unlocked(struct device *dev)
{
	struct opp_table *opp_table;

	list_for_each_entry(opp_table, &opp_tables, node) {
		if (_find_opp_dev(dev, opp_table)) {
			_get_opp_table_kref(opp_table);
			return opp_table;
		}
	}

	return ERR_PTR(-ENODEV);
}

/**
 * _find_opp_table() - find opp_table struct using device pointer
 * @dev: device pointer used to lookup OPP table
 *
 * Search OPP table for one containing matching device.
 *
 * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
 * -EINVAL based on type of error.
 *
 * The callers must call dev_pm_opp_put_opp_table() after the table is used.
 */
struct opp_table *_find_opp_table(struct device *dev)
{
	struct opp_table *opp_table;

	if (IS_ERR_OR_NULL(dev)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&opp_table_lock);
	opp_table = _find_opp_table_unlocked(dev);
	mutex_unlock(&opp_table_lock);

	return opp_table;
}

/*
 * Returns true if multiple clocks aren't there, else returns false with WARN.
 *
 * We don't force clk_count == 1 here as there are users who don't have a clock
 * representation in the OPP table and manage the clock configuration themselves
 * in a platform-specific way.
 */
static bool assert_single_clk(struct opp_table *opp_table,
			      unsigned int __always_unused index)
{
	return !WARN_ON(opp_table->clk_count > 1);
}

/*
 * Returns true if the clock table is large enough to contain the clock index.
 */
static bool assert_clk_index(struct opp_table *opp_table,
			     unsigned int index)
{
	return opp_table->clk_count > index;
}
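/*
 * Example: the lookup contract documented above in practice. A minimal
 * sketch (foo_get_opp_count() is hypothetical; it mirrors what
 * dev_pm_opp_get_opp_count() does later in this file):
 *
 *	int foo_get_opp_count(struct device *dev)
 *	{
 *		struct opp_table *opp_table;
 *		int count;
 *
 *		opp_table = _find_opp_table(dev);
 *		if (IS_ERR(opp_table))
 *			return PTR_ERR(opp_table);
 *
 *		count = _get_opp_count(opp_table);
 *
 *		// Drop the reference taken by _find_opp_table()
 *		dev_pm_opp_put_opp_table(opp_table);
 *		return count;
 *	}
 */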
/*
 * Returns true if the bandwidth table is large enough to contain the bandwidth
 * index.
 */
static bool assert_bandwidth_index(struct opp_table *opp_table,
				   unsigned int index)
{
	return opp_table->path_count > index;
}

/**
 * dev_pm_opp_get_bw() - Gets the bandwidth corresponding to an opp
 * @opp: opp for which the bandwidth has to be returned
 * @peak: select peak or average bandwidth
 * @index: bandwidth index
 *
 * Return: bandwidth in kBps, else return 0
 */
unsigned long dev_pm_opp_get_bw(struct dev_pm_opp *opp, bool peak, int index)
{
	if (IS_ERR_OR_NULL(opp)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	if (index >= opp->opp_table->path_count)
		return 0;

	if (!opp->bandwidth)
		return 0;

	return peak ? opp->bandwidth[index].peak : opp->bandwidth[index].avg;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_bw);

/**
 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
 * @opp: opp for which the voltage has to be returned
 *
 * Return: voltage in micro volt corresponding to the opp, else
 * return 0
 *
 * This is useful only for devices with single power supply.
 */
unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	return opp->supplies[0].u_volt;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);

/**
 * dev_pm_opp_get_supplies() - Gets the supply information corresponding to an opp
 * @opp: opp for which the supply information has to be returned
 * @supplies: Placeholder for copying the supply information.
 *
 * Return: negative error number on failure, 0 otherwise on success after
 * setting @supplies.
 *
 * This can be used for devices with any number of power supplies. The caller
 * must ensure that the @supplies array contains space for each regulator.
 */
int dev_pm_opp_get_supplies(struct dev_pm_opp *opp,
			    struct dev_pm_opp_supply *supplies)
{
	if (IS_ERR_OR_NULL(opp) || !supplies) {
		pr_err("%s: Invalid parameters\n", __func__);
		return -EINVAL;
	}

	memcpy(supplies, opp->supplies,
	       sizeof(*supplies) * opp->opp_table->regulator_count);
	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_supplies);
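/*
 * Example: reading per-OPP properties from a driver. A minimal sketch
 * (the 800 MHz rate is hypothetical):
 *
 *	struct dev_pm_opp *opp;
 *	unsigned long volt, peak_bw;
 *
 *	opp = dev_pm_opp_find_freq_exact(dev, 800000000, true);
 *	if (IS_ERR(opp))
 *		return PTR_ERR(opp);
 *
 *	volt = dev_pm_opp_get_voltage(opp);		// uV, single supply
 *	peak_bw = dev_pm_opp_get_bw(opp, true, 0);	// kBps, first path
 *	dev_pm_opp_put(opp);
 */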
/**
 * dev_pm_opp_get_power() - Gets the power corresponding to an opp
 * @opp: opp for which the power has to be returned
 *
 * Return: power in micro watt corresponding to the opp, else
 * return 0
 *
 * This is useful only for devices with single power supply.
 */
unsigned long dev_pm_opp_get_power(struct dev_pm_opp *opp)
{
	unsigned long opp_power = 0;
	int i;

	if (IS_ERR_OR_NULL(opp)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	for (i = 0; i < opp->opp_table->regulator_count; i++)
		opp_power += opp->supplies[i].u_watt;

	return opp_power;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_power);

/**
 * dev_pm_opp_get_freq_indexed() - Gets the frequency corresponding to an
 *				   available opp with specified index
 * @opp: opp for which the frequency has to be returned
 * @index: index of the frequency within the required opp
 *
 * Return: frequency in hertz corresponding to the opp with specified index,
 * else return 0
 */
unsigned long dev_pm_opp_get_freq_indexed(struct dev_pm_opp *opp, u32 index)
{
	if (IS_ERR_OR_NULL(opp) || index >= opp->opp_table->clk_count) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	return opp->rates[index];
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq_indexed);

/**
 * dev_pm_opp_get_level() - Gets the level corresponding to an available opp
 * @opp: opp for which the level value has to be returned
 *
 * Return: level read from device tree corresponding to the opp, else
 * return 0.
 */
unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp) || !opp->available) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	return opp->level;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_level);

/**
 * dev_pm_opp_get_required_pstate() - Gets the required performance state
 *				      corresponding to an available opp
 * @opp: opp for which the performance state has to be returned
 * @index: index of the required opp
 *
 * Return: performance state read from device tree corresponding to the
 * required opp, else return 0.
 */
unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp,
					    unsigned int index)
{
	if (IS_ERR_OR_NULL(opp) || !opp->available ||
	    index >= opp->opp_table->required_opp_count) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	/* required-opps not fully initialized yet */
	if (lazy_linking_pending(opp->opp_table))
		return 0;

	/* The required OPP table must belong to a genpd */
	if (unlikely(!opp->opp_table->required_opp_tables[index]->is_genpd)) {
		pr_err("%s: Performance state is only valid for genpds.\n", __func__);
		return 0;
	}

	return opp->required_opps[index]->level;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_required_pstate);
/**
 * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
 * @opp: opp for which turbo mode is being verified
 *
 * Turbo OPPs are not for normal use, and can be enabled (under certain
 * conditions) for short durations of time to finish high-throughput work
 * quickly. Running on them for longer times may overheat the chip.
 *
 * Return: true if opp is turbo opp, else false.
 */
bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp) || !opp->available) {
		pr_err("%s: Invalid parameters\n", __func__);
		return false;
	}

	return opp->turbo;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);

/**
 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max clock latency in nanoseconds.
 */
unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
{
	struct opp_table *opp_table;
	unsigned long clock_latency_ns;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return 0;

	clock_latency_ns = opp_table->clock_latency_ns_max;

	dev_pm_opp_put_opp_table(opp_table);

	return clock_latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);

/**
 * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max voltage latency in nanoseconds.
 */
unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *opp;
	struct regulator *reg;
	unsigned long latency_ns = 0;
	int ret, i, count;
	struct {
		unsigned long min;
		unsigned long max;
	} *uV;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return 0;

	/* Regulator may not be required for the device */
	if (!opp_table->regulators)
		goto put_opp_table;

	count = opp_table->regulator_count;

	uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL);
	if (!uV)
		goto put_opp_table;

	mutex_lock(&opp_table->lock);

	for (i = 0; i < count; i++) {
		uV[i].min = ~0;
		uV[i].max = 0;

		list_for_each_entry(opp, &opp_table->opp_list, node) {
			if (!opp->available)
				continue;

			if (opp->supplies[i].u_volt_min < uV[i].min)
				uV[i].min = opp->supplies[i].u_volt_min;
			if (opp->supplies[i].u_volt_max > uV[i].max)
				uV[i].max = opp->supplies[i].u_volt_max;
		}
	}

	mutex_unlock(&opp_table->lock);

	/*
	 * The caller needs to ensure that opp_table (and hence the regulator)
	 * isn't freed, while we are executing this routine.
	 */
	for (i = 0; i < count; i++) {
		reg = opp_table->regulators[i];
		ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max);
		if (ret > 0)
			latency_ns += ret * 1000;
	}

	kfree(uV);
put_opp_table:
	dev_pm_opp_put_opp_table(opp_table);

	return latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);

/**
 * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
 *					     nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max transition latency, in nanoseconds, to
 * switch from one OPP to other.
 */
unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
{
	return dev_pm_opp_get_max_volt_latency(dev) +
	       dev_pm_opp_get_max_clock_latency(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
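/*
 * Example: a governor might use the combined latency to bound how often
 * it re-evaluates frequencies. A minimal sketch (the 'profile' structure
 * and the 10x margin are hypothetical):
 *
 *	unsigned long lat_ns = dev_pm_opp_get_max_transition_latency(dev);
 *
 *	profile->polling_ms = max_t(unsigned long, 10,
 *				    div64_ul((u64)lat_ns * 10, NSEC_PER_MSEC));
 */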
/**
 * dev_pm_opp_get_suspend_opp_freq() - Get frequency of suspend opp in Hz
 * @dev: device for which we do this operation
 *
 * Return: This function returns the frequency of the OPP marked as suspend_opp
 * if one is available, else returns 0.
 */
unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev)
{
	struct opp_table *opp_table;
	unsigned long freq = 0;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return 0;

	if (opp_table->suspend_opp && opp_table->suspend_opp->available)
		freq = dev_pm_opp_get_freq(opp_table->suspend_opp);

	dev_pm_opp_put_opp_table(opp_table);

	return freq;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp_freq);

int _get_opp_count(struct opp_table *opp_table)
{
	struct dev_pm_opp *opp;
	int count = 0;

	mutex_lock(&opp_table->lock);

	list_for_each_entry(opp, &opp_table->opp_list, node) {
		if (opp->available)
			count++;
	}

	mutex_unlock(&opp_table->lock);

	return count;
}

/**
 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
 * @dev: device for which we do this operation
 *
 * Return: This function returns the number of available opps if there are any,
 * else returns 0 if none or the corresponding error value.
 */
int dev_pm_opp_get_opp_count(struct device *dev)
{
	struct opp_table *opp_table;
	int count;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		count = PTR_ERR(opp_table);
		dev_dbg(dev, "%s: OPP table not found (%d)\n",
			__func__, count);
		return count;
	}

	count = _get_opp_count(opp_table);
	dev_pm_opp_put_opp_table(opp_table);

	return count;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);

/* Helpers to read keys */
static unsigned long _read_freq(struct dev_pm_opp *opp, int index)
{
	return opp->rates[index];
}

static unsigned long _read_level(struct dev_pm_opp *opp, int index)
{
	return opp->level;
}

static unsigned long _read_bw(struct dev_pm_opp *opp, int index)
{
	return opp->bandwidth[index].peak;
}

/* Generic comparison helpers */
static bool _compare_exact(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
			   unsigned long opp_key, unsigned long key)
{
	if (opp_key == key) {
		*opp = temp_opp;
		return true;
	}

	return false;
}

static bool _compare_ceil(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
			  unsigned long opp_key, unsigned long key)
{
	if (opp_key >= key) {
		*opp = temp_opp;
		return true;
	}

	return false;
}

static bool _compare_floor(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
			   unsigned long opp_key, unsigned long key)
{
	if (opp_key > key)
		return true;

	*opp = temp_opp;
	return false;
}
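/*
 * Example: with available OPPs at 300, 600 and 900 MHz, a key of 500 MHz
 * resolves as follows (the opp_list walk below relies on the list being
 * sorted in ascending order):
 *
 *	_compare_exact: no OPP matches, the search fails with -ERANGE
 *	_compare_ceil:  matches 600 MHz, the first OPP >= the key
 *	_compare_floor: matches 300 MHz, the last OPP <= the key
 */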
/* Generic key finding helpers */
static struct dev_pm_opp *_opp_table_find_key(struct opp_table *opp_table,
		unsigned long *key, int index, bool available,
		unsigned long (*read)(struct dev_pm_opp *opp, int index),
		bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
				unsigned long opp_key, unsigned long key),
		bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	/* Assert that the requirement is met */
	if (assert && !assert(opp_table, index))
		return ERR_PTR(-EINVAL);

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available == available) {
			if (compare(&opp, temp_opp, read(temp_opp, index), *key))
				break;
		}
	}

	/* Increment the reference count of OPP */
	if (!IS_ERR(opp)) {
		*key = read(opp, index);
		dev_pm_opp_get(opp);
	}

	mutex_unlock(&opp_table->lock);

	return opp;
}

static struct dev_pm_opp *
_find_key(struct device *dev, unsigned long *key, int index, bool available,
	  unsigned long (*read)(struct dev_pm_opp *opp, int index),
	  bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
			  unsigned long opp_key, unsigned long key),
	  bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
	struct opp_table *opp_table;
	struct dev_pm_opp *opp;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: OPP table not found (%ld)\n", __func__,
			PTR_ERR(opp_table));
		return ERR_CAST(opp_table);
	}

	opp = _opp_table_find_key(opp_table, key, index, available, read,
				  compare, assert);

	dev_pm_opp_put_opp_table(opp_table);

	return opp;
}

static struct dev_pm_opp *_find_key_exact(struct device *dev,
		unsigned long key, int index, bool available,
		unsigned long (*read)(struct dev_pm_opp *opp, int index),
		bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
	/*
	 * The value of key will be updated here, but will be ignored as the
	 * caller doesn't need it.
	 */
	return _find_key(dev, &key, index, available, read, _compare_exact,
			 assert);
}

static struct dev_pm_opp *_opp_table_find_key_ceil(struct opp_table *opp_table,
		unsigned long *key, int index, bool available,
		unsigned long (*read)(struct dev_pm_opp *opp, int index),
		bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
	return _opp_table_find_key(opp_table, key, index, available, read,
				   _compare_ceil, assert);
}

static struct dev_pm_opp *_find_key_ceil(struct device *dev, unsigned long *key,
		int index, bool available,
		unsigned long (*read)(struct dev_pm_opp *opp, int index),
		bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
	return _find_key(dev, key, index, available, read, _compare_ceil,
			 assert);
}

static struct dev_pm_opp *_find_key_floor(struct device *dev,
		unsigned long *key, int index, bool available,
		unsigned long (*read)(struct dev_pm_opp *opp, int index),
		bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
	return _find_key(dev, key, index, available, read, _compare_floor,
			 assert);
}

/**
 * dev_pm_opp_find_freq_exact() - search for an exact frequency
 * @dev: device for which we do this operation
 * @freq: frequency to search for
 * @available: true/false - match for available opp
 *
 * Return: Searches for exact match in the opp table and returns pointer to the
 * matching opp if found, else returns ERR_PTR in case of error and should
 * be handled using IS_ERR. Error return values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * Note: available is a modifier for the search. If available is true, then the
 * match is for an exact matching frequency which is available in the stored OPP
 * table. If false, the match is for an exact frequency which is not available.
 *
 * This provides a mechanism to enable an opp which is not available currently
 * or the opposite as well.
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
					      unsigned long freq, bool available)
{
	return _find_key_exact(dev, freq, 0, available, _read_freq,
			       assert_single_clk);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
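/*
 * Example: the @available modifier lets a driver look up an OPP that is
 * currently disabled, e.g. to re-enable it. A minimal sketch (the 1.2 GHz
 * rate is hypothetical; dev_pm_opp_enable() is provided elsewhere by the
 * OPP core):
 *
 *	opp = dev_pm_opp_find_freq_exact(dev, 1200000000, false);
 *	if (!IS_ERR(opp)) {
 *		dev_pm_opp_put(opp);
 *		dev_pm_opp_enable(dev, 1200000000);
 *	}
 */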
/**
 * dev_pm_opp_find_freq_exact_indexed() - Search for an exact freq for the
 *					  clock corresponding to the index
 * @dev: Device for which we do this operation
 * @freq: frequency to search for
 * @index: Clock index
 * @available: true/false - match for available opp
 *
 * Search for the matching exact OPP for the clock corresponding to the
 * specified index from a starting freq for a device.
 *
 * Return: matching *opp, else returns ERR_PTR in case of error and should be
 * handled using IS_ERR. Error return values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *
dev_pm_opp_find_freq_exact_indexed(struct device *dev, unsigned long freq,
				   u32 index, bool available)
{
	return _find_key_exact(dev, freq, index, available, _read_freq,
			       assert_clk_index);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact_indexed);

static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
						   unsigned long *freq)
{
	return _opp_table_find_key_ceil(opp_table, freq, 0, true, _read_freq,
					assert_single_clk);
}

/**
 * dev_pm_opp_find_freq_ceil() - Search for a rounded ceil freq
 * @dev: device for which we do this operation
 * @freq: Start frequency
 *
 * Search for the matching ceil *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
					     unsigned long *freq)
{
	return _find_key_ceil(dev, freq, 0, true, _read_freq, assert_single_clk);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
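/*
 * Example: iterating over all available OPPs in ascending order with the
 * ceil lookup, as cpufreq/devfreq drivers commonly do. A minimal sketch:
 *
 *	unsigned long freq = 0;
 *	struct dev_pm_opp *opp;
 *
 *	while (!IS_ERR(opp = dev_pm_opp_find_freq_ceil(dev, &freq))) {
 *		// 'freq' now holds the matched OPP's rate
 *		dev_pm_opp_put(opp);
 *		freq++;		// step past the current OPP
 *	}
 */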
/**
 * dev_pm_opp_find_freq_ceil_indexed() - Search for a rounded ceil freq for the
 *					 clock corresponding to the index
 * @dev: Device for which we do this operation
 * @freq: Start frequency
 * @index: Clock index
 *
 * Search for the matching ceil *available* OPP for the clock corresponding to
 * the specified index from a starting freq for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *
dev_pm_opp_find_freq_ceil_indexed(struct device *dev, unsigned long *freq,
				  u32 index)
{
	return _find_key_ceil(dev, freq, index, true, _read_freq,
			      assert_clk_index);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_indexed);

/**
 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
 * @dev: device for which we do this operation
 * @freq: Start frequency
 *
 * Search for the matching floor *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
					      unsigned long *freq)
{
	return _find_key_floor(dev, freq, 0, true, _read_freq, assert_single_clk);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);

/**
 * dev_pm_opp_find_freq_floor_indexed() - Search for a rounded floor freq for the
 *					  clock corresponding to the index
 * @dev: Device for which we do this operation
 * @freq: Start frequency
 * @index: Clock index
 *
 * Search for the matching floor *available* OPP for the clock corresponding to
 * the specified index from a starting freq for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *
dev_pm_opp_find_freq_floor_indexed(struct device *dev, unsigned long *freq,
				   u32 index)
{
	return _find_key_floor(dev, freq, index, true, _read_freq, assert_clk_index);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor_indexed);
/**
 * dev_pm_opp_find_level_exact() - search for an exact level
 * @dev: device for which we do this operation
 * @level: level to search for
 *
 * Return: Searches for exact match in the opp table and returns pointer to the
 * matching opp if found, else returns ERR_PTR in case of error and should
 * be handled using IS_ERR. Error return values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
					       unsigned int level)
{
	return _find_key_exact(dev, level, 0, true, _read_level, NULL);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_exact);

/**
 * dev_pm_opp_find_level_ceil() - search for a rounded up level
 * @dev: device for which we do this operation
 * @level: level to search for
 *
 * Return: Searches for rounded up match in the opp table and returns pointer
 * to the matching opp if found, else returns ERR_PTR in case of error and
 * should be handled using IS_ERR. Error return values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
					      unsigned int *level)
{
	unsigned long temp = *level;
	struct dev_pm_opp *opp;

	opp = _find_key_ceil(dev, &temp, 0, true, _read_level, NULL);
	if (IS_ERR(opp))
		return opp;

	/* False match */
	if (temp == OPP_LEVEL_UNSET) {
		dev_err(dev, "%s: OPP levels aren't available\n", __func__);
		dev_pm_opp_put(opp);
		return ERR_PTR(-ENODEV);
	}

	*level = temp;
	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil);

/**
 * dev_pm_opp_find_level_floor() - Search for a rounded floor level
 * @dev: device for which we do this operation
 * @level: Start level
 *
 * Search for the matching floor *available* OPP from a starting level
 * for a device.
 *
 * Return: matching *opp and refreshes *level accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_level_floor(struct device *dev,
					       unsigned int *level)
{
	unsigned long temp = *level;
	struct dev_pm_opp *opp;

	opp = _find_key_floor(dev, &temp, 0, true, _read_level, NULL);
	*level = temp;
	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_floor);
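/*
 * Example: resolving a requested performance level to a real OPP. A
 * minimal sketch (the level value is hypothetical):
 *
 *	unsigned int level = 40;
 *	struct dev_pm_opp *opp;
 *
 *	opp = dev_pm_opp_find_level_ceil(dev, &level);
 *	if (IS_ERR(opp))
 *		return PTR_ERR(opp);
 *
 *	// 'level' now holds the matched OPP's level
 *	dev_pm_opp_put(opp);
 */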
/**
 * dev_pm_opp_find_bw_ceil() - Search for a rounded ceil bandwidth
 * @dev: device for which we do this operation
 * @bw: start bandwidth
 * @index: which bandwidth to compare, in case of OPPs with several values
 *
 * Search for the matching ceil *available* OPP from a starting bandwidth
 * for a device.
 *
 * Return: matching *opp and refreshes *bw accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev, unsigned int *bw,
					   int index)
{
	unsigned long temp = *bw;
	struct dev_pm_opp *opp;

	opp = _find_key_ceil(dev, &temp, index, true, _read_bw,
			     assert_bandwidth_index);
	*bw = temp;
	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_ceil);

/**
 * dev_pm_opp_find_bw_floor() - Search for a rounded floor bandwidth
 * @dev: device for which we do this operation
 * @bw: start bandwidth
 * @index: which bandwidth to compare, in case of OPPs with several values
 *
 * Search for the matching floor *available* OPP from a starting bandwidth
 * for a device.
 *
 * Return: matching *opp and refreshes *bw accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
					    unsigned int *bw, int index)
{
	unsigned long temp = *bw;
	struct dev_pm_opp *opp;

	opp = _find_key_floor(dev, &temp, index, true, _read_bw,
			      assert_bandwidth_index);
	*bw = temp;
	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_floor);

static int _set_opp_voltage(struct device *dev, struct regulator *reg,
			    struct dev_pm_opp_supply *supply)
{
	int ret;

	/* Regulator not available for device */
	if (IS_ERR(reg)) {
		dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
			PTR_ERR(reg));
		return 0;
	}

	dev_dbg(dev, "%s: voltages (uV): %lu %lu %lu\n", __func__,
		supply->u_volt_min, supply->u_volt, supply->u_volt_max);

	ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
					    supply->u_volt, supply->u_volt_max);
	if (ret)
		dev_err(dev, "%s: failed to set voltage (%lu %lu %lu uV): %d\n",
			__func__, supply->u_volt_min, supply->u_volt,
			supply->u_volt_max, ret);

	return ret;
}

static int
_opp_config_clk_single(struct device *dev, struct opp_table *opp_table,
		       struct dev_pm_opp *opp, void *data, bool scaling_down)
{
	unsigned long *target = data;
	unsigned long freq;
	int ret;

	/* One of target and opp must be available */
	if (target) {
		freq = *target;
	} else if (opp) {
		freq = opp->rates[0];
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	ret = clk_set_rate(opp_table->clk, freq);
	if (ret) {
		dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
			ret);
	} else {
		opp_table->current_rate_single_clk = freq;
	}

	return ret;
}

/*
 * Simple implementation for configuring multiple clocks. Configure clocks in
 * the order in which they are present in the array while scaling up.
 */
int dev_pm_opp_config_clks_simple(struct device *dev,
		struct opp_table *opp_table, struct dev_pm_opp *opp, void *data,
		bool scaling_down)
{
	int ret, i;

	if (scaling_down) {
		for (i = opp_table->clk_count - 1; i >= 0; i--) {
			ret = clk_set_rate(opp_table->clks[i], opp->rates[i]);
			if (ret) {
				dev_err(dev, "%s: failed to set clock rate: %d\n",
					__func__, ret);
				return ret;
			}
		}
	} else {
		for (i = 0; i < opp_table->clk_count; i++) {
			ret = clk_set_rate(opp_table->clks[i], opp->rates[i]);
			if (ret) {
				dev_err(dev, "%s: failed to set clock rate: %d\n",
					__func__, ret);
				return ret;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_config_clks_simple);
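/*
 * Example: a platform with multiple clocks can install
 * dev_pm_opp_config_clks_simple() (or a custom hook with the same
 * signature) as the table's config_clks callback. A minimal sketch,
 * assuming the dev_pm_opp_set_config() interface from <linux/pm_opp.h>
 * and hypothetical clock names:
 *
 *	struct dev_pm_opp_config config = {
 *		.clk_names = (const char *[]){ "core", "bus", NULL },
 *		.config_clks = dev_pm_opp_config_clks_simple,
 *	};
 *
 *	token = dev_pm_opp_set_config(dev, &config);
 *	if (token < 0)
 *		return token;
 */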
static int _opp_config_regulator_single(struct device *dev,
			struct dev_pm_opp *old_opp, struct dev_pm_opp *new_opp,
			struct regulator **regulators, unsigned int count)
{
	struct regulator *reg = regulators[0];
	int ret;

	/* This function only supports single regulator per device */
	if (WARN_ON(count > 1)) {
		dev_err(dev, "multiple regulators are not supported\n");
		return -EINVAL;
	}

	ret = _set_opp_voltage(dev, reg, new_opp->supplies);
	if (ret)
		return ret;

	/*
	 * Enable the regulator after setting its voltages, otherwise it breaks
	 * some boot-enabled regulators.
	 */
	if (unlikely(!new_opp->opp_table->enabled)) {
		ret = regulator_enable(reg);
		if (ret < 0)
			dev_warn(dev, "Failed to enable regulator: %d\n", ret);
	}

	return 0;
}

static int _set_opp_bw(const struct opp_table *opp_table,
		       struct dev_pm_opp *opp, struct device *dev)
{
	u32 avg, peak;
	int i, ret;

	if (!opp_table->paths)
		return 0;

	for (i = 0; i < opp_table->path_count; i++) {
		if (!opp) {
			avg = 0;
			peak = 0;
		} else {
			avg = opp->bandwidth[i].avg;
			peak = opp->bandwidth[i].peak;
		}
		ret = icc_set_bw(opp_table->paths[i], avg, peak);
		if (ret) {
			dev_err(dev, "Failed to %s bandwidth[%d]: %d\n",
				opp ? "set" : "remove", i, ret);
			return ret;
		}
	}

	return 0;
}

static int _set_opp_level(struct device *dev, struct dev_pm_opp *opp)
{
	unsigned int level = 0;
	int ret = 0;

	if (opp) {
		if (opp->level == OPP_LEVEL_UNSET)
			return 0;

		level = opp->level;
	}

	/* Request a new performance state through the device's PM domain. */
	ret = dev_pm_domain_set_performance_state(dev, level);
	if (ret)
		dev_err(dev, "Failed to set performance state %u (%d)\n", level,
			ret);

	return ret;
}
/* This is only called for PM domain for now */
static int _set_required_opps(struct device *dev, struct opp_table *opp_table,
			      struct dev_pm_opp *opp, bool up)
{
	struct device **devs = opp_table->required_devs;
	struct dev_pm_opp *required_opp;
	int index, target, delta, ret;

	if (!devs)
		return 0;

	/* required-opps not fully initialized yet */
	if (lazy_linking_pending(opp_table))
		return -EBUSY;

	/* Scaling up? Set required OPPs in normal order, else reverse */
	if (up) {
		index = 0;
		target = opp_table->required_opp_count;
		delta = 1;
	} else {
		index = opp_table->required_opp_count - 1;
		target = -1;
		delta = -1;
	}

	while (index != target) {
		if (devs[index]) {
			required_opp = opp ? opp->required_opps[index] : NULL;

			ret = _set_opp_level(devs[index], required_opp);
			if (ret)
				return ret;
		}

		index += delta;
	}

	return 0;
}

static void _find_current_opp(struct device *dev, struct opp_table *opp_table)
{
	struct dev_pm_opp *opp = ERR_PTR(-ENODEV);
	unsigned long freq;

	if (!IS_ERR(opp_table->clk)) {
		freq = clk_get_rate(opp_table->clk);
		opp = _find_freq_ceil(opp_table, &freq);
	}

	/*
	 * Unable to find the current OPP? Pick the first one from the list,
	 * since it is in ascending order; otherwise the rest of the code would
	 * need special checks to validate current_opp.
	 */
	if (IS_ERR(opp)) {
		mutex_lock(&opp_table->lock);
		opp = list_first_entry(&opp_table->opp_list, struct dev_pm_opp, node);
		dev_pm_opp_get(opp);
		mutex_unlock(&opp_table->lock);
	}

	opp_table->current_opp = opp;
}

static int _disable_opp_table(struct device *dev, struct opp_table *opp_table)
{
	int ret;

	if (!opp_table->enabled)
		return 0;

	/*
	 * Some drivers need to support cases where some platforms may
	 * have OPP table for the device, while others don't and
	 * opp_set_rate() just needs to behave like clk_set_rate().
	 */
	if (!_get_opp_count(opp_table))
		return 0;

	ret = _set_opp_bw(opp_table, NULL, dev);
	if (ret)
		return ret;

	if (opp_table->regulators)
		regulator_disable(opp_table->regulators[0]);

	ret = _set_opp_level(dev, NULL);
	if (ret)
		goto out;

	ret = _set_required_opps(dev, opp_table, NULL, false);

out:
	opp_table->enabled = false;
	return ret;
}
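/*
 * Ordering note for _set_opp() below: when scaling up, the required
 * OPPs, level, bandwidth and regulators are programmed before the clock
 * rate; when scaling down, the clock rate changes first and the same
 * steps follow in reverse order. This keeps supplies and performance
 * states sufficient for the currently programmed frequency at every
 * intermediate step.
 */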
static int _set_opp(struct device *dev, struct opp_table *opp_table,
		    struct dev_pm_opp *opp, void *clk_data, bool forced)
{
	struct dev_pm_opp *old_opp;
	int scaling_down, ret;

	if (unlikely(!opp))
		return _disable_opp_table(dev, opp_table);

	/* Find the currently set OPP if we don't know already */
	if (unlikely(!opp_table->current_opp))
		_find_current_opp(dev, opp_table);

	old_opp = opp_table->current_opp;

	/* Return early if nothing to do */
	if (!forced && old_opp == opp && opp_table->enabled) {
		dev_dbg_ratelimited(dev, "%s: OPPs are same, nothing to do\n", __func__);
		return 0;
	}

	dev_dbg(dev, "%s: switching OPP: Freq %lu -> %lu Hz, Level %u -> %u, Bw %u -> %u\n",
		__func__, old_opp->rates[0], opp->rates[0], old_opp->level,
		opp->level, old_opp->bandwidth ? old_opp->bandwidth[0].peak : 0,
		opp->bandwidth ? opp->bandwidth[0].peak : 0);

	scaling_down = _opp_compare_key(opp_table, old_opp, opp);
	if (scaling_down == -1)
		scaling_down = 0;

	/* Scaling up? Configure required OPPs before frequency */
	if (!scaling_down) {
		ret = _set_required_opps(dev, opp_table, opp, true);
		if (ret) {
			dev_err(dev, "Failed to set required opps: %d\n", ret);
			return ret;
		}

		ret = _set_opp_level(dev, opp);
		if (ret)
			return ret;

		ret = _set_opp_bw(opp_table, opp, dev);
		if (ret) {
			dev_err(dev, "Failed to set bw: %d\n", ret);
			return ret;
		}

		if (opp_table->config_regulators) {
			ret = opp_table->config_regulators(dev, old_opp, opp,
							   opp_table->regulators,
							   opp_table->regulator_count);
			if (ret) {
				dev_err(dev, "Failed to set regulator voltages: %d\n",
					ret);
				return ret;
			}
		}
	}

	if (opp_table->config_clks) {
		ret = opp_table->config_clks(dev, opp_table, opp, clk_data, scaling_down);
		if (ret)
			return ret;
	}

	/* Scaling down? Configure required OPPs after frequency */
	if (scaling_down) {
		if (opp_table->config_regulators) {
			ret = opp_table->config_regulators(dev, old_opp, opp,
							   opp_table->regulators,
							   opp_table->regulator_count);
			if (ret) {
				dev_err(dev, "Failed to set regulator voltages: %d\n",
					ret);
				return ret;
			}
		}

		ret = _set_opp_bw(opp_table, opp, dev);
		if (ret) {
			dev_err(dev, "Failed to set bw: %d\n", ret);
			return ret;
		}

		ret = _set_opp_level(dev, opp);
		if (ret)
			return ret;

		ret = _set_required_opps(dev, opp_table, opp, false);
		if (ret) {
			dev_err(dev, "Failed to set required opps: %d\n", ret);
			return ret;
		}
	}

	opp_table->enabled = true;
	dev_pm_opp_put(old_opp);

	/* Make sure current_opp doesn't get freed */
	dev_pm_opp_get(opp);
	opp_table->current_opp = opp;

	return ret;
}

/**
 * dev_pm_opp_set_rate() - Configure new OPP based on frequency
 * @dev: device for which we do this operation
 * @target_freq: frequency to achieve
 *
 * This configures the power-supplies to the levels specified by the OPP
 * corresponding to the target_freq, and programs the clock to a value <=
 * target_freq, as rounded by clk_round_rate(). A device wanting to run at the
 * fmax provided by an OPP should have already rounded target_freq to that
 * OPP's frequency.
 */
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
	struct opp_table *opp_table;
	unsigned long freq = 0, temp_freq;
	struct dev_pm_opp *opp = NULL;
	bool forced = false;
	int ret;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: device's opp table doesn't exist\n", __func__);
		return PTR_ERR(opp_table);
	}

	if (target_freq) {
		/*
		 * For IO devices which require an OPP on some platforms/SoCs
		 * while just needing to scale the clock on some others, we
		 * look for empty OPP tables with just a clock handle and
		 * scale only the clk. This makes dev_pm_opp_set_rate()
		 * equivalent to a clk_set_rate().
		 */
		if (!_get_opp_count(opp_table)) {
			ret = opp_table->config_clks(dev, opp_table, NULL,
						     &target_freq, false);
			goto put_opp_table;
		}

		freq = clk_round_rate(opp_table->clk, target_freq);
		if ((long)freq <= 0)
			freq = target_freq;

		/*
		 * The clock driver may support finer resolution of the
		 * frequencies than the OPP table; don't update the frequency
		 * we pass to clk_set_rate() here.
		 */
		temp_freq = freq;
		opp = _find_freq_ceil(opp_table, &temp_freq);
		if (IS_ERR(opp)) {
			ret = PTR_ERR(opp);
			dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
				__func__, freq, ret);
			goto put_opp_table;
		}

		/*
		 * An OPP entry specifies the highest frequency at which other
		 * properties of the OPP entry apply. Even if the new OPP is
		 * the same as the old one, we may still reach here for a
		 * different value of the frequency. In such a case, do not
		 * abort but configure the hardware to the desired frequency
		 * forcefully.
		 */
		forced = opp_table->current_rate_single_clk != freq;
	}

	ret = _set_opp(dev, opp_table, opp, &freq, forced);

	if (freq)
		dev_pm_opp_put(opp);

put_opp_table:
	dev_pm_opp_put_opp_table(opp_table);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
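/*
 * Example: typical caller flow for dev_pm_opp_set_rate(). A minimal
 * sketch (the target rate is hypothetical):
 *
 *	ret = dev_pm_opp_set_rate(dev, 800000000);
 *	if (ret)
 *		dev_err(dev, "failed to switch OPP: %d\n", ret);
 *
 * Passing a target_freq of 0 disables the OPP table for the device (see
 * _disable_opp_table() above).
 */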
/**
 * dev_pm_opp_set_opp() - Configure device for OPP
 * @dev: device for which we do this operation
 * @opp: OPP to set to
 *
 * This configures the device based on the properties of the OPP passed to this
 * routine.
 *
 * Return: 0 on success, a negative error number otherwise.
 */
int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp)
{
	struct opp_table *opp_table;
	int ret;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: device opp doesn't exist\n", __func__);
		return PTR_ERR(opp_table);
	}

	ret = _set_opp(dev, opp_table, opp, NULL, false);
	dev_pm_opp_put_opp_table(opp_table);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_opp);

/* OPP-dev Helpers */
static void _remove_opp_dev(struct opp_device *opp_dev,
			    struct opp_table *opp_table)
{
	opp_debug_unregister(opp_dev, opp_table);
	list_del(&opp_dev->node);
	kfree(opp_dev);
}

struct opp_device *_add_opp_dev(const struct device *dev,
				struct opp_table *opp_table)
{
	struct opp_device *opp_dev;

	opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
	if (!opp_dev)
		return NULL;

	/* Initialize opp-dev */
	opp_dev->dev = dev;

	mutex_lock(&opp_table->lock);
	list_add(&opp_dev->node, &opp_table->dev_list);
	mutex_unlock(&opp_table->lock);

	/* Create debugfs entries for the opp_table */
	opp_debug_register(opp_dev, opp_table);

	return opp_dev;
}
static struct opp_table *_allocate_opp_table(struct device *dev, int index)
{
	struct opp_table *opp_table;
	struct opp_device *opp_dev;
	int ret;

	/*
	 * Allocate a new OPP table. In the infrequent case where a new
	 * device needs to be added, we pay this penalty.
	 */
	opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
	if (!opp_table)
		return ERR_PTR(-ENOMEM);

	mutex_init(&opp_table->lock);
	INIT_LIST_HEAD(&opp_table->dev_list);
	INIT_LIST_HEAD(&opp_table->lazy);

	opp_table->clk = ERR_PTR(-ENODEV);

	/* Mark regulator count uninitialized */
	opp_table->regulator_count = -1;

	opp_dev = _add_opp_dev(dev, opp_table);
	if (!opp_dev) {
		ret = -ENOMEM;
		goto err;
	}

	_of_init_opp_table(opp_table, dev, index);

	/* Find interconnect path(s) for the device */
	ret = dev_pm_opp_of_find_icc_paths(dev, opp_table);
	if (ret) {
		if (ret == -EPROBE_DEFER)
			goto remove_opp_dev;

		dev_warn(dev, "%s: Error finding interconnect paths: %d\n",
			 __func__, ret);
	}

	BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head);
	INIT_LIST_HEAD(&opp_table->opp_list);
	kref_init(&opp_table->kref);

	return opp_table;

remove_opp_dev:
	_of_clear_opp_table(opp_table);
	_remove_opp_dev(opp_dev, opp_table);
	mutex_destroy(&opp_table->lock);
err:
	kfree(opp_table);
	return ERR_PTR(ret);
}

void _get_opp_table_kref(struct opp_table *opp_table)
{
	kref_get(&opp_table->kref);
}

static struct opp_table *_update_opp_table_clk(struct device *dev,
					       struct opp_table *opp_table,
					       bool getclk)
{
	int ret;

	/*
	 * Return early if we don't need to get clk or we have already done it
	 * earlier.
	 */
	if (!getclk || IS_ERR(opp_table) || !IS_ERR(opp_table->clk) ||
	    opp_table->clks)
		return opp_table;

	/* Find clk for the device */
	opp_table->clk = clk_get(dev, NULL);

	ret = PTR_ERR_OR_ZERO(opp_table->clk);
	if (!ret) {
		opp_table->config_clks = _opp_config_clk_single;
		opp_table->clk_count = 1;
		return opp_table;
	}

	if (ret == -ENOENT) {
		/*
		 * There are a few platforms which don't want the OPP core to
		 * manage the device's clock settings. In such cases neither
		 * the platform provides the clks explicitly to us, nor the DT
		 * contains a valid clk entry. The OPP nodes in DT may still
		 * contain "opp-hz" property though, which we need to parse and
		 * allow the platform to find an OPP based on freq later on.
		 *
		 * This is a simple solution to take care of such corner cases,
		 * i.e. make the clk_count 1, which lets us allocate space for
		 * frequency in opp->rates and also parse the entries in DT.
		 */
		opp_table->clk_count = 1;

		dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, ret);
		return opp_table;
	}

	dev_pm_opp_put_opp_table(opp_table);
	dev_err_probe(dev, ret, "Couldn't find clock\n");

	return ERR_PTR(ret);
}
/*
 * We need to make sure that the OPP table for a device doesn't get added twice,
 * if this routine gets called in parallel with the same device pointer.
 *
 * The simplest way to enforce that is to perform everything (find existing
 * table and if not found, create a new one) under the opp_table_lock, so only
 * one creator gets access to the same. But that expands the critical section
 * under the lock and may end up causing circular dependencies with frameworks
 * like debugfs, interconnect or clock framework as they may be direct or
 * indirect users of OPP core.
 *
 * For that reason we go for a slightly tricky implementation here, which uses
 * the opp_tables_busy flag to indicate if another creator is in the middle of
 * adding an OPP table and others should wait for it to finish.
 */
struct opp_table *_add_opp_table_indexed(struct device *dev, int index,
					 bool getclk)
{
	struct opp_table *opp_table;

again:
	mutex_lock(&opp_table_lock);

	opp_table = _find_opp_table_unlocked(dev);
	if (!IS_ERR(opp_table))
		goto unlock;

	/*
	 * The opp_tables list or an OPP table's dev_list is getting updated by
	 * another user, wait for it to finish.
	 */
	if (unlikely(opp_tables_busy)) {
		mutex_unlock(&opp_table_lock);
		cpu_relax();
		goto again;
	}

	opp_tables_busy = true;
	opp_table = _managed_opp(dev, index);

	/* Drop the lock to reduce the size of critical section */
	mutex_unlock(&opp_table_lock);

	if (opp_table) {
		if (!_add_opp_dev(dev, opp_table)) {
			dev_pm_opp_put_opp_table(opp_table);
			opp_table = ERR_PTR(-ENOMEM);
		}

		mutex_lock(&opp_table_lock);
	} else {
		opp_table = _allocate_opp_table(dev, index);

		mutex_lock(&opp_table_lock);
		if (!IS_ERR(opp_table))
			list_add(&opp_table->node, &opp_tables);
	}

	opp_tables_busy = false;

unlock:
	mutex_unlock(&opp_table_lock);

	return _update_opp_table_clk(dev, opp_table, getclk);
}

static struct opp_table *_add_opp_table(struct device *dev, bool getclk)
{
	return _add_opp_table_indexed(dev, 0, getclk);
}

struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
{
	return _find_opp_table(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table);

static void _opp_table_kref_release(struct kref *kref)
{
	struct opp_table *opp_table = container_of(kref, struct opp_table, kref);
	struct opp_device *opp_dev, *temp;
	int i;

	/* Drop the lock as soon as we can */
	list_del(&opp_table->node);
	mutex_unlock(&opp_table_lock);

	if (opp_table->current_opp)
		dev_pm_opp_put(opp_table->current_opp);

	_of_clear_opp_table(opp_table);

	/* Release automatically acquired single clk */
	if (!IS_ERR(opp_table->clk))
		clk_put(opp_table->clk);

	if (opp_table->paths) {
		for (i = 0; i < opp_table->path_count; i++)
			icc_put(opp_table->paths[i]);
		kfree(opp_table->paths);
	}

	WARN_ON(!list_empty(&opp_table->opp_list));

	list_for_each_entry_safe(opp_dev, temp, &opp_table->dev_list, node)
		_remove_opp_dev(opp_dev, opp_table);

	mutex_destroy(&opp_table->lock);
	kfree(opp_table);
}

void dev_pm_opp_put_opp_table(struct opp_table *opp_table)
{
	kref_put_mutex(&opp_table->kref, _opp_table_kref_release,
		       &opp_table_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_opp_table);

void _opp_free(struct dev_pm_opp *opp)
{
	kfree(opp);
}
static void _opp_kref_release(struct kref *kref)
{
	struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
	struct opp_table *opp_table = opp->opp_table;

	list_del(&opp->node);
	mutex_unlock(&opp_table->lock);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_REMOVE, opp);
	_of_clear_opp(opp_table, opp);
	opp_debug_remove_one(opp);
	kfree(opp);
}

void dev_pm_opp_get(struct dev_pm_opp *opp)
{
	kref_get(&opp->kref);
}

void dev_pm_opp_put(struct dev_pm_opp *opp)
{
	kref_put_mutex(&opp->kref, _opp_kref_release, &opp->opp_table->lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put);

/**
 * dev_pm_opp_remove() - Remove an OPP from OPP table
 * @dev: device for which we do this operation
 * @freq: OPP to remove with matching 'freq'
 *
 * This function removes an opp from the opp table.
 */
void dev_pm_opp_remove(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp *opp = NULL, *iter;
	struct opp_table *opp_table;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return;

	if (!assert_single_clk(opp_table, 0))
		goto put_table;

	mutex_lock(&opp_table->lock);

	list_for_each_entry(iter, &opp_table->opp_list, node) {
		if (iter->rates[0] == freq) {
			opp = iter;
			break;
		}
	}

	mutex_unlock(&opp_table->lock);

	if (opp) {
		dev_pm_opp_put(opp);

		/* Drop the reference taken by dev_pm_opp_add() */
		dev_pm_opp_put_opp_table(opp_table);
	} else {
		dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
			 __func__, freq);
	}

put_table:
	/* Drop the reference taken by _find_opp_table() */
	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);

static struct dev_pm_opp *_opp_get_next(struct opp_table *opp_table,
					bool dynamic)
{
	struct dev_pm_opp *opp = NULL, *temp;

	mutex_lock(&opp_table->lock);
	list_for_each_entry(temp, &opp_table->opp_list, node) {
		/*
		 * Refcount must be dropped only once for each OPP by the OPP
		 * core; do that with the help of the "removed" flag.
		 */
		if (!temp->removed && dynamic == temp->dynamic) {
			opp = temp;
			break;
		}
	}

	mutex_unlock(&opp_table->lock);
	return opp;
}

/*
 * Can't call dev_pm_opp_put() from under the lock as debugfs removal needs to
 * happen lockless to avoid circular dependency issues. This routine must be
 * called without the opp_table->lock held.
 */
static void _opp_remove_all(struct opp_table *opp_table, bool dynamic)
{
	struct dev_pm_opp *opp;

	while ((opp = _opp_get_next(opp_table, dynamic))) {
		opp->removed = true;
		dev_pm_opp_put(opp);

		/* Drop the references taken by dev_pm_opp_add() */
		if (dynamic)
			dev_pm_opp_put_opp_table(opp_table);
	}
}

bool _opp_remove_all_static(struct opp_table *opp_table)
{
	mutex_lock(&opp_table->lock);

	if (!opp_table->parsed_static_opps) {
		mutex_unlock(&opp_table->lock);
		return false;
	}

	if (--opp_table->parsed_static_opps) {
		mutex_unlock(&opp_table->lock);
		return true;
	}

	mutex_unlock(&opp_table->lock);

	_opp_remove_all(opp_table, false);
	return true;
}
/**
 * dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs
 * @dev: device for which we do this operation
 *
 * This function removes all dynamically created OPPs from the opp table.
 */
void dev_pm_opp_remove_all_dynamic(struct device *dev)
{
	struct opp_table *opp_table;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return;

	_opp_remove_all(opp_table, true);

	/* Drop the reference taken by _find_opp_table() */
	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic);

struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table)
{
	struct dev_pm_opp *opp;
	int supply_count, supply_size, icc_size, clk_size;

	/* Allocate space for at least one supply */
	supply_count = opp_table->regulator_count > 0 ?
			opp_table->regulator_count : 1;
	supply_size = sizeof(*opp->supplies) * supply_count;
	clk_size = sizeof(*opp->rates) * opp_table->clk_count;
	icc_size = sizeof(*opp->bandwidth) * opp_table->path_count;

	/* allocate new OPP node and supplies structures */
	opp = kzalloc(sizeof(*opp) + supply_size + clk_size + icc_size, GFP_KERNEL);
	if (!opp)
		return NULL;

	/* Put the supplies, bw and clock at the end of the OPP structure */
	opp->supplies = (struct dev_pm_opp_supply *)(opp + 1);

	opp->rates = (unsigned long *)(opp->supplies + supply_count);

	if (icc_size)
		opp->bandwidth = (struct dev_pm_opp_icc_bw *)(opp->rates + opp_table->clk_count);

	INIT_LIST_HEAD(&opp->node);

	opp->level = OPP_LEVEL_UNSET;

	return opp;
}

static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
					 struct opp_table *opp_table)
{
	struct regulator *reg;
	int i;

	if (!opp_table->regulators)
		return true;

	for (i = 0; i < opp_table->regulator_count; i++) {
		reg = opp_table->regulators[i];

		if (!regulator_is_supported_voltage(reg,
					opp->supplies[i].u_volt_min,
					opp->supplies[i].u_volt_max)) {
			pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
				__func__, opp->supplies[i].u_volt_min,
				opp->supplies[i].u_volt_max);
			return false;
		}
	}

	return true;
}

static int _opp_compare_rate(struct opp_table *opp_table,
			     struct dev_pm_opp *opp1, struct dev_pm_opp *opp2)
{
	int i;

	for (i = 0; i < opp_table->clk_count; i++) {
		if (opp1->rates[i] != opp2->rates[i])
			return opp1->rates[i] < opp2->rates[i] ? -1 : 1;
	}

	/* Same rates for both OPPs */
	return 0;
}

static int _opp_compare_bw(struct opp_table *opp_table, struct dev_pm_opp *opp1,
			   struct dev_pm_opp *opp2)
{
	int i;

	for (i = 0; i < opp_table->path_count; i++) {
		if (opp1->bandwidth[i].peak != opp2->bandwidth[i].peak)
			return opp1->bandwidth[i].peak < opp2->bandwidth[i].peak ? -1 : 1;
	}

	/* Same bw for both OPPs */
	return 0;
}
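/*
 * Example: _opp_compare_key() below composes the helpers above. For two
 * OPPs with rates {500 MHz} and {600 MHz}, the rate comparison alone
 * decides the order (-1); bandwidth and level are only consulted when
 * all earlier keys compare equal.
 */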

/*
 * Returns
 * 0: opp1 == opp2
 * 1: opp1 > opp2
 * -1: opp1 < opp2
 */
int _opp_compare_key(struct opp_table *opp_table, struct dev_pm_opp *opp1,
		     struct dev_pm_opp *opp2)
{
	int ret;

	ret = _opp_compare_rate(opp_table, opp1, opp2);
	if (ret)
		return ret;

	ret = _opp_compare_bw(opp_table, opp1, opp2);
	if (ret)
		return ret;

	if (opp1->level != opp2->level)
		return opp1->level < opp2->level ? -1 : 1;

	/* Duplicate OPPs */
	return 0;
}

static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp,
			     struct opp_table *opp_table,
			     struct list_head **head)
{
	struct dev_pm_opp *opp;
	int opp_cmp;

	/*
	 * Insert new OPP in order of increasing frequency and discard if
	 * already present.
	 *
	 * Need to use &opp_table->opp_list in the condition part of the 'for'
	 * loop, don't replace it with head otherwise it will become an infinite
	 * loop.
	 */
	list_for_each_entry(opp, &opp_table->opp_list, node) {
		opp_cmp = _opp_compare_key(opp_table, new_opp, opp);
		if (opp_cmp > 0) {
			*head = &opp->node;
			continue;
		}

		if (opp_cmp < 0)
			return 0;

		/* Duplicate OPPs */
		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
			 __func__, opp->rates[0], opp->supplies[0].u_volt,
			 opp->available, new_opp->rates[0],
			 new_opp->supplies[0].u_volt, new_opp->available);

		/* Should we compare voltages for all regulators here? */
		return opp->available &&
		       new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? -EBUSY : -EEXIST;
	}

	return 0;
}

void _required_opps_available(struct dev_pm_opp *opp, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (opp->required_opps[i]->available)
			continue;

		opp->available = false;
		pr_warn("%s: OPP not supported by required OPP %pOF (%lu)\n",
			__func__, opp->required_opps[i]->np, opp->rates[0]);
		return;
	}
}

/*
 * Returns:
 * 0: On success. An appropriate warning is printed for duplicate OPPs.
 * -EBUSY: For an OPP with the same freq/volt that is available. The callers of
 *  _opp_add() must return 0 if they receive -EBUSY from it. This is to make
 *  sure we don't print error messages unnecessarily if different parts of the
 *  kernel try to initialize the OPP table.
 * -EEXIST: For an OPP with the same freq but a different volt, or one that is
 *  unavailable. This should be considered an error by the callers of
 *  _opp_add().
 */
int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
	     struct opp_table *opp_table)
{
	struct list_head *head;
	int ret;

	mutex_lock(&opp_table->lock);
	head = &opp_table->opp_list;

	ret = _opp_is_duplicate(dev, new_opp, opp_table, &head);
	if (ret) {
		mutex_unlock(&opp_table->lock);
		return ret;
	}

	list_add(&new_opp->node, head);
	mutex_unlock(&opp_table->lock);

	new_opp->opp_table = opp_table;
	kref_init(&new_opp->kref);

	opp_debug_create_one(new_opp, opp_table);

	if (!_opp_supported_by_regulators(new_opp, opp_table)) {
		new_opp->available = false;
		dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
			 __func__, new_opp->rates[0]);
	}

	/* required-opps not fully initialized yet */
	if (lazy_linking_pending(opp_table))
		return 0;

	_required_opps_available(new_opp, opp_table->required_opp_count);

	return 0;
}

/**
 * _opp_add_v1() - Allocate an OPP based on v1 bindings.
 * @opp_table: OPP table
 * @dev: device for which we do this operation
 * @data: The OPP data for the OPP to add
 * @dynamic: Dynamically added OPPs.
 *
 * This function adds an opp definition to the opp table and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
 *
 * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
 * and freed by dev_pm_opp_of_remove_table.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
int _opp_add_v1(struct opp_table *opp_table, struct device *dev,
		struct dev_pm_opp_data *data, bool dynamic)
{
	struct dev_pm_opp *new_opp;
	unsigned long tol, u_volt = data->u_volt;
	int ret;

	if (!assert_single_clk(opp_table, 0))
		return -EINVAL;

	new_opp = _opp_allocate(opp_table);
	if (!new_opp)
		return -ENOMEM;

	/* populate the opp table */
	new_opp->rates[0] = data->freq;
	new_opp->level = data->level;
	new_opp->turbo = data->turbo;
	tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
	new_opp->supplies[0].u_volt = u_volt;
	new_opp->supplies[0].u_volt_min = u_volt - tol;
	new_opp->supplies[0].u_volt_max = u_volt + tol;
	new_opp->available = true;
	new_opp->dynamic = dynamic;

	ret = _opp_add(dev, new_opp, opp_table);
	if (ret) {
		/* Don't return error for duplicate OPPs */
		if (ret == -EBUSY)
			ret = 0;
		goto free_opp;
	}

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp);
	return 0;

free_opp:
	_opp_free(new_opp);

	return ret;
}

/*
 * This is required only for the V2 bindings, and it enables a platform to
 * specify the hierarchy of versions it supports. OPP layer will then enable
 * OPPs, which are available for those versions, based on its 'opp-supported-hw'
 * property.
 */
static int _opp_set_supported_hw(struct opp_table *opp_table,
				 const u32 *versions, unsigned int count)
{
	/* Another CPU that shares the OPP table has set the property? */
	if (opp_table->supported_hw)
		return 0;

	opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
					  GFP_KERNEL);
	if (!opp_table->supported_hw)
		return -ENOMEM;

	opp_table->supported_hw_count = count;

	return 0;
}

static void _opp_put_supported_hw(struct opp_table *opp_table)
{
	if (opp_table->supported_hw) {
		kfree(opp_table->supported_hw);
		opp_table->supported_hw = NULL;
		opp_table->supported_hw_count = 0;
	}
}
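
/*
 * Usage sketch (illustrative only): a platform driver would typically pass the
 * versions it supports through dev_pm_opp_set_config() rather than call the
 * internal helper above. The version values here are hypothetical.
 */
#if 0
static int example_set_supported_hw(struct device *dev)
{
	static const u32 hw_versions[] = { 0x2, 0x1 };	/* hypothetical */
	struct dev_pm_opp_config config = {
		.supported_hw = hw_versions,
		.supported_hw_count = ARRAY_SIZE(hw_versions),
	};

	/* OPPs whose opp-supported-hw doesn't match will stay disabled */
	return devm_pm_opp_set_config(dev, &config);
}
#endif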

/*
 * This is required only for the V2 bindings, and it enables a platform to
 * specify the extension to be used for certain property names. The properties
 * to which the extension will apply are opp-microvolt and opp-microamp. OPP
 * core should postfix the property name with -<name> while looking for them.
 */
static int _opp_set_prop_name(struct opp_table *opp_table, const char *name)
{
	/* Another CPU that shares the OPP table has set the property? */
	if (!opp_table->prop_name) {
		opp_table->prop_name = kstrdup(name, GFP_KERNEL);
		if (!opp_table->prop_name)
			return -ENOMEM;
	}

	return 0;
}

static void _opp_put_prop_name(struct opp_table *opp_table)
{
	if (opp_table->prop_name) {
		kfree(opp_table->prop_name);
		opp_table->prop_name = NULL;
	}
}

/*
 * In order to support OPP switching, OPP layer needs to know the name of the
 * device's regulators, as the core would be required to switch voltages as
 * well.
 *
 * This must be called before any OPPs are initialized for the device.
 */
static int _opp_set_regulators(struct opp_table *opp_table, struct device *dev,
			       const char * const names[])
{
	const char * const *temp = names;
	struct regulator *reg;
	int count = 0, ret, i;

	/* Count number of regulators */
	while (*temp++)
		count++;

	if (!count)
		return -EINVAL;

	/* Another CPU that shares the OPP table has set the regulators? */
	if (opp_table->regulators)
		return 0;

	opp_table->regulators = kmalloc_array(count,
					      sizeof(*opp_table->regulators),
					      GFP_KERNEL);
	if (!opp_table->regulators)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		reg = regulator_get_optional(dev, names[i]);
		if (IS_ERR(reg)) {
			ret = dev_err_probe(dev, PTR_ERR(reg),
					    "%s: no regulator (%s) found\n",
					    __func__, names[i]);
			goto free_regulators;
		}

		opp_table->regulators[i] = reg;
	}

	opp_table->regulator_count = count;

	/* Set generic config_regulators() for single regulators here */
	if (count == 1)
		opp_table->config_regulators = _opp_config_regulator_single;

	return 0;

free_regulators:
	while (i != 0)
		regulator_put(opp_table->regulators[--i]);

	kfree(opp_table->regulators);
	opp_table->regulators = NULL;
	opp_table->regulator_count = -1;

	return ret;
}

static void _opp_put_regulators(struct opp_table *opp_table)
{
	int i;

	if (!opp_table->regulators)
		return;

	if (opp_table->enabled) {
		for (i = opp_table->regulator_count - 1; i >= 0; i--)
			regulator_disable(opp_table->regulators[i]);
	}

	for (i = opp_table->regulator_count - 1; i >= 0; i--)
		regulator_put(opp_table->regulators[i]);

	kfree(opp_table->regulators);
	opp_table->regulators = NULL;
	opp_table->regulator_count = -1;
}

static void _put_clks(struct opp_table *opp_table, int count)
{
	int i;

	for (i = count - 1; i >= 0; i--)
		clk_put(opp_table->clks[i]);

	kfree(opp_table->clks);
	opp_table->clks = NULL;
}
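
/*
 * Usage sketch (illustrative only): supplies are also configured through
 * dev_pm_opp_set_config(); the name list must be NULL terminated. The
 * "vdd-core" supply name is hypothetical.
 */
#if 0
static int example_set_regulators(struct device *dev)
{
	static const char * const reg_names[] = { "vdd-core", NULL };
	struct dev_pm_opp_config config = {
		.regulator_names = reg_names,
	};

	return devm_pm_opp_set_config(dev, &config);
}
#endif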

/*
 * In order to support OPP switching, OPP layer needs to get pointers to the
 * clocks for the device. Simple cases work fine without using this routine
 * (i.e. by passing connection-id as NULL), but for a device with multiple
 * clocks available, the OPP core needs to know the exact names of the clks to
 * use.
 *
 * This must be called before any OPPs are initialized for the device.
 */
static int _opp_set_clknames(struct opp_table *opp_table, struct device *dev,
			     const char * const names[],
			     config_clks_t config_clks)
{
	const char * const *temp = names;
	int count = 0, ret, i;
	struct clk *clk;

	/* Count number of clks */
	while (*temp++)
		count++;

	/*
	 * This is a special case where we have a single clock, whose connection
	 * id name is NULL, i.e. the first two entries are NULL in the array.
	 */
	if (!count && !names[1])
		count = 1;

	/* Fail early for invalid configurations */
	if (!count || (!config_clks && count > 1))
		return -EINVAL;

	/* Another CPU that shares the OPP table has set the clkname? */
	if (opp_table->clks)
		return 0;

	opp_table->clks = kmalloc_array(count, sizeof(*opp_table->clks),
					GFP_KERNEL);
	if (!opp_table->clks)
		return -ENOMEM;

	/* Find clks for the device */
	for (i = 0; i < count; i++) {
		clk = clk_get(dev, names[i]);
		if (IS_ERR(clk)) {
			ret = dev_err_probe(dev, PTR_ERR(clk),
					    "%s: Couldn't find clock with name: %s\n",
					    __func__, names[i]);
			goto free_clks;
		}

		opp_table->clks[i] = clk;
	}

	opp_table->clk_count = count;
	opp_table->config_clks = config_clks;

	/* Set the generic single-clk helper here */
	if (count == 1) {
		if (!opp_table->config_clks)
			opp_table->config_clks = _opp_config_clk_single;

		/*
		 * We could have just dropped the "clk" field and used "clks"
		 * everywhere. Instead we kept the "clk" field around for the
		 * following reasons:
		 *
		 * - avoiding clks[0] everywhere else.
		 * - not running single clk helpers for the multiple clk
		 *   usecase by mistake.
		 *
		 * Since this is the single-clk case, just update the clk
		 * pointer too.
		 */
		opp_table->clk = opp_table->clks[0];
	}

	return 0;

free_clks:
	_put_clks(opp_table, i);
	return ret;
}

static void _opp_put_clknames(struct opp_table *opp_table)
{
	if (!opp_table->clks)
		return;

	opp_table->config_clks = NULL;
	opp_table->clk = ERR_PTR(-ENODEV);

	_put_clks(opp_table, opp_table->clk_count);
}
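
/*
 * Usage sketch (illustrative only): with more than one clock, a custom
 * config_clks callback must be supplied as well. The connection ids and the
 * callback below are hypothetical; the callback follows config_clks_t.
 */
#if 0
static int example_config_clks(struct device *dev, struct opp_table *opp_table,
			       struct dev_pm_opp *opp, void *data,
			       bool scaling_down)
{
	/* order the clk_set_rate() calls as the platform requires */
	return 0;
}

static int example_set_clknames(struct device *dev)
{
	static const char * const clk_names[] = { "core", "mem", NULL };
	struct dev_pm_opp_config config = {
		.clk_names = clk_names,
		.config_clks = example_config_clks,	/* required for count > 1 */
	};

	return devm_pm_opp_set_config(dev, &config);
}
#endif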

/*
 * This is useful to support platforms with multiple regulators per device.
 *
 * This must be called before any OPPs are initialized for the device.
 */
static int _opp_set_config_regulators_helper(struct opp_table *opp_table,
		struct device *dev, config_regulators_t config_regulators)
{
	/* Another CPU that shares the OPP table has set the helper? */
	if (!opp_table->config_regulators)
		opp_table->config_regulators = config_regulators;

	return 0;
}

static void _opp_put_config_regulators_helper(struct opp_table *opp_table)
{
	if (opp_table->config_regulators)
		opp_table->config_regulators = NULL;
}

static int _opp_set_required_dev(struct opp_table *opp_table,
				 struct device *dev,
				 struct device *required_dev,
				 unsigned int index)
{
	struct opp_table *required_table, *pd_table;
	struct device *gdev;

	/* Genpd core takes care of propagation to parent genpd */
	if (opp_table->is_genpd) {
		dev_err(dev, "%s: Operation not supported for genpds\n", __func__);
		return -EOPNOTSUPP;
	}

	if (index >= opp_table->required_opp_count) {
		dev_err(dev, "Required OPPs not available, can't set required devs\n");
		return -EINVAL;
	}

	required_table = opp_table->required_opp_tables[index];
	if (IS_ERR(required_table)) {
		dev_err(dev, "Missing OPP table, unable to set the required devs\n");
		return -ENODEV;
	}

	/*
	 * The required_opp_tables parsing is not perfect, as the OPP core does
	 * the parsing solely based on the DT node pointers. The core sets the
	 * required_opp_tables entry to the first OPP table in the "opp_tables"
	 * list that matches with the node pointer.
	 *
	 * If the target DT OPP table is used by multiple devices and they all
	 * create separate instances of 'struct opp_table' from it, then it is
	 * possible that the required_opp_tables entry may be set to the
	 * incorrect sibling device.
	 *
	 * Cross check it again and fix if required.
	 */
	gdev = dev_to_genpd_dev(required_dev);
	if (IS_ERR(gdev))
		return PTR_ERR(gdev);

	pd_table = _find_opp_table(gdev);
	if (!IS_ERR(pd_table)) {
		if (pd_table != required_table) {
			dev_pm_opp_put_opp_table(required_table);
			opp_table->required_opp_tables[index] = pd_table;
		} else {
			dev_pm_opp_put_opp_table(pd_table);
		}
	}

	opp_table->required_devs[index] = required_dev;
	return 0;
}

static void _opp_put_required_dev(struct opp_table *opp_table,
				  unsigned int index)
{
	opp_table->required_devs[index] = NULL;
}

static void _opp_clear_config(struct opp_config_data *data)
{
	if (data->flags & OPP_CONFIG_REQUIRED_DEV)
		_opp_put_required_dev(data->opp_table,
				      data->required_dev_index);
	if (data->flags & OPP_CONFIG_REGULATOR)
		_opp_put_regulators(data->opp_table);
	if (data->flags & OPP_CONFIG_SUPPORTED_HW)
		_opp_put_supported_hw(data->opp_table);
	if (data->flags & OPP_CONFIG_REGULATOR_HELPER)
		_opp_put_config_regulators_helper(data->opp_table);
	if (data->flags & OPP_CONFIG_PROP_NAME)
		_opp_put_prop_name(data->opp_table);
	if (data->flags & OPP_CONFIG_CLK)
		_opp_put_clknames(data->opp_table);

	dev_pm_opp_put_opp_table(data->opp_table);
	kfree(data);
}
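
/*
 * Usage sketch (illustrative only): a consumer can pin the required-opps link
 * to a specific device, e.g. a virtual genpd device, through the config
 * interface. Both device pointers below are hypothetical.
 */
#if 0
static int example_set_required_dev(struct device *dev, struct device *pd_dev)
{
	struct dev_pm_opp_config config = {
		.required_dev = pd_dev,
		.required_dev_index = 0,	/* first required-opps entry */
	};

	return devm_pm_opp_set_config(dev, &config);
}
#endif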

/**
 * dev_pm_opp_set_config() - Set OPP configuration for the device.
 * @dev: Device for which configuration is being set.
 * @config: OPP configuration.
 *
 * This allows all device OPP configurations to be performed at once.
 *
 * This must be called before any OPPs are initialized for the device. This may
 * be called multiple times for the same OPP table, for example once for each
 * CPU that shares the same table. This must be balanced by the same number of
 * calls to dev_pm_opp_clear_config() in order to free the OPP table properly.
 *
 * This returns a token to the caller, which must be passed to
 * dev_pm_opp_clear_config() to free the resources later. The value of the
 * returned token will be >= 1 for success and negative for errors. The minimum
 * value of 1 is chosen here to make it easy for callers to manage the resource.
 */
int dev_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config)
{
	struct opp_table *opp_table;
	struct opp_config_data *data;
	unsigned int id;
	int ret;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	opp_table = _add_opp_table(dev, false);
	if (IS_ERR(opp_table)) {
		kfree(data);
		return PTR_ERR(opp_table);
	}

	data->opp_table = opp_table;
	data->flags = 0;

	/* This should be called before OPPs are initialized */
	if (WARN_ON(!list_empty(&opp_table->opp_list))) {
		ret = -EBUSY;
		goto err;
	}

	/* Configure clocks */
	if (config->clk_names) {
		ret = _opp_set_clknames(opp_table, dev, config->clk_names,
					config->config_clks);
		if (ret)
			goto err;

		data->flags |= OPP_CONFIG_CLK;
	} else if (config->config_clks) {
		/* Don't allow config callback without clocks */
		ret = -EINVAL;
		goto err;
	}

	/* Configure property names */
	if (config->prop_name) {
		ret = _opp_set_prop_name(opp_table, config->prop_name);
		if (ret)
			goto err;

		data->flags |= OPP_CONFIG_PROP_NAME;
	}

	/* Configure config_regulators helper */
	if (config->config_regulators) {
		ret = _opp_set_config_regulators_helper(opp_table, dev,
					config->config_regulators);
		if (ret)
			goto err;

		data->flags |= OPP_CONFIG_REGULATOR_HELPER;
	}

	/* Configure supported hardware */
	if (config->supported_hw) {
		ret = _opp_set_supported_hw(opp_table, config->supported_hw,
					    config->supported_hw_count);
		if (ret)
			goto err;

		data->flags |= OPP_CONFIG_SUPPORTED_HW;
	}

	/* Configure supplies */
	if (config->regulator_names) {
		ret = _opp_set_regulators(opp_table, dev,
					  config->regulator_names);
		if (ret)
			goto err;

		data->flags |= OPP_CONFIG_REGULATOR;
	}

	if (config->required_dev) {
		ret = _opp_set_required_dev(opp_table, dev,
					    config->required_dev,
					    config->required_dev_index);
		if (ret)
			goto err;

		data->required_dev_index = config->required_dev_index;
		data->flags |= OPP_CONFIG_REQUIRED_DEV;
	}

	ret = xa_alloc(&opp_configs, &id, data, XA_LIMIT(1, INT_MAX),
		       GFP_KERNEL);
	if (ret)
		goto err;

	return id;

err:
	_opp_clear_config(data);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_config);
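
/*
 * Usage sketch (illustrative only): the non-devm variant returns a token
 * (>= 1) that must be handed back to dev_pm_opp_clear_config(). The names and
 * the supply string are hypothetical.
 */
#if 0
static int example_token;

static int example_probe(struct device *dev)
{
	static const char * const reg_names[] = { "vdd-core", NULL };
	struct dev_pm_opp_config config = {
		.regulator_names = reg_names,
	};
	int token;

	token = dev_pm_opp_set_config(dev, &config);
	if (token < 0)
		return token;		/* negative value is an errno */

	example_token = token;
	return 0;
}

static void example_remove(struct device *dev)
{
	dev_pm_opp_clear_config(example_token);
}
#endif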

/**
 * dev_pm_opp_clear_config() - Releases resources blocked for OPP configuration.
 * @token: The token returned by dev_pm_opp_set_config() previously.
 *
 * This allows all device OPP configurations to be cleared at once. This must be
 * called once for each call made to dev_pm_opp_set_config(), in order to free
 * the OPPs properly.
 *
 * Currently the first call itself ends up freeing all the OPP configurations,
 * while the later ones only drop the OPP table reference. This works well for
 * now as we would never want to use a half-initialized OPP table and want to
 * remove the configurations together.
 */
void dev_pm_opp_clear_config(int token)
{
	struct opp_config_data *data;

	/*
	 * This lets the callers call this unconditionally and keep their code
	 * simple.
	 */
	if (unlikely(token <= 0))
		return;

	data = xa_erase(&opp_configs, token);
	if (WARN_ON(!data))
		return;

	_opp_clear_config(data);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_clear_config);

static void devm_pm_opp_config_release(void *token)
{
	dev_pm_opp_clear_config((unsigned long)token);
}

/**
 * devm_pm_opp_set_config() - Set OPP configuration for the device.
 * @dev: Device for which configuration is being set.
 * @config: OPP configuration.
 *
 * This allows all device OPP configurations to be performed at once.
 * This is a resource-managed variant of dev_pm_opp_set_config().
 *
 * Return: 0 on success and a negative errno otherwise.
 */
int devm_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config)
{
	int token = dev_pm_opp_set_config(dev, config);

	if (token < 0)
		return token;

	return devm_add_action_or_reset(dev, devm_pm_opp_config_release,
					(void *)((unsigned long)token));
}
EXPORT_SYMBOL_GPL(devm_pm_opp_set_config);

/**
 * dev_pm_opp_xlate_required_opp() - Find required OPP for @src_table OPP.
 * @src_table: OPP table which has @dst_table as one of its required OPP tables.
 * @dst_table: Required OPP table of the @src_table.
 * @src_opp: OPP from the @src_table.
 *
 * This function returns the OPP (present in @dst_table) pointed out by the
 * "required-opps" property of the @src_opp (present in @src_table).
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 *
 * Return: pointer to 'struct dev_pm_opp' on success and an error pointer
 * otherwise.
 */
struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table,
						 struct opp_table *dst_table,
						 struct dev_pm_opp *src_opp)
{
	struct dev_pm_opp *opp, *dest_opp = ERR_PTR(-ENODEV);
	int i;

	if (!src_table || !dst_table || !src_opp ||
	    !src_table->required_opp_tables)
		return ERR_PTR(-EINVAL);

	/* required-opps not fully initialized yet */
	if (lazy_linking_pending(src_table))
		return ERR_PTR(-EBUSY);

	for (i = 0; i < src_table->required_opp_count; i++) {
		if (src_table->required_opp_tables[i] == dst_table) {
			mutex_lock(&src_table->lock);

			list_for_each_entry(opp, &src_table->opp_list, node) {
				if (opp == src_opp) {
					dest_opp = opp->required_opps[i];
					dev_pm_opp_get(dest_opp);
					break;
				}
			}

			mutex_unlock(&src_table->lock);
			break;
		}
	}

	if (IS_ERR(dest_opp)) {
		pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__,
		       src_table, dst_table);
	}

	return dest_opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_xlate_required_opp);
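
/*
 * Usage sketch (illustrative only): translate an OPP into its required OPP and
 * drop the extra reference once done. All pointers here are hypothetical.
 */
#if 0
static int example_xlate_opp(struct opp_table *src_table,
			     struct opp_table *dst_table,
			     struct dev_pm_opp *src_opp)
{
	struct dev_pm_opp *dst_opp;

	dst_opp = dev_pm_opp_xlate_required_opp(src_table, dst_table, src_opp);
	if (IS_ERR(dst_opp))
		return PTR_ERR(dst_opp);

	/* ... use dst_opp ... */

	dev_pm_opp_put(dst_opp);	/* drop the reference taken above */
	return 0;
}
#endif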

/**
 * dev_pm_opp_xlate_performance_state() - Find required OPP's pstate for src_table.
 * @src_table: OPP table which has dst_table as one of its required OPP tables.
 * @dst_table: Required OPP table of the src_table.
 * @pstate: Current performance state of the src_table.
 *
 * This returns the pstate of the OPP (present in @dst_table) pointed out by the
 * "required-opps" property of the OPP (present in @src_table) which has
 * performance state set to @pstate.
 *
 * Return: Zero or positive performance state on success, otherwise negative
 * value on errors.
 */
int dev_pm_opp_xlate_performance_state(struct opp_table *src_table,
				       struct opp_table *dst_table,
				       unsigned int pstate)
{
	struct dev_pm_opp *opp;
	int dest_pstate = -EINVAL;
	int i;

	/*
	 * Normally the src_table will have the "required_opps" property set to
	 * point to one of the OPPs in the dst_table, but in some cases the
	 * genpd and its master have one to one mapping of performance states
	 * and so none of them have the "required-opps" property set. Return the
	 * pstate of the src_table as it is in such cases.
	 */
	if (!src_table || !src_table->required_opp_count)
		return pstate;

	/* Both OPP tables must belong to genpds */
	if (unlikely(!src_table->is_genpd || !dst_table->is_genpd)) {
		pr_err("%s: Performance state is only valid for genpds.\n", __func__);
		return -EINVAL;
	}

	/* required-opps not fully initialized yet */
	if (lazy_linking_pending(src_table))
		return -EBUSY;

	for (i = 0; i < src_table->required_opp_count; i++) {
		if (src_table->required_opp_tables[i]->np == dst_table->np)
			break;
	}

	if (unlikely(i == src_table->required_opp_count)) {
		pr_err("%s: Couldn't find matching OPP table (%p: %p)\n",
		       __func__, src_table, dst_table);
		return -EINVAL;
	}

	mutex_lock(&src_table->lock);

	list_for_each_entry(opp, &src_table->opp_list, node) {
		if (opp->level == pstate) {
			dest_pstate = opp->required_opps[i]->level;
			goto unlock;
		}
	}

	pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__, src_table,
	       dst_table);

unlock:
	mutex_unlock(&src_table->lock);

	return dest_pstate;
}
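
/*
 * Usage sketch (illustrative only): translate a genpd performance state across
 * linked OPP tables; a negative return is an errno. Pointers are hypothetical.
 */
#if 0
static int example_xlate_pstate(struct opp_table *src_table,
				struct opp_table *dst_table,
				unsigned int pstate)
{
	int dst_pstate;

	dst_pstate = dev_pm_opp_xlate_performance_state(src_table, dst_table,
							pstate);
	if (dst_pstate < 0)
		return dst_pstate;

	/* ... apply dst_pstate to the parent domain ... */
	return 0;
}
#endif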

/**
 * dev_pm_opp_add_dynamic() - Add an OPP table from a table definition
 * @dev: The device for which we do this operation
 * @data: The OPP data for the OPP to add
 *
 * This function adds an opp definition to the opp table and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
int dev_pm_opp_add_dynamic(struct device *dev, struct dev_pm_opp_data *data)
{
	struct opp_table *opp_table;
	int ret;

	opp_table = _add_opp_table(dev, true);
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	/* Fix regulator count for dynamic OPPs */
	opp_table->regulator_count = 1;

	ret = _opp_add_v1(opp_table, dev, data, true);
	if (ret)
		dev_pm_opp_put_opp_table(opp_table);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_add_dynamic);

/**
 * _opp_set_availability() - helper to set the availability of an opp
 * @dev: device for which we do this operation
 * @freq: OPP frequency to modify availability
 * @availability_req: availability status requested for this opp
 *
 * Set the availability of an OPP; opp_{enable,disable} share common logic,
 * which is isolated here.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory is available for the
 * copy operation, and 0 if no modification was needed or the modification was
 * successful.
 */
static int _opp_set_availability(struct device *dev, unsigned long freq,
				 bool availability_req)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV);
	int r = 0;

	/* Find the opp_table */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		r = PTR_ERR(opp_table);
		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		return r;
	}

	if (!assert_single_clk(opp_table, 0)) {
		r = -EINVAL;
		goto put_table;
	}

	mutex_lock(&opp_table->lock);

	/* Do we have the frequency? */
	list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
		if (tmp_opp->rates[0] == freq) {
			opp = tmp_opp;
			break;
		}
	}

	if (IS_ERR(opp)) {
		r = PTR_ERR(opp);
		goto unlock;
	}

	/* Is update really needed? */
	if (opp->available == availability_req)
		goto unlock;

	opp->available = availability_req;

	dev_pm_opp_get(opp);
	mutex_unlock(&opp_table->lock);

	/* Notify the change of the OPP availability */
	if (availability_req)
		blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE,
					     opp);
	else
		blocking_notifier_call_chain(&opp_table->head,
					     OPP_EVENT_DISABLE, opp);

	dev_pm_opp_put(opp);
	goto put_table;

unlock:
	mutex_unlock(&opp_table->lock);
put_table:
	dev_pm_opp_put_opp_table(opp_table);
	return r;
}
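
/*
 * Usage sketch (illustrative only): register a runtime-discovered OPP. The
 * 800 MHz / 900 mV values below are hypothetical.
 */
#if 0
static int example_add_opp(struct device *dev)
{
	struct dev_pm_opp_data data = {
		.freq = 800000000,	/* Hz */
		.u_volt = 900000,	/* uV */
	};

	return dev_pm_opp_add_dynamic(dev, &data);
}
#endif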

/**
 * dev_pm_opp_adjust_voltage() - helper to change the voltage of an OPP
 * @dev: device for which we do this operation
 * @freq: OPP frequency to adjust voltage of
 * @u_volt: new OPP target voltage
 * @u_volt_min: new OPP min voltage
 * @u_volt_max: new OPP max voltage
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory is available for the
 * copy operation, and 0 if no modification was needed or the modification was
 * successful.
 */
int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
			      unsigned long u_volt, unsigned long u_volt_min,
			      unsigned long u_volt_max)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV);
	int r = 0;

	/* Find the opp_table */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		r = PTR_ERR(opp_table);
		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		return r;
	}

	if (!assert_single_clk(opp_table, 0)) {
		r = -EINVAL;
		goto put_table;
	}

	mutex_lock(&opp_table->lock);

	/* Do we have the frequency? */
	list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
		if (tmp_opp->rates[0] == freq) {
			opp = tmp_opp;
			break;
		}
	}

	if (IS_ERR(opp)) {
		r = PTR_ERR(opp);
		goto adjust_unlock;
	}

	/* Is update really needed? */
	if (opp->supplies->u_volt == u_volt)
		goto adjust_unlock;

	opp->supplies->u_volt = u_volt;
	opp->supplies->u_volt_min = u_volt_min;
	opp->supplies->u_volt_max = u_volt_max;

	dev_pm_opp_get(opp);
	mutex_unlock(&opp_table->lock);

	/* Notify the voltage change of the OPP */
	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADJUST_VOLTAGE,
				     opp);

	dev_pm_opp_put(opp);
	goto put_table;

adjust_unlock:
	mutex_unlock(&opp_table->lock);
put_table:
	dev_pm_opp_put_opp_table(opp_table);
	return r;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_adjust_voltage);

/**
 * dev_pm_opp_sync_regulators() - Sync state of voltage regulators
 * @dev: device for which we do this operation
 *
 * Sync voltage state of the OPP table regulators.
 *
 * Return: 0 on success or a negative error value.
 */
int dev_pm_opp_sync_regulators(struct device *dev)
{
	struct opp_table *opp_table;
	struct regulator *reg;
	int i, ret = 0;

	/* Device may not have OPP table */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return 0;

	/* Regulator may not be required for the device */
	if (unlikely(!opp_table->regulators))
		goto put_table;

	/* Nothing to sync if voltage wasn't changed */
	if (!opp_table->enabled)
		goto put_table;

	for (i = 0; i < opp_table->regulator_count; i++) {
		reg = opp_table->regulators[i];
		ret = regulator_sync_voltage(reg);
		if (ret)
			break;
	}
put_table:
	/* Drop reference taken by _find_opp_table() */
	dev_pm_opp_put_opp_table(opp_table);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_sync_regulators);
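
/*
 * Usage sketch (illustrative only): re-target the 800 MHz OPP to 870 mV, for
 * example after post-silicon calibration. All values here are hypothetical.
 */
#if 0
static int example_adjust_voltage(struct device *dev)
{
	/* target 870000 uV, within an 850000..900000 uV window */
	return dev_pm_opp_adjust_voltage(dev, 800000000, 870000, 850000,
					 900000);
}
#endif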

/**
 * dev_pm_opp_enable() - Enable a specific OPP
 * @dev: device for which we do this operation
 * @freq: OPP frequency to enable
 *
 * Enables a provided opp. If the operation is valid, this returns 0, else the
 * corresponding error value. It is meant to be used by users to make an OPP
 * available again after it was temporarily made unavailable with
 * dev_pm_opp_disable.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory is available for the
 * copy operation, and 0 if no modification was needed or the modification was
 * successful.
 */
int dev_pm_opp_enable(struct device *dev, unsigned long freq)
{
	return _opp_set_availability(dev, freq, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_enable);

/**
 * dev_pm_opp_disable() - Disable a specific OPP
 * @dev: device for which we do this operation
 * @freq: OPP frequency to disable
 *
 * Disables a provided opp. If the operation is valid, this returns
 * 0, else the corresponding error value. It is meant to be a temporary
 * control by users to make this OPP not available until the circumstances are
 * right to make it available again (with a call to dev_pm_opp_enable).
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory is available for the
 * copy operation, and 0 if no modification was needed or the modification was
 * successful.
 */
int dev_pm_opp_disable(struct device *dev, unsigned long freq)
{
	return _opp_set_availability(dev, freq, false);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_disable);

/**
 * dev_pm_opp_register_notifier() - Register OPP notifier for the device
 * @dev: Device for which notifier needs to be registered
 * @nb: Notifier block to be registered
 *
 * Return: 0 on success or a negative error value.
 */
int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb)
{
	struct opp_table *opp_table;
	int ret;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	ret = blocking_notifier_chain_register(&opp_table->head, nb);

	dev_pm_opp_put_opp_table(opp_table);

	return ret;
}
EXPORT_SYMBOL(dev_pm_opp_register_notifier);

/**
 * dev_pm_opp_unregister_notifier() - Unregister OPP notifier for the device
 * @dev: Device for which notifier needs to be unregistered
 * @nb: Notifier block to be unregistered
 *
 * Return: 0 on success or a negative error value.
 */
int dev_pm_opp_unregister_notifier(struct device *dev,
				   struct notifier_block *nb)
{
	struct opp_table *opp_table;
	int ret;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	ret = blocking_notifier_chain_unregister(&opp_table->head, nb);

	dev_pm_opp_put_opp_table(opp_table);

	return ret;
}
EXPORT_SYMBOL(dev_pm_opp_unregister_notifier);

/**
 * dev_pm_opp_remove_table() - Free all OPPs associated with the device
 * @dev: device pointer used to lookup OPP table.
 *
 * Free both OPPs created using static entries present in DT and the
 * dynamically added entries.
 */
void dev_pm_opp_remove_table(struct device *dev)
{
	struct opp_table *opp_table;

	/* Check for existing table for 'dev' */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		int error = PTR_ERR(opp_table);

		if (error != -ENODEV)
			WARN(1, "%s: opp_table: %d\n",
			     IS_ERR_OR_NULL(dev) ?
					"Invalid device" : dev_name(dev),
			     error);
		return;
	}

	/*
	 * Drop the extra reference only if the OPP table was successfully added
	 * with dev_pm_opp_of_add_table() earlier.
	 */
	if (_opp_remove_all_static(opp_table))
		dev_pm_opp_put_opp_table(opp_table);

	/* Drop reference taken by _find_opp_table() */
	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);
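
/*
 * Usage sketch (illustrative only): an OPP notifier receives the affected OPP
 * as the callback data. The names below are hypothetical.
 */
#if 0
static int example_opp_notifier(struct notifier_block *nb,
				unsigned long event, void *data)
{
	struct dev_pm_opp *opp = data;

	switch (event) {
	case OPP_EVENT_ENABLE:
	case OPP_EVENT_DISABLE:
		pr_debug("availability changed for OPP at %lu Hz\n",
			 dev_pm_opp_get_freq(opp));
		break;
	case OPP_EVENT_ADJUST_VOLTAGE:
		pr_debug("voltage adjusted for OPP at %lu Hz\n",
			 dev_pm_opp_get_freq(opp));
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_opp_notifier,
};

/* Registered with: dev_pm_opp_register_notifier(dev, &example_nb); */
#endif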