1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Generic OPP Interface 4 * 5 * Copyright (C) 2009-2010 Texas Instruments Incorporated. 6 * Nishanth Menon 7 * Romit Dasgupta 8 * Kevin Hilman 9 */ 10 11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 12 13 #include <linux/clk.h> 14 #include <linux/errno.h> 15 #include <linux/err.h> 16 #include <linux/device.h> 17 #include <linux/export.h> 18 #include <linux/pm_domain.h> 19 #include <linux/regulator/consumer.h> 20 #include <linux/slab.h> 21 #include <linux/xarray.h> 22 23 #include "opp.h" 24 25 /* 26 * The root of the list of all opp-tables. All opp_table structures branch off 27 * from here, with each opp_table containing the list of opps it supports in 28 * various states of availability. 29 */ 30 LIST_HEAD(opp_tables); 31 32 /* Lock to allow exclusive modification to the device and opp lists */ 33 DEFINE_MUTEX(opp_table_lock); 34 /* Flag indicating that opp_tables list is being updated at the moment */ 35 static bool opp_tables_busy; 36 37 /* OPP ID allocator */ 38 static DEFINE_XARRAY_ALLOC1(opp_configs); 39 40 static bool _find_opp_dev(const struct device *dev, struct opp_table *opp_table) 41 { 42 struct opp_device *opp_dev; 43 bool found = false; 44 45 mutex_lock(&opp_table->lock); 46 list_for_each_entry(opp_dev, &opp_table->dev_list, node) 47 if (opp_dev->dev == dev) { 48 found = true; 49 break; 50 } 51 52 mutex_unlock(&opp_table->lock); 53 return found; 54 } 55 56 static struct opp_table *_find_opp_table_unlocked(struct device *dev) 57 { 58 struct opp_table *opp_table; 59 60 list_for_each_entry(opp_table, &opp_tables, node) { 61 if (_find_opp_dev(dev, opp_table)) { 62 _get_opp_table_kref(opp_table); 63 return opp_table; 64 } 65 } 66 67 return ERR_PTR(-ENODEV); 68 } 69 70 /** 71 * _find_opp_table() - find opp_table struct using device pointer 72 * @dev: device pointer used to lookup OPP table 73 * 74 * Search OPP table for one containing matching device. 75 * 76 * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or 77 * -EINVAL based on type of error. 78 * 79 * The callers must call dev_pm_opp_put_opp_table() after the table is used. 80 */ 81 struct opp_table *_find_opp_table(struct device *dev) 82 { 83 struct opp_table *opp_table; 84 85 if (IS_ERR_OR_NULL(dev)) { 86 pr_err("%s: Invalid parameters\n", __func__); 87 return ERR_PTR(-EINVAL); 88 } 89 90 mutex_lock(&opp_table_lock); 91 opp_table = _find_opp_table_unlocked(dev); 92 mutex_unlock(&opp_table_lock); 93 94 return opp_table; 95 } 96 97 /* 98 * Returns true if multiple clocks aren't there, else returns false with WARN. 99 * 100 * We don't force clk_count == 1 here as there are users who don't have a clock 101 * representation in the OPP table and manage the clock configuration themselves 102 * in an platform specific way. 103 */ 104 static bool assert_single_clk(struct opp_table *opp_table) 105 { 106 return !WARN_ON(opp_table->clk_count > 1); 107 } 108 109 /** 110 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp 111 * @opp: opp for which voltage has to be returned for 112 * 113 * Return: voltage in micro volt corresponding to the opp, else 114 * return 0 115 * 116 * This is useful only for devices with single power supply. 117 */ 118 unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) 119 { 120 if (IS_ERR_OR_NULL(opp)) { 121 pr_err("%s: Invalid parameters\n", __func__); 122 return 0; 123 } 124 125 return opp->supplies[0].u_volt; 126 } 127 EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage); 128 129 /** 130 * dev_pm_opp_get_supplies() - Gets the supply information corresponding to an opp 131 * @opp: opp for which voltage has to be returned for 132 * @supplies: Placeholder for copying the supply information. 133 * 134 * Return: negative error number on failure, 0 otherwise on success after 135 * setting @supplies. 136 * 137 * This can be used for devices with any number of power supplies. The caller 138 * must ensure the @supplies array must contain space for each regulator. 139 */ 140 int dev_pm_opp_get_supplies(struct dev_pm_opp *opp, 141 struct dev_pm_opp_supply *supplies) 142 { 143 if (IS_ERR_OR_NULL(opp) || !supplies) { 144 pr_err("%s: Invalid parameters\n", __func__); 145 return -EINVAL; 146 } 147 148 memcpy(supplies, opp->supplies, 149 sizeof(*supplies) * opp->opp_table->regulator_count); 150 return 0; 151 } 152 EXPORT_SYMBOL_GPL(dev_pm_opp_get_supplies); 153 154 /** 155 * dev_pm_opp_get_power() - Gets the power corresponding to an opp 156 * @opp: opp for which power has to be returned for 157 * 158 * Return: power in micro watt corresponding to the opp, else 159 * return 0 160 * 161 * This is useful only for devices with single power supply. 162 */ 163 unsigned long dev_pm_opp_get_power(struct dev_pm_opp *opp) 164 { 165 unsigned long opp_power = 0; 166 int i; 167 168 if (IS_ERR_OR_NULL(opp)) { 169 pr_err("%s: Invalid parameters\n", __func__); 170 return 0; 171 } 172 for (i = 0; i < opp->opp_table->regulator_count; i++) 173 opp_power += opp->supplies[i].u_watt; 174 175 return opp_power; 176 } 177 EXPORT_SYMBOL_GPL(dev_pm_opp_get_power); 178 179 /** 180 * dev_pm_opp_get_freq_indexed() - Gets the frequency corresponding to an 181 * available opp with specified index 182 * @opp: opp for which frequency has to be returned for 183 * @index: index of the frequency within the required opp 184 * 185 * Return: frequency in hertz corresponding to the opp with specified index, 186 * else return 0 187 */ 188 unsigned long dev_pm_opp_get_freq_indexed(struct dev_pm_opp *opp, u32 index) 189 { 190 if (IS_ERR_OR_NULL(opp) || index >= opp->opp_table->clk_count) { 191 pr_err("%s: Invalid parameters\n", __func__); 192 return 0; 193 } 194 195 return opp->rates[index]; 196 } 197 EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq_indexed); 198 199 /** 200 * dev_pm_opp_get_level() - Gets the level corresponding to an available opp 201 * @opp: opp for which level value has to be returned for 202 * 203 * Return: level read from device tree corresponding to the opp, else 204 * return 0. 205 */ 206 unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp) 207 { 208 if (IS_ERR_OR_NULL(opp) || !opp->available) { 209 pr_err("%s: Invalid parameters\n", __func__); 210 return 0; 211 } 212 213 return opp->level; 214 } 215 EXPORT_SYMBOL_GPL(dev_pm_opp_get_level); 216 217 /** 218 * dev_pm_opp_get_required_pstate() - Gets the required performance state 219 * corresponding to an available opp 220 * @opp: opp for which performance state has to be returned for 221 * @index: index of the required opp 222 * 223 * Return: performance state read from device tree corresponding to the 224 * required opp, else return 0. 225 */ 226 unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp, 227 unsigned int index) 228 { 229 if (IS_ERR_OR_NULL(opp) || !opp->available || 230 index >= opp->opp_table->required_opp_count) { 231 pr_err("%s: Invalid parameters\n", __func__); 232 return 0; 233 } 234 235 /* required-opps not fully initialized yet */ 236 if (lazy_linking_pending(opp->opp_table)) 237 return 0; 238 239 /* The required OPP table must belong to a genpd */ 240 if (unlikely(!opp->opp_table->required_opp_tables[index]->is_genpd)) { 241 pr_err("%s: Performance state is only valid for genpds.\n", __func__); 242 return 0; 243 } 244 245 return opp->required_opps[index]->level; 246 } 247 EXPORT_SYMBOL_GPL(dev_pm_opp_get_required_pstate); 248 249 /** 250 * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not 251 * @opp: opp for which turbo mode is being verified 252 * 253 * Turbo OPPs are not for normal use, and can be enabled (under certain 254 * conditions) for short duration of times to finish high throughput work 255 * quickly. Running on them for longer times may overheat the chip. 256 * 257 * Return: true if opp is turbo opp, else false. 258 */ 259 bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp) 260 { 261 if (IS_ERR_OR_NULL(opp) || !opp->available) { 262 pr_err("%s: Invalid parameters\n", __func__); 263 return false; 264 } 265 266 return opp->turbo; 267 } 268 EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo); 269 270 /** 271 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds 272 * @dev: device for which we do this operation 273 * 274 * Return: This function returns the max clock latency in nanoseconds. 275 */ 276 unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev) 277 { 278 struct opp_table *opp_table; 279 unsigned long clock_latency_ns; 280 281 opp_table = _find_opp_table(dev); 282 if (IS_ERR(opp_table)) 283 return 0; 284 285 clock_latency_ns = opp_table->clock_latency_ns_max; 286 287 dev_pm_opp_put_opp_table(opp_table); 288 289 return clock_latency_ns; 290 } 291 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency); 292 293 /** 294 * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds 295 * @dev: device for which we do this operation 296 * 297 * Return: This function returns the max voltage latency in nanoseconds. 298 */ 299 unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev) 300 { 301 struct opp_table *opp_table; 302 struct dev_pm_opp *opp; 303 struct regulator *reg; 304 unsigned long latency_ns = 0; 305 int ret, i, count; 306 struct { 307 unsigned long min; 308 unsigned long max; 309 } *uV; 310 311 opp_table = _find_opp_table(dev); 312 if (IS_ERR(opp_table)) 313 return 0; 314 315 /* Regulator may not be required for the device */ 316 if (!opp_table->regulators) 317 goto put_opp_table; 318 319 count = opp_table->regulator_count; 320 321 uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL); 322 if (!uV) 323 goto put_opp_table; 324 325 mutex_lock(&opp_table->lock); 326 327 for (i = 0; i < count; i++) { 328 uV[i].min = ~0; 329 uV[i].max = 0; 330 331 list_for_each_entry(opp, &opp_table->opp_list, node) { 332 if (!opp->available) 333 continue; 334 335 if (opp->supplies[i].u_volt_min < uV[i].min) 336 uV[i].min = opp->supplies[i].u_volt_min; 337 if (opp->supplies[i].u_volt_max > uV[i].max) 338 uV[i].max = opp->supplies[i].u_volt_max; 339 } 340 } 341 342 mutex_unlock(&opp_table->lock); 343 344 /* 345 * The caller needs to ensure that opp_table (and hence the regulator) 346 * isn't freed, while we are executing this routine. 347 */ 348 for (i = 0; i < count; i++) { 349 reg = opp_table->regulators[i]; 350 ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max); 351 if (ret > 0) 352 latency_ns += ret * 1000; 353 } 354 355 kfree(uV); 356 put_opp_table: 357 dev_pm_opp_put_opp_table(opp_table); 358 359 return latency_ns; 360 } 361 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency); 362 363 /** 364 * dev_pm_opp_get_max_transition_latency() - Get max transition latency in 365 * nanoseconds 366 * @dev: device for which we do this operation 367 * 368 * Return: This function returns the max transition latency, in nanoseconds, to 369 * switch from one OPP to other. 370 */ 371 unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev) 372 { 373 return dev_pm_opp_get_max_volt_latency(dev) + 374 dev_pm_opp_get_max_clock_latency(dev); 375 } 376 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency); 377 378 /** 379 * dev_pm_opp_get_suspend_opp_freq() - Get frequency of suspend opp in Hz 380 * @dev: device for which we do this operation 381 * 382 * Return: This function returns the frequency of the OPP marked as suspend_opp 383 * if one is available, else returns 0; 384 */ 385 unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev) 386 { 387 struct opp_table *opp_table; 388 unsigned long freq = 0; 389 390 opp_table = _find_opp_table(dev); 391 if (IS_ERR(opp_table)) 392 return 0; 393 394 if (opp_table->suspend_opp && opp_table->suspend_opp->available) 395 freq = dev_pm_opp_get_freq(opp_table->suspend_opp); 396 397 dev_pm_opp_put_opp_table(opp_table); 398 399 return freq; 400 } 401 EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp_freq); 402 403 int _get_opp_count(struct opp_table *opp_table) 404 { 405 struct dev_pm_opp *opp; 406 int count = 0; 407 408 mutex_lock(&opp_table->lock); 409 410 list_for_each_entry(opp, &opp_table->opp_list, node) { 411 if (opp->available) 412 count++; 413 } 414 415 mutex_unlock(&opp_table->lock); 416 417 return count; 418 } 419 420 /** 421 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table 422 * @dev: device for which we do this operation 423 * 424 * Return: This function returns the number of available opps if there are any, 425 * else returns 0 if none or the corresponding error value. 426 */ 427 int dev_pm_opp_get_opp_count(struct device *dev) 428 { 429 struct opp_table *opp_table; 430 int count; 431 432 opp_table = _find_opp_table(dev); 433 if (IS_ERR(opp_table)) { 434 count = PTR_ERR(opp_table); 435 dev_dbg(dev, "%s: OPP table not found (%d)\n", 436 __func__, count); 437 return count; 438 } 439 440 count = _get_opp_count(opp_table); 441 dev_pm_opp_put_opp_table(opp_table); 442 443 return count; 444 } 445 EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count); 446 447 /* Helpers to read keys */ 448 static unsigned long _read_freq(struct dev_pm_opp *opp, int index) 449 { 450 return opp->rates[index]; 451 } 452 453 static unsigned long _read_level(struct dev_pm_opp *opp, int index) 454 { 455 return opp->level; 456 } 457 458 static unsigned long _read_bw(struct dev_pm_opp *opp, int index) 459 { 460 return opp->bandwidth[index].peak; 461 } 462 463 /* Generic comparison helpers */ 464 static bool _compare_exact(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp, 465 unsigned long opp_key, unsigned long key) 466 { 467 if (opp_key == key) { 468 *opp = temp_opp; 469 return true; 470 } 471 472 return false; 473 } 474 475 static bool _compare_ceil(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp, 476 unsigned long opp_key, unsigned long key) 477 { 478 if (opp_key >= key) { 479 *opp = temp_opp; 480 return true; 481 } 482 483 return false; 484 } 485 486 static bool _compare_floor(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp, 487 unsigned long opp_key, unsigned long key) 488 { 489 if (opp_key > key) 490 return true; 491 492 *opp = temp_opp; 493 return false; 494 } 495 496 /* Generic key finding helpers */ 497 static struct dev_pm_opp *_opp_table_find_key(struct opp_table *opp_table, 498 unsigned long *key, int index, bool available, 499 unsigned long (*read)(struct dev_pm_opp *opp, int index), 500 bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp, 501 unsigned long opp_key, unsigned long key), 502 bool (*assert)(struct opp_table *opp_table)) 503 { 504 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); 505 506 /* Assert that the requirement is met */ 507 if (assert && !assert(opp_table)) 508 return ERR_PTR(-EINVAL); 509 510 mutex_lock(&opp_table->lock); 511 512 list_for_each_entry(temp_opp, &opp_table->opp_list, node) { 513 if (temp_opp->available == available) { 514 if (compare(&opp, temp_opp, read(temp_opp, index), *key)) 515 break; 516 } 517 } 518 519 /* Increment the reference count of OPP */ 520 if (!IS_ERR(opp)) { 521 *key = read(opp, index); 522 dev_pm_opp_get(opp); 523 } 524 525 mutex_unlock(&opp_table->lock); 526 527 return opp; 528 } 529 530 static struct dev_pm_opp * 531 _find_key(struct device *dev, unsigned long *key, int index, bool available, 532 unsigned long (*read)(struct dev_pm_opp *opp, int index), 533 bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp, 534 unsigned long opp_key, unsigned long key), 535 bool (*assert)(struct opp_table *opp_table)) 536 { 537 struct opp_table *opp_table; 538 struct dev_pm_opp *opp; 539 540 opp_table = _find_opp_table(dev); 541 if (IS_ERR(opp_table)) { 542 dev_err(dev, "%s: OPP table not found (%ld)\n", __func__, 543 PTR_ERR(opp_table)); 544 return ERR_CAST(opp_table); 545 } 546 547 opp = _opp_table_find_key(opp_table, key, index, available, read, 548 compare, assert); 549 550 dev_pm_opp_put_opp_table(opp_table); 551 552 return opp; 553 } 554 555 static struct dev_pm_opp *_find_key_exact(struct device *dev, 556 unsigned long key, int index, bool available, 557 unsigned long (*read)(struct dev_pm_opp *opp, int index), 558 bool (*assert)(struct opp_table *opp_table)) 559 { 560 /* 561 * The value of key will be updated here, but will be ignored as the 562 * caller doesn't need it. 563 */ 564 return _find_key(dev, &key, index, available, read, _compare_exact, 565 assert); 566 } 567 568 static struct dev_pm_opp *_opp_table_find_key_ceil(struct opp_table *opp_table, 569 unsigned long *key, int index, bool available, 570 unsigned long (*read)(struct dev_pm_opp *opp, int index), 571 bool (*assert)(struct opp_table *opp_table)) 572 { 573 return _opp_table_find_key(opp_table, key, index, available, read, 574 _compare_ceil, assert); 575 } 576 577 static struct dev_pm_opp *_find_key_ceil(struct device *dev, unsigned long *key, 578 int index, bool available, 579 unsigned long (*read)(struct dev_pm_opp *opp, int index), 580 bool (*assert)(struct opp_table *opp_table)) 581 { 582 return _find_key(dev, key, index, available, read, _compare_ceil, 583 assert); 584 } 585 586 static struct dev_pm_opp *_find_key_floor(struct device *dev, 587 unsigned long *key, int index, bool available, 588 unsigned long (*read)(struct dev_pm_opp *opp, int index), 589 bool (*assert)(struct opp_table *opp_table)) 590 { 591 return _find_key(dev, key, index, available, read, _compare_floor, 592 assert); 593 } 594 595 /** 596 * dev_pm_opp_find_freq_exact() - search for an exact frequency 597 * @dev: device for which we do this operation 598 * @freq: frequency to search for 599 * @available: true/false - match for available opp 600 * 601 * Return: Searches for exact match in the opp table and returns pointer to the 602 * matching opp if found, else returns ERR_PTR in case of error and should 603 * be handled using IS_ERR. Error return values can be: 604 * EINVAL: for bad pointer 605 * ERANGE: no match found for search 606 * ENODEV: if device not found in list of registered devices 607 * 608 * Note: available is a modifier for the search. if available=true, then the 609 * match is for exact matching frequency and is available in the stored OPP 610 * table. if false, the match is for exact frequency which is not available. 611 * 612 * This provides a mechanism to enable an opp which is not available currently 613 * or the opposite as well. 614 * 615 * The callers are required to call dev_pm_opp_put() for the returned OPP after 616 * use. 617 */ 618 struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, 619 unsigned long freq, bool available) 620 { 621 return _find_key_exact(dev, freq, 0, available, _read_freq, 622 assert_single_clk); 623 } 624 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact); 625 626 /** 627 * dev_pm_opp_find_freq_exact_indexed() - Search for an exact freq for the 628 * clock corresponding to the index 629 * @dev: Device for which we do this operation 630 * @freq: frequency to search for 631 * @index: Clock index 632 * @available: true/false - match for available opp 633 * 634 * Search for the matching exact OPP for the clock corresponding to the 635 * specified index from a starting freq for a device. 636 * 637 * Return: matching *opp , else returns ERR_PTR in case of error and should be 638 * handled using IS_ERR. Error return values can be: 639 * EINVAL: for bad pointer 640 * ERANGE: no match found for search 641 * ENODEV: if device not found in list of registered devices 642 * 643 * The callers are required to call dev_pm_opp_put() for the returned OPP after 644 * use. 645 */ 646 struct dev_pm_opp * 647 dev_pm_opp_find_freq_exact_indexed(struct device *dev, unsigned long freq, 648 u32 index, bool available) 649 { 650 return _find_key_exact(dev, freq, index, available, _read_freq, NULL); 651 } 652 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact_indexed); 653 654 static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table, 655 unsigned long *freq) 656 { 657 return _opp_table_find_key_ceil(opp_table, freq, 0, true, _read_freq, 658 assert_single_clk); 659 } 660 661 /** 662 * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq 663 * @dev: device for which we do this operation 664 * @freq: Start frequency 665 * 666 * Search for the matching ceil *available* OPP from a starting freq 667 * for a device. 668 * 669 * Return: matching *opp and refreshes *freq accordingly, else returns 670 * ERR_PTR in case of error and should be handled using IS_ERR. Error return 671 * values can be: 672 * EINVAL: for bad pointer 673 * ERANGE: no match found for search 674 * ENODEV: if device not found in list of registered devices 675 * 676 * The callers are required to call dev_pm_opp_put() for the returned OPP after 677 * use. 678 */ 679 struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, 680 unsigned long *freq) 681 { 682 return _find_key_ceil(dev, freq, 0, true, _read_freq, assert_single_clk); 683 } 684 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil); 685 686 /** 687 * dev_pm_opp_find_freq_ceil_indexed() - Search for a rounded ceil freq for the 688 * clock corresponding to the index 689 * @dev: Device for which we do this operation 690 * @freq: Start frequency 691 * @index: Clock index 692 * 693 * Search for the matching ceil *available* OPP for the clock corresponding to 694 * the specified index from a starting freq for a device. 695 * 696 * Return: matching *opp and refreshes *freq accordingly, else returns 697 * ERR_PTR in case of error and should be handled using IS_ERR. Error return 698 * values can be: 699 * EINVAL: for bad pointer 700 * ERANGE: no match found for search 701 * ENODEV: if device not found in list of registered devices 702 * 703 * The callers are required to call dev_pm_opp_put() for the returned OPP after 704 * use. 705 */ 706 struct dev_pm_opp * 707 dev_pm_opp_find_freq_ceil_indexed(struct device *dev, unsigned long *freq, 708 u32 index) 709 { 710 return _find_key_ceil(dev, freq, index, true, _read_freq, NULL); 711 } 712 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_indexed); 713 714 /** 715 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq 716 * @dev: device for which we do this operation 717 * @freq: Start frequency 718 * 719 * Search for the matching floor *available* OPP from a starting freq 720 * for a device. 721 * 722 * Return: matching *opp and refreshes *freq accordingly, else returns 723 * ERR_PTR in case of error and should be handled using IS_ERR. Error return 724 * values can be: 725 * EINVAL: for bad pointer 726 * ERANGE: no match found for search 727 * ENODEV: if device not found in list of registered devices 728 * 729 * The callers are required to call dev_pm_opp_put() for the returned OPP after 730 * use. 731 */ 732 struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, 733 unsigned long *freq) 734 { 735 return _find_key_floor(dev, freq, 0, true, _read_freq, assert_single_clk); 736 } 737 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor); 738 739 /** 740 * dev_pm_opp_find_freq_floor_indexed() - Search for a rounded floor freq for the 741 * clock corresponding to the index 742 * @dev: Device for which we do this operation 743 * @freq: Start frequency 744 * @index: Clock index 745 * 746 * Search for the matching floor *available* OPP for the clock corresponding to 747 * the specified index from a starting freq for a device. 748 * 749 * Return: matching *opp and refreshes *freq accordingly, else returns 750 * ERR_PTR in case of error and should be handled using IS_ERR. Error return 751 * values can be: 752 * EINVAL: for bad pointer 753 * ERANGE: no match found for search 754 * ENODEV: if device not found in list of registered devices 755 * 756 * The callers are required to call dev_pm_opp_put() for the returned OPP after 757 * use. 758 */ 759 struct dev_pm_opp * 760 dev_pm_opp_find_freq_floor_indexed(struct device *dev, unsigned long *freq, 761 u32 index) 762 { 763 return _find_key_floor(dev, freq, index, true, _read_freq, NULL); 764 } 765 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor_indexed); 766 767 /** 768 * dev_pm_opp_find_level_exact() - search for an exact level 769 * @dev: device for which we do this operation 770 * @level: level to search for 771 * 772 * Return: Searches for exact match in the opp table and returns pointer to the 773 * matching opp if found, else returns ERR_PTR in case of error and should 774 * be handled using IS_ERR. Error return values can be: 775 * EINVAL: for bad pointer 776 * ERANGE: no match found for search 777 * ENODEV: if device not found in list of registered devices 778 * 779 * The callers are required to call dev_pm_opp_put() for the returned OPP after 780 * use. 781 */ 782 struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev, 783 unsigned int level) 784 { 785 return _find_key_exact(dev, level, 0, true, _read_level, NULL); 786 } 787 EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_exact); 788 789 /** 790 * dev_pm_opp_find_level_ceil() - search for an rounded up level 791 * @dev: device for which we do this operation 792 * @level: level to search for 793 * 794 * Return: Searches for rounded up match in the opp table and returns pointer 795 * to the matching opp if found, else returns ERR_PTR in case of error and 796 * should be handled using IS_ERR. Error return values can be: 797 * EINVAL: for bad pointer 798 * ERANGE: no match found for search 799 * ENODEV: if device not found in list of registered devices 800 * 801 * The callers are required to call dev_pm_opp_put() for the returned OPP after 802 * use. 803 */ 804 struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev, 805 unsigned int *level) 806 { 807 unsigned long temp = *level; 808 struct dev_pm_opp *opp; 809 810 opp = _find_key_ceil(dev, &temp, 0, true, _read_level, NULL); 811 *level = temp; 812 return opp; 813 } 814 EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil); 815 816 /** 817 * dev_pm_opp_find_level_floor() - Search for a rounded floor level 818 * @dev: device for which we do this operation 819 * @level: Start level 820 * 821 * Search for the matching floor *available* OPP from a starting level 822 * for a device. 823 * 824 * Return: matching *opp and refreshes *level accordingly, else returns 825 * ERR_PTR in case of error and should be handled using IS_ERR. Error return 826 * values can be: 827 * EINVAL: for bad pointer 828 * ERANGE: no match found for search 829 * ENODEV: if device not found in list of registered devices 830 * 831 * The callers are required to call dev_pm_opp_put() for the returned OPP after 832 * use. 833 */ 834 struct dev_pm_opp *dev_pm_opp_find_level_floor(struct device *dev, 835 unsigned long *level) 836 { 837 return _find_key_floor(dev, level, 0, true, _read_level, NULL); 838 } 839 EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_floor); 840 841 /** 842 * dev_pm_opp_find_bw_ceil() - Search for a rounded ceil bandwidth 843 * @dev: device for which we do this operation 844 * @bw: start bandwidth 845 * @index: which bandwidth to compare, in case of OPPs with several values 846 * 847 * Search for the matching floor *available* OPP from a starting bandwidth 848 * for a device. 849 * 850 * Return: matching *opp and refreshes *bw accordingly, else returns 851 * ERR_PTR in case of error and should be handled using IS_ERR. Error return 852 * values can be: 853 * EINVAL: for bad pointer 854 * ERANGE: no match found for search 855 * ENODEV: if device not found in list of registered devices 856 * 857 * The callers are required to call dev_pm_opp_put() for the returned OPP after 858 * use. 859 */ 860 struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev, unsigned int *bw, 861 int index) 862 { 863 unsigned long temp = *bw; 864 struct dev_pm_opp *opp; 865 866 opp = _find_key_ceil(dev, &temp, index, true, _read_bw, NULL); 867 *bw = temp; 868 return opp; 869 } 870 EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_ceil); 871 872 /** 873 * dev_pm_opp_find_bw_floor() - Search for a rounded floor bandwidth 874 * @dev: device for which we do this operation 875 * @bw: start bandwidth 876 * @index: which bandwidth to compare, in case of OPPs with several values 877 * 878 * Search for the matching floor *available* OPP from a starting bandwidth 879 * for a device. 880 * 881 * Return: matching *opp and refreshes *bw accordingly, else returns 882 * ERR_PTR in case of error and should be handled using IS_ERR. Error return 883 * values can be: 884 * EINVAL: for bad pointer 885 * ERANGE: no match found for search 886 * ENODEV: if device not found in list of registered devices 887 * 888 * The callers are required to call dev_pm_opp_put() for the returned OPP after 889 * use. 890 */ 891 struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev, 892 unsigned int *bw, int index) 893 { 894 unsigned long temp = *bw; 895 struct dev_pm_opp *opp; 896 897 opp = _find_key_floor(dev, &temp, index, true, _read_bw, NULL); 898 *bw = temp; 899 return opp; 900 } 901 EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_floor); 902 903 static int _set_opp_voltage(struct device *dev, struct regulator *reg, 904 struct dev_pm_opp_supply *supply) 905 { 906 int ret; 907 908 /* Regulator not available for device */ 909 if (IS_ERR(reg)) { 910 dev_dbg(dev, "%s: regulator not available: %ld\n", __func__, 911 PTR_ERR(reg)); 912 return 0; 913 } 914 915 dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__, 916 supply->u_volt_min, supply->u_volt, supply->u_volt_max); 917 918 ret = regulator_set_voltage_triplet(reg, supply->u_volt_min, 919 supply->u_volt, supply->u_volt_max); 920 if (ret) 921 dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n", 922 __func__, supply->u_volt_min, supply->u_volt, 923 supply->u_volt_max, ret); 924 925 return ret; 926 } 927 928 static int 929 _opp_config_clk_single(struct device *dev, struct opp_table *opp_table, 930 struct dev_pm_opp *opp, void *data, bool scaling_down) 931 { 932 unsigned long *target = data; 933 unsigned long freq; 934 int ret; 935 936 /* One of target and opp must be available */ 937 if (target) { 938 freq = *target; 939 } else if (opp) { 940 freq = opp->rates[0]; 941 } else { 942 WARN_ON(1); 943 return -EINVAL; 944 } 945 946 ret = clk_set_rate(opp_table->clk, freq); 947 if (ret) { 948 dev_err(dev, "%s: failed to set clock rate: %d\n", __func__, 949 ret); 950 } else { 951 opp_table->rate_clk_single = freq; 952 } 953 954 return ret; 955 } 956 957 /* 958 * Simple implementation for configuring multiple clocks. Configure clocks in 959 * the order in which they are present in the array while scaling up. 960 */ 961 int dev_pm_opp_config_clks_simple(struct device *dev, 962 struct opp_table *opp_table, struct dev_pm_opp *opp, void *data, 963 bool scaling_down) 964 { 965 int ret, i; 966 967 if (scaling_down) { 968 for (i = opp_table->clk_count - 1; i >= 0; i--) { 969 ret = clk_set_rate(opp_table->clks[i], opp->rates[i]); 970 if (ret) { 971 dev_err(dev, "%s: failed to set clock rate: %d\n", __func__, 972 ret); 973 return ret; 974 } 975 } 976 } else { 977 for (i = 0; i < opp_table->clk_count; i++) { 978 ret = clk_set_rate(opp_table->clks[i], opp->rates[i]); 979 if (ret) { 980 dev_err(dev, "%s: failed to set clock rate: %d\n", __func__, 981 ret); 982 return ret; 983 } 984 } 985 } 986 987 return 0; 988 } 989 EXPORT_SYMBOL_GPL(dev_pm_opp_config_clks_simple); 990 991 static int _opp_config_regulator_single(struct device *dev, 992 struct dev_pm_opp *old_opp, struct dev_pm_opp *new_opp, 993 struct regulator **regulators, unsigned int count) 994 { 995 struct regulator *reg = regulators[0]; 996 int ret; 997 998 /* This function only supports single regulator per device */ 999 if (WARN_ON(count > 1)) { 1000 dev_err(dev, "multiple regulators are not supported\n"); 1001 return -EINVAL; 1002 } 1003 1004 ret = _set_opp_voltage(dev, reg, new_opp->supplies); 1005 if (ret) 1006 return ret; 1007 1008 /* 1009 * Enable the regulator after setting its voltages, otherwise it breaks 1010 * some boot-enabled regulators. 1011 */ 1012 if (unlikely(!new_opp->opp_table->enabled)) { 1013 ret = regulator_enable(reg); 1014 if (ret < 0) 1015 dev_warn(dev, "Failed to enable regulator: %d", ret); 1016 } 1017 1018 return 0; 1019 } 1020 1021 static int _set_opp_bw(const struct opp_table *opp_table, 1022 struct dev_pm_opp *opp, struct device *dev) 1023 { 1024 u32 avg, peak; 1025 int i, ret; 1026 1027 if (!opp_table->paths) 1028 return 0; 1029 1030 for (i = 0; i < opp_table->path_count; i++) { 1031 if (!opp) { 1032 avg = 0; 1033 peak = 0; 1034 } else { 1035 avg = opp->bandwidth[i].avg; 1036 peak = opp->bandwidth[i].peak; 1037 } 1038 ret = icc_set_bw(opp_table->paths[i], avg, peak); 1039 if (ret) { 1040 dev_err(dev, "Failed to %s bandwidth[%d]: %d\n", 1041 opp ? "set" : "remove", i, ret); 1042 return ret; 1043 } 1044 } 1045 1046 return 0; 1047 } 1048 1049 static int _set_performance_state(struct device *dev, struct device *pd_dev, 1050 struct dev_pm_opp *opp, int i) 1051 { 1052 unsigned int pstate = likely(opp) ? opp->required_opps[i]->level: 0; 1053 int ret; 1054 1055 if (!pd_dev) 1056 return 0; 1057 1058 ret = dev_pm_domain_set_performance_state(pd_dev, pstate); 1059 if (ret) { 1060 dev_err(dev, "Failed to set performance state of %s: %d (%d)\n", 1061 dev_name(pd_dev), pstate, ret); 1062 } 1063 1064 return ret; 1065 } 1066 1067 static int _opp_set_required_opps_generic(struct device *dev, 1068 struct opp_table *opp_table, struct dev_pm_opp *opp, bool scaling_down) 1069 { 1070 dev_err(dev, "setting required-opps isn't supported for non-genpd devices\n"); 1071 return -ENOENT; 1072 } 1073 1074 static int _opp_set_required_opps_genpd(struct device *dev, 1075 struct opp_table *opp_table, struct dev_pm_opp *opp, bool scaling_down) 1076 { 1077 struct device **genpd_virt_devs = 1078 opp_table->genpd_virt_devs ? opp_table->genpd_virt_devs : &dev; 1079 int index, target, delta, ret; 1080 1081 /* Scaling up? Set required OPPs in normal order, else reverse */ 1082 if (!scaling_down) { 1083 index = 0; 1084 target = opp_table->required_opp_count; 1085 delta = 1; 1086 } else { 1087 index = opp_table->required_opp_count - 1; 1088 target = -1; 1089 delta = -1; 1090 } 1091 1092 while (index != target) { 1093 ret = _set_performance_state(dev, genpd_virt_devs[index], opp, index); 1094 if (ret) 1095 return ret; 1096 1097 index += delta; 1098 } 1099 1100 return 0; 1101 } 1102 1103 /* This is only called for PM domain for now */ 1104 static int _set_required_opps(struct device *dev, struct opp_table *opp_table, 1105 struct dev_pm_opp *opp, bool up) 1106 { 1107 /* required-opps not fully initialized yet */ 1108 if (lazy_linking_pending(opp_table)) 1109 return -EBUSY; 1110 1111 if (opp_table->set_required_opps) 1112 return opp_table->set_required_opps(dev, opp_table, opp, up); 1113 1114 return 0; 1115 } 1116 1117 /* Update set_required_opps handler */ 1118 void _update_set_required_opps(struct opp_table *opp_table) 1119 { 1120 /* Already set */ 1121 if (opp_table->set_required_opps) 1122 return; 1123 1124 /* All required OPPs will belong to genpd or none */ 1125 if (opp_table->required_opp_tables[0]->is_genpd) 1126 opp_table->set_required_opps = _opp_set_required_opps_genpd; 1127 else 1128 opp_table->set_required_opps = _opp_set_required_opps_generic; 1129 } 1130 1131 static int _set_opp_level(struct device *dev, struct opp_table *opp_table, 1132 struct dev_pm_opp *opp) 1133 { 1134 unsigned int level = 0; 1135 int ret = 0; 1136 1137 if (opp) { 1138 if (!opp->level) 1139 return 0; 1140 1141 level = opp->level; 1142 } 1143 1144 /* Request a new performance state through the device's PM domain. */ 1145 ret = dev_pm_domain_set_performance_state(dev, level); 1146 if (ret) 1147 dev_err(dev, "Failed to set performance state %u (%d)\n", level, 1148 ret); 1149 1150 return ret; 1151 } 1152 1153 static void _find_current_opp(struct device *dev, struct opp_table *opp_table) 1154 { 1155 struct dev_pm_opp *opp = ERR_PTR(-ENODEV); 1156 unsigned long freq; 1157 1158 if (!IS_ERR(opp_table->clk)) { 1159 freq = clk_get_rate(opp_table->clk); 1160 opp = _find_freq_ceil(opp_table, &freq); 1161 } 1162 1163 /* 1164 * Unable to find the current OPP ? Pick the first from the list since 1165 * it is in ascending order, otherwise rest of the code will need to 1166 * make special checks to validate current_opp. 1167 */ 1168 if (IS_ERR(opp)) { 1169 mutex_lock(&opp_table->lock); 1170 opp = list_first_entry(&opp_table->opp_list, struct dev_pm_opp, node); 1171 dev_pm_opp_get(opp); 1172 mutex_unlock(&opp_table->lock); 1173 } 1174 1175 opp_table->current_opp = opp; 1176 } 1177 1178 static int _disable_opp_table(struct device *dev, struct opp_table *opp_table) 1179 { 1180 int ret; 1181 1182 if (!opp_table->enabled) 1183 return 0; 1184 1185 /* 1186 * Some drivers need to support cases where some platforms may 1187 * have OPP table for the device, while others don't and 1188 * opp_set_rate() just needs to behave like clk_set_rate(). 1189 */ 1190 if (!_get_opp_count(opp_table)) 1191 return 0; 1192 1193 ret = _set_opp_bw(opp_table, NULL, dev); 1194 if (ret) 1195 return ret; 1196 1197 if (opp_table->regulators) 1198 regulator_disable(opp_table->regulators[0]); 1199 1200 ret = _set_opp_level(dev, opp_table, NULL); 1201 if (ret) 1202 goto out; 1203 1204 ret = _set_required_opps(dev, opp_table, NULL, false); 1205 1206 out: 1207 opp_table->enabled = false; 1208 return ret; 1209 } 1210 1211 static int _set_opp(struct device *dev, struct opp_table *opp_table, 1212 struct dev_pm_opp *opp, void *clk_data, bool forced) 1213 { 1214 struct dev_pm_opp *old_opp; 1215 int scaling_down, ret; 1216 1217 if (unlikely(!opp)) 1218 return _disable_opp_table(dev, opp_table); 1219 1220 /* Find the currently set OPP if we don't know already */ 1221 if (unlikely(!opp_table->current_opp)) 1222 _find_current_opp(dev, opp_table); 1223 1224 old_opp = opp_table->current_opp; 1225 1226 /* Return early if nothing to do */ 1227 if (!forced && old_opp == opp && opp_table->enabled) { 1228 dev_dbg_ratelimited(dev, "%s: OPPs are same, nothing to do\n", __func__); 1229 return 0; 1230 } 1231 1232 dev_dbg(dev, "%s: switching OPP: Freq %lu -> %lu Hz, Level %u -> %u, Bw %u -> %u\n", 1233 __func__, old_opp->rates[0], opp->rates[0], old_opp->level, 1234 opp->level, old_opp->bandwidth ? old_opp->bandwidth[0].peak : 0, 1235 opp->bandwidth ? opp->bandwidth[0].peak : 0); 1236 1237 scaling_down = _opp_compare_key(opp_table, old_opp, opp); 1238 if (scaling_down == -1) 1239 scaling_down = 0; 1240 1241 /* Scaling up? Configure required OPPs before frequency */ 1242 if (!scaling_down) { 1243 ret = _set_required_opps(dev, opp_table, opp, true); 1244 if (ret) { 1245 dev_err(dev, "Failed to set required opps: %d\n", ret); 1246 return ret; 1247 } 1248 1249 ret = _set_opp_level(dev, opp_table, opp); 1250 if (ret) 1251 return ret; 1252 1253 ret = _set_opp_bw(opp_table, opp, dev); 1254 if (ret) { 1255 dev_err(dev, "Failed to set bw: %d\n", ret); 1256 return ret; 1257 } 1258 1259 if (opp_table->config_regulators) { 1260 ret = opp_table->config_regulators(dev, old_opp, opp, 1261 opp_table->regulators, 1262 opp_table->regulator_count); 1263 if (ret) { 1264 dev_err(dev, "Failed to set regulator voltages: %d\n", 1265 ret); 1266 return ret; 1267 } 1268 } 1269 } 1270 1271 if (opp_table->config_clks) { 1272 ret = opp_table->config_clks(dev, opp_table, opp, clk_data, scaling_down); 1273 if (ret) 1274 return ret; 1275 } 1276 1277 /* Scaling down? Configure required OPPs after frequency */ 1278 if (scaling_down) { 1279 if (opp_table->config_regulators) { 1280 ret = opp_table->config_regulators(dev, old_opp, opp, 1281 opp_table->regulators, 1282 opp_table->regulator_count); 1283 if (ret) { 1284 dev_err(dev, "Failed to set regulator voltages: %d\n", 1285 ret); 1286 return ret; 1287 } 1288 } 1289 1290 ret = _set_opp_bw(opp_table, opp, dev); 1291 if (ret) { 1292 dev_err(dev, "Failed to set bw: %d\n", ret); 1293 return ret; 1294 } 1295 1296 ret = _set_opp_level(dev, opp_table, opp); 1297 if (ret) 1298 return ret; 1299 1300 ret = _set_required_opps(dev, opp_table, opp, false); 1301 if (ret) { 1302 dev_err(dev, "Failed to set required opps: %d\n", ret); 1303 return ret; 1304 } 1305 } 1306 1307 opp_table->enabled = true; 1308 dev_pm_opp_put(old_opp); 1309 1310 /* Make sure current_opp doesn't get freed */ 1311 dev_pm_opp_get(opp); 1312 opp_table->current_opp = opp; 1313 1314 return ret; 1315 } 1316 1317 /** 1318 * dev_pm_opp_set_rate() - Configure new OPP based on frequency 1319 * @dev: device for which we do this operation 1320 * @target_freq: frequency to achieve 1321 * 1322 * This configures the power-supplies to the levels specified by the OPP 1323 * corresponding to the target_freq, and programs the clock to a value <= 1324 * target_freq, as rounded by clk_round_rate(). Device wanting to run at fmax 1325 * provided by the opp, should have already rounded to the target OPP's 1326 * frequency. 1327 */ 1328 int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) 1329 { 1330 struct opp_table *opp_table; 1331 unsigned long freq = 0, temp_freq; 1332 struct dev_pm_opp *opp = NULL; 1333 bool forced = false; 1334 int ret; 1335 1336 opp_table = _find_opp_table(dev); 1337 if (IS_ERR(opp_table)) { 1338 dev_err(dev, "%s: device's opp table doesn't exist\n", __func__); 1339 return PTR_ERR(opp_table); 1340 } 1341 1342 if (target_freq) { 1343 /* 1344 * For IO devices which require an OPP on some platforms/SoCs 1345 * while just needing to scale the clock on some others 1346 * we look for empty OPP tables with just a clock handle and 1347 * scale only the clk. This makes dev_pm_opp_set_rate() 1348 * equivalent to a clk_set_rate() 1349 */ 1350 if (!_get_opp_count(opp_table)) { 1351 ret = opp_table->config_clks(dev, opp_table, NULL, 1352 &target_freq, false); 1353 goto put_opp_table; 1354 } 1355 1356 freq = clk_round_rate(opp_table->clk, target_freq); 1357 if ((long)freq <= 0) 1358 freq = target_freq; 1359 1360 /* 1361 * The clock driver may support finer resolution of the 1362 * frequencies than the OPP table, don't update the frequency we 1363 * pass to clk_set_rate() here. 1364 */ 1365 temp_freq = freq; 1366 opp = _find_freq_ceil(opp_table, &temp_freq); 1367 if (IS_ERR(opp)) { 1368 ret = PTR_ERR(opp); 1369 dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n", 1370 __func__, freq, ret); 1371 goto put_opp_table; 1372 } 1373 1374 /* 1375 * An OPP entry specifies the highest frequency at which other 1376 * properties of the OPP entry apply. Even if the new OPP is 1377 * same as the old one, we may still reach here for a different 1378 * value of the frequency. In such a case, do not abort but 1379 * configure the hardware to the desired frequency forcefully. 1380 */ 1381 forced = opp_table->rate_clk_single != target_freq; 1382 } 1383 1384 ret = _set_opp(dev, opp_table, opp, &target_freq, forced); 1385 1386 if (target_freq) 1387 dev_pm_opp_put(opp); 1388 1389 put_opp_table: 1390 dev_pm_opp_put_opp_table(opp_table); 1391 return ret; 1392 } 1393 EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate); 1394 1395 /** 1396 * dev_pm_opp_set_opp() - Configure device for OPP 1397 * @dev: device for which we do this operation 1398 * @opp: OPP to set to 1399 * 1400 * This configures the device based on the properties of the OPP passed to this 1401 * routine. 1402 * 1403 * Return: 0 on success, a negative error number otherwise. 1404 */ 1405 int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp) 1406 { 1407 struct opp_table *opp_table; 1408 int ret; 1409 1410 opp_table = _find_opp_table(dev); 1411 if (IS_ERR(opp_table)) { 1412 dev_err(dev, "%s: device opp doesn't exist\n", __func__); 1413 return PTR_ERR(opp_table); 1414 } 1415 1416 ret = _set_opp(dev, opp_table, opp, NULL, false); 1417 dev_pm_opp_put_opp_table(opp_table); 1418 1419 return ret; 1420 } 1421 EXPORT_SYMBOL_GPL(dev_pm_opp_set_opp); 1422 1423 /* OPP-dev Helpers */ 1424 static void _remove_opp_dev(struct opp_device *opp_dev, 1425 struct opp_table *opp_table) 1426 { 1427 opp_debug_unregister(opp_dev, opp_table); 1428 list_del(&opp_dev->node); 1429 kfree(opp_dev); 1430 } 1431 1432 struct opp_device *_add_opp_dev(const struct device *dev, 1433 struct opp_table *opp_table) 1434 { 1435 struct opp_device *opp_dev; 1436 1437 opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL); 1438 if (!opp_dev) 1439 return NULL; 1440 1441 /* Initialize opp-dev */ 1442 opp_dev->dev = dev; 1443 1444 mutex_lock(&opp_table->lock); 1445 list_add(&opp_dev->node, &opp_table->dev_list); 1446 mutex_unlock(&opp_table->lock); 1447 1448 /* Create debugfs entries for the opp_table */ 1449 opp_debug_register(opp_dev, opp_table); 1450 1451 return opp_dev; 1452 } 1453 1454 static struct opp_table *_allocate_opp_table(struct device *dev, int index) 1455 { 1456 struct opp_table *opp_table; 1457 struct opp_device *opp_dev; 1458 int ret; 1459 1460 /* 1461 * Allocate a new OPP table. In the infrequent case where a new 1462 * device is needed to be added, we pay this penalty. 1463 */ 1464 opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL); 1465 if (!opp_table) 1466 return ERR_PTR(-ENOMEM); 1467 1468 mutex_init(&opp_table->lock); 1469 INIT_LIST_HEAD(&opp_table->dev_list); 1470 INIT_LIST_HEAD(&opp_table->lazy); 1471 1472 opp_table->clk = ERR_PTR(-ENODEV); 1473 1474 /* Mark regulator count uninitialized */ 1475 opp_table->regulator_count = -1; 1476 1477 opp_dev = _add_opp_dev(dev, opp_table); 1478 if (!opp_dev) { 1479 ret = -ENOMEM; 1480 goto err; 1481 } 1482 1483 _of_init_opp_table(opp_table, dev, index); 1484 1485 /* Find interconnect path(s) for the device */ 1486 ret = dev_pm_opp_of_find_icc_paths(dev, opp_table); 1487 if (ret) { 1488 if (ret == -EPROBE_DEFER) 1489 goto remove_opp_dev; 1490 1491 dev_warn(dev, "%s: Error finding interconnect paths: %d\n", 1492 __func__, ret); 1493 } 1494 1495 BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head); 1496 INIT_LIST_HEAD(&opp_table->opp_list); 1497 kref_init(&opp_table->kref); 1498 1499 return opp_table; 1500 1501 remove_opp_dev: 1502 _of_clear_opp_table(opp_table); 1503 _remove_opp_dev(opp_dev, opp_table); 1504 mutex_destroy(&opp_table->lock); 1505 err: 1506 kfree(opp_table); 1507 return ERR_PTR(ret); 1508 } 1509 1510 void _get_opp_table_kref(struct opp_table *opp_table) 1511 { 1512 kref_get(&opp_table->kref); 1513 } 1514 1515 static struct opp_table *_update_opp_table_clk(struct device *dev, 1516 struct opp_table *opp_table, 1517 bool getclk) 1518 { 1519 int ret; 1520 1521 /* 1522 * Return early if we don't need to get clk or we have already done it 1523 * earlier. 1524 */ 1525 if (!getclk || IS_ERR(opp_table) || !IS_ERR(opp_table->clk) || 1526 opp_table->clks) 1527 return opp_table; 1528 1529 /* Find clk for the device */ 1530 opp_table->clk = clk_get(dev, NULL); 1531 1532 ret = PTR_ERR_OR_ZERO(opp_table->clk); 1533 if (!ret) { 1534 opp_table->config_clks = _opp_config_clk_single; 1535 opp_table->clk_count = 1; 1536 return opp_table; 1537 } 1538 1539 if (ret == -ENOENT) { 1540 /* 1541 * There are few platforms which don't want the OPP core to 1542 * manage device's clock settings. In such cases neither the 1543 * platform provides the clks explicitly to us, nor the DT 1544 * contains a valid clk entry. The OPP nodes in DT may still 1545 * contain "opp-hz" property though, which we need to parse and 1546 * allow the platform to find an OPP based on freq later on. 1547 * 1548 * This is a simple solution to take care of such corner cases, 1549 * i.e. make the clk_count 1, which lets us allocate space for 1550 * frequency in opp->rates and also parse the entries in DT. 1551 */ 1552 opp_table->clk_count = 1; 1553 1554 dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, ret); 1555 return opp_table; 1556 } 1557 1558 dev_pm_opp_put_opp_table(opp_table); 1559 dev_err_probe(dev, ret, "Couldn't find clock\n"); 1560 1561 return ERR_PTR(ret); 1562 } 1563 1564 /* 1565 * We need to make sure that the OPP table for a device doesn't get added twice, 1566 * if this routine gets called in parallel with the same device pointer. 1567 * 1568 * The simplest way to enforce that is to perform everything (find existing 1569 * table and if not found, create a new one) under the opp_table_lock, so only 1570 * one creator gets access to the same. But that expands the critical section 1571 * under the lock and may end up causing circular dependencies with frameworks 1572 * like debugfs, interconnect or clock framework as they may be direct or 1573 * indirect users of OPP core. 1574 * 1575 * And for that reason we have to go for a bit tricky implementation here, which 1576 * uses the opp_tables_busy flag to indicate if another creator is in the middle 1577 * of adding an OPP table and others should wait for it to finish. 1578 */ 1579 struct opp_table *_add_opp_table_indexed(struct device *dev, int index, 1580 bool getclk) 1581 { 1582 struct opp_table *opp_table; 1583 1584 again: 1585 mutex_lock(&opp_table_lock); 1586 1587 opp_table = _find_opp_table_unlocked(dev); 1588 if (!IS_ERR(opp_table)) 1589 goto unlock; 1590 1591 /* 1592 * The opp_tables list or an OPP table's dev_list is getting updated by 1593 * another user, wait for it to finish. 1594 */ 1595 if (unlikely(opp_tables_busy)) { 1596 mutex_unlock(&opp_table_lock); 1597 cpu_relax(); 1598 goto again; 1599 } 1600 1601 opp_tables_busy = true; 1602 opp_table = _managed_opp(dev, index); 1603 1604 /* Drop the lock to reduce the size of critical section */ 1605 mutex_unlock(&opp_table_lock); 1606 1607 if (opp_table) { 1608 if (!_add_opp_dev(dev, opp_table)) { 1609 dev_pm_opp_put_opp_table(opp_table); 1610 opp_table = ERR_PTR(-ENOMEM); 1611 } 1612 1613 mutex_lock(&opp_table_lock); 1614 } else { 1615 opp_table = _allocate_opp_table(dev, index); 1616 1617 mutex_lock(&opp_table_lock); 1618 if (!IS_ERR(opp_table)) 1619 list_add(&opp_table->node, &opp_tables); 1620 } 1621 1622 opp_tables_busy = false; 1623 1624 unlock: 1625 mutex_unlock(&opp_table_lock); 1626 1627 return _update_opp_table_clk(dev, opp_table, getclk); 1628 } 1629 1630 static struct opp_table *_add_opp_table(struct device *dev, bool getclk) 1631 { 1632 return _add_opp_table_indexed(dev, 0, getclk); 1633 } 1634 1635 struct opp_table *dev_pm_opp_get_opp_table(struct device *dev) 1636 { 1637 return _find_opp_table(dev); 1638 } 1639 EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table); 1640 1641 static void _opp_table_kref_release(struct kref *kref) 1642 { 1643 struct opp_table *opp_table = container_of(kref, struct opp_table, kref); 1644 struct opp_device *opp_dev, *temp; 1645 int i; 1646 1647 /* Drop the lock as soon as we can */ 1648 list_del(&opp_table->node); 1649 mutex_unlock(&opp_table_lock); 1650 1651 if (opp_table->current_opp) 1652 dev_pm_opp_put(opp_table->current_opp); 1653 1654 _of_clear_opp_table(opp_table); 1655 1656 /* Release automatically acquired single clk */ 1657 if (!IS_ERR(opp_table->clk)) 1658 clk_put(opp_table->clk); 1659 1660 if (opp_table->paths) { 1661 for (i = 0; i < opp_table->path_count; i++) 1662 icc_put(opp_table->paths[i]); 1663 kfree(opp_table->paths); 1664 } 1665 1666 WARN_ON(!list_empty(&opp_table->opp_list)); 1667 1668 list_for_each_entry_safe(opp_dev, temp, &opp_table->dev_list, node) 1669 _remove_opp_dev(opp_dev, opp_table); 1670 1671 mutex_destroy(&opp_table->lock); 1672 kfree(opp_table); 1673 } 1674 1675 void dev_pm_opp_put_opp_table(struct opp_table *opp_table) 1676 { 1677 kref_put_mutex(&opp_table->kref, _opp_table_kref_release, 1678 &opp_table_lock); 1679 } 1680 EXPORT_SYMBOL_GPL(dev_pm_opp_put_opp_table); 1681 1682 void _opp_free(struct dev_pm_opp *opp) 1683 { 1684 kfree(opp); 1685 } 1686 1687 static void _opp_kref_release(struct kref *kref) 1688 { 1689 struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref); 1690 struct opp_table *opp_table = opp->opp_table; 1691 1692 list_del(&opp->node); 1693 mutex_unlock(&opp_table->lock); 1694 1695 /* 1696 * Notify the changes in the availability of the operable 1697 * frequency/voltage list. 1698 */ 1699 blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_REMOVE, opp); 1700 _of_clear_opp(opp_table, opp); 1701 opp_debug_remove_one(opp); 1702 kfree(opp); 1703 } 1704 1705 void dev_pm_opp_get(struct dev_pm_opp *opp) 1706 { 1707 kref_get(&opp->kref); 1708 } 1709 1710 void dev_pm_opp_put(struct dev_pm_opp *opp) 1711 { 1712 kref_put_mutex(&opp->kref, _opp_kref_release, &opp->opp_table->lock); 1713 } 1714 EXPORT_SYMBOL_GPL(dev_pm_opp_put); 1715 1716 /** 1717 * dev_pm_opp_remove() - Remove an OPP from OPP table 1718 * @dev: device for which we do this operation 1719 * @freq: OPP to remove with matching 'freq' 1720 * 1721 * This function removes an opp from the opp table. 1722 */ 1723 void dev_pm_opp_remove(struct device *dev, unsigned long freq) 1724 { 1725 struct dev_pm_opp *opp = NULL, *iter; 1726 struct opp_table *opp_table; 1727 1728 opp_table = _find_opp_table(dev); 1729 if (IS_ERR(opp_table)) 1730 return; 1731 1732 if (!assert_single_clk(opp_table)) 1733 goto put_table; 1734 1735 mutex_lock(&opp_table->lock); 1736 1737 list_for_each_entry(iter, &opp_table->opp_list, node) { 1738 if (iter->rates[0] == freq) { 1739 opp = iter; 1740 break; 1741 } 1742 } 1743 1744 mutex_unlock(&opp_table->lock); 1745 1746 if (opp) { 1747 dev_pm_opp_put(opp); 1748 1749 /* Drop the reference taken by dev_pm_opp_add() */ 1750 dev_pm_opp_put_opp_table(opp_table); 1751 } else { 1752 dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n", 1753 __func__, freq); 1754 } 1755 1756 put_table: 1757 /* Drop the reference taken by _find_opp_table() */ 1758 dev_pm_opp_put_opp_table(opp_table); 1759 } 1760 EXPORT_SYMBOL_GPL(dev_pm_opp_remove); 1761 1762 static struct dev_pm_opp *_opp_get_next(struct opp_table *opp_table, 1763 bool dynamic) 1764 { 1765 struct dev_pm_opp *opp = NULL, *temp; 1766 1767 mutex_lock(&opp_table->lock); 1768 list_for_each_entry(temp, &opp_table->opp_list, node) { 1769 /* 1770 * Refcount must be dropped only once for each OPP by OPP core, 1771 * do that with help of "removed" flag. 1772 */ 1773 if (!temp->removed && dynamic == temp->dynamic) { 1774 opp = temp; 1775 break; 1776 } 1777 } 1778 1779 mutex_unlock(&opp_table->lock); 1780 return opp; 1781 } 1782 1783 /* 1784 * Can't call dev_pm_opp_put() from under the lock as debugfs removal needs to 1785 * happen lock less to avoid circular dependency issues. This routine must be 1786 * called without the opp_table->lock held. 1787 */ 1788 static void _opp_remove_all(struct opp_table *opp_table, bool dynamic) 1789 { 1790 struct dev_pm_opp *opp; 1791 1792 while ((opp = _opp_get_next(opp_table, dynamic))) { 1793 opp->removed = true; 1794 dev_pm_opp_put(opp); 1795 1796 /* Drop the references taken by dev_pm_opp_add() */ 1797 if (dynamic) 1798 dev_pm_opp_put_opp_table(opp_table); 1799 } 1800 } 1801 1802 bool _opp_remove_all_static(struct opp_table *opp_table) 1803 { 1804 mutex_lock(&opp_table->lock); 1805 1806 if (!opp_table->parsed_static_opps) { 1807 mutex_unlock(&opp_table->lock); 1808 return false; 1809 } 1810 1811 if (--opp_table->parsed_static_opps) { 1812 mutex_unlock(&opp_table->lock); 1813 return true; 1814 } 1815 1816 mutex_unlock(&opp_table->lock); 1817 1818 _opp_remove_all(opp_table, false); 1819 return true; 1820 } 1821 1822 /** 1823 * dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs 1824 * @dev: device for which we do this operation 1825 * 1826 * This function removes all dynamically created OPPs from the opp table. 1827 */ 1828 void dev_pm_opp_remove_all_dynamic(struct device *dev) 1829 { 1830 struct opp_table *opp_table; 1831 1832 opp_table = _find_opp_table(dev); 1833 if (IS_ERR(opp_table)) 1834 return; 1835 1836 _opp_remove_all(opp_table, true); 1837 1838 /* Drop the reference taken by _find_opp_table() */ 1839 dev_pm_opp_put_opp_table(opp_table); 1840 } 1841 EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic); 1842 1843 struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table) 1844 { 1845 struct dev_pm_opp *opp; 1846 int supply_count, supply_size, icc_size, clk_size; 1847 1848 /* Allocate space for at least one supply */ 1849 supply_count = opp_table->regulator_count > 0 ? 1850 opp_table->regulator_count : 1; 1851 supply_size = sizeof(*opp->supplies) * supply_count; 1852 clk_size = sizeof(*opp->rates) * opp_table->clk_count; 1853 icc_size = sizeof(*opp->bandwidth) * opp_table->path_count; 1854 1855 /* allocate new OPP node and supplies structures */ 1856 opp = kzalloc(sizeof(*opp) + supply_size + clk_size + icc_size, GFP_KERNEL); 1857 if (!opp) 1858 return NULL; 1859 1860 /* Put the supplies, bw and clock at the end of the OPP structure */ 1861 opp->supplies = (struct dev_pm_opp_supply *)(opp + 1); 1862 1863 opp->rates = (unsigned long *)(opp->supplies + supply_count); 1864 1865 if (icc_size) 1866 opp->bandwidth = (struct dev_pm_opp_icc_bw *)(opp->rates + opp_table->clk_count); 1867 1868 INIT_LIST_HEAD(&opp->node); 1869 1870 return opp; 1871 } 1872 1873 static bool _opp_supported_by_regulators(struct dev_pm_opp *opp, 1874 struct opp_table *opp_table) 1875 { 1876 struct regulator *reg; 1877 int i; 1878 1879 if (!opp_table->regulators) 1880 return true; 1881 1882 for (i = 0; i < opp_table->regulator_count; i++) { 1883 reg = opp_table->regulators[i]; 1884 1885 if (!regulator_is_supported_voltage(reg, 1886 opp->supplies[i].u_volt_min, 1887 opp->supplies[i].u_volt_max)) { 1888 pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n", 1889 __func__, opp->supplies[i].u_volt_min, 1890 opp->supplies[i].u_volt_max); 1891 return false; 1892 } 1893 } 1894 1895 return true; 1896 } 1897 1898 static int _opp_compare_rate(struct opp_table *opp_table, 1899 struct dev_pm_opp *opp1, struct dev_pm_opp *opp2) 1900 { 1901 int i; 1902 1903 for (i = 0; i < opp_table->clk_count; i++) { 1904 if (opp1->rates[i] != opp2->rates[i]) 1905 return opp1->rates[i] < opp2->rates[i] ? -1 : 1; 1906 } 1907 1908 /* Same rates for both OPPs */ 1909 return 0; 1910 } 1911 1912 static int _opp_compare_bw(struct opp_table *opp_table, struct dev_pm_opp *opp1, 1913 struct dev_pm_opp *opp2) 1914 { 1915 int i; 1916 1917 for (i = 0; i < opp_table->path_count; i++) { 1918 if (opp1->bandwidth[i].peak != opp2->bandwidth[i].peak) 1919 return opp1->bandwidth[i].peak < opp2->bandwidth[i].peak ? -1 : 1; 1920 } 1921 1922 /* Same bw for both OPPs */ 1923 return 0; 1924 } 1925 1926 /* 1927 * Returns 1928 * 0: opp1 == opp2 1929 * 1: opp1 > opp2 1930 * -1: opp1 < opp2 1931 */ 1932 int _opp_compare_key(struct opp_table *opp_table, struct dev_pm_opp *opp1, 1933 struct dev_pm_opp *opp2) 1934 { 1935 int ret; 1936 1937 ret = _opp_compare_rate(opp_table, opp1, opp2); 1938 if (ret) 1939 return ret; 1940 1941 ret = _opp_compare_bw(opp_table, opp1, opp2); 1942 if (ret) 1943 return ret; 1944 1945 if (opp1->level != opp2->level) 1946 return opp1->level < opp2->level ? -1 : 1; 1947 1948 /* Duplicate OPPs */ 1949 return 0; 1950 } 1951 1952 static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp, 1953 struct opp_table *opp_table, 1954 struct list_head **head) 1955 { 1956 struct dev_pm_opp *opp; 1957 int opp_cmp; 1958 1959 /* 1960 * Insert new OPP in order of increasing frequency and discard if 1961 * already present. 1962 * 1963 * Need to use &opp_table->opp_list in the condition part of the 'for' 1964 * loop, don't replace it with head otherwise it will become an infinite 1965 * loop. 1966 */ 1967 list_for_each_entry(opp, &opp_table->opp_list, node) { 1968 opp_cmp = _opp_compare_key(opp_table, new_opp, opp); 1969 if (opp_cmp > 0) { 1970 *head = &opp->node; 1971 continue; 1972 } 1973 1974 if (opp_cmp < 0) 1975 return 0; 1976 1977 /* Duplicate OPPs */ 1978 dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n", 1979 __func__, opp->rates[0], opp->supplies[0].u_volt, 1980 opp->available, new_opp->rates[0], 1981 new_opp->supplies[0].u_volt, new_opp->available); 1982 1983 /* Should we compare voltages for all regulators here ? */ 1984 return opp->available && 1985 new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? -EBUSY : -EEXIST; 1986 } 1987 1988 return 0; 1989 } 1990 1991 void _required_opps_available(struct dev_pm_opp *opp, int count) 1992 { 1993 int i; 1994 1995 for (i = 0; i < count; i++) { 1996 if (opp->required_opps[i]->available) 1997 continue; 1998 1999 opp->available = false; 2000 pr_warn("%s: OPP not supported by required OPP %pOF (%lu)\n", 2001 __func__, opp->required_opps[i]->np, opp->rates[0]); 2002 return; 2003 } 2004 } 2005 2006 /* 2007 * Returns: 2008 * 0: On success. And appropriate error message for duplicate OPPs. 2009 * -EBUSY: For OPP with same freq/volt and is available. The callers of 2010 * _opp_add() must return 0 if they receive -EBUSY from it. This is to make 2011 * sure we don't print error messages unnecessarily if different parts of 2012 * kernel try to initialize the OPP table. 2013 * -EEXIST: For OPP with same freq but different volt or is unavailable. This 2014 * should be considered an error by the callers of _opp_add(). 2015 */ 2016 int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, 2017 struct opp_table *opp_table) 2018 { 2019 struct list_head *head; 2020 int ret; 2021 2022 mutex_lock(&opp_table->lock); 2023 head = &opp_table->opp_list; 2024 2025 ret = _opp_is_duplicate(dev, new_opp, opp_table, &head); 2026 if (ret) { 2027 mutex_unlock(&opp_table->lock); 2028 return ret; 2029 } 2030 2031 list_add(&new_opp->node, head); 2032 mutex_unlock(&opp_table->lock); 2033 2034 new_opp->opp_table = opp_table; 2035 kref_init(&new_opp->kref); 2036 2037 opp_debug_create_one(new_opp, opp_table); 2038 2039 if (!_opp_supported_by_regulators(new_opp, opp_table)) { 2040 new_opp->available = false; 2041 dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n", 2042 __func__, new_opp->rates[0]); 2043 } 2044 2045 /* required-opps not fully initialized yet */ 2046 if (lazy_linking_pending(opp_table)) 2047 return 0; 2048 2049 _required_opps_available(new_opp, opp_table->required_opp_count); 2050 2051 return 0; 2052 } 2053 2054 /** 2055 * _opp_add_v1() - Allocate a OPP based on v1 bindings. 2056 * @opp_table: OPP table 2057 * @dev: device for which we do this operation 2058 * @data: The OPP data for the OPP to add 2059 * @dynamic: Dynamically added OPPs. 2060 * 2061 * This function adds an opp definition to the opp table and returns status. 2062 * The opp is made available by default and it can be controlled using 2063 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove. 2064 * 2065 * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table 2066 * and freed by dev_pm_opp_of_remove_table. 2067 * 2068 * Return: 2069 * 0 On success OR 2070 * Duplicate OPPs (both freq and volt are same) and opp->available 2071 * -EEXIST Freq are same and volt are different OR 2072 * Duplicate OPPs (both freq and volt are same) and !opp->available 2073 * -ENOMEM Memory allocation failure 2074 */ 2075 int _opp_add_v1(struct opp_table *opp_table, struct device *dev, 2076 struct dev_pm_opp_data *data, bool dynamic) 2077 { 2078 struct dev_pm_opp *new_opp; 2079 unsigned long tol, u_volt = data->u_volt; 2080 int ret; 2081 2082 if (!assert_single_clk(opp_table)) 2083 return -EINVAL; 2084 2085 new_opp = _opp_allocate(opp_table); 2086 if (!new_opp) 2087 return -ENOMEM; 2088 2089 /* populate the opp table */ 2090 new_opp->rates[0] = data->freq; 2091 new_opp->level = data->level; 2092 tol = u_volt * opp_table->voltage_tolerance_v1 / 100; 2093 new_opp->supplies[0].u_volt = u_volt; 2094 new_opp->supplies[0].u_volt_min = u_volt - tol; 2095 new_opp->supplies[0].u_volt_max = u_volt + tol; 2096 new_opp->available = true; 2097 new_opp->dynamic = dynamic; 2098 2099 ret = _opp_add(dev, new_opp, opp_table); 2100 if (ret) { 2101 /* Don't return error for duplicate OPPs */ 2102 if (ret == -EBUSY) 2103 ret = 0; 2104 goto free_opp; 2105 } 2106 2107 /* 2108 * Notify the changes in the availability of the operable 2109 * frequency/voltage list. 2110 */ 2111 blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp); 2112 return 0; 2113 2114 free_opp: 2115 _opp_free(new_opp); 2116 2117 return ret; 2118 } 2119 2120 /* 2121 * This is required only for the V2 bindings, and it enables a platform to 2122 * specify the hierarchy of versions it supports. OPP layer will then enable 2123 * OPPs, which are available for those versions, based on its 'opp-supported-hw' 2124 * property. 2125 */ 2126 static int _opp_set_supported_hw(struct opp_table *opp_table, 2127 const u32 *versions, unsigned int count) 2128 { 2129 /* Another CPU that shares the OPP table has set the property ? */ 2130 if (opp_table->supported_hw) 2131 return 0; 2132 2133 opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions), 2134 GFP_KERNEL); 2135 if (!opp_table->supported_hw) 2136 return -ENOMEM; 2137 2138 opp_table->supported_hw_count = count; 2139 2140 return 0; 2141 } 2142 2143 static void _opp_put_supported_hw(struct opp_table *opp_table) 2144 { 2145 if (opp_table->supported_hw) { 2146 kfree(opp_table->supported_hw); 2147 opp_table->supported_hw = NULL; 2148 opp_table->supported_hw_count = 0; 2149 } 2150 } 2151 2152 /* 2153 * This is required only for the V2 bindings, and it enables a platform to 2154 * specify the extn to be used for certain property names. The properties to 2155 * which the extension will apply are opp-microvolt and opp-microamp. OPP core 2156 * should postfix the property name with -<name> while looking for them. 2157 */ 2158 static int _opp_set_prop_name(struct opp_table *opp_table, const char *name) 2159 { 2160 /* Another CPU that shares the OPP table has set the property ? */ 2161 if (!opp_table->prop_name) { 2162 opp_table->prop_name = kstrdup(name, GFP_KERNEL); 2163 if (!opp_table->prop_name) 2164 return -ENOMEM; 2165 } 2166 2167 return 0; 2168 } 2169 2170 static void _opp_put_prop_name(struct opp_table *opp_table) 2171 { 2172 if (opp_table->prop_name) { 2173 kfree(opp_table->prop_name); 2174 opp_table->prop_name = NULL; 2175 } 2176 } 2177 2178 /* 2179 * In order to support OPP switching, OPP layer needs to know the name of the 2180 * device's regulators, as the core would be required to switch voltages as 2181 * well. 2182 * 2183 * This must be called before any OPPs are initialized for the device. 2184 */ 2185 static int _opp_set_regulators(struct opp_table *opp_table, struct device *dev, 2186 const char * const names[]) 2187 { 2188 const char * const *temp = names; 2189 struct regulator *reg; 2190 int count = 0, ret, i; 2191 2192 /* Count number of regulators */ 2193 while (*temp++) 2194 count++; 2195 2196 if (!count) 2197 return -EINVAL; 2198 2199 /* Another CPU that shares the OPP table has set the regulators ? */ 2200 if (opp_table->regulators) 2201 return 0; 2202 2203 opp_table->regulators = kmalloc_array(count, 2204 sizeof(*opp_table->regulators), 2205 GFP_KERNEL); 2206 if (!opp_table->regulators) 2207 return -ENOMEM; 2208 2209 for (i = 0; i < count; i++) { 2210 reg = regulator_get_optional(dev, names[i]); 2211 if (IS_ERR(reg)) { 2212 ret = dev_err_probe(dev, PTR_ERR(reg), 2213 "%s: no regulator (%s) found\n", 2214 __func__, names[i]); 2215 goto free_regulators; 2216 } 2217 2218 opp_table->regulators[i] = reg; 2219 } 2220 2221 opp_table->regulator_count = count; 2222 2223 /* Set generic config_regulators() for single regulators here */ 2224 if (count == 1) 2225 opp_table->config_regulators = _opp_config_regulator_single; 2226 2227 return 0; 2228 2229 free_regulators: 2230 while (i != 0) 2231 regulator_put(opp_table->regulators[--i]); 2232 2233 kfree(opp_table->regulators); 2234 opp_table->regulators = NULL; 2235 opp_table->regulator_count = -1; 2236 2237 return ret; 2238 } 2239 2240 static void _opp_put_regulators(struct opp_table *opp_table) 2241 { 2242 int i; 2243 2244 if (!opp_table->regulators) 2245 return; 2246 2247 if (opp_table->enabled) { 2248 for (i = opp_table->regulator_count - 1; i >= 0; i--) 2249 regulator_disable(opp_table->regulators[i]); 2250 } 2251 2252 for (i = opp_table->regulator_count - 1; i >= 0; i--) 2253 regulator_put(opp_table->regulators[i]); 2254 2255 kfree(opp_table->regulators); 2256 opp_table->regulators = NULL; 2257 opp_table->regulator_count = -1; 2258 } 2259 2260 static void _put_clks(struct opp_table *opp_table, int count) 2261 { 2262 int i; 2263 2264 for (i = count - 1; i >= 0; i--) 2265 clk_put(opp_table->clks[i]); 2266 2267 kfree(opp_table->clks); 2268 opp_table->clks = NULL; 2269 } 2270 2271 /* 2272 * In order to support OPP switching, OPP layer needs to get pointers to the 2273 * clocks for the device. Simple cases work fine without using this routine 2274 * (i.e. by passing connection-id as NULL), but for a device with multiple 2275 * clocks available, the OPP core needs to know the exact names of the clks to 2276 * use. 2277 * 2278 * This must be called before any OPPs are initialized for the device. 2279 */ 2280 static int _opp_set_clknames(struct opp_table *opp_table, struct device *dev, 2281 const char * const names[], 2282 config_clks_t config_clks) 2283 { 2284 const char * const *temp = names; 2285 int count = 0, ret, i; 2286 struct clk *clk; 2287 2288 /* Count number of clks */ 2289 while (*temp++) 2290 count++; 2291 2292 /* 2293 * This is a special case where we have a single clock, whose connection 2294 * id name is NULL, i.e. first two entries are NULL in the array. 2295 */ 2296 if (!count && !names[1]) 2297 count = 1; 2298 2299 /* Fail early for invalid configurations */ 2300 if (!count || (!config_clks && count > 1)) 2301 return -EINVAL; 2302 2303 /* Another CPU that shares the OPP table has set the clkname ? */ 2304 if (opp_table->clks) 2305 return 0; 2306 2307 opp_table->clks = kmalloc_array(count, sizeof(*opp_table->clks), 2308 GFP_KERNEL); 2309 if (!opp_table->clks) 2310 return -ENOMEM; 2311 2312 /* Find clks for the device */ 2313 for (i = 0; i < count; i++) { 2314 clk = clk_get(dev, names[i]); 2315 if (IS_ERR(clk)) { 2316 ret = dev_err_probe(dev, PTR_ERR(clk), 2317 "%s: Couldn't find clock with name: %s\n", 2318 __func__, names[i]); 2319 goto free_clks; 2320 } 2321 2322 opp_table->clks[i] = clk; 2323 } 2324 2325 opp_table->clk_count = count; 2326 opp_table->config_clks = config_clks; 2327 2328 /* Set generic single clk set here */ 2329 if (count == 1) { 2330 if (!opp_table->config_clks) 2331 opp_table->config_clks = _opp_config_clk_single; 2332 2333 /* 2334 * We could have just dropped the "clk" field and used "clks" 2335 * everywhere. Instead we kept the "clk" field around for 2336 * following reasons: 2337 * 2338 * - avoiding clks[0] everywhere else. 2339 * - not running single clk helpers for multiple clk usecase by 2340 * mistake. 2341 * 2342 * Since this is single-clk case, just update the clk pointer 2343 * too. 2344 */ 2345 opp_table->clk = opp_table->clks[0]; 2346 } 2347 2348 return 0; 2349 2350 free_clks: 2351 _put_clks(opp_table, i); 2352 return ret; 2353 } 2354 2355 static void _opp_put_clknames(struct opp_table *opp_table) 2356 { 2357 if (!opp_table->clks) 2358 return; 2359 2360 opp_table->config_clks = NULL; 2361 opp_table->clk = ERR_PTR(-ENODEV); 2362 2363 _put_clks(opp_table, opp_table->clk_count); 2364 } 2365 2366 /* 2367 * This is useful to support platforms with multiple regulators per device. 2368 * 2369 * This must be called before any OPPs are initialized for the device. 2370 */ 2371 static int _opp_set_config_regulators_helper(struct opp_table *opp_table, 2372 struct device *dev, config_regulators_t config_regulators) 2373 { 2374 /* Another CPU that shares the OPP table has set the helper ? */ 2375 if (!opp_table->config_regulators) 2376 opp_table->config_regulators = config_regulators; 2377 2378 return 0; 2379 } 2380 2381 static void _opp_put_config_regulators_helper(struct opp_table *opp_table) 2382 { 2383 if (opp_table->config_regulators) 2384 opp_table->config_regulators = NULL; 2385 } 2386 2387 static void _opp_detach_genpd(struct opp_table *opp_table) 2388 { 2389 int index; 2390 2391 if (!opp_table->genpd_virt_devs) 2392 return; 2393 2394 for (index = 0; index < opp_table->required_opp_count; index++) { 2395 if (!opp_table->genpd_virt_devs[index]) 2396 continue; 2397 2398 dev_pm_domain_detach(opp_table->genpd_virt_devs[index], false); 2399 opp_table->genpd_virt_devs[index] = NULL; 2400 } 2401 2402 kfree(opp_table->genpd_virt_devs); 2403 opp_table->genpd_virt_devs = NULL; 2404 } 2405 2406 /* 2407 * Multiple generic power domains for a device are supported with the help of 2408 * virtual genpd devices, which are created for each consumer device - genpd 2409 * pair. These are the device structures which are attached to the power domain 2410 * and are required by the OPP core to set the performance state of the genpd. 2411 * The same API also works for the case where single genpd is available and so 2412 * we don't need to support that separately. 2413 * 2414 * This helper will normally be called by the consumer driver of the device 2415 * "dev", as only that has details of the genpd names. 2416 * 2417 * This helper needs to be called once with a list of all genpd to attach. 2418 * Otherwise the original device structure will be used instead by the OPP core. 2419 * 2420 * The order of entries in the names array must match the order in which 2421 * "required-opps" are added in DT. 2422 */ 2423 static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev, 2424 const char * const *names, struct device ***virt_devs) 2425 { 2426 struct device *virt_dev; 2427 int index = 0, ret = -EINVAL; 2428 const char * const *name = names; 2429 2430 if (opp_table->genpd_virt_devs) 2431 return 0; 2432 2433 opp_table->genpd_virt_devs = kcalloc(opp_table->required_opp_count, 2434 sizeof(*opp_table->genpd_virt_devs), 2435 GFP_KERNEL); 2436 if (!opp_table->genpd_virt_devs) 2437 return -ENOMEM; 2438 2439 while (*name) { 2440 if (index >= opp_table->required_opp_count) { 2441 dev_err(dev, "Index can't be greater than required-opp-count - 1, %s (%d : %d)\n", 2442 *name, opp_table->required_opp_count, index); 2443 goto err; 2444 } 2445 2446 virt_dev = dev_pm_domain_attach_by_name(dev, *name); 2447 if (IS_ERR_OR_NULL(virt_dev)) { 2448 ret = virt_dev ? PTR_ERR(virt_dev) : -ENODEV; 2449 dev_err(dev, "Couldn't attach to pm_domain: %d\n", ret); 2450 goto err; 2451 } 2452 2453 opp_table->genpd_virt_devs[index] = virt_dev; 2454 index++; 2455 name++; 2456 } 2457 2458 if (virt_devs) 2459 *virt_devs = opp_table->genpd_virt_devs; 2460 2461 return 0; 2462 2463 err: 2464 _opp_detach_genpd(opp_table); 2465 return ret; 2466 2467 } 2468 2469 static void _opp_clear_config(struct opp_config_data *data) 2470 { 2471 if (data->flags & OPP_CONFIG_GENPD) 2472 _opp_detach_genpd(data->opp_table); 2473 if (data->flags & OPP_CONFIG_REGULATOR) 2474 _opp_put_regulators(data->opp_table); 2475 if (data->flags & OPP_CONFIG_SUPPORTED_HW) 2476 _opp_put_supported_hw(data->opp_table); 2477 if (data->flags & OPP_CONFIG_REGULATOR_HELPER) 2478 _opp_put_config_regulators_helper(data->opp_table); 2479 if (data->flags & OPP_CONFIG_PROP_NAME) 2480 _opp_put_prop_name(data->opp_table); 2481 if (data->flags & OPP_CONFIG_CLK) 2482 _opp_put_clknames(data->opp_table); 2483 2484 dev_pm_opp_put_opp_table(data->opp_table); 2485 kfree(data); 2486 } 2487 2488 /** 2489 * dev_pm_opp_set_config() - Set OPP configuration for the device. 2490 * @dev: Device for which configuration is being set. 2491 * @config: OPP configuration. 2492 * 2493 * This allows all device OPP configurations to be performed at once. 2494 * 2495 * This must be called before any OPPs are initialized for the device. This may 2496 * be called multiple times for the same OPP table, for example once for each 2497 * CPU that share the same table. This must be balanced by the same number of 2498 * calls to dev_pm_opp_clear_config() in order to free the OPP table properly. 2499 * 2500 * This returns a token to the caller, which must be passed to 2501 * dev_pm_opp_clear_config() to free the resources later. The value of the 2502 * returned token will be >= 1 for success and negative for errors. The minimum 2503 * value of 1 is chosen here to make it easy for callers to manage the resource. 2504 */ 2505 int dev_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config) 2506 { 2507 struct opp_table *opp_table; 2508 struct opp_config_data *data; 2509 unsigned int id; 2510 int ret; 2511 2512 data = kmalloc(sizeof(*data), GFP_KERNEL); 2513 if (!data) 2514 return -ENOMEM; 2515 2516 opp_table = _add_opp_table(dev, false); 2517 if (IS_ERR(opp_table)) { 2518 kfree(data); 2519 return PTR_ERR(opp_table); 2520 } 2521 2522 data->opp_table = opp_table; 2523 data->flags = 0; 2524 2525 /* This should be called before OPPs are initialized */ 2526 if (WARN_ON(!list_empty(&opp_table->opp_list))) { 2527 ret = -EBUSY; 2528 goto err; 2529 } 2530 2531 /* Configure clocks */ 2532 if (config->clk_names) { 2533 ret = _opp_set_clknames(opp_table, dev, config->clk_names, 2534 config->config_clks); 2535 if (ret) 2536 goto err; 2537 2538 data->flags |= OPP_CONFIG_CLK; 2539 } else if (config->config_clks) { 2540 /* Don't allow config callback without clocks */ 2541 ret = -EINVAL; 2542 goto err; 2543 } 2544 2545 /* Configure property names */ 2546 if (config->prop_name) { 2547 ret = _opp_set_prop_name(opp_table, config->prop_name); 2548 if (ret) 2549 goto err; 2550 2551 data->flags |= OPP_CONFIG_PROP_NAME; 2552 } 2553 2554 /* Configure config_regulators helper */ 2555 if (config->config_regulators) { 2556 ret = _opp_set_config_regulators_helper(opp_table, dev, 2557 config->config_regulators); 2558 if (ret) 2559 goto err; 2560 2561 data->flags |= OPP_CONFIG_REGULATOR_HELPER; 2562 } 2563 2564 /* Configure supported hardware */ 2565 if (config->supported_hw) { 2566 ret = _opp_set_supported_hw(opp_table, config->supported_hw, 2567 config->supported_hw_count); 2568 if (ret) 2569 goto err; 2570 2571 data->flags |= OPP_CONFIG_SUPPORTED_HW; 2572 } 2573 2574 /* Configure supplies */ 2575 if (config->regulator_names) { 2576 ret = _opp_set_regulators(opp_table, dev, 2577 config->regulator_names); 2578 if (ret) 2579 goto err; 2580 2581 data->flags |= OPP_CONFIG_REGULATOR; 2582 } 2583 2584 /* Attach genpds */ 2585 if (config->genpd_names) { 2586 ret = _opp_attach_genpd(opp_table, dev, config->genpd_names, 2587 config->virt_devs); 2588 if (ret) 2589 goto err; 2590 2591 data->flags |= OPP_CONFIG_GENPD; 2592 } 2593 2594 ret = xa_alloc(&opp_configs, &id, data, XA_LIMIT(1, INT_MAX), 2595 GFP_KERNEL); 2596 if (ret) 2597 goto err; 2598 2599 return id; 2600 2601 err: 2602 _opp_clear_config(data); 2603 return ret; 2604 } 2605 EXPORT_SYMBOL_GPL(dev_pm_opp_set_config); 2606 2607 /** 2608 * dev_pm_opp_clear_config() - Releases resources blocked for OPP configuration. 2609 * @token: The token returned by dev_pm_opp_set_config() previously. 2610 * 2611 * This allows all device OPP configurations to be cleared at once. This must be 2612 * called once for each call made to dev_pm_opp_set_config(), in order to free 2613 * the OPPs properly. 2614 * 2615 * Currently the first call itself ends up freeing all the OPP configurations, 2616 * while the later ones only drop the OPP table reference. This works well for 2617 * now as we would never want to use an half initialized OPP table and want to 2618 * remove the configurations together. 2619 */ 2620 void dev_pm_opp_clear_config(int token) 2621 { 2622 struct opp_config_data *data; 2623 2624 /* 2625 * This lets the callers call this unconditionally and keep their code 2626 * simple. 2627 */ 2628 if (unlikely(token <= 0)) 2629 return; 2630 2631 data = xa_erase(&opp_configs, token); 2632 if (WARN_ON(!data)) 2633 return; 2634 2635 _opp_clear_config(data); 2636 } 2637 EXPORT_SYMBOL_GPL(dev_pm_opp_clear_config); 2638 2639 static void devm_pm_opp_config_release(void *token) 2640 { 2641 dev_pm_opp_clear_config((unsigned long)token); 2642 } 2643 2644 /** 2645 * devm_pm_opp_set_config() - Set OPP configuration for the device. 2646 * @dev: Device for which configuration is being set. 2647 * @config: OPP configuration. 2648 * 2649 * This allows all device OPP configurations to be performed at once. 2650 * This is a resource-managed variant of dev_pm_opp_set_config(). 2651 * 2652 * Return: 0 on success and errorno otherwise. 2653 */ 2654 int devm_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config) 2655 { 2656 int token = dev_pm_opp_set_config(dev, config); 2657 2658 if (token < 0) 2659 return token; 2660 2661 return devm_add_action_or_reset(dev, devm_pm_opp_config_release, 2662 (void *) ((unsigned long) token)); 2663 } 2664 EXPORT_SYMBOL_GPL(devm_pm_opp_set_config); 2665 2666 /** 2667 * dev_pm_opp_xlate_required_opp() - Find required OPP for @src_table OPP. 2668 * @src_table: OPP table which has @dst_table as one of its required OPP table. 2669 * @dst_table: Required OPP table of the @src_table. 2670 * @src_opp: OPP from the @src_table. 2671 * 2672 * This function returns the OPP (present in @dst_table) pointed out by the 2673 * "required-opps" property of the @src_opp (present in @src_table). 2674 * 2675 * The callers are required to call dev_pm_opp_put() for the returned OPP after 2676 * use. 2677 * 2678 * Return: pointer to 'struct dev_pm_opp' on success and errorno otherwise. 2679 */ 2680 struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table, 2681 struct opp_table *dst_table, 2682 struct dev_pm_opp *src_opp) 2683 { 2684 struct dev_pm_opp *opp, *dest_opp = ERR_PTR(-ENODEV); 2685 int i; 2686 2687 if (!src_table || !dst_table || !src_opp || 2688 !src_table->required_opp_tables) 2689 return ERR_PTR(-EINVAL); 2690 2691 /* required-opps not fully initialized yet */ 2692 if (lazy_linking_pending(src_table)) 2693 return ERR_PTR(-EBUSY); 2694 2695 for (i = 0; i < src_table->required_opp_count; i++) { 2696 if (src_table->required_opp_tables[i] == dst_table) { 2697 mutex_lock(&src_table->lock); 2698 2699 list_for_each_entry(opp, &src_table->opp_list, node) { 2700 if (opp == src_opp) { 2701 dest_opp = opp->required_opps[i]; 2702 dev_pm_opp_get(dest_opp); 2703 break; 2704 } 2705 } 2706 2707 mutex_unlock(&src_table->lock); 2708 break; 2709 } 2710 } 2711 2712 if (IS_ERR(dest_opp)) { 2713 pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__, 2714 src_table, dst_table); 2715 } 2716 2717 return dest_opp; 2718 } 2719 EXPORT_SYMBOL_GPL(dev_pm_opp_xlate_required_opp); 2720 2721 /** 2722 * dev_pm_opp_xlate_performance_state() - Find required OPP's pstate for src_table. 2723 * @src_table: OPP table which has dst_table as one of its required OPP table. 2724 * @dst_table: Required OPP table of the src_table. 2725 * @pstate: Current performance state of the src_table. 2726 * 2727 * This Returns pstate of the OPP (present in @dst_table) pointed out by the 2728 * "required-opps" property of the OPP (present in @src_table) which has 2729 * performance state set to @pstate. 2730 * 2731 * Return: Zero or positive performance state on success, otherwise negative 2732 * value on errors. 2733 */ 2734 int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, 2735 struct opp_table *dst_table, 2736 unsigned int pstate) 2737 { 2738 struct dev_pm_opp *opp; 2739 int dest_pstate = -EINVAL; 2740 int i; 2741 2742 /* 2743 * Normally the src_table will have the "required_opps" property set to 2744 * point to one of the OPPs in the dst_table, but in some cases the 2745 * genpd and its master have one to one mapping of performance states 2746 * and so none of them have the "required-opps" property set. Return the 2747 * pstate of the src_table as it is in such cases. 2748 */ 2749 if (!src_table || !src_table->required_opp_count) 2750 return pstate; 2751 2752 /* Both OPP tables must belong to genpds */ 2753 if (unlikely(!src_table->is_genpd || !dst_table->is_genpd)) { 2754 pr_err("%s: Performance state is only valid for genpds.\n", __func__); 2755 return -EINVAL; 2756 } 2757 2758 /* required-opps not fully initialized yet */ 2759 if (lazy_linking_pending(src_table)) 2760 return -EBUSY; 2761 2762 for (i = 0; i < src_table->required_opp_count; i++) { 2763 if (src_table->required_opp_tables[i]->np == dst_table->np) 2764 break; 2765 } 2766 2767 if (unlikely(i == src_table->required_opp_count)) { 2768 pr_err("%s: Couldn't find matching OPP table (%p: %p)\n", 2769 __func__, src_table, dst_table); 2770 return -EINVAL; 2771 } 2772 2773 mutex_lock(&src_table->lock); 2774 2775 list_for_each_entry(opp, &src_table->opp_list, node) { 2776 if (opp->level == pstate) { 2777 dest_pstate = opp->required_opps[i]->level; 2778 goto unlock; 2779 } 2780 } 2781 2782 pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__, src_table, 2783 dst_table); 2784 2785 unlock: 2786 mutex_unlock(&src_table->lock); 2787 2788 return dest_pstate; 2789 } 2790 2791 /** 2792 * dev_pm_opp_add_dynamic() - Add an OPP table from a table definitions 2793 * @dev: The device for which we do this operation 2794 * @data: The OPP data for the OPP to add 2795 * 2796 * This function adds an opp definition to the opp table and returns status. 2797 * The opp is made available by default and it can be controlled using 2798 * dev_pm_opp_enable/disable functions. 2799 * 2800 * Return: 2801 * 0 On success OR 2802 * Duplicate OPPs (both freq and volt are same) and opp->available 2803 * -EEXIST Freq are same and volt are different OR 2804 * Duplicate OPPs (both freq and volt are same) and !opp->available 2805 * -ENOMEM Memory allocation failure 2806 */ 2807 int dev_pm_opp_add_dynamic(struct device *dev, struct dev_pm_opp_data *data) 2808 { 2809 struct opp_table *opp_table; 2810 int ret; 2811 2812 opp_table = _add_opp_table(dev, true); 2813 if (IS_ERR(opp_table)) 2814 return PTR_ERR(opp_table); 2815 2816 /* Fix regulator count for dynamic OPPs */ 2817 opp_table->regulator_count = 1; 2818 2819 ret = _opp_add_v1(opp_table, dev, data, true); 2820 if (ret) 2821 dev_pm_opp_put_opp_table(opp_table); 2822 2823 return ret; 2824 } 2825 EXPORT_SYMBOL_GPL(dev_pm_opp_add_dynamic); 2826 2827 /** 2828 * _opp_set_availability() - helper to set the availability of an opp 2829 * @dev: device for which we do this operation 2830 * @freq: OPP frequency to modify availability 2831 * @availability_req: availability status requested for this opp 2832 * 2833 * Set the availability of an OPP, opp_{enable,disable} share a common logic 2834 * which is isolated here. 2835 * 2836 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the 2837 * copy operation, returns 0 if no modification was done OR modification was 2838 * successful. 2839 */ 2840 static int _opp_set_availability(struct device *dev, unsigned long freq, 2841 bool availability_req) 2842 { 2843 struct opp_table *opp_table; 2844 struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV); 2845 int r = 0; 2846 2847 /* Find the opp_table */ 2848 opp_table = _find_opp_table(dev); 2849 if (IS_ERR(opp_table)) { 2850 r = PTR_ERR(opp_table); 2851 dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r); 2852 return r; 2853 } 2854 2855 if (!assert_single_clk(opp_table)) { 2856 r = -EINVAL; 2857 goto put_table; 2858 } 2859 2860 mutex_lock(&opp_table->lock); 2861 2862 /* Do we have the frequency? */ 2863 list_for_each_entry(tmp_opp, &opp_table->opp_list, node) { 2864 if (tmp_opp->rates[0] == freq) { 2865 opp = tmp_opp; 2866 break; 2867 } 2868 } 2869 2870 if (IS_ERR(opp)) { 2871 r = PTR_ERR(opp); 2872 goto unlock; 2873 } 2874 2875 /* Is update really needed? */ 2876 if (opp->available == availability_req) 2877 goto unlock; 2878 2879 opp->available = availability_req; 2880 2881 dev_pm_opp_get(opp); 2882 mutex_unlock(&opp_table->lock); 2883 2884 /* Notify the change of the OPP availability */ 2885 if (availability_req) 2886 blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE, 2887 opp); 2888 else 2889 blocking_notifier_call_chain(&opp_table->head, 2890 OPP_EVENT_DISABLE, opp); 2891 2892 dev_pm_opp_put(opp); 2893 goto put_table; 2894 2895 unlock: 2896 mutex_unlock(&opp_table->lock); 2897 put_table: 2898 dev_pm_opp_put_opp_table(opp_table); 2899 return r; 2900 } 2901 2902 /** 2903 * dev_pm_opp_adjust_voltage() - helper to change the voltage of an OPP 2904 * @dev: device for which we do this operation 2905 * @freq: OPP frequency to adjust voltage of 2906 * @u_volt: new OPP target voltage 2907 * @u_volt_min: new OPP min voltage 2908 * @u_volt_max: new OPP max voltage 2909 * 2910 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the 2911 * copy operation, returns 0 if no modifcation was done OR modification was 2912 * successful. 2913 */ 2914 int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq, 2915 unsigned long u_volt, unsigned long u_volt_min, 2916 unsigned long u_volt_max) 2917 2918 { 2919 struct opp_table *opp_table; 2920 struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV); 2921 int r = 0; 2922 2923 /* Find the opp_table */ 2924 opp_table = _find_opp_table(dev); 2925 if (IS_ERR(opp_table)) { 2926 r = PTR_ERR(opp_table); 2927 dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r); 2928 return r; 2929 } 2930 2931 if (!assert_single_clk(opp_table)) { 2932 r = -EINVAL; 2933 goto put_table; 2934 } 2935 2936 mutex_lock(&opp_table->lock); 2937 2938 /* Do we have the frequency? */ 2939 list_for_each_entry(tmp_opp, &opp_table->opp_list, node) { 2940 if (tmp_opp->rates[0] == freq) { 2941 opp = tmp_opp; 2942 break; 2943 } 2944 } 2945 2946 if (IS_ERR(opp)) { 2947 r = PTR_ERR(opp); 2948 goto adjust_unlock; 2949 } 2950 2951 /* Is update really needed? */ 2952 if (opp->supplies->u_volt == u_volt) 2953 goto adjust_unlock; 2954 2955 opp->supplies->u_volt = u_volt; 2956 opp->supplies->u_volt_min = u_volt_min; 2957 opp->supplies->u_volt_max = u_volt_max; 2958 2959 dev_pm_opp_get(opp); 2960 mutex_unlock(&opp_table->lock); 2961 2962 /* Notify the voltage change of the OPP */ 2963 blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADJUST_VOLTAGE, 2964 opp); 2965 2966 dev_pm_opp_put(opp); 2967 goto put_table; 2968 2969 adjust_unlock: 2970 mutex_unlock(&opp_table->lock); 2971 put_table: 2972 dev_pm_opp_put_opp_table(opp_table); 2973 return r; 2974 } 2975 EXPORT_SYMBOL_GPL(dev_pm_opp_adjust_voltage); 2976 2977 /** 2978 * dev_pm_opp_enable() - Enable a specific OPP 2979 * @dev: device for which we do this operation 2980 * @freq: OPP frequency to enable 2981 * 2982 * Enables a provided opp. If the operation is valid, this returns 0, else the 2983 * corresponding error value. It is meant to be used for users an OPP available 2984 * after being temporarily made unavailable with dev_pm_opp_disable. 2985 * 2986 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the 2987 * copy operation, returns 0 if no modification was done OR modification was 2988 * successful. 2989 */ 2990 int dev_pm_opp_enable(struct device *dev, unsigned long freq) 2991 { 2992 return _opp_set_availability(dev, freq, true); 2993 } 2994 EXPORT_SYMBOL_GPL(dev_pm_opp_enable); 2995 2996 /** 2997 * dev_pm_opp_disable() - Disable a specific OPP 2998 * @dev: device for which we do this operation 2999 * @freq: OPP frequency to disable 3000 * 3001 * Disables a provided opp. If the operation is valid, this returns 3002 * 0, else the corresponding error value. It is meant to be a temporary 3003 * control by users to make this OPP not available until the circumstances are 3004 * right to make it available again (with a call to dev_pm_opp_enable). 3005 * 3006 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the 3007 * copy operation, returns 0 if no modification was done OR modification was 3008 * successful. 3009 */ 3010 int dev_pm_opp_disable(struct device *dev, unsigned long freq) 3011 { 3012 return _opp_set_availability(dev, freq, false); 3013 } 3014 EXPORT_SYMBOL_GPL(dev_pm_opp_disable); 3015 3016 /** 3017 * dev_pm_opp_register_notifier() - Register OPP notifier for the device 3018 * @dev: Device for which notifier needs to be registered 3019 * @nb: Notifier block to be registered 3020 * 3021 * Return: 0 on success or a negative error value. 3022 */ 3023 int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb) 3024 { 3025 struct opp_table *opp_table; 3026 int ret; 3027 3028 opp_table = _find_opp_table(dev); 3029 if (IS_ERR(opp_table)) 3030 return PTR_ERR(opp_table); 3031 3032 ret = blocking_notifier_chain_register(&opp_table->head, nb); 3033 3034 dev_pm_opp_put_opp_table(opp_table); 3035 3036 return ret; 3037 } 3038 EXPORT_SYMBOL(dev_pm_opp_register_notifier); 3039 3040 /** 3041 * dev_pm_opp_unregister_notifier() - Unregister OPP notifier for the device 3042 * @dev: Device for which notifier needs to be unregistered 3043 * @nb: Notifier block to be unregistered 3044 * 3045 * Return: 0 on success or a negative error value. 3046 */ 3047 int dev_pm_opp_unregister_notifier(struct device *dev, 3048 struct notifier_block *nb) 3049 { 3050 struct opp_table *opp_table; 3051 int ret; 3052 3053 opp_table = _find_opp_table(dev); 3054 if (IS_ERR(opp_table)) 3055 return PTR_ERR(opp_table); 3056 3057 ret = blocking_notifier_chain_unregister(&opp_table->head, nb); 3058 3059 dev_pm_opp_put_opp_table(opp_table); 3060 3061 return ret; 3062 } 3063 EXPORT_SYMBOL(dev_pm_opp_unregister_notifier); 3064 3065 /** 3066 * dev_pm_opp_remove_table() - Free all OPPs associated with the device 3067 * @dev: device pointer used to lookup OPP table. 3068 * 3069 * Free both OPPs created using static entries present in DT and the 3070 * dynamically added entries. 3071 */ 3072 void dev_pm_opp_remove_table(struct device *dev) 3073 { 3074 struct opp_table *opp_table; 3075 3076 /* Check for existing table for 'dev' */ 3077 opp_table = _find_opp_table(dev); 3078 if (IS_ERR(opp_table)) { 3079 int error = PTR_ERR(opp_table); 3080 3081 if (error != -ENODEV) 3082 WARN(1, "%s: opp_table: %d\n", 3083 IS_ERR_OR_NULL(dev) ? 3084 "Invalid device" : dev_name(dev), 3085 error); 3086 return; 3087 } 3088 3089 /* 3090 * Drop the extra reference only if the OPP table was successfully added 3091 * with dev_pm_opp_of_add_table() earlier. 3092 **/ 3093 if (_opp_remove_all_static(opp_table)) 3094 dev_pm_opp_put_opp_table(opp_table); 3095 3096 /* Drop reference taken by _find_opp_table() */ 3097 dev_pm_opp_put_opp_table(opp_table); 3098 } 3099 EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table); 3100 3101 /** 3102 * dev_pm_opp_sync_regulators() - Sync state of voltage regulators 3103 * @dev: device for which we do this operation 3104 * 3105 * Sync voltage state of the OPP table regulators. 3106 * 3107 * Return: 0 on success or a negative error value. 3108 */ 3109 int dev_pm_opp_sync_regulators(struct device *dev) 3110 { 3111 struct opp_table *opp_table; 3112 struct regulator *reg; 3113 int i, ret = 0; 3114 3115 /* Device may not have OPP table */ 3116 opp_table = _find_opp_table(dev); 3117 if (IS_ERR(opp_table)) 3118 return 0; 3119 3120 /* Regulator may not be required for the device */ 3121 if (unlikely(!opp_table->regulators)) 3122 goto put_table; 3123 3124 /* Nothing to sync if voltage wasn't changed */ 3125 if (!opp_table->enabled) 3126 goto put_table; 3127 3128 for (i = 0; i < opp_table->regulator_count; i++) { 3129 reg = opp_table->regulators[i]; 3130 ret = regulator_sync_voltage(reg); 3131 if (ret) 3132 break; 3133 } 3134 put_table: 3135 /* Drop reference taken by _find_opp_table() */ 3136 dev_pm_opp_put_opp_table(opp_table); 3137 3138 return ret; 3139 } 3140 EXPORT_SYMBOL_GPL(dev_pm_opp_sync_regulators); 3141