// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic OPP OF helpers
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/of_device.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/energy_model.h>

#include "opp.h"

/*
 * Returns opp descriptor node for a device node, caller must
 * do of_node_put().
 */
static struct device_node *_opp_of_get_opp_desc_node(struct device_node *np,
						     int index)
{
	/* "operating-points-v2" can be an array for power domain providers */
	return of_parse_phandle(np, "operating-points-v2", index);
}

/* Returns opp descriptor node for a device, caller must do of_node_put() */
struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev)
{
	return _opp_of_get_opp_desc_node(dev->of_node, 0);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node);

struct opp_table *_managed_opp(struct device *dev, int index)
{
	struct opp_table *opp_table, *managed_table = NULL;
	struct device_node *np;

	np = _opp_of_get_opp_desc_node(dev->of_node, index);
	if (!np)
		return NULL;

	list_for_each_entry(opp_table, &opp_tables, node) {
		if (opp_table->np == np) {
			/*
			 * Multiple devices can point to the same OPP table and
			 * so will have same node-pointer, np.
			 *
			 * But the OPPs will be considered as shared only if the
			 * OPP table contains a "opp-shared" property.
			 */
			if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) {
				_get_opp_table_kref(opp_table);
				managed_table = opp_table;
			}

			break;
		}
	}

	of_node_put(np);

	return managed_table;
}

/* The caller must call dev_pm_opp_put() after the OPP is used */
static struct dev_pm_opp *_find_opp_of_np(struct opp_table *opp_table,
					  struct device_node *opp_np)
{
	struct dev_pm_opp *opp;

	mutex_lock(&opp_table->lock);

	list_for_each_entry(opp, &opp_table->opp_list, node) {
		if (opp->np == opp_np) {
			dev_pm_opp_get(opp);
			mutex_unlock(&opp_table->lock);
			return opp;
		}
	}

	mutex_unlock(&opp_table->lock);

	return NULL;
}

static struct device_node *of_parse_required_opp(struct device_node *np,
						 int index)
{
	struct device_node *required_np;

	required_np = of_parse_phandle(np, "required-opps", index);
	if (unlikely(!required_np)) {
		pr_err("%s: Unable to parse required-opps: %pOF, index: %d\n",
		       __func__, np, index);
	}

	return required_np;
}

/* The caller must call dev_pm_opp_put_opp_table() after the table is used */
static struct opp_table *_find_table_of_opp_np(struct device_node *opp_np)
{
	struct opp_table *opp_table;
	struct device_node *opp_table_np;

	lockdep_assert_held(&opp_table_lock);

	opp_table_np = of_get_parent(opp_np);
	if (!opp_table_np)
		goto err;

	/* It is safe to put the node now as all we need now is its address */
	of_node_put(opp_table_np);

	list_for_each_entry(opp_table, &opp_tables, node) {
		if (opp_table_np == opp_table->np) {
			_get_opp_table_kref(opp_table);
			return opp_table;
		}
	}

err:
	return ERR_PTR(-ENODEV);
}
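/*
 * Illustrative sketch (not part of the helpers above): a platform driver that
 * only needs the OPP table node itself, e.g. to inspect a vendor specific
 * property, could use the exported lookup helper directly. "my_dev" and
 * "vendor,custom-prop" below are hypothetical.
 *
 *	struct device_node *opp_np;
 *
 *	opp_np = dev_pm_opp_of_get_opp_desc_node(my_dev);
 *	if (opp_np) {
 *		if (of_property_read_bool(opp_np, "vendor,custom-prop"))
 *			...;
 *		of_node_put(opp_np);
 *	}
 */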
/* Free resources previously acquired by _opp_table_alloc_required_tables() */
static void _opp_table_free_required_tables(struct opp_table *opp_table)
{
	struct opp_table **required_opp_tables = opp_table->required_opp_tables;
	int i;

	if (!required_opp_tables)
		return;

	for (i = 0; i < opp_table->required_opp_count; i++) {
		if (IS_ERR_OR_NULL(required_opp_tables[i]))
			break;

		dev_pm_opp_put_opp_table(required_opp_tables[i]);
	}

	kfree(required_opp_tables);

	opp_table->required_opp_count = 0;
	opp_table->required_opp_tables = NULL;
}

/*
 * Populate all devices and opp tables which are part of "required-opps" list.
 * Checking only the first OPP node should be enough.
 */
static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
					     struct device *dev,
					     struct device_node *opp_np)
{
	struct opp_table **required_opp_tables;
	struct device_node *required_np, *np;
	int count, i;

	/* Traversing the first OPP node is all we need */
	np = of_get_next_available_child(opp_np, NULL);
	if (!np) {
		dev_err(dev, "Empty OPP table\n");
		return;
	}

	count = of_count_phandle_with_args(np, "required-opps", NULL);
	if (!count)
		goto put_np;

	required_opp_tables = kcalloc(count, sizeof(*required_opp_tables),
				      GFP_KERNEL);
	if (!required_opp_tables)
		goto put_np;

	opp_table->required_opp_tables = required_opp_tables;
	opp_table->required_opp_count = count;

	for (i = 0; i < count; i++) {
		required_np = of_parse_required_opp(np, i);
		if (!required_np)
			goto free_required_tables;

		required_opp_tables[i] = _find_table_of_opp_np(required_np);
		of_node_put(required_np);

		if (IS_ERR(required_opp_tables[i]))
			goto free_required_tables;

		/*
		 * We only support genpd's OPPs in the "required-opps" for now,
		 * as we don't know much about other cases. Error out if the
		 * required OPP doesn't belong to a genpd.
		 */
		if (!required_opp_tables[i]->is_genpd) {
			dev_err(dev, "required-opp doesn't belong to genpd: %pOF\n",
				required_np);
			goto free_required_tables;
		}
	}

	goto put_np;

free_required_tables:
	_opp_table_free_required_tables(opp_table);
put_np:
	of_node_put(np);
}
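/*
 * Illustrative DT sketch of the layout parsed by
 * _opp_table_alloc_required_tables() above (all names and values are
 * examples): each OPP of a device lists a phandle to an OPP of its power
 * domain provider in "required-opps", and the provider's table is what ends
 * up referenced from required_opp_tables[].
 *
 *	rpmpd_opp_table: opp-table {
 *		compatible = "operating-points-v2";
 *
 *		rpmpd_opp_low: opp1 {
 *			opp-level = <1>;
 *		};
 *
 *		rpmpd_opp_high: opp2 {
 *			opp-level = <2>;
 *		};
 *	};
 *
 *	cpu_opp_table: opp-table {
 *		compatible = "operating-points-v2";
 *		opp-shared;
 *
 *		opp-1000000000 {
 *			opp-hz = /bits/ 64 <1000000000>;
 *			required-opps = <&rpmpd_opp_high>;
 *		};
 *	};
 */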
void _of_init_opp_table(struct opp_table *opp_table, struct device *dev,
			int index)
{
	struct device_node *np, *opp_np;
	u32 val;

	/*
	 * Only required for backward compatibility with v1 bindings, but isn't
	 * harmful for other cases. And so we do it unconditionally.
	 */
	np = of_node_get(dev->of_node);
	if (!np)
		return;

	if (!of_property_read_u32(np, "clock-latency", &val))
		opp_table->clock_latency_ns_max = val;
	of_property_read_u32(np, "voltage-tolerance",
			     &opp_table->voltage_tolerance_v1);

	if (of_find_property(np, "#power-domain-cells", NULL))
		opp_table->is_genpd = true;

	/* Get OPP table node */
	opp_np = _opp_of_get_opp_desc_node(np, index);
	of_node_put(np);

	if (!opp_np)
		return;

	if (of_property_read_bool(opp_np, "opp-shared"))
		opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
	else
		opp_table->shared_opp = OPP_TABLE_ACCESS_EXCLUSIVE;

	opp_table->np = opp_np;

	_opp_table_alloc_required_tables(opp_table, dev, opp_np);
	of_node_put(opp_np);
}

void _of_clear_opp_table(struct opp_table *opp_table)
{
	_opp_table_free_required_tables(opp_table);
}

/*
 * Release all resources previously acquired with a call to
 * _of_opp_alloc_required_opps().
 */
void _of_opp_free_required_opps(struct opp_table *opp_table,
				struct dev_pm_opp *opp)
{
	struct dev_pm_opp **required_opps = opp->required_opps;
	int i;

	if (!required_opps)
		return;

	for (i = 0; i < opp_table->required_opp_count; i++) {
		if (!required_opps[i])
			break;

		/* Put the reference back */
		dev_pm_opp_put(required_opps[i]);
	}

	kfree(required_opps);
	opp->required_opps = NULL;
}

/* Populate all required OPPs which are part of "required-opps" list */
static int _of_opp_alloc_required_opps(struct opp_table *opp_table,
				       struct dev_pm_opp *opp)
{
	struct dev_pm_opp **required_opps;
	struct opp_table *required_table;
	struct device_node *np;
	int i, ret, count = opp_table->required_opp_count;

	if (!count)
		return 0;

	required_opps = kcalloc(count, sizeof(*required_opps), GFP_KERNEL);
	if (!required_opps)
		return -ENOMEM;

	opp->required_opps = required_opps;

	for (i = 0; i < count; i++) {
		required_table = opp_table->required_opp_tables[i];

		np = of_parse_required_opp(opp->np, i);
		if (unlikely(!np)) {
			ret = -ENODEV;
			goto free_required_opps;
		}

		required_opps[i] = _find_opp_of_np(required_table, np);
		of_node_put(np);

		if (!required_opps[i]) {
			pr_err("%s: Unable to find required OPP node: %pOF (%d)\n",
			       __func__, opp->np, i);
			ret = -ENODEV;
			goto free_required_opps;
		}
	}

	return 0;

free_required_opps:
	_of_opp_free_required_opps(opp_table, opp);

	return ret;
}

static int _bandwidth_supported(struct device *dev, struct opp_table *opp_table)
{
	struct device_node *np, *opp_np;
	struct property *prop;

	if (!opp_table) {
		np = of_node_get(dev->of_node);
		if (!np)
			return -ENODEV;

		opp_np = _opp_of_get_opp_desc_node(np, 0);
		of_node_put(np);
	} else {
		opp_np = of_node_get(opp_table->np);
	}

	/* Let's not fail in case we are parsing opp-v1 bindings */
	if (!opp_np)
		return 0;

	/* Checking only first OPP is sufficient */
	np = of_get_next_available_child(opp_np, NULL);
	if (!np) {
		dev_err(dev, "OPP table empty\n");
		of_node_put(opp_np);
		return -EINVAL;
	}
	of_node_put(opp_np);

	prop = of_find_property(np, "opp-peak-kBps", NULL);
	of_node_put(np);

	if (!prop || !prop->length)
		return 0;

	return 1;
}
int dev_pm_opp_of_find_icc_paths(struct device *dev,
				 struct opp_table *opp_table)
{
	struct device_node *np;
	int ret, i, count, num_paths;
	struct icc_path **paths;

	ret = _bandwidth_supported(dev, opp_table);
	if (ret <= 0)
		return ret;

	ret = 0;

	np = of_node_get(dev->of_node);
	if (!np)
		return 0;

	count = of_count_phandle_with_args(np, "interconnects",
					   "#interconnect-cells");
	of_node_put(np);
	if (count < 0)
		return 0;

	/* two phandles when #interconnect-cells = <1> */
	if (count % 2) {
		dev_err(dev, "%s: Invalid interconnects values\n", __func__);
		return -EINVAL;
	}

	num_paths = count / 2;
	paths = kcalloc(num_paths, sizeof(*paths), GFP_KERNEL);
	if (!paths)
		return -ENOMEM;

	for (i = 0; i < num_paths; i++) {
		paths[i] = of_icc_get_by_index(dev, i);
		if (IS_ERR(paths[i])) {
			ret = PTR_ERR(paths[i]);
			if (ret != -EPROBE_DEFER) {
				dev_err(dev, "%s: Unable to get path%d: %d\n",
					__func__, i, ret);
			}
			goto err;
		}
	}

	if (opp_table) {
		opp_table->paths = paths;
		opp_table->path_count = num_paths;
		return 0;
	}

err:
	while (i--)
		icc_put(paths[i]);

	kfree(paths);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_find_icc_paths);
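/*
 * Illustrative sketch of the properties consumed by
 * dev_pm_opp_of_find_icc_paths() above (names and values are examples): each
 * interconnect path contributes two phandle+specifier entries when
 * #interconnect-cells = <1>, hence the "count % 2" check, and each OPP then
 * carries one bandwidth value per path.
 *
 *	interconnects = <&noc MASTER_CPU &noc SLAVE_DDR>;
 *
 *	opp-800000000 {
 *		opp-hz = /bits/ 64 <800000000>;
 *		opp-peak-kBps = <1600000>;
 *		opp-avg-kBps = <800000>;
 *	};
 */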
static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
			      struct device_node *np)
{
	unsigned int levels = opp_table->supported_hw_count;
	int count, versions, ret, i, j;
	u32 val;

	if (!opp_table->supported_hw) {
		/*
		 * In the case that no supported_hw has been set by the
		 * platform but there is an opp-supported-hw value set for
		 * an OPP then the OPP should not be enabled as there is
		 * no way to see if the hardware supports it.
		 */
		if (of_find_property(np, "opp-supported-hw", NULL))
			return false;
		else
			return true;
	}

	count = of_property_count_u32_elems(np, "opp-supported-hw");
	if (count <= 0 || count % levels) {
		dev_err(dev, "%s: Invalid opp-supported-hw property (%d)\n",
			__func__, count);
		return false;
	}

	versions = count / levels;

	/* All levels in at least one of the versions should match */
	for (i = 0; i < versions; i++) {
		bool supported = true;

		for (j = 0; j < levels; j++) {
			ret = of_property_read_u32_index(np, "opp-supported-hw",
							 i * levels + j, &val);
			if (ret) {
				dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
					 __func__, i * levels + j, ret);
				return false;
			}

			/* Check if the level is supported */
			if (!(val & opp_table->supported_hw[j])) {
				supported = false;
				break;
			}
		}

		if (supported)
			return true;
	}

	return false;
}
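/*
 * Worked example for the matching done in _opp_is_supported() above (values
 * are illustrative): with opp_table->supported_hw = { 0xF, 0x3 }, i.e. two
 * levels set by the platform, and an OPP node carrying
 *
 *	opp-supported-hw = <0x1 0x1>, <0x8 0x4>;
 *
 * the property holds two versions of two levels each. Version 0 matches
 * (0x1 & 0xF and 0x1 & 0x3 are both non-zero), so the OPP is enabled even
 * though version 1 fails at the second level (0x4 & 0x3 == 0).
 */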
static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
			      struct opp_table *opp_table)
{
	u32 *microvolt, *microamp = NULL;
	int supplies = opp_table->regulator_count, vcount, icount, ret, i, j;
	struct property *prop = NULL;
	char name[NAME_MAX];

	/* Search for "opp-microvolt-<name>" */
	if (opp_table->prop_name) {
		snprintf(name, sizeof(name), "opp-microvolt-%s",
			 opp_table->prop_name);
		prop = of_find_property(opp->np, name, NULL);
	}

	if (!prop) {
		/* Search for "opp-microvolt" */
		sprintf(name, "opp-microvolt");
		prop = of_find_property(opp->np, name, NULL);

		/* Missing property isn't a problem, but an invalid entry is */
		if (!prop) {
			if (unlikely(supplies == -1)) {
				/* Initialize regulator_count */
				opp_table->regulator_count = 0;
				return 0;
			}

			if (!supplies)
				return 0;

			dev_err(dev, "%s: opp-microvolt missing although OPP managing regulators\n",
				__func__);
			return -EINVAL;
		}
	}

	if (unlikely(supplies == -1)) {
		/* Initialize regulator_count */
		supplies = opp_table->regulator_count = 1;
	} else if (unlikely(!supplies)) {
		dev_err(dev, "%s: opp-microvolt wasn't expected\n", __func__);
		return -EINVAL;
	}

	vcount = of_property_count_u32_elems(opp->np, name);
	if (vcount < 0) {
		dev_err(dev, "%s: Invalid %s property (%d)\n",
			__func__, name, vcount);
		return vcount;
	}

	/* There can be one or three elements per supply */
	if (vcount != supplies && vcount != supplies * 3) {
		dev_err(dev, "%s: Invalid number of elements in %s property (%d) with supplies (%d)\n",
			__func__, name, vcount, supplies);
		return -EINVAL;
	}

	microvolt = kmalloc_array(vcount, sizeof(*microvolt), GFP_KERNEL);
	if (!microvolt)
		return -ENOMEM;

	ret = of_property_read_u32_array(opp->np, name, microvolt, vcount);
	if (ret) {
		dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
		ret = -EINVAL;
		goto free_microvolt;
	}

	/* Search for "opp-microamp-<name>" */
	prop = NULL;
	if (opp_table->prop_name) {
		snprintf(name, sizeof(name), "opp-microamp-%s",
			 opp_table->prop_name);
		prop = of_find_property(opp->np, name, NULL);
	}

	if (!prop) {
		/* Search for "opp-microamp" */
		sprintf(name, "opp-microamp");
		prop = of_find_property(opp->np, name, NULL);
	}

	if (prop) {
		icount = of_property_count_u32_elems(opp->np, name);
		if (icount < 0) {
			dev_err(dev, "%s: Invalid %s property (%d)\n", __func__,
				name, icount);
			ret = icount;
			goto free_microvolt;
		}

		if (icount != supplies) {
			dev_err(dev, "%s: Invalid number of elements in %s property (%d) with supplies (%d)\n",
				__func__, name, icount, supplies);
			ret = -EINVAL;
			goto free_microvolt;
		}

		microamp = kmalloc_array(icount, sizeof(*microamp), GFP_KERNEL);
		if (!microamp) {
			ret = -ENOMEM;
			goto free_microvolt;
		}

		ret = of_property_read_u32_array(opp->np, name, microamp,
						 icount);
		if (ret) {
			dev_err(dev, "%s: error parsing %s: %d\n", __func__,
				name, ret);
			ret = -EINVAL;
			goto free_microamp;
		}
	}

	for (i = 0, j = 0; i < supplies; i++) {
		opp->supplies[i].u_volt = microvolt[j++];

		if (vcount == supplies) {
			opp->supplies[i].u_volt_min = opp->supplies[i].u_volt;
			opp->supplies[i].u_volt_max = opp->supplies[i].u_volt;
		} else {
			opp->supplies[i].u_volt_min = microvolt[j++];
			opp->supplies[i].u_volt_max = microvolt[j++];
		}

		if (microamp)
			opp->supplies[i].u_amp = microamp[i];
	}

free_microamp:
	kfree(microamp);
free_microvolt:
	kfree(microvolt);

	return ret;
}
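/*
 * Illustrative sketch of the supply properties parsed by opp_parse_supplies()
 * above (values are examples): with a single regulator, "opp-microvolt" may
 * carry either one value (target) or a <target min max> triplet, and
 * "opp-microamp" carries one value per supply.
 *
 *	opp-1100000000 {
 *		opp-hz = /bits/ 64 <1100000000>;
 *		opp-microvolt = <1150000 1100000 1200000>;
 *		opp-microamp = <90000>;
 *	};
 *
 * When opp_table->prop_name is set (e.g. "speed0"), "opp-microvolt-speed0" is
 * looked up first and the plain property is used as a fallback.
 */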
/**
 * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
 *				  entries
 * @dev:	device pointer used to lookup OPP table.
 *
 * Free OPPs created using static entries present in DT.
 */
void dev_pm_opp_of_remove_table(struct device *dev)
{
	dev_pm_opp_remove_table(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);

static int _read_bw(struct dev_pm_opp *new_opp, struct opp_table *table,
		    struct device_node *np, bool peak)
{
	const char *name = peak ? "opp-peak-kBps" : "opp-avg-kBps";
	struct property *prop;
	int i, count, ret;
	u32 *bw;

	prop = of_find_property(np, name, NULL);
	if (!prop)
		return -ENODEV;

	count = prop->length / sizeof(u32);
	if (table->path_count != count) {
		pr_err("%s: Mismatch between %s and paths (%d %d)\n",
		       __func__, name, count, table->path_count);
		return -EINVAL;
	}

	bw = kmalloc_array(count, sizeof(*bw), GFP_KERNEL);
	if (!bw)
		return -ENOMEM;

	ret = of_property_read_u32_array(np, name, bw, count);
	if (ret) {
		pr_err("%s: Error parsing %s: %d\n", __func__, name, ret);
		goto out;
	}

	for (i = 0; i < count; i++) {
		if (peak)
			new_opp->bandwidth[i].peak = kBps_to_icc(bw[i]);
		else
			new_opp->bandwidth[i].avg = kBps_to_icc(bw[i]);
	}

out:
	kfree(bw);
	return ret;
}

static int _read_opp_key(struct dev_pm_opp *new_opp, struct opp_table *table,
			 struct device_node *np, bool *rate_not_available)
{
	bool found = false;
	u64 rate;
	int ret;

	ret = of_property_read_u64(np, "opp-hz", &rate);
	if (!ret) {
		/*
		 * Rate is defined as an unsigned long in clk API, and so
		 * casting explicitly to its type. Must be fixed once rate is 64
		 * bit guaranteed in clk API.
		 */
		new_opp->rate = (unsigned long)rate;
		found = true;
	}
	*rate_not_available = !!ret;

	/*
	 * Bandwidth consists of peak and average (optional) values:
	 * opp-peak-kBps = <path1_value path2_value>;
	 * opp-avg-kBps = <path1_value path2_value>;
	 */
	ret = _read_bw(new_opp, table, np, true);
	if (!ret) {
		found = true;
		ret = _read_bw(new_opp, table, np, false);
	}

	/* The properties were found but we failed to parse them */
	if (ret && ret != -ENODEV)
		return ret;

	if (!of_property_read_u32(np, "opp-level", &new_opp->level))
		found = true;

	if (found)
		return 0;

	return ret;
}
/**
 * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
 * @opp_table:	OPP table
 * @dev:	device for which we do this operation
 * @np:		device node
 *
 * This function adds an opp definition to the opp table and returns status. The
 * opp can be controlled using dev_pm_opp_enable/disable functions and may be
 * removed by dev_pm_opp_remove.
 *
 * Return:
 * Valid OPP pointer:
 *		On success
 * NULL:
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 *		OR if the OPP is not supported by hardware.
 * ERR_PTR(-EEXIST):
 *		Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * ERR_PTR(-ENOMEM):
 *		Memory allocation failure
 * ERR_PTR(-EINVAL):
 *		Failed parsing the OPP node
 */
static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
					     struct device *dev, struct device_node *np)
{
	struct dev_pm_opp *new_opp;
	u64 rate = 0;
	u32 val;
	int ret;
	bool rate_not_available = false;

	new_opp = _opp_allocate(opp_table);
	if (!new_opp)
		return ERR_PTR(-ENOMEM);

	ret = _read_opp_key(new_opp, opp_table, np, &rate_not_available);
	if (ret < 0 && !opp_table->is_genpd) {
		dev_err(dev, "%s: opp key field not found\n", __func__);
		goto free_opp;
	}

	/* Check if the OPP supports hardware's hierarchy of versions or not */
	if (!_opp_is_supported(dev, opp_table, np)) {
		dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
		goto free_opp;
	}

	new_opp->turbo = of_property_read_bool(np, "turbo-mode");

	new_opp->np = np;
	new_opp->dynamic = false;
	new_opp->available = true;

	ret = _of_opp_alloc_required_opps(opp_table, new_opp);
	if (ret)
		goto free_opp;

	if (!of_property_read_u32(np, "clock-latency-ns", &val))
		new_opp->clock_latency_ns = val;

	ret = opp_parse_supplies(new_opp, dev, opp_table);
	if (ret)
		goto free_required_opps;

	if (opp_table->is_genpd)
		new_opp->pstate = pm_genpd_opp_to_performance_state(dev, new_opp);

	ret = _opp_add(dev, new_opp, opp_table, rate_not_available);
	if (ret) {
		/* Don't return error for duplicate OPPs */
		if (ret == -EBUSY)
			ret = 0;
		goto free_required_opps;
	}

	/* OPP to select on device suspend */
	if (of_property_read_bool(np, "opp-suspend")) {
		if (opp_table->suspend_opp) {
			/* Pick the OPP with higher rate as suspend OPP */
			if (new_opp->rate > opp_table->suspend_opp->rate) {
				opp_table->suspend_opp->suspend = false;
				new_opp->suspend = true;
				opp_table->suspend_opp = new_opp;
			}
		} else {
			new_opp->suspend = true;
			opp_table->suspend_opp = new_opp;
		}
	}

	if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max)
		opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;

	pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
		 __func__, new_opp->turbo, new_opp->rate,
		 new_opp->supplies[0].u_volt, new_opp->supplies[0].u_volt_min,
		 new_opp->supplies[0].u_volt_max, new_opp->clock_latency_ns);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp);
	return new_opp;

free_required_opps:
	_of_opp_free_required_opps(opp_table, new_opp);
free_opp:
	_opp_free(new_opp);

	return ERR_PTR(ret);
}

/* Initializes OPP tables based on new bindings */
static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
{
	struct device_node *np;
	int ret, count = 0;
	struct dev_pm_opp *opp;

	/* OPP table is already initialized for the device */
	mutex_lock(&opp_table->lock);
	if (opp_table->parsed_static_opps) {
		opp_table->parsed_static_opps++;
		mutex_unlock(&opp_table->lock);
		return 0;
	}

	opp_table->parsed_static_opps = 1;
	mutex_unlock(&opp_table->lock);

	/* We have opp-table node now, iterate over it and add OPPs */
	for_each_available_child_of_node(opp_table->np, np) {
		opp = _opp_add_static_v2(opp_table, dev, np);
		if (IS_ERR(opp)) {
			ret = PTR_ERR(opp);
			dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
				ret);
			of_node_put(np);
			goto remove_static_opp;
		} else if (opp) {
			count++;
		}
	}

	/* There should be one or more OPPs defined */
	if (WARN_ON(!count)) {
		ret = -ENOENT;
		goto remove_static_opp;
	}

	list_for_each_entry(opp, &opp_table->opp_list, node) {
		/* Any non-zero performance state would enable the feature */
		if (opp->pstate) {
			opp_table->genpd_performance_state = true;
			break;
		}
	}

	return 0;

remove_static_opp:
	_opp_remove_all_static(opp_table);

	return ret;
}
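/*
 * Illustrative sketch of the deprecated v1 binding parsed by
 * _of_add_opp_table_v1() below: a flat list of <kHz uV> tuples in the device
 * node itself (values are examples).
 *
 *	cpu@0 {
 *		...
 *		operating-points = <
 *			998400	1075000
 *			1209600	1150000
 *		>;
 *	};
 */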
/* Initializes OPP tables based on old-deprecated bindings */
static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table)
{
	const struct property *prop;
	const __be32 *val;
	int nr, ret = 0;

	mutex_lock(&opp_table->lock);
	if (opp_table->parsed_static_opps) {
		opp_table->parsed_static_opps++;
		mutex_unlock(&opp_table->lock);
		return 0;
	}

	opp_table->parsed_static_opps = 1;
	mutex_unlock(&opp_table->lock);

	prop = of_find_property(dev->of_node, "operating-points", NULL);
	if (!prop) {
		ret = -ENODEV;
		goto remove_static_opp;
	}
	if (!prop->value) {
		ret = -ENODATA;
		goto remove_static_opp;
	}

	/*
	 * Each OPP is a set of tuples consisting of frequency and
	 * voltage like <freq-kHz vol-uV>.
	 */
	nr = prop->length / sizeof(u32);
	if (nr % 2) {
		dev_err(dev, "%s: Invalid OPP table\n", __func__);
		ret = -EINVAL;
		goto remove_static_opp;
	}

	val = prop->value;
	while (nr) {
		unsigned long freq = be32_to_cpup(val++) * 1000;
		unsigned long volt = be32_to_cpup(val++);

		ret = _opp_add_v1(opp_table, dev, freq, volt, false);
		if (ret) {
			dev_err(dev, "%s: Failed to add OPP %ld (%d)\n",
				__func__, freq, ret);
			goto remove_static_opp;
		}
		nr -= 2;
	}

	return 0;

remove_static_opp:
	_opp_remove_all_static(opp_table);

	return ret;
}

/**
 * dev_pm_opp_of_add_table() - Initialize opp table from device tree
 * @dev:	device pointer used to lookup OPP table.
 *
 * Register the initial OPP table with the OPP library for given device.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 * -ENODEV	when 'operating-points' property is not found or is invalid data
 *		in device node.
 * -ENODATA	when empty 'operating-points' property is found
 * -EINVAL	when invalid entries are found in opp-v2 table
 */
int dev_pm_opp_of_add_table(struct device *dev)
{
	struct opp_table *opp_table;
	int ret;

	opp_table = dev_pm_opp_get_opp_table_indexed(dev, 0);
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	/*
	 * OPPs have two versions of bindings now. Also try the old (v1)
	 * bindings for backward compatibility with older dtbs.
	 */
	if (opp_table->np)
		ret = _of_add_opp_table_v2(dev, opp_table);
	else
		ret = _of_add_opp_table_v1(dev, opp_table);

	if (ret)
		dev_pm_opp_put_opp_table(opp_table);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
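/*
 * Minimal usage sketch for the helper above (error handling trimmed,
 * "priv->dev" is a hypothetical driver device):
 *
 *	ret = dev_pm_opp_of_add_table(priv->dev);
 *	if (ret)
 *		return ret;
 *
 *	...
 *
 *	dev_pm_opp_of_remove_table(priv->dev);
 */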
/**
 * dev_pm_opp_of_add_table_indexed() - Initialize indexed opp table from device tree
 * @dev:	device pointer used to lookup OPP table.
 * @index:	Index number.
 *
 * Register the initial OPP table with the OPP library for given device only
 * using the "operating-points-v2" property.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 * -ENODEV	when 'operating-points' property is not found or is invalid data
 *		in device node.
 * -ENODATA	when empty 'operating-points' property is found
 * -EINVAL	when invalid entries are found in opp-v2 table
 */
int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
{
	struct opp_table *opp_table;
	int ret, count;

	if (index) {
		/*
		 * If only one phandle is present, then the same OPP table
		 * applies for all index requests.
		 */
		count = of_count_phandle_with_args(dev->of_node,
						   "operating-points-v2", NULL);
		if (count == 1)
			index = 0;
	}

	opp_table = dev_pm_opp_get_opp_table_indexed(dev, index);
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	ret = _of_add_opp_table_v2(dev, opp_table);
	if (ret)
		dev_pm_opp_put_opp_table(opp_table);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table_indexed);

/* CPU device specific helpers */

/**
 * dev_pm_opp_of_cpumask_remove_table() - Removes OPP table for @cpumask
 * @cpumask:	cpumask for which OPP table needs to be removed
 *
 * This removes the OPP tables for CPUs present in the @cpumask.
 * This should be used only to remove static entries created from DT.
 */
void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask)
{
	_dev_pm_opp_cpumask_remove_table(cpumask, -1);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);

/**
 * dev_pm_opp_of_cpumask_add_table() - Adds OPP table for @cpumask
 * @cpumask:	cpumask for which OPP table needs to be added.
 *
 * This adds the OPP tables for CPUs present in the @cpumask.
 */
int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
{
	struct device *cpu_dev;
	int cpu, ret;

	if (WARN_ON(cpumask_empty(cpumask)))
		return -ENODEV;

	for_each_cpu(cpu, cpumask) {
		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			pr_err("%s: failed to get cpu%d device\n", __func__,
			       cpu);
			ret = -ENODEV;
			goto remove_table;
		}

		ret = dev_pm_opp_of_add_table(cpu_dev);
		if (ret) {
			/*
			 * OPP may get registered dynamically, don't print error
			 * message here.
			 */
			pr_debug("%s: couldn't find opp table for cpu:%d, %d\n",
				 __func__, cpu, ret);

			goto remove_table;
		}
	}

	return 0;

remove_table:
	/* Free all other OPPs */
	_dev_pm_opp_cpumask_remove_table(cpumask, cpu);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
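/*
 * Minimal usage sketch for the cpumask helpers above, assuming a
 * cpufreq-style driver that manages the CPUs in "policy->cpus" (hypothetical
 * context):
 *
 *	ret = dev_pm_opp_of_cpumask_add_table(policy->cpus);
 *	if (ret)
 *		return ret;
 *
 *	...
 *
 *	dev_pm_opp_of_cpumask_remove_table(policy->cpus);
 */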
/*
 * Works only for OPP v2 bindings.
 *
 * Returns -ENOENT if operating-points-v2 bindings aren't supported.
 */
/**
 * dev_pm_opp_of_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with
 *				      @cpu_dev using operating-points-v2
 *				      bindings.
 *
 * @cpu_dev:	CPU device for which we do this operation
 * @cpumask:	cpumask to update with information of sharing CPUs
 *
 * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
 *
 * Returns -ENOENT if operating-points-v2 isn't present for @cpu_dev.
 */
int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
				   struct cpumask *cpumask)
{
	struct device_node *np, *tmp_np, *cpu_np;
	int cpu, ret = 0;

	/* Get OPP descriptor node */
	np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
	if (!np) {
		dev_dbg(cpu_dev, "%s: Couldn't find opp node.\n", __func__);
		return -ENOENT;
	}

	cpumask_set_cpu(cpu_dev->id, cpumask);

	/* OPPs are shared ? */
	if (!of_property_read_bool(np, "opp-shared"))
		goto put_cpu_node;

	for_each_possible_cpu(cpu) {
		if (cpu == cpu_dev->id)
			continue;

		cpu_np = of_cpu_device_node_get(cpu);
		if (!cpu_np) {
			dev_err(cpu_dev, "%s: failed to get cpu%d node\n",
				__func__, cpu);
			ret = -ENOENT;
			goto put_cpu_node;
		}

		/* Get OPP descriptor node */
		tmp_np = _opp_of_get_opp_desc_node(cpu_np, 0);
		if (!tmp_np) {
			pr_err("%pOF: Couldn't find opp node\n", cpu_np);
			of_node_put(cpu_np);
			ret = -ENOENT;
			goto put_cpu_node;
		}
		of_node_put(cpu_np);

		/* CPUs are sharing opp node */
		if (np == tmp_np)
			cpumask_set_cpu(cpu, cpumask);

		of_node_put(tmp_np);
	}

put_cpu_node:
	of_node_put(np);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
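/*
 * Minimal usage sketch for the helper above ("cpu_dev" and "shared_cpus" are
 * hypothetical):
 *
 *	cpumask_var_t shared_cpus;
 *
 *	if (!zalloc_cpumask_var(&shared_cpus, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, shared_cpus);
 *	if (!ret)
 *		ret = dev_pm_opp_set_sharing_cpus(cpu_dev, shared_cpus);
 *
 *	free_cpumask_var(shared_cpus);
 */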
/**
 * of_get_required_opp_performance_state() - Search for required OPP and return its performance state.
 * @np:		Node that contains the "required-opps" property.
 * @index:	Index of the phandle to parse.
 *
 * Returns the performance state of the OPP pointed out by the "required-opps"
 * property at @index in @np.
 *
 * Return: Zero or positive performance state on success, otherwise negative
 * value on errors.
 */
int of_get_required_opp_performance_state(struct device_node *np, int index)
{
	struct dev_pm_opp *opp;
	struct device_node *required_np;
	struct opp_table *opp_table;
	int pstate = -EINVAL;

	required_np = of_parse_required_opp(np, index);
	if (!required_np)
		return -EINVAL;

	opp_table = _find_table_of_opp_np(required_np);
	if (IS_ERR(opp_table)) {
		pr_err("%s: Failed to find required OPP table %pOF: %ld\n",
		       __func__, np, PTR_ERR(opp_table));
		goto put_required_np;
	}

	opp = _find_opp_of_np(opp_table, required_np);
	if (opp) {
		pstate = opp->pstate;
		dev_pm_opp_put(opp);
	}

	dev_pm_opp_put_opp_table(opp_table);

put_required_np:
	of_node_put(required_np);

	return pstate;
}
EXPORT_SYMBOL_GPL(of_get_required_opp_performance_state);

/**
 * dev_pm_opp_get_of_node() - Gets the DT node corresponding to an opp
 * @opp:	opp for which the DT node has to be returned
 *
 * Return: DT node corresponding to the opp, NULL on failure.
 *
 * The caller needs to put the node with of_node_put() after using it.
 */
struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return NULL;
	}

	return of_node_get(opp->np);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_of_node);

/*
 * Callback function provided to the Energy Model framework upon registration.
 * This computes the power estimated by @dev at @kHz if it is the frequency
 * of an existing OPP, or at the frequency of the first OPP above @kHz otherwise
 * (see dev_pm_opp_find_freq_ceil()). This function updates @kHz to the ceiled
 * frequency and @mW to the associated power. The power is estimated as
 * P = C * V^2 * f with C being the device's capacitance and V and f
 * respectively the voltage and frequency of the OPP.
 *
 * Returns -EINVAL if the power calculation failed because of missing
 * parameters, 0 otherwise.
 */
static int __maybe_unused _get_power(unsigned long *mW, unsigned long *kHz,
				     struct device *dev)
{
	struct dev_pm_opp *opp;
	struct device_node *np;
	unsigned long mV, Hz;
	u32 cap;
	u64 tmp;
	int ret;

	np = of_node_get(dev->of_node);
	if (!np)
		return -EINVAL;

	ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap);
	of_node_put(np);
	if (ret)
		return -EINVAL;

	Hz = *kHz * 1000;
	opp = dev_pm_opp_find_freq_ceil(dev, &Hz);
	if (IS_ERR(opp))
		return -EINVAL;

	mV = dev_pm_opp_get_voltage(opp) / 1000;
	dev_pm_opp_put(opp);
	if (!mV)
		return -EINVAL;

	tmp = (u64)cap * mV * mV * (Hz / 1000000);
	do_div(tmp, 1000000000);

	*mW = (unsigned long)tmp;
	*kHz = Hz / 1000;

	return 0;
}

/**
 * dev_pm_opp_of_register_em() - Attempt to register an Energy Model
 * @dev		: Device for which an Energy Model has to be registered
 * @cpus	: CPUs for which an Energy Model has to be registered. For
 *		other type of devices it should be set to NULL.
 *
 * This checks whether the "dynamic-power-coefficient" devicetree property has
 * been specified, and tries to register an Energy Model with it if it has.
 * Having this property means the voltages are known for OPPs and the EM
 * might be calculated.
 */
int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus)
{
	struct em_data_callback em_cb = EM_DATA_CB(_get_power);
	struct device_node *np;
	int ret, nr_opp;
	u32 cap;

	if (IS_ERR_OR_NULL(dev)) {
		ret = -EINVAL;
		goto failed;
	}

	nr_opp = dev_pm_opp_get_opp_count(dev);
	if (nr_opp <= 0) {
		ret = -EINVAL;
		goto failed;
	}

	np = of_node_get(dev->of_node);
	if (!np) {
		ret = -EINVAL;
		goto failed;
	}

	/*
	 * Register an EM only if the 'dynamic-power-coefficient' property is
	 * set in devicetree. It is assumed the voltage values are known if that
	 * property is set since it is useless otherwise. If voltages are not
	 * known, just let the EM registration fail with an error to alert the
	 * user about the inconsistent configuration.
	 */
	ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap);
	of_node_put(np);
	if (ret || !cap) {
		dev_dbg(dev, "Couldn't find proper 'dynamic-power-coefficient' in DT\n");
		ret = -EINVAL;
		goto failed;
	}

	ret = em_dev_register_perf_domain(dev, nr_opp, &em_cb, cpus);
	if (ret)
		goto failed;

	return 0;

failed:
	dev_dbg(dev, "Couldn't register Energy Model %d\n", ret);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_register_em);
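/*
 * Minimal registration sketch for the EM helper above, as called from a
 * cpufreq-style ->init() callback (the surrounding context is hypothetical):
 *
 *	dev_pm_opp_of_register_em(get_cpu_device(policy->cpu), policy->cpus);
 *
 * Many callers treat the call as best-effort: a failure only means no energy
 * model is available, not that the OPP table itself is unusable.
 */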