1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Generic OPP Interface
4 *
5 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
6 * Nishanth Menon
7 * Romit Dasgupta
8 * Kevin Hilman
9 */
10
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13 #include <linux/clk.h>
14 #include <linux/errno.h>
15 #include <linux/err.h>
16 #include <linux/device.h>
17 #include <linux/export.h>
18 #include <linux/pm_domain.h>
19 #include <linux/regulator/consumer.h>
20 #include <linux/slab.h>
21 #include <linux/xarray.h>
22
23 #include "opp.h"
24
25 /*
26 * The root of the list of all opp-tables. All opp_table structures branch off
27 * from here, with each opp_table containing the list of opps it supports in
28 * various states of availability.
29 */
30 LIST_HEAD(opp_tables);
31
32 /* Lock to allow exclusive modification to the device and opp lists */
33 DEFINE_MUTEX(opp_table_lock);
34 /* Flag indicating that opp_tables list is being updated at the moment */
35 static bool opp_tables_busy;
36
37 /* OPP ID allocator */
38 static DEFINE_XARRAY_ALLOC1(opp_configs);
39
40 static bool _find_opp_dev(const struct device *dev, struct opp_table *opp_table)
41 {
42 struct opp_device *opp_dev;
43
44 guard(mutex)(&opp_table->lock);
45
46 list_for_each_entry(opp_dev, &opp_table->dev_list, node)
47 if (opp_dev->dev == dev)
48 return true;
49
50 return false;
51 }
52
53 static struct opp_table *_find_opp_table_unlocked(struct device *dev)
54 {
55 struct opp_table *opp_table;
56
57 list_for_each_entry(opp_table, &opp_tables, node) {
58 if (_find_opp_dev(dev, opp_table))
59 return dev_pm_opp_get_opp_table_ref(opp_table);
60 }
61
62 return ERR_PTR(-ENODEV);
63 }
64
65 /**
66 * _find_opp_table() - find opp_table struct using device pointer
67 * @dev: device pointer used to lookup OPP table
68 *
69 * Search OPP table for one containing matching device.
70 *
71 * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
72 * -EINVAL based on type of error.
73 *
74 * The callers must call dev_pm_opp_put_opp_table() after the table is used.
75 */
76 struct opp_table *_find_opp_table(struct device *dev)
77 {
78 if (IS_ERR_OR_NULL(dev)) {
79 pr_err("%s: Invalid parameters\n", __func__);
80 return ERR_PTR(-EINVAL);
81 }
82
83 guard(mutex)(&opp_table_lock);
84 return _find_opp_table_unlocked(dev);
85 }
86
87 /*
88 * Returns true if the OPP table doesn't use multiple clocks, else returns false with a WARN.
89 *
90 * We don't force clk_count == 1 here as there are users who don't have a clock
91 * representation in the OPP table and manage the clock configuration themselves
92 * in a platform-specific way.
93 */
94 static bool assert_single_clk(struct opp_table *opp_table,
95 unsigned int __always_unused index)
96 {
97 return !WARN_ON(opp_table->clk_count > 1);
98 }
99
100 /*
101 * Returns true if clock table is large enough to contain the clock index.
102 */
103 static bool assert_clk_index(struct opp_table *opp_table,
104 unsigned int index)
105 {
106 return opp_table->clk_count > index;
107 }
108
109 /*
110 * Returns true if bandwidth table is large enough to contain the bandwidth index.
111 */
112 static bool assert_bandwidth_index(struct opp_table *opp_table,
113 unsigned int index)
114 {
115 return opp_table->path_count > index;
116 }
117
118 /**
119 * dev_pm_opp_get_bw() - Gets the bandwidth corresponding to an opp
120 * @opp: opp for which the bandwidth has to be returned
121 * @peak: select peak or average bandwidth
122 * @index: bandwidth index
123 *
124 * Return: bandwidth in kBps, else return 0
125 */
126 unsigned long dev_pm_opp_get_bw(struct dev_pm_opp *opp, bool peak, int index)
127 {
128 if (IS_ERR_OR_NULL(opp)) {
129 pr_err("%s: Invalid parameters\n", __func__);
130 return 0;
131 }
132
133 if (index >= opp->opp_table->path_count)
134 return 0;
135
136 if (!opp->bandwidth)
137 return 0;
138
139 return peak ? opp->bandwidth[index].peak : opp->bandwidth[index].avg;
140 }
141 EXPORT_SYMBOL_GPL(dev_pm_opp_get_bw);
142
143 /**
144 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
145 * @opp: opp for which the voltage has to be returned
146 *
147 * Return: voltage in micro volt corresponding to the opp, else
148 * return 0
149 *
150 * This is useful only for devices with a single power supply.
151 */
152 unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
153 {
154 if (IS_ERR_OR_NULL(opp)) {
155 pr_err("%s: Invalid parameters\n", __func__);
156 return 0;
157 }
158
159 return opp->supplies[0].u_volt;
160 }
161 EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
162
163 /**
164 * dev_pm_opp_get_supplies() - Gets the supply information corresponding to an opp
165 * @opp: opp for which the supply information has to be returned
166 * @supplies: Placeholder for copying the supply information.
167 *
168 * Return: negative error number on failure, 0 otherwise on success after
169 * setting @supplies.
170 *
171 * This can be used for devices with any number of power supplies. The caller
172 * must ensure that the @supplies array contains space for each regulator.
173 */
174 int dev_pm_opp_get_supplies(struct dev_pm_opp *opp,
175 struct dev_pm_opp_supply *supplies)
176 {
177 if (IS_ERR_OR_NULL(opp) || !supplies) {
178 pr_err("%s: Invalid parameters\n", __func__);
179 return -EINVAL;
180 }
181
182 memcpy(supplies, opp->supplies,
183 sizeof(*supplies) * opp->opp_table->regulator_count);
184 return 0;
185 }
186 EXPORT_SYMBOL_GPL(dev_pm_opp_get_supplies);
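/*
 * Editorial example (not part of the OPP core): a minimal sketch of a caller
 * copying the supply information of an OPP. The consumer is assumed to know
 * its own regulator count (for instance because it registered the regulator
 * names itself); the helper name and "num_regulators" are hypothetical.
 */
static int __maybe_unused example_get_first_uv(struct dev_pm_opp *opp,
                                               unsigned int num_regulators,
                                               unsigned long *uv)
{
        struct dev_pm_opp_supply *supplies;
        int ret;

        supplies = kcalloc(num_regulators, sizeof(*supplies), GFP_KERNEL);
        if (!supplies)
                return -ENOMEM;

        ret = dev_pm_opp_get_supplies(opp, supplies);
        if (!ret)
                *uv = supplies[0].u_volt;

        kfree(supplies);
        return ret;
}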
187
188 /**
189 * dev_pm_opp_get_power() - Gets the power corresponding to an opp
190 * @opp: opp for which the power has to be returned
191 *
192 * Return: power in micro watt corresponding to the opp, else
193 * return 0
194 *
195 * This is useful only for devices with a single power supply.
196 */
197 unsigned long dev_pm_opp_get_power(struct dev_pm_opp *opp)
198 {
199 unsigned long opp_power = 0;
200 int i;
201
202 if (IS_ERR_OR_NULL(opp)) {
203 pr_err("%s: Invalid parameters\n", __func__);
204 return 0;
205 }
206 for (i = 0; i < opp->opp_table->regulator_count; i++)
207 opp_power += opp->supplies[i].u_watt;
208
209 return opp_power;
210 }
211 EXPORT_SYMBOL_GPL(dev_pm_opp_get_power);
212
213 /**
214 * dev_pm_opp_get_freq_indexed() - Gets the frequency corresponding to an
215 * available opp with specified index
216 * @opp: opp for which the frequency has to be returned
217 * @index: index of the frequency within the required opp
218 *
219 * Return: frequency in hertz corresponding to the opp with specified index,
220 * else return 0
221 */
222 unsigned long dev_pm_opp_get_freq_indexed(struct dev_pm_opp *opp, u32 index)
223 {
224 if (IS_ERR_OR_NULL(opp) || index >= opp->opp_table->clk_count) {
225 pr_err("%s: Invalid parameters\n", __func__);
226 return 0;
227 }
228
229 return opp->rates[index];
230 }
231 EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq_indexed);
232
233 /**
234 * dev_pm_opp_get_level() - Gets the level corresponding to an available opp
235 * @opp: opp for which the level value has to be returned
236 *
237 * Return: level read from device tree corresponding to the opp, else
238 * return U32_MAX.
239 */
240 unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp)
241 {
242 if (IS_ERR_OR_NULL(opp) || !opp->available) {
243 pr_err("%s: Invalid parameters\n", __func__);
244 return U32_MAX;
245 }
246
247 return opp->level;
248 }
249 EXPORT_SYMBOL_GPL(dev_pm_opp_get_level);
250
251 /**
252 * dev_pm_opp_get_required_pstate() - Gets the required performance state
253 * corresponding to an available opp
254 * @opp: opp for which the performance state has to be returned
255 * @index: index of the required opp
256 *
257 * Return: performance state read from device tree corresponding to the
258 * required opp, else return 0.
259 */
260 unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp,
261 unsigned int index)
262 {
263 if (IS_ERR_OR_NULL(opp) || !opp->available ||
264 index >= opp->opp_table->required_opp_count) {
265 pr_err("%s: Invalid parameters\n", __func__);
266 return 0;
267 }
268
269 /* required-opps not fully initialized yet */
270 if (lazy_linking_pending(opp->opp_table))
271 return 0;
272
273 /* The required OPP table must belong to a genpd */
274 if (unlikely(!opp->opp_table->required_opp_tables[index]->is_genpd)) {
275 pr_err("%s: Performance state is only valid for genpds.\n", __func__);
276 return 0;
277 }
278
279 return opp->required_opps[index]->level;
280 }
281 EXPORT_SYMBOL_GPL(dev_pm_opp_get_required_pstate);
282
283 /**
284 * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
285 * @opp: opp for which turbo mode is being verified
286 *
287 * Turbo OPPs are not for normal use, and can be enabled (under certain
288 * conditions) for short durations to finish high-throughput work
289 * quickly. Running on them for longer times may overheat the chip.
290 *
291 * Return: true if opp is turbo opp, else false.
292 */
293 bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
294 {
295 if (IS_ERR_OR_NULL(opp) || !opp->available) {
296 pr_err("%s: Invalid parameters\n", __func__);
297 return false;
298 }
299
300 return opp->turbo;
301 }
302 EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
303
304 /**
305 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
306 * @dev: device for which we do this operation
307 *
308 * Return: This function returns the max clock latency in nanoseconds.
309 */
310 unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
311 {
312 struct opp_table *opp_table __free(put_opp_table) =
313 _find_opp_table(dev);
314
315 if (IS_ERR(opp_table))
316 return 0;
317
318 return opp_table->clock_latency_ns_max;
319 }
320 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
321
322 /**
323 * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
324 * @dev: device for which we do this operation
325 *
326 * Return: This function returns the max voltage latency in nanoseconds.
327 */
328 unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
329 {
330 struct dev_pm_opp *opp;
331 struct regulator *reg;
332 unsigned long latency_ns = 0;
333 int ret, i, count;
334 struct {
335 unsigned long min;
336 unsigned long max;
337 } *uV;
338
339 struct opp_table *opp_table __free(put_opp_table) =
340 _find_opp_table(dev);
341
342 if (IS_ERR(opp_table))
343 return 0;
344
345 /* Regulator may not be required for the device */
346 if (!opp_table->regulators)
347 return 0;
348
349 count = opp_table->regulator_count;
350
351 uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL);
352 if (!uV)
353 return 0;
354
355 scoped_guard(mutex, &opp_table->lock) {
356 for (i = 0; i < count; i++) {
357 uV[i].min = ~0;
358 uV[i].max = 0;
359
360 list_for_each_entry(opp, &opp_table->opp_list, node) {
361 if (!opp->available)
362 continue;
363
364 if (opp->supplies[i].u_volt_min < uV[i].min)
365 uV[i].min = opp->supplies[i].u_volt_min;
366 if (opp->supplies[i].u_volt_max > uV[i].max)
367 uV[i].max = opp->supplies[i].u_volt_max;
368 }
369 }
370 }
371
372 /*
373 * The caller needs to ensure that opp_table (and hence the regulator)
374 * isn't freed, while we are executing this routine.
375 */
376 for (i = 0; i < count; i++) {
377 reg = opp_table->regulators[i];
378 ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max);
379 if (ret > 0)
380 latency_ns += ret * 1000;
381 }
382
383 kfree(uV);
384
385 return latency_ns;
386 }
387 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);
388
389 /**
390 * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
391 * nanoseconds
392 * @dev: device for which we do this operation
393 *
394 * Return: This function returns the max transition latency, in nanoseconds, to
395 * switch from one OPP to another.
396 */
397 unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
398 {
399 return dev_pm_opp_get_max_volt_latency(dev) +
400 dev_pm_opp_get_max_clock_latency(dev);
401 }
402 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
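/*
 * Editorial example: a governor-style helper that derives a polling interval
 * from the worst-case OPP transition latency, as cpufreq/devfreq governors
 * typically do. The 10x factor and the helper name are illustrative
 * assumptions, not part of the OPP API.
 */
static unsigned int __maybe_unused example_polling_interval_us(struct device *dev)
{
        unsigned long transition_ns = dev_pm_opp_get_max_transition_latency(dev);

        /* Poll an order of magnitude slower than a transition takes (ns -> us). */
        return (transition_ns / 1000) * 10;
}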
403
404 /**
405 * dev_pm_opp_get_suspend_opp_freq() - Get frequency of suspend opp in Hz
406 * @dev: device for which we do this operation
407 *
408 * Return: This function returns the frequency of the OPP marked as suspend_opp
409 * if one is available, else returns 0.
410 */
411 unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev)
412 {
413 unsigned long freq = 0;
414
415 struct opp_table *opp_table __free(put_opp_table) =
416 _find_opp_table(dev);
417
418 if (IS_ERR(opp_table))
419 return 0;
420
421 if (opp_table->suspend_opp && opp_table->suspend_opp->available)
422 freq = dev_pm_opp_get_freq(opp_table->suspend_opp);
423
424 return freq;
425 }
426 EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp_freq);
427
428 int _get_opp_count(struct opp_table *opp_table)
429 {
430 struct dev_pm_opp *opp;
431 int count = 0;
432
433 guard(mutex)(&opp_table->lock);
434
435 list_for_each_entry(opp, &opp_table->opp_list, node) {
436 if (opp->available)
437 count++;
438 }
439
440 return count;
441 }
442
443 /**
444 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
445 * @dev: device for which we do this operation
446 *
447 * Return: This function returns the number of available opps if there are any,
448 * else returns 0 if none or the corresponding error value.
449 */
450 int dev_pm_opp_get_opp_count(struct device *dev)
451 {
452 struct opp_table *opp_table __free(put_opp_table) =
453 _find_opp_table(dev);
454
455 if (IS_ERR(opp_table)) {
456 dev_dbg(dev, "%s: OPP table not found (%ld)\n",
457 __func__, PTR_ERR(opp_table));
458 return PTR_ERR(opp_table);
459 }
460
461 return _get_opp_count(opp_table);
462 }
463 EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
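/*
 * Editorial example: the usual consumer pattern built on top of
 * dev_pm_opp_get_opp_count() and dev_pm_opp_find_freq_ceil(): walk the
 * available OPPs in ascending order and build a frequency table. The helper
 * name is hypothetical; note the dev_pm_opp_put() for every OPP returned by
 * a find helper.
 */
static unsigned long *__maybe_unused example_build_freq_table(struct device *dev,
                                                              int *num)
{
        struct dev_pm_opp *opp;
        unsigned long freq = 0, *table;
        int i, count;

        count = dev_pm_opp_get_opp_count(dev);
        if (count <= 0)
                return NULL;

        table = kcalloc(count, sizeof(*table), GFP_KERNEL);
        if (!table)
                return NULL;

        for (i = 0; i < count; i++, freq++) {
                opp = dev_pm_opp_find_freq_ceil(dev, &freq);
                if (IS_ERR(opp))
                        break;

                table[i] = freq;
                dev_pm_opp_put(opp);
        }

        *num = i;
        return table;
}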
464
465 /* Helpers to read keys */
466 static unsigned long _read_freq(struct dev_pm_opp *opp, int index)
467 {
468 return opp->rates[index];
469 }
470
471 static unsigned long _read_level(struct dev_pm_opp *opp, int index)
472 {
473 return opp->level;
474 }
475
476 static unsigned long _read_bw(struct dev_pm_opp *opp, int index)
477 {
478 return opp->bandwidth[index].peak;
479 }
480
481 static unsigned long _read_opp_key(struct dev_pm_opp *opp, int index,
482 struct dev_pm_opp_key *key)
483 {
484 key->bw = opp->bandwidth ? opp->bandwidth[index].peak : 0;
485 key->freq = opp->rates[index];
486 key->level = opp->level;
487
488 return true;
489 }
490
491 /* Generic comparison helpers */
492 static bool _compare_exact(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
493 unsigned long opp_key, unsigned long key)
494 {
495 if (opp_key == key) {
496 *opp = temp_opp;
497 return true;
498 }
499
500 return false;
501 }
502
503 static bool _compare_ceil(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
504 unsigned long opp_key, unsigned long key)
505 {
506 if (opp_key >= key) {
507 *opp = temp_opp;
508 return true;
509 }
510
511 return false;
512 }
513
514 static bool _compare_floor(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
515 unsigned long opp_key, unsigned long key)
516 {
517 if (opp_key > key)
518 return true;
519
520 *opp = temp_opp;
521 return false;
522 }
523
524 static bool _compare_opp_key_exact(struct dev_pm_opp **opp,
525 struct dev_pm_opp *temp_opp, struct dev_pm_opp_key *opp_key,
526 struct dev_pm_opp_key *key)
527 {
528 bool level_match = (key->level == OPP_LEVEL_UNSET || opp_key->level == key->level);
529 bool freq_match = (key->freq == 0 || opp_key->freq == key->freq);
530 bool bw_match = (key->bw == 0 || opp_key->bw == key->bw);
531
532 if (freq_match && level_match && bw_match) {
533 *opp = temp_opp;
534 return true;
535 }
536
537 return false;
538 }
539
540 /* Generic key finding helpers */
541 static struct dev_pm_opp *_opp_table_find_key(struct opp_table *opp_table,
542 unsigned long *key, int index, bool available,
543 unsigned long (*read)(struct dev_pm_opp *opp, int index),
544 bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
545 unsigned long opp_key, unsigned long key),
546 bool (*assert)(struct opp_table *opp_table, unsigned int index))
547 {
548 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
549
550 /* Assert that the requirement is met */
551 if (assert && !assert(opp_table, index))
552 return ERR_PTR(-EINVAL);
553
554 guard(mutex)(&opp_table->lock);
555
556 list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
557 if (temp_opp->available == available) {
558 if (compare(&opp, temp_opp, read(temp_opp, index), *key))
559 break;
560 }
561 }
562
563 /* Increment the reference count of OPP */
564 if (!IS_ERR(opp)) {
565 *key = read(opp, index);
566 dev_pm_opp_get(opp);
567 }
568
569 return opp;
570 }
571
572 static struct dev_pm_opp *_opp_table_find_opp_key(struct opp_table *opp_table,
573 struct dev_pm_opp_key *key, bool available,
574 unsigned long (*read)(struct dev_pm_opp *opp, int index,
575 struct dev_pm_opp_key *key),
576 bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
577 struct dev_pm_opp_key *opp_key, struct dev_pm_opp_key *key),
578 bool (*assert)(struct opp_table *opp_table, unsigned int index))
579 {
580 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
581 struct dev_pm_opp_key temp_key;
582
583 /* Assert that the requirement is met */
584 if (!assert(opp_table, 0))
585 return ERR_PTR(-EINVAL);
586
587 guard(mutex)(&opp_table->lock);
588
589 list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
590 if (temp_opp->available == available) {
591 read(temp_opp, 0, &temp_key);
592 if (compare(&opp, temp_opp, &temp_key, key)) {
593 /* Increment the reference count of OPP */
594 dev_pm_opp_get(opp);
595 break;
596 }
597 }
598 }
599
600 return opp;
601 }
602
603 static struct dev_pm_opp *
604 _find_key(struct device *dev, unsigned long *key, int index, bool available,
605 unsigned long (*read)(struct dev_pm_opp *opp, int index),
606 bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
607 unsigned long opp_key, unsigned long key),
608 bool (*assert)(struct opp_table *opp_table, unsigned int index))
609 {
610 struct opp_table *opp_table __free(put_opp_table) =
611 _find_opp_table(dev);
612
613 if (IS_ERR(opp_table)) {
614 dev_err(dev, "%s: OPP table not found (%ld)\n", __func__,
615 PTR_ERR(opp_table));
616 return ERR_CAST(opp_table);
617 }
618
619 return _opp_table_find_key(opp_table, key, index, available, read,
620 compare, assert);
621 }
622
623 static struct dev_pm_opp *_find_key_exact(struct device *dev,
624 unsigned long key, int index, bool available,
625 unsigned long (*read)(struct dev_pm_opp *opp, int index),
626 bool (*assert)(struct opp_table *opp_table, unsigned int index))
627 {
628 /*
629 * The value of key will be updated here, but will be ignored as the
630 * caller doesn't need it.
631 */
632 return _find_key(dev, &key, index, available, read, _compare_exact,
633 assert);
634 }
635
636 static struct dev_pm_opp *_opp_table_find_key_ceil(struct opp_table *opp_table,
637 unsigned long *key, int index, bool available,
638 unsigned long (*read)(struct dev_pm_opp *opp, int index),
639 bool (*assert)(struct opp_table *opp_table, unsigned int index))
640 {
641 return _opp_table_find_key(opp_table, key, index, available, read,
642 _compare_ceil, assert);
643 }
644
645 static struct dev_pm_opp *_find_key_ceil(struct device *dev, unsigned long *key,
646 int index, bool available,
647 unsigned long (*read)(struct dev_pm_opp *opp, int index),
648 bool (*assert)(struct opp_table *opp_table, unsigned int index))
649 {
650 return _find_key(dev, key, index, available, read, _compare_ceil,
651 assert);
652 }
653
654 static struct dev_pm_opp *_find_key_floor(struct device *dev,
655 unsigned long *key, int index, bool available,
656 unsigned long (*read)(struct dev_pm_opp *opp, int index),
657 bool (*assert)(struct opp_table *opp_table, unsigned int index))
658 {
659 return _find_key(dev, key, index, available, read, _compare_floor,
660 assert);
661 }
662
663 /**
664 * dev_pm_opp_find_freq_exact() - search for an exact frequency
665 * @dev: device for which we do this operation
666 * @freq: frequency to search for
667 * @available: true/false - match for available opp
668 *
669 * Return: Searches for exact match in the opp table and returns pointer to the
670 * matching opp if found, else returns ERR_PTR in case of error and should
671 * be handled using IS_ERR. Error return values can be:
672 * EINVAL: for bad pointer
673 * ERANGE: no match found for search
674 * ENODEV: if device not found in list of registered devices
675 *
676 * Note: available is a modifier for the search. If available=true, then the
677 * match is for an exact matching frequency which is available in the stored OPP
678 * table. If false, the match is for an exact frequency which is not available.
679 *
680 * This provides a mechanism to enable an opp which is not available currently
681 * or the opposite as well.
682 *
683 * The callers are required to call dev_pm_opp_put() for the returned OPP after
684 * use.
685 */
686 struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
687 unsigned long freq, bool available)
688 {
689 return _find_key_exact(dev, freq, 0, available, _read_freq,
690 assert_single_clk);
691 }
692 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
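/*
 * Editorial example: the enable-a-disabled-OPP pattern described in the
 * kernel-doc above. A platform driver checks that a boost frequency exists in
 * the table (even though it is currently unavailable) and then enables it.
 * The helper name and policy are illustrative only.
 */
static int __maybe_unused example_enable_boost(struct device *dev,
                                               unsigned long boost_freq)
{
        struct dev_pm_opp *opp;

        opp = dev_pm_opp_find_freq_exact(dev, boost_freq, false);
        if (IS_ERR(opp))
                return PTR_ERR(opp);

        dev_pm_opp_put(opp);

        return dev_pm_opp_enable(dev, boost_freq);
}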
693
694 /**
695 * dev_pm_opp_find_key_exact() - Search for an OPP with exact key set
696 * @dev: Device for which the OPP is being searched
697 * @key: OPP key set to match
698 * @available: true/false - match for available OPP
699 *
700 * Search for an exact match of the key set in the OPP table.
701 *
702 * Return: A matching opp on success, else ERR_PTR in case of error.
703 * Possible error values:
704 * EINVAL: for bad pointers
705 * ERANGE: no match found for search
706 * ENODEV: if device not found in list of registered devices
707 *
708 * Note: 'available' is a modifier for the search. If 'available' == true,
709 * then the match is for exact matching key and is available in the stored
710 * OPP table. If false, the match is for exact key which is not available.
711 *
712 * This provides a mechanism to enable an OPP which is not available currently
713 * or the opposite as well.
714 *
715 * The callers are required to call dev_pm_opp_put() for the returned OPP after
716 * use.
717 */
718 struct dev_pm_opp *dev_pm_opp_find_key_exact(struct device *dev,
719 struct dev_pm_opp_key *key,
720 bool available)
721 {
722 struct opp_table *opp_table __free(put_opp_table) = _find_opp_table(dev);
723
724 if (IS_ERR(opp_table)) {
725 dev_err(dev, "%s: OPP table not found (%ld)\n", __func__,
726 PTR_ERR(opp_table));
727 return ERR_CAST(opp_table);
728 }
729
730 return _opp_table_find_opp_key(opp_table, key, available,
731 _read_opp_key, _compare_opp_key_exact,
732 assert_single_clk);
733 }
734 EXPORT_SYMBOL_GPL(dev_pm_opp_find_key_exact);
735
736 /**
737 * dev_pm_opp_find_freq_exact_indexed() - Search for an exact freq for the
738 * clock corresponding to the index
739 * @dev: Device for which we do this operation
740 * @freq: frequency to search for
741 * @index: Clock index
742 * @available: true/false - match for available opp
743 *
744 * Search for the matching exact OPP for the clock corresponding to the
745 * specified index from a starting freq for a device.
746 *
747 * Return: matching *opp, else returns ERR_PTR in case of error and should be
748 * handled using IS_ERR. Error return values can be:
749 * EINVAL: for bad pointer
750 * ERANGE: no match found for search
751 * ENODEV: if device not found in list of registered devices
752 *
753 * The callers are required to call dev_pm_opp_put() for the returned OPP after
754 * use.
755 */
756 struct dev_pm_opp *
757 dev_pm_opp_find_freq_exact_indexed(struct device *dev, unsigned long freq,
758 u32 index, bool available)
759 {
760 return _find_key_exact(dev, freq, index, available, _read_freq,
761 assert_clk_index);
762 }
763 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact_indexed);
764
765 static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
766 unsigned long *freq)
767 {
768 return _opp_table_find_key_ceil(opp_table, freq, 0, true, _read_freq,
769 assert_single_clk);
770 }
771
772 /**
773 * dev_pm_opp_find_freq_ceil() - Search for a rounded ceil freq
774 * @dev: device for which we do this operation
775 * @freq: Start frequency
776 *
777 * Search for the matching ceil *available* OPP from a starting freq
778 * for a device.
779 *
780 * Return: matching *opp and refreshes *freq accordingly, else returns
781 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
782 * values can be:
783 * EINVAL: for bad pointer
784 * ERANGE: no match found for search
785 * ENODEV: if device not found in list of registered devices
786 *
787 * The callers are required to call dev_pm_opp_put() for the returned OPP after
788 * use.
789 */
790 struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
791 unsigned long *freq)
792 {
793 return _find_key_ceil(dev, freq, 0, true, _read_freq, assert_single_clk);
794 }
795 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
796
797 /**
798 * dev_pm_opp_find_freq_ceil_indexed() - Search for a rounded ceil freq for the
799 * clock corresponding to the index
800 * @dev: Device for which we do this operation
801 * @freq: Start frequency
802 * @index: Clock index
803 *
804 * Search for the matching ceil *available* OPP for the clock corresponding to
805 * the specified index from a starting freq for a device.
806 *
807 * Return: matching *opp and refreshes *freq accordingly, else returns
808 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
809 * values can be:
810 * EINVAL: for bad pointer
811 * ERANGE: no match found for search
812 * ENODEV: if device not found in list of registered devices
813 *
814 * The callers are required to call dev_pm_opp_put() for the returned OPP after
815 * use.
816 */
817 struct dev_pm_opp *
818 dev_pm_opp_find_freq_ceil_indexed(struct device *dev, unsigned long *freq,
819 u32 index)
820 {
821 return _find_key_ceil(dev, freq, index, true, _read_freq,
822 assert_clk_index);
823 }
824 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_indexed);
825
826 /**
827 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
828 * @dev: device for which we do this operation
829 * @freq: Start frequency
830 *
831 * Search for the matching floor *available* OPP from a starting freq
832 * for a device.
833 *
834 * Return: matching *opp and refreshes *freq accordingly, else returns
835 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
836 * values can be:
837 * EINVAL: for bad pointer
838 * ERANGE: no match found for search
839 * ENODEV: if device not found in list of registered devices
840 *
841 * The callers are required to call dev_pm_opp_put() for the returned OPP after
842 * use.
843 */
844 struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
845 unsigned long *freq)
846 {
847 return _find_key_floor(dev, freq, 0, true, _read_freq, assert_single_clk);
848 }
849 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
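/*
 * Editorial example: walking the OPP table in descending order with
 * dev_pm_opp_find_freq_floor(), starting from ULONG_MAX and stepping just
 * below the frequency that was returned. Purely an illustrative sketch; the
 * helper name is hypothetical.
 */
static void __maybe_unused example_print_opps_desc(struct device *dev)
{
        unsigned long freq = ULONG_MAX;
        struct dev_pm_opp *opp;

        while (!IS_ERR(opp = dev_pm_opp_find_freq_floor(dev, &freq))) {
                dev_info(dev, "OPP: %lu Hz\n", freq);
                dev_pm_opp_put(opp);

                if (!freq)
                        break;
                freq--;
        }
}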
850
851 /**
852 * dev_pm_opp_find_freq_floor_indexed() - Search for a rounded floor freq for the
853 * clock corresponding to the index
854 * @dev: Device for which we do this operation
855 * @freq: Start frequency
856 * @index: Clock index
857 *
858 * Search for the matching floor *available* OPP for the clock corresponding to
859 * the specified index from a starting freq for a device.
860 *
861 * Return: matching *opp and refreshes *freq accordingly, else returns
862 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
863 * values can be:
864 * EINVAL: for bad pointer
865 * ERANGE: no match found for search
866 * ENODEV: if device not found in list of registered devices
867 *
868 * The callers are required to call dev_pm_opp_put() for the returned OPP after
869 * use.
870 */
871 struct dev_pm_opp *
872 dev_pm_opp_find_freq_floor_indexed(struct device *dev, unsigned long *freq,
873 u32 index)
874 {
875 return _find_key_floor(dev, freq, index, true, _read_freq, assert_clk_index);
876 }
877 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor_indexed);
878
879 /**
880 * dev_pm_opp_find_level_exact() - search for an exact level
881 * @dev: device for which we do this operation
882 * @level: level to search for
883 *
884 * Return: Searches for exact match in the opp table and returns pointer to the
885 * matching opp if found, else returns ERR_PTR in case of error and should
886 * be handled using IS_ERR. Error return values can be:
887 * EINVAL: for bad pointer
888 * ERANGE: no match found for search
889 * ENODEV: if device not found in list of registered devices
890 *
891 * The callers are required to call dev_pm_opp_put() for the returned OPP after
892 * use.
893 */
894 struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
895 unsigned int level)
896 {
897 return _find_key_exact(dev, level, 0, true, _read_level, NULL);
898 }
899 EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_exact);
900
901 /**
902 * dev_pm_opp_find_level_ceil() - search for a rounded up level
903 * @dev: device for which we do this operation
904 * @level: level to search for
905 *
906 * Return: Searches for rounded up match in the opp table and returns pointer
907 * to the matching opp if found, else returns ERR_PTR in case of error and
908 * should be handled using IS_ERR. Error return values can be:
909 * EINVAL: for bad pointer
910 * ERANGE: no match found for search
911 * ENODEV: if device not found in list of registered devices
912 *
913 * The callers are required to call dev_pm_opp_put() for the returned OPP after
914 * use.
915 */
916 struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
917 unsigned int *level)
918 {
919 unsigned long temp = *level;
920 struct dev_pm_opp *opp;
921
922 opp = _find_key_ceil(dev, &temp, 0, true, _read_level, NULL);
923 if (IS_ERR(opp))
924 return opp;
925
926 /* False match */
927 if (temp == OPP_LEVEL_UNSET) {
928 dev_err(dev, "%s: OPP levels aren't available\n", __func__);
929 dev_pm_opp_put(opp);
930 return ERR_PTR(-ENODEV);
931 }
932
933 *level = temp;
934 return opp;
935 }
936 EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil);
937
938 /**
939 * dev_pm_opp_find_level_floor() - Search for a rounded floor level
940 * @dev: device for which we do this operation
941 * @level: Start level
942 *
943 * Search for the matching floor *available* OPP from a starting level
944 * for a device.
945 *
946 * Return: matching *opp and refreshes *level accordingly, else returns
947 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
948 * values can be:
949 * EINVAL: for bad pointer
950 * ERANGE: no match found for search
951 * ENODEV: if device not found in list of registered devices
952 *
953 * The callers are required to call dev_pm_opp_put() for the returned OPP after
954 * use.
955 */
956 struct dev_pm_opp *dev_pm_opp_find_level_floor(struct device *dev,
957 unsigned int *level)
958 {
959 unsigned long temp = *level;
960 struct dev_pm_opp *opp;
961
962 opp = _find_key_floor(dev, &temp, 0, true, _read_level, NULL);
963 *level = temp;
964 return opp;
965 }
966 EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_floor);
967
968 /**
969 * dev_pm_opp_find_bw_ceil() - Search for a rounded ceil bandwidth
970 * @dev: device for which we do this operation
971 * @bw: start bandwidth
972 * @index: which bandwidth to compare, in case of OPPs with several values
973 *
974 * Search for the matching ceil *available* OPP from a starting bandwidth
975 * for a device.
976 *
977 * Return: matching *opp and refreshes *bw accordingly, else returns
978 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
979 * values can be:
980 * EINVAL: for bad pointer
981 * ERANGE: no match found for search
982 * ENODEV: if device not found in list of registered devices
983 *
984 * The callers are required to call dev_pm_opp_put() for the returned OPP after
985 * use.
986 */
987 struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev, unsigned int *bw,
988 int index)
989 {
990 unsigned long temp = *bw;
991 struct dev_pm_opp *opp;
992
993 opp = _find_key_ceil(dev, &temp, index, true, _read_bw,
994 assert_bandwidth_index);
995 *bw = temp;
996 return opp;
997 }
998 EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_ceil);
999
1000 /**
1001 * dev_pm_opp_find_bw_floor() - Search for a rounded floor bandwidth
1002 * @dev: device for which we do this operation
1003 * @bw: start bandwidth
1004 * @index: which bandwidth to compare, in case of OPPs with several values
1005 *
1006 * Search for the matching floor *available* OPP from a starting bandwidth
1007 * for a device.
1008 *
1009 * Return: matching *opp and refreshes *bw accordingly, else returns
1010 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
1011 * values can be:
1012 * EINVAL: for bad pointer
1013 * ERANGE: no match found for search
1014 * ENODEV: if device not found in list of registered devices
1015 *
1016 * The callers are required to call dev_pm_opp_put() for the returned OPP after
1017 * use.
1018 */
1019 struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
1020 unsigned int *bw, int index)
1021 {
1022 unsigned long temp = *bw;
1023 struct dev_pm_opp *opp;
1024
1025 opp = _find_key_floor(dev, &temp, index, true, _read_bw,
1026 assert_bandwidth_index);
1027 *bw = temp;
1028 return opp;
1029 }
1030 EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_floor);
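/*
 * Editorial example: selecting an OPP by interconnect bandwidth rather than
 * frequency and applying it, assuming the first (index 0) interconnect path.
 * The helper name and the kBps parameter are illustrative assumptions.
 */
static int __maybe_unused example_set_min_bw(struct device *dev,
                                             unsigned int min_kbps)
{
        struct dev_pm_opp *opp;
        int ret;

        opp = dev_pm_opp_find_bw_ceil(dev, &min_kbps, 0);
        if (IS_ERR(opp))
                return PTR_ERR(opp);

        ret = dev_pm_opp_set_opp(dev, opp);
        dev_pm_opp_put(opp);

        return ret;
}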
1031
1032 static int _set_opp_voltage(struct device *dev, struct regulator *reg,
1033 struct dev_pm_opp_supply *supply)
1034 {
1035 int ret;
1036
1037 /* Regulator not available for device */
1038 if (IS_ERR(reg)) {
1039 dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
1040 PTR_ERR(reg));
1041 return 0;
1042 }
1043
1044 dev_dbg(dev, "%s: voltages (uV): %lu %lu %lu\n", __func__,
1045 supply->u_volt_min, supply->u_volt, supply->u_volt_max);
1046
1047 ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
1048 supply->u_volt, supply->u_volt_max);
1049 if (ret)
1050 dev_err(dev, "%s: failed to set voltage (%lu %lu %lu uV): %d\n",
1051 __func__, supply->u_volt_min, supply->u_volt,
1052 supply->u_volt_max, ret);
1053
1054 return ret;
1055 }
1056
1057 static int
1058 _opp_config_clk_single(struct device *dev, struct opp_table *opp_table,
1059 struct dev_pm_opp *opp, void *data, bool scaling_down)
1060 {
1061 unsigned long *target = data;
1062 unsigned long freq;
1063 int ret;
1064
1065 /* One of target and opp must be available */
1066 if (target) {
1067 freq = *target;
1068 } else if (opp) {
1069 freq = opp->rates[0];
1070 } else {
1071 WARN_ON(1);
1072 return -EINVAL;
1073 }
1074
1075 ret = clk_set_rate(opp_table->clk, freq);
1076 if (ret) {
1077 dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
1078 ret);
1079 } else {
1080 opp_table->current_rate_single_clk = freq;
1081 }
1082
1083 return ret;
1084 }
1085
1086 /*
1087 * Simple implementation for configuring multiple clocks. Configure clocks in
1088 * the order in which they are present in the array while scaling up, and in reverse order while scaling down.
1089 */
1090 int dev_pm_opp_config_clks_simple(struct device *dev,
1091 struct opp_table *opp_table, struct dev_pm_opp *opp, void *data,
1092 bool scaling_down)
1093 {
1094 int ret, i;
1095
1096 if (scaling_down) {
1097 for (i = opp_table->clk_count - 1; i >= 0; i--) {
1098 ret = clk_set_rate(opp_table->clks[i], opp->rates[i]);
1099 if (ret) {
1100 dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
1101 ret);
1102 return ret;
1103 }
1104 }
1105 } else {
1106 for (i = 0; i < opp_table->clk_count; i++) {
1107 ret = clk_set_rate(opp_table->clks[i], opp->rates[i]);
1108 if (ret) {
1109 dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
1110 ret);
1111 return ret;
1112 }
1113 }
1114 }
1115
1116 return 0;
1117 }
1118 EXPORT_SYMBOL_GPL(dev_pm_opp_config_clks_simple);
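/*
 * Editorial example: how a driver would typically opt in to multi-clock
 * handling, passing its clock names and this helper (or a custom callback)
 * through the dev_pm_opp_set_config() interface. The clock names below and
 * the helper name are hypothetical.
 */
static int __maybe_unused example_setup_two_clocks(struct device *dev)
{
        static const char * const clk_names[] = { "core", "mem", NULL };
        struct dev_pm_opp_config config = {
                .clk_names = clk_names,
                .config_clks = dev_pm_opp_config_clks_simple,
        };

        return devm_pm_opp_set_config(dev, &config);
}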
1119
1120 static int _opp_config_regulator_single(struct device *dev,
1121 struct dev_pm_opp *old_opp, struct dev_pm_opp *new_opp,
1122 struct regulator **regulators, unsigned int count)
1123 {
1124 struct regulator *reg = regulators[0];
1125 int ret;
1126
1127 /* This function only supports single regulator per device */
1128 if (WARN_ON(count > 1)) {
1129 dev_err(dev, "multiple regulators are not supported\n");
1130 return -EINVAL;
1131 }
1132
1133 ret = _set_opp_voltage(dev, reg, new_opp->supplies);
1134 if (ret)
1135 return ret;
1136
1137 /*
1138 * Enable the regulator after setting its voltages, otherwise it breaks
1139 * some boot-enabled regulators.
1140 */
1141 if (unlikely(!new_opp->opp_table->enabled)) {
1142 ret = regulator_enable(reg);
1143 if (ret < 0)
1144 dev_warn(dev, "Failed to enable regulator: %d", ret);
1145 }
1146
1147 return 0;
1148 }
1149
1150 static int _set_opp_bw(const struct opp_table *opp_table,
1151 struct dev_pm_opp *opp, struct device *dev)
1152 {
1153 u32 avg, peak;
1154 int i, ret;
1155
1156 if (!opp_table->paths)
1157 return 0;
1158
1159 for (i = 0; i < opp_table->path_count; i++) {
1160 if (!opp) {
1161 avg = 0;
1162 peak = 0;
1163 } else {
1164 avg = opp->bandwidth[i].avg;
1165 peak = opp->bandwidth[i].peak;
1166 }
1167 ret = icc_set_bw(opp_table->paths[i], avg, peak);
1168 if (ret) {
1169 dev_err(dev, "Failed to %s bandwidth[%d]: %d\n",
1170 opp ? "set" : "remove", i, ret);
1171 return ret;
1172 }
1173 }
1174
1175 return 0;
1176 }
1177
1178 static int _set_opp_level(struct device *dev, struct dev_pm_opp *opp)
1179 {
1180 unsigned int level = 0;
1181 int ret = 0;
1182
1183 if (opp) {
1184 if (opp->level == OPP_LEVEL_UNSET)
1185 return 0;
1186
1187 level = opp->level;
1188 }
1189
1190 /* Request a new performance state through the device's PM domain. */
1191 ret = dev_pm_domain_set_performance_state(dev, level);
1192 if (ret)
1193 dev_err(dev, "Failed to set performance state %u (%d)\n", level,
1194 ret);
1195
1196 return ret;
1197 }
1198
1199 /* This is only called for PM domain for now */
1200 static int _set_required_opps(struct device *dev, struct opp_table *opp_table,
1201 struct dev_pm_opp *opp, bool up)
1202 {
1203 struct device **devs = opp_table->required_devs;
1204 struct dev_pm_opp *required_opp;
1205 int index, target, delta, ret;
1206
1207 if (!devs)
1208 return 0;
1209
1210 /* required-opps not fully initialized yet */
1211 if (lazy_linking_pending(opp_table))
1212 return -EBUSY;
1213
1214 /* Scaling up? Set required OPPs in normal order, else reverse */
1215 if (up) {
1216 index = 0;
1217 target = opp_table->required_opp_count;
1218 delta = 1;
1219 } else {
1220 index = opp_table->required_opp_count - 1;
1221 target = -1;
1222 delta = -1;
1223 }
1224
1225 while (index != target) {
1226 if (devs[index]) {
1227 required_opp = opp ? opp->required_opps[index] : NULL;
1228
1229 ret = _set_opp_level(devs[index], required_opp);
1230 if (ret)
1231 return ret;
1232 }
1233
1234 index += delta;
1235 }
1236
1237 return 0;
1238 }
1239
1240 static void _find_current_opp(struct device *dev, struct opp_table *opp_table)
1241 {
1242 struct dev_pm_opp *opp = ERR_PTR(-ENODEV);
1243 unsigned long freq;
1244
1245 if (!IS_ERR(opp_table->clk)) {
1246 freq = clk_get_rate(opp_table->clk);
1247 opp = _find_freq_ceil(opp_table, &freq);
1248 }
1249
1250 /*
1251 * Unable to find the current OPP? Pick the first one from the list since
1252 * it is in ascending order, otherwise the rest of the code will need to
1253 * make special checks to validate current_opp.
1254 */
1255 if (IS_ERR(opp)) {
1256 guard(mutex)(&opp_table->lock);
1257 opp = dev_pm_opp_get(list_first_entry(&opp_table->opp_list,
1258 struct dev_pm_opp, node));
1259 }
1260
1261 opp_table->current_opp = opp;
1262 }
1263
1264 static int _disable_opp_table(struct device *dev, struct opp_table *opp_table)
1265 {
1266 int ret;
1267
1268 if (!opp_table->enabled)
1269 return 0;
1270
1271 /*
1272 * Some drivers need to support cases where some platforms may
1273 * have an OPP table for the device, while others don't, and
1274 * opp_set_rate() just needs to behave like clk_set_rate().
1275 */
1276 if (!_get_opp_count(opp_table))
1277 return 0;
1278
1279 ret = _set_opp_bw(opp_table, NULL, dev);
1280 if (ret)
1281 return ret;
1282
1283 if (opp_table->regulators)
1284 regulator_disable(opp_table->regulators[0]);
1285
1286 ret = _set_opp_level(dev, NULL);
1287 if (ret)
1288 goto out;
1289
1290 ret = _set_required_opps(dev, opp_table, NULL, false);
1291
1292 out:
1293 opp_table->enabled = false;
1294 return ret;
1295 }
1296
1297 static int _set_opp(struct device *dev, struct opp_table *opp_table,
1298 struct dev_pm_opp *opp, void *clk_data, bool forced)
1299 {
1300 struct dev_pm_opp *old_opp;
1301 int scaling_down, ret;
1302
1303 if (unlikely(!opp))
1304 return _disable_opp_table(dev, opp_table);
1305
1306 /* Find the currently set OPP if we don't know already */
1307 if (unlikely(!opp_table->current_opp))
1308 _find_current_opp(dev, opp_table);
1309
1310 old_opp = opp_table->current_opp;
1311
1312 /* Return early if nothing to do */
1313 if (!forced && old_opp == opp && opp_table->enabled) {
1314 dev_dbg_ratelimited(dev, "%s: OPPs are same, nothing to do\n", __func__);
1315 return 0;
1316 }
1317
1318 dev_dbg(dev, "%s: switching OPP: Freq %lu -> %lu Hz, Level %u -> %u, Bw %u -> %u\n",
1319 __func__, old_opp->rates[0], opp->rates[0], old_opp->level,
1320 opp->level, old_opp->bandwidth ? old_opp->bandwidth[0].peak : 0,
1321 opp->bandwidth ? opp->bandwidth[0].peak : 0);
1322
1323 scaling_down = _opp_compare_key(opp_table, old_opp, opp);
1324 if (scaling_down == -1)
1325 scaling_down = 0;
1326
1327 /* Scaling up? Configure required OPPs before frequency */
1328 if (!scaling_down) {
1329 ret = _set_required_opps(dev, opp_table, opp, true);
1330 if (ret) {
1331 dev_err(dev, "Failed to set required opps: %d\n", ret);
1332 return ret;
1333 }
1334
1335 ret = _set_opp_level(dev, opp);
1336 if (ret)
1337 return ret;
1338
1339 ret = _set_opp_bw(opp_table, opp, dev);
1340 if (ret) {
1341 dev_err(dev, "Failed to set bw: %d\n", ret);
1342 return ret;
1343 }
1344
1345 if (opp_table->config_regulators) {
1346 ret = opp_table->config_regulators(dev, old_opp, opp,
1347 opp_table->regulators,
1348 opp_table->regulator_count);
1349 if (ret) {
1350 dev_err(dev, "Failed to set regulator voltages: %d\n",
1351 ret);
1352 return ret;
1353 }
1354 }
1355 }
1356
1357 if (opp_table->config_clks) {
1358 ret = opp_table->config_clks(dev, opp_table, opp, clk_data, scaling_down);
1359 if (ret)
1360 return ret;
1361 }
1362
1363 /* Scaling down? Configure required OPPs after frequency */
1364 if (scaling_down) {
1365 if (opp_table->config_regulators) {
1366 ret = opp_table->config_regulators(dev, old_opp, opp,
1367 opp_table->regulators,
1368 opp_table->regulator_count);
1369 if (ret) {
1370 dev_err(dev, "Failed to set regulator voltages: %d\n",
1371 ret);
1372 return ret;
1373 }
1374 }
1375
1376 ret = _set_opp_bw(opp_table, opp, dev);
1377 if (ret) {
1378 dev_err(dev, "Failed to set bw: %d\n", ret);
1379 return ret;
1380 }
1381
1382 ret = _set_opp_level(dev, opp);
1383 if (ret)
1384 return ret;
1385
1386 ret = _set_required_opps(dev, opp_table, opp, false);
1387 if (ret) {
1388 dev_err(dev, "Failed to set required opps: %d\n", ret);
1389 return ret;
1390 }
1391 }
1392
1393 opp_table->enabled = true;
1394 dev_pm_opp_put(old_opp);
1395
1396 /* Make sure current_opp doesn't get freed */
1397 opp_table->current_opp = dev_pm_opp_get(opp);
1398
1399 return ret;
1400 }
1401
1402 /**
1403 * dev_pm_opp_set_rate() - Configure new OPP based on frequency
1404 * @dev: device for which we do this operation
1405 * @target_freq: frequency to achieve
1406 *
1407 * This configures the power-supplies to the levels specified by the OPP
1408 * corresponding to the target_freq, and programs the clock to a value <=
1409 * target_freq, as rounded by clk_round_rate(). A device wanting to run at the
1410 * fmax provided by the opp should have already rounded to the target OPP's
1411 * frequency.
1412 */
1413 int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
1414 {
1415 struct dev_pm_opp *opp __free(put_opp) = NULL;
1416 unsigned long freq = 0, temp_freq;
1417 bool forced = false;
1418
1419 struct opp_table *opp_table __free(put_opp_table) =
1420 _find_opp_table(dev);
1421
1422 if (IS_ERR(opp_table)) {
1423 dev_err(dev, "%s: device's opp table doesn't exist\n", __func__);
1424 return PTR_ERR(opp_table);
1425 }
1426
1427 if (target_freq) {
1428 /*
1429 * For IO devices which require an OPP on some platforms/SoCs
1430 * while just needing to scale the clock on some others
1431 * we look for empty OPP tables with just a clock handle and
1432 * scale only the clk. This makes dev_pm_opp_set_rate()
1433 * equivalent to a clk_set_rate()
1434 */
1435 if (!_get_opp_count(opp_table)) {
1436 return opp_table->config_clks(dev, opp_table, NULL,
1437 &target_freq, false);
1438 }
1439
1440 freq = clk_round_rate(opp_table->clk, target_freq);
1441 if ((long)freq <= 0)
1442 freq = target_freq;
1443
1444 /*
1445 * The clock driver may support finer resolution of the
1446 * frequencies than the OPP table, don't update the frequency we
1447 * pass to clk_set_rate() here.
1448 */
1449 temp_freq = freq;
1450 opp = _find_freq_ceil(opp_table, &temp_freq);
1451 if (IS_ERR(opp)) {
1452 dev_err(dev, "%s: failed to find OPP for freq %lu (%ld)\n",
1453 __func__, freq, PTR_ERR(opp));
1454 return PTR_ERR(opp);
1455 }
1456
1457 /*
1458 * An OPP entry specifies the highest frequency at which other
1459 * properties of the OPP entry apply. Even if the new OPP is
1460 * same as the old one, we may still reach here for a different
1461 * value of the frequency. In such a case, do not abort but
1462 * configure the hardware to the desired frequency forcefully.
1463 */
1464 forced = opp_table->current_rate_single_clk != freq;
1465 }
1466
1467 return _set_opp(dev, opp_table, opp, &freq, forced);
1468 }
1469 EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
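/*
 * Editorial example: the typical consumer flow. A scaling callback simply
 * hands the requested frequency to dev_pm_opp_set_rate(); passing 0 drops the
 * device back to its "no OPP" state (bandwidth, level and regulator released),
 * as implemented by _disable_opp_table() above. Function names are
 * illustrative only.
 */
static int __maybe_unused example_scale(struct device *dev, unsigned long hz)
{
        return dev_pm_opp_set_rate(dev, hz);
}

static void __maybe_unused example_suspend(struct device *dev)
{
        /* Release the OPP related resources while the device is idle. */
        dev_pm_opp_set_rate(dev, 0);
}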
1470
1471 /**
1472 * dev_pm_opp_set_opp() - Configure device for OPP
1473 * @dev: device for which we do this operation
1474 * @opp: OPP to set to
1475 *
1476 * This configures the device based on the properties of the OPP passed to this
1477 * routine.
1478 *
1479 * Return: 0 on success, a negative error number otherwise.
1480 */
1481 int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp)
1482 {
1483 struct opp_table *opp_table __free(put_opp_table) =
1484 _find_opp_table(dev);
1485
1486 if (IS_ERR(opp_table)) {
1487 dev_err(dev, "%s: device opp doesn't exist\n", __func__);
1488 return PTR_ERR(opp_table);
1489 }
1490
1491 return _set_opp(dev, opp_table, opp, NULL, false);
1492 }
1493 EXPORT_SYMBOL_GPL(dev_pm_opp_set_opp);
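/*
 * Editorial example: dev_pm_opp_set_opp() is what level-only (genpd style)
 * consumers use, since there is no frequency to pass to dev_pm_opp_set_rate().
 * A minimal sketch with a hypothetical helper name:
 */
static int __maybe_unused example_set_level(struct device *dev, unsigned int level)
{
        struct dev_pm_opp *opp;
        int ret;

        opp = dev_pm_opp_find_level_ceil(dev, &level);
        if (IS_ERR(opp))
                return PTR_ERR(opp);

        ret = dev_pm_opp_set_opp(dev, opp);
        dev_pm_opp_put(opp);

        return ret;
}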
1494
1495 /* OPP-dev Helpers */
1496 static void _remove_opp_dev(struct opp_device *opp_dev,
1497 struct opp_table *opp_table)
1498 {
1499 opp_debug_unregister(opp_dev, opp_table);
1500 list_del(&opp_dev->node);
1501 kfree(opp_dev);
1502 }
1503
1504 struct opp_device *_add_opp_dev(const struct device *dev,
1505 struct opp_table *opp_table)
1506 {
1507 struct opp_device *opp_dev;
1508
1509 opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
1510 if (!opp_dev)
1511 return NULL;
1512
1513 /* Initialize opp-dev */
1514 opp_dev->dev = dev;
1515
1516 scoped_guard(mutex, &opp_table->lock)
1517 list_add(&opp_dev->node, &opp_table->dev_list);
1518
1519 /* Create debugfs entries for the opp_table */
1520 opp_debug_register(opp_dev, opp_table);
1521
1522 return opp_dev;
1523 }
1524
1525 static struct opp_table *_allocate_opp_table(struct device *dev, int index)
1526 {
1527 struct opp_table *opp_table;
1528 struct opp_device *opp_dev;
1529 int ret;
1530
1531 /*
1532 * Allocate a new OPP table. In the infrequent case where a new
1533 * device needs to be added, we pay this penalty.
1534 */
1535 opp_table = kzalloc_obj(*opp_table);
1536 if (!opp_table)
1537 return ERR_PTR(-ENOMEM);
1538
1539 mutex_init(&opp_table->lock);
1540 INIT_LIST_HEAD(&opp_table->dev_list);
1541 INIT_LIST_HEAD(&opp_table->lazy);
1542
1543 opp_table->clk = ERR_PTR(-ENODEV);
1544
1545 /* Mark regulator count uninitialized */
1546 opp_table->regulator_count = -1;
1547
1548 opp_dev = _add_opp_dev(dev, opp_table);
1549 if (!opp_dev) {
1550 ret = -ENOMEM;
1551 goto err;
1552 }
1553
1554 _of_init_opp_table(opp_table, dev, index);
1555
1556 /* Find interconnect path(s) for the device */
1557 ret = dev_pm_opp_of_find_icc_paths(dev, opp_table);
1558 if (ret) {
1559 if (ret == -EPROBE_DEFER)
1560 goto remove_opp_dev;
1561
1562 dev_warn(dev, "%s: Error finding interconnect paths: %d\n",
1563 __func__, ret);
1564 }
1565
1566 BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head);
1567 INIT_LIST_HEAD(&opp_table->opp_list);
1568 kref_init(&opp_table->kref);
1569
1570 return opp_table;
1571
1572 remove_opp_dev:
1573 _of_clear_opp_table(opp_table);
1574 _remove_opp_dev(opp_dev, opp_table);
1575 mutex_destroy(&opp_table->lock);
1576 err:
1577 kfree(opp_table);
1578 return ERR_PTR(ret);
1579 }
1580
1581 static struct opp_table *_update_opp_table_clk(struct device *dev,
1582 struct opp_table *opp_table,
1583 bool getclk)
1584 {
1585 int ret;
1586
1587 /*
1588 * Return early if we don't need to get clk or we have already done it
1589 * earlier.
1590 */
1591 if (!getclk || IS_ERR(opp_table) || !IS_ERR(opp_table->clk) ||
1592 opp_table->clks)
1593 return opp_table;
1594
1595 /* Find clk for the device */
1596 opp_table->clk = clk_get(dev, NULL);
1597
1598 ret = PTR_ERR_OR_ZERO(opp_table->clk);
1599 if (!ret) {
1600 opp_table->config_clks = _opp_config_clk_single;
1601 opp_table->clk_count = 1;
1602 return opp_table;
1603 }
1604
1605 if (ret == -ENOENT) {
1606 /*
1607 * There are a few platforms which don't want the OPP core to
1608 * manage device's clock settings. In such cases neither the
1609 * platform provides the clks explicitly to us, nor the DT
1610 * contains a valid clk entry. The OPP nodes in DT may still
1611 * contain "opp-hz" property though, which we need to parse and
1612 * allow the platform to find an OPP based on freq later on.
1613 *
1614 * This is a simple solution to take care of such corner cases,
1615 * i.e. make the clk_count 1, which lets us allocate space for
1616 * frequency in opp->rates and also parse the entries in DT.
1617 */
1618 opp_table->clk_count = 1;
1619
1620 dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, ret);
1621 return opp_table;
1622 }
1623
1624 dev_pm_opp_put_opp_table(opp_table);
1625 dev_err_probe(dev, ret, "Couldn't find clock\n");
1626
1627 return ERR_PTR(ret);
1628 }
1629
1630 /*
1631 * We need to make sure that the OPP table for a device doesn't get added twice
1632 * if this routine gets called in parallel with the same device pointer.
1633 *
1634 * The simplest way to enforce that is to perform everything (find existing
1635 * table and if not found, create a new one) under the opp_table_lock, so only
1636 * one creator gets access to it. But that expands the critical section
1637 * under the lock and may end up causing circular dependencies with frameworks
1638 * like debugfs, interconnect or clock framework as they may be direct or
1639 * indirect users of OPP core.
1640 *
1641 * And for that reason we have to go for a slightly tricky implementation here, which
1642 * uses the opp_tables_busy flag to indicate if another creator is in the middle
1643 * of adding an OPP table and others should wait for it to finish.
1644 */
1645 struct opp_table *_add_opp_table_indexed(struct device *dev, int index,
1646 bool getclk)
1647 {
1648 struct opp_table *opp_table;
1649
1650 again:
1651 mutex_lock(&opp_table_lock);
1652
1653 opp_table = _find_opp_table_unlocked(dev);
1654 if (!IS_ERR(opp_table))
1655 goto unlock;
1656
1657 /*
1658 * The opp_tables list or an OPP table's dev_list is getting updated by
1659 * another user, wait for it to finish.
1660 */
1661 if (unlikely(opp_tables_busy)) {
1662 mutex_unlock(&opp_table_lock);
1663 cpu_relax();
1664 goto again;
1665 }
1666
1667 opp_tables_busy = true;
1668 opp_table = _managed_opp(dev, index);
1669
1670 /* Drop the lock to reduce the size of critical section */
1671 mutex_unlock(&opp_table_lock);
1672
1673 if (opp_table) {
1674 if (!_add_opp_dev(dev, opp_table)) {
1675 dev_pm_opp_put_opp_table(opp_table);
1676 opp_table = ERR_PTR(-ENOMEM);
1677 }
1678
1679 mutex_lock(&opp_table_lock);
1680 } else {
1681 opp_table = _allocate_opp_table(dev, index);
1682
1683 mutex_lock(&opp_table_lock);
1684 if (!IS_ERR(opp_table))
1685 list_add(&opp_table->node, &opp_tables);
1686 }
1687
1688 opp_tables_busy = false;
1689
1690 unlock:
1691 mutex_unlock(&opp_table_lock);
1692
1693 return _update_opp_table_clk(dev, opp_table, getclk);
1694 }
1695
1696 static struct opp_table *_add_opp_table(struct device *dev, bool getclk)
1697 {
1698 return _add_opp_table_indexed(dev, 0, getclk);
1699 }
1700
1701 struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
1702 {
1703 return _find_opp_table(dev);
1704 }
1705 EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table);
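/*
 * Illustrative usage sketch (not part of the original file; the "foo" name is
 * hypothetical): a consumer takes a reference on the device's OPP table and
 * must balance it with dev_pm_opp_put_opp_table() once done, mirroring the
 * kref-based get/put pairing implemented below.
 *
 *	#include <linux/pm_opp.h>
 *
 *	static int foo_check_opp_table(struct device *dev)
 *	{
 *		struct opp_table *opp_table;
 *
 *		opp_table = dev_pm_opp_get_opp_table(dev);
 *		if (IS_ERR(opp_table))
 *			return PTR_ERR(opp_table);
 *
 *		... use the table while holding the reference ...
 *
 *		dev_pm_opp_put_opp_table(opp_table);
 *		return 0;
 *	}
 */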
1706
1707 static void _opp_table_kref_release(struct kref *kref)
1708 {
1709 struct opp_table *opp_table = container_of(kref, struct opp_table, kref);
1710 struct opp_device *opp_dev, *temp;
1711 int i;
1712
1713 /* Drop the lock as soon as we can */
1714 list_del(&opp_table->node);
1715 mutex_unlock(&opp_table_lock);
1716
1717 if (opp_table->current_opp)
1718 dev_pm_opp_put(opp_table->current_opp);
1719
1720 _of_clear_opp_table(opp_table);
1721
1722 /* Release automatically acquired single clk */
1723 if (!IS_ERR(opp_table->clk))
1724 clk_put(opp_table->clk);
1725
1726 if (opp_table->paths) {
1727 for (i = 0; i < opp_table->path_count; i++)
1728 icc_put(opp_table->paths[i]);
1729 kfree(opp_table->paths);
1730 }
1731
1732 WARN_ON(!list_empty(&opp_table->opp_list));
1733
1734 list_for_each_entry_safe(opp_dev, temp, &opp_table->dev_list, node)
1735 _remove_opp_dev(opp_dev, opp_table);
1736
1737 mutex_destroy(&opp_table->lock);
1738 kfree(opp_table);
1739 }
1740
1741 struct opp_table *dev_pm_opp_get_opp_table_ref(struct opp_table *opp_table)
1742 {
1743 kref_get(&opp_table->kref);
1744 return opp_table;
1745 }
1746 EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table_ref);
1747
1748 void dev_pm_opp_put_opp_table(struct opp_table *opp_table)
1749 {
1750 kref_put_mutex(&opp_table->kref, _opp_table_kref_release,
1751 &opp_table_lock);
1752 }
1753 EXPORT_SYMBOL_GPL(dev_pm_opp_put_opp_table);
1754
1755 void _opp_free(struct dev_pm_opp *opp)
1756 {
1757 kfree(opp);
1758 }
1759
1760 static void _opp_kref_release(struct kref *kref)
1761 {
1762 struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
1763 struct opp_table *opp_table = opp->opp_table;
1764
1765 list_del(&opp->node);
1766 mutex_unlock(&opp_table->lock);
1767
1768 /*
1769 * Notify the changes in the availability of the operable
1770 * frequency/voltage list.
1771 */
1772 blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_REMOVE, opp);
1773 _of_clear_opp(opp_table, opp);
1774 opp_debug_remove_one(opp);
1775 kfree(opp);
1776 }
1777
1778 struct dev_pm_opp *dev_pm_opp_get(struct dev_pm_opp *opp)
1779 {
1780 kref_get(&opp->kref);
1781 return opp;
1782 }
1783 EXPORT_SYMBOL_GPL(dev_pm_opp_get);
1784
1785 void dev_pm_opp_put(struct dev_pm_opp *opp)
1786 {
1787 kref_put_mutex(&opp->kref, _opp_kref_release, &opp->opp_table->lock);
1788 }
1789 EXPORT_SYMBOL_GPL(dev_pm_opp_put);
1790
1791 /**
1792 * dev_pm_opp_remove() - Remove an OPP from OPP table
1793 * @dev: device for which we do this operation
1794 * @freq: OPP to remove with matching 'freq'
1795 *
1796 * This function removes an opp from the opp table.
1797 */
1798 void dev_pm_opp_remove(struct device *dev, unsigned long freq)
1799 {
1800 struct dev_pm_opp *opp = NULL, *iter;
1801
1802 struct opp_table *opp_table __free(put_opp_table) =
1803 _find_opp_table(dev);
1804
1805 if (IS_ERR(opp_table))
1806 return;
1807
1808 if (!assert_single_clk(opp_table, 0))
1809 return;
1810
1811 scoped_guard(mutex, &opp_table->lock) {
1812 list_for_each_entry(iter, &opp_table->opp_list, node) {
1813 if (iter->rates[0] == freq) {
1814 opp = iter;
1815 break;
1816 }
1817 }
1818 }
1819
1820 if (opp) {
1821 dev_pm_opp_put(opp);
1822
1823 /* Drop the reference taken by dev_pm_opp_add() */
1824 dev_pm_opp_put_opp_table(opp_table);
1825 } else {
1826 dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
1827 __func__, freq);
1828 }
1829 }
1830 EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
1831
1832 static struct dev_pm_opp *_opp_get_next(struct opp_table *opp_table,
1833 bool dynamic)
1834 {
1835 struct dev_pm_opp *opp;
1836
1837 guard(mutex)(&opp_table->lock);
1838
1839 list_for_each_entry(opp, &opp_table->opp_list, node) {
1840 /*
1841 * Refcount must be dropped only once for each OPP by OPP core,
1842 * do that with help of "removed" flag.
1843 */
1844 if (!opp->removed && dynamic == opp->dynamic)
1845 return opp;
1846 }
1847
1848 return NULL;
1849 }
1850
1851 /*
1852 * Can't call dev_pm_opp_put() from under the lock as debugfs removal needs to
1853 * happen locklessly to avoid circular dependency issues. This routine must be
1854 * called without the opp_table->lock held.
1855 */
1856 static void _opp_remove_all(struct opp_table *opp_table, bool dynamic)
1857 {
1858 struct dev_pm_opp *opp;
1859
1860 while ((opp = _opp_get_next(opp_table, dynamic))) {
1861 opp->removed = true;
1862 dev_pm_opp_put(opp);
1863
1864 /* Drop the references taken by dev_pm_opp_add() */
1865 if (dynamic)
1866 dev_pm_opp_put_opp_table(opp_table);
1867 }
1868 }
1869
1870 bool _opp_remove_all_static(struct opp_table *opp_table)
1871 {
1872 scoped_guard(mutex, &opp_table->lock) {
1873 if (!opp_table->parsed_static_opps)
1874 return false;
1875
1876 if (--opp_table->parsed_static_opps)
1877 return true;
1878 }
1879
1880 _opp_remove_all(opp_table, false);
1881 return true;
1882 }
1883
1884 /**
1885 * dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs
1886 * @dev: device for which we do this operation
1887 *
1888 * This function removes all dynamically created OPPs from the opp table.
1889 */
1890 void dev_pm_opp_remove_all_dynamic(struct device *dev)
1891 {
1892 struct opp_table *opp_table __free(put_opp_table) =
1893 _find_opp_table(dev);
1894
1895 if (IS_ERR(opp_table))
1896 return;
1897
1898 _opp_remove_all(opp_table, true);
1899 }
1900 EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic);
1901
1902 struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table)
1903 {
1904 struct dev_pm_opp *opp;
1905 int supply_count, supply_size, icc_size, clk_size;
1906
1907 /* Allocate space for at least one supply */
1908 supply_count = opp_table->regulator_count > 0 ?
1909 opp_table->regulator_count : 1;
1910 supply_size = sizeof(*opp->supplies) * supply_count;
1911 clk_size = sizeof(*opp->rates) * opp_table->clk_count;
1912 icc_size = sizeof(*opp->bandwidth) * opp_table->path_count;
1913
1914 /* allocate new OPP node and supplies structures */
1915 opp = kzalloc(sizeof(*opp) + supply_size + clk_size + icc_size, GFP_KERNEL);
1916 if (!opp)
1917 return NULL;
1918
1919 /* Put the supplies, bw and clock at the end of the OPP structure */
1920 opp->supplies = (struct dev_pm_opp_supply *)(opp + 1);
1921
1922 opp->rates = (unsigned long *)(opp->supplies + supply_count);
1923
1924 if (icc_size)
1925 opp->bandwidth = (struct dev_pm_opp_icc_bw *)(opp->rates + opp_table->clk_count);
1926
1927 INIT_LIST_HEAD(&opp->node);
1928
1929 opp->level = OPP_LEVEL_UNSET;
1930
1931 return opp;
1932 }
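/*
 * For reference, the single kzalloc() above yields one contiguous block laid
 * out roughly as follows (a sketch; the exact sizes depend on the table's
 * regulator, clock and interconnect-path counts):
 *
 *	[ struct dev_pm_opp | supplies[supply_count] | rates[clk_count] | bandwidth[path_count] ]
 *
 * opp->supplies, opp->rates and (when path_count is non-zero) opp->bandwidth
 * are therefore just offsets into the same allocation and are released
 * together by the single kfree(opp) in _opp_free() and _opp_kref_release().
 */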
1933
1934 static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
1935 struct opp_table *opp_table)
1936 {
1937 struct regulator *reg;
1938 int i;
1939
1940 if (!opp_table->regulators)
1941 return true;
1942
1943 for (i = 0; i < opp_table->regulator_count; i++) {
1944 reg = opp_table->regulators[i];
1945
1946 if (!regulator_is_supported_voltage(reg,
1947 opp->supplies[i].u_volt_min,
1948 opp->supplies[i].u_volt_max)) {
1949 pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
1950 __func__, opp->supplies[i].u_volt_min,
1951 opp->supplies[i].u_volt_max);
1952 return false;
1953 }
1954 }
1955
1956 return true;
1957 }
1958
1959 static int _opp_compare_rate(struct opp_table *opp_table,
1960 struct dev_pm_opp *opp1, struct dev_pm_opp *opp2)
1961 {
1962 int i;
1963
1964 for (i = 0; i < opp_table->clk_count; i++) {
1965 if (opp1->rates[i] != opp2->rates[i])
1966 return opp1->rates[i] < opp2->rates[i] ? -1 : 1;
1967 }
1968
1969 /* Same rates for both OPPs */
1970 return 0;
1971 }
1972
1973 static int _opp_compare_bw(struct opp_table *opp_table, struct dev_pm_opp *opp1,
1974 struct dev_pm_opp *opp2)
1975 {
1976 int i;
1977
1978 for (i = 0; i < opp_table->path_count; i++) {
1979 if (opp1->bandwidth[i].peak != opp2->bandwidth[i].peak)
1980 return opp1->bandwidth[i].peak < opp2->bandwidth[i].peak ? -1 : 1;
1981 }
1982
1983 /* Same bw for both OPPs */
1984 return 0;
1985 }
1986
1987 /*
1988 * Returns
1989 * 0: opp1 == opp2
1990 * 1: opp1 > opp2
1991 * -1: opp1 < opp2
1992 */
1993 int _opp_compare_key(struct opp_table *opp_table, struct dev_pm_opp *opp1,
1994 struct dev_pm_opp *opp2)
1995 {
1996 int ret;
1997
1998 ret = _opp_compare_rate(opp_table, opp1, opp2);
1999 if (ret)
2000 return ret;
2001
2002 ret = _opp_compare_bw(opp_table, opp1, opp2);
2003 if (ret)
2004 return ret;
2005
2006 if (opp1->level != opp2->level)
2007 return opp1->level < opp2->level ? -1 : 1;
2008
2009 /* Duplicate OPPs */
2010 return 0;
2011 }
2012
2013 static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp,
2014 struct opp_table *opp_table,
2015 struct list_head **head)
2016 {
2017 struct dev_pm_opp *opp;
2018 int opp_cmp;
2019
2020 /*
2021 * Insert new OPP in order of increasing frequency and discard if
2022 * already present.
2023 *
2024 * Need to use &opp_table->opp_list in the condition part of the 'for'
2025 * loop, don't replace it with head otherwise it will become an infinite
2026 * loop.
2027 */
2028 list_for_each_entry(opp, &opp_table->opp_list, node) {
2029 opp_cmp = _opp_compare_key(opp_table, new_opp, opp);
2030 if (opp_cmp > 0) {
2031 *head = &opp->node;
2032 continue;
2033 }
2034
2035 if (opp_cmp < 0)
2036 return 0;
2037
2038 /* Duplicate OPPs */
2039 dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
2040 __func__, opp->rates[0], opp->supplies[0].u_volt,
2041 opp->available, new_opp->rates[0],
2042 new_opp->supplies[0].u_volt, new_opp->available);
2043
2044 /* Should we compare voltages for all regulators here ? */
2045 return opp->available &&
2046 new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? -EBUSY : -EEXIST;
2047 }
2048
2049 return 0;
2050 }
2051
2052 void _required_opps_available(struct dev_pm_opp *opp, int count)
2053 {
2054 int i;
2055
2056 for (i = 0; i < count; i++) {
2057 if (opp->required_opps[i]->available)
2058 continue;
2059
2060 opp->available = false;
2061 pr_warn("%s: OPP not supported by required OPP %pOF (%lu)\n",
2062 __func__, opp->required_opps[i]->np, opp->rates[0]);
2063 return;
2064 }
2065 }
2066
2067 /*
2068 * Returns:
2069 * 0: On success. An appropriate error message is printed for duplicate OPPs.
2070 * -EBUSY: For OPP with same freq/volt and is available. The callers of
2071 * _opp_add() must return 0 if they receive -EBUSY from it. This is to make
2072 * sure we don't print error messages unnecessarily if different parts of
2073 * kernel try to initialize the OPP table.
2074 * -EEXIST: For OPP with same freq but different volt or is unavailable. This
2075 * should be considered an error by the callers of _opp_add().
2076 */
2077 int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
2078 struct opp_table *opp_table)
2079 {
2080 struct list_head *head;
2081 int ret;
2082
2083 scoped_guard(mutex, &opp_table->lock) {
2084 head = &opp_table->opp_list;
2085
2086 ret = _opp_is_duplicate(dev, new_opp, opp_table, &head);
2087 if (ret)
2088 return ret;
2089
2090 list_add(&new_opp->node, head);
2091 }
2092
2093 new_opp->opp_table = opp_table;
2094 kref_init(&new_opp->kref);
2095
2096 opp_debug_create_one(new_opp, opp_table);
2097
2098 if (!_opp_supported_by_regulators(new_opp, opp_table)) {
2099 new_opp->available = false;
2100 dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
2101 __func__, new_opp->rates[0]);
2102 }
2103
2104 /* required-opps not fully initialized yet */
2105 if (lazy_linking_pending(opp_table))
2106 return 0;
2107
2108 _required_opps_available(new_opp, opp_table->required_opp_count);
2109
2110 return 0;
2111 }
2112
2113 /**
2114 * _opp_add_v1() - Allocate an OPP based on v1 bindings.
2115 * @opp_table: OPP table
2116 * @dev: device for which we do this operation
2117 * @data: The OPP data for the OPP to add
2118 * @dynamic: Dynamically added OPPs.
2119 *
2120 * This function adds an opp definition to the opp table and returns status.
2121 * The opp is made available by default and it can be controlled using
2122 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
2123 *
2124 * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
2125 * and freed by dev_pm_opp_of_remove_table.
2126 *
2127 * Return:
2128 * 0 On success OR
2129 * Duplicate OPPs (both freq and volt are same) and opp->available
2130 * -EEXIST Freq is same but volt is different OR
2131 * Duplicate OPPs (both freq and volt are same) and !opp->available
2132 * -ENOMEM Memory allocation failure
2133 */
2134 int _opp_add_v1(struct opp_table *opp_table, struct device *dev,
2135 struct dev_pm_opp_data *data, bool dynamic)
2136 {
2137 struct dev_pm_opp *new_opp;
2138 unsigned long tol, u_volt = data->u_volt;
2139 int ret;
2140
2141 if (!assert_single_clk(opp_table, 0))
2142 return -EINVAL;
2143
2144 new_opp = _opp_allocate(opp_table);
2145 if (!new_opp)
2146 return -ENOMEM;
2147
2148 /* populate the opp table */
2149 new_opp->rates[0] = data->freq;
2150 new_opp->level = data->level;
2151 new_opp->turbo = data->turbo;
2152 tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
2153 new_opp->supplies[0].u_volt = u_volt;
2154 new_opp->supplies[0].u_volt_min = u_volt - tol;
2155 new_opp->supplies[0].u_volt_max = u_volt + tol;
2156 new_opp->available = true;
2157 new_opp->dynamic = dynamic;
2158
2159 ret = _opp_add(dev, new_opp, opp_table);
2160 if (ret) {
2161 /* Don't return error for duplicate OPPs */
2162 if (ret == -EBUSY)
2163 ret = 0;
2164 goto free_opp;
2165 }
2166
2167 /*
2168 * Notify the changes in the availability of the operable
2169 * frequency/voltage list.
2170 */
2171 blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp);
2172 return 0;
2173
2174 free_opp:
2175 _opp_free(new_opp);
2176
2177 return ret;
2178 }
2179
2180 /*
2181 * This is required only for the V2 bindings, and it enables a platform to
2182 * specify the hierarchy of versions it supports. OPP layer will then enable
2183 * OPPs, which are available for those versions, based on its 'opp-supported-hw'
2184 * property.
2185 */
2186 static int _opp_set_supported_hw(struct opp_table *opp_table,
2187 const u32 *versions, unsigned int count)
2188 {
2189 /* Another CPU that shares the OPP table has set the property ? */
2190 if (opp_table->supported_hw)
2191 return 0;
2192
2193 opp_table->supported_hw = kmemdup_array(versions, count,
2194 sizeof(*versions), GFP_KERNEL);
2195 if (!opp_table->supported_hw)
2196 return -ENOMEM;
2197
2198 opp_table->supported_hw_count = count;
2199
2200 return 0;
2201 }
2202
2203 static void _opp_put_supported_hw(struct opp_table *opp_table)
2204 {
2205 if (opp_table->supported_hw) {
2206 kfree(opp_table->supported_hw);
2207 opp_table->supported_hw = NULL;
2208 opp_table->supported_hw_count = 0;
2209 }
2210 }
2211
2212 /*
2213 * This is required only for the V2 bindings, and it enables a platform to
2214 * specify the extn to be used for certain property names. The properties to
2215 * which the extension will apply are opp-microvolt and opp-microamp. OPP core
2216 * should postfix the property name with -<name> while looking for them.
2217 */
2218 static int _opp_set_prop_name(struct opp_table *opp_table, const char *name)
2219 {
2220 /* Another CPU that shares the OPP table has set the property ? */
2221 if (!opp_table->prop_name) {
2222 opp_table->prop_name = kstrdup(name, GFP_KERNEL);
2223 if (!opp_table->prop_name)
2224 return -ENOMEM;
2225 }
2226
2227 return 0;
2228 }
2229
2230 static void _opp_put_prop_name(struct opp_table *opp_table)
2231 {
2232 if (opp_table->prop_name) {
2233 kfree(opp_table->prop_name);
2234 opp_table->prop_name = NULL;
2235 }
2236 }
2237
2238 /*
2239 * In order to support OPP switching, the OPP layer needs to know the names of
2240 * the device's regulators, as the core would be required to switch voltages as
2241 * well.
2242 *
2243 * This must be called before any OPPs are initialized for the device.
2244 */
2245 static int _opp_set_regulators(struct opp_table *opp_table, struct device *dev,
2246 const char * const names[])
2247 {
2248 const char * const *temp = names;
2249 struct regulator *reg;
2250 int count = 0, ret, i;
2251
2252 /* Count number of regulators */
2253 while (*temp++)
2254 count++;
2255
2256 if (!count)
2257 return -EINVAL;
2258
2259 /* Another CPU that shares the OPP table has set the regulators ? */
2260 if (opp_table->regulators)
2261 return 0;
2262
2263 opp_table->regulators = kmalloc_array(count, sizeof(*opp_table->regulators), GFP_KERNEL);
2264 if (!opp_table->regulators)
2265 return -ENOMEM;
2266
2267 for (i = 0; i < count; i++) {
2268 reg = regulator_get_optional(dev, names[i]);
2269 if (IS_ERR(reg)) {
2270 ret = dev_err_probe(dev, PTR_ERR(reg),
2271 "%s: no regulator (%s) found\n",
2272 __func__, names[i]);
2273 goto free_regulators;
2274 }
2275
2276 opp_table->regulators[i] = reg;
2277 }
2278
2279 opp_table->regulator_count = count;
2280
2281 /* Set generic config_regulators() for single regulators here */
2282 if (count == 1)
2283 opp_table->config_regulators = _opp_config_regulator_single;
2284
2285 return 0;
2286
2287 free_regulators:
2288 while (i != 0)
2289 regulator_put(opp_table->regulators[--i]);
2290
2291 kfree(opp_table->regulators);
2292 opp_table->regulators = NULL;
2293 opp_table->regulator_count = -1;
2294
2295 return ret;
2296 }
2297
2298 static void _opp_put_regulators(struct opp_table *opp_table)
2299 {
2300 int i;
2301
2302 if (!opp_table->regulators)
2303 return;
2304
2305 if (opp_table->enabled) {
2306 for (i = opp_table->regulator_count - 1; i >= 0; i--)
2307 regulator_disable(opp_table->regulators[i]);
2308 }
2309
2310 for (i = opp_table->regulator_count - 1; i >= 0; i--)
2311 regulator_put(opp_table->regulators[i]);
2312
2313 kfree(opp_table->regulators);
2314 opp_table->regulators = NULL;
2315 opp_table->regulator_count = -1;
2316 }
2317
2318 static void _put_clks(struct opp_table *opp_table, int count)
2319 {
2320 int i;
2321
2322 for (i = count - 1; i >= 0; i--)
2323 clk_put(opp_table->clks[i]);
2324
2325 kfree(opp_table->clks);
2326 opp_table->clks = NULL;
2327 }
2328
2329 /*
2330 * In order to support OPP switching, OPP layer needs to get pointers to the
2331 * clocks for the device. Simple cases work fine without using this routine
2332 * (i.e. by passing connection-id as NULL), but for a device with multiple
2333 * clocks available, the OPP core needs to know the exact names of the clks to
2334 * use.
2335 *
2336 * This must be called before any OPPs are initialized for the device.
2337 */
2338 static int _opp_set_clknames(struct opp_table *opp_table, struct device *dev,
2339 const char * const names[],
2340 config_clks_t config_clks)
2341 {
2342 const char * const *temp = names;
2343 int count = 0, ret, i;
2344 struct clk *clk;
2345
2346 /* Count number of clks */
2347 while (*temp++)
2348 count++;
2349
2350 /*
2351 * This is a special case where we have a single clock, whose connection
2352 * id name is NULL, i.e. first two entries are NULL in the array.
2353 */
2354 if (!count && !names[1])
2355 count = 1;
2356
2357 /* Fail early for invalid configurations */
2358 if (!count || (!config_clks && count > 1))
2359 return -EINVAL;
2360
2361 /* Another CPU that shares the OPP table has set the clkname ? */
2362 if (opp_table->clks)
2363 return 0;
2364
2365 opp_table->clks = kmalloc_array(count, sizeof(*opp_table->clks), GFP_KERNEL);
2366 if (!opp_table->clks)
2367 return -ENOMEM;
2368
2369 /* Find clks for the device */
2370 for (i = 0; i < count; i++) {
2371 clk = clk_get(dev, names[i]);
2372 if (IS_ERR(clk)) {
2373 ret = dev_err_probe(dev, PTR_ERR(clk),
2374 "%s: Couldn't find clock with name: %s\n",
2375 __func__, names[i]);
2376 goto free_clks;
2377 }
2378
2379 opp_table->clks[i] = clk;
2380 }
2381
2382 opp_table->clk_count = count;
2383 opp_table->config_clks = config_clks;
2384
2385 /* Set generic single clk set here */
2386 if (count == 1) {
2387 if (!opp_table->config_clks)
2388 opp_table->config_clks = _opp_config_clk_single;
2389
2390 /*
2391 * We could have just dropped the "clk" field and used "clks"
2392 * everywhere. Instead we kept the "clk" field around for
2393 * the following reasons:
2394 *
2395 * - avoiding clks[0] everywhere else.
2396 * - not running single clk helpers for multiple clk usecase by
2397 * mistake.
2398 *
2399 * Since this is single-clk case, just update the clk pointer
2400 * too.
2401 */
2402 opp_table->clk = opp_table->clks[0];
2403 }
2404
2405 return 0;
2406
2407 free_clks:
2408 _put_clks(opp_table, i);
2409 return ret;
2410 }
2411
2412 static void _opp_put_clknames(struct opp_table *opp_table)
2413 {
2414 if (!opp_table->clks)
2415 return;
2416
2417 opp_table->config_clks = NULL;
2418 opp_table->clk = ERR_PTR(-ENODEV);
2419
2420 _put_clks(opp_table, opp_table->clk_count);
2421 }
2422
2423 /*
2424 * This is useful to support platforms with multiple regulators per device.
2425 *
2426 * This must be called before any OPPs are initialized for the device.
2427 */
2428 static int _opp_set_config_regulators_helper(struct opp_table *opp_table,
2429 struct device *dev, config_regulators_t config_regulators)
2430 {
2431 /* Another CPU that shares the OPP table has set the helper ? */
2432 if (!opp_table->config_regulators)
2433 opp_table->config_regulators = config_regulators;
2434
2435 return 0;
2436 }
2437
2438 static void _opp_put_config_regulators_helper(struct opp_table *opp_table)
2439 {
2440 if (opp_table->config_regulators)
2441 opp_table->config_regulators = NULL;
2442 }
2443
2444 static int _opp_set_required_dev(struct opp_table *opp_table,
2445 struct device *dev,
2446 struct device *required_dev,
2447 unsigned int index)
2448 {
2449 struct opp_table *required_table, *pd_table;
2450 struct device *gdev;
2451
2452 /* Genpd core takes care of propagation to parent genpd */
2453 if (opp_table->is_genpd) {
2454 dev_err(dev, "%s: Operation not supported for genpds\n", __func__);
2455 return -EOPNOTSUPP;
2456 }
2457
2458 if (index >= opp_table->required_opp_count) {
2459 dev_err(dev, "Required OPPs not available, can't set required devs\n");
2460 return -EINVAL;
2461 }
2462
2463 required_table = opp_table->required_opp_tables[index];
2464 if (IS_ERR(required_table)) {
2465 dev_err(dev, "Missing OPP table, unable to set the required devs\n");
2466 return -ENODEV;
2467 }
2468
2469 /*
2470 * The required_opp_tables parsing is not perfect, as the OPP core does
2471 * the parsing solely based on the DT node pointers. The core sets the
2472 * required_opp_tables entry to the first OPP table in the "opp_tables"
2473 * list, that matches with the node pointer.
2474 *
2475 * If the target DT OPP table is used by multiple devices and they all
2476 * create separate instances of 'struct opp_table' from it, then it is
2477 * possible that the required_opp_tables entry may be set to the
2478 * incorrect sibling device.
2479 *
2480 * Cross check it again and fix if required.
2481 */
2482 gdev = dev_to_genpd_dev(required_dev);
2483 if (IS_ERR(gdev))
2484 return PTR_ERR(gdev);
2485
2486 pd_table = _find_opp_table(gdev);
2487 if (!IS_ERR(pd_table)) {
2488 if (pd_table != required_table) {
2489 dev_pm_opp_put_opp_table(required_table);
2490 opp_table->required_opp_tables[index] = pd_table;
2491 } else {
2492 dev_pm_opp_put_opp_table(pd_table);
2493 }
2494 }
2495
2496 opp_table->required_devs[index] = required_dev;
2497 return 0;
2498 }
2499
2500 static void _opp_put_required_dev(struct opp_table *opp_table,
2501 unsigned int index)
2502 {
2503 opp_table->required_devs[index] = NULL;
2504 }
2505
2506 static void _opp_clear_config(struct opp_config_data *data)
2507 {
2508 if (data->flags & OPP_CONFIG_REQUIRED_DEV)
2509 _opp_put_required_dev(data->opp_table,
2510 data->required_dev_index);
2511 if (data->flags & OPP_CONFIG_REGULATOR)
2512 _opp_put_regulators(data->opp_table);
2513 if (data->flags & OPP_CONFIG_SUPPORTED_HW)
2514 _opp_put_supported_hw(data->opp_table);
2515 if (data->flags & OPP_CONFIG_REGULATOR_HELPER)
2516 _opp_put_config_regulators_helper(data->opp_table);
2517 if (data->flags & OPP_CONFIG_PROP_NAME)
2518 _opp_put_prop_name(data->opp_table);
2519 if (data->flags & OPP_CONFIG_CLK)
2520 _opp_put_clknames(data->opp_table);
2521
2522 dev_pm_opp_put_opp_table(data->opp_table);
2523 kfree(data);
2524 }
2525
2526 /**
2527 * dev_pm_opp_set_config() - Set OPP configuration for the device.
2528 * @dev: Device for which configuration is being set.
2529 * @config: OPP configuration.
2530 *
2531 * This allows all device OPP configurations to be performed at once.
2532 *
2533 * This must be called before any OPPs are initialized for the device. This may
2534 * be called multiple times for the same OPP table, for example once for each
2535 * CPU that share the same table. This must be balanced by the same number of
2536 * calls to dev_pm_opp_clear_config() in order to free the OPP table properly.
2537 *
2538 * This returns a token to the caller, which must be passed to
2539 * dev_pm_opp_clear_config() to free the resources later. The value of the
2540 * returned token will be >= 1 for success and negative for errors. The minimum
2541 * value of 1 is chosen here to make it easy for callers to manage the resource.
2542 */
2543 int dev_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config)
2544 {
2545 struct opp_table *opp_table;
2546 struct opp_config_data *data;
2547 unsigned int id;
2548 int ret;
2549
2550 data = kmalloc(sizeof(*data), GFP_KERNEL);
2551 if (!data)
2552 return -ENOMEM;
2553
2554 opp_table = _add_opp_table(dev, false);
2555 if (IS_ERR(opp_table)) {
2556 kfree(data);
2557 return PTR_ERR(opp_table);
2558 }
2559
2560 data->opp_table = opp_table;
2561 data->flags = 0;
2562
2563 /* This should be called before OPPs are initialized */
2564 if (WARN_ON(!list_empty(&opp_table->opp_list))) {
2565 ret = -EBUSY;
2566 goto err;
2567 }
2568
2569 /* Configure clocks */
2570 if (config->clk_names) {
2571 ret = _opp_set_clknames(opp_table, dev, config->clk_names,
2572 config->config_clks);
2573 if (ret)
2574 goto err;
2575
2576 data->flags |= OPP_CONFIG_CLK;
2577 } else if (config->config_clks) {
2578 /* Don't allow config callback without clocks */
2579 ret = -EINVAL;
2580 goto err;
2581 }
2582
2583 /* Configure property names */
2584 if (config->prop_name) {
2585 ret = _opp_set_prop_name(opp_table, config->prop_name);
2586 if (ret)
2587 goto err;
2588
2589 data->flags |= OPP_CONFIG_PROP_NAME;
2590 }
2591
2592 /* Configure config_regulators helper */
2593 if (config->config_regulators) {
2594 ret = _opp_set_config_regulators_helper(opp_table, dev,
2595 config->config_regulators);
2596 if (ret)
2597 goto err;
2598
2599 data->flags |= OPP_CONFIG_REGULATOR_HELPER;
2600 }
2601
2602 /* Configure supported hardware */
2603 if (config->supported_hw) {
2604 ret = _opp_set_supported_hw(opp_table, config->supported_hw,
2605 config->supported_hw_count);
2606 if (ret)
2607 goto err;
2608
2609 data->flags |= OPP_CONFIG_SUPPORTED_HW;
2610 }
2611
2612 /* Configure supplies */
2613 if (config->regulator_names) {
2614 ret = _opp_set_regulators(opp_table, dev,
2615 config->regulator_names);
2616 if (ret)
2617 goto err;
2618
2619 data->flags |= OPP_CONFIG_REGULATOR;
2620 }
2621
2622 if (config->required_dev) {
2623 ret = _opp_set_required_dev(opp_table, dev,
2624 config->required_dev,
2625 config->required_dev_index);
2626 if (ret)
2627 goto err;
2628
2629 data->required_dev_index = config->required_dev_index;
2630 data->flags |= OPP_CONFIG_REQUIRED_DEV;
2631 }
2632
2633 ret = xa_alloc(&opp_configs, &id, data, XA_LIMIT(1, INT_MAX),
2634 GFP_KERNEL);
2635 if (ret)
2636 goto err;
2637
2638 return id;
2639
2640 err:
2641 _opp_clear_config(data);
2642 return ret;
2643 }
2644 EXPORT_SYMBOL_GPL(dev_pm_opp_set_config);
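/*
 * Illustrative usage sketch (not part of the original file; the "foo" names
 * and the clock/regulator names are hypothetical): a driver describes its
 * clocks and supplies once, before any OPPs are added, and saves the returned
 * token. The NULL terminators are required, as the counts are derived by
 * walking the arrays.
 *
 *	#include <linux/pm_opp.h>
 *
 *	static const char * const foo_clk_names[] = { "core", NULL };
 *	static const char * const foo_reg_names[] = { "vdd-core", NULL };
 *
 *	static int foo_opp_init(struct device *dev, int *token)
 *	{
 *		struct dev_pm_opp_config config = {
 *			.clk_names = foo_clk_names,
 *			.regulator_names = foo_reg_names,
 *		};
 *
 *		*token = dev_pm_opp_set_config(dev, &config);
 *		if (*token < 0)
 *			return *token;
 *
 *		return 0;
 *	}
 *
 * The saved token is later passed to dev_pm_opp_clear_config() from the
 * driver's teardown path to release the clocks and regulators acquired here.
 */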
2645
2646 /**
2647 * dev_pm_opp_clear_config() - Releases resources blocked for OPP configuration.
2648 * @token: The token returned by dev_pm_opp_set_config() previously.
2649 *
2650 * This allows all device OPP configurations to be cleared at once. This must be
2651 * called once for each call made to dev_pm_opp_set_config(), in order to free
2652 * the OPPs properly.
2653 *
2654 * Currently the first call itself ends up freeing all the OPP configurations,
2655 * while the later ones only drop the OPP table reference. This works well for
2656 * now as we would never want to use a half-initialized OPP table and want to
2657 * remove the configurations together.
2658 */
2659 void dev_pm_opp_clear_config(int token)
2660 {
2661 struct opp_config_data *data;
2662
2663 /*
2664 * This lets the callers call this unconditionally and keep their code
2665 * simple.
2666 */
2667 if (unlikely(token <= 0))
2668 return;
2669
2670 data = xa_erase(&opp_configs, token);
2671 if (WARN_ON(!data))
2672 return;
2673
2674 _opp_clear_config(data);
2675 }
2676 EXPORT_SYMBOL_GPL(dev_pm_opp_clear_config);
2677
2678 static void devm_pm_opp_config_release(void *token)
2679 {
2680 dev_pm_opp_clear_config((unsigned long)token);
2681 }
2682
2683 /**
2684 * devm_pm_opp_set_config() - Set OPP configuration for the device.
2685 * @dev: Device for which configuration is being set.
2686 * @config: OPP configuration.
2687 *
2688 * This allows all device OPP configurations to be performed at once.
2689 * This is a resource-managed variant of dev_pm_opp_set_config().
2690 *
2691 * Return: 0 on success and errorno otherwise.
2692 */
2693 int devm_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config)
2694 {
2695 int token = dev_pm_opp_set_config(dev, config);
2696
2697 if (token < 0)
2698 return token;
2699
2700 return devm_add_action_or_reset(dev, devm_pm_opp_config_release,
2701 (void *) ((unsigned long) token));
2702 }
2703 EXPORT_SYMBOL_GPL(devm_pm_opp_set_config);
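/*
 * A minimal sketch of the resource-managed variant, under the same
 * hypothetical configuration as in the example above: no token needs to be
 * stored, as the cleanup action registered via devm_add_action_or_reset()
 * clears the configuration when the device is unbound.
 *
 *	ret = devm_pm_opp_set_config(dev, &config);
 *	if (ret)
 *		return ret;
 */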
2704
2705 /**
2706 * dev_pm_opp_xlate_required_opp() - Find required OPP for @src_table OPP.
2707 * @src_table: OPP table which has @dst_table as one of its required OPP table.
2708 * @dst_table: Required OPP table of the @src_table.
2709 * @src_opp: OPP from the @src_table.
2710 *
2711 * This function returns the OPP (present in @dst_table) pointed out by the
2712 * "required-opps" property of the @src_opp (present in @src_table).
2713 *
2714 * The callers are required to call dev_pm_opp_put() for the returned OPP after
2715 * use.
2716 *
2717 * Return: pointer to 'struct dev_pm_opp' on success and errorno otherwise.
2718 */
2719 struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table,
2720 struct opp_table *dst_table,
2721 struct dev_pm_opp *src_opp)
2722 {
2723 struct dev_pm_opp *opp, *dest_opp = ERR_PTR(-ENODEV);
2724 int i;
2725
2726 if (!src_table || !dst_table || !src_opp ||
2727 !src_table->required_opp_tables)
2728 return ERR_PTR(-EINVAL);
2729
2730 /* required-opps not fully initialized yet */
2731 if (lazy_linking_pending(src_table))
2732 return ERR_PTR(-EBUSY);
2733
2734 for (i = 0; i < src_table->required_opp_count; i++) {
2735 if (src_table->required_opp_tables[i] != dst_table)
2736 continue;
2737
2738 scoped_guard(mutex, &src_table->lock) {
2739 list_for_each_entry(opp, &src_table->opp_list, node) {
2740 if (opp == src_opp) {
2741 dest_opp = dev_pm_opp_get(opp->required_opps[i]);
2742 break;
2743 }
2744 }
2745 break;
2746 }
2747 }
2748
2749 if (IS_ERR(dest_opp)) {
2750 pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__,
2751 src_table, dst_table);
2752 }
2753
2754 return dest_opp;
2755 }
2756 EXPORT_SYMBOL_GPL(dev_pm_opp_xlate_required_opp);
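/*
 * Illustrative sketch (the table and OPP variables are hypothetical): given an
 * OPP from a device table that lists a genpd's table under "required-opps",
 * the matching genpd OPP can be looked up and must be released with
 * dev_pm_opp_put() after use.
 *
 *	struct dev_pm_opp *genpd_opp;
 *
 *	genpd_opp = dev_pm_opp_xlate_required_opp(dev_table, genpd_table, dev_opp);
 *	if (!IS_ERR(genpd_opp))
 *		dev_pm_opp_put(genpd_opp);
 */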
2757
2758 /**
2759 * dev_pm_opp_xlate_performance_state() - Find required OPP's pstate for src_table.
2760 * @src_table: OPP table which has dst_table as one of its required OPP table.
2761 * @dst_table: Required OPP table of the src_table.
2762 * @pstate: Current performance state of the src_table.
2763 *
2764 * This returns the pstate of the OPP (present in @dst_table) pointed out by the
2765 * "required-opps" property of the OPP (present in @src_table) which has
2766 * performance state set to @pstate.
2767 *
2768 * Return: Zero or positive performance state on success, otherwise negative
2769 * value on errors.
2770 */
2771 int dev_pm_opp_xlate_performance_state(struct opp_table *src_table,
2772 struct opp_table *dst_table,
2773 unsigned int pstate)
2774 {
2775 struct dev_pm_opp *opp;
2776 int i;
2777
2778 /*
2779 * Normally the src_table will have the "required_opps" property set to
2780 * point to one of the OPPs in the dst_table, but in some cases the
2781 * genpd and its master have one to one mapping of performance states
2782 * and so none of them have the "required-opps" property set. Return the
2783 * pstate of the src_table as it is in such cases.
2784 */
2785 if (!src_table || !src_table->required_opp_count)
2786 return pstate;
2787
2788 /* Both OPP tables must belong to genpds */
2789 if (unlikely(!src_table->is_genpd || !dst_table->is_genpd)) {
2790 pr_err("%s: Performance state is only valid for genpds.\n", __func__);
2791 return -EINVAL;
2792 }
2793
2794 /* required-opps not fully initialized yet */
2795 if (lazy_linking_pending(src_table))
2796 return -EBUSY;
2797
2798 for (i = 0; i < src_table->required_opp_count; i++) {
2799 if (src_table->required_opp_tables[i]->np == dst_table->np)
2800 break;
2801 }
2802
2803 if (unlikely(i == src_table->required_opp_count)) {
2804 pr_err("%s: Couldn't find matching OPP table (%p: %p)\n",
2805 __func__, src_table, dst_table);
2806 return -EINVAL;
2807 }
2808
2809 guard(mutex)(&src_table->lock);
2810
2811 list_for_each_entry(opp, &src_table->opp_list, node) {
2812 if (opp->level == pstate)
2813 return opp->required_opps[i]->level;
2814 }
2815
2816 pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__, src_table,
2817 dst_table);
2818
2819 return -EINVAL;
2820 }
2821
2822 /**
2823 * dev_pm_opp_add_dynamic() - Add an OPP for a device from its definition
2824 * @dev: The device for which we do this operation
2825 * @data: The OPP data for the OPP to add
2826 *
2827 * This function adds an opp definition to the opp table and returns status.
2828 * The opp is made available by default and it can be controlled using
2829 * dev_pm_opp_enable/disable functions.
2830 *
2831 * Return:
2832 * 0 On success OR
2833 * Duplicate OPPs (both freq and volt are same) and opp->available
2834 * -EEXIST Freq is same but volt is different OR
2835 * Duplicate OPPs (both freq and volt are same) and !opp->available
2836 * -ENOMEM Memory allocation failure
2837 */
2838 int dev_pm_opp_add_dynamic(struct device *dev, struct dev_pm_opp_data *data)
2839 {
2840 struct opp_table *opp_table;
2841 int ret;
2842
2843 opp_table = _add_opp_table(dev, true);
2844 if (IS_ERR(opp_table))
2845 return PTR_ERR(opp_table);
2846
2847 /* Fix regulator count for dynamic OPPs */
2848 opp_table->regulator_count = 1;
2849
2850 ret = _opp_add_v1(opp_table, dev, data, true);
2851 if (ret)
2852 dev_pm_opp_put_opp_table(opp_table);
2853
2854 return ret;
2855 }
2856 EXPORT_SYMBOL_GPL(dev_pm_opp_add_dynamic);
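/*
 * Illustrative usage sketch (the frequency and voltage values are
 * hypothetical): a driver without static DT OPPs can register entries at
 * runtime and drop them again with dev_pm_opp_remove(), which also releases
 * the OPP table reference taken here.
 *
 *	struct dev_pm_opp_data data = {
 *		.freq = 800000000,	(800 MHz)
 *		.u_volt = 1000000,	(1.0 V)
 *	};
 *	int ret;
 *
 *	ret = dev_pm_opp_add_dynamic(dev, &data);
 *	if (ret)
 *		return ret;
 *
 *	... later ...
 *
 *	dev_pm_opp_remove(dev, 800000000);
 */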
2857
2858 /**
2859 * _opp_set_availability() - helper to set the availability of an opp
2860 * @dev: device for which we do this operation
2861 * @freq: OPP frequency to modify availability
2862 * @availability_req: availability status requested for this opp
2863 *
2864 * Set the availability of an OPP; opp_{enable,disable} share a common logic,
2865 * which is isolated here.
2866 *
2867 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
2868 * copy operation, returns 0 if no modification was done OR modification was
2869 * successful.
2870 */
2871 static int _opp_set_availability(struct device *dev, unsigned long freq,
2872 bool availability_req)
2873 {
2874 struct dev_pm_opp *opp __free(put_opp) = ERR_PTR(-ENODEV), *tmp_opp;
2875
2876 /* Find the opp_table */
2877 struct opp_table *opp_table __free(put_opp_table) =
2878 _find_opp_table(dev);
2879
2880 if (IS_ERR(opp_table)) {
2881 dev_warn(dev, "%s: Device OPP not found (%ld)\n", __func__,
2882 PTR_ERR(opp_table));
2883 return PTR_ERR(opp_table);
2884 }
2885
2886 if (!assert_single_clk(opp_table, 0))
2887 return -EINVAL;
2888
2889 scoped_guard(mutex, &opp_table->lock) {
2890 /* Do we have the frequency? */
2891 list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
2892 if (tmp_opp->rates[0] == freq) {
2893 opp = dev_pm_opp_get(tmp_opp);
2894
2895 /* Is update really needed? */
2896 if (opp->available == availability_req)
2897 return 0;
2898
2899 opp->available = availability_req;
2900 break;
2901 }
2902 }
2903 }
2904
2905 if (IS_ERR(opp))
2906 return PTR_ERR(opp);
2907
2908 /* Notify the change of the OPP availability */
2909 if (availability_req)
2910 blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE,
2911 opp);
2912 else
2913 blocking_notifier_call_chain(&opp_table->head,
2914 OPP_EVENT_DISABLE, opp);
2915
2916 return 0;
2917 }
2918
2919 /**
2920 * dev_pm_opp_adjust_voltage() - helper to change the voltage of an OPP
2921 * @dev: device for which we do this operation
2922 * @freq: OPP frequency to adjust voltage of
2923 * @u_volt: new OPP target voltage
2924 * @u_volt_min: new OPP min voltage
2925 * @u_volt_max: new OPP max voltage
2926 *
2927 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
2928 * copy operation, returns 0 if no modification was done OR modification was
2929 * successful.
2930 */
2931 int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
2932 unsigned long u_volt, unsigned long u_volt_min,
2933 unsigned long u_volt_max)
2934
2935 {
2936 struct dev_pm_opp *opp __free(put_opp) = ERR_PTR(-ENODEV), *tmp_opp;
2937 int r;
2938
2939 /* Find the opp_table */
2940 struct opp_table *opp_table __free(put_opp_table) =
2941 _find_opp_table(dev);
2942
2943 if (IS_ERR(opp_table)) {
2944 r = PTR_ERR(opp_table);
2945 dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
2946 return r;
2947 }
2948
2949 if (!assert_single_clk(opp_table, 0))
2950 return -EINVAL;
2951
2952 scoped_guard(mutex, &opp_table->lock) {
2953 /* Do we have the frequency? */
2954 list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
2955 if (tmp_opp->rates[0] == freq) {
2956 opp = dev_pm_opp_get(tmp_opp);
2957
2958 /* Is update really needed? */
2959 if (opp->supplies->u_volt == u_volt)
2960 return 0;
2961
2962 opp->supplies->u_volt = u_volt;
2963 opp->supplies->u_volt_min = u_volt_min;
2964 opp->supplies->u_volt_max = u_volt_max;
2965
2966 break;
2967 }
2968 }
2969 }
2970
2971 if (IS_ERR(opp))
2972 return PTR_ERR(opp);
2973
2974 /* Notify the voltage change of the OPP */
2975 blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADJUST_VOLTAGE,
2976 opp);
2977
2978 return 0;
2979 }
2980 EXPORT_SYMBOL_GPL(dev_pm_opp_adjust_voltage);
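/*
 * Illustrative sketch (the values are hypothetical): an adaptive voltage
 * scaling driver may trim the voltage of an existing 800 MHz OPP at runtime;
 * registered notifiers then receive OPP_EVENT_ADJUST_VOLTAGE for that OPP.
 *
 *	ret = dev_pm_opp_adjust_voltage(dev, 800000000, 975000, 950000, 1000000);
 */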
2981
2982 /**
2983 * dev_pm_opp_sync_regulators() - Sync state of voltage regulators
2984 * @dev: device for which we do this operation
2985 *
2986 * Sync voltage state of the OPP table regulators.
2987 *
2988 * Return: 0 on success or a negative error value.
2989 */
2990 int dev_pm_opp_sync_regulators(struct device *dev)
2991 {
2992 struct regulator *reg;
2993 int ret, i;
2994
2995 /* Device may not have OPP table */
2996 struct opp_table *opp_table __free(put_opp_table) =
2997 _find_opp_table(dev);
2998
2999 if (IS_ERR(opp_table))
3000 return 0;
3001
3002 /* Regulator may not be required for the device */
3003 if (unlikely(!opp_table->regulators))
3004 return 0;
3005
3006 /* Nothing to sync if voltage wasn't changed */
3007 if (!opp_table->enabled)
3008 return 0;
3009
3010 for (i = 0; i < opp_table->regulator_count; i++) {
3011 reg = opp_table->regulators[i];
3012 ret = regulator_sync_voltage(reg);
3013 if (ret)
3014 return ret;
3015 }
3016
3017 return 0;
3018 }
3019 EXPORT_SYMBOL_GPL(dev_pm_opp_sync_regulators);
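/*
 * A minimal sketch, assuming a driver whose supplies may have been reset while
 * its power domain was off: calling this from the resume path re-applies the
 * previously programmed voltages before the device is used again.
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		return dev_pm_opp_sync_regulators(dev);
 *	}
 */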
3020
3021 /**
3022 * dev_pm_opp_enable() - Enable a specific OPP
3023 * @dev: device for which we do this operation
3024 * @freq: OPP frequency to enable
3025 *
3026 * Enables a provided opp. If the operation is valid, this returns 0, else the
3027 * corresponding error value. It is meant to be used by users to make an OPP
3028 * available again after it was temporarily made unavailable with dev_pm_opp_disable.
3029 *
3030 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
3031 * copy operation, returns 0 if no modification was done OR modification was
3032 * successful.
3033 */
3034 int dev_pm_opp_enable(struct device *dev, unsigned long freq)
3035 {
3036 return _opp_set_availability(dev, freq, true);
3037 }
3038 EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
3039
3040 /**
3041 * dev_pm_opp_disable() - Disable a specific OPP
3042 * @dev: device for which we do this operation
3043 * @freq: OPP frequency to disable
3044 *
3045 * Disables a provided opp. If the operation is valid, this returns
3046 * 0, else the corresponding error value. It is meant to be a temporary
3047 * control by users to make this OPP not available until the circumstances are
3048 * right to make it available again (with a call to dev_pm_opp_enable).
3049 *
3050 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
3051 * copy operation, returns 0 if no modification was done OR modification was
3052 * successful.
3053 */
3054 int dev_pm_opp_disable(struct device *dev, unsigned long freq)
3055 {
3056 return _opp_set_availability(dev, freq, false);
3057 }
3058 EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
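/*
 * Illustrative sketch (the frequency is hypothetical): a thermal or similar
 * driver may mask the highest OPP while the device runs hot and restore it
 * once it has cooled down.
 *
 *	dev_pm_opp_disable(dev, 1200000000);	(throttle)
 *	...
 *	dev_pm_opp_enable(dev, 1200000000);	(restore)
 */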
3059
3060 /**
3061 * dev_pm_opp_register_notifier() - Register OPP notifier for the device
3062 * @dev: Device for which notifier needs to be registered
3063 * @nb: Notifier block to be registered
3064 *
3065 * Return: 0 on success or a negative error value.
3066 */
3067 int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb)
3068 {
3069 struct opp_table *opp_table __free(put_opp_table) =
3070 _find_opp_table(dev);
3071
3072 if (IS_ERR(opp_table))
3073 return PTR_ERR(opp_table);
3074
3075 return blocking_notifier_chain_register(&opp_table->head, nb);
3076 }
3077 EXPORT_SYMBOL(dev_pm_opp_register_notifier);
3078
3079 /**
3080 * dev_pm_opp_unregister_notifier() - Unregister OPP notifier for the device
3081 * @dev: Device for which notifier needs to be unregistered
3082 * @nb: Notifier block to be unregistered
3083 *
3084 * Return: 0 on success or a negative error value.
3085 */
3086 int dev_pm_opp_unregister_notifier(struct device *dev,
3087 struct notifier_block *nb)
3088 {
3089 struct opp_table *opp_table __free(put_opp_table) =
3090 _find_opp_table(dev);
3091
3092 if (IS_ERR(opp_table))
3093 return PTR_ERR(opp_table);
3094
3095 return blocking_notifier_chain_unregister(&opp_table->head, nb);
3096 }
3097 EXPORT_SYMBOL(dev_pm_opp_unregister_notifier);
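/*
 * Illustrative sketch (the "foo" names are hypothetical): the notifier's data
 * pointer is the affected 'struct dev_pm_opp' and the event is one of the
 * OPP_EVENT_* values used throughout this file.
 *
 *	static int foo_opp_notify(struct notifier_block *nb, unsigned long event,
 *				  void *data)
 *	{
 *		struct dev_pm_opp *opp = data;
 *
 *		switch (event) {
 *		case OPP_EVENT_ADD:
 *		case OPP_EVENT_REMOVE:
 *		case OPP_EVENT_ENABLE:
 *		case OPP_EVENT_DISABLE:
 *		case OPP_EVENT_ADJUST_VOLTAGE:
 *		default:
 *			break;
 *		}
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_opp_nb = {
 *		.notifier_call = foo_opp_notify,
 *	};
 *
 *	...
 *	ret = dev_pm_opp_register_notifier(dev, &foo_opp_nb);
 */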
3098
3099 /**
3100 * dev_pm_opp_remove_table() - Free all OPPs associated with the device
3101 * @dev: device pointer used to lookup OPP table.
3102 *
3103 * Free both OPPs created using static entries present in DT and the
3104 * dynamically added entries.
3105 */
3106 void dev_pm_opp_remove_table(struct device *dev)
3107 {
3108 /* Check for existing table for 'dev' */
3109 struct opp_table *opp_table __free(put_opp_table) =
3110 _find_opp_table(dev);
3111
3112 if (IS_ERR(opp_table)) {
3113 int error = PTR_ERR(opp_table);
3114
3115 if (error != -ENODEV)
3116 WARN(1, "%s: opp_table: %d\n",
3117 IS_ERR_OR_NULL(dev) ?
3118 "Invalid device" : dev_name(dev),
3119 error);
3120 return;
3121 }
3122
3123 /*
3124 * Drop the extra reference only if the OPP table was successfully added
3125 * with dev_pm_opp_of_add_table() earlier.
3126 */
3127 if (_opp_remove_all_static(opp_table))
3128 dev_pm_opp_put_opp_table(opp_table);
3129 }
3130 EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);
3131