// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic OPP Interface
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/pm_domain.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/xarray.h>

#include "opp.h"

/*
 * The root of the list of all opp-tables. All opp_table structures branch off
 * from here, with each opp_table containing the list of opps it supports in
 * various states of availability.
 */
LIST_HEAD(opp_tables);

/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(opp_table_lock);
/* Flag indicating that opp_tables list is being updated at the moment */
static bool opp_tables_busy;

/* OPP ID allocator */
static DEFINE_XARRAY_ALLOC1(opp_configs);

static bool _find_opp_dev(const struct device *dev, struct opp_table *opp_table)
{
        struct opp_device *opp_dev;
        bool found = false;

        mutex_lock(&opp_table->lock);
        list_for_each_entry(opp_dev, &opp_table->dev_list, node)
                if (opp_dev->dev == dev) {
                        found = true;
                        break;
                }

        mutex_unlock(&opp_table->lock);
        return found;
}

static struct opp_table *_find_opp_table_unlocked(struct device *dev)
{
        struct opp_table *opp_table;

        list_for_each_entry(opp_table, &opp_tables, node) {
                if (_find_opp_dev(dev, opp_table)) {
                        _get_opp_table_kref(opp_table);
                        return opp_table;
                }
        }

        return ERR_PTR(-ENODEV);
}

/**
 * _find_opp_table() - find opp_table struct using device pointer
 * @dev: device pointer used to look up the OPP table
 *
 * Search the OPP table list for one containing the matching device.
 *
 * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
 * -EINVAL based on type of error.
 *
 * The callers must call dev_pm_opp_put_opp_table() after the table is used.
 */
struct opp_table *_find_opp_table(struct device *dev)
{
        struct opp_table *opp_table;

        if (IS_ERR_OR_NULL(dev)) {
                pr_err("%s: Invalid parameters\n", __func__);
                return ERR_PTR(-EINVAL);
        }

        mutex_lock(&opp_table_lock);
        opp_table = _find_opp_table_unlocked(dev);
        mutex_unlock(&opp_table_lock);

        return opp_table;
}

/*
 * Returns true if the OPP table doesn't have multiple clocks, else returns
 * false with a WARN.
 *
 * We don't force clk_count == 1 here as there are users who don't have a clock
 * representation in the OPP table and manage the clock configuration themselves
 * in a platform-specific way.
 */
static bool assert_single_clk(struct opp_table *opp_table,
                              unsigned int __always_unused index)
{
        return !WARN_ON(opp_table->clk_count > 1);
}

/*
 * Returns true if the clock table is large enough to contain the clock index.
 */
static bool assert_clk_index(struct opp_table *opp_table,
                             unsigned int index)
{
        return opp_table->clk_count > index;
}

/*
 * Returns true if the bandwidth table is large enough to contain the bandwidth
 * index.
 */
static bool assert_bandwidth_index(struct opp_table *opp_table,
                                   unsigned int index)
{
        return opp_table->path_count > index;
}

/**
 * dev_pm_opp_get_bw() - Gets the bandwidth corresponding to an opp
 * @opp: opp for which the bandwidth has to be returned
 * @peak: select peak or average bandwidth
 * @index: bandwidth index
 *
 * Return: bandwidth in kBps, else return 0
 */
unsigned long dev_pm_opp_get_bw(struct dev_pm_opp *opp, bool peak, int index)
{
        if (IS_ERR_OR_NULL(opp)) {
                pr_err("%s: Invalid parameters\n", __func__);
                return 0;
        }

        if (index >= opp->opp_table->path_count)
                return 0;

        if (!opp->bandwidth)
                return 0;

        return peak ? opp->bandwidth[index].peak : opp->bandwidth[index].avg;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_bw);
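
/*
 * Example (hypothetical consumer code, not part of this file): a minimal
 * sketch reading the peak and average bandwidth of the first interconnect
 * path of an OPP. "opp" is assumed to be a valid OPP obtained from one of
 * the dev_pm_opp_find_*() helpers.
 *
 *	unsigned long peak_kbps = dev_pm_opp_get_bw(opp, true, 0);
 *	unsigned long avg_kbps = dev_pm_opp_get_bw(opp, false, 0);
 *
 * A return value of 0 means the OPP carries no bandwidth information for
 * the requested index.
 */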

/**
 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
 * @opp: opp for which the voltage has to be returned
 *
 * Return: voltage in microvolts corresponding to the opp, else
 * return 0
 *
 * This is useful only for devices with a single power supply.
 */
unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
{
        if (IS_ERR_OR_NULL(opp)) {
                pr_err("%s: Invalid parameters\n", __func__);
                return 0;
        }

        return opp->supplies[0].u_volt;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
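
/*
 * Example (hypothetical driver code, a sketch only): look up the OPP at or
 * above a target frequency and read its voltage. "dev" and "hz" are assumed
 * to come from the caller; error handling is trimmed for brevity.
 *
 *	struct dev_pm_opp *opp;
 *	unsigned long freq = hz, volt;
 *
 *	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
 *	if (IS_ERR(opp))
 *		return PTR_ERR(opp);
 *
 *	volt = dev_pm_opp_get_voltage(opp);
 *	dev_pm_opp_put(opp);
 */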

/**
 * dev_pm_opp_get_supplies() - Gets the supply information corresponding to an opp
 * @opp: opp for which the supply information has to be returned
 * @supplies: Placeholder for copying the supply information.
 *
 * Return: negative error number on failure, 0 otherwise on success after
 * setting @supplies.
 *
 * This can be used for devices with any number of power supplies. The caller
 * must ensure that the @supplies array contains space for each regulator.
 */
int dev_pm_opp_get_supplies(struct dev_pm_opp *opp,
                            struct dev_pm_opp_supply *supplies)
{
        if (IS_ERR_OR_NULL(opp) || !supplies) {
                pr_err("%s: Invalid parameters\n", __func__);
                return -EINVAL;
        }

        memcpy(supplies, opp->supplies,
               sizeof(*supplies) * opp->opp_table->regulator_count);
        return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_supplies);

/**
 * dev_pm_opp_get_power() - Gets the power corresponding to an opp
 * @opp: opp for which the power has to be returned
 *
 * Return: power in microwatts corresponding to the opp, else
 * return 0
 *
 * This is useful only for devices with a single power supply.
 */
unsigned long dev_pm_opp_get_power(struct dev_pm_opp *opp)
{
        unsigned long opp_power = 0;
        int i;

        if (IS_ERR_OR_NULL(opp)) {
                pr_err("%s: Invalid parameters\n", __func__);
                return 0;
        }
        for (i = 0; i < opp->opp_table->regulator_count; i++)
                opp_power += opp->supplies[i].u_watt;

        return opp_power;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_power);

/**
 * dev_pm_opp_get_freq_indexed() - Gets the frequency corresponding to an
 *                                 available opp with the specified index
 * @opp: opp for which the frequency has to be returned
 * @index: index of the frequency within the required opp
 *
 * Return: frequency in hertz corresponding to the opp with the specified
 * index, else return 0
 */
unsigned long dev_pm_opp_get_freq_indexed(struct dev_pm_opp *opp, u32 index)
{
        if (IS_ERR_OR_NULL(opp) || index >= opp->opp_table->clk_count) {
                pr_err("%s: Invalid parameters\n", __func__);
                return 0;
        }

        return opp->rates[index];
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq_indexed);

/**
 * dev_pm_opp_get_level() - Gets the level corresponding to an available opp
 * @opp: opp for which the level value has to be returned
 *
 * Return: level read from device tree corresponding to the opp, else
 * return 0.
 */
unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp)
{
        if (IS_ERR_OR_NULL(opp) || !opp->available) {
                pr_err("%s: Invalid parameters\n", __func__);
                return 0;
        }

        return opp->level;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_level);

/**
 * dev_pm_opp_get_required_pstate() - Gets the required performance state
 *                                    corresponding to an available opp
 * @opp: opp for which the performance state has to be returned
 * @index: index of the required opp
 *
 * Return: performance state read from device tree corresponding to the
 * required opp, else return 0.
 */
unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp,
                                            unsigned int index)
{
        if (IS_ERR_OR_NULL(opp) || !opp->available ||
            index >= opp->opp_table->required_opp_count) {
                pr_err("%s: Invalid parameters\n", __func__);
                return 0;
        }

        /* required-opps not fully initialized yet */
        if (lazy_linking_pending(opp->opp_table))
                return 0;

        /* The required OPP table must belong to a genpd */
        if (unlikely(!opp->opp_table->required_opp_tables[index]->is_genpd)) {
                pr_err("%s: Performance state is only valid for genpds.\n", __func__);
                return 0;
        }

        return opp->required_opps[index]->level;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_required_pstate);

/**
 * dev_pm_opp_is_turbo() - Returns whether opp is a turbo OPP or not
 * @opp: opp for which turbo mode is being verified
 *
 * Turbo OPPs are not for normal use, and can be enabled (under certain
 * conditions) for short durations of time to finish high-throughput work
 * quickly. Running on them for longer times may overheat the chip.
 *
 * Return: true if opp is a turbo opp, else false.
 */
bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
{
        if (IS_ERR_OR_NULL(opp) || !opp->available) {
                pr_err("%s: Invalid parameters\n", __func__);
                return false;
        }

        return opp->turbo;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);

/**
 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max clock latency in nanoseconds.
 */
unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
{
        struct opp_table *opp_table;
        unsigned long clock_latency_ns;

        opp_table = _find_opp_table(dev);
        if (IS_ERR(opp_table))
                return 0;

        clock_latency_ns = opp_table->clock_latency_ns_max;

        dev_pm_opp_put_opp_table(opp_table);

        return clock_latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);

/**
 * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max voltage latency in nanoseconds.
 */
unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
{
        struct opp_table *opp_table;
        struct dev_pm_opp *opp;
        struct regulator *reg;
        unsigned long latency_ns = 0;
        int ret, i, count;
        struct {
                unsigned long min;
                unsigned long max;
        } *uV;

        opp_table = _find_opp_table(dev);
        if (IS_ERR(opp_table))
                return 0;

        /* Regulator may not be required for the device */
        if (!opp_table->regulators)
                goto put_opp_table;

        count = opp_table->regulator_count;

        uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL);
        if (!uV)
                goto put_opp_table;

        mutex_lock(&opp_table->lock);

        for (i = 0; i < count; i++) {
                uV[i].min = ~0;
                uV[i].max = 0;

                list_for_each_entry(opp, &opp_table->opp_list, node) {
                        if (!opp->available)
                                continue;

                        if (opp->supplies[i].u_volt_min < uV[i].min)
                                uV[i].min = opp->supplies[i].u_volt_min;
                        if (opp->supplies[i].u_volt_max > uV[i].max)
                                uV[i].max = opp->supplies[i].u_volt_max;
                }
        }

        mutex_unlock(&opp_table->lock);

        /*
         * The caller needs to ensure that opp_table (and hence the regulator)
         * isn't freed, while we are executing this routine.
         */
        for (i = 0; i < count; i++) {
                reg = opp_table->regulators[i];
                ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max);
                if (ret > 0)
                        latency_ns += ret * 1000;
        }

        kfree(uV);
put_opp_table:
        dev_pm_opp_put_opp_table(opp_table);

        return latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);

/**
 * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
 *                                           nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max transition latency, in nanoseconds, to
 * switch from one OPP to another.
 */
unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
{
        return dev_pm_opp_get_max_volt_latency(dev) +
               dev_pm_opp_get_max_clock_latency(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);

/**
 * dev_pm_opp_get_suspend_opp_freq() - Get frequency of suspend opp in Hz
 * @dev: device for which we do this operation
 *
 * Return: This function returns the frequency of the OPP marked as suspend_opp
 * if one is available, else returns 0.
 */
unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev)
{
        struct opp_table *opp_table;
        unsigned long freq = 0;

        opp_table = _find_opp_table(dev);
        if (IS_ERR(opp_table))
                return 0;

        if (opp_table->suspend_opp && opp_table->suspend_opp->available)
                freq = dev_pm_opp_get_freq(opp_table->suspend_opp);

        dev_pm_opp_put_opp_table(opp_table);

        return freq;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp_freq);
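
/*
 * Example (hypothetical suspend path, a sketch only): switch a device to
 * its suspend OPP, if one was marked via the "opp-suspend" DT property.
 * "dev" and "ret" are assumed to come from the caller.
 *
 *	unsigned long freq = dev_pm_opp_get_suspend_opp_freq(dev);
 *
 *	if (freq)
 *		ret = dev_pm_opp_set_rate(dev, freq);
 */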

int _get_opp_count(struct opp_table *opp_table)
{
        struct dev_pm_opp *opp;
        int count = 0;

        mutex_lock(&opp_table->lock);

        list_for_each_entry(opp, &opp_table->opp_list, node) {
                if (opp->available)
                        count++;
        }

        mutex_unlock(&opp_table->lock);

        return count;
}

/**
 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
 * @dev: device for which we do this operation
 *
 * Return: This function returns the number of available opps if there are any,
 * returns 0 if none, or the corresponding error value otherwise.
 */
int dev_pm_opp_get_opp_count(struct device *dev)
{
        struct opp_table *opp_table;
        int count;

        opp_table = _find_opp_table(dev);
        if (IS_ERR(opp_table)) {
                count = PTR_ERR(opp_table);
                dev_dbg(dev, "%s: OPP table not found (%d)\n",
                        __func__, count);
                return count;
        }

        count = _get_opp_count(opp_table);
        dev_pm_opp_put_opp_table(opp_table);

        return count;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
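
/*
 * Example (hypothetical driver probe, a sketch only): bail out when no
 * usable OPPs exist for the device. A negative return from
 * dev_pm_opp_get_opp_count() also covers the "no OPP table" case.
 *
 *	int count = dev_pm_opp_get_opp_count(dev);
 *
 *	if (count <= 0)
 *		return count ? count : -ENODEV;
 */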

/* Helpers to read keys */
static unsigned long _read_freq(struct dev_pm_opp *opp, int index)
{
        return opp->rates[index];
}

static unsigned long _read_level(struct dev_pm_opp *opp, int index)
{
        return opp->level;
}

static unsigned long _read_bw(struct dev_pm_opp *opp, int index)
{
        return opp->bandwidth[index].peak;
}

/* Generic comparison helpers */
static bool _compare_exact(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
                           unsigned long opp_key, unsigned long key)
{
        if (opp_key == key) {
                *opp = temp_opp;
                return true;
        }

        return false;
}

static bool _compare_ceil(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
                          unsigned long opp_key, unsigned long key)
{
        if (opp_key >= key) {
                *opp = temp_opp;
                return true;
        }

        return false;
}

static bool _compare_floor(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
                           unsigned long opp_key, unsigned long key)
{
        if (opp_key > key)
                return true;

        *opp = temp_opp;
        return false;
}

/* Generic key finding helpers */
static struct dev_pm_opp *_opp_table_find_key(struct opp_table *opp_table,
                unsigned long *key, int index, bool available,
                unsigned long (*read)(struct dev_pm_opp *opp, int index),
                bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
                                unsigned long opp_key, unsigned long key),
                bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
        struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

        /* Assert that the requirement is met */
        if (assert && !assert(opp_table, index))
                return ERR_PTR(-EINVAL);

        mutex_lock(&opp_table->lock);

        list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
                if (temp_opp->available == available) {
                        if (compare(&opp, temp_opp, read(temp_opp, index), *key))
                                break;
                }
        }

        /* Increment the reference count of OPP */
        if (!IS_ERR(opp)) {
                *key = read(opp, index);
                dev_pm_opp_get(opp);
        }

        mutex_unlock(&opp_table->lock);

        return opp;
}

static struct dev_pm_opp *
_find_key(struct device *dev, unsigned long *key, int index, bool available,
          unsigned long (*read)(struct dev_pm_opp *opp, int index),
          bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
                          unsigned long opp_key, unsigned long key),
          bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
        struct opp_table *opp_table;
        struct dev_pm_opp *opp;

        opp_table = _find_opp_table(dev);
        if (IS_ERR(opp_table)) {
                dev_err(dev, "%s: OPP table not found (%ld)\n", __func__,
                        PTR_ERR(opp_table));
                return ERR_CAST(opp_table);
        }

        opp = _opp_table_find_key(opp_table, key, index, available, read,
                                  compare, assert);

        dev_pm_opp_put_opp_table(opp_table);

        return opp;
}

static struct dev_pm_opp *_find_key_exact(struct device *dev,
                unsigned long key, int index, bool available,
                unsigned long (*read)(struct dev_pm_opp *opp, int index),
                bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
        /*
         * The value of key will be updated here, but will be ignored as the
         * caller doesn't need it.
         */
        return _find_key(dev, &key, index, available, read, _compare_exact,
                         assert);
}

static struct dev_pm_opp *_opp_table_find_key_ceil(struct opp_table *opp_table,
                unsigned long *key, int index, bool available,
                unsigned long (*read)(struct dev_pm_opp *opp, int index),
                bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
        return _opp_table_find_key(opp_table, key, index, available, read,
                                   _compare_ceil, assert);
}

static struct dev_pm_opp *_find_key_ceil(struct device *dev, unsigned long *key,
                int index, bool available,
                unsigned long (*read)(struct dev_pm_opp *opp, int index),
                bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
        return _find_key(dev, key, index, available, read, _compare_ceil,
                         assert);
}

static struct dev_pm_opp *_find_key_floor(struct device *dev,
                unsigned long *key, int index, bool available,
                unsigned long (*read)(struct dev_pm_opp *opp, int index),
                bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
        return _find_key(dev, key, index, available, read, _compare_floor,
                         assert);
}

/**
 * dev_pm_opp_find_freq_exact() - search for an exact frequency
 * @dev: device for which we do this operation
 * @freq: frequency to search for
 * @available: true/false - match for available opp
 *
 * Return: Searches for an exact match in the opp table and returns a pointer
 * to the matching opp if found, else returns ERR_PTR in case of error and
 * should be handled using IS_ERR. Error return values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * Note: available is a modifier for the search. If available=true, the match
 * is for an exact frequency which is available in the stored OPP table. If
 * false, the match is for an exact frequency which is not available.
 *
 * This provides a mechanism to enable an opp which is not available currently
 * or the opposite as well.
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
                                              unsigned long freq, bool available)
{
        return _find_key_exact(dev, freq, 0, available, _read_freq,
                               assert_single_clk);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
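
/*
 * Example (hypothetical consumer, a sketch only): check whether an exact
 * 800 MHz OPP is currently available and read its voltage if so. The
 * frequency value is illustrative.
 *
 *	struct dev_pm_opp *opp;
 *
 *	opp = dev_pm_opp_find_freq_exact(dev, 800000000, true);
 *	if (!IS_ERR(opp)) {
 *		unsigned long volt = dev_pm_opp_get_voltage(opp);
 *
 *		dev_pm_opp_put(opp);
 *	}
 */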

/**
 * dev_pm_opp_find_freq_exact_indexed() - Search for an exact freq for the
 *                                        clock corresponding to the index
 * @dev: Device for which we do this operation
 * @freq: frequency to search for
 * @index: Clock index
 * @available: true/false - match for available opp
 *
 * Search for the matching exact OPP for the clock corresponding to the
 * specified index from a starting freq for a device.
 *
 * Return: matching *opp, else returns ERR_PTR in case of error and should be
 * handled using IS_ERR. Error return values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *
dev_pm_opp_find_freq_exact_indexed(struct device *dev, unsigned long freq,
                                   u32 index, bool available)
{
        return _find_key_exact(dev, freq, index, available, _read_freq,
                               assert_clk_index);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact_indexed);

static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
                                                   unsigned long *freq)
{
        return _opp_table_find_key_ceil(opp_table, freq, 0, true, _read_freq,
                                        assert_single_clk);
}

/**
 * dev_pm_opp_find_freq_ceil() - Search for a rounded ceil freq
 * @dev: device for which we do this operation
 * @freq: Start frequency
 *
 * Search for the matching ceil *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
                                             unsigned long *freq)
{
        return _find_key_ceil(dev, freq, 0, true, _read_freq, assert_single_clk);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
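
/*
 * Example (hypothetical cpufreq-style consumer, a sketch only): round a
 * requested rate up to the next available OPP and program it. On success
 * *freq is refreshed to the OPP's exact frequency. "target_hz" is assumed
 * to come from the caller.
 *
 *	unsigned long freq = target_hz;
 *	struct dev_pm_opp *opp = dev_pm_opp_find_freq_ceil(dev, &freq);
 *
 *	if (IS_ERR(opp))
 *		return PTR_ERR(opp);
 *	dev_pm_opp_put(opp);
 *
 *	return dev_pm_opp_set_rate(dev, freq);
 */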

/**
 * dev_pm_opp_find_freq_ceil_indexed() - Search for a rounded ceil freq for the
 *                                       clock corresponding to the index
 * @dev: Device for which we do this operation
 * @freq: Start frequency
 * @index: Clock index
 *
 * Search for the matching ceil *available* OPP for the clock corresponding to
 * the specified index from a starting freq for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *
dev_pm_opp_find_freq_ceil_indexed(struct device *dev, unsigned long *freq,
                                  u32 index)
{
        return _find_key_ceil(dev, freq, index, true, _read_freq,
                              assert_clk_index);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_indexed);

/**
 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
 * @dev: device for which we do this operation
 * @freq: Start frequency
 *
 * Search for the matching floor *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
                                              unsigned long *freq)
{
        return _find_key_floor(dev, freq, 0, true, _read_freq, assert_single_clk);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);

/**
 * dev_pm_opp_find_freq_floor_indexed() - Search for a rounded floor freq for the
 *                                        clock corresponding to the index
 * @dev: Device for which we do this operation
 * @freq: Start frequency
 * @index: Clock index
 *
 * Search for the matching floor *available* OPP for the clock corresponding to
 * the specified index from a starting freq for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *
dev_pm_opp_find_freq_floor_indexed(struct device *dev, unsigned long *freq,
                                   u32 index)
{
        return _find_key_floor(dev, freq, index, true, _read_freq, assert_clk_index);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor_indexed);

/**
 * dev_pm_opp_find_level_exact() - search for an exact level
 * @dev: device for which we do this operation
 * @level: level to search for
 *
 * Return: Searches for an exact match in the opp table and returns a pointer
 * to the matching opp if found, else returns ERR_PTR in case of error and
 * should be handled using IS_ERR. Error return values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
                                               unsigned int level)
{
        return _find_key_exact(dev, level, 0, true, _read_level, NULL);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_exact);

/**
 * dev_pm_opp_find_level_ceil() - search for a rounded-up level
 * @dev: device for which we do this operation
 * @level: level to search for
 *
 * Return: Searches for a rounded-up match in the opp table and returns a
 * pointer to the matching opp if found, else returns ERR_PTR in case of error
 * and should be handled using IS_ERR. Error return values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
                                              unsigned int *level)
{
        unsigned long temp = *level;
        struct dev_pm_opp *opp;

        opp = _find_key_ceil(dev, &temp, 0, true, _read_level, NULL);
        if (IS_ERR(opp))
                return opp;

        /* False match */
        if (temp == OPP_LEVEL_UNSET) {
                dev_err(dev, "%s: OPP levels aren't available\n", __func__);
                dev_pm_opp_put(opp);
                return ERR_PTR(-ENODEV);
        }

        *level = temp;
        return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil);

/**
 * dev_pm_opp_find_level_floor() - Search for a rounded floor level
 * @dev: device for which we do this operation
 * @level: Start level
 *
 * Search for the matching floor *available* OPP from a starting level
 * for a device.
 *
 * Return: matching *opp and refreshes *level accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_level_floor(struct device *dev,
                                               unsigned int *level)
{
        unsigned long temp = *level;
        struct dev_pm_opp *opp;

        opp = _find_key_floor(dev, &temp, 0, true, _read_level, NULL);
        *level = temp;
        return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_floor);

/**
 * dev_pm_opp_find_bw_ceil() - Search for a rounded ceil bandwidth
 * @dev: device for which we do this operation
 * @bw: start bandwidth
 * @index: which bandwidth to compare, in case of OPPs with several values
 *
 * Search for the matching ceil *available* OPP from a starting bandwidth
 * for a device.
 *
 * Return: matching *opp and refreshes *bw accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev, unsigned int *bw,
                                           int index)
{
        unsigned long temp = *bw;
        struct dev_pm_opp *opp;

        opp = _find_key_ceil(dev, &temp, index, true, _read_bw,
                             assert_bandwidth_index);
        *bw = temp;
        return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_ceil);

/**
 * dev_pm_opp_find_bw_floor() - Search for a rounded floor bandwidth
 * @dev: device for which we do this operation
 * @bw: start bandwidth
 * @index: which bandwidth to compare, in case of OPPs with several values
 *
 * Search for the matching floor *available* OPP from a starting bandwidth
 * for a device.
 *
 * Return: matching *opp and refreshes *bw accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL: for bad pointer
 * ERANGE: no match found for search
 * ENODEV: if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
                                            unsigned int *bw, int index)
{
        unsigned long temp = *bw;
        struct dev_pm_opp *opp;

        opp = _find_key_floor(dev, &temp, index, true, _read_bw,
                              assert_bandwidth_index);
        *bw = temp;
        return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_floor);

static int _set_opp_voltage(struct device *dev, struct regulator *reg,
                            struct dev_pm_opp_supply *supply)
{
        int ret;

        /* Regulator not available for device */
        if (IS_ERR(reg)) {
                dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
                        PTR_ERR(reg));
                return 0;
        }

        dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__,
                supply->u_volt_min, supply->u_volt, supply->u_volt_max);

        ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
                                            supply->u_volt, supply->u_volt_max);
        if (ret)
                dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
                        __func__, supply->u_volt_min, supply->u_volt,
                        supply->u_volt_max, ret);

        return ret;
}

static int
_opp_config_clk_single(struct device *dev, struct opp_table *opp_table,
                       struct dev_pm_opp *opp, void *data, bool scaling_down)
{
        unsigned long *target = data;
        unsigned long freq;
        int ret;

        /* One of target and opp must be available */
        if (target) {
                freq = *target;
        } else if (opp) {
                freq = opp->rates[0];
        } else {
                WARN_ON(1);
                return -EINVAL;
        }

        ret = clk_set_rate(opp_table->clk, freq);
        if (ret) {
                dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
                        ret);
        } else {
                opp_table->current_rate_single_clk = freq;
        }

        return ret;
}

/*
 * Simple implementation for configuring multiple clocks. Configure clocks in
 * the order in which they are present in the array while scaling up, and in
 * the reverse order while scaling down.
 */
int dev_pm_opp_config_clks_simple(struct device *dev,
                struct opp_table *opp_table, struct dev_pm_opp *opp, void *data,
                bool scaling_down)
{
        int ret, i;

        if (scaling_down) {
                for (i = opp_table->clk_count - 1; i >= 0; i--) {
                        ret = clk_set_rate(opp_table->clks[i], opp->rates[i]);
                        if (ret) {
                                dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
                                        ret);
                                return ret;
                        }
                }
        } else {
                for (i = 0; i < opp_table->clk_count; i++) {
                        ret = clk_set_rate(opp_table->clks[i], opp->rates[i]);
                        if (ret) {
                                dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
                                        ret);
                                return ret;
                        }
                }
        }

        return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_config_clks_simple);

static int _opp_config_regulator_single(struct device *dev,
                        struct dev_pm_opp *old_opp, struct dev_pm_opp *new_opp,
                        struct regulator **regulators, unsigned int count)
{
        struct regulator *reg = regulators[0];
        int ret;

        /* This function only supports a single regulator per device */
        if (WARN_ON(count > 1)) {
                dev_err(dev, "multiple regulators are not supported\n");
                return -EINVAL;
        }

        ret = _set_opp_voltage(dev, reg, new_opp->supplies);
        if (ret)
                return ret;

        /*
         * Enable the regulator after setting its voltages, otherwise it breaks
         * some boot-enabled regulators.
         */
        if (unlikely(!new_opp->opp_table->enabled)) {
                ret = regulator_enable(reg);
                if (ret < 0)
                        dev_warn(dev, "Failed to enable regulator: %d", ret);
        }

        return 0;
}

static int _set_opp_bw(const struct opp_table *opp_table,
                       struct dev_pm_opp *opp, struct device *dev)
{
        u32 avg, peak;
        int i, ret;

        if (!opp_table->paths)
                return 0;

        for (i = 0; i < opp_table->path_count; i++) {
                if (!opp) {
                        avg = 0;
                        peak = 0;
                } else {
                        avg = opp->bandwidth[i].avg;
                        peak = opp->bandwidth[i].peak;
                }
                ret = icc_set_bw(opp_table->paths[i], avg, peak);
                if (ret) {
                        dev_err(dev, "Failed to %s bandwidth[%d]: %d\n",
                                opp ? "set" : "remove", i, ret);
                        return ret;
                }
        }

        return 0;
}

static int _set_opp_level(struct device *dev, struct dev_pm_opp *opp)
{
        unsigned int level = 0;
        int ret = 0;

        if (opp) {
                if (opp->level == OPP_LEVEL_UNSET)
                        return 0;

                level = opp->level;
        }

        /* Request a new performance state through the device's PM domain. */
        ret = dev_pm_domain_set_performance_state(dev, level);
        if (ret)
                dev_err(dev, "Failed to set performance state %u (%d)\n", level,
                        ret);

        return ret;
}

/* This is only called for PM domain for now */
static int _set_required_opps(struct device *dev, struct opp_table *opp_table,
                              struct dev_pm_opp *opp, bool up)
{
        struct device **devs = opp_table->required_devs;
        struct dev_pm_opp *required_opp;
        int index, target, delta, ret;

        if (!devs)
                return 0;

        /* required-opps not fully initialized yet */
        if (lazy_linking_pending(opp_table))
                return -EBUSY;

        /* Scaling up? Set required OPPs in normal order, else reverse */
        if (up) {
                index = 0;
                target = opp_table->required_opp_count;
                delta = 1;
        } else {
                index = opp_table->required_opp_count - 1;
                target = -1;
                delta = -1;
        }

        while (index != target) {
                if (devs[index]) {
                        required_opp = opp ? opp->required_opps[index] : NULL;

                        ret = _set_opp_level(devs[index], required_opp);
                        if (ret)
                                return ret;
                }

                index += delta;
        }

        return 0;
}

static void _find_current_opp(struct device *dev, struct opp_table *opp_table)
{
        struct dev_pm_opp *opp = ERR_PTR(-ENODEV);
        unsigned long freq;

        if (!IS_ERR(opp_table->clk)) {
                freq = clk_get_rate(opp_table->clk);
                opp = _find_freq_ceil(opp_table, &freq);
        }

        /*
         * Unable to find the current OPP? Pick the first from the list since
         * it is in ascending order, otherwise the rest of the code would need
         * to make special checks to validate current_opp.
         */
        if (IS_ERR(opp)) {
                mutex_lock(&opp_table->lock);
                opp = list_first_entry(&opp_table->opp_list, struct dev_pm_opp, node);
                dev_pm_opp_get(opp);
                mutex_unlock(&opp_table->lock);
        }

        opp_table->current_opp = opp;
}

static int _disable_opp_table(struct device *dev, struct opp_table *opp_table)
{
        int ret;

        if (!opp_table->enabled)
                return 0;

        /*
         * Some drivers need to support cases where some platforms may
         * have an OPP table for the device, while others don't, and
         * opp_set_rate() just needs to behave like clk_set_rate().
         */
        if (!_get_opp_count(opp_table))
                return 0;

        ret = _set_opp_bw(opp_table, NULL, dev);
        if (ret)
                return ret;

        if (opp_table->regulators)
                regulator_disable(opp_table->regulators[0]);

        ret = _set_opp_level(dev, NULL);
        if (ret)
                goto out;

        ret = _set_required_opps(dev, opp_table, NULL, false);

out:
        opp_table->enabled = false;
        return ret;
}

static int _set_opp(struct device *dev, struct opp_table *opp_table,
                    struct dev_pm_opp *opp, void *clk_data, bool forced)
{
        struct dev_pm_opp *old_opp;
        int scaling_down, ret;

        if (unlikely(!opp))
                return _disable_opp_table(dev, opp_table);

        /* Find the currently set OPP if we don't know already */
        if (unlikely(!opp_table->current_opp))
                _find_current_opp(dev, opp_table);

        old_opp = opp_table->current_opp;

        /* Return early if nothing to do */
        if (!forced && old_opp == opp && opp_table->enabled) {
                dev_dbg_ratelimited(dev, "%s: OPPs are same, nothing to do\n", __func__);
                return 0;
        }

        dev_dbg(dev, "%s: switching OPP: Freq %lu -> %lu Hz, Level %u -> %u, Bw %u -> %u\n",
                __func__, old_opp->rates[0], opp->rates[0], old_opp->level,
                opp->level, old_opp->bandwidth ? old_opp->bandwidth[0].peak : 0,
                opp->bandwidth ? opp->bandwidth[0].peak : 0);

        scaling_down = _opp_compare_key(opp_table, old_opp, opp);
        if (scaling_down == -1)
                scaling_down = 0;

        /* Scaling up? Configure required OPPs before frequency */
        if (!scaling_down) {
                ret = _set_required_opps(dev, opp_table, opp, true);
                if (ret) {
                        dev_err(dev, "Failed to set required opps: %d\n", ret);
                        return ret;
                }

                ret = _set_opp_level(dev, opp);
                if (ret)
                        return ret;

                ret = _set_opp_bw(opp_table, opp, dev);
                if (ret) {
                        dev_err(dev, "Failed to set bw: %d\n", ret);
                        return ret;
                }

                if (opp_table->config_regulators) {
                        ret = opp_table->config_regulators(dev, old_opp, opp,
                                                           opp_table->regulators,
                                                           opp_table->regulator_count);
                        if (ret) {
                                dev_err(dev, "Failed to set regulator voltages: %d\n",
                                        ret);
                                return ret;
                        }
                }
        }

        if (opp_table->config_clks) {
                ret = opp_table->config_clks(dev, opp_table, opp, clk_data, scaling_down);
                if (ret)
                        return ret;
        }

        /* Scaling down? Configure required OPPs after frequency */
        if (scaling_down) {
                if (opp_table->config_regulators) {
                        ret = opp_table->config_regulators(dev, old_opp, opp,
                                                           opp_table->regulators,
                                                           opp_table->regulator_count);
                        if (ret) {
                                dev_err(dev, "Failed to set regulator voltages: %d\n",
                                        ret);
                                return ret;
                        }
                }

                ret = _set_opp_bw(opp_table, opp, dev);
                if (ret) {
                        dev_err(dev, "Failed to set bw: %d\n", ret);
                        return ret;
                }

                ret = _set_opp_level(dev, opp);
                if (ret)
                        return ret;

                ret = _set_required_opps(dev, opp_table, opp, false);
                if (ret) {
                        dev_err(dev, "Failed to set required opps: %d\n", ret);
                        return ret;
                }
        }

        opp_table->enabled = true;
        dev_pm_opp_put(old_opp);

        /* Make sure current_opp doesn't get freed */
        dev_pm_opp_get(opp);
        opp_table->current_opp = opp;

        return ret;
}

/**
 * dev_pm_opp_set_rate() - Configure new OPP based on frequency
 * @dev: device for which we do this operation
 * @target_freq: frequency to achieve
 *
 * This configures the power-supplies to the levels specified by the OPP
 * corresponding to the target_freq, and programs the clock to a value <=
 * target_freq, as rounded by clk_round_rate(). A device wanting to run at the
 * fmax provided by the opp should have already rounded target_freq to the
 * target OPP's frequency.
 */
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
        struct opp_table *opp_table;
        unsigned long freq = 0, temp_freq;
        struct dev_pm_opp *opp = NULL;
        bool forced = false;
        int ret;

        opp_table = _find_opp_table(dev);
        if (IS_ERR(opp_table)) {
                dev_err(dev, "%s: device's opp table doesn't exist\n", __func__);
                return PTR_ERR(opp_table);
        }

        if (target_freq) {
                /*
                 * For IO devices which require an OPP on some platforms/SoCs
                 * while just needing to scale the clock on some others,
                 * we look for empty OPP tables with just a clock handle and
                 * scale only the clk. This makes dev_pm_opp_set_rate()
                 * equivalent to a clk_set_rate().
                 */
                if (!_get_opp_count(opp_table)) {
                        ret = opp_table->config_clks(dev, opp_table, NULL,
                                                     &target_freq, false);
                        goto put_opp_table;
                }

                freq = clk_round_rate(opp_table->clk, target_freq);
                if ((long)freq <= 0)
                        freq = target_freq;

                /*
                 * The clock driver may support finer resolution of the
                 * frequencies than the OPP table, don't update the frequency we
                 * pass to clk_set_rate() here.
                 */
                temp_freq = freq;
                opp = _find_freq_ceil(opp_table, &temp_freq);
                if (IS_ERR(opp)) {
                        ret = PTR_ERR(opp);
                        dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
                                __func__, freq, ret);
                        goto put_opp_table;
                }

                /*
                 * An OPP entry specifies the highest frequency at which other
                 * properties of the OPP entry apply. Even if the new OPP is
                 * the same as the old one, we may still reach here for a
                 * different value of the frequency. In such a case, do not
                 * abort but configure the hardware to the desired frequency
                 * forcefully.
                 */
                forced = opp_table->current_rate_single_clk != freq;
        }

        ret = _set_opp(dev, opp_table, opp, &freq, forced);

        if (freq)
                dev_pm_opp_put(opp);

put_opp_table:
        dev_pm_opp_put_opp_table(opp_table);
        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
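
/*
 * Example (hypothetical consumer, a sketch only): program a device for a
 * 600 MHz target in one call. The OPP core finds the matching OPP and
 * orders the regulator, bandwidth and clock updates internally; the
 * frequency value is illustrative.
 *
 *	int ret = dev_pm_opp_set_rate(dev, 600000000);
 *
 *	if (ret)
 *		dev_err(dev, "failed to set rate: %d\n", ret);
 */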

/**
 * dev_pm_opp_set_opp() - Configure device for OPP
 * @dev: device for which we do this operation
 * @opp: OPP to set to
 *
 * This configures the device based on the properties of the OPP passed to this
 * routine.
 *
 * Return: 0 on success, a negative error number otherwise.
 */
int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp)
{
        struct opp_table *opp_table;
        int ret;

        opp_table = _find_opp_table(dev);
        if (IS_ERR(opp_table)) {
                dev_err(dev, "%s: device opp doesn't exist\n", __func__);
                return PTR_ERR(opp_table);
        }

        ret = _set_opp(dev, opp_table, opp, NULL, false);
        dev_pm_opp_put_opp_table(opp_table);

        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_opp);
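
/*
 * Example (hypothetical consumer, a sketch only): configure a device from
 * an OPP found by level rather than frequency, as may be done for
 * genpd-backed devices without a clock. "level" and "ret" are assumed to
 * come from the caller.
 *
 *	struct dev_pm_opp *opp = dev_pm_opp_find_level_exact(dev, level);
 *
 *	if (IS_ERR(opp))
 *		return PTR_ERR(opp);
 *
 *	ret = dev_pm_opp_set_opp(dev, opp);
 *	dev_pm_opp_put(opp);
 */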

/* OPP-dev Helpers */
static void _remove_opp_dev(struct opp_device *opp_dev,
                            struct opp_table *opp_table)
{
        opp_debug_unregister(opp_dev, opp_table);
        list_del(&opp_dev->node);
        kfree(opp_dev);
}

struct opp_device *_add_opp_dev(const struct device *dev,
                                struct opp_table *opp_table)
{
        struct opp_device *opp_dev;

        opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
        if (!opp_dev)
                return NULL;

        /* Initialize opp-dev */
        opp_dev->dev = dev;

        mutex_lock(&opp_table->lock);
        list_add(&opp_dev->node, &opp_table->dev_list);
        mutex_unlock(&opp_table->lock);

        /* Create debugfs entries for the opp_table */
        opp_debug_register(opp_dev, opp_table);

        return opp_dev;
}

static struct opp_table *_allocate_opp_table(struct device *dev, int index)
{
        struct opp_table *opp_table;
        struct opp_device *opp_dev;
        int ret;

        /*
         * Allocate a new OPP table. In the infrequent case where a new
         * device needs to be added, we pay this penalty.
         */
        opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
        if (!opp_table)
                return ERR_PTR(-ENOMEM);

        mutex_init(&opp_table->lock);
        INIT_LIST_HEAD(&opp_table->dev_list);
        INIT_LIST_HEAD(&opp_table->lazy);

        opp_table->clk = ERR_PTR(-ENODEV);

        /* Mark regulator count uninitialized */
        opp_table->regulator_count = -1;

        opp_dev = _add_opp_dev(dev, opp_table);
        if (!opp_dev) {
                ret = -ENOMEM;
                goto err;
        }

        _of_init_opp_table(opp_table, dev, index);

        /* Find interconnect path(s) for the device */
        ret = dev_pm_opp_of_find_icc_paths(dev, opp_table);
        if (ret) {
                if (ret == -EPROBE_DEFER)
                        goto remove_opp_dev;

                dev_warn(dev, "%s: Error finding interconnect paths: %d\n",
                         __func__, ret);
        }

        BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head);
        INIT_LIST_HEAD(&opp_table->opp_list);
        kref_init(&opp_table->kref);

        return opp_table;

remove_opp_dev:
        _of_clear_opp_table(opp_table);
        _remove_opp_dev(opp_dev, opp_table);
        mutex_destroy(&opp_table->lock);
err:
        kfree(opp_table);
        return ERR_PTR(ret);
}

static struct opp_table *_update_opp_table_clk(struct device *dev,
                                               struct opp_table *opp_table,
                                               bool getclk)
{
        int ret;

        /*
         * Return early if we don't need to get clk or we have already done it
         * earlier.
         */
        if (!getclk || IS_ERR(opp_table) || !IS_ERR(opp_table->clk) ||
            opp_table->clks)
                return opp_table;

        /* Find clk for the device */
        opp_table->clk = clk_get(dev, NULL);

        ret = PTR_ERR_OR_ZERO(opp_table->clk);
        if (!ret) {
                opp_table->config_clks = _opp_config_clk_single;
                opp_table->clk_count = 1;
                return opp_table;
        }

        if (ret == -ENOENT) {
                /*
                 * There are a few platforms which don't want the OPP core to
                 * manage the device's clock settings. In such cases neither
                 * does the platform provide the clks explicitly to us, nor
                 * does the DT contain a valid clk entry. The OPP nodes in DT
                 * may still contain an "opp-hz" property though, which we
                 * need to parse and allow the platform to find an OPP based
                 * on freq later on.
                 *
                 * This is a simple solution to take care of such corner cases,
                 * i.e. make the clk_count 1, which lets us allocate space for
                 * frequency in opp->rates and also parse the entries in DT.
                 */
                opp_table->clk_count = 1;

                dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, ret);
                return opp_table;
        }

        dev_pm_opp_put_opp_table(opp_table);
        dev_err_probe(dev, ret, "Couldn't find clock\n");

        return ERR_PTR(ret);
}

/*
 * We need to make sure that the OPP table for a device doesn't get added twice,
 * if this routine gets called in parallel with the same device pointer.
 *
 * The simplest way to enforce that is to perform everything (find existing
 * table and if not found, create a new one) under the opp_table_lock, so only
 * one creator gets access to it. But that expands the critical section
 * under the lock and may end up causing circular dependencies with frameworks
 * like debugfs, interconnect or clock framework as they may be direct or
 * indirect users of OPP core.
 *
 * And for that reason we have to go for a slightly tricky implementation here,
 * which uses the opp_tables_busy flag to indicate if another creator is in the
 * middle of adding an OPP table and others should wait for it to finish.
 */
struct opp_table *_add_opp_table_indexed(struct device *dev, int index,
                                         bool getclk)
{
        struct opp_table *opp_table;

again:
        mutex_lock(&opp_table_lock);

        opp_table = _find_opp_table_unlocked(dev);
        if (!IS_ERR(opp_table))
                goto unlock;

        /*
         * The opp_tables list or an OPP table's dev_list is getting updated by
         * another user, wait for it to finish.
         */
        if (unlikely(opp_tables_busy)) {
                mutex_unlock(&opp_table_lock);
                cpu_relax();
                goto again;
        }

        opp_tables_busy = true;
        opp_table = _managed_opp(dev, index);

        /* Drop the lock to reduce the size of critical section */
        mutex_unlock(&opp_table_lock);

        if (opp_table) {
                if (!_add_opp_dev(dev, opp_table)) {
                        dev_pm_opp_put_opp_table(opp_table);
                        opp_table = ERR_PTR(-ENOMEM);
                }

                mutex_lock(&opp_table_lock);
        } else {
                opp_table = _allocate_opp_table(dev, index);

                mutex_lock(&opp_table_lock);
                if (!IS_ERR(opp_table))
                        list_add(&opp_table->node, &opp_tables);
        }

        opp_tables_busy = false;

unlock:
        mutex_unlock(&opp_table_lock);

        return _update_opp_table_clk(dev, opp_table, getclk);
}

static struct opp_table *_add_opp_table(struct device *dev, bool getclk)
{
        return _add_opp_table_indexed(dev, 0, getclk);
}

struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
{
        return _find_opp_table(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table);

static void _opp_table_kref_release(struct kref *kref)
{
        struct opp_table *opp_table = container_of(kref, struct opp_table, kref);
        struct opp_device *opp_dev, *temp;
        int i;

        /* Drop the lock as soon as we can */
        list_del(&opp_table->node);
        mutex_unlock(&opp_table_lock);

        if (opp_table->current_opp)
                dev_pm_opp_put(opp_table->current_opp);

        _of_clear_opp_table(opp_table);

        /* Release automatically acquired single clk */
        if (!IS_ERR(opp_table->clk))
                clk_put(opp_table->clk);

        if (opp_table->paths) {
                for (i = 0; i < opp_table->path_count; i++)
                        icc_put(opp_table->paths[i]);
                kfree(opp_table->paths);
        }

        WARN_ON(!list_empty(&opp_table->opp_list));

        list_for_each_entry_safe(opp_dev, temp, &opp_table->dev_list, node)
                _remove_opp_dev(opp_dev, opp_table);

        mutex_destroy(&opp_table->lock);
        kfree(opp_table);
}

void _get_opp_table_kref(struct opp_table *opp_table)
{
        kref_get(&opp_table->kref);
}

void dev_pm_opp_get_opp_table_ref(struct opp_table *opp_table)
{
        _get_opp_table_kref(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table_ref);

void dev_pm_opp_put_opp_table(struct opp_table *opp_table)
{
        kref_put_mutex(&opp_table->kref, _opp_table_kref_release,
                       &opp_table_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_opp_table);

void _opp_free(struct dev_pm_opp *opp)
{
        kfree(opp);
}

static void _opp_kref_release(struct kref *kref)
{
        struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
        struct opp_table *opp_table = opp->opp_table;

        list_del(&opp->node);
        mutex_unlock(&opp_table->lock);

        /*
         * Notify the changes in the availability of the operable
         * frequency/voltage list.
         */
        blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_REMOVE, opp);
        _of_clear_opp(opp_table, opp);
        opp_debug_remove_one(opp);
        kfree(opp);
}

void dev_pm_opp_get(struct dev_pm_opp *opp)
{
        kref_get(&opp->kref);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get);

void dev_pm_opp_put(struct dev_pm_opp *opp)
{
        kref_put_mutex(&opp->kref, _opp_kref_release, &opp->opp_table->lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put);
1743
1744 /**
1745 * dev_pm_opp_remove() - Remove an OPP from OPP table
1746 * @dev: device for which we do this operation
1747 * @freq: OPP to remove with matching 'freq'
1748 *
1749 * This function removes an opp from the opp table.
1750 */
dev_pm_opp_remove(struct device * dev,unsigned long freq)1751 void dev_pm_opp_remove(struct device *dev, unsigned long freq)
1752 {
1753 struct dev_pm_opp *opp = NULL, *iter;
1754 struct opp_table *opp_table;
1755
1756 opp_table = _find_opp_table(dev);
1757 if (IS_ERR(opp_table))
1758 return;
1759
1760 if (!assert_single_clk(opp_table, 0))
1761 goto put_table;
1762
1763 mutex_lock(&opp_table->lock);
1764
1765 list_for_each_entry(iter, &opp_table->opp_list, node) {
1766 if (iter->rates[0] == freq) {
1767 opp = iter;
1768 break;
1769 }
1770 }
1771
1772 mutex_unlock(&opp_table->lock);
1773
1774 if (opp) {
1775 dev_pm_opp_put(opp);
1776
1777 /* Drop the reference taken by dev_pm_opp_add() */
1778 dev_pm_opp_put_opp_table(opp_table);
1779 } else {
1780 dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
1781 __func__, freq);
1782 }
1783
1784 put_table:
1785 /* Drop the reference taken by _find_opp_table() */
1786 dev_pm_opp_put_opp_table(opp_table);
1787 }
1788 EXPORT_SYMBOL_GPL(dev_pm_opp_remove);

static struct dev_pm_opp *_opp_get_next(struct opp_table *opp_table,
					bool dynamic)
{
	struct dev_pm_opp *opp = NULL, *temp;

	mutex_lock(&opp_table->lock);
	list_for_each_entry(temp, &opp_table->opp_list, node) {
		/*
		 * The refcount must be dropped only once for each OPP by the
		 * OPP core; ensure that with the help of the "removed" flag.
		 */
		if (!temp->removed && dynamic == temp->dynamic) {
			opp = temp;
			break;
		}
	}

	mutex_unlock(&opp_table->lock);
	return opp;
}

/*
 * Can't call dev_pm_opp_put() from under the lock as debugfs removal needs to
 * happen locklessly to avoid circular dependency issues. This routine must be
 * called without the opp_table->lock held.
 */
static void _opp_remove_all(struct opp_table *opp_table, bool dynamic)
{
	struct dev_pm_opp *opp;

	while ((opp = _opp_get_next(opp_table, dynamic))) {
		opp->removed = true;
		dev_pm_opp_put(opp);

		/* Drop the references taken by dev_pm_opp_add() */
		if (dynamic)
			dev_pm_opp_put_opp_table(opp_table);
	}
}

bool _opp_remove_all_static(struct opp_table *opp_table)
{
	mutex_lock(&opp_table->lock);

	if (!opp_table->parsed_static_opps) {
		mutex_unlock(&opp_table->lock);
		return false;
	}

	if (--opp_table->parsed_static_opps) {
		mutex_unlock(&opp_table->lock);
		return true;
	}

	mutex_unlock(&opp_table->lock);

	_opp_remove_all(opp_table, false);
	return true;
}

/**
 * dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs
 * @dev: device for which we do this operation
 *
 * This function removes all dynamically created OPPs from the opp table.
 */
void dev_pm_opp_remove_all_dynamic(struct device *dev)
{
	struct opp_table *opp_table;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return;

	_opp_remove_all(opp_table, true);

	/* Drop the reference taken by _find_opp_table() */
	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic);

struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table)
{
	struct dev_pm_opp *opp;
	int supply_count, supply_size, icc_size, clk_size;

	/* Allocate space for at least one supply */
	supply_count = opp_table->regulator_count > 0 ?
			opp_table->regulator_count : 1;
	supply_size = sizeof(*opp->supplies) * supply_count;
	clk_size = sizeof(*opp->rates) * opp_table->clk_count;
	icc_size = sizeof(*opp->bandwidth) * opp_table->path_count;

	/* allocate new OPP node and supplies structures */
	opp = kzalloc(sizeof(*opp) + supply_size + clk_size + icc_size, GFP_KERNEL);
	if (!opp)
		return NULL;

	/* Put the supplies, bw and clock at the end of the OPP structure */
	opp->supplies = (struct dev_pm_opp_supply *)(opp + 1);

	opp->rates = (unsigned long *)(opp->supplies + supply_count);

	if (icc_size)
		opp->bandwidth = (struct dev_pm_opp_icc_bw *)(opp->rates + opp_table->clk_count);

	INIT_LIST_HEAD(&opp->node);

	opp->level = OPP_LEVEL_UNSET;

	return opp;
}

static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
					 struct opp_table *opp_table)
{
	struct regulator *reg;
	int i;

	if (!opp_table->regulators)
		return true;

	for (i = 0; i < opp_table->regulator_count; i++) {
		reg = opp_table->regulators[i];

		if (!regulator_is_supported_voltage(reg,
					opp->supplies[i].u_volt_min,
					opp->supplies[i].u_volt_max)) {
			pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
				__func__, opp->supplies[i].u_volt_min,
				opp->supplies[i].u_volt_max);
			return false;
		}
	}

	return true;
}

static int _opp_compare_rate(struct opp_table *opp_table,
			     struct dev_pm_opp *opp1, struct dev_pm_opp *opp2)
{
	int i;

	for (i = 0; i < opp_table->clk_count; i++) {
		if (opp1->rates[i] != opp2->rates[i])
			return opp1->rates[i] < opp2->rates[i] ? -1 : 1;
	}

	/* Same rates for both OPPs */
	return 0;
}

static int _opp_compare_bw(struct opp_table *opp_table, struct dev_pm_opp *opp1,
			   struct dev_pm_opp *opp2)
{
	int i;

	for (i = 0; i < opp_table->path_count; i++) {
		if (opp1->bandwidth[i].peak != opp2->bandwidth[i].peak)
			return opp1->bandwidth[i].peak < opp2->bandwidth[i].peak ? -1 : 1;
	}

	/* Same bw for both OPPs */
	return 0;
}

/*
 * Returns
 * 0: opp1 == opp2
 * 1: opp1 > opp2
 * -1: opp1 < opp2
 */
int _opp_compare_key(struct opp_table *opp_table, struct dev_pm_opp *opp1,
		     struct dev_pm_opp *opp2)
{
	int ret;

	ret = _opp_compare_rate(opp_table, opp1, opp2);
	if (ret)
		return ret;

	ret = _opp_compare_bw(opp_table, opp1, opp2);
	if (ret)
		return ret;

	if (opp1->level != opp2->level)
		return opp1->level < opp2->level ? -1 : 1;

	/* Duplicate OPPs */
	return 0;
}
static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp,
			     struct opp_table *opp_table,
			     struct list_head **head)
{
	struct dev_pm_opp *opp;
	int opp_cmp;

	/*
	 * Insert the new OPP in order of increasing frequency and discard it
	 * if already present.
	 *
	 * Need to use &opp_table->opp_list in the condition part of the 'for'
	 * loop; don't replace it with head, otherwise it will become an
	 * infinite loop.
	 */
	list_for_each_entry(opp, &opp_table->opp_list, node) {
		opp_cmp = _opp_compare_key(opp_table, new_opp, opp);
		if (opp_cmp > 0) {
			*head = &opp->node;
			continue;
		}

		if (opp_cmp < 0)
			return 0;

		/* Duplicate OPPs */
		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
			 __func__, opp->rates[0], opp->supplies[0].u_volt,
			 opp->available, new_opp->rates[0],
			 new_opp->supplies[0].u_volt, new_opp->available);

		/* Should we compare voltages for all regulators here ? */
		return opp->available &&
		       new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? -EBUSY : -EEXIST;
	}

	return 0;
}

void _required_opps_available(struct dev_pm_opp *opp, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (opp->required_opps[i]->available)
			continue;

		opp->available = false;
		pr_warn("%s: OPP not supported by required OPP %pOF (%lu)\n",
			__func__, opp->required_opps[i]->np, opp->rates[0]);
		return;
	}
}

/*
 * Returns:
 * 0: On success. An appropriate error message is printed for duplicate OPPs.
 * -EBUSY: For an OPP with the same freq/volt that is available. The callers of
 *  _opp_add() must return 0 if they receive -EBUSY from it. This is to make
 *  sure we don't print error messages unnecessarily if different parts of the
 *  kernel try to initialize the OPP table.
 * -EEXIST: For an OPP with the same freq but different volt, or one that is
 *  unavailable. This should be considered an error by the callers of
 *  _opp_add().
 */
int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
	     struct opp_table *opp_table)
{
	struct list_head *head;
	int ret;

	mutex_lock(&opp_table->lock);
	head = &opp_table->opp_list;

	ret = _opp_is_duplicate(dev, new_opp, opp_table, &head);
	if (ret) {
		mutex_unlock(&opp_table->lock);
		return ret;
	}

	list_add(&new_opp->node, head);
	mutex_unlock(&opp_table->lock);

	new_opp->opp_table = opp_table;
	kref_init(&new_opp->kref);

	opp_debug_create_one(new_opp, opp_table);

	if (!_opp_supported_by_regulators(new_opp, opp_table)) {
		new_opp->available = false;
		dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
			 __func__, new_opp->rates[0]);
	}

	/* required-opps not fully initialized yet */
	if (lazy_linking_pending(opp_table))
		return 0;

	_required_opps_available(new_opp, opp_table->required_opp_count);

	return 0;
}

/**
 * _opp_add_v1() - Allocate an OPP based on v1 bindings.
 * @opp_table: OPP table
 * @dev: device for which we do this operation
 * @data: The OPP data for the OPP to add
 * @dynamic: Dynamically added OPPs.
 *
 * This function adds an opp definition to the opp table and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
 *
 * NOTE: The "dynamic" parameter impacts OPPs added by dev_pm_opp_of_add_table()
 * and freed by dev_pm_opp_of_remove_table().
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
int _opp_add_v1(struct opp_table *opp_table, struct device *dev,
		struct dev_pm_opp_data *data, bool dynamic)
{
	struct dev_pm_opp *new_opp;
	unsigned long tol, u_volt = data->u_volt;
	int ret;

	if (!assert_single_clk(opp_table, 0))
		return -EINVAL;

	new_opp = _opp_allocate(opp_table);
	if (!new_opp)
		return -ENOMEM;

	/* populate the opp table */
	new_opp->rates[0] = data->freq;
	new_opp->level = data->level;
	new_opp->turbo = data->turbo;
	tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
	new_opp->supplies[0].u_volt = u_volt;
	new_opp->supplies[0].u_volt_min = u_volt - tol;
	new_opp->supplies[0].u_volt_max = u_volt + tol;
	new_opp->available = true;
	new_opp->dynamic = dynamic;

	ret = _opp_add(dev, new_opp, opp_table);
	if (ret) {
		/* Don't return error for duplicate OPPs */
		if (ret == -EBUSY)
			ret = 0;
		goto free_opp;
	}

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp);
	return 0;

free_opp:
	_opp_free(new_opp);

	return ret;
}

/*
 * This is required only for the V2 bindings, and it enables a platform to
 * specify the hierarchy of versions it supports. The OPP layer will then
 * enable only those OPPs that are available for those versions, based on the
 * 'opp-supported-hw' property.
 */
static int _opp_set_supported_hw(struct opp_table *opp_table,
				 const u32 *versions, unsigned int count)
{
	/* Another CPU that shares the OPP table has set the property ? */
	if (opp_table->supported_hw)
		return 0;

	opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
					  GFP_KERNEL);
	if (!opp_table->supported_hw)
		return -ENOMEM;

	opp_table->supported_hw_count = count;

	return 0;
}

static void _opp_put_supported_hw(struct opp_table *opp_table)
{
	if (opp_table->supported_hw) {
		kfree(opp_table->supported_hw);
		opp_table->supported_hw = NULL;
		opp_table->supported_hw_count = 0;
	}
}

/*
 * This is required only for the V2 bindings, and it enables a platform to
 * specify the extension to be used for certain property names. The properties
 * to which the extension will apply are opp-microvolt and opp-microamp. The
 * OPP core should postfix the property name with -<name> while looking for
 * them.
 */
static int _opp_set_prop_name(struct opp_table *opp_table, const char *name)
{
	/* Another CPU that shares the OPP table has set the property ? */
	if (!opp_table->prop_name) {
		opp_table->prop_name = kstrdup(name, GFP_KERNEL);
		if (!opp_table->prop_name)
			return -ENOMEM;
	}

	return 0;
}

static void _opp_put_prop_name(struct opp_table *opp_table)
{
	if (opp_table->prop_name) {
		kfree(opp_table->prop_name);
		opp_table->prop_name = NULL;
	}
}

/*
 * In order to support OPP switching, the OPP layer needs to know the names of
 * the device's regulators, as the core would be required to switch voltages as
 * well.
 *
 * This must be called before any OPPs are initialized for the device.
 */
static int _opp_set_regulators(struct opp_table *opp_table, struct device *dev,
			       const char * const names[])
{
	const char * const *temp = names;
	struct regulator *reg;
	int count = 0, ret, i;

	/* Count number of regulators */
	while (*temp++)
		count++;

	if (!count)
		return -EINVAL;

	/* Another CPU that shares the OPP table has set the regulators ? */
	if (opp_table->regulators)
		return 0;

	opp_table->regulators = kmalloc_array(count,
					      sizeof(*opp_table->regulators),
					      GFP_KERNEL);
	if (!opp_table->regulators)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		reg = regulator_get_optional(dev, names[i]);
		if (IS_ERR(reg)) {
			ret = dev_err_probe(dev, PTR_ERR(reg),
					    "%s: no regulator (%s) found\n",
					    __func__, names[i]);
			goto free_regulators;
		}

		opp_table->regulators[i] = reg;
	}

	opp_table->regulator_count = count;

	/* Set generic config_regulators() for single regulators here */
	if (count == 1)
		opp_table->config_regulators = _opp_config_regulator_single;

	return 0;

free_regulators:
	while (i != 0)
		regulator_put(opp_table->regulators[--i]);

	kfree(opp_table->regulators);
	opp_table->regulators = NULL;
	opp_table->regulator_count = -1;

	return ret;
}

static void _opp_put_regulators(struct opp_table *opp_table)
{
	int i;

	if (!opp_table->regulators)
		return;

	if (opp_table->enabled) {
		for (i = opp_table->regulator_count - 1; i >= 0; i--)
			regulator_disable(opp_table->regulators[i]);
	}

	for (i = opp_table->regulator_count - 1; i >= 0; i--)
		regulator_put(opp_table->regulators[i]);

	kfree(opp_table->regulators);
	opp_table->regulators = NULL;
	opp_table->regulator_count = -1;
}

static void _put_clks(struct opp_table *opp_table, int count)
{
	int i;

	for (i = count - 1; i >= 0; i--)
		clk_put(opp_table->clks[i]);

	kfree(opp_table->clks);
	opp_table->clks = NULL;
}

/*
 * In order to support OPP switching, OPP layer needs to get pointers to the
 * clocks for the device. Simple cases work fine without using this routine
 * (i.e. by passing connection-id as NULL), but for a device with multiple
 * clocks available, the OPP core needs to know the exact names of the clks to
 * use.
 *
 * This must be called before any OPPs are initialized for the device.
 */
static int _opp_set_clknames(struct opp_table *opp_table, struct device *dev,
			     const char * const names[],
			     config_clks_t config_clks)
{
	const char * const *temp = names;
	int count = 0, ret, i;
	struct clk *clk;

	/* Count number of clks */
	while (*temp++)
		count++;

	/*
	 * This is a special case where we have a single clock, whose connection
	 * id name is NULL, i.e. first two entries are NULL in the array.
	 */
	if (!count && !names[1])
		count = 1;

	/* Fail early for invalid configurations */
	if (!count || (!config_clks && count > 1))
		return -EINVAL;

	/* Another CPU that shares the OPP table has set the clkname ? */
	if (opp_table->clks)
		return 0;

	opp_table->clks = kmalloc_array(count, sizeof(*opp_table->clks),
					GFP_KERNEL);
	if (!opp_table->clks)
		return -ENOMEM;

	/* Find clks for the device */
	for (i = 0; i < count; i++) {
		clk = clk_get(dev, names[i]);
		if (IS_ERR(clk)) {
			ret = dev_err_probe(dev, PTR_ERR(clk),
					    "%s: Couldn't find clock with name: %s\n",
					    __func__, names[i]);
			goto free_clks;
		}

		opp_table->clks[i] = clk;
	}

	opp_table->clk_count = count;
	opp_table->config_clks = config_clks;

	/* Set generic single clk set here */
	if (count == 1) {
		if (!opp_table->config_clks)
			opp_table->config_clks = _opp_config_clk_single;

		/*
		 * We could have just dropped the "clk" field and used "clks"
		 * everywhere. Instead we kept the "clk" field around for
		 * following reasons:
		 *
		 * - avoiding clks[0] everywhere else.
		 * - not running single clk helpers for multiple clk usecase by
		 *   mistake.
		 *
		 * Since this is single-clk case, just update the clk pointer
		 * too.
		 */
		opp_table->clk = opp_table->clks[0];
	}

	return 0;

free_clks:
	_put_clks(opp_table, i);
	return ret;
}

static void _opp_put_clknames(struct opp_table *opp_table)
{
	if (!opp_table->clks)
		return;

	opp_table->config_clks = NULL;
	opp_table->clk = ERR_PTR(-ENODEV);

	_put_clks(opp_table, opp_table->clk_count);
}

/*
 * This is useful to support platforms with multiple regulators per device.
 *
 * This must be called before any OPPs are initialized for the device.
 */
static int _opp_set_config_regulators_helper(struct opp_table *opp_table,
		struct device *dev, config_regulators_t config_regulators)
{
	/* Another CPU that shares the OPP table has set the helper ? */
	if (!opp_table->config_regulators)
		opp_table->config_regulators = config_regulators;

	return 0;
}

static void _opp_put_config_regulators_helper(struct opp_table *opp_table)
{
	if (opp_table->config_regulators)
		opp_table->config_regulators = NULL;
}

static int _opp_set_required_dev(struct opp_table *opp_table,
				 struct device *dev,
				 struct device *required_dev,
				 unsigned int index)
{
	struct opp_table *required_table, *pd_table;
	struct device *gdev;

	/* Genpd core takes care of propagation to parent genpd */
	if (opp_table->is_genpd) {
		dev_err(dev, "%s: Operation not supported for genpds\n", __func__);
		return -EOPNOTSUPP;
	}

	if (index >= opp_table->required_opp_count) {
		dev_err(dev, "Required OPPs not available, can't set required devs\n");
		return -EINVAL;
	}

	required_table = opp_table->required_opp_tables[index];
	if (IS_ERR(required_table)) {
		dev_err(dev, "Missing OPP table, unable to set the required devs\n");
		return -ENODEV;
	}

	/*
	 * The required_opp_tables parsing is not perfect, as the OPP core does
	 * the parsing solely based on the DT node pointers. The core sets the
	 * required_opp_tables entry to the first OPP table in the "opp_tables"
	 * list, that matches with the node pointer.
	 *
	 * If the target DT OPP table is used by multiple devices and they all
	 * create separate instances of 'struct opp_table' from it, then it is
	 * possible that the required_opp_tables entry may be set to the
	 * incorrect sibling device.
	 *
	 * Cross check it again and fix if required.
	 */
	gdev = dev_to_genpd_dev(required_dev);
	if (IS_ERR(gdev))
		return PTR_ERR(gdev);

	pd_table = _find_opp_table(gdev);
	if (!IS_ERR(pd_table)) {
		if (pd_table != required_table) {
			dev_pm_opp_put_opp_table(required_table);
			opp_table->required_opp_tables[index] = pd_table;
		} else {
			dev_pm_opp_put_opp_table(pd_table);
		}
	}

	opp_table->required_devs[index] = required_dev;
	return 0;
}

static void _opp_put_required_dev(struct opp_table *opp_table,
				  unsigned int index)
{
	opp_table->required_devs[index] = NULL;
}

static void _opp_clear_config(struct opp_config_data *data)
{
	if (data->flags & OPP_CONFIG_REQUIRED_DEV)
		_opp_put_required_dev(data->opp_table,
				      data->required_dev_index);
	if (data->flags & OPP_CONFIG_REGULATOR)
		_opp_put_regulators(data->opp_table);
	if (data->flags & OPP_CONFIG_SUPPORTED_HW)
		_opp_put_supported_hw(data->opp_table);
	if (data->flags & OPP_CONFIG_REGULATOR_HELPER)
		_opp_put_config_regulators_helper(data->opp_table);
	if (data->flags & OPP_CONFIG_PROP_NAME)
		_opp_put_prop_name(data->opp_table);
	if (data->flags & OPP_CONFIG_CLK)
		_opp_put_clknames(data->opp_table);

	dev_pm_opp_put_opp_table(data->opp_table);
	kfree(data);
}

/**
 * dev_pm_opp_set_config() - Set OPP configuration for the device.
 * @dev: Device for which configuration is being set.
 * @config: OPP configuration.
 *
 * This allows all device OPP configurations to be performed at once.
 *
 * This must be called before any OPPs are initialized for the device. This may
 * be called multiple times for the same OPP table, for example once for each
 * CPU that shares the same table. This must be balanced by the same number of
 * calls to dev_pm_opp_clear_config() in order to free the OPP table properly.
 *
 * This returns a token to the caller, which must be passed to
 * dev_pm_opp_clear_config() to free the resources later. The value of the
 * returned token will be >= 1 for success and negative for errors. The minimum
 * value of 1 is chosen here to make it easy for callers to manage the resource.
 */
int dev_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config)
{
	struct opp_table *opp_table;
	struct opp_config_data *data;
	unsigned int id;
	int ret;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	opp_table = _add_opp_table(dev, false);
	if (IS_ERR(opp_table)) {
		kfree(data);
		return PTR_ERR(opp_table);
	}

	data->opp_table = opp_table;
	data->flags = 0;

	/* This should be called before OPPs are initialized */
	if (WARN_ON(!list_empty(&opp_table->opp_list))) {
		ret = -EBUSY;
		goto err;
	}

	/* Configure clocks */
	if (config->clk_names) {
		ret = _opp_set_clknames(opp_table, dev, config->clk_names,
					config->config_clks);
		if (ret)
			goto err;

		data->flags |= OPP_CONFIG_CLK;
	} else if (config->config_clks) {
		/* Don't allow config callback without clocks */
		ret = -EINVAL;
		goto err;
	}

	/* Configure property names */
	if (config->prop_name) {
		ret = _opp_set_prop_name(opp_table, config->prop_name);
		if (ret)
			goto err;

		data->flags |= OPP_CONFIG_PROP_NAME;
	}

	/* Configure config_regulators helper */
	if (config->config_regulators) {
		ret = _opp_set_config_regulators_helper(opp_table, dev,
							config->config_regulators);
		if (ret)
			goto err;

		data->flags |= OPP_CONFIG_REGULATOR_HELPER;
	}

	/* Configure supported hardware */
	if (config->supported_hw) {
		ret = _opp_set_supported_hw(opp_table, config->supported_hw,
					    config->supported_hw_count);
		if (ret)
			goto err;

		data->flags |= OPP_CONFIG_SUPPORTED_HW;
	}

	/* Configure supplies */
	if (config->regulator_names) {
		ret = _opp_set_regulators(opp_table, dev,
					  config->regulator_names);
		if (ret)
			goto err;

		data->flags |= OPP_CONFIG_REGULATOR;
	}

	if (config->required_dev) {
		ret = _opp_set_required_dev(opp_table, dev,
					    config->required_dev,
					    config->required_dev_index);
		if (ret)
			goto err;

		data->required_dev_index = config->required_dev_index;
		data->flags |= OPP_CONFIG_REQUIRED_DEV;
	}

	ret = xa_alloc(&opp_configs, &id, data, XA_LIMIT(1, INT_MAX),
		       GFP_KERNEL);
	if (ret)
		goto err;

	return id;

err:
	_opp_clear_config(data);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_config);
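
/*
 * Illustrative usage sketch (not part of the OPP core): a hypothetical driver
 * configuring its supply name before populating the OPP table. The "vdd" name
 * and foo_probe() are assumptions for the example only.
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		static const char * const regulators[] = { "vdd", NULL };
 *		struct dev_pm_opp_config config = {
 *			.regulator_names = regulators,
 *		};
 *		int token;
 *
 *		token = dev_pm_opp_set_config(dev, &config);
 *		if (token < 0)
 *			return token;
 *
 *		// ... add OPPs and use the table ...
 *
 *		dev_pm_opp_clear_config(token);
 *		return 0;
 *	}
 */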

/**
 * dev_pm_opp_clear_config() - Releases resources blocked for OPP configuration.
 * @token: The token returned by dev_pm_opp_set_config() previously.
 *
 * This allows all device OPP configurations to be cleared at once. This must be
 * called once for each call made to dev_pm_opp_set_config(), in order to free
 * the OPPs properly.
 *
 * Currently the first call itself ends up freeing all the OPP configurations,
 * while the later ones only drop the OPP table reference. This works well for
 * now as we would never want to use a half-initialized OPP table and want to
 * remove the configurations together.
 */
void dev_pm_opp_clear_config(int token)
{
	struct opp_config_data *data;

	/*
	 * This lets the callers call this unconditionally and keep their code
	 * simple.
	 */
	if (unlikely(token <= 0))
		return;

	data = xa_erase(&opp_configs, token);
	if (WARN_ON(!data))
		return;

	_opp_clear_config(data);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_clear_config);

static void devm_pm_opp_config_release(void *token)
{
	dev_pm_opp_clear_config((unsigned long)token);
}

/**
 * devm_pm_opp_set_config() - Set OPP configuration for the device.
 * @dev: Device for which configuration is being set.
 * @config: OPP configuration.
 *
 * This allows all device OPP configurations to be performed at once.
 * This is a resource-managed variant of dev_pm_opp_set_config().
 *
 * Return: 0 on success and errno otherwise.
 */
int devm_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config)
{
	int token = dev_pm_opp_set_config(dev, config);

	if (token < 0)
		return token;

	return devm_add_action_or_reset(dev, devm_pm_opp_config_release,
					(void *) ((unsigned long) token));
}
EXPORT_SYMBOL_GPL(devm_pm_opp_set_config);
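
/*
 * The same configuration with automatic cleanup, as a sketch: the token is
 * released via devres when the (hypothetical) driver unbinds, so no explicit
 * dev_pm_opp_clear_config() call is needed.
 *
 *	ret = devm_pm_opp_set_config(dev, &config);
 *	if (ret)
 *		return ret;
 */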

/**
 * dev_pm_opp_xlate_required_opp() - Find required OPP for @src_table OPP.
 * @src_table: OPP table which has @dst_table as one of its required OPP table.
 * @dst_table: Required OPP table of the @src_table.
 * @src_opp: OPP from the @src_table.
 *
 * This function returns the OPP (present in @dst_table) pointed out by the
 * "required-opps" property of the @src_opp (present in @src_table).
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 *
 * Return: pointer to 'struct dev_pm_opp' on success and errno otherwise.
 */
struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table,
						 struct opp_table *dst_table,
						 struct dev_pm_opp *src_opp)
{
	struct dev_pm_opp *opp, *dest_opp = ERR_PTR(-ENODEV);
	int i;

	if (!src_table || !dst_table || !src_opp ||
	    !src_table->required_opp_tables)
		return ERR_PTR(-EINVAL);

	/* required-opps not fully initialized yet */
	if (lazy_linking_pending(src_table))
		return ERR_PTR(-EBUSY);

	for (i = 0; i < src_table->required_opp_count; i++) {
		if (src_table->required_opp_tables[i] == dst_table) {
			mutex_lock(&src_table->lock);

			list_for_each_entry(opp, &src_table->opp_list, node) {
				if (opp == src_opp) {
					dest_opp = opp->required_opps[i];
					dev_pm_opp_get(dest_opp);
					break;
				}
			}

			mutex_unlock(&src_table->lock);
			break;
		}
	}

	if (IS_ERR(dest_opp)) {
		pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__,
		       src_table, dst_table);
	}

	return dest_opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_xlate_required_opp);
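
/*
 * Sketch of a typical use (variable names are illustrative): given an OPP
 * selected for a device, find the corresponding OPP of its power domain via
 * the "required-opps" link, and drop the reference once done.
 *
 *	genpd_opp = dev_pm_opp_xlate_required_opp(dev_table, genpd_table, opp);
 *	if (!IS_ERR(genpd_opp)) {
 *		// ... use genpd_opp ...
 *		dev_pm_opp_put(genpd_opp);
 *	}
 */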

/**
 * dev_pm_opp_xlate_performance_state() - Find required OPP's pstate for src_table.
 * @src_table: OPP table which has dst_table as one of its required OPP table.
 * @dst_table: Required OPP table of the src_table.
 * @pstate: Current performance state of the src_table.
 *
 * This returns the pstate of the OPP (present in @dst_table) pointed out by
 * the "required-opps" property of the OPP (present in @src_table) which has
 * performance state set to @pstate.
 *
 * Return: Zero or positive performance state on success, otherwise negative
 * value on errors.
 */
int dev_pm_opp_xlate_performance_state(struct opp_table *src_table,
				       struct opp_table *dst_table,
				       unsigned int pstate)
{
	struct dev_pm_opp *opp;
	int dest_pstate = -EINVAL;
	int i;

	/*
	 * Normally the src_table will have the "required_opps" property set to
	 * point to one of the OPPs in the dst_table, but in some cases the
	 * genpd and its master have one to one mapping of performance states
	 * and so none of them have the "required-opps" property set. Return the
	 * pstate of the src_table as it is in such cases.
	 */
	if (!src_table || !src_table->required_opp_count)
		return pstate;

	/* Both OPP tables must belong to genpds */
	if (unlikely(!src_table->is_genpd || !dst_table->is_genpd)) {
		pr_err("%s: Performance state is only valid for genpds.\n", __func__);
		return -EINVAL;
	}

	/* required-opps not fully initialized yet */
	if (lazy_linking_pending(src_table))
		return -EBUSY;

	for (i = 0; i < src_table->required_opp_count; i++) {
		if (src_table->required_opp_tables[i]->np == dst_table->np)
			break;
	}

	if (unlikely(i == src_table->required_opp_count)) {
		pr_err("%s: Couldn't find matching OPP table (%p: %p)\n",
		       __func__, src_table, dst_table);
		return -EINVAL;
	}

	mutex_lock(&src_table->lock);

	list_for_each_entry(opp, &src_table->opp_list, node) {
		if (opp->level == pstate) {
			dest_pstate = opp->required_opps[i]->level;
			goto unlock;
		}
	}

	pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__, src_table,
	       dst_table);

unlock:
	mutex_unlock(&src_table->lock);

	return dest_pstate;
}

/**
 * dev_pm_opp_add_dynamic() - Add an OPP table entry from the given OPP data.
 * @dev: The device for which we do this operation
 * @data: The OPP data for the OPP to add
 *
 * This function adds an opp definition to the opp table and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
int dev_pm_opp_add_dynamic(struct device *dev, struct dev_pm_opp_data *data)
{
	struct opp_table *opp_table;
	int ret;

	opp_table = _add_opp_table(dev, true);
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	/* Fix regulator count for dynamic OPPs */
	opp_table->regulator_count = 1;

	ret = _opp_add_v1(opp_table, dev, data, true);
	if (ret)
		dev_pm_opp_put_opp_table(opp_table);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_add_dynamic);
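
/*
 * Illustrative sketch (values assumed): registering a runtime-discovered OPP
 * and removing it again by frequency. Each successful dev_pm_opp_add_dynamic()
 * holds a table reference that dev_pm_opp_remove() drops.
 *
 *	struct dev_pm_opp_data data = {
 *		.freq = 800000000,	// 800 MHz
 *		.u_volt = 1100000,	// 1.1 V
 *	};
 *
 *	ret = dev_pm_opp_add_dynamic(dev, &data);
 *	if (ret)
 *		return ret;
 *	// ...
 *	dev_pm_opp_remove(dev, data.freq);
 */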

/**
 * _opp_set_availability() - helper to set the availability of an opp
 * @dev: device for which we do this operation
 * @freq: OPP frequency to modify availability
 * @availability_req: availability status requested for this opp
 *
 * Set the availability of an OPP; opp_{enable,disable} share a common logic
 * which is isolated here.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
static int _opp_set_availability(struct device *dev, unsigned long freq,
				 bool availability_req)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV);
	int r = 0;

	/* Find the opp_table */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		r = PTR_ERR(opp_table);
		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		return r;
	}

	if (!assert_single_clk(opp_table, 0)) {
		r = -EINVAL;
		goto put_table;
	}

	mutex_lock(&opp_table->lock);

	/* Do we have the frequency? */
	list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
		if (tmp_opp->rates[0] == freq) {
			opp = tmp_opp;
			break;
		}
	}

	if (IS_ERR(opp)) {
		r = PTR_ERR(opp);
		goto unlock;
	}

	/* Is update really needed? */
	if (opp->available == availability_req)
		goto unlock;

	opp->available = availability_req;

	dev_pm_opp_get(opp);
	mutex_unlock(&opp_table->lock);

	/* Notify the change of the OPP availability */
	if (availability_req)
		blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE,
					     opp);
	else
		blocking_notifier_call_chain(&opp_table->head,
					     OPP_EVENT_DISABLE, opp);

	dev_pm_opp_put(opp);
	goto put_table;

unlock:
	mutex_unlock(&opp_table->lock);
put_table:
	dev_pm_opp_put_opp_table(opp_table);
	return r;
}

/**
 * dev_pm_opp_adjust_voltage() - helper to change the voltage of an OPP
 * @dev: device for which we do this operation
 * @freq: OPP frequency to adjust voltage of
 * @u_volt: new OPP target voltage
 * @u_volt_min: new OPP min voltage
 * @u_volt_max: new OPP max voltage
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
			      unsigned long u_volt, unsigned long u_volt_min,
			      unsigned long u_volt_max)

{
	struct opp_table *opp_table;
	struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV);
	int r = 0;

	/* Find the opp_table */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		r = PTR_ERR(opp_table);
		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		return r;
	}

	if (!assert_single_clk(opp_table, 0)) {
		r = -EINVAL;
		goto put_table;
	}

	mutex_lock(&opp_table->lock);

	/* Do we have the frequency? */
	list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
		if (tmp_opp->rates[0] == freq) {
			opp = tmp_opp;
			break;
		}
	}

	if (IS_ERR(opp)) {
		r = PTR_ERR(opp);
		goto adjust_unlock;
	}

	/* Is update really needed? */
	if (opp->supplies->u_volt == u_volt)
		goto adjust_unlock;

	opp->supplies->u_volt = u_volt;
	opp->supplies->u_volt_min = u_volt_min;
	opp->supplies->u_volt_max = u_volt_max;

	dev_pm_opp_get(opp);
	mutex_unlock(&opp_table->lock);

	/* Notify the voltage change of the OPP */
	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADJUST_VOLTAGE,
				     opp);

	dev_pm_opp_put(opp);
	goto put_table;

adjust_unlock:
	mutex_unlock(&opp_table->lock);
put_table:
	dev_pm_opp_put_opp_table(opp_table);
	return r;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_adjust_voltage);
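
/*
 * Sketch (values illustrative): tightening the voltage of an existing 800 MHz
 * OPP after calibration, e.g. from an adaptive-voltage-scaling driver.
 *
 *	ret = dev_pm_opp_adjust_voltage(dev, 800000000,
 *					1050000, 1000000, 1100000);
 */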

/**
 * dev_pm_opp_sync_regulators() - Sync state of voltage regulators
 * @dev: device for which we do this operation
 *
 * Sync voltage state of the OPP table regulators.
 *
 * Return: 0 on success or a negative error value.
 */
int dev_pm_opp_sync_regulators(struct device *dev)
{
	struct opp_table *opp_table;
	struct regulator *reg;
	int i, ret = 0;

	/* Device may not have OPP table */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return 0;

	/* Regulator may not be required for the device */
	if (unlikely(!opp_table->regulators))
		goto put_table;

	/* Nothing to sync if voltage wasn't changed */
	if (!opp_table->enabled)
		goto put_table;

	for (i = 0; i < opp_table->regulator_count; i++) {
		reg = opp_table->regulators[i];
		ret = regulator_sync_voltage(reg);
		if (ret)
			break;
	}
put_table:
	/* Drop reference taken by _find_opp_table() */
	dev_pm_opp_put_opp_table(opp_table);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_sync_regulators);
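
/*
 * Typical call site, as a sketch: after a power cycle the regulator's cached
 * voltage may be stale, so a (hypothetical) resume handler re-syncs it before
 * the device is used again.
 *
 *	static int foo_runtime_resume(struct device *dev)
 *	{
 *		return dev_pm_opp_sync_regulators(dev);
 *	}
 */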

/**
 * dev_pm_opp_enable() - Enable a specific OPP
 * @dev: device for which we do this operation
 * @freq: OPP frequency to enable
 *
 * Enables a provided opp. If the operation is valid, this returns 0, else the
 * corresponding error value. It is meant to be used by users to make an OPP
 * available again after it was temporarily made unavailable with
 * dev_pm_opp_disable.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
int dev_pm_opp_enable(struct device *dev, unsigned long freq)
{
	return _opp_set_availability(dev, freq, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_enable);

/**
 * dev_pm_opp_disable() - Disable a specific OPP
 * @dev: device for which we do this operation
 * @freq: OPP frequency to disable
 *
 * Disables a provided opp. If the operation is valid, this returns
 * 0, else the corresponding error value. It is meant to be a temporary
 * control by users to make this OPP not available until the circumstances are
 * right to make it available again (with a call to dev_pm_opp_enable).
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
int dev_pm_opp_disable(struct device *dev, unsigned long freq)
{
	return _opp_set_availability(dev, freq, false);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
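
/*
 * Sketch (frequency assumed): temporarily masking an OPP, e.g. while the
 * system is thermally constrained, and restoring it later.
 *
 *	dev_pm_opp_disable(dev, 1200000000);	// too hot, hide the 1.2 GHz OPP
 *	// ...
 *	dev_pm_opp_enable(dev, 1200000000);	// cooled down, allow it again
 */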

/**
 * dev_pm_opp_register_notifier() - Register OPP notifier for the device
 * @dev: Device for which notifier needs to be registered
 * @nb: Notifier block to be registered
 *
 * Return: 0 on success or a negative error value.
 */
int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb)
{
	struct opp_table *opp_table;
	int ret;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	ret = blocking_notifier_chain_register(&opp_table->head, nb);

	dev_pm_opp_put_opp_table(opp_table);

	return ret;
}
EXPORT_SYMBOL(dev_pm_opp_register_notifier);

/**
 * dev_pm_opp_unregister_notifier() - Unregister OPP notifier for the device
 * @dev: Device for which notifier needs to be unregistered
 * @nb: Notifier block to be unregistered
 *
 * Return: 0 on success or a negative error value.
 */
int dev_pm_opp_unregister_notifier(struct device *dev,
				   struct notifier_block *nb)
{
	struct opp_table *opp_table;
	int ret;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	ret = blocking_notifier_chain_unregister(&opp_table->head, nb);

	dev_pm_opp_put_opp_table(opp_table);

	return ret;
}
EXPORT_SYMBOL(dev_pm_opp_unregister_notifier);
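
/*
 * Minimal notifier sketch (callback and block names assumed): @data is the
 * affected 'struct dev_pm_opp' and @event one of the OPP_EVENT_* values used
 * above.
 *
 *	static int foo_opp_notify(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		struct dev_pm_opp *opp = data;
 *
 *		if (event == OPP_EVENT_ADJUST_VOLTAGE)
 *			pr_debug("OPP voltage adjusted\n");
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_opp_notify,
 *	};
 *
 *	// dev_pm_opp_register_notifier(dev, &foo_nb);
 *	// dev_pm_opp_unregister_notifier(dev, &foo_nb);
 */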

/**
 * dev_pm_opp_remove_table() - Free all OPPs associated with the device
 * @dev: device pointer used to lookup OPP table.
 *
 * Free both OPPs created using static entries present in DT and the
 * dynamically added entries.
 */
void dev_pm_opp_remove_table(struct device *dev)
{
	struct opp_table *opp_table;

	/* Check for existing table for 'dev' */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		int error = PTR_ERR(opp_table);

		if (error != -ENODEV)
			WARN(1, "%s: opp_table: %d\n",
			     IS_ERR_OR_NULL(dev) ?
					"Invalid device" : dev_name(dev),
			     error);
		return;
	}

	/*
	 * Drop the extra reference only if the OPP table was successfully added
	 * with dev_pm_opp_of_add_table() earlier.
	 */
	if (_opp_remove_all_static(opp_table))
		dev_pm_opp_put_opp_table(opp_table);

	/* Drop reference taken by _find_opp_table() */
	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);
