// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic OPP Interface
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/pm_domain.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/xarray.h>

#include "opp.h"

/*
 * The root of the list of all opp-tables. All opp_table structures branch off
 * from here, with each opp_table containing the list of opps it supports in
 * various states of availability.
 */
LIST_HEAD(opp_tables);

/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(opp_table_lock);
/* Flag indicating that opp_tables list is being updated at the moment */
static bool opp_tables_busy;

/* OPP ID allocator */
static DEFINE_XARRAY_ALLOC1(opp_configs);

static bool _find_opp_dev(const struct device *dev, struct opp_table *opp_table)
{
	struct opp_device *opp_dev;

	guard(mutex)(&opp_table->lock);

	list_for_each_entry(opp_dev, &opp_table->dev_list, node)
		if (opp_dev->dev == dev)
			return true;

	return false;
}

static struct opp_table *_find_opp_table_unlocked(struct device *dev)
{
	struct opp_table *opp_table;

	list_for_each_entry(opp_table, &opp_tables, node) {
		if (_find_opp_dev(dev, opp_table))
			return dev_pm_opp_get_opp_table_ref(opp_table);
	}

	return ERR_PTR(-ENODEV);
}

/**
 * _find_opp_table() - find opp_table struct using device pointer
 * @dev:	device pointer used to lookup OPP table
 *
 * Search OPP table for one containing matching device.
 *
 * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
 * -EINVAL based on type of error.
 *
 * The callers must call dev_pm_opp_put_opp_table() after the table is used.
 */
struct opp_table *_find_opp_table(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	guard(mutex)(&opp_table_lock);
	return _find_opp_table_unlocked(dev);
}
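
/*
 * A minimal caller sketch (hypothetical helper name): like the rest of this
 * file, it relies on the __free(put_opp_table) cleanup annotation to drop the
 * table reference automatically when the pointer goes out of scope.
 */
static int example_opp_table_lookup(struct device *dev)
{
	struct opp_table *opp_table __free(put_opp_table) =
		_find_opp_table(dev);

	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	/* Use the table here; the reference is dropped automatically. */
	return 0;
}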

/*
 * Returns true if the OPP table has at most one clock, else returns false
 * with a WARN.
 *
 * We don't force clk_count == 1 here as there are users who don't have a clock
 * representation in the OPP table and manage the clock configuration themselves
 * in a platform-specific way.
 */
static bool assert_single_clk(struct opp_table *opp_table,
			      unsigned int __always_unused index)
{
	return !WARN_ON(opp_table->clk_count > 1);
}

/*
 * Returns true if the clock table is large enough to contain the clock index.
 */
static bool assert_clk_index(struct opp_table *opp_table,
			     unsigned int index)
{
	return opp_table->clk_count > index;
}

/*
 * Returns true if the bandwidth table is large enough to contain the bandwidth
 * index.
 */
static bool assert_bandwidth_index(struct opp_table *opp_table,
				   unsigned int index)
{
	return opp_table->path_count > index;
}

/**
 * dev_pm_opp_get_bw() - Gets the bandwidth corresponding to an opp
 * @opp:	opp for which bandwidth has to be returned
 * @peak:	select peak or average bandwidth
 * @index:	bandwidth index
 *
 * Return: bandwidth in kBps, else return 0
 */
unsigned long dev_pm_opp_get_bw(struct dev_pm_opp *opp, bool peak, int index)
{
	if (IS_ERR_OR_NULL(opp)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	if (index >= opp->opp_table->path_count)
		return 0;

	if (!opp->bandwidth)
		return 0;

	return peak ? opp->bandwidth[index].peak : opp->bandwidth[index].avg;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_bw);

/**
 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
 * @opp:	opp for which voltage has to be returned
 *
 * Return: voltage in microvolts corresponding to the opp, else
 * return 0
 *
 * This is useful only for devices with a single power supply.
 */
unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	return opp->supplies[0].u_volt;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
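
/*
 * Hedged illustration (hypothetical helper): reading per-OPP properties once
 * an OPP reference is held, e.g. one returned by the dev_pm_opp_find_*()
 * helpers below.
 */
static void example_print_opp(struct device *dev, struct dev_pm_opp *opp)
{
	dev_info(dev, "OPP: %lu Hz at %lu uV\n",
		 dev_pm_opp_get_freq(opp), dev_pm_opp_get_voltage(opp));
}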

/**
 * dev_pm_opp_get_supplies() - Gets the supply information corresponding to an opp
 * @opp:	opp for which the supply information has to be returned
 * @supplies:	Placeholder for copying the supply information.
 *
 * Return: negative error number on failure, 0 otherwise on success after
 * setting @supplies.
 *
 * This can be used for devices with any number of power supplies. The caller
 * must ensure that the @supplies array contains space for each regulator.
 */
int dev_pm_opp_get_supplies(struct dev_pm_opp *opp,
			    struct dev_pm_opp_supply *supplies)
{
	if (IS_ERR_OR_NULL(opp) || !supplies) {
		pr_err("%s: Invalid parameters\n", __func__);
		return -EINVAL;
	}

	memcpy(supplies, opp->supplies,
	       sizeof(*supplies) * opp->opp_table->regulator_count);
	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_supplies);
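
/*
 * Sketch (hypothetical, assuming the driver registered exactly two supplies
 * via dev_pm_opp_set_config() and therefore knows the array size to pass).
 */
static int example_read_supplies(struct dev_pm_opp *opp)
{
	struct dev_pm_opp_supply supplies[2];
	int ret;

	ret = dev_pm_opp_get_supplies(opp, supplies);
	if (ret)
		return ret;

	pr_info("supply 0: %lu uV, supply 1: %lu uV\n",
		supplies[0].u_volt, supplies[1].u_volt);
	return 0;
}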

/**
 * dev_pm_opp_get_power() - Gets the power corresponding to an opp
 * @opp:	opp for which power has to be returned
 *
 * Return: power in microwatts corresponding to the opp, else
 * return 0
 *
 * This is useful only for devices with a single power supply.
 */
unsigned long dev_pm_opp_get_power(struct dev_pm_opp *opp)
{
	unsigned long opp_power = 0;
	int i;

	if (IS_ERR_OR_NULL(opp)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	for (i = 0; i < opp->opp_table->regulator_count; i++)
		opp_power += opp->supplies[i].u_watt;

	return opp_power;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_power);

/**
 * dev_pm_opp_get_freq_indexed() - Gets the frequency corresponding to an
 *				   available opp with specified index
 * @opp: opp for which frequency has to be returned
 * @index: index of the frequency within the required opp
 *
 * Return: frequency in hertz corresponding to the opp with specified index,
 * else return 0
 */
unsigned long dev_pm_opp_get_freq_indexed(struct dev_pm_opp *opp, u32 index)
{
	if (IS_ERR_OR_NULL(opp) || index >= opp->opp_table->clk_count) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	return opp->rates[index];
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq_indexed);

/**
 * dev_pm_opp_get_level() - Gets the level corresponding to an available opp
 * @opp:	opp for which the level value has to be returned
 *
 * Return: level read from device tree corresponding to the opp, else
 * return 0.
 */
unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp) || !opp->available) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	return opp->level;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_level);

/**
 * dev_pm_opp_get_required_pstate() - Gets the required performance state
 *                                    corresponding to an available opp
 * @opp:	opp for which the performance state has to be returned
 * @index:	index of the required opp
 *
 * Return: performance state read from device tree corresponding to the
 * required opp, else return 0.
 */
unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp,
					    unsigned int index)
{
	if (IS_ERR_OR_NULL(opp) || !opp->available ||
	    index >= opp->opp_table->required_opp_count) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	/* required-opps not fully initialized yet */
	if (lazy_linking_pending(opp->opp_table))
		return 0;

	/* The required OPP table must belong to a genpd */
	if (unlikely(!opp->opp_table->required_opp_tables[index]->is_genpd)) {
		pr_err("%s: Performance state is only valid for genpds.\n", __func__);
		return 0;
	}

	return opp->required_opps[index]->level;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_required_pstate);

/**
 * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
 * @opp: opp for which turbo mode is being verified
 *
 * Turbo OPPs are not for normal use, and can be enabled (under certain
 * conditions) for short durations of time to finish high-throughput work
 * quickly. Running on them for longer times may overheat the chip.
 *
 * Return: true if opp is turbo opp, else false.
 */
bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp) || !opp->available) {
		pr_err("%s: Invalid parameters\n", __func__);
		return false;
	}

	return opp->turbo;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);

/**
 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
 * @dev:	device for which we do this operation
 *
 * Return: This function returns the max clock latency in nanoseconds.
 */
unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
{
	struct opp_table *opp_table __free(put_opp_table) =
		_find_opp_table(dev);

	if (IS_ERR(opp_table))
		return 0;

	return opp_table->clock_latency_ns_max;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);

/**
 * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max voltage latency in nanoseconds.
 */
unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
{
	struct dev_pm_opp *opp;
	struct regulator *reg;
	unsigned long latency_ns = 0;
	int ret, i, count;
	struct {
		unsigned long min;
		unsigned long max;
	} *uV;

	struct opp_table *opp_table __free(put_opp_table) =
		_find_opp_table(dev);

	if (IS_ERR(opp_table))
		return 0;

	/* Regulator may not be required for the device */
	if (!opp_table->regulators)
		return 0;

	count = opp_table->regulator_count;

	uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL);
	if (!uV)
		return 0;

	scoped_guard(mutex, &opp_table->lock) {
		for (i = 0; i < count; i++) {
			uV[i].min = ~0;
			uV[i].max = 0;

			list_for_each_entry(opp, &opp_table->opp_list, node) {
				if (!opp->available)
					continue;

				if (opp->supplies[i].u_volt_min < uV[i].min)
					uV[i].min = opp->supplies[i].u_volt_min;
				if (opp->supplies[i].u_volt_max > uV[i].max)
					uV[i].max = opp->supplies[i].u_volt_max;
			}
		}
	}

	/*
	 * The caller needs to ensure that opp_table (and hence the regulator)
	 * isn't freed, while we are executing this routine.
	 */
	for (i = 0; i < count; i++) {
		reg = opp_table->regulators[i];
		ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max);
		if (ret > 0)
			latency_ns += ret * 1000;
	}

	kfree(uV);

	return latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);

/**
 * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
 *					     nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max transition latency, in nanoseconds, to
 * switch from one OPP to another.
 */
unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
{
	return dev_pm_opp_get_max_volt_latency(dev) +
		dev_pm_opp_get_max_clock_latency(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);

/**
 * dev_pm_opp_get_suspend_opp_freq() - Get frequency of suspend opp in Hz
 * @dev:	device for which we do this operation
 *
 * Return: This function returns the frequency of the OPP marked as suspend_opp
 * if one is available, else returns 0.
 */
unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev)
{
	unsigned long freq = 0;

	struct opp_table *opp_table __free(put_opp_table) =
		_find_opp_table(dev);

	if (IS_ERR(opp_table))
		return 0;

	if (opp_table->suspend_opp && opp_table->suspend_opp->available)
		freq = dev_pm_opp_get_freq(opp_table->suspend_opp);

	return freq;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp_freq);

int _get_opp_count(struct opp_table *opp_table)
{
	struct dev_pm_opp *opp;
	int count = 0;

	guard(mutex)(&opp_table->lock);

	list_for_each_entry(opp, &opp_table->opp_list, node) {
		if (opp->available)
			count++;
	}

	return count;
}

/**
 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
 * @dev:	device for which we do this operation
 *
 * Return: This function returns the number of available opps if there are any,
 * else returns 0 if none or the corresponding error value.
 */
int dev_pm_opp_get_opp_count(struct device *dev)
{
	struct opp_table *opp_table __free(put_opp_table) =
		_find_opp_table(dev);

	if (IS_ERR(opp_table)) {
		dev_dbg(dev, "%s: OPP table not found (%ld)\n",
			__func__, PTR_ERR(opp_table));
		return PTR_ERR(opp_table);
	}

	return _get_opp_count(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
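
/*
 * Hedged usage sketch (hypothetical helper): drivers commonly size a
 * frequency table from the OPP count before iterating the table.
 */
static int example_count_opps(struct device *dev)
{
	int count = dev_pm_opp_get_opp_count(dev);

	if (count <= 0)
		return count ? count : -ENODEV;

	dev_dbg(dev, "%d usable OPPs\n", count);
	return count;
}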

/* Helpers to read keys */
static unsigned long _read_freq(struct dev_pm_opp *opp, int index)
{
	return opp->rates[index];
}

static unsigned long _read_level(struct dev_pm_opp *opp, int index)
{
	return opp->level;
}

static unsigned long _read_bw(struct dev_pm_opp *opp, int index)
{
	return opp->bandwidth[index].peak;
}

static unsigned long _read_opp_key(struct dev_pm_opp *opp, int index,
				   struct dev_pm_opp_key *key)
{
	key->bw = opp->bandwidth ? opp->bandwidth[index].peak : 0;
	key->freq = opp->rates[index];
	key->level = opp->level;

	return true;
}

/* Generic comparison helpers */
static bool _compare_exact(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
			   unsigned long opp_key, unsigned long key)
{
	if (opp_key == key) {
		*opp = temp_opp;
		return true;
	}

	return false;
}

static bool _compare_ceil(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
			  unsigned long opp_key, unsigned long key)
{
	if (opp_key >= key) {
		*opp = temp_opp;
		return true;
	}

	return false;
}

static bool _compare_floor(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
			   unsigned long opp_key, unsigned long key)
{
	if (opp_key > key)
		return true;

	*opp = temp_opp;
	return false;
}

static bool _compare_opp_key_exact(struct dev_pm_opp **opp,
		struct dev_pm_opp *temp_opp, struct dev_pm_opp_key *opp_key,
		struct dev_pm_opp_key *key)
{
	bool level_match = (key->level == OPP_LEVEL_UNSET || opp_key->level == key->level);
	bool freq_match = (key->freq == 0 || opp_key->freq == key->freq);
	bool bw_match = (key->bw == 0 || opp_key->bw == key->bw);

	if (freq_match && level_match && bw_match) {
		*opp = temp_opp;
		return true;
	}

	return false;
}

/* Generic key finding helpers */
static struct dev_pm_opp *_opp_table_find_key(struct opp_table *opp_table,
		unsigned long *key, int index, bool available,
		unsigned long (*read)(struct dev_pm_opp *opp, int index),
		bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
				unsigned long opp_key, unsigned long key),
		bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	/* Assert that the requirement is met */
	if (assert && !assert(opp_table, index))
		return ERR_PTR(-EINVAL);

	guard(mutex)(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available == available) {
			if (compare(&opp, temp_opp, read(temp_opp, index), *key))
				break;
		}
	}

	/* Increment the reference count of OPP */
	if (!IS_ERR(opp)) {
		*key = read(opp, index);
		dev_pm_opp_get(opp);
	}

	return opp;
}

static struct dev_pm_opp *_opp_table_find_opp_key(struct opp_table *opp_table,
		struct dev_pm_opp_key *key, bool available,
		unsigned long (*read)(struct dev_pm_opp *opp, int index,
				      struct dev_pm_opp_key *key),
		bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
				struct dev_pm_opp_key *opp_key, struct dev_pm_opp_key *key),
		bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
	struct dev_pm_opp_key temp_key;

	/* Assert that the requirement is met */
	if (!assert(opp_table, 0))
		return ERR_PTR(-EINVAL);

	guard(mutex)(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available == available) {
			read(temp_opp, 0, &temp_key);
			if (compare(&opp, temp_opp, &temp_key, key)) {
				/* Increment the reference count of OPP */
				dev_pm_opp_get(opp);
				break;
			}
		}
	}

	return opp;
}

static struct dev_pm_opp *
_find_key(struct device *dev, unsigned long *key, int index, bool available,
	  unsigned long (*read)(struct dev_pm_opp *opp, int index),
	  bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
			  unsigned long opp_key, unsigned long key),
	  bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
	struct opp_table *opp_table __free(put_opp_table) =
		_find_opp_table(dev);

	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: OPP table not found (%ld)\n", __func__,
			PTR_ERR(opp_table));
		return ERR_CAST(opp_table);
	}

	return _opp_table_find_key(opp_table, key, index, available, read,
				   compare, assert);
}

static struct dev_pm_opp *_find_key_exact(struct device *dev,
		unsigned long key, int index, bool available,
		unsigned long (*read)(struct dev_pm_opp *opp, int index),
		bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
	/*
	 * The value of key will be updated here, but will be ignored as the
	 * caller doesn't need it.
	 */
	return _find_key(dev, &key, index, available, read, _compare_exact,
			 assert);
}

static struct dev_pm_opp *_opp_table_find_key_ceil(struct opp_table *opp_table,
		unsigned long *key, int index, bool available,
		unsigned long (*read)(struct dev_pm_opp *opp, int index),
		bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
	return _opp_table_find_key(opp_table, key, index, available, read,
				   _compare_ceil, assert);
}

static struct dev_pm_opp *_find_key_ceil(struct device *dev, unsigned long *key,
		int index, bool available,
		unsigned long (*read)(struct dev_pm_opp *opp, int index),
		bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
	return _find_key(dev, key, index, available, read, _compare_ceil,
			 assert);
}

static struct dev_pm_opp *_find_key_floor(struct device *dev,
		unsigned long *key, int index, bool available,
		unsigned long (*read)(struct dev_pm_opp *opp, int index),
		bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
	return _find_key(dev, key, index, available, read, _compare_floor,
			 assert);
}

/**
 * dev_pm_opp_find_freq_exact() - search for an exact frequency
 * @dev:		device for which we do this operation
 * @freq:		frequency to search for
 * @available:		true/false - match for available opp
 *
 * Return: Searches for exact match in the opp table and returns pointer to the
 * matching opp if found, else returns ERR_PTR in case of error and should
 * be handled using IS_ERR. Error return values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * Note: available is a modifier for the search. If available == true, then the
 * match is for an exact matching frequency which is available in the stored
 * OPP table. If false, the match is for an exact frequency which is not
 * available.
 *
 * This provides a mechanism to enable an opp which is not available currently
 * or the opposite as well.
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
		unsigned long freq, bool available)
{
	return _find_key_exact(dev, freq, 0, available, _read_freq,
			       assert_single_clk);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
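
/*
 * Hedged sketch (hypothetical helper, illustrative frequency): look up a
 * currently-unavailable OPP, drop the reference, then enable it.
 */
static int example_enable_opp(struct device *dev)
{
	struct dev_pm_opp *opp;

	opp = dev_pm_opp_find_freq_exact(dev, 800000000, false);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	dev_pm_opp_put(opp);

	return dev_pm_opp_enable(dev, 800000000);
}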

/**
 * dev_pm_opp_find_key_exact() - Search for an OPP with exact key set
 * @dev:		Device for which the OPP is being searched
 * @key:		OPP key set to match
 * @available:		true/false - match for available OPP
 *
 * Search for an exact match of the key set in the OPP table.
 *
 * Return: A matching opp on success, else ERR_PTR in case of error.
 * Possible error values:
 * EINVAL:	for bad pointers
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * Note: 'available' is a modifier for the search. If 'available' == true,
 * then the match is for exact matching key and is available in the stored
 * OPP table. If false, the match is for exact key which is not available.
 *
 * This provides a mechanism to enable an OPP which is not available currently
 * or the opposite as well.
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_key_exact(struct device *dev,
					     struct dev_pm_opp_key *key,
					     bool available)
{
	struct opp_table *opp_table __free(put_opp_table) = _find_opp_table(dev);

	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: OPP table not found (%ld)\n", __func__,
			PTR_ERR(opp_table));
		return ERR_CAST(opp_table);
	}

	return _opp_table_find_opp_key(opp_table, key, available,
				       _read_opp_key, _compare_opp_key_exact,
				       assert_single_clk);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_key_exact);
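
/*
 * Hedged sketch (hypothetical helper, illustrative values): match on
 * frequency alone, leaving level and bandwidth as wildcards, which
 * _compare_opp_key_exact() treats as "don't care".
 */
static struct dev_pm_opp *example_find_by_key(struct device *dev)
{
	struct dev_pm_opp_key key = {
		.freq = 600000000,
		.level = OPP_LEVEL_UNSET,
		.bw = 0,
	};

	return dev_pm_opp_find_key_exact(dev, &key, true);
}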

/**
 * dev_pm_opp_find_freq_exact_indexed() - Search for an exact freq for the
 *					 clock corresponding to the index
 * @dev:	Device for which we do this operation
 * @freq:	frequency to search for
 * @index:	Clock index
 * @available:	true/false - match for available opp
 *
 * Search for the matching exact OPP for the clock corresponding to the
 * specified index from a starting freq for a device.
 *
 * Return: matching *opp, else returns ERR_PTR in case of error and should be
 * handled using IS_ERR. Error return values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *
dev_pm_opp_find_freq_exact_indexed(struct device *dev, unsigned long freq,
				   u32 index, bool available)
{
	return _find_key_exact(dev, freq, index, available, _read_freq,
			       assert_clk_index);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact_indexed);

static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
						   unsigned long *freq)
{
	return _opp_table_find_key_ceil(opp_table, freq, 0, true, _read_freq,
					assert_single_clk);
}

/**
 * dev_pm_opp_find_freq_ceil() - Search for a rounded ceil freq
 * @dev:	device for which we do this operation
 * @freq:	Start frequency
 *
 * Search for the matching ceil *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
					     unsigned long *freq)
{
	return _find_key_ceil(dev, freq, 0, true, _read_freq, assert_single_clk);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
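
/*
 * Typical ceil lookup, as used by cpufreq-style consumers (hypothetical
 * helper): *freq is refreshed to the rate of the chosen OPP.
 */
static int example_find_ceil(struct device *dev, unsigned long *freq)
{
	struct dev_pm_opp *opp = dev_pm_opp_find_freq_ceil(dev, freq);

	if (IS_ERR(opp))
		return PTR_ERR(opp);

	dev_pm_opp_put(opp);

	return 0;
}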

/**
 * dev_pm_opp_find_freq_ceil_indexed() - Search for a rounded ceil freq for the
 *					 clock corresponding to the index
 * @dev:	Device for which we do this operation
 * @freq:	Start frequency
 * @index:	Clock index
 *
 * Search for the matching ceil *available* OPP for the clock corresponding to
 * the specified index from a starting freq for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *
dev_pm_opp_find_freq_ceil_indexed(struct device *dev, unsigned long *freq,
				  u32 index)
{
	return _find_key_ceil(dev, freq, index, true, _read_freq,
			      assert_clk_index);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_indexed);

/**
 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
 * @dev:	device for which we do this operation
 * @freq:	Start frequency
 *
 * Search for the matching floor *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
					      unsigned long *freq)
{
	return _find_key_floor(dev, freq, 0, true, _read_freq, assert_single_clk);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);

/**
 * dev_pm_opp_find_freq_floor_indexed() - Search for a rounded floor freq for the
 *					  clock corresponding to the index
 * @dev:	Device for which we do this operation
 * @freq:	Start frequency
 * @index:	Clock index
 *
 * Search for the matching floor *available* OPP for the clock corresponding to
 * the specified index from a starting freq for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *
dev_pm_opp_find_freq_floor_indexed(struct device *dev, unsigned long *freq,
				   u32 index)
{
	return _find_key_floor(dev, freq, index, true, _read_freq, assert_clk_index);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor_indexed);

/**
 * dev_pm_opp_find_level_exact() - search for an exact level
 * @dev:		device for which we do this operation
 * @level:		level to search for
 *
 * Return: Searches for exact match in the opp table and returns pointer to the
 * matching opp if found, else returns ERR_PTR in case of error and should
 * be handled using IS_ERR. Error return values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
					       unsigned int level)
{
	return _find_key_exact(dev, level, 0, true, _read_level, NULL);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_exact);

/**
 * dev_pm_opp_find_level_ceil() - search for a rounded-up level
 * @dev:		device for which we do this operation
 * @level:		level to search for
 *
 * Return: Searches for rounded-up match in the opp table and returns pointer
 * to the matching opp if found, else returns ERR_PTR in case of error and
 * should be handled using IS_ERR. Error return values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
					      unsigned int *level)
{
	unsigned long temp = *level;
	struct dev_pm_opp *opp;

	opp = _find_key_ceil(dev, &temp, 0, true, _read_level, NULL);
	if (IS_ERR(opp))
		return opp;

	/* False match */
	if (temp == OPP_LEVEL_UNSET) {
		dev_err(dev, "%s: OPP levels aren't available\n", __func__);
		dev_pm_opp_put(opp);
		return ERR_PTR(-ENODEV);
	}

	*level = temp;
	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil);

/**
 * dev_pm_opp_find_level_floor() - Search for a rounded floor level
 * @dev:	device for which we do this operation
 * @level:	Start level
 *
 * Search for the matching floor *available* OPP from a starting level
 * for a device.
 *
 * Return: matching *opp and refreshes *level accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_level_floor(struct device *dev,
					       unsigned int *level)
{
	unsigned long temp = *level;
	struct dev_pm_opp *opp;

	opp = _find_key_floor(dev, &temp, 0, true, _read_level, NULL);
	*level = temp;
	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_floor);

/**
 * dev_pm_opp_find_bw_ceil() - Search for a rounded ceil bandwidth
 * @dev:	device for which we do this operation
 * @bw:		start bandwidth
 * @index:	which bandwidth to compare, in case of OPPs with several values
 *
 * Search for the matching ceil *available* OPP from a starting bandwidth
 * for a device.
 *
 * Return: matching *opp and refreshes *bw accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev, unsigned int *bw,
					   int index)
{
	unsigned long temp = *bw;
	struct dev_pm_opp *opp;

	opp = _find_key_ceil(dev, &temp, index, true, _read_bw,
			     assert_bandwidth_index);
	*bw = temp;
	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_ceil);

/**
 * dev_pm_opp_find_bw_floor() - Search for a rounded floor bandwidth
 * @dev:	device for which we do this operation
 * @bw:		start bandwidth
 * @index:	which bandwidth to compare, in case of OPPs with several values
 *
 * Search for the matching floor *available* OPP from a starting bandwidth
 * for a device.
 *
 * Return: matching *opp and refreshes *bw accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
					    unsigned int *bw, int index)
{
	unsigned long temp = *bw;
	struct dev_pm_opp *opp;

	opp = _find_key_floor(dev, &temp, index, true, _read_bw,
			      assert_bandwidth_index);
	*bw = temp;
	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_floor);

static int _set_opp_voltage(struct device *dev, struct regulator *reg,
			    struct dev_pm_opp_supply *supply)
{
	int ret;

	/* Regulator not available for device */
	if (IS_ERR(reg)) {
		dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
			PTR_ERR(reg));
		return 0;
	}

	dev_dbg(dev, "%s: voltages (uV): %lu %lu %lu\n", __func__,
		supply->u_volt_min, supply->u_volt, supply->u_volt_max);

	ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
					    supply->u_volt, supply->u_volt_max);
	if (ret)
		dev_err(dev, "%s: failed to set voltage (%lu %lu %lu uV): %d\n",
			__func__, supply->u_volt_min, supply->u_volt,
			supply->u_volt_max, ret);

	return ret;
}

static int
_opp_config_clk_single(struct device *dev, struct opp_table *opp_table,
		       struct dev_pm_opp *opp, void *data, bool scaling_down)
{
	unsigned long *target = data;
	unsigned long freq;
	int ret;

	/* One of target and opp must be available */
	if (target) {
		freq = *target;
	} else if (opp) {
		freq = opp->rates[0];
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	ret = clk_set_rate(opp_table->clk, freq);
	if (ret) {
		dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
			ret);
	} else {
		opp_table->current_rate_single_clk = freq;
	}

	return ret;
}

/*
 * Simple implementation for configuring multiple clocks. Configure clocks in
 * the order in which they are present in the array while scaling up, and in
 * the reverse order while scaling down.
 */
int dev_pm_opp_config_clks_simple(struct device *dev,
		struct opp_table *opp_table, struct dev_pm_opp *opp, void *data,
		bool scaling_down)
{
	int ret, i;

	if (scaling_down) {
		for (i = opp_table->clk_count - 1; i >= 0; i--) {
			ret = clk_set_rate(opp_table->clks[i], opp->rates[i]);
			if (ret) {
				dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
					ret);
				return ret;
			}
		}
	} else {
		for (i = 0; i < opp_table->clk_count; i++) {
			ret = clk_set_rate(opp_table->clks[i], opp->rates[i]);
			if (ret) {
				dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
					ret);
				return ret;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_config_clks_simple);
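
/*
 * Hedged sketch of how a platform might opt into this helper through
 * dev_pm_opp_set_config(); the clock names are purely illustrative.
 */
static int example_setup_multi_clk(struct device *dev)
{
	struct dev_pm_opp_config config = {
		.clk_names = (const char *[]){ "core", "bus", NULL },
		.config_clks = dev_pm_opp_config_clks_simple,
	};

	return dev_pm_opp_set_config(dev, &config);
}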

static int _opp_config_regulator_single(struct device *dev,
			struct dev_pm_opp *old_opp, struct dev_pm_opp *new_opp,
			struct regulator **regulators, unsigned int count)
{
	struct regulator *reg = regulators[0];
	int ret;

	/* This function only supports a single regulator per device */
	if (WARN_ON(count > 1)) {
		dev_err(dev, "multiple regulators are not supported\n");
		return -EINVAL;
	}

	ret = _set_opp_voltage(dev, reg, new_opp->supplies);
	if (ret)
		return ret;

	/*
	 * Enable the regulator after setting its voltages, otherwise it breaks
	 * some boot-enabled regulators.
	 */
	if (unlikely(!new_opp->opp_table->enabled)) {
		ret = regulator_enable(reg);
		if (ret < 0)
			dev_warn(dev, "Failed to enable regulator: %d\n", ret);
	}

	return 0;
}

static int _set_opp_bw(const struct opp_table *opp_table,
		       struct dev_pm_opp *opp, struct device *dev)
{
	u32 avg, peak;
	int i, ret;

	if (!opp_table->paths)
		return 0;

	for (i = 0; i < opp_table->path_count; i++) {
		if (!opp) {
			avg = 0;
			peak = 0;
		} else {
			avg = opp->bandwidth[i].avg;
			peak = opp->bandwidth[i].peak;
		}
		ret = icc_set_bw(opp_table->paths[i], avg, peak);
		if (ret) {
			dev_err(dev, "Failed to %s bandwidth[%d]: %d\n",
				opp ? "set" : "remove", i, ret);
			return ret;
		}
	}

	return 0;
}

static int _set_opp_level(struct device *dev, struct dev_pm_opp *opp)
{
	unsigned int level = 0;
	int ret = 0;

	if (opp) {
		if (opp->level == OPP_LEVEL_UNSET)
			return 0;

		level = opp->level;
	}

	/* Request a new performance state through the device's PM domain. */
	ret = dev_pm_domain_set_performance_state(dev, level);
	if (ret)
		dev_err(dev, "Failed to set performance state %u (%d)\n", level,
			ret);

	return ret;
}

/* This is only called for PM domain for now */
static int _set_required_opps(struct device *dev, struct opp_table *opp_table,
			      struct dev_pm_opp *opp, bool up)
{
	struct device **devs = opp_table->required_devs;
	struct dev_pm_opp *required_opp;
	int index, target, delta, ret;

	if (!devs)
		return 0;

	/* required-opps not fully initialized yet */
	if (lazy_linking_pending(opp_table))
		return -EBUSY;

	/* Scaling up? Set required OPPs in normal order, else reverse */
	if (up) {
		index = 0;
		target = opp_table->required_opp_count;
		delta = 1;
	} else {
		index = opp_table->required_opp_count - 1;
		target = -1;
		delta = -1;
	}

	while (index != target) {
		if (devs[index]) {
			required_opp = opp ? opp->required_opps[index] : NULL;

			ret = _set_opp_level(devs[index], required_opp);
			if (ret)
				return ret;
		}

		index += delta;
	}

	return 0;
}

static void _find_current_opp(struct device *dev, struct opp_table *opp_table)
{
	struct dev_pm_opp *opp = ERR_PTR(-ENODEV);
	unsigned long freq;

	if (!IS_ERR(opp_table->clk)) {
		freq = clk_get_rate(opp_table->clk);
		opp = _find_freq_ceil(opp_table, &freq);
	}

	/*
	 * Unable to find the current OPP? Pick the first from the list since
	 * it is in ascending order, otherwise the rest of the code will need
	 * to make special checks to validate current_opp.
	 */
	if (IS_ERR(opp)) {
		guard(mutex)(&opp_table->lock);
		opp = dev_pm_opp_get(list_first_entry(&opp_table->opp_list,
						      struct dev_pm_opp, node));
	}

	opp_table->current_opp = opp;
}

static int _disable_opp_table(struct device *dev, struct opp_table *opp_table)
{
	int ret;

	if (!opp_table->enabled)
		return 0;

	/*
	 * Some drivers need to support cases where some platforms may
	 * have OPP table for the device, while others don't and
	 * opp_set_rate() just needs to behave like clk_set_rate().
	 */
	if (!_get_opp_count(opp_table))
		return 0;

	ret = _set_opp_bw(opp_table, NULL, dev);
	if (ret)
		return ret;

	if (opp_table->regulators)
		regulator_disable(opp_table->regulators[0]);

	ret = _set_opp_level(dev, NULL);
	if (ret)
		goto out;

	ret = _set_required_opps(dev, opp_table, NULL, false);

out:
	opp_table->enabled = false;
	return ret;
}

static int _set_opp(struct device *dev, struct opp_table *opp_table,
		    struct dev_pm_opp *opp, void *clk_data, bool forced)
{
	struct dev_pm_opp *old_opp;
	int scaling_down, ret;

	if (unlikely(!opp))
		return _disable_opp_table(dev, opp_table);

	/* Find the currently set OPP if we don't know already */
	if (unlikely(!opp_table->current_opp))
		_find_current_opp(dev, opp_table);

	old_opp = opp_table->current_opp;

	/* Return early if nothing to do */
	if (!forced && old_opp == opp && opp_table->enabled) {
		dev_dbg_ratelimited(dev, "%s: OPPs are same, nothing to do\n", __func__);
		return 0;
	}

	dev_dbg(dev, "%s: switching OPP: Freq %lu -> %lu Hz, Level %u -> %u, Bw %u -> %u\n",
		__func__, old_opp->rates[0], opp->rates[0], old_opp->level,
		opp->level, old_opp->bandwidth ? old_opp->bandwidth[0].peak : 0,
		opp->bandwidth ? opp->bandwidth[0].peak : 0);

	scaling_down = _opp_compare_key(opp_table, old_opp, opp);
	if (scaling_down == -1)
		scaling_down = 0;

	/* Scaling up? Configure required OPPs before frequency */
	if (!scaling_down) {
		ret = _set_required_opps(dev, opp_table, opp, true);
		if (ret) {
			dev_err(dev, "Failed to set required opps: %d\n", ret);
			return ret;
		}

		ret = _set_opp_level(dev, opp);
		if (ret)
			return ret;

		ret = _set_opp_bw(opp_table, opp, dev);
		if (ret) {
			dev_err(dev, "Failed to set bw: %d\n", ret);
			return ret;
		}

		if (opp_table->config_regulators) {
			ret = opp_table->config_regulators(dev, old_opp, opp,
							   opp_table->regulators,
							   opp_table->regulator_count);
			if (ret) {
				dev_err(dev, "Failed to set regulator voltages: %d\n",
					ret);
				return ret;
			}
		}
	}

	if (opp_table->config_clks) {
		ret = opp_table->config_clks(dev, opp_table, opp, clk_data, scaling_down);
		if (ret)
			return ret;
	}

	/* Scaling down? Configure required OPPs after frequency */
	if (scaling_down) {
		if (opp_table->config_regulators) {
			ret = opp_table->config_regulators(dev, old_opp, opp,
							   opp_table->regulators,
							   opp_table->regulator_count);
			if (ret) {
				dev_err(dev, "Failed to set regulator voltages: %d\n",
					ret);
				return ret;
			}
		}

		ret = _set_opp_bw(opp_table, opp, dev);
		if (ret) {
			dev_err(dev, "Failed to set bw: %d\n", ret);
			return ret;
		}

		ret = _set_opp_level(dev, opp);
		if (ret)
			return ret;

		ret = _set_required_opps(dev, opp_table, opp, false);
		if (ret) {
			dev_err(dev, "Failed to set required opps: %d\n", ret);
			return ret;
		}
	}

	opp_table->enabled = true;
	dev_pm_opp_put(old_opp);

	/* Make sure current_opp doesn't get freed */
	opp_table->current_opp = dev_pm_opp_get(opp);

	return ret;
}

/**
 * dev_pm_opp_set_rate() - Configure new OPP based on frequency
 * @dev:	 device for which we do this operation
 * @target_freq: frequency to achieve
 *
 * This configures the power-supplies to the levels specified by the OPP
 * corresponding to the target_freq, and programs the clock to a value <=
 * target_freq, as rounded by clk_round_rate(). A device wanting to run at
 * the fmax provided by the opp should have already rounded to the target
 * OPP's frequency.
 */
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
	struct dev_pm_opp *opp __free(put_opp) = NULL;
	unsigned long freq = 0, temp_freq;
	bool forced = false;

	struct opp_table *opp_table __free(put_opp_table) =
		_find_opp_table(dev);

	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: device's opp table doesn't exist\n", __func__);
		return PTR_ERR(opp_table);
	}

	if (target_freq) {
		/*
		 * For IO devices which require an OPP on some platforms/SoCs
		 * while just needing to scale the clock on some others,
		 * we look for empty OPP tables with just a clock handle and
		 * scale only the clk. This makes dev_pm_opp_set_rate()
		 * equivalent to a clk_set_rate().
		 */
		if (!_get_opp_count(opp_table)) {
			return opp_table->config_clks(dev, opp_table, NULL,
						      &target_freq, false);
		}

		freq = clk_round_rate(opp_table->clk, target_freq);
		if ((long)freq <= 0)
			freq = target_freq;

		/*
		 * The clock driver may support finer resolution of the
		 * frequencies than the OPP table, don't update the frequency we
		 * pass to clk_set_rate() here.
		 */
		temp_freq = freq;
		opp = _find_freq_ceil(opp_table, &temp_freq);
		if (IS_ERR(opp)) {
			dev_err(dev, "%s: failed to find OPP for freq %lu (%ld)\n",
				__func__, freq, PTR_ERR(opp));
			return PTR_ERR(opp);
		}

		/*
		 * An OPP entry specifies the highest frequency at which other
		 * properties of the OPP entry apply. Even if the new OPP is
		 * the same as the old one, we may still reach here for a
		 * different value of the frequency. In such a case, do not
		 * abort but configure the hardware to the desired frequency
		 * forcefully.
		 */
		forced = opp_table->current_rate_single_clk != freq;
	}

	return _set_opp(dev, opp_table, opp, &freq, forced);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
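
/*
 * Hedged consumer sketch (hypothetical helper): scale to a target rate, then
 * pass 0 to drop into the lowest-power state, which disables the OPP table
 * as described in _disable_opp_table() above.
 */
static int example_scale_then_idle(struct device *dev, unsigned long hz)
{
	int ret = dev_pm_opp_set_rate(dev, hz);

	if (ret)
		return ret;

	/* ... perform work at the requested rate ... */

	return dev_pm_opp_set_rate(dev, 0);
}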

/**
 * dev_pm_opp_set_opp() - Configure device for OPP
 * @dev: device for which we do this operation
 * @opp: OPP to set to
 *
 * This configures the device based on the properties of the OPP passed to this
 * routine.
 *
 * Return: 0 on success, a negative error number otherwise.
 */
int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp)
{
	struct opp_table *opp_table __free(put_opp_table) =
		_find_opp_table(dev);

	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: device's opp table doesn't exist\n", __func__);
		return PTR_ERR(opp_table);
	}

	return _set_opp(dev, opp_table, opp, NULL, false);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_opp);
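
/*
 * Hedged sketch (hypothetical helper): pin the device to an exact OPP; the
 * __free(put_opp) annotation releases the OPP reference on return.
 */
static int example_pin_to_opp(struct device *dev, unsigned long hz)
{
	struct dev_pm_opp *opp __free(put_opp) =
		dev_pm_opp_find_freq_exact(dev, hz, true);

	if (IS_ERR(opp))
		return PTR_ERR(opp);

	return dev_pm_opp_set_opp(dev, opp);
}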

/* OPP-dev Helpers */
static void _remove_opp_dev(struct opp_device *opp_dev,
			    struct opp_table *opp_table)
{
	opp_debug_unregister(opp_dev, opp_table);
	list_del(&opp_dev->node);
	kfree(opp_dev);
}

struct opp_device *_add_opp_dev(const struct device *dev,
				struct opp_table *opp_table)
{
	struct opp_device *opp_dev;

	opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
	if (!opp_dev)
		return NULL;

	/* Initialize opp-dev */
	opp_dev->dev = dev;

	scoped_guard(mutex, &opp_table->lock)
		list_add(&opp_dev->node, &opp_table->dev_list);

	/* Create debugfs entries for the opp_table */
	opp_debug_register(opp_dev, opp_table);

	return opp_dev;
}

static struct opp_table *_allocate_opp_table(struct device *dev, int index)
{
	struct opp_table *opp_table;
	struct opp_device *opp_dev;
	int ret;

	/*
	 * Allocate a new OPP table. In the infrequent case where a new
	 * device needs to be added, we pay this penalty.
	 */
	opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
	if (!opp_table)
		return ERR_PTR(-ENOMEM);

	mutex_init(&opp_table->lock);
	INIT_LIST_HEAD(&opp_table->dev_list);
	INIT_LIST_HEAD(&opp_table->lazy);

	opp_table->clk = ERR_PTR(-ENODEV);

	/* Mark regulator count uninitialized */
	opp_table->regulator_count = -1;

	opp_dev = _add_opp_dev(dev, opp_table);
	if (!opp_dev) {
		ret = -ENOMEM;
		goto err;
	}

	_of_init_opp_table(opp_table, dev, index);

	/* Find interconnect path(s) for the device */
	ret = dev_pm_opp_of_find_icc_paths(dev, opp_table);
	if (ret) {
		if (ret == -EPROBE_DEFER)
			goto remove_opp_dev;

		dev_warn(dev, "%s: Error finding interconnect paths: %d\n",
			 __func__, ret);
	}

	BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head);
	INIT_LIST_HEAD(&opp_table->opp_list);
	kref_init(&opp_table->kref);

	return opp_table;

remove_opp_dev:
	_of_clear_opp_table(opp_table);
	_remove_opp_dev(opp_dev, opp_table);
	mutex_destroy(&opp_table->lock);
err:
	kfree(opp_table);
	return ERR_PTR(ret);
}

static struct opp_table *_update_opp_table_clk(struct device *dev,
					       struct opp_table *opp_table,
					       bool getclk)
{
	int ret;

	/*
	 * Return early if we don't need to get clk or we have already done it
	 * earlier.
	 */
	if (!getclk || IS_ERR(opp_table) || !IS_ERR(opp_table->clk) ||
	    opp_table->clks)
		return opp_table;

	/* Find clk for the device */
	opp_table->clk = clk_get(dev, NULL);

	ret = PTR_ERR_OR_ZERO(opp_table->clk);
	if (!ret) {
		opp_table->config_clks = _opp_config_clk_single;
		opp_table->clk_count = 1;
		return opp_table;
	}

	if (ret == -ENOENT) {
		/*
		 * There are a few platforms which don't want the OPP core to
		 * manage the device's clock settings. In such cases neither
		 * the platform provides the clks explicitly to us, nor does
		 * the DT contain a valid clk entry. The OPP nodes in DT may
		 * still contain an "opp-hz" property though, which we need to
		 * parse to allow the platform to find an OPP based on freq
		 * later on.
		 *
		 * This is a simple solution to take care of such corner cases,
		 * i.e. make the clk_count 1, which lets us allocate space for
		 * frequency in opp->rates and also parse the entries in DT.
		 */
		opp_table->clk_count = 1;

		dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, ret);
		return opp_table;
	}

	dev_pm_opp_put_opp_table(opp_table);
	dev_err_probe(dev, ret, "Couldn't find clock\n");

	return ERR_PTR(ret);
}

/*
 * We need to make sure that the OPP table for a device doesn't get added twice,
 * if this routine gets called in parallel with the same device pointer.
 *
 * The simplest way to enforce that is to perform everything (find existing
 * table and if not found, create a new one) under the opp_table_lock, so only
 * one creator gets access to it. But that expands the critical section under
 * the lock and may end up causing circular dependencies with frameworks like
 * debugfs, interconnect or clock framework as they may be direct or indirect
 * users of OPP core.
 *
 * And for that reason we have to go for a bit tricky implementation here, which
 * uses the opp_tables_busy flag to indicate if another creator is in the middle
 * of adding an OPP table and others should wait for it to finish.
 */
1645 struct opp_table *_add_opp_table_indexed(struct device *dev, int index,
1646 					 bool getclk)
1647 {
1648 	struct opp_table *opp_table;
1649 
1650 again:
1651 	mutex_lock(&opp_table_lock);
1652 
1653 	opp_table = _find_opp_table_unlocked(dev);
1654 	if (!IS_ERR(opp_table))
1655 		goto unlock;
1656 
1657 	/*
1658 	 * The opp_tables list or an OPP table's dev_list is getting updated by
1659 	 * another user, wait for it to finish.
1660 	 */
1661 	if (unlikely(opp_tables_busy)) {
1662 		mutex_unlock(&opp_table_lock);
1663 		cpu_relax();
1664 		goto again;
1665 	}
1666 
1667 	opp_tables_busy = true;
1668 	opp_table = _managed_opp(dev, index);
1669 
1670 	/* Drop the lock to reduce the size of critical section */
1671 	mutex_unlock(&opp_table_lock);
1672 
1673 	if (opp_table) {
1674 		if (!_add_opp_dev(dev, opp_table)) {
1675 			dev_pm_opp_put_opp_table(opp_table);
1676 			opp_table = ERR_PTR(-ENOMEM);
1677 		}
1678 
1679 		mutex_lock(&opp_table_lock);
1680 	} else {
1681 		opp_table = _allocate_opp_table(dev, index);
1682 
1683 		mutex_lock(&opp_table_lock);
1684 		if (!IS_ERR(opp_table))
1685 			list_add(&opp_table->node, &opp_tables);
1686 	}
1687 
1688 	opp_tables_busy = false;
1689 
1690 unlock:
1691 	mutex_unlock(&opp_table_lock);
1692 
1693 	return _update_opp_table_clk(dev, opp_table, getclk);
1694 }
1695 
1696 static struct opp_table *_add_opp_table(struct device *dev, bool getclk)
1697 {
1698 	return _add_opp_table_indexed(dev, 0, getclk);
1699 }
1700 
1701 struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
1702 {
1703 	return _find_opp_table(dev);
1704 }
1705 EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table);
1706 
1707 static void _opp_table_kref_release(struct kref *kref)
1708 {
1709 	struct opp_table *opp_table = container_of(kref, struct opp_table, kref);
1710 	struct opp_device *opp_dev, *temp;
1711 	int i;
1712 
1713 	/* Drop the lock as soon as we can */
1714 	list_del(&opp_table->node);
1715 	mutex_unlock(&opp_table_lock);
1716 
1717 	if (opp_table->current_opp)
1718 		dev_pm_opp_put(opp_table->current_opp);
1719 
1720 	_of_clear_opp_table(opp_table);
1721 
1722 	/* Release automatically acquired single clk */
1723 	if (!IS_ERR(opp_table->clk))
1724 		clk_put(opp_table->clk);
1725 
1726 	if (opp_table->paths) {
1727 		for (i = 0; i < opp_table->path_count; i++)
1728 			icc_put(opp_table->paths[i]);
1729 		kfree(opp_table->paths);
1730 	}
1731 
1732 	WARN_ON(!list_empty(&opp_table->opp_list));
1733 
1734 	list_for_each_entry_safe(opp_dev, temp, &opp_table->dev_list, node)
1735 		_remove_opp_dev(opp_dev, opp_table);
1736 
1737 	mutex_destroy(&opp_table->lock);
1738 	kfree(opp_table);
1739 }
1740 
1741 struct opp_table *dev_pm_opp_get_opp_table_ref(struct opp_table *opp_table)
1742 {
1743 	kref_get(&opp_table->kref);
1744 	return opp_table;
1745 }
1746 EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table_ref);
1747 
1748 void dev_pm_opp_put_opp_table(struct opp_table *opp_table)
1749 {
1750 	kref_put_mutex(&opp_table->kref, _opp_table_kref_release,
1751 		       &opp_table_lock);
1752 }
1753 EXPORT_SYMBOL_GPL(dev_pm_opp_put_opp_table);
1754 
1755 void _opp_free(struct dev_pm_opp *opp)
1756 {
1757 	kfree(opp);
1758 }
1759 
1760 static void _opp_kref_release(struct kref *kref)
1761 {
1762 	struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
1763 	struct opp_table *opp_table = opp->opp_table;
1764 
1765 	list_del(&opp->node);
1766 	mutex_unlock(&opp_table->lock);
1767 
1768 	/*
1769 	 * Notify the changes in the availability of the operable
1770 	 * frequency/voltage list.
1771 	 */
1772 	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_REMOVE, opp);
1773 	_of_clear_opp(opp_table, opp);
1774 	opp_debug_remove_one(opp);
1775 	kfree(opp);
1776 }
1777 
1778 struct dev_pm_opp *dev_pm_opp_get(struct dev_pm_opp *opp)
1779 {
1780 	kref_get(&opp->kref);
1781 	return opp;
1782 }
1783 EXPORT_SYMBOL_GPL(dev_pm_opp_get);
1784 
1785 void dev_pm_opp_put(struct dev_pm_opp *opp)
1786 {
1787 	kref_put_mutex(&opp->kref, _opp_kref_release, &opp->opp_table->lock);
1788 }
1789 EXPORT_SYMBOL_GPL(dev_pm_opp_put);
1790 
1791 /**
1792  * dev_pm_opp_remove()  - Remove an OPP from OPP table
1793  * @dev:	device for which we do this operation
1794  * @freq:	OPP to remove with matching 'freq'
1795  *
1796  * This function removes an OPP from the OPP table.
1797  */
1798 void dev_pm_opp_remove(struct device *dev, unsigned long freq)
1799 {
1800 	struct dev_pm_opp *opp = NULL, *iter;
1801 
1802 	struct opp_table *opp_table __free(put_opp_table) =
1803 		_find_opp_table(dev);
1804 
1805 	if (IS_ERR(opp_table))
1806 		return;
1807 
1808 	if (!assert_single_clk(opp_table, 0))
1809 		return;
1810 
1811 	scoped_guard(mutex, &opp_table->lock) {
1812 		list_for_each_entry(iter, &opp_table->opp_list, node) {
1813 			if (iter->rates[0] == freq) {
1814 				opp = iter;
1815 				break;
1816 			}
1817 		}
1818 	}
1819 
1820 	if (opp) {
1821 		dev_pm_opp_put(opp);
1822 
1823 		/* Drop the reference taken by dev_pm_opp_add() */
1824 		dev_pm_opp_put_opp_table(opp_table);
1825 	} else {
1826 		dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
1827 			 __func__, freq);
1828 	}
1829 }
1830 EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
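
/*
 * Illustrative sketch (editor's addition): removing a previously added OPP by
 * frequency; as the code above shows, this also drops the reference taken
 * when the OPP was added. The 800 MHz value is made up.
 *
 *	dev_pm_opp_remove(dev, 800000000);
 */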
1831 
1832 static struct dev_pm_opp *_opp_get_next(struct opp_table *opp_table,
1833 					bool dynamic)
1834 {
1835 	struct dev_pm_opp *opp;
1836 
1837 	guard(mutex)(&opp_table->lock);
1838 
1839 	list_for_each_entry(opp, &opp_table->opp_list, node) {
1840 		/*
1841 		 * The refcount must be dropped only once for each OPP by the
1842 		 * OPP core; do that with the help of the "removed" flag.
1843 		 */
1844 		if (!opp->removed && dynamic == opp->dynamic)
1845 			return opp;
1846 	}
1847 
1848 	return NULL;
1849 }
1850 
1851 /*
1852  * Can't call dev_pm_opp_put() from under the lock as debugfs removal needs to
1853  * happen locklessly to avoid circular dependency issues. This routine must be
1854  * called without the opp_table->lock held.
1855  */
1856 static void _opp_remove_all(struct opp_table *opp_table, bool dynamic)
1857 {
1858 	struct dev_pm_opp *opp;
1859 
1860 	while ((opp = _opp_get_next(opp_table, dynamic))) {
1861 		opp->removed = true;
1862 		dev_pm_opp_put(opp);
1863 
1864 		/* Drop the references taken by dev_pm_opp_add() */
1865 		if (dynamic)
1866 			dev_pm_opp_put_opp_table(opp_table);
1867 	}
1868 }
1869 
1870 bool _opp_remove_all_static(struct opp_table *opp_table)
1871 {
1872 	scoped_guard(mutex, &opp_table->lock) {
1873 		if (!opp_table->parsed_static_opps)
1874 			return false;
1875 
1876 		if (--opp_table->parsed_static_opps)
1877 			return true;
1878 	}
1879 
1880 	_opp_remove_all(opp_table, false);
1881 	return true;
1882 }
1883 
1884 /**
1885  * dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs
1886  * @dev:	device for which we do this operation
1887  *
1888  * This function removes all dynamically created OPPs from the opp table.
1889  */
1890 void dev_pm_opp_remove_all_dynamic(struct device *dev)
1891 {
1892 	struct opp_table *opp_table __free(put_opp_table) =
1893 		_find_opp_table(dev);
1894 
1895 	if (IS_ERR(opp_table))
1896 		return;
1897 
1898 	_opp_remove_all(opp_table, true);
1899 }
1900 EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic);
1901 
1902 struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table)
1903 {
1904 	struct dev_pm_opp *opp;
1905 	int supply_count, supply_size, icc_size, clk_size;
1906 
1907 	/* Allocate space for at least one supply */
1908 	supply_count = opp_table->regulator_count > 0 ?
1909 			opp_table->regulator_count : 1;
1910 	supply_size = sizeof(*opp->supplies) * supply_count;
1911 	clk_size = sizeof(*opp->rates) * opp_table->clk_count;
1912 	icc_size = sizeof(*opp->bandwidth) * opp_table->path_count;
1913 
1914 	/* allocate new OPP node and supplies structures */
1915 	opp = kzalloc(sizeof(*opp) + supply_size + clk_size + icc_size, GFP_KERNEL);
1916 	if (!opp)
1917 		return NULL;
1918 
1919 	/* Put the supplies, bw and clock at the end of the OPP structure */
1920 	opp->supplies = (struct dev_pm_opp_supply *)(opp + 1);
1921 
1922 	opp->rates = (unsigned long *)(opp->supplies + supply_count);
1923 
1924 	if (icc_size)
1925 		opp->bandwidth = (struct dev_pm_opp_icc_bw *)(opp->rates + opp_table->clk_count);
1926 
1927 	INIT_LIST_HEAD(&opp->node);
1928 
1929 	opp->level = OPP_LEVEL_UNSET;
1930 
1931 	return opp;
1932 }
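
/*
 * Editor's note: the single kzalloc() above lays the buffers out back to
 * back, roughly:
 *
 *	| struct dev_pm_opp | supplies[supply_count] | rates[clk_count] |
 *	| bandwidth[path_count] (only when path_count > 0) |
 *
 * so opp->supplies, opp->rates and opp->bandwidth all point into the same
 * allocation and are freed together with the OPP itself.
 */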
1933 
1934 static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
1935 					 struct opp_table *opp_table)
1936 {
1937 	struct regulator *reg;
1938 	int i;
1939 
1940 	if (!opp_table->regulators)
1941 		return true;
1942 
1943 	for (i = 0; i < opp_table->regulator_count; i++) {
1944 		reg = opp_table->regulators[i];
1945 
1946 		if (!regulator_is_supported_voltage(reg,
1947 					opp->supplies[i].u_volt_min,
1948 					opp->supplies[i].u_volt_max)) {
1949 			pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
1950 				__func__, opp->supplies[i].u_volt_min,
1951 				opp->supplies[i].u_volt_max);
1952 			return false;
1953 		}
1954 	}
1955 
1956 	return true;
1957 }
1958 
1959 static int _opp_compare_rate(struct opp_table *opp_table,
1960 			     struct dev_pm_opp *opp1, struct dev_pm_opp *opp2)
1961 {
1962 	int i;
1963 
1964 	for (i = 0; i < opp_table->clk_count; i++) {
1965 		if (opp1->rates[i] != opp2->rates[i])
1966 			return opp1->rates[i] < opp2->rates[i] ? -1 : 1;
1967 	}
1968 
1969 	/* Same rates for both OPPs */
1970 	return 0;
1971 }
1972 
1973 static int _opp_compare_bw(struct opp_table *opp_table, struct dev_pm_opp *opp1,
1974 			   struct dev_pm_opp *opp2)
1975 {
1976 	int i;
1977 
1978 	for (i = 0; i < opp_table->path_count; i++) {
1979 		if (opp1->bandwidth[i].peak != opp2->bandwidth[i].peak)
1980 			return opp1->bandwidth[i].peak < opp2->bandwidth[i].peak ? -1 : 1;
1981 	}
1982 
1983 	/* Same bw for both OPPs */
1984 	return 0;
1985 }
1986 
1987 /*
1988  * Returns:
1989  * 0: opp1 == opp2
1990  * 1: opp1 > opp2
1991  * -1: opp1 < opp2
1992  */
1993 int _opp_compare_key(struct opp_table *opp_table, struct dev_pm_opp *opp1,
1994 		     struct dev_pm_opp *opp2)
1995 {
1996 	int ret;
1997 
1998 	ret = _opp_compare_rate(opp_table, opp1, opp2);
1999 	if (ret)
2000 		return ret;
2001 
2002 	ret = _opp_compare_bw(opp_table, opp1, opp2);
2003 	if (ret)
2004 		return ret;
2005 
2006 	if (opp1->level != opp2->level)
2007 		return opp1->level < opp2->level ? -1 : 1;
2008 
2009 	/* Duplicate OPPs */
2010 	return 0;
2011 }
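
/*
 * Editor's note, worked example: with a single clock and no interconnect
 * paths, an OPP A with rates[0] = 1 GHz, level = 1 and an OPP B with
 * rates[0] = 1 GHz, level = 2 compare equal on rate and bandwidth, so
 * _opp_compare_key() falls through to the levels and returns -1 (A < B).
 * Only OPPs whose rate, bandwidth and level all match are duplicates.
 */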
2012 
2013 static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp,
2014 			     struct opp_table *opp_table,
2015 			     struct list_head **head)
2016 {
2017 	struct dev_pm_opp *opp;
2018 	int opp_cmp;
2019 
2020 	/*
2021 	 * Insert new OPP in order of increasing frequency and discard if
2022 	 * already present.
2023 	 *
2024 	 * We need to use &opp_table->opp_list in the condition part of the
2025 	 * 'for' loop; don't replace it with 'head', otherwise it will become
2026 	 * an infinite loop.
2027 	 */
2028 	list_for_each_entry(opp, &opp_table->opp_list, node) {
2029 		opp_cmp = _opp_compare_key(opp_table, new_opp, opp);
2030 		if (opp_cmp > 0) {
2031 			*head = &opp->node;
2032 			continue;
2033 		}
2034 
2035 		if (opp_cmp < 0)
2036 			return 0;
2037 
2038 		/* Duplicate OPPs */
2039 		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
2040 			 __func__, opp->rates[0], opp->supplies[0].u_volt,
2041 			 opp->available, new_opp->rates[0],
2042 			 new_opp->supplies[0].u_volt, new_opp->available);
2043 
2044 		/* Should we compare voltages for all regulators here? */
2045 		return opp->available &&
2046 		       new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? -EBUSY : -EEXIST;
2047 	}
2048 
2049 	return 0;
2050 }
2051 
2052 void _required_opps_available(struct dev_pm_opp *opp, int count)
2053 {
2054 	int i;
2055 
2056 	for (i = 0; i < count; i++) {
2057 		if (opp->required_opps[i]->available)
2058 			continue;
2059 
2060 		opp->available = false;
2061 		pr_warn("%s: OPP not supported by required OPP %pOF (%lu)\n",
2062 			 __func__, opp->required_opps[i]->np, opp->rates[0]);
2063 		return;
2064 	}
2065 }
2066 
2067 /*
2068  * Returns:
2069  * 0: On success.
2070  * -EBUSY: For an OPP with the same freq/volt which is already available. The
2071  *  callers of _opp_add() must return 0 if they receive -EBUSY from it. This is
2072  *  to make sure we don't print error messages unnecessarily if different parts
2073  *  of the kernel try to initialize the OPP table.
2074  * -EEXIST: For an OPP with the same freq but a different volt, or one that is
2075  *  unavailable. This should be considered an error by the callers of _opp_add().
2076  */
2077 int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
2078 	     struct opp_table *opp_table)
2079 {
2080 	struct list_head *head;
2081 	int ret;
2082 
2083 	scoped_guard(mutex, &opp_table->lock) {
2084 		head = &opp_table->opp_list;
2085 
2086 		ret = _opp_is_duplicate(dev, new_opp, opp_table, &head);
2087 		if (ret)
2088 			return ret;
2089 
2090 		list_add(&new_opp->node, head);
2091 	}
2092 
2093 	new_opp->opp_table = opp_table;
2094 	kref_init(&new_opp->kref);
2095 
2096 	opp_debug_create_one(new_opp, opp_table);
2097 
2098 	if (!_opp_supported_by_regulators(new_opp, opp_table)) {
2099 		new_opp->available = false;
2100 		dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
2101 			 __func__, new_opp->rates[0]);
2102 	}
2103 
2104 	/* required-opps not fully initialized yet */
2105 	if (lazy_linking_pending(opp_table))
2106 		return 0;
2107 
2108 	_required_opps_available(new_opp, opp_table->required_opp_count);
2109 
2110 	return 0;
2111 }
2112 
2113 /**
2114  * _opp_add_v1() - Allocate an OPP based on v1 bindings.
2115  * @opp_table:	OPP table
2116  * @dev:	device for which we do this operation
2117  * @data:	The OPP data for the OPP to add
2118  * @dynamic:	True for dynamically added OPPs.
2119  *
2120  * This function adds an opp definition to the opp table and returns status.
2121  * The opp is made available by default and it can be controlled using
2122  * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
2123  *
2124  * NOTE: The "dynamic" parameter distinguishes dynamically added OPPs from static
2125  * ones added by dev_pm_opp_of_add_table and freed by dev_pm_opp_of_remove_table.
2126  *
2127  * Return:
2128  * 0		On success, OR
2129  *		duplicate OPPs (both freq and volt are the same) and opp->available
2130  * -EEXIST	Freqs are equal but volts differ, OR
2131  *		duplicate OPPs (both freq and volt are the same) and !opp->available
2132  * -ENOMEM	Memory allocation failure
2133  */
2134 int _opp_add_v1(struct opp_table *opp_table, struct device *dev,
2135 		struct dev_pm_opp_data *data, bool dynamic)
2136 {
2137 	struct dev_pm_opp *new_opp;
2138 	unsigned long tol, u_volt = data->u_volt;
2139 	int ret;
2140 
2141 	if (!assert_single_clk(opp_table, 0))
2142 		return -EINVAL;
2143 
2144 	new_opp = _opp_allocate(opp_table);
2145 	if (!new_opp)
2146 		return -ENOMEM;
2147 
2148 	/* Populate the new OPP */
2149 	new_opp->rates[0] = data->freq;
2150 	new_opp->level = data->level;
2151 	new_opp->turbo = data->turbo;
2152 	tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
2153 	new_opp->supplies[0].u_volt = u_volt;
2154 	new_opp->supplies[0].u_volt_min = u_volt - tol;
2155 	new_opp->supplies[0].u_volt_max = u_volt + tol;
2156 	new_opp->available = true;
2157 	new_opp->dynamic = dynamic;
2158 
2159 	ret = _opp_add(dev, new_opp, opp_table);
2160 	if (ret) {
2161 		/* Don't return error for duplicate OPPs */
2162 		if (ret == -EBUSY)
2163 			ret = 0;
2164 		goto free_opp;
2165 	}
2166 
2167 	/*
2168 	 * Notify the changes in the availability of the operable
2169 	 * frequency/voltage list.
2170 	 */
2171 	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp);
2172 	return 0;
2173 
2174 free_opp:
2175 	_opp_free(new_opp);
2176 
2177 	return ret;
2178 }
2179 
2180 /*
2181  * This is required only for the V2 bindings, and it enables a platform to
2182  * specify the hierarchy of versions it supports. The OPP layer will then
2183  * enable the OPPs available for those versions, based on the 'opp-supported-hw'
2184  * property.
2185  */
2186 static int _opp_set_supported_hw(struct opp_table *opp_table,
2187 				 const u32 *versions, unsigned int count)
2188 {
2189 	/* Has another CPU that shares the OPP table already set the property? */
2190 	if (opp_table->supported_hw)
2191 		return 0;
2192 
2193 	opp_table->supported_hw = kmemdup_array(versions, count,
2194 						sizeof(*versions), GFP_KERNEL);
2195 	if (!opp_table->supported_hw)
2196 		return -ENOMEM;
2197 
2198 	opp_table->supported_hw_count = count;
2199 
2200 	return 0;
2201 }
2202 
2203 static void _opp_put_supported_hw(struct opp_table *opp_table)
2204 {
2205 	if (opp_table->supported_hw) {
2206 		kfree(opp_table->supported_hw);
2207 		opp_table->supported_hw = NULL;
2208 		opp_table->supported_hw_count = 0;
2209 	}
2210 }
2211 
2212 /*
2213  * This is required only for the V2 bindings, and it enables a platform to
2214  * specify the extension to be used for certain property names. The properties
2215  * to which the extension applies are opp-microvolt and opp-microamp. The OPP
2216  * core will suffix the property name with -<name> while looking for them.
2217  */
2218 static int _opp_set_prop_name(struct opp_table *opp_table, const char *name)
2219 {
2220 	/* Has another CPU that shares the OPP table already set the property? */
2221 	if (!opp_table->prop_name) {
2222 		opp_table->prop_name = kstrdup(name, GFP_KERNEL);
2223 		if (!opp_table->prop_name)
2224 			return -ENOMEM;
2225 	}
2226 
2227 	return 0;
2228 }
2229 
2230 static void _opp_put_prop_name(struct opp_table *opp_table)
2231 {
2232 	if (opp_table->prop_name) {
2233 		kfree(opp_table->prop_name);
2234 		opp_table->prop_name = NULL;
2235 	}
2236 }
2237 
2238 /*
2239  * In order to support OPP switching, the OPP layer needs to know the names of
2240  * the device's regulators, as the core will be required to switch voltages as
2241  * well.
2242  *
2243  * This must be called before any OPPs are initialized for the device.
2244  */
2245 static int _opp_set_regulators(struct opp_table *opp_table, struct device *dev,
2246 			       const char * const names[])
2247 {
2248 	const char * const *temp = names;
2249 	struct regulator *reg;
2250 	int count = 0, ret, i;
2251 
2252 	/* Count number of regulators */
2253 	while (*temp++)
2254 		count++;
2255 
2256 	if (!count)
2257 		return -EINVAL;
2258 
2259 	/* Has another CPU that shares the OPP table already set the regulators? */
2260 	if (opp_table->regulators)
2261 		return 0;
2262 
2263 	opp_table->regulators = kmalloc_array(count,
2264 					      sizeof(*opp_table->regulators),
2265 					      GFP_KERNEL);
2266 	if (!opp_table->regulators)
2267 		return -ENOMEM;
2268 
2269 	for (i = 0; i < count; i++) {
2270 		reg = regulator_get_optional(dev, names[i]);
2271 		if (IS_ERR(reg)) {
2272 			ret = dev_err_probe(dev, PTR_ERR(reg),
2273 					    "%s: no regulator (%s) found\n",
2274 					    __func__, names[i]);
2275 			goto free_regulators;
2276 		}
2277 
2278 		opp_table->regulators[i] = reg;
2279 	}
2280 
2281 	opp_table->regulator_count = count;
2282 
2283 	/* Set generic config_regulators() for single regulators here */
2284 	if (count == 1)
2285 		opp_table->config_regulators = _opp_config_regulator_single;
2286 
2287 	return 0;
2288 
2289 free_regulators:
2290 	while (i != 0)
2291 		regulator_put(opp_table->regulators[--i]);
2292 
2293 	kfree(opp_table->regulators);
2294 	opp_table->regulators = NULL;
2295 	opp_table->regulator_count = -1;
2296 
2297 	return ret;
2298 }
2299 
2300 static void _opp_put_regulators(struct opp_table *opp_table)
2301 {
2302 	int i;
2303 
2304 	if (!opp_table->regulators)
2305 		return;
2306 
2307 	if (opp_table->enabled) {
2308 		for (i = opp_table->regulator_count - 1; i >= 0; i--)
2309 			regulator_disable(opp_table->regulators[i]);
2310 	}
2311 
2312 	for (i = opp_table->regulator_count - 1; i >= 0; i--)
2313 		regulator_put(opp_table->regulators[i]);
2314 
2315 	kfree(opp_table->regulators);
2316 	opp_table->regulators = NULL;
2317 	opp_table->regulator_count = -1;
2318 }
2319 
2320 static void _put_clks(struct opp_table *opp_table, int count)
2321 {
2322 	int i;
2323 
2324 	for (i = count - 1; i >= 0; i--)
2325 		clk_put(opp_table->clks[i]);
2326 
2327 	kfree(opp_table->clks);
2328 	opp_table->clks = NULL;
2329 }
2330 
2331 /*
2332  * In order to support OPP switching, the OPP layer needs to get pointers to the
2333  * clocks for the device. Simple cases work fine without using this routine
2334  * (i.e. by passing connection-id as NULL), but for a device with multiple
2335  * clocks available, the OPP core needs to know the exact names of the clks to
2336  * use.
2337  *
2338  * This must be called before any OPPs are initialized for the device.
2339  */
2340 static int _opp_set_clknames(struct opp_table *opp_table, struct device *dev,
2341 			     const char * const names[],
2342 			     config_clks_t config_clks)
2343 {
2344 	const char * const *temp = names;
2345 	int count = 0, ret, i;
2346 	struct clk *clk;
2347 
2348 	/* Count number of clks */
2349 	while (*temp++)
2350 		count++;
2351 
2352 	/*
2353 	 * This is a special case where we have a single clock whose connection
2354 	 * id is NULL, i.e. the first two entries of the array are NULL.
2355 	 */
2356 	if (!count && !names[1])
2357 		count = 1;
2358 
2359 	/* Fail early for invalid configurations */
2360 	if (!count || (!config_clks && count > 1))
2361 		return -EINVAL;
2362 
2363 	/* Has another CPU that shares the OPP table already set the clk names? */
2364 	if (opp_table->clks)
2365 		return 0;
2366 
2367 	opp_table->clks = kmalloc_array(count, sizeof(*opp_table->clks),
2368 					GFP_KERNEL);
2369 	if (!opp_table->clks)
2370 		return -ENOMEM;
2371 
2372 	/* Find clks for the device */
2373 	for (i = 0; i < count; i++) {
2374 		clk = clk_get(dev, names[i]);
2375 		if (IS_ERR(clk)) {
2376 			ret = dev_err_probe(dev, PTR_ERR(clk),
2377 					    "%s: Couldn't find clock with name: %s\n",
2378 					    __func__, names[i]);
2379 			goto free_clks;
2380 		}
2381 
2382 		opp_table->clks[i] = clk;
2383 	}
2384 
2385 	opp_table->clk_count = count;
2386 	opp_table->config_clks = config_clks;
2387 
2388 	/* Set the generic single-clk helper here */
2389 	if (count == 1) {
2390 		if (!opp_table->config_clks)
2391 			opp_table->config_clks = _opp_config_clk_single;
2392 
2393 		/*
2394 		 * We could have just dropped the "clk" field and used "clks"
2395 		 * everywhere. Instead we kept the "clk" field around for the
2396 		 * following reasons:
2397 		 *
2398 		 * - avoiding clks[0] everywhere else.
2399 		 * - not running the single-clk helpers for the multiple-clk
2400 		 *   use case by mistake.
2401 		 *
2402 		 * Since this is the single-clk case, just update the clk
2403 		 * pointer too.
2404 		 */
2405 		opp_table->clk = opp_table->clks[0];
2406 	}
2407 
2408 	return 0;
2409 
2410 free_clks:
2411 	_put_clks(opp_table, i);
2412 	return ret;
2413 }
2414 
2415 static void _opp_put_clknames(struct opp_table *opp_table)
2416 {
2417 	if (!opp_table->clks)
2418 		return;
2419 
2420 	opp_table->config_clks = NULL;
2421 	opp_table->clk = ERR_PTR(-ENODEV);
2422 
2423 	_put_clks(opp_table, opp_table->clk_count);
2424 }
2425 
2426 /*
2427  * This is useful to support platforms with multiple regulators per device.
2428  *
2429  * This must be called before any OPPs are initialized for the device.
2430  */
2431 static int _opp_set_config_regulators_helper(struct opp_table *opp_table,
2432 		struct device *dev, config_regulators_t config_regulators)
2433 {
2434 	/* Has another CPU that shares the OPP table already set the helper? */
2435 	if (!opp_table->config_regulators)
2436 		opp_table->config_regulators = config_regulators;
2437 
2438 	return 0;
2439 }
2440 
2441 static void _opp_put_config_regulators_helper(struct opp_table *opp_table)
2442 {
2443 	if (opp_table->config_regulators)
2444 		opp_table->config_regulators = NULL;
2445 }
2446 
2447 static int _opp_set_required_dev(struct opp_table *opp_table,
2448 				 struct device *dev,
2449 				 struct device *required_dev,
2450 				 unsigned int index)
2451 {
2452 	struct opp_table *required_table, *pd_table;
2453 	struct device *gdev;
2454 
2455 	/* Genpd core takes care of propagation to parent genpd */
2456 	if (opp_table->is_genpd) {
2457 		dev_err(dev, "%s: Operation not supported for genpds\n", __func__);
2458 		return -EOPNOTSUPP;
2459 	}
2460 
2461 	if (index >= opp_table->required_opp_count) {
2462 		dev_err(dev, "Required OPPs not available, can't set required devs\n");
2463 		return -EINVAL;
2464 	}
2465 
2466 	required_table = opp_table->required_opp_tables[index];
2467 	if (IS_ERR(required_table)) {
2468 		dev_err(dev, "Missing OPP table, unable to set the required devs\n");
2469 		return -ENODEV;
2470 	}
2471 
2472 	/*
2473 	 * The required_opp_tables parsing is not perfect, as the OPP core does
2474 	 * the parsing solely based on the DT node pointers. The core sets the
2475 	 * required_opp_tables entry to the first OPP table in the "opp_tables"
2476 	 * list that matches the node pointer.
2477 	 *
2478 	 * If the target DT OPP table is used by multiple devices and they all
2479 	 * create separate instances of 'struct opp_table' from it, then it is
2480 	 * possible that the required_opp_tables entry may be set to the
2481 	 * incorrect sibling device.
2482 	 *
2483 	 * Cross check it again and fix if required.
2484 	 */
2485 	gdev = dev_to_genpd_dev(required_dev);
2486 	if (IS_ERR(gdev))
2487 		return PTR_ERR(gdev);
2488 
2489 	pd_table = _find_opp_table(gdev);
2490 	if (!IS_ERR(pd_table)) {
2491 		if (pd_table != required_table) {
2492 			dev_pm_opp_put_opp_table(required_table);
2493 			opp_table->required_opp_tables[index] = pd_table;
2494 		} else {
2495 			dev_pm_opp_put_opp_table(pd_table);
2496 		}
2497 	}
2498 
2499 	opp_table->required_devs[index] = required_dev;
2500 	return 0;
2501 }
2502 
2503 static void _opp_put_required_dev(struct opp_table *opp_table,
2504 				  unsigned int index)
2505 {
2506 	opp_table->required_devs[index] = NULL;
2507 }
2508 
2509 static void _opp_clear_config(struct opp_config_data *data)
2510 {
2511 	if (data->flags & OPP_CONFIG_REQUIRED_DEV)
2512 		_opp_put_required_dev(data->opp_table,
2513 				      data->required_dev_index);
2514 	if (data->flags & OPP_CONFIG_REGULATOR)
2515 		_opp_put_regulators(data->opp_table);
2516 	if (data->flags & OPP_CONFIG_SUPPORTED_HW)
2517 		_opp_put_supported_hw(data->opp_table);
2518 	if (data->flags & OPP_CONFIG_REGULATOR_HELPER)
2519 		_opp_put_config_regulators_helper(data->opp_table);
2520 	if (data->flags & OPP_CONFIG_PROP_NAME)
2521 		_opp_put_prop_name(data->opp_table);
2522 	if (data->flags & OPP_CONFIG_CLK)
2523 		_opp_put_clknames(data->opp_table);
2524 
2525 	dev_pm_opp_put_opp_table(data->opp_table);
2526 	kfree(data);
2527 }
2528 
2529 /**
2530  * dev_pm_opp_set_config() - Set OPP configuration for the device.
2531  * @dev: Device for which configuration is being set.
2532  * @config: OPP configuration.
2533  *
2534  * This allows all device OPP configurations to be performed at once.
2535  *
2536  * This must be called before any OPPs are initialized for the device. This may
2537  * be called multiple times for the same OPP table, for example once for each
2538  * CPU that shares the table. This must be balanced by the same number of
2539  * calls to dev_pm_opp_clear_config() in order to free the OPP table properly.
2540  *
2541  * This returns a token to the caller, which must be passed to
2542  * dev_pm_opp_clear_config() to free the resources later. The value of the
2543  * returned token will be >= 1 for success and negative for errors. The minimum
2544  * value of 1 is chosen here to make it easy for callers to manage the resource.
2545  */
2546 int dev_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config)
2547 {
2548 	struct opp_table *opp_table;
2549 	struct opp_config_data *data;
2550 	unsigned int id;
2551 	int ret;
2552 
2553 	data = kmalloc(sizeof(*data), GFP_KERNEL);
2554 	if (!data)
2555 		return -ENOMEM;
2556 
2557 	opp_table = _add_opp_table(dev, false);
2558 	if (IS_ERR(opp_table)) {
2559 		kfree(data);
2560 		return PTR_ERR(opp_table);
2561 	}
2562 
2563 	data->opp_table = opp_table;
2564 	data->flags = 0;
2565 
2566 	/* This should be called before OPPs are initialized */
2567 	if (WARN_ON(!list_empty(&opp_table->opp_list))) {
2568 		ret = -EBUSY;
2569 		goto err;
2570 	}
2571 
2572 	/* Configure clocks */
2573 	if (config->clk_names) {
2574 		ret = _opp_set_clknames(opp_table, dev, config->clk_names,
2575 					config->config_clks);
2576 		if (ret)
2577 			goto err;
2578 
2579 		data->flags |= OPP_CONFIG_CLK;
2580 	} else if (config->config_clks) {
2581 		/* Don't allow config callback without clocks */
2582 		ret = -EINVAL;
2583 		goto err;
2584 	}
2585 
2586 	/* Configure property names */
2587 	if (config->prop_name) {
2588 		ret = _opp_set_prop_name(opp_table, config->prop_name);
2589 		if (ret)
2590 			goto err;
2591 
2592 		data->flags |= OPP_CONFIG_PROP_NAME;
2593 	}
2594 
2595 	/* Configure config_regulators helper */
2596 	if (config->config_regulators) {
2597 		ret = _opp_set_config_regulators_helper(opp_table, dev,
2598 						config->config_regulators);
2599 		if (ret)
2600 			goto err;
2601 
2602 		data->flags |= OPP_CONFIG_REGULATOR_HELPER;
2603 	}
2604 
2605 	/* Configure supported hardware */
2606 	if (config->supported_hw) {
2607 		ret = _opp_set_supported_hw(opp_table, config->supported_hw,
2608 					    config->supported_hw_count);
2609 		if (ret)
2610 			goto err;
2611 
2612 		data->flags |= OPP_CONFIG_SUPPORTED_HW;
2613 	}
2614 
2615 	/* Configure supplies */
2616 	if (config->regulator_names) {
2617 		ret = _opp_set_regulators(opp_table, dev,
2618 					  config->regulator_names);
2619 		if (ret)
2620 			goto err;
2621 
2622 		data->flags |= OPP_CONFIG_REGULATOR;
2623 	}
2624 
2625 	if (config->required_dev) {
2626 		ret = _opp_set_required_dev(opp_table, dev,
2627 					    config->required_dev,
2628 					    config->required_dev_index);
2629 		if (ret)
2630 			goto err;
2631 
2632 		data->required_dev_index = config->required_dev_index;
2633 		data->flags |= OPP_CONFIG_REQUIRED_DEV;
2634 	}
2635 
2636 	ret = xa_alloc(&opp_configs, &id, data, XA_LIMIT(1, INT_MAX),
2637 		       GFP_KERNEL);
2638 	if (ret)
2639 		goto err;
2640 
2641 	return id;
2642 
2643 err:
2644 	_opp_clear_config(data);
2645 	return ret;
2646 }
2647 EXPORT_SYMBOL_GPL(dev_pm_opp_set_config);
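
/*
 * Illustrative sketch (editor's addition): a hypothetical probe() selecting a
 * clock and a regulator before the OPPs are added. foo_probe() and the
 * "core"/"vdd" names are made up.
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		struct dev_pm_opp_config config = {
 *			.clk_names = (const char * const []){ "core", NULL },
 *			.regulator_names = (const char * const []){ "vdd", NULL },
 *		};
 *		int token;
 *
 *		token = dev_pm_opp_set_config(dev, &config);
 *		if (token < 0)
 *			return token;
 *
 *		... add OPPs here, e.g. with dev_pm_opp_of_add_table() ...
 *
 *		return 0;
 *	}
 *
 * The token must be saved and passed to dev_pm_opp_clear_config() on the
 * teardown path.
 */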
2648 
2649 /**
2650  * dev_pm_opp_clear_config() - Releases resources blocked for OPP configuration.
2651  * @token: The token returned by dev_pm_opp_set_config() previously.
2652  *
2653  * This allows all device OPP configurations to be cleared at once. This must be
2654  * called once for each call made to dev_pm_opp_set_config(), in order to free
2655  * the OPPs properly.
2656  *
2657  * Currently the first call itself ends up freeing all the OPP configurations,
2658  * while the later ones only drop the OPP table reference. This works well for
2659  * now, as we would never want to use a half-initialized OPP table and want to
2660  * remove the configurations together.
2661  */
2662 void dev_pm_opp_clear_config(int token)
2663 {
2664 	struct opp_config_data *data;
2665 
2666 	/*
2667 	 * This lets the callers call this unconditionally and keep their code
2668 	 * simple.
2669 	 */
2670 	if (unlikely(token <= 0))
2671 		return;
2672 
2673 	data = xa_erase(&opp_configs, token);
2674 	if (WARN_ON(!data))
2675 		return;
2676 
2677 	_opp_clear_config(data);
2678 }
2679 EXPORT_SYMBOL_GPL(dev_pm_opp_clear_config);
2680 
2681 static void devm_pm_opp_config_release(void *token)
2682 {
2683 	dev_pm_opp_clear_config((unsigned long)token);
2684 }
2685 
2686 /**
2687  * devm_pm_opp_set_config() - Set OPP configuration for the device.
2688  * @dev: Device for which configuration is being set.
2689  * @config: OPP configuration.
2690  *
2691  * This allows all device OPP configurations to be performed at once.
2692  * This is a resource-managed variant of dev_pm_opp_set_config().
2693  *
2694  * Return: 0 on success and a negative error number otherwise.
2695  */
2696 int devm_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config)
2697 {
2698 	int token = dev_pm_opp_set_config(dev, config);
2699 
2700 	if (token < 0)
2701 		return token;
2702 
2703 	return devm_add_action_or_reset(dev, devm_pm_opp_config_release,
2704 					(void *) ((unsigned long) token));
2705 }
2706 EXPORT_SYMBOL_GPL(devm_pm_opp_set_config);
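
/*
 * Illustrative sketch (editor's addition): with the devm variant the cleanup
 * is tied to driver unbind, so no token bookkeeping is needed. foo_probe()
 * and the "core" clock name are made up.
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		struct dev_pm_opp_config config = {
 *			.clk_names = (const char * const []){ "core", NULL },
 *		};
 *
 *		return devm_pm_opp_set_config(dev, &config);
 *	}
 */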
2707 
2708 /**
2709  * dev_pm_opp_xlate_required_opp() - Find required OPP for @src_table OPP.
2710  * @src_table: OPP table which has @dst_table as one of its required OPP table.
2711  * @dst_table: Required OPP table of the @src_table.
2712  * @src_opp: OPP from the @src_table.
2713  *
2714  * This function returns the OPP (present in @dst_table) pointed out by the
2715  * "required-opps" property of the @src_opp (present in @src_table).
2716  *
2717  * The callers are required to call dev_pm_opp_put() for the returned OPP after
2718  * use.
2719  *
2720  * Return: pointer to 'struct dev_pm_opp' on success, error pointer otherwise.
2721  */
2722 struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table,
2723 						 struct opp_table *dst_table,
2724 						 struct dev_pm_opp *src_opp)
2725 {
2726 	struct dev_pm_opp *opp, *dest_opp = ERR_PTR(-ENODEV);
2727 	int i;
2728 
2729 	if (!src_table || !dst_table || !src_opp ||
2730 	    !src_table->required_opp_tables)
2731 		return ERR_PTR(-EINVAL);
2732 
2733 	/* required-opps not fully initialized yet */
2734 	if (lazy_linking_pending(src_table))
2735 		return ERR_PTR(-EBUSY);
2736 
2737 	for (i = 0; i < src_table->required_opp_count; i++) {
2738 		if (src_table->required_opp_tables[i] != dst_table)
2739 			continue;
2740 
2741 		scoped_guard(mutex, &src_table->lock) {
2742 			list_for_each_entry(opp, &src_table->opp_list, node) {
2743 				if (opp == src_opp) {
2744 					dest_opp = dev_pm_opp_get(opp->required_opps[i]);
2745 					break;
2746 				}
2747 			}
2748 			break;
2749 		}
2750 	}
2751 
2752 	if (IS_ERR(dest_opp)) {
2753 		pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__,
2754 		       src_table, dst_table);
2755 	}
2756 
2757 	return dest_opp;
2758 }
2759 EXPORT_SYMBOL_GPL(dev_pm_opp_xlate_required_opp);
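
/*
 * Illustrative sketch (editor's addition): translating a device OPP into the
 * matching OPP of a required table (e.g. its power domain). Variable names
 * are made up and error handling is trimmed.
 *
 *	dst_opp = dev_pm_opp_xlate_required_opp(src_table, dst_table, src_opp);
 *	if (!IS_ERR(dst_opp)) {
 *		... use dst_opp, e.g. dev_pm_opp_get_level(dst_opp) ...
 *		dev_pm_opp_put(dst_opp);
 *	}
 */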
2760 
2761 /**
2762  * dev_pm_opp_xlate_performance_state() - Find required OPP's pstate for src_table.
2763  * @src_table: OPP table which has dst_table as one of its required OPP table.
2764  * @dst_table: Required OPP table of the src_table.
2765  * @pstate: Current performance state of the src_table.
2766  *
2767  * This returns the pstate of the OPP (present in @dst_table) pointed out by the
2768  * "required-opps" property of the OPP (present in @src_table) which has
2769  * performance state set to @pstate.
2770  *
2771  * Return: Zero or positive performance state on success, otherwise negative
2772  * value on errors.
2773  */
2774 int dev_pm_opp_xlate_performance_state(struct opp_table *src_table,
2775 				       struct opp_table *dst_table,
2776 				       unsigned int pstate)
2777 {
2778 	struct dev_pm_opp *opp;
2779 	int i;
2780 
2781 	/*
2782 	 * Normally the src_table will have the "required-opps" property set to
2783 	 * point to one of the OPPs in the dst_table, but in some cases the
2784 	 * genpd and its master have a one-to-one mapping of performance states
2785 	 * and so none of them have the "required-opps" property set. Return the
2786 	 * pstate of the src_table unchanged in such cases.
2787 	 */
2788 	if (!src_table || !src_table->required_opp_count)
2789 		return pstate;
2790 
2791 	/* Both OPP tables must belong to genpds */
2792 	if (unlikely(!src_table->is_genpd || !dst_table->is_genpd)) {
2793 		pr_err("%s: Performance state is only valid for genpds.\n", __func__);
2794 		return -EINVAL;
2795 	}
2796 
2797 	/* required-opps not fully initialized yet */
2798 	if (lazy_linking_pending(src_table))
2799 		return -EBUSY;
2800 
2801 	for (i = 0; i < src_table->required_opp_count; i++) {
2802 		if (src_table->required_opp_tables[i]->np == dst_table->np)
2803 			break;
2804 	}
2805 
2806 	if (unlikely(i == src_table->required_opp_count)) {
2807 		pr_err("%s: Couldn't find matching OPP table (%p: %p)\n",
2808 		       __func__, src_table, dst_table);
2809 		return -EINVAL;
2810 	}
2811 
2812 	guard(mutex)(&src_table->lock);
2813 
2814 	list_for_each_entry(opp, &src_table->opp_list, node) {
2815 		if (opp->level == pstate)
2816 			return opp->required_opps[i]->level;
2817 	}
2818 
2819 	pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__, src_table,
2820 	       dst_table);
2821 
2822 	return -EINVAL;
2823 }
2824 
2825 /**
2826  * dev_pm_opp_add_dynamic()  - Add a dynamic OPP from the given OPP data
2827  * @dev:	The device for which we do this operation
2828  * @data:	The OPP data for the OPP to add
2829  *
2830  * This function adds an opp definition to the opp table and returns status.
2831  * The opp is made available by default and it can be controlled using
2832  * dev_pm_opp_enable/disable functions.
2833  *
2834  * Return:
2835  * 0		On success, OR
2836  *		duplicate OPPs (both freq and volt are the same) and opp->available
2837  * -EEXIST	Freqs are equal but volts differ, OR
2838  *		duplicate OPPs (both freq and volt are the same) and !opp->available
2839  * -ENOMEM	Memory allocation failure
2840  */
2841 int dev_pm_opp_add_dynamic(struct device *dev, struct dev_pm_opp_data *data)
2842 {
2843 	struct opp_table *opp_table;
2844 	int ret;
2845 
2846 	opp_table = _add_opp_table(dev, true);
2847 	if (IS_ERR(opp_table))
2848 		return PTR_ERR(opp_table);
2849 
2850 	/* Fix regulator count for dynamic OPPs */
2851 	opp_table->regulator_count = 1;
2852 
2853 	ret = _opp_add_v1(opp_table, dev, data, true);
2854 	if (ret)
2855 		dev_pm_opp_put_opp_table(opp_table);
2856 
2857 	return ret;
2858 }
2859 EXPORT_SYMBOL_GPL(dev_pm_opp_add_dynamic);
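
/*
 * Illustrative sketch (editor's addition): registering two OPPs at runtime.
 * The frequencies/voltages and foo_add_opps() are made up.
 *
 *	static int foo_add_opps(struct device *dev)
 *	{
 *		struct dev_pm_opp_data slow = { .freq = 400000000, .u_volt = 950000 };
 *		struct dev_pm_opp_data fast = { .freq = 800000000, .u_volt = 1100000 };
 *		int ret;
 *
 *		ret = dev_pm_opp_add_dynamic(dev, &slow);
 *		if (ret)
 *			return ret;
 *
 *		return dev_pm_opp_add_dynamic(dev, &fast);
 *	}
 */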
2860 
2861 /**
2862  * _opp_set_availability() - helper to set the availability of an opp
2863  * @dev:		device for which we do this operation
2864  * @freq:		OPP frequency to modify availability
2865  * @availability_req:	availability status requested for this opp
2866  *
2867  * Set the availability of an OPP; dev_pm_opp_{enable,disable}() share a common
2868  * logic, which is isolated here.
2869  *
2870  * Return: -EINVAL for bad pointers, -ENOMEM if no memory is available for the
2871  * copy operation, and 0 if no modification was needed or the modification was
2872  * successful.
2873  */
2874 static int _opp_set_availability(struct device *dev, unsigned long freq,
2875 				 bool availability_req)
2876 {
2877 	struct dev_pm_opp *opp __free(put_opp) = ERR_PTR(-ENODEV), *tmp_opp;
2878 
2879 	/* Find the opp_table */
2880 	struct opp_table *opp_table __free(put_opp_table) =
2881 		_find_opp_table(dev);
2882 
2883 	if (IS_ERR(opp_table)) {
2884 		dev_warn(dev, "%s: Device OPP not found (%ld)\n", __func__,
2885 			 PTR_ERR(opp_table));
2886 		return PTR_ERR(opp_table);
2887 	}
2888 
2889 	if (!assert_single_clk(opp_table, 0))
2890 		return -EINVAL;
2891 
2892 	scoped_guard(mutex, &opp_table->lock) {
2893 		/* Do we have the frequency? */
2894 		list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
2895 			if (tmp_opp->rates[0] == freq) {
2896 				opp = dev_pm_opp_get(tmp_opp);
2897 
2898 				/* Is update really needed? */
2899 				if (opp->available == availability_req)
2900 					return 0;
2901 
2902 				opp->available = availability_req;
2903 				break;
2904 			}
2905 		}
2906 	}
2907 
2908 	if (IS_ERR(opp))
2909 		return PTR_ERR(opp);
2910 
2911 	/* Notify the change of the OPP availability */
2912 	if (availability_req)
2913 		blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE,
2914 					     opp);
2915 	else
2916 		blocking_notifier_call_chain(&opp_table->head,
2917 					     OPP_EVENT_DISABLE, opp);
2918 
2919 	return 0;
2920 }
2921 
2922 /**
2923  * dev_pm_opp_adjust_voltage() - helper to change the voltage of an OPP
2924  * @dev:		device for which we do this operation
2925  * @freq:		OPP frequency to adjust voltage of
2926  * @u_volt:		new OPP target voltage
2927  * @u_volt_min:		new OPP min voltage
2928  * @u_volt_max:		new OPP max voltage
2929  *
2930  * Return: -EINVAL for bad pointers, -ENOMEM if no memory is available for the
2931  * copy operation, and 0 if no modification was needed or the modification was
2932  * successful.
2933  */
2934 int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
2935 			      unsigned long u_volt, unsigned long u_volt_min,
2936 			      unsigned long u_volt_max)
2937 
2938 {
2939 	struct dev_pm_opp *opp __free(put_opp) = ERR_PTR(-ENODEV), *tmp_opp;
2940 	int r;
2941 
2942 	/* Find the opp_table */
2943 	struct opp_table *opp_table __free(put_opp_table) =
2944 		_find_opp_table(dev);
2945 
2946 	if (IS_ERR(opp_table)) {
2947 		r = PTR_ERR(opp_table);
2948 		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
2949 		return r;
2950 	}
2951 
2952 	if (!assert_single_clk(opp_table, 0))
2953 		return -EINVAL;
2954 
2955 	scoped_guard(mutex, &opp_table->lock) {
2956 		/* Do we have the frequency? */
2957 		list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
2958 			if (tmp_opp->rates[0] == freq) {
2959 				opp = dev_pm_opp_get(tmp_opp);
2960 
2961 				/* Is update really needed? */
2962 				if (opp->supplies->u_volt == u_volt)
2963 					return 0;
2964 
2965 				opp->supplies->u_volt = u_volt;
2966 				opp->supplies->u_volt_min = u_volt_min;
2967 				opp->supplies->u_volt_max = u_volt_max;
2968 
2969 				break;
2970 			}
2971 		}
2972 	}
2973 
2974 	if (IS_ERR(opp))
2975 		return PTR_ERR(opp);
2976 
2977 	/* Notify the voltage change of the OPP */
2978 	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADJUST_VOLTAGE,
2979 				     opp);
2980 
2981 	return 0;
2982 }
2983 EXPORT_SYMBOL_GPL(dev_pm_opp_adjust_voltage);
2984 
2985 /**
2986  * dev_pm_opp_sync_regulators() - Sync state of voltage regulators
2987  * @dev:	device for which we do this operation
2988  *
2989  * Sync voltage state of the OPP table regulators.
2990  *
2991  * Return: 0 on success or a negative error value.
2992  */
2993 int dev_pm_opp_sync_regulators(struct device *dev)
2994 {
2995 	struct regulator *reg;
2996 	int ret, i;
2997 
2998 	/* Device may not have OPP table */
2999 	struct opp_table *opp_table __free(put_opp_table) =
3000 		_find_opp_table(dev);
3001 
3002 	if (IS_ERR(opp_table))
3003 		return 0;
3004 
3005 	/* Regulator may not be required for the device */
3006 	if (unlikely(!opp_table->regulators))
3007 		return 0;
3008 
3009 	/* Nothing to sync if voltage wasn't changed */
3010 	if (!opp_table->enabled)
3011 		return 0;
3012 
3013 	for (i = 0; i < opp_table->regulator_count; i++) {
3014 		reg = opp_table->regulators[i];
3015 		ret = regulator_sync_voltage(reg);
3016 		if (ret)
3017 			return ret;
3018 	}
3019 
3020 	return 0;
3021 }
3022 EXPORT_SYMBOL_GPL(dev_pm_opp_sync_regulators);
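
/*
 * Illustrative sketch (editor's addition): a hypothetical resume callback
 * calling the helper above after firmware may have touched the rails.
 * foo_resume() is made up.
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		... restore clocks and device context ...
 *
 *		return dev_pm_opp_sync_regulators(dev);
 *	}
 */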
3023 
3024 /**
3025  * dev_pm_opp_enable() - Enable a specific OPP
3026  * @dev:	device for which we do this operation
3027  * @freq:	OPP frequency to enable
3028  *
3029  * Enables a provided OPP. If the operation is valid, this returns 0, else the
3030  * corresponding error value. It is meant to be used by users to make an OPP
3031  * available again after it was temporarily made unavailable with dev_pm_opp_disable.
3032  *
3033  * Return: -EINVAL for bad pointers, -ENOMEM if no memory is available for the
3034  * copy operation, and 0 if no modification was needed or the modification was
3035  * successful.
3036  */
3037 int dev_pm_opp_enable(struct device *dev, unsigned long freq)
3038 {
3039 	return _opp_set_availability(dev, freq, true);
3040 }
3041 EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
3042 
3043 /**
3044  * dev_pm_opp_disable() - Disable a specific OPP
3045  * @dev:	device for which we do this operation
3046  * @freq:	OPP frequency to disable
3047  *
3048  * Disables a provided OPP. If the operation is valid, this returns
3049  * 0, else the corresponding error value. It is meant to be a temporary
3050  * control for users to make this OPP unavailable until the circumstances are
3051  * right to make it available again (with a call to dev_pm_opp_enable).
3052  *
3053  * Return: -EINVAL for bad pointers, -ENOMEM if no memory is available for the
3054  * copy operation, and 0 if no modification was needed or the modification was
3055  * successful.
3056  */
3057 int dev_pm_opp_disable(struct device *dev, unsigned long freq)
3058 {
3059 	return _opp_set_availability(dev, freq, false);
3060 }
3061 EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
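
/*
 * Illustrative sketch (editor's addition): temporarily masking an OPP, e.g.
 * while a thermal constraint applies, and restoring it afterwards. The 1 GHz
 * value is made up.
 *
 *	dev_pm_opp_disable(dev, 1000000000);
 *	... run with the remaining OPPs for a while ...
 *	dev_pm_opp_enable(dev, 1000000000);
 */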
3062 
3063 /**
3064  * dev_pm_opp_register_notifier() - Register OPP notifier for the device
3065  * @dev:	Device for which notifier needs to be registered
3066  * @nb:		Notifier block to be registered
3067  *
3068  * Return: 0 on success or a negative error value.
3069  */
3070 int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb)
3071 {
3072 	struct opp_table *opp_table __free(put_opp_table) =
3073 		_find_opp_table(dev);
3074 
3075 	if (IS_ERR(opp_table))
3076 		return PTR_ERR(opp_table);
3077 
3078 	return blocking_notifier_chain_register(&opp_table->head, nb);
3079 }
3080 EXPORT_SYMBOL(dev_pm_opp_register_notifier);
3081 
3082 /**
3083  * dev_pm_opp_unregister_notifier() - Unregister OPP notifier for the device
3084  * @dev:	Device for which notifier needs to be unregistered
3085  * @nb:		Notifier block to be unregistered
3086  *
3087  * Return: 0 on success or a negative error value.
3088  */
3089 int dev_pm_opp_unregister_notifier(struct device *dev,
3090 				   struct notifier_block *nb)
3091 {
3092 	struct opp_table *opp_table __free(put_opp_table) =
3093 		_find_opp_table(dev);
3094 
3095 	if (IS_ERR(opp_table))
3096 		return PTR_ERR(opp_table);
3097 
3098 	return blocking_notifier_chain_unregister(&opp_table->head, nb);
3099 }
3100 EXPORT_SYMBOL(dev_pm_opp_unregister_notifier);
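
/*
 * Illustrative sketch (editor's addition): a notifier callback for the chain
 * above; the data pointer is the affected struct dev_pm_opp. foo_opp_notify()
 * and foo_nb are made-up names.
 *
 *	static int foo_opp_notify(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		struct dev_pm_opp *opp = data;
 *
 *		if (event == OPP_EVENT_DISABLE)
 *			... react to the OPP becoming unavailable ...
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = { .notifier_call = foo_opp_notify };
 *
 * Register it with dev_pm_opp_register_notifier(dev, &foo_nb) and balance
 * that with dev_pm_opp_unregister_notifier(dev, &foo_nb).
 */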
3101 
3102 /**
3103  * dev_pm_opp_remove_table() - Free all OPPs associated with the device
3104  * @dev:	device pointer used to lookup OPP table.
3105  *
3106  * Free both OPPs created using static entries present in DT and the
3107  * dynamically added entries.
3108  */
3109 void dev_pm_opp_remove_table(struct device *dev)
3110 {
3111 	/* Check for existing table for 'dev' */
3112 	struct opp_table *opp_table __free(put_opp_table) =
3113 		_find_opp_table(dev);
3114 
3115 	if (IS_ERR(opp_table)) {
3116 		int error = PTR_ERR(opp_table);
3117 
3118 		if (error != -ENODEV)
3119 			WARN(1, "%s: opp_table: %d\n",
3120 			     IS_ERR_OR_NULL(dev) ?
3121 					"Invalid device" : dev_name(dev),
3122 			     error);
3123 		return;
3124 	}
3125 
3126 	/*
3127 	 * Drop the extra reference only if the OPP table was successfully added
3128 	 * with dev_pm_opp_of_add_table() earlier.
3129 	 */
3130 	if (_opp_remove_all_static(opp_table))
3131 		dev_pm_opp_put_opp_table(opp_table);
3132 }
3133 EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);
3134