xref: /linux/drivers/clk/qcom/clk-rcg2.c (revision 1fd1dc41724319406b0aff221a352a400b0ddfc5)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
4  */
5 
6 #include <linux/kernel.h>
7 #include <linux/bitops.h>
8 #include <linux/err.h>
9 #include <linux/bug.h>
10 #include <linux/export.h>
11 #include <linux/clk.h>
12 #include <linux/clk-provider.h>
13 #include <linux/delay.h>
14 #include <linux/rational.h>
15 #include <linux/regmap.h>
16 #include <linux/math64.h>
17 #include <linux/gcd.h>
18 #include <linux/minmax.h>
19 #include <linux/slab.h>
20 
21 #include <asm/div64.h>
22 
23 #include "clk-rcg.h"
24 #include "common.h"
25 
26 #define CMD_REG			0x0
27 #define CMD_UPDATE		BIT(0)
28 #define CMD_ROOT_EN		BIT(1)
29 #define CMD_DIRTY_CFG		BIT(4)
30 #define CMD_DIRTY_N		BIT(5)
31 #define CMD_DIRTY_M		BIT(6)
32 #define CMD_DIRTY_D		BIT(7)
33 #define CMD_ROOT_OFF		BIT(31)
34 
35 #define CFG_REG			0x4
36 #define CFG_SRC_DIV_SHIFT	0
37 #define CFG_SRC_DIV_LENGTH	8
38 #define CFG_SRC_SEL_SHIFT	8
39 #define CFG_SRC_SEL_MASK	(0x7 << CFG_SRC_SEL_SHIFT)
40 #define CFG_MODE_SHIFT		12
41 #define CFG_MODE_MASK		(0x3 << CFG_MODE_SHIFT)
42 #define CFG_MODE_DUAL_EDGE	(0x2 << CFG_MODE_SHIFT)
43 #define CFG_HW_CLK_CTRL_MASK	BIT(20)
44 
45 #define M_REG			0x8
46 #define N_REG			0xc
47 #define D_REG			0x10
48 
49 #define RCG_CFG_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + CFG_REG)
50 #define RCG_M_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + M_REG)
51 #define RCG_N_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + N_REG)
52 #define RCG_D_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + D_REG)
53 
54 /* Dynamic Frequency Scaling */
55 #define MAX_PERF_LEVEL		8
56 #define SE_CMD_DFSR_OFFSET	0x14
57 #define SE_CMD_DFS_EN		BIT(0)
58 #define SE_PERF_DFSR(level)	(0x1c + 0x4 * (level))
59 #define SE_PERF_M_DFSR(level)	(0x5c + 0x4 * (level))
60 #define SE_PERF_N_DFSR(level)	(0x9c + 0x4 * (level))
61 
/* Rounding policy when matching a requested rate against a frequency table. */
enum freq_policy {
	FLOOR,	/* pick the highest table entry <= the request */
	CEIL,	/* pick the lowest table entry >= the request */
};
66 
67 static int clk_rcg2_is_enabled(struct clk_hw *hw)
68 {
69 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
70 	u32 cmd;
71 	int ret;
72 
73 	ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
74 	if (ret)
75 		return ret;
76 
77 	return (cmd & CMD_ROOT_OFF) == 0;
78 }
79 
80 static u8 __clk_rcg2_get_parent(struct clk_hw *hw, u32 cfg)
81 {
82 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
83 	int num_parents = clk_hw_get_num_parents(hw);
84 	int i;
85 
86 	cfg &= CFG_SRC_SEL_MASK;
87 	cfg >>= CFG_SRC_SEL_SHIFT;
88 
89 	for (i = 0; i < num_parents; i++)
90 		if (cfg == rcg->parent_map[i].cfg)
91 			return i;
92 
93 	pr_debug("%s: Clock %s has invalid parent, using default.\n",
94 		 __func__, clk_hw_get_name(hw));
95 	return 0;
96 }
97 
/* clk_ops::get_parent - read CFG and decode SRC_SEL into a parent index. */
static u8 clk_rcg2_get_parent(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg;
	int ret;

	ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
	if (ret) {
		pr_debug("%s: Unable to read CFG register for %s\n",
			 __func__, clk_hw_get_name(hw));
		/* get_parent cannot fail; fall back to the first parent */
		return 0;
	}

	return __clk_rcg2_get_parent(hw, cfg);
}
113 
/*
 * Latch a new RCG configuration by setting CMD_UPDATE, then poll (up to
 * 500 us, 1 us steps) until the hardware clears the bit to acknowledge the
 * switch. Returns 0 on success, a regmap error, or -EBUSY on timeout.
 */
static int update_config(struct clk_rcg2 *rcg)
{
	int count, ret;
	u32 cmd;
	struct clk_hw *hw = &rcg->clkr.hw;
	const char *name = clk_hw_get_name(hw);

	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				 CMD_UPDATE, CMD_UPDATE);
	if (ret)
		return ret;

	/* Wait for update to take effect */
	for (count = 500; count > 0; count--) {
		ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
		if (ret)
			return ret;
		if (!(cmd & CMD_UPDATE))
			return 0;
		udelay(1);
	}

	WARN(1, "%s: rcg didn't update its configuration.", name);
	return -EBUSY;
}
139 
/*
 * clk_ops::set_parent - program SRC_SEL with the hardware mux value for
 * @index and latch the change with an update cycle.
 */
static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;
	u32 cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;

	ret = regmap_update_bits(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg),
				 CFG_SRC_SEL_MASK, cfg);
	if (ret)
		return ret;

	return update_config(rcg);
}
153 
154 /**
155  * convert_to_reg_val() - Convert divisor values to hardware values.
156  *
157  * @f: Frequency table with pure m/n/pre_div parameters.
158  */
159 static void convert_to_reg_val(struct freq_tbl *f)
160 {
161 	f->pre_div *= 2;
162 	f->pre_div -= 1;
163 }
164 
/**
 * calc_rate() - Calculate rate based on m/n:d values
 *
 * @rate: Parent rate.
 * @m: Multiplier.
 * @n: Divisor.
 * @mode: Use zero to ignore m/n calculation.
 * @hid_div: Pre divisor register value. Pre divisor value
 *                  relates to hid_div as pre_div = (hid_div + 1) / 2.
 *
 * Return calculated rate according to formula:
 *
 *          parent_rate     m
 *   rate = ----------- x  ---
 *            pre_div       n
 */
static unsigned long
calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
{
	/* Apply the HID pre-divider: rate * 2 / (hid_div + 1) */
	if (hid_div)
		rate = mult_frac(rate, 2, hid_div + 1);

	/* Apply M/N fractional scaling only when the MND counter is in use */
	if (mode)
		rate = mult_frac(rate, m, n);

	return rate;
}
192 
/*
 * Decode a raw CFG value (plus the M/N counters for MND-capable RCGs) into
 * an output rate. The N register holds ~(n - m), so the real divisor is
 * recovered as (~n & mask) + m before calling calc_rate().
 */
static unsigned long
__clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate, u32 cfg)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 hid_div, m = 0, n = 0, mode = 0, mask;

	if (rcg->mnd_width) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
		m &= mask;
		regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &n);
		/* N register stores ~(n - m); invert and add m back */
		n = ~n;
		n &= mask;
		n += m;
		mode = cfg & CFG_MODE_MASK;
		mode >>= CFG_MODE_SHIFT;
	}

	mask = BIT(rcg->hid_width) - 1;
	hid_div = cfg >> CFG_SRC_DIV_SHIFT;
	hid_div &= mask;

	return calc_rate(parent_rate, m, n, mode, hid_div);
}
217 
/* clk_ops::recalc_rate - read CFG and decode the currently programmed rate. */
static unsigned long
clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg;

	regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);

	return __clk_rcg2_recalc_rate(hw, parent_rate, cfg);
}
228 
/*
 * Select a frequency table entry for req->rate, rounded per @policy, and
 * fill in the rate request with the chosen parent and required parent rate.
 * With CLK_SET_RATE_PARENT, the needed parent rate is back-computed from
 * the entry: pre_div is a register value (real divider (pre_div + 1) / 2)
 * and the M/N fraction is undone with parent = rate * n / m.
 */
static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
				    struct clk_rate_request *req,
				    enum freq_policy policy)
{
	unsigned long clk_flags, rate = req->rate;
	struct clk_hw *p;
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int index;

	switch (policy) {
	case FLOOR:
		f = qcom_find_freq_floor(f, rate);
		break;
	case CEIL:
		f = qcom_find_freq(f, rate);
		break;
	default:
		return -EINVAL;
	}

	if (!f)
		return -EINVAL;

	index = qcom_find_src_index(hw, rcg->parent_map, f->src);
	if (index < 0)
		return index;

	clk_flags = clk_hw_get_flags(hw);
	p = clk_hw_get_parent_by_index(hw, index);
	if (!p)
		return -EINVAL;

	if (clk_flags & CLK_SET_RATE_PARENT) {
		rate = f->freq;
		if (f->pre_div) {
			/* Undo the (pre_div + 1) / 2 register-encoded divider */
			if (!rate)
				rate = req->rate;
			rate /= 2;
			rate *= f->pre_div + 1;
		}

		if (f->n) {
			/* Undo the m/n fraction: parent = rate * n / m */
			u64 tmp = rate;
			tmp = tmp * f->n;
			do_div(tmp, f->m);
			rate = tmp;
		}
	} else {
		rate = clk_hw_get_rate(p);
	}
	req->best_parent_hw = p;
	req->best_parent_rate = rate;
	req->rate = f->freq;

	return 0;
}
285 
286 static const struct freq_conf *
287 __clk_rcg2_select_conf(struct clk_hw *hw, const struct freq_multi_tbl *f,
288 		       unsigned long req_rate)
289 {
290 	unsigned long rate_diff, best_rate_diff = ULONG_MAX;
291 	const struct freq_conf *conf, *best_conf = NULL;
292 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
293 	const char *name = clk_hw_get_name(hw);
294 	unsigned long parent_rate, rate;
295 	struct clk_hw *p;
296 	int index, i;
297 
298 	/* Exit early if only one config is defined */
299 	if (f->num_confs == 1) {
300 		best_conf = f->confs;
301 		goto exit;
302 	}
303 
304 	/* Search in each provided config the one that is near the wanted rate */
305 	for (i = 0, conf = f->confs; i < f->num_confs; i++, conf++) {
306 		index = qcom_find_src_index(hw, rcg->parent_map, conf->src);
307 		if (index < 0)
308 			continue;
309 
310 		p = clk_hw_get_parent_by_index(hw, index);
311 		if (!p)
312 			continue;
313 
314 		parent_rate = clk_hw_get_rate(p);
315 		rate = calc_rate(parent_rate, conf->n, conf->m, conf->n, conf->pre_div);
316 
317 		if (rate == req_rate) {
318 			best_conf = conf;
319 			goto exit;
320 		}
321 
322 		rate_diff = abs_diff(req_rate, rate);
323 		if (rate_diff < best_rate_diff) {
324 			best_rate_diff = rate_diff;
325 			best_conf = conf;
326 		}
327 	}
328 
329 	/*
330 	 * Very unlikely. Warn if we couldn't find a correct config
331 	 * due to parent not found in every config.
332 	 */
333 	if (unlikely(!best_conf)) {
334 		WARN(1, "%s: can't find a configuration for rate %lu\n",
335 		     name, req_rate);
336 		return ERR_PTR(-EINVAL);
337 	}
338 
339 exit:
340 	return best_conf;
341 }
342 
/*
 * Multi-config variant of _freq_tbl_determine_rate(): look up the table
 * entry for req->rate, choose the best config for the current parents, and
 * report the chosen parent and the parent rate it needs (back-computed from
 * pre_div and m/n when CLK_SET_RATE_PARENT is set).
 */
static int _freq_tbl_fm_determine_rate(struct clk_hw *hw, const struct freq_multi_tbl *f,
				       struct clk_rate_request *req)
{
	unsigned long clk_flags, rate = req->rate;
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_conf *conf;
	struct clk_hw *p;
	int index;

	f = qcom_find_freq_multi(f, rate);
	if (!f || !f->confs)
		return -EINVAL;

	conf = __clk_rcg2_select_conf(hw, f, rate);
	if (IS_ERR(conf))
		return PTR_ERR(conf);
	index = qcom_find_src_index(hw, rcg->parent_map, conf->src);
	if (index < 0)
		return index;

	clk_flags = clk_hw_get_flags(hw);
	p = clk_hw_get_parent_by_index(hw, index);
	if (!p)
		return -EINVAL;

	if (clk_flags & CLK_SET_RATE_PARENT) {
		rate = f->freq;
		if (conf->pre_div) {
			/* Undo the (pre_div + 1) / 2 register-encoded divider */
			if (!rate)
				rate = req->rate;
			rate /= 2;
			rate *= conf->pre_div + 1;
		}

		if (conf->n) {
			/* Undo the m/n fraction: parent = rate * n / m */
			u64 tmp = rate;

			tmp = tmp * conf->n;
			do_div(tmp, conf->m);
			rate = tmp;
		}
	} else {
		rate = clk_hw_get_rate(p);
	}

	req->best_parent_hw = p;
	req->best_parent_rate = rate;
	req->rate = f->freq;

	return 0;
}
394 
/* clk_ops::determine_rate - frequency table lookup, rounding up (CEIL). */
static int clk_rcg2_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, CEIL);
}
402 
/* clk_ops::determine_rate - frequency table lookup, rounding down (FLOOR). */
static int clk_rcg2_determine_floor_rate(struct clk_hw *hw,
					 struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR);
}
410 
/* clk_ops::determine_rate - multi-config frequency table lookup. */
static int clk_rcg2_fm_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return _freq_tbl_fm_determine_rate(hw, rcg->freq_multi_tbl, req);
}
418 
/**
 * clk_rcg2_split_div() - Split multiplier that doesn't fit in n neither in pre_div.
 *
 * @multiplier: Multiplier to split between n and pre_div.
 * @pre_div: Pointer to pre divisor value.
 * @n: Pointer to n divisor value.
 * @pre_div_max: Pre divisor maximum value.
 */
static inline void clk_rcg2_split_div(int multiplier, unsigned int *pre_div,
				      u16 *n, unsigned int pre_div_max)
{
	/*
	 * Raise pre_div to its maximum and fold the factor into n,
	 * compensating for the scale change: n' = mult * n * pre_div / max.
	 */
	*n = mult_frac(multiplier * *n, *pre_div, pre_div_max);
	*pre_div = pre_div_max;
}
433 
/*
 * clk_rcg2_calc_mnd() - Derive m, n and pre_div for an arbitrary rate.
 * @parent_rate: Source clock rate.
 * @rate: Requested rate.
 * @f: Output entry; m/n/pre_div are written as pure values (not register
 *     encodings — see convert_to_reg_val()).
 * @mnd_max: Largest value the M/N counters can hold.
 * @pre_div_max: Largest pure pre-divider value.
 *
 * Reduces rate/parent_rate by their GCD to obtain an initial m and a
 * scaled parent divisor, shrinking m while the divisor exceeds what the
 * hardware can represent. The remaining divisor is then factorized and its
 * prime factors distributed between n (preferred, while below m + mnd_max)
 * and pre_div.
 */
static void clk_rcg2_calc_mnd(u64 parent_rate, u64 rate, struct freq_tbl *f,
			unsigned int mnd_max, unsigned int pre_div_max)
{
	int i = 2;
	unsigned int pre_div = 1;
	unsigned long rates_gcd, scaled_parent_rate;
	u16 m, n = 1, n_candidate = 1, n_max;

	rates_gcd = gcd(parent_rate, rate);
	m = div64_u64(rate, rates_gcd);
	scaled_parent_rate = div64_u64(parent_rate, rates_gcd);
	while (scaled_parent_rate > (mnd_max + m) * pre_div_max) {
		// we're exceeding divisor's range, trying lower scale.
		if (m > 1) {
			m--;
			scaled_parent_rate = mult_frac(scaled_parent_rate, m, (m + 1));
		} else {
			// cannot lower scale, just set max divisor values.
			f->n = mnd_max + m;
			f->pre_div = pre_div_max;
			f->m = m;
			return;
		}
	}

	n_max = m + mnd_max;

	/* Distribute the prime factors of the scaled divisor over n/pre_div */
	while (scaled_parent_rate > 1) {
		while (scaled_parent_rate % i == 0) {
			n_candidate *= i;
			if (n_candidate < n_max)
				n = n_candidate;
			else if (pre_div * i < pre_div_max)
				pre_div *= i;
			else
				clk_rcg2_split_div(i, &pre_div, &n, pre_div_max);

			scaled_parent_rate /= i;
		}
		i++;
	}

	f->m = m;
	f->n = n;
	f->pre_div = pre_div > 1 ? pre_div : 0;
}
480 
481 static int clk_rcg2_determine_gp_rate(struct clk_hw *hw,
482 				   struct clk_rate_request *req)
483 {
484 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
485 	struct freq_tbl f_tbl = {}, *f = &f_tbl;
486 	int mnd_max = BIT(rcg->mnd_width) - 1;
487 	int hid_max = BIT(rcg->hid_width) - 1;
488 	struct clk_hw *parent;
489 	u64 parent_rate;
490 
491 	parent = clk_hw_get_parent(hw);
492 	parent_rate = clk_get_rate(parent->clk);
493 	if (!parent_rate)
494 		return -EINVAL;
495 
496 	clk_rcg2_calc_mnd(parent_rate, req->rate, f, mnd_max, hid_max / 2);
497 	convert_to_reg_val(f);
498 	req->rate = calc_rate(parent_rate, f->m, f->n, f->n, f->pre_div);
499 
500 	return 0;
501 }
502 
503 static int __clk_rcg2_configure_parent(struct clk_rcg2 *rcg, u8 src, u32 *_cfg)
504 {
505 	struct clk_hw *hw = &rcg->clkr.hw;
506 	int index = qcom_find_src_index(hw, rcg->parent_map, src);
507 
508 	if (index < 0)
509 		return index;
510 
511 	*_cfg &= ~CFG_SRC_SEL_MASK;
512 	*_cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
513 
514 	return 0;
515 }
516 
/*
 * Program the M, N and D counters for @f and merge the pre-divider, mode
 * and HW clock-control bits into *_cfg (the caller writes CFG and latches).
 * Register encodings: N holds ~(n - m), D holds ~(2 * d) with 2 * d clamped
 * to [m, 2 * (n - m)] so the duty cycle stays within legal bounds.
 */
static int __clk_rcg2_configure_mnd(struct clk_rcg2 *rcg, const struct freq_tbl *f,
				u32 *_cfg)
{
	u32 cfg, mask, d_val, not2d_val, n_minus_m;
	int ret;

	if (rcg->mnd_width && f->n) {
		mask = BIT(rcg->mnd_width) - 1;
		ret = regmap_update_bits(rcg->clkr.regmap,
				RCG_M_OFFSET(rcg), mask, f->m);
		if (ret)
			return ret;

		ret = regmap_update_bits(rcg->clkr.regmap,
				RCG_N_OFFSET(rcg), mask, ~(f->n - f->m));
		if (ret)
			return ret;

		/* Calculate 2d value */
		d_val = f->n;

		n_minus_m = f->n - f->m;
		n_minus_m *= 2;

		d_val = clamp_t(u32, d_val, f->m, n_minus_m);
		not2d_val = ~d_val & mask;

		ret = regmap_update_bits(rcg->clkr.regmap,
				RCG_D_OFFSET(rcg), mask, not2d_val);
		if (ret)
			return ret;
	}

	mask = BIT(rcg->hid_width) - 1;
	mask |= CFG_MODE_MASK | CFG_HW_CLK_CTRL_MASK;
	cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
	/* Dual-edge mode is required whenever the M/N counter divides */
	if (rcg->mnd_width && f->n && (f->m != f->n))
		cfg |= CFG_MODE_DUAL_EDGE;
	if (rcg->hw_clk_ctrl)
		cfg |= CFG_HW_CLK_CTRL_MASK;

	*_cfg &= ~mask;
	*_cfg |= cfg;

	return 0;
}
563 
564 static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f,
565 				u32 *_cfg)
566 {
567 	int ret;
568 
569 	ret = __clk_rcg2_configure_parent(rcg, f->src, _cfg);
570 	if (ret)
571 		return ret;
572 
573 	ret = __clk_rcg2_configure_mnd(rcg, f, _cfg);
574 	if (ret)
575 		return ret;
576 
577 	return 0;
578 }
579 
/*
 * Read-modify-write the CFG register for @f (parent mux, dividers, M/N/D)
 * and trigger an update cycle so the new configuration takes effect.
 */
static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
	u32 cfg;
	int ret;

	ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
	if (ret)
		return ret;

	ret = __clk_rcg2_configure(rcg, f, &cfg);
	if (ret)
		return ret;

	ret = regmap_write(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), cfg);
	if (ret)
		return ret;

	return update_config(rcg);
}
599 
/*
 * Like clk_rcg2_configure() but leaves the parent mux selection untouched:
 * only dividers and M/N/D are reprogrammed before latching the update.
 */
static int clk_rcg2_configure_gp(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
	u32 cfg;
	int ret;

	ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
	if (ret)
		return ret;

	ret = __clk_rcg2_configure_mnd(rcg, f, &cfg);
	if (ret)
		return ret;

	ret = regmap_write(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), cfg);
	if (ret)
		return ret;

	return update_config(rcg);
}
619 
620 static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
621 			       enum freq_policy policy)
622 {
623 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
624 	const struct freq_tbl *f;
625 
626 	switch (policy) {
627 	case FLOOR:
628 		f = qcom_find_freq_floor(rcg->freq_tbl, rate);
629 		break;
630 	case CEIL:
631 		f = qcom_find_freq(rcg->freq_tbl, rate);
632 		break;
633 	default:
634 		return -EINVAL;
635 	}
636 
637 	if (!f)
638 		return -EINVAL;
639 
640 	return clk_rcg2_configure(rcg, f);
641 }
642 
/*
 * Set a rate from a multi-config frequency table: find the entry for @rate,
 * pick the best config for the current parents, flatten it into a regular
 * freq_tbl entry and program the hardware with it.
 */
static int __clk_rcg2_fm_set_rate(struct clk_hw *hw, unsigned long rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_multi_tbl *f;
	const struct freq_conf *conf;
	struct freq_tbl f_tbl = {};

	f = qcom_find_freq_multi(rcg->freq_multi_tbl, rate);
	if (!f || !f->confs)
		return -EINVAL;

	conf = __clk_rcg2_select_conf(hw, f, rate);
	if (IS_ERR(conf))
		return PTR_ERR(conf);

	f_tbl.freq = f->freq;
	f_tbl.src = conf->src;
	f_tbl.pre_div = conf->pre_div;
	f_tbl.m = conf->m;
	f_tbl.n = conf->n;

	return clk_rcg2_configure(rcg, &f_tbl);
}
666 
/* clk_ops::set_rate - table-driven set_rate, rounding up (CEIL). */
static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
			    unsigned long parent_rate)
{
	return __clk_rcg2_set_rate(hw, rate, CEIL);
}
672 
673 static int clk_rcg2_set_gp_rate(struct clk_hw *hw, unsigned long rate,
674 			    unsigned long parent_rate)
675 {
676 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
677 	int mnd_max = BIT(rcg->mnd_width) - 1;
678 	int hid_max = BIT(rcg->hid_width) - 1;
679 	struct freq_tbl f_tbl = {}, *f = &f_tbl;
680 	int ret;
681 
682 	clk_rcg2_calc_mnd(parent_rate, rate, f, mnd_max, hid_max / 2);
683 	convert_to_reg_val(f);
684 	ret = clk_rcg2_configure_gp(rcg, f);
685 
686 	return ret;
687 }
688 
/* clk_ops::set_rate - table-driven set_rate, rounding down (FLOOR). */
static int clk_rcg2_set_floor_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long parent_rate)
{
	return __clk_rcg2_set_rate(hw, rate, FLOOR);
}

/* clk_ops::set_rate - multi-config table set_rate. */
static int clk_rcg2_fm_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	return __clk_rcg2_fm_set_rate(hw, rate);
}

/* clk_ops::set_rate_and_parent - parent comes from the table entry (CEIL). */
static int clk_rcg2_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_set_rate(hw, rate, CEIL);
}

/* clk_ops::set_rate_and_parent - parent comes from the table entry (FLOOR). */
static int clk_rcg2_set_floor_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_set_rate(hw, rate, FLOOR);
}

/* clk_ops::set_rate_and_parent - parent comes from the selected config. */
static int clk_rcg2_fm_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_fm_set_rate(hw, rate);
}
718 
/*
 * clk_ops::get_duty_cycle - derive the duty cycle from the M/N/D counters.
 * Non-MND RCGs, and MND RCGs whose counters are all zero (bypass), run at
 * 50 %. Otherwise D is stored as ~(2 * d) and N as ~(n - m); duty = d / n.
 */
static int clk_rcg2_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 notn_m, n, m, d, not2d, mask;

	if (!rcg->mnd_width) {
		/* 50 % duty-cycle for Non-MND RCGs */
		duty->num = 1;
		duty->den = 2;
		return 0;
	}

	regmap_read(rcg->clkr.regmap, RCG_D_OFFSET(rcg), &not2d);
	regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
	regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m);

	if (!not2d && !m && !notn_m) {
		/* 50 % duty-cycle always */
		duty->num = 1;
		duty->den = 2;
		return 0;
	}

	mask = BIT(rcg->mnd_width) - 1;

	/* Recover d from the ~(2 * d) register encoding */
	d = ~(not2d) & mask;
	d = DIV_ROUND_CLOSEST(d, 2);

	/* Recover n from the ~(n - m) register encoding */
	n = (~(notn_m) + m) & mask;

	duty->num = d;
	duty->den = n;

	return 0;
}
754 
/*
 * clk_ops::set_duty_cycle - program the D counter for the requested duty
 * cycle. Only possible on MND RCGs with the M/N counter active (not in
 * bypass). The computed 2 * d value is clamped so it stays non-zero, fits
 * the register width and remains within the hardware's legal range.
 */
static int clk_rcg2_set_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 notn_m, n, m, d, not2d, mask, cfg;
	int ret;

	/* Duty-cycle cannot be modified for non-MND RCGs */
	if (!rcg->mnd_width)
		return -EINVAL;

	mask = BIT(rcg->mnd_width) - 1;

	regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m);
	regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
	regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);

	/* Duty-cycle cannot be modified if MND divider is in bypass mode. */
	if (!(cfg & CFG_MODE_MASK))
		return -EINVAL;

	/* Recover n from the ~(n - m) register encoding */
	n = (~(notn_m) + m) & mask;

	/* Calculate 2d value */
	d = DIV_ROUND_CLOSEST(n * duty->num * 2, duty->den);

	/*
	 * Check bit widths of 2d. If D is too big reduce duty cycle.
	 * Also make sure it is never zero.
	 */
	d = clamp_val(d, 1, mask);

	/* Keep d within the hardware's valid window around m and n - m */
	if ((d / 2) > (n - m))
		d = (n - m) * 2;
	else if ((d / 2) < (m / 2))
		d = m;

	not2d = ~d & mask;

	ret = regmap_update_bits(rcg->clkr.regmap, RCG_D_OFFSET(rcg), mask,
				 not2d);
	if (ret)
		return ret;

	return update_config(rcg);
}
800 
/* Standard RCG2 ops: frequency table lookup rounding up (CEIL). */
const struct clk_ops clk_rcg2_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_rate,
	.set_rate = clk_rcg2_set_rate,
	.set_rate_and_parent = clk_rcg2_set_rate_and_parent,
	.get_duty_cycle = clk_rcg2_get_duty_cycle,
	.set_duty_cycle = clk_rcg2_set_duty_cycle,
};
EXPORT_SYMBOL_GPL(clk_rcg2_ops);
813 
/* General-purpose RCG2 ops: m/n/pre_div computed on the fly, no freq table. */
const struct clk_ops clk_rcg2_gp_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_gp_rate,
	.set_rate = clk_rcg2_set_gp_rate,
	.get_duty_cycle = clk_rcg2_get_duty_cycle,
	.set_duty_cycle = clk_rcg2_set_duty_cycle,
};
EXPORT_SYMBOL_GPL(clk_rcg2_gp_ops);
825 
/* RCG2 ops with frequency table lookup rounding down (FLOOR). */
const struct clk_ops clk_rcg2_floor_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_floor_rate,
	.set_rate = clk_rcg2_set_floor_rate,
	.set_rate_and_parent = clk_rcg2_set_floor_rate_and_parent,
	.get_duty_cycle = clk_rcg2_get_duty_cycle,
	.set_duty_cycle = clk_rcg2_set_duty_cycle,
};
EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);
838 
/* RCG2 ops backed by a multi-config (freq_multi_tbl) frequency table. */
const struct clk_ops clk_rcg2_fm_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_fm_determine_rate,
	.set_rate = clk_rcg2_fm_set_rate,
	.set_rate_and_parent = clk_rcg2_fm_set_rate_and_parent,
	.get_duty_cycle = clk_rcg2_get_duty_cycle,
	.set_duty_cycle = clk_rcg2_set_duty_cycle,
};
EXPORT_SYMBOL_GPL(clk_rcg2_fm_ops);
851 
/* Mux-only RCG2 ops: rate follows whichever parent is closest to the request. */
const struct clk_ops clk_rcg2_mux_closest_ops = {
	.determine_rate = __clk_mux_determine_rate_closest,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_mux_closest_ops);
858 
/* One m/n fraction (num/den) candidate for the pixel/eDP rate search. */
struct frac_entry {
	int num;	/* numerator (programmed as M) */
	int den;	/* denominator (programmed as N) */
};
863 
/* eDP pixel fractions for a 675 MHz source; comments give the pixel rate. */
static const struct frac_entry frac_table_675m[] = {	/* link rate of 270M */
	{ 52, 295 },	/* 119 M */
	{ 11, 57 },	/* 130.25 M */
	{ 63, 307 },	/* 138.50 M */
	{ 11, 50 },	/* 148.50 M */
	{ 47, 206 },	/* 154 M */
	{ 31, 100 },	/* 205.25 M */
	{ 107, 269 },	/* 268.50 M */
	{ },
};
874 
875 static struct frac_entry frac_table_810m[] = { /* Link rate of 162M */
876 	{ 31, 211 },	/* 119 M */
877 	{ 32, 199 },	/* 130.25 M */
878 	{ 63, 307 },	/* 138.50 M */
879 	{ 11, 60 },	/* 148.50 M */
880 	{ 50, 263 },	/* 154 M */
881 	{ 31, 120 },	/* 205.25 M */
882 	{ 119, 359 },	/* 268.50 M */
883 	{ },
884 };
885 
/*
 * clk_ops::set_rate for eDP pixel clocks: pick the fraction table by the
 * fixed parent rate (810 MHz or 675 MHz), find an m/n entry for which the
 * parent is within 100 kHz of rate * den / num, keep the currently
 * programmed pre-divider and program the fraction.
 */
static int clk_edp_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = *rcg->freq_tbl;
	const struct frac_entry *frac;
	int delta = 100000;	/* acceptable parent-rate mismatch in Hz */
	s64 src_rate = parent_rate;
	s64 request;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div;

	if (src_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	for (; frac->num; frac++) {
		/* Parent rate this fraction would need: rate * den / num */
		request = rate;
		request *= frac->den;
		request = div_s64(request, frac->num);
		if ((src_rate < (request - delta)) ||
		    (src_rate > (request + delta)))
			continue;

		/* Preserve the pre-divider currently in CFG */
		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
				&hid_div);
		f.pre_div = hid_div;
		f.pre_div >>= CFG_SRC_DIV_SHIFT;
		f.pre_div &= mask;
		f.m = frac->num;
		f.n = frac->den;

		return clk_rcg2_configure(rcg, &f);
	}

	return -EINVAL;
}
924 
static int clk_edp_pixel_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Parent index is set statically in frequency table */
	return clk_edp_pixel_set_rate(hw, rate, parent_rate);
}
931 
/*
 * clk_ops::determine_rate for eDP pixel clocks: the parent is forced to the
 * source of the first frequency table entry; the achievable rate is the
 * first m/n fraction whose needed parent rate is within 100 kHz of the
 * actual parent rate, combined with the currently programmed pre-divider.
 */
static int clk_edp_pixel_determine_rate(struct clk_hw *hw,
					struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f = rcg->freq_tbl;
	const struct frac_entry *frac;
	int delta = 100000;	/* acceptable parent-rate mismatch in Hz */
	s64 request;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div;
	/*
	 * NOTE(review): index is not checked for < 0 before use — assumes
	 * the eDP freq table source always maps to a parent; confirm.
	 */
	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);

	/* Force the correct parent */
	req->best_parent_hw = clk_hw_get_parent_by_index(hw, index);
	req->best_parent_rate = clk_hw_get_rate(req->best_parent_hw);

	if (req->best_parent_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	for (; frac->num; frac++) {
		/* Parent rate this fraction would need: rate * den / num */
		request = req->rate;
		request *= frac->den;
		request = div_s64(request, frac->num);
		if ((req->best_parent_rate < (request - delta)) ||
		    (req->best_parent_rate > (request + delta)))
			continue;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
				&hid_div);
		hid_div >>= CFG_SRC_DIV_SHIFT;
		hid_div &= mask;

		req->rate = calc_rate(req->best_parent_rate,
				      frac->num, frac->den,
				      !!frac->den, hid_div);
		return 0;
	}

	return -EINVAL;
}
974 
/* eDP pixel clock ops: fractional m/n search against a fixed parent. */
const struct clk_ops clk_edp_pixel_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_edp_pixel_set_rate,
	.set_rate_and_parent = clk_edp_pixel_set_rate_and_parent,
	.determine_rate = clk_edp_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);
985 
/*
 * clk_ops::determine_rate for byte clocks: the parent is fixed by the first
 * frequency table entry. Round the parent as close as possible to the
 * request and encode the divider as div = 2 * parent / rate - 1 (the
 * half-integer HID encoding), capped at the field width.
 */
static int clk_byte_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f = rcg->freq_tbl;
	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
	unsigned long parent_rate, div;
	u32 mask = BIT(rcg->hid_width) - 1;
	struct clk_hw *p;

	if (req->rate == 0)
		return -EINVAL;

	req->best_parent_hw = p = clk_hw_get_parent_by_index(hw, index);
	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, req->rate);

	div = DIV_ROUND_UP((2 * parent_rate), req->rate) - 1;
	div = min_t(u32, div, mask);

	req->rate = calc_rate(parent_rate, 0, 0, 0, div);

	return 0;
}
1009 
/*
 * clk_ops::set_rate for byte clocks: program only the HID pre-divider
 * (half-integer encoding, 2 * parent / rate - 1); source and M/N come from
 * the first frequency table entry.
 */
static int clk_byte_set_rate(struct clk_hw *hw, unsigned long rate,
			 unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = *rcg->freq_tbl;
	unsigned long div;
	u32 mask = BIT(rcg->hid_width) - 1;

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	f.pre_div = div;

	return clk_rcg2_configure(rcg, &f);
}
1025 
static int clk_byte_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Parent index is set statically in frequency table */
	return clk_byte_set_rate(hw, rate, parent_rate);
}
1032 
/* Byte clock ops: divider-only programming against a fixed table parent. */
const struct clk_ops clk_byte_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte_set_rate,
	.set_rate_and_parent = clk_byte_set_rate_and_parent,
	.determine_rate = clk_byte_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte_ops);
1043 
/*
 * clk_ops::determine_rate for byte2 clocks: like clk_byte_determine_rate()
 * but the parent is taken from req->best_parent_hw (chosen by the core /
 * caller) rather than being fixed by the frequency table.
 */
static int clk_byte2_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	unsigned long parent_rate, div;
	u32 mask = BIT(rcg->hid_width) - 1;
	struct clk_hw *p;
	unsigned long rate = req->rate;

	if (rate == 0)
		return -EINVAL;

	p = req->best_parent_hw;
	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, rate);

	/* Half-integer HID divider encoding: div = 2 * parent / rate - 1 */
	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	req->rate = calc_rate(parent_rate, 0, 0, 0, div);

	return 0;
}
1066 
/*
 * clk_ops::set_rate for byte2 clocks: program the HID pre-divider while
 * keeping whichever source is currently selected in CFG (read back and
 * mapped to its table entry so clk_rcg2_configure() reprograms the same
 * mux value).
 */
static int clk_byte2_set_rate(struct clk_hw *hw, unsigned long rate,
			 unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	unsigned long div;
	int i, num_parents = clk_hw_get_num_parents(hw);
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 cfg;

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	f.pre_div = div;

	/* Keep the currently selected source */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++) {
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			return clk_rcg2_configure(rcg, &f);
		}
	}

	return -EINVAL;
}
1095 
static int clk_byte2_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Read the hardware to determine parent during set_rate */
	return clk_byte2_set_rate(hw, rate, parent_rate);
}
1102 
/* Byte2 clock ops: divider-only programming, parent read back from CFG. */
const struct clk_ops clk_byte2_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte2_set_rate,
	.set_rate_and_parent = clk_byte2_set_rate_and_parent,
	.determine_rate = clk_byte2_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte2_ops);
1113 
/* Candidate m/n fractions tried, in order, for pixel clock rates. */
static const struct frac_entry frac_table_pixel[] = {
	{ 3, 8 },
	{ 2, 9 },
	{ 4, 9 },
	{ 1, 1 },
	{ 2, 3 },
	{ }
};
1122 
/*
 * clk_ops::determine_rate for pixel clocks: try each fraction from
 * frac_table_pixel, asking the parent to round to rate * den / num; accept
 * the first fraction whose rounded parent rate lands within 100 kHz.
 */
static int clk_pixel_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	unsigned long request, src_rate;
	int delta = 100000;	/* acceptable parent-rate mismatch in Hz */
	const struct frac_entry *frac = frac_table_pixel;

	for (; frac->num; frac++) {
		request = (req->rate * frac->den) / frac->num;

		src_rate = clk_hw_round_rate(req->best_parent_hw, request);
		if ((src_rate < (request - delta)) ||
			(src_rate > (request + delta)))
			continue;

		req->best_parent_rate = src_rate;
		req->rate = (src_rate * frac->num) / frac->den;
		return 0;
	}

	return -EINVAL;
}
1145 
/*
 * Program the pixel RCG for @rate using an M/N fraction from
 * frac_table_pixel.  The source and pre-divider currently selected in
 * hardware are read back and reused; only a fraction for which
 * @parent_rate lies within +/- delta of the required rate is programmed.
 */
static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	const struct frac_entry *frac = frac_table_pixel;
	unsigned long request;
	int delta = 100000;	/* tolerance in Hz */
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div, cfg;
	int i, num_parents = clk_hw_get_num_parents(hw);

	/* Identify the source currently selected in hardware */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++)
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			break;
		}

	/* Find a fraction for which parent_rate is close enough */
	for (; frac->num; frac++) {
		request = (rate * frac->den) / frac->num;

		if ((parent_rate < (request - delta)) ||
			(parent_rate > (request + delta)))
			continue;

		/* Keep the pre-divider that is already programmed */
		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
				&hid_div);
		f.pre_div = hid_div;
		f.pre_div >>= CFG_SRC_DIV_SHIFT;
		f.pre_div &= mask;
		f.m = frac->num;
		f.n = frac->den;

		return clk_rcg2_configure(rcg, &f);
	}
	return -EINVAL;
}
1187 
/* @index is unused; clk_pixel_set_rate() reads the source back from CFG */
static int clk_pixel_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate, u8 index)
{
	return clk_pixel_set_rate(hw, rate, parent_rate);
}
1193 
/* Ops for pixel RCGs using M/N fractions from frac_table_pixel */
const struct clk_ops clk_pixel_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_pixel_set_rate,
	.set_rate_and_parent = clk_pixel_set_rate_and_parent,
	.determine_rate = clk_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_pixel_ops);
1204 
/*
 * Determine the best parent and rate for the GFX3D RCG.
 *
 * The RCG ping-pongs between two variable PLLs (p1/p2) so that one can be
 * reprogrammed while the RCG runs off the other.  p0 is a fixed-rate PLL
 * used when the requested rate matches it exactly, and parent index 0 (XO)
 * is selected when the XO rate itself is requested.
 */
static int clk_gfx3d_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct clk_rate_request parent_req = { .min_rate = 0, .max_rate = ULONG_MAX };
	struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw);
	struct clk_hw *xo, *p0, *p1, *p2;
	unsigned long p0_rate;
	u8 mux_div = cgfx->div;
	int ret;

	p0 = cgfx->hws[0];
	p1 = cgfx->hws[1];
	p2 = cgfx->hws[2];
	/*
	 * This function does ping-pong the RCG between PLLs: if we don't
	 * have at least one fixed PLL and two variable ones,
	 * then it's not going to work correctly.
	 */
	if (WARN_ON(!p0 || !p1 || !p2))
		return -EINVAL;

	/* Requesting the XO rate: use XO directly */
	xo = clk_hw_get_parent_by_index(hw, 0);
	if (req->rate == clk_hw_get_rate(xo)) {
		req->best_parent_hw = xo;
		return 0;
	}

	/* Treat an unset divider as divide-by-one */
	if (mux_div == 0)
		mux_div = 1;

	parent_req.rate = req->rate * mux_div;

	/* This has to be a fixed rate PLL */
	p0_rate = clk_hw_get_rate(p0);

	if (parent_req.rate == p0_rate) {
		req->rate = req->best_parent_rate = p0_rate;
		req->best_parent_hw = p0;
		return 0;
	}

	/* Ping-pong: never retune the PLL the RCG is currently running on */
	if (req->best_parent_hw == p0) {
		/* Are we going back to a previously used rate? */
		if (clk_hw_get_rate(p2) == parent_req.rate)
			req->best_parent_hw = p2;
		else
			req->best_parent_hw = p1;
	} else if (req->best_parent_hw == p2) {
		req->best_parent_hw = p1;
	} else {
		req->best_parent_hw = p2;
	}

	/* Intersect the chosen parent's range with our own constraints */
	clk_hw_get_rate_range(req->best_parent_hw,
			      &parent_req.min_rate, &parent_req.max_rate);

	if (req->min_rate > parent_req.min_rate)
		parent_req.min_rate = req->min_rate;

	if (req->max_rate < parent_req.max_rate)
		parent_req.max_rate = req->max_rate;

	parent_req.best_parent_hw = req->best_parent_hw;
	ret = __clk_determine_rate(req->best_parent_hw, &parent_req);
	if (ret)
		return ret;

	req->rate = req->best_parent_rate = parent_req.rate;
	req->rate /= mux_div;

	return 0;
}
1277 
1278 static int clk_gfx3d_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
1279 		unsigned long parent_rate, u8 index)
1280 {
1281 	struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw);
1282 	struct clk_rcg2 *rcg = &cgfx->rcg;
1283 	u32 cfg;
1284 	int ret;
1285 
1286 	cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
1287 	/* On some targets, the GFX3D RCG may need to divide PLL frequency */
1288 	if (cgfx->div > 1)
1289 		cfg |= ((2 * cgfx->div) - 1) << CFG_SRC_DIV_SHIFT;
1290 
1291 	ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
1292 	if (ret)
1293 		return ret;
1294 
1295 	return update_config(rcg);
1296 }
1297 
static int clk_gfx3d_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	/*
	 * We should never get here; clk_gfx3d_determine_rate() should always
	 * make us use a different parent than what we're currently using, so
	 * clk_gfx3d_set_rate_and_parent() should always be called.
	 *
	 * Return success so the overall rate change doesn't fail if the
	 * framework does take this path.
	 */
	return 0;
}
1308 
/* Ops for the GFX3D RCG that ping-pongs between PLL parents */
const struct clk_ops clk_gfx3d_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_gfx3d_set_rate,
	.set_rate_and_parent = clk_gfx3d_set_rate_and_parent,
	.determine_rate = clk_gfx3d_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_gfx3d_ops);
1319 
1320 static int clk_rcg2_set_force_enable(struct clk_hw *hw)
1321 {
1322 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
1323 	const char *name = clk_hw_get_name(hw);
1324 	int ret, count;
1325 
1326 	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
1327 				 CMD_ROOT_EN, CMD_ROOT_EN);
1328 	if (ret)
1329 		return ret;
1330 
1331 	/* wait for RCG to turn ON */
1332 	for (count = 500; count > 0; count--) {
1333 		if (clk_rcg2_is_enabled(hw))
1334 			return 0;
1335 
1336 		udelay(1);
1337 	}
1338 
1339 	pr_err("%s: RCG did not turn on\n", name);
1340 	return -ETIMEDOUT;
1341 }
1342 
/* Drop the force-enable (CMD_ROOT_EN) set by clk_rcg2_set_force_enable() */
static int clk_rcg2_clear_force_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
					CMD_ROOT_EN, 0);
}
1350 
/*
 * Reprogram a shared RCG with @f while it is guaranteed to be running:
 * force-enable it, apply the configuration, then drop the force-enable.
 */
static int
clk_rcg2_shared_force_enable_clear(struct clk_hw *hw, const struct freq_tbl *f)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	ret = clk_rcg2_set_force_enable(hw);
	if (ret)
		return ret;

	ret = clk_rcg2_configure(rcg, f);
	if (ret)
		return ret;

	return clk_rcg2_clear_force_enable(hw);
}
1367 
1368 static int __clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
1369 				      unsigned long parent_rate,
1370 				      enum freq_policy policy)
1371 {
1372 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
1373 	const struct freq_tbl *f;
1374 
1375 	switch (policy) {
1376 	case FLOOR:
1377 		f = qcom_find_freq_floor(rcg->freq_tbl, rate);
1378 		break;
1379 	case CEIL:
1380 		f = qcom_find_freq(rcg->freq_tbl, rate);
1381 		break;
1382 	default:
1383 		return -EINVAL;
1384 	}
1385 
1386 	/*
1387 	 * In case clock is disabled, update the M, N and D registers, cache
1388 	 * the CFG value in parked_cfg and don't hit the update bit of CMD
1389 	 * register.
1390 	 */
1391 	if (!clk_hw_is_enabled(hw))
1392 		return __clk_rcg2_configure(rcg, f, &rcg->parked_cfg);
1393 
1394 	return clk_rcg2_shared_force_enable_clear(hw, f);
1395 }
1396 
/* set_rate for shared RCGs, rounding requests up (CEIL policy) */
static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
				    unsigned long parent_rate)
{
	return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, CEIL);
}
1402 
/*
 * set_rate_and_parent for shared RCGs (CEIL policy); @index is intentionally
 * unused — the table entry looked up by rate drives the configuration.
 */
static int clk_rcg2_shared_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, CEIL);
}
1408 
/* set_rate for shared RCGs, rounding requests down (FLOOR policy) */
static int clk_rcg2_shared_set_floor_rate(struct clk_hw *hw, unsigned long rate,
					  unsigned long parent_rate)
{
	return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, FLOOR);
}
1414 
/*
 * set_rate_and_parent for shared RCGs (FLOOR policy); @index is intentionally
 * unused — the table entry looked up by rate drives the configuration.
 */
static int clk_rcg2_shared_set_floor_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, FLOOR);
}
1420 
/*
 * Enable a shared RCG: force it on, restore the CFG cached while it was
 * parked, latch it with an update, then drop the force-enable so gating is
 * again driven by the downstream hardware feedback.
 */
static int clk_rcg2_shared_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	/*
	 * Force enable the RCG first so it is guaranteed to be running while
	 * the configuration written by clk_rcg2_shared_set_rate() is restored
	 * and latched below.
	 */
	ret = clk_rcg2_set_force_enable(hw);
	if (ret)
		return ret;

	/* Write back the stored configuration corresponding to current rate */
	ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, rcg->parked_cfg);
	if (ret)
		return ret;

	ret = update_config(rcg);
	if (ret)
		return ret;

	return clk_rcg2_clear_force_enable(hw);
}
1445 
/*
 * Disable a shared RCG by parking it on the always-on safe source; the
 * active CFG is cached in parked_cfg first so that the enable/recalc/
 * get_parent paths keep operating on the pre-park configuration.
 */
static void clk_rcg2_shared_disable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	/*
	 * Store current configuration as switching to safe source would clear
	 * the SRC and DIV of CFG register
	 */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &rcg->parked_cfg);

	/*
	 * Park the RCG at a safe configuration - sourced off of safe source.
	 * Force enable and disable the RCG while configuring it to safeguard
	 * against any update signal coming from the downstream clock.
	 * The current parent is still prepared and enabled at this point, and
	 * the safe source is always on while application processor subsystem
	 * is online. Therefore, the RCG can safely switch its parent.
	 */
	clk_rcg2_set_force_enable(hw);

	regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
		     rcg->safe_src_index << CFG_SRC_SEL_SHIFT);

	update_config(rcg);

	clk_rcg2_clear_force_enable(hw);
}
1473 
1474 static u8 clk_rcg2_shared_get_parent(struct clk_hw *hw)
1475 {
1476 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
1477 
1478 	/* If the shared rcg is parked use the cached cfg instead */
1479 	if (!clk_hw_is_enabled(hw))
1480 		return __clk_rcg2_get_parent(hw, rcg->parked_cfg);
1481 
1482 	return clk_rcg2_get_parent(hw);
1483 }
1484 
1485 static int clk_rcg2_shared_set_parent(struct clk_hw *hw, u8 index)
1486 {
1487 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
1488 
1489 	/* If the shared rcg is parked only update the cached cfg */
1490 	if (!clk_hw_is_enabled(hw)) {
1491 		rcg->parked_cfg &= ~CFG_SRC_SEL_MASK;
1492 		rcg->parked_cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
1493 
1494 		return 0;
1495 	}
1496 
1497 	return clk_rcg2_set_parent(hw, index);
1498 }
1499 
1500 static unsigned long
1501 clk_rcg2_shared_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
1502 {
1503 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
1504 
1505 	/* If the shared rcg is parked use the cached cfg instead */
1506 	if (!clk_hw_is_enabled(hw))
1507 		return __clk_rcg2_recalc_rate(hw, parent_rate, rcg->parked_cfg);
1508 
1509 	return clk_rcg2_recalc_rate(hw, parent_rate);
1510 }
1511 
/* .init op for shared RCGs: cache the boot configuration and park the RCG */
static int clk_rcg2_shared_init(struct clk_hw *hw)
{
	/*
	 * This does a few things:
	 *
	 *  1. Sets rcg->parked_cfg to reflect the value at probe so that the
	 *     proper parent is reported from clk_rcg2_shared_get_parent().
	 *
	 *  2. Clears the force enable bit of the RCG because we rely on child
	 *     clks (branches) to turn the RCG on/off with a hardware feedback
	 *     mechanism and only set the force enable bit in the RCG when we
	 *     want to make sure the clk stays on for parent switches or
	 *     parking.
	 *
	 *  3. Parks shared RCGs on the safe source at registration because we
	 *     can't be certain that the parent clk will stay on during boot,
	 *     especially if the parent is shared. If this RCG is enabled at
	 *     boot, and the parent is turned off, the RCG will get stuck on. A
	 *     GDSC can wedge if is turned on and the RCG is stuck on because
	 *     the GDSC's controller will hang waiting for the clk status to
	 *     toggle on when it never does.
	 *
	 * The safest option here is to "park" the RCG at init so that the clk
	 * can never get stuck on or off. This ensures the GDSC can't get
	 * wedged.
	 */
	clk_rcg2_shared_disable(hw);

	return 0;
}
1542 
/* Ops for shared RCGs: parked on the safe source whenever the clk is off */
const struct clk_ops clk_rcg2_shared_ops = {
	.init = clk_rcg2_shared_init,
	.enable = clk_rcg2_shared_enable,
	.disable = clk_rcg2_shared_disable,
	.get_parent = clk_rcg2_shared_get_parent,
	.set_parent = clk_rcg2_shared_set_parent,
	.recalc_rate = clk_rcg2_shared_recalc_rate,
	.determine_rate = clk_rcg2_determine_rate,
	.set_rate = clk_rcg2_shared_set_rate,
	.set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);
1555 
/*
 * Like clk_rcg2_shared_ops but with FLOOR rate rounding.  Note there is no
 * .init here, so these RCGs are not parked at registration.
 */
const struct clk_ops clk_rcg2_shared_floor_ops = {
	.enable = clk_rcg2_shared_enable,
	.disable = clk_rcg2_shared_disable,
	.get_parent = clk_rcg2_shared_get_parent,
	.set_parent = clk_rcg2_shared_set_parent,
	.recalc_rate = clk_rcg2_shared_recalc_rate,
	.determine_rate = clk_rcg2_determine_floor_rate,
	.set_rate = clk_rcg2_shared_set_floor_rate,
	.set_rate_and_parent = clk_rcg2_shared_set_floor_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_shared_floor_ops);
1567 
/*
 * .init op that only snapshots the boot-time CFG into parked_cfg, without
 * reparenting or reprogramming the RCG.
 */
static int clk_rcg2_shared_no_init_park(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	/*
	 * Read the config register so that the parent is properly mapped at
	 * registration time.
	 */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &rcg->parked_cfg);

	return 0;
}
1580 
1581 /*
1582  * Like clk_rcg2_shared_ops but skip the init so that the clk frequency is left
1583  * unchanged at registration time.
1584  */
1585 const struct clk_ops clk_rcg2_shared_no_init_park_ops = {
1586 	.init = clk_rcg2_shared_no_init_park,
1587 	.enable = clk_rcg2_shared_enable,
1588 	.disable = clk_rcg2_shared_disable,
1589 	.get_parent = clk_rcg2_shared_get_parent,
1590 	.set_parent = clk_rcg2_shared_set_parent,
1591 	.recalc_rate = clk_rcg2_shared_recalc_rate,
1592 	.determine_rate = clk_rcg2_determine_rate,
1593 	.set_rate = clk_rcg2_shared_set_rate,
1594 	.set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent,
1595 };
1596 EXPORT_SYMBOL_GPL(clk_rcg2_shared_no_init_park_ops);
1597 
1598 /* Common APIs to be used for DFS based RCGR */
/*
 * Decode DFS perf level @l of @hw into frequency table entry @f by reading
 * the per-level DFSR registers: pre-divider and source from SE_PERF_DFSR,
 * and the M/N pair from SE_PERF_M_DFSR/SE_PERF_N_DFSR when an MND mode is
 * programmed.
 */
static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l,
				       struct freq_tbl *f)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct clk_hw *p;
	unsigned long prate = 0;
	u32 val, mask, cfg, mode, src;
	int i, num_parents;

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(l), &cfg);

	/* A zero divider field means divide-by-one */
	mask = BIT(rcg->hid_width) - 1;
	f->pre_div = 1;
	if (cfg & mask)
		f->pre_div = cfg & mask;

	src = cfg & CFG_SRC_SEL_MASK;
	src >>= CFG_SRC_SEL_SHIFT;

	/* Map the hardware source back to a parent and grab its rate */
	num_parents = clk_hw_get_num_parents(hw);
	for (i = 0; i < num_parents; i++) {
		if (src == rcg->parent_map[i].cfg) {
			f->src = rcg->parent_map[i].src;
			p = clk_hw_get_parent_by_index(&rcg->clkr.hw, i);
			prate = clk_hw_get_rate(p);
		}
	}

	/* MND mode: recover M and N (the register holds ~(N - M)) */
	mode = cfg & CFG_MODE_MASK;
	mode >>= CFG_MODE_SHIFT;
	if (mode) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_M_DFSR(l),
			    &val);
		val &= mask;
		f->m = val;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_N_DFSR(l),
			    &val);
		val = ~val;
		val &= mask;
		val += f->m;
		f->n = val;
	}

	/* prate stays 0 (and so does f->freq) if no parent matched above */
	f->freq = calc_rate(prate, f->m, f->n, mode, f->pre_div);
}
1646 
/*
 * Build rcg->freq_tbl by decoding all MAX_PERF_LEVEL DFS perf levels from
 * hardware.  Returns 0 on success or -ENOMEM on allocation failure.
 */
static int clk_rcg2_dfs_populate_freq_table(struct clk_rcg2 *rcg)
{
	struct freq_tbl *freq_tbl;
	int i;

	/* Allocate space for 1 extra since table is NULL terminated */
	freq_tbl = kzalloc_objs(*freq_tbl, MAX_PERF_LEVEL + 1);
	if (!freq_tbl)
		return -ENOMEM;
	rcg->freq_tbl = freq_tbl;

	for (i = 0; i < MAX_PERF_LEVEL; i++)
		clk_rcg2_dfs_populate_freq(&rcg->clkr.hw, i, freq_tbl + i);

	return 0;
}
1663 
1664 static int clk_rcg2_dfs_determine_rate(struct clk_hw *hw,
1665 				   struct clk_rate_request *req)
1666 {
1667 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
1668 	int ret;
1669 
1670 	if (!rcg->freq_tbl) {
1671 		ret = clk_rcg2_dfs_populate_freq_table(rcg);
1672 		if (ret) {
1673 			pr_err("Failed to update DFS tables for %s\n",
1674 					clk_hw_get_name(hw));
1675 			return ret;
1676 		}
1677 	}
1678 
1679 	return clk_rcg2_determine_rate(hw, req);
1680 }
1681 
/*
 * Recalculate the rate of a DFS-controlled RCG.  The active perf level is
 * read from bits [4:1] of SE_CMD_DFSR; if the frequency table has already
 * been populated, the cached rate for that level is returned, otherwise the
 * level's DFSR registers are decoded directly.
 */
static unsigned long
clk_rcg2_dfs_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 level, mask, cfg, m = 0, n = 0, mode, pre_div;

	regmap_read(rcg->clkr.regmap,
		    rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &level);
	level &= GENMASK(4, 1);
	level >>= 1;

	if (rcg->freq_tbl)
		return rcg->freq_tbl[level].freq;

	/*
	 * Assume that parent_rate is actually the parent because
	 * we can't do any better at figuring it out when the table
	 * hasn't been populated yet. We only populate the table
	 * in determine_rate because we can't guarantee the parents
	 * will be registered with the framework until then.
	 */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(level),
		    &cfg);

	/* A zero divider field means divide-by-one */
	mask = BIT(rcg->hid_width) - 1;
	pre_div = 1;
	if (cfg & mask)
		pre_div = cfg & mask;

	/* MND mode: recover M and N (the register holds ~(N - M)) */
	mode = cfg & CFG_MODE_MASK;
	mode >>= CFG_MODE_SHIFT;
	if (mode) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap,
			    rcg->cmd_rcgr + SE_PERF_M_DFSR(level), &m);
		m &= mask;

		regmap_read(rcg->clkr.regmap,
			    rcg->cmd_rcgr + SE_PERF_N_DFSR(level), &n);
		n = ~n;
		n &= mask;
		n += m;
	}

	return calc_rate(parent_rate, m, n, mode, pre_div);
}
1728 
/* Ops installed by clk_rcg2_enable_dfs(); no set_rate - DFS hw owns the rate */
static const struct clk_ops clk_rcg2_dfs_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.determine_rate = clk_rcg2_dfs_determine_rate,
	.recalc_rate = clk_rcg2_dfs_recalc_rate,
};
1735 
1736 static int clk_rcg2_enable_dfs(const struct clk_rcg_dfs_data *data,
1737 			       struct regmap *regmap)
1738 {
1739 	struct clk_rcg2 *rcg = data->rcg;
1740 	struct clk_init_data *init = data->init;
1741 	u32 val;
1742 	int ret;
1743 
1744 	ret = regmap_read(regmap, rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &val);
1745 	if (ret)
1746 		return -EINVAL;
1747 
1748 	if (!(val & SE_CMD_DFS_EN))
1749 		return 0;
1750 
1751 	/*
1752 	 * Rate changes with consumer writing a register in
1753 	 * their own I/O region
1754 	 */
1755 	init->flags |= CLK_GET_RATE_NOCACHE;
1756 	init->ops = &clk_rcg2_dfs_ops;
1757 
1758 	rcg->freq_tbl = NULL;
1759 
1760 	return 0;
1761 }
1762 
1763 int qcom_cc_register_rcg_dfs(struct regmap *regmap,
1764 			     const struct clk_rcg_dfs_data *rcgs, size_t len)
1765 {
1766 	int i, ret;
1767 
1768 	for (i = 0; i < len; i++) {
1769 		ret = clk_rcg2_enable_dfs(&rcgs[i], regmap);
1770 		if (ret)
1771 			return ret;
1772 	}
1773 
1774 	return 0;
1775 }
1776 EXPORT_SYMBOL_GPL(qcom_cc_register_rcg_dfs);
1777 
/*
 * Program the DP RCG for @rate.  M/N come from
 * rational_best_approximation(); note the output pointers are deliberately
 * swapped so that num/den approximates rate/parent_rate.  The source and
 * pre-divider currently selected in hardware are read back and preserved.
 */
static int clk_rcg2_dp_set_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div, cfg;
	int i, num_parents = clk_hw_get_num_parents(hw);
	unsigned long num, den;

	rational_best_approximation(parent_rate, rate,
			GENMASK(rcg->mnd_width - 1, 0),
			GENMASK(rcg->mnd_width - 1, 0), &den, &num);

	if (!num || !den)
		return -EINVAL;

	/* Reuse the source currently selected in hardware */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	hid_div = cfg;
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++) {
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			break;
		}
	}

	/* Keep the pre-divider that is already programmed */
	f.pre_div = hid_div;
	f.pre_div >>= CFG_SRC_DIV_SHIFT;
	f.pre_div &= mask;

	/* num == den is a 1:1 ratio - program M = N = 0 instead */
	if (num != den) {
		f.m = num;
		f.n = den;
	} else {
		f.m = 0;
		f.n = 0;
	}

	return clk_rcg2_configure(rcg, &f);
}
1821 
/* @index is unused; clk_rcg2_dp_set_rate() reads the source back from CFG */
static int clk_rcg2_dp_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return clk_rcg2_dp_set_rate(hw, rate, parent_rate);
}
1827 
1828 static int clk_rcg2_dp_determine_rate(struct clk_hw *hw,
1829 				struct clk_rate_request *req)
1830 {
1831 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
1832 	unsigned long num, den;
1833 	u64 tmp;
1834 
1835 	/* Parent rate is a fixed phy link rate */
1836 	rational_best_approximation(req->best_parent_rate, req->rate,
1837 			GENMASK(rcg->mnd_width - 1, 0),
1838 			GENMASK(rcg->mnd_width - 1, 0), &den, &num);
1839 
1840 	if (!num || !den)
1841 		return -EINVAL;
1842 
1843 	tmp = req->best_parent_rate * num;
1844 	do_div(tmp, den);
1845 	req->rate = tmp;
1846 
1847 	return 0;
1848 }
1849 
/* Ops for DP RCGs driven as an M/N fraction of a fixed PHY link rate */
const struct clk_ops clk_dp_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_rcg2_dp_set_rate,
	.set_rate_and_parent = clk_rcg2_dp_set_rate_and_parent,
	.determine_rate = clk_rcg2_dp_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_dp_ops);
1860