xref: /linux/drivers/clk/qcom/clk-rcg2.c (revision 53597deca0e38c30e6cd4ba2114fa42d2bcd85bb)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
4  */
5 
6 #include <linux/kernel.h>
7 #include <linux/bitops.h>
8 #include <linux/err.h>
9 #include <linux/bug.h>
10 #include <linux/export.h>
11 #include <linux/clk.h>
12 #include <linux/clk-provider.h>
13 #include <linux/delay.h>
14 #include <linux/rational.h>
15 #include <linux/regmap.h>
16 #include <linux/math64.h>
17 #include <linux/gcd.h>
18 #include <linux/minmax.h>
19 #include <linux/slab.h>
20 
21 #include <asm/div64.h>
22 
23 #include "clk-rcg.h"
24 #include "common.h"
25 
26 #define CMD_REG			0x0
27 #define CMD_UPDATE		BIT(0)
28 #define CMD_ROOT_EN		BIT(1)
29 #define CMD_DIRTY_CFG		BIT(4)
30 #define CMD_DIRTY_N		BIT(5)
31 #define CMD_DIRTY_M		BIT(6)
32 #define CMD_DIRTY_D		BIT(7)
33 #define CMD_ROOT_OFF		BIT(31)
34 
35 #define CFG_REG			0x4
36 #define CFG_SRC_DIV_SHIFT	0
37 #define CFG_SRC_DIV_LENGTH	8
38 #define CFG_SRC_SEL_SHIFT	8
39 #define CFG_SRC_SEL_MASK	(0x7 << CFG_SRC_SEL_SHIFT)
40 #define CFG_MODE_SHIFT		12
41 #define CFG_MODE_MASK		(0x3 << CFG_MODE_SHIFT)
42 #define CFG_MODE_DUAL_EDGE	(0x2 << CFG_MODE_SHIFT)
43 #define CFG_HW_CLK_CTRL_MASK	BIT(20)
44 
45 #define M_REG			0x8
46 #define N_REG			0xc
47 #define D_REG			0x10
48 
49 #define RCG_CFG_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + CFG_REG)
50 #define RCG_M_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + M_REG)
51 #define RCG_N_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + N_REG)
52 #define RCG_D_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + D_REG)
53 
54 /* Dynamic Frequency Scaling */
55 #define MAX_PERF_LEVEL		8
56 #define SE_CMD_DFSR_OFFSET	0x14
57 #define SE_CMD_DFS_EN		BIT(0)
58 #define SE_PERF_DFSR(level)	(0x1c + 0x4 * (level))
59 #define SE_PERF_M_DFSR(level)	(0x5c + 0x4 * (level))
60 #define SE_PERF_N_DFSR(level)	(0x9c + 0x4 * (level))
61 
62 enum freq_policy {
63 	FLOOR,
64 	CEIL,
65 };
66 
67 static int clk_rcg2_is_enabled(struct clk_hw *hw)
68 {
69 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
70 	u32 cmd;
71 	int ret;
72 
73 	ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
74 	if (ret)
75 		return ret;
76 
77 	return (cmd & CMD_ROOT_OFF) == 0;
78 }
79 
80 static u8 __clk_rcg2_get_parent(struct clk_hw *hw, u32 cfg)
81 {
82 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
83 	int num_parents = clk_hw_get_num_parents(hw);
84 	int i;
85 
86 	cfg &= CFG_SRC_SEL_MASK;
87 	cfg >>= CFG_SRC_SEL_SHIFT;
88 
89 	for (i = 0; i < num_parents; i++)
90 		if (cfg == rcg->parent_map[i].cfg)
91 			return i;
92 
93 	pr_debug("%s: Clock %s has invalid parent, using default.\n",
94 		 __func__, clk_hw_get_name(hw));
95 	return 0;
96 }
97 
98 static u8 clk_rcg2_get_parent(struct clk_hw *hw)
99 {
100 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
101 	u32 cfg;
102 	int ret;
103 
104 	ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
105 	if (ret) {
106 		pr_debug("%s: Unable to read CFG register for %s\n",
107 			 __func__, clk_hw_get_name(hw));
108 		return 0;
109 	}
110 
111 	return __clk_rcg2_get_parent(hw, cfg);
112 }
113 
114 static int update_config(struct clk_rcg2 *rcg)
115 {
116 	int count, ret;
117 	u32 cmd;
118 	struct clk_hw *hw = &rcg->clkr.hw;
119 	const char *name = clk_hw_get_name(hw);
120 
121 	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
122 				 CMD_UPDATE, CMD_UPDATE);
123 	if (ret)
124 		return ret;
125 
126 	/* Wait for update to take effect */
127 	for (count = 500; count > 0; count--) {
128 		ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
129 		if (ret)
130 			return ret;
131 		if (!(cmd & CMD_UPDATE))
132 			return 0;
133 		udelay(1);
134 	}
135 
136 	WARN(1, "%s: rcg didn't update its configuration.", name);
137 	return -EBUSY;
138 }
139 
140 static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
141 {
142 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
143 	int ret;
144 	u32 cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
145 
146 	ret = regmap_update_bits(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg),
147 				 CFG_SRC_SEL_MASK, cfg);
148 	if (ret)
149 		return ret;
150 
151 	return update_config(rcg);
152 }
153 
154 /**
155  * convert_to_reg_val() - Convert divisor values to hardware values.
156  *
157  * @f: Frequency table with pure m/n/pre_div parameters.
158  */
159 static void convert_to_reg_val(struct freq_tbl *f)
160 {
161 	f->pre_div *= 2;
162 	f->pre_div -= 1;
163 }
164 
165 /**
166  * calc_rate() - Calculate rate based on m/n:d values
167  *
168  * @rate: Parent rate.
169  * @m: Multiplier.
170  * @n: Divisor.
171  * @mode: Use zero to ignore m/n calculation.
172  * @hid_div: Pre divisor register value. Pre divisor value
173  *                  relates to hid_div as pre_div = (hid_div + 1) / 2.
174  *
175  * Return calculated rate according to formula:
176  *
177  *          parent_rate     m
178  *   rate = ----------- x  ---
179  *            pre_div       n
180  */
181 static unsigned long
182 calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
183 {
184 	if (hid_div)
185 		rate = mult_frac(rate, 2, hid_div + 1);
186 
187 	if (mode)
188 		rate = mult_frac(rate, m, n);
189 
190 	return rate;
191 }
192 
/*
 * Compute the output rate implied by a CFG register image and the current
 * M/N counter registers, relative to @parent_rate.
 */
static unsigned long
__clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate, u32 cfg)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 hid_div, m = 0, n = 0, mode = 0, mask;

	if (rcg->mnd_width) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
		m &= mask;
		regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &n);
		/* N register holds ~(n - m); invert and add m to recover n */
		n = ~n;
		n &= mask;
		n += m;
		/* Non-zero mode field means the M/N counter is active */
		mode = cfg & CFG_MODE_MASK;
		mode >>= CFG_MODE_SHIFT;
	}

	mask = BIT(rcg->hid_width) - 1;
	hid_div = cfg >> CFG_SRC_DIV_SHIFT;
	hid_div &= mask;

	return calc_rate(parent_rate, m, n, mode, hid_div);
}
217 
218 static unsigned long
219 clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
220 {
221 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
222 	u32 cfg;
223 
224 	regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
225 
226 	return __clk_rcg2_recalc_rate(hw, parent_rate, cfg);
227 }
228 
/*
 * Select a freq_tbl entry for req->rate (rounding up for CEIL, down for
 * FLOOR) and fill in the resulting parent and parent rate in @req.  When
 * CLK_SET_RATE_PARENT is set, back-compute the parent rate needed to hit
 * the table frequency from the entry's pre_div and m/n values.
 */
static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
				    struct clk_rate_request *req,
				    enum freq_policy policy)
{
	unsigned long clk_flags, rate = req->rate;
	struct clk_hw *p;
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int index;

	switch (policy) {
	case FLOOR:
		f = qcom_find_freq_floor(f, rate);
		break;
	case CEIL:
		f = qcom_find_freq(f, rate);
		break;
	default:
		return -EINVAL;
	}

	if (!f)
		return -EINVAL;

	/* Map the table's source id to one of this clock's parents */
	index = qcom_find_src_index(hw, rcg->parent_map, f->src);
	if (index < 0)
		return index;

	clk_flags = clk_hw_get_flags(hw);
	p = clk_hw_get_parent_by_index(hw, index);
	if (!p)
		return -EINVAL;

	if (clk_flags & CLK_SET_RATE_PARENT) {
		rate = f->freq;
		if (f->pre_div) {
			/* Undo the pre divider: parent = rate * (pre_div + 1) / 2 */
			if (!rate)
				rate = req->rate;
			rate /= 2;
			rate *= f->pre_div + 1;
		}

		if (f->n) {
			/* Undo the M/N counter: parent = rate * n / m */
			u64 tmp = rate;
			tmp = tmp * f->n;
			do_div(tmp, f->m);
			rate = tmp;
		}
	} else {
		rate = clk_hw_get_rate(p);
	}
	req->best_parent_hw = p;
	req->best_parent_rate = rate;
	req->rate = f->freq;

	return 0;
}
285 
286 static const struct freq_conf *
287 __clk_rcg2_select_conf(struct clk_hw *hw, const struct freq_multi_tbl *f,
288 		       unsigned long req_rate)
289 {
290 	unsigned long rate_diff, best_rate_diff = ULONG_MAX;
291 	const struct freq_conf *conf, *best_conf = NULL;
292 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
293 	const char *name = clk_hw_get_name(hw);
294 	unsigned long parent_rate, rate;
295 	struct clk_hw *p;
296 	int index, i;
297 
298 	/* Exit early if only one config is defined */
299 	if (f->num_confs == 1) {
300 		best_conf = f->confs;
301 		goto exit;
302 	}
303 
304 	/* Search in each provided config the one that is near the wanted rate */
305 	for (i = 0, conf = f->confs; i < f->num_confs; i++, conf++) {
306 		index = qcom_find_src_index(hw, rcg->parent_map, conf->src);
307 		if (index < 0)
308 			continue;
309 
310 		p = clk_hw_get_parent_by_index(hw, index);
311 		if (!p)
312 			continue;
313 
314 		parent_rate = clk_hw_get_rate(p);
315 		rate = calc_rate(parent_rate, conf->n, conf->m, conf->n, conf->pre_div);
316 
317 		if (rate == req_rate) {
318 			best_conf = conf;
319 			goto exit;
320 		}
321 
322 		rate_diff = abs_diff(req_rate, rate);
323 		if (rate_diff < best_rate_diff) {
324 			best_rate_diff = rate_diff;
325 			best_conf = conf;
326 		}
327 	}
328 
329 	/*
330 	 * Very unlikely. Warn if we couldn't find a correct config
331 	 * due to parent not found in every config.
332 	 */
333 	if (unlikely(!best_conf)) {
334 		WARN(1, "%s: can't find a configuration for rate %lu\n",
335 		     name, req_rate);
336 		return ERR_PTR(-EINVAL);
337 	}
338 
339 exit:
340 	return best_conf;
341 }
342 
/*
 * Multi-configuration variant of _freq_tbl_determine_rate(): find the
 * freq_multi_tbl entry for req->rate, pick the best freq_conf for it,
 * then fill in the parent and (for CLK_SET_RATE_PARENT) the parent rate
 * back-computed from that configuration's dividers.
 */
static int _freq_tbl_fm_determine_rate(struct clk_hw *hw, const struct freq_multi_tbl *f,
				       struct clk_rate_request *req)
{
	unsigned long clk_flags, rate = req->rate;
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_conf *conf;
	struct clk_hw *p;
	int index;

	f = qcom_find_freq_multi(f, rate);
	if (!f || !f->confs)
		return -EINVAL;

	conf = __clk_rcg2_select_conf(hw, f, rate);
	if (IS_ERR(conf))
		return PTR_ERR(conf);
	index = qcom_find_src_index(hw, rcg->parent_map, conf->src);
	if (index < 0)
		return index;

	clk_flags = clk_hw_get_flags(hw);
	p = clk_hw_get_parent_by_index(hw, index);
	if (!p)
		return -EINVAL;

	if (clk_flags & CLK_SET_RATE_PARENT) {
		rate = f->freq;
		if (conf->pre_div) {
			/* Undo the pre divider: parent = rate * (pre_div + 1) / 2 */
			if (!rate)
				rate = req->rate;
			rate /= 2;
			rate *= conf->pre_div + 1;
		}

		if (conf->n) {
			/* Undo the M/N counter: parent = rate * n / m */
			u64 tmp = rate;

			tmp = tmp * conf->n;
			do_div(tmp, conf->m);
			rate = tmp;
		}
	} else {
		rate = clk_hw_get_rate(p);
	}

	req->best_parent_hw = p;
	req->best_parent_rate = rate;
	req->rate = f->freq;

	return 0;
}
394 
/* determine_rate op: round the request up to the next freq_tbl entry. */
static int clk_rcg2_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, CEIL);
}
402 
/* determine_rate op: round the request down to the previous freq_tbl entry. */
static int clk_rcg2_determine_floor_rate(struct clk_hw *hw,
					 struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR);
}
410 
/* determine_rate op for clocks using a multi-configuration frequency table. */
static int clk_rcg2_fm_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return _freq_tbl_fm_determine_rate(hw, rcg->freq_multi_tbl, req);
}
418 
/**
 * clk_rcg2_split_div() - Split multiplier that doesn't fit in n neither in pre_div.
 *
 * @multiplier: Multiplier to split between n and pre_div.
 * @pre_div: Pointer to pre divisor value.
 * @n: Pointer to n divisor value.
 * @pre_div_max: Pre divisor maximum value.
 *
 * Saturates pre_div at its maximum and folds the remaining factor
 * (multiplier * pre_div / pre_div_max) into n.
 */
static inline void clk_rcg2_split_div(int multiplier, unsigned int *pre_div,
				      u16 *n, unsigned int pre_div_max)
{
	*n = mult_frac(multiplier * *n, *pre_div, pre_div_max);
	*pre_div = pre_div_max;
}
433 
/*
 * Derive "pure" m, n and pre_div values (not register encodings) so that
 * parent_rate * m / (n * pre_div) approximates @rate as closely as the
 * divider widths allow.  The ratio rate/parent_rate is first reduced by
 * its GCD, then the prime factors of the scaled parent rate are
 * distributed between n (preferred) and pre_div.
 */
static void clk_rcg2_calc_mnd(u64 parent_rate, u64 rate, struct freq_tbl *f,
			unsigned int mnd_max, unsigned int pre_div_max)
{
	int i = 2;
	unsigned int pre_div = 1;
	unsigned long rates_gcd, scaled_parent_rate;
	u16 m, n = 1, n_candidate = 1, n_max;

	rates_gcd = gcd(parent_rate, rate);
	m = div64_u64(rate, rates_gcd);
	scaled_parent_rate = div64_u64(parent_rate, rates_gcd);
	while (scaled_parent_rate > (mnd_max + m) * pre_div_max) {
		// we're exceeding divisor's range, trying lower scale.
		if (m > 1) {
			m--;
			scaled_parent_rate = mult_frac(scaled_parent_rate, m, (m + 1));
		} else {
			// cannot lower scale, just set max divisor values.
			f->n = mnd_max + m;
			f->pre_div = pre_div_max;
			f->m = m;
			return;
		}
	}

	n_max = m + mnd_max;

	/* Trial-divide by each integer factor and assign it to n or pre_div */
	while (scaled_parent_rate > 1) {
		while (scaled_parent_rate % i == 0) {
			n_candidate *= i;
			if (n_candidate < n_max)
				n = n_candidate;
			else if (pre_div * i < pre_div_max)
				pre_div *= i;
			else
				clk_rcg2_split_div(i, &pre_div, &n, pre_div_max);

			scaled_parent_rate /= i;
		}
		i++;
	}

	f->m = m;
	f->n = n;
	/* pre_div == 1 means "no pre divider"; encode it as 0 */
	f->pre_div = pre_div > 1 ? pre_div : 0;
}
480 
481 static int clk_rcg2_determine_gp_rate(struct clk_hw *hw,
482 				   struct clk_rate_request *req)
483 {
484 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
485 	struct freq_tbl f_tbl = {}, *f = &f_tbl;
486 	int mnd_max = BIT(rcg->mnd_width) - 1;
487 	int hid_max = BIT(rcg->hid_width) - 1;
488 	struct clk_hw *parent;
489 	u64 parent_rate;
490 
491 	parent = clk_hw_get_parent(hw);
492 	parent_rate = clk_get_rate(parent->clk);
493 	if (!parent_rate)
494 		return -EINVAL;
495 
496 	clk_rcg2_calc_mnd(parent_rate, req->rate, f, mnd_max, hid_max / 2);
497 	convert_to_reg_val(f);
498 	req->rate = calc_rate(parent_rate, f->m, f->n, f->n, f->pre_div);
499 
500 	return 0;
501 }
502 
503 static int __clk_rcg2_configure_parent(struct clk_rcg2 *rcg, u8 src, u32 *_cfg)
504 {
505 	struct clk_hw *hw = &rcg->clkr.hw;
506 	int index = qcom_find_src_index(hw, rcg->parent_map, src);
507 
508 	if (index < 0)
509 		return index;
510 
511 	*_cfg &= ~CFG_SRC_SEL_MASK;
512 	*_cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
513 
514 	return 0;
515 }
516 
/*
 * Program the M, N and D registers for @f and fold the divider/mode bits
 * into the caller's CFG image (*_cfg); the caller writes CFG and pulses
 * the update bit.
 */
static int __clk_rcg2_configure_mnd(struct clk_rcg2 *rcg, const struct freq_tbl *f,
				u32 *_cfg)
{
	u32 cfg, mask, d_val, not2d_val, n_minus_m;
	int ret;

	if (rcg->mnd_width && f->n) {
		mask = BIT(rcg->mnd_width) - 1;
		ret = regmap_update_bits(rcg->clkr.regmap,
				RCG_M_OFFSET(rcg), mask, f->m);
		if (ret)
			return ret;

		/* Hardware expects the N register as ~(n - m) */
		ret = regmap_update_bits(rcg->clkr.regmap,
				RCG_N_OFFSET(rcg), mask, ~(f->n - f->m));
		if (ret)
			return ret;

		/* Calculate 2d value */
		d_val = f->n;

		n_minus_m = f->n - f->m;
		n_minus_m *= 2;

		/* Keep 2d within the range the duty-cycle logic supports */
		d_val = clamp_t(u32, d_val, f->m, n_minus_m);
		not2d_val = ~d_val & mask;

		ret = regmap_update_bits(rcg->clkr.regmap,
				RCG_D_OFFSET(rcg), mask, not2d_val);
		if (ret)
			return ret;
	}

	mask = BIT(rcg->hid_width) - 1;
	mask |= CFG_MODE_MASK | CFG_HW_CLK_CTRL_MASK;
	cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
	/* Enable dual-edge M/N mode only when the counter actually divides */
	if (rcg->mnd_width && f->n && (f->m != f->n))
		cfg |= CFG_MODE_DUAL_EDGE;
	if (rcg->hw_clk_ctrl)
		cfg |= CFG_HW_CLK_CTRL_MASK;

	*_cfg &= ~mask;
	*_cfg |= cfg;

	return 0;
}
563 
564 static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f,
565 				u32 *_cfg)
566 {
567 	int ret;
568 
569 	ret = __clk_rcg2_configure_parent(rcg, f->src, _cfg);
570 	if (ret)
571 		return ret;
572 
573 	ret = __clk_rcg2_configure_mnd(rcg, f, _cfg);
574 	if (ret)
575 		return ret;
576 
577 	return 0;
578 }
579 
/*
 * Apply a freq_tbl entry to the hardware: read-modify-write CFG (parent
 * and dividers), then latch the new configuration with an update pulse.
 */
static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
	u32 cfg;
	int ret;

	ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
	if (ret)
		return ret;

	ret = __clk_rcg2_configure(rcg, f, &cfg);
	if (ret)
		return ret;

	ret = regmap_write(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), cfg);
	if (ret)
		return ret;

	return update_config(rcg);
}
599 
/*
 * Like clk_rcg2_configure() but leaves the parent selection untouched:
 * only the m/n/pre_div portion of CFG is rewritten.  Used by the
 * general-purpose clock ops, which compute dividers at runtime.
 */
static int clk_rcg2_configure_gp(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
	u32 cfg;
	int ret;

	ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
	if (ret)
		return ret;

	ret = __clk_rcg2_configure_mnd(rcg, f, &cfg);
	if (ret)
		return ret;

	ret = regmap_write(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), cfg);
	if (ret)
		return ret;

	return update_config(rcg);
}
619 
620 static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
621 			       enum freq_policy policy)
622 {
623 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
624 	const struct freq_tbl *f;
625 
626 	switch (policy) {
627 	case FLOOR:
628 		f = qcom_find_freq_floor(rcg->freq_tbl, rate);
629 		break;
630 	case CEIL:
631 		f = qcom_find_freq(rcg->freq_tbl, rate);
632 		break;
633 	default:
634 		return -EINVAL;
635 	}
636 
637 	if (!f)
638 		return -EINVAL;
639 
640 	return clk_rcg2_configure(rcg, f);
641 }
642 
643 static int __clk_rcg2_fm_set_rate(struct clk_hw *hw, unsigned long rate)
644 {
645 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
646 	const struct freq_multi_tbl *f;
647 	const struct freq_conf *conf;
648 	struct freq_tbl f_tbl = {};
649 
650 	f = qcom_find_freq_multi(rcg->freq_multi_tbl, rate);
651 	if (!f || !f->confs)
652 		return -EINVAL;
653 
654 	conf = __clk_rcg2_select_conf(hw, f, rate);
655 	if (IS_ERR(conf))
656 		return PTR_ERR(conf);
657 
658 	f_tbl.freq = f->freq;
659 	f_tbl.src = conf->src;
660 	f_tbl.pre_div = conf->pre_div;
661 	f_tbl.m = conf->m;
662 	f_tbl.n = conf->n;
663 
664 	return clk_rcg2_configure(rcg, &f_tbl);
665 }
666 
/* set_rate op: program the CEIL-matched freq_tbl entry. */
static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
			    unsigned long parent_rate)
{
	return __clk_rcg2_set_rate(hw, rate, CEIL);
}
672 
673 static int clk_rcg2_set_gp_rate(struct clk_hw *hw, unsigned long rate,
674 			    unsigned long parent_rate)
675 {
676 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
677 	int mnd_max = BIT(rcg->mnd_width) - 1;
678 	int hid_max = BIT(rcg->hid_width) - 1;
679 	struct freq_tbl f_tbl = {}, *f = &f_tbl;
680 	int ret;
681 
682 	clk_rcg2_calc_mnd(parent_rate, rate, f, mnd_max, hid_max / 2);
683 	convert_to_reg_val(f);
684 	ret = clk_rcg2_configure_gp(rcg, f);
685 
686 	return ret;
687 }
688 
/* set_rate op: program the FLOOR-matched freq_tbl entry. */
static int clk_rcg2_set_floor_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long parent_rate)
{
	return __clk_rcg2_set_rate(hw, rate, FLOOR);
}
694 
/* set_rate op for clocks using a multi-configuration frequency table. */
static int clk_rcg2_fm_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	return __clk_rcg2_fm_set_rate(hw, rate);
}
700 
/* The parent is implied by the freq_tbl entry, so rate and parent switch together. */
static int clk_rcg2_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_set_rate(hw, rate, CEIL);
}
706 
/* FLOOR variant: the freq_tbl entry carries the parent, so one write does both. */
static int clk_rcg2_set_floor_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_set_rate(hw, rate, FLOOR);
}
712 
/* Multi-conf variant: the selected freq_conf carries the parent source. */
static int clk_rcg2_fm_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_fm_set_rate(hw, rate);
}
718 
/*
 * Report the duty cycle as d/n, decoded from the D register (which holds
 * ~(2 * d)) and the N register (which holds ~(n - m)).  Clocks without an
 * M/N counter, or with all-zero M/N/D registers, run at 50 %.
 */
static int clk_rcg2_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 notn_m, n, m, d, not2d, mask;

	if (!rcg->mnd_width) {
		/* 50 % duty-cycle for Non-MND RCGs */
		duty->num = 1;
		duty->den = 2;
		return 0;
	}

	regmap_read(rcg->clkr.regmap, RCG_D_OFFSET(rcg), &not2d);
	regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
	regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m);

	if (!not2d && !m && !notn_m) {
		/* 50 % duty-cycle always */
		duty->num = 1;
		duty->den = 2;
		return 0;
	}

	mask = BIT(rcg->mnd_width) - 1;

	/* Undo the inverted 2d encoding: d = (~not2d & mask) / 2 */
	d = ~(not2d) & mask;
	d = DIV_ROUND_CLOSEST(d, 2);

	/* Recover n from the inverted (n - m) encoding */
	n = (~(notn_m) + m) & mask;

	duty->num = d;
	duty->den = n;

	return 0;
}
754 
/*
 * Program a new duty cycle by rewriting the D register (encoded as the
 * inverse of 2*d).  Only possible on M/N-capable RCGs that are actually
 * running in M/N mode; the requested ratio is bounded to the range the
 * counter can express.
 */
static int clk_rcg2_set_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 notn_m, n, m, d, not2d, mask, cfg;
	int ret;

	/* Duty-cycle cannot be modified for non-MND RCGs */
	if (!rcg->mnd_width)
		return -EINVAL;

	mask = BIT(rcg->mnd_width) - 1;

	regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m);
	regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
	regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);

	/* Duty-cycle cannot be modified if MND divider is in bypass mode. */
	if (!(cfg & CFG_MODE_MASK))
		return -EINVAL;

	/* Recover n from the N register's inverted (n - m) encoding */
	n = (~(notn_m) + m) & mask;

	/* Calculate 2d value */
	d = DIV_ROUND_CLOSEST(n * duty->num * 2, duty->den);

	/*
	 * Check bit widths of 2d. If D is too big reduce duty cycle.
	 * Also make sure it is never zero.
	 */
	d = clamp_val(d, 1, mask);

	if ((d / 2) > (n - m))
		d = (n - m) * 2;
	else if ((d / 2) < (m / 2))
		d = m;

	not2d = ~d & mask;

	ret = regmap_update_bits(rcg->clkr.regmap, RCG_D_OFFSET(rcg), mask,
				 not2d);
	if (ret)
		return ret;

	return update_config(rcg);
}
800 
/* Standard RCG2 ops: rates are rounded up (CEIL) within the freq_tbl. */
const struct clk_ops clk_rcg2_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_rate,
	.set_rate = clk_rcg2_set_rate,
	.set_rate_and_parent = clk_rcg2_set_rate_and_parent,
	.get_duty_cycle = clk_rcg2_get_duty_cycle,
	.set_duty_cycle = clk_rcg2_set_duty_cycle,
};
EXPORT_SYMBOL_GPL(clk_rcg2_ops);
813 
/* General-purpose RCG2 ops: m/n/pre_div computed at runtime, no freq_tbl. */
const struct clk_ops clk_rcg2_gp_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_gp_rate,
	.set_rate = clk_rcg2_set_gp_rate,
	.get_duty_cycle = clk_rcg2_get_duty_cycle,
	.set_duty_cycle = clk_rcg2_set_duty_cycle,
};
EXPORT_SYMBOL_GPL(clk_rcg2_gp_ops);
825 
/* Like clk_rcg2_ops but rates are rounded down (FLOOR) within the freq_tbl. */
const struct clk_ops clk_rcg2_floor_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_floor_rate,
	.set_rate = clk_rcg2_set_floor_rate,
	.set_rate_and_parent = clk_rcg2_set_floor_rate_and_parent,
	.get_duty_cycle = clk_rcg2_get_duty_cycle,
	.set_duty_cycle = clk_rcg2_set_duty_cycle,
};
EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);
838 
/* RCG2 ops for clocks whose frequencies have multiple configurations. */
const struct clk_ops clk_rcg2_fm_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_fm_determine_rate,
	.set_rate = clk_rcg2_fm_set_rate,
	.set_rate_and_parent = clk_rcg2_fm_set_rate_and_parent,
	.get_duty_cycle = clk_rcg2_get_duty_cycle,
	.set_duty_cycle = clk_rcg2_set_duty_cycle,
};
EXPORT_SYMBOL_GPL(clk_rcg2_fm_ops);
851 
/* Mux-only ops: no divider programming, just closest-rate parent selection. */
const struct clk_ops clk_rcg2_mux_closest_ops = {
	.determine_rate = __clk_mux_determine_rate_closest,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_mux_closest_ops);
858 
/* num/den fraction applied on top of a link rate (pixel/eDP clocks). */
struct frac_entry {
	int num;	/* numerator (multiplier) */
	int den;	/* denominator (divisor) */
};
863 
/* Fractions for a 675 MHz source; link rate of 270M.  Zero-terminated. */
static const struct frac_entry frac_table_675m[] = {	/* link rate of 270M */
	{ 52, 295 },	/* 119 M */
	{ 11, 57 },	/* 130.25 M */
	{ 63, 307 },	/* 138.50 M */
	{ 11, 50 },	/* 148.50 M */
	{ 47, 206 },	/* 154 M */
	{ 31, 100 },	/* 205.25 M */
	{ 107, 269 },	/* 268.50 M */
	{ },
};
874 
875 static struct frac_entry frac_table_810m[] = { /* Link rate of 162M */
876 	{ 31, 211 },	/* 119 M */
877 	{ 32, 199 },	/* 130.25 M */
878 	{ 63, 307 },	/* 138.50 M */
879 	{ 11, 60 },	/* 148.50 M */
880 	{ 50, 263 },	/* 154 M */
881 	{ 31, 120 },	/* 205.25 M */
882 	{ 119, 359 },	/* 268.50 M */
883 	{ },
884 };
885 
/*
 * Set an eDP pixel rate: pick the frac table for the current parent rate
 * (810 MHz or 675 MHz source), find a fraction whose implied source rate
 * matches the parent within +/-100 kHz, and program it as m/n while
 * keeping the pre divider the hardware already has.
 */
static int clk_edp_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = *rcg->freq_tbl;
	const struct frac_entry *frac;
	int delta = 100000;
	s64 src_rate = parent_rate;
	s64 request;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div;

	if (src_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	for (; frac->num; frac++) {
		/* Source rate this fraction would need: rate * den / num */
		request = rate;
		request *= frac->den;
		request = div_s64(request, frac->num);
		if ((src_rate < (request - delta)) ||
		    (src_rate > (request + delta)))
			continue;

		/* Preserve the currently programmed pre divider */
		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
				&hid_div);
		f.pre_div = hid_div;
		f.pre_div >>= CFG_SRC_DIV_SHIFT;
		f.pre_div &= mask;
		f.m = frac->num;
		f.n = frac->den;

		return clk_rcg2_configure(rcg, &f);
	}

	return -EINVAL;
}
924 
static int clk_edp_pixel_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Parent index is set statically in frequency table */
	return clk_edp_pixel_set_rate(hw, rate, parent_rate);
}
931 
/*
 * determine_rate for eDP pixel clocks: force the parent from the first
 * freq_tbl entry, then find a frac table entry that matches the parent
 * rate within +/-100 kHz and report the rate it would produce with the
 * currently programmed pre divider.
 */
static int clk_edp_pixel_determine_rate(struct clk_hw *hw,
					struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f = rcg->freq_tbl;
	const struct frac_entry *frac;
	int delta = 100000;
	s64 request;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div;
	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);

	/* Force the correct parent */
	req->best_parent_hw = clk_hw_get_parent_by_index(hw, index);
	req->best_parent_rate = clk_hw_get_rate(req->best_parent_hw);

	if (req->best_parent_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	for (; frac->num; frac++) {
		/* Source rate this fraction would need: rate * den / num */
		request = req->rate;
		request *= frac->den;
		request = div_s64(request, frac->num);
		if ((req->best_parent_rate < (request - delta)) ||
		    (req->best_parent_rate > (request + delta)))
			continue;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
				&hid_div);
		hid_div >>= CFG_SRC_DIV_SHIFT;
		hid_div &= mask;

		req->rate = calc_rate(req->best_parent_rate,
				      frac->num, frac->den,
				      !!frac->den, hid_div);
		return 0;
	}

	return -EINVAL;
}
974 
/* Ops for eDP pixel clocks driven from a fixed 810/675 MHz source. */
const struct clk_ops clk_edp_pixel_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_edp_pixel_set_rate,
	.set_rate_and_parent = clk_edp_pixel_set_rate_and_parent,
	.determine_rate = clk_edp_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);
985 
/*
 * determine_rate for byte clocks: force the freq_tbl parent, let it round
 * the requested rate, then report what the half-integer divider (capped
 * to the register width) can achieve.
 */
static int clk_byte_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f = rcg->freq_tbl;
	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
	unsigned long parent_rate, div;
	u32 mask = BIT(rcg->hid_width) - 1;
	struct clk_hw *p;

	if (req->rate == 0)
		return -EINVAL;

	req->best_parent_hw = p = clk_hw_get_parent_by_index(hw, index);
	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, req->rate);

	/* Register encoding of the divider: ceil(2 * parent / rate) - 1 */
	div = DIV_ROUND_UP((2 * parent_rate), req->rate) - 1;
	div = min_t(u32, div, mask);

	req->rate = calc_rate(parent_rate, 0, 0, 0, div);

	return 0;
}
1009 
1010 static int clk_byte_set_rate(struct clk_hw *hw, unsigned long rate,
1011 			 unsigned long parent_rate)
1012 {
1013 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
1014 	struct freq_tbl f = *rcg->freq_tbl;
1015 	unsigned long div;
1016 	u32 mask = BIT(rcg->hid_width) - 1;
1017 
1018 	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
1019 	div = min_t(u32, div, mask);
1020 
1021 	f.pre_div = div;
1022 
1023 	return clk_rcg2_configure(rcg, &f);
1024 }
1025 
static int clk_byte_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Parent index is set statically in frequency table */
	return clk_byte_set_rate(hw, rate, parent_rate);
}
1032 
/* Ops for DSI byte clocks with a single, statically chosen parent. */
const struct clk_ops clk_byte_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte_set_rate,
	.set_rate_and_parent = clk_byte_set_rate_and_parent,
	.determine_rate = clk_byte_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte_ops);
1043 
/*
 * determine_rate for byte2 clocks: keep whatever parent the framework
 * proposed in req->best_parent_hw (assumed already populated by the
 * caller — TODO confirm), round the rate through it and report what the
 * half-integer divider can achieve.
 */
static int clk_byte2_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	unsigned long parent_rate, div;
	u32 mask = BIT(rcg->hid_width) - 1;
	struct clk_hw *p;
	unsigned long rate = req->rate;

	if (rate == 0)
		return -EINVAL;

	p = req->best_parent_hw;
	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, rate);

	/* Register encoding of the divider: ceil(2 * parent / rate) - 1 */
	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	req->rate = calc_rate(parent_rate, 0, 0, 0, div);

	return 0;
}
1066 
/*
 * Program a byte2 clock: compute the half-integer divider for @rate and
 * reuse whatever source the hardware currently has selected (read back
 * from CFG and mapped through parent_map).
 */
static int clk_byte2_set_rate(struct clk_hw *hw, unsigned long rate,
			 unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	unsigned long div;
	int i, num_parents = clk_hw_get_num_parents(hw);
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 cfg;

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	f.pre_div = div;

	/* Keep the currently selected source */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++) {
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			return clk_rcg2_configure(rcg, &f);
		}
	}

	/* Hardware source not present in parent_map */
	return -EINVAL;
}
1095 
static int clk_byte2_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Read the hardware to determine parent during set_rate */
	return clk_byte2_set_rate(hw, rate, parent_rate);
}
1102 
/* Ops for byte clocks that keep the parent currently selected in hardware. */
const struct clk_ops clk_byte2_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte2_set_rate,
	.set_rate_and_parent = clk_byte2_set_rate_and_parent,
	.determine_rate = clk_byte2_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte2_ops);
1113 
/* Candidate num/den ratios tried for pixel clocks.  Zero-terminated. */
static const struct frac_entry frac_table_pixel[] = {
	{ 3, 8 },
	{ 2, 9 },
	{ 4, 9 },
	{ 1, 1 },
	{ 2, 3 },
	{ 16, 35},
	{ 4, 15},
	{ }
};
1124 
/*
 * determine_rate for pixel clocks: for each candidate fraction, ask the
 * parent to round rate * den / num and accept the first fraction the
 * parent can match within +/-100 kHz.
 */
static int clk_pixel_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	unsigned long request, src_rate;
	int delta = 100000;
	const struct frac_entry *frac = frac_table_pixel;

	for (; frac->num; frac++) {
		/* Parent rate this fraction would need */
		request = (req->rate * frac->den) / frac->num;

		src_rate = clk_hw_round_rate(req->best_parent_hw, request);
		if ((src_rate < (request - delta)) ||
			(src_rate > (request + delta)))
			continue;

		req->best_parent_rate = src_rate;
		req->rate = (src_rate * frac->num) / frac->den;
		return 0;
	}

	/* No fraction brings the parent close enough to the request */
	return -EINVAL;
}
1147 
1148 static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
1149 		unsigned long parent_rate)
1150 {
1151 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
1152 	struct freq_tbl f = { 0 };
1153 	const struct frac_entry *frac = frac_table_pixel;
1154 	unsigned long request;
1155 	int delta = 100000;
1156 	u32 mask = BIT(rcg->hid_width) - 1;
1157 	u32 hid_div, cfg;
1158 	int i, num_parents = clk_hw_get_num_parents(hw);
1159 
1160 	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
1161 	cfg &= CFG_SRC_SEL_MASK;
1162 	cfg >>= CFG_SRC_SEL_SHIFT;
1163 
1164 	for (i = 0; i < num_parents; i++)
1165 		if (cfg == rcg->parent_map[i].cfg) {
1166 			f.src = rcg->parent_map[i].src;
1167 			break;
1168 		}
1169 
1170 	for (; frac->num; frac++) {
1171 		request = (rate * frac->den) / frac->num;
1172 
1173 		if ((parent_rate < (request - delta)) ||
1174 			(parent_rate > (request + delta)))
1175 			continue;
1176 
1177 		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
1178 				&hid_div);
1179 		f.pre_div = hid_div;
1180 		f.pre_div >>= CFG_SRC_DIV_SHIFT;
1181 		f.pre_div &= mask;
1182 		f.m = frac->num;
1183 		f.n = frac->den;
1184 
1185 		return clk_rcg2_configure(rcg, &f);
1186 	}
1187 	return -EINVAL;
1188 }
1189 
1190 static int clk_pixel_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
1191 		unsigned long parent_rate, u8 index)
1192 {
1193 	return clk_pixel_set_rate(hw, rate, parent_rate);
1194 }
1195 
/*
 * Ops for pixel-clock RCGs: rates are realized via the M/N fractions in
 * frac_table_pixel while reusing the programmed source and pre-divider.
 */
const struct clk_ops clk_pixel_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_pixel_set_rate,
	.set_rate_and_parent = clk_pixel_set_rate_and_parent,
	.determine_rate = clk_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_pixel_ops);
1206 
static int clk_gfx3d_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct clk_rate_request parent_req = { .min_rate = 0, .max_rate = ULONG_MAX };
	struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw);
	struct clk_hw *xo, *p0, *p1, *p2;
	unsigned long p0_rate;
	u8 mux_div = cgfx->div;
	int ret;

	p0 = cgfx->hws[0];
	p1 = cgfx->hws[1];
	p2 = cgfx->hws[2];
	/*
	 * This function does ping-pong the RCG between PLLs: if we don't
	 * have at least one fixed PLL and two variable ones,
	 * then it's not going to work correctly.
	 */
	if (WARN_ON(!p0 || !p1 || !p2))
		return -EINVAL;

	/* A request for the XO rate bypasses the PLLs entirely */
	xo = clk_hw_get_parent_by_index(hw, 0);
	if (req->rate == clk_hw_get_rate(xo)) {
		req->best_parent_hw = xo;
		return 0;
	}

	/* Treat an unset post-mux divider as divide-by-one */
	if (mux_div == 0)
		mux_div = 1;

	parent_req.rate = req->rate * mux_div;

	/* This has to be a fixed rate PLL */
	p0_rate = clk_hw_get_rate(p0);

	if (parent_req.rate == p0_rate) {
		req->rate = req->best_parent_rate = p0_rate;
		req->best_parent_hw = p0;
		return 0;
	}

	/*
	 * Ping-pong between the two variable PLLs (p1/p2) so that we never
	 * reprogram the PLL currently feeding the RCG.
	 */
	if (req->best_parent_hw == p0) {
		/* Are we going back to a previously used rate? */
		if (clk_hw_get_rate(p2) == parent_req.rate)
			req->best_parent_hw = p2;
		else
			req->best_parent_hw = p1;
	} else if (req->best_parent_hw == p2) {
		req->best_parent_hw = p1;
	} else {
		req->best_parent_hw = p2;
	}

	/* Clamp the parent request to both the parent's range and ours */
	clk_hw_get_rate_range(req->best_parent_hw,
			      &parent_req.min_rate, &parent_req.max_rate);

	if (req->min_rate > parent_req.min_rate)
		parent_req.min_rate = req->min_rate;

	if (req->max_rate < parent_req.max_rate)
		parent_req.max_rate = req->max_rate;

	parent_req.best_parent_hw = req->best_parent_hw;
	ret = __clk_determine_rate(req->best_parent_hw, &parent_req);
	if (ret)
		return ret;

	/* Output rate is the (possibly rounded) parent rate post-divider */
	req->rate = req->best_parent_rate = parent_req.rate;
	req->rate /= mux_div;

	return 0;
}
1279 
1280 static int clk_gfx3d_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
1281 		unsigned long parent_rate, u8 index)
1282 {
1283 	struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw);
1284 	struct clk_rcg2 *rcg = &cgfx->rcg;
1285 	u32 cfg;
1286 	int ret;
1287 
1288 	cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
1289 	/* On some targets, the GFX3D RCG may need to divide PLL frequency */
1290 	if (cgfx->div > 1)
1291 		cfg |= ((2 * cgfx->div) - 1) << CFG_SRC_DIV_SHIFT;
1292 
1293 	ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
1294 	if (ret)
1295 		return ret;
1296 
1297 	return update_config(rcg);
1298 }
1299 
static int clk_gfx3d_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	/*
	 * Intentionally a no-op: clk_gfx3d_determine_rate() always selects
	 * a parent different from the current one, so the framework invokes
	 * clk_gfx3d_set_rate_and_parent() instead of this callback.
	 */
	return 0;
}
1310 
/*
 * Ops for the GFX3D RCG, which ping-pongs between PLL parents on rate
 * changes (see clk_gfx3d_determine_rate()).
 */
const struct clk_ops clk_gfx3d_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_gfx3d_set_rate,
	.set_rate_and_parent = clk_gfx3d_set_rate_and_parent,
	.determine_rate = clk_gfx3d_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_gfx3d_ops);
1321 
1322 static int clk_rcg2_set_force_enable(struct clk_hw *hw)
1323 {
1324 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
1325 	const char *name = clk_hw_get_name(hw);
1326 	int ret, count;
1327 
1328 	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
1329 				 CMD_ROOT_EN, CMD_ROOT_EN);
1330 	if (ret)
1331 		return ret;
1332 
1333 	/* wait for RCG to turn ON */
1334 	for (count = 500; count > 0; count--) {
1335 		if (clk_rcg2_is_enabled(hw))
1336 			return 0;
1337 
1338 		udelay(1);
1339 	}
1340 
1341 	pr_err("%s: RCG did not turn on\n", name);
1342 	return -ETIMEDOUT;
1343 }
1344 
1345 static int clk_rcg2_clear_force_enable(struct clk_hw *hw)
1346 {
1347 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
1348 
1349 	return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
1350 					CMD_ROOT_EN, 0);
1351 }
1352 
static int
clk_rcg2_shared_force_enable_clear(struct clk_hw *hw, const struct freq_tbl *f)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int err;

	/* Keep the RCG forcibly enabled while its configuration changes */
	err = clk_rcg2_set_force_enable(hw);
	if (err)
		return err;

	err = clk_rcg2_configure(rcg, f);
	if (err)
		return err;

	/* Return control to the hardware branch-vote mechanism */
	return clk_rcg2_clear_force_enable(hw);
}
1369 
1370 static int __clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
1371 				      unsigned long parent_rate,
1372 				      enum freq_policy policy)
1373 {
1374 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
1375 	const struct freq_tbl *f;
1376 
1377 	switch (policy) {
1378 	case FLOOR:
1379 		f = qcom_find_freq_floor(rcg->freq_tbl, rate);
1380 		break;
1381 	case CEIL:
1382 		f = qcom_find_freq(rcg->freq_tbl, rate);
1383 		break;
1384 	default:
1385 		return -EINVAL;
1386 	}
1387 
1388 	/*
1389 	 * In case clock is disabled, update the M, N and D registers, cache
1390 	 * the CFG value in parked_cfg and don't hit the update bit of CMD
1391 	 * register.
1392 	 */
1393 	if (!clk_hw_is_enabled(hw))
1394 		return __clk_rcg2_configure(rcg, f, &rcg->parked_cfg);
1395 
1396 	return clk_rcg2_shared_force_enable_clear(hw, f);
1397 }
1398 
1399 static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
1400 				    unsigned long parent_rate)
1401 {
1402 	return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, CEIL);
1403 }
1404 
1405 static int clk_rcg2_shared_set_rate_and_parent(struct clk_hw *hw,
1406 		unsigned long rate, unsigned long parent_rate, u8 index)
1407 {
1408 	return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, CEIL);
1409 }
1410 
1411 static int clk_rcg2_shared_set_floor_rate(struct clk_hw *hw, unsigned long rate,
1412 					  unsigned long parent_rate)
1413 {
1414 	return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, FLOOR);
1415 }
1416 
1417 static int clk_rcg2_shared_set_floor_rate_and_parent(struct clk_hw *hw,
1418 		unsigned long rate, unsigned long parent_rate, u8 index)
1419 {
1420 	return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, FLOOR);
1421 }
1422 
static int clk_rcg2_shared_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	/*
	 * Set the update bit because required configuration has already
	 * been written in clk_rcg2_shared_set_rate()
	 */
	ret = clk_rcg2_set_force_enable(hw);
	if (ret)
		return ret;

	/* Write back the stored configuration corresponding to current rate */
	ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, rcg->parked_cfg);
	if (ret)
		return ret;

	/* Latch the restored CFG into the running configuration */
	ret = update_config(rcg);
	if (ret)
		return ret;

	/* Hand control back to the branch-clock hardware voting */
	return clk_rcg2_clear_force_enable(hw);
}
1447 
static void clk_rcg2_shared_disable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	/*
	 * Store current configuration as switching to safe source would clear
	 * the SRC and DIV of CFG register
	 */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &rcg->parked_cfg);

	/*
	 * Park the RCG at a safe configuration - sourced off of safe source.
	 * Force enable and disable the RCG while configuring it to safeguard
	 * against any update signal coming from the downstream clock.
	 * The current parent is still prepared and enabled at this point, and
	 * the safe source is always on while application processor subsystem
	 * is online. Therefore, the RCG can safely switch its parent.
	 *
	 * Error returns are intentionally ignored below: a disable callback
	 * has no way to report failure.
	 */
	clk_rcg2_set_force_enable(hw);

	regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
		     rcg->safe_src_index << CFG_SRC_SEL_SHIFT);

	update_config(rcg);

	clk_rcg2_clear_force_enable(hw);
}
1475 
1476 static u8 clk_rcg2_shared_get_parent(struct clk_hw *hw)
1477 {
1478 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
1479 
1480 	/* If the shared rcg is parked use the cached cfg instead */
1481 	if (!clk_hw_is_enabled(hw))
1482 		return __clk_rcg2_get_parent(hw, rcg->parked_cfg);
1483 
1484 	return clk_rcg2_get_parent(hw);
1485 }
1486 
1487 static int clk_rcg2_shared_set_parent(struct clk_hw *hw, u8 index)
1488 {
1489 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
1490 
1491 	/* If the shared rcg is parked only update the cached cfg */
1492 	if (!clk_hw_is_enabled(hw)) {
1493 		rcg->parked_cfg &= ~CFG_SRC_SEL_MASK;
1494 		rcg->parked_cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
1495 
1496 		return 0;
1497 	}
1498 
1499 	return clk_rcg2_set_parent(hw, index);
1500 }
1501 
1502 static unsigned long
1503 clk_rcg2_shared_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
1504 {
1505 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
1506 
1507 	/* If the shared rcg is parked use the cached cfg instead */
1508 	if (!clk_hw_is_enabled(hw))
1509 		return __clk_rcg2_recalc_rate(hw, parent_rate, rcg->parked_cfg);
1510 
1511 	return clk_rcg2_recalc_rate(hw, parent_rate);
1512 }
1513 
static int clk_rcg2_shared_init(struct clk_hw *hw)
{
	/*
	 * This does a few things:
	 *
	 *  1. Sets rcg->parked_cfg to reflect the value at probe so that the
	 *     proper parent is reported from clk_rcg2_shared_get_parent().
	 *
	 *  2. Clears the force enable bit of the RCG because we rely on child
	 *     clks (branches) to turn the RCG on/off with a hardware feedback
	 *     mechanism and only set the force enable bit in the RCG when we
	 *     want to make sure the clk stays on for parent switches or
	 *     parking.
	 *
	 *  3. Parks shared RCGs on the safe source at registration because we
	 *     can't be certain that the parent clk will stay on during boot,
	 *     especially if the parent is shared. If this RCG is enabled at
	 *     boot, and the parent is turned off, the RCG will get stuck on. A
	 *     GDSC can wedge if is turned on and the RCG is stuck on because
	 *     the GDSC's controller will hang waiting for the clk status to
	 *     toggle on when it never does.
	 *
	 * The safest option here is to "park" the RCG at init so that the clk
	 * can never get stuck on or off. This ensures the GDSC can't get
	 * wedged.
	 */
	/* Parking is done via the normal disable path; enable restores it */
	clk_rcg2_shared_disable(hw);

	return 0;
}
1544 
/*
 * Ops for "shared" RCGs that are parked on the safe source while disabled;
 * the real configuration is cached in rcg->parked_cfg (see helpers above).
 */
const struct clk_ops clk_rcg2_shared_ops = {
	.init = clk_rcg2_shared_init,
	.enable = clk_rcg2_shared_enable,
	.disable = clk_rcg2_shared_disable,
	.get_parent = clk_rcg2_shared_get_parent,
	.set_parent = clk_rcg2_shared_set_parent,
	.recalc_rate = clk_rcg2_shared_recalc_rate,
	.determine_rate = clk_rcg2_determine_rate,
	.set_rate = clk_rcg2_shared_set_rate,
	.set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);
1557 
/*
 * Like clk_rcg2_shared_ops but rounds rates down (FLOOR) and skips the
 * init-time parking.
 */
const struct clk_ops clk_rcg2_shared_floor_ops = {
	.enable = clk_rcg2_shared_enable,
	.disable = clk_rcg2_shared_disable,
	.get_parent = clk_rcg2_shared_get_parent,
	.set_parent = clk_rcg2_shared_set_parent,
	.recalc_rate = clk_rcg2_shared_recalc_rate,
	.determine_rate = clk_rcg2_determine_floor_rate,
	.set_rate = clk_rcg2_shared_set_floor_rate,
	.set_rate_and_parent = clk_rcg2_shared_set_floor_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_shared_floor_ops);
1569 
1570 static int clk_rcg2_shared_no_init_park(struct clk_hw *hw)
1571 {
1572 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
1573 
1574 	/*
1575 	 * Read the config register so that the parent is properly mapped at
1576 	 * registration time.
1577 	 */
1578 	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &rcg->parked_cfg);
1579 
1580 	return 0;
1581 }
1582 
1583 /*
1584  * Like clk_rcg2_shared_ops but skip the init so that the clk frequency is left
1585  * unchanged at registration time.
1586  */
1587 const struct clk_ops clk_rcg2_shared_no_init_park_ops = {
1588 	.init = clk_rcg2_shared_no_init_park,
1589 	.enable = clk_rcg2_shared_enable,
1590 	.disable = clk_rcg2_shared_disable,
1591 	.get_parent = clk_rcg2_shared_get_parent,
1592 	.set_parent = clk_rcg2_shared_set_parent,
1593 	.recalc_rate = clk_rcg2_shared_recalc_rate,
1594 	.determine_rate = clk_rcg2_determine_rate,
1595 	.set_rate = clk_rcg2_shared_set_rate,
1596 	.set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent,
1597 };
1598 EXPORT_SYMBOL_GPL(clk_rcg2_shared_no_init_park_ops);
1599 
1600 /* Common APIs to be used for DFS based RCGR */
1601 static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l,
1602 				       struct freq_tbl *f)
1603 {
1604 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
1605 	struct clk_hw *p;
1606 	unsigned long prate = 0;
1607 	u32 val, mask, cfg, mode, src;
1608 	int i, num_parents;
1609 
1610 	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(l), &cfg);
1611 
1612 	mask = BIT(rcg->hid_width) - 1;
1613 	f->pre_div = 1;
1614 	if (cfg & mask)
1615 		f->pre_div = cfg & mask;
1616 
1617 	src = cfg & CFG_SRC_SEL_MASK;
1618 	src >>= CFG_SRC_SEL_SHIFT;
1619 
1620 	num_parents = clk_hw_get_num_parents(hw);
1621 	for (i = 0; i < num_parents; i++) {
1622 		if (src == rcg->parent_map[i].cfg) {
1623 			f->src = rcg->parent_map[i].src;
1624 			p = clk_hw_get_parent_by_index(&rcg->clkr.hw, i);
1625 			prate = clk_hw_get_rate(p);
1626 		}
1627 	}
1628 
1629 	mode = cfg & CFG_MODE_MASK;
1630 	mode >>= CFG_MODE_SHIFT;
1631 	if (mode) {
1632 		mask = BIT(rcg->mnd_width) - 1;
1633 		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_M_DFSR(l),
1634 			    &val);
1635 		val &= mask;
1636 		f->m = val;
1637 
1638 		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_N_DFSR(l),
1639 			    &val);
1640 		val = ~val;
1641 		val &= mask;
1642 		val += f->m;
1643 		f->n = val;
1644 	}
1645 
1646 	f->freq = calc_rate(prate, f->m, f->n, mode, f->pre_div);
1647 }
1648 
/* Build rcg->freq_tbl by decoding every DFS perf level from hardware */
static int clk_rcg2_dfs_populate_freq_table(struct clk_rcg2 *rcg)
{
	struct freq_tbl *freq_tbl;
	int i;

	/* Allocate space for 1 extra since table is NULL terminated */
	freq_tbl = kzalloc_objs(*freq_tbl, MAX_PERF_LEVEL + 1);
	if (!freq_tbl)
		return -ENOMEM;
	rcg->freq_tbl = freq_tbl;

	/* Decode each perf level's DFS registers into a table entry */
	for (i = 0; i < MAX_PERF_LEVEL; i++)
		clk_rcg2_dfs_populate_freq(&rcg->clkr.hw, i, freq_tbl + i);

	return 0;
}
1665 
1666 static int clk_rcg2_dfs_determine_rate(struct clk_hw *hw,
1667 				   struct clk_rate_request *req)
1668 {
1669 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
1670 	int ret;
1671 
1672 	if (!rcg->freq_tbl) {
1673 		ret = clk_rcg2_dfs_populate_freq_table(rcg);
1674 		if (ret) {
1675 			pr_err("Failed to update DFS tables for %s\n",
1676 					clk_hw_get_name(hw));
1677 			return ret;
1678 		}
1679 	}
1680 
1681 	return clk_rcg2_determine_rate(hw, req);
1682 }
1683 
static unsigned long
clk_rcg2_dfs_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 level, mask, cfg, m = 0, n = 0, mode, pre_div;

	/* The active perf level lives in bits [4:1] of the DFS command reg */
	regmap_read(rcg->clkr.regmap,
		    rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &level);
	level &= GENMASK(4, 1);
	level >>= 1;

	/* Fast path: table already populated by determine_rate */
	if (rcg->freq_tbl)
		return rcg->freq_tbl[level].freq;

	/*
	 * Assume that parent_rate is actually the parent because
	 * we can't do any better at figuring it out when the table
	 * hasn't been populated yet. We only populate the table
	 * in determine_rate because we can't guarantee the parents
	 * will be registered with the framework until then.
	 */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(level),
		    &cfg);

	/* Pre-divider field; zero means divide-by-one */
	mask = BIT(rcg->hid_width) - 1;
	pre_div = 1;
	if (cfg & mask)
		pre_div = cfg & mask;

	mode = cfg & CFG_MODE_MASK;
	mode >>= CFG_MODE_SHIFT;
	if (mode) {
		/* MND mode: N is stored as ~(N - M), undo that encoding */
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap,
			    rcg->cmd_rcgr + SE_PERF_M_DFSR(level), &m);
		m &= mask;

		regmap_read(rcg->clkr.regmap,
			    rcg->cmd_rcgr + SE_PERF_N_DFSR(level), &n);
		n = ~n;
		n &= mask;
		n += m;
	}

	return calc_rate(parent_rate, m, n, mode, pre_div);
}
1730 
/*
 * Ops installed once DFS is enabled: the rate is driven by the consumer's
 * DFS registers, so only read-side callbacks are provided.
 */
static const struct clk_ops clk_rcg2_dfs_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.determine_rate = clk_rcg2_dfs_determine_rate,
	.recalc_rate = clk_rcg2_dfs_recalc_rate,
};
1737 
1738 static int clk_rcg2_enable_dfs(const struct clk_rcg_dfs_data *data,
1739 			       struct regmap *regmap)
1740 {
1741 	struct clk_rcg2 *rcg = data->rcg;
1742 	struct clk_init_data *init = data->init;
1743 	u32 val;
1744 	int ret;
1745 
1746 	ret = regmap_read(regmap, rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &val);
1747 	if (ret)
1748 		return -EINVAL;
1749 
1750 	if (!(val & SE_CMD_DFS_EN))
1751 		return 0;
1752 
1753 	/*
1754 	 * Rate changes with consumer writing a register in
1755 	 * their own I/O region
1756 	 */
1757 	init->flags |= CLK_GET_RATE_NOCACHE;
1758 	init->ops = &clk_rcg2_dfs_ops;
1759 
1760 	rcg->freq_tbl = NULL;
1761 
1762 	return 0;
1763 }
1764 
1765 int qcom_cc_register_rcg_dfs(struct regmap *regmap,
1766 			     const struct clk_rcg_dfs_data *rcgs, size_t len)
1767 {
1768 	int i, ret;
1769 
1770 	for (i = 0; i < len; i++) {
1771 		ret = clk_rcg2_enable_dfs(&rcgs[i], regmap);
1772 		if (ret)
1773 			return ret;
1774 	}
1775 
1776 	return 0;
1777 }
1778 EXPORT_SYMBOL_GPL(qcom_cc_register_rcg_dfs);
1779 
1780 static int clk_rcg2_dp_set_rate(struct clk_hw *hw, unsigned long rate,
1781 			unsigned long parent_rate)
1782 {
1783 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
1784 	struct freq_tbl f = { 0 };
1785 	u32 mask = BIT(rcg->hid_width) - 1;
1786 	u32 hid_div, cfg;
1787 	int i, num_parents = clk_hw_get_num_parents(hw);
1788 	unsigned long num, den;
1789 
1790 	rational_best_approximation(parent_rate, rate,
1791 			GENMASK(rcg->mnd_width - 1, 0),
1792 			GENMASK(rcg->mnd_width - 1, 0), &den, &num);
1793 
1794 	if (!num || !den)
1795 		return -EINVAL;
1796 
1797 	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
1798 	hid_div = cfg;
1799 	cfg &= CFG_SRC_SEL_MASK;
1800 	cfg >>= CFG_SRC_SEL_SHIFT;
1801 
1802 	for (i = 0; i < num_parents; i++) {
1803 		if (cfg == rcg->parent_map[i].cfg) {
1804 			f.src = rcg->parent_map[i].src;
1805 			break;
1806 		}
1807 	}
1808 
1809 	f.pre_div = hid_div;
1810 	f.pre_div >>= CFG_SRC_DIV_SHIFT;
1811 	f.pre_div &= mask;
1812 
1813 	if (num != den) {
1814 		f.m = num;
1815 		f.n = den;
1816 	} else {
1817 		f.m = 0;
1818 		f.n = 0;
1819 	}
1820 
1821 	return clk_rcg2_configure(rcg, &f);
1822 }
1823 
1824 static int clk_rcg2_dp_set_rate_and_parent(struct clk_hw *hw,
1825 		unsigned long rate, unsigned long parent_rate, u8 index)
1826 {
1827 	return clk_rcg2_dp_set_rate(hw, rate, parent_rate);
1828 }
1829 
static int clk_rcg2_dp_determine_rate(struct clk_hw *hw,
				struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	unsigned long num, den;
	u64 tmp;

	/* Parent rate is a fixed phy link rate */
	rational_best_approximation(req->best_parent_rate, req->rate,
			GENMASK(rcg->mnd_width - 1, 0),
			GENMASK(rcg->mnd_width - 1, 0), &den, &num);

	if (!num || !den)
		return -EINVAL;

	/* Achievable rate = parent * num / den; 64-bit to avoid overflow */
	tmp = req->best_parent_rate * num;
	do_div(tmp, den);
	req->rate = tmp;

	return 0;
}
1851 
/*
 * Ops for DisplayPort pixel RCGs: rates are realized as an M/N fraction
 * of the fixed phy link rate (see clk_rcg2_dp_set_rate()).
 */
const struct clk_ops clk_dp_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_rcg2_dp_set_rate,
	.set_rate_and_parent = clk_rcg2_dp_set_rate_and_parent,
	.determine_rate = clk_rcg2_dp_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_dp_ops);
1862