/*
 * Copyright (C) 2016 Maxime Ripard
 * Maxime Ripard <maxime.ripard@free-electrons.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 */

#include <linux/clk-provider.h>

#include "ccu_gate.h"
#include "ccu_mp.h"

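/*
 * Brute-force search for the combination of the linear divider m and the
 * power-of-two divider p that gets closest to the requested rate without
 * exceeding it.
 */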
static void ccu_mp_find_best(unsigned long parent, unsigned long rate,
			     unsigned int max_m, unsigned int max_p,
			     unsigned int *m, unsigned int *p)
{
	unsigned long best_rate = 0;
	unsigned int best_m = 0, best_p = 0;
	unsigned int _m, _p;

	for (_p = 1; _p <= max_p; _p <<= 1) {
		for (_m = 1; _m <= max_m; _m++) {
			unsigned long tmp_rate = parent / _p / _m;

			if (tmp_rate > rate)
				continue;

			if ((rate - tmp_rate) < (rate - best_rate)) {
				best_rate = tmp_rate;
				best_m = _m;
				best_p = _p;
			}
		}
	}

	*m = best_m;
	*p = best_p;
}

static unsigned long ccu_mp_round_rate(struct ccu_mux_internal *mux,
				       struct clk_hw *hw,
				       unsigned long *parent_rate,
				       unsigned long rate,
				       void *data)
{
	struct ccu_mp *cmp = data;
	unsigned int max_m, max_p;
	unsigned int m, p;

	if (cmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate *= cmp->fixed_post_div;

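	/*
	 * m is a linear divider, while the p register field holds the log2
	 * of a power-of-two divider; absent an explicit maximum, the largest
	 * p divider is therefore 1 << ((1 << width) - 1).
	 */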
	max_m = cmp->m.max ?: 1 << cmp->m.width;
	max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);

	ccu_mp_find_best(*parent_rate, rate, max_m, max_p, &m, &p);
	rate = *parent_rate / p / m;

	if (cmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate /= cmp->fixed_post_div;

	return rate;
}

static void ccu_mp_disable(struct clk_hw *hw)
{
	struct ccu_mp *cmp = hw_to_ccu_mp(hw);

	return ccu_gate_helper_disable(&cmp->common, cmp->enable);
}

static int ccu_mp_enable(struct clk_hw *hw)
{
	struct ccu_mp *cmp = hw_to_ccu_mp(hw);

	return ccu_gate_helper_enable(&cmp->common, cmp->enable);
}

static int ccu_mp_is_enabled(struct clk_hw *hw)
{
	struct ccu_mp *cmp = hw_to_ccu_mp(hw);

	return ccu_gate_helper_is_enabled(&cmp->common, cmp->enable);
}

static unsigned long ccu_mp_recalc_rate(struct clk_hw *hw,
					unsigned long parent_rate)
{
	struct ccu_mp *cmp = hw_to_ccu_mp(hw);
	unsigned long rate;
	unsigned int m, p;
	u32 reg;

	/* Adjust parent_rate according to pre-dividers */
	parent_rate = ccu_mux_helper_apply_prediv(&cmp->common, &cmp->mux, -1,
						  parent_rate);

	reg = readl(cmp->common.base + cmp->common.reg);

	m = reg >> cmp->m.shift;
	m &= (1 << cmp->m.width) - 1;
	m += cmp->m.offset;
	if (!m)
		m++;

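	/* The p field holds the log2 of the power-of-two divider */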
	p = reg >> cmp->p.shift;
	p &= (1 << cmp->p.width) - 1;

	rate = (parent_rate >> p) / m;
	if (cmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate /= cmp->fixed_post_div;

	return rate;
}

static int ccu_mp_determine_rate(struct clk_hw *hw,
				 struct clk_rate_request *req)
{
	struct ccu_mp *cmp = hw_to_ccu_mp(hw);

	return ccu_mux_helper_determine_rate(&cmp->common, &cmp->mux,
					     req, ccu_mp_round_rate, cmp);
}

static int ccu_mp_set_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long parent_rate)
{
	struct ccu_mp *cmp = hw_to_ccu_mp(hw);
	unsigned long flags;
	unsigned int max_m, max_p;
	unsigned int m, p;
	u32 reg;

	/* Adjust parent_rate according to pre-dividers */
	parent_rate = ccu_mux_helper_apply_prediv(&cmp->common, &cmp->mux, -1,
						  parent_rate);

	max_m = cmp->m.max ?: 1 << cmp->m.width;
	max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);

	/* Adjust target rate according to post-dividers */
	if (cmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate = rate * cmp->fixed_post_div;

	ccu_mp_find_best(parent_rate, rate, max_m, max_p, &m, &p);

	spin_lock_irqsave(cmp->common.lock, flags);

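	/*
	 * Program the new factors: m is stored minus its offset, p as the
	 * log2 of the selected power-of-two divider.
	 */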
	reg = readl(cmp->common.base + cmp->common.reg);
	reg &= ~GENMASK(cmp->m.width + cmp->m.shift - 1, cmp->m.shift);
	reg &= ~GENMASK(cmp->p.width + cmp->p.shift - 1, cmp->p.shift);
	reg |= (m - cmp->m.offset) << cmp->m.shift;
	reg |= ilog2(p) << cmp->p.shift;

	writel(reg, cmp->common.base + cmp->common.reg);

	spin_unlock_irqrestore(cmp->common.lock, flags);

	return 0;
}

static u8 ccu_mp_get_parent(struct clk_hw *hw)
{
	struct ccu_mp *cmp = hw_to_ccu_mp(hw);

	return ccu_mux_helper_get_parent(&cmp->common, &cmp->mux);
}

static int ccu_mp_set_parent(struct clk_hw *hw, u8 index)
{
	struct ccu_mp *cmp = hw_to_ccu_mp(hw);

	return ccu_mux_helper_set_parent(&cmp->common, &cmp->mux, index);
}

const struct clk_ops ccu_mp_ops = {
	.disable	= ccu_mp_disable,
	.enable		= ccu_mp_enable,
	.is_enabled	= ccu_mp_is_enabled,

	.get_parent	= ccu_mp_get_parent,
	.set_parent	= ccu_mp_set_parent,

	.determine_rate	= ccu_mp_determine_rate,
	.recalc_rate	= ccu_mp_recalc_rate,
	.set_rate	= ccu_mp_set_rate,
};

/*
 * Support for MMC timing mode switching
 *
 * The MMC clocks on some SoCs support switching between old and
 * new timing modes. A platform-specific API is provided to query
 * and set the timing mode on supported SoCs.
 *
 * In addition, a special class of ccu_mp_ops is provided, which
 * takes into account the timing mode switch. When the new timing
 * mode is active, the clock output rate is halved. This new class
 * is a wrapper around the generic ccu_mp_ops. When clock rates
 * are passed through to ccu_mp_ops callbacks, they are doubled
 * if the new timing mode bit is set, to account for the post
 * divider. Conversely, when clock rates are passed back, they
 * are halved if the mode bit is set.
 */
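/*
 * For example, with the new timing mode active, a request for 50 MHz is
 * doubled to 100 MHz before it reaches the generic callbacks; the M and P
 * dividers are then programmed for 100 MHz, and the mode's post divider
 * halves the output back to the requested 50 MHz.
 */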

static unsigned long ccu_mp_mmc_recalc_rate(struct clk_hw *hw,
					    unsigned long parent_rate)
{
	unsigned long rate = ccu_mp_recalc_rate(hw, parent_rate);
	struct ccu_common *cm = hw_to_ccu_common(hw);
	u32 val = readl(cm->base + cm->reg);

	if (val & CCU_MMC_NEW_TIMING_MODE)
		return rate / 2;
	return rate;
}

static int ccu_mp_mmc_determine_rate(struct clk_hw *hw,
				     struct clk_rate_request *req)
{
	struct ccu_common *cm = hw_to_ccu_common(hw);
	u32 val = readl(cm->base + cm->reg);
	int ret;

	/* adjust the requested clock rate */
	if (val & CCU_MMC_NEW_TIMING_MODE) {
		req->rate *= 2;
		req->min_rate *= 2;
		req->max_rate *= 2;
	}

	ret = ccu_mp_determine_rate(hw, req);

	/* re-adjust the requested clock rate back */
	if (val & CCU_MMC_NEW_TIMING_MODE) {
		req->rate /= 2;
		req->min_rate /= 2;
		req->max_rate /= 2;
	}

	return ret;
}

static int ccu_mp_mmc_set_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long parent_rate)
{
	struct ccu_common *cm = hw_to_ccu_common(hw);
	u32 val = readl(cm->base + cm->reg);

	if (val & CCU_MMC_NEW_TIMING_MODE)
		rate *= 2;

	return ccu_mp_set_rate(hw, rate, parent_rate);
}

const struct clk_ops ccu_mp_mmc_ops = {
	.disable	= ccu_mp_disable,
	.enable		= ccu_mp_enable,
	.is_enabled	= ccu_mp_is_enabled,

	.get_parent	= ccu_mp_get_parent,
	.set_parent	= ccu_mp_set_parent,

	.determine_rate	= ccu_mp_mmc_determine_rate,
	.recalc_rate	= ccu_mp_mmc_recalc_rate,
	.set_rate	= ccu_mp_mmc_set_rate,
};