/* drivers/clk/sunxi-ng/ccu_nm.c */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2016 Maxime Ripard
 * Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/clk-provider.h>
#include <linux/io.h>

#include "ccu_frac.h"
#include "ccu_gate.h"
#include "ccu_nm.h"

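/*
 * Scratch space for the factor search: the min/max fields bound the
 * allowed range, and n/m receive the chosen factors.
 */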
struct _ccu_nm {
	unsigned long	n, min_n, max_n;
	unsigned long	m, min_m, max_m;
};

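/*
 * An N/M clock runs at parent * N / M. The multiplication is done in
 * 64 bits to avoid overflowing unsigned long on 32-bit platforms,
 * e.g. a 24 MHz parent with N = 50 and M = 2 yields 600 MHz.
 */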
static unsigned long ccu_nm_calc_rate(unsigned long parent,
				      unsigned long n, unsigned long m)
{
	u64 rate = parent;

	rate *= n;
	do_div(rate, m);

	return rate;
}

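/*
 * Exhaustively walk the allowed N and M ranges and keep the combination
 * that ccu_is_better_rate() considers the best match for the requested
 * rate. The chosen factors are stored back into *nm.
 */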
static unsigned long ccu_nm_find_best(struct ccu_common *common, unsigned long parent,
				      unsigned long rate, struct _ccu_nm *nm)
{
	unsigned long best_rate = 0;
	unsigned long best_n = 0, best_m = 0;
	unsigned long _n, _m;

	for (_n = nm->min_n; _n <= nm->max_n; _n++) {
		for (_m = nm->min_m; _m <= nm->max_m; _m++) {
			unsigned long tmp_rate = ccu_nm_calc_rate(parent,
								  _n, _m);

			if (ccu_is_better_rate(common, rate, tmp_rate, best_rate)) {
				best_rate = tmp_rate;
				best_n = _n;
				best_m = _m;
			}
		}
	}

	nm->n = best_n;
	nm->m = best_m;

	return best_rate;
}

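/*
 * The enable bit of an N/M clock is a plain gate, so the shared gate
 * helpers do all the work; nm->enable is the bit mask within the
 * clock's register.
 */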
static void ccu_nm_disable(struct clk_hw *hw)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);

	ccu_gate_helper_disable(&nm->common, nm->enable);
}

static int ccu_nm_enable(struct clk_hw *hw)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);

	return ccu_gate_helper_enable(&nm->common, nm->enable);
}

static int ccu_nm_is_enabled(struct clk_hw *hw)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);

	return ccu_gate_helper_is_enabled(&nm->common, nm->enable);
}

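/*
 * Read the current rate back from the hardware: fractional mode, when
 * active, overrides the N/M factors; sigma-delta modulation has its own
 * readback; otherwise the rate is parent * N / M. Factors that decode
 * to zero are clamped to 1, and the fixed post-divider, if any, is
 * applied last.
 */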
static unsigned long ccu_nm_recalc_rate(struct clk_hw *hw,
					unsigned long parent_rate)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);
	unsigned long rate;
	unsigned long n, m;
	u32 reg;

	if (ccu_frac_helper_is_enabled(&nm->common, &nm->frac)) {
		rate = ccu_frac_helper_read_rate(&nm->common, &nm->frac);

		if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
			rate /= nm->fixed_post_div;

		return rate;
	}

	reg = readl(nm->common.base + nm->common.reg);

	n = reg >> nm->n.shift;
	n &= (1 << nm->n.width) - 1;
	n += nm->n.offset;
	if (!n)
		n++;

	m = reg >> nm->m.shift;
	m &= (1 << nm->m.width) - 1;
	m += nm->m.offset;
	if (!m)
		m++;

	if (ccu_sdm_helper_is_enabled(&nm->common, &nm->sdm))
		rate = ccu_sdm_helper_read_rate(&nm->common, &nm->sdm, m, n);
	else
		rate = ccu_nm_calc_rate(parent_rate, n, m);

	if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate /= nm->fixed_post_div;

	return rate;
}

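/*
 * Rate requests are handled in the pre-post-divider domain: scale the
 * request up by the fixed post-divider, clamp it to the clock's min/max
 * rates, and return early if the fractional or sigma-delta helpers can
 * provide the rate exactly. Otherwise pick the best integer N/M
 * combination. The result is scaled back down before it is reported.
 */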
static int ccu_nm_determine_rate(struct clk_hw *hw,
				 struct clk_rate_request *req)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);
	struct _ccu_nm _nm;

	if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
		req->rate *= nm->fixed_post_div;

	if (req->rate < nm->min_rate) {
		req->rate = nm->min_rate;
		if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
			req->rate /= nm->fixed_post_div;
		return 0;
	}

	if (nm->max_rate && req->rate > nm->max_rate) {
		req->rate = nm->max_rate;
		if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
			req->rate /= nm->fixed_post_div;
		return 0;
	}

	if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, req->rate)) {
		if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
			req->rate /= nm->fixed_post_div;
		return 0;
	}

	if (ccu_sdm_helper_has_rate(&nm->common, &nm->sdm, req->rate)) {
		if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
			req->rate /= nm->fixed_post_div;
		return 0;
	}

	_nm.min_n = nm->n.min ?: 1;
	_nm.max_n = nm->n.max ?: 1 << nm->n.width;
	_nm.min_m = 1;
	_nm.max_m = nm->m.max ?: 1 << nm->m.width;

	req->rate = ccu_nm_find_best(&nm->common, req->best_parent_rate,
				     req->rate, &_nm);

	if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
		req->rate /= nm->fixed_post_div;

	return 0;
}

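/*
 * Program the factors: in fractional mode most SoCs want M forced to 0,
 * so clear it under the register lock before letting the fractional
 * helper set the rate. Otherwise take the factors from the sigma-delta
 * table or from the exhaustive search, write N and M in a single
 * read-modify-write, and wait for the PLL to lock.
 */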
static int ccu_nm_set_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long parent_rate)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);
	struct _ccu_nm _nm;
	unsigned long flags;
	u32 reg;

	/* Adjust target rate according to post-dividers */
	if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate = rate * nm->fixed_post_div;

	if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate)) {
		spin_lock_irqsave(nm->common.lock, flags);

		/* most SoCs require M to be 0 if fractional mode is used */
		reg = readl(nm->common.base + nm->common.reg);
		reg &= ~GENMASK(nm->m.width + nm->m.shift - 1, nm->m.shift);
		writel(reg, nm->common.base + nm->common.reg);

		spin_unlock_irqrestore(nm->common.lock, flags);

		ccu_frac_helper_enable(&nm->common, &nm->frac);

		return ccu_frac_helper_set_rate(&nm->common, &nm->frac,
						rate, nm->lock);
	} else {
		ccu_frac_helper_disable(&nm->common, &nm->frac);
	}

	_nm.min_n = nm->n.min ?: 1;
	_nm.max_n = nm->n.max ?: 1 << nm->n.width;
	_nm.min_m = 1;
	_nm.max_m = nm->m.max ?: 1 << nm->m.width;

	if (ccu_sdm_helper_has_rate(&nm->common, &nm->sdm, rate)) {
		ccu_sdm_helper_enable(&nm->common, &nm->sdm, rate);

		/* Sigma delta modulation requires specific N and M factors */
		ccu_sdm_helper_get_factors(&nm->common, &nm->sdm, rate,
					   &_nm.m, &_nm.n);
	} else {
		ccu_sdm_helper_disable(&nm->common, &nm->sdm);
		ccu_nm_find_best(&nm->common, parent_rate, rate, &_nm);
	}

	spin_lock_irqsave(nm->common.lock, flags);

	reg = readl(nm->common.base + nm->common.reg);
	reg &= ~GENMASK(nm->n.width + nm->n.shift - 1, nm->n.shift);
	reg &= ~GENMASK(nm->m.width + nm->m.shift - 1, nm->m.shift);

	reg |= (_nm.n - nm->n.offset) << nm->n.shift;
	reg |= (_nm.m - nm->m.offset) << nm->m.shift;
	writel(reg, nm->common.base + nm->common.reg);

	spin_unlock_irqrestore(nm->common.lock, flags);

	ccu_helper_wait_for_lock(&nm->common, nm->lock);

	return 0;
}

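/*
 * Ops shared by all sunxi-ng N/M clocks; exported in the SUNXI_CCU
 * symbol namespace.
 */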
const struct clk_ops ccu_nm_ops = {
	.disable	= ccu_nm_disable,
	.enable		= ccu_nm_enable,
	.is_enabled	= ccu_nm_is_enabled,

	.recalc_rate	= ccu_nm_recalc_rate,
	.determine_rate = ccu_nm_determine_rate,
	.set_rate	= ccu_nm_set_rate,
};
EXPORT_SYMBOL_NS_GPL(ccu_nm_ops, "SUNXI_CCU");