xref: /linux/drivers/clk/sunxi-ng/ccu_nm.c (revision ec63e2a4897075e427c121d863bd89c44578094f)
1 /*
2  * Copyright (C) 2016 Maxime Ripard
3  * Maxime Ripard <maxime.ripard@free-electrons.com>
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation; either version 2 of
8  * the License, or (at your option) any later version.
9  */
10 
11 #include <linux/clk-provider.h>
12 
13 #include "ccu_frac.h"
14 #include "ccu_gate.h"
15 #include "ccu_nm.h"
16 
/*
 * Scratch parameter block for ccu_nm_find_best(): the caller fills in
 * the valid [min, max] ranges, the search writes back the best N and M.
 */
struct _ccu_nm {
	unsigned long	n, min_n, max_n;	/* multiplier N and its valid range */
	unsigned long	m, min_m, max_m;	/* divider M and its valid range */
};
21 
22 static unsigned long ccu_nm_calc_rate(unsigned long parent,
23 				      unsigned long n, unsigned long m)
24 {
25 	u64 rate = parent;
26 
27 	rate *= n;
28 	do_div(rate, m);
29 
30 	return rate;
31 }
32 
33 static void ccu_nm_find_best(unsigned long parent, unsigned long rate,
34 			     struct _ccu_nm *nm)
35 {
36 	unsigned long best_rate = 0;
37 	unsigned long best_n = 0, best_m = 0;
38 	unsigned long _n, _m;
39 
40 	for (_n = nm->min_n; _n <= nm->max_n; _n++) {
41 		for (_m = nm->min_m; _m <= nm->max_m; _m++) {
42 			unsigned long tmp_rate = ccu_nm_calc_rate(parent,
43 								  _n, _m);
44 
45 			if (tmp_rate > rate)
46 				continue;
47 
48 			if ((rate - tmp_rate) < (rate - best_rate)) {
49 				best_rate = tmp_rate;
50 				best_n = _n;
51 				best_m = _m;
52 			}
53 		}
54 	}
55 
56 	nm->n = best_n;
57 	nm->m = best_m;
58 }
59 
60 static void ccu_nm_disable(struct clk_hw *hw)
61 {
62 	struct ccu_nm *nm = hw_to_ccu_nm(hw);
63 
64 	return ccu_gate_helper_disable(&nm->common, nm->enable);
65 }
66 
67 static int ccu_nm_enable(struct clk_hw *hw)
68 {
69 	struct ccu_nm *nm = hw_to_ccu_nm(hw);
70 
71 	return ccu_gate_helper_enable(&nm->common, nm->enable);
72 }
73 
74 static int ccu_nm_is_enabled(struct clk_hw *hw)
75 {
76 	struct ccu_nm *nm = hw_to_ccu_nm(hw);
77 
78 	return ccu_gate_helper_is_enabled(&nm->common, nm->enable);
79 }
80 
81 static unsigned long ccu_nm_recalc_rate(struct clk_hw *hw,
82 					unsigned long parent_rate)
83 {
84 	struct ccu_nm *nm = hw_to_ccu_nm(hw);
85 	unsigned long rate;
86 	unsigned long n, m;
87 	u32 reg;
88 
89 	if (ccu_frac_helper_is_enabled(&nm->common, &nm->frac)) {
90 		rate = ccu_frac_helper_read_rate(&nm->common, &nm->frac);
91 
92 		if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
93 			rate /= nm->fixed_post_div;
94 
95 		return rate;
96 	}
97 
98 	reg = readl(nm->common.base + nm->common.reg);
99 
100 	n = reg >> nm->n.shift;
101 	n &= (1 << nm->n.width) - 1;
102 	n += nm->n.offset;
103 	if (!n)
104 		n++;
105 
106 	m = reg >> nm->m.shift;
107 	m &= (1 << nm->m.width) - 1;
108 	m += nm->m.offset;
109 	if (!m)
110 		m++;
111 
112 	if (ccu_sdm_helper_is_enabled(&nm->common, &nm->sdm))
113 		rate = ccu_sdm_helper_read_rate(&nm->common, &nm->sdm, m, n);
114 	else
115 		rate = ccu_nm_calc_rate(parent_rate, n, m);
116 
117 	if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
118 		rate /= nm->fixed_post_div;
119 
120 	return rate;
121 }
122 
123 static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate,
124 			      unsigned long *parent_rate)
125 {
126 	struct ccu_nm *nm = hw_to_ccu_nm(hw);
127 	struct _ccu_nm _nm;
128 
129 	if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
130 		rate *= nm->fixed_post_div;
131 
132 	if (rate < nm->min_rate) {
133 		rate = nm->min_rate;
134 		if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
135 			rate /= nm->fixed_post_div;
136 		return rate;
137 	}
138 
139 	if (nm->max_rate && rate > nm->max_rate) {
140 		rate = nm->max_rate;
141 		if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
142 			rate /= nm->fixed_post_div;
143 		return rate;
144 	}
145 
146 	if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate)) {
147 		if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
148 			rate /= nm->fixed_post_div;
149 		return rate;
150 	}
151 
152 	if (ccu_sdm_helper_has_rate(&nm->common, &nm->sdm, rate)) {
153 		if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
154 			rate /= nm->fixed_post_div;
155 		return rate;
156 	}
157 
158 	_nm.min_n = nm->n.min ?: 1;
159 	_nm.max_n = nm->n.max ?: 1 << nm->n.width;
160 	_nm.min_m = 1;
161 	_nm.max_m = nm->m.max ?: 1 << nm->m.width;
162 
163 	ccu_nm_find_best(*parent_rate, rate, &_nm);
164 	rate = ccu_nm_calc_rate(*parent_rate, _nm.n, _nm.m);
165 
166 	if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
167 		rate /= nm->fixed_post_div;
168 
169 	return rate;
170 }
171 
/*
 * clk_ops .set_rate: program the PLL for @rate.
 *
 * Picks one of three modes, mirroring ccu_nm_round_rate(): fractional
 * mode if the frac helper supports the rate exactly, otherwise
 * sigma-delta modulation if the SDM helper does, otherwise the best
 * integer N/M approximation.  Register updates are done read-modify-
 * write under the CCU spinlock, then we poll for PLL lock.
 *
 * Returns 0 on success (the frac path returns whatever
 * ccu_frac_helper_set_rate() returns).
 */
static int ccu_nm_set_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long parent_rate)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);
	struct _ccu_nm _nm;
	unsigned long flags;
	u32 reg;

	/* Adjust target rate according to post-dividers */
	if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate = rate * nm->fixed_post_div;

	if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate)) {
		spin_lock_irqsave(nm->common.lock, flags);

		/* most SoCs require M to be 0 if fractional mode is used */
		reg = readl(nm->common.base + nm->common.reg);
		reg &= ~GENMASK(nm->m.width + nm->m.shift - 1, nm->m.shift);
		writel(reg, nm->common.base + nm->common.reg);

		spin_unlock_irqrestore(nm->common.lock, flags);

		ccu_frac_helper_enable(&nm->common, &nm->frac);

		return ccu_frac_helper_set_rate(&nm->common, &nm->frac,
						rate, nm->lock);
	} else {
		ccu_frac_helper_disable(&nm->common, &nm->frac);
	}

	/* Search bounds for the integer N/M fallback (0 means "use full width") */
	_nm.min_n = nm->n.min ?: 1;
	_nm.max_n = nm->n.max ?: 1 << nm->n.width;
	_nm.min_m = 1;
	_nm.max_m = nm->m.max ?: 1 << nm->m.width;

	if (ccu_sdm_helper_has_rate(&nm->common, &nm->sdm, rate)) {
		ccu_sdm_helper_enable(&nm->common, &nm->sdm, rate);

		/* Sigma delta modulation requires specific N and M factors */
		ccu_sdm_helper_get_factors(&nm->common, &nm->sdm, rate,
					   &_nm.m, &_nm.n);
	} else {
		ccu_sdm_helper_disable(&nm->common, &nm->sdm);
		ccu_nm_find_best(parent_rate, rate, &_nm);
	}

	spin_lock_irqsave(nm->common.lock, flags);

	/* Clear both factor fields, then program the new N and M values */
	reg = readl(nm->common.base + nm->common.reg);
	reg &= ~GENMASK(nm->n.width + nm->n.shift - 1, nm->n.shift);
	reg &= ~GENMASK(nm->m.width + nm->m.shift - 1, nm->m.shift);

	reg |= (_nm.n - nm->n.offset) << nm->n.shift;
	reg |= (_nm.m - nm->m.offset) << nm->m.shift;
	writel(reg, nm->common.base + nm->common.reg);

	spin_unlock_irqrestore(nm->common.lock, flags);

	/* Wait for the PLL to report lock before returning */
	ccu_helper_wait_for_lock(&nm->common, nm->lock);

	return 0;
}
234 
/* Common clock framework operations for sunxi N/M-style PLLs. */
const struct clk_ops ccu_nm_ops = {
	.disable	= ccu_nm_disable,
	.enable		= ccu_nm_enable,
	.is_enabled	= ccu_nm_is_enabled,

	.recalc_rate	= ccu_nm_recalc_rate,
	.round_rate	= ccu_nm_round_rate,
	.set_rate	= ccu_nm_set_rate,
};
244