/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Emmanuel Vadot <manu@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>

#include <dev/clk/clk.h>

#include <dev/clk/allwinner/aw_clk.h>
#include <dev/clk/allwinner/aw_clk_nmm.h>

#include "clkdev_if.h"

/*
 * clknode for clocks matching the formula:
 *
 * clk = clkin * n / m0 / m1
 */
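/*
 * For example (values purely illustrative), a 24 MHz parent with n = 50,
 * m0 = 2 and m1 = 1 would yield 24000000 * 50 / 2 / 1 = 600 MHz.
 */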

/* Per-node context, filled from the aw_clk_nmm_def at registration time. */
struct aw_clk_nmm_sc {
	uint32_t		offset;		/* Register offset of the clock. */

	struct aw_clk_factor	n;		/* Multiplier factor. */
	struct aw_clk_factor	m0;		/* First divider factor. */
	struct aw_clk_factor	m1;		/* Second divider factor. */

	uint32_t		gate_shift;	/* Gate bit (AW_CLK_HAS_GATE). */
	uint32_t		lock_shift;	/* Lock bit (AW_CLK_HAS_LOCK). */
	uint32_t		lock_retries;	/* Max polls while waiting for lock. */

	uint32_t		flags;		/* AW_CLK_* flags. */
};

#define	WRITE4(_clk, off, val)						\
	CLKDEV_WRITE_4(clknode_get_device(_clk), off, val)
#define	READ4(_clk, off, val)						\
	CLKDEV_READ_4(clknode_get_device(_clk), off, val)
#define	DEVICE_LOCK(_clk)						\
	CLKDEV_DEVICE_LOCK(clknode_get_device(_clk))
#define	DEVICE_UNLOCK(_clk)						\
	CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk))

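/*
 * Initialize the clknode.  NMM clocks have a single parent, so always
 * select parent index 0.
 */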
static int
aw_clk_nmm_init(struct clknode *clk, device_t dev)
{

	clknode_init_parent_idx(clk, 0);
	return (0);
}

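/*
 * Gate or ungate the clock by toggling the gate bit, when the clock has
 * one (AW_CLK_HAS_GATE); otherwise this is a no-op.
 */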
static int
aw_clk_nmm_set_gate(struct clknode *clk, bool enable)
{
	struct aw_clk_nmm_sc *sc;
	uint32_t val;

	sc = clknode_get_softc(clk);

	if ((sc->flags & AW_CLK_HAS_GATE) == 0)
		return (0);

	DEVICE_LOCK(clk);
	READ4(clk, sc->offset, &val);
	if (enable)
		val |= (1 << sc->gate_shift);
	else
		val &= ~(1 << sc->gate_shift);
	WRITE4(clk, sc->offset, val);
	DEVICE_UNLOCK(clk);

	return (0);
}

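/*
 * Brute-force search of the n/m0/m1 factor space for the combination that
 * produces the frequency closest to *fout for the given parent frequency.
 * Returns the best achievable frequency and the matching factors.
 */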
static uint64_t
aw_clk_nmm_find_best(struct aw_clk_nmm_sc *sc, uint64_t fparent, uint64_t *fout,
    uint32_t *factor_n, uint32_t *factor_m0, uint32_t *factor_m1)
{
	uint64_t cur, best;
	uint32_t n, m0, m1;
	uint32_t max_n, max_m0, max_m1;
	uint32_t min_n, min_m0, min_m1;

	/* Start from 0 so the first candidate always improves on "best". */
	best = 0;
	*factor_n = *factor_m0 = *factor_m1 = 0;

	max_n = aw_clk_factor_get_max(&sc->n);
	min_n = aw_clk_factor_get_min(&sc->n);
	max_m0 = aw_clk_factor_get_max(&sc->m0);
	min_m0 = aw_clk_factor_get_min(&sc->m0);
	max_m1 = aw_clk_factor_get_max(&sc->m1);
	min_m1 = aw_clk_factor_get_min(&sc->m1);

	for (m0 = min_m0; m0 <= max_m0; ) {
		for (m1 = min_m1; m1 <= max_m1; ) {
			for (n = min_n; n <= max_n; ) {
				cur = fparent * n / m0 / m1;
				if (abs(*fout - cur) < abs(*fout - best)) {
					best = cur;
					*factor_n = n;
					*factor_m0 = m0;
					*factor_m1 = m1;
				}
				n++;
			}
			m1++;
		}
		m0++;
	}

	return (best);
}

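/*
 * Program the clock to the frequency requested in *fout.  The best factor
 * combination is computed first; depending on the CLK_SET_* flags the
 * request may be a dry run, or may be rejected if it cannot be matched
 * exactly.  After writing the factors, wait for the lock bit when the
 * clock has one.
 */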
static int
aw_clk_nmm_set_freq(struct clknode *clk, uint64_t fparent, uint64_t *fout,
    int flags, int *stop)
{
	struct aw_clk_nmm_sc *sc;
	uint64_t cur, best;
	uint32_t val, n, m0, m1, best_n, best_m0, best_m1;
	int retry;

	sc = clknode_get_softc(clk);

	best = cur = 0;

	best = aw_clk_nmm_find_best(sc, fparent, fout,
	    &best_n, &best_m0, &best_m1);

	if ((flags & CLK_SET_DRYRUN) != 0) {
		*fout = best;
		*stop = 1;
		return (0);
	}

	if ((best < *fout) &&
	    ((flags & CLK_SET_ROUND_DOWN) == 0)) {
		*stop = 1;
		return (ERANGE);
	}
	if ((best > *fout) &&
	    ((flags & CLK_SET_ROUND_UP) == 0)) {
		*stop = 1;
		return (ERANGE);
	}

	DEVICE_LOCK(clk);
	READ4(clk, sc->offset, &val);

	n = aw_clk_factor_get_value(&sc->n, best_n);
	m0 = aw_clk_factor_get_value(&sc->m0, best_m0);
	m1 = aw_clk_factor_get_value(&sc->m1, best_m1);
	val &= ~sc->n.mask;
	val &= ~sc->m0.mask;
	val &= ~sc->m1.mask;
	val |= n << sc->n.shift;
	val |= m0 << sc->m0.shift;
	val |= m1 << sc->m1.shift;

	WRITE4(clk, sc->offset, val);
	DEVICE_UNLOCK(clk);

	if ((sc->flags & AW_CLK_HAS_LOCK) != 0) {
		for (retry = 0; retry < sc->lock_retries; retry++) {
			READ4(clk, sc->offset, &val);
			if ((val & (1 << sc->lock_shift)) != 0)
				break;
			DELAY(1000);
		}
	}

	*fout = best;
	*stop = 1;

	return (0);
}

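/*
 * Recalculate the output frequency: *freq holds the parent frequency on
 * entry and is scaled by the n/m0/m1 factors currently programmed in the
 * register.
 */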
static int
aw_clk_nmm_recalc(struct clknode *clk, uint64_t *freq)
{
	struct aw_clk_nmm_sc *sc;
	uint32_t val, n, m0, m1;

	sc = clknode_get_softc(clk);

	DEVICE_LOCK(clk);
	READ4(clk, sc->offset, &val);
	DEVICE_UNLOCK(clk);

	n = aw_clk_get_factor(val, &sc->n);
	m0 = aw_clk_get_factor(val, &sc->m0);
	m1 = aw_clk_get_factor(val, &sc->m1);

	*freq = *freq * n / m0 / m1;

	return (0);
}

static clknode_method_t aw_nmm_clknode_methods[] = {
	/* clknode interface */
	CLKNODEMETHOD(clknode_init,		aw_clk_nmm_init),
	CLKNODEMETHOD(clknode_set_gate,		aw_clk_nmm_set_gate),
	CLKNODEMETHOD(clknode_recalc_freq,	aw_clk_nmm_recalc),
	CLKNODEMETHOD(clknode_set_freq,		aw_clk_nmm_set_freq),
	CLKNODEMETHOD_END
};

DEFINE_CLASS_1(aw_nmm_clknode, aw_nmm_clknode_class, aw_nmm_clknode_methods,
    sizeof(struct aw_clk_nmm_sc), clknode_class);

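/*
 * Register an NMM clock with the clock domain.  The caller describes the
 * clock in an aw_clk_nmm_def (register offset, factor layout, gate/lock
 * bits and flags); this is copied into the node's softc and the node is
 * attached to the domain.
 *
 * A rough sketch of a caller, with purely illustrative register layout,
 * names and IDs (the real definitions live in the per-SoC CCU drivers):
 *
 *	static const char *pll_example_parents[] = { "osc24M" };
 *
 *	static struct aw_clk_nmm_def pll_example = {
 *		.clkdef = {
 *			.id = CLK_PLL_EXAMPLE,		(hypothetical ID)
 *			.name = "pll_example",
 *			.parent_names = pll_example_parents,
 *			.parent_cnt = nitems(pll_example_parents),
 *		},
 *		.offset = 0x40,				(illustrative offset)
 *		.n = { .shift = 8, .width = 7 },
 *		.m0 = { .shift = 0, .width = 1 },
 *		.m1 = { .shift = 1, .width = 1 },
 *		.gate_shift = 31,
 *		.lock_shift = 28,
 *		.lock_retries = 1000,
 *		.flags = AW_CLK_HAS_GATE | AW_CLK_HAS_LOCK,
 *	};
 *
 *	aw_clk_nmm_register(clkdom, &pll_example);
 */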
int
aw_clk_nmm_register(struct clkdom *clkdom, struct aw_clk_nmm_def *clkdef)
{
	struct clknode *clk;
	struct aw_clk_nmm_sc *sc;

	clk = clknode_create(clkdom, &aw_nmm_clknode_class, &clkdef->clkdef);
	if (clk == NULL)
		return (1);

	sc = clknode_get_softc(clk);

	sc->offset = clkdef->offset;

	sc->n.shift = clkdef->n.shift;
	sc->n.width = clkdef->n.width;
	sc->n.mask = ((1 << sc->n.width) - 1) << sc->n.shift;
	sc->n.value = clkdef->n.value;
	sc->n.flags = clkdef->n.flags;

	sc->m0.shift = clkdef->m0.shift;
	sc->m0.width = clkdef->m0.width;
	sc->m0.mask = ((1 << sc->m0.width) - 1) << sc->m0.shift;
	sc->m0.value = clkdef->m0.value;
	sc->m0.flags = clkdef->m0.flags;

	sc->m1.shift = clkdef->m1.shift;
	sc->m1.width = clkdef->m1.width;
	sc->m1.mask = ((1 << sc->m1.width) - 1) << sc->m1.shift;
	sc->m1.value = clkdef->m1.value;
	sc->m1.flags = clkdef->m1.flags;

	sc->gate_shift = clkdef->gate_shift;

	sc->lock_shift = clkdef->lock_shift;
	sc->lock_retries = clkdef->lock_retries;

	sc->flags = clkdef->flags;

	clknode_register(clkdom, clk);

	return (0);
}