xref: /linux/drivers/clk/clk-loongson1.c (revision be1ca3ee8f97067fee87fda73ea5959d5ab75bbf)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Clock driver for Loongson-1 SoC
4  *
5  * Copyright (C) 2012-2023 Keguang Zhang <keguang.zhang@gmail.com>
6  */
7 
8 #include <linux/bits.h>
9 #include <linux/clk-provider.h>
10 #include <linux/container_of.h>
11 #include <linux/io.h>
12 #include <linux/of_address.h>
13 #include <linux/slab.h>
14 #include <linux/spinlock.h>
15 #include <linux/printk.h>
16 
17 #include <dt-bindings/clock/loongson,ls1x-clk.h>
18 
/* Loongson 1 Clock Register Definitions */
#define CLK_PLL_FREQ		0x0	/* PLL frequency configuration register offset */
#define CLK_PLL_DIV		0x4	/* PLL output divider configuration register offset */

/* Serializes read-modify-write sequences on the shared DIV registers. */
static DEFINE_SPINLOCK(ls1x_clk_div_lock);
24 
/*
 * Parameters describing how to decode a PLL frequency register.
 *
 * ls1x_pll_recalc_rate() computes:
 *   rate = (fixed + integer part + fractional part) * parent_rate >> shift
 */
struct ls1x_clk_pll_data {
	u32 fixed;	/* constant added to the multiplier fields */
	u8 shift;	/* right shift applied to the final product */
	u8 int_shift;	/* LSB position of the integer multiplier field */
	u8 int_width;	/* integer field size as consumed by ls1x_pll_rate_part() */
	u8 frac_shift;	/* LSB position of the fractional multiplier field */
	u8 frac_width;	/* fractional field size; 0 means "field not present" */
};
33 
/*
 * Parameters describing one divider field inside a clock register,
 * plus the bypass bit toggled around updates in ls1x_divider_set_rate().
 */
struct ls1x_clk_div_data {
	u8 shift;	/* LSB position of the divider field */
	u8 width;	/* width of the divider field, in bits */
	unsigned long flags;	/* CLK_DIVIDER_* flags */
	const struct clk_div_table *table;	/* optional val->div table; NULL = linear divider */
	u8 bypass_shift;	/* bit position of the bypass control */
	u8 bypass_inv;	/* non-zero: bypass is engaged by clearing the bit */
	spinlock_t *lock;	/* protect access to DIV registers */
};
43 
/*
 * A Loongson-1 clock: a common clk_hw plus the register backing it.
 * @data points at either a ls1x_clk_pll_data or a ls1x_clk_div_data,
 * depending on which clk_ops the clock was declared with.
 */
struct ls1x_clk {
	void __iomem *reg;	/* mapped register address, set by ls1x_clk_init() */
	unsigned int offset;	/* register offset from the controller base */
	struct clk_hw hw;
	const void *data;	/* per-type parameters (PLL or divider) */
};

/* Map an embedded clk_hw back to its containing ls1x_clk. */
#define to_ls1x_clk(_hw) container_of(_hw, struct ls1x_clk, hw)
52 
/*
 * Extract one multiplier field from a PLL register value.
 *
 * NOTE(review): GENMASK(width, 0) spans width + 1 bits, so "width" here
 * effectively names the field's top bit relative to "shift" rather than
 * a bit count — confirm the *_width values in the PLL tables were chosen
 * with this in mind.
 */
static inline unsigned long ls1x_pll_rate_part(unsigned int val,
					       unsigned int shift,
					       unsigned int width)
{
	return (val >> shift) & GENMASK(width, 0);
}
59 
/*
 * Compute the PLL output rate from the frequency register:
 *   rate = (fixed + integer part + fractional part) * parent_rate >> shift
 *
 * NOTE(review): the whole computation is done in u32; a product near
 * 4 GHz would wrap before the final shift — presumably out of range for
 * these SoCs, but worth confirming against the datasheet.
 */
static unsigned long ls1x_pll_recalc_rate(struct clk_hw *hw,
					  unsigned long parent_rate)
{
	struct ls1x_clk *ls1x_clk = to_ls1x_clk(hw);
	const struct ls1x_clk_pll_data *d = ls1x_clk->data;
	u32 val, rate;

	val = readl(ls1x_clk->reg);
	rate = d->fixed;
	rate += ls1x_pll_rate_part(val, d->int_shift, d->int_width);
	if (d->frac_width)	/* the fractional field is optional */
		rate += ls1x_pll_rate_part(val, d->frac_shift, d->frac_width);
	rate *= parent_rate;
	rate >>= d->shift;

	return rate;
}
77 
/* The PLL is read-only here: only rate recalculation is provided. */
static const struct clk_ops ls1x_pll_clk_ops = {
	.recalc_rate = ls1x_pll_recalc_rate,
};
81 
82 static unsigned long ls1x_divider_recalc_rate(struct clk_hw *hw,
83 					      unsigned long parent_rate)
84 {
85 	struct ls1x_clk *ls1x_clk = to_ls1x_clk(hw);
86 	const struct ls1x_clk_div_data *d = ls1x_clk->data;
87 	unsigned int val;
88 
89 	val = readl(ls1x_clk->reg) >> d->shift;
90 	val &= clk_div_mask(d->width);
91 
92 	return divider_recalc_rate(hw, parent_rate, val, d->table,
93 				   d->flags, d->width);
94 }
95 
96 static int ls1x_divider_determine_rate(struct clk_hw *hw,
97 				       struct clk_rate_request *req)
98 {
99 	struct ls1x_clk *ls1x_clk = to_ls1x_clk(hw);
100 	const struct ls1x_clk_div_data *d = ls1x_clk->data;
101 
102 	return divider_determine_rate(hw, req, d->table, d->width, d->flags);
103 }
104 
/*
 * Program a new divider value.
 *
 * The register sequence engages the bypass bit, rewrites the divider
 * field, then releases the bypass — presumably because the divider must
 * not change while it is actively clocking downstream logic (TODO:
 * confirm against the SoC manual).  bypass_inv selects the polarity of
 * the bypass bit.  The whole sequence runs under the shared DIV-register
 * spinlock with interrupts disabled.
 *
 * Returns 0 on success or a negative errno from divider_get_val().
 */
static int ls1x_divider_set_rate(struct clk_hw *hw, unsigned long rate,
				 unsigned long parent_rate)
{
	struct ls1x_clk *ls1x_clk = to_ls1x_clk(hw);
	const struct ls1x_clk_div_data *d = ls1x_clk->data;
	int val, div_val;
	unsigned long flags = 0;

	/* Translate the requested rate into a raw divider field value. */
	div_val = divider_get_val(rate, parent_rate, d->table,
				  d->width, d->flags);
	if (div_val < 0)
		return div_val;

	spin_lock_irqsave(d->lock, flags);

	/* Bypass the clock */
	val = readl(ls1x_clk->reg);
	if (d->bypass_inv)
		val &= ~BIT(d->bypass_shift);
	else
		val |= BIT(d->bypass_shift);
	writel(val, ls1x_clk->reg);

	/* Rewrite the divider field while the output is bypassed. */
	val = readl(ls1x_clk->reg);
	val &= ~(clk_div_mask(d->width) << d->shift);
	val |= (u32)div_val << d->shift;
	writel(val, ls1x_clk->reg);

	/* Restore the clock */
	val = readl(ls1x_clk->reg);
	if (d->bypass_inv)
		val |= BIT(d->bypass_shift);
	else
		val &= ~BIT(d->bypass_shift);
	writel(val, ls1x_clk->reg);

	spin_unlock_irqrestore(d->lock, flags);

	return 0;
}
145 
/* Read/write divider with bypass handling around rate changes. */
static const struct clk_ops ls1x_clk_divider_ops = {
	.recalc_rate = ls1x_divider_recalc_rate,
	.determine_rate = ls1x_divider_determine_rate,
	.set_rate = ls1x_divider_set_rate,
};
151 
/*
 * Declare a ls1x_clk PLL instance named _name.
 *
 * _offset locates the PLL register relative to the mapped controller
 * base (ls1x_clk_init() fills in .reg).  The remaining arguments
 * populate a ls1x_clk_pll_data; the parent is always the external
 * "xtal" oscillator.
 */
#define LS1X_CLK_PLL(_name, _offset, _fixed, _shift,			\
		     f_shift, f_width, i_shift, i_width)		\
struct ls1x_clk _name = {						\
	.offset = (_offset),						\
	.data = &(const struct ls1x_clk_pll_data) {			\
		.fixed = (_fixed),					\
		.shift = (_shift),					\
		.int_shift = (i_shift),					\
		.int_width = (i_width),					\
		.frac_shift = (f_shift),				\
		.frac_width = (f_width),				\
	},								\
	.hw.init = &(const struct clk_init_data) {			\
		.name = #_name,						\
		.ops = &ls1x_pll_clk_ops,				\
		.parent_data = &(const struct clk_parent_data) {	\
			.fw_name = "xtal",				\
			.name = "xtal",					\
			.index = -1,					\
		},							\
		.num_parents = 1,					\
	},								\
}
175 
/*
 * Declare a ls1x_clk divider instance named _name, dividing the clock
 * given by _pname (a struct clk_hw pointer).
 *
 * CLK_GET_RATE_NOCACHE makes every rate query re-read the register
 * instead of using the framework's cached value.
 */
#define LS1X_CLK_DIV(_name, _pname, _offset, _shift, _width,		\
		     _table, _bypass_shift, _bypass_inv, _flags)	\
struct ls1x_clk _name = {						\
	.offset = (_offset),						\
	.data = &(const struct ls1x_clk_div_data){			\
		.shift = (_shift),					\
		.width = (_width),					\
		.table = (_table),					\
		.flags = (_flags),					\
		.bypass_shift = (_bypass_shift),			\
		.bypass_inv = (_bypass_inv),				\
		.lock = &ls1x_clk_div_lock,				\
	},								\
	.hw.init = &(const struct clk_init_data) {			\
		.name = #_name,						\
		.ops = &ls1x_clk_divider_ops,				\
		.parent_hws = (const struct clk_hw *[]) { _pname },	\
		.num_parents = 1,					\
		.flags = CLK_GET_RATE_NOCACHE,				\
	},								\
}
197 
/* Loongson-1B: PLL rate = (12 + multiplier field) * xtal >> 1 */
static LS1X_CLK_PLL(ls1b_clk_pll, CLK_PLL_FREQ, 12, 1, 0, 5, 0, 0);
/* CPU divider: PLL_DIV bits [23:20], bypass bit 8 */
static LS1X_CLK_DIV(ls1b_clk_cpu, &ls1b_clk_pll.hw, CLK_PLL_DIV,
		    20, 4, NULL, 8, 0,
		    CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ROUND_CLOSEST);
/* DC divider: PLL_DIV bits [29:26], bypass bit 12 */
static LS1X_CLK_DIV(ls1b_clk_dc, &ls1b_clk_pll.hw, CLK_PLL_DIV,
		    26, 4, NULL, 12, 0, CLK_DIVIDER_ONE_BASED);
/* AHB divider: PLL_DIV bits [17:14], bypass bit 10 */
static LS1X_CLK_DIV(ls1b_clk_ahb, &ls1b_clk_pll.hw, CLK_PLL_DIV,
		    14, 4, NULL, 10, 0, CLK_DIVIDER_ONE_BASED);
/* APB is a fixed 1/2 factor of AHB */
static CLK_FIXED_FACTOR(ls1b_clk_apb, "ls1b_clk_apb", "ls1b_clk_ahb", 2, 1,
			CLK_SET_RATE_PARENT);
208 
/* Provider table for LS1B, indexed by the LS1X_CLKID_* dt-binding IDs. */
static struct clk_hw_onecell_data ls1b_clk_hw_data = {
	.hws = {
		[LS1X_CLKID_PLL] = &ls1b_clk_pll.hw,
		[LS1X_CLKID_CPU] = &ls1b_clk_cpu.hw,
		[LS1X_CLKID_DC] = &ls1b_clk_dc.hw,
		[LS1X_CLKID_AHB] = &ls1b_clk_ahb.hw,
		[LS1X_CLKID_APB] = &ls1b_clk_apb.hw,
	},
	.num = CLK_NR_CLKS,
};
219 
/*
 * LS1C AHB divider encoding.  Register values 2 and 3 both select a
 * divide-by-3; value 0 selects divide-by-2 (CLK_DIVIDER_ALLOW_ZERO is
 * set on the consumer).
 */
static const struct clk_div_table ls1c_ahb_div_table[] = {
	[0] = { .val = 0, .div = 2 },
	[1] = { .val = 1, .div = 4 },
	[2] = { .val = 2, .div = 3 },
	[3] = { .val = 3, .div = 3 },
	[4] = { /* sentinel */ }
};
227 
/* Loongson-1C: PLL rate = (int field @16 + frac field @8) * xtal >> 2 */
static LS1X_CLK_PLL(ls1c_clk_pll, CLK_PLL_FREQ, 0, 2, 8, 8, 16, 8);
/* CPU divider: PLL_DIV bits [14:8], bypass bit 0 (inverted polarity) */
static LS1X_CLK_DIV(ls1c_clk_cpu, &ls1c_clk_pll.hw, CLK_PLL_DIV,
		    8, 7, NULL, 0, 1,
		    CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ROUND_CLOSEST);
/* DC divider: PLL_DIV bits [30:24], bypass bit 4 (inverted polarity) */
static LS1X_CLK_DIV(ls1c_clk_dc, &ls1c_clk_pll.hw, CLK_PLL_DIV,
		    24, 7, NULL, 4, 1, CLK_DIVIDER_ONE_BASED);
/* AHB divides the CPU clock via a table in PLL_FREQ bits [1:0] */
static LS1X_CLK_DIV(ls1c_clk_ahb, &ls1c_clk_cpu.hw, CLK_PLL_FREQ,
		    0, 2, ls1c_ahb_div_table, 0, 0, CLK_DIVIDER_ALLOW_ZERO);
/* APB runs at the AHB rate (1:1 fixed factor) */
static CLK_FIXED_FACTOR(ls1c_clk_apb, "ls1c_clk_apb", "ls1c_clk_ahb", 1, 1,
			CLK_SET_RATE_PARENT);
238 
/* Provider table for LS1C, indexed by the LS1X_CLKID_* dt-binding IDs. */
static struct clk_hw_onecell_data ls1c_clk_hw_data = {
	.hws = {
		[LS1X_CLKID_PLL] = &ls1c_clk_pll.hw,
		[LS1X_CLKID_CPU] = &ls1c_clk_cpu.hw,
		[LS1X_CLKID_DC] = &ls1c_clk_dc.hw,
		[LS1X_CLKID_AHB] = &ls1c_clk_ahb.hw,
		[LS1X_CLKID_APB] = &ls1c_clk_apb.hw,
	},
	.num = CLK_NR_CLKS,
};
249 
250 static void __init ls1x_clk_init(struct device_node *np,
251 				 struct clk_hw_onecell_data *hw_data)
252 {
253 	struct ls1x_clk *ls1x_clk;
254 	void __iomem *reg;
255 	int i, ret;
256 
257 	reg = of_iomap(np, 0);
258 	if (!reg) {
259 		pr_err("Unable to map base for %pOF\n", np);
260 		return;
261 	}
262 
263 	for (i = 0; i < hw_data->num; i++) {
264 		/* array might be sparse */
265 		if (!hw_data->hws[i])
266 			continue;
267 
268 		if (i != LS1X_CLKID_APB) {
269 			ls1x_clk = to_ls1x_clk(hw_data->hws[i]);
270 			ls1x_clk->reg = reg + ls1x_clk->offset;
271 		}
272 
273 		ret = of_clk_hw_register(np, hw_data->hws[i]);
274 		if (ret)
275 			goto err;
276 	}
277 
278 	ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, hw_data);
279 	if (!ret)
280 		return;
281 
282 err:
283 	pr_err("Failed to register %pOF\n", np);
284 
285 	while (--i >= 0)
286 		clk_hw_unregister(hw_data->hws[i]);
287 
288 	iounmap(reg);
289 }
290 
291 static void __init ls1b_clk_init(struct device_node *np)
292 {
293 	return ls1x_clk_init(np, &ls1b_clk_hw_data);
294 }
295 
296 static void __init ls1c_clk_init(struct device_node *np)
297 {
298 	return ls1x_clk_init(np, &ls1c_clk_hw_data);
299 }
300 
/* Bind the init hooks to their devicetree compatible strings. */
CLK_OF_DECLARE(ls1b_clk, "loongson,ls1b-clk", ls1b_clk_init);
CLK_OF_DECLARE(ls1c_clk, "loongson,ls1c-clk", ls1c_clk_init);
303