/*
 * arch/mips/bcm63xx/clk.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 */

#include <linux/init.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/delay.h>
#include <bcm63xx_cpu.h>
#include <bcm63xx_io.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_reset.h>

struct clk {
	void		(*set)(struct clk *, int);	/* hardware gate callback */
	unsigned int	rate;	/* fixed rate in Hz, 0 for gate-only clocks */
	unsigned int	usage;	/* enable refcount */
	int		id;	/* instance id, e.g. enet0 vs enet1 */
};

static DEFINE_MUTEX(clocks_mutex);


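/*
 * The *_unlocked helpers keep a simple enable refcount in clk->usage:
 * the hardware gate is only touched on the 0 -> 1 and 1 -> 0
 * transitions.  Callers must hold clocks_mutex, which the public
 * clk_enable()/clk_disable() below take care of.
 */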
static void clk_enable_unlocked(struct clk *clk)
{
	if (clk->set && (clk->usage++) == 0)
		clk->set(clk, 1);
}

static void clk_disable_unlocked(struct clk *clk)
{
	if (clk->set && (--clk->usage) == 0)
		clk->set(clk, 0);
}

static void bcm_hwclock_set(u32 mask, int enable)
{
	u32 reg;

	reg = bcm_perf_readl(PERF_CKCTL_REG);
	if (enable)
		reg |= mask;
	else
		reg &= ~mask;
	bcm_perf_writel(reg, PERF_CKCTL_REG);
}

/*
 * Ethernet MAC "misc" clock: dma clocks and main clock on 6348
 */
static void enet_misc_set(struct clk *clk, int enable)
{
	u32 mask;

	if (BCMCPU_IS_6338())
		mask = CKCTL_6338_ENET_EN;
	else if (BCMCPU_IS_6345())
		mask = CKCTL_6345_ENET_EN;
	else if (BCMCPU_IS_6348())
		mask = CKCTL_6348_ENET_EN;
	else
		/* BCMCPU_IS_6358 */
		mask = CKCTL_6358_EMUSB_EN;
	bcm_hwclock_set(mask, enable);
}

static struct clk clk_enet_misc = {
	.set	= enet_misc_set,
};

/*
 * Ethernet MAC clocks: only relevant on 3368/6358, silently enable
 * misc clocks
 */
static void enetx_set(struct clk *clk, int enable)
{
	if (enable)
		clk_enable_unlocked(&clk_enet_misc);
	else
		clk_disable_unlocked(&clk_enet_misc);

	if (BCMCPU_IS_3368() || BCMCPU_IS_6358()) {
		u32 mask;

		if (clk->id == 0)
			mask = CKCTL_6358_ENET0_EN;
		else
			mask = CKCTL_6358_ENET1_EN;
		bcm_hwclock_set(mask, enable);
	}
}

static struct clk clk_enet0 = {
	.id	= 0,
	.set	= enetx_set,
};

static struct clk clk_enet1 = {
	.id	= 1,
	.set	= enetx_set,
};

/*
 * Ethernet PHY clock
 */
static void ephy_set(struct clk *clk, int enable)
{
	if (BCMCPU_IS_3368() || BCMCPU_IS_6358())
		bcm_hwclock_set(CKCTL_6358_EPHY_EN, enable);
}


static struct clk clk_ephy = {
	.set	= ephy_set,
};

/*
 * Ethernet switch SAR clock
 */
static void swpkt_sar_set(struct clk *clk, int enable)
{
	if (BCMCPU_IS_6368())
		bcm_hwclock_set(CKCTL_6368_SWPKT_SAR_EN, enable);
}

static struct clk clk_swpkt_sar = {
	.set	= swpkt_sar_set,
};

/*
 * Ethernet switch USB clock
 */
static void swpkt_usb_set(struct clk *clk, int enable)
{
	if (BCMCPU_IS_6368())
		bcm_hwclock_set(CKCTL_6368_SWPKT_USB_EN, enable);
}

static struct clk clk_swpkt_usb = {
	.set	= swpkt_usb_set,
};

/*
 * Ethernet switch clock
 */
static void enetsw_set(struct clk *clk, int enable)
{
	if (BCMCPU_IS_6328()) {
		bcm_hwclock_set(CKCTL_6328_ROBOSW_EN, enable);
	} else if (BCMCPU_IS_6362()) {
		bcm_hwclock_set(CKCTL_6362_ROBOSW_EN, enable);
	} else if (BCMCPU_IS_6368()) {
		if (enable) {
			clk_enable_unlocked(&clk_swpkt_sar);
			clk_enable_unlocked(&clk_swpkt_usb);
		} else {
			clk_disable_unlocked(&clk_swpkt_usb);
			clk_disable_unlocked(&clk_swpkt_sar);
		}
		bcm_hwclock_set(CKCTL_6368_ROBOSW_EN, enable);
	} else {
		return;
	}

	if (enable) {
		/* reset switch core after clock change */
		bcm63xx_core_set_reset(BCM63XX_RESET_ENETSW, 1);
		msleep(10);
		bcm63xx_core_set_reset(BCM63XX_RESET_ENETSW, 0);
		msleep(10);
	}
}

static struct clk clk_enetsw = {
	.set	= enetsw_set,
};

/*
 * PCM clock
 */
static void pcm_set(struct clk *clk, int enable)
{
	if (BCMCPU_IS_3368())
		bcm_hwclock_set(CKCTL_3368_PCM_EN, enable);
	if (BCMCPU_IS_6358())
		bcm_hwclock_set(CKCTL_6358_PCM_EN, enable);
}

static struct clk clk_pcm = {
	.set	= pcm_set,
};

/*
 * USB host clock
 */
static void usbh_set(struct clk *clk, int enable)
{
	if (BCMCPU_IS_6328())
		bcm_hwclock_set(CKCTL_6328_USBH_EN, enable);
	else if (BCMCPU_IS_6348())
		bcm_hwclock_set(CKCTL_6348_USBH_EN, enable);
	else if (BCMCPU_IS_6362())
		bcm_hwclock_set(CKCTL_6362_USBH_EN, enable);
	else if (BCMCPU_IS_6368())
		bcm_hwclock_set(CKCTL_6368_USBH_EN, enable);
}

static struct clk clk_usbh = {
	.set	= usbh_set,
};

/*
 * USB device clock
 */
static void usbd_set(struct clk *clk, int enable)
{
	if (BCMCPU_IS_6328())
		bcm_hwclock_set(CKCTL_6328_USBD_EN, enable);
	else if (BCMCPU_IS_6362())
		bcm_hwclock_set(CKCTL_6362_USBD_EN, enable);
	else if (BCMCPU_IS_6368())
		bcm_hwclock_set(CKCTL_6368_USBD_EN, enable);
}

static struct clk clk_usbd = {
	.set	= usbd_set,
};

/*
 * SPI clock
 */
static void spi_set(struct clk *clk, int enable)
{
	u32 mask;

	if (BCMCPU_IS_6338())
		mask = CKCTL_6338_SPI_EN;
	else if (BCMCPU_IS_6348())
		mask = CKCTL_6348_SPI_EN;
	else if (BCMCPU_IS_3368() || BCMCPU_IS_6358())
		mask = CKCTL_6358_SPI_EN;
	else if (BCMCPU_IS_6362())
		mask = CKCTL_6362_SPI_EN;
	else
		/* BCMCPU_IS_6368 */
		mask = CKCTL_6368_SPI_EN;
	bcm_hwclock_set(mask, enable);
}

static struct clk clk_spi = {
	.set	= spi_set,
};

/*
 * HSSPI clock
 */
static void hsspi_set(struct clk *clk, int enable)
{
	u32 mask;

	if (BCMCPU_IS_6328())
		mask = CKCTL_6328_HSSPI_EN;
	else if (BCMCPU_IS_6362())
		mask = CKCTL_6362_HSSPI_EN;
	else
		return;

	bcm_hwclock_set(mask, enable);
}

static struct clk clk_hsspi = {
	.set	= hsspi_set,
};

/*
 * HSSPI PLL
 */
static struct clk clk_hsspi_pll;

/*
 * XTM clock
 */
static void xtm_set(struct clk *clk, int enable)
{
	if (!BCMCPU_IS_6368())
		return;

	if (enable)
		clk_enable_unlocked(&clk_swpkt_sar);
	else
		clk_disable_unlocked(&clk_swpkt_sar);

	bcm_hwclock_set(CKCTL_6368_SAR_EN, enable);

	if (enable) {
		/* reset sar core after clock change */
		bcm63xx_core_set_reset(BCM63XX_RESET_SAR, 1);
		mdelay(1);
		bcm63xx_core_set_reset(BCM63XX_RESET_SAR, 0);
		mdelay(1);
	}
}


static struct clk clk_xtm = {
	.set	= xtm_set,
};

/*
 * IPsec clock
 */
static void ipsec_set(struct clk *clk, int enable)
{
	if (BCMCPU_IS_6362())
		bcm_hwclock_set(CKCTL_6362_IPSEC_EN, enable);
	else if (BCMCPU_IS_6368())
		bcm_hwclock_set(CKCTL_6368_IPSEC_EN, enable);
}

static struct clk clk_ipsec = {
	.set	= ipsec_set,
};

/*
 * PCIe clock
 */

static void pcie_set(struct clk *clk, int enable)
{
	if (BCMCPU_IS_6328())
		bcm_hwclock_set(CKCTL_6328_PCIE_EN, enable);
	else if (BCMCPU_IS_6362())
		bcm_hwclock_set(CKCTL_6362_PCIE_EN, enable);
}

static struct clk clk_pcie = {
	.set	= pcie_set,
};

/*
 * Internal peripheral clock
 */
static struct clk clk_periph = {
	.rate	= (50 * 1000 * 1000),
};


/*
 * Linux clock API implementation
 */
int clk_enable(struct clk *clk)
{
	if (!clk)
		return 0;
	mutex_lock(&clocks_mutex);
	clk_enable_unlocked(clk);
	mutex_unlock(&clocks_mutex);
	return 0;
}

EXPORT_SYMBOL(clk_enable);

void clk_disable(struct clk *clk)
{
	if (!clk)
		return;

	mutex_lock(&clocks_mutex);
	clk_disable_unlocked(clk);
	mutex_unlock(&clocks_mutex);
}

EXPORT_SYMBOL(clk_disable);

struct clk *clk_get_parent(struct clk *clk)
{
	return NULL;
}
EXPORT_SYMBOL(clk_get_parent);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	return 0;
}
EXPORT_SYMBOL(clk_set_parent);

unsigned long clk_get_rate(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk->rate;
}

EXPORT_SYMBOL(clk_get_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}
EXPORT_SYMBOL_GPL(clk_set_rate);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}
EXPORT_SYMBOL_GPL(clk_round_rate);

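/*
 * A minimal, illustrative consumer sketch (hypothetical driver code,
 * not part of this file): clocks are looked up through the clkdev
 * tables registered below and gated through the trivial API above.
 *
 *	struct clk *clk = clk_get(&pdev->dev, "usbh");
 *
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *	clk_enable(clk);	// flips the CKCTL enable bit on first use
 *	...
 *	clk_disable(clk);	// bit is cleared again when usage drops to 0
 *	clk_put(clk);
 */
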
static struct clk_lookup bcm3368_clks[] = {
	/* fixed rate clocks */
	CLKDEV_INIT(NULL, "periph", &clk_periph),
	CLKDEV_INIT("bcm63xx_uart.0", "refclk", &clk_periph),
	CLKDEV_INIT("bcm63xx_uart.1", "refclk", &clk_periph),
	/* gated clocks */
	CLKDEV_INIT(NULL, "enet0", &clk_enet0),
	CLKDEV_INIT(NULL, "enet1", &clk_enet1),
	CLKDEV_INIT(NULL, "ephy", &clk_ephy),
	CLKDEV_INIT(NULL, "usbh", &clk_usbh),
	CLKDEV_INIT(NULL, "usbd", &clk_usbd),
	CLKDEV_INIT(NULL, "spi", &clk_spi),
	CLKDEV_INIT(NULL, "pcm", &clk_pcm),
	CLKDEV_INIT("bcm63xx_enet.0", "enet", &clk_enet0),
	CLKDEV_INIT("bcm63xx_enet.1", "enet", &clk_enet1),
};

static struct clk_lookup bcm6328_clks[] = {
	/* fixed rate clocks */
	CLKDEV_INIT(NULL, "periph", &clk_periph),
	CLKDEV_INIT("bcm63xx_uart.0", "refclk", &clk_periph),
	CLKDEV_INIT("bcm63xx_uart.1", "refclk", &clk_periph),
	CLKDEV_INIT("bcm63xx-hsspi.0", "pll", &clk_hsspi_pll),
	/* gated clocks */
	CLKDEV_INIT(NULL, "enetsw", &clk_enetsw),
	CLKDEV_INIT(NULL, "usbh", &clk_usbh),
	CLKDEV_INIT(NULL, "usbd", &clk_usbd),
	CLKDEV_INIT(NULL, "hsspi", &clk_hsspi),
	CLKDEV_INIT(NULL, "pcie", &clk_pcie),
};

static struct clk_lookup bcm6338_clks[] = {
	/* fixed rate clocks */
	CLKDEV_INIT(NULL, "periph", &clk_periph),
	CLKDEV_INIT("bcm63xx_uart.0", "refclk", &clk_periph),
	/* gated clocks */
	CLKDEV_INIT(NULL, "enet0", &clk_enet0),
	CLKDEV_INIT(NULL, "enet1", &clk_enet1),
	CLKDEV_INIT(NULL, "ephy", &clk_ephy),
	CLKDEV_INIT(NULL, "usbh", &clk_usbh),
	CLKDEV_INIT(NULL, "usbd", &clk_usbd),
	CLKDEV_INIT(NULL, "spi", &clk_spi),
	CLKDEV_INIT("bcm63xx_enet.0", "enet", &clk_enet_misc),
};

static struct clk_lookup bcm6345_clks[] = {
	/* fixed rate clocks */
	CLKDEV_INIT(NULL, "periph", &clk_periph),
	CLKDEV_INIT("bcm63xx_uart.0", "refclk", &clk_periph),
	/* gated clocks */
	CLKDEV_INIT(NULL, "enet0", &clk_enet0),
	CLKDEV_INIT(NULL, "enet1", &clk_enet1),
	CLKDEV_INIT(NULL, "ephy", &clk_ephy),
	CLKDEV_INIT(NULL, "usbh", &clk_usbh),
	CLKDEV_INIT(NULL, "usbd", &clk_usbd),
	CLKDEV_INIT(NULL, "spi", &clk_spi),
	CLKDEV_INIT("bcm63xx_enet.0", "enet", &clk_enet_misc),
};

static struct clk_lookup bcm6348_clks[] = {
	/* fixed rate clocks */
	CLKDEV_INIT(NULL, "periph", &clk_periph),
	CLKDEV_INIT("bcm63xx_uart.0", "refclk", &clk_periph),
	/* gated clocks */
	CLKDEV_INIT(NULL, "enet0", &clk_enet0),
	CLKDEV_INIT(NULL, "enet1", &clk_enet1),
	CLKDEV_INIT(NULL, "ephy", &clk_ephy),
	CLKDEV_INIT(NULL, "usbh", &clk_usbh),
	CLKDEV_INIT(NULL, "usbd", &clk_usbd),
	CLKDEV_INIT(NULL, "spi", &clk_spi),
	CLKDEV_INIT("bcm63xx_enet.0", "enet", &clk_enet_misc),
	CLKDEV_INIT("bcm63xx_enet.1", "enet", &clk_enet_misc),
};

static struct clk_lookup bcm6358_clks[] = {
	/* fixed rate clocks */
	CLKDEV_INIT(NULL, "periph", &clk_periph),
	CLKDEV_INIT("bcm63xx_uart.0", "refclk", &clk_periph),
	CLKDEV_INIT("bcm63xx_uart.1", "refclk", &clk_periph),
	/* gated clocks */
	CLKDEV_INIT(NULL, "enet0", &clk_enet0),
	CLKDEV_INIT(NULL, "enet1", &clk_enet1),
	CLKDEV_INIT(NULL, "ephy", &clk_ephy),
	CLKDEV_INIT(NULL, "usbh", &clk_usbh),
	CLKDEV_INIT(NULL, "usbd", &clk_usbd),
	CLKDEV_INIT(NULL, "spi", &clk_spi),
	CLKDEV_INIT(NULL, "pcm", &clk_pcm),
	CLKDEV_INIT(NULL, "swpkt_sar", &clk_swpkt_sar),
	CLKDEV_INIT(NULL, "swpkt_usb", &clk_swpkt_usb),
	CLKDEV_INIT("bcm63xx_enet.0", "enet", &clk_enet0),
	CLKDEV_INIT("bcm63xx_enet.1", "enet", &clk_enet1),
};

static struct clk_lookup bcm6362_clks[] = {
	/* fixed rate clocks */
	CLKDEV_INIT(NULL, "periph", &clk_periph),
	CLKDEV_INIT("bcm63xx_uart.0", "refclk", &clk_periph),
	CLKDEV_INIT("bcm63xx_uart.1", "refclk", &clk_periph),
	CLKDEV_INIT("bcm63xx-hsspi.0", "pll", &clk_hsspi_pll),
	/* gated clocks */
	CLKDEV_INIT(NULL, "enetsw", &clk_enetsw),
	CLKDEV_INIT(NULL, "usbh", &clk_usbh),
	CLKDEV_INIT(NULL, "usbd", &clk_usbd),
	CLKDEV_INIT(NULL, "spi", &clk_spi),
	CLKDEV_INIT(NULL, "hsspi", &clk_hsspi),
	CLKDEV_INIT(NULL, "pcie", &clk_pcie),
	CLKDEV_INIT(NULL, "ipsec", &clk_ipsec),
};

static struct clk_lookup bcm6368_clks[] = {
	/* fixed rate clocks */
	CLKDEV_INIT(NULL, "periph", &clk_periph),
	CLKDEV_INIT("bcm63xx_uart.0", "refclk", &clk_periph),
	CLKDEV_INIT("bcm63xx_uart.1", "refclk", &clk_periph),
	/* gated clocks */
	CLKDEV_INIT(NULL, "enetsw", &clk_enetsw),
	CLKDEV_INIT(NULL, "usbh", &clk_usbh),
	CLKDEV_INIT(NULL, "usbd", &clk_usbd),
	CLKDEV_INIT(NULL, "spi", &clk_spi),
	CLKDEV_INIT(NULL, "xtm", &clk_xtm),
	CLKDEV_INIT(NULL, "ipsec", &clk_ipsec),
};

#define HSSPI_PLL_HZ_6328	133333333
#define HSSPI_PLL_HZ_6362	400000000

static int __init bcm63xx_clk_init(void)
{
	switch (bcm63xx_get_cpu_id()) {
	case BCM3368_CPU_ID:
		clkdev_add_table(bcm3368_clks, ARRAY_SIZE(bcm3368_clks));
		break;
	case BCM6328_CPU_ID:
		clk_hsspi_pll.rate = HSSPI_PLL_HZ_6328;
		clkdev_add_table(bcm6328_clks, ARRAY_SIZE(bcm6328_clks));
		break;
	case BCM6338_CPU_ID:
		clkdev_add_table(bcm6338_clks, ARRAY_SIZE(bcm6338_clks));
		break;
	case BCM6345_CPU_ID:
		clkdev_add_table(bcm6345_clks, ARRAY_SIZE(bcm6345_clks));
		break;
	case BCM6348_CPU_ID:
		clkdev_add_table(bcm6348_clks, ARRAY_SIZE(bcm6348_clks));
		break;
	case BCM6358_CPU_ID:
		clkdev_add_table(bcm6358_clks, ARRAY_SIZE(bcm6358_clks));
		break;
	case BCM6362_CPU_ID:
		clk_hsspi_pll.rate = HSSPI_PLL_HZ_6362;
		clkdev_add_table(bcm6362_clks, ARRAY_SIZE(bcm6362_clks));
		break;
	case BCM6368_CPU_ID:
		clkdev_add_table(bcm6368_clks, ARRAY_SIZE(bcm6368_clks));
		break;
	}

	return 0;
}
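/*
 * Registered at arch_initcall time so the lookup tables are in place
 * before device/driver initcalls start probing and calling clk_get().
 */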
arch_initcall(bcm63xx_clk_init);