1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) STMicroelectronics 2017
4  * Author: Gabriel Fernandez <gabriel.fernandez@st.com> for STMicroelectronics.
5  */
6 
7 #include <linux/clk.h>
8 #include <linux/clk-provider.h>
#include <linux/delay.h>
9 #include <linux/err.h>
10 #include <linux/io.h>
11 #include <linux/mfd/syscon.h>
12 #include <linux/of.h>
13 #include <linux/of_address.h>
14 #include <linux/slab.h>
15 #include <linux/spinlock.h>
16 #include <linux/regmap.h>
17 
18 #include <dt-bindings/clock/stm32h7-clks.h>
19 
20 /* Reset Clock Control Registers */
21 #define RCC_CR		0x00
22 #define RCC_CFGR	0x10
23 #define RCC_D1CFGR	0x18
24 #define RCC_D2CFGR	0x1C
25 #define RCC_D3CFGR	0x20
26 #define RCC_PLLCKSELR	0x28
27 #define RCC_PLLCFGR	0x2C
28 #define RCC_PLL1DIVR	0x30
29 #define RCC_PLL1FRACR	0x34
30 #define RCC_PLL2DIVR	0x38
31 #define RCC_PLL2FRACR	0x3C
32 #define RCC_PLL3DIVR	0x40
33 #define RCC_PLL3FRACR	0x44
34 #define RCC_D1CCIPR	0x4C
35 #define RCC_D2CCIP1R	0x50
36 #define RCC_D2CCIP2R	0x54
37 #define RCC_D3CCIPR	0x58
38 #define RCC_BDCR	0x70
39 #define RCC_CSR		0x74
40 #define RCC_AHB3ENR	0xD4
41 #define RCC_AHB1ENR	0xD8
42 #define RCC_AHB2ENR	0xDC
43 #define RCC_AHB4ENR	0xE0
44 #define RCC_APB3ENR	0xE4
45 #define RCC_APB1LENR	0xE8
46 #define RCC_APB1HENR	0xEC
47 #define RCC_APB2ENR	0xF0
48 #define RCC_APB4ENR	0xF4
49 
50 static DEFINE_SPINLOCK(stm32rcc_lock);
51 
52 static void __iomem *base;
53 static struct clk_hw **hws;
54 
55 /* System clock parent */
56 static const char * const sys_src[] = {
57 	"hsi_ck", "csi_ck", "hse_ck", "pll1_p" };
58 
59 static const char * const tracein_src[] = {
60 	"hsi_ck", "csi_ck", "hse_ck", "pll1_r" };
61 
62 static const char * const per_src[] = {
63 	"hsi_ker", "csi_ker", "hse_ck", "disabled" };
64 
65 static const char * const pll_src[] = {
66 	"hsi_ck", "csi_ck", "hse_ck", "no clock" };
67 
68 static const char * const sdmmc_src[] = { "pll1_q", "pll2_r" };
69 
70 static const char * const dsi_src[] = { "ck_dsi_phy", "pll2_q" };
71 
72 static const char * const qspi_src[] = {
73 	"hclk", "pll1_q", "pll2_r", "per_ck" };
74 
75 static const char * const fmc_src[] = {
76 	"hclk", "pll1_q", "pll2_r", "per_ck" };
77 
78 /* Kernel clock parent */
79 static const char * const swp_src[] = {	"pclk1", "hsi_ker" };
80 
81 static const char * const fdcan_src[] = { "hse_ck", "pll1_q", "pll2_q" };
82 
83 static const char * const dfsdm1_src[] = { "pclk2", "sys_ck" };
84 
85 static const char * const spdifrx_src[] = {
86 	"pll1_q", "pll2_r", "pll3_r", "hsi_ker" };
87 
88 static const char *spi_src1[5] = {
89 	"pll1_q", "pll2_p", "pll3_p", NULL, "per_ck" };
90 
91 static const char * const spi_src2[] = {
92 	"pclk2", "pll2_q", "pll3_q", "hsi_ker", "csi_ker", "hse_ck" };
93 
94 static const char * const spi_src3[] = {
95 	"pclk4", "pll2_q", "pll3_q", "hsi_ker", "csi_ker", "hse_ck" };
96 
97 static const char * const lptim_src1[] = {
98 	"pclk1", "pll2_p", "pll3_r", "lse_ck", "lsi_ck", "per_ck" };
99 
100 static const char * const lptim_src2[] = {
101 	"pclk4", "pll2_p", "pll3_r", "lse_ck", "lsi_ck", "per_ck" };
102 
103 static const char * const cec_src[] = {"lse_ck", "lsi_ck", "csi_ker_div122" };
104 
105 static const char * const usbotg_src[] = {"pll1_q", "pll3_q", "rc48_ck" };
106 
107 /* i2c 1,2,3 src */
108 static const char * const i2c_src1[] = {
109 	"pclk1", "pll3_r", "hsi_ker", "csi_ker" };
110 
111 static const char * const i2c_src2[] = {
112 	"pclk4", "pll3_r", "hsi_ker", "csi_ker" };
113 
114 static const char * const rng_src[] = {
115 	"rc48_ck", "pll1_q", "lse_ck", "lsi_ck" };
116 
117 /* usart 1,6 src */
118 static const char * const usart_src1[] = {
119 	"pclk2", "pll2_q", "pll3_q", "hsi_ker", "csi_ker", "lse_ck" };
120 
121 /* usart 2,3,4,5,7,8 src */
122 static const char * const usart_src2[] = {
123 	"pclk1", "pll2_q", "pll3_q", "hsi_ker", "csi_ker", "lse_ck" };
124 
125 static const char *sai_src[5] = {
126 	"pll1_q", "pll2_p", "pll3_p", NULL, "per_ck" };
127 
128 static const char * const adc_src[] = { "pll2_p", "pll3_r", "per_ck" };
129 
130 /* lpuart1 src */
131 static const char * const lpuart1_src[] = {
132 	"pclk3", "pll2_q", "pll3_q", "csi_ker", "lse_ck" };
133 
134 static const char * const hrtim_src[] = { "tim2_ker", "d1cpre" };
135 
136 /* RTC clock parent */
137 static const char * const rtc_src[] = { "off", "lse_ck", "lsi_ck", "hse_1M" };
138 
139 /* Micro-controller output clock parent */
140 static const char * const mco_src1[] = {
141 	"hsi_ck", "lse_ck", "hse_ck", "pll1_q",	"rc48_ck" };
142 
143 static const char * const mco_src2[] = {
144 	"sys_ck", "pll2_p", "hse_ck", "pll1_p", "csi_ck", "lsi_ck" };
145 
146 /* LCD clock */
147 static const char * const ltdc_src[] = {"pll3_r"};
148 
149 /* Gate clock with ready bit and backup domain management */
150 struct stm32_ready_gate {
151 	struct	clk_gate gate;
152 	u8	bit_rdy;
153 };
154 
155 #define to_ready_gate_clk(_rgate) container_of(_rgate, struct stm32_ready_gate,\
156 		gate)
157 
158 #define RGATE_TIMEOUT 10000
159 
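/*
 * Enable the gate, then poll the hardware ready bit with udelay(100)
 * steps for up to RGATE_TIMEOUT iterations. Returns 0 once the ready
 * bit is set, or 1 if it was still clear when the timeout expired.
 */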
160 static int ready_gate_clk_enable(struct clk_hw *hw)
161 {
162 	struct clk_gate *gate = to_clk_gate(hw);
163 	struct stm32_ready_gate *rgate = to_ready_gate_clk(gate);
164 	int bit_status;
165 	unsigned int timeout = RGATE_TIMEOUT;
166 
167 	if (clk_gate_ops.is_enabled(hw))
168 		return 0;
169 
170 	clk_gate_ops.enable(hw);
171 
172 	/* We can't use readl_poll_timeout() because we could get blocked
173 	 * if someone enables this clock before the clocksource changes.
174 	 * Only the jiffies counter is available, but jiffies are incremented
175 	 * by interrupts and the enable op is not allowed to be interrupted.
176 	 */
177 	do {
178 		bit_status = !(readl(gate->reg) & BIT(rgate->bit_rdy));
179 
180 		if (bit_status)
181 			udelay(100);
182 
183 	} while (bit_status && --timeout);
184 
185 	return bit_status;
186 }
187 
188 static void ready_gate_clk_disable(struct clk_hw *hw)
189 {
190 	struct clk_gate *gate = to_clk_gate(hw);
191 	struct stm32_ready_gate *rgate = to_ready_gate_clk(gate);
192 	int bit_status;
193 	unsigned int timeout = RGATE_TIMEOUT;
194 
195 	if (!clk_gate_ops.is_enabled(hw))
196 		return;
197 
198 	clk_gate_ops.disable(hw);
199 
200 	do {
201 		bit_status = !!(readl(gate->reg) & BIT(rgate->bit_rdy));
202 
203 		if (bit_status)
204 			udelay(100);
205 
206 	} while (bit_status && --timeout);
207 }
208 
209 static const struct clk_ops ready_gate_clk_ops = {
210 	.enable		= ready_gate_clk_enable,
211 	.disable	= ready_gate_clk_disable,
212 	.is_enabled	= clk_gate_is_enabled,
213 };
214 
215 static struct clk_hw *clk_register_ready_gate(struct device *dev,
216 		const char *name, const char *parent_name,
217 		void __iomem *reg, u8 bit_idx, u8 bit_rdy,
218 		unsigned long flags, spinlock_t *lock)
219 {
220 	struct stm32_ready_gate *rgate;
221 	struct clk_init_data init = { NULL };
222 	struct clk_hw *hw;
223 	int ret;
224 
225 	rgate = kzalloc(sizeof(*rgate), GFP_KERNEL);
226 	if (!rgate)
227 		return ERR_PTR(-ENOMEM);
228 
229 	init.name = name;
230 	init.ops = &ready_gate_clk_ops;
231 	init.flags = flags;
232 	init.parent_names = &parent_name;
233 	init.num_parents = 1;
234 
235 	rgate->bit_rdy = bit_rdy;
236 	rgate->gate.lock = lock;
237 	rgate->gate.reg = reg;
238 	rgate->gate.bit_idx = bit_idx;
239 	rgate->gate.hw.init = &init;
240 
241 	hw = &rgate->gate.hw;
242 	ret = clk_hw_register(dev, hw);
243 	if (ret) {
244 		kfree(rgate);
245 		hw = ERR_PTR(ret);
246 	}
247 
248 	return hw;
249 }
250 
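/*
 * Register-level descriptions used to build composite clocks: gate_cfg
 * holds the enable bit position, muxdiv_cfg the shift/width of a mux or
 * divider field, and composite_clk_cfg ties them to a clock name, its
 * parents and the framework flags.
 */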
251 struct gate_cfg {
252 	u32 offset;
253 	u8  bit_idx;
254 };
255 
256 struct muxdiv_cfg {
257 	u32 offset;
258 	u8 shift;
259 	u8 width;
260 };
261 
262 struct composite_clk_cfg {
263 	struct gate_cfg *gate;
264 	struct muxdiv_cfg *mux;
265 	struct muxdiv_cfg *div;
266 	const char *name;
267 	const char * const *parent_name;
268 	int num_parents;
269 	u32 flags;
270 };
271 
272 struct composite_clk_gcfg_t {
273 	u8 flags;
274 	const struct clk_ops *ops;
275 };
276 
277 /*
278  * General config definition of a composite clock (only clock divider for rate)
279  */
280 struct composite_clk_gcfg {
281 	struct composite_clk_gcfg_t *mux;
282 	struct composite_clk_gcfg_t *div;
283 	struct composite_clk_gcfg_t *gate;
284 };
285 
286 #define M_CFG_MUX(_mux_ops, _mux_flags)\
287 	.mux = &(struct composite_clk_gcfg_t) { _mux_flags, _mux_ops}
288 
289 #define M_CFG_DIV(_rate_ops, _rate_flags)\
290 	.div = &(struct composite_clk_gcfg_t) {_rate_flags, _rate_ops}
291 
292 #define M_CFG_GATE(_gate_ops, _gate_flags)\
293 	.gate = &(struct composite_clk_gcfg_t) { _gate_flags, _gate_ops}
294 
295 static struct clk_mux *_get_cmux(void __iomem *reg, u8 shift, u8 width,
296 		u32 flags, spinlock_t *lock)
297 {
298 	struct clk_mux *mux;
299 
300 	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
301 	if (!mux)
302 		return ERR_PTR(-ENOMEM);
303 
304 	mux->reg	= reg;
305 	mux->shift	= shift;
306 	mux->mask	= (1 << width) - 1;
307 	mux->flags	= flags;
308 	mux->lock	= lock;
309 
310 	return mux;
311 }
312 
313 static struct clk_divider *_get_cdiv(void __iomem *reg, u8 shift, u8 width,
314 		u32 flags, spinlock_t *lock)
315 {
316 	struct clk_divider *div;
317 
318 	div = kzalloc(sizeof(*div), GFP_KERNEL);
319 
320 	if (!div)
321 		return ERR_PTR(-ENOMEM);
322 
323 	div->reg   = reg;
324 	div->shift = shift;
325 	div->width = width;
326 	div->flags = flags;
327 	div->lock  = lock;
328 
329 	return div;
330 }
331 
332 static struct clk_gate *_get_cgate(void __iomem *reg, u8 bit_idx, u32 flags,
333 		spinlock_t *lock)
334 {
335 	struct clk_gate *gate;
336 
337 	gate = kzalloc(sizeof(*gate), GFP_KERNEL);
338 	if (!gate)
339 		return ERR_PTR(-ENOMEM);
340 
341 	gate->reg	= reg;
342 	gate->bit_idx	= bit_idx;
343 	gate->flags	= flags;
344 	gate->lock	= lock;
345 
346 	return gate;
347 }
348 
349 struct composite_cfg {
350 	struct clk_hw *mux_hw;
351 	struct clk_hw *div_hw;
352 	struct clk_hw *gate_hw;
353 
354 	const struct clk_ops *mux_ops;
355 	const struct clk_ops *div_ops;
356 	const struct clk_ops *gate_ops;
357 };
358 
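/*
 * Turn a composite_clk_cfg (register offsets and bitfields) plus the
 * general composite_clk_gcfg (ops and flags) into the clk_hw/clk_ops
 * triplet expected by clk_hw_register_composite(). Missing mux/div/gate
 * entries leave the corresponding hw pointer NULL.
 */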
359 static void get_cfg_composite_div(const struct composite_clk_gcfg *gcfg,
360 		const struct composite_clk_cfg *cfg,
361 		struct composite_cfg *composite, spinlock_t *lock)
362 {
363 	struct clk_mux     *mux = NULL;
364 	struct clk_divider *div = NULL;
365 	struct clk_gate    *gate = NULL;
366 	const struct clk_ops *mux_ops, *div_ops, *gate_ops;
367 	struct clk_hw *mux_hw;
368 	struct clk_hw *div_hw;
369 	struct clk_hw *gate_hw;
370 
371 	mux_ops = div_ops = gate_ops = NULL;
372 	mux_hw = div_hw = gate_hw = NULL;
373 
374 	if (gcfg->mux && cfg->mux) {
375 		mux = _get_cmux(base + cfg->mux->offset,
376 				cfg->mux->shift,
377 				cfg->mux->width,
378 				gcfg->mux->flags, lock);
379 
380 		if (!IS_ERR(mux)) {
381 			mux_hw = &mux->hw;
382 			mux_ops = gcfg->mux->ops ?
383 				  gcfg->mux->ops : &clk_mux_ops;
384 		}
385 	}
386 
387 	if (gcfg->div && cfg->div) {
388 		div = _get_cdiv(base + cfg->div->offset,
389 				cfg->div->shift,
390 				cfg->div->width,
391 				gcfg->div->flags, lock);
392 
393 		if (!IS_ERR(div)) {
394 			div_hw = &div->hw;
395 			div_ops = gcfg->div->ops ?
396 				  gcfg->div->ops : &clk_divider_ops;
397 		}
398 	}
399 
400 	if (gcfg->gate && cfg->gate) {
401 		gate = _get_cgate(base + cfg->gate->offset,
402 				cfg->gate->bit_idx,
403 				gcfg->gate->flags, lock);
404 
405 		if (!IS_ERR(gate)) {
406 			gate_hw = &gate->hw;
407 			gate_ops = gcfg->gate->ops ?
408 				   gcfg->gate->ops : &clk_gate_ops;
409 		}
410 	}
411 
412 	composite->mux_hw = mux_hw;
413 	composite->mux_ops = mux_ops;
414 
415 	composite->div_hw = div_hw;
416 	composite->div_ops = div_ops;
417 
418 	composite->gate_hw = gate_hw;
419 	composite->gate_ops = gate_ops;
420 }
421 
422 /* Kernel Timer */
423 struct timer_ker {
424 	u8 dppre_shift;
425 	struct clk_hw hw;
426 	spinlock_t *lock;
427 };
428 
429 #define to_timer_ker(_hw) container_of(_hw, struct timer_ker, hw)
430 
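/*
 * Timer kernel clocks run at the APB clock multiplied by 1, 2 or 4,
 * depending on the TIMPRE bit (RCC_CFGR bit 15) and the APB prescaler
 * field found at 'dppre_shift' in RCC_D2CFGR:
 *   prescaler < 4                 -> x1 (APB clock not divided)
 *   TIMPRE set and prescaler > 4  -> x4
 *   any other case                -> x2
 */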
431 static unsigned long timer_ker_recalc_rate(struct clk_hw *hw,
432 		unsigned long parent_rate)
433 {
434 	struct timer_ker *clk_elem = to_timer_ker(hw);
435 	u32 timpre;
436 	u32 dppre_shift = clk_elem->dppre_shift;
437 	u32 prescaler;
438 	u32 mul;
439 
440 	timpre = (readl(base + RCC_CFGR) >> 15) & 0x01;
441 
442 	prescaler = (readl(base + RCC_D2CFGR) >> dppre_shift) & 0x03;
443 
444 	mul = 2;
445 
446 	if (prescaler < 4)
447 		mul = 1;
448 
449 	else if (timpre && prescaler > 4)
450 		mul = 4;
451 
452 	return parent_rate * mul;
453 }
454 
455 static const struct clk_ops timer_ker_ops = {
456 	.recalc_rate = timer_ker_recalc_rate,
457 };
458 
459 static struct clk_hw *clk_register_stm32_timer_ker(struct device *dev,
460 		const char *name, const char *parent_name,
461 		unsigned long flags,
462 		u8 dppre_shift,
463 		spinlock_t *lock)
464 {
465 	struct timer_ker *element;
466 	struct clk_init_data init;
467 	struct clk_hw *hw;
468 	int err;
469 
470 	element = kzalloc(sizeof(*element), GFP_KERNEL);
471 	if (!element)
472 		return ERR_PTR(-ENOMEM);
473 
474 	init.name = name;
475 	init.ops = &timer_ker_ops;
476 	init.flags = flags;
477 	init.parent_names = &parent_name;
478 	init.num_parents = 1;
479 
480 	element->hw.init = &init;
481 	element->lock = lock;
482 	element->dppre_shift = dppre_shift;
483 
484 	hw = &element->hw;
485 	err = clk_hw_register(dev, hw);
486 
487 	if (err) {
488 		kfree(element);
489 		return ERR_PTR(err);
490 	}
491 
492 	return hw;
493 }
494 
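/*
 * Divider encodings for the D1CPRE/HPRE and APB PPRE fields: codes with
 * the MSB of the field clear mean "not divided", codes with the MSB set
 * select the power-of-two divisors listed below (the D1CPRE table has no
 * /32 step).
 */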
495 static const struct clk_div_table d1cpre_div_table[] = {
496 	{ 0, 1 }, { 1, 1 }, { 2, 1 }, { 3, 1},
497 	{ 4, 1 }, { 5, 1 }, { 6, 1 }, { 7, 1},
498 	{ 8, 2 }, { 9, 4 }, { 10, 8 }, { 11, 16 },
499 	{ 12, 64 }, { 13, 128 }, { 14, 256 },
500 	{ 15, 512 },
501 	{ 0 },
502 };
503 
504 static const struct clk_div_table ppre_div_table[] = {
505 	{ 0, 1 }, { 1, 1 }, { 2, 1 }, { 3, 1},
506 	{ 4, 2 }, { 5, 4 }, { 6, 8 }, { 7, 16 },
507 	{ 0 },
508 };
509 
510 static void register_core_and_bus_clocks(void)
511 {
512 	/* CORE AND BUS */
513 	hws[SYS_D1CPRE] = clk_hw_register_divider_table(NULL, "d1cpre",
514 			"sys_ck", CLK_IGNORE_UNUSED, base + RCC_D1CFGR, 8, 4, 0,
515 			d1cpre_div_table, &stm32rcc_lock);
516 
517 	hws[HCLK] = clk_hw_register_divider_table(NULL, "hclk", "d1cpre",
518 			CLK_IGNORE_UNUSED, base + RCC_D1CFGR, 0, 4, 0,
519 			d1cpre_div_table, &stm32rcc_lock);
520 
521 	/* D1 DOMAIN */
522 	/* * CPU Systick */
523 	hws[CPU_SYSTICK] = clk_hw_register_fixed_factor(NULL, "systick",
524 			"d1cpre", 0, 1, 8);
525 
526 	/* * APB3 peripheral */
527 	hws[PCLK3] = clk_hw_register_divider_table(NULL, "pclk3", "hclk", 0,
528 			base + RCC_D1CFGR, 4, 3, 0,
529 			ppre_div_table, &stm32rcc_lock);
530 
531 	/* D2 DOMAIN */
532 	/* * APB1 peripheral */
533 	hws[PCLK1] = clk_hw_register_divider_table(NULL, "pclk1", "hclk", 0,
534 			base + RCC_D2CFGR, 4, 3, 0,
535 			ppre_div_table, &stm32rcc_lock);
536 
537 	/* Timers prescaler clocks */
538 	clk_register_stm32_timer_ker(NULL, "tim1_ker", "pclk1", 0,
539 			4, &stm32rcc_lock);
540 
541 	/* * APB2 peripheral */
542 	hws[PCLK2] = clk_hw_register_divider_table(NULL, "pclk2", "hclk", 0,
543 			base + RCC_D2CFGR, 8, 3, 0, ppre_div_table,
544 			&stm32rcc_lock);
545 
546 	clk_register_stm32_timer_ker(NULL, "tim2_ker", "pclk2", 0, 8,
547 			&stm32rcc_lock);
548 
549 	/* D3 DOMAIN */
550 	/* * APB4 peripheral */
551 	hws[PCLK4] = clk_hw_register_divider_table(NULL, "pclk4", "hclk", 0,
552 			base + RCC_D3CFGR, 4, 3, 0,
553 			ppre_div_table, &stm32rcc_lock);
554 }
555 
556 /* MUX clock configuration */
557 struct stm32_mux_clk {
558 	const char *name;
559 	const char * const *parents;
560 	u8 num_parents;
561 	u32 offset;
562 	u8 shift;
563 	u8 width;
564 	u32 flags;
565 };
566 
567 #define M_MCLOCF(_name, _parents, _mux_offset, _mux_shift, _mux_width, _flags)\
568 {\
569 	.name		= _name,\
570 	.parents	= _parents,\
571 	.num_parents	= ARRAY_SIZE(_parents),\
572 	.offset		= _mux_offset,\
573 	.shift		= _mux_shift,\
574 	.width		= _mux_width,\
575 	.flags		= _flags,\
576 }
577 
578 #define M_MCLOC(_name, _parents, _mux_offset, _mux_shift, _mux_width)\
579 	M_MCLOCF(_name, _parents, _mux_offset, _mux_shift, _mux_width, 0)\
580 
581 static const struct stm32_mux_clk stm32_mclk[] __initconst = {
582 	M_MCLOC("per_ck",	per_src,	RCC_D1CCIPR,	28, 3),
583 	M_MCLOC("pllsrc",	pll_src,	RCC_PLLCKSELR,	 0, 3),
584 	M_MCLOC("sys_ck",	sys_src,	RCC_CFGR,	 0, 3),
585 	M_MCLOC("tracein_ck",	tracein_src,	RCC_CFGR,	 0, 3),
586 };
587 
588 /* Oscillator clock configuration */
589 struct stm32_osc_clk {
590 	const char *name;
591 	const char *parent;
592 	u32 gate_offset;
593 	u8 bit_idx;
594 	u8 bit_rdy;
595 	u32 flags;
596 };
597 
598 #define OSC_CLKF(_name, _parent, _gate_offset, _bit_idx, _bit_rdy, _flags)\
599 {\
600 	.name		= _name,\
601 	.parent		= _parent,\
602 	.gate_offset	= _gate_offset,\
603 	.bit_idx	= _bit_idx,\
604 	.bit_rdy	= _bit_rdy,\
605 	.flags		= _flags,\
606 }
607 
608 #define OSC_CLK(_name, _parent, _gate_offset, _bit_idx, _bit_rdy)\
609 	OSC_CLKF(_name, _parent, _gate_offset, _bit_idx, _bit_rdy, 0)
610 
611 static const struct stm32_osc_clk stm32_oclk[] __initconst = {
612 	OSC_CLKF("hsi_ck",  "hsidiv",   RCC_CR,   0,  2, CLK_IGNORE_UNUSED),
613 	OSC_CLKF("hsi_ker", "hsidiv",   RCC_CR,   1,  2, CLK_IGNORE_UNUSED),
614 	OSC_CLKF("csi_ck",  "clk-csi",  RCC_CR,   7,  8, CLK_IGNORE_UNUSED),
615 	OSC_CLKF("csi_ker", "clk-csi",  RCC_CR,   9,  8, CLK_IGNORE_UNUSED),
616 	OSC_CLKF("rc48_ck", "clk-rc48", RCC_CR,  12, 13, CLK_IGNORE_UNUSED),
617 	OSC_CLKF("lsi_ck",  "clk-lsi",  RCC_CSR,  0,  1, CLK_IGNORE_UNUSED),
618 };
619 
620 /* PLL configuration */
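/*
 * Per-PLL register description: 'bit_idx' is the PLLxON bit in RCC_CR
 * (the ready bit is taken as bit_idx + 1), 'offset_divr'/'offset_frac'
 * point to the PLLxDIVR/PLLxFRACR registers, 'bit_frac_en' is the
 * PLLxFRACEN bit in RCC_PLLCFGR and 'divm' the shift of the DIVMx field
 * in RCC_PLLCKSELR.
 */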
621 struct st32h7_pll_cfg {
622 	u8 bit_idx;
623 	u32 offset_divr;
624 	u8 bit_frac_en;
625 	u32 offset_frac;
626 	u8 divm;
627 };
628 
629 struct stm32_pll_data {
630 	const char *name;
631 	const char *parent_name;
632 	unsigned long flags;
633 	const struct st32h7_pll_cfg *cfg;
634 };
635 
636 static const struct st32h7_pll_cfg stm32h7_pll1 = {
637 	.bit_idx = 24,
638 	.offset_divr = RCC_PLL1DIVR,
639 	.bit_frac_en = 0,
640 	.offset_frac = RCC_PLL1FRACR,
641 	.divm = 4,
642 };
643 
644 static const struct st32h7_pll_cfg stm32h7_pll2 = {
645 	.bit_idx = 26,
646 	.offset_divr = RCC_PLL2DIVR,
647 	.bit_frac_en = 4,
648 	.offset_frac = RCC_PLL2FRACR,
649 	.divm = 12,
650 };
651 
652 static const struct st32h7_pll_cfg stm32h7_pll3 = {
653 	.bit_idx = 28,
654 	.offset_divr = RCC_PLL3DIVR,
655 	.bit_frac_en = 8,
656 	.offset_frac = RCC_PLL3FRACR,
657 	.divm = 20,
658 };
659 
660 static const struct stm32_pll_data stm32_pll[] = {
661 	{ "vco1", "pllsrc", CLK_IGNORE_UNUSED, &stm32h7_pll1 },
662 	{ "vco2", "pllsrc", 0, &stm32h7_pll2 },
663 	{ "vco3", "pllsrc", 0, &stm32h7_pll3 },
664 };
665 
666 struct stm32_fractional_divider {
667 	void __iomem	*mreg;
668 	u8		mshift;
669 	u8		mwidth;
670 
671 	void __iomem	*nreg;
672 	u8		nshift;
673 	u8		nwidth;
674 
675 	void __iomem	*freg_status;
676 	u8		freg_bit;
677 	void __iomem	*freg_value;
678 	u8		fshift;
679 	u8		fwidth;
680 
681 	u8		flags;
682 	struct clk_hw	hw;
683 	spinlock_t	*lock;
684 };
685 
686 struct stm32_pll_obj {
687 	spinlock_t *lock;
688 	struct stm32_fractional_divider div;
689 	struct stm32_ready_gate rgate;
690 	struct clk_hw hw;
691 };
692 
693 #define to_pll(_hw) container_of(_hw, struct stm32_pll_obj, hw)
694 
695 static int pll_is_enabled(struct clk_hw *hw)
696 {
697 	struct stm32_pll_obj *clk_elem = to_pll(hw);
698 	struct clk_hw *_hw = &clk_elem->rgate.gate.hw;
699 
700 	__clk_hw_set_clk(_hw, hw);
701 
702 	return ready_gate_clk_ops.is_enabled(_hw);
703 }
704 
705 static int pll_enable(struct clk_hw *hw)
706 {
707 	struct stm32_pll_obj *clk_elem = to_pll(hw);
708 	struct clk_hw *_hw = &clk_elem->rgate.gate.hw;
709 
710 	__clk_hw_set_clk(_hw, hw);
711 
712 	return ready_gate_clk_ops.enable(_hw);
713 }
714 
715 static void pll_disable(struct clk_hw *hw)
716 {
717 	struct stm32_pll_obj *clk_elem = to_pll(hw);
718 	struct clk_hw *_hw = &clk_elem->rgate.gate.hw;
719 
720 	__clk_hw_set_clk(_hw, hw);
721 
722 	ready_gate_clk_ops.disable(_hw);
723 }
724 
725 static int pll_frac_is_enabled(struct clk_hw *hw)
726 {
727 	struct stm32_pll_obj *clk_elem = to_pll(hw);
728 	struct stm32_fractional_divider *fd = &clk_elem->div;
729 
730 	return (readl(fd->freg_status) >> fd->freg_bit) & 0x01;
731 }
732 
733 static unsigned long pll_read_frac(struct clk_hw *hw)
734 {
735 	struct stm32_pll_obj *clk_elem = to_pll(hw);
736 	struct stm32_fractional_divider *fd = &clk_elem->div;
737 
738 	return (readl(fd->freg_value) >> fd->fshift) &
739 		GENMASK(fd->fwidth - 1, 0);
740 }
741 
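/*
 * VCO rate as computed below: parent_rate * (DIVN + 1) / DIVM, plus
 * parent_rate * FRACN / (DIVM * 8191) when the fractional part is
 * enabled in RCC_PLLCFGR.
 */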
742 static unsigned long pll_fd_recalc_rate(struct clk_hw *hw,
743 		unsigned long parent_rate)
744 {
745 	struct stm32_pll_obj *clk_elem = to_pll(hw);
746 	struct stm32_fractional_divider *fd = &clk_elem->div;
747 	unsigned long m, n;
748 	u32 val, mask;
749 	u64 rate, rate1 = 0;
750 
751 	val = readl(fd->mreg);
752 	mask = GENMASK(fd->mwidth - 1, 0) << fd->mshift;
753 	m = (val & mask) >> fd->mshift;
754 
755 	val = readl(fd->nreg);
756 	mask = GENMASK(fd->nwidth - 1, 0) << fd->nshift;
757 	n = ((val & mask) >> fd->nshift) + 1;
758 
759 	if (!n || !m)
760 		return parent_rate;
761 
762 	rate = (u64)parent_rate * n;
763 	do_div(rate, m);
764 
765 	if (pll_frac_is_enabled(hw)) {
766 		val = pll_read_frac(hw);
767 		rate1 = (u64)parent_rate * (u64)val;
768 		do_div(rate1, (m * 8191));
769 	}
770 
771 	return rate + rate1;
772 }
773 
774 static const struct clk_ops pll_ops = {
775 	.enable		= pll_enable,
776 	.disable	= pll_disable,
777 	.is_enabled	= pll_is_enabled,
778 	.recalc_rate	= pll_fd_recalc_rate,
779 };
780 
781 static struct clk_hw *clk_register_stm32_pll(struct device *dev,
782 		const char *name,
783 		const char *parent,
784 		unsigned long flags,
785 		const struct st32h7_pll_cfg *cfg,
786 		spinlock_t *lock)
787 {
788 	struct stm32_pll_obj *pll;
789 	struct clk_init_data init = { NULL };
790 	struct clk_hw *hw;
791 	int ret;
792 	struct stm32_fractional_divider *div = NULL;
793 	struct stm32_ready_gate *rgate;
794 
795 	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
796 	if (!pll)
797 		return ERR_PTR(-ENOMEM);
798 
799 	init.name = name;
800 	init.ops = &pll_ops;
801 	init.flags = flags;
802 	init.parent_names = &parent;
803 	init.num_parents = 1;
804 	pll->hw.init = &init;
805 
806 	hw = &pll->hw;
807 	rgate = &pll->rgate;
808 
809 	rgate->bit_rdy = cfg->bit_idx + 1;
810 	rgate->gate.lock = lock;
811 	rgate->gate.reg = base + RCC_CR;
812 	rgate->gate.bit_idx = cfg->bit_idx;
813 
814 	div = &pll->div;
815 	div->flags = 0;
816 	div->mreg = base + RCC_PLLCKSELR;
817 	div->mshift = cfg->divm;
818 	div->mwidth = 6;
819 	div->nreg = base +  cfg->offset_divr;
820 	div->nshift = 0;
821 	div->nwidth = 9;
822 
823 	div->freg_status = base + RCC_PLLCFGR;
824 	div->freg_bit = cfg->bit_frac_en;
825 	div->freg_value = base +  cfg->offset_frac;
826 	div->fshift = 3;
827 	div->fwidth = 13;
828 
829 	div->lock = lock;
830 
831 	ret = clk_hw_register(dev, hw);
832 	if (ret) {
833 		kfree(pll);
834 		hw = ERR_PTR(ret);
835 	}
836 
837 	return hw;
838 }
839 
840 /* ODF CLOCKS */
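/*
 * The PLL output dividers (P/Q/R) are handled as composite divider+gate
 * clocks. Their ops wrap the generic divider/gate ops: if the parent PLL
 * is currently enabled, it is switched off around the register update
 * and switched back on afterwards.
 */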
841 static unsigned long odf_divider_recalc_rate(struct clk_hw *hw,
842 		unsigned long parent_rate)
843 {
844 	return clk_divider_ops.recalc_rate(hw, parent_rate);
845 }
846 
847 static int odf_divider_determine_rate(struct clk_hw *hw,
848 				      struct clk_rate_request *req)
849 {
850 	return clk_divider_ops.determine_rate(hw, req);
851 }
852 
853 static int odf_divider_set_rate(struct clk_hw *hw, unsigned long rate,
854 		unsigned long parent_rate)
855 {
856 	struct clk_hw *hwp;
857 	int pll_status;
858 	int ret;
859 
860 	hwp = clk_hw_get_parent(hw);
861 
862 	pll_status = pll_is_enabled(hwp);
863 
864 	if (pll_status)
865 		pll_disable(hwp);
866 
867 	ret = clk_divider_ops.set_rate(hw, rate, parent_rate);
868 
869 	if (pll_status)
870 		pll_enable(hwp);
871 
872 	return ret;
873 }
874 
875 static const struct clk_ops odf_divider_ops = {
876 	.recalc_rate	= odf_divider_recalc_rate,
877 	.determine_rate	= odf_divider_determine_rate,
878 	.set_rate	= odf_divider_set_rate,
879 };
880 
881 static int odf_gate_enable(struct clk_hw *hw)
882 {
883 	struct clk_hw *hwp;
884 	int pll_status;
885 	int ret;
886 
887 	if (clk_gate_ops.is_enabled(hw))
888 		return 0;
889 
890 	hwp = clk_hw_get_parent(hw);
891 
892 	pll_status = pll_is_enabled(hwp);
893 
894 	if (pll_status)
895 		pll_disable(hwp);
896 
897 	ret = clk_gate_ops.enable(hw);
898 
899 	if (pll_status)
900 		pll_enable(hwp);
901 
902 	return ret;
903 }
904 
905 static void odf_gate_disable(struct clk_hw *hw)
906 {
907 	struct clk_hw *hwp;
908 	int pll_status;
909 
910 	if (!clk_gate_ops.is_enabled(hw))
911 		return;
912 
913 	hwp = clk_hw_get_parent(hw);
914 
915 	pll_status = pll_is_enabled(hwp);
916 
917 	if (pll_status)
918 		pll_disable(hwp);
919 
920 	clk_gate_ops.disable(hw);
921 
922 	if (pll_status)
923 		pll_enable(hwp);
924 }
925 
926 static const struct clk_ops odf_gate_ops = {
927 	.enable		= odf_gate_enable,
928 	.disable	= odf_gate_disable,
929 	.is_enabled	= clk_gate_is_enabled,
930 };
931 
932 static struct composite_clk_gcfg odf_clk_gcfg = {
933 	M_CFG_DIV(&odf_divider_ops, 0),
934 	M_CFG_GATE(&odf_gate_ops, 0),
935 };
936 
937 #define M_ODF_F(_name, _parent, _gate_offset,  _bit_idx, _rate_offset,\
938 		_rate_shift, _rate_width, _flags)\
939 {\
940 	.mux = NULL,\
941 	.div = &(struct muxdiv_cfg) {_rate_offset, _rate_shift, _rate_width},\
942 	.gate = &(struct gate_cfg) {_gate_offset, _bit_idx },\
943 	.name = _name,\
944 	.parent_name = &(const char *) {_parent},\
945 	.num_parents = 1,\
946 	.flags = _flags,\
947 }
948 
949 #define M_ODF(_name, _parent, _gate_offset,  _bit_idx, _rate_offset,\
950 		_rate_shift, _rate_width)\
951 M_ODF_F(_name, _parent, _gate_offset,  _bit_idx, _rate_offset,\
952 		_rate_shift, _rate_width, 0)\
953 
954 static const struct composite_clk_cfg stm32_odf[3][3] = {
955 	{
956 		M_ODF_F("pll1_p", "vco1", RCC_PLLCFGR, 16, RCC_PLL1DIVR,  9, 7,
957 				CLK_IGNORE_UNUSED),
958 		M_ODF_F("pll1_q", "vco1", RCC_PLLCFGR, 17, RCC_PLL1DIVR, 16, 7,
959 				CLK_IGNORE_UNUSED),
960 		M_ODF_F("pll1_r", "vco1", RCC_PLLCFGR, 18, RCC_PLL1DIVR, 24, 7,
961 				CLK_IGNORE_UNUSED),
962 	},
963 
964 	{
965 		M_ODF("pll2_p", "vco2", RCC_PLLCFGR, 19, RCC_PLL2DIVR,  9, 7),
966 		M_ODF("pll2_q", "vco2", RCC_PLLCFGR, 20, RCC_PLL2DIVR, 16, 7),
967 		M_ODF("pll2_r", "vco2", RCC_PLLCFGR, 21, RCC_PLL2DIVR, 24, 7),
968 	},
969 	{
970 		M_ODF("pll3_p", "vco3", RCC_PLLCFGR, 22, RCC_PLL3DIVR,  9, 7),
971 		M_ODF("pll3_q", "vco3", RCC_PLLCFGR, 23, RCC_PLL3DIVR, 16, 7),
972 		M_ODF("pll3_r", "vco3", RCC_PLLCFGR, 24, RCC_PLL3DIVR, 24, 7),
973 	}
974 };
975 
976 /* PERIF CLOCKS */
977 struct pclk_t {
978 	u32 gate_offset;
979 	u8 bit_idx;
980 	const char *name;
981 	const char *parent;
982 	u32 flags;
983 };
984 
985 #define PER_CLKF(_gate_offset, _bit_idx, _name, _parent, _flags)\
986 {\
987 	.gate_offset	= _gate_offset,\
988 	.bit_idx	= _bit_idx,\
989 	.name		= _name,\
990 	.parent		= _parent,\
991 	.flags		= _flags,\
992 }
993 
994 #define PER_CLK(_gate_offset, _bit_idx, _name, _parent)\
995 	PER_CLKF(_gate_offset, _bit_idx, _name, _parent, 0)
996 
997 static const struct pclk_t pclk[] = {
998 	PER_CLK(RCC_AHB3ENR, 31, "d1sram1", "hclk"),
999 	PER_CLK(RCC_AHB3ENR, 30, "itcm", "hclk"),
1000 	PER_CLK(RCC_AHB3ENR, 29, "dtcm2", "hclk"),
1001 	PER_CLK(RCC_AHB3ENR, 28, "dtcm1", "hclk"),
1002 	PER_CLK(RCC_AHB3ENR, 8, "flitf", "hclk"),
1003 	PER_CLK(RCC_AHB3ENR, 5, "jpgdec", "hclk"),
1004 	PER_CLK(RCC_AHB3ENR, 4, "dma2d", "hclk"),
1005 	PER_CLK(RCC_AHB3ENR, 0, "mdma", "hclk"),
1006 	PER_CLK(RCC_AHB1ENR, 28, "usb2ulpi", "hclk"),
1007 	PER_CLK(RCC_AHB1ENR, 26, "usb1ulpi", "hclk"),
1008 	PER_CLK(RCC_AHB1ENR, 17, "eth1rx", "hclk"),
1009 	PER_CLK(RCC_AHB1ENR, 16, "eth1tx", "hclk"),
1010 	PER_CLK(RCC_AHB1ENR, 15, "eth1mac", "hclk"),
1011 	PER_CLK(RCC_AHB1ENR, 14, "art", "hclk"),
1012 	PER_CLK(RCC_AHB1ENR, 1, "dma2", "hclk"),
1013 	PER_CLK(RCC_AHB1ENR, 0, "dma1", "hclk"),
1014 	PER_CLK(RCC_AHB2ENR, 31, "d2sram3", "hclk"),
1015 	PER_CLK(RCC_AHB2ENR, 30, "d2sram2", "hclk"),
1016 	PER_CLK(RCC_AHB2ENR, 29, "d2sram1", "hclk"),
1017 	PER_CLK(RCC_AHB2ENR, 5, "hash", "hclk"),
1018 	PER_CLK(RCC_AHB2ENR, 4, "crypt", "hclk"),
1019 	PER_CLK(RCC_AHB2ENR, 0, "camitf", "hclk"),
1020 	PER_CLK(RCC_AHB4ENR, 28, "bkpram", "hclk"),
1021 	PER_CLK(RCC_AHB4ENR, 25, "hsem", "hclk"),
1022 	PER_CLK(RCC_AHB4ENR, 21, "bdma", "hclk"),
1023 	PER_CLK(RCC_AHB4ENR, 19, "crc", "hclk"),
1024 	PER_CLK(RCC_AHB4ENR, 10, "gpiok", "hclk"),
1025 	PER_CLK(RCC_AHB4ENR, 9, "gpioj", "hclk"),
1026 	PER_CLK(RCC_AHB4ENR, 8, "gpioi", "hclk"),
1027 	PER_CLK(RCC_AHB4ENR, 7, "gpioh", "hclk"),
1028 	PER_CLK(RCC_AHB4ENR, 6, "gpiog", "hclk"),
1029 	PER_CLK(RCC_AHB4ENR, 5, "gpiof", "hclk"),
1030 	PER_CLK(RCC_AHB4ENR, 4, "gpioe", "hclk"),
1031 	PER_CLK(RCC_AHB4ENR, 3, "gpiod", "hclk"),
1032 	PER_CLK(RCC_AHB4ENR, 2, "gpioc", "hclk"),
1033 	PER_CLK(RCC_AHB4ENR, 1, "gpiob", "hclk"),
1034 	PER_CLK(RCC_AHB4ENR, 0, "gpioa", "hclk"),
1035 	PER_CLK(RCC_APB3ENR, 6, "wwdg1", "pclk3"),
1036 	PER_CLK(RCC_APB1LENR, 29, "dac12", "pclk1"),
1037 	PER_CLK(RCC_APB1LENR, 11, "wwdg2", "pclk1"),
1038 	PER_CLK(RCC_APB1LENR, 8, "tim14", "tim1_ker"),
1039 	PER_CLK(RCC_APB1LENR, 7, "tim13", "tim1_ker"),
1040 	PER_CLK(RCC_APB1LENR, 6, "tim12", "tim1_ker"),
1041 	PER_CLK(RCC_APB1LENR, 5, "tim7", "tim1_ker"),
1042 	PER_CLK(RCC_APB1LENR, 4, "tim6", "tim1_ker"),
1043 	PER_CLK(RCC_APB1LENR, 3, "tim5", "tim1_ker"),
1044 	PER_CLK(RCC_APB1LENR, 2, "tim4", "tim1_ker"),
1045 	PER_CLK(RCC_APB1LENR, 1, "tim3", "tim1_ker"),
1046 	PER_CLK(RCC_APB1LENR, 0, "tim2", "tim1_ker"),
1047 	PER_CLK(RCC_APB1HENR, 5, "mdios", "pclk1"),
1048 	PER_CLK(RCC_APB1HENR, 4, "opamp", "pclk1"),
1049 	PER_CLK(RCC_APB1HENR, 1, "crs", "pclk1"),
1050 	PER_CLK(RCC_APB2ENR, 18, "tim17", "tim2_ker"),
1051 	PER_CLK(RCC_APB2ENR, 17, "tim16", "tim2_ker"),
1052 	PER_CLK(RCC_APB2ENR, 16, "tim15", "tim2_ker"),
1053 	PER_CLK(RCC_APB2ENR, 1, "tim8", "tim2_ker"),
1054 	PER_CLK(RCC_APB2ENR, 0, "tim1", "tim2_ker"),
1055 	PER_CLK(RCC_APB4ENR, 26, "tmpsens", "pclk4"),
1056 	PER_CLK(RCC_APB4ENR, 16, "rtcapb", "pclk4"),
1057 	PER_CLK(RCC_APB4ENR, 15, "vref", "pclk4"),
1058 	PER_CLK(RCC_APB4ENR, 14, "comp12", "pclk4"),
1059 	PER_CLK(RCC_APB4ENR, 1, "syscfg", "pclk4"),
1060 };
1061 
1062 /* KERNEL CLOCKS */
1063 #define KER_CLKF(_gate_offset, _bit_idx,\
1064 		_mux_offset, _mux_shift, _mux_width,\
1065 		_name, _parent_name,\
1066 		_flags) \
1067 { \
1068 	.gate = &(struct gate_cfg) {_gate_offset, _bit_idx},\
1069 	.mux = &(struct muxdiv_cfg) {_mux_offset, _mux_shift, _mux_width },\
1070 	.name = _name, \
1071 	.parent_name = _parent_name, \
1072 	.num_parents = ARRAY_SIZE(_parent_name),\
1073 	.flags = _flags,\
1074 }
1075 
1076 #define KER_CLK(_gate_offset, _bit_idx, _mux_offset, _mux_shift, _mux_width,\
1077 		_name, _parent_name) \
1078 KER_CLKF(_gate_offset, _bit_idx, _mux_offset, _mux_shift, _mux_width,\
1079 		_name, _parent_name, 0)\
1080 
1081 #define KER_CLKF_NOMUX(_gate_offset, _bit_idx,\
1082 		_name, _parent_name,\
1083 		_flags) \
1084 { \
1085 	.gate = &(struct gate_cfg) {_gate_offset, _bit_idx},\
1086 	.mux = NULL,\
1087 	.name = _name, \
1088 	.parent_name = _parent_name, \
1089 	.num_parents = 1,\
1090 	.flags = _flags,\
1091 }
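/*
 * KER_CLKF() describes a gated kernel clock whose parent is selected by
 * the mux field at _mux_offset/_mux_shift/_mux_width; KER_CLKF_NOMUX()
 * is the single-parent, gate-only variant.
 */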
1092 
1093 static const struct composite_clk_cfg kclk[] = {
1094 	KER_CLK(RCC_AHB3ENR,  16, RCC_D1CCIPR,	16, 1, "sdmmc1", sdmmc_src),
1095 	KER_CLKF(RCC_AHB3ENR, 14, RCC_D1CCIPR,	 4, 2, "quadspi", qspi_src,
1096 			CLK_IGNORE_UNUSED),
1097 	KER_CLKF(RCC_AHB3ENR, 12, RCC_D1CCIPR,	 0, 2, "fmc", fmc_src,
1098 			CLK_IGNORE_UNUSED),
1099 	KER_CLK(RCC_AHB1ENR,  27, RCC_D2CCIP2R,	20, 2, "usb2otg", usbotg_src),
1100 	KER_CLK(RCC_AHB1ENR,  25, RCC_D2CCIP2R, 20, 2, "usb1otg", usbotg_src),
1101 	KER_CLK(RCC_AHB1ENR,   5, RCC_D3CCIPR,	16, 2, "adc12", adc_src),
1102 	KER_CLK(RCC_AHB2ENR,   9, RCC_D1CCIPR,	16, 1, "sdmmc2", sdmmc_src),
1103 	KER_CLK(RCC_AHB2ENR,   6, RCC_D2CCIP2R,	 8, 2, "rng", rng_src),
1104 	KER_CLK(RCC_AHB4ENR,  24, RCC_D3CCIPR,  16, 2, "adc3", adc_src),
1105 	KER_CLKF(RCC_APB3ENR,   4, RCC_D1CCIPR,	 8, 1, "dsi", dsi_src,
1106 			CLK_SET_RATE_PARENT),
1107 	KER_CLKF_NOMUX(RCC_APB3ENR, 3, "ltdc", ltdc_src, CLK_SET_RATE_PARENT),
1108 	KER_CLK(RCC_APB1LENR, 31, RCC_D2CCIP2R,  0, 3, "usart8", usart_src2),
1109 	KER_CLK(RCC_APB1LENR, 30, RCC_D2CCIP2R,  0, 3, "usart7", usart_src2),
1110 	KER_CLK(RCC_APB1LENR, 27, RCC_D2CCIP2R, 22, 2, "hdmicec", cec_src),
1111 	KER_CLK(RCC_APB1LENR, 23, RCC_D2CCIP2R, 12, 2, "i2c3", i2c_src1),
1112 	KER_CLK(RCC_APB1LENR, 22, RCC_D2CCIP2R, 12, 2, "i2c2", i2c_src1),
1113 	KER_CLK(RCC_APB1LENR, 21, RCC_D2CCIP2R, 12, 2, "i2c1", i2c_src1),
1114 	KER_CLK(RCC_APB1LENR, 20, RCC_D2CCIP2R,	 0, 3, "uart5", usart_src2),
1115 	KER_CLK(RCC_APB1LENR, 19, RCC_D2CCIP2R,  0, 3, "uart4", usart_src2),
1116 	KER_CLK(RCC_APB1LENR, 18, RCC_D2CCIP2R,  0, 3, "usart3", usart_src2),
1117 	KER_CLK(RCC_APB1LENR, 17, RCC_D2CCIP2R,  0, 3, "usart2", usart_src2),
1118 	KER_CLK(RCC_APB1LENR, 16, RCC_D2CCIP1R, 20, 2, "spdifrx", spdifrx_src),
1119 	KER_CLK(RCC_APB1LENR, 15, RCC_D2CCIP1R, 16, 3, "spi3", spi_src1),
1120 	KER_CLK(RCC_APB1LENR, 14, RCC_D2CCIP1R, 16, 3, "spi2", spi_src1),
1121 	KER_CLK(RCC_APB1LENR,  9, RCC_D2CCIP2R, 28, 3, "lptim1", lptim_src1),
1122 	KER_CLK(RCC_APB1HENR,  8, RCC_D2CCIP1R, 28, 2, "fdcan", fdcan_src),
1123 	KER_CLK(RCC_APB1HENR,  2, RCC_D2CCIP1R, 31, 1, "swp", swp_src),
1124 	KER_CLK(RCC_APB2ENR,  29, RCC_CFGR,	14, 1, "hrtim", hrtim_src),
1125 	KER_CLK(RCC_APB2ENR,  28, RCC_D2CCIP1R, 24, 1, "dfsdm1", dfsdm1_src),
1126 	KER_CLKF(RCC_APB2ENR,  24, RCC_D2CCIP1R,  6, 3, "sai3", sai_src,
1127 		 CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT),
1128 	KER_CLKF(RCC_APB2ENR,  23, RCC_D2CCIP1R,  6, 3, "sai2", sai_src,
1129 		 CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT),
1130 	KER_CLKF(RCC_APB2ENR,  22, RCC_D2CCIP1R,  0, 3, "sai1", sai_src,
1131 		 CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT),
1132 	KER_CLK(RCC_APB2ENR,  20, RCC_D2CCIP1R, 16, 3, "spi5", spi_src2),
1133 	KER_CLK(RCC_APB2ENR,  13, RCC_D2CCIP1R, 16, 3, "spi4", spi_src2),
1134 	KER_CLK(RCC_APB2ENR,  12, RCC_D2CCIP1R, 16, 3, "spi1", spi_src1),
1135 	KER_CLK(RCC_APB2ENR,   5, RCC_D2CCIP2R,  3, 3, "usart6", usart_src1),
1136 	KER_CLK(RCC_APB2ENR,   4, RCC_D2CCIP2R,  3, 3, "usart1", usart_src1),
1137 	KER_CLK(RCC_APB4ENR,  21, RCC_D3CCIPR,	24, 3, "sai4b", sai_src),
1138 	KER_CLK(RCC_APB4ENR,  21, RCC_D3CCIPR,	21, 3, "sai4a", sai_src),
1139 	KER_CLK(RCC_APB4ENR,  12, RCC_D3CCIPR,	13, 3, "lptim5", lptim_src2),
1140 	KER_CLK(RCC_APB4ENR,  11, RCC_D3CCIPR,	13, 3, "lptim4", lptim_src2),
1141 	KER_CLK(RCC_APB4ENR,  10, RCC_D3CCIPR,	13, 3, "lptim3", lptim_src2),
1142 	KER_CLK(RCC_APB4ENR,   9, RCC_D3CCIPR,	10, 3, "lptim2", lptim_src2),
1143 	KER_CLK(RCC_APB4ENR,   7, RCC_D3CCIPR,	 8, 2, "i2c4", i2c_src2),
1144 	KER_CLK(RCC_APB4ENR,   5, RCC_D3CCIPR,	28, 3, "spi6", spi_src3),
1145 	KER_CLK(RCC_APB4ENR,   3, RCC_D3CCIPR,	 0, 3, "lpuart1", lpuart1_src),
1146 };
1147 
1148 static struct composite_clk_gcfg kernel_clk_cfg = {
1149 	M_CFG_MUX(NULL, 0),
1150 	M_CFG_GATE(NULL, 0),
1151 };
1152 
1153 /* RTC clock */
1154 /*
1155  * RTC & LSE registers are protected against parasitic write access.
1156  * PWR_CR_DBP bit must be set to enable write access to RTC registers.
1157  */
1158 /* STM32_PWR_CR */
1159 #define PWR_CR				0x00
1160 /* STM32_PWR_CR bit field */
1161 #define PWR_CR_DBP			BIT(8)
1162 
1163 static struct composite_clk_gcfg rtc_clk_cfg = {
1164 	M_CFG_MUX(NULL, 0),
1165 	M_CFG_GATE(NULL, 0),
1166 };
1167 
1168 static const struct composite_clk_cfg rtc_clk =
1169 	KER_CLK(RCC_BDCR, 15, RCC_BDCR, 8, 2, "rtc_ck", rtc_src);
1170 
1171 /* Micro-controller output clock */
1172 static struct composite_clk_gcfg mco_clk_cfg = {
1173 	M_CFG_MUX(NULL, 0),
1174 	M_CFG_DIV(NULL,	CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO),
1175 };
1176 
1177 #define M_MCO_F(_name, _parents, _mux_offset,  _mux_shift, _mux_width,\
1178 		_rate_offset, _rate_shift, _rate_width,\
1179 		_flags)\
1180 {\
1181 	.mux = &(struct muxdiv_cfg) {_mux_offset, _mux_shift, _mux_width },\
1182 	.div = &(struct muxdiv_cfg) {_rate_offset, _rate_shift, _rate_width},\
1183 	.gate = NULL,\
1184 	.name = _name,\
1185 	.parent_name = _parents,\
1186 	.num_parents = ARRAY_SIZE(_parents),\
1187 	.flags = _flags,\
1188 }
1189 
1190 static const struct composite_clk_cfg mco_clk[] = {
1191 	M_MCO_F("mco1", mco_src1, RCC_CFGR, 22, 4, RCC_CFGR, 18, 4, 0),
1192 	M_MCO_F("mco2", mco_src2, RCC_CFGR, 29, 3, RCC_CFGR, 25, 4, 0),
1193 };
1194 
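/*
 * RCC init: map the RCC registers, lift the backup domain write
 * protection through the PWR syscon, then register the fixed internal
 * oscillators, system muxes, core/bus dividers, oscillator gates, PLLs
 * and their output dividers, peripheral gates, kernel clocks, the RTC
 * clock and the MCO outputs, and finally expose them all through a
 * clk_hw_onecell provider.
 */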
1195 static void __init stm32h7_rcc_init(struct device_node *np)
1196 {
1197 	struct clk_hw_onecell_data *clk_data;
1198 	struct composite_cfg c_cfg;
1199 	int n;
1200 	const char *hse_clk, *lse_clk, *i2s_clk;
1201 	struct regmap *pdrm;
1202 
1203 	clk_data = kzalloc(struct_size(clk_data, hws, STM32H7_MAX_CLKS),
1204 			   GFP_KERNEL);
1205 	if (!clk_data)
1206 		return;
1207 
1208 	clk_data->num = STM32H7_MAX_CLKS;
1209 
1210 	hws = clk_data->hws;
1211 
1212 	for (n = 0; n < STM32H7_MAX_CLKS; n++)
1213 		hws[n] = ERR_PTR(-ENOENT);
1214 
1215 	/* get RCC base address from DT */
1216 	base = of_iomap(np, 0);
1217 	if (!base) {
1218 		pr_err("%pOFn: unable to map resource\n", np);
1219 		goto err_free_clks;
1220 	}
1221 
1222 	pdrm = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
1223 	if (IS_ERR(pdrm))
1224 		pr_warn("%s: Unable to get syscfg\n", __func__);
1225 	else
1226 		/* Unconditionally disable the backup domain write protection;
1227 		 * it will never be re-enabled.
1228 		 * Needed by the LSE & RTC clocks.
1229 		 */
1230 		regmap_update_bits(pdrm, PWR_CR, PWR_CR_DBP, PWR_CR_DBP);
1231 
1232 	/* Get the parent clock names from DT */
1233 	hse_clk = of_clk_get_parent_name(np, 0);
1234 	lse_clk = of_clk_get_parent_name(np, 1);
1235 	i2s_clk = of_clk_get_parent_name(np, 2);
1236 
1237 	sai_src[3] = i2s_clk;
1238 	spi_src1[3] = i2s_clk;
1239 
1240 	/* Register Internal oscillators */
1241 	clk_hw_register_fixed_rate(NULL, "clk-hsi", NULL, 0, 64000000);
1242 	clk_hw_register_fixed_rate(NULL, "clk-csi", NULL, 0, 4000000);
1243 	clk_hw_register_fixed_rate(NULL, "clk-lsi", NULL, 0, 32000);
1244 	clk_hw_register_fixed_rate(NULL, "clk-rc48", NULL, 0, 48000000);
1245 
1246 	/* This clock is provided externally; its frequency is unknown */
1247 	hws[CK_DSI_PHY] = clk_hw_register_fixed_rate(NULL, "ck_dsi_phy", NULL,
1248 			0, 0);
1249 
1250 	hws[HSI_DIV] = clk_hw_register_divider(NULL, "hsidiv", "clk-hsi", 0,
1251 			base + RCC_CR, 3, 2, CLK_DIVIDER_POWER_OF_TWO,
1252 			&stm32rcc_lock);
1253 
1254 	hws[HSE_1M] = clk_hw_register_divider(NULL, "hse_1M", "hse_ck",	0,
1255 			base + RCC_CFGR, 8, 6, CLK_DIVIDER_ONE_BASED |
1256 			CLK_DIVIDER_ALLOW_ZERO,
1257 			&stm32rcc_lock);
1258 
1259 	/* Mux system clocks */
1260 	for (n = 0; n < ARRAY_SIZE(stm32_mclk); n++)
1261 		hws[MCLK_BANK + n] = clk_hw_register_mux(NULL,
1262 				stm32_mclk[n].name,
1263 				stm32_mclk[n].parents,
1264 				stm32_mclk[n].num_parents,
1265 				stm32_mclk[n].flags,
1266 				stm32_mclk[n].offset + base,
1267 				stm32_mclk[n].shift,
1268 				stm32_mclk[n].width,
1269 				0,
1270 				&stm32rcc_lock);
1271 
1272 	register_core_and_bus_clocks();
1273 
1274 	/* Oscillator clocks */
1275 	for (n = 0; n < ARRAY_SIZE(stm32_oclk); n++)
1276 		hws[OSC_BANK + n] = clk_register_ready_gate(NULL,
1277 				stm32_oclk[n].name,
1278 				stm32_oclk[n].parent,
1279 				stm32_oclk[n].gate_offset + base,
1280 				stm32_oclk[n].bit_idx,
1281 				stm32_oclk[n].bit_rdy,
1282 				stm32_oclk[n].flags,
1283 				&stm32rcc_lock);
1284 
1285 	hws[HSE_CK] = clk_register_ready_gate(NULL,
1286 				"hse_ck",
1287 				hse_clk,
1288 				RCC_CR + base,
1289 				16, 17,
1290 				0,
1291 				&stm32rcc_lock);
1292 
1293 	hws[LSE_CK] = clk_register_ready_gate(NULL,
1294 				"lse_ck",
1295 				lse_clk,
1296 				RCC_BDCR + base,
1297 				0, 1,
1298 				0,
1299 				&stm32rcc_lock);
1300 
1301 	hws[CSI_KER_DIV122 + n] = clk_hw_register_fixed_factor(NULL,
1302 			"csi_ker_div122", "csi_ker", 0, 1, 122);
1303 
1304 	/* PLLs */
1305 	for (n = 0; n < ARRAY_SIZE(stm32_pll); n++) {
1306 		int odf;
1307 
1308 		/* Register the VCO */
1309 		clk_register_stm32_pll(NULL, stm32_pll[n].name,
1310 				stm32_pll[n].parent_name, stm32_pll[n].flags,
1311 				stm32_pll[n].cfg,
1312 				&stm32rcc_lock);
1313 
1314 		/* Register the 3 output dividers */
1315 		for (odf = 0; odf < 3; odf++) {
1316 			int idx = n * 3 + odf;
1317 
1318 			get_cfg_composite_div(&odf_clk_gcfg, &stm32_odf[n][odf],
1319 					&c_cfg,	&stm32rcc_lock);
1320 
1321 			hws[ODF_BANK + idx] = clk_hw_register_composite(NULL,
1322 					stm32_odf[n][odf].name,
1323 					stm32_odf[n][odf].parent_name,
1324 					stm32_odf[n][odf].num_parents,
1325 					c_cfg.mux_hw, c_cfg.mux_ops,
1326 					c_cfg.div_hw, c_cfg.div_ops,
1327 					c_cfg.gate_hw, c_cfg.gate_ops,
1328 					stm32_odf[n][odf].flags);
1329 		}
1330 	}
1331 
1332 	/* Peripheral clocks */
1333 	for (n = 0; n < ARRAY_SIZE(pclk); n++)
1334 		hws[PERIF_BANK + n] = clk_hw_register_gate(NULL, pclk[n].name,
1335 				pclk[n].parent,
1336 				pclk[n].flags, base + pclk[n].gate_offset,
1337 				pclk[n].bit_idx, pclk[n].flags, &stm32rcc_lock);
1338 
1339 	/* Kernel clocks */
1340 	for (n = 0; n < ARRAY_SIZE(kclk); n++) {
1341 		get_cfg_composite_div(&kernel_clk_cfg, &kclk[n], &c_cfg,
1342 				&stm32rcc_lock);
1343 
1344 		hws[KERN_BANK + n] = clk_hw_register_composite(NULL,
1345 				kclk[n].name,
1346 				kclk[n].parent_name,
1347 				kclk[n].num_parents,
1348 				c_cfg.mux_hw, c_cfg.mux_ops,
1349 				c_cfg.div_hw, c_cfg.div_ops,
1350 				c_cfg.gate_hw, c_cfg.gate_ops,
1351 				kclk[n].flags);
1352 	}
1353 
1354 	/* RTC clock (default state is off) */
1355 	clk_hw_register_fixed_rate(NULL, "off", NULL, 0, 0);
1356 
1357 	get_cfg_composite_div(&rtc_clk_cfg, &rtc_clk, &c_cfg, &stm32rcc_lock);
1358 
1359 	hws[RTC_CK] = clk_hw_register_composite(NULL,
1360 			rtc_clk.name,
1361 			rtc_clk.parent_name,
1362 			rtc_clk.num_parents,
1363 			c_cfg.mux_hw, c_cfg.mux_ops,
1364 			c_cfg.div_hw, c_cfg.div_ops,
1365 			c_cfg.gate_hw, c_cfg.gate_ops,
1366 			rtc_clk.flags);
1367 
1368 	/* Micro-controller clocks */
1369 	for (n = 0; n < ARRAY_SIZE(mco_clk); n++) {
1370 		get_cfg_composite_div(&mco_clk_cfg, &mco_clk[n], &c_cfg,
1371 				&stm32rcc_lock);
1372 
1373 		hws[MCO_BANK + n] = clk_hw_register_composite(NULL,
1374 				mco_clk[n].name,
1375 				mco_clk[n].parent_name,
1376 				mco_clk[n].num_parents,
1377 				c_cfg.mux_hw, c_cfg.mux_ops,
1378 				c_cfg.div_hw, c_cfg.div_ops,
1379 				c_cfg.gate_hw, c_cfg.gate_ops,
1380 				mco_clk[n].flags);
1381 	}
1382 
1383 	of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
1384 
1385 	return;
1386 
1387 err_free_clks:
1388 	kfree(clk_data);
1389 }
1390 
1391 /* The RCC node is a clock and reset controller, and these
1392  * functionalities are supported by different drivers that
1393  * match the same compatible strings.
1394  */
1395 CLK_OF_DECLARE_DRIVER(stm32h7_rcc, "st,stm32h743-rcc", stm32h7_rcc_init);
1396