xref: /linux/drivers/clk/ti/clkctrl.c (revision a44e4f3ab16bc808590763a543a93b6fbf3abcc4)
/*
 * OMAP clkctrl clock support
 *
 * Copyright (C) 2017 Texas Instruments, Inc.
 *
 * Tero Kristo <t-kristo@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk/ti.h>
#include <linux/delay.h>
#include <linux/timekeeping.h>
#include "clock.h"

#define NO_IDLEST			0x1

#define OMAP4_MODULEMODE_MASK		0x3

#define MODULEMODE_HWCTRL		0x1
#define MODULEMODE_SWCTRL		0x2

#define OMAP4_IDLEST_MASK		(0x3 << 16)
#define OMAP4_IDLEST_SHIFT		16

#define CLKCTRL_IDLEST_FUNCTIONAL	0x0
#define CLKCTRL_IDLEST_INTERFACE_IDLE	0x2
#define CLKCTRL_IDLEST_DISABLED		0x3

/* These timeouts are in us */
#define OMAP4_MAX_MODULE_READY_TIME	2000
#define OMAP4_MAX_MODULE_DISABLE_TIME	5000

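/*
 * During early boot the clocksource is not yet usable for timestamps,
 * so timeouts are tracked by counting udelay() cycles until the
 * arch_initcall below clears this flag.
 */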
static bool _early_timeout = true;

struct omap_clkctrl_provider {
	void __iomem *base;
	struct list_head clocks;
	char *clkdm_name;
};

struct omap_clkctrl_clk {
	struct clk_hw *clk;
	u16 reg_offset;
	int bit_offset;
	struct list_head node;
};

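/*
 * Timeout bookkeeping for _omap4_is_timeout(): either a plain cycle
 * counter (early boot and timekeeping-suspended paths) or a ktime_t
 * start stamp once the clocksource is available.
 */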
union omap4_timeout {
	u32 cycles;
	ktime_t start;
};

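/* Empty terminator-only table, used until a SoC match below selects real data. */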
static const struct omap_clkctrl_data default_clkctrl_data[] __initconst = {
	{ 0 },
};

static u32 _omap4_idlest(u32 val)
{
	val &= OMAP4_IDLEST_MASK;
	val >>= OMAP4_IDLEST_SHIFT;

	return val;
}

static bool _omap4_is_idle(u32 val)
{
	val = _omap4_idlest(val);

	return val == CLKCTRL_IDLEST_DISABLED;
}

static bool _omap4_is_ready(u32 val)
{
	val = _omap4_idlest(val);

	return val == CLKCTRL_IDLEST_FUNCTIONAL ||
	       val == CLKCTRL_IDLEST_INTERFACE_IDLE;
}

static bool _omap4_is_timeout(union omap4_timeout *time, u32 timeout)
{
	/*
	 * There are two special cases where ktime_to_ns() can't be
	 * used to track the timeouts. First one is during early boot
	 * when the timers haven't been initialized yet. The second
	 * one is during suspend-resume cycle while timekeeping is
	 * being suspended / resumed. Clocksource for the system
	 * can be from a timer that requires pm_runtime access, which
	 * will eventually bring us here with timekeeping_suspended,
	 * during both suspend entry and resume paths. This happens
	 * at least on am43xx platform. Account for flakeyness
	 * with udelay() by multiplying the timeout value by 2.
	 */
	if (unlikely(_early_timeout || timekeeping_suspended)) {
		if (time->cycles++ < timeout) {
			udelay(1 * 2);
			return false;
		}
	} else {
		if (!ktime_to_ns(time->start)) {
			time->start = ktime_get();
			return false;
		}

		if (ktime_us_delta(ktime_get(), time->start) < timeout) {
			cpu_relax();
			return false;
		}
	}

	return true;
}

static int __init _omap4_disable_early_timeout(void)
{
	_early_timeout = false;

	return 0;
}
arch_initcall(_omap4_disable_early_timeout);

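/*
 * Enable sequence for a clkctrl main clock: wake the associated
 * clockdomain if there is one, program MODULEMODE in the module's
 * clkctrl register, then poll IDLEST until the module reports
 * functional (or functional with interface idle), unless NO_IDLEST
 * is set for the clock.
 */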
static int _omap4_clkctrl_clk_enable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	u32 val;
	int ret;
	union omap4_timeout timeout = { 0 };

	if (clk->clkdm) {
		ret = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk);
		if (ret) {
			WARN(1,
			     "%s: could not enable %s's clockdomain %s: %d\n",
			     __func__, clk_hw_get_name(hw),
			     clk->clkdm_name, ret);
			return ret;
		}
	}

	if (!clk->enable_bit)
		return 0;

	val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);

	val &= ~OMAP4_MODULEMODE_MASK;
	val |= clk->enable_bit;

	ti_clk_ll_ops->clk_writel(val, &clk->enable_reg);

	if (clk->flags & NO_IDLEST)
		return 0;

	/* Wait until module is enabled */
	while (!_omap4_is_ready(ti_clk_ll_ops->clk_readl(&clk->enable_reg))) {
		if (_omap4_is_timeout(&timeout, OMAP4_MAX_MODULE_READY_TIME)) {
			pr_err("%s: failed to enable\n", clk_hw_get_name(hw));
			return -EBUSY;
		}
	}

	return 0;
}

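/*
 * Disable mirrors the enable path: clear MODULEMODE and wait for
 * IDLEST to report the module disabled. A timeout here is only
 * logged, and the clockdomain is released in either case.
 */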
static void _omap4_clkctrl_clk_disable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	u32 val;
	union omap4_timeout timeout = { 0 };

	if (!clk->enable_bit)
		goto exit;

	val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);

	val &= ~OMAP4_MODULEMODE_MASK;

	ti_clk_ll_ops->clk_writel(val, &clk->enable_reg);

	if (clk->flags & NO_IDLEST)
		goto exit;

	/* Wait until module is disabled */
	while (!_omap4_is_idle(ti_clk_ll_ops->clk_readl(&clk->enable_reg))) {
		if (_omap4_is_timeout(&timeout,
				      OMAP4_MAX_MODULE_DISABLE_TIME)) {
			pr_err("%s: failed to disable\n", clk_hw_get_name(hw));
			break;
		}
	}

exit:
	if (clk->clkdm)
		ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk);
}

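/* Reflects only the programmed MODULEMODE bits; IDLEST is not consulted. */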
static int _omap4_clkctrl_clk_is_enabled(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	u32 val;

	val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);

	if (val & clk->enable_bit)
		return 1;

	return 0;
}

static const struct clk_ops omap4_clkctrl_clk_ops = {
	.enable		= _omap4_clkctrl_clk_enable,
	.disable	= _omap4_clkctrl_clk_disable,
	.is_enabled	= _omap4_clkctrl_clk_is_enabled,
	.init		= omap2_init_clk_clkdm,
};

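/*
 * Clock specifiers for this provider use two cells: the clkctrl
 * register offset and the bit offset within that register (0 selects
 * the main modulemode clock). As a rough, illustrative sketch only
 * (the label and the 0x20 offset are made up; real users go through
 * the per-SoC dt-bindings clkctrl macros), a consumer would look like:
 *
 *	serial@... {
 *		clocks = <&l4ls_clkctrl 0x20 0>;
 *	};
 */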
static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec,
					      void *data)
{
	struct omap_clkctrl_provider *provider = data;
	struct omap_clkctrl_clk *entry;
	bool found = false;

	if (clkspec->args_count != 2)
		return ERR_PTR(-EINVAL);

	pr_debug("%s: looking for %x:%x\n", __func__,
		 clkspec->args[0], clkspec->args[1]);

	list_for_each_entry(entry, &provider->clocks, node) {
		if (entry->reg_offset == clkspec->args[0] &&
		    entry->bit_offset == clkspec->args[1]) {
			found = true;
			break;
		}
	}

	if (!found)
		return ERR_PTR(-EINVAL);

	return entry->clk;
}

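/*
 * Register one sub-clock and add it to the provider's list so that the
 * xlate callback above can find it by <offset, bit>. The generated clock
 * name encodes the node name (prefixed with the parent node in compat
 * mode), the register offset and the bit, giving names along the lines
 * of "l4ls-clkctrl:0038:0" (illustrative).
 */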
static int __init
_ti_clkctrl_clk_register(struct omap_clkctrl_provider *provider,
			 struct device_node *node, struct clk_hw *clk_hw,
			 u16 offset, u8 bit, const char * const *parents,
			 int num_parents, const struct clk_ops *ops)
{
	struct clk_init_data init = { NULL };
	struct clk *clk;
	struct omap_clkctrl_clk *clkctrl_clk;
	int ret = 0;

	if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
		init.name = kasprintf(GFP_KERNEL, "%pOFn:%pOFn:%04x:%d",
				      node->parent, node, offset,
				      bit);
	else
		init.name = kasprintf(GFP_KERNEL, "%pOFn:%04x:%d", node,
				      offset, bit);
	clkctrl_clk = kzalloc(sizeof(*clkctrl_clk), GFP_KERNEL);
	if (!init.name || !clkctrl_clk) {
		ret = -ENOMEM;
		goto cleanup;
	}

	clk_hw->init = &init;
	init.parent_names = parents;
	init.num_parents = num_parents;
	init.ops = ops;
	init.flags = 0;

	clk = ti_clk_register(NULL, clk_hw, init.name);
	if (IS_ERR_OR_NULL(clk)) {
		ret = -EINVAL;
		goto cleanup;
	}

	clkctrl_clk->reg_offset = offset;
	clkctrl_clk->bit_offset = bit;
	clkctrl_clk->clk = clk_hw;

	list_add(&clkctrl_clk->node, &provider->clocks);

	return 0;

cleanup:
	kfree(init.name);
	kfree(clkctrl_clk);
	return ret;
}

static void __init
_ti_clkctrl_setup_gate(struct omap_clkctrl_provider *provider,
		       struct device_node *node, u16 offset,
		       const struct omap_clkctrl_bit_data *data,
		       void __iomem *reg)
{
	struct clk_hw_omap *clk_hw;

	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
	if (!clk_hw)
		return;

	clk_hw->enable_bit = data->bit;
	clk_hw->enable_reg.ptr = reg;

	if (_ti_clkctrl_clk_register(provider, node, &clk_hw->hw, offset,
				     data->bit, data->parents, 1,
				     &omap_gate_clk_ops))
		kfree(clk_hw);
}

static void __init
_ti_clkctrl_setup_mux(struct omap_clkctrl_provider *provider,
		      struct device_node *node, u16 offset,
		      const struct omap_clkctrl_bit_data *data,
		      void __iomem *reg)
{
	struct clk_omap_mux *mux;
	int num_parents = 0;
	const char * const *pname;

	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return;

	pname = data->parents;
	while (*pname) {
		num_parents++;
		pname++;
	}

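	/*
	 * Round the mux bitfield mask up to whole bits: for example,
	 * three parents (indices 0..2) need two bits, so the mask
	 * becomes 0x3.
	 */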
	mux->mask = num_parents;
	if (!(mux->flags & CLK_MUX_INDEX_ONE))
		mux->mask--;

	mux->mask = (1 << fls(mux->mask)) - 1;

	mux->shift = data->bit;
	mux->reg.ptr = reg;

	if (_ti_clkctrl_clk_register(provider, node, &mux->hw, offset,
				     data->bit, data->parents, num_parents,
				     &ti_clk_mux_ops))
		kfree(mux);
}

static void __init
_ti_clkctrl_setup_div(struct omap_clkctrl_provider *provider,
		      struct device_node *node, u16 offset,
		      const struct omap_clkctrl_bit_data *data,
		      void __iomem *reg)
{
	struct clk_omap_divider *div;
	const struct omap_clkctrl_div_data *div_data = data->data;
	u8 div_flags = 0;

	div = kzalloc(sizeof(*div), GFP_KERNEL);
	if (!div)
		return;

	div->reg.ptr = reg;
	div->shift = data->bit;
	div->flags = div_data->flags;

	if (div->flags & CLK_DIVIDER_POWER_OF_TWO)
		div_flags |= CLKF_INDEX_POWER_OF_TWO;

	if (ti_clk_parse_divider_data((int *)div_data->dividers, 0,
				      div_data->max_div, div_flags,
				      &div->width, &div->table)) {
		pr_err("%s: Data parsing for %pOF:%04x:%d failed\n", __func__,
		       node, offset, data->bit);
		kfree(div);
		return;
	}

	if (_ti_clkctrl_clk_register(provider, node, &div->hw, offset,
				     data->bit, data->parents, 1,
				     &ti_clk_divider_ops))
		kfree(div);
}

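/*
 * Walk the optional bit_data table for a clkctrl register and register
 * each gate, divider or mux sub-clock it describes. The table ends at
 * an entry with bit 0, which always belongs to the main modulemode
 * field and is never a sub-clock.
 */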
static void __init
_ti_clkctrl_setup_subclks(struct omap_clkctrl_provider *provider,
			  struct device_node *node,
			  const struct omap_clkctrl_reg_data *data,
			  void __iomem *reg)
{
	const struct omap_clkctrl_bit_data *bits = data->bit_data;

	if (!bits)
		return;

	while (bits->bit) {
		switch (bits->type) {
		case TI_CLK_GATE:
			_ti_clkctrl_setup_gate(provider, node, data->offset,
					       bits, reg);
			break;

		case TI_CLK_DIVIDER:
			_ti_clkctrl_setup_div(provider, node, data->offset,
					      bits, reg);
			break;

		case TI_CLK_MUX:
			_ti_clkctrl_setup_mux(provider, node, data->offset,
					      bits, reg);
			break;

		default:
			pr_err("%s: bad subclk type: %d\n", __func__,
			       bits->type);
			return;
		}
		bits++;
	}
}

static void __init _clkctrl_add_provider(void *data,
					 struct device_node *np)
{
	of_clk_add_hw_provider(np, _ti_omap4_clkctrl_xlate, data);
}

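/*
 * Probe one "ti,clkctrl" node: match its address against the SoC
 * clkctrl data tables, derive a default clockdomain name from the node
 * (or parent node) name, register the main modulemode clock plus any
 * gate/mux/divider sub-clocks for each described register, and finally
 * expose everything through a two-cell clock provider.
 */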
static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
{
	struct omap_clkctrl_provider *provider;
	const struct omap_clkctrl_data *data = default_clkctrl_data;
	const struct omap_clkctrl_reg_data *reg_data;
	struct clk_init_data init = { NULL };
	struct clk_hw_omap *hw;
	struct clk *clk;
	struct omap_clkctrl_clk *clkctrl_clk;
	const __be32 *addrp;
	u32 addr;
	int ret;
	char *c;
	u16 soc_mask = 0;

	if (!(ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT) &&
	    of_node_name_eq(node, "clk"))
		ti_clk_features.flags |= TI_CLK_CLKCTRL_COMPAT;

	addrp = of_get_address(node, 0, NULL, NULL);
	addr = (u32)of_translate_address(node, addrp);

#ifdef CONFIG_ARCH_OMAP4
	if (of_machine_is_compatible("ti,omap4"))
		data = omap4_clkctrl_data;
#endif
#ifdef CONFIG_SOC_OMAP5
	if (of_machine_is_compatible("ti,omap5"))
		data = omap5_clkctrl_data;
#endif
#ifdef CONFIG_SOC_DRA7XX
	if (of_machine_is_compatible("ti,dra7")) {
		if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
			data = dra7_clkctrl_compat_data;
		else
			data = dra7_clkctrl_data;
	}

	if (of_machine_is_compatible("ti,dra72"))
		soc_mask = CLKF_SOC_DRA72;
	if (of_machine_is_compatible("ti,dra74"))
		soc_mask = CLKF_SOC_DRA74;
	if (of_machine_is_compatible("ti,dra76"))
		soc_mask = CLKF_SOC_DRA76;
#endif
#ifdef CONFIG_SOC_AM33XX
	if (of_machine_is_compatible("ti,am33xx")) {
		if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
			data = am3_clkctrl_compat_data;
		else
			data = am3_clkctrl_data;
	}
#endif
#ifdef CONFIG_SOC_AM43XX
	if (of_machine_is_compatible("ti,am4372")) {
		if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
			data = am4_clkctrl_compat_data;
		else
			data = am4_clkctrl_data;
	}

	if (of_machine_is_compatible("ti,am438x")) {
		if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
			data = am438x_clkctrl_compat_data;
		else
			data = am438x_clkctrl_data;
	}
#endif
#ifdef CONFIG_SOC_TI81XX
	if (of_machine_is_compatible("ti,dm814"))
		data = dm814_clkctrl_data;

	if (of_machine_is_compatible("ti,dm816"))
		data = dm816_clkctrl_data;
#endif

	if (ti_clk_get_features()->flags & TI_CLK_DEVICE_TYPE_GP)
		soc_mask |= CLKF_SOC_NONSEC;

	while (data->addr) {
		if (addr == data->addr)
			break;

		data++;
	}

	if (!data->addr) {
		pr_err("%pOF not found from clkctrl data.\n", node);
		return;
	}

	provider = kzalloc(sizeof(*provider), GFP_KERNEL);
	if (!provider)
		return;

	provider->base = of_iomap(node, 0);

	if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT) {
		provider->clkdm_name = kasprintf(GFP_KERNEL, "%pOFnxxx", node->parent);
		if (!provider->clkdm_name) {
			kfree(provider);
			return;
		}

		/*
		 * Create default clkdm name, replace _cm from end of parent
		 * node name with _clkdm
		 */
		provider->clkdm_name[strlen(provider->clkdm_name) - 5] = 0;
	} else {
		provider->clkdm_name = kasprintf(GFP_KERNEL, "%pOFn", node);
		if (!provider->clkdm_name) {
			kfree(provider);
			return;
		}

		/*
		 * Create default clkdm name, replace _clkctrl from end of
		 * node name with _clkdm
		 */
		provider->clkdm_name[strlen(provider->clkdm_name) - 7] = 0;
	}

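	/*
	 * For example (node names illustrative): in compat mode a parent
	 * node named "l4ls_cm" yields "l4ls_clkdm"; otherwise a node named
	 * "l4ls-clkctrl" yields "l4ls_clkdm" once the dashes below are
	 * replaced with underscores.
	 */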
	strcat(provider->clkdm_name, "clkdm");

	/* Replace any dash from the clkdm name with underscore */
	c = provider->clkdm_name;

	while (*c) {
		if (*c == '-')
			*c = '_';
		c++;
	}

	INIT_LIST_HEAD(&provider->clocks);

	/* Generate clocks */
	reg_data = data->regs;

	while (reg_data->parent) {
		if ((reg_data->flags & CLKF_SOC_MASK) &&
		    (reg_data->flags & soc_mask) == 0) {
			reg_data++;
			continue;
		}

		hw = kzalloc(sizeof(*hw), GFP_KERNEL);
		if (!hw)
			return;

		hw->enable_reg.ptr = provider->base + reg_data->offset;

		_ti_clkctrl_setup_subclks(provider, node, reg_data,
					  hw->enable_reg.ptr);

		if (reg_data->flags & CLKF_SW_SUP)
			hw->enable_bit = MODULEMODE_SWCTRL;
		if (reg_data->flags & CLKF_HW_SUP)
			hw->enable_bit = MODULEMODE_HWCTRL;
		if (reg_data->flags & CLKF_NO_IDLEST)
			hw->flags |= NO_IDLEST;

		if (reg_data->clkdm_name)
			hw->clkdm_name = reg_data->clkdm_name;
		else
			hw->clkdm_name = provider->clkdm_name;

		init.parent_names = &reg_data->parent;
		init.num_parents = 1;
		init.flags = 0;
		if (reg_data->flags & CLKF_SET_RATE_PARENT)
			init.flags |= CLK_SET_RATE_PARENT;
		if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
			init.name = kasprintf(GFP_KERNEL, "%pOFn:%pOFn:%04x:%d",
					      node->parent, node,
					      reg_data->offset, 0);
		else
			init.name = kasprintf(GFP_KERNEL, "%pOFn:%04x:%d",
					      node, reg_data->offset, 0);
		clkctrl_clk = kzalloc(sizeof(*clkctrl_clk), GFP_KERNEL);
		if (!init.name || !clkctrl_clk)
			goto cleanup;

		init.ops = &omap4_clkctrl_clk_ops;
		hw->hw.init = &init;

		clk = ti_clk_register(NULL, &hw->hw, init.name);
		if (IS_ERR_OR_NULL(clk))
			goto cleanup;

		clkctrl_clk->reg_offset = reg_data->offset;
		clkctrl_clk->clk = &hw->hw;

		list_add(&clkctrl_clk->node, &provider->clocks);

		reg_data++;
	}

	ret = of_clk_add_hw_provider(node, _ti_omap4_clkctrl_xlate, provider);
	if (ret == -EPROBE_DEFER)
		ti_clk_retry_init(node, provider, _clkctrl_add_provider);

	return;

cleanup:
	kfree(hw);
	kfree(init.name);
	kfree(clkctrl_clk);
}
CLK_OF_DECLARE(ti_omap4_clkctrl_clock, "ti,clkctrl",
	       _ti_omap4_clkctrl_setup);