/*
 * OMAP clkctrl clock support
 *
 * Copyright (C) 2017 Texas Instruments, Inc.
 *
 * Tero Kristo <t-kristo@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk/ti.h>
#include <linux/delay.h>
#include <linux/timekeeping.h>
#include "clock.h"

#define NO_IDLEST			0x1

#define OMAP4_MODULEMODE_MASK		0x3

#define MODULEMODE_HWCTRL		0x1
#define MODULEMODE_SWCTRL		0x2

#define OMAP4_IDLEST_MASK		(0x3 << 16)
#define OMAP4_IDLEST_SHIFT		16

#define CLKCTRL_IDLEST_FUNCTIONAL	0x0
#define CLKCTRL_IDLEST_INTERFACE_IDLE	0x2
#define CLKCTRL_IDLEST_DISABLED		0x3
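
/*
 * Rough layout of the CM_*_CLKCTRL register fields used below, as implied
 * by the masks above (see the SoC TRM for the authoritative description):
 *
 *   bits 17:16  IDLEST      0x0 = functional, 0x2 = interface idle,
 *                           0x3 = disabled
 *   bits  1:0   MODULEMODE  0x1 = hardware controlled,
 *                           0x2 = software controlled
 */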

/* These timeouts are in us */
#define OMAP4_MAX_MODULE_READY_TIME	2000
#define OMAP4_MAX_MODULE_DISABLE_TIME	5000

static bool _early_timeout = true;

struct omap_clkctrl_provider {
	void __iomem *base;
	struct list_head clocks;
	char *clkdm_name;
};

struct omap_clkctrl_clk {
	struct clk_hw *clk;
	u16 reg_offset;
	int bit_offset;
	struct list_head node;
};

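/*
 * Timeout bookkeeping for _omap4_is_timeout(): before timekeeping is
 * available (early boot) and while it is suspended, 'cycles' counts the
 * 1 us udelay() polls; otherwise 'start' records the ktime of the first
 * poll.
 */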
union omap4_timeout {
	u32 cycles;
	ktime_t start;
};

static const struct omap_clkctrl_data default_clkctrl_data[] __initconst = {
	{ 0 },
};

static u32 _omap4_idlest(u32 val)
{
	val &= OMAP4_IDLEST_MASK;
	val >>= OMAP4_IDLEST_SHIFT;

	return val;
}

static bool _omap4_is_idle(u32 val)
{
	val = _omap4_idlest(val);

	return val == CLKCTRL_IDLEST_DISABLED;
}

static bool _omap4_is_ready(u32 val)
{
	val = _omap4_idlest(val);

	return val == CLKCTRL_IDLEST_FUNCTIONAL ||
	       val == CLKCTRL_IDLEST_INTERFACE_IDLE;
}

static bool _omap4_is_timeout(union omap4_timeout *time, u32 timeout)
{
	/*
	 * There are two special cases where ktime_to_ns() can't be
	 * used to track the timeouts. The first is early boot, before
	 * the timers have been initialized. The second is the
	 * suspend-resume cycle, while timekeeping is being suspended or
	 * resumed. The system clocksource may be a timer that requires
	 * pm_runtime access, which eventually brings us here with
	 * timekeeping_suspended set, on both the suspend entry and
	 * resume paths. This happens at least on the am43xx platform.
	 */
	if (unlikely(_early_timeout || timekeeping_suspended)) {
		if (time->cycles++ < timeout) {
			udelay(1);
			return false;
		}
	} else {
		if (!ktime_to_ns(time->start)) {
			time->start = ktime_get();
			return false;
		}

		if (ktime_us_delta(ktime_get(), time->start) < timeout) {
			cpu_relax();
			return false;
		}
	}

	return true;
}
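
/*
 * Callers poll the IDLEST field with the helpers above, roughly:
 *
 *	union omap4_timeout timeout = { 0 };
 *
 *	while (!_omap4_is_ready(ti_clk_ll_ops->clk_readl(&clk->enable_reg)))
 *		if (_omap4_is_timeout(&timeout, OMAP4_MAX_MODULE_READY_TIME))
 *			return -EBUSY;
 *
 * (sketch only; see _omap4_clkctrl_clk_enable()/_disable() below for the
 * real loops and their error handling)
 */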

static int __init _omap4_disable_early_timeout(void)
{
	_early_timeout = false;

	return 0;
}
arch_initcall(_omap4_disable_early_timeout);

static int _omap4_clkctrl_clk_enable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	u32 val;
	int ret;
	union omap4_timeout timeout = { 0 };

	if (clk->clkdm) {
		ret = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk);
		if (ret) {
			WARN(1,
			     "%s: could not enable %s's clockdomain %s: %d\n",
			     __func__, clk_hw_get_name(hw),
			     clk->clkdm_name, ret);
			return ret;
		}
	}

	if (!clk->enable_bit)
		return 0;

	val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);

	val &= ~OMAP4_MODULEMODE_MASK;
	val |= clk->enable_bit;

	ti_clk_ll_ops->clk_writel(val, &clk->enable_reg);

	if (clk->flags & NO_IDLEST)
		return 0;

	/* Wait until module is enabled */
	while (!_omap4_is_ready(ti_clk_ll_ops->clk_readl(&clk->enable_reg))) {
		if (_omap4_is_timeout(&timeout, OMAP4_MAX_MODULE_READY_TIME)) {
			pr_err("%s: failed to enable\n", clk_hw_get_name(hw));
			return -EBUSY;
		}
	}

	return 0;
}

static void _omap4_clkctrl_clk_disable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	u32 val;
	union omap4_timeout timeout = { 0 };

	if (!clk->enable_bit)
		goto exit;

	val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);

	val &= ~OMAP4_MODULEMODE_MASK;

	ti_clk_ll_ops->clk_writel(val, &clk->enable_reg);

	if (clk->flags & NO_IDLEST)
		goto exit;

	/* Wait until module is disabled */
	while (!_omap4_is_idle(ti_clk_ll_ops->clk_readl(&clk->enable_reg))) {
		if (_omap4_is_timeout(&timeout,
				      OMAP4_MAX_MODULE_DISABLE_TIME)) {
			pr_err("%s: failed to disable\n", clk_hw_get_name(hw));
			break;
		}
	}

exit:
	if (clk->clkdm)
		ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk);
}

static int _omap4_clkctrl_clk_is_enabled(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	u32 val;

	val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);

	if (val & clk->enable_bit)
		return 1;

	return 0;
}

static const struct clk_ops omap4_clkctrl_clk_ops = {
	.enable		= _omap4_clkctrl_clk_enable,
	.disable	= _omap4_clkctrl_clk_disable,
	.is_enabled	= _omap4_clkctrl_clk_is_enabled,
	.init		= omap2_init_clk_clkdm,
};
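
/*
 * These ops back the main module clock of each clkctrl register (the
 * MODULEMODE field); the optional gate/divider/mux subclocks parsed from
 * the bit_data tables below use the generic TI gate/divider/mux ops
 * instead.
 */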

static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec,
					      void *data)
{
	struct omap_clkctrl_provider *provider = data;
	struct omap_clkctrl_clk *entry;
	bool found = false;

	if (clkspec->args_count != 2)
		return ERR_PTR(-EINVAL);

	pr_debug("%s: looking for %x:%x\n", __func__,
		 clkspec->args[0], clkspec->args[1]);

	list_for_each_entry(entry, &provider->clocks, node) {
		if (entry->reg_offset == clkspec->args[0] &&
		    entry->bit_offset == clkspec->args[1]) {
			found = true;
			break;
		}
	}

	if (!found)
		return ERR_PTR(-EINVAL);

	return entry->clk;
}
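
/*
 * Consumers reference these clocks with a two-cell specifier: the clkctrl
 * register offset and the bit offset within it (0 selects the main module
 * clock).  Roughly, with an illustrative label and offsets:
 *
 *	clocks = <&l4per_clkctrl 0x140 8>;
 */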

static int __init
_ti_clkctrl_clk_register(struct omap_clkctrl_provider *provider,
			 struct device_node *node, struct clk_hw *clk_hw,
			 u16 offset, u8 bit, const char * const *parents,
			 int num_parents, const struct clk_ops *ops)
{
	struct clk_init_data init = { NULL };
	struct clk *clk;
	struct omap_clkctrl_clk *clkctrl_clk;
	int ret = 0;

	if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
		init.name = kasprintf(GFP_KERNEL, "%pOFn:%pOFn:%04x:%d",
				      node->parent, node, offset,
				      bit);
	else
		init.name = kasprintf(GFP_KERNEL, "%pOFn:%04x:%d", node,
				      offset, bit);
	clkctrl_clk = kzalloc(sizeof(*clkctrl_clk), GFP_KERNEL);
	if (!init.name || !clkctrl_clk) {
		ret = -ENOMEM;
		goto cleanup;
	}

	clk_hw->init = &init;
	init.parent_names = parents;
	init.num_parents = num_parents;
	init.ops = ops;
	init.flags = 0;

	clk = ti_clk_register(NULL, clk_hw, init.name);
	if (IS_ERR_OR_NULL(clk)) {
		ret = -EINVAL;
		goto cleanup;
	}

	clkctrl_clk->reg_offset = offset;
	clkctrl_clk->bit_offset = bit;
	clkctrl_clk->clk = clk_hw;

	list_add(&clkctrl_clk->node, &provider->clocks);

	return 0;

cleanup:
	kfree(init.name);
	kfree(clkctrl_clk);
	return ret;
}
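
/*
 * The generated subclock names follow the kasprintf() formats above, e.g.
 * (node names and offsets illustrative) "l4per-cm:l4per-clkctrl:0068:8"
 * in legacy/compat mode and "l4per-clkctrl:0068:8" otherwise.
 */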

static void __init
_ti_clkctrl_setup_gate(struct omap_clkctrl_provider *provider,
		       struct device_node *node, u16 offset,
		       const struct omap_clkctrl_bit_data *data,
		       void __iomem *reg)
{
	struct clk_hw_omap *clk_hw;

	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
	if (!clk_hw)
		return;

	clk_hw->enable_bit = data->bit;
	clk_hw->enable_reg.ptr = reg;

	if (_ti_clkctrl_clk_register(provider, node, &clk_hw->hw, offset,
				     data->bit, data->parents, 1,
				     &omap_gate_clk_ops))
		kfree(clk_hw);
}

static void __init
_ti_clkctrl_setup_mux(struct omap_clkctrl_provider *provider,
		      struct device_node *node, u16 offset,
		      const struct omap_clkctrl_bit_data *data,
		      void __iomem *reg)
{
	struct clk_omap_mux *mux;
	int num_parents = 0;
	const char * const *pname;

	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return;

	pname = data->parents;
	while (*pname) {
		num_parents++;
		pname++;
	}

	mux->mask = num_parents;
	if (!(mux->flags & CLK_MUX_INDEX_ONE))
		mux->mask--;

	mux->mask = (1 << fls(mux->mask)) - 1;
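
	/*
	 * Example (illustrative): three parents without CLK_MUX_INDEX_ONE
	 * give mask = 3 - 1 = 2, and (1 << fls(2)) - 1 = 0x3, i.e. a
	 * two-bit selector field starting at data->bit.
	 */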

	mux->shift = data->bit;
	mux->reg.ptr = reg;

	if (_ti_clkctrl_clk_register(provider, node, &mux->hw, offset,
				     data->bit, data->parents, num_parents,
				     &ti_clk_mux_ops))
		kfree(mux);
}

static void __init
_ti_clkctrl_setup_div(struct omap_clkctrl_provider *provider,
		      struct device_node *node, u16 offset,
		      const struct omap_clkctrl_bit_data *data,
		      void __iomem *reg)
{
	struct clk_omap_divider *div;
	const struct omap_clkctrl_div_data *div_data = data->data;
	u8 div_flags = 0;

	div = kzalloc(sizeof(*div), GFP_KERNEL);
	if (!div)
		return;

	div->reg.ptr = reg;
	div->shift = data->bit;
	div->flags = div_data->flags;

	if (div->flags & CLK_DIVIDER_POWER_OF_TWO)
		div_flags |= CLKF_INDEX_POWER_OF_TWO;

	if (ti_clk_parse_divider_data((int *)div_data->dividers, 0,
				      div_data->max_div, div_flags,
				      &div->width, &div->table)) {
		pr_err("%s: Data parsing for %pOF:%04x:%d failed\n", __func__,
		       node, offset, data->bit);
		kfree(div);
		return;
	}

	if (_ti_clkctrl_clk_register(provider, node, &div->hw, offset,
				     data->bit, data->parents, 1,
				     &ti_clk_divider_ops))
		kfree(div);
}

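/*
 * Register the optional gate/divider/mux subclocks described by the
 * bit_data array attached to one clkctrl register.  The array is
 * terminated by an entry with bit == 0; bit 0 itself is the main module
 * clock, registered separately in _ti_omap4_clkctrl_setup().
 */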
static void __init
_ti_clkctrl_setup_subclks(struct omap_clkctrl_provider *provider,
			  struct device_node *node,
			  const struct omap_clkctrl_reg_data *data,
			  void __iomem *reg)
{
	const struct omap_clkctrl_bit_data *bits = data->bit_data;

	if (!bits)
		return;

	while (bits->bit) {
		switch (bits->type) {
		case TI_CLK_GATE:
			_ti_clkctrl_setup_gate(provider, node, data->offset,
					       bits, reg);
			break;

		case TI_CLK_DIVIDER:
			_ti_clkctrl_setup_div(provider, node, data->offset,
					      bits, reg);
			break;

		case TI_CLK_MUX:
			_ti_clkctrl_setup_mux(provider, node, data->offset,
					      bits, reg);
			break;

		default:
			pr_err("%s: bad subclk type: %d\n", __func__,
			       bits->type);
			return;
		}
		bits++;
	}
}

static void __init _clkctrl_add_provider(void *data,
					 struct device_node *np)
{
	of_clk_add_hw_provider(np, _ti_omap4_clkctrl_xlate, data);
}

static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
{
	struct omap_clkctrl_provider *provider;
	const struct omap_clkctrl_data *data = default_clkctrl_data;
	const struct omap_clkctrl_reg_data *reg_data;
	struct clk_init_data init = { NULL };
	struct clk_hw_omap *hw;
	struct clk *clk;
	struct omap_clkctrl_clk *clkctrl_clk;
	const __be32 *addrp;
	u32 addr;
	int ret;
	char *c;
	u16 soc_mask = 0;

	if (!(ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT) &&
	    of_node_name_eq(node, "clk"))
		ti_clk_features.flags |= TI_CLK_CLKCTRL_COMPAT;

	addrp = of_get_address(node, 0, NULL, NULL);
	addr = (u32)of_translate_address(node, addrp);

#ifdef CONFIG_ARCH_OMAP4
	if (of_machine_is_compatible("ti,omap4"))
		data = omap4_clkctrl_data;
#endif
#ifdef CONFIG_SOC_OMAP5
	if (of_machine_is_compatible("ti,omap5"))
		data = omap5_clkctrl_data;
#endif
#ifdef CONFIG_SOC_DRA7XX
	if (of_machine_is_compatible("ti,dra7")) {
		if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
			data = dra7_clkctrl_compat_data;
		else
			data = dra7_clkctrl_data;
	}

	if (of_machine_is_compatible("ti,dra72"))
		soc_mask = CLKF_SOC_DRA72;
	if (of_machine_is_compatible("ti,dra74"))
		soc_mask = CLKF_SOC_DRA74;
	if (of_machine_is_compatible("ti,dra76"))
		soc_mask = CLKF_SOC_DRA76;
#endif
#ifdef CONFIG_SOC_AM33XX
	if (of_machine_is_compatible("ti,am33xx")) {
		if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
			data = am3_clkctrl_compat_data;
		else
			data = am3_clkctrl_data;
	}
#endif
#ifdef CONFIG_SOC_AM43XX
	if (of_machine_is_compatible("ti,am4372")) {
		if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
			data = am4_clkctrl_compat_data;
		else
			data = am4_clkctrl_data;
	}

	if (of_machine_is_compatible("ti,am438x")) {
		if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
			data = am438x_clkctrl_compat_data;
		else
			data = am438x_clkctrl_data;
	}
#endif
#ifdef CONFIG_SOC_TI81XX
	if (of_machine_is_compatible("ti,dm814"))
		data = dm814_clkctrl_data;

	if (of_machine_is_compatible("ti,dm816"))
		data = dm816_clkctrl_data;
#endif

	if (ti_clk_get_features()->flags & TI_CLK_DEVICE_TYPE_GP)
		soc_mask |= CLKF_SOC_NONSEC;

	while (data->addr) {
		if (addr == data->addr)
			break;

		data++;
	}

	if (!data->addr) {
		pr_err("%pOF not found from clkctrl data.\n", node);
		return;
	}

	provider = kzalloc(sizeof(*provider), GFP_KERNEL);
	if (!provider)
		return;

	provider->base = of_iomap(node, 0);
	if (!provider->base) {
		kfree(provider);
		return;
	}

	if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT) {
		provider->clkdm_name = kasprintf(GFP_KERNEL, "%pOFnxxx", node->parent);
		if (!provider->clkdm_name) {
			kfree(provider);
			return;
		}

		/*
		 * Create default clkdm name, replace _cm from end of parent
		 * node name with _clkdm
		 */
		provider->clkdm_name[strlen(provider->clkdm_name) - 5] = 0;
	} else {
		provider->clkdm_name = kasprintf(GFP_KERNEL, "%pOFn", node);
		if (!provider->clkdm_name) {
			kfree(provider);
			return;
		}

		/*
		 * Create default clkdm name, replace _clkctrl from end of
		 * node name with _clkdm
		 */
		provider->clkdm_name[strlen(provider->clkdm_name) - 7] = 0;
	}

	strcat(provider->clkdm_name, "clkdm");

	/* Replace any dash from the clkdm name with underscore */
	c = provider->clkdm_name;

	while (*c) {
		if (*c == '-')
			*c = '_';
		c++;
	}
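
	/*
	 * At this point provider->clkdm_name holds the default clockdomain
	 * name, e.g. (node names illustrative) "l4per_clkdm" for a
	 * "l4per-clkctrl" node, or for a "l4per_cm" parent in compat mode.
	 */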

	INIT_LIST_HEAD(&provider->clocks);

	/* Generate clocks */
	reg_data = data->regs;

	while (reg_data->parent) {
		if ((reg_data->flags & CLKF_SOC_MASK) &&
		    (reg_data->flags & soc_mask) == 0) {
			reg_data++;
			continue;
		}

		hw = kzalloc(sizeof(*hw), GFP_KERNEL);
		if (!hw)
			return;

		hw->enable_reg.ptr = provider->base + reg_data->offset;

		_ti_clkctrl_setup_subclks(provider, node, reg_data,
					  hw->enable_reg.ptr);

		if (reg_data->flags & CLKF_SW_SUP)
			hw->enable_bit = MODULEMODE_SWCTRL;
		if (reg_data->flags & CLKF_HW_SUP)
			hw->enable_bit = MODULEMODE_HWCTRL;
		if (reg_data->flags & CLKF_NO_IDLEST)
			hw->flags |= NO_IDLEST;

		if (reg_data->clkdm_name)
			hw->clkdm_name = reg_data->clkdm_name;
		else
			hw->clkdm_name = provider->clkdm_name;

		init.parent_names = &reg_data->parent;
		init.num_parents = 1;
		init.flags = 0;
		if (reg_data->flags & CLKF_SET_RATE_PARENT)
			init.flags |= CLK_SET_RATE_PARENT;
		if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
			init.name = kasprintf(GFP_KERNEL, "%pOFn:%pOFn:%04x:%d",
					      node->parent, node,
					      reg_data->offset, 0);
		else
			init.name = kasprintf(GFP_KERNEL, "%pOFn:%04x:%d",
					      node, reg_data->offset, 0);
		clkctrl_clk = kzalloc(sizeof(*clkctrl_clk), GFP_KERNEL);
		if (!init.name || !clkctrl_clk)
			goto cleanup;

		init.ops = &omap4_clkctrl_clk_ops;
		hw->hw.init = &init;

		clk = ti_clk_register(NULL, &hw->hw, init.name);
		if (IS_ERR_OR_NULL(clk))
			goto cleanup;

		clkctrl_clk->reg_offset = reg_data->offset;
		clkctrl_clk->clk = &hw->hw;

		list_add(&clkctrl_clk->node, &provider->clocks);

		reg_data++;
	}

	ret = of_clk_add_hw_provider(node, _ti_omap4_clkctrl_xlate, provider);
	if (ret == -EPROBE_DEFER)
		ti_clk_retry_init(node, provider, _clkctrl_add_provider);

	return;

cleanup:
	kfree(hw);
	kfree(init.name);
	kfree(clkctrl_clk);
}
CLK_OF_DECLARE(ti_omap4_clkctrl_clock, "ti,clkctrl",
	       _ti_omap4_clkctrl_setup);
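
/*
 * A matching provider node looks roughly like this (label, unit address
 * and register size illustrative):
 *
 *	l4per_clkctrl: clock@20 {
 *		compatible = "ti,clkctrl";
 *		reg = <0x20 0x1b0>;
 *		#clock-cells = <2>;
 *	};
 */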