xref: /linux/drivers/clk/imx/clk-busy.c (revision f4738f56d1dc62aaba69b33702a5ab098f1b8c63)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2012 Freescale Semiconductor, Inc.
 * Copyright 2012 Linaro Ltd.
 */

#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/err.h>
#include "clk.h"

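/*
 * clk_busy_wait - poll a busy/handshake bit until the hardware clears it
 * @reg:	busy status register
 * @shift:	bit position of the busy flag in @reg
 *
 * Returns 0 once the bit is clear, or -ETIMEDOUT after about 10 ms.
 */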
static int clk_busy_wait(void __iomem *reg, u8 shift)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(10);

	while (readl_relaxed(reg) & (1 << shift))
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

	return 0;
}

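/*
 * struct clk_busy_divider - divider whose rate changes are handshaked
 * @div:	the underlying clk_divider
 * @div_ops:	standard divider ops this wrapper delegates to
 * @reg:	busy status register
 * @shift:	bit position of the busy flag in @reg
 */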
struct clk_busy_divider {
	struct clk_divider div;
	const struct clk_ops *div_ops;
	void __iomem *reg;
	u8 shift;
};

static inline struct clk_busy_divider *to_clk_busy_divider(struct clk_hw *hw)
{
	struct clk_divider *div = to_clk_divider(hw);

	return container_of(div, struct clk_busy_divider, div);
}

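/*
 * The divider ops below simply forward to the generic clk_divider ops;
 * set_rate additionally waits for the busy bit to clear so that the new
 * divider setting has actually taken effect before returning.
 */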
static unsigned long clk_busy_divider_recalc_rate(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct clk_busy_divider *busy = to_clk_busy_divider(hw);

	return busy->div_ops->recalc_rate(&busy->div.hw, parent_rate);
}

static long clk_busy_divider_round_rate(struct clk_hw *hw, unsigned long rate,
					unsigned long *prate)
{
	struct clk_busy_divider *busy = to_clk_busy_divider(hw);

	return busy->div_ops->round_rate(&busy->div.hw, rate, prate);
}

static int clk_busy_divider_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct clk_busy_divider *busy = to_clk_busy_divider(hw);
	int ret;

	ret = busy->div_ops->set_rate(&busy->div.hw, rate, parent_rate);
	if (!ret)
		ret = clk_busy_wait(busy->reg, busy->shift);

	return ret;
}

static const struct clk_ops clk_busy_divider_ops = {
	.recalc_rate = clk_busy_divider_recalc_rate,
	.round_rate = clk_busy_divider_round_rate,
	.set_rate = clk_busy_divider_set_rate,
};

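/*
 * imx_clk_hw_busy_divider - register a divider that waits for a
 *			     busy/handshake bit after each rate change
 * @name:	clock name
 * @parent_name: parent clock name
 * @reg:	divider register
 * @shift:	divider field offset in @reg
 * @width:	divider field width in @reg
 * @busy_reg:	busy status register
 * @busy_shift:	busy flag bit in @busy_reg
 *
 * Returns the registered clk_hw on success or an ERR_PTR() on failure.
 *
 * Illustrative call from an SoC clock driver (the name, index and register
 * offsets below are placeholders, not taken from any real SoC):
 *
 *	hws[CLK_AHB] = imx_clk_hw_busy_divider("ahb", "periph",
 *					       base + 0x14, 10, 3,
 *					       base + 0x48, 1);
 */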
struct clk_hw *imx_clk_hw_busy_divider(const char *name, const char *parent_name,
				 void __iomem *reg, u8 shift, u8 width,
				 void __iomem *busy_reg, u8 busy_shift)
{
	struct clk_busy_divider *busy;
	struct clk_hw *hw;
	struct clk_init_data init;
	int ret;

	busy = kzalloc(sizeof(*busy), GFP_KERNEL);
	if (!busy)
		return ERR_PTR(-ENOMEM);

	busy->reg = busy_reg;
	busy->shift = busy_shift;

	busy->div.reg = reg;
	busy->div.shift = shift;
	busy->div.width = width;
	busy->div.lock = &imx_ccm_lock;
	busy->div_ops = &clk_divider_ops;

	init.name = name;
	init.ops = &clk_busy_divider_ops;
	init.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	busy->div.hw.init = &init;

	hw = &busy->div.hw;

	ret = clk_hw_register(NULL, hw);
	if (ret) {
		kfree(busy);
		return ERR_PTR(ret);
	}

	return hw;
}

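/*
 * struct clk_busy_mux - mux whose parent switches are handshaked
 * @mux:	the underlying clk_mux
 * @mux_ops:	standard mux ops this wrapper delegates to
 * @reg:	busy status register
 * @shift:	bit position of the busy flag in @reg
 */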
struct clk_busy_mux {
	struct clk_mux mux;
	const struct clk_ops *mux_ops;
	void __iomem *reg;
	u8 shift;
};

static inline struct clk_busy_mux *to_clk_busy_mux(struct clk_hw *hw)
{
	struct clk_mux *mux = to_clk_mux(hw);

	return container_of(mux, struct clk_busy_mux, mux);
}

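/*
 * As with the divider above, these ops forward to the generic clk_mux
 * ops; set_parent additionally waits for the busy bit to clear so the
 * mux has really switched before returning.
 */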
static u8 clk_busy_mux_get_parent(struct clk_hw *hw)
{
	struct clk_busy_mux *busy = to_clk_busy_mux(hw);

	return busy->mux_ops->get_parent(&busy->mux.hw);
}

static int clk_busy_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_busy_mux *busy = to_clk_busy_mux(hw);
	int ret;

	ret = busy->mux_ops->set_parent(&busy->mux.hw, index);
	if (!ret)
		ret = clk_busy_wait(busy->reg, busy->shift);

	return ret;
}

static const struct clk_ops clk_busy_mux_ops = {
	.determine_rate = clk_hw_determine_rate_no_reparent,
	.get_parent = clk_busy_mux_get_parent,
	.set_parent = clk_busy_mux_set_parent,
};

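/*
 * imx_clk_hw_busy_mux - register a mux that waits for a busy/handshake
 *			 bit after each parent switch
 * @name:	clock name
 * @reg:	mux register
 * @shift:	mux field offset in @reg
 * @width:	mux field width in @reg
 * @busy_reg:	busy status register
 * @busy_shift:	busy flag bit in @busy_reg
 * @parent_names: array of parent clock names
 * @num_parents: number of entries in @parent_names
 *
 * Returns the registered clk_hw on success or an ERR_PTR() on failure.
 *
 * Illustrative call (names, indices and offsets are placeholders only):
 *
 *	hws[CLK_PERIPH] = imx_clk_hw_busy_mux("periph", base + 0x14, 25, 1,
 *					      base + 0x48, 5, periph_sels,
 *					      ARRAY_SIZE(periph_sels));
 */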
struct clk_hw *imx_clk_hw_busy_mux(const char *name, void __iomem *reg, u8 shift,
			     u8 width, void __iomem *busy_reg, u8 busy_shift,
			     const char * const *parent_names, int num_parents)
{
	struct clk_busy_mux *busy;
	struct clk_hw *hw;
	struct clk_init_data init;
	int ret;

	busy = kzalloc(sizeof(*busy), GFP_KERNEL);
	if (!busy)
		return ERR_PTR(-ENOMEM);

	busy->reg = busy_reg;
	busy->shift = busy_shift;

	busy->mux.reg = reg;
	busy->mux.shift = shift;
	busy->mux.mask = BIT(width) - 1;
	busy->mux.lock = &imx_ccm_lock;
	busy->mux_ops = &clk_mux_ops;

	init.name = name;
	init.ops = &clk_busy_mux_ops;
	init.flags = CLK_IS_CRITICAL;
	init.parent_names = parent_names;
	init.num_parents = num_parents;

	busy->mux.hw.init = &init;

	hw = &busy->mux.hw;

	ret = clk_hw_register(NULL, hw);
	if (ret) {
		kfree(busy);
		return ERR_PTR(ret);
	}

	return hw;
}