// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018 MediaTek Inc.
 * Author: Owen Chen <owen.chen@mediatek.com>
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/compiler_types.h>
#include <linux/container_of.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

#include "clk-mux.h"

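/*
 * Internal per-clock state: the clk_hw handle, the regmap used for
 * register access, the static mux description provided by the platform
 * data, an optional spinlock (may be NULL) serializing register updates,
 * and a flag recording that the parent was changed so the update bit can
 * be re-triggered when the clock is enabled again.
 */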
struct mtk_clk_mux {
	struct clk_hw hw;
	struct regmap *regmap;
	const struct mtk_mux *data;
	spinlock_t *lock;
	bool reparent;
};

static inline struct mtk_clk_mux *to_mtk_clk_mux(struct clk_hw *hw)
{
	return container_of(hw, struct mtk_clk_mux, hw);
}

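/*
 * Gate handling for muxes with set/clear register pairs: the gate bit is
 * active high (bit set means the clock is gated), so enabling writes the
 * bit to the "clear" register and disabling writes it to the "set"
 * register.
 */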
static int mtk_clk_mux_enable_setclr(struct clk_hw *hw)
{
	struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
	unsigned long flags = 0;

	if (mux->lock)
		spin_lock_irqsave(mux->lock, flags);
	else
		__acquire(mux->lock);

	regmap_write(mux->regmap, mux->data->clr_ofs,
		     BIT(mux->data->gate_shift));

	/*
	 * If the parent has been changed while the clock was disabled, it
	 * will not be effective yet. Set the update bit to ensure the mux
	 * gets updated.
	 */
	if (mux->reparent && mux->data->upd_shift >= 0) {
		regmap_write(mux->regmap, mux->data->upd_ofs,
			     BIT(mux->data->upd_shift));
		mux->reparent = false;
	}

	if (mux->lock)
		spin_unlock_irqrestore(mux->lock, flags);
	else
		__release(mux->lock);

	return 0;
}

static void mtk_clk_mux_disable_setclr(struct clk_hw *hw)
{
	struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);

	regmap_write(mux->regmap, mux->data->set_ofs,
			BIT(mux->data->gate_shift));
}

static int mtk_clk_mux_is_enabled(struct clk_hw *hw)
{
	struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
	u32 val;

	regmap_read(mux->regmap, mux->data->mux_ofs, &val);

	return (val & BIT(mux->data->gate_shift)) == 0;
}

static u8 mtk_clk_mux_get_parent(struct clk_hw *hw)
{
	struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
	u32 mask = GENMASK(mux->data->mux_width - 1, 0);
	u32 val;

	regmap_read(mux->regmap, mux->data->mux_ofs, &val);
	val = (val >> mux->data->mux_shift) & mask;

	return val;
}

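/*
 * Reparenting with set/clear registers: the old selector bits are written
 * to the "clear" register, the new index to the "set" register, and the
 * update bit (if present) is toggled so the hardware latches the new
 * selection. The reparent flag is recorded so the enable path can
 * re-trigger the update bit for a change made while the clock was gated.
 */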
static int mtk_clk_mux_set_parent_setclr_lock(struct clk_hw *hw, u8 index)
{
	struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
	u32 mask = GENMASK(mux->data->mux_width - 1, 0);
	u32 val, orig;
	unsigned long flags = 0;

	if (mux->lock)
		spin_lock_irqsave(mux->lock, flags);
	else
		__acquire(mux->lock);

	regmap_read(mux->regmap, mux->data->mux_ofs, &orig);
	val = (orig & ~(mask << mux->data->mux_shift))
			| (index << mux->data->mux_shift);

	if (val != orig) {
		regmap_write(mux->regmap, mux->data->clr_ofs,
				mask << mux->data->mux_shift);
		regmap_write(mux->regmap, mux->data->set_ofs,
				index << mux->data->mux_shift);

		if (mux->data->upd_shift >= 0) {
			regmap_write(mux->regmap, mux->data->upd_ofs,
					BIT(mux->data->upd_shift));
			mux->reparent = true;
		}
	}

	if (mux->lock)
		spin_unlock_irqrestore(mux->lock, flags);
	else
		__release(mux->lock);

	return 0;
}

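/* Ops for muxes programmed through clear/set/update registers, without a gate bit */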
const struct clk_ops mtk_mux_clr_set_upd_ops = {
	.get_parent = mtk_clk_mux_get_parent,
	.set_parent = mtk_clk_mux_set_parent_setclr_lock,
};
EXPORT_SYMBOL_GPL(mtk_mux_clr_set_upd_ops);

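/* As above, plus a gate bit handled through the same set/clear registers */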
const struct clk_ops mtk_mux_gate_clr_set_upd_ops = {
	.enable = mtk_clk_mux_enable_setclr,
	.disable = mtk_clk_mux_disable_setclr,
	.is_enabled = mtk_clk_mux_is_enabled,
	.get_parent = mtk_clk_mux_get_parent,
	.set_parent = mtk_clk_mux_set_parent_setclr_lock,
};
EXPORT_SYMBOL_GPL(mtk_mux_gate_clr_set_upd_ops);

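/*
 * Allocate an mtk_clk_mux, fill in the clk_init_data from the static mux
 * description and register it with the common clock framework.
 */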
static struct clk_hw *mtk_clk_register_mux(const struct mtk_mux *mux,
				 struct regmap *regmap,
				 spinlock_t *lock)
{
	struct mtk_clk_mux *clk_mux;
	struct clk_init_data init = {};
	int ret;

	clk_mux = kzalloc(sizeof(*clk_mux), GFP_KERNEL);
	if (!clk_mux)
		return ERR_PTR(-ENOMEM);

	init.name = mux->name;
	init.flags = mux->flags | CLK_SET_RATE_PARENT;
	init.parent_names = mux->parent_names;
	init.num_parents = mux->num_parents;
	init.ops = mux->ops;

	clk_mux->regmap = regmap;
	clk_mux->data = mux;
	clk_mux->lock = lock;
	clk_mux->hw.init = &init;

	ret = clk_hw_register(NULL, &clk_mux->hw);
	if (ret) {
		kfree(clk_mux);
		return ERR_PTR(ret);
	}

	return &clk_mux->hw;
}

static void mtk_clk_unregister_mux(struct clk_hw *hw)
{
	struct mtk_clk_mux *mux;

	if (!hw)
		return;

	mux = to_mtk_clk_mux(hw);

	clk_hw_unregister(hw);
	kfree(mux);
}

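/*
 * Register an array of MediaTek muxes against the regmap of @node and
 * store the resulting clk_hw pointers in @clk_data. Already-populated IDs
 * are skipped, and previously registered muxes are unwound on error.
 *
 * Hypothetical usage sketch from a SoC clock driver; mtk_alloc_clk_data()
 * comes from clk-mtk.h, while top_muxes, CLK_TOP_NR_CLK and
 * example_clk_lock are placeholder names:
 *
 *	static DEFINE_SPINLOCK(example_clk_lock);
 *
 *	clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
 *	if (!clk_data)
 *		return -ENOMEM;
 *	ret = mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes),
 *				     node, &example_clk_lock, clk_data);
 *	if (ret)
 *		goto err;
 *	ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
 */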
int mtk_clk_register_muxes(const struct mtk_mux *muxes,
			   int num, struct device_node *node,
			   spinlock_t *lock,
			   struct clk_hw_onecell_data *clk_data)
{
	struct regmap *regmap;
	struct clk_hw *hw;
	int i;

	regmap = device_node_to_regmap(node);
	if (IS_ERR(regmap)) {
		pr_err("Cannot find regmap for %pOF: %pe\n", node, regmap);
		return PTR_ERR(regmap);
	}

	for (i = 0; i < num; i++) {
		const struct mtk_mux *mux = &muxes[i];

		if (!IS_ERR_OR_NULL(clk_data->hws[mux->id])) {
			pr_warn("%pOF: Trying to register duplicate clock ID: %d\n",
				node, mux->id);
			continue;
		}

		hw = mtk_clk_register_mux(mux, regmap, lock);

		if (IS_ERR(hw)) {
			pr_err("Failed to register clk %s: %pe\n", mux->name,
			       hw);
			goto err;
		}

		clk_data->hws[mux->id] = hw;
	}

	return 0;

err:
	while (--i >= 0) {
		const struct mtk_mux *mux = &muxes[i];

		if (IS_ERR_OR_NULL(clk_data->hws[mux->id]))
			continue;

		mtk_clk_unregister_mux(clk_data->hws[mux->id]);
		clk_data->hws[mux->id] = ERR_PTR(-ENOENT);
	}

	return PTR_ERR(hw);
}
EXPORT_SYMBOL_GPL(mtk_clk_register_muxes);

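/*
 * Drop the muxes registered by mtk_clk_register_muxes() in reverse order
 * and mark their slots in @clk_data as -ENOENT.
 */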
void mtk_clk_unregister_muxes(const struct mtk_mux *muxes, int num,
			      struct clk_hw_onecell_data *clk_data)
{
	int i;

	if (!clk_data)
		return;

	for (i = num; i > 0; i--) {
		const struct mtk_mux *mux = &muxes[i - 1];

		if (IS_ERR_OR_NULL(clk_data->hws[mux->id]))
			continue;

		mtk_clk_unregister_mux(clk_data->hws[mux->id]);
		clk_data->hws[mux->id] = ERR_PTR(-ENOENT);
	}
}
EXPORT_SYMBOL_GPL(mtk_clk_unregister_muxes);

/*
 * This clock notifier is called when the frequency of the parent
 * PLL clock is to be changed. The idea is to switch the parent to a
 * stable clock, such as the main oscillator, while the PLL frequency
 * stabilizes.
 */
static int mtk_clk_mux_notifier_cb(struct notifier_block *nb,
				   unsigned long event, void *_data)
{
	struct clk_notifier_data *data = _data;
	struct clk_hw *hw = __clk_get_hw(data->clk);
	struct mtk_mux_nb *mux_nb = to_mtk_mux_nb(nb);
	int ret = 0;

	switch (event) {
	case PRE_RATE_CHANGE:
		mux_nb->original_index = mux_nb->ops->get_parent(hw);
		ret = mux_nb->ops->set_parent(hw, mux_nb->bypass_index);
		break;
	case POST_RATE_CHANGE:
	case ABORT_RATE_CHANGE:
		ret = mux_nb->ops->set_parent(hw, mux_nb->original_index);
		break;
	}

	return notifier_from_errno(ret);
}

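/*
 * Register the bypass notifier above for a mux clock. The caller provides
 * a struct mtk_mux_nb with the mux ops and the index of the stable parent
 * to use while the PLL frequency stabilizes.
 *
 * Hypothetical usage sketch; the mfg names and the bypass index are
 * illustrative only, and bypass_index selects the stable parent to run
 * from during the rate change:
 *
 *	static struct mtk_mux_nb mfg_mux_nb = {
 *		.ops = &mtk_mux_gate_clr_set_upd_ops,
 *		.bypass_index = 0,
 *	};
 *
 *	ret = devm_mtk_clk_mux_notifier_register(dev, mfg_mux_clk,
 *						 &mfg_mux_nb);
 */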
int devm_mtk_clk_mux_notifier_register(struct device *dev, struct clk *clk,
				       struct mtk_mux_nb *mux_nb)
{
	mux_nb->nb.notifier_call = mtk_clk_mux_notifier_cb;

	return devm_clk_notifier_register(dev, clk, &mux_nb->nb);
}
EXPORT_SYMBOL_GPL(devm_mtk_clk_mux_notifier_register);

MODULE_LICENSE("GPL");