// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2014 MundoReader S.L.
 * Author: Heiko Stuebner <heiko@sntech.de>
 *
 * Copyright (c) 2016 Rockchip Electronics Co. Ltd.
 * Author: Xing Zheng <zhengxing@rock-chips.com>
 *
 * based on
 *
 * samsung/clk.c
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 * Copyright (c) 2013 Linaro Ltd.
 * Author: Thomas Abraham <thomas.ab@samsung.com>
 */

#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reboot.h>

#include "../clk-fractional-divider.h"
#include "clk.h"

/*
 * Register a clock branch.
 * Most clock branches have a form like
 *
 * src1 --|--\
 *        |M |--[GATE]-[DIV]-
 * src2 --|--/
 *
 * sometimes without one of those components.
 */
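/*
 * Illustrative sketch, not part of this file: a branch of the shape above is
 * normally described in a SoC driver's branch table with the COMPOSITE()
 * helper from clk.h and handed to rockchip_clk_register_branches().  The id,
 * names, register offsets, shifts, widths and the MFLAGS/DFLAGS/GFLAGS
 * shorthands below are hypothetical per-SoC values, shown only to make the
 * mapping onto the mux/div/gate parameters of the function below concrete:
 *
 *	COMPOSITE(SCLK_EXAMPLE, "sclk_example", mux_example_p, 0,
 *			EXAMPLE_CLKSEL_CON(25), 8, 2, MFLAGS, 0, 7, DFLAGS,
 *			EXAMPLE_CLKGATE_CON(2), 9, GFLAGS),
 */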
static struct clk *rockchip_clk_register_branch(const char *name,
		const char *const *parent_names, u8 num_parents,
		void __iomem *base,
		int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags,
		u32 *mux_table,
		int div_offset, u8 div_shift, u8 div_width, u8 div_flags,
		struct clk_div_table *div_table, int gate_offset,
		u8 gate_shift, u8 gate_flags, unsigned long flags,
		spinlock_t *lock)
{
	struct clk_hw *hw;
	struct clk_mux *mux = NULL;
	struct clk_gate *gate = NULL;
	struct clk_divider *div = NULL;
	const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
			     *gate_ops = NULL;
	int ret;

	if (num_parents > 1) {
		mux = kzalloc(sizeof(*mux), GFP_KERNEL);
		if (!mux)
			return ERR_PTR(-ENOMEM);

		mux->reg = base + muxdiv_offset;
		mux->shift = mux_shift;
		mux->mask = BIT(mux_width) - 1;
		mux->flags = mux_flags;
		mux->table = mux_table;
		mux->lock = lock;
		mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
							: &clk_mux_ops;
	}

	if (gate_offset >= 0) {
		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
		if (!gate) {
			ret = -ENOMEM;
			goto err_gate;
		}

		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	if (div_width > 0) {
		div = kzalloc(sizeof(*div), GFP_KERNEL);
		if (!div) {
			ret = -ENOMEM;
			goto err_div;
		}

		div->flags = div_flags;
		if (div_offset)
			div->reg = base + div_offset;
		else
			div->reg = base + muxdiv_offset;
		div->shift = div_shift;
		div->width = div_width;
		div->lock = lock;
		div->table = div_table;
		div_ops = (div_flags & CLK_DIVIDER_READ_ONLY)
						? &clk_divider_ro_ops
						: &clk_divider_ops;
	}

	hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
				       mux ? &mux->hw : NULL, mux_ops,
				       div ? &div->hw : NULL, div_ops,
				       gate ? &gate->hw : NULL, gate_ops,
				       flags);
	if (IS_ERR(hw)) {
		kfree(div);
		kfree(gate);
		return ERR_CAST(hw);
	}

	return hw->clk;
err_div:
	kfree(gate);
err_gate:
	kfree(mux);
	return ERR_PTR(ret);
}

struct rockchip_clk_frac {
	struct notifier_block			clk_nb;
	struct clk_fractional_divider		div;
	struct clk_gate				gate;

	struct clk_mux				mux;
	const struct clk_ops			*mux_ops;
	int					mux_frac_idx;

	bool					rate_change_remuxed;
	int					rate_change_idx;
};

#define to_rockchip_clk_frac_nb(nb) \
			container_of(nb, struct rockchip_clk_frac, clk_nb)

static int rockchip_clk_frac_notifier_cb(struct notifier_block *nb,
					 unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;
	struct rockchip_clk_frac *frac = to_rockchip_clk_frac_nb(nb);
	struct clk_mux *frac_mux = &frac->mux;
	int ret = 0;

	pr_debug("%s: event %lu, old_rate %lu, new_rate: %lu\n",
		 __func__, event, ndata->old_rate, ndata->new_rate);
	if (event == PRE_RATE_CHANGE) {
		frac->rate_change_idx =
				frac->mux_ops->get_parent(&frac_mux->hw);
		if (frac->rate_change_idx != frac->mux_frac_idx) {
			frac->mux_ops->set_parent(&frac_mux->hw,
						  frac->mux_frac_idx);
			frac->rate_change_remuxed = 1;
		}
	} else if (event == POST_RATE_CHANGE) {
		/*
		 * The POST_RATE_CHANGE notifier runs directly after the
		 * divider clock is set in clk_change_rate, so we'll have
		 * remuxed back to the original parent before clk_change_rate
		 * reaches the mux itself.
		 */
		if (frac->rate_change_remuxed) {
			frac->mux_ops->set_parent(&frac_mux->hw,
						  frac->rate_change_idx);
			frac->rate_change_remuxed = 0;
		}
	}

	return notifier_from_errno(ret);
}

/*
 * The fractional divider requires the denominator to be at least 20 times
 * larger than the numerator to generate a precise clock frequency.
 */
static void rockchip_fractional_approximation(struct clk_hw *hw,
		unsigned long rate, unsigned long *parent_rate,
		unsigned long *m, unsigned long *n)
{
	struct clk_fractional_divider *fd = to_clk_fd(hw);
	unsigned long p_rate, p_parent_rate;
	struct clk_hw *p_parent;

	p_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
	if ((rate * 20 > p_rate) && (p_rate % rate != 0)) {
		p_parent = clk_hw_get_parent(clk_hw_get_parent(hw));
		p_parent_rate = clk_hw_get_rate(p_parent);
		*parent_rate = p_parent_rate;
	}

	fd->flags |= CLK_FRAC_DIVIDER_POWER_OF_TWO_PS;

	clk_fractional_divider_general_approximation(hw, rate, parent_rate, m, n);
}

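/*
 * Worked example with illustrative numbers (not taken from a real SoC
 * configuration): requesting a 12.288 MHz fractional output while the
 * selected mux parent runs at 100 MHz trips the check above, since
 * 12.288 MHz * 20 > 100 MHz and 100 MHz is not an integer multiple of
 * 12.288 MHz.  The approximation is then done against the grandparent rate
 * instead, e.g. a 491.52 MHz PLL output, which restores the required margin
 * (491.52 MHz / 12.288 MHz = 40 >= 20).
 */
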
static struct clk *rockchip_clk_register_frac_branch(
		struct rockchip_clk_provider *ctx, const char *name,
		const char *const *parent_names, u8 num_parents,
		void __iomem *base, int muxdiv_offset, u8 div_flags,
		int gate_offset, u8 gate_shift, u8 gate_flags,
		unsigned long flags, struct rockchip_clk_branch *child,
		spinlock_t *lock)
{
	struct clk_hw *hw;
	struct rockchip_clk_frac *frac;
	struct clk_gate *gate = NULL;
	struct clk_fractional_divider *div = NULL;
	const struct clk_ops *div_ops = NULL, *gate_ops = NULL;

	if (muxdiv_offset < 0)
		return ERR_PTR(-EINVAL);

	if (child && child->branch_type != branch_mux) {
		pr_err("%s: fractional child clock for %s can only be a mux\n",
		       __func__, name);
		return ERR_PTR(-EINVAL);
	}

	frac = kzalloc(sizeof(*frac), GFP_KERNEL);
	if (!frac)
		return ERR_PTR(-ENOMEM);

	if (gate_offset >= 0) {
		gate = &frac->gate;
		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	div = &frac->div;
	div->flags = div_flags;
	div->reg = base + muxdiv_offset;
	div->mshift = 16;
	div->mwidth = 16;
	div->nshift = 0;
	div->nwidth = 16;
	div->lock = lock;
	div->approximation = rockchip_fractional_approximation;
	div_ops = &clk_fractional_divider_ops;

	hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
				       NULL, NULL,
				       &div->hw, div_ops,
				       gate ? &gate->hw : NULL, gate_ops,
				       flags | CLK_SET_RATE_UNGATE);
	if (IS_ERR(hw)) {
		kfree(frac);
		return ERR_CAST(hw);
	}

	if (child) {
		struct clk_mux *frac_mux = &frac->mux;
		struct clk_init_data init;
		struct clk *mux_clk;
		int ret;

		frac->mux_frac_idx = match_string(child->parent_names,
						  child->num_parents, name);
		frac->mux_ops = &clk_mux_ops;
		frac->clk_nb.notifier_call = rockchip_clk_frac_notifier_cb;

		frac_mux->reg = base + child->muxdiv_offset;
		frac_mux->shift = child->mux_shift;
		frac_mux->mask = BIT(child->mux_width) - 1;
		frac_mux->flags = child->mux_flags;
		if (child->mux_table)
			frac_mux->table = child->mux_table;
		frac_mux->lock = lock;
		frac_mux->hw.init = &init;

		init.name = child->name;
		init.flags = child->flags | CLK_SET_RATE_PARENT;
		init.ops = frac->mux_ops;
		init.parent_names = child->parent_names;
		init.num_parents = child->num_parents;

		mux_clk = clk_register(NULL, &frac_mux->hw);
		if (IS_ERR(mux_clk)) {
			kfree(frac);
			return mux_clk;
		}

		rockchip_clk_set_lookup(ctx, mux_clk, child->id);

		/* notifier on the fraction divider to catch rate changes */
		if (frac->mux_frac_idx >= 0) {
			pr_debug("%s: found fractional parent in mux at pos %d\n",
				 __func__, frac->mux_frac_idx);
			ret = clk_notifier_register(hw->clk, &frac->clk_nb);
			if (ret)
				pr_err("%s: failed to register clock notifier for %s\n",
						__func__, name);
		} else {
			pr_warn("%s: could not find %s as parent of %s, rate changes may not work\n",
				__func__, name, child->name);
		}
	}

	return hw->clk;
}

static struct clk *rockchip_clk_register_factor_branch(const char *name,
		const char *const *parent_names, u8 num_parents,
		void __iomem *base, unsigned int mult, unsigned int div,
		int gate_offset, u8 gate_shift, u8 gate_flags,
		unsigned long flags, spinlock_t *lock)
{
	struct clk_hw *hw;
	struct clk_gate *gate = NULL;
	struct clk_fixed_factor *fix = NULL;

	/* without gate, register a simple factor clock */
	if (gate_offset == 0) {
		return clk_register_fixed_factor(NULL, name,
				parent_names[0], flags, mult,
				div);
	}

	gate = kzalloc(sizeof(*gate), GFP_KERNEL);
	if (!gate)
		return ERR_PTR(-ENOMEM);

	gate->flags = gate_flags;
	gate->reg = base + gate_offset;
	gate->bit_idx = gate_shift;
	gate->lock = lock;

	fix = kzalloc(sizeof(*fix), GFP_KERNEL);
	if (!fix) {
		kfree(gate);
		return ERR_PTR(-ENOMEM);
	}

	fix->mult = mult;
	fix->div = div;

	hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
				       NULL, NULL,
				       &fix->hw, &clk_fixed_factor_ops,
				       &gate->hw, &clk_gate_ops, flags);
	if (IS_ERR(hw)) {
		kfree(fix);
		kfree(gate);
		return ERR_CAST(hw);
	}

	return hw->clk;
}

static struct rockchip_clk_provider *rockchip_clk_init_base(
		struct device_node *np, void __iomem *base,
		unsigned long nr_clks, bool has_late_clocks)
{
	struct rockchip_clk_provider *ctx;
	struct clk **clk_table;
	struct clk *default_clk_val;
	int i;

	default_clk_val = ERR_PTR(has_late_clocks ? -EPROBE_DEFER : -ENOENT);

	ctx = kzalloc(sizeof(struct rockchip_clk_provider), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	clk_table = kcalloc(nr_clks, sizeof(struct clk *), GFP_KERNEL);
	if (!clk_table)
		goto err_free;

	for (i = 0; i < nr_clks; ++i)
		clk_table[i] = default_clk_val;

	ctx->reg_base = base;
	ctx->clk_data.clks = clk_table;
	ctx->clk_data.clk_num = nr_clks;
	ctx->cru_node = np;
	spin_lock_init(&ctx->lock);

	hash_init(ctx->aux_grf_table);

	ctx->grf = syscon_regmap_lookup_by_phandle(ctx->cru_node,
						   "rockchip,grf");

	return ctx;

err_free:
	kfree(ctx);
	return ERR_PTR(-ENOMEM);
}

struct rockchip_clk_provider *rockchip_clk_init(struct device_node *np,
						void __iomem *base,
						unsigned long nr_clks)
{
	return rockchip_clk_init_base(np, base, nr_clks, false);
}
EXPORT_SYMBOL_GPL(rockchip_clk_init);

struct rockchip_clk_provider *rockchip_clk_init_early(struct device_node *np,
						      void __iomem *base,
						      unsigned long nr_clks)
{
	return rockchip_clk_init_base(np, base, nr_clks, true);
}
EXPORT_SYMBOL_GPL(rockchip_clk_init_early);

void rockchip_clk_finalize(struct rockchip_clk_provider *ctx)
{
	int i;

	for (i = 0; i < ctx->clk_data.clk_num; ++i)
		if (ctx->clk_data.clks[i] == ERR_PTR(-EPROBE_DEFER))
			ctx->clk_data.clks[i] = ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL_GPL(rockchip_clk_finalize);

void rockchip_clk_of_add_provider(struct device_node *np,
				  struct rockchip_clk_provider *ctx)
{
	if (of_clk_add_provider(np, of_clk_src_onecell_get,
				&ctx->clk_data))
		pr_err("%s: could not register clk provider\n", __func__);
}
EXPORT_SYMBOL_GPL(rockchip_clk_of_add_provider);

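/*
 * Typical call sequence in a SoC clock driver, sketched for orientation
 * ("np", "reg_base", NR_CLKS, "plls", "branches" and "grf_lock_offset" are
 * placeholders for the driver's own resources and tables, not symbols
 * defined here):
 *
 *	ctx = rockchip_clk_init(np, reg_base, NR_CLKS);
 *	rockchip_clk_register_plls(ctx, plls, ARRAY_SIZE(plls), grf_lock_offset);
 *	rockchip_clk_register_branches(ctx, branches, ARRAY_SIZE(branches));
 *	rockchip_clk_of_add_provider(np, ctx);
 */
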
void rockchip_clk_register_plls(struct rockchip_clk_provider *ctx,
				struct rockchip_pll_clock *list,
				unsigned int nr_pll, int grf_lock_offset)
{
	struct clk *clk;
	int idx;

	for (idx = 0; idx < nr_pll; idx++, list++) {
		clk = rockchip_clk_register_pll(ctx, list->type, list->name,
				list->parent_names, list->num_parents,
				list->con_offset, grf_lock_offset,
				list->lock_shift, list->mode_offset,
				list->mode_shift, list->rate_table,
				list->flags, list->pll_flags);
		if (IS_ERR(clk)) {
			pr_err("%s: failed to register clock %s\n", __func__,
				list->name);
			continue;
		}

		rockchip_clk_set_lookup(ctx, clk, list->id);
	}
}
EXPORT_SYMBOL_GPL(rockchip_clk_register_plls);

unsigned long rockchip_clk_find_max_clk_id(struct rockchip_clk_branch *list,
					   unsigned int nr_clk)
{
	unsigned long max = 0;
	unsigned int idx;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		if (list->id > max)
			max = list->id;
		if (list->child && list->child->id > max)
			max = list->child->id;
	}

	return max;
}
EXPORT_SYMBOL_GPL(rockchip_clk_find_max_clk_id);

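/*
 * Sketch of the intended use: a SoC driver can size its clock table from the
 * branch descriptions instead of hardcoding a count, roughly
 *
 *	nr_clks = rockchip_clk_find_max_clk_id(clk_branches,
 *					       ARRAY_SIZE(clk_branches)) + 1;
 *
 * where "clk_branches" stands in for the driver's own branch table and the
 * result is then passed to rockchip_clk_init() or rockchip_clk_init_early().
 */
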
static struct platform_device *rockchip_clk_register_gate_link(
		struct device *parent_dev,
		struct rockchip_clk_provider *ctx,
		struct rockchip_clk_branch *clkbr)
{
	struct rockchip_gate_link_platdata gate_link_pdata = {
		.ctx = ctx,
		.clkbr = clkbr,
	};

	struct platform_device_info pdevinfo = {
		.parent = parent_dev,
		.name = "rockchip-gate-link-clk",
		.id = clkbr->id,
		.fwnode = dev_fwnode(parent_dev),
		.of_node_reused = true,
		.data = &gate_link_pdata,
		.size_data = sizeof(gate_link_pdata),
	};

	return platform_device_register_full(&pdevinfo);
}

void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
				    struct rockchip_clk_branch *list,
				    unsigned int nr_clk)
{
	struct regmap *grf = ctx->grf;
	struct rockchip_aux_grf *agrf;
	struct clk *clk;
	unsigned int idx;
	unsigned long flags;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		flags = list->flags;
		clk = NULL;

		/* for GRF-dependent branches, choose the right grf first */
		if ((list->branch_type == branch_grf_mux ||
		     list->branch_type == branch_grf_gate ||
		     list->branch_type == branch_grf_mmc) &&
		    list->grf_type != grf_type_sys) {
			hash_for_each_possible(ctx->aux_grf_table, agrf, node, list->grf_type) {
				if (agrf->type == list->grf_type) {
					grf = agrf->grf;
					break;
				}
			}
		}

		/* catch simple muxes */
		switch (list->branch_type) {
		case branch_mux:
			if (list->mux_table)
				clk = clk_register_mux_table(NULL, list->name,
					list->parent_names, list->num_parents,
					flags,
					ctx->reg_base + list->muxdiv_offset,
					list->mux_shift, list->mux_width,
					list->mux_flags, list->mux_table,
					&ctx->lock);
			else
				clk = clk_register_mux(NULL, list->name,
					list->parent_names, list->num_parents,
					flags,
					ctx->reg_base + list->muxdiv_offset,
					list->mux_shift, list->mux_width,
					list->mux_flags, &ctx->lock);
			break;
		case branch_grf_mux:
			clk = rockchip_clk_register_muxgrf(list->name,
				list->parent_names, list->num_parents,
				flags, grf, list->muxdiv_offset,
				list->mux_shift, list->mux_width,
				list->mux_flags);
			break;
		case branch_divider:
			if (list->div_table)
				clk = clk_register_divider_table(NULL,
					list->name, list->parent_names[0],
					flags,
					ctx->reg_base + list->muxdiv_offset,
					list->div_shift, list->div_width,
					list->div_flags, list->div_table,
					&ctx->lock);
			else
				clk = clk_register_divider(NULL, list->name,
					list->parent_names[0], flags,
					ctx->reg_base + list->muxdiv_offset,
					list->div_shift, list->div_width,
					list->div_flags, &ctx->lock);
			break;
		case branch_fraction_divider:
			clk = rockchip_clk_register_frac_branch(ctx, list->name,
				list->parent_names, list->num_parents,
				ctx->reg_base, list->muxdiv_offset,
				list->div_flags,
				list->gate_offset, list->gate_shift,
				list->gate_flags, flags, list->child,
				&ctx->lock);
			break;
		case branch_half_divider:
			clk = rockchip_clk_register_halfdiv(list->name,
				list->parent_names, list->num_parents,
				ctx->reg_base, list->muxdiv_offset,
				list->mux_shift, list->mux_width,
				list->mux_flags, list->div_shift,
				list->div_width, list->div_flags,
				list->gate_offset, list->gate_shift,
				list->gate_flags, flags, &ctx->lock);
			break;
		case branch_gate:
			flags |= CLK_SET_RATE_PARENT;

			clk = clk_register_gate(NULL, list->name,
				list->parent_names[0], flags,
				ctx->reg_base + list->gate_offset,
				list->gate_shift, list->gate_flags, &ctx->lock);
			break;
		case branch_grf_gate:
			flags |= CLK_SET_RATE_PARENT;
			clk = rockchip_clk_register_gate_grf(list->name,
				list->parent_names[0], flags, grf,
				list->gate_offset, list->gate_shift,
				list->gate_flags);
			break;
		case branch_composite:
			clk = rockchip_clk_register_branch(list->name,
				list->parent_names, list->num_parents,
				ctx->reg_base, list->muxdiv_offset,
				list->mux_shift,
				list->mux_width, list->mux_flags,
				list->mux_table, list->div_offset,
				list->div_shift, list->div_width,
				list->div_flags, list->div_table,
				list->gate_offset, list->gate_shift,
				list->gate_flags, flags, &ctx->lock);
			break;
		case branch_mmc:
			clk = rockchip_clk_register_mmc(
				list->name,
				list->parent_names, list->num_parents,
				ctx->reg_base + list->muxdiv_offset,
				NULL, 0,
				list->div_shift
			);
			break;
		case branch_grf_mmc:
			clk = rockchip_clk_register_mmc(
				list->name,
				list->parent_names, list->num_parents,
				NULL,
				grf, list->muxdiv_offset,
				list->div_shift
			);
			break;
		case branch_inverter:
			clk = rockchip_clk_register_inverter(
				list->name, list->parent_names,
				list->num_parents,
				ctx->reg_base + list->muxdiv_offset,
				list->div_shift, list->div_flags, &ctx->lock);
			break;
		case branch_factor:
			clk = rockchip_clk_register_factor_branch(
				list->name, list->parent_names,
				list->num_parents, ctx->reg_base,
				list->div_shift, list->div_width,
				list->gate_offset, list->gate_shift,
				list->gate_flags, flags, &ctx->lock);
			break;
		case branch_ddrclk:
			clk = rockchip_clk_register_ddrclk(
				list->name, list->flags,
				list->parent_names, list->num_parents,
				list->muxdiv_offset, list->mux_shift,
				list->mux_width, list->div_shift,
				list->div_width, list->div_flags,
				ctx->reg_base, &ctx->lock);
			break;
		case branch_linked_gate:
			/* must be registered late, fall-through for error message */
			break;
		}

		/* none of the cases above matched */
		if (!clk) {
			pr_err("%s: unknown clock type %d\n",
			       __func__, list->branch_type);
			continue;
		}

		if (IS_ERR(clk)) {
			pr_err("%s: failed to register clock %s: %ld\n",
			       __func__, list->name, PTR_ERR(clk));
			continue;
		}

		rockchip_clk_set_lookup(ctx, clk, list->id);
	}
}
EXPORT_SYMBOL_GPL(rockchip_clk_register_branches);

void rockchip_clk_register_late_branches(struct device *dev,
					 struct rockchip_clk_provider *ctx,
					 struct rockchip_clk_branch *list,
					 unsigned int nr_clk)
{
	unsigned int idx;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		struct platform_device *pdev = NULL;

		switch (list->branch_type) {
		case branch_linked_gate:
			pdev = rockchip_clk_register_gate_link(dev, ctx, list);
			break;
		default:
			dev_err(dev, "unknown clock type %d\n", list->branch_type);
			break;
		}

		if (!pdev)
			dev_err(dev, "failed to register device for clock %s\n", list->name);
	}
}
EXPORT_SYMBOL_GPL(rockchip_clk_register_late_branches);

void rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx,
				  unsigned int lookup_id,
				  const char *name, const char *const *parent_names,
				  u8 num_parents,
				  const struct rockchip_cpuclk_reg_data *reg_data,
				  const struct rockchip_cpuclk_rate_table *rates,
				  int nrates)
{
	struct clk *clk;

	clk = rockchip_clk_register_cpuclk(name, parent_names, num_parents,
					   reg_data, rates, nrates,
					   ctx->reg_base, &ctx->lock);
	if (IS_ERR(clk)) {
		pr_err("%s: failed to register clock %s: %ld\n",
		       __func__, name, PTR_ERR(clk));
		return;
	}

	rockchip_clk_set_lookup(ctx, clk, lookup_id);
}
EXPORT_SYMBOL_GPL(rockchip_clk_register_armclk);

void rockchip_clk_protect_critical(const char *const clocks[],
				   int nclocks)
{
	int i;

	/* Protect the clocks that need to stay on */
	for (i = 0; i < nclocks; i++) {
		struct clk *clk = __clk_lookup(clocks[i]);

		clk_prepare_enable(clk);
	}
}
EXPORT_SYMBOL_GPL(rockchip_clk_protect_critical);

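/*
 * Illustrative usage (the "critical_clocks" array is a placeholder for a
 * per-SoC list of clock names that must never be gated):
 *
 *	static const char *const critical_clocks[] __initconst = {
 *		"aclk_cpu",
 *		"hclk_peri",
 *	};
 *	...
 *	rockchip_clk_protect_critical(critical_clocks,
 *				      ARRAY_SIZE(critical_clocks));
 */
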
static void __iomem *rst_base;
static unsigned int reg_restart;
static void (*cb_restart)(void);
static int rockchip_restart_notify(struct notifier_block *this,
				   unsigned long mode, void *cmd)
{
	if (cb_restart)
		cb_restart();

	writel(0xfdb9, rst_base + reg_restart);
	return NOTIFY_DONE;
}

static struct notifier_block rockchip_restart_handler = {
	.notifier_call = rockchip_restart_notify,
	.priority = 128,
};

void
rockchip_register_restart_notifier(struct rockchip_clk_provider *ctx,
				   unsigned int reg,
				   void (*cb)(void))
{
	int ret;

	rst_base = ctx->reg_base;
	reg_restart = reg;
	cb_restart = cb;
	ret = register_restart_handler(&rockchip_restart_handler);
	if (ret)
		pr_err("%s: cannot register restart handler, %d\n",
		       __func__, ret);
}
EXPORT_SYMBOL_GPL(rockchip_register_restart_notifier);
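
/*
 * Illustrative usage (GLB_SRST_FST_OFFSET is a placeholder for the SoC's
 * first global soft-reset register offset within the CRU):
 *
 *	rockchip_register_restart_notifier(ctx, GLB_SRST_FST_OFFSET, NULL);
 *
 * The optional callback lets a SoC driver perform last-minute clock fixups
 * right before the soft reset is triggered.
 */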
773