xref: /linux/drivers/clk/sunxi/clk-sun9i-cpus.c (revision 79790b6818e96c58fe2bffee1b418c16e64e7b80)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Chen-Yu Tsai
 *
 * Chen-Yu Tsai <wens@csie.org>
 *
 * Allwinner A80 CPUS clock driver
 *
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_address.h>

static DEFINE_SPINLOCK(sun9i_a80_cpus_lock);

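/*
 * Register layout, as encoded by the masks below: bits [17:16] select
 * the parent (mux index 3 is PLL4), bits [5:4] hold the post-divider
 * minus one (so /1 to /4), and bits [12:8] hold the PLL4-only
 * pre-divider minus one (so /1 to /32).
 */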
#define SUN9I_CPUS_MAX_PARENTS		4
#define SUN9I_CPUS_MUX_PARENT_PLL4	3
#define SUN9I_CPUS_MUX_SHIFT		16
#define SUN9I_CPUS_MUX_MASK		GENMASK(17, 16)
#define SUN9I_CPUS_MUX_GET_PARENT(reg)	((reg & SUN9I_CPUS_MUX_MASK) >> \
						SUN9I_CPUS_MUX_SHIFT)

#define SUN9I_CPUS_DIV_SHIFT		4
#define SUN9I_CPUS_DIV_MASK		GENMASK(5, 4)
#define SUN9I_CPUS_DIV_GET(reg)		((reg & SUN9I_CPUS_DIV_MASK) >> \
						SUN9I_CPUS_DIV_SHIFT)
#define SUN9I_CPUS_DIV_SET(reg, div)	((reg & ~SUN9I_CPUS_DIV_MASK) | \
						(div << SUN9I_CPUS_DIV_SHIFT))
#define SUN9I_CPUS_PLL4_DIV_SHIFT	8
#define SUN9I_CPUS_PLL4_DIV_MASK	GENMASK(12, 8)
#define SUN9I_CPUS_PLL4_DIV_GET(reg)	((reg & SUN9I_CPUS_PLL4_DIV_MASK) >> \
						SUN9I_CPUS_PLL4_DIV_SHIFT)
#define SUN9I_CPUS_PLL4_DIV_SET(reg, div) ((reg & ~SUN9I_CPUS_PLL4_DIV_MASK) | \
						(div << SUN9I_CPUS_PLL4_DIV_SHIFT))

struct sun9i_a80_cpus_clk {
	struct clk_hw hw;
	void __iomem *reg;
};

#define to_sun9i_a80_cpus_clk(_hw) container_of(_hw, struct sun9i_a80_cpus_clk, hw)

static unsigned long sun9i_a80_cpus_clk_recalc_rate(struct clk_hw *hw,
						    unsigned long parent_rate)
{
	struct sun9i_a80_cpus_clk *cpus = to_sun9i_a80_cpus_clk(hw);
	unsigned long rate;
	u32 reg;

	/* Fetch the register value */
	reg = readl(cpus->reg);

	/* apply pre-divider first if parent is pll4 */
	if (SUN9I_CPUS_MUX_GET_PARENT(reg) == SUN9I_CPUS_MUX_PARENT_PLL4)
		parent_rate /= SUN9I_CPUS_PLL4_DIV_GET(reg) + 1;

	/* clk divider */
	rate = parent_rate / (SUN9I_CPUS_DIV_GET(reg) + 1);

	return rate;
}

static long sun9i_a80_cpus_clk_round(unsigned long rate, u8 *divp, u8 *pre_divp,
				     u8 parent, unsigned long parent_rate)
{
	u8 div, pre_div = 1;

	/*
	 * clock can only divide, so we will never be able to achieve
	 * frequencies higher than the parent frequency
	 */
	if (parent_rate && rate > parent_rate)
		rate = parent_rate;

	div = DIV_ROUND_UP(parent_rate, rate);

	/* calculate pre-divider if parent is pll4 */
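	/*
	 * Split the total divider between the 2-bit post-divider (1-4)
	 * and the 5-bit PLL4 pre-divider (1-32), preferring the
	 * smallest post-divider that lets the pre-divider stay in range.
	 */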
	if (parent == SUN9I_CPUS_MUX_PARENT_PLL4 && div > 4) {
		/* pre-divider is 1 ~ 32 */
		if (div < 32) {
			pre_div = div;
			div = 1;
		} else if (div < 64) {
			pre_div = DIV_ROUND_UP(div, 2);
			div = 2;
		} else if (div < 96) {
			pre_div = DIV_ROUND_UP(div, 3);
			div = 3;
		} else {
			pre_div = DIV_ROUND_UP(div, 4);
			div = 4;
		}
	}

	/* we were asked to pass back divider values */
	if (divp) {
		*divp = div - 1;
		*pre_divp = pre_div - 1;
	}

	return parent_rate / pre_div / div;
}
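
/*
 * Worked example (hypothetical rates, not taken from the A80 manual):
 * with PLL4 at 960 MHz and a requested rate of 100 MHz,
 * div = DIV_ROUND_UP(960, 100) = 10 > 4, so the PLL4 branch above picks
 * pre_div = 10 and div = 1, giving 960 / 10 / 1 = 96 MHz.
 */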

static int sun9i_a80_cpus_clk_determine_rate(struct clk_hw *clk,
					     struct clk_rate_request *req)
{
	struct clk_hw *parent, *best_parent = NULL;
	int i, num_parents;
	unsigned long parent_rate, best = 0, child_rate, best_child_rate = 0;
	unsigned long rate = req->rate;

	/* find the parent that can help provide the fastest rate <= rate */
	num_parents = clk_hw_get_num_parents(clk);
	for (i = 0; i < num_parents; i++) {
		parent = clk_hw_get_parent_by_index(clk, i);
		if (!parent)
			continue;
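		/*
		 * If we may propagate the rate change, ask the parent
		 * what it could provide; otherwise its current rate is
		 * fixed.
		 */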
		if (clk_hw_get_flags(clk) & CLK_SET_RATE_PARENT)
			parent_rate = clk_hw_round_rate(parent, rate);
		else
			parent_rate = clk_hw_get_rate(parent);

		child_rate = sun9i_a80_cpus_clk_round(rate, NULL, NULL, i,
						      parent_rate);

		if (child_rate <= rate && child_rate > best_child_rate) {
			best_parent = parent;
			best = parent_rate;
			best_child_rate = child_rate;
		}
	}

	if (!best_parent)
		return -EINVAL;

	req->best_parent_hw = best_parent;
	req->best_parent_rate = best;
	req->rate = best_child_rate;

	return 0;
}

static int sun9i_a80_cpus_clk_set_rate(struct clk_hw *hw, unsigned long rate,
				       unsigned long parent_rate)
{
	struct sun9i_a80_cpus_clk *cpus = to_sun9i_a80_cpus_clk(hw);
	unsigned long flags;
	u8 div, pre_div, parent;
	u32 reg;

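	/*
	 * The generic mux registered in sun9i_a80_cpus_setup() shares
	 * sun9i_a80_cpus_lock, so this read-modify-write of the divider
	 * fields cannot race with a parent change.
	 */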
	spin_lock_irqsave(&sun9i_a80_cpus_lock, flags);

	reg = readl(cpus->reg);

	/* need to know which parent is used to apply pre-divider */
	parent = SUN9I_CPUS_MUX_GET_PARENT(reg);
	sun9i_a80_cpus_clk_round(rate, &div, &pre_div, parent, parent_rate);

	reg = SUN9I_CPUS_DIV_SET(reg, div);
	reg = SUN9I_CPUS_PLL4_DIV_SET(reg, pre_div);
	writel(reg, cpus->reg);

	spin_unlock_irqrestore(&sun9i_a80_cpus_lock, flags);

	return 0;
}

static const struct clk_ops sun9i_a80_cpus_clk_ops = {
	.determine_rate	= sun9i_a80_cpus_clk_determine_rate,
	.recalc_rate	= sun9i_a80_cpus_clk_recalc_rate,
	.set_rate	= sun9i_a80_cpus_clk_set_rate,
};

/**
 * sun9i_a80_cpus_setup() - Setup function for a80 cpus composite clk
 * @node: &struct device_node for the clock
 */
static void sun9i_a80_cpus_setup(struct device_node *node)
{
	const char *clk_name = node->name;
	const char *parents[SUN9I_CPUS_MAX_PARENTS];
	struct resource res;
	struct sun9i_a80_cpus_clk *cpus;
	struct clk_mux *mux;
	struct clk *clk;
	int ret;

	cpus = kzalloc(sizeof(*cpus), GFP_KERNEL);
	if (!cpus)
		return;

	cpus->reg = of_io_request_and_map(node, 0, of_node_full_name(node));
	if (IS_ERR(cpus->reg))
		goto err_free_cpus;

	of_property_read_string(node, "clock-output-names", &clk_name);

	/* we have a mux, we will have >1 parents */
	ret = of_clk_parent_fill(node, parents, SUN9I_CPUS_MAX_PARENTS);

	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		goto err_unmap;

	/* set up clock properties */
	mux->reg = cpus->reg;
	mux->shift = SUN9I_CPUS_MUX_SHIFT;
	/* un-shifted mask is what mux_clk expects */
	mux->mask = SUN9I_CPUS_MUX_MASK >> SUN9I_CPUS_MUX_SHIFT;
	mux->lock = &sun9i_a80_cpus_lock;

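	/*
	 * Combine the generic mux (parent selection) with the custom
	 * divider ops above into a single composite clock.
	 */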
	clk = clk_register_composite(NULL, clk_name, parents, ret,
				     &mux->hw, &clk_mux_ops,
				     &cpus->hw, &sun9i_a80_cpus_clk_ops,
				     NULL, NULL, 0);
	if (IS_ERR(clk))
		goto err_free_mux;

	ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
	if (ret)
		goto err_unregister;

	return;

err_unregister:
	clk_unregister(clk);
err_free_mux:
	kfree(mux);
err_unmap:
	iounmap(cpus->reg);
	of_address_to_resource(node, 0, &res);
	release_mem_region(res.start, resource_size(&res));
err_free_cpus:
	kfree(cpus);
}
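
/*
 * A hypothetical consumer node (names, address and parent phandles are
 * illustrative placeholders, not taken from a real dtsi; only mux index
 * 3 being PLL4 is dictated by this driver):
 *
 *	cpus_clk: clock@1410 {
 *		compatible = "allwinner,sun9i-a80-cpus-clk";
 *		reg = <0x1410 0x4>;
 *		clocks = <&parent0>, <&parent1>, <&parent2>, <&pll4>;
 *		clock-output-names = "cpus";
 *	};
 */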
CLK_OF_DECLARE(sun9i_a80_cpus, "allwinner,sun9i-a80-cpus-clk",
	       sun9i_a80_cpus_setup);