// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel Atom platform clocks driver for BayTrail and CherryTrail SoCs
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Irina Tirdea <irina.tirdea@intel.com>
 */

#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_data/x86/clk-pmc-atom.h>
#include <linux/platform_data/x86/pmc_atom.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define PLT_CLK_NAME_BASE	"pmc_plt_clk"

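/* A fixed-rate parent clock and its clkdev lookup entry */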
struct clk_plt_fixed {
	struct clk_hw *clk;
	struct clk_lookup *lookup;
};

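/*
 * One PMC-controlled platform clock: gateable and muxable via its
 * clock control register.
 */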
struct clk_plt {
	struct clk_hw hw;
	void __iomem *reg;
	struct clk_lookup *lookup;
	/* protect access to PMC registers */
	spinlock_t lock;
};

#define to_clk_plt(_hw) container_of(_hw, struct clk_plt, hw)

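/* Driver state: fixed-rate parents plus the PMC_CLK_NUM platform clocks */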
struct clk_plt_data {
	struct clk_plt_fixed **parents;
	u8 nparents;
	struct clk_plt *clks[PMC_CLK_NUM];
	struct clk_lookup *mclk_lookup;
	struct clk_lookup *ether_clk_lookup;
};

/* Return an index in parent table */
static inline int plt_reg_to_parent(int reg)
{
	switch (reg & PMC_MASK_CLK_FREQ) {
	default:
	case PMC_CLK_FREQ_XTAL:
		return 0;
	case PMC_CLK_FREQ_PLL:
		return 1;
	}
}

/* Return clk index of parent */
static inline int plt_parent_to_reg(int index)
{
	switch (index) {
	default:
	case 0:
		return PMC_CLK_FREQ_XTAL;
	case 1:
		return PMC_CLK_FREQ_PLL;
	}
}

/* Abstract status in simpler enabled/disabled value */
static inline int plt_reg_to_enabled(int reg)
{
	switch (reg & PMC_MASK_CLK_CTL) {
	case PMC_CLK_CTL_GATED_ON_D3:
	case PMC_CLK_CTL_FORCE_ON:
		return 1;	/* enabled */
	case PMC_CLK_CTL_FORCE_OFF:
	case PMC_CLK_CTL_RESERVED:
	default:
		return 0;	/* disabled */
	}
}

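/* Read-modify-write a clock control register under the per-clock lock */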
static void plt_clk_reg_update(struct clk_plt *clk, u32 mask, u32 val)
{
	u32 tmp;
	unsigned long flags;

	spin_lock_irqsave(&clk->lock, flags);

	tmp = readl(clk->reg);
	tmp = (tmp & ~mask) | (val & mask);
	writel(tmp, clk->reg);

	spin_unlock_irqrestore(&clk->lock, flags);
}

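/*
 * clk_ops callbacks: the mux is driven through the PMC_MASK_CLK_FREQ field,
 * the gate through the PMC_MASK_CLK_CTL field of the control register.
 */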
static int plt_clk_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_plt *clk = to_clk_plt(hw);

	plt_clk_reg_update(clk, PMC_MASK_CLK_FREQ, plt_parent_to_reg(index));

	return 0;
}

static u8 plt_clk_get_parent(struct clk_hw *hw)
{
	struct clk_plt *clk = to_clk_plt(hw);
	u32 value;

	value = readl(clk->reg);

	return plt_reg_to_parent(value);
}

static int plt_clk_enable(struct clk_hw *hw)
{
	struct clk_plt *clk = to_clk_plt(hw);

	plt_clk_reg_update(clk, PMC_MASK_CLK_CTL, PMC_CLK_CTL_FORCE_ON);

	return 0;
}

static void plt_clk_disable(struct clk_hw *hw)
{
	struct clk_plt *clk = to_clk_plt(hw);

	plt_clk_reg_update(clk, PMC_MASK_CLK_CTL, PMC_CLK_CTL_FORCE_OFF);
}

static int plt_clk_is_enabled(struct clk_hw *hw)
{
	struct clk_plt *clk = to_clk_plt(hw);
	u32 value;

	value = readl(clk->reg);

	return plt_reg_to_enabled(value);
}

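/*
 * Each platform clock acts as a gate and as a mux selecting between the
 * XTAL and PLL derived parents.
 */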
static const struct clk_ops plt_clk_ops = {
	.enable = plt_clk_enable,
	.disable = plt_clk_disable,
	.is_enabled = plt_clk_is_enabled,
	.get_parent = plt_clk_get_parent,
	.set_parent = plt_clk_set_parent,
	.determine_rate = __clk_mux_determine_rate,
};

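/*
 * Register platform clock @id with the clk framework and create a clkdev
 * lookup for its "pmc_plt_clk_<id>" name.
 */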
static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id,
					const struct pmc_clk_data *pmc_data,
					const char **parent_names,
					int num_parents)
{
	struct clk_plt *pclk;
	struct clk_init_data init;
	int ret;

	pclk = devm_kzalloc(&pdev->dev, sizeof(*pclk), GFP_KERNEL);
	if (!pclk)
		return ERR_PTR(-ENOMEM);

	init.name = kasprintf(GFP_KERNEL, "%s_%d", PLT_CLK_NAME_BASE, id);
	init.ops = &plt_clk_ops;
	init.flags = 0;
	init.parent_names = parent_names;
	init.num_parents = num_parents;

	pclk->hw.init = &init;
	pclk->reg = pmc_data->base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE;
	spin_lock_init(&pclk->lock);

	/*
	 * On some systems, the pmc_plt_clocks already enabled by the
	 * firmware are being marked as critical to avoid them being
	 * gated by the clock framework.
	 */
	if (pmc_data->critical && plt_clk_is_enabled(&pclk->hw))
		init.flags |= CLK_IS_CRITICAL;

	ret = devm_clk_hw_register(&pdev->dev, &pclk->hw);
	if (ret) {
		pclk = ERR_PTR(ret);
		goto err_free_init;
	}

	pclk->lookup = clkdev_hw_create(&pclk->hw, init.name, NULL);
	if (!pclk->lookup) {
		pclk = ERR_PTR(-ENOMEM);
		goto err_free_init;
	}

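	/*
	 * The name is freed on both the success and error paths; the
	 * registered clk and the clkdev lookup keep their own copies.
	 */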
err_free_init:
	kfree(init.name);
	return pclk;
}

static void plt_clk_unregister(struct clk_plt *pclk)
{
	clkdev_drop(pclk->lookup);
}

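/* Register one fixed-rate parent clock together with its clkdev lookup */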
static struct clk_plt_fixed *plt_clk_register_fixed_rate(struct platform_device *pdev,
						 const char *name,
						 const char *parent_name,
						 unsigned long fixed_rate)
{
	struct clk_plt_fixed *pclk;

	pclk = devm_kzalloc(&pdev->dev, sizeof(*pclk), GFP_KERNEL);
	if (!pclk)
		return ERR_PTR(-ENOMEM);

	pclk->clk = clk_hw_register_fixed_rate(&pdev->dev, name, parent_name,
					       0, fixed_rate);
	if (IS_ERR(pclk->clk))
		return ERR_CAST(pclk->clk);

	pclk->lookup = clkdev_hw_create(pclk->clk, name, NULL);
	if (!pclk->lookup) {
		clk_hw_unregister_fixed_rate(pclk->clk);
		return ERR_PTR(-ENOMEM);
	}

	return pclk;
}

static void plt_clk_unregister_fixed_rate(struct clk_plt_fixed *pclk)
{
	clkdev_drop(pclk->lookup);
	clk_hw_unregister_fixed_rate(pclk->clk);
}

static void plt_clk_unregister_fixed_rate_loop(struct clk_plt_data *data,
					       unsigned int i)
{
	while (i--)
		plt_clk_unregister_fixed_rate(data->parents[i]);
}

static void plt_clk_free_parent_names_loop(const char **parent_names,
					   unsigned int i)
{
	while (i--)
		kfree_const(parent_names[i]);
	kfree(parent_names);
}

static void plt_clk_unregister_loop(struct clk_plt_data *data,
				    unsigned int i)
{
	while (i--)
		plt_clk_unregister(data->clks[i]);
}

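/*
 * Register the fixed-rate parents described in the platform data and
 * return their names for use as the mux parent list.
 */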
static const char **plt_clk_register_parents(struct platform_device *pdev,
					     struct clk_plt_data *data,
					     const struct pmc_clk *clks)
{
	const char **parent_names;
	unsigned int i;
	int err;
	int nparents = 0;

	data->nparents = 0;
	while (clks[nparents].name)
		nparents++;

	data->parents = devm_kcalloc(&pdev->dev, nparents,
				     sizeof(*data->parents), GFP_KERNEL);
	if (!data->parents)
		return ERR_PTR(-ENOMEM);

	parent_names = kcalloc(nparents, sizeof(*parent_names),
			       GFP_KERNEL);
	if (!parent_names)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nparents; i++) {
		data->parents[i] =
			plt_clk_register_fixed_rate(pdev, clks[i].name,
						    clks[i].parent_name,
						    clks[i].freq);
		if (IS_ERR(data->parents[i])) {
			err = PTR_ERR(data->parents[i]);
			goto err_unreg;
		}
		parent_names[i] = kstrdup_const(clks[i].name, GFP_KERNEL);
	}

	data->nparents = nparents;
	return parent_names;

err_unreg:
	plt_clk_unregister_fixed_rate_loop(data, i);
	plt_clk_free_parent_names_loop(parent_names, i);
	return ERR_PTR(err);
}

static void plt_clk_unregister_parents(struct clk_plt_data *data)
{
	plt_clk_unregister_fixed_rate_loop(data, data->nparents);
}

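/*
 * Probe: register the fixed-rate parents, the PMC_CLK_NUM platform clocks
 * and the "mclk"/"ether_clk" consumer aliases.
 */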
static int plt_clk_probe(struct platform_device *pdev)
{
	const struct pmc_clk_data *pmc_data;
	const char **parent_names;
	struct clk_plt_data *data;
	unsigned int i;
	int err;

	pmc_data = dev_get_platdata(&pdev->dev);
	if (!pmc_data || !pmc_data->clks)
		return -EINVAL;

	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	parent_names = plt_clk_register_parents(pdev, data, pmc_data->clks);
	if (IS_ERR(parent_names))
		return PTR_ERR(parent_names);

	for (i = 0; i < PMC_CLK_NUM; i++) {
		data->clks[i] = plt_clk_register(pdev, i, pmc_data,
						 parent_names, data->nparents);
		if (IS_ERR(data->clks[i])) {
			err = PTR_ERR(data->clks[i]);
			goto err_unreg_clk_plt;
		}
	}
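
	/*
	 * Well-known consumer aliases: pmc_plt_clk_3 is looked up as "mclk",
	 * pmc_plt_clk_4 as "ether_clk".
	 */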
	data->mclk_lookup = clkdev_hw_create(&data->clks[3]->hw, "mclk", NULL);
	if (!data->mclk_lookup) {
		err = -ENOMEM;
		goto err_unreg_clk_plt;
	}

	data->ether_clk_lookup = clkdev_hw_create(&data->clks[4]->hw,
						  "ether_clk", NULL);
	if (!data->ether_clk_lookup) {
		err = -ENOMEM;
		goto err_drop_mclk;
	}

	plt_clk_free_parent_names_loop(parent_names, data->nparents);

	platform_set_drvdata(pdev, data);
	return 0;

err_drop_mclk:
	clkdev_drop(data->mclk_lookup);
err_unreg_clk_plt:
	plt_clk_unregister_loop(data, i);
	plt_clk_unregister_parents(data);
	plt_clk_free_parent_names_loop(parent_names, data->nparents);
	return err;
}

static void plt_clk_remove(struct platform_device *pdev)
{
	struct clk_plt_data *data;

	data = platform_get_drvdata(pdev);

	clkdev_drop(data->ether_clk_lookup);
	clkdev_drop(data->mclk_lookup);
	plt_clk_unregister_loop(data, PMC_CLK_NUM);
	plt_clk_unregister_parents(data);
}

static struct platform_driver plt_clk_driver = {
	.driver = {
		.name = "clk-pmc-atom",
	},
	.probe = plt_clk_probe,
	.remove = plt_clk_remove,
};
builtin_platform_driver(plt_clk_driver);
379