// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2020, The Linux Foundation. All rights reserved.
 */

/*
 * Each of the CPU clusters (Power and Perf) on msm8996 is
 * clocked via 2 PLLs, a primary and an alternate. There are also
 * 2 muxes, a primary and a secondary, all connected together
 * as shown below
 *
 *                              +-------+
 *               XO             |       |
 *           +------------------>0      |
 *                              |       |
 *                    PLL/2     | SMUX  +----+
 *                      +------->1      |    |
 *                      |       |       |    |
 *                      |       +-------+    |    +-------+
 *                      |                    +---->0      |
 *                      |                         |       |
 * +---------------+    |             +----------->1      | CPU clk
 * |Primary PLL    +----+ PLL_EARLY   |           |       +------>
 * |               +------+-----------+    +------>2 PMUX |
 * +---------------+      |                |      |       |
 *                        |   +------+     |   +-->3      |
 *                        +--^+  ACD +-----+   |  +-------+
 * +---------------+          +------+         |
 * |Alt PLL        |                           |
 * |               +---------------------------+
 * +---------------+         PLL_EARLY
 *
 * The primary PLL is what drives the CPU clk, except while we are
 * reprogramming the PLL itself (for rate changes), when we
 * temporarily switch to the alternate PLL.
 *
 * The primary PLL operates on a single VCO range, between 600MHz
 * and 3GHz. However, the CPUs do support OPPs with frequencies
 * between 300MHz and 600MHz. In order to support running the CPUs
 * at those frequencies we end up having to lock the PLL at twice
 * the rate and drive the CPU clk via the PLL/2 output and SMUX.
 *
 * So for frequencies at or above 600MHz we take the following path:
 *  Primary PLL --> PLL_EARLY --> PMUX(1) --> CPU clk
 * and for frequencies between 300MHz and 600MHz we take:
 *  Primary PLL --> PLL/2 --> SMUX(1) --> PMUX(0) --> CPU clk
 *
 * ACD stands for Adaptive Clock Distribution and is used to
 * detect and react to voltage droops.
 */
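/*
 * A worked example (the numbers are illustrative, not taken from an OPP
 * table): for a 1.8GHz OPP the primary PLL is locked at 1.8GHz and the
 * PMUX selects PLL_EARLY (or the ACD leg once it has been set up), while
 * for a 480MHz OPP the primary PLL is locked at 960MHz, the SMUX selects
 * the PLL/2 output and the PMUX selects the SMUX.
 */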

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <soc/qcom/kryo-l2-accessors.h>

#include "clk-alpha-pll.h"
#include "clk-regmap.h"
#include "clk-regmap-mux.h"

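/*
 * Inputs of the primary mux (PMUX), matching the diagram above: the
 * secondary mux (SMUX) output, the primary PLL (PLL_EARLY), the ACD leg
 * of the primary PLL and the alternate PLL.
 */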
enum _pmux_input {
	SMUX_INDEX = 0,
	PLL_INDEX,
	ACD_INDEX,
	ALT_INDEX,
	NUM_OF_PMUX_INPUTS
};

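/* Below this rate the CPU clk is fed from the PLL/2 output via the SMUX */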
#define DIV_2_THRESHOLD		600000000
#define PWRCL_REG_OFFSET 0x0
#define PERFCL_REG_OFFSET 0x80000
#define MUX_OFFSET	0x40
#define ALT_PLL_OFFSET	0x100
#define SSSCTL_OFFSET 0x160

#define PMUX_MASK	0x3

static const u8 prim_pll_regs[PLL_OFF_MAX_REGS] = {
	[PLL_OFF_L_VAL] = 0x04,
	[PLL_OFF_ALPHA_VAL] = 0x08,
	[PLL_OFF_USER_CTL] = 0x10,
	[PLL_OFF_CONFIG_CTL] = 0x18,
	[PLL_OFF_CONFIG_CTL_U] = 0x1c,
	[PLL_OFF_TEST_CTL] = 0x20,
	[PLL_OFF_TEST_CTL_U] = 0x24,
	[PLL_OFF_STATUS] = 0x28,
};

static const u8 alt_pll_regs[PLL_OFF_MAX_REGS] = {
	[PLL_OFF_L_VAL] = 0x04,
	[PLL_OFF_ALPHA_VAL] = 0x08,
	[PLL_OFF_ALPHA_VAL_U] = 0x0c,
	[PLL_OFF_USER_CTL] = 0x10,
	[PLL_OFF_USER_CTL_U] = 0x14,
	[PLL_OFF_CONFIG_CTL] = 0x18,
	[PLL_OFF_TEST_CTL] = 0x20,
	[PLL_OFF_TEST_CTL_U] = 0x24,
	[PLL_OFF_STATUS] = 0x28,
};

/* PLLs */

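/*
 * Initial configuration of the primary PLLs. Assuming the usual 19.2MHz
 * XO on msm8996, L = 60 corresponds to 1152MHz. The even post-divider
 * appears to be programmed for divide-by-2, providing the PLL/2 feed for
 * the SMUX shown in the diagram above (modelled below by the
 * *_pll_postdiv fixed-factor clocks), while PLL_EARLY runs undivided.
 */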
static const struct alpha_pll_config hfpll_config = {
	.l = 60,
	.config_ctl_val = 0x200d4aa8,
	.config_ctl_hi_val = 0x006,
	.pre_div_mask = BIT(12),
	.post_div_mask = 0x3 << 8,
	.post_div_val = 0x1 << 8,
	.main_output_mask = BIT(0),
	.early_output_mask = BIT(3),
};

static const struct clk_parent_data pll_parent[] = {
	{ .fw_name = "xo" },
};

static struct clk_alpha_pll pwrcl_pll = {
	.offset = PWRCL_REG_OFFSET,
	.regs = prim_pll_regs,
	.flags = SUPPORTS_DYNAMIC_UPDATE | SUPPORTS_FSM_MODE,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "pwrcl_pll",
		.parent_data = pll_parent,
		.num_parents = ARRAY_SIZE(pll_parent),
		.ops = &clk_alpha_pll_huayra_ops,
	},
};

static struct clk_alpha_pll perfcl_pll = {
	.offset = PERFCL_REG_OFFSET,
	.regs = prim_pll_regs,
	.flags = SUPPORTS_DYNAMIC_UPDATE | SUPPORTS_FSM_MODE,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "perfcl_pll",
		.parent_data = pll_parent,
		.num_parents = ARRAY_SIZE(pll_parent),
		.ops = &clk_alpha_pll_huayra_ops,
	},
};

static struct clk_fixed_factor pwrcl_pll_postdiv = {
	.mult = 1,
	.div = 2,
	.hw.init = &(struct clk_init_data){
		.name = "pwrcl_pll_postdiv",
		.parent_data = &(const struct clk_parent_data){
			.hw = &pwrcl_pll.clkr.hw
		},
		.num_parents = 1,
		.ops = &clk_fixed_factor_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};

static struct clk_fixed_factor perfcl_pll_postdiv = {
	.mult = 1,
	.div = 2,
	.hw.init = &(struct clk_init_data){
		.name = "perfcl_pll_postdiv",
		.parent_data = &(const struct clk_parent_data){
			.hw = &perfcl_pll.clkr.hw
		},
		.num_parents = 1,
		.ops = &clk_fixed_factor_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};

static struct clk_fixed_factor perfcl_pll_acd = {
	.mult = 1,
	.div = 1,
	.hw.init = &(struct clk_init_data){
		.name = "perfcl_pll_acd",
		.parent_data = &(const struct clk_parent_data){
			.hw = &perfcl_pll.clkr.hw
		},
		.num_parents = 1,
		.ops = &clk_fixed_factor_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};

static struct clk_fixed_factor pwrcl_pll_acd = {
	.mult = 1,
	.div = 1,
	.hw.init = &(struct clk_init_data){
		.name = "pwrcl_pll_acd",
		.parent_data = &(const struct clk_parent_data){
			.hw = &pwrcl_pll.clkr.hw
		},
		.num_parents = 1,
		.ops = &clk_fixed_factor_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};

static const struct pll_vco alt_pll_vco_modes[] = {
	VCO(3,  250000000,  500000000),
	VCO(2,  500000000,  750000000),
	VCO(1,  750000000, 1000000000),
	VCO(0, 1000000000, 2150400000),
};

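/*
 * Initial configuration of the alternate PLLs. With the same 19.2MHz XO
 * assumption, L = 16 gives roughly 307.2MHz, which falls into VCO mode 3
 * (250MHz-500MHz) selected by vco_val. This is the rate the clusters are
 * parked at while a primary PLL is being reprogrammed.
 */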
static const struct alpha_pll_config altpll_config = {
	.l = 16,
	.vco_val = 0x3 << 20,
	.vco_mask = 0x3 << 20,
	.config_ctl_val = 0x4001051b,
	.post_div_mask = 0x3 << 8,
	.post_div_val = 0x1 << 8,
	.main_output_mask = BIT(0),
	.early_output_mask = BIT(3),
};

static struct clk_alpha_pll pwrcl_alt_pll = {
	.offset = PWRCL_REG_OFFSET + ALT_PLL_OFFSET,
	.regs = alt_pll_regs,
	.vco_table = alt_pll_vco_modes,
	.num_vco = ARRAY_SIZE(alt_pll_vco_modes),
	.flags = SUPPORTS_OFFLINE_REQ | SUPPORTS_FSM_MODE,
	.clkr.hw.init = &(struct clk_init_data) {
		.name = "pwrcl_alt_pll",
		.parent_data = pll_parent,
		.num_parents = ARRAY_SIZE(pll_parent),
		.ops = &clk_alpha_pll_hwfsm_ops,
	},
};

static struct clk_alpha_pll perfcl_alt_pll = {
	.offset = PERFCL_REG_OFFSET + ALT_PLL_OFFSET,
	.regs = alt_pll_regs,
	.vco_table = alt_pll_vco_modes,
	.num_vco = ARRAY_SIZE(alt_pll_vco_modes),
	.flags = SUPPORTS_OFFLINE_REQ | SUPPORTS_FSM_MODE,
	.clkr.hw.init = &(struct clk_init_data) {
		.name = "perfcl_alt_pll",
		.parent_data = pll_parent,
		.num_parents = ARRAY_SIZE(pll_parent),
		.ops = &clk_alpha_pll_hwfsm_ops,
	},
};

struct clk_cpu_8996_pmux {
	u32	reg;
	struct notifier_block nb;
	struct clk_regmap clkr;
};

static int cpu_clk_notifier_cb(struct notifier_block *nb, unsigned long event,
			       void *data);

#define to_clk_cpu_8996_pmux_nb(_nb) \
	container_of(_nb, struct clk_cpu_8996_pmux, nb)

static inline struct clk_cpu_8996_pmux *to_clk_cpu_8996_pmux_hw(struct clk_hw *hw)
{
	return container_of(to_clk_regmap(hw), struct clk_cpu_8996_pmux, clkr);
}

static u8 clk_cpu_8996_pmux_get_parent(struct clk_hw *hw)
{
	struct clk_regmap *clkr = to_clk_regmap(hw);
	struct clk_cpu_8996_pmux *cpuclk = to_clk_cpu_8996_pmux_hw(hw);
	u32 val;

	regmap_read(clkr->regmap, cpuclk->reg, &val);

	return FIELD_GET(PMUX_MASK, val);
}

static int clk_cpu_8996_pmux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_regmap *clkr = to_clk_regmap(hw);
	struct clk_cpu_8996_pmux *cpuclk = to_clk_cpu_8996_pmux_hw(hw);
	u32 val;

	val = FIELD_PREP(PMUX_MASK, index);

	return regmap_update_bits(clkr->regmap, cpuclk->reg, PMUX_MASK, val);
}

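/*
 * Rates below half of DIV_2_THRESHOLD (i.e. below 300MHz) cannot be
 * reached, since the primary PLL would have to lock below its VCO range.
 * Rates below DIV_2_THRESHOLD are routed through the SMUX (PLL/2 path),
 * everything else goes through the ACD leg of the primary PLL.
 */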
static int clk_cpu_8996_pmux_determine_rate(struct clk_hw *hw,
					   struct clk_rate_request *req)
{
	struct clk_hw *parent;

	if (req->rate < (DIV_2_THRESHOLD / 2))
		return -EINVAL;

	if (req->rate < DIV_2_THRESHOLD)
		parent = clk_hw_get_parent_by_index(hw, SMUX_INDEX);
	else
		parent = clk_hw_get_parent_by_index(hw, ACD_INDEX);
	if (!parent)
		return -EINVAL;

	req->best_parent_rate = clk_hw_round_rate(parent, req->rate);
	req->best_parent_hw = parent;

	return 0;
}

static const struct clk_ops clk_cpu_8996_pmux_ops = {
	.set_parent = clk_cpu_8996_pmux_set_parent,
	.get_parent = clk_cpu_8996_pmux_get_parent,
	.determine_rate = clk_cpu_8996_pmux_determine_rate,
};

static const struct clk_parent_data pwrcl_smux_parents[] = {
	{ .fw_name = "xo" },
	{ .hw = &pwrcl_pll_postdiv.hw },
};

static const struct clk_parent_data perfcl_smux_parents[] = {
	{ .fw_name = "xo" },
	{ .hw = &perfcl_pll_postdiv.hw },
};

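/*
 * The secondary muxes live in the same MUX register as the primary muxes,
 * two bits above the PMUX field, and select between the XO and the PLL/2
 * (postdiv) output.
 */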
static struct clk_regmap_mux pwrcl_smux = {
	.reg = PWRCL_REG_OFFSET + MUX_OFFSET,
	.shift = 2,
	.width = 2,
	.clkr.hw.init = &(struct clk_init_data) {
		.name = "pwrcl_smux",
		.parent_data = pwrcl_smux_parents,
		.num_parents = ARRAY_SIZE(pwrcl_smux_parents),
		.ops = &clk_regmap_mux_closest_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};

static struct clk_regmap_mux perfcl_smux = {
	.reg = PERFCL_REG_OFFSET + MUX_OFFSET,
	.shift = 2,
	.width = 2,
	.clkr.hw.init = &(struct clk_init_data) {
		.name = "perfcl_smux",
		.parent_data = perfcl_smux_parents,
		.num_parents = ARRAY_SIZE(perfcl_smux_parents),
		.ops = &clk_regmap_mux_closest_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};

static const struct clk_hw *pwrcl_pmux_parents[] = {
	[SMUX_INDEX] = &pwrcl_smux.clkr.hw,
	[PLL_INDEX] = &pwrcl_pll.clkr.hw,
	[ACD_INDEX] = &pwrcl_pll_acd.hw,
	[ALT_INDEX] = &pwrcl_alt_pll.clkr.hw,
};

static const struct clk_hw *perfcl_pmux_parents[] = {
	[SMUX_INDEX] = &perfcl_smux.clkr.hw,
	[PLL_INDEX] = &perfcl_pll.clkr.hw,
	[ACD_INDEX] = &perfcl_pll_acd.hw,
	[ALT_INDEX] = &perfcl_alt_pll.clkr.hw,
};

static struct clk_cpu_8996_pmux pwrcl_pmux = {
	.reg = PWRCL_REG_OFFSET + MUX_OFFSET,
	.nb.notifier_call = cpu_clk_notifier_cb,
	.clkr.hw.init = &(struct clk_init_data) {
		.name = "pwrcl_pmux",
		.parent_hws = pwrcl_pmux_parents,
		.num_parents = ARRAY_SIZE(pwrcl_pmux_parents),
		.ops = &clk_cpu_8996_pmux_ops,
		/* CPU clock is critical and should never be gated */
		.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
	},
};

static struct clk_cpu_8996_pmux perfcl_pmux = {
	.reg = PERFCL_REG_OFFSET + MUX_OFFSET,
	.nb.notifier_call = cpu_clk_notifier_cb,
	.clkr.hw.init = &(struct clk_init_data) {
		.name = "perfcl_pmux",
		.parent_hws = perfcl_pmux_parents,
		.num_parents = ARRAY_SIZE(perfcl_pmux_parents),
		.ops = &clk_cpu_8996_pmux_ops,
		/* CPU clock is critical and should never be gated */
		.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
	},
};

static const struct regmap_config cpu_msm8996_regmap_config = {
	.reg_bits		= 32,
	.reg_stride		= 4,
	.val_bits		= 32,
	.max_register		= 0x80210,
	.fast_io		= true,
	.val_format_endian	= REGMAP_ENDIAN_LITTLE,
};

static struct clk_hw *cpu_msm8996_hw_clks[] = {
	&pwrcl_pll_postdiv.hw,
	&perfcl_pll_postdiv.hw,
	&pwrcl_pll_acd.hw,
	&perfcl_pll_acd.hw,
};

static struct clk_regmap *cpu_msm8996_clks[] = {
	&pwrcl_pll.clkr,
	&perfcl_pll.clkr,
	&pwrcl_alt_pll.clkr,
	&perfcl_alt_pll.clkr,
	&pwrcl_smux.clkr,
	&perfcl_smux.clkr,
	&pwrcl_pmux.clkr,
	&perfcl_pmux.clkr,
};

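/*
 * Register the fixed-factor and regmap-backed clocks, apply the initial
 * PLL configuration and keep the alternate PLLs enabled so the PMUXes
 * always have a running parent to park on while a primary PLL is being
 * reprogrammed (see cpu_clk_notifier_cb() below).
 */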
static int qcom_cpu_clk_msm8996_register_clks(struct device *dev,
					      struct regmap *regmap)
{
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(cpu_msm8996_hw_clks); i++) {
		ret = devm_clk_hw_register(dev, cpu_msm8996_hw_clks[i]);
		if (ret)
			return ret;
	}

	for (i = 0; i < ARRAY_SIZE(cpu_msm8996_clks); i++) {
		ret = devm_clk_register_regmap(dev, cpu_msm8996_clks[i]);
		if (ret)
			return ret;
	}

	clk_alpha_pll_configure(&pwrcl_pll, regmap, &hfpll_config);
	clk_alpha_pll_configure(&perfcl_pll, regmap, &hfpll_config);
	clk_alpha_pll_configure(&pwrcl_alt_pll, regmap, &altpll_config);
	clk_alpha_pll_configure(&perfcl_alt_pll, regmap, &altpll_config);

	/* Enable alt PLLs */
	clk_prepare_enable(pwrcl_alt_pll.clkr.hw.clk);
	clk_prepare_enable(perfcl_alt_pll.clkr.hw.clk);

	ret = devm_clk_notifier_register(dev, pwrcl_pmux.clkr.hw.clk, &pwrcl_pmux.nb);
	if (ret)
		return ret;

	return devm_clk_notifier_register(dev, perfcl_pmux.clkr.hw.clk, &perfcl_pmux.nb);
}

#define CPU_AFFINITY_MASK 0xFFF
#define PWRCL_CPU_REG_MASK 0x3
#define PERFCL_CPU_REG_MASK 0x103

#define L2ACDCR_REG 0x580ULL
#define L2ACDTD_REG 0x581ULL
#define L2ACDDVMRC_REG 0x584ULL
#define L2ACDSSCR_REG 0x589ULL

static DEFINE_SPINLOCK(qcom_clk_acd_lock);
static void __iomem *base;

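/*
 * Program the L2 ACD block through the indirect L2 register interface and
 * the per-cluster SSSCTL registers. The values written below are opaque
 * magic numbers, presumably carried over from the downstream msm8996
 * kernel; they are not publicly documented.
 */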
static void qcom_cpu_clk_msm8996_acd_init(void __iomem *base)
{
	u64 hwid;
	unsigned long flags;

	spin_lock_irqsave(&qcom_clk_acd_lock, flags);

	hwid = read_cpuid_mpidr() & CPU_AFFINITY_MASK;

	kryo_l2_set_indirect_reg(L2ACDTD_REG, 0x00006a11);
	kryo_l2_set_indirect_reg(L2ACDDVMRC_REG, 0x000e0f0f);
	kryo_l2_set_indirect_reg(L2ACDSSCR_REG, 0x00000601);

	if (PWRCL_CPU_REG_MASK == (hwid | PWRCL_CPU_REG_MASK)) {
		writel(0xf, base + PWRCL_REG_OFFSET + SSSCTL_OFFSET);
		kryo_l2_set_indirect_reg(L2ACDCR_REG, 0x002c5ffd);
	}

	if (PERFCL_CPU_REG_MASK == (hwid | PERFCL_CPU_REG_MASK)) {
		kryo_l2_set_indirect_reg(L2ACDCR_REG, 0x002c5ffd);
		writel(0xf, base + PERFCL_REG_OFFSET + SSSCTL_OFFSET);
	}

	spin_unlock_irqrestore(&qcom_clk_acd_lock, flags);
}

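/*
 * Rate-change sequence for a primary mux: before the primary PLL is
 * reprogrammed, park the cluster on the alternate PLL and reprogram ACD;
 * once the new rate is in place, switch back to either the SMUX (PLL/2)
 * path or the ACD path, depending on the target frequency.
 */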
static int cpu_clk_notifier_cb(struct notifier_block *nb, unsigned long event,
			       void *data)
{
	struct clk_cpu_8996_pmux *cpuclk = to_clk_cpu_8996_pmux_nb(nb);
	struct clk_notifier_data *cnd = data;
	int ret;

	switch (event) {
	case PRE_RATE_CHANGE:
		ret = clk_cpu_8996_pmux_set_parent(&cpuclk->clkr.hw, ALT_INDEX);
		qcom_cpu_clk_msm8996_acd_init(base);
		break;
	case POST_RATE_CHANGE:
		if (cnd->new_rate < DIV_2_THRESHOLD)
			ret = clk_cpu_8996_pmux_set_parent(&cpuclk->clkr.hw,
							   SMUX_INDEX);
		else
			ret = clk_cpu_8996_pmux_set_parent(&cpuclk->clkr.hw,
							   ACD_INDEX);
		break;
	default:
		ret = 0;
		break;
	}

	return notifier_from_errno(ret);
}

static int qcom_cpu_clk_msm8996_driver_probe(struct platform_device *pdev)
{
	struct regmap *regmap;
	struct clk_hw_onecell_data *data;
	struct device *dev = &pdev->dev;
	int ret;

	data = devm_kzalloc(dev, struct_size(data, hws, 2), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	regmap = devm_regmap_init_mmio(dev, base, &cpu_msm8996_regmap_config);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	ret = qcom_cpu_clk_msm8996_register_clks(dev, regmap);
	if (ret)
		return ret;

	qcom_cpu_clk_msm8996_acd_init(base);

	data->hws[0] = &pwrcl_pmux.clkr.hw;
	data->hws[1] = &perfcl_pmux.clkr.hw;
	data->num = 2;

	return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, data);
}

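/*
 * A minimal sketch of the expected devicetree node, per the
 * qcom,msm8996-apcc binding (the label, node name and addresses below are
 * illustrative only):
 *
 *	kryocc: clock-controller@6400000 {
 *		compatible = "qcom,msm8996-apcc";
 *		reg = <0x6400000 0x90000>;
 *		clocks = <&xo_board>;
 *		clock-names = "xo";
 *		#clock-cells = <1>;
 *	};
 */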
static const struct of_device_id qcom_cpu_clk_msm8996_match_table[] = {
	{ .compatible = "qcom,msm8996-apcc" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_cpu_clk_msm8996_match_table);

static struct platform_driver qcom_cpu_clk_msm8996_driver = {
	.probe = qcom_cpu_clk_msm8996_driver_probe,
	.driver = {
		.name = "qcom-msm8996-apcc",
		.of_match_table = qcom_cpu_clk_msm8996_match_table,
	},
};
module_platform_driver(qcom_cpu_clk_msm8996_driver);

MODULE_DESCRIPTION("QCOM MSM8996 CPU Clock Driver");
MODULE_LICENSE("GPL v2");