// SPDX-License-Identifier: GPL-2.0-only
/*
 * SCI Clock driver for keystone based devices
 *
 * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
 *	Tero Kristo <t-kristo@ti.com>
 */
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/bsearch.h>
#include <linux/list_sort.h>

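/*
 * Per-clock flags passed through to the firmware when a clock is
 * requested in sci_clk_prepare(): SSC (spread spectrum clocking) enable,
 * permission for the firmware to change the clock frequency, and input
 * termination.
 */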
#define SCI_CLK_SSC_ENABLE		BIT(0)
#define SCI_CLK_ALLOW_FREQ_CHANGE	BIT(1)
#define SCI_CLK_INPUT_TERMINATION	BIT(2)

/**
 * struct sci_clk_provider - TI SCI clock provider representation
 * @sci: Handle to the System Control Interface protocol handler
 * @ops: Pointer to the SCI ops to be used by the clocks
 * @dev: Device pointer for the clock provider
 * @clocks: Clocks array for this device
 * @num_clocks: Total number of clocks for this provider
 */
struct sci_clk_provider {
	const struct ti_sci_handle *sci;
	const struct ti_sci_clk_ops *ops;
	struct device *dev;
	struct sci_clk **clocks;
	int num_clocks;
};

/**
 * struct sci_clk - TI SCI clock representation
 * @hw: Hardware clock cookie for common clock framework
 * @dev_id: Device index
 * @clk_id: Clock index
 * @num_parents: Number of parents for this clock
 * @provider: Master clock provider
 * @flags: Flags for the clock
 * @node: Link for handling clocks probed via DT
 * @cached_req: Cached requested freq for determine rate calls
 * @cached_res: Cached result freq for determine rate calls
 */
struct sci_clk {
	struct clk_hw hw;
	u16 dev_id;
	u32 clk_id;
	u32 num_parents;
	struct sci_clk_provider *provider;
	u8 flags;
	struct list_head node;
	unsigned long cached_req;
	unsigned long cached_res;
};

#define to_sci_clk(_hw) container_of(_hw, struct sci_clk, hw)

/**
 * sci_clk_prepare - Prepare (enable) a TI SCI clock
 * @hw: clock to prepare
 *
 * Prepares a clock to be actively used. Returns the SCI protocol status.
 */
static int sci_clk_prepare(struct clk_hw *hw)
{
	struct sci_clk *clk = to_sci_clk(hw);
	bool enable_ssc = clk->flags & SCI_CLK_SSC_ENABLE;
	bool allow_freq_change = clk->flags & SCI_CLK_ALLOW_FREQ_CHANGE;
	bool input_termination = clk->flags & SCI_CLK_INPUT_TERMINATION;

	return clk->provider->ops->get_clock(clk->provider->sci, clk->dev_id,
					     clk->clk_id, enable_ssc,
					     allow_freq_change,
					     input_termination);
}

/**
 * sci_clk_unprepare - Un-prepares (disables) a TI SCI clock
 * @hw: clock to unprepare
 *
 * Un-prepares a clock from active state.
 */
static void sci_clk_unprepare(struct clk_hw *hw)
{
	struct sci_clk *clk = to_sci_clk(hw);
	int ret;

	ret = clk->provider->ops->put_clock(clk->provider->sci, clk->dev_id,
					    clk->clk_id);
	if (ret)
		dev_err(clk->provider->dev,
			"unprepare failed for dev=%d, clk=%d, ret=%d\n",
			clk->dev_id, clk->clk_id, ret);
}

/**
 * sci_clk_is_prepared - Check if a TI SCI clock is prepared or not
 * @hw: clock to check status for
 *
 * Checks if a clock is prepared (enabled) in hardware. Returns non-zero
 * value if clock is enabled, zero otherwise.
 */
static int sci_clk_is_prepared(struct clk_hw *hw)
{
	struct sci_clk *clk = to_sci_clk(hw);
	bool req_state, current_state;
	int ret;

	ret = clk->provider->ops->is_on(clk->provider->sci, clk->dev_id,
					clk->clk_id, &req_state,
					&current_state);
	if (ret) {
		dev_err(clk->provider->dev,
			"is_prepared failed for dev=%d, clk=%d, ret=%d\n",
			clk->dev_id, clk->clk_id, ret);
		return 0;
	}

	return req_state;
}

/**
 * sci_clk_recalc_rate - Get clock rate for a TI SCI clock
 * @hw: clock to get rate for
 * @parent_rate: parent rate provided by common clock framework, not used
 *
 * Gets the current clock rate of a TI SCI clock. Returns the current
 * clock rate, or zero on failure.
 */
static unsigned long sci_clk_recalc_rate(struct clk_hw *hw,
					 unsigned long parent_rate)
{
	struct sci_clk *clk = to_sci_clk(hw);
	u64 freq;
	int ret;

	ret = clk->provider->ops->get_freq(clk->provider->sci, clk->dev_id,
					   clk->clk_id, &freq);
	if (ret) {
		dev_err(clk->provider->dev,
			"recalc-rate failed for dev=%d, clk=%d, ret=%d\n",
			clk->dev_id, clk->clk_id, ret);
		return 0;
	}

	return freq;
}

/**
 * sci_clk_determine_rate - Determines a clock rate a clock can be set to
 * @hw: clock to change rate for
 * @req: requested rate configuration for the clock
 *
 * Determines a suitable clock rate and parent for a TI SCI clock.
 * The parent handling is unused, as generally the parent clock rates
 * are not known by the kernel; instead these are internally handled
 * by the firmware. Returns 0 on success, negative error value on failure.
 */
static int sci_clk_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	struct sci_clk *clk = to_sci_clk(hw);
	int ret;
	u64 new_rate;

	if (clk->cached_req && clk->cached_req == req->rate) {
		req->rate = clk->cached_res;
		return 0;
	}

	ret = clk->provider->ops->get_best_match_freq(clk->provider->sci,
						      clk->dev_id,
						      clk->clk_id,
						      req->min_rate,
						      req->rate,
						      req->max_rate,
						      &new_rate);
	if (ret) {
		dev_err(clk->provider->dev,
			"determine-rate failed for dev=%d, clk=%d, ret=%d\n",
			clk->dev_id, clk->clk_id, ret);
		return ret;
	}

	clk->cached_req = req->rate;
	clk->cached_res = new_rate;

	req->rate = new_rate;

	return 0;
}

/**
 * sci_clk_set_rate - Set rate for a TI SCI clock
 * @hw: clock to change rate for
 * @rate: target rate for the clock
 * @parent_rate: rate of the clock parent, not used for TI SCI clocks
 *
 * Sets a clock frequency for a TI SCI clock. Returns the TI SCI
 * protocol status.
 */
static int sci_clk_set_rate(struct clk_hw *hw, unsigned long rate,
			    unsigned long parent_rate)
{
	struct sci_clk *clk = to_sci_clk(hw);

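	/*
	 * Pass a window of 90%..110% of the requested rate to the firmware;
	 * the requested rate itself is the preferred target.
	 */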
	return clk->provider->ops->set_freq(clk->provider->sci, clk->dev_id,
					    clk->clk_id, rate / 10 * 9, rate,
					    rate / 10 * 11);
}

/**
 * sci_clk_get_parent - Get the current parent of a TI SCI clock
 * @hw: clock to get parent for
 *
 * Returns the index of the currently selected parent for a TI SCI clock.
 */
static u8 sci_clk_get_parent(struct clk_hw *hw)
{
	struct sci_clk *clk = to_sci_clk(hw);
	u32 parent_id = 0;
	int ret;

	ret = clk->provider->ops->get_parent(clk->provider->sci, clk->dev_id,
					     clk->clk_id, (void *)&parent_id);
	if (ret) {
		dev_err(clk->provider->dev,
			"get-parent failed for dev=%d, clk=%d, ret=%d\n",
			clk->dev_id, clk->clk_id, ret);
		return 0;
	}

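	/*
	 * The firmware identifies a clock's parents with absolute clock IDs
	 * allocated contiguously right after the clock's own ID (see
	 * _sci_clk_build()), so convert the absolute ID back into the
	 * zero-based mux index the clock framework expects.
	 */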
	parent_id = parent_id - clk->clk_id - 1;

	return (u8)parent_id;
}

/**
 * sci_clk_set_parent - Set the parent of a TI SCI clock
 * @hw: clock to set parent for
 * @index: new parent index for the clock
 *
 * Sets the parent of a TI SCI clock. Returns the TI SCI protocol status.
 */
static int sci_clk_set_parent(struct clk_hw *hw, u8 index)
{
	struct sci_clk *clk = to_sci_clk(hw);

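	/* Drop the cached determine_rate result; it was valid only for the old parent. */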
	clk->cached_req = 0;

	return clk->provider->ops->set_parent(clk->provider->sci, clk->dev_id,
					      clk->clk_id,
					      index + 1 + clk->clk_id);
}

static const struct clk_ops sci_clk_ops = {
	.prepare = sci_clk_prepare,
	.unprepare = sci_clk_unprepare,
	.is_prepared = sci_clk_is_prepared,
	.recalc_rate = sci_clk_recalc_rate,
	.determine_rate = sci_clk_determine_rate,
	.set_rate = sci_clk_set_rate,
	.get_parent = sci_clk_get_parent,
	.set_parent = sci_clk_set_parent,
};

/**
 * _sci_clk_build - Gets a handle for an SCI clock
 * @provider: Handle to SCI clock provider
 * @sci_clk: Handle to the SCI clock to populate
 *
 * Gets a handle to an existing TI SCI hw clock, or builds a new clock
 * entry and registers it with the common clock framework. Called from
 * the common clock framework, when a corresponding of_clk_get call is
 * executed, or recursively from itself when parsing parent clocks.
 * Returns 0 on success, negative error code on failure.
 */
static int _sci_clk_build(struct sci_clk_provider *provider,
			  struct sci_clk *sci_clk)
{
	struct clk_init_data init = { NULL };
	char *name = NULL;
	char **parent_names = NULL;
	int i;
	int ret = 0;

	name = kasprintf(GFP_KERNEL, "clk:%d:%d", sci_clk->dev_id,
			 sci_clk->clk_id);
	if (!name)
		return -ENOMEM;

	init.name = name;

	/*
	 * From the kernel's point of view, we only care about a clock's
	 * parents if it has more than one possible parent. In this case,
	 * it is going to have mux functionality. Otherwise it is going to
	 * act as a root clock.
	 */
	if (sci_clk->num_parents < 2)
		sci_clk->num_parents = 0;

	if (sci_clk->num_parents) {
		parent_names = kcalloc(sci_clk->num_parents, sizeof(char *),
				       GFP_KERNEL);

		if (!parent_names) {
			ret = -ENOMEM;
			goto err;
		}

		for (i = 0; i < sci_clk->num_parents; i++) {
			char *parent_name;

			parent_name = kasprintf(GFP_KERNEL, "clk:%d:%d",
						sci_clk->dev_id,
						sci_clk->clk_id + 1 + i);
			if (!parent_name) {
				ret = -ENOMEM;
				goto err;
			}
			parent_names[i] = parent_name;
		}
		init.parent_names = (void *)parent_names;
	}

	init.ops = &sci_clk_ops;
	init.num_parents = sci_clk->num_parents;
	sci_clk->hw.init = &init;

	ret = devm_clk_hw_register(provider->dev, &sci_clk->hw);
	if (ret)
		dev_err(provider->dev, "failed clk register with %d\n", ret);

err:
	if (parent_names) {
		for (i = 0; i < sci_clk->num_parents; i++)
			kfree(parent_names[i]);

		kfree(parent_names);
	}

	kfree(name);

	return ret;
}

static int _cmp_sci_clk(const void *a, const void *b)
{
	const struct sci_clk *ca = a;
	const struct sci_clk *cb = *(struct sci_clk **)b;

	if (ca->dev_id == cb->dev_id && ca->clk_id == cb->clk_id)
		return 0;
	if (ca->dev_id > cb->dev_id ||
	    (ca->dev_id == cb->dev_id && ca->clk_id > cb->clk_id))
		return 1;
	return -1;
}

/**
 * sci_clk_get - Xlate function for getting clock handles
 * @clkspec: device tree clock specifier
 * @data: pointer to the clock provider
 *
 * Xlate function for retrieving TI SCI hw clock handles based on a
 * device tree clock specifier. Called from the common clock framework,
 * when a corresponding of_clk_get call is executed. Returns a pointer
 * to the TI SCI hw clock struct, or an ERR_PTR value on failure.
 */
static struct clk_hw *sci_clk_get(struct of_phandle_args *clkspec, void *data)
{
	struct sci_clk_provider *provider = data;
	struct sci_clk **clk;
	struct sci_clk key;

	if (clkspec->args_count != 2)
		return ERR_PTR(-EINVAL);

	key.dev_id = clkspec->args[0];
	key.clk_id = clkspec->args[1];

	clk = bsearch(&key, provider->clocks, provider->num_clocks,
		      sizeof(clk), _cmp_sci_clk);

	if (!clk)
		return ERR_PTR(-ENODEV);

	return &(*clk)->hw;
}
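/*
 * Illustrative consumer usage of the two-cell specifier handled above
 * (cell 0 = TI SCI device ID, cell 1 = clock ID); the node name and the
 * "sci_clks" provider label are made up for the example:
 *
 *	uart0: serial@12340000 {
 *		clocks = <&sci_clks 21 2>;
 *	};
 */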

static int ti_sci_init_clocks(struct sci_clk_provider *p)
{
	int i;
	int ret;

	for (i = 0; i < p->num_clocks; i++) {
		ret = _sci_clk_build(p, p->clocks[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct of_device_id ti_sci_clk_of_match[] = {
	{ .compatible = "ti,k2g-sci-clk" },
	{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, ti_sci_clk_of_match);

#ifdef CONFIG_TI_SCI_CLK_PROBE_FROM_FW
static int ti_sci_scan_clocks_from_fw(struct sci_clk_provider *provider)
{
	int ret;
	int num_clks = 0;
	struct sci_clk **clks = NULL;
	struct sci_clk **tmp_clks;
	struct sci_clk *sci_clk;
	int max_clks = 0;
	int clk_id = 0;
	int dev_id = 0;
	u32 num_parents = 0;
	int gap_size = 0;
	struct device *dev = provider->dev;

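	/*
	 * There is no firmware call here to enumerate clocks, so probe
	 * device/clock ID pairs one by one: two consecutive missing clock
	 * IDs finish the current device, and five consecutive devices
	 * without a clock 0 finish the whole scan.
	 */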
	while (1) {
		ret = provider->ops->get_num_parents(provider->sci, dev_id,
						     clk_id,
						     (void *)&num_parents);
		if (ret) {
			gap_size++;
			if (!clk_id) {
				if (gap_size >= 5)
					break;
				dev_id++;
			} else {
				if (gap_size >= 2) {
					dev_id++;
					clk_id = 0;
					gap_size = 0;
				} else {
					clk_id++;
				}
			}
			continue;
		}

		gap_size = 0;

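		/* Grow the temporary pointer array in chunks of 64 entries. */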
		if (num_clks == max_clks) {
			tmp_clks = devm_kmalloc_array(dev, max_clks + 64,
						      sizeof(sci_clk),
						      GFP_KERNEL);
			if (!tmp_clks)
				return -ENOMEM;
			memcpy(tmp_clks, clks, max_clks * sizeof(sci_clk));
			if (max_clks)
				devm_kfree(dev, clks);
			max_clks += 64;
			clks = tmp_clks;
		}

		sci_clk = devm_kzalloc(dev, sizeof(*sci_clk), GFP_KERNEL);
		if (!sci_clk)
			return -ENOMEM;
		sci_clk->dev_id = dev_id;
		sci_clk->clk_id = clk_id;
		sci_clk->provider = provider;
		sci_clk->num_parents = num_parents;

		clks[num_clks] = sci_clk;

		clk_id++;
		num_clks++;
	}

	provider->clocks = devm_kmalloc_array(dev, num_clks, sizeof(sci_clk),
					      GFP_KERNEL);
	if (!provider->clocks)
		return -ENOMEM;

	memcpy(provider->clocks, clks, num_clks * sizeof(sci_clk));

	provider->num_clocks = num_clks;

	devm_kfree(dev, clks);

	return 0;
}

#else

static int _cmp_sci_clk_list(void *priv, const struct list_head *a,
			     const struct list_head *b)
{
	struct sci_clk *ca = container_of(a, struct sci_clk, node);
	struct sci_clk *cb = container_of(b, struct sci_clk, node);

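	/*
	 * _cmp_sci_clk() expects its second argument to point to a
	 * struct sci_clk pointer, hence the address of cb.
	 */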
	return _cmp_sci_clk(ca, &cb);
}

static int ti_sci_scan_clocks_from_dt(struct sci_clk_provider *provider)
{
	struct device *dev = provider->dev;
	struct device_node *np = NULL;
	int ret;
	int index;
	struct of_phandle_args args;
	struct list_head clks;
	struct sci_clk *sci_clk, *prev;
	int num_clks = 0;
	int num_parents;
	bool state;
	int clk_id;
	const char * const clk_names[] = {
		"clocks", "assigned-clocks", "assigned-clock-parents", NULL
	};
	const char * const *clk_name;

	INIT_LIST_HEAD(&clks);

	clk_name = clk_names;

	while (*clk_name) {
		np = of_find_node_with_property(np, *clk_name);
		if (!np) {
			clk_name++;
			continue;
		}

		if (!of_device_is_available(np))
			continue;

		index = 0;

		do {
			ret = of_parse_phandle_with_args(np, *clk_name,
							 "#clock-cells", index,
							 &args);
			if (ret)
				break;

			if (args.args_count == 2 && args.np == dev->of_node) {
				sci_clk = devm_kzalloc(dev, sizeof(*sci_clk),
						       GFP_KERNEL);
				if (!sci_clk)
					return -ENOMEM;

				sci_clk->dev_id = args.args[0];
				sci_clk->clk_id = args.args[1];
				sci_clk->provider = provider;
				provider->ops->get_num_parents(provider->sci,
							       sci_clk->dev_id,
							       sci_clk->clk_id,
							       (void *)&sci_clk->num_parents);
				list_add_tail(&sci_clk->node, &clks);

				num_clks++;

				num_parents = sci_clk->num_parents;
				if (num_parents == 1)
					num_parents = 0;

				/*
				 * The Linux kernel has an inherent
				 * limitation of 255 clock parents at
				 * the moment. Right now, it is not
				 * expected that any mux clock from the
				 * sci-clk driver would exceed that
				 * limit either, but the ABI basically
				 * provides that possibility. Print out
				 * a warning if this happens for any
				 * clock.
				 */
				if (num_parents >= 255) {
					dev_warn(dev, "too many parents for dev=%d, clk=%d (%d), cropping to 255.\n",
						 sci_clk->dev_id,
						 sci_clk->clk_id, num_parents);
					num_parents = 255;
				}

				clk_id = args.args[1] + 1;

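				/*
				 * The parent clock IDs immediately follow
				 * this clock's own ID; register them too so
				 * they can be looked up via sci_clk_get().
				 */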
				while (num_parents--) {
					/* Check if this clock id is valid */
					ret = provider->ops->is_auto(provider->sci,
						sci_clk->dev_id, clk_id, &state);

					if (ret) {
						clk_id++;
						continue;
					}

					sci_clk = devm_kzalloc(dev,
							       sizeof(*sci_clk),
							       GFP_KERNEL);
					if (!sci_clk)
						return -ENOMEM;
					sci_clk->dev_id = args.args[0];
					sci_clk->clk_id = clk_id++;
					sci_clk->provider = provider;
					list_add_tail(&sci_clk->node, &clks);

					num_clks++;
				}
			}

			index++;
		} while (args.np);
	}

	list_sort(NULL, &clks, _cmp_sci_clk_list);

	provider->clocks = devm_kmalloc_array(dev, num_clks, sizeof(sci_clk),
					      GFP_KERNEL);
	if (!provider->clocks)
		return -ENOMEM;

	num_clks = 0;
	prev = NULL;

	list_for_each_entry(sci_clk, &clks, node) {
		if (prev && prev->dev_id == sci_clk->dev_id &&
		    prev->clk_id == sci_clk->clk_id)
			continue;

		provider->clocks[num_clks++] = sci_clk;
		prev = sci_clk;
	}

	provider->num_clocks = num_clks;

	return 0;
}
#endif

/**
 * ti_sci_clk_probe - Probe function for the TI SCI clock driver
 * @pdev: platform device pointer to be probed
 *
 * Probes the TI SCI clock device. Allocates a new clock provider
 * and registers this to the common clock framework. Also applies
 * any required flags to the identified clocks via clock lists
 * supplied from DT. Returns 0 for success, negative error value
 * for failure.
 */
static int ti_sci_clk_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct sci_clk_provider *provider;
	const struct ti_sci_handle *handle;
	int ret;

	handle = devm_ti_sci_get_handle(dev);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	provider = devm_kzalloc(dev, sizeof(*provider), GFP_KERNEL);
	if (!provider)
		return -ENOMEM;

	provider->sci = handle;
	provider->ops = &handle->ops.clk_ops;
	provider->dev = dev;

#ifdef CONFIG_TI_SCI_CLK_PROBE_FROM_FW
	ret = ti_sci_scan_clocks_from_fw(provider);
	if (ret) {
		dev_err(dev, "scan clocks from FW failed: %d\n", ret);
		return ret;
	}
#else
	ret = ti_sci_scan_clocks_from_dt(provider);
	if (ret) {
		dev_err(dev, "scan clocks from DT failed: %d\n", ret);
		return ret;
	}
#endif

	ret = ti_sci_init_clocks(provider);
	if (ret) {
		pr_err("ti-sci-init-clocks failed.\n");
		return ret;
	}

	return of_clk_add_hw_provider(np, sci_clk_get, provider);
}

/**
 * ti_sci_clk_remove - Remove TI SCI clock device
 * @pdev: platform device pointer for the device to be removed
 *
 * Removes the TI SCI clock device. Unregisters the clock provider
 * registered via the common clock framework. Any memory allocated for
 * the device will be freed silently via the devm framework.
 */
static void ti_sci_clk_remove(struct platform_device *pdev)
{
	of_clk_del_provider(pdev->dev.of_node);
}

static struct platform_driver ti_sci_clk_driver = {
	.probe = ti_sci_clk_probe,
	.remove = ti_sci_clk_remove,
	.driver = {
		.name = "ti-sci-clk",
		.of_match_table = of_match_ptr(ti_sci_clk_of_match),
	},
};
module_platform_driver(ti_sci_clk_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI System Control Interface (SCI) Clock driver");
MODULE_AUTHOR("Tero Kristo");
MODULE_ALIAS("platform:ti-sci-clk");