// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 SiFive, Inc.
 * Copyright (C) 2020 Zong Li
 */

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include "sifive-prci.h"
#include "fu540-prci.h"
#include "fu740-prci.h"

/*
 * Private functions
 */

/**
 * __prci_readl() - read from a PRCI register
 * @pd: PRCI context
 * @offs: register offset to read from (in bytes, from PRCI base address)
 *
 * Read the register located at offset @offs from the base virtual
 * address of the PRCI register target described by @pd, and return
 * the value to the caller.
 *
 * Context: Any context.
 *
 * Return: the contents of the register described by @pd and @offs.
 */
static u32 __prci_readl(struct __prci_data *pd, u32 offs)
{
	return readl_relaxed(pd->va + offs);
}

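/**
 * __prci_writel() - write to a PRCI register
 * @v: value to write
 * @offs: register offset to write to (in bytes, from PRCI base address)
 * @pd: PRCI context
 *
 * Write @v to the register located at offset @offs from the base virtual
 * address of the PRCI register target described by @pd.
 *
 * Context: Any context.
 */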
static void __prci_writel(u32 v, u32 offs, struct __prci_data *pd)
{
	writel_relaxed(v, pd->va + offs);
}

/* WRPLL-related private functions */

/**
 * __prci_wrpll_unpack() - unpack WRPLL configuration registers into parameters
 * @c: ptr to a struct wrpll_cfg record to write config into
 * @r: value read from the PRCI PLL configuration register
 *
 * Given a value @r read from an FU740 PRCI PLL configuration register,
 * split it into fields and populate the WRPLL configuration record
 * pointed to by @c.
 *
 * The COREPLLCFG0 macros are used below, but the other *PLLCFG0 macros
 * have the same register layout.
 *
 * Context: Any context.
 */
static void __prci_wrpll_unpack(struct wrpll_cfg *c, u32 r)
{
	u32 v;

	v = r & PRCI_COREPLLCFG0_DIVR_MASK;
	v >>= PRCI_COREPLLCFG0_DIVR_SHIFT;
	c->divr = v;

	v = r & PRCI_COREPLLCFG0_DIVF_MASK;
	v >>= PRCI_COREPLLCFG0_DIVF_SHIFT;
	c->divf = v;

	v = r & PRCI_COREPLLCFG0_DIVQ_MASK;
	v >>= PRCI_COREPLLCFG0_DIVQ_SHIFT;
	c->divq = v;

	v = r & PRCI_COREPLLCFG0_RANGE_MASK;
	v >>= PRCI_COREPLLCFG0_RANGE_SHIFT;
	c->range = v;

	c->flags &=
		(WRPLL_FLAGS_INT_FEEDBACK_MASK | WRPLL_FLAGS_EXT_FEEDBACK_MASK);

	/* external feedback mode not supported */
	c->flags |= WRPLL_FLAGS_INT_FEEDBACK_MASK;
}

/**
 * __prci_wrpll_pack() - pack PLL configuration parameters into a register value
 * @c: pointer to a struct wrpll_cfg record containing the PLL's cfg
 *
 * Using a set of WRPLL configuration values pointed to by @c,
 * assemble a PRCI PLL configuration register value, and return it to
 * the caller.
 *
 * Context: Any context. Caller must ensure that the contents of the
 * record pointed to by @c do not change during the execution
 * of this function.
 *
 * Returns: a value suitable for writing into a PRCI PLL configuration
 * register
 */
static u32 __prci_wrpll_pack(const struct wrpll_cfg *c)
{
	u32 r = 0;

	r |= c->divr << PRCI_COREPLLCFG0_DIVR_SHIFT;
	r |= c->divf << PRCI_COREPLLCFG0_DIVF_SHIFT;
	r |= c->divq << PRCI_COREPLLCFG0_DIVQ_SHIFT;
	r |= c->range << PRCI_COREPLLCFG0_RANGE_SHIFT;

	/* external feedback mode not supported */
	r |= PRCI_COREPLLCFG0_FSE_MASK;

	return r;
}

/**
 * __prci_wrpll_read_cfg0() - read the WRPLL configuration from the PRCI
 * @pd: PRCI context
 * @pwd: PRCI WRPLL metadata
 *
 * Read the current configuration of the PLL identified by @pwd from
 * the PRCI identified by @pd, and store it into the local configuration
 * cache in @pwd.
 *
 * Context: Any context. Caller must prevent the records pointed to by
 * @pd and @pwd from changing during execution.
 */
static void __prci_wrpll_read_cfg0(struct __prci_data *pd,
				   struct __prci_wrpll_data *pwd)
{
	__prci_wrpll_unpack(&pwd->c, __prci_readl(pd, pwd->cfg0_offs));
}

/**
 * __prci_wrpll_write_cfg0() - write WRPLL configuration into the PRCI
 * @pd: PRCI context
 * @pwd: PRCI WRPLL metadata
 * @c: WRPLL configuration record to write
 *
 * Write the WRPLL configuration described by @c into the WRPLL
 * configuration register identified by @pwd in the PRCI instance
 * described by @pd. Make a cached copy of the WRPLL's current
 * configuration so it can be used by other code.
 *
 * Context: Any context. Caller must prevent the records pointed to by
 * @pd and @pwd from changing during execution.
 */
static void __prci_wrpll_write_cfg0(struct __prci_data *pd,
				    struct __prci_wrpll_data *pwd,
				    struct wrpll_cfg *c)
{
	__prci_writel(__prci_wrpll_pack(c), pwd->cfg0_offs, pd);

	memcpy(&pwd->c, c, sizeof(*c));
}

/**
 * __prci_wrpll_write_cfg1() - write the clock enable/disable value into the PRCI
 * @pd: PRCI context
 * @pwd: PRCI WRPLL metadata
 * @enable: clock enable or disable value to write
 */
static void __prci_wrpll_write_cfg1(struct __prci_data *pd,
				    struct __prci_wrpll_data *pwd,
				    u32 enable)
{
	__prci_writel(enable, pwd->cfg1_offs, pd);
}

/*
 * Linux clock framework integration
 *
 * See the Linux clock framework documentation for more information on
 * these functions.
 */
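/*
 * The callbacks below are not referenced directly in this file; the per-SoC
 * clock descriptions (struct prci_clk_desc, see fu540-prci.h and
 * fu740-prci.h) supply struct clk_ops tables that point at them.  As an
 * illustrative sketch only (the real tables live in the per-SoC code), a
 * WRPLL-backed clock would be wired up roughly like:
 *
 *	static const struct clk_ops example_wrpll_clk_ops = {
 *		.set_rate	= sifive_prci_wrpll_set_rate,
 *		.determine_rate	= sifive_prci_wrpll_determine_rate,
 *		.recalc_rate	= sifive_prci_wrpll_recalc_rate,
 *		.enable		= sifive_prci_clock_enable,
 *		.disable	= sifive_prci_clock_disable,
 *		.is_enabled	= sifive_clk_is_enabled,
 *	};
 */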

unsigned long sifive_prci_wrpll_recalc_rate(struct clk_hw *hw,
					    unsigned long parent_rate)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_wrpll_data *pwd = pc->pwd;

	return wrpll_calc_output_rate(&pwd->c, parent_rate);
}

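/**
 * sifive_prci_wrpll_determine_rate() - determine the closest supported PLL rate
 * @hw: clock hardware for a PRCI-managed WRPLL
 * @req: clock rate request to adjust
 *
 * Compute the WRPLL configuration closest to @req->rate (based on
 * @req->best_parent_rate) without touching the hardware, and update
 * @req->rate with the rate that configuration would actually produce.
 *
 * Return: 0.
 */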
int sifive_prci_wrpll_determine_rate(struct clk_hw *hw,
				     struct clk_rate_request *req)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_wrpll_data *pwd = pc->pwd;
	struct wrpll_cfg c;

	memcpy(&c, &pwd->c, sizeof(c));

	wrpll_configure_for_rate(&c, req->rate, req->best_parent_rate);

	req->rate = wrpll_calc_output_rate(&c, req->best_parent_rate);

	return 0;
}

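/**
 * sifive_prci_wrpll_set_rate() - program a PRCI-managed WRPLL to a new rate
 * @hw: clock hardware for the WRPLL
 * @rate: target output rate, in Hz
 * @parent_rate: rate of the WRPLL's parent (reference) clock, in Hz
 *
 * Compute a WRPLL configuration for @rate, switch downstream logic to the
 * bypass clock if the PRCI provides an enable_bypass hook, write the new
 * configuration into the PLL's CFG0 register, and then delay for the PLL's
 * worst-case lock time.
 *
 * Return: 0 upon success, or a negative error code if no configuration
 * could be found for @rate.
 */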
int sifive_prci_wrpll_set_rate(struct clk_hw *hw,
			       unsigned long rate, unsigned long parent_rate)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_wrpll_data *pwd = pc->pwd;
	struct __prci_data *pd = pc->pd;
	int r;

	r = wrpll_configure_for_rate(&pwd->c, rate, parent_rate);
	if (r)
		return r;

	if (pwd->enable_bypass)
		pwd->enable_bypass(pd);

	__prci_wrpll_write_cfg0(pd, pwd, &pwd->c);

	udelay(wrpll_calc_max_lock_us(&pwd->c));

	return 0;
}

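/**
 * sifive_clk_is_enabled() - check whether a PRCI-managed WRPLL is enabled
 * @hw: clock hardware for the WRPLL
 *
 * Return: 1 if the clock enable bit in the PLL's CFG1 register is set,
 * or 0 otherwise.
 */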
int sifive_clk_is_enabled(struct clk_hw *hw)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_wrpll_data *pwd = pc->pwd;
	struct __prci_data *pd = pc->pd;
	u32 r;

	r = __prci_readl(pd, pwd->cfg1_offs);

	if (r & PRCI_COREPLLCFG1_CKE_MASK)
		return 1;
	else
		return 0;
}

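/**
 * sifive_prci_clock_enable() - enable a PRCI-managed WRPLL output
 * @hw: clock hardware for the WRPLL
 *
 * Set the clock enable bit in the PLL's CFG1 register if it is not already
 * set, then switch downstream logic off the bypass clock if the PRCI
 * provides a disable_bypass hook.
 *
 * Return: 0.
 */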
int sifive_prci_clock_enable(struct clk_hw *hw)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_wrpll_data *pwd = pc->pwd;
	struct __prci_data *pd = pc->pd;

	if (sifive_clk_is_enabled(hw))
		return 0;

	__prci_wrpll_write_cfg1(pd, pwd, PRCI_COREPLLCFG1_CKE_MASK);

	if (pwd->disable_bypass)
		pwd->disable_bypass(pd);

	return 0;
}

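/**
 * sifive_prci_clock_disable() - disable a PRCI-managed WRPLL output
 * @hw: clock hardware for the WRPLL
 *
 * Switch downstream logic onto the bypass clock if the PRCI provides an
 * enable_bypass hook, then clear the clock enable bit in the PLL's CFG1
 * register.
 */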
void sifive_prci_clock_disable(struct clk_hw *hw)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_wrpll_data *pwd = pc->pwd;
	struct __prci_data *pd = pc->pd;
	u32 r;

	if (pwd->enable_bypass)
		pwd->enable_bypass(pd);

	r = __prci_readl(pd, pwd->cfg1_offs);
	r &= ~PRCI_COREPLLCFG1_CKE_MASK;

	__prci_wrpll_write_cfg1(pd, pwd, r);
}

/* TLCLKSEL clock integration */

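/**
 * sifive_prci_tlclksel_recalc_rate() - recalculate the TileLink clock rate
 * @hw: clock hardware for the TLCLK
 * @parent_rate: rate of the parent clock, in Hz
 *
 * Return: @parent_rate if the TLCLKSEL status bit is set, or @parent_rate
 * divided by two otherwise.
 */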
unsigned long sifive_prci_tlclksel_recalc_rate(struct clk_hw *hw,
					       unsigned long parent_rate)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_data *pd = pc->pd;
	u32 v;
	u8 div;

	v = __prci_readl(pd, PRCI_CLKMUXSTATUSREG_OFFSET);
	v &= PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_MASK;
	div = v ? 1 : 2;

	return div_u64(parent_rate, div);
}

/* HFPCLK clock integration */

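/**
 * sifive_prci_hfpclkplldiv_recalc_rate() - recalculate the HFPCLK PLL divider rate
 * @hw: clock hardware for the HFPCLK divider
 * @parent_rate: rate of the parent clock, in Hz
 *
 * The hardware divides by the register value plus two, so a HFPCLKPLLDIV
 * register value of 0 divides the parent rate by 2.
 *
 * Return: @parent_rate divided by (HFPCLKPLLDIV + 2).
 */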
unsigned long sifive_prci_hfpclkplldiv_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_data *pd = pc->pd;
	u32 div = __prci_readl(pd, PRCI_HFPCLKPLLDIV_OFFSET);

	return div_u64(parent_rate, div + 2);
}

/*
 * Core clock mux control
 */

/**
 * sifive_prci_coreclksel_use_hfclk() - switch the CORECLK mux to output HFCLK
 * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
 *
 * Switch the CORECLK mux to the HFCLK input source; return once complete.
 *
 * Context: Any context. Caller must prevent concurrent changes to the
 * PRCI_CORECLKSEL_OFFSET register.
 */
void sifive_prci_coreclksel_use_hfclk(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
	r |= PRCI_CORECLKSEL_CORECLKSEL_MASK;
	__prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
}

/**
 * sifive_prci_coreclksel_use_corepll() - switch the CORECLK mux to output
 * COREPLL
 * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
 *
 * Switch the CORECLK mux to the COREPLL output clock; return once complete.
 *
 * Context: Any context. Caller must prevent concurrent changes to the
 * PRCI_CORECLKSEL_OFFSET register.
 */
void sifive_prci_coreclksel_use_corepll(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
	r &= ~PRCI_CORECLKSEL_CORECLKSEL_MASK;
	__prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
}

/**
 * sifive_prci_coreclksel_use_final_corepll() - switch the CORECLK mux to output
 * FINAL_COREPLL
 * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
 *
 * Switch the CORECLK mux to the final COREPLL output clock; return once
 * complete.
 *
 * Context: Any context. Caller must prevent concurrent changes to the
 * PRCI_CORECLKSEL_OFFSET register.
 */
void sifive_prci_coreclksel_use_final_corepll(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
	r &= ~PRCI_CORECLKSEL_CORECLKSEL_MASK;
	__prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
}

/**
 * sifive_prci_corepllsel_use_dvfscorepll() - switch the COREPLL mux to
 * output DVFS_COREPLL
 * @pd: struct __prci_data * for the PRCI containing the COREPLL mux reg
 *
 * Switch the COREPLL mux to the DVFSCOREPLL output clock; return once complete.
 *
 * Context: Any context. Caller must prevent concurrent changes to the
 * PRCI_COREPLLSEL_OFFSET register.
 */
void sifive_prci_corepllsel_use_dvfscorepll(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET);
	r |= PRCI_COREPLLSEL_COREPLLSEL_MASK;
	__prci_writel(r, PRCI_COREPLLSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET); /* barrier */
}

/**
 * sifive_prci_corepllsel_use_corepll() - switch the COREPLL mux to
 * output COREPLL
 * @pd: struct __prci_data * for the PRCI containing the COREPLL mux reg
 *
 * Switch the COREPLL mux to the COREPLL output clock; return once complete.
 *
 * Context: Any context. Caller must prevent concurrent changes to the
 * PRCI_COREPLLSEL_OFFSET register.
 */
void sifive_prci_corepllsel_use_corepll(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET);
	r &= ~PRCI_COREPLLSEL_COREPLLSEL_MASK;
	__prci_writel(r, PRCI_COREPLLSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET); /* barrier */
}

/**
 * sifive_prci_hfpclkpllsel_use_hfclk() - switch the HFPCLKPLL mux to
 * output HFCLK
 * @pd: struct __prci_data * for the PRCI containing the HFPCLKPLL mux reg
 *
 * Switch the HFPCLKPLL mux to the HFCLK input source; return once complete.
 *
 * Context: Any context. Caller must prevent concurrent changes to the
 * PRCI_HFPCLKPLLSEL_OFFSET register.
 */
void sifive_prci_hfpclkpllsel_use_hfclk(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET);
	r |= PRCI_HFPCLKPLLSEL_HFPCLKPLLSEL_MASK;
	__prci_writel(r, PRCI_HFPCLKPLLSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET); /* barrier */
}

/**
 * sifive_prci_hfpclkpllsel_use_hfpclkpll() - switch the HFPCLKPLL mux to
 * output HFPCLKPLL
 * @pd: struct __prci_data * for the PRCI containing the HFPCLKPLL mux reg
 *
 * Switch the HFPCLKPLL mux to the HFPCLKPLL output clock; return once complete.
 *
 * Context: Any context. Caller must prevent concurrent changes to the
 * PRCI_HFPCLKPLLSEL_OFFSET register.
 */
void sifive_prci_hfpclkpllsel_use_hfpclkpll(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET);
	r &= ~PRCI_HFPCLKPLLSEL_HFPCLKPLLSEL_MASK;
	__prci_writel(r, PRCI_HFPCLKPLLSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET); /* barrier */
}

/* PCIE AUX clock APIs for enable and disable */
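/**
 * sifive_prci_pcie_aux_clock_is_enabled() - check whether the PCIe AUX clock is enabled
 * @hw: clock hardware for the PCIe AUX clock
 *
 * Return: 1 if the enable bit in the PCIE_AUX register is set, or 0 otherwise.
 */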
int sifive_prci_pcie_aux_clock_is_enabled(struct clk_hw *hw)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_data *pd = pc->pd;
	u32 r;

	r = __prci_readl(pd, PRCI_PCIE_AUX_OFFSET);

	if (r & PRCI_PCIE_AUX_EN_MASK)
		return 1;
	else
		return 0;
}

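/**
 * sifive_prci_pcie_aux_clock_enable() - enable the PCIe AUX clock
 * @hw: clock hardware for the PCIe AUX clock
 *
 * Write 1 to the PCIE_AUX register if the clock is not already enabled,
 * then read the register back to ensure the write has posted.
 *
 * Return: 0.
 */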
int sifive_prci_pcie_aux_clock_enable(struct clk_hw *hw)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_data *pd = pc->pd;
	u32 r __maybe_unused;

	if (sifive_prci_pcie_aux_clock_is_enabled(hw))
		return 0;

	__prci_writel(1, PRCI_PCIE_AUX_OFFSET, pd);
	r = __prci_readl(pd, PRCI_PCIE_AUX_OFFSET); /* barrier */

	return 0;
}

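/**
 * sifive_prci_pcie_aux_clock_disable() - disable the PCIe AUX clock
 * @hw: clock hardware for the PCIe AUX clock
 *
 * Write 0 to the PCIE_AUX register, then read the register back to ensure
 * the write has posted.
 */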
void sifive_prci_pcie_aux_clock_disable(struct clk_hw *hw)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_data *pd = pc->pd;
	u32 r __maybe_unused;

	__prci_writel(0, PRCI_PCIE_AUX_OFFSET, pd);
	r = __prci_readl(pd, PRCI_PCIE_AUX_OFFSET); /* barrier */
}

/**
 * __prci_register_clocks() - register clock controls in the PRCI
 * @dev: Linux struct device
 * @pd: pointer to the PRCI per-device instance data
 * @desc: pointer to the per-SoC clock description
 *
 * Register the list of clock controls described by @desc with the Linux
 * clock framework.
 *
 * Return: 0 upon success or a negative error code upon failure.
 */
static int __prci_register_clocks(struct device *dev, struct __prci_data *pd,
				  const struct prci_clk_desc *desc)
{
	struct clk_init_data init = { };
	struct __prci_clock *pic;
	int parent_count, i, r;

	parent_count = of_clk_get_parent_count(dev->of_node);
	if (parent_count != EXPECTED_CLK_PARENT_COUNT) {
		dev_err(dev, "expected only two parent clocks, found %d\n",
			parent_count);
		return -EINVAL;
	}

	/* Register PLLs */
	for (i = 0; i < desc->num_clks; ++i) {
		pic = &(desc->clks[i]);

		init.name = pic->name;
		init.parent_names = &pic->parent_name;
		init.num_parents = 1;
		init.ops = pic->ops;
		pic->hw.init = &init;

		pic->pd = pd;

		if (pic->pwd)
			__prci_wrpll_read_cfg0(pd, pic->pwd);

		r = devm_clk_hw_register(dev, &pic->hw);
		if (r) {
			dev_warn(dev, "Failed to register clock %s: %d\n",
				 init.name, r);
			return r;
		}

		pd->hw_clks.hws[i] = &pic->hw;
	}

	pd->hw_clks.num = i;

	r = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
					&pd->hw_clks);
	if (r) {
		dev_err(dev, "could not add hw_provider: %d\n", r);
		return r;
	}

	return 0;
}

/**
 * sifive_prci_probe() - initialize prci data and check parent count
 * @pdev: platform device pointer for the prci
 *
 * Return: 0 upon success or a negative error code upon failure.
 */
static int sifive_prci_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct __prci_data *pd;
	const struct prci_clk_desc *desc;
	int r;

	desc = of_device_get_match_data(&pdev->dev);

	pd = devm_kzalloc(dev, struct_size(pd, hw_clks.hws, desc->num_clks), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pd->va = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(pd->va))
		return PTR_ERR(pd->va);

	pd->reset.rcdev.owner = THIS_MODULE;
	pd->reset.rcdev.nr_resets = PRCI_RST_NR;
	pd->reset.rcdev.ops = &reset_simple_ops;
	pd->reset.rcdev.of_node = pdev->dev.of_node;
	pd->reset.active_low = true;
	pd->reset.membase = pd->va + PRCI_DEVICESRESETREG_OFFSET;
	spin_lock_init(&pd->reset.lock);

	r = devm_reset_controller_register(&pdev->dev, &pd->reset.rcdev);
	if (r) {
		dev_err(dev, "could not register reset controller: %d\n", r);
		return r;
	}
	r = __prci_register_clocks(dev, pd, desc);
	if (r) {
		dev_err(dev, "could not register clocks: %d\n", r);
		return r;
	}

	dev_dbg(dev, "SiFive PRCI probed\n");

	return 0;
}

static const struct of_device_id sifive_prci_of_match[] = {
	{.compatible = "sifive,fu540-c000-prci", .data = &prci_clk_fu540},
	{.compatible = "sifive,fu740-c000-prci", .data = &prci_clk_fu740},
	{}
};
MODULE_DEVICE_TABLE(of, sifive_prci_of_match);

static struct platform_driver sifive_prci_driver = {
	.driver = {
		.name = "sifive-clk-prci",
		.of_match_table = sifive_prci_of_match,
	},
	.probe = sifive_prci_probe,
};
module_platform_driver(sifive_prci_driver);

MODULE_AUTHOR("Paul Walmsley <paul.walmsley@sifive.com>");
MODULE_DESCRIPTION("SiFive Power Reset Clock Interface (PRCI) driver");
MODULE_LICENSE("GPL");