1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
4 *
5 * Authors:
6 * Serge Semin <Sergey.Semin@baikalelectronics.ru>
7 * Dmitry Dunaev <dmitry.dunaev@baikalelectronics.ru>
8 *
9 * Baikal-T1 CCU PLL interface driver
10 */
11
12 #define pr_fmt(fmt) "bt1-ccu-pll: " fmt
13
14 #include <linux/kernel.h>
15 #include <linux/printk.h>
16 #include <linux/limits.h>
17 #include <linux/bits.h>
18 #include <linux/bitfield.h>
19 #include <linux/slab.h>
20 #include <linux/clk-provider.h>
21 #include <linux/of.h>
22 #include <linux/spinlock.h>
23 #include <linux/regmap.h>
24 #include <linux/iopoll.h>
25 #include <linux/time64.h>
26 #include <linux/rational.h>
27 #include <linux/debugfs.h>
28
29 #include "ccu-pll.h"
30
/* PLL control register: enable/reset/bypass/lock bits and divider fields */
#define CCU_PLL_CTL			0x000
#define CCU_PLL_CTL_EN			BIT(0)
#define CCU_PLL_CTL_RST			BIT(1)
#define CCU_PLL_CTL_CLKR_FLD		2
#define CCU_PLL_CTL_CLKR_MASK		GENMASK(7, CCU_PLL_CTL_CLKR_FLD)
#define CCU_PLL_CTL_CLKF_FLD		8
#define CCU_PLL_CTL_CLKF_MASK		GENMASK(20, CCU_PLL_CTL_CLKF_FLD)
#define CCU_PLL_CTL_CLKOD_FLD		21
#define CCU_PLL_CTL_CLKOD_MASK		GENMASK(24, CCU_PLL_CTL_CLKOD_FLD)
#define CCU_PLL_CTL_BYPASS		BIT(30)
#define CCU_PLL_CTL_LOCK		BIT(31)
/* Secondary control register: bandwidth adjustment field */
#define CCU_PLL_CTL1			0x004
#define CCU_PLL_CTL1_BWADJ_FLD		3
#define CCU_PLL_CTL1_BWADJ_MASK		GENMASK(14, CCU_PLL_CTL1_BWADJ_FLD)

/* Number of lock-delay periods polled before a reset is declared failed */
#define CCU_PLL_LOCK_CHECK_RETRIES	50

/* Maximum divider values derived from the register field widths */
#define CCU_PLL_NR_MAX \
	((CCU_PLL_CTL_CLKR_MASK >> CCU_PLL_CTL_CLKR_FLD) + 1)
/*
 * NOTE(review): unlike the other *_MAX macros this one shifts by
 * FLD + 1, which halves the usable CLKF range — presumably intentional,
 * but worth confirming against the PLL databook.
 */
#define CCU_PLL_NF_MAX \
	((CCU_PLL_CTL_CLKF_MASK >> (CCU_PLL_CTL_CLKF_FLD + 1)) + 1)
#define CCU_PLL_OD_MAX \
	((CCU_PLL_CTL_CLKOD_MASK >> CCU_PLL_CTL_CLKOD_FLD) + 1)
#define CCU_PLL_NB_MAX \
	((CCU_PLL_CTL1_BWADJ_MASK >> CCU_PLL_CTL1_BWADJ_FLD) + 1)
/*
 * Frequency limits in Hz: FDIV bounds apply to the divided reference
 * (parent_rate / NR, see ccu_pll_calc_factors()), FOUT to the PLL
 * output and FVCO to the internal VCO.
 */
#define CCU_PLL_FDIV_MIN		427000UL
#define CCU_PLL_FDIV_MAX		3500000000UL
#define CCU_PLL_FOUT_MIN		200000000UL
#define CCU_PLL_FOUT_MAX		2500000000UL
#define CCU_PLL_FVCO_MIN		700000000UL
#define CCU_PLL_FVCO_MAX		3500000000UL
/* Od must be 1 or even: approximate with od/2, then scale back by this */
#define CCU_PLL_CLKOD_FACTOR		2
63
ccu_pll_lock_delay_us(unsigned long ref_clk,unsigned long nr)64 static inline unsigned long ccu_pll_lock_delay_us(unsigned long ref_clk,
65 unsigned long nr)
66 {
67 u64 us = 500ULL * nr * USEC_PER_SEC;
68
69 do_div(us, ref_clk);
70
71 return us;
72 }
73
ccu_pll_calc_freq(unsigned long ref_clk,unsigned long nr,unsigned long nf,unsigned long od)74 static inline unsigned long ccu_pll_calc_freq(unsigned long ref_clk,
75 unsigned long nr,
76 unsigned long nf,
77 unsigned long od)
78 {
79 u64 tmp = ref_clk;
80
81 do_div(tmp, nr);
82 tmp *= nf;
83 do_div(tmp, od);
84
85 return tmp;
86 }
87
ccu_pll_reset(struct ccu_pll * pll,unsigned long ref_clk,unsigned long nr)88 static int ccu_pll_reset(struct ccu_pll *pll, unsigned long ref_clk,
89 unsigned long nr)
90 {
91 unsigned long ud, ut;
92 u32 val;
93
94 ud = ccu_pll_lock_delay_us(ref_clk, nr);
95 ut = ud * CCU_PLL_LOCK_CHECK_RETRIES;
96
97 regmap_update_bits(pll->sys_regs, pll->reg_ctl,
98 CCU_PLL_CTL_RST, CCU_PLL_CTL_RST);
99
100 return regmap_read_poll_timeout_atomic(pll->sys_regs, pll->reg_ctl, val,
101 val & CCU_PLL_CTL_LOCK, ud, ut);
102 }
103
ccu_pll_enable(struct clk_hw * hw)104 static int ccu_pll_enable(struct clk_hw *hw)
105 {
106 struct clk_hw *parent_hw = clk_hw_get_parent(hw);
107 struct ccu_pll *pll = to_ccu_pll(hw);
108 unsigned long flags;
109 u32 val = 0;
110 int ret;
111
112 if (!parent_hw) {
113 pr_err("Can't enable '%s' with no parent", clk_hw_get_name(hw));
114 return -EINVAL;
115 }
116
117 regmap_read(pll->sys_regs, pll->reg_ctl, &val);
118 if (val & CCU_PLL_CTL_EN)
119 return 0;
120
121 spin_lock_irqsave(&pll->lock, flags);
122 regmap_write(pll->sys_regs, pll->reg_ctl, val | CCU_PLL_CTL_EN);
123 ret = ccu_pll_reset(pll, clk_hw_get_rate(parent_hw),
124 FIELD_GET(CCU_PLL_CTL_CLKR_MASK, val) + 1);
125 spin_unlock_irqrestore(&pll->lock, flags);
126 if (ret)
127 pr_err("PLL '%s' reset timed out\n", clk_hw_get_name(hw));
128
129 return ret;
130 }
131
/*
 * Gate the PLL by clearing the EN bit of the control register. The
 * spinlock serializes this read-modify-write against the other CTL
 * register updates (set_rate callbacks and the debugfs writers).
 */
static void ccu_pll_disable(struct clk_hw *hw)
{
	struct ccu_pll *pll = to_ccu_pll(hw);
	unsigned long flags;

	spin_lock_irqsave(&pll->lock, flags);
	regmap_update_bits(pll->sys_regs, pll->reg_ctl, CCU_PLL_CTL_EN, 0);
	spin_unlock_irqrestore(&pll->lock, flags);
}
141
ccu_pll_is_enabled(struct clk_hw * hw)142 static int ccu_pll_is_enabled(struct clk_hw *hw)
143 {
144 struct ccu_pll *pll = to_ccu_pll(hw);
145 u32 val = 0;
146
147 regmap_read(pll->sys_regs, pll->reg_ctl, &val);
148
149 return !!(val & CCU_PLL_CTL_EN);
150 }
151
ccu_pll_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)152 static unsigned long ccu_pll_recalc_rate(struct clk_hw *hw,
153 unsigned long parent_rate)
154 {
155 struct ccu_pll *pll = to_ccu_pll(hw);
156 unsigned long nr, nf, od;
157 u32 val = 0;
158
159 regmap_read(pll->sys_regs, pll->reg_ctl, &val);
160 nr = FIELD_GET(CCU_PLL_CTL_CLKR_MASK, val) + 1;
161 nf = FIELD_GET(CCU_PLL_CTL_CLKF_MASK, val) + 1;
162 od = FIELD_GET(CCU_PLL_CTL_CLKOD_MASK, val) + 1;
163
164 return ccu_pll_calc_freq(parent_rate, nr, nf, od);
165 }
166
/*
 * Find the PLL dividers (NR, NF, OD) producing the rate closest to the
 * requested one for the given reference clock.
 *
 * @rate: target output rate in Hz
 * @parent_rate: reference clock rate in Hz
 * @nr/@nf/@od: resulting reference, feedback and output dividers; only
 *	written when a better approximation is found, so callers are
 *	expected to pre-initialize them.
 */
static void ccu_pll_calc_factors(unsigned long rate, unsigned long parent_rate,
				 unsigned long *nr, unsigned long *nf,
				 unsigned long *od)
{
	unsigned long err, freq, min_err = ULONG_MAX;
	unsigned long num, denom, n1, d1, nri;
	unsigned long nr_max, nf_max, od_max;

	/*
	 * Make sure PLL is working with valid input signal (Fdiv). If
	 * you want to speed the function up just reduce CCU_PLL_NR_MAX.
	 * This will cause a worse approximation though.
	 */
	nri = (parent_rate / CCU_PLL_FDIV_MAX) + 1;
	nr_max = min(parent_rate / CCU_PLL_FDIV_MIN, CCU_PLL_NR_MAX);

	/*
	 * Find a closest [nr;nf;od] vector taking into account the
	 * limitations like: 1) 700MHz <= Fvco <= 3.5GHz, 2) PLL Od is
	 * either 1 or even number within the acceptable range (alas 1s
	 * is also excluded by the next loop).
	 */
	for (; nri <= nr_max; ++nri) {
		/* Use Od factor to fulfill the limitation 2). */
		num = CCU_PLL_CLKOD_FACTOR * rate;
		denom = parent_rate / nri;

		/*
		 * Make sure Fvco is within the acceptable range to fulfill
		 * the condition 1). Note due to the CCU_PLL_CLKOD_FACTOR value
		 * the actual upper limit is also divided by that factor.
		 * It's not big problem for us since practically there is no
		 * need in clocks with that high frequency.
		 */
		nf_max = min(CCU_PLL_FVCO_MAX / denom, CCU_PLL_NF_MAX);
		od_max = CCU_PLL_OD_MAX / CCU_PLL_CLKOD_FACTOR;

		/*
		 * Bypass the out-of-bound values, which can't be properly
		 * handled by the rational fraction approximation algorithm.
		 */
		if (num / denom >= nf_max) {
			n1 = nf_max;
			d1 = 1;
		} else if (denom / num >= od_max) {
			n1 = 1;
			d1 = od_max;
		} else {
			rational_best_approximation(num, denom, nf_max, od_max,
						    &n1, &d1);
		}

		/*
		 * Select the best approximation of the target rate. Here
		 * d1 stands for od/2, so freq is compared against
		 * num == CCU_PLL_CLKOD_FACTOR * rate, not the plain rate.
		 */
		freq = ccu_pll_calc_freq(parent_rate, nri, n1, d1);
		err = abs((int64_t)freq - num);
		if (err < min_err) {
			min_err = err;
			*nr = nri;
			*nf = n1;
			*od = CCU_PLL_CLKOD_FACTOR * d1;
		}
	}
}
230
ccu_pll_determine_rate(struct clk_hw * hw,struct clk_rate_request * req)231 static int ccu_pll_determine_rate(struct clk_hw *hw,
232 struct clk_rate_request *req)
233 {
234 unsigned long nr = 1, nf = 1, od = 1;
235
236 ccu_pll_calc_factors(req->rate, req->best_parent_rate, &nr, &nf, &od);
237
238 req->rate = ccu_pll_calc_freq(req->best_parent_rate, nr, nf, od);
239
240 return 0;
241 }
242
/*
 * This method is used for PLLs, which support the on-the-fly dividers
 * adjustment. So there is no need in gating such clocks.
 */
static int ccu_pll_set_rate_reset(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	struct ccu_pll *pll = to_ccu_pll(hw);
	unsigned long nr, nf, od;	/* reference, feedback, output dividers */
	unsigned long flags;
	u32 mask, val;
	int ret;

	ccu_pll_calc_factors(rate, parent_rate, &nr, &nf, &od);

	/* The hardware fields hold "divider - 1" values. */
	mask = CCU_PLL_CTL_CLKR_MASK | CCU_PLL_CTL_CLKF_MASK |
	       CCU_PLL_CTL_CLKOD_MASK;
	val = FIELD_PREP(CCU_PLL_CTL_CLKR_MASK, nr - 1) |
	      FIELD_PREP(CCU_PLL_CTL_CLKF_MASK, nf - 1) |
	      FIELD_PREP(CCU_PLL_CTL_CLKOD_MASK, od - 1);

	/* Program the dividers, then reset the PLL and wait for relock. */
	spin_lock_irqsave(&pll->lock, flags);
	regmap_update_bits(pll->sys_regs, pll->reg_ctl, mask, val);
	ret = ccu_pll_reset(pll, parent_rate, nr);
	spin_unlock_irqrestore(&pll->lock, flags);
	if (ret)
		pr_err("PLL '%s' reset timed out\n", clk_hw_get_name(hw));

	return ret;
}
273
/*
 * This method is used for PLLs, which don't support the on-the-fly dividers
 * adjustment. So the corresponding clocks are supposed to be gated first.
 */
static int ccu_pll_set_rate_norst(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	struct ccu_pll *pll = to_ccu_pll(hw);
	unsigned long nr, nf, od;	/* reference, feedback, output dividers */
	unsigned long flags;
	u32 mask, val;

	ccu_pll_calc_factors(rate, parent_rate, &nr, &nf, &od);

	/*
	 * Disable PLL if it was enabled by default or left enabled by the
	 * system bootloader. (EN is in the update mask but has no bit set
	 * in val, so it gets cleared by the regmap_update_bits() below.)
	 */
	mask = CCU_PLL_CTL_CLKR_MASK | CCU_PLL_CTL_CLKF_MASK |
	       CCU_PLL_CTL_CLKOD_MASK | CCU_PLL_CTL_EN;
	val = FIELD_PREP(CCU_PLL_CTL_CLKR_MASK, nr - 1) |
	      FIELD_PREP(CCU_PLL_CTL_CLKF_MASK, nf - 1) |
	      FIELD_PREP(CCU_PLL_CTL_CLKOD_MASK, od - 1);

	spin_lock_irqsave(&pll->lock, flags);
	regmap_update_bits(pll->sys_regs, pll->reg_ctl, mask, val);
	spin_unlock_irqrestore(&pll->lock, flags);

	return 0;
}
304
305 #ifdef CONFIG_DEBUG_FS
306
/* Context of a single-bit debugfs attribute of a PLL control register */
struct ccu_pll_dbgfs_bit {
	struct ccu_pll *pll;	/* owning PLL instance */
	const char *name;	/* debugfs file name */
	unsigned int reg;	/* register offset relative to pll->reg_ctl */
	u32 mask;		/* the bit exposed by the file */
};
313
/* Context of a multi-bit divider-field debugfs attribute */
struct ccu_pll_dbgfs_fld {
	struct ccu_pll *pll;	/* owning PLL instance */
	const char *name;	/* debugfs file name */
	unsigned int reg;	/* register offset relative to pll->reg_ctl */
	unsigned int lsb;	/* field position within the register */
	u32 mask;		/* field mask */
	u32 min;		/* minimum logical (divider) value */
	u32 max;		/* maximum logical (divider) value */
};
323
/* Static initializer for a ccu_pll_dbgfs_bit (.pll is filled at runtime) */
#define CCU_PLL_DBGFS_BIT_ATTR(_name, _reg, _mask)	\
	{						\
		.name = _name,				\
		.reg = _reg,				\
		.mask = _mask				\
	}

/* Static initializer for a ccu_pll_dbgfs_fld (.pll is filled at runtime) */
#define CCU_PLL_DBGFS_FLD_ATTR(_name, _reg, _lsb, _mask, _min, _max)	\
	{								\
		.name = _name,						\
		.reg = _reg,						\
		.lsb = _lsb,						\
		.mask = _mask,						\
		.min = _min,						\
		.max = _max						\
	}
340
/* Single-bit CTL-register flags exposed via debugfs */
static const struct ccu_pll_dbgfs_bit ccu_pll_bits[] = {
	CCU_PLL_DBGFS_BIT_ATTR("pll_en", CCU_PLL_CTL, CCU_PLL_CTL_EN),
	CCU_PLL_DBGFS_BIT_ATTR("pll_rst", CCU_PLL_CTL, CCU_PLL_CTL_RST),
	CCU_PLL_DBGFS_BIT_ATTR("pll_bypass", CCU_PLL_CTL, CCU_PLL_CTL_BYPASS),
	CCU_PLL_DBGFS_BIT_ATTR("pll_lock", CCU_PLL_CTL, CCU_PLL_CTL_LOCK)
};

#define CCU_PLL_DBGFS_BIT_NUM	ARRAY_SIZE(ccu_pll_bits)
349
/* Divider fields of the CTL/CTL1 registers exposed via debugfs */
static const struct ccu_pll_dbgfs_fld ccu_pll_flds[] = {
	CCU_PLL_DBGFS_FLD_ATTR("pll_nr", CCU_PLL_CTL, CCU_PLL_CTL_CLKR_FLD,
				CCU_PLL_CTL_CLKR_MASK, 1, CCU_PLL_NR_MAX),
	CCU_PLL_DBGFS_FLD_ATTR("pll_nf", CCU_PLL_CTL, CCU_PLL_CTL_CLKF_FLD,
				CCU_PLL_CTL_CLKF_MASK, 1, CCU_PLL_NF_MAX),
	CCU_PLL_DBGFS_FLD_ATTR("pll_od", CCU_PLL_CTL, CCU_PLL_CTL_CLKOD_FLD,
				CCU_PLL_CTL_CLKOD_MASK, 1, CCU_PLL_OD_MAX),
	CCU_PLL_DBGFS_FLD_ATTR("pll_nb", CCU_PLL_CTL1, CCU_PLL_CTL1_BWADJ_FLD,
				CCU_PLL_CTL1_BWADJ_MASK, 1, CCU_PLL_NB_MAX)
};

#define CCU_PLL_DBGFS_FLD_NUM	ARRAY_SIZE(ccu_pll_flds)
362
/*
 * Letting debugfs change the PLL settings behind the clock framework's
 * back can be dangerous, so we deliberately provide no kernel-config
 * based compile-time option to enable this feature.
 */
368 #undef CCU_PLL_ALLOW_WRITE_DEBUGFS
369 #ifdef CCU_PLL_ALLOW_WRITE_DEBUGFS
370
/* debugfs write handler: set or clear a single control-register flag. */
static int ccu_pll_dbgfs_bit_set(void *priv, u64 val)
{
	const struct ccu_pll_dbgfs_bit *bit = priv;
	struct ccu_pll *pll = bit->pll;
	unsigned long flags;
	u32 data;

	data = val ? bit->mask : 0;

	spin_lock_irqsave(&pll->lock, flags);
	regmap_update_bits(pll->sys_regs, pll->reg_ctl + bit->reg,
			   bit->mask, data);
	spin_unlock_irqrestore(&pll->lock, flags);

	return 0;
}
384
/*
 * debugfs write handler: program a divider field, clamping the input
 * to the field's valid [min, max] range first.
 */
static int ccu_pll_dbgfs_fld_set(void *priv, u64 val)
{
	struct ccu_pll_dbgfs_fld *fld = priv;
	struct ccu_pll *pll = fld->pll;
	unsigned long flags;
	u32 data;

	val = clamp_t(u64, val, fld->min, fld->max);
	/* The hardware field holds "divider - 1". */
	data = ((val - 1) << fld->lsb) & fld->mask;

	spin_lock_irqsave(&pll->lock, flags);
	regmap_update_bits(pll->sys_regs, pll->reg_ctl + fld->reg,
			   fld->mask, data);
	spin_unlock_irqrestore(&pll->lock, flags);

	return 0;
}
402
403 #define ccu_pll_dbgfs_mode 0644
404
405 #else /* !CCU_PLL_ALLOW_WRITE_DEBUGFS */
406
407 #define ccu_pll_dbgfs_bit_set NULL
408 #define ccu_pll_dbgfs_fld_set NULL
409 #define ccu_pll_dbgfs_mode 0444
410
411 #endif /* !CCU_PLL_ALLOW_WRITE_DEBUGFS */
412
ccu_pll_dbgfs_bit_get(void * priv,u64 * val)413 static int ccu_pll_dbgfs_bit_get(void *priv, u64 *val)
414 {
415 struct ccu_pll_dbgfs_bit *bit = priv;
416 struct ccu_pll *pll = bit->pll;
417 u32 data = 0;
418
419 regmap_read(pll->sys_regs, pll->reg_ctl + bit->reg, &data);
420 *val = !!(data & bit->mask);
421
422 return 0;
423 }
424 DEFINE_DEBUGFS_ATTRIBUTE(ccu_pll_dbgfs_bit_fops,
425 ccu_pll_dbgfs_bit_get, ccu_pll_dbgfs_bit_set, "%llu\n");
426
ccu_pll_dbgfs_fld_get(void * priv,u64 * val)427 static int ccu_pll_dbgfs_fld_get(void *priv, u64 *val)
428 {
429 struct ccu_pll_dbgfs_fld *fld = priv;
430 struct ccu_pll *pll = fld->pll;
431 u32 data = 0;
432
433 regmap_read(pll->sys_regs, pll->reg_ctl + fld->reg, &data);
434 *val = ((data & fld->mask) >> fld->lsb) + 1;
435
436 return 0;
437 }
438 DEFINE_DEBUGFS_ATTRIBUTE(ccu_pll_dbgfs_fld_fops,
439 ccu_pll_dbgfs_fld_get, ccu_pll_dbgfs_fld_set, "%llu\n");
440
/*
 * Populate the PLL's debugfs directory with per-bit and per-field
 * attribute files exposing the CTL/CTL1 register contents.
 *
 * The attribute descriptors are copied to the heap so every file gets
 * a private context bound to this PLL instance. They are never freed
 * here — the debugfs files keep referencing them (presumably for the
 * clock's whole lifetime). On allocation failure the directory is
 * simply left empty or partially populated.
 */
static void ccu_pll_debug_init(struct clk_hw *hw, struct dentry *dentry)
{
	struct ccu_pll *pll = to_ccu_pll(hw);
	struct ccu_pll_dbgfs_bit *bits;
	struct ccu_pll_dbgfs_fld *flds;
	int idx;

	bits = kcalloc(CCU_PLL_DBGFS_BIT_NUM, sizeof(*bits), GFP_KERNEL);
	if (!bits)
		return;

	for (idx = 0; idx < CCU_PLL_DBGFS_BIT_NUM; ++idx) {
		bits[idx] = ccu_pll_bits[idx];
		bits[idx].pll = pll;

		debugfs_create_file_unsafe(bits[idx].name, ccu_pll_dbgfs_mode,
					   dentry, &bits[idx],
					   &ccu_pll_dbgfs_bit_fops);
	}

	flds = kcalloc(CCU_PLL_DBGFS_FLD_NUM, sizeof(*flds), GFP_KERNEL);
	if (!flds)
		return;

	for (idx = 0; idx < CCU_PLL_DBGFS_FLD_NUM; ++idx) {
		flds[idx] = ccu_pll_flds[idx];
		flds[idx].pll = pll;

		debugfs_create_file_unsafe(flds[idx].name, ccu_pll_dbgfs_mode,
					   dentry, &flds[idx],
					   &ccu_pll_dbgfs_fld_fops);
	}
}
474
475 #else /* !CONFIG_DEBUG_FS */
476
477 #define ccu_pll_debug_init NULL
478
479 #endif /* !CONFIG_DEBUG_FS */
480
/*
 * Ops for PLLs registered with CLK_SET_RATE_GATE: the clock is gated
 * before a rate change, so the reset-less set_rate method is used (see
 * the ccu_pll_set_rate_norst() comment).
 */
static const struct clk_ops ccu_pll_gate_to_set_ops = {
	.enable = ccu_pll_enable,
	.disable = ccu_pll_disable,
	.is_enabled = ccu_pll_is_enabled,
	.recalc_rate = ccu_pll_recalc_rate,
	.determine_rate = ccu_pll_determine_rate,
	.set_rate = ccu_pll_set_rate_norst,
	.debug_init = ccu_pll_debug_init
};
490
/*
 * Ops for PLLs supporting the on-the-fly dividers update: set_rate
 * programs the dividers and resets the PLL without gating it (see the
 * ccu_pll_set_rate_reset() comment).
 */
static const struct clk_ops ccu_pll_straight_set_ops = {
	.enable = ccu_pll_enable,
	.disable = ccu_pll_disable,
	.is_enabled = ccu_pll_is_enabled,
	.recalc_rate = ccu_pll_recalc_rate,
	.determine_rate = ccu_pll_determine_rate,
	.set_rate = ccu_pll_set_rate_reset,
	.debug_init = ccu_pll_debug_init
};
500
/*
 * Allocate and register a Baikal-T1 CCU PLL clock.
 *
 * @pll_init: descriptor with the register base, system-controller
 *	regmap, clock/parent names, flags, DT node and id.
 *
 * Returns the new ccu_pll handle or an ERR_PTR: -EINVAL on a NULL
 * descriptor or missing parent name, -ENOMEM on allocation failure,
 * or the of_clk_hw_register() error code.
 *
 * NOTE(review): hw_init/parent_data are stack locals handed to the
 * clock core via pll->hw.init — this follows the usual clk-provider
 * pattern where the core copies the init data during registration.
 */
struct ccu_pll *ccu_pll_hw_register(const struct ccu_pll_init_data *pll_init)
{
	struct clk_parent_data parent_data = { };
	struct clk_init_data hw_init = { };
	struct ccu_pll *pll;
	int ret;

	if (!pll_init)
		return ERR_PTR(-EINVAL);

	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
	if (!pll)
		return ERR_PTR(-ENOMEM);

	/*
	 * Note since Baikal-T1 System Controller registers are MMIO-backed
	 * we won't check the regmap IO operations return status, because it
	 * must be zero anyway.
	 */
	pll->hw.init = &hw_init;
	pll->reg_ctl = pll_init->base + CCU_PLL_CTL;
	pll->reg_ctl1 = pll_init->base + CCU_PLL_CTL1;
	pll->sys_regs = pll_init->sys_regs;
	pll->id = pll_init->id;
	spin_lock_init(&pll->lock);

	hw_init.name = pll_init->name;
	hw_init.flags = pll_init->flags;

	/* Gated-before-set_rate clocks get the reset-less implementation. */
	if (hw_init.flags & CLK_SET_RATE_GATE)
		hw_init.ops = &ccu_pll_gate_to_set_ops;
	else
		hw_init.ops = &ccu_pll_straight_set_ops;

	if (!pll_init->parent_name) {
		ret = -EINVAL;
		goto err_free_pll;
	}
	parent_data.fw_name = pll_init->parent_name;
	hw_init.parent_data = &parent_data;
	hw_init.num_parents = 1;

	ret = of_clk_hw_register(pll_init->np, &pll->hw);
	if (ret)
		goto err_free_pll;

	return pll;

err_free_pll:
	kfree(pll);

	return ERR_PTR(ret);
}
554
/*
 * Unregister the PLL from the clock framework and free the descriptor
 * allocated by ccu_pll_hw_register().
 */
void ccu_pll_hw_unregister(struct ccu_pll *pll)
{
	clk_hw_unregister(&pll->hw);

	kfree(pll);
}
561