1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2020-2022 MaxLinear, Inc.
4 * Copyright (C) 2020 Intel Corporation.
5 * Zhu Yixin <yzhu@maxlinear.com>
6 * Rahul Tanwar <rtanwar@maxlinear.com>
7 */
8 #include <linux/clk-provider.h>
9 #include <linux/device.h>
10 #include <linux/of.h>
11
12 #include "clk-cgu.h"
13
/*
 * Gate control uses three consecutive registers relative to the base
 * offset: +0x0 reads back gate status, +0x4 is the set (enable)
 * register, +0x8 is the clear (disable) register.
 */
#define GATE_HW_REG_STAT(reg) ((reg) + 0x0)
#define GATE_HW_REG_EN(reg) ((reg) + 0x4)
#define GATE_HW_REG_DIS(reg) ((reg) + 0x8)
/* Largest value a single dual-divider register field can hold. */
#define MAX_DDIV_REG 8
/* Largest total division ratio (MAX_DDIV_REG * MAX_DDIV_REG). */
#define MAX_DIVIDER_VAL 64

/* container_of() helpers: recover driver-private data from a clk_hw. */
#define to_lgm_clk_mux(_hw) container_of(_hw, struct lgm_clk_mux, hw)
#define to_lgm_clk_divider(_hw) container_of(_hw, struct lgm_clk_divider, hw)
#define to_lgm_clk_gate(_hw) container_of(_hw, struct lgm_clk_gate, hw)
#define to_lgm_clk_ddiv(_hw) container_of(_hw, struct lgm_clk_ddiv, hw)
24
/*
 * Register one fixed-rate clock described by @list.
 *
 * NOTE(review): the fixed rate is passed via list->mux_flags — the
 * branch descriptor appears to reuse that field for CLK_TYPE_FIXED
 * entries; confirm against struct lgm_clk_branch in clk-cgu.h.
 */
static struct clk_hw *lgm_clk_register_fixed(struct lgm_clk_provider *ctx,
					     const struct lgm_clk_branch *list)
{

	/* Optionally pre-program a register field before registration. */
	if (list->div_flags & CLOCK_FLAG_VAL_INIT)
		lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift,
				list->div_width, list->div_val);

	return clk_hw_register_fixed_rate(NULL, list->name,
					  list->parent_data[0].name,
					  list->flags, list->mux_flags);
}
37
lgm_clk_mux_get_parent(struct clk_hw * hw)38 static u8 lgm_clk_mux_get_parent(struct clk_hw *hw)
39 {
40 struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
41 u32 val;
42
43 if (mux->flags & MUX_CLK_SW)
44 val = mux->reg;
45 else
46 val = lgm_get_clk_val(mux->membase, mux->reg, mux->shift,
47 mux->width);
48 return clk_mux_val_to_index(hw, NULL, mux->flags, val);
49 }
50
/* clk_ops .set_parent: program the selector for parent @index. */
static int lgm_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
	u32 regval = clk_mux_index_to_val(NULL, mux->flags, index);

	if (mux->flags & MUX_CLK_SW) {
		/* SW-controlled muxes cache the selector in mux->reg. */
		mux->reg = regval;
		return 0;
	}

	lgm_set_clk_val(mux->membase, mux->reg, mux->shift, mux->width,
			regval);
	return 0;
}
65
lgm_clk_mux_determine_rate(struct clk_hw * hw,struct clk_rate_request * req)66 static int lgm_clk_mux_determine_rate(struct clk_hw *hw,
67 struct clk_rate_request *req)
68 {
69 struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
70
71 return clk_mux_determine_rate_flags(hw, req, mux->flags);
72 }
73
/* clk_ops for register- or software-backed parent muxes. */
static const struct clk_ops lgm_clk_mux_ops = {
	.get_parent = lgm_clk_mux_get_parent,
	.set_parent = lgm_clk_mux_set_parent,
	.determine_rate = lgm_clk_mux_determine_rate,
};
79
80 static struct clk_hw *
lgm_clk_register_mux(struct lgm_clk_provider * ctx,const struct lgm_clk_branch * list)81 lgm_clk_register_mux(struct lgm_clk_provider *ctx,
82 const struct lgm_clk_branch *list)
83 {
84 unsigned long cflags = list->mux_flags;
85 struct device *dev = ctx->dev;
86 u8 shift = list->mux_shift;
87 u8 width = list->mux_width;
88 struct clk_init_data init = {};
89 struct lgm_clk_mux *mux;
90 u32 reg = list->mux_off;
91 struct clk_hw *hw;
92 int ret;
93
94 mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
95 if (!mux)
96 return ERR_PTR(-ENOMEM);
97
98 init.name = list->name;
99 init.ops = &lgm_clk_mux_ops;
100 init.flags = list->flags;
101 init.parent_data = list->parent_data;
102 init.num_parents = list->num_parents;
103
104 mux->membase = ctx->membase;
105 mux->reg = reg;
106 mux->shift = shift;
107 mux->width = width;
108 mux->flags = cflags;
109 mux->hw.init = &init;
110
111 hw = &mux->hw;
112 ret = devm_clk_hw_register(dev, hw);
113 if (ret)
114 return ERR_PTR(ret);
115
116 if (cflags & CLOCK_FLAG_VAL_INIT)
117 lgm_set_clk_val(mux->membase, reg, shift, width, list->mux_val);
118
119 return hw;
120 }
121
122 static unsigned long
lgm_clk_divider_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)123 lgm_clk_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
124 {
125 struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
126 unsigned int val;
127
128 val = lgm_get_clk_val(divider->membase, divider->reg,
129 divider->shift, divider->width);
130
131 return divider_recalc_rate(hw, parent_rate, val, divider->table,
132 divider->flags, divider->width);
133 }
134
lgm_clk_divider_determine_rate(struct clk_hw * hw,struct clk_rate_request * req)135 static int lgm_clk_divider_determine_rate(struct clk_hw *hw,
136 struct clk_rate_request *req)
137 {
138 struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
139
140 req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate, divider->table,
141 divider->width, divider->flags);
142
143 return 0;
144 }
145
146 static int
lgm_clk_divider_set_rate(struct clk_hw * hw,unsigned long rate,unsigned long prate)147 lgm_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
148 unsigned long prate)
149 {
150 struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
151 int value;
152
153 value = divider_get_val(rate, prate, divider->table,
154 divider->width, divider->flags);
155 if (value < 0)
156 return value;
157
158 lgm_set_clk_val(divider->membase, divider->reg,
159 divider->shift, divider->width, value);
160
161 return 0;
162 }
163
/*
 * Common helper for the divider gate: write @enable into the gate field.
 *
 * NOTE(review): this is a full equality test against DIV_CLK_NO_MASK,
 * not a bitmask check — gating is skipped only when flags is exactly
 * DIV_CLK_NO_MASK. Confirm that matches the flag semantics in clk-cgu.h.
 */
static int lgm_clk_divider_enable_disable(struct clk_hw *hw, int enable)
{
	struct lgm_clk_divider *div = to_lgm_clk_divider(hw);

	if (div->flags != DIV_CLK_NO_MASK)
		lgm_set_clk_val(div->membase, div->reg, div->shift_gate,
				div->width_gate, enable);
	return 0;
}
173
/* clk_ops .enable hook: assert the divider's gate field. */
static int lgm_clk_divider_enable(struct clk_hw *hw)
{
	return lgm_clk_divider_enable_disable(hw, 1);
}
178
/* clk_ops .disable hook: clear the divider's gate field. */
static void lgm_clk_divider_disable(struct clk_hw *hw)
{
	lgm_clk_divider_enable_disable(hw, 0);
}
183
/* clk_ops for register-backed dividers with an optional gate field. */
static const struct clk_ops lgm_clk_divider_ops = {
	.recalc_rate = lgm_clk_divider_recalc_rate,
	.determine_rate = lgm_clk_divider_determine_rate,
	.set_rate = lgm_clk_divider_set_rate,
	.enable = lgm_clk_divider_enable,
	.disable = lgm_clk_divider_disable,
};
191
192 static struct clk_hw *
lgm_clk_register_divider(struct lgm_clk_provider * ctx,const struct lgm_clk_branch * list)193 lgm_clk_register_divider(struct lgm_clk_provider *ctx,
194 const struct lgm_clk_branch *list)
195 {
196 unsigned long cflags = list->div_flags;
197 struct device *dev = ctx->dev;
198 struct lgm_clk_divider *div;
199 struct clk_init_data init = {};
200 u8 shift = list->div_shift;
201 u8 width = list->div_width;
202 u8 shift_gate = list->div_shift_gate;
203 u8 width_gate = list->div_width_gate;
204 u32 reg = list->div_off;
205 struct clk_hw *hw;
206 int ret;
207
208 div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
209 if (!div)
210 return ERR_PTR(-ENOMEM);
211
212 init.name = list->name;
213 init.ops = &lgm_clk_divider_ops;
214 init.flags = list->flags;
215 init.parent_data = list->parent_data;
216 init.num_parents = 1;
217
218 div->membase = ctx->membase;
219 div->reg = reg;
220 div->shift = shift;
221 div->width = width;
222 div->shift_gate = shift_gate;
223 div->width_gate = width_gate;
224 div->flags = cflags;
225 div->table = list->div_table;
226 div->hw.init = &init;
227
228 hw = &div->hw;
229 ret = devm_clk_hw_register(dev, hw);
230 if (ret)
231 return ERR_PTR(ret);
232
233 if (cflags & CLOCK_FLAG_VAL_INIT)
234 lgm_set_clk_val(div->membase, reg, shift, width, list->div_val);
235
236 return hw;
237 }
238
239 static struct clk_hw *
lgm_clk_register_fixed_factor(struct lgm_clk_provider * ctx,const struct lgm_clk_branch * list)240 lgm_clk_register_fixed_factor(struct lgm_clk_provider *ctx,
241 const struct lgm_clk_branch *list)
242 {
243 struct clk_hw *hw;
244
245 hw = clk_hw_register_fixed_factor(ctx->dev, list->name,
246 list->parent_data[0].name, list->flags,
247 list->mult, list->div);
248 if (IS_ERR(hw))
249 return ERR_CAST(hw);
250
251 if (list->div_flags & CLOCK_FLAG_VAL_INIT)
252 lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift,
253 list->div_width, list->div_val);
254
255 return hw;
256 }
257
lgm_clk_gate_enable(struct clk_hw * hw)258 static int lgm_clk_gate_enable(struct clk_hw *hw)
259 {
260 struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
261 unsigned int reg;
262
263 reg = GATE_HW_REG_EN(gate->reg);
264 lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1);
265
266 return 0;
267 }
268
lgm_clk_gate_disable(struct clk_hw * hw)269 static void lgm_clk_gate_disable(struct clk_hw *hw)
270 {
271 struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
272 unsigned int reg;
273
274 reg = GATE_HW_REG_DIS(gate->reg);
275 lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1);
276 }
277
lgm_clk_gate_is_enabled(struct clk_hw * hw)278 static int lgm_clk_gate_is_enabled(struct clk_hw *hw)
279 {
280 struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
281 unsigned int reg, ret;
282
283 reg = GATE_HW_REG_STAT(gate->reg);
284 ret = lgm_get_clk_val(gate->membase, reg, gate->shift, 1);
285
286 return ret;
287 }
288
/* clk_ops for set/clear-register style hardware gates. */
static const struct clk_ops lgm_clk_gate_ops = {
	.enable = lgm_clk_gate_enable,
	.disable = lgm_clk_gate_disable,
	.is_enabled = lgm_clk_gate_is_enabled,
};
294
295 static struct clk_hw *
lgm_clk_register_gate(struct lgm_clk_provider * ctx,const struct lgm_clk_branch * list)296 lgm_clk_register_gate(struct lgm_clk_provider *ctx,
297 const struct lgm_clk_branch *list)
298 {
299 unsigned long cflags = list->gate_flags;
300 const char *pname = list->parent_data[0].name;
301 struct device *dev = ctx->dev;
302 u8 shift = list->gate_shift;
303 struct clk_init_data init = {};
304 struct lgm_clk_gate *gate;
305 u32 reg = list->gate_off;
306 struct clk_hw *hw;
307 int ret;
308
309 gate = devm_kzalloc(dev, sizeof(*gate), GFP_KERNEL);
310 if (!gate)
311 return ERR_PTR(-ENOMEM);
312
313 init.name = list->name;
314 init.ops = &lgm_clk_gate_ops;
315 init.flags = list->flags;
316 init.parent_names = pname ? &pname : NULL;
317 init.num_parents = pname ? 1 : 0;
318
319 gate->membase = ctx->membase;
320 gate->reg = reg;
321 gate->shift = shift;
322 gate->flags = cflags;
323 gate->hw.init = &init;
324
325 hw = &gate->hw;
326 ret = devm_clk_hw_register(dev, hw);
327 if (ret)
328 return ERR_PTR(ret);
329
330 if (cflags & CLOCK_FLAG_VAL_INIT) {
331 lgm_set_clk_val(gate->membase, reg, shift, 1, list->gate_val);
332 }
333
334 return hw;
335 }
336
lgm_clk_register_branches(struct lgm_clk_provider * ctx,const struct lgm_clk_branch * list,unsigned int nr_clk)337 int lgm_clk_register_branches(struct lgm_clk_provider *ctx,
338 const struct lgm_clk_branch *list,
339 unsigned int nr_clk)
340 {
341 struct clk_hw *hw;
342 unsigned int idx;
343
344 for (idx = 0; idx < nr_clk; idx++, list++) {
345 switch (list->type) {
346 case CLK_TYPE_FIXED:
347 hw = lgm_clk_register_fixed(ctx, list);
348 break;
349 case CLK_TYPE_MUX:
350 hw = lgm_clk_register_mux(ctx, list);
351 break;
352 case CLK_TYPE_DIVIDER:
353 hw = lgm_clk_register_divider(ctx, list);
354 break;
355 case CLK_TYPE_FIXED_FACTOR:
356 hw = lgm_clk_register_fixed_factor(ctx, list);
357 break;
358 case CLK_TYPE_GATE:
359 if (list->gate_flags & GATE_CLK_HW) {
360 hw = lgm_clk_register_gate(ctx, list);
361 } else {
362 /*
363 * GATE_CLKs can be controlled either from
364 * CGU clk driver i.e. this driver or directly
365 * from power management driver/daemon. It is
366 * dependent on the power policy/profile requirements
367 * of the end product. To override control of gate
368 * clks from this driver, provide NULL for this index
369 * of gate clk provider.
370 */
371 hw = NULL;
372 }
373 break;
374
375 default:
376 dev_err(ctx->dev, "invalid clk type\n");
377 return -EINVAL;
378 }
379
380 if (IS_ERR(hw)) {
381 dev_err(ctx->dev,
382 "register clk: %s, type: %u failed!\n",
383 list->name, list->type);
384 return -EIO;
385 }
386 ctx->clk_data.hws[list->id] = hw;
387 }
388
389 return 0;
390 }
391
392 static unsigned long
lgm_clk_ddiv_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)393 lgm_clk_ddiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
394 {
395 struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
396 unsigned int div0, div1, exdiv;
397 u64 prate;
398
399 div0 = lgm_get_clk_val(ddiv->membase, ddiv->reg,
400 ddiv->shift0, ddiv->width0) + 1;
401 div1 = lgm_get_clk_val(ddiv->membase, ddiv->reg,
402 ddiv->shift1, ddiv->width1) + 1;
403 exdiv = lgm_get_clk_val(ddiv->membase, ddiv->reg,
404 ddiv->shift2, ddiv->width2);
405 prate = (u64)parent_rate;
406 do_div(prate, div0);
407 do_div(prate, div1);
408
409 if (exdiv) {
410 do_div(prate, ddiv->div);
411 prate *= ddiv->mult;
412 }
413
414 return prate;
415 }
416
lgm_clk_ddiv_enable(struct clk_hw * hw)417 static int lgm_clk_ddiv_enable(struct clk_hw *hw)
418 {
419 struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
420
421 lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate,
422 ddiv->width_gate, 1);
423 return 0;
424 }
425
lgm_clk_ddiv_disable(struct clk_hw * hw)426 static void lgm_clk_ddiv_disable(struct clk_hw *hw)
427 {
428 struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
429
430 lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate,
431 ddiv->width_gate, 0);
432 }
433
434 static int
lgm_clk_get_ddiv_val(u32 div,u32 * ddiv1,u32 * ddiv2)435 lgm_clk_get_ddiv_val(u32 div, u32 *ddiv1, u32 *ddiv2)
436 {
437 u32 idx, temp;
438
439 *ddiv1 = 1;
440 *ddiv2 = 1;
441
442 if (div > MAX_DIVIDER_VAL)
443 div = MAX_DIVIDER_VAL;
444
445 if (div > 1) {
446 for (idx = 2; idx <= MAX_DDIV_REG; idx++) {
447 temp = DIV_ROUND_UP_ULL((u64)div, idx);
448 if (div % idx == 0 && temp <= MAX_DDIV_REG)
449 break;
450 }
451
452 if (idx > MAX_DDIV_REG)
453 return -EINVAL;
454
455 *ddiv1 = temp;
456 *ddiv2 = idx;
457 }
458
459 return 0;
460 }
461
462 static int
lgm_clk_ddiv_set_rate(struct clk_hw * hw,unsigned long rate,unsigned long prate)463 lgm_clk_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
464 unsigned long prate)
465 {
466 struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
467 u32 div, ddiv1, ddiv2;
468
469 div = DIV_ROUND_CLOSEST_ULL((u64)prate, rate);
470
471 if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
472 div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
473 div = div * 2;
474 }
475
476 if (div <= 0)
477 return -EINVAL;
478
479 if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2))
480 return -EINVAL;
481
482 lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift0, ddiv->width0,
483 ddiv1 - 1);
484
485 lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift1, ddiv->width1,
486 ddiv2 - 1);
487
488 return 0;
489 }
490
lgm_clk_ddiv_determine_rate(struct clk_hw * hw,struct clk_rate_request * req)491 static int lgm_clk_ddiv_determine_rate(struct clk_hw *hw,
492 struct clk_rate_request *req)
493 {
494 struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
495 u32 div, ddiv1, ddiv2;
496 u64 rate64;
497
498 div = DIV_ROUND_CLOSEST_ULL((u64)req->best_parent_rate, req->rate);
499
500 /* if predivide bit is enabled, modify div by factor of 2.5 */
501 if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
502 div = div * 2;
503 div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
504 }
505
506 if (div <= 0) {
507 req->rate = req->best_parent_rate;
508
509 return 0;
510 }
511
512 if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2) != 0)
513 if (lgm_clk_get_ddiv_val(div + 1, &ddiv1, &ddiv2) != 0)
514 return -EINVAL;
515
516 rate64 = req->best_parent_rate;
517 do_div(rate64, ddiv1);
518 do_div(rate64, ddiv2);
519
520 /* if predivide bit is enabled, modify rounded rate by factor of 2.5 */
521 if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
522 rate64 = rate64 * 2;
523 rate64 = DIV_ROUND_CLOSEST_ULL(rate64, 5);
524 }
525
526 req->rate = rate64;
527
528 return 0;
529 }
530
/* clk_ops for the dual (two-field) divider with optional 2/5 pre-divide. */
static const struct clk_ops lgm_clk_ddiv_ops = {
	.recalc_rate = lgm_clk_ddiv_recalc_rate,
	.enable = lgm_clk_ddiv_enable,
	.disable = lgm_clk_ddiv_disable,
	.set_rate = lgm_clk_ddiv_set_rate,
	.determine_rate = lgm_clk_ddiv_determine_rate,
};
538
lgm_clk_register_ddiv(struct lgm_clk_provider * ctx,const struct lgm_clk_ddiv_data * list,unsigned int nr_clk)539 int lgm_clk_register_ddiv(struct lgm_clk_provider *ctx,
540 const struct lgm_clk_ddiv_data *list,
541 unsigned int nr_clk)
542 {
543 struct device *dev = ctx->dev;
544 struct clk_hw *hw;
545 unsigned int idx;
546 int ret;
547
548 for (idx = 0; idx < nr_clk; idx++, list++) {
549 struct clk_init_data init = {};
550 struct lgm_clk_ddiv *ddiv;
551
552 ddiv = devm_kzalloc(dev, sizeof(*ddiv), GFP_KERNEL);
553 if (!ddiv)
554 return -ENOMEM;
555
556 init.name = list->name;
557 init.ops = &lgm_clk_ddiv_ops;
558 init.flags = list->flags;
559 init.parent_data = list->parent_data;
560 init.num_parents = 1;
561
562 ddiv->membase = ctx->membase;
563 ddiv->reg = list->reg;
564 ddiv->shift0 = list->shift0;
565 ddiv->width0 = list->width0;
566 ddiv->shift1 = list->shift1;
567 ddiv->width1 = list->width1;
568 ddiv->shift_gate = list->shift_gate;
569 ddiv->width_gate = list->width_gate;
570 ddiv->shift2 = list->ex_shift;
571 ddiv->width2 = list->ex_width;
572 ddiv->flags = list->div_flags;
573 ddiv->mult = 2;
574 ddiv->div = 5;
575 ddiv->hw.init = &init;
576
577 hw = &ddiv->hw;
578 ret = devm_clk_hw_register(dev, hw);
579 if (ret) {
580 dev_err(dev, "register clk: %s failed!\n", list->name);
581 return ret;
582 }
583 ctx->clk_data.hws[list->id] = hw;
584 }
585
586 return 0;
587 }
588