// SPDX-License-Identifier: GPL-2.0-only
/*
 * PLL clock driver for the Mobileye EyeQ5, EyeQ6L and EyeQ6H platforms.
 *
 * This controller handles:
 * - Read-only PLLs, all derived from the same main crystal clock.
 * - Divider clocks, children of PLLs.
 * - Fixed factor clocks, children of PLLs.
 *
 * The parent clock is expected to be constant. This driver's registers live
 * in a shared region called OLB. Some PLLs and fixed factors are initialised
 * early by of_clk_init(); if so, two clk providers are registered.
 *
 * We use eqc_ as prefix, as in "EyeQ Clock", but way shorter.
 *
 * Copyright (C) 2024 Mobileye Vision Technologies Ltd.
 */

/*
 * Set pr_fmt() for printing from eqc_early_init().
 * It is called at of_clk_init() stage (read: really early).
 */
#define pr_fmt(fmt) "clk-eyeq: " fmt

#include <linux/array_size.h>
#include <linux/auxiliary_bus.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/overflow.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include <dt-bindings/clock/mobileye,eyeq5-clk.h>

/* In fractional mode, enables the fractional noise-canceling DAC. Otherwise it has no function. */
#define PCSR0_DAC_EN BIT(0)
/* Fractional or integer mode */
#define PCSR0_DSM_EN BIT(1)
#define PCSR0_PLL_EN BIT(2)
/* All clocks output held at 0 */
#define PCSR0_FOUTPOSTDIV_EN BIT(3)
#define PCSR0_POST_DIV1 GENMASK(6, 4)
#define PCSR0_POST_DIV2 GENMASK(9, 7)
#define PCSR0_REF_DIV GENMASK(15, 10)
#define PCSR0_INTIN GENMASK(27, 16)
#define PCSR0_BYPASS BIT(28)
/* Bits 30..29 are reserved */
#define PCSR0_PLL_LOCKED BIT(31)

#define PCSR1_RESET BIT(0)
#define PCSR1_SSGC_DIV GENMASK(4, 1)
/* Spread amplitude (% = 0.1 * SPREAD[4:0]) */
#define PCSR1_SPREAD GENMASK(9, 5)
#define PCSR1_DIS_SSCG BIT(10)
/* Down-spread or center-spread */
#define PCSR1_DOWN_SPREAD BIT(11)
#define PCSR1_FRAC_IN GENMASK(31, 12)

struct eqc_pll {
	unsigned int index;
	const char *name;
	unsigned int reg64;
};

/*
 * Divider clock. Divider is 2*(v+1), with v the register value.
 * Min divider is 2, max is 2*(2^width).
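 *
 * For example (illustrative values, not tied to any particular SoC): with
 * width = 4, the register value v ranges over 0..15, giving dividers
 * 2, 4, ..., 32; v = 3 divides the parent rate by 2 * (3 + 1) = 8.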
 */
struct eqc_div {
	unsigned int index;
	const char *name;
	unsigned int parent;
	unsigned int reg;
	u8 shift;
	u8 width;
};

struct eqc_fixed_factor {
	unsigned int index;
	const char *name;
	unsigned int mult;
	unsigned int div;
	unsigned int parent;
};

struct eqc_match_data {
	unsigned int pll_count;
	const struct eqc_pll *plls;

	unsigned int div_count;
	const struct eqc_div *divs;

	unsigned int fixed_factor_count;
	const struct eqc_fixed_factor *fixed_factors;

	const char *reset_auxdev_name;
	const char *pinctrl_auxdev_name;

	unsigned int early_clk_count;
};

struct eqc_early_match_data {
	unsigned int early_pll_count;
	const struct eqc_pll *early_plls;

	unsigned int early_fixed_factor_count;
	const struct eqc_fixed_factor *early_fixed_factors;

	/*
	 * We want our of_xlate callback to return -EPROBE_DEFER instead of
	 * calling dev_err() and returning -EINVAL. For that, we must know
	 * the total clock count.
	 */
	unsigned int late_clk_count;
};

/*
 * Both factors (mult and div) must fit in 32 bits. When an operation
 * overflows, this function throws away low bits so that factors still fit
 * in 32 bits.
 *
 * Precision loss depends on the magnitude of mult and div. Worst theoretical
 * loss is: (UINT_MAX+1) / UINT_MAX - 1 = 2.3e-10.
 * This is 1Hz every 4.3GHz.
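 *
 * Worked example (hypothetical values, purely illustrative): with
 * mult = 0x180000000 (a 33-bit value) and div = 6, the highest set bit of
 * mult is bit 32, so shift = 1; the factors become mult = 0xC0000000 and
 * div = 3, the same ratio now within unsigned int range.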
 */
static void eqc_pll_downshift_factors(unsigned long *mult, unsigned long *div)
{
	unsigned long biggest;
	unsigned int shift;

	/* This function can be removed if mult/div switch to unsigned long. */
	static_assert(sizeof_field(struct clk_fixed_factor, mult) == sizeof(unsigned int));
	static_assert(sizeof_field(struct clk_fixed_factor, div) == sizeof(unsigned int));

	/* No overflow, nothing to be done. */
	if (*mult <= UINT_MAX && *div <= UINT_MAX)
		return;

	/*
	 * Compute the shift required to bring the biggest factor into unsigned
	 * int range. That is, shift its highest set bit to the unsigned int
	 * most significant bit.
	 */
	biggest = max(*mult, *div);
	shift = __fls(biggest) - (BITS_PER_BYTE * sizeof(unsigned int)) + 1;

	*mult >>= shift;
	*div >>= shift;
}

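/*
 * How the PCSR fields combine into a rate, as implemented by
 * eqc_pll_parse_registers() below (PLL locked and not bypassed):
 *
 *   Fout = Fref * (INTIN + FRAC_IN / 2^20) / (REF_DIV * POST_DIV1 * POST_DIV2)
 *
 * where the FRAC_IN term only contributes when DSM_EN is set, and the two
 * post-dividers only when FOUTPOSTDIV_EN is set. Spread spectrum settings
 * then determine the reported accuracy.
 */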
static int eqc_pll_parse_registers(u32 r0, u32 r1, unsigned long *mult,
				   unsigned long *div, unsigned long *acc)
{
	u32 spread;

	if (r0 & PCSR0_BYPASS) {
		*mult = 1;
		*div = 1;
		*acc = 0;
		return 0;
	}

	if (!(r0 & PCSR0_PLL_LOCKED))
		return -EINVAL;

	*mult = FIELD_GET(PCSR0_INTIN, r0);
	*div = FIELD_GET(PCSR0_REF_DIV, r0);
	if (r0 & PCSR0_FOUTPOSTDIV_EN)
		*div *= FIELD_GET(PCSR0_POST_DIV1, r0) * FIELD_GET(PCSR0_POST_DIV2, r0);

	/* Fractional mode, in 2^20 (0x100000) parts. */
	if (r0 & PCSR0_DSM_EN) {
		*div *= (1ULL << 20);
		*mult = *mult * (1ULL << 20) + FIELD_GET(PCSR1_FRAC_IN, r1);
	}

	if (!*mult || !*div)
		return -EINVAL;

	if (r1 & (PCSR1_RESET | PCSR1_DIS_SSCG)) {
		*acc = 0;
		return 0;
	}

	/*
	 * Spread spectrum.
	 *
	 * Spread is 1/1000 parts of frequency, accuracy is half of
	 * that. To get accuracy, convert to ppb (parts per billion).
	 *
	 * acc = spread * 1e6 / 2
	 * with acc in parts per billion and spread in parts per thousand.
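	 *
	 * For example (illustrative value only): SPREAD = 5 means a 0.5%
	 * spread, so acc = 5 * 500000 = 2500000 ppb, i.e. 0.25% accuracy
	 * around the nominal rate.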
	 */
	spread = FIELD_GET(PCSR1_SPREAD, r1);
	*acc = spread * 500000;

	if (r1 & PCSR1_DOWN_SPREAD) {
		/*
		 * Downspreading: the central frequency is half a
		 * spread lower.
		 */
		*mult *= 2000 - spread;
		*div *= 2000;

		/*
		 * Previous operation might overflow 32 bits. If it
		 * does, throw away the least amount of low bits.
		 */
		eqc_pll_downshift_factors(mult, div);
	}

	return 0;
}

static void eqc_probe_init_plls(struct device *dev, const struct eqc_match_data *data,
				void __iomem *base, struct clk_hw_onecell_data *cells)
{
	unsigned long mult, div, acc;
	const struct eqc_pll *pll;
	struct clk_hw *hw;
	unsigned int i;
	u32 r0, r1;
	u64 val;
	int ret;

	for (i = 0; i < data->pll_count; i++) {
		pll = &data->plls[i];

		val = readq(base + pll->reg64);
		r0 = val;
		r1 = val >> 32;

		ret = eqc_pll_parse_registers(r0, r1, &mult, &div, &acc);
		if (ret) {
			dev_warn(dev, "failed parsing state of %s\n", pll->name);
			cells->hws[pll->index] = ERR_PTR(ret);
			continue;
		}

		hw = clk_hw_register_fixed_factor_with_accuracy_fwname(dev,
				dev->of_node, pll->name, "ref", 0, mult, div, acc);
		cells->hws[pll->index] = hw;
		if (IS_ERR(hw))
			dev_warn(dev, "failed registering %s: %pe\n", pll->name, hw);
	}
}

static void eqc_probe_init_divs(struct device *dev, const struct eqc_match_data *data,
				void __iomem *base, struct clk_hw_onecell_data *cells)
{
	struct clk_parent_data parent_data = { };
	const struct eqc_div *div;
	struct clk_hw *parent;
	void __iomem *reg;
	struct clk_hw *hw;
	unsigned int i;

	for (i = 0; i < data->div_count; i++) {
		div = &data->divs[i];
		reg = base + div->reg;
		parent = cells->hws[div->parent];

		if (IS_ERR(parent)) {
			/* Parent is in early clk provider. */
			parent_data.index = div->parent;
			parent_data.hw = NULL;
		} else {
			/* Avoid clock lookup when we already have the hw reference. */
			parent_data.index = 0;
			parent_data.hw = parent;
		}

		hw = clk_hw_register_divider_table_parent_data(dev, div->name,
				&parent_data, 0, reg, div->shift, div->width,
				CLK_DIVIDER_EVEN_INTEGERS, NULL, NULL);
		cells->hws[div->index] = hw;
		if (IS_ERR(hw))
			dev_warn(dev, "failed registering %s: %pe\n",
				 div->name, hw);
	}
}

static void eqc_probe_init_fixed_factors(struct device *dev,
					 const struct eqc_match_data *data,
					 struct clk_hw_onecell_data *cells)
{
	const struct eqc_fixed_factor *ff;
	struct clk_hw *hw, *parent_hw;
	unsigned int i;

	for (i = 0; i < data->fixed_factor_count; i++) {
		ff = &data->fixed_factors[i];
		parent_hw = cells->hws[ff->parent];

		if (IS_ERR(parent_hw)) {
			/* Parent is in early clk provider. */
			hw = clk_hw_register_fixed_factor_index(dev, ff->name,
					ff->parent, 0, ff->mult, ff->div);
		} else {
			/* Avoid clock lookup when we already have the hw reference. */
			hw = clk_hw_register_fixed_factor_parent_hw(dev, ff->name,
					parent_hw, 0, ff->mult, ff->div);
		}

		cells->hws[ff->index] = hw;
		if (IS_ERR(hw))
			dev_warn(dev, "failed registering %s: %pe\n",
				 ff->name, hw);
	}
}

static void eqc_auxdev_release(struct device *dev)
{
	struct auxiliary_device *adev = to_auxiliary_dev(dev);

	kfree(adev);
}

static int eqc_auxdev_create(struct device *dev, void __iomem *base,
			     const char *name, u32 id)
{
	struct auxiliary_device *adev;
	int ret;

	adev = kzalloc(sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;

	adev->name = name;
	adev->dev.parent = dev;
	adev->dev.platform_data = (void __force *)base;
	adev->dev.release = eqc_auxdev_release;
	adev->id = id;

	ret = auxiliary_device_init(adev);
	if (ret)
		return ret;

	ret = auxiliary_device_add(adev);
	if (ret)
		auxiliary_device_uninit(adev);

	return ret;
}

static int eqc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct eqc_match_data *data;
	struct clk_hw_onecell_data *cells;
	unsigned int i, clk_count;
	struct resource *res;
	void __iomem *base;
	int ret;

	data = device_get_match_data(dev);
	if (!data)
		return 0; /* No clocks nor auxdevs, we are done. */

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	base = ioremap(res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	/* Init optional reset auxiliary device. */
	if (data->reset_auxdev_name) {
		ret = eqc_auxdev_create(dev, base, data->reset_auxdev_name, 0);
		if (ret)
			dev_warn(dev, "failed creating auxiliary device %s.%s: %d\n",
				 KBUILD_MODNAME, data->reset_auxdev_name, ret);
	}

	/* Init optional pinctrl auxiliary device. */
	if (data->pinctrl_auxdev_name) {
		ret = eqc_auxdev_create(dev, base, data->pinctrl_auxdev_name, 0);
		if (ret)
			dev_warn(dev, "failed creating auxiliary device %s.%s: %d\n",
				 KBUILD_MODNAME, data->pinctrl_auxdev_name, ret);
	}

	if (data->pll_count + data->div_count + data->fixed_factor_count == 0)
		return 0; /* Zero clocks, we are done. */

	clk_count = data->pll_count + data->div_count +
		    data->fixed_factor_count + data->early_clk_count;
	cells = kzalloc(struct_size(cells, hws, clk_count), GFP_KERNEL);
	if (!cells)
		return -ENOMEM;

	cells->num = clk_count;

	/* Early PLLs are marked as errors: the early provider will get queried. */
	for (i = 0; i < clk_count; i++)
		cells->hws[i] = ERR_PTR(-EINVAL);

	eqc_probe_init_plls(dev, data, base, cells);

	eqc_probe_init_divs(dev, data, base, cells);

	eqc_probe_init_fixed_factors(dev, data, cells);

	return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, cells);
}

/* Required early for GIC timer (pll-cpu) and UARTs (pll-per). */
static const struct eqc_pll eqc_eyeq5_early_plls[] = {
	{ .index = EQ5C_PLL_CPU, .name = "pll-cpu", .reg64 = 0x02C },
	{ .index = EQ5C_PLL_PER, .name = "pll-per", .reg64 = 0x05C },
};

static const struct eqc_pll eqc_eyeq5_plls[] = {
	{ .index = EQ5C_PLL_VMP, .name = "pll-vmp", .reg64 = 0x034 },
	{ .index = EQ5C_PLL_PMA, .name = "pll-pma", .reg64 = 0x03C },
	{ .index = EQ5C_PLL_VDI, .name = "pll-vdi", .reg64 = 0x044 },
	{ .index = EQ5C_PLL_DDR0, .name = "pll-ddr0", .reg64 = 0x04C },
	{ .index = EQ5C_PLL_PCI, .name = "pll-pci", .reg64 = 0x054 },
	{ .index = EQ5C_PLL_PMAC, .name = "pll-pmac", .reg64 = 0x064 },
	{ .index = EQ5C_PLL_MPC, .name = "pll-mpc", .reg64 = 0x06C },
	{ .index = EQ5C_PLL_DDR1, .name = "pll-ddr1", .reg64 = 0x074 },
};

enum {
	/*
	 * EQ5C_PLL_CPU children.
	 * EQ5C_PER_OCC_PCI is the last clock exposed in dt-bindings.
	 */
	EQ5C_CPU_OCC = EQ5C_PER_OCC_PCI + 1,
	EQ5C_CPU_SI_CSS0,
	EQ5C_CPU_CPC,
	EQ5C_CPU_CM,
	EQ5C_CPU_MEM,
	EQ5C_CPU_OCC_ISRAM,
	EQ5C_CPU_ISRAM,
	EQ5C_CPU_OCC_DBU,
	EQ5C_CPU_SI_DBU_TP,

	/*
	 * EQ5C_PLL_VDI children.
	 */
	EQ5C_VDI_OCC_VDI,
	EQ5C_VDI_VDI,
	EQ5C_VDI_OCC_CAN_SER,
	EQ5C_VDI_CAN_SER,
	EQ5C_VDI_I2C_SER,

	/*
	 * EQ5C_PLL_PER children.
	 */
	EQ5C_PER_PERIPH,
	EQ5C_PER_CAN,
	EQ5C_PER_TIMER,
	EQ5C_PER_CCF,
	EQ5C_PER_OCC_MJPEG,
	EQ5C_PER_HSM,
	EQ5C_PER_MJPEG,
	EQ5C_PER_FCMU_A,
};

static const struct eqc_fixed_factor eqc_eyeq5_early_fixed_factors[] = {
	/* EQ5C_PLL_CPU children */
	{ EQ5C_CPU_OCC, "occ-cpu", 1, 1, EQ5C_PLL_CPU },
	{ EQ5C_CPU_SI_CSS0, "si-css0", 1, 1, EQ5C_CPU_OCC },
	{ EQ5C_CPU_CORE0, "core0", 1, 1, EQ5C_CPU_SI_CSS0 },
	{ EQ5C_CPU_CORE1, "core1", 1, 1, EQ5C_CPU_SI_CSS0 },
	{ EQ5C_CPU_CORE2, "core2", 1, 1, EQ5C_CPU_SI_CSS0 },
	{ EQ5C_CPU_CORE3, "core3", 1, 1, EQ5C_CPU_SI_CSS0 },

	/* EQ5C_PLL_PER children */
	{ EQ5C_PER_OCC, "occ-periph", 1, 16, EQ5C_PLL_PER },
	{ EQ5C_PER_UART, "uart", 1, 1, EQ5C_PER_OCC },
};

static const struct eqc_fixed_factor eqc_eyeq5_fixed_factors[] = {
	/* EQ5C_PLL_CPU children */
	{ EQ5C_CPU_CPC, "cpc", 1, 1, EQ5C_CPU_SI_CSS0 },
	{ EQ5C_CPU_CM, "cm", 1, 1, EQ5C_CPU_SI_CSS0 },
	{ EQ5C_CPU_MEM, "mem", 1, 1, EQ5C_CPU_SI_CSS0 },
	{ EQ5C_CPU_OCC_ISRAM, "occ-isram", 1, 2, EQ5C_PLL_CPU },
	{ EQ5C_CPU_ISRAM, "isram", 1, 1, EQ5C_CPU_OCC_ISRAM },
	{ EQ5C_CPU_OCC_DBU, "occ-dbu", 1, 10, EQ5C_PLL_CPU },
	{ EQ5C_CPU_SI_DBU_TP, "si-dbu-tp", 1, 1, EQ5C_CPU_OCC_DBU },

	/* EQ5C_PLL_VDI children */
	{ EQ5C_VDI_OCC_VDI, "occ-vdi", 1, 2, EQ5C_PLL_VDI },
	{ EQ5C_VDI_VDI, "vdi", 1, 1, EQ5C_VDI_OCC_VDI },
	{ EQ5C_VDI_OCC_CAN_SER, "occ-can-ser", 1, 16, EQ5C_PLL_VDI },
	{ EQ5C_VDI_CAN_SER, "can-ser", 1, 1, EQ5C_VDI_OCC_CAN_SER },
	{ EQ5C_VDI_I2C_SER, "i2c-ser", 1, 20, EQ5C_PLL_VDI },

	/* EQ5C_PLL_PER children */
	{ EQ5C_PER_PERIPH, "periph", 1, 1, EQ5C_PER_OCC },
	{ EQ5C_PER_CAN, "can", 1, 1, EQ5C_PER_OCC },
	{ EQ5C_PER_SPI, "spi", 1, 1, EQ5C_PER_OCC },
	{ EQ5C_PER_I2C, "i2c", 1, 1, EQ5C_PER_OCC },
	{ EQ5C_PER_TIMER, "timer", 1, 1, EQ5C_PER_OCC },
	{ EQ5C_PER_GPIO, "gpio", 1, 1, EQ5C_PER_OCC },
	{ EQ5C_PER_EMMC, "emmc-sys", 1, 10, EQ5C_PLL_PER },
	{ EQ5C_PER_CCF, "ccf-ctrl", 1, 4, EQ5C_PLL_PER },
	{ EQ5C_PER_OCC_MJPEG, "occ-mjpeg", 1, 2, EQ5C_PLL_PER },
	{ EQ5C_PER_HSM, "hsm", 1, 1, EQ5C_PER_OCC_MJPEG },
	{ EQ5C_PER_MJPEG, "mjpeg", 1, 1, EQ5C_PER_OCC_MJPEG },
	{ EQ5C_PER_FCMU_A, "fcmu-a", 1, 20, EQ5C_PLL_PER },
	{ EQ5C_PER_OCC_PCI, "occ-pci-sys", 1, 8, EQ5C_PLL_PER },
};

static const struct eqc_div eqc_eyeq5_divs[] = {
	{
		.index = EQ5C_DIV_OSPI,
		.name = "div-ospi",
		.parent = EQ5C_PLL_PER,
		.reg = 0x11C,
		.shift = 0,
		.width = 4,
	},
};

static const struct eqc_early_match_data eqc_eyeq5_early_match_data __initconst = {
	.early_pll_count = ARRAY_SIZE(eqc_eyeq5_early_plls),
	.early_plls = eqc_eyeq5_early_plls,

	.early_fixed_factor_count = ARRAY_SIZE(eqc_eyeq5_early_fixed_factors),
	.early_fixed_factors = eqc_eyeq5_early_fixed_factors,

	.late_clk_count = ARRAY_SIZE(eqc_eyeq5_plls) + ARRAY_SIZE(eqc_eyeq5_divs) +
			  ARRAY_SIZE(eqc_eyeq5_fixed_factors),
};

static const struct eqc_match_data eqc_eyeq5_match_data = {
	.pll_count = ARRAY_SIZE(eqc_eyeq5_plls),
	.plls = eqc_eyeq5_plls,

	.div_count = ARRAY_SIZE(eqc_eyeq5_divs),
	.divs = eqc_eyeq5_divs,

	.fixed_factor_count = ARRAY_SIZE(eqc_eyeq5_fixed_factors),
	.fixed_factors = eqc_eyeq5_fixed_factors,

	.reset_auxdev_name = "reset",
	.pinctrl_auxdev_name = "pinctrl",

	.early_clk_count = ARRAY_SIZE(eqc_eyeq5_early_plls) +
			   ARRAY_SIZE(eqc_eyeq5_early_fixed_factors),
};

static const struct eqc_pll eqc_eyeq6l_plls[] = {
	{ .index = EQ6LC_PLL_DDR, .name = "pll-ddr", .reg64 = 0x02C },
	{ .index = EQ6LC_PLL_CPU, .name = "pll-cpu", .reg64 = 0x034 }, /* also acc */
	{ .index = EQ6LC_PLL_PER, .name = "pll-per", .reg64 = 0x03C },
	{ .index = EQ6LC_PLL_VDI, .name = "pll-vdi", .reg64 = 0x044 },
};

static const struct eqc_match_data eqc_eyeq6l_match_data = {
	.pll_count = ARRAY_SIZE(eqc_eyeq6l_plls),
	.plls = eqc_eyeq6l_plls,

	.reset_auxdev_name = "reset",
};

static const struct eqc_match_data eqc_eyeq6h_west_match_data = {
	.reset_auxdev_name = "reset_west",
};

static const struct eqc_pll eqc_eyeq6h_east_plls[] = {
	{ .index = 0, .name = "pll-east", .reg64 = 0x074 },
};

static const struct eqc_match_data eqc_eyeq6h_east_match_data = {
	.pll_count = ARRAY_SIZE(eqc_eyeq6h_east_plls),
	.plls = eqc_eyeq6h_east_plls,

	.reset_auxdev_name = "reset_east",
};

static const struct eqc_pll eqc_eyeq6h_south_plls[] = {
	{ .index = EQ6HC_SOUTH_PLL_VDI, .name = "pll-vdi", .reg64 = 0x000 },
	{ .index = EQ6HC_SOUTH_PLL_PCIE, .name = "pll-pcie", .reg64 = 0x008 },
	{ .index = EQ6HC_SOUTH_PLL_PER, .name = "pll-per", .reg64 = 0x010 },
	{ .index = EQ6HC_SOUTH_PLL_ISP, .name = "pll-isp", .reg64 = 0x018 },
};

static const struct eqc_div eqc_eyeq6h_south_divs[] = {
	{
		.index = EQ6HC_SOUTH_DIV_EMMC,
		.name = "div-emmc",
		.parent = EQ6HC_SOUTH_PLL_PER,
		.reg = 0x070,
		.shift = 4,
		.width = 4,
	},
	{
		.index = EQ6HC_SOUTH_DIV_OSPI_REF,
		.name = "div-ospi-ref",
		.parent = EQ6HC_SOUTH_PLL_PER,
		.reg = 0x090,
		.shift = 4,
		.width = 4,
	},
	{
		.index = EQ6HC_SOUTH_DIV_OSPI_SYS,
		.name = "div-ospi-sys",
		.parent = EQ6HC_SOUTH_PLL_PER,
		.reg = 0x090,
		.shift = 8,
		.width = 1,
	},
	{
		.index = EQ6HC_SOUTH_DIV_TSU,
		.name = "div-tsu",
		.parent = EQ6HC_SOUTH_PLL_PCIE,
		.reg = 0x098,
		.shift = 4,
		.width = 8,
	},
};

static const struct eqc_match_data eqc_eyeq6h_south_match_data = {
	.pll_count = ARRAY_SIZE(eqc_eyeq6h_south_plls),
	.plls = eqc_eyeq6h_south_plls,

	.div_count = ARRAY_SIZE(eqc_eyeq6h_south_divs),
	.divs = eqc_eyeq6h_south_divs,
};

static const struct eqc_pll eqc_eyeq6h_ddr0_plls[] = {
	{ .index = 0, .name = "pll-ddr0", .reg64 = 0x074 },
};

static const struct eqc_match_data eqc_eyeq6h_ddr0_match_data = {
	.pll_count = ARRAY_SIZE(eqc_eyeq6h_ddr0_plls),
	.plls = eqc_eyeq6h_ddr0_plls,
};

static const struct eqc_pll eqc_eyeq6h_ddr1_plls[] = {
	{ .index = 0, .name = "pll-ddr1", .reg64 = 0x074 },
};

static const struct eqc_match_data eqc_eyeq6h_ddr1_match_data = {
	.pll_count = ARRAY_SIZE(eqc_eyeq6h_ddr1_plls),
	.plls = eqc_eyeq6h_ddr1_plls,
};

static const struct eqc_pll eqc_eyeq6h_acc_plls[] = {
	{ .index = EQ6HC_ACC_PLL_XNN, .name = "pll-xnn", .reg64 = 0x040 },
	{ .index = EQ6HC_ACC_PLL_VMP, .name = "pll-vmp", .reg64 = 0x050 },
	{ .index = EQ6HC_ACC_PLL_PMA, .name = "pll-pma", .reg64 = 0x05C },
	{ .index = EQ6HC_ACC_PLL_MPC, .name = "pll-mpc", .reg64 = 0x068 },
	{ .index = EQ6HC_ACC_PLL_NOC, .name = "pll-noc", .reg64 = 0x070 },
};

static const struct eqc_match_data eqc_eyeq6h_acc_match_data = {
	.pll_count = ARRAY_SIZE(eqc_eyeq6h_acc_plls),
	.plls = eqc_eyeq6h_acc_plls,

	.reset_auxdev_name = "reset_acc",
};

static const struct of_device_id eqc_match_table[] = {
	{ .compatible = "mobileye,eyeq5-olb", .data = &eqc_eyeq5_match_data },
	{ .compatible = "mobileye,eyeq6l-olb", .data = &eqc_eyeq6l_match_data },
	{ .compatible = "mobileye,eyeq6h-west-olb", .data = &eqc_eyeq6h_west_match_data },
	{ .compatible = "mobileye,eyeq6h-east-olb", .data = &eqc_eyeq6h_east_match_data },
	{ .compatible = "mobileye,eyeq6h-south-olb", .data = &eqc_eyeq6h_south_match_data },
	{ .compatible = "mobileye,eyeq6h-ddr0-olb", .data = &eqc_eyeq6h_ddr0_match_data },
	{ .compatible = "mobileye,eyeq6h-ddr1-olb", .data = &eqc_eyeq6h_ddr1_match_data },
	{ .compatible = "mobileye,eyeq6h-acc-olb", .data = &eqc_eyeq6h_acc_match_data },
	{}
};

static struct platform_driver eqc_driver = {
	.probe = eqc_probe,
	.driver = {
		.name = "clk-eyeq",
		.of_match_table = eqc_match_table,
		.suppress_bind_attrs = true,
	},
};
builtin_platform_driver(eqc_driver);

/* Required early for GIC timer. */
static const struct eqc_pll eqc_eyeq6h_central_early_plls[] = {
	{ .index = EQ6HC_CENTRAL_PLL_CPU, .name = "pll-cpu", .reg64 = 0x02C },
};

static const struct eqc_fixed_factor eqc_eyeq6h_central_early_fixed_factors[] = {
	{ EQ6HC_CENTRAL_CPU_OCC, "occ-cpu", 1, 1, EQ6HC_CENTRAL_PLL_CPU },
};

static const struct eqc_early_match_data eqc_eyeq6h_central_early_match_data __initconst = {
	.early_pll_count = ARRAY_SIZE(eqc_eyeq6h_central_early_plls),
	.early_plls = eqc_eyeq6h_central_early_plls,

	.early_fixed_factor_count = ARRAY_SIZE(eqc_eyeq6h_central_early_fixed_factors),
	.early_fixed_factors = eqc_eyeq6h_central_early_fixed_factors,
};

/* Required early for UART. */
static const struct eqc_pll eqc_eyeq6h_west_early_plls[] = {
	{ .index = EQ6HC_WEST_PLL_PER, .name = "pll-west", .reg64 = 0x074 },
};

static const struct eqc_fixed_factor eqc_eyeq6h_west_early_fixed_factors[] = {
	{ EQ6HC_WEST_PER_OCC, "west-per-occ", 1, 10, EQ6HC_WEST_PLL_PER },
	{ EQ6HC_WEST_PER_UART, "west-per-uart", 1, 1, EQ6HC_WEST_PER_OCC },
};

static const struct eqc_early_match_data eqc_eyeq6h_west_early_match_data __initconst = {
	.early_pll_count = ARRAY_SIZE(eqc_eyeq6h_west_early_plls),
	.early_plls = eqc_eyeq6h_west_early_plls,

	.early_fixed_factor_count = ARRAY_SIZE(eqc_eyeq6h_west_early_fixed_factors),
	.early_fixed_factors = eqc_eyeq6h_west_early_fixed_factors,
};

static void __init eqc_early_init(struct device_node *np,
				  const struct eqc_early_match_data *early_data)
{
	struct clk_hw_onecell_data *cells;
	unsigned int i, clk_count;
	void __iomem *base;
	int ret;

	clk_count = early_data->early_pll_count + early_data->early_fixed_factor_count +
		    early_data->late_clk_count;
	cells = kzalloc(struct_size(cells, hws, clk_count), GFP_KERNEL);
	if (!cells) {
		ret = -ENOMEM;
		goto err;
	}

	cells->num = clk_count;

	/*
	 * Mark all clocks as deferred; some are registered here, the rest at
	 * platform device probe.
	 *
	 * Once the platform device is probed, its provider will take priority
	 * when looking up clocks.
	 */
	for (i = 0; i < clk_count; i++)
		cells->hws[i] = ERR_PTR(-EPROBE_DEFER);

	/* Offsets (reg64) of early PLLs are relative to OLB block. */
	base = of_iomap(np, 0);
	if (!base) {
		ret = -ENODEV;
		goto err;
	}

	for (i = 0; i < early_data->early_pll_count; i++) {
		const struct eqc_pll *pll = &early_data->early_plls[i];
		unsigned long mult, div, acc;
		struct clk_hw *hw;
		u32 r0, r1;
		u64 val;

		val = readq(base + pll->reg64);
		r0 = val;
		r1 = val >> 32;

		ret = eqc_pll_parse_registers(r0, r1, &mult, &div, &acc);
		if (ret) {
			pr_err("failed parsing state of %s\n", pll->name);
			goto err;
		}

		hw = clk_hw_register_fixed_factor_with_accuracy_fwname(NULL,
				np, pll->name, "ref", 0, mult, div, acc);
		cells->hws[pll->index] = hw;
		if (IS_ERR(hw)) {
			pr_err("failed registering %s: %pe\n", pll->name, hw);
			ret = PTR_ERR(hw);
			goto err;
		}
	}

	for (i = 0; i < early_data->early_fixed_factor_count; i++) {
		const struct eqc_fixed_factor *ff = &early_data->early_fixed_factors[i];
		struct clk_hw *parent_hw = cells->hws[ff->parent];
		struct clk_hw *hw;

		hw = clk_hw_register_fixed_factor_parent_hw(NULL, ff->name,
				parent_hw, 0, ff->mult, ff->div);
		cells->hws[ff->index] = hw;
		if (IS_ERR(hw)) {
			pr_err("failed registering %s: %pe\n", ff->name, hw);
			ret = PTR_ERR(hw);
			goto err;
		}
	}

	ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, cells);
	if (ret) {
		pr_err("failed registering clk provider: %d\n", ret);
		goto err;
	}

	return;

err:
	/*
	 * We are doomed. The system will not be able to boot.
	 *
	 * Let's still try to be good citizens by freeing resources and
	 * printing a last error message that might help debugging.
	 */

	pr_err("failed clk init: %d\n", ret);

	if (cells) {
		of_clk_del_provider(np);

		for (i = 0; i < early_data->early_pll_count; i++) {
			const struct eqc_pll *pll = &early_data->early_plls[i];
			struct clk_hw *hw = cells->hws[pll->index];

			if (!IS_ERR_OR_NULL(hw))
				clk_hw_unregister_fixed_factor(hw);
		}

		kfree(cells);
	}
}

static void __init eqc_eyeq5_early_init(struct device_node *np)
{
	eqc_early_init(np, &eqc_eyeq5_early_match_data);
}
CLK_OF_DECLARE_DRIVER(eqc_eyeq5, "mobileye,eyeq5-olb", eqc_eyeq5_early_init);

static void __init eqc_eyeq6h_central_early_init(struct device_node *np)
{
	eqc_early_init(np, &eqc_eyeq6h_central_early_match_data);
}
CLK_OF_DECLARE_DRIVER(eqc_eyeq6h_central, "mobileye,eyeq6h-central-olb",
		      eqc_eyeq6h_central_early_init);

static void __init eqc_eyeq6h_west_early_init(struct device_node *np)
{
	eqc_early_init(np, &eqc_eyeq6h_west_early_match_data);
}
CLK_OF_DECLARE_DRIVER(eqc_eyeq6h_west, "mobileye,eyeq6h-west-olb",
		      eqc_eyeq6h_west_early_init);