1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Reset driver for the Mobileye EyeQ5, EyeQ6L and EyeQ6H platforms.
4 *
5 * Controllers live in a shared register region called OLB. EyeQ5 and EyeQ6L
6 * have a single OLB instance for a single reset controller. EyeQ6H has seven
7 * OLB instances; three host reset controllers.
8 *
 * Each reset controller has one or more domains. Domains are of a given type
10 * (see enum eqr_domain_type), with a valid offset mask (up to 32 resets per
11 * domain).
12 *
13 * Domain types define expected behavior: one-register-per-reset,
14 * one-bit-per-reset, status detection method, busywait duration, etc.
15 *
16 * We use eqr_ as prefix, as-in "EyeQ Reset", but way shorter.
17 *
18 * Known resets in EyeQ5 domain 0 (type EQR_EYEQ5_SARCR):
19 * 3. CAN0 4. CAN1 5. CAN2 6. SPI0
20 * 7. SPI1 8. SPI2 9. SPI3 10. UART0
21 * 11. UART1 12. UART2 13. I2C0 14. I2C1
22 * 15. I2C2 16. I2C3 17. I2C4 18. TIMER0
23 * 19. TIMER1 20. TIMER2 21. TIMER3 22. TIMER4
24 * 23. WD0 24. EXT0 25. EXT1 26. GPIO
25 * 27. WD1
26 *
27 * Known resets in EyeQ5 domain 1 (type EQR_EYEQ5_ACRP):
28 * 0. VMP0 1. VMP1 2. VMP2 3. VMP3
29 * 4. PMA0 5. PMA1 6. PMAC0 7. PMAC1
30 * 8. MPC0 9. MPC1 10. MPC2 11. MPC3
31 * 12. MPC4
32 *
33 * Known resets in EyeQ5 domain 2 (type EQR_EYEQ5_PCIE):
34 * 0. PCIE0_CORE 1. PCIE0_APB 2. PCIE0_LINK_AXI 3. PCIE0_LINK_MGMT
35 * 4. PCIE0_LINK_HOT 5. PCIE0_LINK_PIPE 6. PCIE1_CORE 7. PCIE1_APB
36 * 8. PCIE1_LINK_AXI 9. PCIE1_LINK_MGMT 10. PCIE1_LINK_HOT 11. PCIE1_LINK_PIPE
37 * 12. MULTIPHY 13. MULTIPHY_APB 15. PCIE0_LINK_MGMT 16. PCIE1_LINK_MGMT
38 * 17. PCIE0_LINK_PM 18. PCIE1_LINK_PM
39 *
40 * Known resets in EyeQ6L domain 0 (type EQR_EYEQ5_SARCR):
41 * 0. SPI0 1. SPI1 2. UART0 3. I2C0
42 * 4. I2C1 5. TIMER0 6. TIMER1 7. TIMER2
43 * 8. TIMER3 9. WD0 10. WD1 11. EXT0
44 * 12. EXT1 13. GPIO
45 *
46 * Known resets in EyeQ6L domain 1 (type EQR_EYEQ5_ACRP):
47 * 0. VMP0 1. VMP1 2. VMP2 3. VMP3
48 * 4. PMA0 5. PMA1 6. PMAC0 7. PMAC1
49 * 8. MPC0 9. MPC1 10. MPC2 11. MPC3
50 * 12. MPC4
51 *
 * Known resets in EyeQ6Lplus domain 0 (type EQR_EYEQ5_SARCR):
53 * 0. SPI0 1. SPI1 2. UART0 3. I2C0
54 * 4. I2C1 5. TIMER0 6. TIMER1 7. TIMER2
55 * 8. TIMER3 9. WD0 10. WD1 11. EXT0
56 * 12. EXT1 13. GPIO
57 *
58 * Known resets in EyeQ6Lplus domain 1 (type EQR_EYEQ5_ACRP):
59 * 0. VMP0 1. VMP1 2. VMP2 3. VMP3
60 * 4. PMA0 5. PMA1 6. PMAC0 7. PMAC1
61 * 8. MPC0 9. MPC1 10. MPC2 11. MPC3
62 * 12. MPC4
63 *
64 * Known resets in EyeQ6H west/east (type EQR_EYEQ6H_SARCR):
65 * 0. CAN 1. SPI0 2. SPI1 3. UART0
66 * 4. UART1 5. I2C0 6. I2C1 7. -hole-
67 * 8. TIMER0 9. TIMER1 10. WD 11. EXT TIMER
68 * 12. GPIO
69 *
70 * Known resets in EyeQ6H acc (type EQR_EYEQ5_ACRP):
71 * 1. XNN0 2. XNN1 3. XNN2 4. XNN3
72 * 5. VMP0 6. VMP1 7. VMP2 8. VMP3
73 * 9. PMA0 10. PMA1 11. MPC0 12. MPC1
74 * 13. MPC2 14. MPC3 15. PERIPH
75 *
76 * Abbreviations:
77 * - PMA: Programmable Macro Array
78 * - MPC: Multi-threaded Processing Clusters
79 * - VMP: Vector Microcode Processors
80 *
81 * Copyright (C) 2024 Mobileye Vision Technologies Ltd.
82 */
83
84 #include <linux/array_size.h>
85 #include <linux/auxiliary_bus.h>
86 #include <linux/bitfield.h>
87 #include <linux/bits.h>
88 #include <linux/bug.h>
89 #include <linux/cleanup.h>
90 #include <linux/container_of.h>
91 #include <linux/device.h>
92 #include <linux/err.h>
93 #include <linux/errno.h>
94 #include <linux/init.h>
95 #include <linux/io.h>
96 #include <linux/iopoll.h>
97 #include <linux/lockdep.h>
98 #include <linux/mod_devicetable.h>
99 #include <linux/mutex.h>
100 #include <linux/of.h>
101 #include <linux/reset-controller.h>
102 #include <linux/slab.h>
103 #include <linux/types.h>
104
105 /*
106 * A reset ID, as returned by eqr_of_xlate_*(), is a (domain, offset) pair.
107 * Low byte is domain, rest is offset.
108 */
109 #define ID_DOMAIN_MASK GENMASK(7, 0)
110 #define ID_OFFSET_MASK GENMASK(31, 8)
111
/*
 * Register interface flavor of a reset domain. The type selects register
 * layout, status-detection method and busywait timings in the common code.
 */
enum eqr_domain_type {
	EQR_EYEQ5_SARCR,	/* REQUEST/STATUS register pair, one bit per reset. */
	EQR_EYEQ5_ACRP,		/* One 32-bit register per reset (base + 4 * offset). */
	EQR_EYEQ5_PCIE,		/* Single register, one bit per reset, no busywait. */
	EQR_EYEQ6H_SARCR,	/* Separate RST and CLK request/status registers. */
};
118
119 /*
120 * Domain type EQR_EYEQ5_SARCR register offsets.
121 */
122 #define EQR_EYEQ5_SARCR_REQUEST (0x000)
123 #define EQR_EYEQ5_SARCR_STATUS (0x004)
124
125 /*
126 * Domain type EQR_EYEQ5_ACRP register masks.
127 * Registers are: base + 4 * offset.
128 */
129 #define EQR_EYEQ5_ACRP_PD_REQ BIT(0)
130 #define EQR_EYEQ5_ACRP_ST_POWER_DOWN BIT(27)
131 #define EQR_EYEQ5_ACRP_ST_ACTIVE BIT(29)
132
133 /*
134 * Domain type EQR_EYEQ6H_SARCR register offsets.
135 */
136 #define EQR_EYEQ6H_SARCR_RST_REQUEST (0x000)
137 #define EQR_EYEQ6H_SARCR_CLK_STATUS (0x004)
138 #define EQR_EYEQ6H_SARCR_RST_STATUS (0x008)
139 #define EQR_EYEQ6H_SARCR_CLK_REQUEST (0x00C)
140
/* Polling parameters fed to readl_poll_timeout()/read_poll_timeout(). */
struct eqr_busy_wait_timings {
	unsigned long sleep_us;		/* Delay between polls, in microseconds. */
	unsigned long timeout_us;	/* Total polling budget, in microseconds. */
};
145
/* Busywait timings per domain type, indexed by enum eqr_domain_type. */
static const struct eqr_busy_wait_timings eqr_timings[] = {
	[EQR_EYEQ5_SARCR]  = {1, 10},
	[EQR_EYEQ5_ACRP]   = {1, 40 * USEC_PER_MSEC}, /* LBIST implies long timeout. */
	/* EQR_EYEQ5_PCIE does no busy waiting. */
	[EQR_EYEQ6H_SARCR] = {1, 400},
};
152
153 #define EQR_MAX_DOMAIN_COUNT 3
154
/* Static description of one reset domain inside an OLB register region. */
struct eqr_domain_descriptor {
	enum eqr_domain_type type;	/* Register interface flavor. */
	u32 valid_mask;			/* Bitmask of valid reset offsets (0-31). */
	unsigned int offset;		/* Domain base, relative to the OLB base. */
};
160
/* Per-compatible match data: the domains hosted by one OLB instance. */
struct eqr_match_data {
	unsigned int domain_count;	/* Entries in @domains; at most EQR_MAX_DOMAIN_COUNT. */
	const struct eqr_domain_descriptor *domains;
};
165
/* Per-device driver state; rcdev is embedded, see eqr_rcdev_to_priv(). */
struct eqr_private {
	/*
	 * One mutex per domain for read-modify-write operations on registers.
	 * Some domains can be involved in LBIST which implies long critical
	 * sections; we wouldn't want other domains to be impacted by that.
	 */
	struct mutex mutexes[EQR_MAX_DOMAIN_COUNT];
	void __iomem *base;			/* OLB register region. */
	const struct eqr_match_data *data;	/* Domains for this compatible. */
	struct reset_controller_dev rcdev;
};
177
/* Map the embedded reset controller device back to the driver state. */
static inline struct eqr_private *eqr_rcdev_to_priv(struct reset_controller_dev *x)
{
	return container_of(x, struct eqr_private, rcdev);
}
182
/*
 * Read two registers as a single read_poll_timeout() operation. Used by the
 * EQR_EYEQ6H_SARCR busywait to sample reset and clock status together.
 */
static u32 eqr_double_readl(void __iomem *addr_a, void __iomem *addr_b,
			    u32 *dest_a, u32 *dest_b)
{
	*dest_a = readl(addr_a);
	*dest_b = readl(addr_b);
	return 0; /* read_poll_timeout() op argument must return something. */
}
190
/*
 * eqr_busy_wait_locked() - poll until a reset reaches the requested state.
 * @priv:   driver state
 * @dev:    device, used for debug logging only
 * @domain: domain index into priv->data->domains
 * @offset: reset offset within the domain (0-31)
 * @assert: state to wait for: true = asserted, false = deasserted
 *
 * Caller must hold the domain mutex. Timings come from eqr_timings[] and
 * depend on the domain type; EQR_EYEQ5_PCIE does no waiting at all.
 *
 * Return: 0 on success, -ETIMEDOUT if the state was not observed in time,
 * -EINVAL on unknown domain type.
 */
static int eqr_busy_wait_locked(struct eqr_private *priv, struct device *dev,
				u32 domain, u32 offset, bool assert)
{
	void __iomem *base = priv->base + priv->data->domains[domain].offset;
	enum eqr_domain_type domain_type = priv->data->domains[domain].type;
	unsigned long timeout_us = eqr_timings[domain_type].timeout_us;
	unsigned long sleep_us = eqr_timings[domain_type].sleep_us;
	u32 val, mask, rst_status, clk_status;
	void __iomem *reg;
	int ret;

	lockdep_assert_held(&priv->mutexes[domain]);

	switch (domain_type) {
	case EQR_EYEQ5_SARCR:
		/* A clear status bit means the reset is asserted. */
		reg = base + EQR_EYEQ5_SARCR_STATUS;
		mask = BIT(offset);

		ret = readl_poll_timeout(reg, val, !(val & mask) == assert,
					 sleep_us, timeout_us);
		break;

	case EQR_EYEQ5_ACRP:
		/* One register per reset; wait for the matching status flag. */
		reg = base + 4 * offset;
		if (assert)
			mask = EQR_EYEQ5_ACRP_ST_POWER_DOWN;
		else
			mask = EQR_EYEQ5_ACRP_ST_ACTIVE;

		ret = readl_poll_timeout(reg, val, !!(val & mask),
					 sleep_us, timeout_us);
		break;

	case EQR_EYEQ5_PCIE:
		ret = 0; /* No busy waiting. */
		break;

	case EQR_EYEQ6H_SARCR:
		/*
		 * Wait until both bits change:
		 *     readl(base + EQR_EYEQ6H_SARCR_RST_STATUS) & BIT(offset)
		 *     readl(base + EQR_EYEQ6H_SARCR_CLK_STATUS) & BIT(offset)
		 */
		mask = BIT(offset);
		ret = read_poll_timeout(eqr_double_readl, val,
					(!(rst_status & mask) == assert) &&
						(!(clk_status & mask) == assert),
					sleep_us, timeout_us, false,
					base + EQR_EYEQ6H_SARCR_RST_STATUS,
					base + EQR_EYEQ6H_SARCR_CLK_STATUS,
					&rst_status, &clk_status);
		break;

	default:
		WARN_ON(1);
		ret = -EINVAL;
		break;
	}

	if (ret == -ETIMEDOUT)
		dev_dbg(dev, "%u-%u: timeout\n", domain, offset);
	return ret;
}
254
/*
 * eqr_assert_locked() - write the register state that asserts a reset.
 * @priv:   driver state
 * @domain: domain index into priv->data->domains
 * @offset: reset offset within the domain (0-31)
 *
 * Caller must hold the domain mutex: most paths read-modify-write a shared
 * register. Completion is not awaited here; see eqr_busy_wait_locked().
 */
static void eqr_assert_locked(struct eqr_private *priv, u32 domain, u32 offset)
{
	enum eqr_domain_type domain_type = priv->data->domains[domain].type;
	void __iomem *base, *reg;
	u32 val;

	lockdep_assert_held(&priv->mutexes[domain]);

	base = priv->base + priv->data->domains[domain].offset;

	switch (domain_type) {
	case EQR_EYEQ5_SARCR:
		/* Clearing the request bit asserts the reset. */
		reg = base + EQR_EYEQ5_SARCR_REQUEST;
		writel(readl(reg) & ~BIT(offset), reg);
		break;

	case EQR_EYEQ5_ACRP:
		/* One register per reset: request power-down. */
		reg = base + 4 * offset;
		writel(readl(reg) | EQR_EYEQ5_ACRP_PD_REQ, reg);
		break;

	case EQR_EYEQ5_PCIE:
		/* Single shared register; a clear bit means asserted. */
		writel(readl(base) & ~BIT(offset), base);
		break;

	case EQR_EYEQ6H_SARCR:
		/* RST_REQUEST and CLK_REQUEST must be kept in sync. */
		val = readl(base + EQR_EYEQ6H_SARCR_RST_REQUEST);
		val &= ~BIT(offset);
		writel(val, base + EQR_EYEQ6H_SARCR_RST_REQUEST);
		writel(val, base + EQR_EYEQ6H_SARCR_CLK_REQUEST);
		break;

	default:
		WARN_ON(1);
		break;
	}
}
293
eqr_assert(struct reset_controller_dev * rcdev,unsigned long id)294 static int eqr_assert(struct reset_controller_dev *rcdev, unsigned long id)
295 {
296 struct eqr_private *priv = eqr_rcdev_to_priv(rcdev);
297 u32 domain = FIELD_GET(ID_DOMAIN_MASK, id);
298 u32 offset = FIELD_GET(ID_OFFSET_MASK, id);
299
300 dev_dbg(rcdev->dev, "%u-%u: assert request\n", domain, offset);
301
302 guard(mutex)(&priv->mutexes[domain]);
303
304 eqr_assert_locked(priv, domain, offset);
305 return eqr_busy_wait_locked(priv, rcdev->dev, domain, offset, true);
306 }
307
/*
 * eqr_deassert_locked() - write the register state that releases a reset.
 * @priv:   driver state
 * @domain: domain index into priv->data->domains
 * @offset: reset offset within the domain (0-31)
 *
 * Mirror of eqr_assert_locked(). Caller must hold the domain mutex.
 * Completion is not awaited here; see eqr_busy_wait_locked().
 */
static void eqr_deassert_locked(struct eqr_private *priv, u32 domain,
				u32 offset)
{
	enum eqr_domain_type domain_type = priv->data->domains[domain].type;
	void __iomem *base, *reg;
	u32 val;

	lockdep_assert_held(&priv->mutexes[domain]);

	base = priv->base + priv->data->domains[domain].offset;

	switch (domain_type) {
	case EQR_EYEQ5_SARCR:
		/* Setting the request bit deasserts the reset. */
		reg = base + EQR_EYEQ5_SARCR_REQUEST;
		writel(readl(reg) | BIT(offset), reg);
		break;

	case EQR_EYEQ5_ACRP:
		/* One register per reset: withdraw the power-down request. */
		reg = base + 4 * offset;
		writel(readl(reg) & ~EQR_EYEQ5_ACRP_PD_REQ, reg);
		break;

	case EQR_EYEQ5_PCIE:
		/* Single shared register; a set bit means deasserted. */
		writel(readl(base) | BIT(offset), base);
		break;

	case EQR_EYEQ6H_SARCR:
		/* RST_REQUEST and CLK_REQUEST must be kept in sync. */
		val = readl(base + EQR_EYEQ6H_SARCR_RST_REQUEST);
		val |= BIT(offset);
		writel(val, base + EQR_EYEQ6H_SARCR_RST_REQUEST);
		writel(val, base + EQR_EYEQ6H_SARCR_CLK_REQUEST);
		break;

	default:
		WARN_ON(1);
		break;
	}
}
347
eqr_deassert(struct reset_controller_dev * rcdev,unsigned long id)348 static int eqr_deassert(struct reset_controller_dev *rcdev, unsigned long id)
349 {
350 struct eqr_private *priv = eqr_rcdev_to_priv(rcdev);
351 u32 domain = FIELD_GET(ID_DOMAIN_MASK, id);
352 u32 offset = FIELD_GET(ID_OFFSET_MASK, id);
353
354 dev_dbg(rcdev->dev, "%u-%u: deassert request\n", domain, offset);
355
356 guard(mutex)(&priv->mutexes[domain]);
357
358 eqr_deassert_locked(priv, domain, offset);
359 return eqr_busy_wait_locked(priv, rcdev->dev, domain, offset, false);
360 }
361
eqr_status(struct reset_controller_dev * rcdev,unsigned long id)362 static int eqr_status(struct reset_controller_dev *rcdev, unsigned long id)
363 {
364 u32 domain = FIELD_GET(ID_DOMAIN_MASK, id);
365 u32 offset = FIELD_GET(ID_OFFSET_MASK, id);
366 struct eqr_private *priv = eqr_rcdev_to_priv(rcdev);
367 enum eqr_domain_type domain_type = priv->data->domains[domain].type;
368 void __iomem *base, *reg;
369
370 dev_dbg(rcdev->dev, "%u-%u: status request\n", domain, offset);
371
372 guard(mutex)(&priv->mutexes[domain]);
373
374 base = priv->base + priv->data->domains[domain].offset;
375
376 switch (domain_type) {
377 case EQR_EYEQ5_SARCR:
378 reg = base + EQR_EYEQ5_SARCR_STATUS;
379 return !(readl(reg) & BIT(offset));
380 case EQR_EYEQ5_ACRP:
381 reg = base + 4 * offset;
382 return !(readl(reg) & EQR_EYEQ5_ACRP_ST_ACTIVE);
383 case EQR_EYEQ5_PCIE:
384 return !(readl(base) & BIT(offset));
385 case EQR_EYEQ6H_SARCR:
386 reg = base + EQR_EYEQ6H_SARCR_RST_STATUS;
387 return !(readl(reg) & BIT(offset));
388 default:
389 return -EINVAL;
390 }
391 }
392
/* Callbacks exposed to the reset controller framework. */
static const struct reset_control_ops eqr_ops = {
	.assert	  = eqr_assert,
	.deassert = eqr_deassert,
	.status	  = eqr_status,
};
398
/*
 * Validate a (domain, offset) pair and pack it into a reset ID.
 *
 * Return: the packed ID (see ID_DOMAIN_MASK/ID_OFFSET_MASK), or -EINVAL for
 * an out-of-range domain or an offset absent from the domain's valid mask.
 */
static int eqr_of_xlate_internal(struct reset_controller_dev *rcdev,
				 u32 domain, u32 offset)
{
	struct eqr_private *priv = eqr_rcdev_to_priv(rcdev);
	const struct eqr_domain_descriptor *desc;

	if (domain >= priv->data->domain_count)
		goto invalid;

	desc = &priv->data->domains[domain];
	if (offset > 31 || !(desc->valid_mask & BIT(offset)))
		goto invalid;

	return FIELD_PREP(ID_DOMAIN_MASK, domain) | FIELD_PREP(ID_OFFSET_MASK, offset);

invalid:
	dev_err(rcdev->dev, "%u-%u: invalid reset\n", domain, offset);
	return -EINVAL;
}
412
/* One-cell xlate for single-domain controllers: the cell is the offset. */
static int eqr_of_xlate_onecell(struct reset_controller_dev *rcdev,
				const struct of_phandle_args *reset_spec)
{
	return eqr_of_xlate_internal(rcdev, 0, reset_spec->args[0]);
}
418
/* Two-cell xlate for multi-domain controllers: cells are (domain, offset). */
static int eqr_of_xlate_twocells(struct reset_controller_dev *rcdev,
				 const struct of_phandle_args *reset_spec)
{
	return eqr_of_xlate_internal(rcdev, reset_spec->args[0], reset_spec->args[1]);
}
424
eqr_probe(struct auxiliary_device * adev,const struct auxiliary_device_id * id)425 static int eqr_probe(struct auxiliary_device *adev,
426 const struct auxiliary_device_id *id)
427 {
428 const struct of_device_id *match;
429 struct device *dev = &adev->dev;
430 struct eqr_private *priv;
431 unsigned int i;
432 int ret;
433
434 /*
435 * Get match data. We cannot use device_get_match_data() because it does
436 * not accept reused OF nodes; see device_set_of_node_from_dev().
437 */
438 match = of_match_node(dev->driver->of_match_table, dev->of_node);
439 if (!match || !match->data)
440 return -ENODEV;
441
442 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
443 if (!priv)
444 return -ENOMEM;
445
446 priv->data = match->data;
447 priv->base = (void __iomem *)dev_get_platdata(dev);
448 priv->rcdev.ops = &eqr_ops;
449 priv->rcdev.owner = THIS_MODULE;
450 priv->rcdev.dev = dev;
451 priv->rcdev.of_node = dev->of_node;
452
453 if (priv->data->domain_count == 1) {
454 priv->rcdev.of_reset_n_cells = 1;
455 priv->rcdev.of_xlate = eqr_of_xlate_onecell;
456 } else {
457 priv->rcdev.of_reset_n_cells = 2;
458 priv->rcdev.of_xlate = eqr_of_xlate_twocells;
459 }
460
461 for (i = 0; i < priv->data->domain_count; i++)
462 mutex_init(&priv->mutexes[i]);
463
464 priv->rcdev.nr_resets = 0;
465 for (i = 0; i < priv->data->domain_count; i++)
466 priv->rcdev.nr_resets += hweight32(priv->data->domains[i].valid_mask);
467
468 ret = devm_reset_controller_register(dev, &priv->rcdev);
469 if (ret)
470 return dev_err_probe(dev, ret, "failed registering reset controller\n");
471
472 return 0;
473 }
474
/* EyeQ5: single OLB hosting SARCR, ACRP and PCIE domains. */
static const struct eqr_domain_descriptor eqr_eyeq5_domains[] = {
	{
		.type = EQR_EYEQ5_SARCR,
		.valid_mask = 0xFFFFFF8,	/* Resets 3-27, see header comment. */
		.offset = 0x004,
	},
	{
		.type = EQR_EYEQ5_ACRP,
		.valid_mask = 0x0001FFF,	/* Resets 0-12. */
		.offset = 0x200,
	},
	{
		.type = EQR_EYEQ5_PCIE,
		.valid_mask = 0x007BFFF,	/* Resets 0-13 and 15-18 (14 is a hole). */
		.offset = 0x120,
	},
};

static const struct eqr_match_data eqr_eyeq5_data = {
	.domain_count = ARRAY_SIZE(eqr_eyeq5_domains),
	.domains = eqr_eyeq5_domains,
};
497
/* EyeQ6L: single OLB hosting SARCR and ACRP domains. */
static const struct eqr_domain_descriptor eqr_eyeq6l_domains[] = {
	{
		.type = EQR_EYEQ5_SARCR,
		.valid_mask = 0x3FFF,	/* Resets 0-13, see header comment. */
		.offset = 0x004,
	},
	{
		.type = EQR_EYEQ5_ACRP,
		.valid_mask = 0x00FF,
		.offset = 0x200,
	},
};

static const struct eqr_match_data eqr_eyeq6l_data = {
	.domain_count = ARRAY_SIZE(eqr_eyeq6l_domains),
	.domains = eqr_eyeq6l_domains,
};
515
516 static const struct eqr_domain_descriptor eqr_eyeq6lplus_domains[] = {
517 {
518 .type = EQR_EYEQ5_PCIE,
519 .valid_mask = 0x3FFF,
520 .offset = 0x004,
521 },
522 {
523 .type = EQR_EYEQ5_ACRP,
524 .valid_mask = 0x00FF,
525 .offset = 0x200,
526 },
527 };
528
/* Match data for the EyeQ6Lplus OLB. */
static const struct eqr_match_data eqr_eyeq6lplus_data = {
	.domain_count = ARRAY_SIZE(eqr_eyeq6lplus_domains),
	.domains = eqr_eyeq6lplus_domains,
};
533
534 /* West and east OLBs each have an instance. */
/* West and east OLBs each have an instance. */
static const struct eqr_domain_descriptor eqr_eyeq6h_we_domains[] = {
	{
		.type = EQR_EYEQ6H_SARCR,
		.valid_mask = 0x1F7F,	/* Resets 0-6 and 8-12 (7 is a hole). */
		.offset = 0x004,
	},
};

static const struct eqr_match_data eqr_eyeq6h_we_data = {
	.domain_count = ARRAY_SIZE(eqr_eyeq6h_we_domains),
	.domains = eqr_eyeq6h_we_domains,
};
547
/* EyeQ6H accelerator OLB: single ACRP domain at the region base. */
static const struct eqr_domain_descriptor eqr_eyeq6h_acc_domains[] = {
	{
		.type = EQR_EYEQ5_ACRP,
		.valid_mask = 0x7FFF,
		.offset = 0x000,
	},
};

static const struct eqr_match_data eqr_eyeq6h_acc_data = {
	.domain_count = ARRAY_SIZE(eqr_eyeq6h_acc_domains),
	.domains = eqr_eyeq6h_acc_domains,
};
560
561 /*
562 * Table describes OLB system-controller compatibles.
563 * It does not get used to match against devicetree node.
564 */
/* Looked up manually via of_match_node() in eqr_probe(). */
static const struct of_device_id eqr_match_table[] = {
	{ .compatible = "mobileye,eyeq5-olb", .data = &eqr_eyeq5_data },
	{ .compatible = "mobileye,eyeq6l-olb", .data = &eqr_eyeq6l_data },
	{ .compatible = "mobileye,eyeq6lplus-olb", .data = &eqr_eyeq6lplus_data },
	/* West and east OLB instances share the same domain layout. */
	{ .compatible = "mobileye,eyeq6h-west-olb", .data = &eqr_eyeq6h_we_data },
	{ .compatible = "mobileye,eyeq6h-east-olb", .data = &eqr_eyeq6h_we_data },
	{ .compatible = "mobileye,eyeq6h-acc-olb", .data = &eqr_eyeq6h_acc_data },
	{}
};
MODULE_DEVICE_TABLE(of, eqr_match_table);
575
/*
 * Auxiliary device names this driver binds to.
 * NOTE(review): presumably registered by the EyeQ clock driver ("clk_eyeq")
 * that owns the OLB region — confirm against the parent driver.
 */
static const struct auxiliary_device_id eqr_id_table[] = {
	{ .name = "clk_eyeq.reset" },
	{ .name = "clk_eyeq.reset_west" },
	{ .name = "clk_eyeq.reset_east" },
	{ .name = "clk_eyeq.reset_acc" },
	{}
};
MODULE_DEVICE_TABLE(auxiliary, eqr_id_table);
584
/* Auxiliary driver; of_match_table only carries the per-compatible data. */
static struct auxiliary_driver eqr_driver = {
	.probe = eqr_probe,
	.id_table = eqr_id_table,
	.driver = {
		.of_match_table = eqr_match_table,
	}
};
module_auxiliary_driver(eqr_driver);
593