xref: /linux/drivers/reset/reset-eyeq.c (revision 7a5f1cd22d47f8ca4b760b6334378ae42c1bd24b)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Reset driver for the Mobileye EyeQ5, EyeQ6L and EyeQ6H platforms.
4  *
5  * Controllers live in a shared register region called OLB. EyeQ5 and EyeQ6L
6  * have a single OLB instance for a single reset controller. EyeQ6H has seven
7  * OLB instances; three host reset controllers.
8  *
9  * Each reset controller has one or more domain. Domains are of a given type
10  * (see enum eqr_domain_type), with a valid offset mask (up to 32 resets per
11  * domain).
12  *
13  * Domain types define expected behavior: one-register-per-reset,
14  * one-bit-per-reset, status detection method, busywait duration, etc.
15  *
16  * We use eqr_ as prefix, as-in "EyeQ Reset", but way shorter.
17  *
18  * Known resets in EyeQ5 domain 0 (type EQR_EYEQ5_SARCR):
19  *  3. CAN0	 4. CAN1	 5. CAN2	 6. SPI0
20  *  7. SPI1	 8. SPI2	 9. SPI3	10. UART0
21  * 11. UART1	12. UART2	13. I2C0	14. I2C1
22  * 15. I2C2	16. I2C3	17. I2C4	18. TIMER0
23  * 19. TIMER1	20. TIMER2	21. TIMER3	22. TIMER4
24  * 23. WD0	24. EXT0	25. EXT1	26. GPIO
25  * 27. WD1
26  *
27  * Known resets in EyeQ5 domain 1 (type EQR_EYEQ5_ACRP):
28  *  0. VMP0	 1. VMP1	 2. VMP2	 3. VMP3
29  *  4. PMA0	 5. PMA1	 6. PMAC0	 7. PMAC1
30  *  8. MPC0	 9. MPC1	10. MPC2	11. MPC3
31  * 12. MPC4
32  *
33  * Known resets in EyeQ5 domain 2 (type EQR_EYEQ5_PCIE):
34  *  0. PCIE0_CORE	 1. PCIE0_APB		 2. PCIE0_LINK_AXI	 3. PCIE0_LINK_MGMT
35  *  4. PCIE0_LINK_HOT	 5. PCIE0_LINK_PIPE	 6. PCIE1_CORE		 7. PCIE1_APB
36  *  8. PCIE1_LINK_AXI	 9. PCIE1_LINK_MGMT	10. PCIE1_LINK_HOT	11. PCIE1_LINK_PIPE
37  * 12. MULTIPHY		13. MULTIPHY_APB	15. PCIE0_LINK_MGMT	16. PCIE1_LINK_MGMT
38  * 17. PCIE0_LINK_PM	18. PCIE1_LINK_PM
39  *
40  * Known resets in EyeQ6L domain 0 (type EQR_EYEQ5_SARCR):
41  *  0. SPI0	 1. SPI1	 2. UART0	 3. I2C0
42  *  4. I2C1	 5. TIMER0	 6. TIMER1	 7. TIMER2
43  *  8. TIMER3	 9. WD0		10. WD1		11. EXT0
44  * 12. EXT1	13. GPIO
45  *
46  * Known resets in EyeQ6L domain 1 (type EQR_EYEQ5_ACRP):
47  *  0. VMP0	 1. VMP1	 2. VMP2	 3. VMP3
48  *  4. PMA0	 5. PMA1	 6. PMAC0	 7. PMAC1
49  *  8. MPC0	 9. MPC1	10. MPC2	11. MPC3
50  * 12. MPC4
51  *
52  * Known resets in EyeQ6Lplus domain 0 (type EQR_EYEQ5_SARCR):
53  *  0. SPI0	 1. SPI1	 2. UART0	 3. I2C0
54  *  4. I2C1	 5. TIMER0	 6. TIMER1	 7. TIMER2
55  *  8. TIMER3	 9. WD0		10. WD1		11. EXT0
56  * 12. EXT1	13. GPIO
57  *
58  * Known resets in EyeQ6Lplus domain 1 (type EQR_EYEQ5_ACRP):
59  *  0. VMP0	 1. VMP1	 2. VMP2	 3. VMP3
60  *  4. PMA0	 5. PMA1	 6. PMAC0	 7. PMAC1
61  *  8. MPC0	 9. MPC1	10. MPC2	11. MPC3
62  * 12. MPC4
63  *
64  * Known resets in EyeQ6H west/east (type EQR_EYEQ6H_SARCR):
65  *  0. CAN	 1. SPI0	 2. SPI1	 3. UART0
66  *  4. UART1	 5. I2C0	 6. I2C1	 7. -hole-
67  *  8. TIMER0	 9. TIMER1	10. WD		11. EXT TIMER
68  * 12. GPIO
69  *
70  * Known resets in EyeQ6H acc (type EQR_EYEQ5_ACRP):
71  *  1. XNN0	 2. XNN1	 3. XNN2	 4. XNN3
72  *  5. VMP0	 6. VMP1	 7. VMP2	 8. VMP3
73  *  9. PMA0	10. PMA1	11. MPC0	12. MPC1
74  * 13. MPC2	14. MPC3	15. PERIPH
75  *
76  * Abbreviations:
77  *  - PMA: Programmable Macro Array
78  *  - MPC: Multi-threaded Processing Clusters
79  *  - VMP: Vector Microcode Processors
80  *
81  * Copyright (C) 2024 Mobileye Vision Technologies Ltd.
82  */
83 
84 #include <linux/array_size.h>
85 #include <linux/auxiliary_bus.h>
86 #include <linux/bitfield.h>
87 #include <linux/bits.h>
88 #include <linux/bug.h>
89 #include <linux/cleanup.h>
90 #include <linux/container_of.h>
91 #include <linux/device.h>
92 #include <linux/err.h>
93 #include <linux/errno.h>
94 #include <linux/init.h>
95 #include <linux/io.h>
96 #include <linux/iopoll.h>
97 #include <linux/lockdep.h>
98 #include <linux/mod_devicetable.h>
99 #include <linux/mutex.h>
100 #include <linux/of.h>
101 #include <linux/reset-controller.h>
102 #include <linux/slab.h>
103 #include <linux/types.h>
104 
105 /*
106  * A reset ID, as returned by eqr_of_xlate_*(), is a (domain, offset) pair.
107  * Low byte is domain, rest is offset.
108  */
109 #define ID_DOMAIN_MASK	GENMASK(7, 0)
110 #define ID_OFFSET_MASK	GENMASK(31, 8)
111 
enum eqr_domain_type {
	EQR_EYEQ5_SARCR,	/* Request/status register pair, one bit per reset. */
	EQR_EYEQ5_ACRP,		/* One register per reset, at base + 4 * offset. */
	EQR_EYEQ5_PCIE,		/* Single register at domain base, no busy-wait. */
	EQR_EYEQ6H_SARCR,	/* Paired reset + clock request/status registers. */
};
118 
119 /*
120  * Domain type EQR_EYEQ5_SARCR register offsets.
121  */
122 #define EQR_EYEQ5_SARCR_REQUEST		(0x000)
123 #define EQR_EYEQ5_SARCR_STATUS		(0x004)
124 
125 /*
126  * Domain type EQR_EYEQ5_ACRP register masks.
127  * Registers are: base + 4 * offset.
128  */
129 #define EQR_EYEQ5_ACRP_PD_REQ		BIT(0)
130 #define EQR_EYEQ5_ACRP_ST_POWER_DOWN	BIT(27)
131 #define EQR_EYEQ5_ACRP_ST_ACTIVE	BIT(29)
132 
133 /*
134  * Domain type EQR_EYEQ6H_SARCR register offsets.
135  */
136 #define EQR_EYEQ6H_SARCR_RST_REQUEST	(0x000)
137 #define EQR_EYEQ6H_SARCR_CLK_STATUS	(0x004)
138 #define EQR_EYEQ6H_SARCR_RST_STATUS	(0x008)
139 #define EQR_EYEQ6H_SARCR_CLK_REQUEST	(0x00C)
140 
/* Polling parameters handed to read[l]_poll_timeout(). */
struct eqr_busy_wait_timings {
	unsigned long sleep_us;		/* Delay between polls, in microseconds. */
	unsigned long timeout_us;	/* Total budget before -ETIMEDOUT, in microseconds. */
};
145 
/* Busy-wait timings, indexed by enum eqr_domain_type. */
static const struct eqr_busy_wait_timings eqr_timings[] = {
	[EQR_EYEQ5_SARCR]	= {1, 10},
	[EQR_EYEQ5_ACRP]	= {1, 40 * USEC_PER_MSEC}, /* LBIST implies long timeout. */
	/* EQR_EYEQ5_PCIE does no busy waiting. */
	[EQR_EYEQ6H_SARCR]	= {1, 400},
};
152 
/* Largest domain count across all match data below; sizes the mutex array. */
#define EQR_MAX_DOMAIN_COUNT 3

/* Static description of one reset domain within a controller. */
struct eqr_domain_descriptor {
	enum eqr_domain_type	type;		/* Register layout / wait behavior. */
	u32			valid_mask;	/* One bit set per implemented reset. */
	unsigned int		offset;		/* Domain base, relative to OLB base. */
};
160 
/* Per-compatible description: the set of domains a controller hosts. */
struct eqr_match_data {
	unsigned int				domain_count;
	const struct eqr_domain_descriptor	*domains;
};
165 
struct eqr_private {
	/*
	 * One mutex per domain for read-modify-write operations on registers.
	 * Some domains can be involved in LBIST which implies long critical
	 * sections; we wouldn't want other domains to be impacted by that.
	 */
	struct mutex			mutexes[EQR_MAX_DOMAIN_COUNT];
	void __iomem			*base;	/* OLB registers, from platform data. */
	const struct eqr_match_data	*data;	/* Domain layout for this compatible. */
	struct reset_controller_dev	rcdev;	/* Embedded framework handle. */
};
177 
/* Map the embedded reset_controller_dev back to its driver instance. */
static inline struct eqr_private *eqr_rcdev_to_priv(struct reset_controller_dev *x)
{
	return container_of(x, struct eqr_private, rcdev);
}
182 
/*
 * Read two registers back-to-back into *dest_a and *dest_b. Used as the op
 * argument of read_poll_timeout() for EQR_EYEQ6H_SARCR, whose completion is
 * spread across two status registers.
 */
static u32 eqr_double_readl(void __iomem *addr_a, void __iomem *addr_b,
			    u32 *dest_a, u32 *dest_b)
{
	*dest_a = readl(addr_a);
	*dest_b = readl(addr_b);
	return 0; /* read_poll_timeout() op argument must return something. */
}
190 
/*
 * Poll hardware status until reset @offset of @domain reflects the requested
 * state, or until the per-domain-type timeout from eqr_timings[] expires.
 *
 * @priv:   driver instance
 * @dev:    device, used for debug logging only
 * @domain: index into priv->data->domains[]
 * @offset: reset index within the domain (0..31)
 * @assert: expected end state; true = in reset, false = out of reset
 *
 * Caller must hold priv->mutexes[domain].
 * Returns 0 on success, -ETIMEDOUT on timeout, -EINVAL on unknown type.
 */
static int eqr_busy_wait_locked(struct eqr_private *priv, struct device *dev,
				u32 domain, u32 offset, bool assert)
{
	void __iomem *base = priv->base + priv->data->domains[domain].offset;
	enum eqr_domain_type domain_type = priv->data->domains[domain].type;
	unsigned long timeout_us = eqr_timings[domain_type].timeout_us;
	unsigned long sleep_us = eqr_timings[domain_type].sleep_us;
	u32 val, mask, rst_status, clk_status;
	void __iomem *reg;
	int ret;

	lockdep_assert_held(&priv->mutexes[domain]);

	switch (domain_type) {
	case EQR_EYEQ5_SARCR:
		/* A cleared status bit means the reset is asserted. */
		reg = base + EQR_EYEQ5_SARCR_STATUS;
		mask = BIT(offset);

		ret = readl_poll_timeout(reg, val, !(val & mask) == assert,
					 sleep_us, timeout_us);
		break;

	case EQR_EYEQ5_ACRP:
		/* Each target state has its own status bit; wait for it to rise. */
		reg = base + 4 * offset;
		if (assert)
			mask = EQR_EYEQ5_ACRP_ST_POWER_DOWN;
		else
			mask = EQR_EYEQ5_ACRP_ST_ACTIVE;

		ret = readl_poll_timeout(reg, val, !!(val & mask),
					 sleep_us, timeout_us);
		break;

	case EQR_EYEQ5_PCIE:
		ret = 0; /* No busy waiting. */
		break;

	case EQR_EYEQ6H_SARCR:
		/*
		 * Wait until both bits change:
		 *	readl(base + EQR_EYEQ6H_SARCR_RST_STATUS) & BIT(offset)
		 *	readl(base + EQR_EYEQ6H_SARCR_CLK_STATUS) & BIT(offset)
		 */
		mask = BIT(offset);
		ret = read_poll_timeout(eqr_double_readl, val,
					(!(rst_status & mask) == assert) &&
					(!(clk_status & mask) == assert),
					sleep_us, timeout_us, false,
					base + EQR_EYEQ6H_SARCR_RST_STATUS,
					base + EQR_EYEQ6H_SARCR_CLK_STATUS,
					&rst_status, &clk_status);
		break;

	default:
		WARN_ON(1);
		ret = -EINVAL;
		break;
	}

	if (ret == -ETIMEDOUT)
		dev_dbg(dev, "%u-%u: timeout\n", domain, offset);
	return ret;
}
254 
/*
 * Perform the register write(s) that put reset @offset of @domain in reset.
 * Completion is not awaited here; callers follow up with
 * eqr_busy_wait_locked(). Caller must hold priv->mutexes[domain].
 */
static void eqr_assert_locked(struct eqr_private *priv, u32 domain, u32 offset)
{
	enum eqr_domain_type domain_type = priv->data->domains[domain].type;
	void __iomem *base, *reg;
	u32 val;

	lockdep_assert_held(&priv->mutexes[domain]);

	base = priv->base + priv->data->domains[domain].offset;

	switch (domain_type) {
	case EQR_EYEQ5_SARCR:
		/* Clearing the request bit asserts the reset. */
		reg = base + EQR_EYEQ5_SARCR_REQUEST;
		writel(readl(reg) & ~BIT(offset), reg);
		break;

	case EQR_EYEQ5_ACRP:
		/* One register per reset; raise the power-down request. */
		reg = base + 4 * offset;
		writel(readl(reg) | EQR_EYEQ5_ACRP_PD_REQ, reg);
		break;

	case EQR_EYEQ5_PCIE:
		/* Single register at domain base; clearing the bit asserts. */
		writel(readl(base) & ~BIT(offset), base);
		break;

	case EQR_EYEQ6H_SARCR:
		/* RST_REQUEST and CLK_REQUEST must be kept in sync. */
		val = readl(base + EQR_EYEQ6H_SARCR_RST_REQUEST);
		val &= ~BIT(offset);
		writel(val, base + EQR_EYEQ6H_SARCR_RST_REQUEST);
		writel(val, base + EQR_EYEQ6H_SARCR_CLK_REQUEST);
		break;

	default:
		WARN_ON(1);
		break;
	}
}
293 
294 static int eqr_assert(struct reset_controller_dev *rcdev, unsigned long id)
295 {
296 	struct eqr_private *priv = eqr_rcdev_to_priv(rcdev);
297 	u32 domain = FIELD_GET(ID_DOMAIN_MASK, id);
298 	u32 offset = FIELD_GET(ID_OFFSET_MASK, id);
299 
300 	dev_dbg(rcdev->dev, "%u-%u: assert request\n", domain, offset);
301 
302 	guard(mutex)(&priv->mutexes[domain]);
303 
304 	eqr_assert_locked(priv, domain, offset);
305 	return eqr_busy_wait_locked(priv, rcdev->dev, domain, offset, true);
306 }
307 
/*
 * Perform the register write(s) that release reset @offset of @domain.
 * Mirror image of eqr_assert_locked(); completion is not awaited here.
 * Caller must hold priv->mutexes[domain].
 */
static void eqr_deassert_locked(struct eqr_private *priv, u32 domain,
				u32 offset)
{
	enum eqr_domain_type domain_type = priv->data->domains[domain].type;
	void __iomem *base, *reg;
	u32 val;

	lockdep_assert_held(&priv->mutexes[domain]);

	base = priv->base + priv->data->domains[domain].offset;

	switch (domain_type) {
	case EQR_EYEQ5_SARCR:
		/* Setting the request bit releases the reset. */
		reg = base + EQR_EYEQ5_SARCR_REQUEST;
		writel(readl(reg) | BIT(offset), reg);
		break;

	case EQR_EYEQ5_ACRP:
		/* Drop the power-down request for this reset's register. */
		reg = base + 4 * offset;
		writel(readl(reg) & ~EQR_EYEQ5_ACRP_PD_REQ, reg);
		break;

	case EQR_EYEQ5_PCIE:
		/* Single register at domain base; setting the bit releases. */
		writel(readl(base) | BIT(offset), base);
		break;

	case EQR_EYEQ6H_SARCR:
		/* RST_REQUEST and CLK_REQUEST must be kept in sync. */
		val = readl(base + EQR_EYEQ6H_SARCR_RST_REQUEST);
		val |= BIT(offset);
		writel(val, base + EQR_EYEQ6H_SARCR_RST_REQUEST);
		writel(val, base + EQR_EYEQ6H_SARCR_CLK_REQUEST);
		break;

	default:
		WARN_ON(1);
		break;
	}
}
347 
348 static int eqr_deassert(struct reset_controller_dev *rcdev, unsigned long id)
349 {
350 	struct eqr_private *priv = eqr_rcdev_to_priv(rcdev);
351 	u32 domain = FIELD_GET(ID_DOMAIN_MASK, id);
352 	u32 offset = FIELD_GET(ID_OFFSET_MASK, id);
353 
354 	dev_dbg(rcdev->dev, "%u-%u: deassert request\n", domain, offset);
355 
356 	guard(mutex)(&priv->mutexes[domain]);
357 
358 	eqr_deassert_locked(priv, domain, offset);
359 	return eqr_busy_wait_locked(priv, rcdev->dev, domain, offset, false);
360 }
361 
362 static int eqr_status(struct reset_controller_dev *rcdev, unsigned long id)
363 {
364 	u32 domain = FIELD_GET(ID_DOMAIN_MASK, id);
365 	u32 offset = FIELD_GET(ID_OFFSET_MASK, id);
366 	struct eqr_private *priv = eqr_rcdev_to_priv(rcdev);
367 	enum eqr_domain_type domain_type = priv->data->domains[domain].type;
368 	void __iomem *base, *reg;
369 
370 	dev_dbg(rcdev->dev, "%u-%u: status request\n", domain, offset);
371 
372 	guard(mutex)(&priv->mutexes[domain]);
373 
374 	base = priv->base + priv->data->domains[domain].offset;
375 
376 	switch (domain_type) {
377 	case EQR_EYEQ5_SARCR:
378 		reg = base + EQR_EYEQ5_SARCR_STATUS;
379 		return !(readl(reg) & BIT(offset));
380 	case EQR_EYEQ5_ACRP:
381 		reg = base + 4 * offset;
382 		return !(readl(reg) & EQR_EYEQ5_ACRP_ST_ACTIVE);
383 	case EQR_EYEQ5_PCIE:
384 		return !(readl(base) & BIT(offset));
385 	case EQR_EYEQ6H_SARCR:
386 		reg = base + EQR_EYEQ6H_SARCR_RST_STATUS;
387 		return !(readl(reg) & BIT(offset));
388 	default:
389 		return -EINVAL;
390 	}
391 }
392 
/* Callbacks registered with the reset controller framework. */
static const struct reset_control_ops eqr_ops = {
	.assert	  = eqr_assert,
	.deassert = eqr_deassert,
	.status	  = eqr_status,
};
398 
399 static int eqr_of_xlate_internal(struct reset_controller_dev *rcdev,
400 				 u32 domain, u32 offset)
401 {
402 	struct eqr_private *priv = eqr_rcdev_to_priv(rcdev);
403 
404 	if (domain >= priv->data->domain_count || offset > 31 ||
405 	    !(priv->data->domains[domain].valid_mask & BIT(offset))) {
406 		dev_err(rcdev->dev, "%u-%u: invalid reset\n", domain, offset);
407 		return -EINVAL;
408 	}
409 
410 	return FIELD_PREP(ID_DOMAIN_MASK, domain) | FIELD_PREP(ID_OFFSET_MASK, offset);
411 }
412 
/* #reset-cells = <1>: single-domain controllers, the one cell is the offset. */
static int eqr_of_xlate_onecell(struct reset_controller_dev *rcdev,
				const struct of_phandle_args *reset_spec)
{
	return eqr_of_xlate_internal(rcdev, 0, reset_spec->args[0]);
}
418 
/* #reset-cells = <2>: multi-domain controllers, cells are (domain, offset). */
static int eqr_of_xlate_twocells(struct reset_controller_dev *rcdev,
				 const struct of_phandle_args *reset_spec)
{
	return eqr_of_xlate_internal(rcdev, reset_spec->args[0], reset_spec->args[1]);
}
424 
425 static void eqr_of_node_put(void *_dev)
426 {
427 	struct device *dev = _dev;
428 
429 	of_node_put(dev->of_node);
430 }
431 
432 static int eqr_probe(struct auxiliary_device *adev,
433 		     const struct auxiliary_device_id *id)
434 {
435 	const struct of_device_id *match;
436 	struct device *dev = &adev->dev;
437 	struct eqr_private *priv;
438 	unsigned int i;
439 	int ret;
440 
441 	/*
442 	 * We are an auxiliary device of clk-eyeq. We do not have an OF node by
443 	 * default; let's reuse our parent's OF node.
444 	 */
445 	WARN_ON(dev->of_node);
446 	device_set_of_node_from_dev(dev, dev->parent);
447 	if (!dev->of_node)
448 		return -ENODEV;
449 
450 	ret = devm_add_action_or_reset(dev, eqr_of_node_put, dev);
451 	if (ret)
452 		return ret;
453 
454 	/*
455 	 * Using our newfound OF node, we can get match data. We cannot use
456 	 * device_get_match_data() because it does not match reused OF nodes.
457 	 */
458 	match = of_match_node(dev->driver->of_match_table, dev->of_node);
459 	if (!match || !match->data)
460 		return -ENODEV;
461 
462 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
463 	if (!priv)
464 		return -ENOMEM;
465 
466 	priv->data = match->data;
467 	priv->base = (void __iomem *)dev_get_platdata(dev);
468 	priv->rcdev.ops = &eqr_ops;
469 	priv->rcdev.owner = THIS_MODULE;
470 	priv->rcdev.dev = dev;
471 	priv->rcdev.of_node = dev->of_node;
472 
473 	if (priv->data->domain_count == 1) {
474 		priv->rcdev.of_reset_n_cells = 1;
475 		priv->rcdev.of_xlate = eqr_of_xlate_onecell;
476 	} else {
477 		priv->rcdev.of_reset_n_cells = 2;
478 		priv->rcdev.of_xlate = eqr_of_xlate_twocells;
479 	}
480 
481 	for (i = 0; i < priv->data->domain_count; i++)
482 		mutex_init(&priv->mutexes[i]);
483 
484 	priv->rcdev.nr_resets = 0;
485 	for (i = 0; i < priv->data->domain_count; i++)
486 		priv->rcdev.nr_resets += hweight32(priv->data->domains[i].valid_mask);
487 
488 	ret = devm_reset_controller_register(dev, &priv->rcdev);
489 	if (ret)
490 		return dev_err_probe(dev, ret, "failed registering reset controller\n");
491 
492 	return 0;
493 }
494 
/* EyeQ5: SARCR peripherals, ACRP accelerators, PCIe — see header comment. */
static const struct eqr_domain_descriptor eqr_eyeq5_domains[] = {
	{
		.type = EQR_EYEQ5_SARCR,
		.valid_mask = 0xFFFFFF8,
		.offset = 0x004,
	},
	{
		.type = EQR_EYEQ5_ACRP,
		.valid_mask = 0x0001FFF,
		.offset = 0x200,
	},
	{
		.type = EQR_EYEQ5_PCIE,
		.valid_mask = 0x007BFFF,
		.offset = 0x120,
	},
};

static const struct eqr_match_data eqr_eyeq5_data = {
	.domain_count	= ARRAY_SIZE(eqr_eyeq5_domains),
	.domains	= eqr_eyeq5_domains,
};
517 
/* EyeQ6L: SARCR peripherals and ACRP accelerators — see header comment. */
static const struct eqr_domain_descriptor eqr_eyeq6l_domains[] = {
	{
		.type = EQR_EYEQ5_SARCR,
		.valid_mask = 0x3FFF,
		.offset = 0x004,
	},
	{
		.type = EQR_EYEQ5_ACRP,
		.valid_mask = 0x00FF,
		.offset = 0x200,
	},
};

static const struct eqr_match_data eqr_eyeq6l_data = {
	.domain_count	= ARRAY_SIZE(eqr_eyeq6l_domains),
	.domains	= eqr_eyeq6l_domains,
};
535 
536 static const struct eqr_domain_descriptor eqr_eyeq6lplus_domains[] = {
537 	{
538 		.type = EQR_EYEQ5_PCIE,
539 		.valid_mask = 0x3FFF,
540 		.offset = 0x004,
541 	},
542 	{
543 		.type = EQR_EYEQ5_ACRP,
544 		.valid_mask = 0x00FF,
545 		.offset = 0x200,
546 	},
547 };
548 
549 static const struct eqr_match_data eqr_eyeq6lplus_data = {
550 	.domain_count	= ARRAY_SIZE(eqr_eyeq6lplus_domains),
551 	.domains	= eqr_eyeq6lplus_domains,
552 };
553 
/* West and east OLBs each have an instance. */
static const struct eqr_domain_descriptor eqr_eyeq6h_we_domains[] = {
	{
		.type = EQR_EYEQ6H_SARCR,
		.valid_mask = 0x1F7F,	/* Bit 7 is a hole, see header comment. */
		.offset = 0x004,
	},
};

static const struct eqr_match_data eqr_eyeq6h_we_data = {
	.domain_count	= ARRAY_SIZE(eqr_eyeq6h_we_domains),
	.domains	= eqr_eyeq6h_we_domains,
};

/* EyeQ6H accelerator OLB: a single ACRP domain at the region base. */
static const struct eqr_domain_descriptor eqr_eyeq6h_acc_domains[] = {
	{
		.type = EQR_EYEQ5_ACRP,
		.valid_mask = 0x7FFF,
		.offset = 0x000,
	},
};

static const struct eqr_match_data eqr_eyeq6h_acc_data = {
	.domain_count	= ARRAY_SIZE(eqr_eyeq6h_acc_domains),
	.domains	= eqr_eyeq6h_acc_domains,
};
580 
581 /*
582  * Table describes OLB system-controller compatibles.
583  * It does not get used to match against devicetree node.
584  */
static const struct of_device_id eqr_match_table[] = {
	{ .compatible = "mobileye,eyeq5-olb", .data = &eqr_eyeq5_data },
	{ .compatible = "mobileye,eyeq6l-olb", .data = &eqr_eyeq6l_data },
	{ .compatible = "mobileye,eyeq6lplus-olb", .data = &eqr_eyeq6lplus_data },
	/* West and east OLB instances share the same domain layout. */
	{ .compatible = "mobileye,eyeq6h-west-olb", .data = &eqr_eyeq6h_we_data },
	{ .compatible = "mobileye,eyeq6h-east-olb", .data = &eqr_eyeq6h_we_data },
	{ .compatible = "mobileye,eyeq6h-acc-olb", .data = &eqr_eyeq6h_acc_data },
	{}
};
MODULE_DEVICE_TABLE(of, eqr_match_table);
595 
/* Auxiliary device names spawned by the clk-eyeq parent driver. */
static const struct auxiliary_device_id eqr_id_table[] = {
	{ .name = "clk_eyeq.reset" },
	{ .name = "clk_eyeq.reset_west" },
	{ .name = "clk_eyeq.reset_east" },
	{ .name = "clk_eyeq.reset_acc" },
	{}
};
MODULE_DEVICE_TABLE(auxiliary, eqr_id_table);
604 
/* Matched by auxiliary device name; the OF table only supplies match data. */
static struct auxiliary_driver eqr_driver = {
	.probe = eqr_probe,
	.id_table = eqr_id_table,
	.driver = {
		.of_match_table = eqr_match_table,
	}
};
module_auxiliary_driver(eqr_driver);
613