// SPDX-License-Identifier: GPL-2.0-only
/*
 * SPI-Engine SPI controller driver
 * Copyright 2015 Analog Devices Inc.
 * Copyright 2024 BayLibre, SAS
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/fpga/adi-axi-common.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/platform_device.h>
#include <linux/spi/offload/provider.h>
#include <linux/spi/spi.h>
#include <trace/events/spi.h>

#define SPI_ENGINE_REG_OFFLOAD_MEM_ADDR_WIDTH	0x10
#define SPI_ENGINE_REG_RESET			0x40

#define SPI_ENGINE_REG_INT_ENABLE		0x80
#define SPI_ENGINE_REG_INT_PENDING		0x84
#define SPI_ENGINE_REG_INT_SOURCE		0x88

#define SPI_ENGINE_REG_SYNC_ID			0xc0
#define SPI_ENGINE_REG_OFFLOAD_SYNC_ID		0xc4

#define SPI_ENGINE_REG_CMD_FIFO_ROOM		0xd0
#define SPI_ENGINE_REG_SDO_FIFO_ROOM		0xd4
#define SPI_ENGINE_REG_SDI_FIFO_LEVEL		0xd8

#define SPI_ENGINE_REG_CMD_FIFO			0xe0
#define SPI_ENGINE_REG_SDO_DATA_FIFO		0xe4
#define SPI_ENGINE_REG_SDI_DATA_FIFO		0xe8
#define SPI_ENGINE_REG_SDI_DATA_FIFO_PEEK	0xec

#define SPI_ENGINE_MAX_NUM_OFFLOADS		32

#define SPI_ENGINE_REG_OFFLOAD_CTRL(x)		(0x100 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))
#define SPI_ENGINE_REG_OFFLOAD_STATUS(x)	(0x104 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))
#define SPI_ENGINE_REG_OFFLOAD_RESET(x)		(0x108 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))
#define SPI_ENGINE_REG_OFFLOAD_CMD_FIFO(x)	(0x110 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))
#define SPI_ENGINE_REG_OFFLOAD_SDO_FIFO(x)	(0x114 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))

#define SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_SDO	GENMASK(15, 8)
#define SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_CMD	GENMASK(7, 0)

#define SPI_ENGINE_INT_CMD_ALMOST_EMPTY		BIT(0)
#define SPI_ENGINE_INT_SDO_ALMOST_EMPTY		BIT(1)
#define SPI_ENGINE_INT_SDI_ALMOST_FULL		BIT(2)
#define SPI_ENGINE_INT_SYNC			BIT(3)
#define SPI_ENGINE_INT_OFFLOAD_SYNC		BIT(4)

#define SPI_ENGINE_OFFLOAD_CTRL_ENABLE		BIT(0)

#define SPI_ENGINE_CONFIG_CPHA			BIT(0)
#define SPI_ENGINE_CONFIG_CPOL			BIT(1)
#define SPI_ENGINE_CONFIG_3WIRE			BIT(2)
#define SPI_ENGINE_CONFIG_SDO_IDLE_HIGH		BIT(3)

#define SPI_ENGINE_INST_TRANSFER		0x0
#define SPI_ENGINE_INST_ASSERT			0x1
#define SPI_ENGINE_INST_WRITE			0x2
#define SPI_ENGINE_INST_MISC			0x3
#define SPI_ENGINE_INST_CS_INV			0x4

#define SPI_ENGINE_CMD_REG_CLK_DIV		0x0
#define SPI_ENGINE_CMD_REG_CONFIG		0x1
#define SPI_ENGINE_CMD_REG_XFER_BITS		0x2

#define SPI_ENGINE_MISC_SYNC			0x0
#define SPI_ENGINE_MISC_SLEEP			0x1

#define SPI_ENGINE_TRANSFER_WRITE		0x1
#define SPI_ENGINE_TRANSFER_READ		0x2

/* Arbitrary sync ID for use by host->cur_msg */
#define AXI_SPI_ENGINE_CUR_MSG_SYNC_ID		0x1

#define SPI_ENGINE_CMD(inst, arg1, arg2) \
	(((inst) << 12) | ((arg1) << 8) | (arg2))

#define SPI_ENGINE_CMD_TRANSFER(flags, n) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_TRANSFER, (flags), (n))
#define SPI_ENGINE_CMD_ASSERT(delay, cs) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_ASSERT, (delay), (cs))
#define SPI_ENGINE_CMD_WRITE(reg, val) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_WRITE, (reg), (val))
#define SPI_ENGINE_CMD_SLEEP(delay) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SLEEP, (delay))
#define SPI_ENGINE_CMD_SYNC(id) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SYNC, (id))
#define SPI_ENGINE_CMD_CS_INV(flags) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_CS_INV, 0, (flags))
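
/*
 * Each command is a single 16-bit word: a 4-bit instruction in bits 15:12,
 * arg1 in bits 11:8 and arg2 in bits 7:0. For example,
 * SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG, 0x3) expands to
 * (0x2 << 12) | (0x1 << 8) | 0x3 == 0x2103.
 */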

/* default sizes - can be changed when SPI Engine firmware is compiled */
#define SPI_ENGINE_OFFLOAD_CMD_FIFO_SIZE	16
#define SPI_ENGINE_OFFLOAD_SDO_FIFO_SIZE	16

struct spi_engine_program {
	unsigned int length;
	uint16_t instructions[] __counted_by(length);
};

/**
 * struct spi_engine_message_state - SPI engine per-message state
 */
struct spi_engine_message_state {
	/** @cmd_length: Number of elements in cmd_buf array. */
	unsigned cmd_length;
	/** @cmd_buf: Array of commands not yet written to CMD FIFO. */
	const uint16_t *cmd_buf;
	/** @tx_xfer: Next xfer with tx_buf not yet fully written to TX FIFO. */
	struct spi_transfer *tx_xfer;
	/** @tx_length: Size of tx_buf in bytes. */
	unsigned int tx_length;
	/** @tx_buf: Bytes not yet written to TX FIFO. */
	const uint8_t *tx_buf;
	/** @rx_xfer: Next xfer with rx_buf not yet fully read from RX FIFO. */
	struct spi_transfer *rx_xfer;
	/** @rx_length: Size of rx_buf in bytes. */
	unsigned int rx_length;
	/** @rx_buf: Bytes not yet read from the RX FIFO. */
	uint8_t *rx_buf;
};

enum {
	SPI_ENGINE_OFFLOAD_FLAG_ASSIGNED,
	SPI_ENGINE_OFFLOAD_FLAG_PREPARED,
};

struct spi_engine_offload {
	struct spi_engine *spi_engine;
	unsigned long flags;
	unsigned int offload_num;
	unsigned int spi_mode_config;
	u8 bits_per_word;
};

struct spi_engine {
	struct clk *clk;
	struct clk *ref_clk;

	spinlock_t lock;

	void __iomem *base;
	struct spi_engine_message_state msg_state;
	struct completion msg_complete;
	unsigned int int_enable;
	/* shadows hardware CS inversion flag state */
	u8 cs_inv;

	unsigned int offload_ctrl_mem_size;
	unsigned int offload_sdo_mem_size;
	struct spi_offload *offload;
	u32 offload_caps;
	bool offload_requires_sync;
};

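/*
 * Adds a command to the program, or, when @dry is true, only counts it.
 * Compilation runs twice: a dry pass to size the instruction array and a
 * second pass that actually stores the commands.
 */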
static void spi_engine_program_add_cmd(struct spi_engine_program *p,
	bool dry, uint16_t cmd)
{
	p->length++;

	if (!dry)
		p->instructions[p->length - 1] = cmd;
}

static unsigned int spi_engine_get_config(struct spi_device *spi)
{
	unsigned int config = 0;

	if (spi->mode & SPI_CPOL)
		config |= SPI_ENGINE_CONFIG_CPOL;
	if (spi->mode & SPI_CPHA)
		config |= SPI_ENGINE_CONFIG_CPHA;
	if (spi->mode & SPI_3WIRE)
		config |= SPI_ENGINE_CONFIG_3WIRE;
	if (spi->mode & SPI_MOSI_IDLE_HIGH)
		config |= SPI_ENGINE_CONFIG_SDO_IDLE_HIGH;
	if (spi->mode & SPI_MOSI_IDLE_LOW)
		config &= ~SPI_ENGINE_CONFIG_SDO_IDLE_HIGH;

	return config;
}

static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
	struct spi_transfer *xfer)
{
	unsigned int len;

	if (xfer->bits_per_word <= 8)
		len = xfer->len;
	else if (xfer->bits_per_word <= 16)
		len = xfer->len / 2;
	else
		len = xfer->len / 4;

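	/*
	 * The transfer instruction's 8-bit count field encodes (words - 1),
	 * so one instruction moves at most 256 words and longer transfers
	 * are split into multiple instructions.
	 */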
	while (len) {
		unsigned int n = min(len, 256U);
		unsigned int flags = 0;

		if (xfer->tx_buf || (xfer->offload_flags & SPI_OFFLOAD_XFER_TX_STREAM))
			flags |= SPI_ENGINE_TRANSFER_WRITE;
		if (xfer->rx_buf || (xfer->offload_flags & SPI_OFFLOAD_XFER_RX_STREAM))
			flags |= SPI_ENGINE_TRANSFER_READ;

		spi_engine_program_add_cmd(p, dry,
			SPI_ENGINE_CMD_TRANSFER(flags, n - 1));
		len -= n;
	}
}

static void spi_engine_gen_sleep(struct spi_engine_program *p, bool dry,
				 int delay_ns, int inst_ns, u32 sclk_hz)
{
	unsigned int t;

	/*
	 * Negative delay indicates error, e.g. from spi_delay_to_ns(). And if
	 * delay is less than the instruction execution time, there is no need
	 * for an extra sleep instruction since the instruction execution time
	 * will already cover the required delay.
	 */
	if (delay_ns < 0 || delay_ns <= inst_ns)
		return;

	t = DIV_ROUND_UP_ULL((u64)(delay_ns - inst_ns) * sclk_hz, NSEC_PER_SEC);
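	/*
	 * Like the transfer instruction, the sleep instruction's count field
	 * is 8 bits wide and encodes (cycles - 1), so long delays are split
	 * into chunks of at most 256 SCLK cycles.
	 */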
	while (t) {
		unsigned int n = min(t, 256U);

		spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_SLEEP(n - 1));
		t -= n;
	}
}

static void spi_engine_gen_cs(struct spi_engine_program *p, bool dry,
		struct spi_device *spi, bool assert)
{
	unsigned int mask = 0xff;

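	/*
	 * The ASSERT instruction drives each CS line to the corresponding
	 * bit in the mask. A set bit deasserts the line (inversion for
	 * SPI_CS_HIGH is handled separately via the CS_INV instruction), so
	 * asserting a chip select means clearing its bit.
	 */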
	if (assert)
		mask ^= BIT(spi_get_chipselect(spi, 0));

	spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_ASSERT(0, mask));
}

/*
 * Performs precompile steps on the message.
 *
 * The SPI core does most of the message/transfer validation and filling in
 * fields for us via __spi_validate(). This fixes up anything remaining that
 * is not done there.
 *
 * NB: This is separate from spi_engine_compile_message() because the latter
 * is called twice and would otherwise result in double-evaluation.
 *
 * Returns 0 on success, -EINVAL on failure.
 */
static int spi_engine_precompile_message(struct spi_message *msg)
{
	unsigned int clk_div, max_hz = msg->spi->controller->max_speed_hz;
	struct spi_transfer *xfer;
	u8 min_bits_per_word = U8_MAX;
	u8 max_bits_per_word = 0;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* If we have an offload transfer, we can't rx to a buffer */
		if (msg->offload && xfer->rx_buf)
			return -EINVAL;

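		/*
		 * The clock divider register is 8 bits wide and holds
		 * (divider - 1), so the effective divider is capped at 256.
		 */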
		clk_div = DIV_ROUND_UP(max_hz, xfer->speed_hz);
		xfer->effective_speed_hz = max_hz / min(clk_div, 256U);

		if (xfer->len) {
			min_bits_per_word = min(min_bits_per_word, xfer->bits_per_word);
			max_bits_per_word = max(max_bits_per_word, xfer->bits_per_word);
		}
	}

	/*
	 * If all xfers in the message use the same bits_per_word, we can
	 * provide some optimization when using SPI offload.
	 */
	if (msg->offload) {
		struct spi_engine_offload *priv = msg->offload->priv;

		if (min_bits_per_word == max_bits_per_word)
			priv->bits_per_word = min_bits_per_word;
		else
			priv->bits_per_word = 0;
	}

	return 0;
}

static void spi_engine_compile_message(struct spi_message *msg, bool dry,
				       struct spi_engine_program *p)
{
	struct spi_device *spi = msg->spi;
	struct spi_controller *host = spi->controller;
	struct spi_engine_offload *priv;
	struct spi_transfer *xfer;
	int clk_div, new_clk_div, inst_ns;
	bool keep_cs = false;
	u8 bits_per_word = 0;

	/*
	 * Take into account instruction execution time for more accurate sleep
	 * times, especially when the delay is small.
	 */
	inst_ns = DIV_ROUND_UP(NSEC_PER_SEC, host->max_speed_hz);

	clk_div = 1;

	/*
	 * As an optimization, SPI offload sets this once when the offload is
	 * enabled instead of repeating the instruction in each message.
	 */
	if (msg->offload) {
		priv = msg->offload->priv;
		priv->spi_mode_config = spi_engine_get_config(spi);

		/*
		 * If all xfers use the same bits_per_word, it can be optimized
		 * in the same way.
		 */
		bits_per_word = priv->bits_per_word;
	} else {
		spi_engine_program_add_cmd(p, dry,
			SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG,
				spi_engine_get_config(spi)));
	}

	xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
	spi_engine_gen_cs(p, dry, spi, !xfer->cs_off);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		new_clk_div = host->max_speed_hz / xfer->effective_speed_hz;
		if (new_clk_div != clk_div) {
			clk_div = new_clk_div;
			/* actual divider used is register value + 1 */
			spi_engine_program_add_cmd(p, dry,
				SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV,
					clk_div - 1));
		}

		if (bits_per_word != xfer->bits_per_word && xfer->len) {
			bits_per_word = xfer->bits_per_word;
			spi_engine_program_add_cmd(p, dry,
				SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_XFER_BITS,
					bits_per_word));
		}

		spi_engine_gen_xfer(p, dry, xfer);
		spi_engine_gen_sleep(p, dry, spi_delay_to_ns(&xfer->delay, xfer),
				     inst_ns, xfer->effective_speed_hz);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
				keep_cs = true;
			} else {
				if (!xfer->cs_off)
					spi_engine_gen_cs(p, dry, spi, false);

				spi_engine_gen_sleep(p, dry, spi_delay_to_ns(
					&xfer->cs_change_delay, xfer), inst_ns,
					xfer->effective_speed_hz);

				if (!list_next_entry(xfer, transfer_list)->cs_off)
					spi_engine_gen_cs(p, dry, spi, true);
			}
		} else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
			   xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
			spi_engine_gen_cs(p, dry, spi, xfer->cs_off);
		}
	}

	if (!keep_cs)
		spi_engine_gen_cs(p, dry, spi, false);

	/*
	 * Restore clockdiv to default so that future gen_sleep commands don't
	 * have to be aware of the current register state.
	 */
	if (clk_div != 1)
		spi_engine_program_add_cmd(p, dry,
			SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV, 0));
}

static void spi_engine_xfer_next(struct spi_message *msg,
	struct spi_transfer **_xfer)
{
	struct spi_transfer *xfer = *_xfer;

	if (!xfer) {
		xfer = list_first_entry(&msg->transfers,
			struct spi_transfer, transfer_list);
	} else if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
		xfer = NULL;
	} else {
		xfer = list_next_entry(xfer, transfer_list);
	}

	*_xfer = xfer;
}

static void spi_engine_tx_next(struct spi_message *msg)
{
	struct spi_engine_message_state *st = msg->state;
	struct spi_transfer *xfer = st->tx_xfer;

	do {
		spi_engine_xfer_next(msg, &xfer);
	} while (xfer && !xfer->tx_buf);

	st->tx_xfer = xfer;
	if (xfer) {
		st->tx_length = xfer->len;
		st->tx_buf = xfer->tx_buf;
	} else {
		st->tx_buf = NULL;
	}
}

static void spi_engine_rx_next(struct spi_message *msg)
{
	struct spi_engine_message_state *st = msg->state;
	struct spi_transfer *xfer = st->rx_xfer;

	do {
		spi_engine_xfer_next(msg, &xfer);
	} while (xfer && !xfer->rx_buf);

	st->rx_xfer = xfer;
	if (xfer) {
		st->rx_length = xfer->len;
		st->rx_buf = xfer->rx_buf;
	} else {
		st->rx_buf = NULL;
	}
}

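/*
 * The FIFO helpers below return true when the message still has data pending
 * for that FIFO, i.e. the corresponding "almost empty"/"almost full"
 * interrupt must remain enabled so the remainder can be handled from the
 * interrupt handler.
 */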
static bool spi_engine_write_cmd_fifo(struct spi_engine *spi_engine,
				      struct spi_message *msg)
{
	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_CMD_FIFO;
	struct spi_engine_message_state *st = msg->state;
	unsigned int n, m, i;
	const uint16_t *buf;

	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_CMD_FIFO_ROOM);
	while (n && st->cmd_length) {
		m = min(n, st->cmd_length);
		buf = st->cmd_buf;
		for (i = 0; i < m; i++)
			writel_relaxed(buf[i], addr);
		st->cmd_buf += m;
		st->cmd_length -= m;
		n -= m;
	}

	return st->cmd_length != 0;
}

static bool spi_engine_write_tx_fifo(struct spi_engine *spi_engine,
				     struct spi_message *msg)
{
	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDO_DATA_FIFO;
	struct spi_engine_message_state *st = msg->state;
	unsigned int n, m, i;

	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDO_FIFO_ROOM);
	while (n && st->tx_length) {
		if (st->tx_xfer->bits_per_word <= 8) {
			const u8 *buf = st->tx_buf;

			m = min(n, st->tx_length);
			for (i = 0; i < m; i++)
				writel_relaxed(buf[i], addr);
			st->tx_buf += m;
			st->tx_length -= m;
		} else if (st->tx_xfer->bits_per_word <= 16) {
			const u16 *buf = (const u16 *)st->tx_buf;

			m = min(n, st->tx_length / 2);
			for (i = 0; i < m; i++)
				writel_relaxed(buf[i], addr);
			st->tx_buf += m * 2;
			st->tx_length -= m * 2;
		} else {
			const u32 *buf = (const u32 *)st->tx_buf;

			m = min(n, st->tx_length / 4);
			for (i = 0; i < m; i++)
				writel_relaxed(buf[i], addr);
			st->tx_buf += m * 4;
			st->tx_length -= m * 4;
		}
		n -= m;
		if (st->tx_length == 0)
			spi_engine_tx_next(msg);
	}

	return st->tx_length != 0;
}

static bool spi_engine_read_rx_fifo(struct spi_engine *spi_engine,
				    struct spi_message *msg)
{
	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDI_DATA_FIFO;
	struct spi_engine_message_state *st = msg->state;
	unsigned int n, m, i;

	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDI_FIFO_LEVEL);
	while (n && st->rx_length) {
		if (st->rx_xfer->bits_per_word <= 8) {
			u8 *buf = st->rx_buf;

			m = min(n, st->rx_length);
			for (i = 0; i < m; i++)
				buf[i] = readl_relaxed(addr);
			st->rx_buf += m;
			st->rx_length -= m;
		} else if (st->rx_xfer->bits_per_word <= 16) {
			u16 *buf = (u16 *)st->rx_buf;

			m = min(n, st->rx_length / 2);
			for (i = 0; i < m; i++)
				buf[i] = readl_relaxed(addr);
			st->rx_buf += m * 2;
			st->rx_length -= m * 2;
		} else {
			u32 *buf = (u32 *)st->rx_buf;

			m = min(n, st->rx_length / 4);
			for (i = 0; i < m; i++)
				buf[i] = readl_relaxed(addr);
			st->rx_buf += m * 4;
			st->rx_length -= m * 4;
		}
		n -= m;
		if (st->rx_length == 0)
			spi_engine_rx_next(msg);
	}

	return st->rx_length != 0;
}

static irqreturn_t spi_engine_irq(int irq, void *devid)
{
	struct spi_controller *host = devid;
	struct spi_message *msg = host->cur_msg;
	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
	unsigned int disable_int = 0;
	unsigned int pending;
	int completed_id = -1;

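	/*
	 * On a SYNC interrupt, SPI_ENGINE_REG_SYNC_ID holds the ID of the
	 * most recently retired SYNC instruction; it is compared against
	 * AXI_SPI_ENGINE_CUR_MSG_SYNC_ID below to detect completion of the
	 * current message.
	 */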
	pending = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_INT_PENDING);

	if (pending & SPI_ENGINE_INT_SYNC) {
		writel_relaxed(SPI_ENGINE_INT_SYNC,
			spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
		completed_id = readl_relaxed(
			spi_engine->base + SPI_ENGINE_REG_SYNC_ID);
	}

	spin_lock(&spi_engine->lock);

	if (pending & SPI_ENGINE_INT_CMD_ALMOST_EMPTY) {
		if (!spi_engine_write_cmd_fifo(spi_engine, msg))
			disable_int |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;
	}

	if (pending & SPI_ENGINE_INT_SDO_ALMOST_EMPTY) {
		if (!spi_engine_write_tx_fifo(spi_engine, msg))
			disable_int |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;
	}

	if (pending & (SPI_ENGINE_INT_SDI_ALMOST_FULL | SPI_ENGINE_INT_SYNC)) {
		if (!spi_engine_read_rx_fifo(spi_engine, msg))
			disable_int |= SPI_ENGINE_INT_SDI_ALMOST_FULL;
	}

	if (pending & SPI_ENGINE_INT_SYNC && msg) {
		if (completed_id == AXI_SPI_ENGINE_CUR_MSG_SYNC_ID) {
			msg->status = 0;
			msg->actual_length = msg->frame_length;
			complete(&spi_engine->msg_complete);
			disable_int |= SPI_ENGINE_INT_SYNC;
		}
	}

	if (disable_int) {
		spi_engine->int_enable &= ~disable_int;
		writel_relaxed(spi_engine->int_enable,
			spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
	}

	spin_unlock(&spi_engine->lock);

	return IRQ_HANDLED;
}

static int spi_engine_offload_prepare(struct spi_message *msg)
{
	struct spi_controller *host = msg->spi->controller;
	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
	struct spi_engine_program *p = msg->opt_state;
	struct spi_engine_offload *priv = msg->offload->priv;
	struct spi_transfer *xfer;
	void __iomem *cmd_addr;
	void __iomem *sdo_addr;
	size_t tx_word_count = 0;
	unsigned int i;

	if (p->length > spi_engine->offload_ctrl_mem_size)
		return -EINVAL;

	/* count total number of tx words in message */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* no support for reading to rx_buf */
		if (xfer->rx_buf)
			return -EINVAL;

		if (!xfer->tx_buf)
			continue;

		if (xfer->bits_per_word <= 8)
			tx_word_count += xfer->len;
		else if (xfer->bits_per_word <= 16)
			tx_word_count += xfer->len / 2;
		else
			tx_word_count += xfer->len / 4;
	}

	if (tx_word_count && !(spi_engine->offload_caps & SPI_OFFLOAD_CAP_TX_STATIC_DATA))
		return -EINVAL;

	if (tx_word_count > spi_engine->offload_sdo_mem_size)
		return -EINVAL;

	/*
	 * This protects against calling spi_optimize_message() with an offload
	 * that has already been prepared with a different message.
	 */
	if (test_and_set_bit_lock(SPI_ENGINE_OFFLOAD_FLAG_PREPARED, &priv->flags))
		return -EBUSY;

	cmd_addr = spi_engine->base +
		   SPI_ENGINE_REG_OFFLOAD_CMD_FIFO(priv->offload_num);
	sdo_addr = spi_engine->base +
		   SPI_ENGINE_REG_OFFLOAD_SDO_FIFO(priv->offload_num);

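	/*
	 * Preload the offload SDO memory with the constant TX data and the
	 * offload command memory with the compiled program, so that the
	 * offload can execute the message without CPU involvement.
	 */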
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!xfer->tx_buf)
			continue;

		if (xfer->bits_per_word <= 8) {
			const u8 *buf = xfer->tx_buf;

			for (i = 0; i < xfer->len; i++)
				writel_relaxed(buf[i], sdo_addr);
		} else if (xfer->bits_per_word <= 16) {
			const u16 *buf = xfer->tx_buf;

			for (i = 0; i < xfer->len / 2; i++)
				writel_relaxed(buf[i], sdo_addr);
		} else {
			const u32 *buf = xfer->tx_buf;

			for (i = 0; i < xfer->len / 4; i++)
				writel_relaxed(buf[i], sdo_addr);
		}
	}

	for (i = 0; i < p->length; i++)
		writel_relaxed(p->instructions[i], cmd_addr);

	return 0;
}

static void spi_engine_offload_unprepare(struct spi_offload *offload)
{
	struct spi_engine_offload *priv = offload->priv;
	struct spi_engine *spi_engine = priv->spi_engine;

	writel_relaxed(1, spi_engine->base +
			  SPI_ENGINE_REG_OFFLOAD_RESET(priv->offload_num));
	writel_relaxed(0, spi_engine->base +
			  SPI_ENGINE_REG_OFFLOAD_RESET(priv->offload_num));

	clear_bit_unlock(SPI_ENGINE_OFFLOAD_FLAG_PREPARED, &priv->flags);
}

static int spi_engine_optimize_message(struct spi_message *msg)
{
	struct spi_controller *host = msg->spi->controller;
	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
	struct spi_engine_program p_dry, *p;
	int ret;

	ret = spi_engine_precompile_message(msg);
	if (ret)
		return ret;

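	/*
	 * Compile twice: a dry run that only counts the instructions, then a
	 * real run into an allocation of exactly that size (plus one extra
	 * slot for the optional trailing SYNC instruction).
	 */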
	p_dry.length = 0;
	spi_engine_compile_message(msg, true, &p_dry);

	p = kzalloc(struct_size(p, instructions, p_dry.length + 1), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	spi_engine_compile_message(msg, false, p);

	/*
	 * Non-offload needs SYNC for completion interrupt. Older versions of
	 * the IP core also need SYNC for offload to work properly.
	 */
	if (!msg->offload || spi_engine->offload_requires_sync)
		spi_engine_program_add_cmd(p, false, SPI_ENGINE_CMD_SYNC(
			msg->offload ? 0 : AXI_SPI_ENGINE_CUR_MSG_SYNC_ID));

	msg->opt_state = p;

	if (msg->offload) {
		ret = spi_engine_offload_prepare(msg);
		if (ret) {
			msg->opt_state = NULL;
			kfree(p);
			return ret;
		}
	}

	return 0;
}

static int spi_engine_unoptimize_message(struct spi_message *msg)
{
	if (msg->offload)
		spi_engine_offload_unprepare(msg->offload);

	kfree(msg->opt_state);

	return 0;
}

static struct spi_offload
*spi_engine_get_offload(struct spi_device *spi,
			const struct spi_offload_config *config)
{
	struct spi_controller *host = spi->controller;
	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
	struct spi_engine_offload *priv;

	if (!spi_engine->offload)
		return ERR_PTR(-ENODEV);

	if (config->capability_flags & ~spi_engine->offload_caps)
		return ERR_PTR(-EINVAL);

	priv = spi_engine->offload->priv;

	if (test_and_set_bit_lock(SPI_ENGINE_OFFLOAD_FLAG_ASSIGNED, &priv->flags))
		return ERR_PTR(-EBUSY);

	return spi_engine->offload;
}

static void spi_engine_put_offload(struct spi_offload *offload)
{
	struct spi_engine_offload *priv = offload->priv;

	clear_bit_unlock(SPI_ENGINE_OFFLOAD_FLAG_ASSIGNED, &priv->flags);
}

static int spi_engine_setup(struct spi_device *device)
{
	struct spi_controller *host = device->controller;
	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
	unsigned int reg;

	if (device->mode & SPI_CS_HIGH)
		spi_engine->cs_inv |= BIT(spi_get_chipselect(device, 0));
	else
		spi_engine->cs_inv &= ~BIT(spi_get_chipselect(device, 0));

	writel_relaxed(SPI_ENGINE_CMD_SYNC(0),
		       spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);

	writel_relaxed(SPI_ENGINE_CMD_CS_INV(spi_engine->cs_inv),
		       spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);

	/*
	 * In addition to setting the flags, we have to do a CS assert command
	 * to make the new setting actually take effect.
	 */
	writel_relaxed(SPI_ENGINE_CMD_ASSERT(0, 0xff),
		       spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);

	writel_relaxed(SPI_ENGINE_CMD_SYNC(1),
		       spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);

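	/*
	 * The commands are bracketed by SYNC(0) and SYNC(1); once the sync ID
	 * register reads back 1, the SYNC(1) instruction has retired and all
	 * of the preceding commands are guaranteed to have executed.
	 */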
	return readl_relaxed_poll_timeout(spi_engine->base + SPI_ENGINE_REG_SYNC_ID,
					  reg, reg == 1, 1, 1000);
}

static int spi_engine_transfer_one_message(struct spi_controller *host,
	struct spi_message *msg)
{
	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
	struct spi_engine_message_state *st = &spi_engine->msg_state;
	struct spi_engine_program *p = msg->opt_state;
	unsigned int int_enable = 0;
	unsigned long flags;

	if (msg->offload) {
		dev_err(&host->dev, "Single transfer offload not supported\n");
		msg->status = -EOPNOTSUPP;
		goto out;
	}

	/* reinitialize message state for this transfer */
	memset(st, 0, sizeof(*st));
	st->cmd_buf = p->instructions;
	st->cmd_length = p->length;
	msg->state = st;

	reinit_completion(&spi_engine->msg_complete);

	if (trace_spi_transfer_start_enabled()) {
		struct spi_transfer *xfer;

		list_for_each_entry(xfer, &msg->transfers, transfer_list)
			trace_spi_transfer_start(msg, xfer);
	}

	spin_lock_irqsave(&spi_engine->lock, flags);

	if (spi_engine_write_cmd_fifo(spi_engine, msg))
		int_enable |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;

	spi_engine_tx_next(msg);
	if (spi_engine_write_tx_fifo(spi_engine, msg))
		int_enable |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;

	spi_engine_rx_next(msg);
	if (st->rx_length != 0)
		int_enable |= SPI_ENGINE_INT_SDI_ALMOST_FULL;

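	/*
	 * The compiled program ends with a SYNC instruction carrying
	 * AXI_SPI_ENGINE_CUR_MSG_SYNC_ID; its interrupt is what signals
	 * message completion (see spi_engine_irq()).
	 */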
	int_enable |= SPI_ENGINE_INT_SYNC;

	writel_relaxed(int_enable,
		spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
	spi_engine->int_enable = int_enable;
	spin_unlock_irqrestore(&spi_engine->lock, flags);

	if (!wait_for_completion_timeout(&spi_engine->msg_complete,
					 msecs_to_jiffies(5000))) {
		dev_err(&host->dev,
			"Timeout occurred while waiting for transfer to complete. Hardware is probably broken.\n");
		msg->status = -ETIMEDOUT;
	}

	if (trace_spi_transfer_stop_enabled()) {
		struct spi_transfer *xfer;

		list_for_each_entry(xfer, &msg->transfers, transfer_list)
			trace_spi_transfer_stop(msg, xfer);
	}

out:
	spi_finalize_current_message(host);

	return msg->status;
}

static int spi_engine_trigger_enable(struct spi_offload *offload)
{
	struct spi_engine_offload *priv = offload->priv;
	struct spi_engine *spi_engine = priv->spi_engine;
	unsigned int reg;
	int ret;

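	/*
	 * Write the static mode and word-size configuration once here rather
	 * than in every offload program (see spi_engine_compile_message()),
	 * again bracketed by SYNC instructions so completion can be polled.
	 */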
	writel_relaxed(SPI_ENGINE_CMD_SYNC(0),
		spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);

	writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG,
					    priv->spi_mode_config),
		       spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);

	if (priv->bits_per_word)
		writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_XFER_BITS,
						    priv->bits_per_word),
			       spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);

	writel_relaxed(SPI_ENGINE_CMD_SYNC(1),
		spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);

	ret = readl_relaxed_poll_timeout(spi_engine->base + SPI_ENGINE_REG_SYNC_ID,
					 reg, reg == 1, 1, 1000);
	if (ret)
		return ret;

	reg = readl_relaxed(spi_engine->base +
			    SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num));
	reg |= SPI_ENGINE_OFFLOAD_CTRL_ENABLE;
	writel_relaxed(reg, spi_engine->base +
			    SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num));
	return 0;
}

static void spi_engine_trigger_disable(struct spi_offload *offload)
{
	struct spi_engine_offload *priv = offload->priv;
	struct spi_engine *spi_engine = priv->spi_engine;
	unsigned int reg;

	reg = readl_relaxed(spi_engine->base +
			    SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num));
	reg &= ~SPI_ENGINE_OFFLOAD_CTRL_ENABLE;
	writel_relaxed(reg, spi_engine->base +
			    SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num));
}

static struct dma_chan
*spi_engine_tx_stream_request_dma_chan(struct spi_offload *offload)
{
	struct spi_engine_offload *priv = offload->priv;
	char name[16];

	snprintf(name, sizeof(name), "offload%u-tx", priv->offload_num);

	return dma_request_chan(offload->provider_dev, name);
}

static struct dma_chan
*spi_engine_rx_stream_request_dma_chan(struct spi_offload *offload)
{
	struct spi_engine_offload *priv = offload->priv;
	char name[16];

	snprintf(name, sizeof(name), "offload%u-rx", priv->offload_num);

	return dma_request_chan(offload->provider_dev, name);
}

static const struct spi_offload_ops spi_engine_offload_ops = {
	.trigger_enable = spi_engine_trigger_enable,
	.trigger_disable = spi_engine_trigger_disable,
	.tx_stream_request_dma_chan = spi_engine_tx_stream_request_dma_chan,
	.rx_stream_request_dma_chan = spi_engine_rx_stream_request_dma_chan,
};

static void spi_engine_release_hw(void *p)
{
	struct spi_engine *spi_engine = p;

	writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
	writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET);
}

static int spi_engine_probe(struct platform_device *pdev)
{
	struct spi_engine *spi_engine;
	struct spi_controller *host;
	unsigned int version;
	int irq, ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	host = devm_spi_alloc_host(&pdev->dev, sizeof(*spi_engine));
	if (!host)
		return -ENOMEM;

	spi_engine = spi_controller_get_devdata(host);

	spin_lock_init(&spi_engine->lock);
	init_completion(&spi_engine->msg_complete);

	/*
	 * REVISIT: for now, all SPI Engines only have one offload. In the
	 * future, this should be read from a memory mapped register to
	 * determine the number of offloads enabled at HDL compile time. For
	 * now, we can tell if an offload is present if there is a trigger
	 * source wired up to it.
	 */
	if (device_property_present(&pdev->dev, "trigger-sources")) {
		struct spi_engine_offload *priv;

		spi_engine->offload =
			devm_spi_offload_alloc(&pdev->dev,
					       sizeof(struct spi_engine_offload));
		if (IS_ERR(spi_engine->offload))
			return PTR_ERR(spi_engine->offload);

		priv = spi_engine->offload->priv;
		priv->spi_engine = spi_engine;
		priv->offload_num = 0;

		spi_engine->offload->ops = &spi_engine_offload_ops;
		spi_engine->offload_caps = SPI_OFFLOAD_CAP_TRIGGER;

		if (device_property_match_string(&pdev->dev, "dma-names", "offload0-rx") >= 0) {
			spi_engine->offload_caps |= SPI_OFFLOAD_CAP_RX_STREAM_DMA;
			spi_engine->offload->xfer_flags |= SPI_OFFLOAD_XFER_RX_STREAM;
		}

		if (device_property_match_string(&pdev->dev, "dma-names", "offload0-tx") >= 0) {
			spi_engine->offload_caps |= SPI_OFFLOAD_CAP_TX_STREAM_DMA;
			spi_engine->offload->xfer_flags |= SPI_OFFLOAD_XFER_TX_STREAM;
		} else {
			/*
			 * HDL compile option to enable TX DMA stream also disables
			 * the SDO memory, so can't do both at the same time.
			 */
			spi_engine->offload_caps |= SPI_OFFLOAD_CAP_TX_STATIC_DATA;
		}
	}

	spi_engine->clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk");
	if (IS_ERR(spi_engine->clk))
		return PTR_ERR(spi_engine->clk);

	spi_engine->ref_clk = devm_clk_get_enabled(&pdev->dev, "spi_clk");
	if (IS_ERR(spi_engine->ref_clk))
		return PTR_ERR(spi_engine->ref_clk);

	spi_engine->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(spi_engine->base))
		return PTR_ERR(spi_engine->base);

	version = readl(spi_engine->base + ADI_AXI_REG_VERSION);
	if (ADI_AXI_PCORE_VER_MAJOR(version) != 1) {
		dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%u\n",
			ADI_AXI_PCORE_VER_MAJOR(version),
			ADI_AXI_PCORE_VER_MINOR(version),
			ADI_AXI_PCORE_VER_PATCH(version));
		return -ENODEV;
	}

	if (ADI_AXI_PCORE_VER_MINOR(version) >= 1) {
		unsigned int sizes = readl(spi_engine->base +
				SPI_ENGINE_REG_OFFLOAD_MEM_ADDR_WIDTH);

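		/*
		 * The register reports the address width of each offload
		 * memory, i.e. the log2 of its depth in entries.
		 */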
		spi_engine->offload_ctrl_mem_size = 1 <<
			FIELD_GET(SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_CMD, sizes);
		spi_engine->offload_sdo_mem_size = 1 <<
			FIELD_GET(SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_SDO, sizes);
	} else {
		spi_engine->offload_ctrl_mem_size = SPI_ENGINE_OFFLOAD_CMD_FIFO_SIZE;
		spi_engine->offload_sdo_mem_size = SPI_ENGINE_OFFLOAD_SDO_FIFO_SIZE;
	}

	/* IP v1.5 dropped the requirement for SYNC in offload messages. */
	spi_engine->offload_requires_sync = ADI_AXI_PCORE_VER_MINOR(version) < 5;

	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_RESET);
	writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);

	ret = devm_add_action_or_reset(&pdev->dev, spi_engine_release_hw,
				       spi_engine);
	if (ret)
		return ret;

	ret = devm_request_irq(&pdev->dev, irq, spi_engine_irq, 0, pdev->name,
			       host);
	if (ret)
		return ret;

	host->dev.of_node = pdev->dev.of_node;
	host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE;
	host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
	host->max_speed_hz = clk_get_rate(spi_engine->ref_clk) / 2;
	host->transfer_one_message = spi_engine_transfer_one_message;
	host->optimize_message = spi_engine_optimize_message;
	host->unoptimize_message = spi_engine_unoptimize_message;
	host->get_offload = spi_engine_get_offload;
	host->put_offload = spi_engine_put_offload;
	host->num_chipselect = 8;

	/* Some features depend on the IP core version. */
	if (ADI_AXI_PCORE_VER_MAJOR(version) >= 1) {
		if (ADI_AXI_PCORE_VER_MINOR(version) >= 2) {
			host->mode_bits |= SPI_CS_HIGH;
			host->setup = spi_engine_setup;
		}
		if (ADI_AXI_PCORE_VER_MINOR(version) >= 3)
			host->mode_bits |= SPI_MOSI_IDLE_LOW | SPI_MOSI_IDLE_HIGH;
	}

	if (host->max_speed_hz == 0)
		return dev_err_probe(&pdev->dev, -EINVAL, "spi_clk rate is 0");

	return devm_spi_register_controller(&pdev->dev, host);
}

static const struct of_device_id spi_engine_match_table[] = {
	{ .compatible = "adi,axi-spi-engine-1.00.a" },
	{ },
};
MODULE_DEVICE_TABLE(of, spi_engine_match_table);

static struct platform_driver spi_engine_driver = {
	.probe = spi_engine_probe,
	.driver = {
		.name = "spi-engine",
		.of_match_table = spi_engine_match_table,
	},
};
module_platform_driver(spi_engine_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("Analog Devices SPI engine peripheral driver");
MODULE_LICENSE("GPL");