// SPDX-License-Identifier: GPL-2.0-only
/*
 * SPI-Engine SPI controller driver
 * Copyright 2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/fpga/adi-axi-common.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/overflow.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

#define SPI_ENGINE_REG_RESET			0x40

#define SPI_ENGINE_REG_INT_ENABLE		0x80
#define SPI_ENGINE_REG_INT_PENDING		0x84
#define SPI_ENGINE_REG_INT_SOURCE		0x88

#define SPI_ENGINE_REG_SYNC_ID			0xc0

#define SPI_ENGINE_REG_CMD_FIFO_ROOM		0xd0
#define SPI_ENGINE_REG_SDO_FIFO_ROOM		0xd4
#define SPI_ENGINE_REG_SDI_FIFO_LEVEL		0xd8

#define SPI_ENGINE_REG_CMD_FIFO			0xe0
#define SPI_ENGINE_REG_SDO_DATA_FIFO		0xe4
#define SPI_ENGINE_REG_SDI_DATA_FIFO		0xe8
#define SPI_ENGINE_REG_SDI_DATA_FIFO_PEEK	0xec

#define SPI_ENGINE_INT_CMD_ALMOST_EMPTY		BIT(0)
#define SPI_ENGINE_INT_SDO_ALMOST_EMPTY		BIT(1)
#define SPI_ENGINE_INT_SDI_ALMOST_FULL		BIT(2)
#define SPI_ENGINE_INT_SYNC			BIT(3)

#define SPI_ENGINE_CONFIG_CPHA			BIT(0)
#define SPI_ENGINE_CONFIG_CPOL			BIT(1)
#define SPI_ENGINE_CONFIG_3WIRE			BIT(2)

#define SPI_ENGINE_INST_TRANSFER		0x0
#define SPI_ENGINE_INST_ASSERT			0x1
#define SPI_ENGINE_INST_WRITE			0x2
#define SPI_ENGINE_INST_MISC			0x3

#define SPI_ENGINE_CMD_REG_CLK_DIV		0x0
#define SPI_ENGINE_CMD_REG_CONFIG		0x1
#define SPI_ENGINE_CMD_REG_XFER_BITS		0x2

#define SPI_ENGINE_MISC_SYNC			0x0
#define SPI_ENGINE_MISC_SLEEP			0x1

#define SPI_ENGINE_TRANSFER_WRITE		0x1
#define SPI_ENGINE_TRANSFER_READ		0x2

/* Arbitrary sync ID for use by host->cur_msg */
#define AXI_SPI_ENGINE_CUR_MSG_SYNC_ID		0x1

#define SPI_ENGINE_CMD(inst, arg1, arg2) \
	(((inst) << 12) | ((arg1) << 8) | (arg2))

#define SPI_ENGINE_CMD_TRANSFER(flags, n) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_TRANSFER, (flags), (n))
#define SPI_ENGINE_CMD_ASSERT(delay, cs) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_ASSERT, (delay), (cs))
#define SPI_ENGINE_CMD_WRITE(reg, val) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_WRITE, (reg), (val))
#define SPI_ENGINE_CMD_SLEEP(delay) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SLEEP, (delay))
#define SPI_ENGINE_CMD_SYNC(id) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SYNC, (id))
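
/*
 * Instructions are 16 bits wide: opcode in bits [15:12], arg1 in [11:8],
 * arg2 in [7:0]. For example, SPI_ENGINE_CMD_TRANSFER(
 * SPI_ENGINE_TRANSFER_WRITE | SPI_ENGINE_TRANSFER_READ, 0xff) encodes as
 * 0x03ff: a full-duplex transfer of 256 words (the length field holds
 * n - 1).
 */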

struct spi_engine_program {
	unsigned int length;
	uint16_t instructions[] __counted_by(length);
};

/**
 * struct spi_engine_message_state - SPI engine per-message state
 */
struct spi_engine_message_state {
	/** @cmd_length: Number of elements in cmd_buf array. */
	unsigned int cmd_length;
	/** @cmd_buf: Array of commands not yet written to CMD FIFO. */
	const uint16_t *cmd_buf;
	/** @tx_xfer: Next xfer with tx_buf not yet fully written to TX FIFO. */
	struct spi_transfer *tx_xfer;
	/** @tx_length: Size of tx_buf in bytes. */
	unsigned int tx_length;
	/** @tx_buf: Bytes not yet written to TX FIFO. */
	const uint8_t *tx_buf;
	/** @rx_xfer: Next xfer with rx_buf not yet fully read from RX FIFO. */
	struct spi_transfer *rx_xfer;
	/** @rx_length: Size of rx_buf in bytes. */
	unsigned int rx_length;
	/** @rx_buf: Bytes not yet read from the RX FIFO. */
	uint8_t *rx_buf;
};

struct spi_engine {
	struct clk *clk;
	struct clk *ref_clk;

	spinlock_t lock;

	void __iomem *base;
	struct spi_engine_message_state msg_state;
	struct completion msg_complete;
	unsigned int int_enable;
};

static void spi_engine_program_add_cmd(struct spi_engine_program *p,
	bool dry, uint16_t cmd)
{
	p->length++;

	if (!dry)
		p->instructions[p->length - 1] = cmd;
}

static unsigned int spi_engine_get_config(struct spi_device *spi)
{
	unsigned int config = 0;

	if (spi->mode & SPI_CPOL)
		config |= SPI_ENGINE_CONFIG_CPOL;
	if (spi->mode & SPI_CPHA)
		config |= SPI_ENGINE_CONFIG_CPHA;
	if (spi->mode & SPI_3WIRE)
		config |= SPI_ENGINE_CONFIG_3WIRE;

	return config;
}

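/*
 * A single TRANSFER instruction moves at most 256 words, so longer
 * transfers are split into chunks. E.g. a 1000-word transfer compiles to
 * three TRANSFER(flags, 255) instructions followed by one
 * TRANSFER(flags, 231), since 1000 = 3 * 256 + 232 and the length field
 * holds n - 1.
 */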
static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
	struct spi_transfer *xfer)
{
	unsigned int len;

	if (xfer->bits_per_word <= 8)
		len = xfer->len;
	else if (xfer->bits_per_word <= 16)
		len = xfer->len / 2;
	else
		len = xfer->len / 4;

	while (len) {
		unsigned int n = min(len, 256U);
		unsigned int flags = 0;

		if (xfer->tx_buf)
			flags |= SPI_ENGINE_TRANSFER_WRITE;
		if (xfer->rx_buf)
			flags |= SPI_ENGINE_TRANSFER_READ;

		spi_engine_program_add_cmd(p, dry,
			SPI_ENGINE_CMD_TRANSFER(flags, n - 1));
		len -= n;
	}
}

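/*
 * Worked example: with sclk_hz = 50 MHz, inst_ns = 20 and a requested
 * delay of 1000 ns, t = DIV_ROUND_UP_ULL(980 * 50000000, NSEC_PER_SEC)
 * = 49, so a single SPI_ENGINE_CMD_SLEEP(48) is emitted (the delay
 * field holds n - 1).
 */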
static void spi_engine_gen_sleep(struct spi_engine_program *p, bool dry,
				 int delay_ns, int inst_ns, u32 sclk_hz)
{
	unsigned int t;

	/*
	 * A negative delay indicates an error, e.g. from spi_delay_to_ns().
	 * And if the delay is less than the instruction execution time, there
	 * is no need for an extra sleep instruction since the instruction
	 * execution time will already cover the required delay.
	 */
	if (delay_ns < 0 || delay_ns <= inst_ns)
		return;

	t = DIV_ROUND_UP_ULL((u64)(delay_ns - inst_ns) * sclk_hz, NSEC_PER_SEC);
	while (t) {
		unsigned int n = min(t, 256U);

		spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_SLEEP(n - 1));
		t -= n;
	}
}

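/*
 * The ASSERT instruction takes an active-low mask of all chip selects,
 * so 0xff leaves every CS deasserted. E.g. asserting CS 2 clears BIT(2),
 * giving a mask of 0xfb.
 */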
static void spi_engine_gen_cs(struct spi_engine_program *p, bool dry,
		struct spi_device *spi, bool assert)
{
	unsigned int mask = 0xff;

	if (assert)
		mask ^= BIT(spi_get_chipselect(spi, 0));

	spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_ASSERT(0, mask));
}

/*
 * Performs precompile steps on the message.
 *
 * The SPI core does most of the message/transfer validation and filling in
 * fields for us via __spi_validate(). This fixes up anything remaining that
 * is not done there.
 *
 * NB: This is separate from spi_engine_compile_message() because the latter
 * is called twice and would otherwise result in double-evaluation.
 */
static void spi_engine_precompile_message(struct spi_message *msg)
{
	unsigned int clk_div, max_hz = msg->spi->controller->max_speed_hz;
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		clk_div = DIV_ROUND_UP(max_hz, xfer->speed_hz);
		xfer->effective_speed_hz = max_hz / min(clk_div, 256U);
	}
}

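/*
 * Sketch of the compiled output for a message containing a single
 * full-speed, 8-bit, 4-byte write transfer (the trailing SYNC is
 * appended later by spi_engine_optimize_message()):
 *
 *   WRITE(CONFIG, <mode bits>)
 *   ASSERT(0, 0xfe)		; assert CS 0
 *   WRITE(XFER_BITS, 8)
 *   TRANSFER(WRITE, 3)		; 4 words, length field holds n - 1
 *   ASSERT(0, 0xff)		; deassert CS
 */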
static void spi_engine_compile_message(struct spi_message *msg, bool dry,
				       struct spi_engine_program *p)
{
	struct spi_device *spi = msg->spi;
	struct spi_controller *host = spi->controller;
	struct spi_transfer *xfer;
	int clk_div, new_clk_div, inst_ns;
	bool keep_cs = false;
	u8 bits_per_word = 0;

	/*
	 * Take into account instruction execution time for more accurate sleep
	 * times, especially when the delay is small.
	 */
	inst_ns = DIV_ROUND_UP(NSEC_PER_SEC, host->max_speed_hz);

	clk_div = 1;

	spi_engine_program_add_cmd(p, dry,
		SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG,
			spi_engine_get_config(spi)));

	xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
	spi_engine_gen_cs(p, dry, spi, !xfer->cs_off);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		new_clk_div = host->max_speed_hz / xfer->effective_speed_hz;
		if (new_clk_div != clk_div) {
			clk_div = new_clk_div;
			/* actual divider used is register value + 1 */
			spi_engine_program_add_cmd(p, dry,
				SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV,
					clk_div - 1));
		}

		if (bits_per_word != xfer->bits_per_word) {
			bits_per_word = xfer->bits_per_word;
			spi_engine_program_add_cmd(p, dry,
				SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_XFER_BITS,
					bits_per_word));
		}

		spi_engine_gen_xfer(p, dry, xfer);
		spi_engine_gen_sleep(p, dry, spi_delay_to_ns(&xfer->delay, xfer),
				     inst_ns, xfer->effective_speed_hz);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
				keep_cs = true;
			} else {
				if (!xfer->cs_off)
					spi_engine_gen_cs(p, dry, spi, false);

				spi_engine_gen_sleep(p, dry, spi_delay_to_ns(
					&xfer->cs_change_delay, xfer), inst_ns,
					xfer->effective_speed_hz);

				if (!list_next_entry(xfer, transfer_list)->cs_off)
					spi_engine_gen_cs(p, dry, spi, true);
			}
		} else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
			   xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
			spi_engine_gen_cs(p, dry, spi, xfer->cs_off);
		}
	}

	if (!keep_cs)
		spi_engine_gen_cs(p, dry, spi, false);

	/*
	 * Restore clockdiv to default so that future gen_sleep commands don't
	 * have to be aware of the current register state.
	 */
	if (clk_div != 1)
		spi_engine_program_add_cmd(p, dry,
			SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV, 0));
}

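/*
 * Advances *_xfer to the next transfer in the message: NULL selects the
 * first transfer, and stepping past the last one yields NULL again.
 */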
static void spi_engine_xfer_next(struct spi_message *msg,
	struct spi_transfer **_xfer)
{
	struct spi_transfer *xfer = *_xfer;

	if (!xfer) {
		xfer = list_first_entry(&msg->transfers,
			struct spi_transfer, transfer_list);
	} else if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
		xfer = NULL;
	} else {
		xfer = list_next_entry(xfer, transfer_list);
	}

	*_xfer = xfer;
}

static void spi_engine_tx_next(struct spi_message *msg)
{
	struct spi_engine_message_state *st = msg->state;
	struct spi_transfer *xfer = st->tx_xfer;

	do {
		spi_engine_xfer_next(msg, &xfer);
	} while (xfer && !xfer->tx_buf);

	st->tx_xfer = xfer;
	if (xfer) {
		st->tx_length = xfer->len;
		st->tx_buf = xfer->tx_buf;
	} else {
		st->tx_buf = NULL;
	}
}

static void spi_engine_rx_next(struct spi_message *msg)
{
	struct spi_engine_message_state *st = msg->state;
	struct spi_transfer *xfer = st->rx_xfer;

	do {
		spi_engine_xfer_next(msg, &xfer);
	} while (xfer && !xfer->rx_buf);

	st->rx_xfer = xfer;
	if (xfer) {
		st->rx_length = xfer->len;
		st->rx_buf = xfer->rx_buf;
	} else {
		st->rx_buf = NULL;
	}
}

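/*
 * The three FIFO helpers below share a pattern: read how much room (or
 * data) the hardware reports, move at most that many words, and return
 * true if work remains so the caller keeps the matching interrupt
 * enabled.
 */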
static bool spi_engine_write_cmd_fifo(struct spi_engine *spi_engine,
				      struct spi_message *msg)
{
	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_CMD_FIFO;
	struct spi_engine_message_state *st = msg->state;
	unsigned int n, m, i;
	const uint16_t *buf;

	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_CMD_FIFO_ROOM);
	while (n && st->cmd_length) {
		m = min(n, st->cmd_length);
		buf = st->cmd_buf;
		for (i = 0; i < m; i++)
			writel_relaxed(buf[i], addr);
		st->cmd_buf += m;
		st->cmd_length -= m;
		n -= m;
	}

	return st->cmd_length != 0;
}

static bool spi_engine_write_tx_fifo(struct spi_engine *spi_engine,
				     struct spi_message *msg)
{
	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDO_DATA_FIFO;
	struct spi_engine_message_state *st = msg->state;
	unsigned int n, m, i;

	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDO_FIFO_ROOM);
	while (n && st->tx_length) {
		if (st->tx_xfer->bits_per_word <= 8) {
			const u8 *buf = st->tx_buf;

			m = min(n, st->tx_length);
			for (i = 0; i < m; i++)
				writel_relaxed(buf[i], addr);
			st->tx_buf += m;
			st->tx_length -= m;
		} else if (st->tx_xfer->bits_per_word <= 16) {
			const u16 *buf = (const u16 *)st->tx_buf;

			m = min(n, st->tx_length / 2);
			for (i = 0; i < m; i++)
				writel_relaxed(buf[i], addr);
			st->tx_buf += m * 2;
			st->tx_length -= m * 2;
		} else {
			const u32 *buf = (const u32 *)st->tx_buf;

			m = min(n, st->tx_length / 4);
			for (i = 0; i < m; i++)
				writel_relaxed(buf[i], addr);
			st->tx_buf += m * 4;
			st->tx_length -= m * 4;
		}
		n -= m;
		if (st->tx_length == 0)
			spi_engine_tx_next(msg);
	}

	return st->tx_length != 0;
}

static bool spi_engine_read_rx_fifo(struct spi_engine *spi_engine,
				    struct spi_message *msg)
{
	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDI_DATA_FIFO;
	struct spi_engine_message_state *st = msg->state;
	unsigned int n, m, i;

	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDI_FIFO_LEVEL);
	while (n && st->rx_length) {
		if (st->rx_xfer->bits_per_word <= 8) {
			u8 *buf = st->rx_buf;

			m = min(n, st->rx_length);
			for (i = 0; i < m; i++)
				buf[i] = readl_relaxed(addr);
			st->rx_buf += m;
			st->rx_length -= m;
		} else if (st->rx_xfer->bits_per_word <= 16) {
			u16 *buf = (u16 *)st->rx_buf;

			m = min(n, st->rx_length / 2);
			for (i = 0; i < m; i++)
				buf[i] = readl_relaxed(addr);
			st->rx_buf += m * 2;
			st->rx_length -= m * 2;
		} else {
			u32 *buf = (u32 *)st->rx_buf;

			m = min(n, st->rx_length / 4);
			for (i = 0; i < m; i++)
				buf[i] = readl_relaxed(addr);
			st->rx_buf += m * 4;
			st->rx_length -= m * 4;
		}
		n -= m;
		if (st->rx_length == 0)
			spi_engine_rx_next(msg);
	}

	return st->rx_length != 0;
}

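/*
 * Interrupt flow: ack and latch a SYNC event first, then top up the
 * command and SDO FIFOs, drain the SDI FIFO, and complete the current
 * message once the SYNC id placed at the end of its program comes back.
 * Interrupts whose work is done are masked until the next message
 * re-enables them.
 */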
static irqreturn_t spi_engine_irq(int irq, void *devid)
{
	struct spi_controller *host = devid;
	struct spi_message *msg = host->cur_msg;
	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
	unsigned int disable_int = 0;
	unsigned int pending;
	int completed_id = -1;

	pending = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_INT_PENDING);

	if (pending & SPI_ENGINE_INT_SYNC) {
		writel_relaxed(SPI_ENGINE_INT_SYNC,
			spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
		completed_id = readl_relaxed(
			spi_engine->base + SPI_ENGINE_REG_SYNC_ID);
	}

	spin_lock(&spi_engine->lock);

	if (pending & SPI_ENGINE_INT_CMD_ALMOST_EMPTY) {
		if (!spi_engine_write_cmd_fifo(spi_engine, msg))
			disable_int |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;
	}

	if (pending & SPI_ENGINE_INT_SDO_ALMOST_EMPTY) {
		if (!spi_engine_write_tx_fifo(spi_engine, msg))
			disable_int |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;
	}

	if (pending & (SPI_ENGINE_INT_SDI_ALMOST_FULL | SPI_ENGINE_INT_SYNC)) {
		if (!spi_engine_read_rx_fifo(spi_engine, msg))
			disable_int |= SPI_ENGINE_INT_SDI_ALMOST_FULL;
	}

	if (pending & SPI_ENGINE_INT_SYNC && msg) {
		if (completed_id == AXI_SPI_ENGINE_CUR_MSG_SYNC_ID) {
			msg->status = 0;
			msg->actual_length = msg->frame_length;
			complete(&spi_engine->msg_complete);
			disable_int |= SPI_ENGINE_INT_SYNC;
		}
	}

	if (disable_int) {
		spi_engine->int_enable &= ~disable_int;
		writel_relaxed(spi_engine->int_enable,
			spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
	}

	spin_unlock(&spi_engine->lock);

	return IRQ_HANDLED;
}

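/*
 * Messages are compiled in two passes: a dry run that only counts
 * instructions, then a real pass into a buffer sized from that count
 * plus one extra slot for the trailing SYNC instruction.
 */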
static int spi_engine_optimize_message(struct spi_message *msg)
{
	struct spi_engine_program p_dry, *p;

	spi_engine_precompile_message(msg);

	p_dry.length = 0;
	spi_engine_compile_message(msg, true, &p_dry);

	p = kzalloc(struct_size(p, instructions, p_dry.length + 1), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	spi_engine_compile_message(msg, false, p);

	spi_engine_program_add_cmd(p, false, SPI_ENGINE_CMD_SYNC(
						AXI_SPI_ENGINE_CUR_MSG_SYNC_ID));

	msg->opt_state = p;

	return 0;
}

static int spi_engine_unoptimize_message(struct spi_message *msg)
{
	kfree(msg->opt_state);

	return 0;
}

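/*
 * Executes a pre-compiled message: prime the command and SDO FIFOs from
 * process context, let the interrupt handler keep them fed, and wait up
 * to five seconds for the SYNC interrupt that marks completion.
 */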
static int spi_engine_transfer_one_message(struct spi_controller *host,
	struct spi_message *msg)
{
	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
	struct spi_engine_message_state *st = &spi_engine->msg_state;
	struct spi_engine_program *p = msg->opt_state;
	unsigned int int_enable = 0;
	unsigned long flags;

	/* reinitialize the per-message state for this message */
	memset(st, 0, sizeof(*st));
	st->cmd_buf = p->instructions;
	st->cmd_length = p->length;
	msg->state = st;

	reinit_completion(&spi_engine->msg_complete);

	spin_lock_irqsave(&spi_engine->lock, flags);

	if (spi_engine_write_cmd_fifo(spi_engine, msg))
		int_enable |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;

	spi_engine_tx_next(msg);
	if (spi_engine_write_tx_fifo(spi_engine, msg))
		int_enable |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;

	spi_engine_rx_next(msg);
	if (st->rx_length != 0)
		int_enable |= SPI_ENGINE_INT_SDI_ALMOST_FULL;

	int_enable |= SPI_ENGINE_INT_SYNC;

	writel_relaxed(int_enable,
		spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
	spi_engine->int_enable = int_enable;
	spin_unlock_irqrestore(&spi_engine->lock, flags);

	if (!wait_for_completion_timeout(&spi_engine->msg_complete,
					 msecs_to_jiffies(5000))) {
		dev_err(&host->dev,
			"Timeout occurred while waiting for transfer to complete. Hardware is probably broken.\n");
		msg->status = -ETIMEDOUT;
	}

	spi_finalize_current_message(host);

	return msg->status;
}

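/* Mask and ack all interrupts and put the core back in reset. */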
static void spi_engine_release_hw(void *p)
{
	struct spi_engine *spi_engine = p;

	writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
	writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET);
}

static int spi_engine_probe(struct platform_device *pdev)
{
	struct spi_engine *spi_engine;
	struct spi_controller *host;
	unsigned int version;
	int irq;
	int ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	host = devm_spi_alloc_host(&pdev->dev, sizeof(*spi_engine));
	if (!host)
		return -ENOMEM;

	spi_engine = spi_controller_get_devdata(host);

	spin_lock_init(&spi_engine->lock);
	init_completion(&spi_engine->msg_complete);

	spi_engine->clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk");
	if (IS_ERR(spi_engine->clk))
		return PTR_ERR(spi_engine->clk);

	spi_engine->ref_clk = devm_clk_get_enabled(&pdev->dev, "spi_clk");
	if (IS_ERR(spi_engine->ref_clk))
		return PTR_ERR(spi_engine->ref_clk);

	spi_engine->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(spi_engine->base))
		return PTR_ERR(spi_engine->base);

	version = readl(spi_engine->base + ADI_AXI_REG_VERSION);
	if (ADI_AXI_PCORE_VER_MAJOR(version) != 1) {
		dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%u\n",
			ADI_AXI_PCORE_VER_MAJOR(version),
			ADI_AXI_PCORE_VER_MINOR(version),
			ADI_AXI_PCORE_VER_PATCH(version));
		return -ENODEV;
	}

	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_RESET);
	writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);

	ret = devm_add_action_or_reset(&pdev->dev, spi_engine_release_hw,
				       spi_engine);
	if (ret)
		return ret;

	ret = devm_request_irq(&pdev->dev, irq, spi_engine_irq, 0, pdev->name,
			       host);
	if (ret)
		return ret;

	host->dev.of_node = pdev->dev.of_node;
	host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE;
	host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
	host->max_speed_hz = clk_get_rate(spi_engine->ref_clk) / 2;
	host->transfer_one_message = spi_engine_transfer_one_message;
	host->optimize_message = spi_engine_optimize_message;
	host->unoptimize_message = spi_engine_unoptimize_message;
	host->num_chipselect = 8;

	if (host->max_speed_hz == 0)
		return dev_err_probe(&pdev->dev, -EINVAL, "spi_clk rate is 0\n");

	ret = devm_spi_register_controller(&pdev->dev, host);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, host);

	return 0;
}

static const struct of_device_id spi_engine_match_table[] = {
	{ .compatible = "adi,axi-spi-engine-1.00.a" },
	{ },
};
MODULE_DEVICE_TABLE(of, spi_engine_match_table);

static struct platform_driver spi_engine_driver = {
	.probe = spi_engine_probe,
	.driver = {
		.name = "spi-engine",
		.of_match_table = spi_engine_match_table,
	},
};
module_platform_driver(spi_engine_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("Analog Devices SPI engine peripheral driver");
MODULE_LICENSE("GPL");