xref: /linux/drivers/spi/spi-axi-spi-engine.c (revision 3a39d672e7f48b8d6b91a09afa4b55352773b4b5)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * SPI-Engine SPI controller driver
 * Copyright 2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/fpga/adi-axi-common.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

#define SPI_ENGINE_REG_RESET			0x40

#define SPI_ENGINE_REG_INT_ENABLE		0x80
#define SPI_ENGINE_REG_INT_PENDING		0x84
#define SPI_ENGINE_REG_INT_SOURCE		0x88

#define SPI_ENGINE_REG_SYNC_ID			0xc0

#define SPI_ENGINE_REG_CMD_FIFO_ROOM		0xd0
#define SPI_ENGINE_REG_SDO_FIFO_ROOM		0xd4
#define SPI_ENGINE_REG_SDI_FIFO_LEVEL		0xd8

#define SPI_ENGINE_REG_CMD_FIFO			0xe0
#define SPI_ENGINE_REG_SDO_DATA_FIFO		0xe4
#define SPI_ENGINE_REG_SDI_DATA_FIFO		0xe8
#define SPI_ENGINE_REG_SDI_DATA_FIFO_PEEK	0xec

#define SPI_ENGINE_INT_CMD_ALMOST_EMPTY		BIT(0)
#define SPI_ENGINE_INT_SDO_ALMOST_EMPTY		BIT(1)
#define SPI_ENGINE_INT_SDI_ALMOST_FULL		BIT(2)
#define SPI_ENGINE_INT_SYNC			BIT(3)

#define SPI_ENGINE_CONFIG_CPHA			BIT(0)
#define SPI_ENGINE_CONFIG_CPOL			BIT(1)
#define SPI_ENGINE_CONFIG_3WIRE			BIT(2)
#define SPI_ENGINE_CONFIG_SDO_IDLE_HIGH		BIT(3)

#define SPI_ENGINE_INST_TRANSFER		0x0
#define SPI_ENGINE_INST_ASSERT			0x1
#define SPI_ENGINE_INST_WRITE			0x2
#define SPI_ENGINE_INST_MISC			0x3
#define SPI_ENGINE_INST_CS_INV			0x4

#define SPI_ENGINE_CMD_REG_CLK_DIV		0x0
#define SPI_ENGINE_CMD_REG_CONFIG		0x1
#define SPI_ENGINE_CMD_REG_XFER_BITS		0x2

#define SPI_ENGINE_MISC_SYNC			0x0
#define SPI_ENGINE_MISC_SLEEP			0x1

#define SPI_ENGINE_TRANSFER_WRITE		0x1
#define SPI_ENGINE_TRANSFER_READ		0x2

/* Arbitrary sync ID for use by host->cur_msg */
#define AXI_SPI_ENGINE_CUR_MSG_SYNC_ID		0x1

#define SPI_ENGINE_CMD(inst, arg1, arg2) \
	(((inst) << 12) | ((arg1) << 8) | (arg2))

#define SPI_ENGINE_CMD_TRANSFER(flags, n) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_TRANSFER, (flags), (n))
#define SPI_ENGINE_CMD_ASSERT(delay, cs) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_ASSERT, (delay), (cs))
#define SPI_ENGINE_CMD_WRITE(reg, val) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_WRITE, (reg), (val))
#define SPI_ENGINE_CMD_SLEEP(delay) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SLEEP, (delay))
#define SPI_ENGINE_CMD_SYNC(id) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SYNC, (id))
#define SPI_ENGINE_CMD_CS_INV(flags) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_CS_INV, 0, (flags))
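
/*
 * Instruction encoding, as implied by SPI_ENGINE_CMD() above: each command is
 * a 16-bit word with the instruction in bits 15:12, arg1 in bits 11:8 and
 * arg2 in bits 7:0. Illustrative example, not part of the driver:
 *
 *	SPI_ENGINE_CMD_TRANSFER(SPI_ENGINE_TRANSFER_WRITE |
 *				SPI_ENGINE_TRANSFER_READ, 7)
 *	  = (0x0 << 12) | (0x3 << 8) | 0x07 = 0x0307
 *
 * i.e. a full-duplex transfer of 8 words, since spi_engine_gen_xfer() passes
 * n - 1 for an n-word transfer.
 */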

struct spi_engine_program {
	unsigned int length;
	uint16_t instructions[] __counted_by(length);
};

/**
 * struct spi_engine_message_state - SPI engine per-message state
 */
struct spi_engine_message_state {
	/** @cmd_length: Number of elements in cmd_buf array. */
	unsigned int cmd_length;
	/** @cmd_buf: Array of commands not yet written to CMD FIFO. */
	const uint16_t *cmd_buf;
	/** @tx_xfer: Next xfer with tx_buf not yet fully written to TX FIFO. */
	struct spi_transfer *tx_xfer;
	/** @tx_length: Size of tx_buf in bytes. */
	unsigned int tx_length;
	/** @tx_buf: Bytes not yet written to TX FIFO. */
	const uint8_t *tx_buf;
	/** @rx_xfer: Next xfer with rx_buf not yet fully read from RX FIFO. */
	struct spi_transfer *rx_xfer;
	/** @rx_length: Size of rx_buf in bytes. */
	unsigned int rx_length;
	/** @rx_buf: Bytes not yet read from the RX FIFO. */
	uint8_t *rx_buf;
};

struct spi_engine {
	struct clk *clk;
	struct clk *ref_clk;

	spinlock_t lock;

	void __iomem *base;
	struct spi_engine_message_state msg_state;
	struct completion msg_complete;
	unsigned int int_enable;
	/* shadows hardware CS inversion flag state */
	u8 cs_inv;
};

static void spi_engine_program_add_cmd(struct spi_engine_program *p,
	bool dry, uint16_t cmd)
{
	p->length++;

	if (!dry)
		p->instructions[p->length - 1] = cmd;
}
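
/*
 * Programs are built in two passes (see spi_engine_optimize_message()): a dry
 * run that only counts instructions, then a real pass into a buffer sized
 * from that count. Minimal sketch of the pattern, illustrative only:
 *
 *	struct spi_engine_program dry = { .length = 0 }, *p;
 *
 *	spi_engine_compile_message(msg, true, &dry);
 *	p = kzalloc(struct_size(p, instructions, dry.length), GFP_KERNEL);
 *	if (p)
 *		spi_engine_compile_message(msg, false, p);
 */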

static unsigned int spi_engine_get_config(struct spi_device *spi)
{
	unsigned int config = 0;

	if (spi->mode & SPI_CPOL)
		config |= SPI_ENGINE_CONFIG_CPOL;
	if (spi->mode & SPI_CPHA)
		config |= SPI_ENGINE_CONFIG_CPHA;
	if (spi->mode & SPI_3WIRE)
		config |= SPI_ENGINE_CONFIG_3WIRE;
	if (spi->mode & SPI_MOSI_IDLE_HIGH)
		config |= SPI_ENGINE_CONFIG_SDO_IDLE_HIGH;
	if (spi->mode & SPI_MOSI_IDLE_LOW)
		config &= ~SPI_ENGINE_CONFIG_SDO_IDLE_HIGH;

	return config;
}
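
/*
 * For example (illustrative): a device in SPI mode 3 (SPI_CPOL | SPI_CPHA)
 * yields config = SPI_ENGINE_CONFIG_CPOL | SPI_ENGINE_CONFIG_CPHA = 0x3,
 * which the compiled program later writes to SPI_ENGINE_CMD_REG_CONFIG.
 */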

static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
	struct spi_transfer *xfer)
{
	unsigned int len;

	if (xfer->bits_per_word <= 8)
		len = xfer->len;
	else if (xfer->bits_per_word <= 16)
		len = xfer->len / 2;
	else
		len = xfer->len / 4;

	while (len) {
		unsigned int n = min(len, 256U);
		unsigned int flags = 0;

		if (xfer->tx_buf)
			flags |= SPI_ENGINE_TRANSFER_WRITE;
		if (xfer->rx_buf)
			flags |= SPI_ENGINE_TRANSFER_READ;

		spi_engine_program_add_cmd(p, dry,
			SPI_ENGINE_CMD_TRANSFER(flags, n - 1));
		len -= n;
	}
}
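
/*
 * Worked example (illustrative): an 8-bit transfer with xfer->len = 600
 * bytes is 600 words, which exceeds the 256-word-per-instruction limit, so
 * it is split into three TRANSFER instructions covering 256, 256 and 88
 * words (encoded as 255, 255 and 87 since the length field holds n - 1).
 */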

static void spi_engine_gen_sleep(struct spi_engine_program *p, bool dry,
				 int delay_ns, int inst_ns, u32 sclk_hz)
{
	unsigned int t;

	/*
	 * A negative delay indicates an error, e.g. from spi_delay_to_ns().
	 * And if the delay is less than the instruction execution time, there
	 * is no need for an extra sleep instruction since the instruction
	 * execution time will already cover the required delay.
	 */
	if (delay_ns < 0 || delay_ns <= inst_ns)
		return;

	t = DIV_ROUND_UP_ULL((u64)(delay_ns - inst_ns) * sclk_hz, NSEC_PER_SEC);
	while (t) {
		unsigned int n = min(t, 256U);

		spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_SLEEP(n - 1));
		t -= n;
	}
}
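
/*
 * Worked example (illustrative): with delay_ns = 1000, inst_ns = 10 (i.e. a
 * 100 MHz engine clock) and sclk_hz = 50000000, the remaining sleep is
 * t = DIV_ROUND_UP((1000 - 10) * 50000000, NSEC_PER_SEC) = 50 SCLK cycles,
 * which fits in a single SPI_ENGINE_CMD_SLEEP(49) instruction.
 */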

static void spi_engine_gen_cs(struct spi_engine_program *p, bool dry,
		struct spi_device *spi, bool assert)
{
	unsigned int mask = 0xff;

	if (assert)
		mask ^= BIT(spi_get_chipselect(spi, 0));

	spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_ASSERT(0, mask));
}
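
/*
 * With default (active-low) CS polarity, a 1 bit in the mask leaves the
 * corresponding CS line deasserted. For example (illustrative), asserting
 * chip select 2 gives mask = 0xff ^ BIT(2) = 0xfb, i.e.
 * SPI_ENGINE_CMD_ASSERT(0, 0xfb).
 */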

/*
 * Performs precompile steps on the message.
 *
 * The SPI core does most of the message/transfer validation and filling in
 * fields for us via __spi_validate(). This fixes up anything remaining that
 * is not done there.
 *
 * NB: This is separate from spi_engine_compile_message() because the latter
 * is called twice and would otherwise result in double-evaluation.
 */
static void spi_engine_precompile_message(struct spi_message *msg)
{
	unsigned int clk_div, max_hz = msg->spi->controller->max_speed_hz;
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		clk_div = DIV_ROUND_UP(max_hz, xfer->speed_hz);
		xfer->effective_speed_hz = max_hz / min(clk_div, 256U);
	}
}
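
/*
 * Worked example (illustrative): with max_speed_hz = 50 MHz and a requested
 * xfer->speed_hz of 12 MHz, clk_div = DIV_ROUND_UP(50, 12) = 5, so
 * effective_speed_hz = 50 MHz / 5 = 10 MHz. The compiled program later
 * writes clk_div - 1 = 4 to SPI_ENGINE_CMD_REG_CLK_DIV.
 */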

static void spi_engine_compile_message(struct spi_message *msg, bool dry,
				       struct spi_engine_program *p)
{
	struct spi_device *spi = msg->spi;
	struct spi_controller *host = spi->controller;
	struct spi_transfer *xfer;
	int clk_div, new_clk_div, inst_ns;
	bool keep_cs = false;
	u8 bits_per_word = 0;

	/*
	 * Take into account instruction execution time for more accurate sleep
	 * times, especially when the delay is small.
	 */
	inst_ns = DIV_ROUND_UP(NSEC_PER_SEC, host->max_speed_hz);

	clk_div = 1;

	spi_engine_program_add_cmd(p, dry,
		SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG,
			spi_engine_get_config(spi)));

	xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
	spi_engine_gen_cs(p, dry, spi, !xfer->cs_off);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		new_clk_div = host->max_speed_hz / xfer->effective_speed_hz;
		if (new_clk_div != clk_div) {
			clk_div = new_clk_div;
			/* actual divider used is register value + 1 */
			spi_engine_program_add_cmd(p, dry,
				SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV,
					clk_div - 1));
		}

		if (bits_per_word != xfer->bits_per_word && xfer->len) {
			bits_per_word = xfer->bits_per_word;
			spi_engine_program_add_cmd(p, dry,
				SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_XFER_BITS,
					bits_per_word));
		}

		spi_engine_gen_xfer(p, dry, xfer);
		spi_engine_gen_sleep(p, dry, spi_delay_to_ns(&xfer->delay, xfer),
				     inst_ns, xfer->effective_speed_hz);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
				keep_cs = true;
			} else {
				if (!xfer->cs_off)
					spi_engine_gen_cs(p, dry, spi, false);

				spi_engine_gen_sleep(p, dry, spi_delay_to_ns(
					&xfer->cs_change_delay, xfer), inst_ns,
					xfer->effective_speed_hz);

				if (!list_next_entry(xfer, transfer_list)->cs_off)
					spi_engine_gen_cs(p, dry, spi, true);
			}
		} else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
			   xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
			spi_engine_gen_cs(p, dry, spi, xfer->cs_off);
		}
	}

	if (!keep_cs)
		spi_engine_gen_cs(p, dry, spi, false);

	/*
	 * Restore clk_div to default so that future gen_sleep commands don't
	 * have to be aware of the current register state.
	 */
	if (clk_div != 1)
		spi_engine_program_add_cmd(p, dry,
			SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV, 0));
}
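
/*
 * Illustrative example of a compiled program: a message with a single
 * full-duplex, 8-bit, 4-byte transfer on CS0 at the maximum SCLK rate
 * compiles to roughly:
 *
 *	SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG, config)
 *	SPI_ENGINE_CMD_ASSERT(0, 0xfe)			assert CS0
 *	SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_XFER_BITS, 8)
 *	SPI_ENGINE_CMD_TRANSFER(WRITE | READ, 3)	4 words
 *	SPI_ENGINE_CMD_ASSERT(0, 0xff)			deassert CS
 *
 * plus the SPI_ENGINE_CMD_SYNC() appended by spi_engine_optimize_message().
 * No CLK_DIV write is emitted since the divider stays at its default of 1.
 */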

static void spi_engine_xfer_next(struct spi_message *msg,
	struct spi_transfer **_xfer)
{
	struct spi_transfer *xfer = *_xfer;

	if (!xfer) {
		xfer = list_first_entry(&msg->transfers,
			struct spi_transfer, transfer_list);
	} else if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
		xfer = NULL;
	} else {
		xfer = list_next_entry(xfer, transfer_list);
	}

	*_xfer = xfer;
}

static void spi_engine_tx_next(struct spi_message *msg)
{
	struct spi_engine_message_state *st = msg->state;
	struct spi_transfer *xfer = st->tx_xfer;

	do {
		spi_engine_xfer_next(msg, &xfer);
	} while (xfer && !xfer->tx_buf);

	st->tx_xfer = xfer;
	if (xfer) {
		st->tx_length = xfer->len;
		st->tx_buf = xfer->tx_buf;
	} else {
		st->tx_buf = NULL;
	}
}

static void spi_engine_rx_next(struct spi_message *msg)
{
	struct spi_engine_message_state *st = msg->state;
	struct spi_transfer *xfer = st->rx_xfer;

	do {
		spi_engine_xfer_next(msg, &xfer);
	} while (xfer && !xfer->rx_buf);

	st->rx_xfer = xfer;
	if (xfer) {
		st->rx_length = xfer->len;
		st->rx_buf = xfer->rx_buf;
	} else {
		st->rx_buf = NULL;
	}
}

static bool spi_engine_write_cmd_fifo(struct spi_engine *spi_engine,
				      struct spi_message *msg)
{
	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_CMD_FIFO;
	struct spi_engine_message_state *st = msg->state;
	unsigned int n, m, i;
	const uint16_t *buf;

	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_CMD_FIFO_ROOM);
	while (n && st->cmd_length) {
		m = min(n, st->cmd_length);
		buf = st->cmd_buf;
		for (i = 0; i < m; i++)
			writel_relaxed(buf[i], addr);
		st->cmd_buf += m;
		st->cmd_length -= m;
		n -= m;
	}

	return st->cmd_length != 0;
}

static bool spi_engine_write_tx_fifo(struct spi_engine *spi_engine,
				     struct spi_message *msg)
{
	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDO_DATA_FIFO;
	struct spi_engine_message_state *st = msg->state;
	unsigned int n, m, i;

	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDO_FIFO_ROOM);
	while (n && st->tx_length) {
		if (st->tx_xfer->bits_per_word <= 8) {
			const u8 *buf = st->tx_buf;

			m = min(n, st->tx_length);
			for (i = 0; i < m; i++)
				writel_relaxed(buf[i], addr);
			st->tx_buf += m;
			st->tx_length -= m;
		} else if (st->tx_xfer->bits_per_word <= 16) {
			const u16 *buf = (const u16 *)st->tx_buf;

			m = min(n, st->tx_length / 2);
			for (i = 0; i < m; i++)
				writel_relaxed(buf[i], addr);
			st->tx_buf += m * 2;
			st->tx_length -= m * 2;
		} else {
			const u32 *buf = (const u32 *)st->tx_buf;

			m = min(n, st->tx_length / 4);
			for (i = 0; i < m; i++)
				writel_relaxed(buf[i], addr);
			st->tx_buf += m * 4;
			st->tx_length -= m * 4;
		}
		n -= m;
		if (st->tx_length == 0)
			spi_engine_tx_next(msg);
	}

	return st->tx_length != 0;
}
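
/*
 * As the word-size batching above implies, each SDO/SDI FIFO entry holds one
 * word of up to 32 bits. Worked example (illustrative): a transfer with
 * bits_per_word = 16 and len = 10 bytes occupies 5 FIFO entries, and each
 * writel_relaxed()/readl_relaxed() moves exactly one of them.
 */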

static bool spi_engine_read_rx_fifo(struct spi_engine *spi_engine,
				    struct spi_message *msg)
{
	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDI_DATA_FIFO;
	struct spi_engine_message_state *st = msg->state;
	unsigned int n, m, i;

	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDI_FIFO_LEVEL);
	while (n && st->rx_length) {
		if (st->rx_xfer->bits_per_word <= 8) {
			u8 *buf = st->rx_buf;

			m = min(n, st->rx_length);
			for (i = 0; i < m; i++)
				buf[i] = readl_relaxed(addr);
			st->rx_buf += m;
			st->rx_length -= m;
		} else if (st->rx_xfer->bits_per_word <= 16) {
			u16 *buf = (u16 *)st->rx_buf;

			m = min(n, st->rx_length / 2);
			for (i = 0; i < m; i++)
				buf[i] = readl_relaxed(addr);
			st->rx_buf += m * 2;
			st->rx_length -= m * 2;
		} else {
			u32 *buf = (u32 *)st->rx_buf;

			m = min(n, st->rx_length / 4);
			for (i = 0; i < m; i++)
				buf[i] = readl_relaxed(addr);
			st->rx_buf += m * 4;
			st->rx_length -= m * 4;
		}
		n -= m;
		if (st->rx_length == 0)
			spi_engine_rx_next(msg);
	}

	return st->rx_length != 0;
}

static irqreturn_t spi_engine_irq(int irq, void *devid)
{
	struct spi_controller *host = devid;
	struct spi_message *msg = host->cur_msg;
	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
	unsigned int disable_int = 0;
	unsigned int pending;
	int completed_id = -1;

	pending = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_INT_PENDING);

	if (pending & SPI_ENGINE_INT_SYNC) {
		writel_relaxed(SPI_ENGINE_INT_SYNC,
			spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
		completed_id = readl_relaxed(
			spi_engine->base + SPI_ENGINE_REG_SYNC_ID);
	}

	spin_lock(&spi_engine->lock);

	if (pending & SPI_ENGINE_INT_CMD_ALMOST_EMPTY) {
		if (!spi_engine_write_cmd_fifo(spi_engine, msg))
			disable_int |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;
	}

	if (pending & SPI_ENGINE_INT_SDO_ALMOST_EMPTY) {
		if (!spi_engine_write_tx_fifo(spi_engine, msg))
			disable_int |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;
	}

	if (pending & (SPI_ENGINE_INT_SDI_ALMOST_FULL | SPI_ENGINE_INT_SYNC)) {
		if (!spi_engine_read_rx_fifo(spi_engine, msg))
			disable_int |= SPI_ENGINE_INT_SDI_ALMOST_FULL;
	}

	if (pending & SPI_ENGINE_INT_SYNC && msg) {
		if (completed_id == AXI_SPI_ENGINE_CUR_MSG_SYNC_ID) {
			msg->status = 0;
			msg->actual_length = msg->frame_length;
			complete(&spi_engine->msg_complete);
			disable_int |= SPI_ENGINE_INT_SYNC;
		}
	}

	if (disable_int) {
		spi_engine->int_enable &= ~disable_int;
		writel_relaxed(spi_engine->int_enable,
			spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
	}

	spin_unlock(&spi_engine->lock);

	return IRQ_HANDLED;
}

static int spi_engine_optimize_message(struct spi_message *msg)
{
	struct spi_engine_program p_dry, *p;

	spi_engine_precompile_message(msg);

	p_dry.length = 0;
	spi_engine_compile_message(msg, true, &p_dry);

	p = kzalloc(struct_size(p, instructions, p_dry.length + 1), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	spi_engine_compile_message(msg, false, p);

	spi_engine_program_add_cmd(p, false, SPI_ENGINE_CMD_SYNC(
						AXI_SPI_ENGINE_CUR_MSG_SYNC_ID));

	msg->opt_state = p;

	return 0;
}
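
/*
 * Note the p_dry.length + 1 above: the dry run only counts the compiled
 * message itself, so one extra instruction slot is reserved for the trailing
 * SPI_ENGINE_CMD_SYNC(). When the engine executes that SYNC it raises
 * SPI_ENGINE_INT_SYNC, and spi_engine_irq() matches the reported ID against
 * AXI_SPI_ENGINE_CUR_MSG_SYNC_ID to complete the message.
 */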

static int spi_engine_unoptimize_message(struct spi_message *msg)
{
	kfree(msg->opt_state);

	return 0;
}

static int spi_engine_setup(struct spi_device *device)
{
	struct spi_controller *host = device->controller;
	struct spi_engine *spi_engine = spi_controller_get_devdata(host);

	if (device->mode & SPI_CS_HIGH)
		spi_engine->cs_inv |= BIT(spi_get_chipselect(device, 0));
	else
		spi_engine->cs_inv &= ~BIT(spi_get_chipselect(device, 0));

	writel_relaxed(SPI_ENGINE_CMD_CS_INV(spi_engine->cs_inv),
		       spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);

	/*
	 * In addition to setting the flags, we have to do a CS assert command
	 * to make the new setting actually take effect.
	 */
	writel_relaxed(SPI_ENGINE_CMD_ASSERT(0, 0xff),
		       spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);

	return 0;
}

static int spi_engine_transfer_one_message(struct spi_controller *host,
	struct spi_message *msg)
{
	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
	struct spi_engine_message_state *st = &spi_engine->msg_state;
	struct spi_engine_program *p = msg->opt_state;
	unsigned int int_enable = 0;
	unsigned long flags;

	/* reinitialize message state for this message */
	memset(st, 0, sizeof(*st));
	st->cmd_buf = p->instructions;
	st->cmd_length = p->length;
	msg->state = st;

	reinit_completion(&spi_engine->msg_complete);

	spin_lock_irqsave(&spi_engine->lock, flags);

	if (spi_engine_write_cmd_fifo(spi_engine, msg))
		int_enable |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;

	spi_engine_tx_next(msg);
	if (spi_engine_write_tx_fifo(spi_engine, msg))
		int_enable |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;

	spi_engine_rx_next(msg);
	if (st->rx_length != 0)
		int_enable |= SPI_ENGINE_INT_SDI_ALMOST_FULL;

	int_enable |= SPI_ENGINE_INT_SYNC;

	writel_relaxed(int_enable,
		spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
	spi_engine->int_enable = int_enable;
	spin_unlock_irqrestore(&spi_engine->lock, flags);

	if (!wait_for_completion_timeout(&spi_engine->msg_complete,
					 msecs_to_jiffies(5000))) {
		dev_err(&host->dev,
			"Timeout occurred while waiting for transfer to complete. Hardware is probably broken.\n");
		msg->status = -ETIMEDOUT;
	}

	spi_finalize_current_message(host);

	return msg->status;
}

static void spi_engine_release_hw(void *p)
{
	struct spi_engine *spi_engine = p;

	writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
	writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET);
}

static int spi_engine_probe(struct platform_device *pdev)
{
	struct spi_engine *spi_engine;
	struct spi_controller *host;
	unsigned int version;
	int irq;
	int ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	host = devm_spi_alloc_host(&pdev->dev, sizeof(*spi_engine));
	if (!host)
		return -ENOMEM;

	spi_engine = spi_controller_get_devdata(host);

	spin_lock_init(&spi_engine->lock);
	init_completion(&spi_engine->msg_complete);

	spi_engine->clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk");
	if (IS_ERR(spi_engine->clk))
		return PTR_ERR(spi_engine->clk);

	spi_engine->ref_clk = devm_clk_get_enabled(&pdev->dev, "spi_clk");
	if (IS_ERR(spi_engine->ref_clk))
		return PTR_ERR(spi_engine->ref_clk);

	spi_engine->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(spi_engine->base))
		return PTR_ERR(spi_engine->base);

	version = readl(spi_engine->base + ADI_AXI_REG_VERSION);
	if (ADI_AXI_PCORE_VER_MAJOR(version) != 1) {
		dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%u\n",
			ADI_AXI_PCORE_VER_MAJOR(version),
			ADI_AXI_PCORE_VER_MINOR(version),
			ADI_AXI_PCORE_VER_PATCH(version));
		return -ENODEV;
	}

	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_RESET);
	writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);

	ret = devm_add_action_or_reset(&pdev->dev, spi_engine_release_hw,
				       spi_engine);
	if (ret)
		return ret;

	ret = devm_request_irq(&pdev->dev, irq, spi_engine_irq, 0, pdev->name,
			       host);
	if (ret)
		return ret;

	host->dev.of_node = pdev->dev.of_node;
	host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE;
	host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
	host->max_speed_hz = clk_get_rate(spi_engine->ref_clk) / 2;
	host->transfer_one_message = spi_engine_transfer_one_message;
	host->optimize_message = spi_engine_optimize_message;
	host->unoptimize_message = spi_engine_unoptimize_message;
	host->num_chipselect = 8;

	/* Some features depend on the IP core version. */
	if (ADI_AXI_PCORE_VER_MAJOR(version) >= 1) {
		if (ADI_AXI_PCORE_VER_MINOR(version) >= 2) {
			host->mode_bits |= SPI_CS_HIGH;
			host->setup = spi_engine_setup;
		}
		if (ADI_AXI_PCORE_VER_MINOR(version) >= 3)
			host->mode_bits |= SPI_MOSI_IDLE_LOW | SPI_MOSI_IDLE_HIGH;
	}

	if (host->max_speed_hz == 0)
		return dev_err_probe(&pdev->dev, -EINVAL, "spi_clk rate is 0");

	return devm_spi_register_controller(&pdev->dev, host);
}

static const struct of_device_id spi_engine_match_table[] = {
	{ .compatible = "adi,axi-spi-engine-1.00.a" },
	{ },
};
MODULE_DEVICE_TABLE(of, spi_engine_match_table);

static struct platform_driver spi_engine_driver = {
	.probe = spi_engine_probe,
	.driver = {
		.name = "spi-engine",
		.of_match_table = spi_engine_match_table,
	},
};
module_platform_driver(spi_engine_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("Analog Devices SPI engine peripheral driver");
MODULE_LICENSE("GPL");