xref: /linux/drivers/spi/spi-axi-spi-engine.c (revision 323bbfcf1ef8836d0d2ad9e2c1f1c684f0e3b5b3)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * SPI-Engine SPI controller driver
4  * Copyright 2015 Analog Devices Inc.
5  * Copyright 2024 BayLibre, SAS
6  *  Author: Lars-Peter Clausen <lars@metafoo.de>
7  */
8 
9 #include <linux/adi-axi-common.h>
10 #include <linux/bitfield.h>
11 #include <linux/bitops.h>
12 #include <linux/clk.h>
13 #include <linux/completion.h>
14 #include <linux/dmaengine.h>
15 #include <linux/interrupt.h>
16 #include <linux/io.h>
17 #include <linux/iopoll.h>
18 #include <linux/of.h>
19 #include <linux/module.h>
20 #include <linux/overflow.h>
21 #include <linux/platform_device.h>
22 #include <linux/spi/offload/provider.h>
23 #include <linux/spi/spi.h>
24 #include <trace/events/spi.h>
25 
26 #define SPI_ENGINE_REG_DATA_WIDTH		0x0C
27 #define   SPI_ENGINE_REG_DATA_WIDTH_NUM_OF_SDIO_MASK	GENMASK(23, 16)
28 #define   SPI_ENGINE_REG_DATA_WIDTH_MASK		GENMASK(15, 0)
29 #define SPI_ENGINE_REG_OFFLOAD_MEM_ADDR_WIDTH	0x10
30 #define SPI_ENGINE_REG_RESET			0x40
31 
32 #define SPI_ENGINE_REG_INT_ENABLE		0x80
33 #define SPI_ENGINE_REG_INT_PENDING		0x84
34 #define SPI_ENGINE_REG_INT_SOURCE		0x88
35 
36 #define SPI_ENGINE_REG_SYNC_ID			0xc0
37 #define SPI_ENGINE_REG_OFFLOAD_SYNC_ID		0xc4
38 
39 #define SPI_ENGINE_REG_CMD_FIFO_ROOM		0xd0
40 #define SPI_ENGINE_REG_SDO_FIFO_ROOM		0xd4
41 #define SPI_ENGINE_REG_SDI_FIFO_LEVEL		0xd8
42 
43 #define SPI_ENGINE_REG_CMD_FIFO			0xe0
44 #define SPI_ENGINE_REG_SDO_DATA_FIFO		0xe4
45 #define SPI_ENGINE_REG_SDI_DATA_FIFO		0xe8
46 #define SPI_ENGINE_REG_SDI_DATA_FIFO_PEEK	0xec
47 
48 #define SPI_ENGINE_MAX_NUM_OFFLOADS		32
49 
50 #define SPI_ENGINE_REG_OFFLOAD_CTRL(x)		(0x100 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))
51 #define SPI_ENGINE_REG_OFFLOAD_STATUS(x)	(0x104 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))
52 #define SPI_ENGINE_REG_OFFLOAD_RESET(x)		(0x108 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))
53 #define SPI_ENGINE_REG_OFFLOAD_CMD_FIFO(x)	(0x110 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))
54 #define SPI_ENGINE_REG_OFFLOAD_SDO_FIFO(x)	(0x114 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))
55 
56 #define SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_SDO	GENMASK(15, 8)
57 #define SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_CMD	GENMASK(7, 0)
58 
59 #define SPI_ENGINE_INT_CMD_ALMOST_EMPTY		BIT(0)
60 #define SPI_ENGINE_INT_SDO_ALMOST_EMPTY		BIT(1)
61 #define SPI_ENGINE_INT_SDI_ALMOST_FULL		BIT(2)
62 #define SPI_ENGINE_INT_SYNC			BIT(3)
63 #define SPI_ENGINE_INT_OFFLOAD_SYNC		BIT(4)
64 
65 #define SPI_ENGINE_OFFLOAD_CTRL_ENABLE		BIT(0)
66 
67 #define SPI_ENGINE_CONFIG_CPHA			BIT(0)
68 #define SPI_ENGINE_CONFIG_CPOL			BIT(1)
69 #define SPI_ENGINE_CONFIG_3WIRE			BIT(2)
70 #define SPI_ENGINE_CONFIG_SDO_IDLE_HIGH		BIT(3)
71 
72 #define SPI_ENGINE_INST_TRANSFER		0x0
73 #define SPI_ENGINE_INST_ASSERT			0x1
74 #define SPI_ENGINE_INST_WRITE			0x2
75 #define SPI_ENGINE_INST_MISC			0x3
76 #define SPI_ENGINE_INST_CS_INV			0x4
77 
78 #define SPI_ENGINE_CMD_REG_CLK_DIV		0x0
79 #define SPI_ENGINE_CMD_REG_CONFIG		0x1
80 #define SPI_ENGINE_CMD_REG_XFER_BITS		0x2
81 #define SPI_ENGINE_CMD_REG_SDI_MASK		0x3
82 #define SPI_ENGINE_CMD_REG_SDO_MASK		0x4
83 
84 #define SPI_ENGINE_MISC_SYNC			0x0
85 #define SPI_ENGINE_MISC_SLEEP			0x1
86 
87 #define SPI_ENGINE_TRANSFER_WRITE		0x1
88 #define SPI_ENGINE_TRANSFER_READ		0x2
89 
90 /* Arbitrary sync ID for use by host->cur_msg */
91 #define AXI_SPI_ENGINE_CUR_MSG_SYNC_ID		0x1
92 
/* Generic instruction encoder: 4-bit opcode, 4-bit arg1, 8-bit arg2. */
#define SPI_ENGINE_CMD(inst, arg1, arg2) \
	(((inst) << 12) | ((arg1) << 8) | (arg2))

/* Transfer (n + 1) words; flags select SPI_ENGINE_TRANSFER_READ/WRITE. */
#define SPI_ENGINE_CMD_TRANSFER(flags, n) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_TRANSFER, (flags), (n))
/* Drive the chip-select lines to mask @cs after a delay of @delay. */
#define SPI_ENGINE_CMD_ASSERT(delay, cs) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_ASSERT, (delay), (cs))
/* Write @val to internal configuration register @reg (SPI_ENGINE_CMD_REG_*). */
#define SPI_ENGINE_CMD_WRITE(reg, val) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_WRITE, (reg), (val))
/* Sleep for (delay + 1) ticks of the currently configured clock. */
#define SPI_ENGINE_CMD_SLEEP(delay) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SLEEP, (delay))
/* Emit a SYNC event carrying @id; latched in SPI_ENGINE_REG_SYNC_ID. */
#define SPI_ENGINE_CMD_SYNC(id) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SYNC, (id))
/* Update the per-CS polarity inversion flags. */
#define SPI_ENGINE_CMD_CS_INV(flags) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_CS_INV, 0, (flags))
108 
109 /* default sizes - can be changed when SPI Engine firmware is compiled */
110 #define SPI_ENGINE_OFFLOAD_CMD_FIFO_SIZE	16
111 #define SPI_ENGINE_OFFLOAD_SDO_FIFO_SIZE	16
112 
113 /* Extending SPI_MULTI_LANE_MODE values for optimizing messages. */
114 #define SPI_ENGINE_MULTI_BUS_MODE_UNKNOWN	-1
115 #define SPI_ENGINE_MULTI_BUS_MODE_CONFLICTING	-2
116 
/**
 * struct spi_engine_program - compiled SPI Engine instruction stream
 * @length: Number of valid entries in @instructions.
 * @instructions: Raw 16-bit SPI Engine instruction words.
 */
struct spi_engine_program {
	unsigned int length;
	uint16_t instructions[] __counted_by(length);
};
121 
/**
 * struct spi_engine_message_state - SPI engine per-message state
 */
struct spi_engine_message_state {
	/** @cmd_length: Number of elements in cmd_buf array. */
	unsigned cmd_length;
	/** @cmd_buf: Array of commands not yet written to CMD FIFO. */
	const uint16_t *cmd_buf;
	/** @tx_xfer: Next xfer with tx_buf not yet fully written to TX FIFO. */
	struct spi_transfer *tx_xfer;
	/** @tx_length: Size of tx_buf in bytes. */
	unsigned int tx_length;
	/** @tx_buf: Bytes not yet written to TX FIFO. */
	const uint8_t *tx_buf;
	/** @rx_xfer: Next xfer with rx_buf not yet fully read from RX FIFO. */
	struct spi_transfer *rx_xfer;
	/** @rx_length: Size of rx_buf in bytes. */
	unsigned int rx_length;
	/** @rx_buf: Bytes not yet read from the RX FIFO. */
	uint8_t *rx_buf;
};
143 
/* Bit numbers for spi_engine_offload::flags. */
enum {
	SPI_ENGINE_OFFLOAD_FLAG_ASSIGNED,	/* claimed via spi_engine_get_offload() */
	SPI_ENGINE_OFFLOAD_FLAG_PREPARED,	/* programmed by spi_engine_offload_prepare() */
};
148 
/**
 * struct spi_engine_offload - per-offload-instance state
 * @spi_engine: Parent controller driver data.
 * @flags: SPI_ENGINE_OFFLOAD_FLAG_* bits.
 * @offload_num: Hardware offload instance index, used to compute the
 *	SPI_ENGINE_REG_OFFLOAD_* register offsets.
 * @spi_mode_config: Cached CONFIG register value for the optimized message.
 * @multi_lane_mode: Common multi-lane mode of the optimized message, or one
 *	of the negative SPI_ENGINE_MULTI_BUS_MODE_* extension values.
 * @rx_primary_lane_mask: Mask with only the first mapped RX lane bit set.
 * @tx_primary_lane_mask: Mask with only the first mapped TX lane bit set.
 * @rx_all_lanes_mask: Mask covering all mapped RX lanes.
 * @tx_all_lanes_mask: Mask covering all mapped TX lanes.
 * @bits_per_word: Common bits_per_word of the optimized message, or 0 when
 *	the transfers use mixed word sizes.
 */
struct spi_engine_offload {
	struct spi_engine *spi_engine;
	unsigned long flags;
	unsigned int offload_num;
	unsigned int spi_mode_config;
	unsigned int multi_lane_mode;
	u8 rx_primary_lane_mask;
	u8 tx_primary_lane_mask;
	u8 rx_all_lanes_mask;
	u8 tx_all_lanes_mask;
	u8 bits_per_word;
};
161 
/* Per-controller driver data. */
struct spi_engine {
	struct clk *clk;
	struct clk *ref_clk;

	/* protects int_enable and FIFO access shared with the IRQ handler */
	spinlock_t lock;

	/* memory-mapped register base */
	void __iomem *base;
	/* bookkeeping for the message currently being transferred */
	struct spi_engine_message_state msg_state;
	/* signalled by the IRQ handler when the current message's SYNC fires */
	struct completion msg_complete;
	/* shadows the hardware interrupt-enable register */
	unsigned int int_enable;
	/* shadows hardware CS inversion flag state */
	u8 cs_inv;

	/* offload CMD and SDO FIFO capacities, in words */
	unsigned int offload_ctrl_mem_size;
	unsigned int offload_sdo_mem_size;
	struct spi_offload *offload;
	/* SPI_OFFLOAD_CAP_* bits supported by the hardware */
	u32 offload_caps;
	/* older IP core versions need a SYNC instruction for offload to work */
	bool offload_requires_sync;
};
181 
/* Report the lane-mask bits for the first-mapped (primary) RX and TX lanes. */
static void spi_engine_primary_lane_flag(struct spi_device *spi,
					 u8 *rx_lane_flags, u8 *tx_lane_flags)
{
	u8 rx_mask = BIT(spi->rx_lane_map[0]);
	u8 tx_mask = BIT(spi->tx_lane_map[0]);

	*rx_lane_flags = rx_mask;
	*tx_lane_flags = tx_mask;
}
188 
/*
 * spi_engine_all_lanes_flags() - compute lane masks covering all mapped lanes
 * @spi: SPI device whose lane maps are consulted
 * @rx_lane_flags: output, one bit set per mapped RX lane
 * @tx_lane_flags: output, one bit set per mapped TX lane
 *
 * The outputs are fully recomputed here. They must be zeroed before the
 * OR-accumulation: some callers (e.g. spi_engine_compile_message()) pass
 * uninitialized stack variables, which previously led to stray lane bits
 * being set (and undefined behavior reading the uninitialized value).
 */
static void spi_engine_all_lanes_flags(struct spi_device *spi,
				       u8 *rx_lane_flags, u8 *tx_lane_flags)
{
	int i;

	*rx_lane_flags = 0;
	*tx_lane_flags = 0;

	for (i = 0; i < spi->num_rx_lanes; i++)
		*rx_lane_flags |= BIT(spi->rx_lane_map[i]);

	for (i = 0; i < spi->num_tx_lanes; i++)
		*tx_lane_flags |= BIT(spi->tx_lane_map[i]);
}
200 
spi_engine_program_add_cmd(struct spi_engine_program * p,bool dry,uint16_t cmd)201 static void spi_engine_program_add_cmd(struct spi_engine_program *p,
202 	bool dry, uint16_t cmd)
203 {
204 	p->length++;
205 
206 	if (!dry)
207 		p->instructions[p->length - 1] = cmd;
208 }
209 
spi_engine_get_config(struct spi_device * spi)210 static unsigned int spi_engine_get_config(struct spi_device *spi)
211 {
212 	unsigned int config = 0;
213 
214 	if (spi->mode & SPI_CPOL)
215 		config |= SPI_ENGINE_CONFIG_CPOL;
216 	if (spi->mode & SPI_CPHA)
217 		config |= SPI_ENGINE_CONFIG_CPHA;
218 	if (spi->mode & SPI_3WIRE)
219 		config |= SPI_ENGINE_CONFIG_3WIRE;
220 	if (spi->mode & SPI_MOSI_IDLE_HIGH)
221 		config |= SPI_ENGINE_CONFIG_SDO_IDLE_HIGH;
222 	if (spi->mode & SPI_MOSI_IDLE_LOW)
223 		config &= ~SPI_ENGINE_CONFIG_SDO_IDLE_HIGH;
224 
225 	return config;
226 }
227 
/*
 * Emit TRANSFER instructions for @xfer. The word count follows from
 * bits_per_word (1, 2 or 4 bytes per word) and, in stripe mode, is divided
 * across the active lanes. Each instruction moves at most 256 words.
 */
static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
				struct spi_transfer *xfer, u32 num_lanes)
{
	unsigned int words, flags = 0;

	if (xfer->bits_per_word <= 8)
		words = xfer->len;
	else if (xfer->bits_per_word <= 16)
		words = xfer->len / 2;
	else
		words = xfer->len / 4;

	if (xfer->multi_lane_mode == SPI_MULTI_LANE_MODE_STRIPE)
		words /= num_lanes;

	/* Direction flags are the same for every chunk of this transfer. */
	if (xfer->tx_buf || (xfer->offload_flags & SPI_OFFLOAD_XFER_TX_STREAM))
		flags |= SPI_ENGINE_TRANSFER_WRITE;
	if (xfer->rx_buf || (xfer->offload_flags & SPI_OFFLOAD_XFER_RX_STREAM))
		flags |= SPI_ENGINE_TRANSFER_READ;

	while (words) {
		unsigned int chunk = min(words, 256U);

		spi_engine_program_add_cmd(p, dry,
			SPI_ENGINE_CMD_TRANSFER(flags, chunk - 1));
		words -= chunk;
	}
}
257 
/*
 * Emit SLEEP instructions implementing @delay_ns, compensating for the
 * @inst_ns already consumed by instruction execution itself. Negative
 * delays (an error from spi_delay_to_ns()) and delays no longer than the
 * instruction time emit nothing.
 */
static void spi_engine_gen_sleep(struct spi_engine_program *p, bool dry,
				 int delay_ns, int inst_ns, u32 sclk_hz)
{
	unsigned int ticks;

	if (delay_ns < 0 || delay_ns <= inst_ns)
		return;

	/* Convert the remaining delay to SCLK ticks, rounding up. */
	ticks = DIV_ROUND_UP_ULL((u64)(delay_ns - inst_ns) * sclk_hz,
				 NSEC_PER_SEC);

	while (ticks) {
		unsigned int chunk = min(ticks, 256U);

		spi_engine_program_add_cmd(p, dry,
					   SPI_ENGINE_CMD_SLEEP(chunk - 1));
		ticks -= chunk;
	}
}
280 
/*
 * Emit an ASSERT instruction updating the CS lines. The hardware mask has a
 * bit set for every deasserted CS, so asserting flips this device's bit off.
 */
static void spi_engine_gen_cs(struct spi_engine_program *p, bool dry,
		struct spi_device *spi, bool assert)
{
	unsigned int mask;

	mask = assert ? 0xff ^ BIT(spi_get_chipselect(spi, 0)) : 0xff;

	spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_ASSERT(0, mask));
}
291 
292 /*
293  * Performs precompile steps on the message.
294  *
295  * The SPI core does most of the message/transfer validation and filling in
296  * fields for us via __spi_validate(). This fixes up anything remaining not
297  * done there.
298  *
299  * NB: This is separate from spi_engine_compile_message() because the latter
300  * is called twice and would otherwise result in double-evaluation.
301  *
302  * Returns 0 on success, -EINVAL on failure.
303  */
static int spi_engine_precompile_message(struct spi_message *msg)
{
	unsigned int clk_div, max_hz = msg->spi->controller->max_speed_hz;
	struct spi_transfer *xfer;
	int multi_lane_mode = SPI_ENGINE_MULTI_BUS_MODE_UNKNOWN;
	u8 min_bits_per_word = U8_MAX;
	u8 max_bits_per_word = 0;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* If we have an offload transfer, we can't rx to buffer */
		if (msg->offload && xfer->rx_buf)
			return -EINVAL;

		/* Hardware divider is capped at 256; record the achievable rate. */
		clk_div = DIV_ROUND_UP(max_hz, xfer->speed_hz);
		xfer->effective_speed_hz = max_hz / min(clk_div, 256U);

		/* Track the bits_per_word range over data-carrying xfers only. */
		if (xfer->len) {
			min_bits_per_word = min(min_bits_per_word, xfer->bits_per_word);
			max_bits_per_word = max(max_bits_per_word, xfer->bits_per_word);
		}

		/* Only xfers that actually move data constrain the lane mode. */
		if (xfer->rx_buf || xfer->offload_flags & SPI_OFFLOAD_XFER_RX_STREAM ||
		    xfer->tx_buf || xfer->offload_flags & SPI_OFFLOAD_XFER_TX_STREAM) {
			switch (xfer->multi_lane_mode) {
			case SPI_MULTI_LANE_MODE_SINGLE:
			case SPI_MULTI_LANE_MODE_STRIPE:
				break;
			default:
				/* Other modes, like mirror not supported */
				return -EINVAL;
			}

			/* If all xfers have the same multi-lane mode, we can optimize. */
			if (multi_lane_mode == SPI_ENGINE_MULTI_BUS_MODE_UNKNOWN)
				multi_lane_mode = xfer->multi_lane_mode;
			else if (multi_lane_mode != xfer->multi_lane_mode)
				multi_lane_mode = SPI_ENGINE_MULTI_BUS_MODE_CONFLICTING;
		}
	}

	/*
	 * If all xfers in the message use the same bits_per_word, we can
	 * provide some optimization when using SPI offload.
	 */
	if (msg->offload) {
		struct spi_engine_offload *priv = msg->offload->priv;

		if (min_bits_per_word == max_bits_per_word)
			priv->bits_per_word = min_bits_per_word;
		else
			priv->bits_per_word = 0;

		priv->multi_lane_mode = multi_lane_mode;
		spi_engine_primary_lane_flag(msg->spi,
					     &priv->rx_primary_lane_mask,
					     &priv->tx_primary_lane_mask);
		spi_engine_all_lanes_flags(msg->spi,
					   &priv->rx_all_lanes_mask,
					   &priv->tx_all_lanes_mask);
	}

	return 0;
}
367 
/*
 * Translate @msg into a SPI Engine instruction stream in @p.
 *
 * When @dry is true only p->length is advanced so the caller can size the
 * allocation; the instructions themselves are written on the second pass.
 *
 * For offload messages the mode configuration (and, when uniform across the
 * message, the bits_per_word and multi-lane mode) is programmed once when
 * the offload is enabled instead of being embedded in every message.
 */
static void spi_engine_compile_message(struct spi_message *msg, bool dry,
				       struct spi_engine_program *p)
{
	struct spi_device *spi = msg->spi;
	struct spi_controller *host = spi->controller;
	struct spi_engine_offload *priv;
	struct spi_transfer *xfer;
	int clk_div, new_clk_div, inst_ns;
	int prev_multi_lane_mode = SPI_MULTI_LANE_MODE_SINGLE;
	bool keep_cs = false;
	u8 bits_per_word = 0;

	/*
	 * Take into account instruction execution time for more accurate sleep
	 * times, especially when the delay is small.
	 */
	inst_ns = DIV_ROUND_UP(NSEC_PER_SEC, host->max_speed_hz);

	clk_div = 1;

	/*
	 * As an optimization, SPI offload sets once this when the offload is
	 * enabled instead of repeating the instruction in each message.
	 */
	if (msg->offload) {
		priv = msg->offload->priv;
		priv->spi_mode_config = spi_engine_get_config(spi);

		/*
		 * If all xfers use the same bits_per_word, it can be optimized
		 * in the same way.
		 */
		bits_per_word = priv->bits_per_word;
		prev_multi_lane_mode = priv->multi_lane_mode;
	} else {
		spi_engine_program_add_cmd(p, dry,
			SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG,
				spi_engine_get_config(spi)));
	}

	xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
	spi_engine_gen_cs(p, dry, spi, !xfer->cs_off);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (xfer->rx_buf || xfer->offload_flags & SPI_OFFLOAD_XFER_RX_STREAM ||
		    xfer->tx_buf || xfer->offload_flags & SPI_OFFLOAD_XFER_TX_STREAM) {
			if (xfer->multi_lane_mode != prev_multi_lane_mode) {
				/*
				 * Must be zero-initialized:
				 * spi_engine_all_lanes_flags() ORs bits into
				 * these rather than assigning them.
				 */
				u8 tx_lane_flags = 0, rx_lane_flags = 0;

				if (xfer->multi_lane_mode == SPI_MULTI_LANE_MODE_STRIPE)
					spi_engine_all_lanes_flags(spi, &rx_lane_flags,
								   &tx_lane_flags);
				else
					spi_engine_primary_lane_flag(spi, &rx_lane_flags,
								     &tx_lane_flags);

				spi_engine_program_add_cmd(p, dry,
					SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDI_MASK,
							     rx_lane_flags));
				spi_engine_program_add_cmd(p, dry,
					SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDO_MASK,
							     tx_lane_flags));
			}
			prev_multi_lane_mode = xfer->multi_lane_mode;
		}

		/* Only emit a divider write when the clock rate changes. */
		new_clk_div = host->max_speed_hz / xfer->effective_speed_hz;
		if (new_clk_div != clk_div) {
			clk_div = new_clk_div;
			/* actual divider used is register value + 1 */
			spi_engine_program_add_cmd(p, dry,
				SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV,
					clk_div - 1));
		}

		/* Likewise, only reprogram the word size when it changes. */
		if (bits_per_word != xfer->bits_per_word && xfer->len) {
			bits_per_word = xfer->bits_per_word;
			spi_engine_program_add_cmd(p, dry,
				SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_XFER_BITS,
					bits_per_word));
		}

		spi_engine_gen_xfer(p, dry, xfer, spi->num_rx_lanes);
		spi_engine_gen_sleep(p, dry, spi_delay_to_ns(&xfer->delay, xfer),
				     inst_ns, xfer->effective_speed_hz);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
				keep_cs = true;
			} else {
				if (!xfer->cs_off)
					spi_engine_gen_cs(p, dry, spi, false);

				spi_engine_gen_sleep(p, dry, spi_delay_to_ns(
					&xfer->cs_change_delay, xfer), inst_ns,
					xfer->effective_speed_hz);

				if (!list_next_entry(xfer, transfer_list)->cs_off)
					spi_engine_gen_cs(p, dry, spi, true);
			}
		} else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
			   xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
			spi_engine_gen_cs(p, dry, spi, xfer->cs_off);
		}
	}

	if (!keep_cs)
		spi_engine_gen_cs(p, dry, spi, false);

	/*
	 * Restore clockdiv to default so that future gen_sleep commands don't
	 * have to be aware of the current register state.
	 */
	if (clk_div != 1)
		spi_engine_program_add_cmd(p, dry,
			SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV, 0));

	/* Restore single lane mode unless offload disable will restore it later. */
	if (prev_multi_lane_mode == SPI_MULTI_LANE_MODE_STRIPE &&
	    (!msg->offload || priv->multi_lane_mode != SPI_MULTI_LANE_MODE_STRIPE)) {
		u8 rx_lane_flags, tx_lane_flags;

		spi_engine_primary_lane_flag(spi, &rx_lane_flags, &tx_lane_flags);

		spi_engine_program_add_cmd(p, dry,
			SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDI_MASK, rx_lane_flags));
		spi_engine_program_add_cmd(p, dry,
			SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDO_MASK, tx_lane_flags));
	}
}
498 
spi_engine_xfer_next(struct spi_message * msg,struct spi_transfer ** _xfer)499 static void spi_engine_xfer_next(struct spi_message *msg,
500 	struct spi_transfer **_xfer)
501 {
502 	struct spi_transfer *xfer = *_xfer;
503 
504 	if (!xfer) {
505 		xfer = list_first_entry(&msg->transfers,
506 			struct spi_transfer, transfer_list);
507 	} else if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
508 		xfer = NULL;
509 	} else {
510 		xfer = list_next_entry(xfer, transfer_list);
511 	}
512 
513 	*_xfer = xfer;
514 }
515 
spi_engine_tx_next(struct spi_message * msg)516 static void spi_engine_tx_next(struct spi_message *msg)
517 {
518 	struct spi_engine_message_state *st = msg->state;
519 	struct spi_transfer *xfer = st->tx_xfer;
520 
521 	do {
522 		spi_engine_xfer_next(msg, &xfer);
523 	} while (xfer && !xfer->tx_buf);
524 
525 	st->tx_xfer = xfer;
526 	if (xfer) {
527 		st->tx_length = xfer->len;
528 		st->tx_buf = xfer->tx_buf;
529 	} else {
530 		st->tx_buf = NULL;
531 	}
532 }
533 
spi_engine_rx_next(struct spi_message * msg)534 static void spi_engine_rx_next(struct spi_message *msg)
535 {
536 	struct spi_engine_message_state *st = msg->state;
537 	struct spi_transfer *xfer = st->rx_xfer;
538 
539 	do {
540 		spi_engine_xfer_next(msg, &xfer);
541 	} while (xfer && !xfer->rx_buf);
542 
543 	st->rx_xfer = xfer;
544 	if (xfer) {
545 		st->rx_length = xfer->len;
546 		st->rx_buf = xfer->rx_buf;
547 	} else {
548 		st->rx_buf = NULL;
549 	}
550 }
551 
/*
 * Drain as many pending command words from the message state into the CMD
 * FIFO as the hardware currently reports room for.
 *
 * Returns true if command words remain (caller should keep the CMD
 * almost-empty interrupt enabled).
 */
static bool spi_engine_write_cmd_fifo(struct spi_engine *spi_engine,
				      struct spi_message *msg)
{
	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_CMD_FIFO;
	struct spi_engine_message_state *st = msg->state;
	unsigned int n, m, i;
	const uint16_t *buf;

	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_CMD_FIFO_ROOM);
	while (n && st->cmd_length) {
		/* Write at most the available FIFO room in this pass. */
		m = min(n, st->cmd_length);
		buf = st->cmd_buf;
		for (i = 0; i < m; i++)
			writel_relaxed(buf[i], addr);
		st->cmd_buf += m;
		st->cmd_length -= m;
		n -= m;
	}

	return st->cmd_length != 0;
}
573 
/*
 * Copy words from the current TX transfer into the SDO FIFO, bounded by the
 * reported FIFO room, advancing to the next TX transfer whenever one is
 * exhausted. Word width follows the current transfer's bits_per_word.
 *
 * Returns true if TX data remains (keep the almost-empty interrupt enabled).
 */
static bool spi_engine_write_tx_fifo(struct spi_engine *spi_engine,
				     struct spi_message *msg)
{
	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDO_DATA_FIFO;
	struct spi_engine_message_state *st = msg->state;
	unsigned int n, m, i;

	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDO_FIFO_ROOM);
	while (n && st->tx_length) {
		if (st->tx_xfer->bits_per_word <= 8) {
			/* One FIFO entry per byte. */
			const u8 *buf = st->tx_buf;

			m = min(n, st->tx_length);
			for (i = 0; i < m; i++)
				writel_relaxed(buf[i], addr);
			st->tx_buf += m;
			st->tx_length -= m;
		} else if (st->tx_xfer->bits_per_word <= 16) {
			/* One FIFO entry per 16-bit word. */
			const u16 *buf = (const u16 *)st->tx_buf;

			m = min(n, st->tx_length / 2);
			for (i = 0; i < m; i++)
				writel_relaxed(buf[i], addr);
			st->tx_buf += m * 2;
			st->tx_length -= m * 2;
		} else {
			/* One FIFO entry per 32-bit word. */
			const u32 *buf = (const u32 *)st->tx_buf;

			m = min(n, st->tx_length / 4);
			for (i = 0; i < m; i++)
				writel_relaxed(buf[i], addr);
			st->tx_buf += m * 4;
			st->tx_length -= m * 4;
		}
		n -= m;
		/* Current transfer drained: move on to the next TX transfer. */
		if (st->tx_length == 0)
			spi_engine_tx_next(msg);
	}

	return st->tx_length != 0;
}
615 
/*
 * Copy words from the SDI FIFO into the current RX transfer's buffer,
 * bounded by the reported FIFO fill level, advancing to the next RX
 * transfer whenever one is filled. Word width follows the current
 * transfer's bits_per_word.
 *
 * Returns true if RX data is still expected (keep the almost-full
 * interrupt enabled).
 */
static bool spi_engine_read_rx_fifo(struct spi_engine *spi_engine,
				    struct spi_message *msg)
{
	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDI_DATA_FIFO;
	struct spi_engine_message_state *st = msg->state;
	unsigned int n, m, i;

	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDI_FIFO_LEVEL);
	while (n && st->rx_length) {
		if (st->rx_xfer->bits_per_word <= 8) {
			/* One FIFO entry per byte. */
			u8 *buf = st->rx_buf;

			m = min(n, st->rx_length);
			for (i = 0; i < m; i++)
				buf[i] = readl_relaxed(addr);
			st->rx_buf += m;
			st->rx_length -= m;
		} else if (st->rx_xfer->bits_per_word <= 16) {
			/* One FIFO entry per 16-bit word. */
			u16 *buf = (u16 *)st->rx_buf;

			m = min(n, st->rx_length / 2);
			for (i = 0; i < m; i++)
				buf[i] = readl_relaxed(addr);
			st->rx_buf += m * 2;
			st->rx_length -= m * 2;
		} else {
			/* One FIFO entry per 32-bit word. */
			u32 *buf = (u32 *)st->rx_buf;

			m = min(n, st->rx_length / 4);
			for (i = 0; i < m; i++)
				buf[i] = readl_relaxed(addr);
			st->rx_buf += m * 4;
			st->rx_length -= m * 4;
		}
		n -= m;
		/* Current transfer filled: move on to the next RX transfer. */
		if (st->rx_length == 0)
			spi_engine_rx_next(msg);
	}

	return st->rx_length != 0;
}
657 
/*
 * Interrupt handler: services the CMD/SDO/SDI FIFOs for the message
 * currently in flight and signals completion when that message's trailing
 * SYNC instruction is observed. Interrupt sources with no remaining work
 * are masked until the next message re-enables them.
 */
static irqreturn_t spi_engine_irq(int irq, void *devid)
{
	struct spi_controller *host = devid;
	struct spi_message *msg = host->cur_msg;
	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
	unsigned int disable_int = 0;
	unsigned int pending;
	int completed_id = -1;

	pending = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_INT_PENDING);

	if (pending & SPI_ENGINE_INT_SYNC) {
		/* Ack SYNC and latch which sync ID just completed. */
		writel_relaxed(SPI_ENGINE_INT_SYNC,
			spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
		completed_id = readl_relaxed(
			spi_engine->base + SPI_ENGINE_REG_SYNC_ID);
	}

	spin_lock(&spi_engine->lock);

	/* Top up the CMD FIFO; mask the interrupt once nothing is left. */
	if (pending & SPI_ENGINE_INT_CMD_ALMOST_EMPTY) {
		if (!spi_engine_write_cmd_fifo(spi_engine, msg))
			disable_int |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;
	}

	if (pending & SPI_ENGINE_INT_SDO_ALMOST_EMPTY) {
		if (!spi_engine_write_tx_fifo(spi_engine, msg))
			disable_int |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;
	}

	/* On SYNC, also drain any RX words still sitting in the FIFO. */
	if (pending & (SPI_ENGINE_INT_SDI_ALMOST_FULL | SPI_ENGINE_INT_SYNC)) {
		if (!spi_engine_read_rx_fifo(spi_engine, msg))
			disable_int |= SPI_ENGINE_INT_SDI_ALMOST_FULL;
	}

	/* The current message's trailing SYNC marks its completion. */
	if (pending & SPI_ENGINE_INT_SYNC && msg) {
		if (completed_id == AXI_SPI_ENGINE_CUR_MSG_SYNC_ID) {
			msg->status = 0;
			msg->actual_length = msg->frame_length;
			complete(&spi_engine->msg_complete);
			disable_int |= SPI_ENGINE_INT_SYNC;
		}
	}

	if (disable_int) {
		spi_engine->int_enable &= ~disable_int;
		writel_relaxed(spi_engine->int_enable,
			spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
	}

	spin_unlock(&spi_engine->lock);

	return IRQ_HANDLED;
}
712 
/*
 * Validate the optimized message against the offload FIFO capacities and
 * preload the offload's SDO and command FIFOs with its static contents.
 *
 * Returns 0 on success, -EINVAL if the message does not fit or uses
 * unsupported features, -EBUSY if the offload is already prepared with
 * another message.
 */
static int spi_engine_offload_prepare(struct spi_message *msg)
{
	struct spi_controller *host = msg->spi->controller;
	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
	struct spi_engine_program *p = msg->opt_state;
	struct spi_engine_offload *priv = msg->offload->priv;
	struct spi_transfer *xfer;
	void __iomem *cmd_addr;
	void __iomem *sdo_addr;
	size_t tx_word_count = 0;
	unsigned int i;

	/* The whole instruction stream must fit in the offload CMD memory. */
	if (p->length > spi_engine->offload_ctrl_mem_size)
		return -EINVAL;

	/* count total number of tx words in message */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* no support for reading to rx_buf */
		if (xfer->rx_buf)
			return -EINVAL;

		if (!xfer->tx_buf)
			continue;

		if (xfer->bits_per_word <= 8)
			tx_word_count += xfer->len;
		else if (xfer->bits_per_word <= 16)
			tx_word_count += xfer->len / 2;
		else
			tx_word_count += xfer->len / 4;
	}

	if (tx_word_count && !(spi_engine->offload_caps & SPI_OFFLOAD_CAP_TX_STATIC_DATA))
		return -EINVAL;

	if (tx_word_count > spi_engine->offload_sdo_mem_size)
		return -EINVAL;

	/*
	 * This protects against calling spi_optimize_message() with an offload
	 * that has already been prepared with a different message.
	 */
	if (test_and_set_bit_lock(SPI_ENGINE_OFFLOAD_FLAG_PREPARED, &priv->flags))
		return -EBUSY;

	cmd_addr = spi_engine->base +
		   SPI_ENGINE_REG_OFFLOAD_CMD_FIFO(priv->offload_num);
	sdo_addr = spi_engine->base +
		   SPI_ENGINE_REG_OFFLOAD_SDO_FIFO(priv->offload_num);

	/* Preload the static TX data, one FIFO write per word. */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!xfer->tx_buf)
			continue;

		if (xfer->bits_per_word <= 8) {
			const u8 *buf = xfer->tx_buf;

			for (i = 0; i < xfer->len; i++)
				writel_relaxed(buf[i], sdo_addr);
		} else if (xfer->bits_per_word <= 16) {
			const u16 *buf = xfer->tx_buf;

			for (i = 0; i < xfer->len / 2; i++)
				writel_relaxed(buf[i], sdo_addr);
		} else {
			const u32 *buf = xfer->tx_buf;

			for (i = 0; i < xfer->len / 4; i++)
				writel_relaxed(buf[i], sdo_addr);
		}
	}

	/* Load the compiled instruction stream into the offload CMD FIFO. */
	for (i = 0; i < p->length; i++)
		writel_relaxed(p->instructions[i], cmd_addr);

	return 0;
}
790 
spi_engine_offload_unprepare(struct spi_offload * offload)791 static void spi_engine_offload_unprepare(struct spi_offload *offload)
792 {
793 	struct spi_engine_offload *priv = offload->priv;
794 	struct spi_engine *spi_engine = priv->spi_engine;
795 
796 	writel_relaxed(1, spi_engine->base +
797 			  SPI_ENGINE_REG_OFFLOAD_RESET(priv->offload_num));
798 	writel_relaxed(0, spi_engine->base +
799 			  SPI_ENGINE_REG_OFFLOAD_RESET(priv->offload_num));
800 
801 	clear_bit_unlock(SPI_ENGINE_OFFLOAD_FLAG_PREPARED, &priv->flags);
802 }
803 
/*
 * spi_optimize_message() callback: compile @msg into a SPI Engine
 * instruction stream stored in msg->opt_state and, for offload messages,
 * preload the offload FIFOs.
 */
static int spi_engine_optimize_message(struct spi_message *msg)
{
	struct spi_controller *host = msg->spi->controller;
	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
	struct spi_engine_program p_dry, *p;
	int ret;

	ret = spi_engine_precompile_message(msg);
	if (ret)
		return ret;

	/* First pass only counts instructions so the buffer can be sized. */
	p_dry.length = 0;
	spi_engine_compile_message(msg, true, &p_dry);

	/* One extra slot for the optional trailing SYNC instruction below. */
	p = kzalloc_flex(*p, instructions, p_dry.length + 1);
	if (!p)
		return -ENOMEM;

	spi_engine_compile_message(msg, false, p);

	/*
	 * Non-offload needs SYNC for completion interrupt. Older versions of
	 * the IP core also need SYNC for offload to work properly.
	 */
	if (!msg->offload || spi_engine->offload_requires_sync)
		spi_engine_program_add_cmd(p, false, SPI_ENGINE_CMD_SYNC(
			msg->offload ? 0 : AXI_SPI_ENGINE_CUR_MSG_SYNC_ID));

	msg->opt_state = p;

	if (msg->offload) {
		ret = spi_engine_offload_prepare(msg);
		if (ret) {
			msg->opt_state = NULL;
			kfree(p);
			return ret;
		}
	}

	return 0;
}
845 
spi_engine_unoptimize_message(struct spi_message * msg)846 static int spi_engine_unoptimize_message(struct spi_message *msg)
847 {
848 	if (msg->offload)
849 		spi_engine_offload_unprepare(msg->offload);
850 
851 	kfree(msg->opt_state);
852 
853 	return 0;
854 }
855 
856 static struct spi_offload
spi_engine_get_offload(struct spi_device * spi,const struct spi_offload_config * config)857 *spi_engine_get_offload(struct spi_device *spi,
858 			const struct spi_offload_config *config)
859 {
860 	struct spi_controller *host = spi->controller;
861 	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
862 	struct spi_engine_offload *priv;
863 
864 	if (!spi_engine->offload)
865 		return ERR_PTR(-ENODEV);
866 
867 	if (config->capability_flags & ~spi_engine->offload_caps)
868 		return ERR_PTR(-EINVAL);
869 
870 	priv = spi_engine->offload->priv;
871 
872 	if (test_and_set_bit_lock(SPI_ENGINE_OFFLOAD_FLAG_ASSIGNED, &priv->flags))
873 		return ERR_PTR(-EBUSY);
874 
875 	return spi_engine->offload;
876 }
877 
spi_engine_put_offload(struct spi_offload * offload)878 static void spi_engine_put_offload(struct spi_offload *offload)
879 {
880 	struct spi_engine_offload *priv = offload->priv;
881 
882 	clear_bit_unlock(SPI_ENGINE_OFFLOAD_FLAG_ASSIGNED, &priv->flags);
883 }
884 
/*
 * Per-device setup: push a short command sequence through the CMD FIFO to
 * update the CS inversion flags and (on multi-lane controllers) the default
 * lane masks, then wait for the sequence to finish executing.
 */
static int spi_engine_setup(struct spi_device *device)
{
	struct spi_controller *host = device->controller;
	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
	unsigned int reg;

	if (device->mode & SPI_CS_HIGH)
		spi_engine->cs_inv |= BIT(spi_get_chipselect(device, 0));
	else
		spi_engine->cs_inv &= ~BIT(spi_get_chipselect(device, 0));

	/* SYNC 0 marks the start of the sequence (SYNC_ID reads back 0). */
	writel_relaxed(SPI_ENGINE_CMD_SYNC(0),
		       spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);

	writel_relaxed(SPI_ENGINE_CMD_CS_INV(spi_engine->cs_inv),
		       spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);

	if (host->num_data_lanes > 1) {
		u8 rx_lane_flags, tx_lane_flags;

		/* Default to the primary lane until a message selects others. */
		spi_engine_primary_lane_flag(device, &rx_lane_flags, &tx_lane_flags);

		writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDI_MASK,
						    rx_lane_flags),
			       spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
		writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDO_MASK,
						    tx_lane_flags),
			       spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
	}

	/*
	 * In addition to setting the flags, we have to do a CS assert command
	 * to make the new setting actually take effect.
	 */
	writel_relaxed(SPI_ENGINE_CMD_ASSERT(0, 0xff),
		       spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);

	writel_relaxed(SPI_ENGINE_CMD_SYNC(1),
		       spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);

	/* The sequence is done once the trailing SYNC 1 shows up in SYNC_ID. */
	return readl_relaxed_poll_timeout(spi_engine->base + SPI_ENGINE_REG_SYNC_ID,
					  reg, reg == 1, 1, 1000);
}
928 
/*
 * spi_engine_transfer_one_message() - Execute one pre-compiled SPI message.
 *
 * The command program for this message was built earlier (it arrives via
 * msg->opt_state, presumably from the ->optimize_message hook). This
 * function primes the command and SDO FIFOs, enables the matching
 * interrupts, and lets the IRQ handler stream the remainder, waiting on
 * a completion that is signalled when the final SYNC executes.
 *
 * Return: msg->status — 0 on success, -ETIMEDOUT if the hardware did not
 * finish within 5 s, or -EOPNOTSUPP for offload-tagged messages, which
 * must go through the offload path instead.
 */
static int spi_engine_transfer_one_message(struct spi_controller *host,
	struct spi_message *msg)
{
	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
	struct spi_engine_message_state *st = &spi_engine->msg_state;
	struct spi_engine_program *p = msg->opt_state;
	unsigned int int_enable = 0;
	unsigned long flags;

	if (msg->offload) {
		dev_err(&host->dev, "Single transfer offload not supported\n");
		msg->status = -EOPNOTSUPP;
		goto out;
	}

	/* reinitialize message state for this transfer */
	memset(st, 0, sizeof(*st));
	st->cmd_buf = p->instructions;
	st->cmd_length = p->length;
	msg->state = st;

	reinit_completion(&spi_engine->msg_complete);

	if (trace_spi_transfer_start_enabled()) {
		struct spi_transfer *xfer;

		list_for_each_entry(xfer, &msg->transfers, transfer_list)
			trace_spi_transfer_start(msg, xfer);
	}

	/* The lock serializes FIFO access and int_enable against the IRQ handler. */
	spin_lock_irqsave(&spi_engine->lock, flags);

	/* A true return appears to mean "more left than fit in the FIFO":
	 * arm the almost-empty/full interrupts so the handler keeps feeding.
	 */
	if (spi_engine_write_cmd_fifo(spi_engine, msg))
		int_enable |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;

	spi_engine_tx_next(msg);
	if (spi_engine_write_tx_fifo(spi_engine, msg))
		int_enable |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;

	spi_engine_rx_next(msg);
	if (st->rx_length != 0)
		int_enable |= SPI_ENGINE_INT_SDI_ALMOST_FULL;

	/* SYNC interrupt signals end-of-message completion. */
	int_enable |= SPI_ENGINE_INT_SYNC;

	writel_relaxed(int_enable,
		spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
	spi_engine->int_enable = int_enable;
	spin_unlock_irqrestore(&spi_engine->lock, flags);

	if (!wait_for_completion_timeout(&spi_engine->msg_complete,
					 msecs_to_jiffies(5000))) {
		dev_err(&host->dev,
			"Timeout occurred while waiting for transfer to complete. Hardware is probably broken.\n");
		msg->status = -ETIMEDOUT;
	}

	if (trace_spi_transfer_stop_enabled()) {
		struct spi_transfer *xfer;

		list_for_each_entry(xfer, &msg->transfers, transfer_list)
			trace_spi_transfer_stop(msg, xfer);
	}

out:
	spi_finalize_current_message(host);

	return msg->status;
}
998 
/*
 * spi_engine_trigger_enable() - Arm a hardware-triggered offload.
 *
 * Replays the SPI mode configuration (and optional word size and
 * multi-lane masks) cached in the offload's private state through the
 * command FIFO, waits for the sequence to execute, then sets the ENABLE
 * bit in the offload control register so the offload program runs each
 * time its trigger fires.
 *
 * Return: 0 on success, or the timeout error if the SYNC handshake
 * never completed.
 */
static int spi_engine_trigger_enable(struct spi_offload *offload)
{
	struct spi_engine_offload *priv = offload->priv;
	struct spi_engine *spi_engine = priv->spi_engine;
	unsigned int reg;
	int ret;

	/* SYNC(0) clears SYNC_ID; the poll below then waits for SYNC(1). */
	writel_relaxed(SPI_ENGINE_CMD_SYNC(0),
		spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);

	writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG,
					    priv->spi_mode_config),
		       spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);

	/* Only program the word size if one was explicitly captured. */
	if (priv->bits_per_word)
		writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_XFER_BITS,
						    priv->bits_per_word),
			       spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);

	/* Stripe mode drives/samples across all lanes at once. */
	if (priv->multi_lane_mode == SPI_MULTI_LANE_MODE_STRIPE) {
		writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDI_MASK,
						    priv->rx_all_lanes_mask),
			       spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
		writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDO_MASK,
						    priv->tx_all_lanes_mask),
			       spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
	}

	writel_relaxed(SPI_ENGINE_CMD_SYNC(1),
		spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);

	/* Poll every 1 us, up to 1 ms, for the command sequence to finish. */
	ret = readl_relaxed_poll_timeout(spi_engine->base + SPI_ENGINE_REG_SYNC_ID,
					 reg, reg == 1, 1, 1000);
	if (ret)
		return ret;

	/* Finally flip the enable bit; the trigger can fire from here on. */
	reg = readl_relaxed(spi_engine->base +
			    SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num));
	reg |= SPI_ENGINE_OFFLOAD_CTRL_ENABLE;
	writel_relaxed(reg, spi_engine->base +
			    SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num));
	return 0;
}
1042 
/*
 * spi_engine_trigger_disable() - Disarm a hardware-triggered offload.
 *
 * Clears the ENABLE bit in the offload control register and, if stripe
 * multi-lane mode was active, restores the single (primary) lane masks
 * so subsequent non-offload transfers use one lane again.
 */
static void spi_engine_trigger_disable(struct spi_offload *offload)
{
	struct spi_engine_offload *priv = offload->priv;
	struct spi_engine *spi_engine = priv->spi_engine;
	unsigned int reg;

	/* Read-modify-write so only the enable bit is touched. */
	reg = readl_relaxed(spi_engine->base +
			    SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num));
	reg &= ~SPI_ENGINE_OFFLOAD_CTRL_ENABLE;
	writel_relaxed(reg, spi_engine->base +
			    SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num));

	/* Restore single-lane mode. */
	if (priv->multi_lane_mode == SPI_MULTI_LANE_MODE_STRIPE) {
		writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDI_MASK,
						    priv->rx_primary_lane_mask),
			       spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
		writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDO_MASK,
						    priv->tx_primary_lane_mask),
			       spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
	}
}
1065 
1066 static struct dma_chan
spi_engine_tx_stream_request_dma_chan(struct spi_offload * offload)1067 *spi_engine_tx_stream_request_dma_chan(struct spi_offload *offload)
1068 {
1069 	struct spi_engine_offload *priv = offload->priv;
1070 	char name[16];
1071 
1072 	snprintf(name, sizeof(name), "offload%u-tx", priv->offload_num);
1073 
1074 	return dma_request_chan(offload->provider_dev, name);
1075 }
1076 
1077 static struct dma_chan
spi_engine_rx_stream_request_dma_chan(struct spi_offload * offload)1078 *spi_engine_rx_stream_request_dma_chan(struct spi_offload *offload)
1079 {
1080 	struct spi_engine_offload *priv = offload->priv;
1081 	char name[16];
1082 
1083 	snprintf(name, sizeof(name), "offload%u-rx", priv->offload_num);
1084 
1085 	return dma_request_chan(offload->provider_dev, name);
1086 }
1087 
/* Callbacks handed to the SPI offload core for trigger and DMA-stream control. */
static const struct spi_offload_ops spi_engine_offload_ops = {
	.trigger_enable = spi_engine_trigger_enable,
	.trigger_disable = spi_engine_trigger_disable,
	.tx_stream_request_dma_chan = spi_engine_tx_stream_request_dma_chan,
	.rx_stream_request_dma_chan = spi_engine_rx_stream_request_dma_chan,
};
1094 
spi_engine_release_hw(void * p)1095 static void spi_engine_release_hw(void *p)
1096 {
1097 	struct spi_engine *spi_engine = p;
1098 
1099 	writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
1100 	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
1101 	writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET);
1102 }
1103 
/*
 * spi_engine_probe() - Bind the platform device and register a SPI host.
 *
 * Discovers optional offload support (indicated by a "trigger-sources"
 * DT property), acquires clocks and registers, validates the IP core
 * version, brings the core out of reset, installs the IRQ handler and
 * registers the controller. Everything is devm-managed, so the driver
 * has no remove callback.
 *
 * Return: 0 on success or a negative errno.
 */
static int spi_engine_probe(struct platform_device *pdev)
{
	struct spi_engine *spi_engine;
	struct spi_controller *host;
	unsigned int version, data_width_reg_val;
	int irq, ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* spi_engine lives in the controller's devdata area. */
	host = devm_spi_alloc_host(&pdev->dev, sizeof(*spi_engine));
	if (!host)
		return -ENOMEM;

	spi_engine = spi_controller_get_devdata(host);

	spin_lock_init(&spi_engine->lock);
	init_completion(&spi_engine->msg_complete);

	/*
	 * REVISIT: for now, all SPI Engines only have one offload. In the
	 * future, this should be read from a memory mapped register to
	 * determine the number of offloads enabled at HDL compile time. For
	 * now, we can tell if an offload is present if there is a trigger
	 * source wired up to it.
	 */
	if (device_property_present(&pdev->dev, "trigger-sources")) {
		struct spi_engine_offload *priv;

		spi_engine->offload =
			devm_spi_offload_alloc(&pdev->dev,
					       sizeof(struct spi_engine_offload));
		if (IS_ERR(spi_engine->offload))
			return PTR_ERR(spi_engine->offload);

		priv = spi_engine->offload->priv;
		priv->spi_engine = spi_engine;
		priv->offload_num = 0;

		spi_engine->offload->ops = &spi_engine_offload_ops;
		spi_engine->offload_caps = SPI_OFFLOAD_CAP_TRIGGER;

		/* Streaming capabilities depend on which DMA channels are wired up. */
		if (device_property_match_string(&pdev->dev, "dma-names", "offload0-rx") >= 0) {
			spi_engine->offload_caps |= SPI_OFFLOAD_CAP_RX_STREAM_DMA;
			spi_engine->offload->xfer_flags |= SPI_OFFLOAD_XFER_RX_STREAM;
		}

		if (device_property_match_string(&pdev->dev, "dma-names", "offload0-tx") >= 0) {
			spi_engine->offload_caps |= SPI_OFFLOAD_CAP_TX_STREAM_DMA;
			spi_engine->offload->xfer_flags |= SPI_OFFLOAD_XFER_TX_STREAM;
		} else {
			/*
			 * HDL compile option to enable TX DMA stream also disables
			 * the SDO memory, so can't do both at the same time.
			 */
			spi_engine->offload_caps |= SPI_OFFLOAD_CAP_TX_STATIC_DATA;
		}
	}

	spi_engine->clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk");
	if (IS_ERR(spi_engine->clk))
		return PTR_ERR(spi_engine->clk);

	spi_engine->ref_clk = devm_clk_get_enabled(&pdev->dev, "spi_clk");
	if (IS_ERR(spi_engine->ref_clk))
		return PTR_ERR(spi_engine->ref_clk);

	spi_engine->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(spi_engine->base))
		return PTR_ERR(spi_engine->base);

	/* Reject IP core versions newer than what this driver understands. */
	version = readl(spi_engine->base + ADI_AXI_REG_VERSION);
	if (ADI_AXI_PCORE_VER_MAJOR(version) > 2) {
		dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%u\n",
			ADI_AXI_PCORE_VER_MAJOR(version),
			ADI_AXI_PCORE_VER_MINOR(version),
			ADI_AXI_PCORE_VER_PATCH(version));
		return -ENODEV;
	}

	data_width_reg_val = readl(spi_engine->base + SPI_ENGINE_REG_DATA_WIDTH);

	/* From IP v1.1 the offload memory sizes are discoverable; older
	 * cores use fixed compile-time defaults.
	 */
	if (adi_axi_pcore_ver_gteq(version, 1, 1)) {
		unsigned int sizes = readl(spi_engine->base +
				SPI_ENGINE_REG_OFFLOAD_MEM_ADDR_WIDTH);

		/* Register holds address widths, so size = 2^width. */
		spi_engine->offload_ctrl_mem_size = 1 <<
			FIELD_GET(SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_CMD, sizes);
		spi_engine->offload_sdo_mem_size = 1 <<
			FIELD_GET(SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_SDO, sizes);
	} else {
		spi_engine->offload_ctrl_mem_size = SPI_ENGINE_OFFLOAD_CMD_FIFO_SIZE;
		spi_engine->offload_sdo_mem_size = SPI_ENGINE_OFFLOAD_SDO_FIFO_SIZE;
	}

	/* IP v1.5 dropped the requirement for SYNC in offload messages. */
	spi_engine->offload_requires_sync = !adi_axi_pcore_ver_gteq(version, 1, 5);

	/* Take the core out of reset with all interrupts acked and masked. */
	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_RESET);
	writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);

	/*
	 * Registered before the IRQ: devm actions run in reverse order, so
	 * on detach the IRQ is freed first, then this quiesces the hardware.
	 */
	ret = devm_add_action_or_reset(&pdev->dev, spi_engine_release_hw,
				       spi_engine);
	if (ret)
		return ret;

	ret = devm_request_irq(&pdev->dev, irq, spi_engine_irq, 0, pdev->name,
			       host);
	if (ret)
		return ret;

	host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE;
	host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
	host->max_speed_hz = clk_get_rate(spi_engine->ref_clk) / 2;
	host->transfer_one_message = spi_engine_transfer_one_message;
	host->optimize_message = spi_engine_optimize_message;
	host->unoptimize_message = spi_engine_unoptimize_message;
	host->get_offload = spi_engine_get_offload;
	host->put_offload = spi_engine_put_offload;
	host->num_chipselect = 8;

	/* Optional features gated on the IP core version. */
	if (adi_axi_pcore_ver_gteq(version, 1, 2)) {
		host->mode_bits |= SPI_CS_HIGH;
		host->setup = spi_engine_setup;
	}
	if (adi_axi_pcore_ver_gteq(version, 1, 3))
		host->mode_bits |= SPI_MOSI_IDLE_LOW | SPI_MOSI_IDLE_HIGH;
	if (adi_axi_pcore_ver_gteq(version, 2, 0))
		host->num_data_lanes = FIELD_GET(SPI_ENGINE_REG_DATA_WIDTH_NUM_OF_SDIO_MASK,
						 data_width_reg_val);

	if (host->max_speed_hz == 0)
		return dev_err_probe(&pdev->dev, -EINVAL, "spi_clk rate is 0");

	return devm_spi_register_controller(&pdev->dev, host);
}
1242 
/* Device tree match table (also exported for module autoloading). */
static const struct of_device_id spi_engine_match_table[] = {
	{ .compatible = "adi,axi-spi-engine-1.00.a" },
	{ },
};
MODULE_DEVICE_TABLE(of, spi_engine_match_table);
1248 
/* No .remove callback: all probe resources are devm-managed. */
static struct platform_driver spi_engine_driver = {
	.probe = spi_engine_probe,
	.driver = {
		.name = "spi-engine",
		.of_match_table = spi_engine_match_table,
	},
};
module_platform_driver(spi_engine_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("Analog Devices SPI engine peripheral driver");
MODULE_LICENSE("GPL");
1261