xref: /linux/drivers/spi/spi-fsl-lpspi.c (revision e6640487845061255af9614ec0a192e4fafa486e)
1 // SPDX-License-Identifier: GPL-2.0+
2 //
3 // Freescale i.MX7ULP LPSPI driver
4 //
5 // Copyright 2016 Freescale Semiconductor, Inc.
6 // Copyright 2018, 2023, 2025 NXP
7 
8 #include <linux/bitfield.h>
9 #include <linux/clk.h>
10 #include <linux/completion.h>
11 #include <linux/delay.h>
12 #include <linux/dmaengine.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/err.h>
15 #include <linux/interrupt.h>
16 #include <linux/io.h>
17 #include <linux/irq.h>
18 #include <linux/kernel.h>
19 #include <linux/module.h>
20 #include <linux/of.h>
21 #include <linux/pinctrl/consumer.h>
22 #include <linux/platform_device.h>
23 #include <linux/dma/imx-dma.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/slab.h>
26 #include <linux/spi/spi.h>
27 #include <linux/spi/spi_bitbang.h>
28 #include <linux/types.h>
29 #include <linux/minmax.h>
30 
31 #define DRIVER_NAME "fsl_lpspi"
32 
33 #define FSL_LPSPI_RPM_TIMEOUT 50 /* 50ms */
34 
35 /* The maximum number of bytes that eDMA can transfer at once. */
36 #define FSL_LPSPI_MAX_EDMA_BYTES  ((1 << 15) - 1)
37 
38 /* i.MX7ULP LPSPI registers */
39 #define IMX7ULP_VERID	0x0
40 #define IMX7ULP_PARAM	0x4
41 #define IMX7ULP_CR	0x10
42 #define IMX7ULP_SR	0x14
43 #define IMX7ULP_IER	0x18
44 #define IMX7ULP_DER	0x1c
45 #define IMX7ULP_CFGR0	0x20
46 #define IMX7ULP_CFGR1	0x24
47 #define IMX7ULP_DMR0	0x30
48 #define IMX7ULP_DMR1	0x34
49 #define IMX7ULP_CCR	0x40
50 #define IMX7ULP_FCR	0x58
51 #define IMX7ULP_FSR	0x5c
52 #define IMX7ULP_TCR	0x60
53 #define IMX7ULP_TDR	0x64
54 #define IMX7ULP_RSR	0x70
55 #define IMX7ULP_RDR	0x74
56 
57 /* General control register field definitions */
58 #define CR_RRF		BIT(9)
59 #define CR_RTF		BIT(8)
60 #define CR_RST		BIT(1)
61 #define CR_MEN		BIT(0)
62 #define SR_MBF		BIT(24)
63 #define SR_TCF		BIT(10)
64 #define SR_FCF		BIT(9)
65 #define SR_RDF		BIT(1)
66 #define SR_TDF		BIT(0)
67 #define IER_TCIE	BIT(10)
68 #define IER_FCIE	BIT(9)
69 #define IER_RDIE	BIT(1)
70 #define IER_TDIE	BIT(0)
71 #define DER_RDDE	BIT(1)
72 #define DER_TDDE	BIT(0)
73 #define CFGR1_PCSCFG	BIT(27)
74 #define CFGR1_PINCFG	(BIT(24)|BIT(25))
75 #define CFGR1_PCSPOL_MASK	GENMASK(11, 8)
76 #define CFGR1_NOSTALL	BIT(3)
77 #define CFGR1_HOST	BIT(0)
78 #define FSR_TXCOUNT	(0xFF)
79 #define RSR_RXEMPTY	BIT(1)
80 #define TCR_CPOL	BIT(31)
81 #define TCR_CPHA	BIT(30)
82 #define TCR_CONT	BIT(21)
83 #define TCR_CONTC	BIT(20)
84 #define TCR_RXMSK	BIT(19)
85 #define TCR_TXMSK	BIT(18)
86 
87 #define SR_CLEAR_MASK	GENMASK(13, 8)
88 
89 struct fsl_lpspi_devtype_data {
90 	u8 prescale_max : 3; /* 0 == no limit */
91 	bool query_hw_for_num_cs : 1;
92 };
93 
94 struct lpspi_config {
95 	u8 bpw;
96 	u8 chip_select;
97 	u8 prescale;
98 	u16 mode;
99 	u32 speed_hz;
100 	u32 effective_speed_hz;
101 };
102 
103 struct fsl_lpspi_data {
104 	struct device *dev;
105 	void __iomem *base;
106 	unsigned long base_phys;
107 	struct clk *clk_ipg;
108 	struct clk *clk_per;
109 	bool is_target;
110 	bool is_only_cs1;
111 	bool is_first_byte;
112 
113 	void *rx_buf;
114 	const void *tx_buf;
115 	void (*tx)(struct fsl_lpspi_data *);
116 	void (*rx)(struct fsl_lpspi_data *);
117 
118 	u32 remain;
119 	u8 watermark;
120 	u8 txfifosize;
121 	u8 rxfifosize;
122 
123 	struct lpspi_config config;
124 	struct completion xfer_done;
125 
126 	bool target_aborted;
127 
128 	/* DMA */
129 	bool usedma;
130 	struct completion dma_rx_completion;
131 	struct completion dma_tx_completion;
132 
133 	const struct fsl_lpspi_devtype_data *devtype_data;
134 };
135 
136 /*
137  * Devices with ERR051608 have a max TCR_PRESCALE value of 1, otherwise there is
138  * no prescale limit: https://www.nxp.com/docs/en/errata/i.MX93_1P87f.pdf
139  */
140 static const struct fsl_lpspi_devtype_data imx93_lpspi_devtype_data = {
141 	.prescale_max = 1,
142 	.query_hw_for_num_cs = true,
143 };
144 
145 static const struct fsl_lpspi_devtype_data imx7ulp_lpspi_devtype_data = {
146 	/* All defaults */
147 };
148 
149 static const struct fsl_lpspi_devtype_data s32g_lpspi_devtype_data = {
150 	.query_hw_for_num_cs = true,
151 };
152 
153 static const struct of_device_id fsl_lpspi_dt_ids[] = {
154 	{ .compatible = "fsl,imx7ulp-spi", .data = &imx7ulp_lpspi_devtype_data,},
155 	{ .compatible = "fsl,imx93-spi", .data = &imx93_lpspi_devtype_data,},
156 	{ .compatible = "nxp,s32g2-lpspi", .data = &s32g_lpspi_devtype_data,},
157 	{ /* sentinel */ }
158 };
159 MODULE_DEVICE_TABLE(of, fsl_lpspi_dt_ids);
160 
161 #define LPSPI_BUF_RX(type)						\
162 static void fsl_lpspi_buf_rx_##type(struct fsl_lpspi_data *fsl_lpspi)	\
163 {									\
164 	unsigned int val = readl(fsl_lpspi->base + IMX7ULP_RDR);	\
165 									\
166 	if (fsl_lpspi->rx_buf) {					\
167 		*(type *)fsl_lpspi->rx_buf = val;			\
168 		fsl_lpspi->rx_buf += sizeof(type);                      \
169 	}								\
170 }
171 
172 #define LPSPI_BUF_TX(type)						\
173 static void fsl_lpspi_buf_tx_##type(struct fsl_lpspi_data *fsl_lpspi)	\
174 {									\
175 	type val = 0;							\
176 									\
177 	if (fsl_lpspi->tx_buf) {					\
178 		val = *(type *)fsl_lpspi->tx_buf;			\
179 		fsl_lpspi->tx_buf += sizeof(type);			\
180 	}								\
181 									\
182 	fsl_lpspi->remain -= sizeof(type);				\
183 	writel(val, fsl_lpspi->base + IMX7ULP_TDR);			\
184 }
185 
186 LPSPI_BUF_RX(u8)
187 LPSPI_BUF_TX(u8)
188 LPSPI_BUF_RX(u16)
189 LPSPI_BUF_TX(u16)
190 LPSPI_BUF_RX(u32)
191 LPSPI_BUF_TX(u32)
192 
193 static void fsl_lpspi_intctrl(struct fsl_lpspi_data *fsl_lpspi,
194 			      unsigned int enable)
195 {
196 	writel(enable, fsl_lpspi->base + IMX7ULP_IER);
197 }
198 
199 static int fsl_lpspi_bytes_per_word(const int bpw)
200 {
201 	return DIV_ROUND_UP(bpw, BITS_PER_BYTE);
202 }
203 
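/*
 * DMA is used only when an RX DMA channel was obtained at probe time and the
 * transfer's word size maps onto a 1-, 2- or 4-byte DMA bus width.
 */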
204 static bool fsl_lpspi_can_dma(struct spi_controller *controller,
205 			      struct spi_device *spi,
206 			      struct spi_transfer *transfer)
207 {
208 	unsigned int bytes_per_word;
209 
210 	if (!controller->dma_rx)
211 		return false;
212 
213 	bytes_per_word = fsl_lpspi_bytes_per_word(transfer->bits_per_word);
214 
215 	switch (bytes_per_word) {
216 	case 1:
217 	case 2:
218 	case 4:
219 		break;
220 	default:
221 		return false;
222 	}
223 
224 	return true;
225 }
226 
227 static int lpspi_prepare_xfer_hardware(struct spi_controller *controller)
228 {
229 	struct fsl_lpspi_data *fsl_lpspi =
230 				spi_controller_get_devdata(controller);
231 	int ret;
232 
233 	ret = pm_runtime_resume_and_get(fsl_lpspi->dev);
234 	if (ret < 0) {
235 		dev_err(fsl_lpspi->dev, "failed to enable clock\n");
236 		return ret;
237 	}
238 
239 	return 0;
240 }
241 
242 static int lpspi_unprepare_xfer_hardware(struct spi_controller *controller)
243 {
244 	struct fsl_lpspi_data *fsl_lpspi =
245 				spi_controller_get_devdata(controller);
246 
247 	pm_runtime_put_autosuspend(fsl_lpspi->dev);
248 
249 	return 0;
250 }
251 
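/*
 * Fill the TX FIFO from the transfer buffer. If everything left fits in the
 * FIFO, clear TCR_CONTC in host mode and wait for the frame-complete
 * interrupt; otherwise arm the TX-data interrupt so the ISR refills the FIFO
 * once it drains.
 */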
252 static void fsl_lpspi_write_tx_fifo(struct fsl_lpspi_data *fsl_lpspi)
253 {
254 	u8 txfifo_cnt;
255 	u32 temp;
256 
257 	txfifo_cnt = readl(fsl_lpspi->base + IMX7ULP_FSR) & 0xff;
258 
259 	while (txfifo_cnt < fsl_lpspi->txfifosize) {
260 		if (!fsl_lpspi->remain)
261 			break;
262 		fsl_lpspi->tx(fsl_lpspi);
263 		txfifo_cnt++;
264 	}
265 
266 	if (txfifo_cnt < fsl_lpspi->txfifosize) {
267 		if (!fsl_lpspi->is_target) {
268 			temp = readl(fsl_lpspi->base + IMX7ULP_TCR);
269 			temp &= ~TCR_CONTC;
270 			writel(temp, fsl_lpspi->base + IMX7ULP_TCR);
271 		}
272 
273 		fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE);
274 	} else
275 		fsl_lpspi_intctrl(fsl_lpspi, IER_TDIE);
276 }
277 
278 static void fsl_lpspi_read_rx_fifo(struct fsl_lpspi_data *fsl_lpspi)
279 {
280 	while (!(readl(fsl_lpspi->base + IMX7ULP_RSR) & RSR_RXEMPTY))
281 		fsl_lpspi->rx(fsl_lpspi);
282 }
283 
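/*
 * Program the Transmit Command Register: frame size (bpw - 1), CPOL/CPHA from
 * the SPI mode, the chip select and, in host mode, the prescaler. For PIO
 * host transfers, TCR_CONT/TCR_CONTC keep PCS asserted across successive FIFO
 * refills of the same transfer.
 */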
284 static void fsl_lpspi_set_cmd(struct fsl_lpspi_data *fsl_lpspi)
285 {
286 	u32 temp = 0;
287 
288 	temp |= fsl_lpspi->config.bpw - 1;
289 	temp |= (fsl_lpspi->config.mode & 0x3) << 30;
290 	temp |= (fsl_lpspi->config.chip_select & 0x3) << 24;
291 	if (!fsl_lpspi->is_target) {
292 		temp |= fsl_lpspi->config.prescale << 27;
293 		/*
294 		 * Setting TCR_CONT keeps SS asserted after the current transfer.
295 		 * For the first transfer, clear TCR_CONTC to assert SS.
296 		 * For subsequent transfers, set TCR_CONTC to keep SS asserted.
297 		 */
298 		if (!fsl_lpspi->usedma) {
299 			temp |= TCR_CONT;
300 			if (fsl_lpspi->is_first_byte)
301 				temp &= ~TCR_CONTC;
302 			else
303 				temp |= TCR_CONTC;
304 		}
305 	}
306 	writel(temp, fsl_lpspi->base + IMX7ULP_TCR);
307 
308 	dev_dbg(fsl_lpspi->dev, "TCR=0x%x\n", temp);
309 }
310 
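/*
 * Program the FIFO watermarks to half of the chosen watermark value: TX only
 * for DMA transfers, TX and RX for PIO transfers.
 */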
311 static void fsl_lpspi_set_watermark(struct fsl_lpspi_data *fsl_lpspi)
312 {
313 	u32 temp;
314 
315 	if (!fsl_lpspi->usedma)
316 		temp = fsl_lpspi->watermark >> 1 |
317 		       (fsl_lpspi->watermark >> 1) << 16;
318 	else
319 		temp = fsl_lpspi->watermark >> 1;
320 
321 	writel(temp, fsl_lpspi->base + IMX7ULP_FCR);
322 
323 	dev_dbg(fsl_lpspi->dev, "FCR=0x%x\n", temp);
324 }
325 
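/*
 * Pick the smallest TCR prescaler (a power-of-two clock divider) for which
 * the SCK divider fits the 8-bit CCR.SCKDIV field, then program SCKDIV and
 * the derived delay timing values into CCR.
 */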
326 static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
327 {
328 	struct lpspi_config config = fsl_lpspi->config;
329 	unsigned int perclk_rate, div;
330 	u8 prescale_max;
331 	u8 prescale;
332 	int scldiv;
333 
334 	perclk_rate = clk_get_rate(fsl_lpspi->clk_per);
335 	prescale_max = fsl_lpspi->devtype_data->prescale_max ?: 7;
336 
337 	if (!config.speed_hz) {
338 		dev_err(fsl_lpspi->dev,
339 			"error: the transmission speed provided is 0!\n");
340 		return -EINVAL;
341 	}
342 
343 	if (config.speed_hz > perclk_rate / 2) {
344 		div = 2;
345 	} else {
346 		div = DIV_ROUND_UP(perclk_rate, config.speed_hz);
347 	}
348 
349 	for (prescale = 0; prescale <= prescale_max; prescale++) {
350 		scldiv = div / (1 << prescale) - 2;
351 		if (scldiv >= 0 && scldiv < 256) {
352 			fsl_lpspi->config.prescale = prescale;
353 			break;
354 		}
355 	}
356 
357 	if (scldiv < 0 || scldiv >= 256)
358 		return -EINVAL;
359 
360 	writel(scldiv | (scldiv << 8) | ((scldiv >> 1) << 16),
361 					fsl_lpspi->base + IMX7ULP_CCR);
362 
363 	fsl_lpspi->config.effective_speed_hz = perclk_rate / (scldiv + 2) *
364 					       (1 << prescale);
365 
366 	dev_dbg(fsl_lpspi->dev, "perclk=%u, speed=%u, prescale=%u, scldiv=%d\n",
367 		perclk_rate, config.speed_hz, prescale, scldiv);
368 
369 	return 0;
370 }
371 
372 static int fsl_lpspi_dma_configure(struct spi_controller *controller)
373 {
374 	int ret;
375 	enum dma_slave_buswidth buswidth;
376 	struct dma_slave_config rx = {}, tx = {};
377 	struct fsl_lpspi_data *fsl_lpspi =
378 				spi_controller_get_devdata(controller);
379 
380 	switch (fsl_lpspi_bytes_per_word(fsl_lpspi->config.bpw)) {
381 	case 4:
382 		buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
383 		break;
384 	case 2:
385 		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
386 		break;
387 	case 1:
388 		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
389 		break;
390 	default:
391 		return -EINVAL;
392 	}
393 
394 	tx.direction = DMA_MEM_TO_DEV;
395 	tx.dst_addr = fsl_lpspi->base_phys + IMX7ULP_TDR;
396 	tx.dst_addr_width = buswidth;
397 	tx.dst_maxburst = 1;
398 	ret = dmaengine_slave_config(controller->dma_tx, &tx);
399 	if (ret) {
400 		dev_err(fsl_lpspi->dev, "TX dma configuration failed with %d\n",
401 			ret);
402 		return ret;
403 	}
404 
405 	rx.direction = DMA_DEV_TO_MEM;
406 	rx.src_addr = fsl_lpspi->base_phys + IMX7ULP_RDR;
407 	rx.src_addr_width = buswidth;
408 	rx.src_maxburst = 1;
409 	ret = dmaengine_slave_config(controller->dma_rx, &rx);
410 	if (ret) {
411 		dev_err(fsl_lpspi->dev, "RX dma configuration failed with %d\n",
412 			ret);
413 		return ret;
414 	}
415 
416 	return 0;
417 }
418 
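/*
 * Apply the current configuration to the hardware: bitrate (host mode only),
 * FIFO watermarks, CFGR1 (host/target role and PCS polarity), module enable,
 * and the DMA request enables when DMA is used.
 */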
419 static int fsl_lpspi_config(struct fsl_lpspi_data *fsl_lpspi)
420 {
421 	u32 temp;
422 	int ret;
423 
424 	if (!fsl_lpspi->is_target) {
425 		ret = fsl_lpspi_set_bitrate(fsl_lpspi);
426 		if (ret)
427 			return ret;
428 	}
429 
430 	fsl_lpspi_set_watermark(fsl_lpspi);
431 
432 	if (!fsl_lpspi->is_target)
433 		temp = CFGR1_HOST;
434 	else
435 		temp = CFGR1_PINCFG;
436 	if (fsl_lpspi->config.mode & SPI_CS_HIGH)
437 		temp |= FIELD_PREP(CFGR1_PCSPOL_MASK,
438 				   BIT(fsl_lpspi->config.chip_select));
439 
440 	writel(temp, fsl_lpspi->base + IMX7ULP_CFGR1);
441 
442 	temp = readl(fsl_lpspi->base + IMX7ULP_CR);
443 	temp |= CR_RRF | CR_RTF | CR_MEN;
444 	writel(temp, fsl_lpspi->base + IMX7ULP_CR);
445 
446 	temp = 0;
447 	if (fsl_lpspi->usedma)
448 		temp = DER_TDDE | DER_RDDE;
449 	writel(temp, fsl_lpspi->base + IMX7ULP_DER);
450 
451 	return 0;
452 }
453 
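/*
 * Per-transfer setup: capture mode, word size, speed and chip select, select
 * the FIFO accessors matching bits_per_word, size the watermark from the
 * transfer length, and choose between DMA and PIO before configuring the
 * hardware.
 */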
454 static int fsl_lpspi_setup_transfer(struct spi_controller *controller,
455 				     struct spi_device *spi,
456 				     struct spi_transfer *t)
457 {
458 	struct fsl_lpspi_data *fsl_lpspi =
459 				spi_controller_get_devdata(spi->controller);
460 
461 	if (t == NULL)
462 		return -EINVAL;
463 
464 	fsl_lpspi->config.mode = spi->mode;
465 	fsl_lpspi->config.bpw = t->bits_per_word;
466 	fsl_lpspi->config.speed_hz = t->speed_hz;
467 	if (fsl_lpspi->is_only_cs1)
468 		fsl_lpspi->config.chip_select = 1;
469 	else
470 		fsl_lpspi->config.chip_select = spi_get_chipselect(spi, 0);
471 
472 	if (!fsl_lpspi->config.speed_hz)
473 		fsl_lpspi->config.speed_hz = spi->max_speed_hz;
474 	if (!fsl_lpspi->config.bpw)
475 		fsl_lpspi->config.bpw = spi->bits_per_word;
476 
477 	/* Initialize the functions for transfer */
478 	if (fsl_lpspi->config.bpw <= 8) {
479 		fsl_lpspi->rx = fsl_lpspi_buf_rx_u8;
480 		fsl_lpspi->tx = fsl_lpspi_buf_tx_u8;
481 	} else if (fsl_lpspi->config.bpw <= 16) {
482 		fsl_lpspi->rx = fsl_lpspi_buf_rx_u16;
483 		fsl_lpspi->tx = fsl_lpspi_buf_tx_u16;
484 	} else {
485 		fsl_lpspi->rx = fsl_lpspi_buf_rx_u32;
486 		fsl_lpspi->tx = fsl_lpspi_buf_tx_u32;
487 	}
488 
489 	/*
490 	 * t->len is 'unsigned int' while txfifosize and watermark are 'u8', so a
491 	 * type cast inside min_t() is unavoidable. If 'u8' were used, a len greater
492 	 * than 255 would be truncated and produce a wrong watermark. Use
493 	 * 'unsigned int' as the comparison type for min_t() to avoid truncation.
494 	 */
495 	fsl_lpspi->watermark = min_t(unsigned int,
496 				     fsl_lpspi->txfifosize,
497 				     t->len);
498 
499 	if (fsl_lpspi_can_dma(controller, spi, t))
500 		fsl_lpspi->usedma = true;
501 	else
502 		fsl_lpspi->usedma = false;
503 
504 	return fsl_lpspi_config(fsl_lpspi);
505 }
506 
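/*
 * Abort a target-mode transfer by completing whichever completion the
 * transfer path is currently waiting on.
 */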
507 static int fsl_lpspi_target_abort(struct spi_controller *controller)
508 {
509 	struct fsl_lpspi_data *fsl_lpspi =
510 				spi_controller_get_devdata(controller);
511 
512 	fsl_lpspi->target_aborted = true;
513 	if (!fsl_lpspi->usedma)
514 		complete(&fsl_lpspi->xfer_done);
515 	else {
516 		complete(&fsl_lpspi->dma_tx_completion);
517 		complete(&fsl_lpspi->dma_rx_completion);
518 	}
519 
520 	return 0;
521 }
522 
523 static int fsl_lpspi_wait_for_completion(struct spi_controller *controller)
524 {
525 	struct fsl_lpspi_data *fsl_lpspi =
526 				spi_controller_get_devdata(controller);
527 
528 	if (fsl_lpspi->is_target) {
529 		if (wait_for_completion_interruptible(&fsl_lpspi->xfer_done) ||
530 			fsl_lpspi->target_aborted) {
531 			dev_dbg(fsl_lpspi->dev, "interrupted\n");
532 			return -EINTR;
533 		}
534 	} else {
535 		if (!wait_for_completion_timeout(&fsl_lpspi->xfer_done, HZ)) {
536 			dev_dbg(fsl_lpspi->dev, "wait for completion timeout\n");
537 			return -ETIMEDOUT;
538 		}
539 	}
540 
541 	return 0;
542 }
543 
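/*
 * Quiesce the controller after a transfer: mask interrupts for PIO, flush
 * both FIFOs, disable the module and clear the W1C status flags.
 */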
544 static int fsl_lpspi_reset(struct fsl_lpspi_data *fsl_lpspi)
545 {
546 	u32 temp;
547 
548 	if (!fsl_lpspi->usedma) {
549 		/* Disable all interrupts */
550 		fsl_lpspi_intctrl(fsl_lpspi, 0);
551 	}
552 
553 	/* Clear FIFO and disable module */
554 	temp = CR_RRF | CR_RTF;
555 	writel(temp, fsl_lpspi->base + IMX7ULP_CR);
556 
557 	/* W1C for all flags in SR */
558 	writel(SR_CLEAR_MASK, fsl_lpspi->base + IMX7ULP_SR);
559 
560 	return 0;
561 }
562 
563 static void fsl_lpspi_dma_rx_callback(void *cookie)
564 {
565 	struct fsl_lpspi_data *fsl_lpspi = (struct fsl_lpspi_data *)cookie;
566 
567 	complete(&fsl_lpspi->dma_rx_completion);
568 }
569 
570 static void fsl_lpspi_dma_tx_callback(void *cookie)
571 {
572 	struct fsl_lpspi_data *fsl_lpspi = (struct fsl_lpspi_data *)cookie;
573 
574 	complete(&fsl_lpspi->dma_tx_completion);
575 }
576 
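/*
 * Estimate a host-mode DMA timeout (in jiffies) from the transfer size and
 * the requested SPI clock, with ample margin for scheduling latency.
 */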
577 static int fsl_lpspi_calculate_timeout(struct fsl_lpspi_data *fsl_lpspi,
578 				       int size)
579 {
580 	unsigned long timeout = 0;
581 
582 	/* Time for the actual data transfer plus the HW-related CS change delay */
583 	timeout = (8 + 4) * size / fsl_lpspi->config.speed_hz;
584 
585 	/* Add extra second for scheduler related activities */
586 	timeout += 1;
587 
588 	/* Double the calculated timeout */
589 	return secs_to_jiffies(2 * timeout);
590 }
591 
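/*
 * Perform one transfer via the eDMA channels: configure both channels, queue
 * the RX descriptor before TX (data is shifted in while it is shifted out),
 * then wait for both completions. Host mode uses a timeout; target mode waits
 * interruptibly so fsl_lpspi_target_abort() can cancel the transfer.
 */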
592 static int fsl_lpspi_dma_transfer(struct spi_controller *controller,
593 				struct fsl_lpspi_data *fsl_lpspi,
594 				struct spi_transfer *transfer)
595 {
596 	struct dma_async_tx_descriptor *desc_tx, *desc_rx;
597 	unsigned long transfer_timeout;
598 	unsigned long time_left;
599 	struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
600 	int ret;
601 
602 	ret = fsl_lpspi_dma_configure(controller);
603 	if (ret)
604 		return ret;
605 
606 	desc_rx = dmaengine_prep_slave_sg(controller->dma_rx,
607 				rx->sgl, rx->nents, DMA_DEV_TO_MEM,
608 				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
609 	if (!desc_rx)
610 		return -EINVAL;
611 
612 	desc_rx->callback = fsl_lpspi_dma_rx_callback;
613 	desc_rx->callback_param = (void *)fsl_lpspi;
614 	dmaengine_submit(desc_rx);
615 	reinit_completion(&fsl_lpspi->dma_rx_completion);
616 	dma_async_issue_pending(controller->dma_rx);
617 
618 	desc_tx = dmaengine_prep_slave_sg(controller->dma_tx,
619 				tx->sgl, tx->nents, DMA_MEM_TO_DEV,
620 				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
621 	if (!desc_tx) {
622 		dmaengine_terminate_all(controller->dma_tx);
623 		return -EINVAL;
624 	}
625 
626 	desc_tx->callback = fsl_lpspi_dma_tx_callback;
627 	desc_tx->callback_param = (void *)fsl_lpspi;
628 	dmaengine_submit(desc_tx);
629 	reinit_completion(&fsl_lpspi->dma_tx_completion);
630 	dma_async_issue_pending(controller->dma_tx);
631 
632 	fsl_lpspi->target_aborted = false;
633 
634 	if (!fsl_lpspi->is_target) {
635 		transfer_timeout = fsl_lpspi_calculate_timeout(fsl_lpspi,
636 							       transfer->len);
637 
638 		/* Wait for eDMA to finish the data transfer. */
639 		time_left = wait_for_completion_timeout(&fsl_lpspi->dma_tx_completion,
640 							transfer_timeout);
641 		if (!time_left) {
642 			dev_err(fsl_lpspi->dev, "I/O Error in DMA TX\n");
643 			dmaengine_terminate_all(controller->dma_tx);
644 			dmaengine_terminate_all(controller->dma_rx);
645 			fsl_lpspi_reset(fsl_lpspi);
646 			return -ETIMEDOUT;
647 		}
648 
649 		time_left = wait_for_completion_timeout(&fsl_lpspi->dma_rx_completion,
650 							transfer_timeout);
651 		if (!time_left) {
652 			dev_err(fsl_lpspi->dev, "I/O Error in DMA RX\n");
653 			dmaengine_terminate_all(controller->dma_tx);
654 			dmaengine_terminate_all(controller->dma_rx);
655 			fsl_lpspi_reset(fsl_lpspi);
656 			return -ETIMEDOUT;
657 		}
658 	} else {
659 		if (wait_for_completion_interruptible(&fsl_lpspi->dma_tx_completion) ||
660 			fsl_lpspi->target_aborted) {
661 			dev_dbg(fsl_lpspi->dev,
662 				"I/O Error in DMA TX interrupted\n");
663 			dmaengine_terminate_all(controller->dma_tx);
664 			dmaengine_terminate_all(controller->dma_rx);
665 			fsl_lpspi_reset(fsl_lpspi);
666 			return -EINTR;
667 		}
668 
669 		if (wait_for_completion_interruptible(&fsl_lpspi->dma_rx_completion) ||
670 			fsl_lpspi->target_aborted) {
671 			dev_dbg(fsl_lpspi->dev,
672 				"I/O Error in DMA RX interrupted\n");
673 			dmaengine_terminate_all(controller->dma_tx);
674 			dmaengine_terminate_all(controller->dma_rx);
675 			fsl_lpspi_reset(fsl_lpspi);
676 			return -EINTR;
677 		}
678 	}
679 
680 	fsl_lpspi_reset(fsl_lpspi);
681 
682 	return 0;
683 }
684 
685 static void fsl_lpspi_dma_exit(struct spi_controller *controller)
686 {
687 	if (controller->dma_rx) {
688 		dma_release_channel(controller->dma_rx);
689 		controller->dma_rx = NULL;
690 	}
691 
692 	if (controller->dma_tx) {
693 		dma_release_channel(controller->dma_tx);
694 		controller->dma_tx = NULL;
695 	}
696 }
697 
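/*
 * Request the "tx" and "rx" DMA channels; on failure release whatever was
 * obtained so the caller can fall back to PIO mode.
 */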
698 static int fsl_lpspi_dma_init(struct device *dev,
699 			      struct fsl_lpspi_data *fsl_lpspi,
700 			      struct spi_controller *controller)
701 {
702 	int ret;
703 
704 	/* Prepare for TX DMA: */
705 	controller->dma_tx = dma_request_chan(dev, "tx");
706 	if (IS_ERR(controller->dma_tx)) {
707 		ret = PTR_ERR(controller->dma_tx);
708 		dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
709 		controller->dma_tx = NULL;
710 		goto err;
711 	}
712 
713 	/* Prepare for RX DMA: */
714 	controller->dma_rx = dma_request_chan(dev, "rx");
715 	if (IS_ERR(controller->dma_rx)) {
716 		ret = PTR_ERR(controller->dma_rx);
717 		dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
718 		controller->dma_rx = NULL;
719 		goto err;
720 	}
721 
722 	init_completion(&fsl_lpspi->dma_rx_completion);
723 	init_completion(&fsl_lpspi->dma_tx_completion);
724 	controller->can_dma = fsl_lpspi_can_dma;
725 	controller->max_dma_len = FSL_LPSPI_MAX_EDMA_BYTES;
726 
727 	return 0;
728 err:
729 	fsl_lpspi_dma_exit(controller);
730 	return ret;
731 }
732 
733 static int fsl_lpspi_pio_transfer(struct spi_controller *controller,
734 				  struct spi_transfer *t)
735 {
736 	struct fsl_lpspi_data *fsl_lpspi =
737 				spi_controller_get_devdata(controller);
738 	int ret;
739 
740 	fsl_lpspi->tx_buf = t->tx_buf;
741 	fsl_lpspi->rx_buf = t->rx_buf;
742 	fsl_lpspi->remain = t->len;
743 
744 	reinit_completion(&fsl_lpspi->xfer_done);
745 	fsl_lpspi->target_aborted = false;
746 
747 	fsl_lpspi_write_tx_fifo(fsl_lpspi);
748 
749 	ret = fsl_lpspi_wait_for_completion(controller);
750 
751 	fsl_lpspi_reset(fsl_lpspi);
752 
753 	return ret;
754 }
755 
756 static int fsl_lpspi_transfer_one(struct spi_controller *controller,
757 				  struct spi_device *spi,
758 				  struct spi_transfer *t)
759 {
760 	struct fsl_lpspi_data *fsl_lpspi =
761 					spi_controller_get_devdata(controller);
762 	int ret;
763 
764 	fsl_lpspi->is_first_byte = true;
765 	ret = fsl_lpspi_setup_transfer(controller, spi, t);
766 	if (ret < 0)
767 		return ret;
768 
769 	t->effective_speed_hz = fsl_lpspi->config.effective_speed_hz;
770 
771 	fsl_lpspi_set_cmd(fsl_lpspi);
772 	fsl_lpspi->is_first_byte = false;
773 
774 	if (fsl_lpspi->usedma)
775 		ret = fsl_lpspi_dma_transfer(controller, fsl_lpspi, t);
776 	else
777 		ret = fsl_lpspi_pio_transfer(controller, t);
778 	if (ret < 0)
779 		return ret;
780 
781 	return 0;
782 }
783 
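/*
 * PIO interrupt handler: drain the RX FIFO, refill the TX FIFO while data
 * remains, and keep waiting for frame completion while the module is busy or
 * the TX FIFO still holds data; once FCF is raised with nothing pending,
 * signal xfer_done.
 */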
784 static irqreturn_t fsl_lpspi_isr(int irq, void *dev_id)
785 {
786 	u32 temp_SR, temp_IER;
787 	struct fsl_lpspi_data *fsl_lpspi = dev_id;
788 
789 	temp_IER = readl(fsl_lpspi->base + IMX7ULP_IER);
790 	fsl_lpspi_intctrl(fsl_lpspi, 0);
791 	temp_SR = readl(fsl_lpspi->base + IMX7ULP_SR);
792 
793 	fsl_lpspi_read_rx_fifo(fsl_lpspi);
794 
795 	if ((temp_SR & SR_TDF) && (temp_IER & IER_TDIE)) {
796 		fsl_lpspi_write_tx_fifo(fsl_lpspi);
797 		return IRQ_HANDLED;
798 	}
799 
800 	if (temp_SR & SR_MBF ||
801 	    readl(fsl_lpspi->base + IMX7ULP_FSR) & FSR_TXCOUNT) {
802 		writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR);
803 		fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE | (temp_IER & IER_TDIE));
804 		return IRQ_HANDLED;
805 	}
806 
807 	if (temp_SR & SR_FCF && (temp_IER & IER_FCIE)) {
808 		writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR);
809 		complete(&fsl_lpspi->xfer_done);
810 		return IRQ_HANDLED;
811 	}
812 
813 	return IRQ_NONE;
814 }
815 
816 #ifdef CONFIG_PM
817 static int fsl_lpspi_runtime_resume(struct device *dev)
818 {
819 	struct spi_controller *controller = dev_get_drvdata(dev);
820 	struct fsl_lpspi_data *fsl_lpspi;
821 	int ret;
822 
823 	fsl_lpspi = spi_controller_get_devdata(controller);
824 
825 	ret = clk_prepare_enable(fsl_lpspi->clk_per);
826 	if (ret)
827 		return ret;
828 
829 	ret = clk_prepare_enable(fsl_lpspi->clk_ipg);
830 	if (ret) {
831 		clk_disable_unprepare(fsl_lpspi->clk_per);
832 		return ret;
833 	}
834 
835 	return 0;
836 }
837 
838 static int fsl_lpspi_runtime_suspend(struct device *dev)
839 {
840 	struct spi_controller *controller = dev_get_drvdata(dev);
841 	struct fsl_lpspi_data *fsl_lpspi;
842 
843 	fsl_lpspi = spi_controller_get_devdata(controller);
844 
845 	clk_disable_unprepare(fsl_lpspi->clk_per);
846 	clk_disable_unprepare(fsl_lpspi->clk_ipg);
847 
848 	return 0;
849 }
850 #endif
851 
852 static int fsl_lpspi_init_rpm(struct fsl_lpspi_data *fsl_lpspi)
853 {
854 	struct device *dev = fsl_lpspi->dev;
855 
856 	pm_runtime_enable(dev);
857 	pm_runtime_set_autosuspend_delay(dev, FSL_LPSPI_RPM_TIMEOUT);
858 	pm_runtime_use_autosuspend(dev);
859 
860 	return 0;
861 }
862 
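/*
 * Probe: allocate a host or target controller depending on the "spi-slave"
 * property, map the registers, request the IRQ and clocks, read the FIFO
 * sizes and chip-select count from the PARAM register, and fall back to
 * IRQ-driven PIO if the DMA channels cannot be obtained.
 */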
863 static int fsl_lpspi_probe(struct platform_device *pdev)
864 {
865 	const struct fsl_lpspi_devtype_data *devtype_data;
866 	struct fsl_lpspi_data *fsl_lpspi;
867 	struct spi_controller *controller;
868 	struct resource *res;
869 	int ret, irq;
870 	u32 num_cs;
871 	u32 temp;
872 	bool is_target;
873 
874 	devtype_data = of_device_get_match_data(&pdev->dev);
875 	if (!devtype_data)
876 		return -ENODEV;
877 
878 	is_target = of_property_read_bool((&pdev->dev)->of_node, "spi-slave");
879 	if (is_target)
880 		controller = devm_spi_alloc_target(&pdev->dev,
881 						   sizeof(struct fsl_lpspi_data));
882 	else
883 		controller = devm_spi_alloc_host(&pdev->dev,
884 						 sizeof(struct fsl_lpspi_data));
885 
886 	if (!controller)
887 		return -ENOMEM;
888 
889 	platform_set_drvdata(pdev, controller);
890 
891 	fsl_lpspi = spi_controller_get_devdata(controller);
892 	fsl_lpspi->dev = &pdev->dev;
893 	fsl_lpspi->is_target = is_target;
894 	fsl_lpspi->is_only_cs1 = of_property_read_bool((&pdev->dev)->of_node,
895 						"fsl,spi-only-use-cs1-sel");
896 	fsl_lpspi->devtype_data = devtype_data;
897 
898 	init_completion(&fsl_lpspi->xfer_done);
899 
900 	fsl_lpspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
901 	if (IS_ERR(fsl_lpspi->base)) {
902 		ret = PTR_ERR(fsl_lpspi->base);
903 		return ret;
904 	}
905 	fsl_lpspi->base_phys = res->start;
906 
907 	irq = platform_get_irq(pdev, 0);
908 	if (irq < 0) {
909 		ret = irq;
910 		return ret;
911 	}
912 
913 	ret = devm_request_irq(&pdev->dev, irq, fsl_lpspi_isr, IRQF_NO_AUTOEN,
914 			       dev_name(&pdev->dev), fsl_lpspi);
915 	if (ret) {
916 		dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
917 		return ret;
918 	}
919 
920 	fsl_lpspi->clk_per = devm_clk_get(&pdev->dev, "per");
921 	if (IS_ERR(fsl_lpspi->clk_per)) {
922 		ret = PTR_ERR(fsl_lpspi->clk_per);
923 		return ret;
924 	}
925 
926 	fsl_lpspi->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
927 	if (IS_ERR(fsl_lpspi->clk_ipg)) {
928 		ret = PTR_ERR(fsl_lpspi->clk_ipg);
929 		return ret;
930 	}
931 
932 	/* enable the clock */
933 	ret = fsl_lpspi_init_rpm(fsl_lpspi);
934 	if (ret)
935 		return ret;
936 
937 	ret = pm_runtime_get_sync(fsl_lpspi->dev);
938 	if (ret < 0) {
939 		dev_err(fsl_lpspi->dev, "failed to enable clock\n");
940 		goto out_pm_get;
941 	}
942 
943 	temp = readl(fsl_lpspi->base + IMX7ULP_PARAM);
944 	fsl_lpspi->txfifosize = 1 << (temp & 0x0f);
945 	fsl_lpspi->rxfifosize = 1 << ((temp >> 8) & 0x0f);
946 	if (of_property_read_u32((&pdev->dev)->of_node, "num-cs",
947 				 &num_cs)) {
948 		if (devtype_data->query_hw_for_num_cs)
949 			num_cs = ((temp >> 16) & 0xf);
950 		else
951 			num_cs = 1;
952 	}
953 
954 	controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
955 	controller->transfer_one = fsl_lpspi_transfer_one;
956 	controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
957 	controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
958 	controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
959 	controller->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
960 	controller->dev.of_node = pdev->dev.of_node;
961 	controller->bus_num = pdev->id;
962 	controller->num_chipselect = num_cs;
963 	controller->target_abort = fsl_lpspi_target_abort;
964 	if (!fsl_lpspi->is_target)
965 		controller->use_gpio_descriptors = true;
966 
967 	ret = fsl_lpspi_dma_init(&pdev->dev, fsl_lpspi, controller);
968 	if (ret == -EPROBE_DEFER)
969 		goto out_pm_get;
970 	if (ret < 0) {
971 		dev_warn(&pdev->dev, "dma setup error %d, use pio\n", ret);
972 		enable_irq(irq);
973 	}
974 
975 	ret = devm_spi_register_controller(&pdev->dev, controller);
976 	if (ret < 0) {
977 		dev_err_probe(&pdev->dev, ret, "spi_register_controller error\n");
978 		goto free_dma;
979 	}
980 
981 	pm_runtime_put_autosuspend(fsl_lpspi->dev);
982 
983 	return 0;
984 
985 free_dma:
986 	fsl_lpspi_dma_exit(controller);
987 out_pm_get:
988 	pm_runtime_dont_use_autosuspend(fsl_lpspi->dev);
989 	pm_runtime_put_sync(fsl_lpspi->dev);
990 	pm_runtime_disable(fsl_lpspi->dev);
991 
992 	return ret;
993 }
994 
995 static void fsl_lpspi_remove(struct platform_device *pdev)
996 {
997 	struct spi_controller *controller = platform_get_drvdata(pdev);
998 	struct fsl_lpspi_data *fsl_lpspi =
999 				spi_controller_get_devdata(controller);
1000 
1001 	fsl_lpspi_dma_exit(controller);
1002 
1003 	pm_runtime_dont_use_autosuspend(fsl_lpspi->dev);
1004 	pm_runtime_disable(fsl_lpspi->dev);
1005 }
1006 
1007 static int fsl_lpspi_suspend(struct device *dev)
1008 {
1009 	pinctrl_pm_select_sleep_state(dev);
1010 	return pm_runtime_force_suspend(dev);
1011 }
1012 
1013 static int fsl_lpspi_resume(struct device *dev)
1014 {
1015 	int ret;
1016 
1017 	ret = pm_runtime_force_resume(dev);
1018 	if (ret) {
1019 		dev_err(dev, "Error in resume: %d\n", ret);
1020 		return ret;
1021 	}
1022 
1023 	pinctrl_pm_select_default_state(dev);
1024 
1025 	return 0;
1026 }
1027 
1028 static const struct dev_pm_ops fsl_lpspi_pm_ops = {
1029 	SET_RUNTIME_PM_OPS(fsl_lpspi_runtime_suspend,
1030 				fsl_lpspi_runtime_resume, NULL)
1031 	SYSTEM_SLEEP_PM_OPS(fsl_lpspi_suspend, fsl_lpspi_resume)
1032 };
1033 
1034 static struct platform_driver fsl_lpspi_driver = {
1035 	.driver = {
1036 		.name = DRIVER_NAME,
1037 		.of_match_table = fsl_lpspi_dt_ids,
1038 		.pm = pm_ptr(&fsl_lpspi_pm_ops),
1039 	},
1040 	.probe = fsl_lpspi_probe,
1041 	.remove = fsl_lpspi_remove,
1042 };
1043 module_platform_driver(fsl_lpspi_driver);
1044 
1045 MODULE_DESCRIPTION("LPSPI Controller driver");
1046 MODULE_AUTHOR("Gao Pan <pandy.gao@nxp.com>");
1047 MODULE_LICENSE("GPL");
1048