xref: /linux/drivers/spi/spi-fsl-lpspi.c (revision 55d0969c451159cff86949b38c39171cab962069)
// SPDX-License-Identifier: GPL-2.0+
//
// Freescale i.MX7ULP LPSPI driver
//
// Copyright 2016 Freescale Semiconductor, Inc.
// Copyright 2018 NXP Semiconductors

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/dma/imx-dma.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/types.h>

#define DRIVER_NAME "fsl_lpspi"

#define FSL_LPSPI_RPM_TIMEOUT 50 /* 50ms */

/* The maximum number of bytes that eDMA can transfer in one go. */
#define FSL_LPSPI_MAX_EDMA_BYTES  ((1 << 15) - 1)

/* i.MX7ULP LPSPI registers */
#define IMX7ULP_VERID	0x0
#define IMX7ULP_PARAM	0x4
#define IMX7ULP_CR	0x10
#define IMX7ULP_SR	0x14
#define IMX7ULP_IER	0x18
#define IMX7ULP_DER	0x1c
#define IMX7ULP_CFGR0	0x20
#define IMX7ULP_CFGR1	0x24
#define IMX7ULP_DMR0	0x30
#define IMX7ULP_DMR1	0x34
#define IMX7ULP_CCR	0x40
#define IMX7ULP_FCR	0x58
#define IMX7ULP_FSR	0x5c
#define IMX7ULP_TCR	0x60
#define IMX7ULP_TDR	0x64
#define IMX7ULP_RSR	0x70
#define IMX7ULP_RDR	0x74

/* General control register field definitions */
#define CR_RRF		BIT(9)
#define CR_RTF		BIT(8)
#define CR_RST		BIT(1)
#define CR_MEN		BIT(0)
#define SR_MBF		BIT(24)
#define SR_TCF		BIT(10)
#define SR_FCF		BIT(9)
#define SR_RDF		BIT(1)
#define SR_TDF		BIT(0)
#define IER_TCIE	BIT(10)
#define IER_FCIE	BIT(9)
#define IER_RDIE	BIT(1)
#define IER_TDIE	BIT(0)
#define DER_RDDE	BIT(1)
#define DER_TDDE	BIT(0)
#define CFGR1_PCSCFG	BIT(27)
#define CFGR1_PINCFG	(BIT(24)|BIT(25))
#define CFGR1_PCSPOL	BIT(8)
#define CFGR1_NOSTALL	BIT(3)
#define CFGR1_HOST	BIT(0)
#define FSR_TXCOUNT	(0xFF)
#define RSR_RXEMPTY	BIT(1)
#define TCR_CPOL	BIT(31)
#define TCR_CPHA	BIT(30)
#define TCR_CONT	BIT(21)
#define TCR_CONTC	BIT(20)
#define TCR_RXMSK	BIT(19)
#define TCR_TXMSK	BIT(18)

struct fsl_lpspi_devtype_data {
	u8 prescale_max;
};

struct lpspi_config {
	u8 bpw;
	u8 chip_select;
	u8 prescale;
	u16 mode;
	u32 speed_hz;
};

struct fsl_lpspi_data {
	struct device *dev;
	void __iomem *base;
	unsigned long base_phys;
	struct clk *clk_ipg;
	struct clk *clk_per;
	bool is_target;
	bool is_only_cs1;
	bool is_first_byte;

	void *rx_buf;
	const void *tx_buf;
	void (*tx)(struct fsl_lpspi_data *);
	void (*rx)(struct fsl_lpspi_data *);

	u32 remain;
	u8 watermark;
	u8 txfifosize;
	u8 rxfifosize;

	struct lpspi_config config;
	struct completion xfer_done;

	bool target_aborted;

	/* DMA */
	bool usedma;
	struct completion dma_rx_completion;
	struct completion dma_tx_completion;

	const struct fsl_lpspi_devtype_data *devtype_data;
};

/*
 * prescale_max depends on whether erratum ERR051608 has been fixed
 * on the SoC; see:
 * https://www.nxp.com/docs/en/errata/i.MX93_1P87f.pdf
 */
static struct fsl_lpspi_devtype_data imx93_lpspi_devtype_data = {
	.prescale_max = 1,
};

static struct fsl_lpspi_devtype_data imx7ulp_lpspi_devtype_data = {
	.prescale_max = 7,
};

static const struct of_device_id fsl_lpspi_dt_ids[] = {
	{ .compatible = "fsl,imx7ulp-spi", .data = &imx7ulp_lpspi_devtype_data,},
	{ .compatible = "fsl,imx93-spi", .data = &imx93_lpspi_devtype_data,},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_lpspi_dt_ids);

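/*
 * Generate single-word FIFO accessors for 8-, 16- and 32-bit words: the RX
 * helpers pop one word from RDR into the buffer, the TX helpers push one
 * word to TDR, advancing the buffer and decrementing the remaining count.
 */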
#define LPSPI_BUF_RX(type)						\
static void fsl_lpspi_buf_rx_##type(struct fsl_lpspi_data *fsl_lpspi)	\
{									\
	unsigned int val = readl(fsl_lpspi->base + IMX7ULP_RDR);	\
									\
	if (fsl_lpspi->rx_buf) {					\
		*(type *)fsl_lpspi->rx_buf = val;			\
		fsl_lpspi->rx_buf += sizeof(type);			\
	}								\
}

#define LPSPI_BUF_TX(type)						\
static void fsl_lpspi_buf_tx_##type(struct fsl_lpspi_data *fsl_lpspi)	\
{									\
	type val = 0;							\
									\
	if (fsl_lpspi->tx_buf) {					\
		val = *(type *)fsl_lpspi->tx_buf;			\
		fsl_lpspi->tx_buf += sizeof(type);			\
	}								\
									\
	fsl_lpspi->remain -= sizeof(type);				\
	writel(val, fsl_lpspi->base + IMX7ULP_TDR);			\
}

LPSPI_BUF_RX(u8)
LPSPI_BUF_TX(u8)
LPSPI_BUF_RX(u16)
LPSPI_BUF_TX(u16)
LPSPI_BUF_RX(u32)
LPSPI_BUF_TX(u32)

static void fsl_lpspi_intctrl(struct fsl_lpspi_data *fsl_lpspi,
			      unsigned int enable)
{
	writel(enable, fsl_lpspi->base + IMX7ULP_IER);
}

static int fsl_lpspi_bytes_per_word(const int bpw)
{
	return DIV_ROUND_UP(bpw, BITS_PER_BYTE);
}

static bool fsl_lpspi_can_dma(struct spi_controller *controller,
			      struct spi_device *spi,
			      struct spi_transfer *transfer)
{
	unsigned int bytes_per_word;

	if (!controller->dma_rx)
		return false;

	bytes_per_word = fsl_lpspi_bytes_per_word(transfer->bits_per_word);

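	/* fsl_lpspi_dma_configure() only handles 1-, 2- and 4-byte bus widths. */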
	switch (bytes_per_word) {
	case 1:
	case 2:
	case 4:
		break;
	default:
		return false;
	}

	return true;
}

static int lpspi_prepare_xfer_hardware(struct spi_controller *controller)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);
	int ret;

	ret = pm_runtime_resume_and_get(fsl_lpspi->dev);
	if (ret < 0) {
		dev_err(fsl_lpspi->dev, "failed to enable clock\n");
		return ret;
	}

	return 0;
}

static int lpspi_unprepare_xfer_hardware(struct spi_controller *controller)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);

	pm_runtime_mark_last_busy(fsl_lpspi->dev);
	pm_runtime_put_autosuspend(fsl_lpspi->dev);

	return 0;
}

static void fsl_lpspi_write_tx_fifo(struct fsl_lpspi_data *fsl_lpspi)
{
	u8 txfifo_cnt;
	u32 temp;

	txfifo_cnt = readl(fsl_lpspi->base + IMX7ULP_FSR) & 0xff;

	while (txfifo_cnt < fsl_lpspi->txfifosize) {
		if (!fsl_lpspi->remain)
			break;
		fsl_lpspi->tx(fsl_lpspi);
		txfifo_cnt++;
	}

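	/*
	 * If the FIFO is not full after the loop, everything left has been
	 * queued: in host mode clear CONTC so PCS deasserts once this frame
	 * completes, then wait for the frame-complete interrupt. Otherwise
	 * wait for the next TX-data request to refill the FIFO.
	 */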
	if (txfifo_cnt < fsl_lpspi->txfifosize) {
		if (!fsl_lpspi->is_target) {
			temp = readl(fsl_lpspi->base + IMX7ULP_TCR);
			temp &= ~TCR_CONTC;
			writel(temp, fsl_lpspi->base + IMX7ULP_TCR);
		}

		fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE);
	} else
		fsl_lpspi_intctrl(fsl_lpspi, IER_TDIE);
}

static void fsl_lpspi_read_rx_fifo(struct fsl_lpspi_data *fsl_lpspi)
{
	while (!(readl(fsl_lpspi->base + IMX7ULP_RSR) & RSR_RXEMPTY))
		fsl_lpspi->rx(fsl_lpspi);
}

static void fsl_lpspi_set_cmd(struct fsl_lpspi_data *fsl_lpspi)
{
	u32 temp = 0;

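	/*
	 * Assemble the Transmit Command Register: the frame size field in
	 * bits 0-11 is bits-per-word minus one, CPHA/CPOL land in bits 30-31,
	 * the PCS select in bits 24-25 and, in host mode, the prescaler in
	 * bits 27-29, matching the shifts used below.
	 */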
	temp |= fsl_lpspi->config.bpw - 1;
	temp |= (fsl_lpspi->config.mode & 0x3) << 30;
	temp |= (fsl_lpspi->config.chip_select & 0x3) << 24;
	if (!fsl_lpspi->is_target) {
		temp |= fsl_lpspi->config.prescale << 27;
		/*
		 * Setting TCR_CONT keeps SS asserted after the current transfer.
		 * For the first transfer, clear TCR_CONTC to assert SS.
		 * For subsequent transfers, set TCR_CONTC to keep SS asserted.
		 */
		if (!fsl_lpspi->usedma) {
			temp |= TCR_CONT;
			if (fsl_lpspi->is_first_byte)
				temp &= ~TCR_CONTC;
			else
				temp |= TCR_CONTC;
		}
	}
	writel(temp, fsl_lpspi->base + IMX7ULP_TCR);

	dev_dbg(fsl_lpspi->dev, "TCR=0x%x\n", temp);
}

static void fsl_lpspi_set_watermark(struct fsl_lpspi_data *fsl_lpspi)
{
	u32 temp;

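	/*
	 * FCR takes the TX watermark in its low byte and the RX watermark in
	 * bits 16-23; both are programmed to half the chosen watermark. In
	 * DMA mode only the TX watermark is set and the RX watermark is left
	 * at zero.
	 */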
	if (!fsl_lpspi->usedma)
		temp = fsl_lpspi->watermark >> 1 |
		       (fsl_lpspi->watermark >> 1) << 16;
	else
		temp = fsl_lpspi->watermark >> 1;

	writel(temp, fsl_lpspi->base + IMX7ULP_FCR);

	dev_dbg(fsl_lpspi->dev, "FCR=0x%x\n", temp);
}

static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
{
	struct lpspi_config config = fsl_lpspi->config;
	unsigned int perclk_rate, scldiv, div;
	u8 prescale_max;
	u8 prescale;

	perclk_rate = clk_get_rate(fsl_lpspi->clk_per);
	prescale_max = fsl_lpspi->devtype_data->prescale_max;

	if (!config.speed_hz) {
		dev_err(fsl_lpspi->dev,
			"error: the transmission speed provided is 0!\n");
		return -EINVAL;
	}

	if (config.speed_hz > perclk_rate / 2) {
		dev_err(fsl_lpspi->dev,
			"per-clk must be at least two times the transfer speed\n");
		return -EINVAL;
	}

	div = DIV_ROUND_UP(perclk_rate, config.speed_hz);

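	/*
	 * The resulting SCK rate is perclk_rate / ((scldiv + 2) << prescale),
	 * so pick the smallest prescaler that lets scldiv fit in 8 bits.
	 * For example, perclk_rate = 48 MHz and speed_hz = 10 MHz gives
	 * div = 5, prescale = 0 and scldiv = 3, i.e. an actual SCK of 9.6 MHz.
	 */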
	for (prescale = 0; prescale <= prescale_max; prescale++) {
		scldiv = div / (1 << prescale) - 2;
		if (scldiv < 256) {
			fsl_lpspi->config.prescale = prescale;
			break;
		}
	}

	if (scldiv >= 256)
		return -EINVAL;

	writel(scldiv | (scldiv << 8) | ((scldiv >> 1) << 16),
					fsl_lpspi->base + IMX7ULP_CCR);

	dev_dbg(fsl_lpspi->dev, "perclk=%d, speed=%d, prescale=%d, scldiv=%d\n",
		perclk_rate, config.speed_hz, prescale, scldiv);

	return 0;
}

static int fsl_lpspi_dma_configure(struct spi_controller *controller)
{
	int ret;
	enum dma_slave_buswidth buswidth;
	struct dma_slave_config rx = {}, tx = {};
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);

	switch (fsl_lpspi_bytes_per_word(fsl_lpspi->config.bpw)) {
	case 4:
		buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
		break;
	case 2:
		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
		break;
	case 1:
		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
		break;
	default:
		return -EINVAL;
	}

	tx.direction = DMA_MEM_TO_DEV;
	tx.dst_addr = fsl_lpspi->base_phys + IMX7ULP_TDR;
	tx.dst_addr_width = buswidth;
	tx.dst_maxburst = 1;
	ret = dmaengine_slave_config(controller->dma_tx, &tx);
	if (ret) {
		dev_err(fsl_lpspi->dev, "TX dma configuration failed with %d\n",
			ret);
		return ret;
	}

	rx.direction = DMA_DEV_TO_MEM;
	rx.src_addr = fsl_lpspi->base_phys + IMX7ULP_RDR;
	rx.src_addr_width = buswidth;
	rx.src_maxburst = 1;
	ret = dmaengine_slave_config(controller->dma_rx, &rx);
	if (ret) {
		dev_err(fsl_lpspi->dev, "RX dma configuration failed with %d\n",
			ret);
		return ret;
	}

	return 0;
}

static int fsl_lpspi_config(struct fsl_lpspi_data *fsl_lpspi)
{
	u32 temp;
	int ret;

	if (!fsl_lpspi->is_target) {
		ret = fsl_lpspi_set_bitrate(fsl_lpspi);
		if (ret)
			return ret;
	}

	fsl_lpspi_set_watermark(fsl_lpspi);

	if (!fsl_lpspi->is_target)
		temp = CFGR1_HOST;
	else
		temp = CFGR1_PINCFG;
	if (fsl_lpspi->config.mode & SPI_CS_HIGH)
		temp |= CFGR1_PCSPOL;
	writel(temp, fsl_lpspi->base + IMX7ULP_CFGR1);

	temp = readl(fsl_lpspi->base + IMX7ULP_CR);
	temp |= CR_RRF | CR_RTF | CR_MEN;
	writel(temp, fsl_lpspi->base + IMX7ULP_CR);

	temp = 0;
	if (fsl_lpspi->usedma)
		temp = DER_TDDE | DER_RDDE;
	writel(temp, fsl_lpspi->base + IMX7ULP_DER);

	return 0;
}

static int fsl_lpspi_setup_transfer(struct spi_controller *controller,
				     struct spi_device *spi,
				     struct spi_transfer *t)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(spi->controller);

	if (t == NULL)
		return -EINVAL;

	fsl_lpspi->config.mode = spi->mode;
	fsl_lpspi->config.bpw = t->bits_per_word;
	fsl_lpspi->config.speed_hz = t->speed_hz;
	if (fsl_lpspi->is_only_cs1)
		fsl_lpspi->config.chip_select = 1;
	else
		fsl_lpspi->config.chip_select = spi_get_chipselect(spi, 0);

	if (!fsl_lpspi->config.speed_hz)
		fsl_lpspi->config.speed_hz = spi->max_speed_hz;
	if (!fsl_lpspi->config.bpw)
		fsl_lpspi->config.bpw = spi->bits_per_word;

	/* Initialize the functions for transfer */
	if (fsl_lpspi->config.bpw <= 8) {
		fsl_lpspi->rx = fsl_lpspi_buf_rx_u8;
		fsl_lpspi->tx = fsl_lpspi_buf_tx_u8;
	} else if (fsl_lpspi->config.bpw <= 16) {
		fsl_lpspi->rx = fsl_lpspi_buf_rx_u16;
		fsl_lpspi->tx = fsl_lpspi_buf_tx_u16;
	} else {
		fsl_lpspi->rx = fsl_lpspi_buf_rx_u32;
		fsl_lpspi->tx = fsl_lpspi_buf_tx_u32;
	}

	if (t->len <= fsl_lpspi->txfifosize)
		fsl_lpspi->watermark = t->len;
	else
		fsl_lpspi->watermark = fsl_lpspi->txfifosize;

	if (fsl_lpspi_can_dma(controller, spi, t))
		fsl_lpspi->usedma = true;
	else
		fsl_lpspi->usedma = false;

	return fsl_lpspi_config(fsl_lpspi);
}

static int fsl_lpspi_target_abort(struct spi_controller *controller)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);

	fsl_lpspi->target_aborted = true;
	if (!fsl_lpspi->usedma)
		complete(&fsl_lpspi->xfer_done);
	else {
		complete(&fsl_lpspi->dma_tx_completion);
		complete(&fsl_lpspi->dma_rx_completion);
	}

	return 0;
}

static int fsl_lpspi_wait_for_completion(struct spi_controller *controller)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);

	if (fsl_lpspi->is_target) {
		if (wait_for_completion_interruptible(&fsl_lpspi->xfer_done) ||
			fsl_lpspi->target_aborted) {
			dev_dbg(fsl_lpspi->dev, "interrupted\n");
			return -EINTR;
		}
	} else {
		if (!wait_for_completion_timeout(&fsl_lpspi->xfer_done, HZ)) {
			dev_dbg(fsl_lpspi->dev, "wait for completion timeout\n");
			return -ETIMEDOUT;
		}
	}

	return 0;
}

static int fsl_lpspi_reset(struct fsl_lpspi_data *fsl_lpspi)
{
	u32 temp;

	if (!fsl_lpspi->usedma) {
		/* Disable all interrupts */
		fsl_lpspi_intctrl(fsl_lpspi, 0);
	}

	/* W1C (write 1 to clear) all flags in SR */
	temp = 0x3F << 8;
	writel(temp, fsl_lpspi->base + IMX7ULP_SR);

	/* Clear FIFOs and disable the module */
	temp = CR_RRF | CR_RTF;
	writel(temp, fsl_lpspi->base + IMX7ULP_CR);

	return 0;
}

static void fsl_lpspi_dma_rx_callback(void *cookie)
{
	struct fsl_lpspi_data *fsl_lpspi = (struct fsl_lpspi_data *)cookie;

	complete(&fsl_lpspi->dma_rx_completion);
}

static void fsl_lpspi_dma_tx_callback(void *cookie)
{
	struct fsl_lpspi_data *fsl_lpspi = (struct fsl_lpspi_data *)cookie;

	complete(&fsl_lpspi->dma_tx_completion);
}

static int fsl_lpspi_calculate_timeout(struct fsl_lpspi_data *fsl_lpspi,
				       int size)
{
	unsigned long timeout = 0;

	/* Time for the actual data transfer plus the HW-related CS change delay */
	timeout = (8 + 4) * size / fsl_lpspi->config.speed_hz;

	/* Add an extra second for scheduler-related activities */
	timeout += 1;

	/* Double the calculated timeout */
	return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC);
}

static int fsl_lpspi_dma_transfer(struct spi_controller *controller,
				struct fsl_lpspi_data *fsl_lpspi,
				struct spi_transfer *transfer)
{
	struct dma_async_tx_descriptor *desc_tx, *desc_rx;
	unsigned long transfer_timeout;
	unsigned long time_left;
	struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
	int ret;

	ret = fsl_lpspi_dma_configure(controller);
	if (ret)
		return ret;

	desc_rx = dmaengine_prep_slave_sg(controller->dma_rx,
				rx->sgl, rx->nents, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_rx)
		return -EINVAL;

	desc_rx->callback = fsl_lpspi_dma_rx_callback;
	desc_rx->callback_param = (void *)fsl_lpspi;
	dmaengine_submit(desc_rx);
	reinit_completion(&fsl_lpspi->dma_rx_completion);
	dma_async_issue_pending(controller->dma_rx);

	desc_tx = dmaengine_prep_slave_sg(controller->dma_tx,
				tx->sgl, tx->nents, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_tx) {
		dmaengine_terminate_all(controller->dma_tx);
		return -EINVAL;
	}

	desc_tx->callback = fsl_lpspi_dma_tx_callback;
	desc_tx->callback_param = (void *)fsl_lpspi;
	dmaengine_submit(desc_tx);
	reinit_completion(&fsl_lpspi->dma_tx_completion);
	dma_async_issue_pending(controller->dma_tx);

	fsl_lpspi->target_aborted = false;

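	/*
	 * A host transfer has a bounded duration, so wait for the eDMA
	 * completions with a timeout; a target transfer can legitimately
	 * wait for the remote host indefinitely and is only bounded by a
	 * signal or a target_abort call.
	 */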
	if (!fsl_lpspi->is_target) {
		transfer_timeout = fsl_lpspi_calculate_timeout(fsl_lpspi,
							       transfer->len);

		/* Wait for eDMA to finish the data transfer. */
		time_left = wait_for_completion_timeout(&fsl_lpspi->dma_tx_completion,
							transfer_timeout);
		if (!time_left) {
			dev_err(fsl_lpspi->dev, "I/O Error in DMA TX\n");
			dmaengine_terminate_all(controller->dma_tx);
			dmaengine_terminate_all(controller->dma_rx);
			fsl_lpspi_reset(fsl_lpspi);
			return -ETIMEDOUT;
		}

		time_left = wait_for_completion_timeout(&fsl_lpspi->dma_rx_completion,
							transfer_timeout);
		if (!time_left) {
			dev_err(fsl_lpspi->dev, "I/O Error in DMA RX\n");
			dmaengine_terminate_all(controller->dma_tx);
			dmaengine_terminate_all(controller->dma_rx);
			fsl_lpspi_reset(fsl_lpspi);
			return -ETIMEDOUT;
		}
	} else {
		if (wait_for_completion_interruptible(&fsl_lpspi->dma_tx_completion) ||
			fsl_lpspi->target_aborted) {
			dev_dbg(fsl_lpspi->dev,
				"I/O Error in DMA TX interrupted\n");
			dmaengine_terminate_all(controller->dma_tx);
			dmaengine_terminate_all(controller->dma_rx);
			fsl_lpspi_reset(fsl_lpspi);
			return -EINTR;
		}

		if (wait_for_completion_interruptible(&fsl_lpspi->dma_rx_completion) ||
			fsl_lpspi->target_aborted) {
			dev_dbg(fsl_lpspi->dev,
				"I/O Error in DMA RX interrupted\n");
			dmaengine_terminate_all(controller->dma_tx);
			dmaengine_terminate_all(controller->dma_rx);
			fsl_lpspi_reset(fsl_lpspi);
			return -EINTR;
		}
	}

	fsl_lpspi_reset(fsl_lpspi);

	return 0;
}

static void fsl_lpspi_dma_exit(struct spi_controller *controller)
{
	if (controller->dma_rx) {
		dma_release_channel(controller->dma_rx);
		controller->dma_rx = NULL;
	}

	if (controller->dma_tx) {
		dma_release_channel(controller->dma_tx);
		controller->dma_tx = NULL;
	}
}

static int fsl_lpspi_dma_init(struct device *dev,
			      struct fsl_lpspi_data *fsl_lpspi,
			      struct spi_controller *controller)
{
	int ret;

	/* Prepare for TX DMA: */
	controller->dma_tx = dma_request_chan(dev, "tx");
	if (IS_ERR(controller->dma_tx)) {
		ret = PTR_ERR(controller->dma_tx);
		dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
		controller->dma_tx = NULL;
		goto err;
	}

	/* Prepare for RX DMA: */
	controller->dma_rx = dma_request_chan(dev, "rx");
	if (IS_ERR(controller->dma_rx)) {
		ret = PTR_ERR(controller->dma_rx);
		dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
		controller->dma_rx = NULL;
		goto err;
	}

	init_completion(&fsl_lpspi->dma_rx_completion);
	init_completion(&fsl_lpspi->dma_tx_completion);
	controller->can_dma = fsl_lpspi_can_dma;
	controller->max_dma_len = FSL_LPSPI_MAX_EDMA_BYTES;

	return 0;
err:
	fsl_lpspi_dma_exit(controller);
	return ret;
}

static int fsl_lpspi_pio_transfer(struct spi_controller *controller,
				  struct spi_transfer *t)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);
	int ret;

	fsl_lpspi->tx_buf = t->tx_buf;
	fsl_lpspi->rx_buf = t->rx_buf;
	fsl_lpspi->remain = t->len;

	reinit_completion(&fsl_lpspi->xfer_done);
	fsl_lpspi->target_aborted = false;

	fsl_lpspi_write_tx_fifo(fsl_lpspi);

	ret = fsl_lpspi_wait_for_completion(controller);
	if (ret)
		return ret;

	fsl_lpspi_reset(fsl_lpspi);

	return 0;
}

static int fsl_lpspi_transfer_one(struct spi_controller *controller,
				  struct spi_device *spi,
				  struct spi_transfer *t)
{
	struct fsl_lpspi_data *fsl_lpspi =
					spi_controller_get_devdata(controller);
	int ret;

	fsl_lpspi->is_first_byte = true;
	ret = fsl_lpspi_setup_transfer(controller, spi, t);
	if (ret < 0)
		return ret;

	fsl_lpspi_set_cmd(fsl_lpspi);
	fsl_lpspi->is_first_byte = false;

	if (fsl_lpspi->usedma)
		ret = fsl_lpspi_dma_transfer(controller, fsl_lpspi, t);
	else
		ret = fsl_lpspi_pio_transfer(controller, t);
	if (ret < 0)
		return ret;

	return 0;
}

static irqreturn_t fsl_lpspi_isr(int irq, void *dev_id)
{
	u32 temp_SR, temp_IER;
	struct fsl_lpspi_data *fsl_lpspi = dev_id;

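	/*
	 * Latch and mask the enabled interrupts, then drain the RX FIFO.
	 * On a TX-data request refill the TX FIFO; while the bus is still
	 * busy just re-arm the frame-complete interrupt; once the frame
	 * completes, acknowledge FCF and wake the waiting transfer.
	 */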
	temp_IER = readl(fsl_lpspi->base + IMX7ULP_IER);
	fsl_lpspi_intctrl(fsl_lpspi, 0);
	temp_SR = readl(fsl_lpspi->base + IMX7ULP_SR);

	fsl_lpspi_read_rx_fifo(fsl_lpspi);

	if ((temp_SR & SR_TDF) && (temp_IER & IER_TDIE)) {
		fsl_lpspi_write_tx_fifo(fsl_lpspi);
		return IRQ_HANDLED;
	}

	if (temp_SR & SR_MBF ||
	    readl(fsl_lpspi->base + IMX7ULP_FSR) & FSR_TXCOUNT) {
		writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR);
		fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE);
		return IRQ_HANDLED;
	}

	if (temp_SR & SR_FCF && (temp_IER & IER_FCIE)) {
		writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR);
		complete(&fsl_lpspi->xfer_done);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

#ifdef CONFIG_PM
static int fsl_lpspi_runtime_resume(struct device *dev)
{
	struct spi_controller *controller = dev_get_drvdata(dev);
	struct fsl_lpspi_data *fsl_lpspi;
	int ret;

	fsl_lpspi = spi_controller_get_devdata(controller);

	ret = clk_prepare_enable(fsl_lpspi->clk_per);
	if (ret)
		return ret;

	ret = clk_prepare_enable(fsl_lpspi->clk_ipg);
	if (ret) {
		clk_disable_unprepare(fsl_lpspi->clk_per);
		return ret;
	}

	return 0;
}

static int fsl_lpspi_runtime_suspend(struct device *dev)
{
	struct spi_controller *controller = dev_get_drvdata(dev);
	struct fsl_lpspi_data *fsl_lpspi;

	fsl_lpspi = spi_controller_get_devdata(controller);

	clk_disable_unprepare(fsl_lpspi->clk_per);
	clk_disable_unprepare(fsl_lpspi->clk_ipg);

	return 0;
}
#endif

static int fsl_lpspi_init_rpm(struct fsl_lpspi_data *fsl_lpspi)
{
	struct device *dev = fsl_lpspi->dev;

	pm_runtime_enable(dev);
	pm_runtime_set_autosuspend_delay(dev, FSL_LPSPI_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(dev);

	return 0;
}

static int fsl_lpspi_probe(struct platform_device *pdev)
{
	const struct fsl_lpspi_devtype_data *devtype_data;
	struct fsl_lpspi_data *fsl_lpspi;
	struct spi_controller *controller;
	struct resource *res;
	int ret, irq;
	u32 num_cs;
	u32 temp;
	bool is_target;

	devtype_data = of_device_get_match_data(&pdev->dev);
	if (!devtype_data)
		return -ENODEV;

	is_target = of_property_read_bool((&pdev->dev)->of_node, "spi-slave");
	if (is_target)
		controller = devm_spi_alloc_target(&pdev->dev,
						   sizeof(struct fsl_lpspi_data));
	else
		controller = devm_spi_alloc_host(&pdev->dev,
						 sizeof(struct fsl_lpspi_data));

	if (!controller)
		return -ENOMEM;

	platform_set_drvdata(pdev, controller);

	fsl_lpspi = spi_controller_get_devdata(controller);
	fsl_lpspi->dev = &pdev->dev;
	fsl_lpspi->is_target = is_target;
	fsl_lpspi->is_only_cs1 = of_property_read_bool((&pdev->dev)->of_node,
						"fsl,spi-only-use-cs1-sel");
	fsl_lpspi->devtype_data = devtype_data;

	init_completion(&fsl_lpspi->xfer_done);

	fsl_lpspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(fsl_lpspi->base)) {
		ret = PTR_ERR(fsl_lpspi->base);
		return ret;
	}
	fsl_lpspi->base_phys = res->start;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		return ret;
	}

	ret = devm_request_irq(&pdev->dev, irq, fsl_lpspi_isr, 0,
			       dev_name(&pdev->dev), fsl_lpspi);
	if (ret) {
		dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
		return ret;
	}

	fsl_lpspi->clk_per = devm_clk_get(&pdev->dev, "per");
	if (IS_ERR(fsl_lpspi->clk_per)) {
		ret = PTR_ERR(fsl_lpspi->clk_per);
		return ret;
	}

	fsl_lpspi->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(fsl_lpspi->clk_ipg)) {
		ret = PTR_ERR(fsl_lpspi->clk_ipg);
		return ret;
	}

	/* enable the clock */
	ret = fsl_lpspi_init_rpm(fsl_lpspi);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(fsl_lpspi->dev);
	if (ret < 0) {
		dev_err(fsl_lpspi->dev, "failed to enable clock\n");
		goto out_pm_get;
	}

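	/*
	 * PARAM[3:0] and PARAM[11:8] encode log2 of the TX and RX FIFO depths
	 * in words; on i.MX93 PARAM[19:16] also reports the number of chip
	 * selects, used when the "num-cs" property is absent.
	 */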
	temp = readl(fsl_lpspi->base + IMX7ULP_PARAM);
	fsl_lpspi->txfifosize = 1 << (temp & 0x0f);
	fsl_lpspi->rxfifosize = 1 << ((temp >> 8) & 0x0f);
	if (of_property_read_u32((&pdev->dev)->of_node, "num-cs",
				 &num_cs)) {
		if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx93-spi"))
			num_cs = ((temp >> 16) & 0xf);
		else
			num_cs = 1;
	}

	controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
	controller->transfer_one = fsl_lpspi_transfer_one;
	controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
	controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
	controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	controller->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
	controller->dev.of_node = pdev->dev.of_node;
	controller->bus_num = pdev->id;
	controller->num_chipselect = num_cs;
	controller->target_abort = fsl_lpspi_target_abort;
	if (!fsl_lpspi->is_target)
		controller->use_gpio_descriptors = true;

	ret = fsl_lpspi_dma_init(&pdev->dev, fsl_lpspi, controller);
	if (ret == -EPROBE_DEFER)
		goto out_pm_get;
	if (ret < 0)
		dev_warn(&pdev->dev, "dma setup error %d, use pio\n", ret);
	else
		/*
		 * Disable the LPSPI module IRQ when DMA mode has been enabled
		 * successfully, to prevent unexpected LPSPI module IRQ events.
		 */
		disable_irq(irq);

	ret = devm_spi_register_controller(&pdev->dev, controller);
	if (ret < 0) {
		dev_err_probe(&pdev->dev, ret, "spi_register_controller error\n");
		goto free_dma;
	}

	pm_runtime_mark_last_busy(fsl_lpspi->dev);
	pm_runtime_put_autosuspend(fsl_lpspi->dev);

	return 0;

free_dma:
	fsl_lpspi_dma_exit(controller);
out_pm_get:
	pm_runtime_dont_use_autosuspend(fsl_lpspi->dev);
	pm_runtime_put_sync(fsl_lpspi->dev);
	pm_runtime_disable(fsl_lpspi->dev);

	return ret;
}

static void fsl_lpspi_remove(struct platform_device *pdev)
{
	struct spi_controller *controller = platform_get_drvdata(pdev);
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);

	fsl_lpspi_dma_exit(controller);

	pm_runtime_dont_use_autosuspend(fsl_lpspi->dev);
	pm_runtime_disable(fsl_lpspi->dev);
}

static int fsl_lpspi_suspend(struct device *dev)
{
	pinctrl_pm_select_sleep_state(dev);
	return pm_runtime_force_suspend(dev);
}

static int fsl_lpspi_resume(struct device *dev)
{
	int ret;

	ret = pm_runtime_force_resume(dev);
	if (ret) {
		dev_err(dev, "Error in resume: %d\n", ret);
		return ret;
	}

	pinctrl_pm_select_default_state(dev);

	return 0;
}

static const struct dev_pm_ops fsl_lpspi_pm_ops = {
	SET_RUNTIME_PM_OPS(fsl_lpspi_runtime_suspend,
				fsl_lpspi_runtime_resume, NULL)
	SYSTEM_SLEEP_PM_OPS(fsl_lpspi_suspend, fsl_lpspi_resume)
};

static struct platform_driver fsl_lpspi_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = fsl_lpspi_dt_ids,
		.pm = pm_ptr(&fsl_lpspi_pm_ops),
	},
	.probe = fsl_lpspi_probe,
	.remove_new = fsl_lpspi_remove,
};
module_platform_driver(fsl_lpspi_driver);

MODULE_DESCRIPTION("LPSPI Controller driver");
MODULE_AUTHOR("Gao Pan <pandy.gao@nxp.com>");
MODULE_LICENSE("GPL");