1 // SPDX-License-Identifier: GPL-2.0+
2 //
3 // Freescale i.MX7ULP LPSPI driver
4 //
5 // Copyright 2016 Freescale Semiconductor, Inc.
6 // Copyright 2018, 2023, 2025 NXP
7
8 #include <linux/bitfield.h>
9 #include <linux/clk.h>
10 #include <linux/completion.h>
11 #include <linux/delay.h>
12 #include <linux/dmaengine.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/err.h>
15 #include <linux/interrupt.h>
16 #include <linux/io.h>
17 #include <linux/irq.h>
18 #include <linux/kernel.h>
19 #include <linux/module.h>
20 #include <linux/of.h>
21 #include <linux/pinctrl/consumer.h>
22 #include <linux/platform_device.h>
23 #include <linux/dma/imx-dma.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/slab.h>
26 #include <linux/spi/spi.h>
27 #include <linux/spi/spi_bitbang.h>
28 #include <linux/types.h>
29 #include <linux/minmax.h>
30
#define DRIVER_NAME "fsl_lpspi"

/* Runtime-PM autosuspend delay */
#define FSL_LPSPI_RPM_TIMEOUT 50 /* 50ms */

/* The maximum bytes that edma can transfer once.*/
#define FSL_LPSPI_MAX_EDMA_BYTES  ((1 << 15) - 1)

/* i.MX7ULP LPSPI registers */
#define IMX7ULP_VERID	0x0	/* Version ID */
#define IMX7ULP_PARAM	0x4	/* Parameter: FIFO sizes and PCS count */
#define IMX7ULP_CR	0x10	/* Control */
#define IMX7ULP_SR	0x14	/* Status */
#define IMX7ULP_IER	0x18	/* Interrupt enable */
#define IMX7ULP_DER	0x1c	/* DMA enable */
#define IMX7ULP_CFGR0	0x20	/* Configuration 0 */
#define IMX7ULP_CFGR1	0x24	/* Configuration 1 */
#define IMX7ULP_DMR0	0x30	/* Data match 0 */
#define IMX7ULP_DMR1	0x34	/* Data match 1 */
#define IMX7ULP_CCR	0x40	/* Clock configuration (SCK divider) */
#define IMX7ULP_FCR	0x58	/* FIFO control (watermarks) */
#define IMX7ULP_FSR	0x5c	/* FIFO status (fill levels) */
#define IMX7ULP_TCR	0x60	/* Transmit command */
#define IMX7ULP_TDR	0x64	/* Transmit data */
#define IMX7ULP_RSR	0x70	/* Receive status */
#define IMX7ULP_RDR	0x74	/* Receive data */

/* General control register field define */
#define CR_RRF		BIT(9)	/* Reset receive FIFO */
#define CR_RTF		BIT(8)	/* Reset transmit FIFO */
#define CR_RST		BIT(1)	/* Software reset */
#define CR_MEN		BIT(0)	/* Module enable */
#define SR_MBF		BIT(24)	/* Module busy */
#define SR_TCF		BIT(10)	/* Transfer complete */
#define SR_FCF		BIT(9)	/* Frame complete */
#define SR_RDF		BIT(1)	/* Receive data flag */
#define SR_TDF		BIT(0)	/* Transmit data flag */
#define IER_TCIE	BIT(10)
#define IER_FCIE	BIT(9)
#define IER_RDIE	BIT(1)
#define IER_TDIE	BIT(0)
#define DER_RDDE	BIT(1)	/* RX DMA request enable */
#define DER_TDDE	BIT(0)	/* TX DMA request enable */
#define CFGR1_PCSCFG	BIT(27)
#define CFGR1_PINCFG	(BIT(24)|BIT(25))
#define CFGR1_PCSPOL_MASK	GENMASK(11, 8)	/* Chip-select polarity */
#define CFGR1_NOSTALL	BIT(3)
#define CFGR1_HOST	BIT(0)
#define FSR_TXCOUNT	(0xFF)	/* Low byte: words currently in TX FIFO */
#define RSR_RXEMPTY	BIT(1)	/* RX FIFO empty */
#define TCR_CPOL	BIT(31)	/* Clock polarity */
#define TCR_CPHA	BIT(30)	/* Clock phase */
#define TCR_CONT	BIT(21)	/* Continuous transfer (keep CS asserted) */
#define TCR_CONTC	BIT(20)	/* Continuing command */
#define TCR_RXMSK	BIT(19)	/* Mask receive data */
#define TCR_TXMSK	BIT(18)	/* Mask transmit data */

/* Write-1-to-clear status flags, SR[13:8] */
#define SR_CLEAR_MASK	GENMASK(13, 8)
88
/* Per-SoC quirks and capabilities, selected via the OF match table. */
struct fsl_lpspi_devtype_data {
	u8 prescale_max : 3; /* 0 == no limit */
	bool query_hw_for_num_cs : 1; /* read chip-select count from PARAM */
};
93
/* Settings captured for the transfer currently being programmed. */
struct lpspi_config {
	u8 bpw;			/* bits per word */
	u8 chip_select;
	u8 prescale;		/* TCR prescaler exponent */
	u16 mode;		/* SPI mode flags (SPI_CPOL/SPI_CPHA/...) */
	u32 speed_hz;		/* requested SCK rate */
	u32 effective_speed_hz;	/* rate the dividers actually achieved */
};
102
/* Driver instance state, stored as the controller's drvdata. */
struct fsl_lpspi_data {
	struct device *dev;
	void __iomem *base;		/* mapped register window */
	unsigned long base_phys;	/* physical base, for DMA slave addrs */
	struct clk *clk_ipg;		/* "ipg" register/bus clock */
	struct clk *clk_per;		/* "per" functional clock (SCK source) */
	bool is_target;			/* target (slave) vs host mode */
	bool is_only_cs1;		/* board routes only chip-select 1 */
	bool is_first_byte;		/* first word of transfer (CONTC handling) */

	/* PIO cursors into the current transfer's buffers */
	void *rx_buf;
	const void *tx_buf;
	/* width-specific FIFO accessors, chosen per transfer */
	void (*tx)(struct fsl_lpspi_data *);
	void (*rx)(struct fsl_lpspi_data *);

	u32 remain;			/* bytes still to push to the TX FIFO */
	u8 watermark;
	u8 txfifosize;
	u8 rxfifosize;

	struct lpspi_config config;
	struct completion xfer_done;	/* PIO transfer finished (or aborted) */

	bool target_aborted;

	/* DMA */
	bool usedma;
	struct completion dma_rx_completion;
	struct completion dma_tx_completion;

	const struct fsl_lpspi_devtype_data *devtype_data;
};
135
/*
 * Devices with ERR051608 have a max TCR_PRESCALE value of 1, otherwise there is
 * no prescale limit: https://www.nxp.com/docs/en/errata/i.MX93_1P87f.pdf
 */
static const struct fsl_lpspi_devtype_data imx93_lpspi_devtype_data = {
	.prescale_max = 1,
	.query_hw_for_num_cs = true,
};

static const struct fsl_lpspi_devtype_data imx7ulp_lpspi_devtype_data = {
	/* All defaults */
};

static const struct fsl_lpspi_devtype_data s32g_lpspi_devtype_data = {
	/* No prescale limit, but chip-select count comes from PARAM. */
	.query_hw_for_num_cs = true,
};
152
/* OF match table; ->data selects the per-SoC quirks above. */
static const struct of_device_id fsl_lpspi_dt_ids[] = {
	{ .compatible = "fsl,imx7ulp-spi", .data = &imx7ulp_lpspi_devtype_data,},
	{ .compatible = "fsl,imx93-spi", .data = &imx93_lpspi_devtype_data,},
	{ .compatible = "nxp,s32g2-lpspi", .data = &s32g_lpspi_devtype_data,},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_lpspi_dt_ids);
160
/*
 * Generate FIFO accessors for each supported word width.  The RX variant
 * always reads the data register (keeping TX and RX in lockstep) but only
 * stores the value when a receive buffer was supplied.
 */
#define LPSPI_BUF_RX(type)						\
static void fsl_lpspi_buf_rx_##type(struct fsl_lpspi_data *fsl_lpspi)	\
{									\
	unsigned int val = readl(fsl_lpspi->base + IMX7ULP_RDR);	\
									\
	if (fsl_lpspi->rx_buf) {					\
		*(type *)fsl_lpspi->rx_buf = val;			\
		fsl_lpspi->rx_buf += sizeof(type);			\
	}								\
}

/*
 * The TX variant pushes zeroes when no transmit buffer was supplied and
 * accounts for the consumed bytes in ->remain.
 */
#define LPSPI_BUF_TX(type)						\
static void fsl_lpspi_buf_tx_##type(struct fsl_lpspi_data *fsl_lpspi)	\
{									\
	type val = 0;							\
									\
	if (fsl_lpspi->tx_buf) {					\
		val = *(type *)fsl_lpspi->tx_buf;			\
		fsl_lpspi->tx_buf += sizeof(type);			\
	}								\
									\
	fsl_lpspi->remain -= sizeof(type);				\
	writel(val, fsl_lpspi->base + IMX7ULP_TDR);			\
}

LPSPI_BUF_RX(u8)
LPSPI_BUF_TX(u8)
LPSPI_BUF_RX(u16)
LPSPI_BUF_TX(u16)
LPSPI_BUF_RX(u32)
LPSPI_BUF_TX(u32)
192
/* Program the interrupt enable register; 0 masks all LPSPI interrupts. */
static void fsl_lpspi_intctrl(struct fsl_lpspi_data *fsl_lpspi,
			      unsigned int enable)
{
	writel(enable, fsl_lpspi->base + IMX7ULP_IER);
}
198
/*
 * Number of whole bytes needed to carry one word of @bpw bits
 * (1..8 -> 1, 9..16 -> 2, 17..24 -> 3, 25..32 -> 4).
 */
static int fsl_lpspi_bytes_per_word(const int bpw)
{
	return (bpw + 7) / 8;
}
203
fsl_lpspi_can_dma(struct spi_controller * controller,struct spi_device * spi,struct spi_transfer * transfer)204 static bool fsl_lpspi_can_dma(struct spi_controller *controller,
205 struct spi_device *spi,
206 struct spi_transfer *transfer)
207 {
208 unsigned int bytes_per_word;
209
210 if (!controller->dma_rx)
211 return false;
212
213 bytes_per_word = fsl_lpspi_bytes_per_word(transfer->bits_per_word);
214
215 switch (bytes_per_word) {
216 case 1:
217 case 2:
218 case 4:
219 break;
220 default:
221 return false;
222 }
223
224 return true;
225 }
226
lpspi_prepare_xfer_hardware(struct spi_controller * controller)227 static int lpspi_prepare_xfer_hardware(struct spi_controller *controller)
228 {
229 struct fsl_lpspi_data *fsl_lpspi =
230 spi_controller_get_devdata(controller);
231 int ret;
232
233 ret = pm_runtime_resume_and_get(fsl_lpspi->dev);
234 if (ret < 0) {
235 dev_err(fsl_lpspi->dev, "failed to enable clock\n");
236 return ret;
237 }
238
239 return 0;
240 }
241
/* Drop the runtime-PM reference taken in lpspi_prepare_xfer_hardware(). */
static int lpspi_unprepare_xfer_hardware(struct spi_controller *controller)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);

	pm_runtime_put_autosuspend(fsl_lpspi->dev);

	return 0;
}
251
/*
 * Top up the TX FIFO from the current buffer.  If the whole remaining
 * payload fitted, de-assert continuous mode (host only) and arm the
 * frame-complete interrupt; otherwise keep the TX-data interrupt armed so
 * the ISR refills the FIFO as it drains.
 */
static void fsl_lpspi_write_tx_fifo(struct fsl_lpspi_data *fsl_lpspi)
{
	u8 txfifo_cnt;
	u32 temp;

	/* FSR low byte holds the current TX FIFO fill level. */
	txfifo_cnt = readl(fsl_lpspi->base + IMX7ULP_FSR) & 0xff;

	while (txfifo_cnt < fsl_lpspi->txfifosize) {
		if (!fsl_lpspi->remain)
			break;
		fsl_lpspi->tx(fsl_lpspi);
		txfifo_cnt++;
	}

	if (txfifo_cnt < fsl_lpspi->txfifosize) {
		if (!fsl_lpspi->is_target) {
			/* Last chunk: clear CONTC so CS de-asserts at end. */
			temp = readl(fsl_lpspi->base + IMX7ULP_TCR);
			temp &= ~TCR_CONTC;
			writel(temp, fsl_lpspi->base + IMX7ULP_TCR);
		}

		fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE);
	} else
		fsl_lpspi_intctrl(fsl_lpspi, IER_TDIE);
}
277
/* Drain every word currently available in the RX FIFO. */
static void fsl_lpspi_read_rx_fifo(struct fsl_lpspi_data *fsl_lpspi)
{
	while (!(readl(fsl_lpspi->base + IMX7ULP_RSR) & RSR_RXEMPTY))
		fsl_lpspi->rx(fsl_lpspi);
}
283
/*
 * Write the transmit command register describing the next frame: word
 * size, mode bits, chip-select, prescaler and CS continuation behaviour.
 */
static void fsl_lpspi_set_cmd(struct fsl_lpspi_data *fsl_lpspi)
{
	u32 temp = 0;

	/* FRAMESZ field is bits-per-word minus one. */
	temp |= fsl_lpspi->config.bpw - 1;
	/* SPI_CPHA -> TCR_CPHA (bit 30), SPI_CPOL -> TCR_CPOL (bit 31). */
	temp |= (fsl_lpspi->config.mode & 0x3) << 30;
	temp |= (fsl_lpspi->config.chip_select & 0x3) << 24;
	if (!fsl_lpspi->is_target) {
		temp |= fsl_lpspi->config.prescale << 27;
		/*
		 * Set TCR_CONT will keep SS asserted after current transfer.
		 * For the first transfer, clear TCR_CONTC to assert SS.
		 * For subsequent transfer, set TCR_CONTC to keep SS asserted.
		 */
		if (!fsl_lpspi->usedma) {
			temp |= TCR_CONT;
			if (fsl_lpspi->is_first_byte)
				temp &= ~TCR_CONTC;
			else
				temp |= TCR_CONTC;
		}
	}
	writel(temp, fsl_lpspi->base + IMX7ULP_TCR);

	dev_dbg(fsl_lpspi->dev, "TCR=0x%x\n", temp);
}
310
fsl_lpspi_set_watermark(struct fsl_lpspi_data * fsl_lpspi)311 static void fsl_lpspi_set_watermark(struct fsl_lpspi_data *fsl_lpspi)
312 {
313 u32 temp;
314
315 if (!fsl_lpspi->usedma)
316 temp = fsl_lpspi->watermark >> 1 |
317 (fsl_lpspi->watermark >> 1) << 16;
318 else
319 temp = fsl_lpspi->watermark >> 1;
320
321 writel(temp, fsl_lpspi->base + IMX7ULP_FCR);
322
323 dev_dbg(fsl_lpspi->dev, "FCR=0x%x\n", temp);
324 }
325
fsl_lpspi_set_bitrate(struct fsl_lpspi_data * fsl_lpspi)326 static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
327 {
328 struct lpspi_config config = fsl_lpspi->config;
329 unsigned int perclk_rate, div;
330 u8 prescale_max;
331 u8 prescale;
332 int scldiv;
333
334 perclk_rate = clk_get_rate(fsl_lpspi->clk_per);
335 prescale_max = fsl_lpspi->devtype_data->prescale_max ?: 7;
336
337 if (!config.speed_hz) {
338 dev_err(fsl_lpspi->dev,
339 "error: the transmission speed provided is 0!\n");
340 return -EINVAL;
341 }
342
343 if (config.speed_hz > perclk_rate / 2) {
344 div = 2;
345 } else {
346 div = DIV_ROUND_UP(perclk_rate, config.speed_hz);
347 }
348
349 for (prescale = 0; prescale <= prescale_max; prescale++) {
350 scldiv = div / (1 << prescale) - 2;
351 if (scldiv >= 0 && scldiv < 256) {
352 fsl_lpspi->config.prescale = prescale;
353 break;
354 }
355 }
356
357 if (scldiv < 0 || scldiv >= 256)
358 return -EINVAL;
359
360 writel(scldiv | (scldiv << 8) | ((scldiv >> 1) << 16),
361 fsl_lpspi->base + IMX7ULP_CCR);
362
363 fsl_lpspi->config.effective_speed_hz = perclk_rate / (scldiv + 2) *
364 (1 << prescale);
365
366 dev_dbg(fsl_lpspi->dev, "perclk=%u, speed=%u, prescale=%u, scldiv=%d\n",
367 perclk_rate, config.speed_hz, prescale, scldiv);
368
369 return 0;
370 }
371
fsl_lpspi_dma_configure(struct spi_controller * controller)372 static int fsl_lpspi_dma_configure(struct spi_controller *controller)
373 {
374 int ret;
375 enum dma_slave_buswidth buswidth;
376 struct dma_slave_config rx = {}, tx = {};
377 struct fsl_lpspi_data *fsl_lpspi =
378 spi_controller_get_devdata(controller);
379
380 switch (fsl_lpspi_bytes_per_word(fsl_lpspi->config.bpw)) {
381 case 4:
382 buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
383 break;
384 case 2:
385 buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
386 break;
387 case 1:
388 buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
389 break;
390 default:
391 return -EINVAL;
392 }
393
394 tx.direction = DMA_MEM_TO_DEV;
395 tx.dst_addr = fsl_lpspi->base_phys + IMX7ULP_TDR;
396 tx.dst_addr_width = buswidth;
397 tx.dst_maxburst = 1;
398 ret = dmaengine_slave_config(controller->dma_tx, &tx);
399 if (ret) {
400 dev_err(fsl_lpspi->dev, "TX dma configuration failed with %d\n",
401 ret);
402 return ret;
403 }
404
405 rx.direction = DMA_DEV_TO_MEM;
406 rx.src_addr = fsl_lpspi->base_phys + IMX7ULP_RDR;
407 rx.src_addr_width = buswidth;
408 rx.src_maxburst = 1;
409 ret = dmaengine_slave_config(controller->dma_rx, &rx);
410 if (ret) {
411 dev_err(fsl_lpspi->dev, "RX dma configuration failed with %d\n",
412 ret);
413 return ret;
414 }
415
416 return 0;
417 }
418
/*
 * Per-transfer controller setup: bitrate (host only), watermarks, pin and
 * chip-select polarity configuration, module enable and DMA request routing.
 */
static int fsl_lpspi_config(struct fsl_lpspi_data *fsl_lpspi)
{
	u32 temp;
	int ret;

	if (!fsl_lpspi->is_target) {
		ret = fsl_lpspi_set_bitrate(fsl_lpspi);
		if (ret)
			return ret;
	}

	fsl_lpspi_set_watermark(fsl_lpspi);

	if (!fsl_lpspi->is_target)
		temp = CFGR1_HOST;
	else
		temp = CFGR1_PINCFG;
	if (fsl_lpspi->config.mode & SPI_CS_HIGH)
		temp |= FIELD_PREP(CFGR1_PCSPOL_MASK,
				   BIT(fsl_lpspi->config.chip_select));

	writel(temp, fsl_lpspi->base + IMX7ULP_CFGR1);

	/* Flush both FIFOs and enable the module. */
	temp = readl(fsl_lpspi->base + IMX7ULP_CR);
	temp |= CR_RRF | CR_RTF | CR_MEN;
	writel(temp, fsl_lpspi->base + IMX7ULP_CR);

	/* Route the FIFO watermarks to DMA requests for DMA transfers. */
	temp = 0;
	if (fsl_lpspi->usedma)
		temp = DER_TDDE | DER_RDDE;
	writel(temp, fsl_lpspi->base + IMX7ULP_DER);

	return 0;
}
453
/*
 * Capture the transfer parameters into ->config, pick the FIFO accessors
 * matching the word width, size the watermark, decide PIO vs DMA and
 * program the hardware via fsl_lpspi_config().
 */
static int fsl_lpspi_setup_transfer(struct spi_controller *controller,
				    struct spi_device *spi,
				    struct spi_transfer *t)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(spi->controller);

	if (t == NULL)
		return -EINVAL;

	fsl_lpspi->config.mode = spi->mode;
	fsl_lpspi->config.bpw = t->bits_per_word;
	fsl_lpspi->config.speed_hz = t->speed_hz;
	if (fsl_lpspi->is_only_cs1)
		fsl_lpspi->config.chip_select = 1;
	else
		fsl_lpspi->config.chip_select = spi_get_chipselect(spi, 0);

	/* Fall back to the device defaults where the transfer doesn't say. */
	if (!fsl_lpspi->config.speed_hz)
		fsl_lpspi->config.speed_hz = spi->max_speed_hz;
	if (!fsl_lpspi->config.bpw)
		fsl_lpspi->config.bpw = spi->bits_per_word;

	/* Initialize the functions for transfer */
	if (fsl_lpspi->config.bpw <= 8) {
		fsl_lpspi->rx = fsl_lpspi_buf_rx_u8;
		fsl_lpspi->tx = fsl_lpspi_buf_tx_u8;
	} else if (fsl_lpspi->config.bpw <= 16) {
		fsl_lpspi->rx = fsl_lpspi_buf_rx_u16;
		fsl_lpspi->tx = fsl_lpspi_buf_tx_u16;
	} else {
		fsl_lpspi->rx = fsl_lpspi_buf_rx_u32;
		fsl_lpspi->tx = fsl_lpspi_buf_tx_u32;
	}

	/* Watermark: no deeper than the FIFO or the transfer itself. */
	fsl_lpspi->watermark = min_t(typeof(fsl_lpspi->watermark),
				     fsl_lpspi->txfifosize,
				     t->len);

	if (fsl_lpspi_can_dma(controller, spi, t))
		fsl_lpspi->usedma = true;
	else
		fsl_lpspi->usedma = false;

	return fsl_lpspi_config(fsl_lpspi);
}
500
fsl_lpspi_target_abort(struct spi_controller * controller)501 static int fsl_lpspi_target_abort(struct spi_controller *controller)
502 {
503 struct fsl_lpspi_data *fsl_lpspi =
504 spi_controller_get_devdata(controller);
505
506 fsl_lpspi->target_aborted = true;
507 if (!fsl_lpspi->usedma)
508 complete(&fsl_lpspi->xfer_done);
509 else {
510 complete(&fsl_lpspi->dma_tx_completion);
511 complete(&fsl_lpspi->dma_rx_completion);
512 }
513
514 return 0;
515 }
516
fsl_lpspi_wait_for_completion(struct spi_controller * controller)517 static int fsl_lpspi_wait_for_completion(struct spi_controller *controller)
518 {
519 struct fsl_lpspi_data *fsl_lpspi =
520 spi_controller_get_devdata(controller);
521
522 if (fsl_lpspi->is_target) {
523 if (wait_for_completion_interruptible(&fsl_lpspi->xfer_done) ||
524 fsl_lpspi->target_aborted) {
525 dev_dbg(fsl_lpspi->dev, "interrupted\n");
526 return -EINTR;
527 }
528 } else {
529 if (!wait_for_completion_timeout(&fsl_lpspi->xfer_done, HZ)) {
530 dev_dbg(fsl_lpspi->dev, "wait for completion timeout\n");
531 return -ETIMEDOUT;
532 }
533 }
534
535 return 0;
536 }
537
/*
 * Quiesce the controller after a transfer: mask interrupts (PIO only),
 * flush both FIFOs, disable the module and clear all latched status flags.
 */
static int fsl_lpspi_reset(struct fsl_lpspi_data *fsl_lpspi)
{
	u32 temp;

	if (!fsl_lpspi->usedma) {
		/* Disable all interrupt */
		fsl_lpspi_intctrl(fsl_lpspi, 0);
	}

	/* Clear FIFO and disable module */
	temp = CR_RRF | CR_RTF;
	writel(temp, fsl_lpspi->base + IMX7ULP_CR);

	/* W1C for all flags in SR */
	writel(SR_CLEAR_MASK, fsl_lpspi->base + IMX7ULP_SR);

	return 0;
}
556
/* eDMA completion callback: wake the waiter for the RX channel. */
static void fsl_lpspi_dma_rx_callback(void *cookie)
{
	struct fsl_lpspi_data *fsl_lpspi = (struct fsl_lpspi_data *)cookie;

	complete(&fsl_lpspi->dma_rx_completion);
}
563
/* eDMA completion callback: wake the waiter for the TX channel. */
static void fsl_lpspi_dma_tx_callback(void *cookie)
{
	struct fsl_lpspi_data *fsl_lpspi = (struct fsl_lpspi_data *)cookie;

	complete(&fsl_lpspi->dma_tx_completion);
}
570
/*
 * Worst-case DMA duration in jiffies for @size bytes: wire time assuming
 * 8 data bits plus 4 bits of chip-select overhead per byte, one extra
 * second for scheduling latency, and the whole budget doubled for margin.
 */
static int fsl_lpspi_calculate_timeout(struct fsl_lpspi_data *fsl_lpspi,
				       int size)
{
	unsigned long timeout = 0;

	/* Time with actual data transfer and CS change delay related to HW */
	timeout = (8 + 4) * size / fsl_lpspi->config.speed_hz;

	/* Add extra second for scheduler related activities */
	timeout += 1;

	/* Double calculated timeout */
	return secs_to_jiffies(2 * timeout);
}
585
fsl_lpspi_dma_transfer(struct spi_controller * controller,struct fsl_lpspi_data * fsl_lpspi,struct spi_transfer * transfer)586 static int fsl_lpspi_dma_transfer(struct spi_controller *controller,
587 struct fsl_lpspi_data *fsl_lpspi,
588 struct spi_transfer *transfer)
589 {
590 struct dma_async_tx_descriptor *desc_tx, *desc_rx;
591 unsigned long transfer_timeout;
592 unsigned long time_left;
593 struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
594 int ret;
595
596 ret = fsl_lpspi_dma_configure(controller);
597 if (ret)
598 return ret;
599
600 desc_rx = dmaengine_prep_slave_sg(controller->dma_rx,
601 rx->sgl, rx->nents, DMA_DEV_TO_MEM,
602 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
603 if (!desc_rx)
604 return -EINVAL;
605
606 desc_rx->callback = fsl_lpspi_dma_rx_callback;
607 desc_rx->callback_param = (void *)fsl_lpspi;
608 dmaengine_submit(desc_rx);
609 reinit_completion(&fsl_lpspi->dma_rx_completion);
610 dma_async_issue_pending(controller->dma_rx);
611
612 desc_tx = dmaengine_prep_slave_sg(controller->dma_tx,
613 tx->sgl, tx->nents, DMA_MEM_TO_DEV,
614 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
615 if (!desc_tx) {
616 dmaengine_terminate_all(controller->dma_tx);
617 return -EINVAL;
618 }
619
620 desc_tx->callback = fsl_lpspi_dma_tx_callback;
621 desc_tx->callback_param = (void *)fsl_lpspi;
622 dmaengine_submit(desc_tx);
623 reinit_completion(&fsl_lpspi->dma_tx_completion);
624 dma_async_issue_pending(controller->dma_tx);
625
626 fsl_lpspi->target_aborted = false;
627
628 if (!fsl_lpspi->is_target) {
629 transfer_timeout = fsl_lpspi_calculate_timeout(fsl_lpspi,
630 transfer->len);
631
632 /* Wait eDMA to finish the data transfer.*/
633 time_left = wait_for_completion_timeout(&fsl_lpspi->dma_tx_completion,
634 transfer_timeout);
635 if (!time_left) {
636 dev_err(fsl_lpspi->dev, "I/O Error in DMA TX\n");
637 dmaengine_terminate_all(controller->dma_tx);
638 dmaengine_terminate_all(controller->dma_rx);
639 fsl_lpspi_reset(fsl_lpspi);
640 return -ETIMEDOUT;
641 }
642
643 time_left = wait_for_completion_timeout(&fsl_lpspi->dma_rx_completion,
644 transfer_timeout);
645 if (!time_left) {
646 dev_err(fsl_lpspi->dev, "I/O Error in DMA RX\n");
647 dmaengine_terminate_all(controller->dma_tx);
648 dmaengine_terminate_all(controller->dma_rx);
649 fsl_lpspi_reset(fsl_lpspi);
650 return -ETIMEDOUT;
651 }
652 } else {
653 if (wait_for_completion_interruptible(&fsl_lpspi->dma_tx_completion) ||
654 fsl_lpspi->target_aborted) {
655 dev_dbg(fsl_lpspi->dev,
656 "I/O Error in DMA TX interrupted\n");
657 dmaengine_terminate_all(controller->dma_tx);
658 dmaengine_terminate_all(controller->dma_rx);
659 fsl_lpspi_reset(fsl_lpspi);
660 return -EINTR;
661 }
662
663 if (wait_for_completion_interruptible(&fsl_lpspi->dma_rx_completion) ||
664 fsl_lpspi->target_aborted) {
665 dev_dbg(fsl_lpspi->dev,
666 "I/O Error in DMA RX interrupted\n");
667 dmaengine_terminate_all(controller->dma_tx);
668 dmaengine_terminate_all(controller->dma_rx);
669 fsl_lpspi_reset(fsl_lpspi);
670 return -EINTR;
671 }
672 }
673
674 fsl_lpspi_reset(fsl_lpspi);
675
676 return 0;
677 }
678
fsl_lpspi_dma_exit(struct spi_controller * controller)679 static void fsl_lpspi_dma_exit(struct spi_controller *controller)
680 {
681 if (controller->dma_rx) {
682 dma_release_channel(controller->dma_rx);
683 controller->dma_rx = NULL;
684 }
685
686 if (controller->dma_tx) {
687 dma_release_channel(controller->dma_tx);
688 controller->dma_tx = NULL;
689 }
690 }
691
/*
 * Request the "tx" and "rx" DMA channels and wire up the controller's DMA
 * hooks.  On failure both channels are released and the error (possibly
 * -EPROBE_DEFER) is returned so the caller can decide to fall back to PIO.
 */
static int fsl_lpspi_dma_init(struct device *dev,
			      struct fsl_lpspi_data *fsl_lpspi,
			      struct spi_controller *controller)
{
	int ret;

	/* Prepare for TX DMA: */
	controller->dma_tx = dma_request_chan(dev, "tx");
	if (IS_ERR(controller->dma_tx)) {
		ret = PTR_ERR(controller->dma_tx);
		dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
		controller->dma_tx = NULL;
		goto err;
	}

	/* Prepare for RX DMA: */
	controller->dma_rx = dma_request_chan(dev, "rx");
	if (IS_ERR(controller->dma_rx)) {
		ret = PTR_ERR(controller->dma_rx);
		dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
		controller->dma_rx = NULL;
		goto err;
	}

	init_completion(&fsl_lpspi->dma_rx_completion);
	init_completion(&fsl_lpspi->dma_tx_completion);
	controller->can_dma = fsl_lpspi_can_dma;
	controller->max_dma_len = FSL_LPSPI_MAX_EDMA_BYTES;

	return 0;
err:
	fsl_lpspi_dma_exit(controller);
	return ret;
}
726
/*
 * Interrupt-driven PIO transfer: prime the TX FIFO (which arms the
 * appropriate interrupt), then sleep until the ISR or an abort signals
 * completion.  The controller is reset afterwards in every case.
 */
static int fsl_lpspi_pio_transfer(struct spi_controller *controller,
				  struct spi_transfer *t)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);
	int ret;

	fsl_lpspi->tx_buf = t->tx_buf;
	fsl_lpspi->rx_buf = t->rx_buf;
	fsl_lpspi->remain = t->len;

	reinit_completion(&fsl_lpspi->xfer_done);
	fsl_lpspi->target_aborted = false;

	fsl_lpspi_write_tx_fifo(fsl_lpspi);

	ret = fsl_lpspi_wait_for_completion(controller);

	fsl_lpspi_reset(fsl_lpspi);

	return ret;
}
749
/* spi_controller->transfer_one: configure, then run via DMA or PIO. */
static int fsl_lpspi_transfer_one(struct spi_controller *controller,
				  struct spi_device *spi,
				  struct spi_transfer *t)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);
	int ret;

	fsl_lpspi->is_first_byte = true;
	ret = fsl_lpspi_setup_transfer(controller, spi, t);
	if (ret < 0)
		return ret;

	/* Report the rate the divider setup actually achieved. */
	t->effective_speed_hz = fsl_lpspi->config.effective_speed_hz;

	fsl_lpspi_set_cmd(fsl_lpspi);
	fsl_lpspi->is_first_byte = false;

	if (fsl_lpspi->usedma)
		ret = fsl_lpspi_dma_transfer(controller, fsl_lpspi, t);
	else
		ret = fsl_lpspi_pio_transfer(controller, t);
	if (ret < 0)
		return ret;

	return 0;
}
777
/*
 * PIO-mode interrupt handler.  All interrupts are masked on entry and
 * selectively re-armed depending on how far the transfer has progressed.
 */
static irqreturn_t fsl_lpspi_isr(int irq, void *dev_id)
{
	u32 temp_SR, temp_IER;
	struct fsl_lpspi_data *fsl_lpspi = dev_id;

	temp_IER = readl(fsl_lpspi->base + IMX7ULP_IER);
	fsl_lpspi_intctrl(fsl_lpspi, 0);
	temp_SR = readl(fsl_lpspi->base + IMX7ULP_SR);

	/* Drain whatever has landed in the RX FIFO. */
	fsl_lpspi_read_rx_fifo(fsl_lpspi);

	if ((temp_SR & SR_TDF) && (temp_IER & IER_TDIE)) {
		/* More data to send: refill; interrupts re-armed inside. */
		fsl_lpspi_write_tx_fifo(fsl_lpspi);
		return IRQ_HANDLED;
	}

	if (temp_SR & SR_MBF ||
	    readl(fsl_lpspi->base + IMX7ULP_FSR) & FSR_TXCOUNT) {
		/* Still busy or TX FIFO not drained: wait for another FCF. */
		writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR);
		fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE | (temp_IER & IER_TDIE));
		return IRQ_HANDLED;
	}

	if (temp_SR & SR_FCF && (temp_IER & IER_FCIE)) {
		/* Frame complete and FIFO empty: the transfer is done. */
		writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR);
		complete(&fsl_lpspi->xfer_done);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
809
810 #ifdef CONFIG_PM
/* Runtime PM resume: enable the functional ("per") then bus ("ipg") clock. */
static int fsl_lpspi_runtime_resume(struct device *dev)
{
	struct spi_controller *controller = dev_get_drvdata(dev);
	struct fsl_lpspi_data *fsl_lpspi;
	int ret;

	fsl_lpspi = spi_controller_get_devdata(controller);

	ret = clk_prepare_enable(fsl_lpspi->clk_per);
	if (ret)
		return ret;

	ret = clk_prepare_enable(fsl_lpspi->clk_ipg);
	if (ret) {
		/* Don't leave the functional clock running on failure. */
		clk_disable_unprepare(fsl_lpspi->clk_per);
		return ret;
	}

	return 0;
}
831
/* Runtime PM suspend: gate both clocks off. */
static int fsl_lpspi_runtime_suspend(struct device *dev)
{
	struct spi_controller *controller = dev_get_drvdata(dev);
	struct fsl_lpspi_data *fsl_lpspi;

	fsl_lpspi = spi_controller_get_devdata(controller);

	clk_disable_unprepare(fsl_lpspi->clk_per);
	clk_disable_unprepare(fsl_lpspi->clk_ipg);

	return 0;
}
844 #endif
845
/* Enable runtime PM with a short autosuspend delay. */
static int fsl_lpspi_init_rpm(struct fsl_lpspi_data *fsl_lpspi)
{
	struct device *dev = fsl_lpspi->dev;

	pm_runtime_enable(dev);
	pm_runtime_set_autosuspend_delay(dev, FSL_LPSPI_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(dev);

	return 0;
}
856
/*
 * Probe: allocate a host or target controller per DT, map registers,
 * request IRQ and clocks, size the FIFOs from PARAM, set up DMA (falling
 * back to interrupt-driven PIO) and register the controller.
 */
static int fsl_lpspi_probe(struct platform_device *pdev)
{
	const struct fsl_lpspi_devtype_data *devtype_data;
	struct fsl_lpspi_data *fsl_lpspi;
	struct spi_controller *controller;
	struct resource *res;
	int ret, irq;
	u32 num_cs;
	u32 temp;
	bool is_target;

	devtype_data = of_device_get_match_data(&pdev->dev);
	if (!devtype_data)
		return -ENODEV;

	/* A "spi-slave" DT property selects target (slave) mode. */
	is_target = of_property_read_bool((&pdev->dev)->of_node, "spi-slave");
	if (is_target)
		controller = devm_spi_alloc_target(&pdev->dev,
						   sizeof(struct fsl_lpspi_data));
	else
		controller = devm_spi_alloc_host(&pdev->dev,
						 sizeof(struct fsl_lpspi_data));

	if (!controller)
		return -ENOMEM;

	platform_set_drvdata(pdev, controller);

	fsl_lpspi = spi_controller_get_devdata(controller);
	fsl_lpspi->dev = &pdev->dev;
	fsl_lpspi->is_target = is_target;
	fsl_lpspi->is_only_cs1 = of_property_read_bool((&pdev->dev)->of_node,
						"fsl,spi-only-use-cs1-sel");
	fsl_lpspi->devtype_data = devtype_data;

	init_completion(&fsl_lpspi->xfer_done);

	fsl_lpspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(fsl_lpspi->base)) {
		ret = PTR_ERR(fsl_lpspi->base);
		return ret;
	}
	/* The physical base is needed for the DMA slave addresses. */
	fsl_lpspi->base_phys = res->start;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		return ret;
	}

	/* The IRQ stays disabled; it is only enabled for PIO fallback. */
	ret = devm_request_irq(&pdev->dev, irq, fsl_lpspi_isr, IRQF_NO_AUTOEN,
			       dev_name(&pdev->dev), fsl_lpspi);
	if (ret) {
		dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
		return ret;
	}

	fsl_lpspi->clk_per = devm_clk_get(&pdev->dev, "per");
	if (IS_ERR(fsl_lpspi->clk_per)) {
		ret = PTR_ERR(fsl_lpspi->clk_per);
		return ret;
	}

	fsl_lpspi->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(fsl_lpspi->clk_ipg)) {
		ret = PTR_ERR(fsl_lpspi->clk_ipg);
		return ret;
	}

	/* enable the clock */
	ret = fsl_lpspi_init_rpm(fsl_lpspi);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(fsl_lpspi->dev);
	if (ret < 0) {
		dev_err(fsl_lpspi->dev, "failed to enable clock\n");
		goto out_pm_get;
	}

	/* FIFO depths (and optionally CS count) are encoded in PARAM. */
	temp = readl(fsl_lpspi->base + IMX7ULP_PARAM);
	fsl_lpspi->txfifosize = 1 << (temp & 0x0f);
	fsl_lpspi->rxfifosize = 1 << ((temp >> 8) & 0x0f);
	if (of_property_read_u32((&pdev->dev)->of_node, "num-cs",
				 &num_cs)) {
		if (devtype_data->query_hw_for_num_cs)
			num_cs = ((temp >> 16) & 0xf);
		else
			num_cs = 1;
	}

	controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
	controller->transfer_one = fsl_lpspi_transfer_one;
	controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
	controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
	controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	controller->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
	controller->dev.of_node = pdev->dev.of_node;
	controller->bus_num = pdev->id;
	controller->num_chipselect = num_cs;
	controller->target_abort = fsl_lpspi_target_abort;
	if (!fsl_lpspi->is_target)
		controller->use_gpio_descriptors = true;

	ret = fsl_lpspi_dma_init(&pdev->dev, fsl_lpspi, controller);
	if (ret == -EPROBE_DEFER)
		goto out_pm_get;
	if (ret < 0) {
		/* No DMA: fall back to interrupt-driven PIO. */
		dev_warn(&pdev->dev, "dma setup error %d, use pio\n", ret);
		enable_irq(irq);
	}

	ret = devm_spi_register_controller(&pdev->dev, controller);
	if (ret < 0) {
		dev_err_probe(&pdev->dev, ret, "spi_register_controller error\n");
		goto free_dma;
	}

	pm_runtime_put_autosuspend(fsl_lpspi->dev);

	return 0;

free_dma:
	fsl_lpspi_dma_exit(controller);
out_pm_get:
	pm_runtime_dont_use_autosuspend(fsl_lpspi->dev);
	pm_runtime_put_sync(fsl_lpspi->dev);
	pm_runtime_disable(fsl_lpspi->dev);

	return ret;
}
988
/* Remove: the controller is devm-registered, so only undo DMA and PM. */
static void fsl_lpspi_remove(struct platform_device *pdev)
{
	struct spi_controller *controller = platform_get_drvdata(pdev);
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);

	fsl_lpspi_dma_exit(controller);

	pm_runtime_dont_use_autosuspend(fsl_lpspi->dev);
	pm_runtime_disable(fsl_lpspi->dev);
}
1000
/* System sleep: park the pins, then force the device runtime-suspended. */
static int fsl_lpspi_suspend(struct device *dev)
{
	pinctrl_pm_select_sleep_state(dev);
	return pm_runtime_force_suspend(dev);
}
1006
/* System resume: power the device back up, then restore the default pins. */
static int fsl_lpspi_resume(struct device *dev)
{
	int ret;

	ret = pm_runtime_force_resume(dev);
	if (ret) {
		dev_err(dev, "Error in resume: %d\n", ret);
		return ret;
	}

	pinctrl_pm_select_default_state(dev);

	return 0;
}
1021
/* Runtime PM gates the clocks; system sleep additionally parks the pins. */
static const struct dev_pm_ops fsl_lpspi_pm_ops = {
	SET_RUNTIME_PM_OPS(fsl_lpspi_runtime_suspend,
				fsl_lpspi_runtime_resume, NULL)
	SYSTEM_SLEEP_PM_OPS(fsl_lpspi_suspend, fsl_lpspi_resume)
};

static struct platform_driver fsl_lpspi_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = fsl_lpspi_dt_ids,
		.pm = pm_ptr(&fsl_lpspi_pm_ops),
	},
	.probe = fsl_lpspi_probe,
	.remove = fsl_lpspi_remove,
};
module_platform_driver(fsl_lpspi_driver);

MODULE_DESCRIPTION("LPSPI Controller driver");
MODULE_AUTHOR("Gao Pan <pandy.gao@nxp.com>");
MODULE_LICENSE("GPL");
1042