1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
3 // Copyright (C) 2008 Juergen Beisert
4
5 #include <linux/bits.h>
6 #include <linux/bitfield.h>
7 #include <linux/clk.h>
8 #include <linux/completion.h>
9 #include <linux/delay.h>
10 #include <linux/dmaengine.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/err.h>
13 #include <linux/interrupt.h>
14 #include <linux/io.h>
15 #include <linux/irq.h>
16 #include <linux/kernel.h>
17 #include <linux/math.h>
18 #include <linux/math64.h>
19 #include <linux/module.h>
20 #include <linux/overflow.h>
21 #include <linux/pinctrl/consumer.h>
22 #include <linux/platform_device.h>
23 #include <linux/pm_runtime.h>
24 #include <linux/slab.h>
25 #include <linux/spi/spi.h>
26 #include <linux/types.h>
27 #include <linux/of.h>
28 #include <linux/property.h>
29
30 #include <linux/dma/imx-dma.h>
31
32 #define DRIVER_NAME "spi_imx"
33
34 static bool use_dma = true;
35 module_param(use_dma, bool, 0644);
36 MODULE_PARM_DESC(use_dma, "Enable usage of DMA when available (default)");
37
38 /* define polling limits */
39 static unsigned int polling_limit_us = 30;
40 module_param(polling_limit_us, uint, 0664);
41 MODULE_PARM_DESC(polling_limit_us,
42 "time in us to run a transfer in polling mode\n");
43
44 #define MXC_RPM_TIMEOUT 2000 /* 2000ms */
45 #define MXC_SPI_DEFAULT_SPEED 500000 /* 500KHz */
46
47 #define MXC_CSPIRXDATA 0x00
48 #define MXC_CSPITXDATA 0x04
49 #define MXC_CSPICTRL 0x08
50 #define MXC_CSPIINT 0x0c
51 #define MXC_RESET 0x1c
52
53 /* generic defines to abstract from the different register layouts */
54 #define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */
55 #define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */
56 #define MXC_INT_RDR BIT(4) /* Receive date threshold interrupt */
57
58 /* The maximum bytes that a sdma BD can transfer. */
59 #define MAX_SDMA_BD_BYTES (1 << 15)
60 #define MX51_ECSPI_CTRL_MAX_BURST 512
61 /* The maximum bytes that IMX53_ECSPI can transfer in target mode.*/
62 #define MX53_MAX_TRANSFER_BYTES 512
63 #define BYTES_PER_32BITS_WORD 4
64
/* Identifies the CSPI/eCSPI core generation, selecting the register layout. */
enum spi_imx_devtype {
	IMX1_CSPI,
	IMX21_CSPI,
	IMX27_CSPI,
	IMX31_CSPI,
	IMX35_CSPI,	/* CSPI on all i.mx except above */
	IMX51_ECSPI,	/* ECSPI on i.mx51 */
	IMX53_ECSPI,	/* ECSPI on i.mx53 and later */
};
74
75 struct spi_imx_data;
76
/*
 * Per-SoC operations and capabilities; one static instance exists per
 * supported controller generation, referenced by spi_imx_data->devtype_data.
 */
struct spi_imx_devtype_data {
	/* enable/disable the interrupt sources given by the MXC_INT_* mask */
	void (*intctrl)(struct spi_imx_data *spi_imx, int enable);
	/* per-message controller setup (mode bits, chip select, loopback) */
	int (*prepare_message)(struct spi_imx_data *spi_imx, struct spi_message *msg);
	/* per-transfer setup: clock divider, burst length, word delay */
	int (*prepare_transfer)(struct spi_imx_data *spi_imx, struct spi_device *spi,
				struct spi_transfer *t);
	/* start the exchange (XCH bit in PIO mode, DMA request enables otherwise) */
	void (*trigger)(struct spi_imx_data *spi_imx);
	/* non-zero when at least one word is waiting in the RX FIFO */
	int (*rx_available)(struct spi_imx_data *spi_imx);
	/* bring the block back to a clean state (e.g. drain the RX FIFO) */
	void (*reset)(struct spi_imx_data *spi_imx);
	/* program the DMA watermark levels */
	void (*setup_wml)(struct spi_imx_data *spi_imx);
	/* gate the controller off after a message */
	void (*disable)(struct spi_imx_data *spi_imx);
	bool has_dmamode;	/* core has DMA request lines */
	bool has_targetmode;	/* core can operate as an SPI target (slave) */
	unsigned int fifo_size;	/* FIFO depth in words */
	bool dynamic_burst;
	/*
	 * ERR009165 fixed or not:
	 * https://www.nxp.com/docs/en/errata/IMX6DQCE.pdf
	 */
	bool tx_glitch_fixed;
	enum spi_imx_devtype devtype;
};
98
/*
 * Bookkeeping for one DMA chunk: bounce buffers, their mapped device
 * addresses, and the mapped vs. payload lengths.
 * NOTE(review): cmd_word presumably carries a command word sent with the
 * payload — confirm against the (not shown) DMA setup code.
 */
struct dma_data_package {
	u32 cmd_word;
	void *dma_rx_buf;	/* CPU address of the RX bounce buffer */
	void *dma_tx_buf;	/* CPU address of the TX bounce buffer */
	dma_addr_t dma_tx_addr;	/* mapped device address for TX */
	dma_addr_t dma_rx_addr;	/* mapped device address for RX */
	int dma_len;		/* mapped length (likely DMA_CACHE_ALIGNED_LEN padded) */
	int data_len;		/* useful payload length within dma_len */
};
108
/* Driver state for one controller instance. */
struct spi_imx_data {
	struct spi_controller *controller;
	struct device *dev;

	struct completion xfer_done;	/* completed by the transfer IRQ/DMA path */
	void __iomem *base;		/* mapped register window */
	unsigned long base_phys;	/* physical address of the register window */

	struct clk *clk_per;
	struct clk *clk_ipg;
	unsigned long spi_clk;		/* reference clock rate in Hz */
	unsigned int spi_bus_clk;	/* actual SCLK rate achieved in Hz */

	unsigned int bits_per_word;
	unsigned int spi_drctl;		/* DRCTL field value used for SPI_READY */

	/* bytes still to push (count) / pop (remainder) in PIO mode */
	unsigned int count, remainder;
	void (*tx)(struct spi_imx_data *spi_imx);	/* push one word to the TX FIFO */
	void (*rx)(struct spi_imx_data *spi_imx);	/* pop one word from the RX FIFO */
	void *rx_buf;
	const void *tx_buf;
	unsigned int txfifo; /* number of words pushed in tx FIFO */
	unsigned int dynamic_burst;
	bool rx_only;			/* current transfer has no TX data */

	/* Target mode */
	bool target_mode;
	bool target_aborted;
	unsigned int target_burst;	/* burst length in bytes when acting as target */

	/* DMA */
	bool usedma;
	u32 wml;			/* watermark level, in FIFO words */
	struct completion dma_rx_completion;
	struct completion dma_tx_completion;
	size_t dma_package_num;
	struct dma_data_package *dma_data;
	int rx_offset;

	const struct spi_imx_devtype_data *devtype_data;
};
150
is_imx27_cspi(struct spi_imx_data * d)151 static inline int is_imx27_cspi(struct spi_imx_data *d)
152 {
153 return d->devtype_data->devtype == IMX27_CSPI;
154 }
155
is_imx35_cspi(struct spi_imx_data * d)156 static inline int is_imx35_cspi(struct spi_imx_data *d)
157 {
158 return d->devtype_data->devtype == IMX35_CSPI;
159 }
160
is_imx51_ecspi(struct spi_imx_data * d)161 static inline int is_imx51_ecspi(struct spi_imx_data *d)
162 {
163 return d->devtype_data->devtype == IMX51_ECSPI;
164 }
165
is_imx53_ecspi(struct spi_imx_data * d)166 static inline int is_imx53_ecspi(struct spi_imx_data *d)
167 {
168 return d->devtype_data->devtype == IMX53_ECSPI;
169 }
170
/*
 * Generate a PIO RX helper for the given access width: pop one word from
 * the RX FIFO and, if a receive buffer is present, store it and advance
 * the buffer. The word is consumed (remainder decremented) even when no
 * buffer is set.
 */
#define MXC_SPI_BUF_RX(type)						\
static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx)		\
{									\
	unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);	\
									\
	if (spi_imx->rx_buf) {						\
		*(type *)spi_imx->rx_buf = val;				\
		spi_imx->rx_buf += sizeof(type);			\
	}								\
									\
	spi_imx->remainder -= sizeof(type);				\
}
183
/*
 * Generate a PIO TX helper for the given access width: read one word from
 * the transmit buffer (zero when there is none) and push it into the TX
 * FIFO, decrementing the outstanding byte count.
 */
#define MXC_SPI_BUF_TX(type)						\
static void spi_imx_buf_tx_##type(struct spi_imx_data *spi_imx)		\
{									\
	type val = 0;							\
									\
	if (spi_imx->tx_buf) {						\
		val = *(type *)spi_imx->tx_buf;				\
		spi_imx->tx_buf += sizeof(type);			\
	}								\
									\
	spi_imx->count -= sizeof(type);					\
									\
	writel(val, spi_imx->base + MXC_CSPITXDATA);			\
}
198
/* Instantiate the PIO FIFO helpers for 8-, 16- and 32-bit word sizes. */
MXC_SPI_BUF_RX(u8)
MXC_SPI_BUF_TX(u8)
MXC_SPI_BUF_RX(u16)
MXC_SPI_BUF_TX(u16)
MXC_SPI_BUF_RX(u32)
MXC_SPI_BUF_TX(u32)

/* Align to cache line to avoid swiotlb bounce */
#define DMA_CACHE_ALIGNED_LEN(x) ALIGN((x), dma_get_cache_alignment())
208
/*
 * Divider table for the MX21/MX27 data-rate field. Entry 0 is reserved and
 * entry 1 only applies when SDHC_SPIEN is set, which this driver never
 * does, so the search starts at index 2.
 */
static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
	256, 384, 512, 768, 1024};

/*
 * MX21, MX27: pick the smallest divider that brings fin down to at most
 * fspi; store the resulting rate in *fres and return the table index.
 */
static unsigned int spi_imx_clkdiv_1(unsigned int fin,
		unsigned int fspi, unsigned int max, unsigned int *fres)
{
	int idx = 2;

	while (idx < max && fspi * mxc_clkdivs[idx] < fin)
		idx++;

	*fres = fin / mxc_clkdivs[idx];
	return idx;
}
228
/*
 * MX1, MX31, MX35, MX51 CSPI: power-of-two divider starting at /4. Returns
 * the exponent for the DR field and stores the resulting rate in *fres.
 */
static unsigned int spi_imx_clkdiv_2(unsigned int fin,
		unsigned int fspi, unsigned int *fres)
{
	unsigned int shift;

	for (shift = 0; shift < 7; shift++) {
		if (fspi * (4U << shift) >= fin)
			break;
	}

	*fres = fin / (4U << shift);
	return shift;
}
245
/* FIFO access width (1, 2 or 4 bytes) needed for a given word length. */
static int spi_imx_bytes_per_word(const int bits_per_word)
{
	if (bits_per_word > 16)
		return 4;

	return bits_per_word > 8 ? 2 : 1;
}
255
spi_imx_can_dma(struct spi_controller * controller,struct spi_device * spi,struct spi_transfer * transfer)256 static bool spi_imx_can_dma(struct spi_controller *controller, struct spi_device *spi,
257 struct spi_transfer *transfer)
258 {
259 struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
260
261 if (!use_dma || controller->fallback)
262 return false;
263
264 if (!controller->dma_rx)
265 return false;
266
267 /*
268 * Due to Freescale errata ERR003775 "eCSPI: Burst completion by Chip
269 * Select (SS) signal in Slave mode is not functional" burst size must
270 * be set exactly to the size of the transfer. This limit SPI transaction
271 * with maximum 2^12 bits.
272 */
273 if (transfer->len > MX53_MAX_TRANSFER_BYTES && spi_imx->target_mode)
274 return false;
275
276 if (transfer->len < spi_imx->devtype_data->fifo_size)
277 return false;
278
279 /* DMA only can transmit data in bytes */
280 if (spi_imx->bits_per_word != 8 && spi_imx->bits_per_word != 16 &&
281 spi_imx->bits_per_word != 32)
282 return false;
283
284 if (transfer->len >= MAX_SDMA_BD_BYTES)
285 return false;
286
287 spi_imx->dynamic_burst = 0;
288
289 return true;
290 }
291
292 /*
293 * Note the number of natively supported chip selects for MX51 is 4. Some
294 * devices may have less actual SS pins but the register map supports 4. When
295 * using gpio chip selects the cs values passed into the macros below can go
296 * outside the range 0 - 3. We therefore need to limit the cs value to avoid
297 * corrupting bits outside the allocated locations.
298 *
299 * The simplest way to do this is to just mask the cs bits to 2 bits. This
300 * still allows all 4 native chip selects to work as well as gpio chip selects
301 * (which can use any of the 4 chip select configurations).
302 */
303
304 #define MX51_ECSPI_CTRL 0x08
305 #define MX51_ECSPI_CTRL_ENABLE (1 << 0)
306 #define MX51_ECSPI_CTRL_XCH (1 << 2)
307 #define MX51_ECSPI_CTRL_SMC (1 << 3)
308 #define MX51_ECSPI_CTRL_MODE_MASK (0xf << 4)
309 #define MX51_ECSPI_CTRL_DRCTL(drctl) ((drctl) << 16)
310 #define MX51_ECSPI_CTRL_POSTDIV_OFFSET 8
311 #define MX51_ECSPI_CTRL_PREDIV_OFFSET 12
312 #define MX51_ECSPI_CTRL_CS(cs) ((cs & 3) << 18)
313 #define MX51_ECSPI_CTRL_BL_OFFSET 20
314 #define MX51_ECSPI_CTRL_BL_MASK (0xfff << 20)
315
316 #define MX51_ECSPI_CONFIG 0x0c
317 #define MX51_ECSPI_CONFIG_SCLKPHA(cs) (1 << ((cs & 3) + 0))
318 #define MX51_ECSPI_CONFIG_SCLKPOL(cs) (1 << ((cs & 3) + 4))
319 #define MX51_ECSPI_CONFIG_SBBCTRL(cs) (1 << ((cs & 3) + 8))
320 #define MX51_ECSPI_CONFIG_SSBPOL(cs) (1 << ((cs & 3) + 12))
321 #define MX51_ECSPI_CONFIG_DATACTL(cs) (1 << ((cs & 3) + 16))
322 #define MX51_ECSPI_CONFIG_SCLKCTL(cs) (1 << ((cs & 3) + 20))
323
324 #define MX51_ECSPI_INT 0x10
325 #define MX51_ECSPI_INT_TEEN (1 << 0)
326 #define MX51_ECSPI_INT_RREN (1 << 3)
327 #define MX51_ECSPI_INT_RDREN (1 << 4)
328
329 #define MX51_ECSPI_DMA 0x14
330 #define MX51_ECSPI_DMA_TX_WML(wml) ((wml) & 0x3f)
331 #define MX51_ECSPI_DMA_RX_WML(wml) (((wml) & 0x3f) << 16)
332 #define MX51_ECSPI_DMA_RXT_WML(wml) (((wml) & 0x3f) << 24)
333
334 #define MX51_ECSPI_DMA_TEDEN (1 << 7)
335 #define MX51_ECSPI_DMA_RXDEN (1 << 23)
336 #define MX51_ECSPI_DMA_RXTDEN (1 << 31)
337
338 #define MX51_ECSPI_STAT 0x18
339 #define MX51_ECSPI_STAT_RR (1 << 3)
340
341 #define MX51_ECSPI_PERIOD 0x1c
342 #define MX51_ECSPI_PERIOD_MASK 0x7fff
343 /*
344 * As measured on the i.MX6, the SPI host controller inserts a 4 SPI-Clock
345 * (SCLK) delay after each burst if the PERIOD reg is 0x0. This value will be
346 * called MX51_ECSPI_PERIOD_MIN_DELAY_SCK.
347 *
348 * If the PERIOD register is != 0, the controller inserts a delay of
349 * MX51_ECSPI_PERIOD_MIN_DELAY_SCK + register value + 1 SCLK after each burst.
350 */
351 #define MX51_ECSPI_PERIOD_MIN_DELAY_SCK 4
352
353 #define MX51_ECSPI_TESTREG 0x20
354 #define MX51_ECSPI_TESTREG_LBC BIT(31)
355
/*
 * Pop one 32-bit word from the RX FIFO and store it in the rx buffer,
 * byte- or halfword-swapping on little-endian hosts so the in-memory byte
 * order matches the word length being transferred.
 */
static void spi_imx_buf_rx_swap_u32(struct spi_imx_data *spi_imx)
{
	unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);

	if (spi_imx->rx_buf) {
#ifdef __LITTLE_ENDIAN
		unsigned int bytes_per_word;

		bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);
		if (bytes_per_word == 1)
			swab32s(&val);	/* 8-bit words: reverse all four bytes */
		else if (bytes_per_word == 2)
			swahw32s(&val);	/* 16-bit words: swap the two halfwords */
#endif
		*(u32 *)spi_imx->rx_buf = val;
		spi_imx->rx_buf += sizeof(u32);
	}

	/* The FIFO word is consumed even when there is no rx buffer. */
	spi_imx->remainder -= sizeof(u32);
}
376
/*
 * PIO RX path with endian fixup. Aligned data goes through the 32-bit
 * helper; a trailing partial word (remainder % 4 bytes) is unpacked
 * MSB-first byte by byte, except for 16-bit words where a plain u16 read
 * suffices.
 */
static void spi_imx_buf_rx_swap(struct spi_imx_data *spi_imx)
{
	int unaligned;
	u32 val;

	unaligned = spi_imx->remainder % 4;

	if (!unaligned) {
		spi_imx_buf_rx_swap_u32(spi_imx);
		return;
	}

	if (spi_imx_bytes_per_word(spi_imx->bits_per_word) == 2) {
		spi_imx_buf_rx_u16(spi_imx);
		return;
	}

	val = readl(spi_imx->base + MXC_CSPIRXDATA);

	/* Valid bytes of the final word sit in its most-significant end. */
	while (unaligned--) {
		if (spi_imx->rx_buf) {
			*(u8 *)spi_imx->rx_buf = (val >> (8 * unaligned)) & 0xff;
			spi_imx->rx_buf++;
		}
		spi_imx->remainder--;
	}
}
404
/*
 * Read one 32-bit word from the tx buffer (zero if none), byte- or
 * halfword-swap it on little-endian hosts to match the transfer word
 * length, and push it into the TX FIFO.
 */
static void spi_imx_buf_tx_swap_u32(struct spi_imx_data *spi_imx)
{
	u32 val = 0;
#ifdef __LITTLE_ENDIAN
	unsigned int bytes_per_word;
#endif

	if (spi_imx->tx_buf) {
		val = *(u32 *)spi_imx->tx_buf;
		spi_imx->tx_buf += sizeof(u32);
	}

	spi_imx->count -= sizeof(u32);
#ifdef __LITTLE_ENDIAN
	bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);

	if (bytes_per_word == 1)
		swab32s(&val);	/* 8-bit words: reverse all four bytes */
	else if (bytes_per_word == 2)
		swahw32s(&val);	/* 16-bit words: swap the two halfwords */
#endif
	writel(val, spi_imx->base + MXC_CSPITXDATA);
}
428
/*
 * PIO TX path with endian fixup, mirror of spi_imx_buf_rx_swap(): aligned
 * data uses the 32-bit helper, a trailing partial word is packed MSB-first
 * byte by byte, and 16-bit words fall back to the plain u16 helper.
 */
static void spi_imx_buf_tx_swap(struct spi_imx_data *spi_imx)
{
	int unaligned;
	u32 val = 0;

	unaligned = spi_imx->count % 4;

	if (!unaligned) {
		spi_imx_buf_tx_swap_u32(spi_imx);
		return;
	}

	if (spi_imx_bytes_per_word(spi_imx->bits_per_word) == 2) {
		spi_imx_buf_tx_u16(spi_imx);
		return;
	}

	/* Pack the final bytes into the most-significant end of the word. */
	while (unaligned--) {
		if (spi_imx->tx_buf) {
			val |= *(u8 *)spi_imx->tx_buf << (8 * unaligned);
			spi_imx->tx_buf++;
		}
		spi_imx->count--;
	}

	writel(val, spi_imx->base + MXC_CSPITXDATA);
}
456
/*
 * Target-mode RX for i.MX53-type eCSPI: pop one FIFO word, endian-fix it,
 * and copy only the valid bytes. The last word of a burst may carry fewer
 * than four bytes, which sit in the most-significant end of the register.
 */
static void mx53_ecspi_rx_target(struct spi_imx_data *spi_imx)
{
	u32 val = readl(spi_imx->base + MXC_CSPIRXDATA);
#ifdef __LITTLE_ENDIAN
	unsigned int bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);

	if (bytes_per_word == 1)
		swab32s(&val);
	else if (bytes_per_word == 2)
		swahw32s(&val);
#endif
	if (spi_imx->rx_buf) {
		/* Bytes valid in this word: 4, except for a short tail. */
		int n_bytes = spi_imx->target_burst % sizeof(val);

		if (!n_bytes)
			n_bytes = sizeof(val);

		memcpy(spi_imx->rx_buf,
		       ((u8 *)&val) + sizeof(val) - n_bytes, n_bytes);

		spi_imx->rx_buf += n_bytes;
		spi_imx->target_burst -= n_bytes;
	}

	/* A whole FIFO word was consumed regardless of buffer presence. */
	spi_imx->remainder -= sizeof(u32);
}
483
/*
 * Target-mode TX for i.MX53-type eCSPI, mirror of mx53_ecspi_rx_target():
 * pack up to four payload bytes into the most-significant end of the word,
 * endian-fix, and push to the TX FIFO.
 */
static void mx53_ecspi_tx_target(struct spi_imx_data *spi_imx)
{
	u32 val = 0;
	/* Bytes to send in this word: 4, except for a short tail. */
	int n_bytes = spi_imx->count % sizeof(val);
#ifdef __LITTLE_ENDIAN
	unsigned int bytes_per_word;
#endif

	if (!n_bytes)
		n_bytes = sizeof(val);

	if (spi_imx->tx_buf) {
		memcpy(((u8 *)&val) + sizeof(val) - n_bytes,
		       spi_imx->tx_buf, n_bytes);
		spi_imx->tx_buf += n_bytes;
	}

	spi_imx->count -= n_bytes;

#ifdef __LITTLE_ENDIAN
	bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);
	if (bytes_per_word == 1)
		swab32s(&val);
	else if (bytes_per_word == 2)
		swahw32s(&val);
#endif
	writel(val, spi_imx->base + MXC_CSPITXDATA);
}
512
/* MX51 eCSPI */
/*
 * Compute the PRE/POST divider fields that bring spi_clk down to at most
 * fspi. Stores the achieved SCLK rate in *fres and returns the two fields
 * already shifted into CTRL-register position; returns 0xff (an invalid
 * field combination) after logging when fspi cannot be reached.
 */
static unsigned int mx51_ecspi_clkdiv(struct spi_imx_data *spi_imx,
				      unsigned int fspi, unsigned int *fres)
{
	/*
	 * there are two 4-bit dividers, the pre-divider divides by
	 * $pre, the post-divider by 2^$post
	 */
	unsigned int pre, post;
	unsigned int fin = spi_imx->spi_clk;

	fspi = min(fspi, fin);

	post = fls(fin) - fls(fspi);
	if (fin > fspi << post)
		post++;

	/* now we have: (fin <= fspi << post) with post being minimal */

	/* The post-divider starts at 2^0 once pre can cover a factor of 16. */
	post = max(4U, post) - 4;
	if (unlikely(post > 0xf)) {
		dev_err(spi_imx->dev, "cannot set clock freq: %u (base freq: %u)\n",
			fspi, fin);
		return 0xff;
	}

	pre = DIV_ROUND_UP(fin, fspi << post) - 1;

	dev_dbg(spi_imx->dev, "%s: fin: %u, fspi: %u, post: %u, pre: %u\n",
			__func__, fin, fspi, post, pre);

	/* Resulting frequency for the SCLK line. */
	*fres = (fin / (pre + 1)) >> post;

	return (pre << MX51_ECSPI_CTRL_PREDIV_OFFSET) |
		(post << MX51_ECSPI_CTRL_POSTDIV_OFFSET);
}
550
mx51_ecspi_intctrl(struct spi_imx_data * spi_imx,int enable)551 static void mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int enable)
552 {
553 unsigned int val = 0;
554
555 if (enable & MXC_INT_TE)
556 val |= MX51_ECSPI_INT_TEEN;
557
558 if (enable & MXC_INT_RR)
559 val |= MX51_ECSPI_INT_RREN;
560
561 if (enable & MXC_INT_RDR)
562 val |= MX51_ECSPI_INT_RDREN;
563
564 writel(val, spi_imx->base + MX51_ECSPI_INT);
565 }
566
mx51_ecspi_trigger(struct spi_imx_data * spi_imx)567 static void mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
568 {
569 u32 reg;
570
571 if (spi_imx->usedma) {
572 reg = readl(spi_imx->base + MX51_ECSPI_DMA);
573 reg |= MX51_ECSPI_DMA_TEDEN | MX51_ECSPI_DMA_RXDEN;
574 writel(reg, spi_imx->base + MX51_ECSPI_DMA);
575 } else {
576 reg = readl(spi_imx->base + MX51_ECSPI_CTRL);
577 reg |= MX51_ECSPI_CTRL_XCH;
578 writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
579 }
580 }
581
mx51_ecspi_disable(struct spi_imx_data * spi_imx)582 static void mx51_ecspi_disable(struct spi_imx_data *spi_imx)
583 {
584 u32 ctrl;
585
586 ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
587 ctrl &= ~MX51_ECSPI_CTRL_ENABLE;
588 writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
589 }
590
mx51_ecspi_channel(const struct spi_device * spi)591 static int mx51_ecspi_channel(const struct spi_device *spi)
592 {
593 if (!spi_get_csgpiod(spi, 0))
594 return spi_get_chipselect(spi, 0);
595 return spi->controller->unused_native_cs;
596 }
597
/*
 * Per-message setup for eCSPI: program host/target mode, SPI_READY
 * handling, chip select, loopback and the per-channel polarity bits, then
 * wait for the CONFIG register change to propagate at SCLK speed.
 * Returns 0 (the delay path cannot fail).
 */
static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
				      struct spi_message *msg)
{
	struct spi_device *spi = msg->spi;
	struct spi_transfer *xfer;
	u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
	u32 min_speed_hz = ~0U;
	u32 testreg, delay;
	u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
	u32 current_cfg = cfg;
	int channel = mx51_ecspi_channel(spi);

	/* set Host or Target mode */
	if (spi_imx->target_mode)
		ctrl &= ~MX51_ECSPI_CTRL_MODE_MASK;
	else
		ctrl |= MX51_ECSPI_CTRL_MODE_MASK;

	/*
	 * Enable SPI_RDY handling (falling edge/level triggered).
	 */
	if (spi->mode & SPI_READY)
		ctrl |= MX51_ECSPI_CTRL_DRCTL(spi_imx->spi_drctl);

	/* set chip select to use */
	ctrl |= MX51_ECSPI_CTRL_CS(channel);

	/*
	 * The ctrl register must be written first, with the EN bit set other
	 * registers must not be written to.
	 */
	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);

	testreg = readl(spi_imx->base + MX51_ECSPI_TESTREG);
	if (spi->mode & SPI_LOOP)
		testreg |= MX51_ECSPI_TESTREG_LBC;
	else
		testreg &= ~MX51_ECSPI_TESTREG_LBC;
	writel(testreg, spi_imx->base + MX51_ECSPI_TESTREG);

	/*
	 * eCSPI burst completion by Chip Select signal in Target mode
	 * is not functional for imx53 Soc, config SPI burst completed when
	 * BURST_LENGTH + 1 bits are received
	 */
	if (spi_imx->target_mode)
		cfg &= ~MX51_ECSPI_CONFIG_SBBCTRL(channel);
	else
		cfg |= MX51_ECSPI_CONFIG_SBBCTRL(channel);

	if (spi->mode & SPI_CPOL) {
		cfg |= MX51_ECSPI_CONFIG_SCLKPOL(channel);
		cfg |= MX51_ECSPI_CONFIG_SCLKCTL(channel);
	} else {
		cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(channel);
		cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(channel);
	}

	if (spi->mode & SPI_MOSI_IDLE_LOW)
		cfg |= MX51_ECSPI_CONFIG_DATACTL(channel);
	else
		cfg &= ~MX51_ECSPI_CONFIG_DATACTL(channel);

	if (spi->mode & SPI_CS_HIGH)
		cfg |= MX51_ECSPI_CONFIG_SSBPOL(channel);
	else
		cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(channel);

	/* Nothing changed in CONFIG: skip the write and the settle delay. */
	if (cfg == current_cfg)
		return 0;

	writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);

	/*
	 * Wait until the changes in the configuration register CONFIGREG
	 * propagate into the hardware. It takes exactly one tick of the
	 * SCLK clock, but we will wait two SCLK clock just to be sure. The
	 * effect of the delay it takes for the hardware to apply changes
	 * is noticable if the SCLK clock run very slow. In such a case, if
	 * the polarity of SCLK should be inverted, the GPIO ChipSelect might
	 * be asserted before the SCLK polarity changes, which would disrupt
	 * the SPI communication as the device on the other end would consider
	 * the change of SCLK polarity as a clock tick already.
	 *
	 * Because spi_imx->spi_bus_clk is only set in prepare_message
	 * callback, iterate over all the transfers in spi_message, find the
	 * one with lowest bus frequency, and use that bus frequency for the
	 * delay calculation. In case all transfers have speed_hz == 0, then
	 * min_speed_hz is ~0 and the resulting delay is zero.
	 */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!xfer->speed_hz)
			continue;
		min_speed_hz = min(xfer->speed_hz, min_speed_hz);
	}

	delay = (2 * 1000000) / min_speed_hz;
	if (likely(delay < 10))	/* SCLK is faster than 200 kHz */
		udelay(delay);
	else			/* SCLK is _very_ slow */
		usleep_range(delay, delay + 10);

	return 0;
}
702
mx51_configure_cpha(struct spi_imx_data * spi_imx,struct spi_device * spi)703 static void mx51_configure_cpha(struct spi_imx_data *spi_imx,
704 struct spi_device *spi)
705 {
706 bool cpha = (spi->mode & SPI_CPHA);
707 bool flip_cpha = (spi->mode & SPI_RX_CPHA_FLIP) && spi_imx->rx_only;
708 u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
709 int channel = mx51_ecspi_channel(spi);
710
711 /* Flip cpha logical value iff flip_cpha */
712 cpha ^= flip_cpha;
713
714 if (cpha)
715 cfg |= MX51_ECSPI_CONFIG_SCLKPHA(channel);
716 else
717 cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(channel);
718
719 writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
720 }
721
/*
 * Per-transfer setup for eCSPI: burst length, clock divider, clock phase,
 * the ERR009165 SMC workaround, and the inter-word delay (PERIOD register,
 * expressed in SCLK cycles). Returns 0, a negative error from
 * spi_delay_to_ns(), or -EINVAL when the delay exceeds the PERIOD field.
 */
static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
				       struct spi_device *spi, struct spi_transfer *t)
{
	u32 ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
	u64 word_delay_sck;
	u32 clk;

	/* Clear BL field and set the right value */
	ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
	if (spi_imx->target_mode)
		/* Target bursts complete after target_burst * 8 bits. */
		ctrl |= (spi_imx->target_burst * 8 - 1)
			<< MX51_ECSPI_CTRL_BL_OFFSET;
	else {
		ctrl |= (spi_imx->bits_per_word - 1)
			<< MX51_ECSPI_CTRL_BL_OFFSET;
	}

	/* set clock speed */
	ctrl &= ~(0xf << MX51_ECSPI_CTRL_POSTDIV_OFFSET |
		  0xf << MX51_ECSPI_CTRL_PREDIV_OFFSET);

	if (!spi_imx->target_mode) {
		ctrl |= mx51_ecspi_clkdiv(spi_imx, spi_imx->spi_bus_clk, &clk);
		/* Record the rate actually achieved by the dividers. */
		spi_imx->spi_bus_clk = clk;
	}

	mx51_configure_cpha(spi_imx, spi);

	/*
	 * ERR009165: work in XHC mode instead of SMC as PIO on the chips
	 * before i.mx6ul.
	 */
	if (spi_imx->usedma && spi_imx->devtype_data->tx_glitch_fixed)
		ctrl |= MX51_ECSPI_CTRL_SMC;
	else
		ctrl &= ~MX51_ECSPI_CTRL_SMC;

	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);

	/*
	 * Calculate word delay in SPI Clock (SCLK) cycles. The hardware
	 * already inserts MX51_ECSPI_PERIOD_MIN_DELAY_SCK cycles when the
	 * PERIOD register is 0, and (register + 1) extra cycles otherwise,
	 * so the requested delay is reduced accordingly.
	 */
	if (t->word_delay.value == 0) {
		word_delay_sck = 0;
	} else if (t->word_delay.unit == SPI_DELAY_UNIT_SCK) {
		word_delay_sck = t->word_delay.value;

		if (word_delay_sck <= MX51_ECSPI_PERIOD_MIN_DELAY_SCK)
			word_delay_sck = 0;
		else if (word_delay_sck <= MX51_ECSPI_PERIOD_MIN_DELAY_SCK + 1)
			word_delay_sck = 1;
		else
			word_delay_sck -= MX51_ECSPI_PERIOD_MIN_DELAY_SCK + 1;
	} else {
		int word_delay_ns;

		word_delay_ns = spi_delay_to_ns(&t->word_delay, t);
		if (word_delay_ns < 0)
			return word_delay_ns;

		if (word_delay_ns <= mul_u64_u32_div(NSEC_PER_SEC,
						     MX51_ECSPI_PERIOD_MIN_DELAY_SCK,
						     spi_imx->spi_bus_clk)) {
			word_delay_sck = 0;
		} else if (word_delay_ns <= mul_u64_u32_div(NSEC_PER_SEC,
							    MX51_ECSPI_PERIOD_MIN_DELAY_SCK + 1,
							    spi_imx->spi_bus_clk)) {
			word_delay_sck = 1;
		} else {
			word_delay_ns -= mul_u64_u32_div(NSEC_PER_SEC,
							 MX51_ECSPI_PERIOD_MIN_DELAY_SCK + 1,
							 spi_imx->spi_bus_clk);

			word_delay_sck = DIV_U64_ROUND_UP((u64)word_delay_ns * spi_imx->spi_bus_clk,
							  NSEC_PER_SEC);
		}
	}

	if (!FIELD_FIT(MX51_ECSPI_PERIOD_MASK, word_delay_sck))
		return -EINVAL;

	writel(FIELD_PREP(MX51_ECSPI_PERIOD_MASK, word_delay_sck),
	       spi_imx->base + MX51_ECSPI_PERIOD);

	return 0;
}
806
mx51_setup_wml(struct spi_imx_data * spi_imx)807 static void mx51_setup_wml(struct spi_imx_data *spi_imx)
808 {
809 u32 tx_wml = 0;
810
811 if (spi_imx->devtype_data->tx_glitch_fixed)
812 tx_wml = spi_imx->wml;
813 /*
814 * Configure the DMA register: setup the watermark
815 * and enable DMA request.
816 */
817 writel(MX51_ECSPI_DMA_RX_WML(spi_imx->wml - 1) |
818 MX51_ECSPI_DMA_TX_WML(tx_wml) |
819 MX51_ECSPI_DMA_RXT_WML(spi_imx->wml) |
820 MX51_ECSPI_DMA_RXTDEN, spi_imx->base + MX51_ECSPI_DMA);
821 }
822
mx51_ecspi_rx_available(struct spi_imx_data * spi_imx)823 static int mx51_ecspi_rx_available(struct spi_imx_data *spi_imx)
824 {
825 return readl(spi_imx->base + MX51_ECSPI_STAT) & MX51_ECSPI_STAT_RR;
826 }
827
mx51_ecspi_reset(struct spi_imx_data * spi_imx)828 static void mx51_ecspi_reset(struct spi_imx_data *spi_imx)
829 {
830 /* drain receive buffer */
831 while (mx51_ecspi_rx_available(spi_imx))
832 readl(spi_imx->base + MXC_CSPIRXDATA);
833 }
834
835 #define MX31_INTREG_TEEN (1 << 0)
836 #define MX31_INTREG_RREN (1 << 3)
837
838 #define MX31_CSPICTRL_ENABLE (1 << 0)
839 #define MX31_CSPICTRL_HOST (1 << 1)
840 #define MX31_CSPICTRL_XCH (1 << 2)
841 #define MX31_CSPICTRL_SMC (1 << 3)
842 #define MX31_CSPICTRL_POL (1 << 4)
843 #define MX31_CSPICTRL_PHA (1 << 5)
844 #define MX31_CSPICTRL_SSCTL (1 << 6)
845 #define MX31_CSPICTRL_SSPOL (1 << 7)
846 #define MX31_CSPICTRL_BC_SHIFT 8
847 #define MX35_CSPICTRL_BL_SHIFT 20
848 #define MX31_CSPICTRL_CS_SHIFT 24
849 #define MX35_CSPICTRL_CS_SHIFT 12
850 #define MX31_CSPICTRL_DR_SHIFT 16
851
852 #define MX31_CSPI_DMAREG 0x10
853 #define MX31_DMAREG_RH_DEN (1<<4)
854 #define MX31_DMAREG_TH_DEN (1<<1)
855
856 #define MX31_CSPISTATUS 0x14
857 #define MX31_STATUS_RR (1 << 3)
858
859 #define MX31_CSPI_TESTREG 0x1C
860 #define MX31_TEST_LBC (1 << 14)
861
862 /* These functions also work for the i.MX35, but be aware that
863 * the i.MX35 has a slightly different register layout for bits
864 * we do not use here.
865 */
mx31_intctrl(struct spi_imx_data * spi_imx,int enable)866 static void mx31_intctrl(struct spi_imx_data *spi_imx, int enable)
867 {
868 unsigned int val = 0;
869
870 if (enable & MXC_INT_TE)
871 val |= MX31_INTREG_TEEN;
872 if (enable & MXC_INT_RR)
873 val |= MX31_INTREG_RREN;
874
875 writel(val, spi_imx->base + MXC_CSPIINT);
876 }
877
mx31_trigger(struct spi_imx_data * spi_imx)878 static void mx31_trigger(struct spi_imx_data *spi_imx)
879 {
880 unsigned int reg;
881
882 reg = readl(spi_imx->base + MXC_CSPICTRL);
883 reg |= MX31_CSPICTRL_XCH;
884 writel(reg, spi_imx->base + MXC_CSPICTRL);
885 }
886
/* No per-message setup is needed on MX31-type CSPI cores. */
static int mx31_prepare_message(struct spi_imx_data *spi_imx,
				struct spi_message *msg)
{
	return 0;
}
892
/*
 * Per-transfer setup for MX31/MX35 CSPI: clock divider, burst length,
 * mode bits, chip select, loopback and - in DMA mode - the DMA request
 * thresholds. Always returns 0.
 */
static int mx31_prepare_transfer(struct spi_imx_data *spi_imx,
				 struct spi_device *spi, struct spi_transfer *t)
{
	unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_HOST;
	unsigned int clk;

	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->spi_bus_clk, &clk) <<
		MX31_CSPICTRL_DR_SHIFT;
	/* Record the rate actually achieved by the divider. */
	spi_imx->spi_bus_clk = clk;

	/* The burst-length field moved between the MX31 and MX35 layouts. */
	if (is_imx35_cspi(spi_imx)) {
		reg |= (spi_imx->bits_per_word - 1) << MX35_CSPICTRL_BL_SHIFT;
		reg |= MX31_CSPICTRL_SSCTL;
	} else {
		reg |= (spi_imx->bits_per_word - 1) << MX31_CSPICTRL_BC_SHIFT;
	}

	if (spi->mode & SPI_CPHA)
		reg |= MX31_CSPICTRL_PHA;
	if (spi->mode & SPI_CPOL)
		reg |= MX31_CSPICTRL_POL;
	if (spi->mode & SPI_CS_HIGH)
		reg |= MX31_CSPICTRL_SSPOL;
	/* Native chip selects only; GPIO chip selects bypass this field. */
	if (!spi_get_csgpiod(spi, 0))
		reg |= (spi_get_chipselect(spi, 0)) <<
			(is_imx35_cspi(spi_imx) ? MX35_CSPICTRL_CS_SHIFT :
						  MX31_CSPICTRL_CS_SHIFT);

	if (spi_imx->usedma)
		reg |= MX31_CSPICTRL_SMC;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	reg = readl(spi_imx->base + MX31_CSPI_TESTREG);
	if (spi->mode & SPI_LOOP)
		reg |= MX31_TEST_LBC;
	else
		reg &= ~MX31_TEST_LBC;
	writel(reg, spi_imx->base + MX31_CSPI_TESTREG);

	if (spi_imx->usedma) {
		/*
		 * configure DMA requests when RXFIFO is half full and
		 * when TXFIFO is half empty
		 */
		writel(MX31_DMAREG_RH_DEN | MX31_DMAREG_TH_DEN,
			spi_imx->base + MX31_CSPI_DMAREG);
	}

	return 0;
}
944
mx31_rx_available(struct spi_imx_data * spi_imx)945 static int mx31_rx_available(struct spi_imx_data *spi_imx)
946 {
947 return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR;
948 }
949
mx31_reset(struct spi_imx_data * spi_imx)950 static void mx31_reset(struct spi_imx_data *spi_imx)
951 {
952 /* drain receive buffer */
953 while (readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR)
954 readl(spi_imx->base + MXC_CSPIRXDATA);
955 }
956
957 #define MX21_INTREG_RR (1 << 4)
958 #define MX21_INTREG_TEEN (1 << 9)
959 #define MX21_INTREG_RREN (1 << 13)
960
961 #define MX21_CSPICTRL_POL (1 << 5)
962 #define MX21_CSPICTRL_PHA (1 << 6)
963 #define MX21_CSPICTRL_SSPOL (1 << 8)
964 #define MX21_CSPICTRL_XCH (1 << 9)
965 #define MX21_CSPICTRL_ENABLE (1 << 10)
966 #define MX21_CSPICTRL_HOST (1 << 11)
967 #define MX21_CSPICTRL_DR_SHIFT 14
968 #define MX21_CSPICTRL_CS_SHIFT 19
969
mx21_intctrl(struct spi_imx_data * spi_imx,int enable)970 static void mx21_intctrl(struct spi_imx_data *spi_imx, int enable)
971 {
972 unsigned int val = 0;
973
974 if (enable & MXC_INT_TE)
975 val |= MX21_INTREG_TEEN;
976 if (enable & MXC_INT_RR)
977 val |= MX21_INTREG_RREN;
978
979 writel(val, spi_imx->base + MXC_CSPIINT);
980 }
981
mx21_trigger(struct spi_imx_data * spi_imx)982 static void mx21_trigger(struct spi_imx_data *spi_imx)
983 {
984 unsigned int reg;
985
986 reg = readl(spi_imx->base + MXC_CSPICTRL);
987 reg |= MX21_CSPICTRL_XCH;
988 writel(reg, spi_imx->base + MXC_CSPICTRL);
989 }
990
/* No per-message setup is needed on MX21/MX27 CSPI cores. */
static int mx21_prepare_message(struct spi_imx_data *spi_imx,
				struct spi_message *msg)
{
	return 0;
}
996
/*
 * Per-transfer setup for MX21/MX27 CSPI: clock divider (the i.MX27 table
 * has fewer usable entries than the i.MX21's), word length, mode bits and
 * native chip select. Always returns 0.
 */
static int mx21_prepare_transfer(struct spi_imx_data *spi_imx,
				 struct spi_device *spi, struct spi_transfer *t)
{
	unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_HOST;
	unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18;
	unsigned int clk;

	reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, spi_imx->spi_bus_clk, max, &clk)
		<< MX21_CSPICTRL_DR_SHIFT;
	/* Record the rate actually achieved by the divider. */
	spi_imx->spi_bus_clk = clk;

	reg |= spi_imx->bits_per_word - 1;

	if (spi->mode & SPI_CPHA)
		reg |= MX21_CSPICTRL_PHA;
	if (spi->mode & SPI_CPOL)
		reg |= MX21_CSPICTRL_POL;
	if (spi->mode & SPI_CS_HIGH)
		reg |= MX21_CSPICTRL_SSPOL;
	/* Native chip selects only; GPIO chip selects bypass this field. */
	if (!spi_get_csgpiod(spi, 0))
		reg |= spi_get_chipselect(spi, 0) << MX21_CSPICTRL_CS_SHIFT;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	return 0;
}
1023
mx21_rx_available(struct spi_imx_data * spi_imx)1024 static int mx21_rx_available(struct spi_imx_data *spi_imx)
1025 {
1026 return readl(spi_imx->base + MXC_CSPIINT) & MX21_INTREG_RR;
1027 }
1028
/* Trigger a software reset of the CSPI block via the reset register. */
static void mx21_reset(struct spi_imx_data *spi_imx)
{
	writel(1, spi_imx->base + MXC_RESET);
}
1033
1034 #define MX1_INTREG_RR (1 << 3)
1035 #define MX1_INTREG_TEEN (1 << 8)
1036 #define MX1_INTREG_RREN (1 << 11)
1037
1038 #define MX1_CSPICTRL_POL (1 << 4)
1039 #define MX1_CSPICTRL_PHA (1 << 5)
1040 #define MX1_CSPICTRL_XCH (1 << 8)
1041 #define MX1_CSPICTRL_ENABLE (1 << 9)
1042 #define MX1_CSPICTRL_HOST (1 << 10)
1043 #define MX1_CSPICTRL_DR_SHIFT 13
1044
mx1_intctrl(struct spi_imx_data * spi_imx,int enable)1045 static void mx1_intctrl(struct spi_imx_data *spi_imx, int enable)
1046 {
1047 unsigned int val = 0;
1048
1049 if (enable & MXC_INT_TE)
1050 val |= MX1_INTREG_TEEN;
1051 if (enable & MXC_INT_RR)
1052 val |= MX1_INTREG_RREN;
1053
1054 writel(val, spi_imx->base + MXC_CSPIINT);
1055 }
1056
mx1_trigger(struct spi_imx_data * spi_imx)1057 static void mx1_trigger(struct spi_imx_data *spi_imx)
1058 {
1059 unsigned int reg;
1060
1061 reg = readl(spi_imx->base + MXC_CSPICTRL);
1062 reg |= MX1_CSPICTRL_XCH;
1063 writel(reg, spi_imx->base + MXC_CSPICTRL);
1064 }
1065
/* No per-message setup is required on i.MX1 CSPI controllers. */
static int mx1_prepare_message(struct spi_imx_data *spi_imx,
			       struct spi_message *msg)
{
	return 0;
}
1071
/*
 * Program the MX1 control register for one transfer: clock divider, word
 * size and SPI clock mode. The MX1 has no native CS-polarity/number fields.
 * Always returns 0.
 */
static int mx1_prepare_transfer(struct spi_imx_data *spi_imx,
				struct spi_device *spi, struct spi_transfer *t)
{
	unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_HOST;
	unsigned int clk;

	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->spi_bus_clk, &clk) <<
		MX1_CSPICTRL_DR_SHIFT;
	/* Record the bus clock rate actually achieved by the divider */
	spi_imx->spi_bus_clk = clk;

	/* The bit-count field encodes bits_per_word - 1 */
	reg |= spi_imx->bits_per_word - 1;

	if (spi->mode & SPI_CPHA)
		reg |= MX1_CSPICTRL_PHA;
	if (spi->mode & SPI_CPOL)
		reg |= MX1_CSPICTRL_POL;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	return 0;
}
1093
mx1_rx_available(struct spi_imx_data * spi_imx)1094 static int mx1_rx_available(struct spi_imx_data *spi_imx)
1095 {
1096 return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR;
1097 }
1098
/* Trigger a software reset of the CSPI block via the reset register. */
static void mx1_reset(struct spi_imx_data *spi_imx)
{
	writel(1, spi_imx->base + MXC_RESET);
}
1103
/* i.MX1 CSPI: PIO-only controller with an 8-word FIFO, host mode only. */
static struct spi_imx_devtype_data imx1_cspi_devtype_data = {
	.intctrl = mx1_intctrl,
	.prepare_message = mx1_prepare_message,
	.prepare_transfer = mx1_prepare_transfer,
	.trigger = mx1_trigger,
	.rx_available = mx1_rx_available,
	.reset = mx1_reset,
	.fifo_size = 8,
	.has_dmamode = false,
	.dynamic_burst = false,
	.has_targetmode = false,
	.devtype = IMX1_CSPI,
};
1117
/* i.MX21 CSPI: PIO-only controller with an 8-word FIFO, host mode only. */
static struct spi_imx_devtype_data imx21_cspi_devtype_data = {
	.intctrl = mx21_intctrl,
	.prepare_message = mx21_prepare_message,
	.prepare_transfer = mx21_prepare_transfer,
	.trigger = mx21_trigger,
	.rx_available = mx21_rx_available,
	.reset = mx21_reset,
	.fifo_size = 8,
	.has_dmamode = false,
	.dynamic_burst = false,
	.has_targetmode = false,
	.devtype = IMX21_CSPI,
};
1131
static struct spi_imx_devtype_data imx27_cspi_devtype_data = {
	/* i.mx27 cspi shares the functions with i.mx21 one */
	.intctrl = mx21_intctrl,
	.prepare_message = mx21_prepare_message,
	.prepare_transfer = mx21_prepare_transfer,
	.trigger = mx21_trigger,
	.rx_available = mx21_rx_available,
	.reset = mx21_reset,
	.fifo_size = 8,
	.has_dmamode = false,
	.dynamic_burst = false,
	.has_targetmode = false,
	.devtype = IMX27_CSPI,
};
1146
/* i.MX31 CSPI: PIO-only controller with an 8-word FIFO, host mode only. */
static struct spi_imx_devtype_data imx31_cspi_devtype_data = {
	.intctrl = mx31_intctrl,
	.prepare_message = mx31_prepare_message,
	.prepare_transfer = mx31_prepare_transfer,
	.trigger = mx31_trigger,
	.rx_available = mx31_rx_available,
	.reset = mx31_reset,
	.fifo_size = 8,
	.has_dmamode = false,
	.dynamic_burst = false,
	.has_targetmode = false,
	.devtype = IMX31_CSPI,
};
1160
static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
	/* i.mx35 and later cspi shares the functions with i.mx31 one */
	.intctrl = mx31_intctrl,
	.prepare_message = mx31_prepare_message,
	.prepare_transfer = mx31_prepare_transfer,
	.trigger = mx31_trigger,
	.rx_available = mx31_rx_available,
	.reset = mx31_reset,
	.fifo_size = 8,
	.has_dmamode = false,
	.dynamic_burst = false,
	.has_targetmode = false,
	.devtype = IMX35_CSPI,
};
1175
/* i.MX51 ECSPI: 64-word FIFO, DMA, dynamic burst and target mode support. */
static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
	.intctrl = mx51_ecspi_intctrl,
	.prepare_message = mx51_ecspi_prepare_message,
	.prepare_transfer = mx51_ecspi_prepare_transfer,
	.trigger = mx51_ecspi_trigger,
	.rx_available = mx51_ecspi_rx_available,
	.reset = mx51_ecspi_reset,
	.setup_wml = mx51_setup_wml,
	.fifo_size = 64,
	.has_dmamode = true,
	.dynamic_burst = true,
	.has_targetmode = true,
	.disable = mx51_ecspi_disable,
	.devtype = IMX51_ECSPI,
};
1191
/* i.MX53 ECSPI: like i.MX51 but without dynamic burst or setup_wml. */
static struct spi_imx_devtype_data imx53_ecspi_devtype_data = {
	.intctrl = mx51_ecspi_intctrl,
	.prepare_message = mx51_ecspi_prepare_message,
	.prepare_transfer = mx51_ecspi_prepare_transfer,
	.trigger = mx51_ecspi_trigger,
	.rx_available = mx51_ecspi_rx_available,
	.reset = mx51_ecspi_reset,
	.fifo_size = 64,
	.has_dmamode = true,
	.has_targetmode = true,
	.disable = mx51_ecspi_disable,
	.devtype = IMX53_ECSPI,
};
1205
/* i.MX6UL ECSPI: i.MX51-compatible programming model with tx_glitch_fixed set. */
static struct spi_imx_devtype_data imx6ul_ecspi_devtype_data = {
	.intctrl = mx51_ecspi_intctrl,
	.prepare_message = mx51_ecspi_prepare_message,
	.prepare_transfer = mx51_ecspi_prepare_transfer,
	.trigger = mx51_ecspi_trigger,
	.rx_available = mx51_ecspi_rx_available,
	.reset = mx51_ecspi_reset,
	.setup_wml = mx51_setup_wml,
	.fifo_size = 64,
	.has_dmamode = true,
	.dynamic_burst = true,
	.has_targetmode = true,
	.tx_glitch_fixed = true,
	.disable = mx51_ecspi_disable,
	.devtype = IMX51_ECSPI,
};
1222
/* Device tree match table mapping each compatible to its devtype data. */
static const struct of_device_id spi_imx_dt_ids[] = {
	{ .compatible = "fsl,imx1-cspi", .data = &imx1_cspi_devtype_data, },
	{ .compatible = "fsl,imx21-cspi", .data = &imx21_cspi_devtype_data, },
	{ .compatible = "fsl,imx27-cspi", .data = &imx27_cspi_devtype_data, },
	{ .compatible = "fsl,imx31-cspi", .data = &imx31_cspi_devtype_data, },
	{ .compatible = "fsl,imx35-cspi", .data = &imx35_cspi_devtype_data, },
	{ .compatible = "fsl,imx51-ecspi", .data = &imx51_ecspi_devtype_data, },
	{ .compatible = "fsl,imx53-ecspi", .data = &imx53_ecspi_devtype_data, },
	{ .compatible = "fsl,imx6ul-ecspi", .data = &imx6ul_ecspi_devtype_data, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, spi_imx_dt_ids);
1235
spi_imx_set_burst_len(struct spi_imx_data * spi_imx,int n_bits)1236 static void spi_imx_set_burst_len(struct spi_imx_data *spi_imx, int n_bits)
1237 {
1238 u32 ctrl;
1239
1240 ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
1241 ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
1242 ctrl |= ((n_bits - 1) << MX51_ECSPI_CTRL_BL_OFFSET);
1243 writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
1244 }
1245
/*
 * Refill the TX FIFO for PIO mode and, in host mode, trigger the exchange.
 * With dynamic burst the hardware BURST_LENGTH is reprogrammed per burst;
 * otherwise one FIFO word carries one SPI word.
 */
static void spi_imx_push(struct spi_imx_data *spi_imx)
{
	unsigned int burst_len;

	/*
	 * Reload the FIFO when the remaining bytes to be transferred in the
	 * current burst is 0. This only applies when bits_per_word is a
	 * multiple of 8.
	 */
	if (!spi_imx->remainder) {
		if (spi_imx->dynamic_burst) {

			/* Send the unaligned remainder (count % 512) first */
			burst_len = spi_imx->count % MX51_ECSPI_CTRL_MAX_BURST;

			if (!burst_len)
				burst_len = MX51_ECSPI_CTRL_MAX_BURST;

			spi_imx_set_burst_len(spi_imx, burst_len * 8);

			spi_imx->remainder = burst_len;
		} else {
			/* One burst per SPI word */
			spi_imx->remainder = spi_imx_bytes_per_word(spi_imx->bits_per_word);
		}
	}

	while (spi_imx->txfifo < spi_imx->devtype_data->fifo_size) {
		if (!spi_imx->count)
			break;
		/* Don't push more FIFO words than the current burst needs */
		if (spi_imx->dynamic_burst &&
		    spi_imx->txfifo >= DIV_ROUND_UP(spi_imx->remainder, 4))
			break;
		spi_imx->tx(spi_imx);
		spi_imx->txfifo++;
	}

	/* In target mode the remote host clocks the transfer; don't trigger */
	if (!spi_imx->target_mode)
		spi_imx->devtype_data->trigger(spi_imx);
}
1285
/*
 * PIO-mode interrupt handler: drain received words from the RX FIFO, refill
 * the TX FIFO while data remains, and complete the transfer once every word
 * has been both pushed and received.
 */
static irqreturn_t spi_imx_isr(int irq, void *dev_id)
{
	struct spi_imx_data *spi_imx = dev_id;

	/* Pull in everything the controller has received so far */
	while (spi_imx->txfifo &&
	       spi_imx->devtype_data->rx_available(spi_imx)) {
		spi_imx->rx(spi_imx);
		spi_imx->txfifo--;
	}

	if (spi_imx->count) {
		/* More TX data pending: top up the FIFO and keep going */
		spi_imx_push(spi_imx);
		return IRQ_HANDLED;
	}

	if (spi_imx->txfifo) {
		/* No data left to push, but still waiting for rx data,
		 * enable receive data available interrupt.
		 */
		spi_imx->devtype_data->intctrl(
				spi_imx, MXC_INT_RR);
		return IRQ_HANDLED;
	}

	/* All words pushed and received: mask interrupts, signal completion */
	spi_imx->devtype_data->intctrl(spi_imx, 0);
	complete(&spi_imx->xfer_done);

	return IRQ_HANDLED;
}
1315
/*
 * Per-transfer setup: select the bus clock (host mode only), word size and
 * the PIO tx/rx helpers (byte-swapping variants when dynamic burst can be
 * used), decide whether DMA is used, then call the devtype-specific
 * prepare_transfer hook.
 */
static int spi_imx_setupxfer(struct spi_device *spi,
				 struct spi_transfer *t)
{
	struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);

	if (!t)
		return 0;

	/* In target mode the clock is supplied by the remote host */
	if (!spi_imx->target_mode) {
		if (!t->speed_hz) {
			if (!spi->max_speed_hz) {
				dev_err(&spi->dev, "no speed_hz provided!\n");
				return -EINVAL;
			}
			dev_dbg(&spi->dev, "using spi->max_speed_hz!\n");
			spi_imx->spi_bus_clk = spi->max_speed_hz;
		} else {
			spi_imx->spi_bus_clk = t->speed_hz;
		}
	}

	spi_imx->bits_per_word = t->bits_per_word;
	spi_imx->count = t->len;

	/*
	 * Initialize the functions for transfer. To transfer non byte-aligned
	 * words, we have to use multiple word-size bursts. To insert word
	 * delay, the burst size has to equal the word size. We can't use
	 * dynamic_burst in these cases.
	 */
	if (spi_imx->devtype_data->dynamic_burst && !spi_imx->target_mode &&
	    !(spi->mode & SPI_CS_WORD) &&
	    !(t->word_delay.value) &&
	    (spi_imx->bits_per_word == 8 ||
	    spi_imx->bits_per_word == 16 ||
	    spi_imx->bits_per_word == 32)) {

		spi_imx->rx = spi_imx_buf_rx_swap;
		spi_imx->tx = spi_imx_buf_tx_swap;
		spi_imx->dynamic_burst = 1;

	} else {
		/* Pick the narrowest helper that fits the word size */
		if (spi_imx->bits_per_word <= 8) {
			spi_imx->rx = spi_imx_buf_rx_u8;
			spi_imx->tx = spi_imx_buf_tx_u8;
		} else if (spi_imx->bits_per_word <= 16) {
			spi_imx->rx = spi_imx_buf_rx_u16;
			spi_imx->tx = spi_imx_buf_tx_u16;
		} else {
			spi_imx->rx = spi_imx_buf_rx_u32;
			spi_imx->tx = spi_imx_buf_tx_u32;
		}
		spi_imx->dynamic_burst = 0;
	}

	if (spi_imx_can_dma(spi_imx->controller, spi, t))
		spi_imx->usedma = true;
	else
		spi_imx->usedma = false;

	/* A transfer fed only by the core's dummy TX buffer is RX-only */
	spi_imx->rx_only = ((t->tx_buf == NULL)
			|| (t->tx_buf == spi->controller->dummy_tx));

	/* Target mode always uses the dedicated target FIFO helpers */
	if (spi_imx->target_mode) {
		spi_imx->rx = mx53_ecspi_rx_target;
		spi_imx->tx = mx53_ecspi_tx_target;
		spi_imx->target_burst = t->len;
	}

	return spi_imx->devtype_data->prepare_transfer(spi_imx, spi, t);
}
1387
spi_imx_sdma_exit(struct spi_imx_data * spi_imx)1388 static void spi_imx_sdma_exit(struct spi_imx_data *spi_imx)
1389 {
1390 struct spi_controller *controller = spi_imx->controller;
1391
1392 if (controller->dma_rx) {
1393 dma_release_channel(controller->dma_rx);
1394 controller->dma_rx = NULL;
1395 }
1396
1397 if (controller->dma_tx) {
1398 dma_release_channel(controller->dma_tx);
1399 controller->dma_tx = NULL;
1400 }
1401 }
1402
/*
 * Acquire the TX and RX DMA channels and initialize the completions the
 * DMA callbacks signal. On any failure both channels are released again
 * and the error code from dma_request_chan() is returned.
 */
static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
			     struct spi_controller *controller)
{
	int ret;

	/* Default watermark level: half the FIFO */
	spi_imx->wml = spi_imx->devtype_data->fifo_size / 2;

	/* Prepare for TX DMA: */
	controller->dma_tx = dma_request_chan(dev, "tx");
	if (IS_ERR(controller->dma_tx)) {
		ret = PTR_ERR(controller->dma_tx);
		dev_err_probe(dev, ret, "can't get the TX DMA channel!\n");
		controller->dma_tx = NULL;
		goto err;
	}

	/* Prepare for RX : */
	controller->dma_rx = dma_request_chan(dev, "rx");
	if (IS_ERR(controller->dma_rx)) {
		ret = PTR_ERR(controller->dma_rx);
		dev_err_probe(dev, ret, "can't get the RX DMA channel!\n");
		controller->dma_rx = NULL;
		goto err;
	}

	init_completion(&spi_imx->dma_rx_completion);
	init_completion(&spi_imx->dma_tx_completion);
	/* DMA always needs both directions; let the core supply dummies */
	spi_imx->controller->flags = SPI_CONTROLLER_MUST_RX |
				     SPI_CONTROLLER_MUST_TX;

	return 0;
err:
	spi_imx_sdma_exit(spi_imx);
	return ret;
}
1438
spi_imx_dma_rx_callback(void * cookie)1439 static void spi_imx_dma_rx_callback(void *cookie)
1440 {
1441 struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;
1442
1443 complete(&spi_imx->dma_rx_completion);
1444 }
1445
spi_imx_dma_tx_callback(void * cookie)1446 static void spi_imx_dma_tx_callback(void *cookie)
1447 {
1448 struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;
1449
1450 complete(&spi_imx->dma_tx_completion);
1451 }
1452
spi_imx_calculate_timeout(struct spi_imx_data * spi_imx,int size)1453 static int spi_imx_calculate_timeout(struct spi_imx_data *spi_imx, int size)
1454 {
1455 unsigned long timeout = 0;
1456
1457 /* Time with actual data transfer and CS change delay related to HW */
1458 timeout = (8 + 4) * size / spi_imx->spi_bus_clk;
1459
1460 /* Add extra second for scheduler related activities */
1461 timeout += 1;
1462
1463 /* Double calculated timeout */
1464 return secs_to_jiffies(2 * timeout);
1465 }
1466
/*
 * Undo the streaming DMA mappings created by spi_imx_dma_map() for one
 * package, using the same cache-aligned length that was mapped.
 */
static void spi_imx_dma_unmap(struct spi_imx_data *spi_imx,
			      struct dma_data_package *dma_data)
{
	struct device *tx_dev = spi_imx->controller->dma_tx->device->dev;
	struct device *rx_dev = spi_imx->controller->dma_rx->device->dev;

	dma_unmap_single(tx_dev, dma_data->dma_tx_addr,
			 DMA_CACHE_ALIGNED_LEN(dma_data->dma_len),
			 DMA_TO_DEVICE);
	dma_unmap_single(rx_dev, dma_data->dma_rx_addr,
			 DMA_CACHE_ALIGNED_LEN(dma_data->dma_len),
			 DMA_FROM_DEVICE);
}
1480
/*
 * Post-process one received DMA bounce buffer: restore byte order on
 * little-endian CPUs, strip leading pad bytes of a non-32-bit-aligned burst,
 * and copy the payload (data_len bytes) into @rx_buf.
 */
static void spi_imx_dma_rx_data_handle(struct spi_imx_data *spi_imx,
				       struct dma_data_package *dma_data, void *rx_buf,
				       bool word_delay)
{
	void *copy_ptr;
	int unaligned;

	/*
	 * On little-endian CPUs, adjust byte order:
	 * - Swap bytes when bpw = 8
	 * - Swap half-words when bpw = 16
	 * This ensures correct data ordering for DMA transfers.
	 */
#ifdef __LITTLE_ENDIAN
	if (!word_delay) {
		unsigned int bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);
		u32 *temp = dma_data->dma_rx_buf;

		for (int i = 0; i < DIV_ROUND_UP(dma_data->dma_len, sizeof(*temp)); i++) {
			if (bytes_per_word == 1)
				swab32s(temp + i);
			else if (bytes_per_word == 2)
				swahw32s(temp + i);
		}
	}
#endif

	/*
	 * When dynamic burst enabled, DMA RX always receives 32-bit words from RXFIFO with
	 * buswidth = 4, but when data_len is not 4-bytes alignment, the RM shows when
	 * burst length = 32*n + m bits, a SPI burst contains the m LSB in first word and all
	 * 32 bits in other n words. So if garbage bytes in the first word, trim first word then
	 * copy the actual data to rx_buf.
	 */
	if (dma_data->data_len % BYTES_PER_32BITS_WORD && !word_delay) {
		unaligned = dma_data->data_len % BYTES_PER_32BITS_WORD;
		copy_ptr = (u8 *)dma_data->dma_rx_buf + BYTES_PER_32BITS_WORD - unaligned;
	} else {
		copy_ptr = dma_data->dma_rx_buf;
	}

	memcpy(rx_buf, copy_ptr, dma_data->data_len);
}
1524
/*
 * Create streaming DMA mappings for one package's TX and RX bounce buffers.
 * The mapped length is rounded up to the cache-line size. On failure the TX
 * mapping is undone, so the caller only ever sees both-or-neither mapped.
 */
static int spi_imx_dma_map(struct spi_imx_data *spi_imx,
			   struct dma_data_package *dma_data)
{
	struct spi_controller *controller = spi_imx->controller;
	struct device *tx_dev = controller->dma_tx->device->dev;
	struct device *rx_dev = controller->dma_rx->device->dev;
	int ret;

	dma_data->dma_tx_addr = dma_map_single(tx_dev, dma_data->dma_tx_buf,
					       DMA_CACHE_ALIGNED_LEN(dma_data->dma_len),
					       DMA_TO_DEVICE);
	ret = dma_mapping_error(tx_dev, dma_data->dma_tx_addr);
	if (ret < 0) {
		dev_err(spi_imx->dev, "DMA TX map failed %d\n", ret);
		return ret;
	}

	dma_data->dma_rx_addr = dma_map_single(rx_dev, dma_data->dma_rx_buf,
					       DMA_CACHE_ALIGNED_LEN(dma_data->dma_len),
					       DMA_FROM_DEVICE);
	ret = dma_mapping_error(rx_dev, dma_data->dma_rx_addr);
	if (ret < 0) {
		dev_err(spi_imx->dev, "DMA RX map failed %d\n", ret);
		/* Roll back the TX mapping so no partial state leaks out */
		dma_unmap_single(tx_dev, dma_data->dma_tx_addr,
				 DMA_CACHE_ALIGNED_LEN(dma_data->dma_len),
				 DMA_TO_DEVICE);
		return ret;
	}

	return 0;
}
1556
/*
 * Allocate the TX and RX bounce buffers for one DMA package and fill the TX
 * buffer from @tx_buf: pad the head to a 32-bit boundary when the length is
 * unaligned (no word delay) and fix byte order for the ECSPI FIFO format on
 * little-endian CPUs. Returns 0 on success or -ENOMEM.
 */
static int spi_imx_dma_tx_data_handle(struct spi_imx_data *spi_imx,
				      struct dma_data_package *dma_data,
				      const void *tx_buf,
				      bool word_delay)
{
	void *copy_ptr;
	int unaligned;

	if (word_delay) {
		dma_data->dma_len = dma_data->data_len;
	} else {
		/*
		 * As per the reference manual, when burst length = 32*n + m bits, ECSPI
		 * sends m LSB bits in the first word, followed by n full 32-bit words.
		 * Since actual data may not be 4-byte aligned, allocate DMA TX/RX buffers
		 * to ensure alignment. For TX, DMA pushes 4-byte aligned words to TXFIFO,
		 * while ECSPI uses BURST_LENGTH settings to maintain correct bit count.
		 * For RX, DMA always receives 32-bit words from RXFIFO, when data len is
		 * not 4-byte aligned, trim the first word to drop garbage bytes, then group
		 * all transfer DMA bounce buffer and copy all valid data to rx_buf.
		 */
		dma_data->dma_len = ALIGN(dma_data->data_len, BYTES_PER_32BITS_WORD);
	}

	dma_data->dma_tx_buf = kzalloc(dma_data->dma_len, GFP_KERNEL);
	if (!dma_data->dma_tx_buf)
		return -ENOMEM;

	dma_data->dma_rx_buf = kzalloc(dma_data->dma_len, GFP_KERNEL);
	if (!dma_data->dma_rx_buf) {
		kfree(dma_data->dma_tx_buf);
		return -ENOMEM;
	}

	/* Unaligned data goes after the zeroed pad bytes in the first word */
	if (dma_data->data_len % BYTES_PER_32BITS_WORD && !word_delay) {
		unaligned = dma_data->data_len % BYTES_PER_32BITS_WORD;
		copy_ptr = (u8 *)dma_data->dma_tx_buf + BYTES_PER_32BITS_WORD - unaligned;
	} else {
		copy_ptr = dma_data->dma_tx_buf;
	}

	memcpy(copy_ptr, tx_buf, dma_data->data_len);

	/*
	 * When word_delay is enabled, DMA transfers an entire word in one minor loop.
	 * In this case, no data requires additional handling.
	 */
	if (word_delay)
		return 0;

#ifdef __LITTLE_ENDIAN
	/*
	 * On little-endian CPUs, adjust byte order:
	 * - Swap bytes when bpw = 8
	 * - Swap half-words when bpw = 16
	 * This ensures correct data ordering for DMA transfers.
	 */
	unsigned int bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);
	u32 *temp = dma_data->dma_tx_buf;

	for (int i = 0; i < DIV_ROUND_UP(dma_data->dma_len, sizeof(*temp)); i++) {
		if (bytes_per_word == 1)
			swab32s(temp + i);
		else if (bytes_per_word == 2)
			swahw32s(temp + i);
	}
#endif

	return 0;
}
1627
/*
 * Split one SPI transfer into DMA packages and prepare their bounce buffers.
 *
 * ECSPI supports a maximum burst of 512 bytes. When xfer->len exceeds 512
 * and is not a multiple of 512, a tail transfer is required. BURST_LENGTH
 * is used by the SPI HW to maintain a correct bit count and must track the
 * data length; since it cannot be changed after a DMA request has been
 * submitted, such transfers are split into two packages and the register is
 * reprogrammed between them (via the per-package cmd_word).
 *
 * Returns 0 on success or a negative errno; on failure all allocations made
 * here are freed again.
 */
static int spi_imx_dma_data_prepare(struct spi_imx_data *spi_imx,
				    struct spi_transfer *transfer,
				    bool word_delay)
{
	u32 pre_bl, tail_bl = 0;
	u32 ctrl;
	int ret;

	ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
	if (word_delay) {
		/*
		 * When SPI IMX need to support word delay, according to "Sample Period Control
		 * Register" shows, The Sample Period Control Register (ECSPI_PERIODREG)
		 * provides software a way to insert delays (wait states) between consecutive
		 * SPI transfers. As a result, ECSPI can only transfer one word per frame, and
		 * the delay occurs between frames.
		 */
		spi_imx->dma_package_num = 1;
		pre_bl = spi_imx->bits_per_word - 1;
	} else if (transfer->len <= MX51_ECSPI_CTRL_MAX_BURST) {
		spi_imx->dma_package_num = 1;
		pre_bl = transfer->len * BITS_PER_BYTE - 1;
	} else if (!(transfer->len % MX51_ECSPI_CTRL_MAX_BURST)) {
		spi_imx->dma_package_num = 1;
		pre_bl = MX51_ECSPI_CTRL_MAX_BURST * BITS_PER_BYTE - 1;
	} else {
		/* Head of maximum-size bursts plus a shorter tail burst */
		spi_imx->dma_package_num = 2;
		pre_bl = MX51_ECSPI_CTRL_MAX_BURST * BITS_PER_BYTE - 1;
		tail_bl = (transfer->len % MX51_ECSPI_CTRL_MAX_BURST) * BITS_PER_BYTE - 1;
	}

	spi_imx->dma_data = kmalloc_objs(struct dma_data_package,
					 spi_imx->dma_package_num,
					 GFP_KERNEL | __GFP_ZERO);
	if (!spi_imx->dma_data) {
		dev_err(spi_imx->dev, "Failed to allocate DMA package buffer!\n");
		return -ENOMEM;
	}

	/* First (and possibly only) package */
	ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
	ctrl |= pre_bl << MX51_ECSPI_CTRL_BL_OFFSET;
	spi_imx->dma_data[0].cmd_word = ctrl;
	if (spi_imx->dma_package_num == 1)
		spi_imx->dma_data[0].data_len = transfer->len;
	else
		spi_imx->dma_data[0].data_len = round_down(transfer->len,
							   MX51_ECSPI_CTRL_MAX_BURST);
	/* word_delay implies a single package, so passing it through is safe */
	ret = spi_imx_dma_tx_data_handle(spi_imx, &spi_imx->dma_data[0],
					 transfer->tx_buf, word_delay);
	if (ret)
		goto free_dma_data;

	if (spi_imx->dma_package_num == 2) {
		/* Tail package with the remaining (shorter) burst length */
		ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
		ctrl |= tail_bl << MX51_ECSPI_CTRL_BL_OFFSET;
		spi_imx->dma_data[1].cmd_word = ctrl;
		spi_imx->dma_data[1].data_len = transfer->len % MX51_ECSPI_CTRL_MAX_BURST;
		ret = spi_imx_dma_tx_data_handle(spi_imx, &spi_imx->dma_data[1],
						 transfer->tx_buf + spi_imx->dma_data[0].data_len,
						 false);
		if (ret)
			goto free_first_package;
	}

	return 0;

free_first_package:
	kfree(spi_imx->dma_data[0].dma_tx_buf);
	kfree(spi_imx->dma_data[0].dma_rx_buf);
free_dma_data:
	kfree(spi_imx->dma_data);
	return ret;
}
1716
/*
 * Submit one DMA package: prepare and issue the RX descriptor first (TX
 * starts the transfer), then TX, then wait for both completions. Host mode
 * uses timed waits and resets/terminates on timeout; target mode waits
 * interruptibly and honours target_aborted.
 */
static int spi_imx_dma_submit(struct spi_imx_data *spi_imx,
			      struct dma_data_package *dma_data,
			      struct spi_transfer *transfer)
{
	struct spi_controller *controller = spi_imx->controller;
	struct dma_async_tx_descriptor *desc_tx, *desc_rx;
	unsigned long transfer_timeout;
	unsigned long time_left;
	dma_cookie_t cookie;

	/*
	 * The TX DMA setup starts the transfer, so make sure RX is configured
	 * before TX.
	 */
	desc_rx = dmaengine_prep_slave_single(controller->dma_rx, dma_data->dma_rx_addr,
					      dma_data->dma_len, DMA_DEV_TO_MEM,
					      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_rx) {
		transfer->error |= SPI_TRANS_FAIL_NO_START;
		return -EINVAL;
	}

	desc_rx->callback = spi_imx_dma_rx_callback;
	desc_rx->callback_param = (void *)spi_imx;
	cookie = dmaengine_submit(desc_rx);
	if (dma_submit_error(cookie)) {
		dev_err(spi_imx->dev, "submitting DMA RX failed\n");
		transfer->error |= SPI_TRANS_FAIL_NO_START;
		goto dmaengine_terminate_rx;
	}

	reinit_completion(&spi_imx->dma_rx_completion);
	dma_async_issue_pending(controller->dma_rx);

	/*
	 * NOTE(review): the desc_tx failure paths below do not set
	 * SPI_TRANS_FAIL_NO_START like the RX paths do — confirm whether the
	 * PIO fallback is intended once RX has been issued.
	 */
	desc_tx = dmaengine_prep_slave_single(controller->dma_tx, dma_data->dma_tx_addr,
					      dma_data->dma_len, DMA_MEM_TO_DEV,
					      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_tx)
		goto dmaengine_terminate_rx;

	desc_tx->callback = spi_imx_dma_tx_callback;
	desc_tx->callback_param = (void *)spi_imx;
	cookie = dmaengine_submit(desc_tx);
	if (dma_submit_error(cookie)) {
		dev_err(spi_imx->dev, "submitting DMA TX failed\n");
		goto dmaengine_terminate_tx;
	}
	reinit_completion(&spi_imx->dma_tx_completion);
	dma_async_issue_pending(controller->dma_tx);

	spi_imx->devtype_data->trigger(spi_imx);

	transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);

	if (!spi_imx->target_mode) {
		/* Wait SDMA to finish the data transfer.*/
		time_left = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
							transfer_timeout);
		if (!time_left) {
			dev_err(spi_imx->dev, "I/O Error in DMA TX\n");
			dmaengine_terminate_all(controller->dma_tx);
			dmaengine_terminate_all(controller->dma_rx);
			return -ETIMEDOUT;
		}

		time_left = wait_for_completion_timeout(&spi_imx->dma_rx_completion,
							transfer_timeout);
		if (!time_left) {
			dev_err(&controller->dev, "I/O Error in DMA RX\n");
			spi_imx->devtype_data->reset(spi_imx);
			dmaengine_terminate_all(controller->dma_rx);
			return -ETIMEDOUT;
		}
	} else {
		spi_imx->target_aborted = false;

		if (wait_for_completion_interruptible(&spi_imx->dma_tx_completion) ||
		    READ_ONCE(spi_imx->target_aborted)) {
			dev_dbg(spi_imx->dev, "I/O Error in DMA TX interrupted\n");
			dmaengine_terminate_all(controller->dma_tx);
			dmaengine_terminate_all(controller->dma_rx);
			return -EINTR;
		}

		if (wait_for_completion_interruptible(&spi_imx->dma_rx_completion) ||
		    READ_ONCE(spi_imx->target_aborted)) {
			dev_dbg(spi_imx->dev, "I/O Error in DMA RX interrupted\n");
			dmaengine_terminate_all(controller->dma_rx);
			return -EINTR;
		}

		/*
		 * ECSPI has a HW issue when works in Target mode, after 64 words
		 * written to TXFIFO, even TXFIFO becomes empty, ECSPI_TXDATA keeps
		 * shift out the last word data, so we have to disable ECSPI when in
		 * target mode after the transfer completes.
		 */
		if (spi_imx->devtype_data->disable)
			spi_imx->devtype_data->disable(spi_imx);
	}

	return 0;

dmaengine_terminate_tx:
	dmaengine_terminate_all(controller->dma_tx);
dmaengine_terminate_rx:
	dmaengine_terminate_all(controller->dma_rx);

	return -EINVAL;
}
1827
/*
 * Pick the largest watermark level (in FIFO words, at most half the FIFO)
 * that evenly divides the DMA buffer length, so every DMA burst moves a
 * whole multiple of the watermark. Falls back to a watermark of one word.
 */
static void spi_imx_dma_max_wml_find(struct spi_imx_data *spi_imx,
				     struct dma_data_package *dma_data,
				     bool word_delay)
{
	unsigned int word_size;
	unsigned int wml;

	/* Without word delay the FIFO is always fed with 32-bit words */
	word_size = word_delay ? spi_imx_bytes_per_word(spi_imx->bits_per_word)
			       : BYTES_PER_32BITS_WORD;

	for (wml = spi_imx->devtype_data->fifo_size / 2; wml > 1; wml--) {
		if (!(dma_data->dma_len % (wml * word_size)))
			break;
	}

	/* wml == 1 here either fits exactly or is the safe fallback */
	spi_imx->wml = wml;
}
1847
/*
 * Configure the dmaengine slave side for both directions. Without word delay
 * the FIFO is always accessed as 32-bit words; with word delay the bus width
 * matches the transfer's word size so one SPI word moves per request.
 * Returns 0 or the dmaengine_slave_config() error.
 */
static int spi_imx_dma_configure(struct spi_controller *controller, bool word_delay)
{
	int ret;
	enum dma_slave_buswidth buswidth;
	struct dma_slave_config rx = {}, tx = {};
	struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);

	if (word_delay) {
		switch (spi_imx_bytes_per_word(spi_imx->bits_per_word)) {
		case 4:
			buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
			break;
		case 2:
			buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
			break;
		case 1:
			buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
			break;
		default:
			return -EINVAL;
		}
	} else {
		buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
	}

	tx.direction = DMA_MEM_TO_DEV;
	tx.dst_addr = spi_imx->base_phys + MXC_CSPITXDATA;
	tx.dst_addr_width = buswidth;
	tx.dst_maxburst = spi_imx->wml;
	ret = dmaengine_slave_config(controller->dma_tx, &tx);
	if (ret) {
		dev_err(spi_imx->dev, "TX dma configuration failed with %d\n", ret);
		return ret;
	}

	rx.direction = DMA_DEV_TO_MEM;
	rx.src_addr = spi_imx->base_phys + MXC_CSPIRXDATA;
	rx.src_addr_width = buswidth;
	rx.src_maxburst = spi_imx->wml;
	ret = dmaengine_slave_config(controller->dma_rx, &rx);
	if (ret) {
		dev_err(spi_imx->dev, "RX dma configuration failed with %d\n", ret);
		return ret;
	}

	return 0;
}
1895
/*
 * Run one DMA package: choose a suitable watermark level, configure the DMA
 * slave and the controller WML, submit the transfer, then copy the received
 * data (with any leading pad bytes trimmed) into the caller's rx_buf at the
 * running rx_offset. Failures before submission set SPI_TRANS_FAIL_NO_START
 * so the core can fall back to PIO.
 */
static int spi_imx_dma_package_transfer(struct spi_imx_data *spi_imx,
					struct dma_data_package *dma_data,
					struct spi_transfer *transfer,
					bool word_delay)
{
	struct spi_controller *controller = spi_imx->controller;
	int ret;

	spi_imx_dma_max_wml_find(spi_imx, dma_data, word_delay);

	ret = spi_imx_dma_configure(controller, word_delay);
	if (ret)
		goto dma_failure_no_start;

	if (!spi_imx->devtype_data->setup_wml) {
		dev_err(spi_imx->dev, "No setup_wml()?\n");
		ret = -EINVAL;
		goto dma_failure_no_start;
	}
	spi_imx->devtype_data->setup_wml(spi_imx);

	ret = spi_imx_dma_submit(spi_imx, dma_data, transfer);
	if (ret)
		return ret;

	/* Trim the DMA RX buffer and copy the actual data to rx_buf */
	dma_sync_single_for_cpu(controller->dma_rx->device->dev, dma_data->dma_rx_addr,
				dma_data->dma_len, DMA_FROM_DEVICE);
	spi_imx_dma_rx_data_handle(spi_imx, dma_data, transfer->rx_buf + spi_imx->rx_offset,
				   word_delay);
	spi_imx->rx_offset += dma_data->data_len;

	return 0;
	/* fallback to pio */
dma_failure_no_start:
	transfer->error |= SPI_TRANS_FAIL_NO_START;
	return ret;
}
1934
/*
 * DMA transfer entry point: split the transfer into one or two packages
 * (see spi_imx_dma_data_prepare()), run each package as a separate DMA
 * transaction with its own BURST_LENGTH setting, then free all bounce
 * buffers. On preparation failure SPI_TRANS_FAIL_NO_START is set so the
 * caller can fall back to PIO.
 */
static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
				struct spi_transfer *transfer)
{
	bool word_delay = transfer->word_delay.value != 0 && !spi_imx->target_mode;
	int ret;
	int i;

	ret = spi_imx_dma_data_prepare(spi_imx, transfer, word_delay);
	if (ret < 0) {
		transfer->error |= SPI_TRANS_FAIL_NO_START;
		dev_err(spi_imx->dev, "DMA data prepare fail\n");
		/* prepare frees its own allocations, so skip the kfree()s below */
		goto fallback_pio;
	}

	spi_imx->rx_offset = 0;

	/* Each dma_package performs a separate DMA transfer once */
	for (i = 0; i < spi_imx->dma_package_num; i++) {
		ret = spi_imx_dma_map(spi_imx, &spi_imx->dma_data[i]);
		if (ret < 0) {
			/* Nothing started yet only if the first package failed */
			if (i == 0)
				transfer->error |= SPI_TRANS_FAIL_NO_START;
			dev_err(spi_imx->dev, "DMA map fail\n");
			break;
		}

		/* Update the CTRL register BL field */
		writel(spi_imx->dma_data[i].cmd_word, spi_imx->base + MX51_ECSPI_CTRL);

		ret = spi_imx_dma_package_transfer(spi_imx, &spi_imx->dma_data[i],
						   transfer, word_delay);

		/* Whether the dma transmission is successful or not, dma unmap is necessary */
		spi_imx_dma_unmap(spi_imx, &spi_imx->dma_data[i]);

		if (ret < 0) {
			dev_dbg(spi_imx->dev, "DMA %d transfer not really finish\n", i);
			break;
		}
	}

	/* Release every package's bounce buffers and the package array itself */
	for (int j = 0; j < spi_imx->dma_package_num; j++) {
		kfree(spi_imx->dma_data[j].dma_tx_buf);
		kfree(spi_imx->dma_data[j].dma_rx_buf);
	}
	kfree(spi_imx->dma_data);

fallback_pio:
	return ret;
}
1985
/*
 * Interrupt-driven PIO transfer: prime the TX FIFO, enable the TX-empty
 * interrupt and sleep until the ISR completes xfer_done.
 *
 * Returns 0 on success, or -ETIMEDOUT (after resetting the controller)
 * if the transfer does not finish within the calculated timeout.
 */
static int spi_imx_pio_transfer(struct spi_device *spi,
				struct spi_transfer *transfer)
{
	struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
	unsigned long transfer_timeout;
	unsigned long time_left;

	spi_imx->tx_buf = transfer->tx_buf;
	spi_imx->rx_buf = transfer->rx_buf;
	spi_imx->count = transfer->len;
	spi_imx->txfifo = 0;
	spi_imx->remainder = 0;

	reinit_completion(&spi_imx->xfer_done);

	/* Fill the FIFO before enabling the interrupt. */
	spi_imx_push(spi_imx);

	spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE);

	transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);

	time_left = wait_for_completion_timeout(&spi_imx->xfer_done,
						transfer_timeout);
	if (!time_left) {
		dev_err(&spi->dev, "I/O Error in PIO\n");
		/* Reset the controller to recover to a known state. */
		spi_imx->devtype_data->reset(spi_imx);
		return -ETIMEDOUT;
	}

	return 0;
}
2017
/*
 * Busy-poll a short transfer: keep the TX FIFO fed and drain RX without
 * enabling interrupts.  If the deadline passes while words are still in
 * flight, fall back to the interrupt-driven PIO path rather than fail.
 */
static int spi_imx_poll_transfer(struct spi_device *spi,
				 struct spi_transfer *transfer)
{
	struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
	unsigned long timeout;

	spi_imx->tx_buf = transfer->tx_buf;
	spi_imx->rx_buf = transfer->rx_buf;
	spi_imx->count = transfer->len;
	spi_imx->txfifo = 0;
	spi_imx->remainder = 0;

	/* fill in the fifo before timeout calculations if we are
	 * interrupted here, then the data is getting transferred by
	 * the HW while we are interrupted
	 */
	spi_imx_push(spi_imx);

	/* Absolute deadline in jiffies, computed after the FIFO is primed. */
	timeout = spi_imx_calculate_timeout(spi_imx, transfer->len) + jiffies;
	while (spi_imx->txfifo) {
		/* RX: drain every word the hardware has made available. */
		while (spi_imx->txfifo &&
		       spi_imx->devtype_data->rx_available(spi_imx)) {
			spi_imx->rx(spi_imx);
			spi_imx->txfifo--;
		}

		/* TX: top up the FIFO as long as data remains. */
		if (spi_imx->count) {
			spi_imx_push(spi_imx);
			continue;
		}

		if (spi_imx->txfifo &&
		    time_after(jiffies, timeout)) {

			dev_err_ratelimited(&spi->dev,
					    "timeout period reached: jiffies: %lu- falling back to interrupt mode\n",
					    jiffies - timeout);

			/* fall back to interrupt mode */
			return spi_imx_pio_transfer(spi, transfer);
		}
	}

	return 0;
}
2065
/*
 * Target (slave) mode PIO transfer: prime the FIFO, enable interrupts
 * and wait (interruptibly) for the remote host to clock the data.  The
 * wait can be cancelled via spi_imx_target_abort().
 *
 * Returns 0 on success, -EMSGSIZE for oversized transfers, or -EINTR
 * when interrupted or aborted.
 */
static int spi_imx_pio_transfer_target(struct spi_device *spi,
				       struct spi_transfer *transfer)
{
	struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
	int ret = 0;

	if (transfer->len > MX53_MAX_TRANSFER_BYTES) {
		dev_err(&spi->dev, "Transaction too big, max size is %d bytes\n",
			MX53_MAX_TRANSFER_BYTES);
		return -EMSGSIZE;
	}

	spi_imx->tx_buf = transfer->tx_buf;
	spi_imx->rx_buf = transfer->rx_buf;
	spi_imx->count = transfer->len;
	spi_imx->txfifo = 0;
	spi_imx->remainder = 0;

	reinit_completion(&spi_imx->xfer_done);
	spi_imx->target_aborted = false;

	spi_imx_push(spi_imx);

	/* Enable TX-empty and RX data-threshold interrupts for the ISR. */
	spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE | MXC_INT_RDR);

	if (wait_for_completion_interruptible(&spi_imx->xfer_done) ||
	    spi_imx->target_aborted) {
		dev_dbg(&spi->dev, "interrupted\n");
		ret = -EINTR;
	}

	/* ecspi has a HW issue when working in Target mode:
	 * after 64 words written to TXFIFO, even when TXFIFO becomes empty,
	 * ECSPI_TXDATA keeps shifting out the last word of data,
	 * so we have to disable ECSPI when in target mode after the
	 * transfer completes
	 */
	if (spi_imx->devtype_data->disable)
		spi_imx->devtype_data->disable(spi_imx);

	return ret;
}
2108
spi_imx_transfer_estimate_time_us(struct spi_transfer * transfer)2109 static unsigned int spi_imx_transfer_estimate_time_us(struct spi_transfer *transfer)
2110 {
2111 u64 result;
2112
2113 result = DIV_U64_ROUND_CLOSEST((u64)USEC_PER_SEC * transfer->len * BITS_PER_BYTE,
2114 transfer->effective_speed_hz);
2115 if (transfer->word_delay.value) {
2116 unsigned int word_delay_us;
2117 unsigned int words;
2118
2119 words = DIV_ROUND_UP(transfer->len * BITS_PER_BYTE, transfer->bits_per_word);
2120 word_delay_us = DIV_ROUND_CLOSEST(spi_delay_to_ns(&transfer->word_delay, transfer),
2121 NSEC_PER_USEC);
2122 result += (u64)words * word_delay_us;
2123 }
2124
2125 return min(result, U32_MAX);
2126 }
2127
/*
 * Per-transfer dispatch: configure the controller for this transfer,
 * then route it to target-mode PIO, DMA (with PIO fallback), polling,
 * or interrupt-driven PIO, in that priority order.
 */
static int spi_imx_transfer_one(struct spi_controller *controller,
				struct spi_device *spi,
				struct spi_transfer *transfer)
{
	int ret;
	struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);

	ret = spi_imx_setupxfer(spi, transfer);
	if (ret < 0)
		return ret;
	transfer->effective_speed_hz = spi_imx->spi_bus_clk;

	/* flush rxfifo before transfer */
	while (spi_imx->devtype_data->rx_available(spi_imx))
		readl(spi_imx->base + MXC_CSPIRXDATA);

	if (spi_imx->target_mode && !spi_imx->usedma)
		return spi_imx_pio_transfer_target(spi, transfer);

	/*
	 * If we decided in spi_imx_can_dma() that we want to do a DMA
	 * transfer, the SPI transfer has already been mapped, so we
	 * have to do the DMA transfer here.
	 */
	if (spi_imx->usedma) {
		ret = spi_imx_dma_transfer(spi_imx, transfer);
		/* DMA never started: retry the same transfer in PIO mode. */
		if (transfer->error & SPI_TRANS_FAIL_NO_START) {
			spi_imx->usedma = false;
			if (spi_imx->target_mode)
				return spi_imx_pio_transfer_target(spi, transfer);
			else
				return spi_imx_pio_transfer(spi, transfer);
		}
		return ret;
	}
	/* run in polling mode for short transfers */
	if (transfer->len == 1 || (polling_limit_us &&
				   spi_imx_transfer_estimate_time_us(transfer) < polling_limit_us))
		return spi_imx_poll_transfer(spi, transfer);

	return spi_imx_pio_transfer(spi, transfer);
}
2170
spi_imx_setup(struct spi_device * spi)2171 static int spi_imx_setup(struct spi_device *spi)
2172 {
2173 dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__,
2174 spi->mode, spi->bits_per_word, spi->max_speed_hz);
2175
2176 return 0;
2177 }
2178
2179 static int
spi_imx_prepare_message(struct spi_controller * controller,struct spi_message * msg)2180 spi_imx_prepare_message(struct spi_controller *controller, struct spi_message *msg)
2181 {
2182 struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
2183 int ret;
2184
2185 ret = pm_runtime_resume_and_get(spi_imx->dev);
2186 if (ret < 0) {
2187 dev_err(spi_imx->dev, "failed to enable clock\n");
2188 return ret;
2189 }
2190
2191 ret = spi_imx->devtype_data->prepare_message(spi_imx, msg);
2192 if (ret) {
2193 pm_runtime_put_autosuspend(spi_imx->dev);
2194 }
2195
2196 return ret;
2197 }
2198
2199 static int
spi_imx_unprepare_message(struct spi_controller * controller,struct spi_message * msg)2200 spi_imx_unprepare_message(struct spi_controller *controller, struct spi_message *msg)
2201 {
2202 struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
2203
2204 pm_runtime_put_autosuspend(spi_imx->dev);
2205 return 0;
2206 }
2207
/*
 * Abort an in-flight target-mode transfer: flag the abort and wake the
 * waiter in spi_imx_pio_transfer_target().  The flag is set before
 * complete() so the woken thread is guaranteed to observe it.
 */
static int spi_imx_target_abort(struct spi_controller *controller)
{
	struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);

	spi_imx->target_aborted = true;
	complete(&spi_imx->xfer_done);

	return 0;
}
2217
/*
 * Probe: allocate a host or target controller, parse DT properties,
 * map resources, set up clocks, runtime PM and (optionally) DMA, then
 * register the controller.  Error paths unwind in reverse order of
 * acquisition via the goto labels at the bottom.
 */
static int spi_imx_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct spi_controller *controller;
	struct spi_imx_data *spi_imx;
	struct resource *res;
	int ret, irq, spi_drctl;
	const struct spi_imx_devtype_data *devtype_data =
		of_device_get_match_data(&pdev->dev);
	bool target_mode;
	u32 val;

	/* Target (slave) mode requires hardware support and DT opt-in. */
	target_mode = devtype_data->has_targetmode &&
		      of_property_read_bool(np, "spi-slave");
	if (target_mode)
		controller = devm_spi_alloc_target(&pdev->dev, sizeof(*spi_imx));
	else
		controller = devm_spi_alloc_host(&pdev->dev, sizeof(*spi_imx));
	if (!controller)
		return -ENOMEM;

	ret = of_property_read_u32(np, "fsl,spi-rdy-drctl", &spi_drctl);
	if ((ret < 0) || (spi_drctl >= 0x3)) {
		/* '11' is reserved */
		spi_drctl = 0;
	}

	platform_set_drvdata(pdev, controller);

	controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
	controller->bus_num = np ? -1 : pdev->id;
	controller->use_gpio_descriptors = true;

	spi_imx = spi_controller_get_devdata(controller);
	spi_imx->controller = controller;
	spi_imx->dev = &pdev->dev;
	spi_imx->target_mode = target_mode;

	spi_imx->devtype_data = devtype_data;

	/*
	 * Get number of chip selects from device properties. This can be
	 * coming from device tree or boardfiles, if it is not defined,
	 * a default value of 3 chip selects will be used, as all the legacy
	 * board files have <= 3 chip selects.
	 */
	if (!device_property_read_u32(&pdev->dev, "num-cs", &val))
		controller->num_chipselect = val;
	else
		controller->num_chipselect = 3;

	controller->transfer_one = spi_imx_transfer_one;
	controller->setup = spi_imx_setup;
	controller->prepare_message = spi_imx_prepare_message;
	controller->unprepare_message = spi_imx_unprepare_message;
	controller->target_abort = spi_imx_target_abort;
	spi_imx->spi_bus_clk = MXC_SPI_DEFAULT_SPEED;
	controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_NO_CS |
				SPI_MOSI_IDLE_LOW;

	if (is_imx35_cspi(spi_imx) || is_imx51_ecspi(spi_imx) ||
	    is_imx53_ecspi(spi_imx))
		controller->mode_bits |= SPI_LOOP | SPI_READY;

	if (is_imx51_ecspi(spi_imx) || is_imx53_ecspi(spi_imx))
		controller->mode_bits |= SPI_RX_CPHA_FLIP;

	/* Presence probe of "cs-gpios" (a u32 read of it is expected to fail). */
	if (is_imx51_ecspi(spi_imx) &&
	    device_property_read_u32(&pdev->dev, "cs-gpios", NULL))
		/*
		 * When using HW-CS implementing SPI_CS_WORD can be done by just
		 * setting the burst length to the word size. This is
		 * considerably faster than manually controlling the CS.
		 */
		controller->mode_bits |= SPI_CS_WORD;

	if (is_imx51_ecspi(spi_imx) || is_imx53_ecspi(spi_imx)) {
		controller->max_native_cs = 4;
		controller->flags |= SPI_CONTROLLER_GPIO_SS;
	}

	spi_imx->spi_drctl = spi_drctl;

	init_completion(&spi_imx->xfer_done);

	spi_imx->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(spi_imx->base))
		return PTR_ERR(spi_imx->base);

	/* Physical base, presumably used for DMA setup — see spi_imx_sdma_init(). */
	spi_imx->base_phys = res->start;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, spi_imx_isr, 0,
			       dev_name(&pdev->dev), spi_imx);
	if (ret)
		return dev_err_probe(&pdev->dev, ret, "can't get irq%d\n", irq);

	spi_imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(spi_imx->clk_ipg))
		return PTR_ERR(spi_imx->clk_ipg);

	spi_imx->clk_per = devm_clk_get(&pdev->dev, "per");
	if (IS_ERR(spi_imx->clk_per))
		return PTR_ERR(spi_imx->clk_per);

	ret = clk_prepare_enable(spi_imx->clk_per);
	if (ret)
		return ret;

	ret = clk_prepare_enable(spi_imx->clk_ipg);
	if (ret)
		goto out_put_per;

	/*
	 * Clocks are already on: mark the device active and hold a
	 * runtime-PM reference for the remainder of probe.
	 */
	pm_runtime_set_autosuspend_delay(spi_imx->dev, MXC_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(spi_imx->dev);
	pm_runtime_get_noresume(spi_imx->dev);
	pm_runtime_set_active(spi_imx->dev);
	pm_runtime_enable(spi_imx->dev);

	spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
	/*
	 * Only validated on i.mx35 and i.mx6 now, can remove the constraint
	 * if validated on other chips.
	 */
	if (spi_imx->devtype_data->has_dmamode) {
		ret = spi_imx_sdma_init(&pdev->dev, spi_imx, controller);
		if (ret == -EPROBE_DEFER)
			goto out_runtime_pm_put;

		if (ret < 0)
			dev_dbg(&pdev->dev, "dma setup error %d, use pio\n",
				ret);
	}

	spi_imx->devtype_data->reset(spi_imx);

	/* Enable no interrupts until a transfer requests them. */
	spi_imx->devtype_data->intctrl(spi_imx, 0);

	ret = spi_register_controller(controller);
	if (ret) {
		dev_err_probe(&pdev->dev, ret, "register controller failed\n");
		goto out_register_controller;
	}

	/* Drop the probe-time PM reference; the device may now autosuspend. */
	pm_runtime_put_autosuspend(spi_imx->dev);

	return ret;

out_register_controller:
	if (spi_imx->devtype_data->has_dmamode)
		spi_imx_sdma_exit(spi_imx);
out_runtime_pm_put:
	pm_runtime_dont_use_autosuspend(spi_imx->dev);
	pm_runtime_disable(spi_imx->dev);
	pm_runtime_put_noidle(spi_imx->dev);
	pm_runtime_set_suspended(&pdev->dev);

	clk_disable_unprepare(spi_imx->clk_ipg);
out_put_per:
	clk_disable_unprepare(spi_imx->clk_per);

	return ret;
}
2384
/*
 * Remove: unregister the controller, quiesce the hardware (with clocks
 * resumed, if possible), then tear down runtime PM and the DMA setup.
 */
static void spi_imx_remove(struct platform_device *pdev)
{
	struct spi_controller *controller = platform_get_drvdata(pdev);
	struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
	int ret;

	spi_unregister_controller(controller);

	/* Register access needs clocks on: resume before writing CTRL. */
	ret = pm_runtime_get_sync(spi_imx->dev);
	if (ret >= 0)
		writel(0, spi_imx->base + MXC_CSPICTRL);
	else
		dev_warn(spi_imx->dev, "failed to enable clock, skip hw disable\n");

	pm_runtime_dont_use_autosuspend(spi_imx->dev);
	pm_runtime_put_sync(spi_imx->dev);
	pm_runtime_disable(spi_imx->dev);

	spi_imx_sdma_exit(spi_imx);
}
2405
spi_imx_runtime_resume(struct device * dev)2406 static int spi_imx_runtime_resume(struct device *dev)
2407 {
2408 struct spi_controller *controller = dev_get_drvdata(dev);
2409 struct spi_imx_data *spi_imx;
2410 int ret;
2411
2412 spi_imx = spi_controller_get_devdata(controller);
2413
2414 ret = clk_prepare_enable(spi_imx->clk_per);
2415 if (ret)
2416 return ret;
2417
2418 ret = clk_prepare_enable(spi_imx->clk_ipg);
2419 if (ret) {
2420 clk_disable_unprepare(spi_imx->clk_per);
2421 return ret;
2422 }
2423
2424 return 0;
2425 }
2426
spi_imx_runtime_suspend(struct device * dev)2427 static int spi_imx_runtime_suspend(struct device *dev)
2428 {
2429 struct spi_controller *controller = dev_get_drvdata(dev);
2430 struct spi_imx_data *spi_imx;
2431
2432 spi_imx = spi_controller_get_devdata(controller);
2433
2434 clk_disable_unprepare(spi_imx->clk_per);
2435 clk_disable_unprepare(spi_imx->clk_ipg);
2436
2437 return 0;
2438 }
2439
/* System sleep: move pins to the sleep pinctrl state (best effort). */
static int spi_imx_suspend(struct device *dev)
{
	pinctrl_pm_select_sleep_state(dev);
	return 0;
}
2445
/* System resume: restore the default pinctrl state (best effort). */
static int spi_imx_resume(struct device *dev)
{
	pinctrl_pm_select_default_state(dev);
	return 0;
}
2451
/* Runtime PM gates the clocks; system sleep only switches pinctrl states. */
static const struct dev_pm_ops imx_spi_pm = {
	RUNTIME_PM_OPS(spi_imx_runtime_suspend, spi_imx_runtime_resume, NULL)
	SYSTEM_SLEEP_PM_OPS(spi_imx_suspend, spi_imx_resume)
};
2456
/* Platform driver glue: matched via spi_imx_dt_ids from the device tree. */
static struct platform_driver spi_imx_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = spi_imx_dt_ids,
		.pm = pm_ptr(&imx_spi_pm),
	},
	.probe = spi_imx_probe,
	.remove = spi_imx_remove,
};
module_platform_driver(spi_imx_driver);

MODULE_DESCRIPTION("i.MX SPI Controller driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
2472