1 // SPDX-License-Identifier: GPL-2.0+ 2 // Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved. 3 // Copyright (C) 2008 Juergen Beisert 4 5 #include <linux/bits.h> 6 #include <linux/bitfield.h> 7 #include <linux/clk.h> 8 #include <linux/completion.h> 9 #include <linux/delay.h> 10 #include <linux/dmaengine.h> 11 #include <linux/dma-mapping.h> 12 #include <linux/err.h> 13 #include <linux/interrupt.h> 14 #include <linux/io.h> 15 #include <linux/irq.h> 16 #include <linux/kernel.h> 17 #include <linux/math.h> 18 #include <linux/math64.h> 19 #include <linux/module.h> 20 #include <linux/overflow.h> 21 #include <linux/pinctrl/consumer.h> 22 #include <linux/platform_device.h> 23 #include <linux/pm_runtime.h> 24 #include <linux/slab.h> 25 #include <linux/spi/spi.h> 26 #include <linux/types.h> 27 #include <linux/of.h> 28 #include <linux/property.h> 29 30 #include <linux/dma/imx-dma.h> 31 32 #define DRIVER_NAME "spi_imx" 33 34 static bool use_dma = true; 35 module_param(use_dma, bool, 0644); 36 MODULE_PARM_DESC(use_dma, "Enable usage of DMA when available (default)"); 37 38 /* define polling limits */ 39 static unsigned int polling_limit_us = 30; 40 module_param(polling_limit_us, uint, 0664); 41 MODULE_PARM_DESC(polling_limit_us, 42 "time in us to run a transfer in polling mode\n"); 43 44 #define MXC_RPM_TIMEOUT 2000 /* 2000ms */ 45 #define MXC_SPI_DEFAULT_SPEED 500000 /* 500KHz */ 46 47 #define MXC_CSPIRXDATA 0x00 48 #define MXC_CSPITXDATA 0x04 49 #define MXC_CSPICTRL 0x08 50 #define MXC_CSPIINT 0x0c 51 #define MXC_RESET 0x1c 52 53 /* generic defines to abstract from the different register layouts */ 54 #define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */ 55 #define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */ 56 #define MXC_INT_RDR BIT(4) /* Receive date threshold interrupt */ 57 58 /* The maximum bytes that a sdma BD can transfer. */ 59 #define MAX_SDMA_BD_BYTES (1 << 15) 60 #define MX51_ECSPI_CTRL_MAX_BURST 512 61 /* The maximum bytes that IMX53_ECSPI can transfer in target mode.*/ 62 #define MX53_MAX_TRANSFER_BYTES 512 63 #define BYTES_PER_32BITS_WORD 4 64 65 enum spi_imx_devtype { 66 IMX1_CSPI, 67 IMX21_CSPI, 68 IMX27_CSPI, 69 IMX31_CSPI, 70 IMX35_CSPI, /* CSPI on all i.mx except above */ 71 IMX51_ECSPI, /* ECSPI on i.mx51 */ 72 IMX53_ECSPI, /* ECSPI on i.mx53 and later */ 73 }; 74 75 struct spi_imx_data; 76 77 struct spi_imx_devtype_data { 78 void (*intctrl)(struct spi_imx_data *spi_imx, int enable); 79 int (*prepare_message)(struct spi_imx_data *spi_imx, struct spi_message *msg); 80 int (*prepare_transfer)(struct spi_imx_data *spi_imx, struct spi_device *spi, 81 struct spi_transfer *t); 82 void (*trigger)(struct spi_imx_data *spi_imx); 83 int (*rx_available)(struct spi_imx_data *spi_imx); 84 void (*reset)(struct spi_imx_data *spi_imx); 85 void (*setup_wml)(struct spi_imx_data *spi_imx); 86 void (*disable)(struct spi_imx_data *spi_imx); 87 bool has_dmamode; 88 bool has_targetmode; 89 unsigned int fifo_size; 90 bool dynamic_burst; 91 /* 92 * ERR009165 fixed or not: 93 * https://www.nxp.com/docs/en/errata/IMX6DQCE.pdf 94 */ 95 bool tx_glitch_fixed; 96 enum spi_imx_devtype devtype; 97 }; 98 99 struct dma_data_package { 100 u32 cmd_word; 101 void *dma_rx_buf; 102 void *dma_tx_buf; 103 dma_addr_t dma_tx_addr; 104 dma_addr_t dma_rx_addr; 105 int dma_len; 106 int data_len; 107 }; 108 109 struct spi_imx_data { 110 struct spi_controller *controller; 111 struct device *dev; 112 113 struct completion xfer_done; 114 void __iomem *base; 115 unsigned long base_phys; 116 117 struct clk *clk_per; 118 struct clk *clk_ipg; 119 unsigned long spi_clk; 120 unsigned int spi_bus_clk; 121 122 unsigned int bits_per_word; 123 unsigned int spi_drctl; 124 125 unsigned int count, remainder; 126 void (*tx)(struct spi_imx_data *spi_imx); 127 void (*rx)(struct spi_imx_data *spi_imx); 128 void *rx_buf; 129 const void *tx_buf; 130 unsigned int txfifo; /* number of words pushed in tx FIFO */ 131 unsigned int dynamic_burst; 132 bool rx_only; 133 134 /* Target mode */ 135 bool target_mode; 136 bool target_aborted; 137 unsigned int target_burst; 138 139 /* DMA */ 140 bool usedma; 141 u32 wml; 142 struct completion dma_rx_completion; 143 struct completion dma_tx_completion; 144 size_t dma_package_num; 145 struct dma_data_package *dma_data; 146 int rx_offset; 147 148 const struct spi_imx_devtype_data *devtype_data; 149 }; 150 151 static inline int is_imx27_cspi(struct spi_imx_data *d) 152 { 153 return d->devtype_data->devtype == IMX27_CSPI; 154 } 155 156 static inline int is_imx35_cspi(struct spi_imx_data *d) 157 { 158 return d->devtype_data->devtype == IMX35_CSPI; 159 } 160 161 static inline int is_imx51_ecspi(struct spi_imx_data *d) 162 { 163 return d->devtype_data->devtype == IMX51_ECSPI; 164 } 165 166 static inline int is_imx53_ecspi(struct spi_imx_data *d) 167 { 168 return d->devtype_data->devtype == IMX53_ECSPI; 169 } 170 171 #define MXC_SPI_BUF_RX(type) \ 172 static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx) \ 173 { \ 174 unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA); \ 175 \ 176 if (spi_imx->rx_buf) { \ 177 *(type *)spi_imx->rx_buf = val; \ 178 spi_imx->rx_buf += sizeof(type); \ 179 } \ 180 \ 181 spi_imx->remainder -= sizeof(type); \ 182 } 183 184 #define MXC_SPI_BUF_TX(type) \ 185 static void spi_imx_buf_tx_##type(struct spi_imx_data *spi_imx) \ 186 { \ 187 type val = 0; \ 188 \ 189 if (spi_imx->tx_buf) { \ 190 val = *(type *)spi_imx->tx_buf; \ 191 spi_imx->tx_buf += sizeof(type); \ 192 } \ 193 \ 194 spi_imx->count -= sizeof(type); \ 195 \ 196 writel(val, spi_imx->base + MXC_CSPITXDATA); \ 197 } 198 199 MXC_SPI_BUF_RX(u8) 200 MXC_SPI_BUF_TX(u8) 201 MXC_SPI_BUF_RX(u16) 202 MXC_SPI_BUF_TX(u16) 203 MXC_SPI_BUF_RX(u32) 204 MXC_SPI_BUF_TX(u32) 205 206 /* Align to cache line to avoid swiotlo bounce */ 207 #define DMA_CACHE_ALIGNED_LEN(x) ALIGN((x), dma_get_cache_alignment()) 208 209 /* First entry is reserved, second entry is valid only if SDHC_SPIEN is set 210 * (which is currently not the case in this driver) 211 */ 212 static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192, 213 256, 384, 512, 768, 1024}; 214 215 /* MX21, MX27 */ 216 static unsigned int spi_imx_clkdiv_1(unsigned int fin, 217 unsigned int fspi, unsigned int max, unsigned int *fres) 218 { 219 int i; 220 221 for (i = 2; i < max; i++) 222 if (fspi * mxc_clkdivs[i] >= fin) 223 break; 224 225 *fres = fin / mxc_clkdivs[i]; 226 return i; 227 } 228 229 /* MX1, MX31, MX35, MX51 CSPI */ 230 static unsigned int spi_imx_clkdiv_2(unsigned int fin, 231 unsigned int fspi, unsigned int *fres) 232 { 233 int i, div = 4; 234 235 for (i = 0; i < 7; i++) { 236 if (fspi * div >= fin) 237 goto out; 238 div <<= 1; 239 } 240 241 out: 242 *fres = fin / div; 243 return i; 244 } 245 246 static int spi_imx_bytes_per_word(const int bits_per_word) 247 { 248 if (bits_per_word <= 8) 249 return 1; 250 else if (bits_per_word <= 16) 251 return 2; 252 else 253 return 4; 254 } 255 256 static bool spi_imx_can_dma(struct spi_controller *controller, struct spi_device *spi, 257 struct spi_transfer *transfer) 258 { 259 struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller); 260 261 if (!use_dma || controller->fallback) 262 return false; 263 264 if (!controller->dma_rx) 265 return false; 266 267 /* 268 * Due to Freescale errata ERR003775 "eCSPI: Burst completion by Chip 269 * Select (SS) signal in Slave mode is not functional" burst size must 270 * be set exactly to the size of the transfer. This limit SPI transaction 271 * with maximum 2^12 bits. 272 */ 273 if (transfer->len > MX53_MAX_TRANSFER_BYTES && spi_imx->target_mode) 274 return false; 275 276 if (transfer->len < spi_imx->devtype_data->fifo_size) 277 return false; 278 279 /* DMA only can transmit data in bytes */ 280 if (spi_imx->bits_per_word != 8 && spi_imx->bits_per_word != 16 && 281 spi_imx->bits_per_word != 32) 282 return false; 283 284 if (transfer->len >= MAX_SDMA_BD_BYTES) 285 return false; 286 287 spi_imx->dynamic_burst = 0; 288 289 return true; 290 } 291 292 /* 293 * Note the number of natively supported chip selects for MX51 is 4. Some 294 * devices may have less actual SS pins but the register map supports 4. When 295 * using gpio chip selects the cs values passed into the macros below can go 296 * outside the range 0 - 3. We therefore need to limit the cs value to avoid 297 * corrupting bits outside the allocated locations. 298 * 299 * The simplest way to do this is to just mask the cs bits to 2 bits. This 300 * still allows all 4 native chip selects to work as well as gpio chip selects 301 * (which can use any of the 4 chip select configurations). 302 */ 303 304 #define MX51_ECSPI_CTRL 0x08 305 #define MX51_ECSPI_CTRL_ENABLE (1 << 0) 306 #define MX51_ECSPI_CTRL_XCH (1 << 2) 307 #define MX51_ECSPI_CTRL_SMC (1 << 3) 308 #define MX51_ECSPI_CTRL_MODE_MASK (0xf << 4) 309 #define MX51_ECSPI_CTRL_DRCTL(drctl) ((drctl) << 16) 310 #define MX51_ECSPI_CTRL_POSTDIV_OFFSET 8 311 #define MX51_ECSPI_CTRL_PREDIV_OFFSET 12 312 #define MX51_ECSPI_CTRL_CS(cs) ((cs & 3) << 18) 313 #define MX51_ECSPI_CTRL_BL_OFFSET 20 314 #define MX51_ECSPI_CTRL_BL_MASK (0xfff << 20) 315 316 #define MX51_ECSPI_CONFIG 0x0c 317 #define MX51_ECSPI_CONFIG_SCLKPHA(cs) (1 << ((cs & 3) + 0)) 318 #define MX51_ECSPI_CONFIG_SCLKPOL(cs) (1 << ((cs & 3) + 4)) 319 #define MX51_ECSPI_CONFIG_SBBCTRL(cs) (1 << ((cs & 3) + 8)) 320 #define MX51_ECSPI_CONFIG_SSBPOL(cs) (1 << ((cs & 3) + 12)) 321 #define MX51_ECSPI_CONFIG_DATACTL(cs) (1 << ((cs & 3) + 16)) 322 #define MX51_ECSPI_CONFIG_SCLKCTL(cs) (1 << ((cs & 3) + 20)) 323 324 #define MX51_ECSPI_INT 0x10 325 #define MX51_ECSPI_INT_TEEN (1 << 0) 326 #define MX51_ECSPI_INT_RREN (1 << 3) 327 #define MX51_ECSPI_INT_RDREN (1 << 4) 328 329 #define MX51_ECSPI_DMA 0x14 330 #define MX51_ECSPI_DMA_TX_WML(wml) ((wml) & 0x3f) 331 #define MX51_ECSPI_DMA_RX_WML(wml) (((wml) & 0x3f) << 16) 332 #define MX51_ECSPI_DMA_RXT_WML(wml) (((wml) & 0x3f) << 24) 333 334 #define MX51_ECSPI_DMA_TEDEN (1 << 7) 335 #define MX51_ECSPI_DMA_RXDEN (1 << 23) 336 #define MX51_ECSPI_DMA_RXTDEN (1 << 31) 337 338 #define MX51_ECSPI_STAT 0x18 339 #define MX51_ECSPI_STAT_RR (1 << 3) 340 341 #define MX51_ECSPI_PERIOD 0x1c 342 #define MX51_ECSPI_PERIOD_MASK 0x7fff 343 /* 344 * As measured on the i.MX6, the SPI host controller inserts a 4 SPI-Clock 345 * (SCLK) delay after each burst if the PERIOD reg is 0x0. This value will be 346 * called MX51_ECSPI_PERIOD_MIN_DELAY_SCK. 347 * 348 * If the PERIOD register is != 0, the controller inserts a delay of 349 * MX51_ECSPI_PERIOD_MIN_DELAY_SCK + register value + 1 SCLK after each burst. 350 */ 351 #define MX51_ECSPI_PERIOD_MIN_DELAY_SCK 4 352 353 #define MX51_ECSPI_TESTREG 0x20 354 #define MX51_ECSPI_TESTREG_LBC BIT(31) 355 356 static void spi_imx_buf_rx_swap_u32(struct spi_imx_data *spi_imx) 357 { 358 unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA); 359 360 if (spi_imx->rx_buf) { 361 #ifdef __LITTLE_ENDIAN 362 unsigned int bytes_per_word; 363 364 bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word); 365 if (bytes_per_word == 1) 366 swab32s(&val); 367 else if (bytes_per_word == 2) 368 swahw32s(&val); 369 #endif 370 *(u32 *)spi_imx->rx_buf = val; 371 spi_imx->rx_buf += sizeof(u32); 372 } 373 374 spi_imx->remainder -= sizeof(u32); 375 } 376 377 static void spi_imx_buf_rx_swap(struct spi_imx_data *spi_imx) 378 { 379 int unaligned; 380 u32 val; 381 382 unaligned = spi_imx->remainder % 4; 383 384 if (!unaligned) { 385 spi_imx_buf_rx_swap_u32(spi_imx); 386 return; 387 } 388 389 if (spi_imx_bytes_per_word(spi_imx->bits_per_word) == 2) { 390 spi_imx_buf_rx_u16(spi_imx); 391 return; 392 } 393 394 val = readl(spi_imx->base + MXC_CSPIRXDATA); 395 396 while (unaligned--) { 397 if (spi_imx->rx_buf) { 398 *(u8 *)spi_imx->rx_buf = (val >> (8 * unaligned)) & 0xff; 399 spi_imx->rx_buf++; 400 } 401 spi_imx->remainder--; 402 } 403 } 404 405 static void spi_imx_buf_tx_swap_u32(struct spi_imx_data *spi_imx) 406 { 407 u32 val = 0; 408 #ifdef __LITTLE_ENDIAN 409 unsigned int bytes_per_word; 410 #endif 411 412 if (spi_imx->tx_buf) { 413 val = *(u32 *)spi_imx->tx_buf; 414 spi_imx->tx_buf += sizeof(u32); 415 } 416 417 spi_imx->count -= sizeof(u32); 418 #ifdef __LITTLE_ENDIAN 419 bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word); 420 421 if (bytes_per_word == 1) 422 swab32s(&val); 423 else if (bytes_per_word == 2) 424 swahw32s(&val); 425 #endif 426 writel(val, spi_imx->base + MXC_CSPITXDATA); 427 } 428 429 static void spi_imx_buf_tx_swap(struct spi_imx_data *spi_imx) 430 { 431 int unaligned; 432 u32 val = 0; 433 434 unaligned = spi_imx->count % 4; 435 436 if (!unaligned) { 437 spi_imx_buf_tx_swap_u32(spi_imx); 438 return; 439 } 440 441 if (spi_imx_bytes_per_word(spi_imx->bits_per_word) == 2) { 442 spi_imx_buf_tx_u16(spi_imx); 443 return; 444 } 445 446 while (unaligned--) { 447 if (spi_imx->tx_buf) { 448 val |= *(u8 *)spi_imx->tx_buf << (8 * unaligned); 449 spi_imx->tx_buf++; 450 } 451 spi_imx->count--; 452 } 453 454 writel(val, spi_imx->base + MXC_CSPITXDATA); 455 } 456 457 static void mx53_ecspi_rx_target(struct spi_imx_data *spi_imx) 458 { 459 u32 val = readl(spi_imx->base + MXC_CSPIRXDATA); 460 #ifdef __LITTLE_ENDIAN 461 unsigned int bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word); 462 463 if (bytes_per_word == 1) 464 swab32s(&val); 465 else if (bytes_per_word == 2) 466 swahw32s(&val); 467 #endif 468 if (spi_imx->rx_buf) { 469 int n_bytes = spi_imx->target_burst % sizeof(val); 470 471 if (!n_bytes) 472 n_bytes = sizeof(val); 473 474 memcpy(spi_imx->rx_buf, 475 ((u8 *)&val) + sizeof(val) - n_bytes, n_bytes); 476 477 spi_imx->rx_buf += n_bytes; 478 spi_imx->target_burst -= n_bytes; 479 } 480 481 spi_imx->remainder -= sizeof(u32); 482 } 483 484 static void mx53_ecspi_tx_target(struct spi_imx_data *spi_imx) 485 { 486 u32 val = 0; 487 int n_bytes = spi_imx->count % sizeof(val); 488 #ifdef __LITTLE_ENDIAN 489 unsigned int bytes_per_word; 490 #endif 491 492 if (!n_bytes) 493 n_bytes = sizeof(val); 494 495 if (spi_imx->tx_buf) { 496 memcpy(((u8 *)&val) + sizeof(val) - n_bytes, 497 spi_imx->tx_buf, n_bytes); 498 spi_imx->tx_buf += n_bytes; 499 } 500 501 spi_imx->count -= n_bytes; 502 503 #ifdef __LITTLE_ENDIAN 504 bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word); 505 if (bytes_per_word == 1) 506 swab32s(&val); 507 else if (bytes_per_word == 2) 508 swahw32s(&val); 509 #endif 510 writel(val, spi_imx->base + MXC_CSPITXDATA); 511 } 512 513 /* MX51 eCSPI */ 514 static unsigned int mx51_ecspi_clkdiv(struct spi_imx_data *spi_imx, 515 unsigned int fspi, unsigned int *fres) 516 { 517 /* 518 * there are two 4-bit dividers, the pre-divider divides by 519 * $pre, the post-divider by 2^$post 520 */ 521 unsigned int pre, post; 522 unsigned int fin = spi_imx->spi_clk; 523 524 fspi = min(fspi, fin); 525 526 post = fls(fin) - fls(fspi); 527 if (fin > fspi << post) 528 post++; 529 530 /* now we have: (fin <= fspi << post) with post being minimal */ 531 532 post = max(4U, post) - 4; 533 if (unlikely(post > 0xf)) { 534 dev_err(spi_imx->dev, "cannot set clock freq: %u (base freq: %u)\n", 535 fspi, fin); 536 return 0xff; 537 } 538 539 pre = DIV_ROUND_UP(fin, fspi << post) - 1; 540 541 dev_dbg(spi_imx->dev, "%s: fin: %u, fspi: %u, post: %u, pre: %u\n", 542 __func__, fin, fspi, post, pre); 543 544 /* Resulting frequency for the SCLK line. */ 545 *fres = (fin / (pre + 1)) >> post; 546 547 return (pre << MX51_ECSPI_CTRL_PREDIV_OFFSET) | 548 (post << MX51_ECSPI_CTRL_POSTDIV_OFFSET); 549 } 550 551 static void mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int enable) 552 { 553 unsigned int val = 0; 554 555 if (enable & MXC_INT_TE) 556 val |= MX51_ECSPI_INT_TEEN; 557 558 if (enable & MXC_INT_RR) 559 val |= MX51_ECSPI_INT_RREN; 560 561 if (enable & MXC_INT_RDR) 562 val |= MX51_ECSPI_INT_RDREN; 563 564 writel(val, spi_imx->base + MX51_ECSPI_INT); 565 } 566 567 static void mx51_ecspi_trigger(struct spi_imx_data *spi_imx) 568 { 569 u32 reg; 570 571 if (spi_imx->usedma) { 572 reg = readl(spi_imx->base + MX51_ECSPI_DMA); 573 reg |= MX51_ECSPI_DMA_TEDEN | MX51_ECSPI_DMA_RXDEN; 574 writel(reg, spi_imx->base + MX51_ECSPI_DMA); 575 } else { 576 reg = readl(spi_imx->base + MX51_ECSPI_CTRL); 577 reg |= MX51_ECSPI_CTRL_XCH; 578 writel(reg, spi_imx->base + MX51_ECSPI_CTRL); 579 } 580 } 581 582 static void mx51_ecspi_disable(struct spi_imx_data *spi_imx) 583 { 584 u32 ctrl; 585 586 ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL); 587 ctrl &= ~MX51_ECSPI_CTRL_ENABLE; 588 writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL); 589 } 590 591 static int mx51_ecspi_channel(const struct spi_device *spi) 592 { 593 if (!spi_get_csgpiod(spi, 0)) 594 return spi_get_chipselect(spi, 0); 595 return spi->controller->unused_native_cs; 596 } 597 598 static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx, 599 struct spi_message *msg) 600 { 601 struct spi_device *spi = msg->spi; 602 struct spi_transfer *xfer; 603 u32 ctrl = MX51_ECSPI_CTRL_ENABLE; 604 u32 min_speed_hz = ~0U; 605 u32 testreg, delay; 606 u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG); 607 u32 current_cfg = cfg; 608 int channel = mx51_ecspi_channel(spi); 609 610 /* set Host or Target mode */ 611 if (spi_imx->target_mode) 612 ctrl &= ~MX51_ECSPI_CTRL_MODE_MASK; 613 else 614 ctrl |= MX51_ECSPI_CTRL_MODE_MASK; 615 616 /* 617 * Enable SPI_RDY handling (falling edge/level triggered). 618 */ 619 if (spi->mode & SPI_READY) 620 ctrl |= MX51_ECSPI_CTRL_DRCTL(spi_imx->spi_drctl); 621 622 /* set chip select to use */ 623 ctrl |= MX51_ECSPI_CTRL_CS(channel); 624 625 /* 626 * The ctrl register must be written first, with the EN bit set other 627 * registers must not be written to. 628 */ 629 writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL); 630 631 testreg = readl(spi_imx->base + MX51_ECSPI_TESTREG); 632 if (spi->mode & SPI_LOOP) 633 testreg |= MX51_ECSPI_TESTREG_LBC; 634 else 635 testreg &= ~MX51_ECSPI_TESTREG_LBC; 636 writel(testreg, spi_imx->base + MX51_ECSPI_TESTREG); 637 638 /* 639 * eCSPI burst completion by Chip Select signal in Target mode 640 * is not functional for imx53 Soc, config SPI burst completed when 641 * BURST_LENGTH + 1 bits are received 642 */ 643 if (spi_imx->target_mode) 644 cfg &= ~MX51_ECSPI_CONFIG_SBBCTRL(channel); 645 else 646 cfg |= MX51_ECSPI_CONFIG_SBBCTRL(channel); 647 648 if (spi->mode & SPI_CPOL) { 649 cfg |= MX51_ECSPI_CONFIG_SCLKPOL(channel); 650 cfg |= MX51_ECSPI_CONFIG_SCLKCTL(channel); 651 } else { 652 cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(channel); 653 cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(channel); 654 } 655 656 if (spi->mode & SPI_MOSI_IDLE_LOW) 657 cfg |= MX51_ECSPI_CONFIG_DATACTL(channel); 658 else 659 cfg &= ~MX51_ECSPI_CONFIG_DATACTL(channel); 660 661 if (spi->mode & SPI_CS_HIGH) 662 cfg |= MX51_ECSPI_CONFIG_SSBPOL(channel); 663 else 664 cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(channel); 665 666 if (cfg == current_cfg) 667 return 0; 668 669 writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG); 670 671 /* 672 * Wait until the changes in the configuration register CONFIGREG 673 * propagate into the hardware. It takes exactly one tick of the 674 * SCLK clock, but we will wait two SCLK clock just to be sure. The 675 * effect of the delay it takes for the hardware to apply changes 676 * is noticable if the SCLK clock run very slow. In such a case, if 677 * the polarity of SCLK should be inverted, the GPIO ChipSelect might 678 * be asserted before the SCLK polarity changes, which would disrupt 679 * the SPI communication as the device on the other end would consider 680 * the change of SCLK polarity as a clock tick already. 681 * 682 * Because spi_imx->spi_bus_clk is only set in prepare_message 683 * callback, iterate over all the transfers in spi_message, find the 684 * one with lowest bus frequency, and use that bus frequency for the 685 * delay calculation. In case all transfers have speed_hz == 0, then 686 * min_speed_hz is ~0 and the resulting delay is zero. 687 */ 688 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 689 if (!xfer->speed_hz) 690 continue; 691 min_speed_hz = min(xfer->speed_hz, min_speed_hz); 692 } 693 694 delay = (2 * 1000000) / min_speed_hz; 695 if (likely(delay < 10)) /* SCLK is faster than 200 kHz */ 696 udelay(delay); 697 else /* SCLK is _very_ slow */ 698 usleep_range(delay, delay + 10); 699 700 return 0; 701 } 702 703 static void mx51_configure_cpha(struct spi_imx_data *spi_imx, 704 struct spi_device *spi) 705 { 706 bool cpha = (spi->mode & SPI_CPHA); 707 bool flip_cpha = (spi->mode & SPI_RX_CPHA_FLIP) && spi_imx->rx_only; 708 u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG); 709 int channel = mx51_ecspi_channel(spi); 710 711 /* Flip cpha logical value iff flip_cpha */ 712 cpha ^= flip_cpha; 713 714 if (cpha) 715 cfg |= MX51_ECSPI_CONFIG_SCLKPHA(channel); 716 else 717 cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(channel); 718 719 writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG); 720 } 721 722 static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx, 723 struct spi_device *spi, struct spi_transfer *t) 724 { 725 u32 ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL); 726 u64 word_delay_sck; 727 u32 clk; 728 729 /* Clear BL field and set the right value */ 730 ctrl &= ~MX51_ECSPI_CTRL_BL_MASK; 731 if (spi_imx->target_mode) 732 ctrl |= (spi_imx->target_burst * 8 - 1) 733 << MX51_ECSPI_CTRL_BL_OFFSET; 734 else { 735 ctrl |= (spi_imx->bits_per_word - 1) 736 << MX51_ECSPI_CTRL_BL_OFFSET; 737 } 738 739 /* set clock speed */ 740 ctrl &= ~(0xf << MX51_ECSPI_CTRL_POSTDIV_OFFSET | 741 0xf << MX51_ECSPI_CTRL_PREDIV_OFFSET); 742 743 if (!spi_imx->target_mode) { 744 ctrl |= mx51_ecspi_clkdiv(spi_imx, spi_imx->spi_bus_clk, &clk); 745 spi_imx->spi_bus_clk = clk; 746 } 747 748 mx51_configure_cpha(spi_imx, spi); 749 750 /* 751 * ERR009165: work in XHC mode instead of SMC as PIO on the chips 752 * before i.mx6ul. 753 */ 754 if (spi_imx->usedma && spi_imx->devtype_data->tx_glitch_fixed) 755 ctrl |= MX51_ECSPI_CTRL_SMC; 756 else 757 ctrl &= ~MX51_ECSPI_CTRL_SMC; 758 759 writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL); 760 761 /* calculate word delay in SPI Clock (SCLK) cycles */ 762 if (t->word_delay.value == 0) { 763 word_delay_sck = 0; 764 } else if (t->word_delay.unit == SPI_DELAY_UNIT_SCK) { 765 word_delay_sck = t->word_delay.value; 766 767 if (word_delay_sck <= MX51_ECSPI_PERIOD_MIN_DELAY_SCK) 768 word_delay_sck = 0; 769 else if (word_delay_sck <= MX51_ECSPI_PERIOD_MIN_DELAY_SCK + 1) 770 word_delay_sck = 1; 771 else 772 word_delay_sck -= MX51_ECSPI_PERIOD_MIN_DELAY_SCK + 1; 773 } else { 774 int word_delay_ns; 775 776 word_delay_ns = spi_delay_to_ns(&t->word_delay, t); 777 if (word_delay_ns < 0) 778 return word_delay_ns; 779 780 if (word_delay_ns <= mul_u64_u32_div(NSEC_PER_SEC, 781 MX51_ECSPI_PERIOD_MIN_DELAY_SCK, 782 spi_imx->spi_bus_clk)) { 783 word_delay_sck = 0; 784 } else if (word_delay_ns <= mul_u64_u32_div(NSEC_PER_SEC, 785 MX51_ECSPI_PERIOD_MIN_DELAY_SCK + 1, 786 spi_imx->spi_bus_clk)) { 787 word_delay_sck = 1; 788 } else { 789 word_delay_ns -= mul_u64_u32_div(NSEC_PER_SEC, 790 MX51_ECSPI_PERIOD_MIN_DELAY_SCK + 1, 791 spi_imx->spi_bus_clk); 792 793 word_delay_sck = DIV_U64_ROUND_UP((u64)word_delay_ns * spi_imx->spi_bus_clk, 794 NSEC_PER_SEC); 795 } 796 } 797 798 if (!FIELD_FIT(MX51_ECSPI_PERIOD_MASK, word_delay_sck)) 799 return -EINVAL; 800 801 writel(FIELD_PREP(MX51_ECSPI_PERIOD_MASK, word_delay_sck), 802 spi_imx->base + MX51_ECSPI_PERIOD); 803 804 return 0; 805 } 806 807 static void mx51_setup_wml(struct spi_imx_data *spi_imx) 808 { 809 u32 tx_wml = 0; 810 811 if (spi_imx->devtype_data->tx_glitch_fixed) 812 tx_wml = spi_imx->wml; 813 /* 814 * Configure the DMA register: setup the watermark 815 * and enable DMA request. 816 */ 817 writel(MX51_ECSPI_DMA_RX_WML(spi_imx->wml - 1) | 818 MX51_ECSPI_DMA_TX_WML(tx_wml) | 819 MX51_ECSPI_DMA_RXT_WML(spi_imx->wml) | 820 MX51_ECSPI_DMA_RXTDEN, spi_imx->base + MX51_ECSPI_DMA); 821 } 822 823 static int mx51_ecspi_rx_available(struct spi_imx_data *spi_imx) 824 { 825 return readl(spi_imx->base + MX51_ECSPI_STAT) & MX51_ECSPI_STAT_RR; 826 } 827 828 static void mx51_ecspi_reset(struct spi_imx_data *spi_imx) 829 { 830 /* drain receive buffer */ 831 while (mx51_ecspi_rx_available(spi_imx)) 832 readl(spi_imx->base + MXC_CSPIRXDATA); 833 } 834 835 #define MX31_INTREG_TEEN (1 << 0) 836 #define MX31_INTREG_RREN (1 << 3) 837 838 #define MX31_CSPICTRL_ENABLE (1 << 0) 839 #define MX31_CSPICTRL_HOST (1 << 1) 840 #define MX31_CSPICTRL_XCH (1 << 2) 841 #define MX31_CSPICTRL_SMC (1 << 3) 842 #define MX31_CSPICTRL_POL (1 << 4) 843 #define MX31_CSPICTRL_PHA (1 << 5) 844 #define MX31_CSPICTRL_SSCTL (1 << 6) 845 #define MX31_CSPICTRL_SSPOL (1 << 7) 846 #define MX31_CSPICTRL_BC_SHIFT 8 847 #define MX35_CSPICTRL_BL_SHIFT 20 848 #define MX31_CSPICTRL_CS_SHIFT 24 849 #define MX35_CSPICTRL_CS_SHIFT 12 850 #define MX31_CSPICTRL_DR_SHIFT 16 851 852 #define MX31_CSPI_DMAREG 0x10 853 #define MX31_DMAREG_RH_DEN (1<<4) 854 #define MX31_DMAREG_TH_DEN (1<<1) 855 856 #define MX31_CSPISTATUS 0x14 857 #define MX31_STATUS_RR (1 << 3) 858 859 #define MX31_CSPI_TESTREG 0x1C 860 #define MX31_TEST_LBC (1 << 14) 861 862 /* These functions also work for the i.MX35, but be aware that 863 * the i.MX35 has a slightly different register layout for bits 864 * we do not use here. 865 */ 866 static void mx31_intctrl(struct spi_imx_data *spi_imx, int enable) 867 { 868 unsigned int val = 0; 869 870 if (enable & MXC_INT_TE) 871 val |= MX31_INTREG_TEEN; 872 if (enable & MXC_INT_RR) 873 val |= MX31_INTREG_RREN; 874 875 writel(val, spi_imx->base + MXC_CSPIINT); 876 } 877 878 static void mx31_trigger(struct spi_imx_data *spi_imx) 879 { 880 unsigned int reg; 881 882 reg = readl(spi_imx->base + MXC_CSPICTRL); 883 reg |= MX31_CSPICTRL_XCH; 884 writel(reg, spi_imx->base + MXC_CSPICTRL); 885 } 886 887 static int mx31_prepare_message(struct spi_imx_data *spi_imx, 888 struct spi_message *msg) 889 { 890 return 0; 891 } 892 893 static int mx31_prepare_transfer(struct spi_imx_data *spi_imx, 894 struct spi_device *spi, struct spi_transfer *t) 895 { 896 unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_HOST; 897 unsigned int clk; 898 899 reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->spi_bus_clk, &clk) << 900 MX31_CSPICTRL_DR_SHIFT; 901 spi_imx->spi_bus_clk = clk; 902 903 if (is_imx35_cspi(spi_imx)) { 904 reg |= (spi_imx->bits_per_word - 1) << MX35_CSPICTRL_BL_SHIFT; 905 reg |= MX31_CSPICTRL_SSCTL; 906 } else { 907 reg |= (spi_imx->bits_per_word - 1) << MX31_CSPICTRL_BC_SHIFT; 908 } 909 910 if (spi->mode & SPI_CPHA) 911 reg |= MX31_CSPICTRL_PHA; 912 if (spi->mode & SPI_CPOL) 913 reg |= MX31_CSPICTRL_POL; 914 if (spi->mode & SPI_CS_HIGH) 915 reg |= MX31_CSPICTRL_SSPOL; 916 if (!spi_get_csgpiod(spi, 0)) 917 reg |= (spi_get_chipselect(spi, 0)) << 918 (is_imx35_cspi(spi_imx) ? MX35_CSPICTRL_CS_SHIFT : 919 MX31_CSPICTRL_CS_SHIFT); 920 921 if (spi_imx->usedma) 922 reg |= MX31_CSPICTRL_SMC; 923 924 writel(reg, spi_imx->base + MXC_CSPICTRL); 925 926 reg = readl(spi_imx->base + MX31_CSPI_TESTREG); 927 if (spi->mode & SPI_LOOP) 928 reg |= MX31_TEST_LBC; 929 else 930 reg &= ~MX31_TEST_LBC; 931 writel(reg, spi_imx->base + MX31_CSPI_TESTREG); 932 933 if (spi_imx->usedma) { 934 /* 935 * configure DMA requests when RXFIFO is half full and 936 * when TXFIFO is half empty 937 */ 938 writel(MX31_DMAREG_RH_DEN | MX31_DMAREG_TH_DEN, 939 spi_imx->base + MX31_CSPI_DMAREG); 940 } 941 942 return 0; 943 } 944 945 static int mx31_rx_available(struct spi_imx_data *spi_imx) 946 { 947 return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR; 948 } 949 950 static void mx31_reset(struct spi_imx_data *spi_imx) 951 { 952 /* drain receive buffer */ 953 while (readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR) 954 readl(spi_imx->base + MXC_CSPIRXDATA); 955 } 956 957 #define MX21_INTREG_RR (1 << 4) 958 #define MX21_INTREG_TEEN (1 << 9) 959 #define MX21_INTREG_RREN (1 << 13) 960 961 #define MX21_CSPICTRL_POL (1 << 5) 962 #define MX21_CSPICTRL_PHA (1 << 6) 963 #define MX21_CSPICTRL_SSPOL (1 << 8) 964 #define MX21_CSPICTRL_XCH (1 << 9) 965 #define MX21_CSPICTRL_ENABLE (1 << 10) 966 #define MX21_CSPICTRL_HOST (1 << 11) 967 #define MX21_CSPICTRL_DR_SHIFT 14 968 #define MX21_CSPICTRL_CS_SHIFT 19 969 970 static void mx21_intctrl(struct spi_imx_data *spi_imx, int enable) 971 { 972 unsigned int val = 0; 973 974 if (enable & MXC_INT_TE) 975 val |= MX21_INTREG_TEEN; 976 if (enable & MXC_INT_RR) 977 val |= MX21_INTREG_RREN; 978 979 writel(val, spi_imx->base + MXC_CSPIINT); 980 } 981 982 static void mx21_trigger(struct spi_imx_data *spi_imx) 983 { 984 unsigned int reg; 985 986 reg = readl(spi_imx->base + MXC_CSPICTRL); 987 reg |= MX21_CSPICTRL_XCH; 988 writel(reg, spi_imx->base + MXC_CSPICTRL); 989 } 990 991 static int mx21_prepare_message(struct spi_imx_data *spi_imx, 992 struct spi_message *msg) 993 { 994 return 0; 995 } 996 997 static int mx21_prepare_transfer(struct spi_imx_data *spi_imx, 998 struct spi_device *spi, struct spi_transfer *t) 999 { 1000 unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_HOST; 1001 unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18; 1002 unsigned int clk; 1003 1004 reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, spi_imx->spi_bus_clk, max, &clk) 1005 << MX21_CSPICTRL_DR_SHIFT; 1006 spi_imx->spi_bus_clk = clk; 1007 1008 reg |= spi_imx->bits_per_word - 1; 1009 1010 if (spi->mode & SPI_CPHA) 1011 reg |= MX21_CSPICTRL_PHA; 1012 if (spi->mode & SPI_CPOL) 1013 reg |= MX21_CSPICTRL_POL; 1014 if (spi->mode & SPI_CS_HIGH) 1015 reg |= MX21_CSPICTRL_SSPOL; 1016 if (!spi_get_csgpiod(spi, 0)) 1017 reg |= spi_get_chipselect(spi, 0) << MX21_CSPICTRL_CS_SHIFT; 1018 1019 writel(reg, spi_imx->base + MXC_CSPICTRL); 1020 1021 return 0; 1022 } 1023 1024 static int mx21_rx_available(struct spi_imx_data *spi_imx) 1025 { 1026 return readl(spi_imx->base + MXC_CSPIINT) & MX21_INTREG_RR; 1027 } 1028 1029 static void mx21_reset(struct spi_imx_data *spi_imx) 1030 { 1031 writel(1, spi_imx->base + MXC_RESET); 1032 } 1033 1034 #define MX1_INTREG_RR (1 << 3) 1035 #define MX1_INTREG_TEEN (1 << 8) 1036 #define MX1_INTREG_RREN (1 << 11) 1037 1038 #define MX1_CSPICTRL_POL (1 << 4) 1039 #define MX1_CSPICTRL_PHA (1 << 5) 1040 #define MX1_CSPICTRL_XCH (1 << 8) 1041 #define MX1_CSPICTRL_ENABLE (1 << 9) 1042 #define MX1_CSPICTRL_HOST (1 << 10) 1043 #define MX1_CSPICTRL_DR_SHIFT 13 1044 1045 static void mx1_intctrl(struct spi_imx_data *spi_imx, int enable) 1046 { 1047 unsigned int val = 0; 1048 1049 if (enable & MXC_INT_TE) 1050 val |= MX1_INTREG_TEEN; 1051 if (enable & MXC_INT_RR) 1052 val |= MX1_INTREG_RREN; 1053 1054 writel(val, spi_imx->base + MXC_CSPIINT); 1055 } 1056 1057 static void mx1_trigger(struct spi_imx_data *spi_imx) 1058 { 1059 unsigned int reg; 1060 1061 reg = readl(spi_imx->base + MXC_CSPICTRL); 1062 reg |= MX1_CSPICTRL_XCH; 1063 writel(reg, spi_imx->base + MXC_CSPICTRL); 1064 } 1065 1066 static int mx1_prepare_message(struct spi_imx_data *spi_imx, 1067 struct spi_message *msg) 1068 { 1069 return 0; 1070 } 1071 1072 static int mx1_prepare_transfer(struct spi_imx_data *spi_imx, 1073 struct spi_device *spi, struct spi_transfer *t) 1074 { 1075 unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_HOST; 1076 unsigned int clk; 1077 1078 reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->spi_bus_clk, &clk) << 1079 MX1_CSPICTRL_DR_SHIFT; 1080 spi_imx->spi_bus_clk = clk; 1081 1082 reg |= spi_imx->bits_per_word - 1; 1083 1084 if (spi->mode & SPI_CPHA) 1085 reg |= MX1_CSPICTRL_PHA; 1086 if (spi->mode & SPI_CPOL) 1087 reg |= MX1_CSPICTRL_POL; 1088 1089 writel(reg, spi_imx->base + MXC_CSPICTRL); 1090 1091 return 0; 1092 } 1093 1094 static int mx1_rx_available(struct spi_imx_data *spi_imx) 1095 { 1096 return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR; 1097 } 1098 1099 static void mx1_reset(struct spi_imx_data *spi_imx) 1100 { 1101 writel(1, spi_imx->base + MXC_RESET); 1102 } 1103 1104 static struct spi_imx_devtype_data imx1_cspi_devtype_data = { 1105 .intctrl = mx1_intctrl, 1106 .prepare_message = mx1_prepare_message, 1107 .prepare_transfer = mx1_prepare_transfer, 1108 .trigger = mx1_trigger, 1109 .rx_available = mx1_rx_available, 1110 .reset = mx1_reset, 1111 .fifo_size = 8, 1112 .has_dmamode = false, 1113 .dynamic_burst = false, 1114 .has_targetmode = false, 1115 .devtype = IMX1_CSPI, 1116 }; 1117 1118 static struct spi_imx_devtype_data imx21_cspi_devtype_data = { 1119 .intctrl = mx21_intctrl, 1120 .prepare_message = mx21_prepare_message, 1121 .prepare_transfer = mx21_prepare_transfer, 1122 .trigger = mx21_trigger, 1123 .rx_available = mx21_rx_available, 1124 .reset = mx21_reset, 1125 .fifo_size = 8, 1126 .has_dmamode = false, 1127 .dynamic_burst = false, 1128 .has_targetmode = false, 1129 .devtype = IMX21_CSPI, 1130 }; 1131 1132 static struct spi_imx_devtype_data imx27_cspi_devtype_data = { 1133 /* i.mx27 cspi shares the functions with i.mx21 one */ 1134 .intctrl = mx21_intctrl, 1135 .prepare_message = mx21_prepare_message, 1136 .prepare_transfer = mx21_prepare_transfer, 1137 .trigger = mx21_trigger, 1138 .rx_available = mx21_rx_available, 1139 .reset = mx21_reset, 1140 .fifo_size = 8, 1141 .has_dmamode = false, 1142 .dynamic_burst = false, 1143 .has_targetmode = false, 1144 .devtype = IMX27_CSPI, 1145 }; 1146 1147 static struct spi_imx_devtype_data imx31_cspi_devtype_data = { 1148 .intctrl = mx31_intctrl, 1149 .prepare_message = mx31_prepare_message, 1150 .prepare_transfer = mx31_prepare_transfer, 1151 .trigger = mx31_trigger, 1152 .rx_available = mx31_rx_available, 1153 .reset = mx31_reset, 1154 .fifo_size = 8, 1155 .has_dmamode = false, 1156 .dynamic_burst = false, 1157 .has_targetmode = false, 1158 .devtype = IMX31_CSPI, 1159 }; 1160 1161 static struct spi_imx_devtype_data imx35_cspi_devtype_data = { 1162 /* i.mx35 and later cspi shares the functions with i.mx31 one */ 1163 .intctrl = mx31_intctrl, 1164 .prepare_message = mx31_prepare_message, 1165 .prepare_transfer = mx31_prepare_transfer, 1166 .trigger = mx31_trigger, 1167 .rx_available = mx31_rx_available, 1168 .reset = mx31_reset, 1169 .fifo_size = 8, 1170 .has_dmamode = false, 1171 .dynamic_burst = false, 1172 .has_targetmode = false, 1173 .devtype = IMX35_CSPI, 1174 }; 1175 1176 static struct spi_imx_devtype_data imx51_ecspi_devtype_data = { 1177 .intctrl = mx51_ecspi_intctrl, 1178 .prepare_message = mx51_ecspi_prepare_message, 1179 .prepare_transfer = mx51_ecspi_prepare_transfer, 1180 .trigger = mx51_ecspi_trigger, 1181 .rx_available = mx51_ecspi_rx_available, 1182 .reset = mx51_ecspi_reset, 1183 .setup_wml = mx51_setup_wml, 1184 .fifo_size = 64, 1185 .has_dmamode = true, 1186 .dynamic_burst = true, 1187 .has_targetmode = true, 1188 .disable = mx51_ecspi_disable, 1189 .devtype = IMX51_ECSPI, 1190 }; 1191 1192 static struct spi_imx_devtype_data imx53_ecspi_devtype_data = { 1193 .intctrl = mx51_ecspi_intctrl, 1194 .prepare_message = mx51_ecspi_prepare_message, 1195 .prepare_transfer = mx51_ecspi_prepare_transfer, 1196 .trigger = mx51_ecspi_trigger, 1197 .rx_available = mx51_ecspi_rx_available, 1198 .reset = mx51_ecspi_reset, 1199 .fifo_size = 64, 1200 .has_dmamode = true, 1201 .has_targetmode = true, 1202 .disable = mx51_ecspi_disable, 1203 .devtype = IMX53_ECSPI, 1204 }; 1205 1206 static struct spi_imx_devtype_data imx6ul_ecspi_devtype_data = { 1207 .intctrl = mx51_ecspi_intctrl, 1208 .prepare_message = mx51_ecspi_prepare_message, 1209 .prepare_transfer = mx51_ecspi_prepare_transfer, 1210 .trigger = mx51_ecspi_trigger, 1211 .rx_available = mx51_ecspi_rx_available, 1212 .reset = mx51_ecspi_reset, 1213 .setup_wml = mx51_setup_wml, 1214 .fifo_size = 64, 1215 .has_dmamode = true, 1216 .dynamic_burst = true, 1217 .has_targetmode = true, 1218 .tx_glitch_fixed = true, 1219 .disable = mx51_ecspi_disable, 1220 .devtype = IMX51_ECSPI, 1221 }; 1222 1223 static const struct of_device_id spi_imx_dt_ids[] = { 1224 { .compatible = "fsl,imx1-cspi", .data = &imx1_cspi_devtype_data, }, 1225 { .compatible = "fsl,imx21-cspi", .data = &imx21_cspi_devtype_data, }, 1226 { .compatible = "fsl,imx27-cspi", .data = &imx27_cspi_devtype_data, }, 1227 { .compatible = "fsl,imx31-cspi", .data = &imx31_cspi_devtype_data, }, 1228 { .compatible = "fsl,imx35-cspi", .data = &imx35_cspi_devtype_data, }, 1229 { .compatible = "fsl,imx51-ecspi", .data = &imx51_ecspi_devtype_data, }, 1230 { .compatible = "fsl,imx53-ecspi", .data = &imx53_ecspi_devtype_data, }, 1231 { .compatible = "fsl,imx6ul-ecspi", .data = &imx6ul_ecspi_devtype_data, }, 1232 { /* sentinel */ } 1233 }; 1234 MODULE_DEVICE_TABLE(of, spi_imx_dt_ids); 1235 1236 static void spi_imx_set_burst_len(struct spi_imx_data *spi_imx, int n_bits) 1237 { 1238 u32 ctrl; 1239 1240 ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL); 1241 ctrl &= ~MX51_ECSPI_CTRL_BL_MASK; 1242 ctrl |= ((n_bits - 1) << MX51_ECSPI_CTRL_BL_OFFSET); 1243 writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL); 1244 } 1245 1246 static void spi_imx_push(struct spi_imx_data *spi_imx) 1247 { 1248 unsigned int burst_len; 1249 1250 /* 1251 * Reload the FIFO when the remaining bytes to be transferred in the 1252 * current burst is 0. This only applies when bits_per_word is a 1253 * multiple of 8. 1254 */ 1255 if (!spi_imx->remainder) { 1256 if (spi_imx->dynamic_burst) { 1257 1258 /* We need to deal unaligned data first */ 1259 burst_len = spi_imx->count % MX51_ECSPI_CTRL_MAX_BURST; 1260 1261 if (!burst_len) 1262 burst_len = MX51_ECSPI_CTRL_MAX_BURST; 1263 1264 spi_imx_set_burst_len(spi_imx, burst_len * 8); 1265 1266 spi_imx->remainder = burst_len; 1267 } else { 1268 spi_imx->remainder = spi_imx_bytes_per_word(spi_imx->bits_per_word); 1269 } 1270 } 1271 1272 while (spi_imx->txfifo < spi_imx->devtype_data->fifo_size) { 1273 if (!spi_imx->count) 1274 break; 1275 if (spi_imx->dynamic_burst && 1276 spi_imx->txfifo >= DIV_ROUND_UP(spi_imx->remainder, 4)) 1277 break; 1278 spi_imx->tx(spi_imx); 1279 spi_imx->txfifo++; 1280 } 1281 1282 if (!spi_imx->target_mode) 1283 spi_imx->devtype_data->trigger(spi_imx); 1284 } 1285 1286 static irqreturn_t spi_imx_isr(int irq, void *dev_id) 1287 { 1288 struct spi_imx_data *spi_imx = dev_id; 1289 1290 while (spi_imx->txfifo && 1291 spi_imx->devtype_data->rx_available(spi_imx)) { 1292 spi_imx->rx(spi_imx); 1293 spi_imx->txfifo--; 1294 } 1295 1296 if (spi_imx->count) { 1297 spi_imx_push(spi_imx); 1298 return IRQ_HANDLED; 1299 } 1300 1301 if (spi_imx->txfifo) { 1302 /* No data left to push, but still waiting for rx data, 1303 * enable receive data available interrupt. 1304 */ 1305 spi_imx->devtype_data->intctrl( 1306 spi_imx, MXC_INT_RR); 1307 return IRQ_HANDLED; 1308 } 1309 1310 spi_imx->devtype_data->intctrl(spi_imx, 0); 1311 complete(&spi_imx->xfer_done); 1312 1313 return IRQ_HANDLED; 1314 } 1315 1316 static int spi_imx_setupxfer(struct spi_device *spi, 1317 struct spi_transfer *t) 1318 { 1319 struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller); 1320 1321 if (!t) 1322 return 0; 1323 1324 if (!spi_imx->target_mode) { 1325 if (!t->speed_hz) { 1326 if (!spi->max_speed_hz) { 1327 dev_err(&spi->dev, "no speed_hz provided!\n"); 1328 return -EINVAL; 1329 } 1330 dev_dbg(&spi->dev, "using spi->max_speed_hz!\n"); 1331 spi_imx->spi_bus_clk = spi->max_speed_hz; 1332 } else { 1333 spi_imx->spi_bus_clk = t->speed_hz; 1334 } 1335 } 1336 1337 spi_imx->bits_per_word = t->bits_per_word; 1338 spi_imx->count = t->len; 1339 1340 /* 1341 * Initialize the functions for transfer. To transfer non byte-aligned 1342 * words, we have to use multiple word-size bursts. To insert word 1343 * delay, the burst size has to equal the word size. We can't use 1344 * dynamic_burst in these cases. 1345 */ 1346 if (spi_imx->devtype_data->dynamic_burst && !spi_imx->target_mode && 1347 !(spi->mode & SPI_CS_WORD) && 1348 !(t->word_delay.value) && 1349 (spi_imx->bits_per_word == 8 || 1350 spi_imx->bits_per_word == 16 || 1351 spi_imx->bits_per_word == 32)) { 1352 1353 spi_imx->rx = spi_imx_buf_rx_swap; 1354 spi_imx->tx = spi_imx_buf_tx_swap; 1355 spi_imx->dynamic_burst = 1; 1356 1357 } else { 1358 if (spi_imx->bits_per_word <= 8) { 1359 spi_imx->rx = spi_imx_buf_rx_u8; 1360 spi_imx->tx = spi_imx_buf_tx_u8; 1361 } else if (spi_imx->bits_per_word <= 16) { 1362 spi_imx->rx = spi_imx_buf_rx_u16; 1363 spi_imx->tx = spi_imx_buf_tx_u16; 1364 } else { 1365 spi_imx->rx = spi_imx_buf_rx_u32; 1366 spi_imx->tx = spi_imx_buf_tx_u32; 1367 } 1368 spi_imx->dynamic_burst = 0; 1369 } 1370 1371 if (spi_imx_can_dma(spi_imx->controller, spi, t)) 1372 spi_imx->usedma = true; 1373 else 1374 spi_imx->usedma = false; 1375 1376 spi_imx->rx_only = ((t->tx_buf == NULL) 1377 || (t->tx_buf == spi->controller->dummy_tx)); 1378 1379 if (spi_imx->target_mode) { 1380 spi_imx->rx = mx53_ecspi_rx_target; 1381 spi_imx->tx = mx53_ecspi_tx_target; 1382 spi_imx->target_burst = t->len; 1383 } 1384 1385 spi_imx->devtype_data->prepare_transfer(spi_imx, spi, t); 1386 1387 return 0; 1388 } 1389 1390 static void spi_imx_sdma_exit(struct spi_imx_data *spi_imx) 1391 { 1392 struct spi_controller *controller = spi_imx->controller; 1393 1394 if (controller->dma_rx) { 1395 dma_release_channel(controller->dma_rx); 1396 controller->dma_rx = NULL; 1397 } 1398 1399 if (controller->dma_tx) { 1400 dma_release_channel(controller->dma_tx); 1401 controller->dma_tx = NULL; 1402 } 1403 } 1404 1405 static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx, 1406 struct spi_controller *controller) 1407 { 1408 int ret; 1409 1410 spi_imx->wml = spi_imx->devtype_data->fifo_size / 2; 1411 1412 /* Prepare for TX DMA: */ 1413 controller->dma_tx = dma_request_chan(dev, "tx"); 1414 if (IS_ERR(controller->dma_tx)) { 1415 ret = PTR_ERR(controller->dma_tx); 1416 dev_err_probe(dev, ret, "can't get the TX DMA channel!\n"); 1417 controller->dma_tx = NULL; 1418 goto err; 1419 } 1420 1421 /* Prepare for RX : */ 1422 controller->dma_rx = dma_request_chan(dev, "rx"); 1423 if (IS_ERR(controller->dma_rx)) { 1424 ret = PTR_ERR(controller->dma_rx); 1425 dev_err_probe(dev, ret, "can't get the RX DMA channel!\n"); 1426 controller->dma_rx = NULL; 1427 goto err; 1428 } 1429 1430 init_completion(&spi_imx->dma_rx_completion); 1431 init_completion(&spi_imx->dma_tx_completion); 1432 spi_imx->controller->flags = SPI_CONTROLLER_MUST_RX | 1433 SPI_CONTROLLER_MUST_TX; 1434 1435 return 0; 1436 err: 1437 spi_imx_sdma_exit(spi_imx); 1438 return ret; 1439 } 1440 1441 static void spi_imx_dma_rx_callback(void *cookie) 1442 { 1443 struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie; 1444 1445 complete(&spi_imx->dma_rx_completion); 1446 } 1447 1448 static void spi_imx_dma_tx_callback(void *cookie) 1449 { 1450 struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie; 1451 1452 complete(&spi_imx->dma_tx_completion); 1453 } 1454 1455 static int spi_imx_calculate_timeout(struct spi_imx_data *spi_imx, int size) 1456 { 1457 unsigned long timeout = 0; 1458 1459 /* Time with actual data transfer and CS change delay related to HW */ 1460 timeout = (8 + 4) * size / spi_imx->spi_bus_clk; 1461 1462 /* Add extra second for scheduler related activities */ 1463 timeout += 1; 1464 1465 /* Double calculated timeout */ 1466 return secs_to_jiffies(2 * timeout); 1467 } 1468 1469 static void spi_imx_dma_unmap(struct spi_imx_data *spi_imx, 1470 struct dma_data_package *dma_data) 1471 { 1472 struct device *tx_dev = spi_imx->controller->dma_tx->device->dev; 1473 struct device *rx_dev = spi_imx->controller->dma_rx->device->dev; 1474 1475 dma_unmap_single(tx_dev, dma_data->dma_tx_addr, 1476 DMA_CACHE_ALIGNED_LEN(dma_data->dma_len), 1477 DMA_TO_DEVICE); 1478 dma_unmap_single(rx_dev, dma_data->dma_rx_addr, 1479 DMA_CACHE_ALIGNED_LEN(dma_data->dma_len), 1480 DMA_FROM_DEVICE); 1481 } 1482 1483 static void spi_imx_dma_rx_data_handle(struct spi_imx_data *spi_imx, 1484 struct dma_data_package *dma_data, void *rx_buf, 1485 bool word_delay) 1486 { 1487 void *copy_ptr; 1488 int unaligned; 1489 1490 /* 1491 * On little-endian CPUs, adjust byte order: 1492 * - Swap bytes when bpw = 8 1493 * - Swap half-words when bpw = 16 1494 * This ensures correct data ordering for DMA transfers. 1495 */ 1496 #ifdef __LITTLE_ENDIAN 1497 if (!word_delay) { 1498 unsigned int bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word); 1499 u32 *temp = dma_data->dma_rx_buf; 1500 1501 for (int i = 0; i < DIV_ROUND_UP(dma_data->dma_len, sizeof(*temp)); i++) { 1502 if (bytes_per_word == 1) 1503 swab32s(temp + i); 1504 else if (bytes_per_word == 2) 1505 swahw32s(temp + i); 1506 } 1507 } 1508 #endif 1509 1510 /* 1511 * When dynamic burst enabled, DMA RX always receives 32-bit words from RXFIFO with 1512 * buswidth = 4, but when data_len is not 4-bytes alignment, the RM shows when 1513 * burst length = 32*n + m bits, a SPI burst contains the m LSB in first word and all 1514 * 32 bits in other n words. So if garbage bytes in the first word, trim first word then 1515 * copy the actual data to rx_buf. 1516 */ 1517 if (dma_data->data_len % BYTES_PER_32BITS_WORD && !word_delay) { 1518 unaligned = dma_data->data_len % BYTES_PER_32BITS_WORD; 1519 copy_ptr = (u8 *)dma_data->dma_rx_buf + BYTES_PER_32BITS_WORD - unaligned; 1520 } else { 1521 copy_ptr = dma_data->dma_rx_buf; 1522 } 1523 1524 memcpy(rx_buf, copy_ptr, dma_data->data_len); 1525 } 1526 1527 static int spi_imx_dma_map(struct spi_imx_data *spi_imx, 1528 struct dma_data_package *dma_data) 1529 { 1530 struct spi_controller *controller = spi_imx->controller; 1531 struct device *tx_dev = controller->dma_tx->device->dev; 1532 struct device *rx_dev = controller->dma_rx->device->dev; 1533 int ret; 1534 1535 dma_data->dma_tx_addr = dma_map_single(tx_dev, dma_data->dma_tx_buf, 1536 DMA_CACHE_ALIGNED_LEN(dma_data->dma_len), 1537 DMA_TO_DEVICE); 1538 ret = dma_mapping_error(tx_dev, dma_data->dma_tx_addr); 1539 if (ret < 0) { 1540 dev_err(spi_imx->dev, "DMA TX map failed %d\n", ret); 1541 return ret; 1542 } 1543 1544 dma_data->dma_rx_addr = dma_map_single(rx_dev, dma_data->dma_rx_buf, 1545 DMA_CACHE_ALIGNED_LEN(dma_data->dma_len), 1546 DMA_FROM_DEVICE); 1547 ret = dma_mapping_error(rx_dev, dma_data->dma_rx_addr); 1548 if (ret < 0) { 1549 dev_err(spi_imx->dev, "DMA RX map failed %d\n", ret); 1550 dma_unmap_single(tx_dev, dma_data->dma_tx_addr, 1551 DMA_CACHE_ALIGNED_LEN(dma_data->dma_len), 1552 DMA_TO_DEVICE); 1553 return ret; 1554 } 1555 1556 return 0; 1557 } 1558 1559 static int spi_imx_dma_tx_data_handle(struct spi_imx_data *spi_imx, 1560 struct dma_data_package *dma_data, 1561 const void *tx_buf, 1562 bool word_delay) 1563 { 1564 void *copy_ptr; 1565 int unaligned; 1566 1567 if (word_delay) { 1568 dma_data->dma_len = dma_data->data_len; 1569 } else { 1570 /* 1571 * As per the reference manual, when burst length = 32*n + m bits, ECSPI 1572 * sends m LSB bits in the first word, followed by n full 32-bit words. 1573 * Since actual data may not be 4-byte aligned, allocate DMA TX/RX buffers 1574 * to ensure alignment. For TX, DMA pushes 4-byte aligned words to TXFIFO, 1575 * while ECSPI uses BURST_LENGTH settings to maintain correct bit count. 1576 * For RX, DMA always receives 32-bit words from RXFIFO, when data len is 1577 * not 4-byte aligned, trim the first word to drop garbage bytes, then group 1578 * all transfer DMA bounse buffer and copy all valid data to rx_buf. 1579 */ 1580 dma_data->dma_len = ALIGN(dma_data->data_len, BYTES_PER_32BITS_WORD); 1581 } 1582 1583 dma_data->dma_tx_buf = kzalloc(dma_data->dma_len, GFP_KERNEL); 1584 if (!dma_data->dma_tx_buf) 1585 return -ENOMEM; 1586 1587 dma_data->dma_rx_buf = kzalloc(dma_data->dma_len, GFP_KERNEL); 1588 if (!dma_data->dma_rx_buf) { 1589 kfree(dma_data->dma_tx_buf); 1590 return -ENOMEM; 1591 } 1592 1593 if (dma_data->data_len % BYTES_PER_32BITS_WORD && !word_delay) { 1594 unaligned = dma_data->data_len % BYTES_PER_32BITS_WORD; 1595 copy_ptr = (u8 *)dma_data->dma_tx_buf + BYTES_PER_32BITS_WORD - unaligned; 1596 } else { 1597 copy_ptr = dma_data->dma_tx_buf; 1598 } 1599 1600 memcpy(copy_ptr, tx_buf, dma_data->data_len); 1601 1602 /* 1603 * When word_delay is enabled, DMA transfers an entire word in one minor loop. 1604 * In this case, no data requires additional handling. 1605 */ 1606 if (word_delay) 1607 return 0; 1608 1609 #ifdef __LITTLE_ENDIAN 1610 /* 1611 * On little-endian CPUs, adjust byte order: 1612 * - Swap bytes when bpw = 8 1613 * - Swap half-words when bpw = 16 1614 * This ensures correct data ordering for DMA transfers. 1615 */ 1616 unsigned int bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word); 1617 u32 *temp = dma_data->dma_tx_buf; 1618 1619 for (int i = 0; i < DIV_ROUND_UP(dma_data->dma_len, sizeof(*temp)); i++) { 1620 if (bytes_per_word == 1) 1621 swab32s(temp + i); 1622 else if (bytes_per_word == 2) 1623 swahw32s(temp + i); 1624 } 1625 #endif 1626 1627 return 0; 1628 } 1629 1630 static int spi_imx_dma_data_prepare(struct spi_imx_data *spi_imx, 1631 struct spi_transfer *transfer, 1632 bool word_delay) 1633 { 1634 u32 pre_bl, tail_bl; 1635 u32 ctrl; 1636 int ret; 1637 1638 /* 1639 * ECSPI supports a maximum burst of 512 bytes. When xfer->len exceeds 512 1640 * and is not a multiple of 512, a tail transfer is required. BURST_LEGTH 1641 * is used for SPI HW to maintain correct bit count. BURST_LENGTH should 1642 * update with data length. After DMA request submit, SPI can not update the 1643 * BURST_LENGTH, in this case, we must split two package, update the register 1644 * then setup second DMA transfer. 1645 */ 1646 ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL); 1647 if (word_delay) { 1648 /* 1649 * When SPI IMX need to support word delay, according to "Sample Period Control 1650 * Register" shows, The Sample Period Control Register (ECSPI_PERIODREG) 1651 * provides software a way to insert delays (wait states) between consecutive 1652 * SPI transfers. As a result, ECSPI can only transfer one word per frame, and 1653 * the delay occurs between frames. 1654 */ 1655 spi_imx->dma_package_num = 1; 1656 pre_bl = spi_imx->bits_per_word - 1; 1657 } else if (transfer->len <= MX51_ECSPI_CTRL_MAX_BURST) { 1658 spi_imx->dma_package_num = 1; 1659 pre_bl = transfer->len * BITS_PER_BYTE - 1; 1660 } else if (!(transfer->len % MX51_ECSPI_CTRL_MAX_BURST)) { 1661 spi_imx->dma_package_num = 1; 1662 pre_bl = MX51_ECSPI_CTRL_MAX_BURST * BITS_PER_BYTE - 1; 1663 } else { 1664 spi_imx->dma_package_num = 2; 1665 pre_bl = MX51_ECSPI_CTRL_MAX_BURST * BITS_PER_BYTE - 1; 1666 tail_bl = (transfer->len % MX51_ECSPI_CTRL_MAX_BURST) * BITS_PER_BYTE - 1; 1667 } 1668 1669 spi_imx->dma_data = kmalloc_array(spi_imx->dma_package_num, 1670 sizeof(struct dma_data_package), 1671 GFP_KERNEL | __GFP_ZERO); 1672 if (!spi_imx->dma_data) { 1673 dev_err(spi_imx->dev, "Failed to allocate DMA package buffer!\n"); 1674 return -ENOMEM; 1675 } 1676 1677 if (spi_imx->dma_package_num == 1) { 1678 ctrl &= ~MX51_ECSPI_CTRL_BL_MASK; 1679 ctrl |= pre_bl << MX51_ECSPI_CTRL_BL_OFFSET; 1680 spi_imx->dma_data[0].cmd_word = ctrl; 1681 spi_imx->dma_data[0].data_len = transfer->len; 1682 ret = spi_imx_dma_tx_data_handle(spi_imx, &spi_imx->dma_data[0], transfer->tx_buf, 1683 word_delay); 1684 if (ret) { 1685 kfree(spi_imx->dma_data); 1686 return ret; 1687 } 1688 } else { 1689 ctrl &= ~MX51_ECSPI_CTRL_BL_MASK; 1690 ctrl |= pre_bl << MX51_ECSPI_CTRL_BL_OFFSET; 1691 spi_imx->dma_data[0].cmd_word = ctrl; 1692 spi_imx->dma_data[0].data_len = round_down(transfer->len, 1693 MX51_ECSPI_CTRL_MAX_BURST); 1694 ret = spi_imx_dma_tx_data_handle(spi_imx, &spi_imx->dma_data[0], transfer->tx_buf, 1695 false); 1696 if (ret) { 1697 kfree(spi_imx->dma_data); 1698 return ret; 1699 } 1700 1701 ctrl &= ~MX51_ECSPI_CTRL_BL_MASK; 1702 ctrl |= tail_bl << MX51_ECSPI_CTRL_BL_OFFSET; 1703 spi_imx->dma_data[1].cmd_word = ctrl; 1704 spi_imx->dma_data[1].data_len = transfer->len % MX51_ECSPI_CTRL_MAX_BURST; 1705 ret = spi_imx_dma_tx_data_handle(spi_imx, &spi_imx->dma_data[1], 1706 transfer->tx_buf + spi_imx->dma_data[0].data_len, 1707 false); 1708 if (ret) { 1709 kfree(spi_imx->dma_data[0].dma_tx_buf); 1710 kfree(spi_imx->dma_data[0].dma_rx_buf); 1711 kfree(spi_imx->dma_data); 1712 } 1713 } 1714 1715 return 0; 1716 } 1717 1718 static int spi_imx_dma_submit(struct spi_imx_data *spi_imx, 1719 struct dma_data_package *dma_data, 1720 struct spi_transfer *transfer) 1721 { 1722 struct spi_controller *controller = spi_imx->controller; 1723 struct dma_async_tx_descriptor *desc_tx, *desc_rx; 1724 unsigned long transfer_timeout; 1725 unsigned long time_left; 1726 dma_cookie_t cookie; 1727 1728 /* 1729 * The TX DMA setup starts the transfer, so make sure RX is configured 1730 * before TX. 1731 */ 1732 desc_rx = dmaengine_prep_slave_single(controller->dma_rx, dma_data->dma_rx_addr, 1733 dma_data->dma_len, DMA_DEV_TO_MEM, 1734 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1735 if (!desc_rx) { 1736 transfer->error |= SPI_TRANS_FAIL_NO_START; 1737 return -EINVAL; 1738 } 1739 1740 desc_rx->callback = spi_imx_dma_rx_callback; 1741 desc_rx->callback_param = (void *)spi_imx; 1742 cookie = dmaengine_submit(desc_rx); 1743 if (dma_submit_error(cookie)) { 1744 dev_err(spi_imx->dev, "submitting DMA RX failed\n"); 1745 transfer->error |= SPI_TRANS_FAIL_NO_START; 1746 goto dmaengine_terminate_rx; 1747 } 1748 1749 reinit_completion(&spi_imx->dma_rx_completion); 1750 dma_async_issue_pending(controller->dma_rx); 1751 1752 desc_tx = dmaengine_prep_slave_single(controller->dma_tx, dma_data->dma_tx_addr, 1753 dma_data->dma_len, DMA_MEM_TO_DEV, 1754 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1755 if (!desc_tx) 1756 goto dmaengine_terminate_rx; 1757 1758 desc_tx->callback = spi_imx_dma_tx_callback; 1759 desc_tx->callback_param = (void *)spi_imx; 1760 cookie = dmaengine_submit(desc_tx); 1761 if (dma_submit_error(cookie)) { 1762 dev_err(spi_imx->dev, "submitting DMA TX failed\n"); 1763 goto dmaengine_terminate_tx; 1764 } 1765 reinit_completion(&spi_imx->dma_tx_completion); 1766 dma_async_issue_pending(controller->dma_tx); 1767 1768 spi_imx->devtype_data->trigger(spi_imx); 1769 1770 transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len); 1771 1772 if (!spi_imx->target_mode) { 1773 /* Wait SDMA to finish the data transfer.*/ 1774 time_left = wait_for_completion_timeout(&spi_imx->dma_tx_completion, 1775 transfer_timeout); 1776 if (!time_left) { 1777 dev_err(spi_imx->dev, "I/O Error in DMA TX\n"); 1778 dmaengine_terminate_all(controller->dma_tx); 1779 dmaengine_terminate_all(controller->dma_rx); 1780 return -ETIMEDOUT; 1781 } 1782 1783 time_left = wait_for_completion_timeout(&spi_imx->dma_rx_completion, 1784 transfer_timeout); 1785 if (!time_left) { 1786 dev_err(&controller->dev, "I/O Error in DMA RX\n"); 1787 spi_imx->devtype_data->reset(spi_imx); 1788 dmaengine_terminate_all(controller->dma_rx); 1789 return -ETIMEDOUT; 1790 } 1791 } else { 1792 spi_imx->target_aborted = false; 1793 1794 if (wait_for_completion_interruptible(&spi_imx->dma_tx_completion) || 1795 READ_ONCE(spi_imx->target_aborted)) { 1796 dev_dbg(spi_imx->dev, "I/O Error in DMA TX interrupted\n"); 1797 dmaengine_terminate_all(controller->dma_tx); 1798 dmaengine_terminate_all(controller->dma_rx); 1799 return -EINTR; 1800 } 1801 1802 if (wait_for_completion_interruptible(&spi_imx->dma_rx_completion) || 1803 READ_ONCE(spi_imx->target_aborted)) { 1804 dev_dbg(spi_imx->dev, "I/O Error in DMA RX interrupted\n"); 1805 dmaengine_terminate_all(controller->dma_rx); 1806 return -EINTR; 1807 } 1808 1809 /* 1810 * ECSPI has a HW issue when works in Target mode, after 64 words 1811 * writtern to TXFIFO, even TXFIFO becomes empty, ECSPI_TXDATA keeps 1812 * shift out the last word data, so we have to disable ECSPI when in 1813 * target mode after the transfer completes. 1814 */ 1815 if (spi_imx->devtype_data->disable) 1816 spi_imx->devtype_data->disable(spi_imx); 1817 } 1818 1819 return 0; 1820 1821 dmaengine_terminate_tx: 1822 dmaengine_terminate_all(controller->dma_tx); 1823 dmaengine_terminate_rx: 1824 dmaengine_terminate_all(controller->dma_rx); 1825 1826 return -EINVAL; 1827 } 1828 1829 static void spi_imx_dma_max_wml_find(struct spi_imx_data *spi_imx, 1830 struct dma_data_package *dma_data, 1831 bool word_delay) 1832 { 1833 unsigned int bytes_per_word = word_delay ? 1834 spi_imx_bytes_per_word(spi_imx->bits_per_word) : 1835 BYTES_PER_32BITS_WORD; 1836 unsigned int i; 1837 1838 for (i = spi_imx->devtype_data->fifo_size / 2; i > 0; i--) { 1839 if (!dma_data->dma_len % (i * bytes_per_word)) 1840 break; 1841 } 1842 /* Use 1 as wml in case no available burst length got */ 1843 if (i == 0) 1844 i = 1; 1845 1846 spi_imx->wml = i; 1847 } 1848 1849 static int spi_imx_dma_configure(struct spi_controller *controller, bool word_delay) 1850 { 1851 int ret; 1852 enum dma_slave_buswidth buswidth; 1853 struct dma_slave_config rx = {}, tx = {}; 1854 struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller); 1855 1856 if (word_delay) { 1857 switch (spi_imx_bytes_per_word(spi_imx->bits_per_word)) { 1858 case 4: 1859 buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES; 1860 break; 1861 case 2: 1862 buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES; 1863 break; 1864 case 1: 1865 buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE; 1866 break; 1867 default: 1868 return -EINVAL; 1869 } 1870 } else { 1871 buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES; 1872 } 1873 1874 tx.direction = DMA_MEM_TO_DEV; 1875 tx.dst_addr = spi_imx->base_phys + MXC_CSPITXDATA; 1876 tx.dst_addr_width = buswidth; 1877 tx.dst_maxburst = spi_imx->wml; 1878 ret = dmaengine_slave_config(controller->dma_tx, &tx); 1879 if (ret) { 1880 dev_err(spi_imx->dev, "TX dma configuration failed with %d\n", ret); 1881 return ret; 1882 } 1883 1884 rx.direction = DMA_DEV_TO_MEM; 1885 rx.src_addr = spi_imx->base_phys + MXC_CSPIRXDATA; 1886 rx.src_addr_width = buswidth; 1887 rx.src_maxburst = spi_imx->wml; 1888 ret = dmaengine_slave_config(controller->dma_rx, &rx); 1889 if (ret) { 1890 dev_err(spi_imx->dev, "RX dma configuration failed with %d\n", ret); 1891 return ret; 1892 } 1893 1894 return 0; 1895 } 1896 1897 static int spi_imx_dma_package_transfer(struct spi_imx_data *spi_imx, 1898 struct dma_data_package *dma_data, 1899 struct spi_transfer *transfer, 1900 bool word_delay) 1901 { 1902 struct spi_controller *controller = spi_imx->controller; 1903 int ret; 1904 1905 spi_imx_dma_max_wml_find(spi_imx, dma_data, word_delay); 1906 1907 ret = spi_imx_dma_configure(controller, word_delay); 1908 if (ret) 1909 goto dma_failure_no_start; 1910 1911 if (!spi_imx->devtype_data->setup_wml) { 1912 dev_err(spi_imx->dev, "No setup_wml()?\n"); 1913 ret = -EINVAL; 1914 goto dma_failure_no_start; 1915 } 1916 spi_imx->devtype_data->setup_wml(spi_imx); 1917 1918 ret = spi_imx_dma_submit(spi_imx, dma_data, transfer); 1919 if (ret) 1920 return ret; 1921 1922 /* Trim the DMA RX buffer and copy the actual data to rx_buf */ 1923 dma_sync_single_for_cpu(controller->dma_rx->device->dev, dma_data->dma_rx_addr, 1924 dma_data->dma_len, DMA_FROM_DEVICE); 1925 spi_imx_dma_rx_data_handle(spi_imx, dma_data, transfer->rx_buf + spi_imx->rx_offset, 1926 word_delay); 1927 spi_imx->rx_offset += dma_data->data_len; 1928 1929 return 0; 1930 /* fallback to pio */ 1931 dma_failure_no_start: 1932 transfer->error |= SPI_TRANS_FAIL_NO_START; 1933 return ret; 1934 } 1935 1936 static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx, 1937 struct spi_transfer *transfer) 1938 { 1939 bool word_delay = transfer->word_delay.value != 0 && !spi_imx->target_mode; 1940 int ret; 1941 int i; 1942 1943 ret = spi_imx_dma_data_prepare(spi_imx, transfer, word_delay); 1944 if (ret < 0) { 1945 transfer->error |= SPI_TRANS_FAIL_NO_START; 1946 dev_err(spi_imx->dev, "DMA data prepare fail\n"); 1947 goto fallback_pio; 1948 } 1949 1950 spi_imx->rx_offset = 0; 1951 1952 /* Each dma_package performs a separate DMA transfer once */ 1953 for (i = 0; i < spi_imx->dma_package_num; i++) { 1954 ret = spi_imx_dma_map(spi_imx, &spi_imx->dma_data[i]); 1955 if (ret < 0) { 1956 if (i == 0) 1957 transfer->error |= SPI_TRANS_FAIL_NO_START; 1958 dev_err(spi_imx->dev, "DMA map fail\n"); 1959 break; 1960 } 1961 1962 /* Update the CTRL register BL field */ 1963 writel(spi_imx->dma_data[i].cmd_word, spi_imx->base + MX51_ECSPI_CTRL); 1964 1965 ret = spi_imx_dma_package_transfer(spi_imx, &spi_imx->dma_data[i], 1966 transfer, word_delay); 1967 1968 /* Whether the dma transmission is successful or not, dma unmap is necessary */ 1969 spi_imx_dma_unmap(spi_imx, &spi_imx->dma_data[i]); 1970 1971 if (ret < 0) { 1972 dev_dbg(spi_imx->dev, "DMA %d transfer not really finish\n", i); 1973 break; 1974 } 1975 } 1976 1977 for (int j = 0; j < spi_imx->dma_package_num; j++) { 1978 kfree(spi_imx->dma_data[j].dma_tx_buf); 1979 kfree(spi_imx->dma_data[j].dma_rx_buf); 1980 } 1981 kfree(spi_imx->dma_data); 1982 1983 fallback_pio: 1984 return ret; 1985 } 1986 1987 static int spi_imx_pio_transfer(struct spi_device *spi, 1988 struct spi_transfer *transfer) 1989 { 1990 struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller); 1991 unsigned long transfer_timeout; 1992 unsigned long time_left; 1993 1994 spi_imx->tx_buf = transfer->tx_buf; 1995 spi_imx->rx_buf = transfer->rx_buf; 1996 spi_imx->count = transfer->len; 1997 spi_imx->txfifo = 0; 1998 spi_imx->remainder = 0; 1999 2000 reinit_completion(&spi_imx->xfer_done); 2001 2002 spi_imx_push(spi_imx); 2003 2004 spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE); 2005 2006 transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len); 2007 2008 time_left = wait_for_completion_timeout(&spi_imx->xfer_done, 2009 transfer_timeout); 2010 if (!time_left) { 2011 dev_err(&spi->dev, "I/O Error in PIO\n"); 2012 spi_imx->devtype_data->reset(spi_imx); 2013 return -ETIMEDOUT; 2014 } 2015 2016 return 0; 2017 } 2018 2019 static int spi_imx_poll_transfer(struct spi_device *spi, 2020 struct spi_transfer *transfer) 2021 { 2022 struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller); 2023 unsigned long timeout; 2024 2025 spi_imx->tx_buf = transfer->tx_buf; 2026 spi_imx->rx_buf = transfer->rx_buf; 2027 spi_imx->count = transfer->len; 2028 spi_imx->txfifo = 0; 2029 spi_imx->remainder = 0; 2030 2031 /* fill in the fifo before timeout calculations if we are 2032 * interrupted here, then the data is getting transferred by 2033 * the HW while we are interrupted 2034 */ 2035 spi_imx_push(spi_imx); 2036 2037 timeout = spi_imx_calculate_timeout(spi_imx, transfer->len) + jiffies; 2038 while (spi_imx->txfifo) { 2039 /* RX */ 2040 while (spi_imx->txfifo && 2041 spi_imx->devtype_data->rx_available(spi_imx)) { 2042 spi_imx->rx(spi_imx); 2043 spi_imx->txfifo--; 2044 } 2045 2046 /* TX */ 2047 if (spi_imx->count) { 2048 spi_imx_push(spi_imx); 2049 continue; 2050 } 2051 2052 if (spi_imx->txfifo && 2053 time_after(jiffies, timeout)) { 2054 2055 dev_err_ratelimited(&spi->dev, 2056 "timeout period reached: jiffies: %lu- falling back to interrupt mode\n", 2057 jiffies - timeout); 2058 2059 /* fall back to interrupt mode */ 2060 return spi_imx_pio_transfer(spi, transfer); 2061 } 2062 } 2063 2064 return 0; 2065 } 2066 2067 static int spi_imx_pio_transfer_target(struct spi_device *spi, 2068 struct spi_transfer *transfer) 2069 { 2070 struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller); 2071 int ret = 0; 2072 2073 if (transfer->len > MX53_MAX_TRANSFER_BYTES) { 2074 dev_err(&spi->dev, "Transaction too big, max size is %d bytes\n", 2075 MX53_MAX_TRANSFER_BYTES); 2076 return -EMSGSIZE; 2077 } 2078 2079 spi_imx->tx_buf = transfer->tx_buf; 2080 spi_imx->rx_buf = transfer->rx_buf; 2081 spi_imx->count = transfer->len; 2082 spi_imx->txfifo = 0; 2083 spi_imx->remainder = 0; 2084 2085 reinit_completion(&spi_imx->xfer_done); 2086 spi_imx->target_aborted = false; 2087 2088 spi_imx_push(spi_imx); 2089 2090 spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE | MXC_INT_RDR); 2091 2092 if (wait_for_completion_interruptible(&spi_imx->xfer_done) || 2093 spi_imx->target_aborted) { 2094 dev_dbg(&spi->dev, "interrupted\n"); 2095 ret = -EINTR; 2096 } 2097 2098 /* ecspi has a HW issue when works in Target mode, 2099 * after 64 words writtern to TXFIFO, even TXFIFO becomes empty, 2100 * ECSPI_TXDATA keeps shift out the last word data, 2101 * so we have to disable ECSPI when in target mode after the 2102 * transfer completes 2103 */ 2104 if (spi_imx->devtype_data->disable) 2105 spi_imx->devtype_data->disable(spi_imx); 2106 2107 return ret; 2108 } 2109 2110 static unsigned int spi_imx_transfer_estimate_time_us(struct spi_transfer *transfer) 2111 { 2112 u64 result; 2113 2114 result = DIV_U64_ROUND_CLOSEST((u64)USEC_PER_SEC * transfer->len * BITS_PER_BYTE, 2115 transfer->effective_speed_hz); 2116 if (transfer->word_delay.value) { 2117 unsigned int word_delay_us; 2118 unsigned int words; 2119 2120 words = DIV_ROUND_UP(transfer->len * BITS_PER_BYTE, transfer->bits_per_word); 2121 word_delay_us = DIV_ROUND_CLOSEST(spi_delay_to_ns(&transfer->word_delay, transfer), 2122 NSEC_PER_USEC); 2123 result += (u64)words * word_delay_us; 2124 } 2125 2126 return min(result, U32_MAX); 2127 } 2128 2129 static int spi_imx_transfer_one(struct spi_controller *controller, 2130 struct spi_device *spi, 2131 struct spi_transfer *transfer) 2132 { 2133 int ret; 2134 struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller); 2135 2136 ret = spi_imx_setupxfer(spi, transfer); 2137 if (ret < 0) 2138 return ret; 2139 transfer->effective_speed_hz = spi_imx->spi_bus_clk; 2140 2141 /* flush rxfifo before transfer */ 2142 while (spi_imx->devtype_data->rx_available(spi_imx)) 2143 readl(spi_imx->base + MXC_CSPIRXDATA); 2144 2145 if (spi_imx->target_mode && !spi_imx->usedma) 2146 return spi_imx_pio_transfer_target(spi, transfer); 2147 2148 /* 2149 * If we decided in spi_imx_can_dma() that we want to do a DMA 2150 * transfer, the SPI transfer has already been mapped, so we 2151 * have to do the DMA transfer here. 2152 */ 2153 if (spi_imx->usedma) { 2154 ret = spi_imx_dma_transfer(spi_imx, transfer); 2155 if (transfer->error & SPI_TRANS_FAIL_NO_START) { 2156 spi_imx->usedma = false; 2157 if (spi_imx->target_mode) 2158 return spi_imx_pio_transfer_target(spi, transfer); 2159 else 2160 return spi_imx_pio_transfer(spi, transfer); 2161 } 2162 return ret; 2163 } 2164 /* run in polling mode for short transfers */ 2165 if (transfer->len == 1 || (polling_limit_us && 2166 spi_imx_transfer_estimate_time_us(transfer) < polling_limit_us)) 2167 return spi_imx_poll_transfer(spi, transfer); 2168 2169 return spi_imx_pio_transfer(spi, transfer); 2170 } 2171 2172 static int spi_imx_setup(struct spi_device *spi) 2173 { 2174 dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__, 2175 spi->mode, spi->bits_per_word, spi->max_speed_hz); 2176 2177 return 0; 2178 } 2179 2180 static int 2181 spi_imx_prepare_message(struct spi_controller *controller, struct spi_message *msg) 2182 { 2183 struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller); 2184 int ret; 2185 2186 ret = pm_runtime_resume_and_get(spi_imx->dev); 2187 if (ret < 0) { 2188 dev_err(spi_imx->dev, "failed to enable clock\n"); 2189 return ret; 2190 } 2191 2192 ret = spi_imx->devtype_data->prepare_message(spi_imx, msg); 2193 if (ret) { 2194 pm_runtime_put_autosuspend(spi_imx->dev); 2195 } 2196 2197 return ret; 2198 } 2199 2200 static int 2201 spi_imx_unprepare_message(struct spi_controller *controller, struct spi_message *msg) 2202 { 2203 struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller); 2204 2205 pm_runtime_put_autosuspend(spi_imx->dev); 2206 return 0; 2207 } 2208 2209 static int spi_imx_target_abort(struct spi_controller *controller) 2210 { 2211 struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller); 2212 2213 spi_imx->target_aborted = true; 2214 complete(&spi_imx->xfer_done); 2215 2216 return 0; 2217 } 2218 2219 static int spi_imx_probe(struct platform_device *pdev) 2220 { 2221 struct device_node *np = pdev->dev.of_node; 2222 struct spi_controller *controller; 2223 struct spi_imx_data *spi_imx; 2224 struct resource *res; 2225 int ret, irq, spi_drctl; 2226 const struct spi_imx_devtype_data *devtype_data = 2227 of_device_get_match_data(&pdev->dev); 2228 bool target_mode; 2229 u32 val; 2230 2231 target_mode = devtype_data->has_targetmode && 2232 of_property_read_bool(np, "spi-slave"); 2233 if (target_mode) 2234 controller = spi_alloc_target(&pdev->dev, 2235 sizeof(struct spi_imx_data)); 2236 else 2237 controller = spi_alloc_host(&pdev->dev, 2238 sizeof(struct spi_imx_data)); 2239 if (!controller) 2240 return -ENOMEM; 2241 2242 ret = of_property_read_u32(np, "fsl,spi-rdy-drctl", &spi_drctl); 2243 if ((ret < 0) || (spi_drctl >= 0x3)) { 2244 /* '11' is reserved */ 2245 spi_drctl = 0; 2246 } 2247 2248 platform_set_drvdata(pdev, controller); 2249 2250 controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32); 2251 controller->bus_num = np ? -1 : pdev->id; 2252 controller->use_gpio_descriptors = true; 2253 2254 spi_imx = spi_controller_get_devdata(controller); 2255 spi_imx->controller = controller; 2256 spi_imx->dev = &pdev->dev; 2257 spi_imx->target_mode = target_mode; 2258 2259 spi_imx->devtype_data = devtype_data; 2260 2261 /* 2262 * Get number of chip selects from device properties. This can be 2263 * coming from device tree or boardfiles, if it is not defined, 2264 * a default value of 3 chip selects will be used, as all the legacy 2265 * board files have <= 3 chip selects. 2266 */ 2267 if (!device_property_read_u32(&pdev->dev, "num-cs", &val)) 2268 controller->num_chipselect = val; 2269 else 2270 controller->num_chipselect = 3; 2271 2272 controller->transfer_one = spi_imx_transfer_one; 2273 controller->setup = spi_imx_setup; 2274 controller->prepare_message = spi_imx_prepare_message; 2275 controller->unprepare_message = spi_imx_unprepare_message; 2276 controller->target_abort = spi_imx_target_abort; 2277 spi_imx->spi_bus_clk = MXC_SPI_DEFAULT_SPEED; 2278 controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_NO_CS | 2279 SPI_MOSI_IDLE_LOW; 2280 2281 if (is_imx35_cspi(spi_imx) || is_imx51_ecspi(spi_imx) || 2282 is_imx53_ecspi(spi_imx)) 2283 controller->mode_bits |= SPI_LOOP | SPI_READY; 2284 2285 if (is_imx51_ecspi(spi_imx) || is_imx53_ecspi(spi_imx)) 2286 controller->mode_bits |= SPI_RX_CPHA_FLIP; 2287 2288 if (is_imx51_ecspi(spi_imx) && 2289 device_property_read_u32(&pdev->dev, "cs-gpios", NULL)) 2290 /* 2291 * When using HW-CS implementing SPI_CS_WORD can be done by just 2292 * setting the burst length to the word size. This is 2293 * considerably faster than manually controlling the CS. 2294 */ 2295 controller->mode_bits |= SPI_CS_WORD; 2296 2297 if (is_imx51_ecspi(spi_imx) || is_imx53_ecspi(spi_imx)) { 2298 controller->max_native_cs = 4; 2299 controller->flags |= SPI_CONTROLLER_GPIO_SS; 2300 } 2301 2302 spi_imx->spi_drctl = spi_drctl; 2303 2304 init_completion(&spi_imx->xfer_done); 2305 2306 spi_imx->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); 2307 if (IS_ERR(spi_imx->base)) { 2308 ret = PTR_ERR(spi_imx->base); 2309 goto out_controller_put; 2310 } 2311 spi_imx->base_phys = res->start; 2312 2313 irq = platform_get_irq(pdev, 0); 2314 if (irq < 0) { 2315 ret = irq; 2316 goto out_controller_put; 2317 } 2318 2319 ret = devm_request_irq(&pdev->dev, irq, spi_imx_isr, 0, 2320 dev_name(&pdev->dev), spi_imx); 2321 if (ret) { 2322 dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret); 2323 goto out_controller_put; 2324 } 2325 2326 spi_imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); 2327 if (IS_ERR(spi_imx->clk_ipg)) { 2328 ret = PTR_ERR(spi_imx->clk_ipg); 2329 goto out_controller_put; 2330 } 2331 2332 spi_imx->clk_per = devm_clk_get(&pdev->dev, "per"); 2333 if (IS_ERR(spi_imx->clk_per)) { 2334 ret = PTR_ERR(spi_imx->clk_per); 2335 goto out_controller_put; 2336 } 2337 2338 ret = clk_prepare_enable(spi_imx->clk_per); 2339 if (ret) 2340 goto out_controller_put; 2341 2342 ret = clk_prepare_enable(spi_imx->clk_ipg); 2343 if (ret) 2344 goto out_put_per; 2345 2346 pm_runtime_set_autosuspend_delay(spi_imx->dev, MXC_RPM_TIMEOUT); 2347 pm_runtime_use_autosuspend(spi_imx->dev); 2348 pm_runtime_get_noresume(spi_imx->dev); 2349 pm_runtime_set_active(spi_imx->dev); 2350 pm_runtime_enable(spi_imx->dev); 2351 2352 spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per); 2353 /* 2354 * Only validated on i.mx35 and i.mx6 now, can remove the constraint 2355 * if validated on other chips. 2356 */ 2357 if (spi_imx->devtype_data->has_dmamode) { 2358 ret = spi_imx_sdma_init(&pdev->dev, spi_imx, controller); 2359 if (ret == -EPROBE_DEFER) 2360 goto out_runtime_pm_put; 2361 2362 if (ret < 0) 2363 dev_dbg(&pdev->dev, "dma setup error %d, use pio\n", 2364 ret); 2365 } 2366 2367 spi_imx->devtype_data->reset(spi_imx); 2368 2369 spi_imx->devtype_data->intctrl(spi_imx, 0); 2370 2371 ret = spi_register_controller(controller); 2372 if (ret) { 2373 dev_err_probe(&pdev->dev, ret, "register controller failed\n"); 2374 goto out_register_controller; 2375 } 2376 2377 pm_runtime_put_autosuspend(spi_imx->dev); 2378 2379 return ret; 2380 2381 out_register_controller: 2382 if (spi_imx->devtype_data->has_dmamode) 2383 spi_imx_sdma_exit(spi_imx); 2384 out_runtime_pm_put: 2385 pm_runtime_dont_use_autosuspend(spi_imx->dev); 2386 pm_runtime_disable(spi_imx->dev); 2387 pm_runtime_set_suspended(&pdev->dev); 2388 2389 clk_disable_unprepare(spi_imx->clk_ipg); 2390 out_put_per: 2391 clk_disable_unprepare(spi_imx->clk_per); 2392 out_controller_put: 2393 spi_controller_put(controller); 2394 2395 return ret; 2396 } 2397 2398 static void spi_imx_remove(struct platform_device *pdev) 2399 { 2400 struct spi_controller *controller = platform_get_drvdata(pdev); 2401 struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller); 2402 int ret; 2403 2404 spi_unregister_controller(controller); 2405 2406 ret = pm_runtime_get_sync(spi_imx->dev); 2407 if (ret >= 0) 2408 writel(0, spi_imx->base + MXC_CSPICTRL); 2409 else 2410 dev_warn(spi_imx->dev, "failed to enable clock, skip hw disable\n"); 2411 2412 pm_runtime_dont_use_autosuspend(spi_imx->dev); 2413 pm_runtime_put_sync(spi_imx->dev); 2414 pm_runtime_disable(spi_imx->dev); 2415 2416 spi_imx_sdma_exit(spi_imx); 2417 } 2418 2419 static int spi_imx_runtime_resume(struct device *dev) 2420 { 2421 struct spi_controller *controller = dev_get_drvdata(dev); 2422 struct spi_imx_data *spi_imx; 2423 int ret; 2424 2425 spi_imx = spi_controller_get_devdata(controller); 2426 2427 ret = clk_prepare_enable(spi_imx->clk_per); 2428 if (ret) 2429 return ret; 2430 2431 ret = clk_prepare_enable(spi_imx->clk_ipg); 2432 if (ret) { 2433 clk_disable_unprepare(spi_imx->clk_per); 2434 return ret; 2435 } 2436 2437 return 0; 2438 } 2439 2440 static int spi_imx_runtime_suspend(struct device *dev) 2441 { 2442 struct spi_controller *controller = dev_get_drvdata(dev); 2443 struct spi_imx_data *spi_imx; 2444 2445 spi_imx = spi_controller_get_devdata(controller); 2446 2447 clk_disable_unprepare(spi_imx->clk_per); 2448 clk_disable_unprepare(spi_imx->clk_ipg); 2449 2450 return 0; 2451 } 2452 2453 static int spi_imx_suspend(struct device *dev) 2454 { 2455 pinctrl_pm_select_sleep_state(dev); 2456 return 0; 2457 } 2458 2459 static int spi_imx_resume(struct device *dev) 2460 { 2461 pinctrl_pm_select_default_state(dev); 2462 return 0; 2463 } 2464 2465 static const struct dev_pm_ops imx_spi_pm = { 2466 RUNTIME_PM_OPS(spi_imx_runtime_suspend, spi_imx_runtime_resume, NULL) 2467 SYSTEM_SLEEP_PM_OPS(spi_imx_suspend, spi_imx_resume) 2468 }; 2469 2470 static struct platform_driver spi_imx_driver = { 2471 .driver = { 2472 .name = DRIVER_NAME, 2473 .of_match_table = spi_imx_dt_ids, 2474 .pm = pm_ptr(&imx_spi_pm), 2475 }, 2476 .probe = spi_imx_probe, 2477 .remove = spi_imx_remove, 2478 }; 2479 module_platform_driver(spi_imx_driver); 2480 2481 MODULE_DESCRIPTION("i.MX SPI Controller driver"); 2482 MODULE_AUTHOR("Sascha Hauer, Pengutronix"); 2483 MODULE_LICENSE("GPL"); 2484 MODULE_ALIAS("platform:" DRIVER_NAME); 2485