// SPDX-License-Identifier: GPL-2.0+
// Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
// Copyright (C) 2008 Juergen Beisert

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/property.h>

#include <linux/dma/imx-dma.h>

#define DRIVER_NAME "spi_imx"

/* Module parameter: allow disabling DMA globally even when a channel exists */
static bool use_dma = true;
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "Enable usage of DMA when available (default)");

/* define polling limits */
static unsigned int polling_limit_us = 30;
module_param(polling_limit_us, uint, 0664);
MODULE_PARM_DESC(polling_limit_us,
		 "time in us to run a transfer in polling mode\n");

#define MXC_RPM_TIMEOUT		2000 /* 2000ms */

/* Register offsets shared by all CSPI variants */
#define MXC_CSPIRXDATA		0x00
#define MXC_CSPITXDATA		0x04
#define MXC_CSPICTRL		0x08
#define MXC_CSPIINT		0x0c
#define MXC_RESET		0x1c

/* generic defines to abstract from the different register layouts */
#define MXC_INT_RR	(1 << 0) /* Receive data ready interrupt */
#define MXC_INT_TE	(1 << 1) /* Transmit FIFO empty interrupt */
#define MXC_INT_RDR	BIT(4) /* Receive data threshold interrupt */

/* The maximum bytes that a sdma BD can transfer. */
#define MAX_SDMA_BD_BYTES (1 << 15)
#define MX51_ECSPI_CTRL_MAX_BURST	512
/* The maximum bytes that IMX53_ECSPI can transfer in target mode.*/
#define MX53_MAX_TRANSFER_BYTES		512

enum spi_imx_devtype {
	IMX1_CSPI,
	IMX21_CSPI,
	IMX27_CSPI,
	IMX31_CSPI,
	IMX35_CSPI,	/* CSPI on all i.mx except above */
	IMX51_ECSPI,	/* ECSPI on i.mx51 */
	IMX53_ECSPI,	/* ECSPI on i.mx53 and later */
};

struct spi_imx_data;

/*
 * Per-SoC operations and capabilities. Each supported controller variant
 * provides one instance of this, selected via the OF match table.
 */
struct spi_imx_devtype_data {
	void (*intctrl)(struct spi_imx_data *spi_imx, int enable);
	int (*prepare_message)(struct spi_imx_data *spi_imx, struct spi_message *msg);
	int (*prepare_transfer)(struct spi_imx_data *spi_imx, struct spi_device *spi);
	void (*trigger)(struct spi_imx_data *spi_imx);
	int (*rx_available)(struct spi_imx_data *spi_imx);
	void (*reset)(struct spi_imx_data *spi_imx);
	void (*setup_wml)(struct spi_imx_data *spi_imx);
	void (*disable)(struct spi_imx_data *spi_imx);
	bool has_dmamode;
	bool has_targetmode;
	unsigned int fifo_size;
	bool dynamic_burst;
	/*
	 * ERR009165 fixed or not:
	 * https://www.nxp.com/docs/en/errata/IMX6DQCE.pdf
	 */
	bool tx_glitch_fixed;
	enum spi_imx_devtype devtype;
};

/* Driver-private state, one per controller instance */
struct spi_imx_data {
	struct spi_controller *controller;
	struct device *dev;

	struct completion xfer_done;
	void __iomem *base;
	unsigned long base_phys;

	struct clk *clk_per;
	struct clk *clk_ipg;
	unsigned long spi_clk;		/* input (reference) clock rate */
	unsigned int spi_bus_clk;	/* actual achieved SCLK rate */

	unsigned int bits_per_word;
	unsigned int spi_drctl;

	/* count: bytes left to push to TX; remainder: bytes left in burst */
	unsigned int count, remainder;
	void (*tx)(struct spi_imx_data *spi_imx);
	void (*rx)(struct spi_imx_data *spi_imx);
	void *rx_buf;
	const void *tx_buf;
	unsigned int txfifo; /* number of words pushed in tx FIFO */
	unsigned int dynamic_burst;
	bool rx_only;

	/* Target mode */
	bool target_mode;
	bool target_aborted;
	unsigned int target_burst;

	/* DMA */
	bool usedma;
	u32 wml;	/* watermark level programmed into the ECSPI DMA register */
	struct completion dma_rx_completion;
	struct completion dma_tx_completion;

	const struct spi_imx_devtype_data *devtype_data;
};

static inline int is_imx27_cspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX27_CSPI;
}

static inline int is_imx35_cspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX35_CSPI;
}

static inline int is_imx51_ecspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX51_ECSPI;
}

static inline int is_imx53_ecspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX53_ECSPI;
}

/*
 * Generate PIO RX helpers for u8/u16/u32 word sizes: pop one word from the
 * RX FIFO and, if a buffer is present, store it and advance the pointer.
 */
#define MXC_SPI_BUF_RX(type)						\
static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx)		\
{									\
	unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);	\
									\
	if (spi_imx->rx_buf) {						\
		*(type *)spi_imx->rx_buf = val;				\
		spi_imx->rx_buf += sizeof(type);			\
	}								\
									\
	spi_imx->remainder -= sizeof(type);				\
}

/*
 * Generate PIO TX helpers: fetch one word from the buffer (zero if the
 * transfer is RX-only) and push it into the TX FIFO.
 */
#define MXC_SPI_BUF_TX(type)						\
static void spi_imx_buf_tx_##type(struct spi_imx_data *spi_imx)		\
{									\
	type val = 0;							\
									\
	if (spi_imx->tx_buf) {						\
		val = *(type *)spi_imx->tx_buf;				\
		spi_imx->tx_buf += sizeof(type);			\
	}								\
									\
	spi_imx->count -= sizeof(type);					\
									\
	writel(val, spi_imx->base + MXC_CSPITXDATA);			\
}

MXC_SPI_BUF_RX(u8)
MXC_SPI_BUF_TX(u8)
MXC_SPI_BUF_RX(u16)
MXC_SPI_BUF_TX(u16)
MXC_SPI_BUF_RX(u32)
MXC_SPI_BUF_TX(u32)

/* First entry is reserved, second entry is valid only if SDHC_SPIEN is set
 * (which is currently not the case in this driver)
 */
static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
	256, 384, 512, 768, 1024};

/* MX21, MX27 */
static unsigned int spi_imx_clkdiv_1(unsigned int fin,
		unsigned int fspi, unsigned int max, unsigned int *fres)
{
	int i;

	/* pick the smallest divider that does not exceed the requested rate */
	for (i = 2; i < max; i++)
		if (fspi * mxc_clkdivs[i] >= fin)
			break;

	*fres = fin / mxc_clkdivs[i];
	return i;
}

/* MX1, MX31, MX35, MX51 CSPI */
static unsigned int spi_imx_clkdiv_2(unsigned int fin,
		unsigned int fspi, unsigned int *fres)
{
	/* power-of-two divider, starting at 4 */
	int i, div = 4;

	for (i = 0; i < 7; i++) {
		if (fspi * div >= fin)
			goto out;
		div <<= 1;
	}

out:
	*fres = fin / div;
	return i;
}

/* Map a bits_per_word value to the FIFO word width in bytes (1, 2 or 4) */
static int spi_imx_bytes_per_word(const int bits_per_word)
{
	if (bits_per_word <= 8)
		return 1;
	else if (bits_per_word <= 16)
		return 2;
	else
		return 4;
}

/*
 * Decide whether a transfer should use DMA. Requires the module parameter
 * to allow it, no earlier DMA fallback, a RX channel, host mode, and a
 * transfer at least as large as the FIFO. DMA and dynamic burst are
 * mutually exclusive, so dynamic_burst is cleared here.
 */
static bool spi_imx_can_dma(struct spi_controller *controller, struct spi_device *spi,
			    struct spi_transfer *transfer)
{
	struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);

	if (!use_dma || controller->fallback)
		return false;

	if (!controller->dma_rx)
		return false;

	if (spi_imx->target_mode)
		return false;

	if (transfer->len < spi_imx->devtype_data->fifo_size)
		return false;

	spi_imx->dynamic_burst = 0;

	return true;
}

/*
 * Note the number of natively supported chip selects for MX51 is 4. Some
 * devices may have less actual SS pins but the register map supports 4. When
 * using gpio chip selects the cs values passed into the macros below can go
 * outside the range 0 - 3. We therefore need to limit the cs value to avoid
 * corrupting bits outside the allocated locations.
 *
 * The simplest way to do this is to just mask the cs bits to 2 bits. This
 * still allows all 4 native chip selects to work as well as gpio chip selects
 * (which can use any of the 4 chip select configurations).
 */

#define MX51_ECSPI_CTRL		0x08
#define MX51_ECSPI_CTRL_ENABLE		(1 <<  0)
#define MX51_ECSPI_CTRL_XCH		(1 <<  2)
#define MX51_ECSPI_CTRL_SMC		(1 << 3)
#define MX51_ECSPI_CTRL_MODE_MASK	(0xf << 4)
#define MX51_ECSPI_CTRL_DRCTL(drctl)	((drctl) << 16)
#define MX51_ECSPI_CTRL_POSTDIV_OFFSET	8
#define MX51_ECSPI_CTRL_PREDIV_OFFSET	12
#define MX51_ECSPI_CTRL_CS(cs)		((cs & 3) << 18)
#define MX51_ECSPI_CTRL_BL_OFFSET	20
#define MX51_ECSPI_CTRL_BL_MASK		(0xfff << 20)

#define MX51_ECSPI_CONFIG	0x0c
#define MX51_ECSPI_CONFIG_SCLKPHA(cs)	(1 << ((cs & 3) +  0))
#define MX51_ECSPI_CONFIG_SCLKPOL(cs)	(1 << ((cs & 3) +  4))
#define MX51_ECSPI_CONFIG_SBBCTRL(cs)	(1 << ((cs & 3) +  8))
#define MX51_ECSPI_CONFIG_SSBPOL(cs)	(1 << ((cs & 3) + 12))
#define MX51_ECSPI_CONFIG_DATACTL(cs)	(1 << ((cs & 3) + 16))
#define MX51_ECSPI_CONFIG_SCLKCTL(cs)	(1 << ((cs & 3) + 20))

#define MX51_ECSPI_INT		0x10
#define MX51_ECSPI_INT_TEEN		(1 <<  0)
#define MX51_ECSPI_INT_RREN		(1 <<  3)
#define MX51_ECSPI_INT_RDREN		(1 <<  4)

#define MX51_ECSPI_DMA		0x14
#define MX51_ECSPI_DMA_TX_WML(wml)	((wml) & 0x3f)
#define MX51_ECSPI_DMA_RX_WML(wml)	(((wml) & 0x3f) << 16)
#define MX51_ECSPI_DMA_RXT_WML(wml)	(((wml) & 0x3f) << 24)

#define MX51_ECSPI_DMA_TEDEN		(1 << 7)
#define MX51_ECSPI_DMA_RXDEN		(1 << 23)
#define MX51_ECSPI_DMA_RXTDEN		(1 << 31)

#define MX51_ECSPI_STAT		0x18
#define MX51_ECSPI_STAT_RR		(1 <<  3)

#define MX51_ECSPI_TESTREG	0x20
#define MX51_ECSPI_TESTREG_LBC	BIT(31)

/*
 * RX helper for dynamic-burst mode: reads a full 32-bit FIFO word and,
 * on little-endian hosts, byte/halfword-swaps it so that 8/16-bit data
 * lands in memory order.
 */
static void spi_imx_buf_rx_swap_u32(struct spi_imx_data *spi_imx)
{
	unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);

	if (spi_imx->rx_buf) {
#ifdef __LITTLE_ENDIAN
		unsigned int bytes_per_word;

		bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);
		if (bytes_per_word == 1)
			swab32s(&val);
		else if (bytes_per_word == 2)
			swahw32s(&val);
#endif
		*(u32 *)spi_imx->rx_buf = val;
		spi_imx->rx_buf += sizeof(u32);
	}

	spi_imx->remainder -= sizeof(u32);
}

/*
 * RX dispatch for dynamic burst: use the 32-bit swapping path for aligned
 * data, fall back to 16-bit or byte-wise extraction for the unaligned tail.
 */
static void spi_imx_buf_rx_swap(struct spi_imx_data *spi_imx)
{
	int unaligned;
	u32 val;

	unaligned = spi_imx->remainder % 4;

	if (!unaligned) {
		spi_imx_buf_rx_swap_u32(spi_imx);
		return;
	}

	if (spi_imx_bytes_per_word(spi_imx->bits_per_word) == 2) {
		spi_imx_buf_rx_u16(spi_imx);
		return;
	}

	val = readl(spi_imx->base + MXC_CSPIRXDATA);

	/* unpack the remaining 1-3 bytes, most significant byte first */
	while (unaligned--) {
		if (spi_imx->rx_buf) {
			*(u8 *)spi_imx->rx_buf = (val >> (8 * unaligned)) & 0xff;
			spi_imx->rx_buf++;
		}
		spi_imx->remainder--;
	}
}

/* TX counterpart of spi_imx_buf_rx_swap_u32 */
static void spi_imx_buf_tx_swap_u32(struct spi_imx_data *spi_imx)
{
	u32 val = 0;
#ifdef __LITTLE_ENDIAN
	unsigned int bytes_per_word;
#endif

	if (spi_imx->tx_buf) {
		val = *(u32 *)spi_imx->tx_buf;
		spi_imx->tx_buf += sizeof(u32);
	}

	spi_imx->count -= sizeof(u32);
#ifdef __LITTLE_ENDIAN
	bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);

	if (bytes_per_word == 1)
		swab32s(&val);
	else if (bytes_per_word == 2)
		swahw32s(&val);
#endif
	writel(val, spi_imx->base + MXC_CSPITXDATA);
}

/* TX dispatch for dynamic burst; mirrors spi_imx_buf_rx_swap() */
static void spi_imx_buf_tx_swap(struct spi_imx_data *spi_imx)
{
	int unaligned;
	u32 val = 0;

	unaligned = spi_imx->count % 4;

	if (!unaligned) {
		spi_imx_buf_tx_swap_u32(spi_imx);
		return;
	}

	if (spi_imx_bytes_per_word(spi_imx->bits_per_word) == 2) {
		spi_imx_buf_tx_u16(spi_imx);
		return;
	}

	/* pack the trailing 1-3 bytes into the top of the FIFO word */
	while (unaligned--) {
		if (spi_imx->tx_buf) {
			val |= *(u8 *)spi_imx->tx_buf << (8 * unaligned);
			spi_imx->tx_buf++;
		}
		spi_imx->count--;
	}

	writel(val, spi_imx->base + MXC_CSPITXDATA);
}

/*
 * Target-mode RX on i.MX53 ECSPI: data arrives big-endian in the FIFO word;
 * only the final (possibly partial) chunk of the burst is copied out.
 */
static void mx53_ecspi_rx_target(struct spi_imx_data *spi_imx)
{
	u32 val = be32_to_cpu(readl(spi_imx->base + MXC_CSPIRXDATA));

	if (spi_imx->rx_buf) {
		int n_bytes = spi_imx->target_burst % sizeof(val);

		if (!n_bytes)
			n_bytes = sizeof(val);

		memcpy(spi_imx->rx_buf,
		       ((u8 *)&val) + sizeof(val) - n_bytes, n_bytes);

		spi_imx->rx_buf += n_bytes;
		spi_imx->target_burst -= n_bytes;
	}

	spi_imx->remainder -= sizeof(u32);
}

/* Target-mode TX on i.MX53 ECSPI; mirrors mx53_ecspi_rx_target() */
static void mx53_ecspi_tx_target(struct spi_imx_data *spi_imx)
{
	u32 val = 0;
	int n_bytes = spi_imx->count % sizeof(val);

	if (!n_bytes)
		n_bytes = sizeof(val);

	if (spi_imx->tx_buf) {
		memcpy(((u8 *)&val) + sizeof(val) - n_bytes,
		       spi_imx->tx_buf, n_bytes);
		val = cpu_to_be32(val);
		spi_imx->tx_buf += n_bytes;
	}

	spi_imx->count -= n_bytes;

	writel(val, spi_imx->base + MXC_CSPITXDATA);
}

/* MX51 eCSPI */
static unsigned int mx51_ecspi_clkdiv(struct spi_imx_data *spi_imx,
				      unsigned int fspi, unsigned int *fres)
{
	/*
	 * there are two 4-bit dividers, the pre-divider divides by
	 * $pre, the post-divider by 2^$post
	 */
	unsigned int pre, post;
	unsigned int fin = spi_imx->spi_clk;

	fspi = min(fspi, fin);

	post = fls(fin) - fls(fspi);
	if (fin > fspi << post)
		post++;

	/* now we have: (fin <= fspi << post) with post being minimal */

	post = max(4U, post) - 4;
	if (unlikely(post > 0xf)) {
		dev_err(spi_imx->dev, "cannot set clock freq: %u (base freq: %u)\n",
			fspi, fin);
		return 0xff;
	}

	pre = DIV_ROUND_UP(fin, fspi << post) - 1;

	dev_dbg(spi_imx->dev, "%s: fin: %u, fspi: %u, post: %u, pre: %u\n",
			__func__, fin, fspi, post, pre);

	/* Resulting frequency for the SCLK line. */
	*fres = (fin / (pre + 1)) >> post;

	return (pre << MX51_ECSPI_CTRL_PREDIV_OFFSET) |
		(post << MX51_ECSPI_CTRL_POSTDIV_OFFSET);
}

/* Translate the generic MXC_INT_* mask into ECSPI interrupt enables */
static void mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned int val = 0;

	if (enable & MXC_INT_TE)
		val |= MX51_ECSPI_INT_TEEN;

	if (enable & MXC_INT_RR)
		val |= MX51_ECSPI_INT_RREN;

	if (enable & MXC_INT_RDR)
		val |= MX51_ECSPI_INT_RDREN;

	writel(val, spi_imx->base + MX51_ECSPI_INT);
}

/* Kick off a PIO burst by setting the XCH (exchange) bit */
static void mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
{
	u32 reg;

	reg = readl(spi_imx->base + MX51_ECSPI_CTRL);
	reg |= MX51_ECSPI_CTRL_XCH;
	writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
}

/* Disable the controller by clearing the ENABLE bit */
static void mx51_ecspi_disable(struct spi_imx_data *spi_imx)
{
	u32 ctrl;

	ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
	ctrl &= ~MX51_ECSPI_CTRL_ENABLE;
	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
}

/*
 * Pick the hardware channel to program: the native chip select when no GPIO
 * CS is used, otherwise an unused native CS slot.
 */
static int mx51_ecspi_channel(const struct spi_device *spi)
{
	if (!spi_get_csgpiod(spi, 0))
		return spi_get_chipselect(spi, 0);
	return spi->controller->unused_native_cs;
}

static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
				      struct spi_message *msg)
{
	struct spi_device *spi = msg->spi;
	struct spi_transfer *xfer;
	u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
	u32 min_speed_hz = ~0U;
	u32 testreg, delay;
	u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
	u32 current_cfg = cfg;
	int channel = mx51_ecspi_channel(spi);

	/* set Host or Target mode */
	if (spi_imx->target_mode)
		ctrl &= ~MX51_ECSPI_CTRL_MODE_MASK;
	else
		ctrl |= MX51_ECSPI_CTRL_MODE_MASK;

	/*
	 * Enable SPI_RDY handling (falling edge/level triggered).
	 */
	if (spi->mode & SPI_READY)
		ctrl |= MX51_ECSPI_CTRL_DRCTL(spi_imx->spi_drctl);

	/* set chip select to use */
	ctrl |= MX51_ECSPI_CTRL_CS(channel);

	/*
	 * The ctrl register must be written first, with the EN bit set other
	 * registers must not be written to.
	 */
	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);

	testreg = readl(spi_imx->base + MX51_ECSPI_TESTREG);
	if (spi->mode & SPI_LOOP)
		testreg |= MX51_ECSPI_TESTREG_LBC;
	else
		testreg &= ~MX51_ECSPI_TESTREG_LBC;
	writel(testreg, spi_imx->base + MX51_ECSPI_TESTREG);

	/*
	 * eCSPI burst completion by Chip Select signal in Target mode
	 * is not functional for imx53 Soc, config SPI burst completed when
	 * BURST_LENGTH + 1 bits are received
	 */
	if (spi_imx->target_mode && is_imx53_ecspi(spi_imx))
		cfg &= ~MX51_ECSPI_CONFIG_SBBCTRL(channel);
	else
		cfg |= MX51_ECSPI_CONFIG_SBBCTRL(channel);

	if (spi->mode & SPI_CPOL) {
		cfg |= MX51_ECSPI_CONFIG_SCLKPOL(channel);
		cfg |= MX51_ECSPI_CONFIG_SCLKCTL(channel);
	} else {
		cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(channel);
		cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(channel);
	}

	if (spi->mode & SPI_MOSI_IDLE_LOW)
		cfg |= MX51_ECSPI_CONFIG_DATACTL(channel);
	else
		cfg &= ~MX51_ECSPI_CONFIG_DATACTL(channel);

	if (spi->mode & SPI_CS_HIGH)
		cfg |= MX51_ECSPI_CONFIG_SSBPOL(channel);
	else
		cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(channel);

	/* nothing changed: skip the write and the propagation delay below */
	if (cfg == current_cfg)
		return 0;

	writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);

	/*
	 * Wait until the changes in the configuration register CONFIGREG
	 * propagate into the hardware. It takes exactly one tick of the
	 * SCLK clock, but we will wait two SCLK clock just to be sure. The
	 * effect of the delay it takes for the hardware to apply changes
	 * is noticable if the SCLK clock run very slow. In such a case, if
	 * the polarity of SCLK should be inverted, the GPIO ChipSelect might
	 * be asserted before the SCLK polarity changes, which would disrupt
	 * the SPI communication as the device on the other end would consider
	 * the change of SCLK polarity as a clock tick already.
	 *
	 * Because spi_imx->spi_bus_clk is only set in prepare_message
	 * callback, iterate over all the transfers in spi_message, find the
	 * one with lowest bus frequency, and use that bus frequency for the
	 * delay calculation. In case all transfers have speed_hz == 0, then
	 * min_speed_hz is ~0 and the resulting delay is zero.
	 */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!xfer->speed_hz)
			continue;
		min_speed_hz = min(xfer->speed_hz, min_speed_hz);
	}

	delay = (2 * 1000000) / min_speed_hz;
	if (likely(delay < 10))	/* SCLK is faster than 200 kHz */
		udelay(delay);
	else			/* SCLK is _very_ slow */
		usleep_range(delay, delay + 10);

	return 0;
}

/*
 * Program SCLKPHA. CPHA is logically flipped for RX-only transfers when the
 * device requested SPI_RX_CPHA_FLIP.
 */
static void mx51_configure_cpha(struct spi_imx_data *spi_imx,
				struct spi_device *spi)
{
	bool cpha = (spi->mode & SPI_CPHA);
	bool flip_cpha = (spi->mode & SPI_RX_CPHA_FLIP) && spi_imx->rx_only;
	u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
	int channel = mx51_ecspi_channel(spi);

	/* Flip cpha logical value iff flip_cpha */
	cpha ^= flip_cpha;

	if (cpha)
		cfg |= MX51_ECSPI_CONFIG_SCLKPHA(channel);
	else
		cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(channel);

	writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
}

static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
				       struct spi_device *spi)
{
	u32 ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
	u32 clk;

	/* Clear BL field and set the right value */
	ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
	if (spi_imx->target_mode && is_imx53_ecspi(spi_imx))
		ctrl |= (spi_imx->target_burst * 8 - 1)
			<<
			MX51_ECSPI_CTRL_BL_OFFSET;
	else {
		/* burst length is capped at the 12-bit BL field maximum */
		if (spi_imx->count >= 512)
			ctrl |= 0xFFF << MX51_ECSPI_CTRL_BL_OFFSET;
		else
			ctrl |= (spi_imx->count * 8 - 1)
				<< MX51_ECSPI_CTRL_BL_OFFSET;
	}

	/* set clock speed */
	ctrl &= ~(0xf << MX51_ECSPI_CTRL_POSTDIV_OFFSET |
		  0xf << MX51_ECSPI_CTRL_PREDIV_OFFSET);
	ctrl |= mx51_ecspi_clkdiv(spi_imx, spi_imx->spi_bus_clk, &clk);
	spi_imx->spi_bus_clk = clk;

	mx51_configure_cpha(spi_imx, spi);

	/*
	 * ERR009165: work in XHC mode instead of SMC as PIO on the chips
	 * before i.mx6ul.
	 */
	if (spi_imx->usedma && spi_imx->devtype_data->tx_glitch_fixed)
		ctrl |= MX51_ECSPI_CTRL_SMC;
	else
		ctrl &= ~MX51_ECSPI_CTRL_SMC;

	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);

	return 0;
}

static void mx51_setup_wml(struct spi_imx_data *spi_imx)
{
	u32 tx_wml = 0;

	/* keep TX WML at 0 on chips affected by ERR009165 */
	if (spi_imx->devtype_data->tx_glitch_fixed)
		tx_wml = spi_imx->wml;
	/*
	 * Configure the DMA register: setup the watermark
	 * and enable DMA request.
	 */
	writel(MX51_ECSPI_DMA_RX_WML(spi_imx->wml - 1) |
		MX51_ECSPI_DMA_TX_WML(tx_wml) |
		MX51_ECSPI_DMA_RXT_WML(spi_imx->wml) |
		MX51_ECSPI_DMA_TEDEN | MX51_ECSPI_DMA_RXDEN |
		MX51_ECSPI_DMA_RXTDEN, spi_imx->base + MX51_ECSPI_DMA);
}

/* Non-zero when the RX FIFO holds at least one word */
static int mx51_ecspi_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MX51_ECSPI_STAT) & MX51_ECSPI_STAT_RR;
}

static void mx51_ecspi_reset(struct spi_imx_data *spi_imx)
{
	/* drain receive buffer */
	while (mx51_ecspi_rx_available(spi_imx))
		readl(spi_imx->base + MXC_CSPIRXDATA);
}

#define MX31_INTREG_TEEN	(1 << 0)
#define MX31_INTREG_RREN	(1 << 3)

#define MX31_CSPICTRL_ENABLE	(1 << 0)
#define MX31_CSPICTRL_HOST	(1 << 1)
#define MX31_CSPICTRL_XCH	(1 << 2)
#define MX31_CSPICTRL_SMC	(1 << 3)
#define MX31_CSPICTRL_POL	(1 << 4)
#define MX31_CSPICTRL_PHA	(1 << 5)
#define MX31_CSPICTRL_SSCTL	(1 << 6)
#define MX31_CSPICTRL_SSPOL	(1 << 7)
#define MX31_CSPICTRL_BC_SHIFT	8
#define MX35_CSPICTRL_BL_SHIFT	20
#define MX31_CSPICTRL_CS_SHIFT	24
#define MX35_CSPICTRL_CS_SHIFT	12
#define MX31_CSPICTRL_DR_SHIFT	16

#define MX31_CSPI_DMAREG	0x10
#define MX31_DMAREG_RH_DEN	(1<<4)
#define MX31_DMAREG_TH_DEN	(1<<1)

#define MX31_CSPISTATUS		0x14
#define MX31_STATUS_RR		(1 << 3)

#define MX31_CSPI_TESTREG	0x1C
#define MX31_TEST_LBC		(1 << 14)

/* These functions also work for the i.MX35, but be aware that
 * the i.MX35 has a slightly different register layout for bits
 * we do not use here.
 */
static void mx31_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned int val = 0;

	if (enable & MXC_INT_TE)
		val |= MX31_INTREG_TEEN;
	if (enable & MXC_INT_RR)
		val |= MX31_INTREG_RREN;

	writel(val, spi_imx->base + MXC_CSPIINT);
}

/* Start a PIO burst by setting the XCH bit */
static void mx31_trigger(struct spi_imx_data *spi_imx)
{
	unsigned int reg;

	reg = readl(spi_imx->base + MXC_CSPICTRL);
	reg |= MX31_CSPICTRL_XCH;
	writel(reg, spi_imx->base + MXC_CSPICTRL);
}

/* No per-message setup needed on MX31-class controllers */
static int mx31_prepare_message(struct spi_imx_data *spi_imx,
				struct spi_message *msg)
{
	return 0;
}

static int mx31_prepare_transfer(struct spi_imx_data *spi_imx,
				 struct spi_device *spi)
{
	unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_HOST;
	unsigned int clk;

	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->spi_bus_clk, &clk) <<
		MX31_CSPICTRL_DR_SHIFT;
	spi_imx->spi_bus_clk = clk;

	/* burst length field lives at a different shift on i.MX35 */
	if (is_imx35_cspi(spi_imx)) {
		reg |= (spi_imx->bits_per_word - 1) << MX35_CSPICTRL_BL_SHIFT;
		reg |= MX31_CSPICTRL_SSCTL;
	} else {
		reg |= (spi_imx->bits_per_word - 1) << MX31_CSPICTRL_BC_SHIFT;
	}

	if (spi->mode & SPI_CPHA)
		reg |= MX31_CSPICTRL_PHA;
	if (spi->mode & SPI_CPOL)
		reg |= MX31_CSPICTRL_POL;
	if (spi->mode & SPI_CS_HIGH)
		reg |= MX31_CSPICTRL_SSPOL;
	if (!spi_get_csgpiod(spi, 0))
		reg |= (spi_get_chipselect(spi, 0)) <<
			(is_imx35_cspi(spi_imx) ? MX35_CSPICTRL_CS_SHIFT :
						  MX31_CSPICTRL_CS_SHIFT);

	if (spi_imx->usedma)
		reg |= MX31_CSPICTRL_SMC;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	reg = readl(spi_imx->base + MX31_CSPI_TESTREG);
	if (spi->mode & SPI_LOOP)
		reg |= MX31_TEST_LBC;
	else
		reg &= ~MX31_TEST_LBC;
	writel(reg, spi_imx->base + MX31_CSPI_TESTREG);

	if (spi_imx->usedma) {
		/*
		 * configure DMA requests when RXFIFO is half full and
		 * when TXFIFO is half empty
		 */
		writel(MX31_DMAREG_RH_DEN | MX31_DMAREG_TH_DEN,
			spi_imx->base + MX31_CSPI_DMAREG);
	}

	return 0;
}

static int mx31_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR;
}

static void mx31_reset(struct spi_imx_data *spi_imx)
{
	/* drain receive buffer */
	while (readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR)
		readl(spi_imx->base + MXC_CSPIRXDATA);
}

#define MX21_INTREG_RR		(1 << 4)
#define MX21_INTREG_TEEN	(1 << 9)
#define MX21_INTREG_RREN	(1 << 13)

#define MX21_CSPICTRL_POL	(1 << 5)
#define MX21_CSPICTRL_PHA	(1 << 6)
#define MX21_CSPICTRL_SSPOL	(1 << 8)
#define MX21_CSPICTRL_XCH	(1 << 9)
#define MX21_CSPICTRL_ENABLE	(1 << 10)
#define MX21_CSPICTRL_HOST	(1 << 11)
#define MX21_CSPICTRL_DR_SHIFT	14
#define MX21_CSPICTRL_CS_SHIFT	19

static void mx21_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned int val = 0;

	if (enable & MXC_INT_TE)
		val |= MX21_INTREG_TEEN;
	if (enable & MXC_INT_RR)
		val |= MX21_INTREG_RREN;

	writel(val, spi_imx->base + MXC_CSPIINT);
}

/* Start a PIO burst by setting the XCH bit */
static void mx21_trigger(struct spi_imx_data *spi_imx)
{
	unsigned int reg;

	reg = readl(spi_imx->base + MXC_CSPICTRL);
	reg |= MX21_CSPICTRL_XCH;
	writel(reg, spi_imx->base + MXC_CSPICTRL);
}

/* No per-message setup needed on MX21-class controllers */
static int mx21_prepare_message(struct spi_imx_data *spi_imx,
				struct spi_message *msg)
{
	return 0;
}

static int mx21_prepare_transfer(struct spi_imx_data *spi_imx,
				 struct spi_device *spi)
{
	unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_HOST;
	/* i.MX27 supports two extra divider table entries over i.MX21 */
	unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18;
	unsigned int clk;

	reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, spi_imx->spi_bus_clk, max, &clk)
		<< MX21_CSPICTRL_DR_SHIFT;
	spi_imx->spi_bus_clk = clk;

	reg |= spi_imx->bits_per_word - 1;

	if (spi->mode & SPI_CPHA)
		reg |= MX21_CSPICTRL_PHA;
	if (spi->mode & SPI_CPOL)
		reg |= MX21_CSPICTRL_POL;
	if (spi->mode & SPI_CS_HIGH)
		reg |= MX21_CSPICTRL_SSPOL;
	if (!spi_get_csgpiod(spi, 0))
		reg |= spi_get_chipselect(spi, 0) << MX21_CSPICTRL_CS_SHIFT;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	return 0;
}

static int mx21_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MXC_CSPIINT) & MX21_INTREG_RR;
}

/* Hardware reset via the dedicated reset register */
static void mx21_reset(struct spi_imx_data *spi_imx)
{
	writel(1, spi_imx->base + MXC_RESET);
}

#define MX1_INTREG_RR		(1 << 3)
#define MX1_INTREG_TEEN		(1 << 8)
#define MX1_INTREG_RREN		(1 << 11)

#define MX1_CSPICTRL_POL	(1 << 4)
#define MX1_CSPICTRL_PHA	(1 << 5)
#define MX1_CSPICTRL_XCH	(1 << 8)
#define MX1_CSPICTRL_ENABLE	(1 << 9)
#define MX1_CSPICTRL_HOST	(1 << 10)
#define MX1_CSPICTRL_DR_SHIFT	13

static void mx1_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned int val = 0;

	if (enable & MXC_INT_TE)
		val |= MX1_INTREG_TEEN;
	if (enable & MXC_INT_RR)
		val |= MX1_INTREG_RREN;

	writel(val, spi_imx->base + MXC_CSPIINT);
}

/* Start a PIO burst by setting the XCH bit */
static void mx1_trigger(struct spi_imx_data *spi_imx)
{
	unsigned int reg;

	reg = readl(spi_imx->base + MXC_CSPICTRL);
	reg |= MX1_CSPICTRL_XCH;
	writel(reg, spi_imx->base + MXC_CSPICTRL);
}

/* No per-message setup needed on MX1 */
static int mx1_prepare_message(struct spi_imx_data *spi_imx,
			       struct spi_message *msg)
{
	return 0;
}

static int mx1_prepare_transfer(struct spi_imx_data *spi_imx,
				struct spi_device *spi)
{
	unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_HOST;
	unsigned int clk;

	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->spi_bus_clk, &clk) <<
		MX1_CSPICTRL_DR_SHIFT;
	spi_imx->spi_bus_clk = clk;

	reg |= spi_imx->bits_per_word - 1;

	if (spi->mode & SPI_CPHA)
		reg |= MX1_CSPICTRL_PHA;
	if (spi->mode & SPI_CPOL)
		reg |= MX1_CSPICTRL_POL;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	return 0;
}

static int mx1_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR;
}

static void mx1_reset(struct spi_imx_data *spi_imx)
{
	writel(1, spi_imx->base + MXC_RESET);
}

static struct spi_imx_devtype_data imx1_cspi_devtype_data = {
	.intctrl = mx1_intctrl,
	.prepare_message = mx1_prepare_message,
	.prepare_transfer = mx1_prepare_transfer,
	.trigger = mx1_trigger,
	.rx_available = mx1_rx_available,
	.reset = mx1_reset,
	.fifo_size = 8,
	.has_dmamode = false,
	.dynamic_burst = false,
	.has_targetmode = false,
	.devtype = IMX1_CSPI,
};

static struct spi_imx_devtype_data imx21_cspi_devtype_data = {
	.intctrl = mx21_intctrl,
	.prepare_message = mx21_prepare_message,
	.prepare_transfer = mx21_prepare_transfer,
	.trigger = mx21_trigger,
	.rx_available = mx21_rx_available,
	.reset = mx21_reset,
	.fifo_size = 8,
	.has_dmamode = false,
	.dynamic_burst = false,
	.has_targetmode = false,
	.devtype = IMX21_CSPI,
};

static struct spi_imx_devtype_data imx27_cspi_devtype_data = {
	/* i.mx27 cspi shares the functions with i.mx21 one */
	.intctrl = mx21_intctrl,
	.prepare_message = mx21_prepare_message,
	.prepare_transfer = mx21_prepare_transfer,
	.trigger = mx21_trigger,
	.rx_available = mx21_rx_available,
	.reset = mx21_reset,
	.fifo_size = 8,
	.has_dmamode = false,
	.dynamic_burst = false,
	.has_targetmode = false,
	.devtype = IMX27_CSPI,
};

static struct spi_imx_devtype_data imx31_cspi_devtype_data = {
	.intctrl = mx31_intctrl,
	.prepare_message = mx31_prepare_message,
	.prepare_transfer = mx31_prepare_transfer,
	.trigger = mx31_trigger,
	.rx_available = mx31_rx_available,
	.reset = mx31_reset,
	.fifo_size = 8,
	.has_dmamode = false,
	.dynamic_burst = false,
	.has_targetmode = false,
	.devtype = IMX31_CSPI,
};

static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
	/* i.mx35 and later cspi shares the functions with i.mx31 one */
	.intctrl = mx31_intctrl,
	.prepare_message = mx31_prepare_message,
	.prepare_transfer = mx31_prepare_transfer,
	.trigger = mx31_trigger,
	.rx_available = mx31_rx_available,
	.reset = mx31_reset,
	.fifo_size = 8,
	.has_dmamode = true,
	.dynamic_burst = false,
	.has_targetmode = false,
	.devtype = IMX35_CSPI,
};

static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
	.intctrl = mx51_ecspi_intctrl,
	.prepare_message = mx51_ecspi_prepare_message,
	.prepare_transfer = mx51_ecspi_prepare_transfer,
	.trigger = mx51_ecspi_trigger,
	.rx_available = mx51_ecspi_rx_available,
	.reset = mx51_ecspi_reset,
	.setup_wml = mx51_setup_wml,
	.fifo_size = 64,
	.has_dmamode = true,
	.dynamic_burst = true,
	.has_targetmode = true,
	.disable = mx51_ecspi_disable,
	.devtype = IMX51_ECSPI,
};

static struct spi_imx_devtype_data imx53_ecspi_devtype_data = {
	.intctrl = mx51_ecspi_intctrl,
	.prepare_message = mx51_ecspi_prepare_message,
	.prepare_transfer = mx51_ecspi_prepare_transfer,
	.trigger = mx51_ecspi_trigger,
	.rx_available = mx51_ecspi_rx_available,
	.reset = mx51_ecspi_reset,
	.fifo_size = 64,
	.has_dmamode = true,
	.has_targetmode = true,
	.disable = mx51_ecspi_disable,
	.devtype = IMX53_ECSPI,
};

static struct spi_imx_devtype_data imx6ul_ecspi_devtype_data = {
	/* same ECSPI block as i.mx51 but with ERR009165 fixed in hardware */
	.intctrl = mx51_ecspi_intctrl,
	.prepare_message = mx51_ecspi_prepare_message,
	.prepare_transfer = mx51_ecspi_prepare_transfer,
	.trigger = mx51_ecspi_trigger,
	.rx_available = mx51_ecspi_rx_available,
	.reset = mx51_ecspi_reset,
	.setup_wml = mx51_setup_wml,
	.fifo_size = 64,
	.has_dmamode = true,
	.dynamic_burst = true,
	.has_targetmode = true,
	.tx_glitch_fixed = true,
	.disable = mx51_ecspi_disable,
	.devtype = IMX51_ECSPI,
};

static const struct of_device_id spi_imx_dt_ids[] = {
	{ .compatible = "fsl,imx1-cspi", .data = &imx1_cspi_devtype_data, },
	{ .compatible = "fsl,imx21-cspi", .data = &imx21_cspi_devtype_data, },
	{ .compatible = "fsl,imx27-cspi", .data = &imx27_cspi_devtype_data, },
	{ .compatible = "fsl,imx31-cspi", .data = &imx31_cspi_devtype_data, },
	{ .compatible = "fsl,imx35-cspi", .data = &imx35_cspi_devtype_data, },
	{ .compatible = "fsl,imx51-ecspi", .data = &imx51_ecspi_devtype_data, },
	{ .compatible = "fsl,imx53-ecspi", .data = &imx53_ecspi_devtype_data, },
	{ .compatible = "fsl,imx6ul-ecspi", .data = &imx6ul_ecspi_devtype_data, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, spi_imx_dt_ids);

/* Rewrite the ECSPI BURST_LENGTH field for the next burst (n_bits - 1) */
static void spi_imx_set_burst_len(struct spi_imx_data *spi_imx, int n_bits)
{
	u32 ctrl;

	ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
	ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
	ctrl |= ((n_bits - 1) << MX51_ECSPI_CTRL_BL_OFFSET);
	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
}

/* Fill the TX FIFO from the transfer buffer and (in host mode) trigger it */
static void spi_imx_push(struct spi_imx_data *spi_imx)
{
	unsigned int burst_len;
1134 1135 /* 1136 * Reload the FIFO when the remaining bytes to be transferred in the 1137 * current burst is 0. This only applies when bits_per_word is a 1138 * multiple of 8. 1139 */ 1140 if (!spi_imx->remainder) { 1141 if (spi_imx->dynamic_burst) { 1142 1143 /* We need to deal unaligned data first */ 1144 burst_len = spi_imx->count % MX51_ECSPI_CTRL_MAX_BURST; 1145 1146 if (!burst_len) 1147 burst_len = MX51_ECSPI_CTRL_MAX_BURST; 1148 1149 spi_imx_set_burst_len(spi_imx, burst_len * 8); 1150 1151 spi_imx->remainder = burst_len; 1152 } else { 1153 spi_imx->remainder = spi_imx_bytes_per_word(spi_imx->bits_per_word); 1154 } 1155 } 1156 1157 while (spi_imx->txfifo < spi_imx->devtype_data->fifo_size) { 1158 if (!spi_imx->count) 1159 break; 1160 if (spi_imx->dynamic_burst && 1161 spi_imx->txfifo >= DIV_ROUND_UP(spi_imx->remainder, 4)) 1162 break; 1163 spi_imx->tx(spi_imx); 1164 spi_imx->txfifo++; 1165 } 1166 1167 if (!spi_imx->target_mode) 1168 spi_imx->devtype_data->trigger(spi_imx); 1169 } 1170 1171 static irqreturn_t spi_imx_isr(int irq, void *dev_id) 1172 { 1173 struct spi_imx_data *spi_imx = dev_id; 1174 1175 while (spi_imx->txfifo && 1176 spi_imx->devtype_data->rx_available(spi_imx)) { 1177 spi_imx->rx(spi_imx); 1178 spi_imx->txfifo--; 1179 } 1180 1181 if (spi_imx->count) { 1182 spi_imx_push(spi_imx); 1183 return IRQ_HANDLED; 1184 } 1185 1186 if (spi_imx->txfifo) { 1187 /* No data left to push, but still waiting for rx data, 1188 * enable receive data available interrupt. 
1189 */ 1190 spi_imx->devtype_data->intctrl( 1191 spi_imx, MXC_INT_RR); 1192 return IRQ_HANDLED; 1193 } 1194 1195 spi_imx->devtype_data->intctrl(spi_imx, 0); 1196 complete(&spi_imx->xfer_done); 1197 1198 return IRQ_HANDLED; 1199 } 1200 1201 static int spi_imx_dma_configure(struct spi_controller *controller) 1202 { 1203 int ret; 1204 enum dma_slave_buswidth buswidth; 1205 struct dma_slave_config rx = {}, tx = {}; 1206 struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller); 1207 1208 switch (spi_imx_bytes_per_word(spi_imx->bits_per_word)) { 1209 case 4: 1210 buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES; 1211 break; 1212 case 2: 1213 buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES; 1214 break; 1215 case 1: 1216 buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE; 1217 break; 1218 default: 1219 return -EINVAL; 1220 } 1221 1222 tx.direction = DMA_MEM_TO_DEV; 1223 tx.dst_addr = spi_imx->base_phys + MXC_CSPITXDATA; 1224 tx.dst_addr_width = buswidth; 1225 tx.dst_maxburst = spi_imx->wml; 1226 ret = dmaengine_slave_config(controller->dma_tx, &tx); 1227 if (ret) { 1228 dev_err(spi_imx->dev, "TX dma configuration failed with %d\n", ret); 1229 return ret; 1230 } 1231 1232 rx.direction = DMA_DEV_TO_MEM; 1233 rx.src_addr = spi_imx->base_phys + MXC_CSPIRXDATA; 1234 rx.src_addr_width = buswidth; 1235 rx.src_maxburst = spi_imx->wml; 1236 ret = dmaengine_slave_config(controller->dma_rx, &rx); 1237 if (ret) { 1238 dev_err(spi_imx->dev, "RX dma configuration failed with %d\n", ret); 1239 return ret; 1240 } 1241 1242 return 0; 1243 } 1244 1245 static int spi_imx_setupxfer(struct spi_device *spi, 1246 struct spi_transfer *t) 1247 { 1248 struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller); 1249 1250 if (!t) 1251 return 0; 1252 1253 if (!t->speed_hz) { 1254 if (!spi->max_speed_hz) { 1255 dev_err(&spi->dev, "no speed_hz provided!\n"); 1256 return -EINVAL; 1257 } 1258 dev_dbg(&spi->dev, "using spi->max_speed_hz!\n"); 1259 spi_imx->spi_bus_clk = spi->max_speed_hz; 1260 } else 1261 
spi_imx->spi_bus_clk = t->speed_hz; 1262 1263 spi_imx->bits_per_word = t->bits_per_word; 1264 spi_imx->count = t->len; 1265 1266 /* 1267 * Initialize the functions for transfer. To transfer non byte-aligned 1268 * words, we have to use multiple word-size bursts, we can't use 1269 * dynamic_burst in that case. 1270 */ 1271 if (spi_imx->devtype_data->dynamic_burst && !spi_imx->target_mode && 1272 !(spi->mode & SPI_CS_WORD) && 1273 (spi_imx->bits_per_word == 8 || 1274 spi_imx->bits_per_word == 16 || 1275 spi_imx->bits_per_word == 32)) { 1276 1277 spi_imx->rx = spi_imx_buf_rx_swap; 1278 spi_imx->tx = spi_imx_buf_tx_swap; 1279 spi_imx->dynamic_burst = 1; 1280 1281 } else { 1282 if (spi_imx->bits_per_word <= 8) { 1283 spi_imx->rx = spi_imx_buf_rx_u8; 1284 spi_imx->tx = spi_imx_buf_tx_u8; 1285 } else if (spi_imx->bits_per_word <= 16) { 1286 spi_imx->rx = spi_imx_buf_rx_u16; 1287 spi_imx->tx = spi_imx_buf_tx_u16; 1288 } else { 1289 spi_imx->rx = spi_imx_buf_rx_u32; 1290 spi_imx->tx = spi_imx_buf_tx_u32; 1291 } 1292 spi_imx->dynamic_burst = 0; 1293 } 1294 1295 if (spi_imx_can_dma(spi_imx->controller, spi, t)) 1296 spi_imx->usedma = true; 1297 else 1298 spi_imx->usedma = false; 1299 1300 spi_imx->rx_only = ((t->tx_buf == NULL) 1301 || (t->tx_buf == spi->controller->dummy_tx)); 1302 1303 if (is_imx53_ecspi(spi_imx) && spi_imx->target_mode) { 1304 spi_imx->rx = mx53_ecspi_rx_target; 1305 spi_imx->tx = mx53_ecspi_tx_target; 1306 spi_imx->target_burst = t->len; 1307 } 1308 1309 spi_imx->devtype_data->prepare_transfer(spi_imx, spi); 1310 1311 return 0; 1312 } 1313 1314 static void spi_imx_sdma_exit(struct spi_imx_data *spi_imx) 1315 { 1316 struct spi_controller *controller = spi_imx->controller; 1317 1318 if (controller->dma_rx) { 1319 dma_release_channel(controller->dma_rx); 1320 controller->dma_rx = NULL; 1321 } 1322 1323 if (controller->dma_tx) { 1324 dma_release_channel(controller->dma_tx); 1325 controller->dma_tx = NULL; 1326 } 1327 } 1328 1329 static int 
spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx, 1330 struct spi_controller *controller) 1331 { 1332 int ret; 1333 1334 spi_imx->wml = spi_imx->devtype_data->fifo_size / 2; 1335 1336 /* Prepare for TX DMA: */ 1337 controller->dma_tx = dma_request_chan(dev, "tx"); 1338 if (IS_ERR(controller->dma_tx)) { 1339 ret = PTR_ERR(controller->dma_tx); 1340 dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret); 1341 controller->dma_tx = NULL; 1342 goto err; 1343 } 1344 1345 /* Prepare for RX : */ 1346 controller->dma_rx = dma_request_chan(dev, "rx"); 1347 if (IS_ERR(controller->dma_rx)) { 1348 ret = PTR_ERR(controller->dma_rx); 1349 dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret); 1350 controller->dma_rx = NULL; 1351 goto err; 1352 } 1353 1354 init_completion(&spi_imx->dma_rx_completion); 1355 init_completion(&spi_imx->dma_tx_completion); 1356 controller->can_dma = spi_imx_can_dma; 1357 controller->max_dma_len = MAX_SDMA_BD_BYTES; 1358 spi_imx->controller->flags = SPI_CONTROLLER_MUST_RX | 1359 SPI_CONTROLLER_MUST_TX; 1360 1361 return 0; 1362 err: 1363 spi_imx_sdma_exit(spi_imx); 1364 return ret; 1365 } 1366 1367 static void spi_imx_dma_rx_callback(void *cookie) 1368 { 1369 struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie; 1370 1371 complete(&spi_imx->dma_rx_completion); 1372 } 1373 1374 static void spi_imx_dma_tx_callback(void *cookie) 1375 { 1376 struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie; 1377 1378 complete(&spi_imx->dma_tx_completion); 1379 } 1380 1381 static int spi_imx_calculate_timeout(struct spi_imx_data *spi_imx, int size) 1382 { 1383 unsigned long timeout = 0; 1384 1385 /* Time with actual data transfer and CS change delay related to HW */ 1386 timeout = (8 + 4) * size / spi_imx->spi_bus_clk; 1387 1388 /* Add extra second for scheduler related activities */ 1389 timeout += 1; 1390 1391 /* Double calculated timeout */ 1392 return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC); 1393 } 1394 1395 static 
int spi_imx_dma_transfer(struct spi_imx_data *spi_imx, 1396 struct spi_transfer *transfer) 1397 { 1398 struct dma_async_tx_descriptor *desc_tx, *desc_rx; 1399 unsigned long transfer_timeout; 1400 unsigned long timeout; 1401 struct spi_controller *controller = spi_imx->controller; 1402 struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg; 1403 struct scatterlist *last_sg = sg_last(rx->sgl, rx->nents); 1404 unsigned int bytes_per_word, i; 1405 int ret; 1406 1407 /* Get the right burst length from the last sg to ensure no tail data */ 1408 bytes_per_word = spi_imx_bytes_per_word(transfer->bits_per_word); 1409 for (i = spi_imx->devtype_data->fifo_size / 2; i > 0; i--) { 1410 if (!(sg_dma_len(last_sg) % (i * bytes_per_word))) 1411 break; 1412 } 1413 /* Use 1 as wml in case no available burst length got */ 1414 if (i == 0) 1415 i = 1; 1416 1417 spi_imx->wml = i; 1418 1419 ret = spi_imx_dma_configure(controller); 1420 if (ret) 1421 goto dma_failure_no_start; 1422 1423 if (!spi_imx->devtype_data->setup_wml) { 1424 dev_err(spi_imx->dev, "No setup_wml()?\n"); 1425 ret = -EINVAL; 1426 goto dma_failure_no_start; 1427 } 1428 spi_imx->devtype_data->setup_wml(spi_imx); 1429 1430 /* 1431 * The TX DMA setup starts the transfer, so make sure RX is configured 1432 * before TX. 
1433 */ 1434 desc_rx = dmaengine_prep_slave_sg(controller->dma_rx, 1435 rx->sgl, rx->nents, DMA_DEV_TO_MEM, 1436 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1437 if (!desc_rx) { 1438 ret = -EINVAL; 1439 goto dma_failure_no_start; 1440 } 1441 1442 desc_rx->callback = spi_imx_dma_rx_callback; 1443 desc_rx->callback_param = (void *)spi_imx; 1444 dmaengine_submit(desc_rx); 1445 reinit_completion(&spi_imx->dma_rx_completion); 1446 dma_async_issue_pending(controller->dma_rx); 1447 1448 desc_tx = dmaengine_prep_slave_sg(controller->dma_tx, 1449 tx->sgl, tx->nents, DMA_MEM_TO_DEV, 1450 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1451 if (!desc_tx) { 1452 dmaengine_terminate_all(controller->dma_tx); 1453 dmaengine_terminate_all(controller->dma_rx); 1454 return -EINVAL; 1455 } 1456 1457 desc_tx->callback = spi_imx_dma_tx_callback; 1458 desc_tx->callback_param = (void *)spi_imx; 1459 dmaengine_submit(desc_tx); 1460 reinit_completion(&spi_imx->dma_tx_completion); 1461 dma_async_issue_pending(controller->dma_tx); 1462 1463 transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len); 1464 1465 /* Wait SDMA to finish the data transfer.*/ 1466 timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion, 1467 transfer_timeout); 1468 if (!timeout) { 1469 dev_err(spi_imx->dev, "I/O Error in DMA TX\n"); 1470 dmaengine_terminate_all(controller->dma_tx); 1471 dmaengine_terminate_all(controller->dma_rx); 1472 return -ETIMEDOUT; 1473 } 1474 1475 timeout = wait_for_completion_timeout(&spi_imx->dma_rx_completion, 1476 transfer_timeout); 1477 if (!timeout) { 1478 dev_err(&controller->dev, "I/O Error in DMA RX\n"); 1479 spi_imx->devtype_data->reset(spi_imx); 1480 dmaengine_terminate_all(controller->dma_rx); 1481 return -ETIMEDOUT; 1482 } 1483 1484 return 0; 1485 /* fallback to pio */ 1486 dma_failure_no_start: 1487 transfer->error |= SPI_TRANS_FAIL_NO_START; 1488 return ret; 1489 } 1490 1491 static int spi_imx_pio_transfer(struct spi_device *spi, 1492 struct spi_transfer *transfer) 1493 { 1494 
struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller); 1495 unsigned long transfer_timeout; 1496 unsigned long timeout; 1497 1498 spi_imx->tx_buf = transfer->tx_buf; 1499 spi_imx->rx_buf = transfer->rx_buf; 1500 spi_imx->count = transfer->len; 1501 spi_imx->txfifo = 0; 1502 spi_imx->remainder = 0; 1503 1504 reinit_completion(&spi_imx->xfer_done); 1505 1506 spi_imx_push(spi_imx); 1507 1508 spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE); 1509 1510 transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len); 1511 1512 timeout = wait_for_completion_timeout(&spi_imx->xfer_done, 1513 transfer_timeout); 1514 if (!timeout) { 1515 dev_err(&spi->dev, "I/O Error in PIO\n"); 1516 spi_imx->devtype_data->reset(spi_imx); 1517 return -ETIMEDOUT; 1518 } 1519 1520 return 0; 1521 } 1522 1523 static int spi_imx_poll_transfer(struct spi_device *spi, 1524 struct spi_transfer *transfer) 1525 { 1526 struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller); 1527 unsigned long timeout; 1528 1529 spi_imx->tx_buf = transfer->tx_buf; 1530 spi_imx->rx_buf = transfer->rx_buf; 1531 spi_imx->count = transfer->len; 1532 spi_imx->txfifo = 0; 1533 spi_imx->remainder = 0; 1534 1535 /* fill in the fifo before timeout calculations if we are 1536 * interrupted here, then the data is getting transferred by 1537 * the HW while we are interrupted 1538 */ 1539 spi_imx_push(spi_imx); 1540 1541 timeout = spi_imx_calculate_timeout(spi_imx, transfer->len) + jiffies; 1542 while (spi_imx->txfifo) { 1543 /* RX */ 1544 while (spi_imx->txfifo && 1545 spi_imx->devtype_data->rx_available(spi_imx)) { 1546 spi_imx->rx(spi_imx); 1547 spi_imx->txfifo--; 1548 } 1549 1550 /* TX */ 1551 if (spi_imx->count) { 1552 spi_imx_push(spi_imx); 1553 continue; 1554 } 1555 1556 if (spi_imx->txfifo && 1557 time_after(jiffies, timeout)) { 1558 1559 dev_err_ratelimited(&spi->dev, 1560 "timeout period reached: jiffies: %lu- falling back to interrupt mode\n", 1561 jiffies - timeout); 1562 
1563 /* fall back to interrupt mode */ 1564 return spi_imx_pio_transfer(spi, transfer); 1565 } 1566 } 1567 1568 return 0; 1569 } 1570 1571 static int spi_imx_pio_transfer_target(struct spi_device *spi, 1572 struct spi_transfer *transfer) 1573 { 1574 struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller); 1575 int ret = 0; 1576 1577 if (is_imx53_ecspi(spi_imx) && 1578 transfer->len > MX53_MAX_TRANSFER_BYTES) { 1579 dev_err(&spi->dev, "Transaction too big, max size is %d bytes\n", 1580 MX53_MAX_TRANSFER_BYTES); 1581 return -EMSGSIZE; 1582 } 1583 1584 spi_imx->tx_buf = transfer->tx_buf; 1585 spi_imx->rx_buf = transfer->rx_buf; 1586 spi_imx->count = transfer->len; 1587 spi_imx->txfifo = 0; 1588 spi_imx->remainder = 0; 1589 1590 reinit_completion(&spi_imx->xfer_done); 1591 spi_imx->target_aborted = false; 1592 1593 spi_imx_push(spi_imx); 1594 1595 spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE | MXC_INT_RDR); 1596 1597 if (wait_for_completion_interruptible(&spi_imx->xfer_done) || 1598 spi_imx->target_aborted) { 1599 dev_dbg(&spi->dev, "interrupted\n"); 1600 ret = -EINTR; 1601 } 1602 1603 /* ecspi has a HW issue when works in Target mode, 1604 * after 64 words writtern to TXFIFO, even TXFIFO becomes empty, 1605 * ECSPI_TXDATA keeps shift out the last word data, 1606 * so we have to disable ECSPI when in target mode after the 1607 * transfer completes 1608 */ 1609 if (spi_imx->devtype_data->disable) 1610 spi_imx->devtype_data->disable(spi_imx); 1611 1612 return ret; 1613 } 1614 1615 static int spi_imx_transfer_one(struct spi_controller *controller, 1616 struct spi_device *spi, 1617 struct spi_transfer *transfer) 1618 { 1619 struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller); 1620 unsigned long hz_per_byte, byte_limit; 1621 1622 spi_imx_setupxfer(spi, transfer); 1623 transfer->effective_speed_hz = spi_imx->spi_bus_clk; 1624 1625 /* flush rxfifo before transfer */ 1626 while (spi_imx->devtype_data->rx_available(spi_imx)) 1627 
readl(spi_imx->base + MXC_CSPIRXDATA); 1628 1629 if (spi_imx->target_mode) 1630 return spi_imx_pio_transfer_target(spi, transfer); 1631 1632 /* 1633 * If we decided in spi_imx_can_dma() that we want to do a DMA 1634 * transfer, the SPI transfer has already been mapped, so we 1635 * have to do the DMA transfer here. 1636 */ 1637 if (spi_imx->usedma) 1638 return spi_imx_dma_transfer(spi_imx, transfer); 1639 /* 1640 * Calculate the estimated time in us the transfer runs. Find 1641 * the number of Hz per byte per polling limit. 1642 */ 1643 hz_per_byte = polling_limit_us ? ((8 + 4) * USEC_PER_SEC) / polling_limit_us : 0; 1644 byte_limit = hz_per_byte ? transfer->effective_speed_hz / hz_per_byte : 1; 1645 1646 /* run in polling mode for short transfers */ 1647 if (transfer->len < byte_limit) 1648 return spi_imx_poll_transfer(spi, transfer); 1649 1650 return spi_imx_pio_transfer(spi, transfer); 1651 } 1652 1653 static int spi_imx_setup(struct spi_device *spi) 1654 { 1655 dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__, 1656 spi->mode, spi->bits_per_word, spi->max_speed_hz); 1657 1658 return 0; 1659 } 1660 1661 static void spi_imx_cleanup(struct spi_device *spi) 1662 { 1663 } 1664 1665 static int 1666 spi_imx_prepare_message(struct spi_controller *controller, struct spi_message *msg) 1667 { 1668 struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller); 1669 int ret; 1670 1671 ret = pm_runtime_resume_and_get(spi_imx->dev); 1672 if (ret < 0) { 1673 dev_err(spi_imx->dev, "failed to enable clock\n"); 1674 return ret; 1675 } 1676 1677 ret = spi_imx->devtype_data->prepare_message(spi_imx, msg); 1678 if (ret) { 1679 pm_runtime_mark_last_busy(spi_imx->dev); 1680 pm_runtime_put_autosuspend(spi_imx->dev); 1681 } 1682 1683 return ret; 1684 } 1685 1686 static int 1687 spi_imx_unprepare_message(struct spi_controller *controller, struct spi_message *msg) 1688 { 1689 struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller); 1690 1691 
pm_runtime_mark_last_busy(spi_imx->dev); 1692 pm_runtime_put_autosuspend(spi_imx->dev); 1693 return 0; 1694 } 1695 1696 static int spi_imx_target_abort(struct spi_controller *controller) 1697 { 1698 struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller); 1699 1700 spi_imx->target_aborted = true; 1701 complete(&spi_imx->xfer_done); 1702 1703 return 0; 1704 } 1705 1706 static int spi_imx_probe(struct platform_device *pdev) 1707 { 1708 struct device_node *np = pdev->dev.of_node; 1709 struct spi_controller *controller; 1710 struct spi_imx_data *spi_imx; 1711 struct resource *res; 1712 int ret, irq, spi_drctl; 1713 const struct spi_imx_devtype_data *devtype_data = 1714 of_device_get_match_data(&pdev->dev); 1715 bool target_mode; 1716 u32 val; 1717 1718 target_mode = devtype_data->has_targetmode && 1719 of_property_read_bool(np, "spi-slave"); 1720 if (target_mode) 1721 controller = spi_alloc_target(&pdev->dev, 1722 sizeof(struct spi_imx_data)); 1723 else 1724 controller = spi_alloc_host(&pdev->dev, 1725 sizeof(struct spi_imx_data)); 1726 if (!controller) 1727 return -ENOMEM; 1728 1729 ret = of_property_read_u32(np, "fsl,spi-rdy-drctl", &spi_drctl); 1730 if ((ret < 0) || (spi_drctl >= 0x3)) { 1731 /* '11' is reserved */ 1732 spi_drctl = 0; 1733 } 1734 1735 platform_set_drvdata(pdev, controller); 1736 1737 controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32); 1738 controller->bus_num = np ? -1 : pdev->id; 1739 controller->use_gpio_descriptors = true; 1740 1741 spi_imx = spi_controller_get_devdata(controller); 1742 spi_imx->controller = controller; 1743 spi_imx->dev = &pdev->dev; 1744 spi_imx->target_mode = target_mode; 1745 1746 spi_imx->devtype_data = devtype_data; 1747 1748 /* 1749 * Get number of chip selects from device properties. This can be 1750 * coming from device tree or boardfiles, if it is not defined, 1751 * a default value of 3 chip selects will be used, as all the legacy 1752 * board files have <= 3 chip selects. 
1753 */ 1754 if (!device_property_read_u32(&pdev->dev, "num-cs", &val)) 1755 controller->num_chipselect = val; 1756 else 1757 controller->num_chipselect = 3; 1758 1759 controller->transfer_one = spi_imx_transfer_one; 1760 controller->setup = spi_imx_setup; 1761 controller->cleanup = spi_imx_cleanup; 1762 controller->prepare_message = spi_imx_prepare_message; 1763 controller->unprepare_message = spi_imx_unprepare_message; 1764 controller->target_abort = spi_imx_target_abort; 1765 controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_NO_CS | 1766 SPI_MOSI_IDLE_LOW; 1767 1768 if (is_imx35_cspi(spi_imx) || is_imx51_ecspi(spi_imx) || 1769 is_imx53_ecspi(spi_imx)) 1770 controller->mode_bits |= SPI_LOOP | SPI_READY; 1771 1772 if (is_imx51_ecspi(spi_imx) || is_imx53_ecspi(spi_imx)) 1773 controller->mode_bits |= SPI_RX_CPHA_FLIP; 1774 1775 if (is_imx51_ecspi(spi_imx) && 1776 device_property_read_u32(&pdev->dev, "cs-gpios", NULL)) 1777 /* 1778 * When using HW-CS implementing SPI_CS_WORD can be done by just 1779 * setting the burst length to the word size. This is 1780 * considerably faster than manually controlling the CS. 
1781 */ 1782 controller->mode_bits |= SPI_CS_WORD; 1783 1784 if (is_imx51_ecspi(spi_imx) || is_imx53_ecspi(spi_imx)) { 1785 controller->max_native_cs = 4; 1786 controller->flags |= SPI_CONTROLLER_GPIO_SS; 1787 } 1788 1789 spi_imx->spi_drctl = spi_drctl; 1790 1791 init_completion(&spi_imx->xfer_done); 1792 1793 spi_imx->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); 1794 if (IS_ERR(spi_imx->base)) { 1795 ret = PTR_ERR(spi_imx->base); 1796 goto out_controller_put; 1797 } 1798 spi_imx->base_phys = res->start; 1799 1800 irq = platform_get_irq(pdev, 0); 1801 if (irq < 0) { 1802 ret = irq; 1803 goto out_controller_put; 1804 } 1805 1806 ret = devm_request_irq(&pdev->dev, irq, spi_imx_isr, 0, 1807 dev_name(&pdev->dev), spi_imx); 1808 if (ret) { 1809 dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret); 1810 goto out_controller_put; 1811 } 1812 1813 spi_imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); 1814 if (IS_ERR(spi_imx->clk_ipg)) { 1815 ret = PTR_ERR(spi_imx->clk_ipg); 1816 goto out_controller_put; 1817 } 1818 1819 spi_imx->clk_per = devm_clk_get(&pdev->dev, "per"); 1820 if (IS_ERR(spi_imx->clk_per)) { 1821 ret = PTR_ERR(spi_imx->clk_per); 1822 goto out_controller_put; 1823 } 1824 1825 ret = clk_prepare_enable(spi_imx->clk_per); 1826 if (ret) 1827 goto out_controller_put; 1828 1829 ret = clk_prepare_enable(spi_imx->clk_ipg); 1830 if (ret) 1831 goto out_put_per; 1832 1833 pm_runtime_set_autosuspend_delay(spi_imx->dev, MXC_RPM_TIMEOUT); 1834 pm_runtime_use_autosuspend(spi_imx->dev); 1835 pm_runtime_get_noresume(spi_imx->dev); 1836 pm_runtime_set_active(spi_imx->dev); 1837 pm_runtime_enable(spi_imx->dev); 1838 1839 spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per); 1840 /* 1841 * Only validated on i.mx35 and i.mx6 now, can remove the constraint 1842 * if validated on other chips. 
1843 */ 1844 if (spi_imx->devtype_data->has_dmamode) { 1845 ret = spi_imx_sdma_init(&pdev->dev, spi_imx, controller); 1846 if (ret == -EPROBE_DEFER) 1847 goto out_runtime_pm_put; 1848 1849 if (ret < 0) 1850 dev_dbg(&pdev->dev, "dma setup error %d, use pio\n", 1851 ret); 1852 } 1853 1854 spi_imx->devtype_data->reset(spi_imx); 1855 1856 spi_imx->devtype_data->intctrl(spi_imx, 0); 1857 1858 controller->dev.of_node = pdev->dev.of_node; 1859 ret = spi_register_controller(controller); 1860 if (ret) { 1861 dev_err_probe(&pdev->dev, ret, "register controller failed\n"); 1862 goto out_register_controller; 1863 } 1864 1865 pm_runtime_mark_last_busy(spi_imx->dev); 1866 pm_runtime_put_autosuspend(spi_imx->dev); 1867 1868 return ret; 1869 1870 out_register_controller: 1871 if (spi_imx->devtype_data->has_dmamode) 1872 spi_imx_sdma_exit(spi_imx); 1873 out_runtime_pm_put: 1874 pm_runtime_dont_use_autosuspend(spi_imx->dev); 1875 pm_runtime_set_suspended(&pdev->dev); 1876 pm_runtime_disable(spi_imx->dev); 1877 1878 clk_disable_unprepare(spi_imx->clk_ipg); 1879 out_put_per: 1880 clk_disable_unprepare(spi_imx->clk_per); 1881 out_controller_put: 1882 spi_controller_put(controller); 1883 1884 return ret; 1885 } 1886 1887 static void spi_imx_remove(struct platform_device *pdev) 1888 { 1889 struct spi_controller *controller = platform_get_drvdata(pdev); 1890 struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller); 1891 int ret; 1892 1893 spi_unregister_controller(controller); 1894 1895 ret = pm_runtime_get_sync(spi_imx->dev); 1896 if (ret >= 0) 1897 writel(0, spi_imx->base + MXC_CSPICTRL); 1898 else 1899 dev_warn(spi_imx->dev, "failed to enable clock, skip hw disable\n"); 1900 1901 pm_runtime_dont_use_autosuspend(spi_imx->dev); 1902 pm_runtime_put_sync(spi_imx->dev); 1903 pm_runtime_disable(spi_imx->dev); 1904 1905 spi_imx_sdma_exit(spi_imx); 1906 } 1907 1908 static int __maybe_unused spi_imx_runtime_resume(struct device *dev) 1909 { 1910 struct spi_controller *controller = 
dev_get_drvdata(dev); 1911 struct spi_imx_data *spi_imx; 1912 int ret; 1913 1914 spi_imx = spi_controller_get_devdata(controller); 1915 1916 ret = clk_prepare_enable(spi_imx->clk_per); 1917 if (ret) 1918 return ret; 1919 1920 ret = clk_prepare_enable(spi_imx->clk_ipg); 1921 if (ret) { 1922 clk_disable_unprepare(spi_imx->clk_per); 1923 return ret; 1924 } 1925 1926 return 0; 1927 } 1928 1929 static int __maybe_unused spi_imx_runtime_suspend(struct device *dev) 1930 { 1931 struct spi_controller *controller = dev_get_drvdata(dev); 1932 struct spi_imx_data *spi_imx; 1933 1934 spi_imx = spi_controller_get_devdata(controller); 1935 1936 clk_disable_unprepare(spi_imx->clk_per); 1937 clk_disable_unprepare(spi_imx->clk_ipg); 1938 1939 return 0; 1940 } 1941 1942 static int __maybe_unused spi_imx_suspend(struct device *dev) 1943 { 1944 pinctrl_pm_select_sleep_state(dev); 1945 return 0; 1946 } 1947 1948 static int __maybe_unused spi_imx_resume(struct device *dev) 1949 { 1950 pinctrl_pm_select_default_state(dev); 1951 return 0; 1952 } 1953 1954 static const struct dev_pm_ops imx_spi_pm = { 1955 SET_RUNTIME_PM_OPS(spi_imx_runtime_suspend, 1956 spi_imx_runtime_resume, NULL) 1957 SET_SYSTEM_SLEEP_PM_OPS(spi_imx_suspend, spi_imx_resume) 1958 }; 1959 1960 static struct platform_driver spi_imx_driver = { 1961 .driver = { 1962 .name = DRIVER_NAME, 1963 .of_match_table = spi_imx_dt_ids, 1964 .pm = &imx_spi_pm, 1965 }, 1966 .probe = spi_imx_probe, 1967 .remove_new = spi_imx_remove, 1968 }; 1969 module_platform_driver(spi_imx_driver); 1970 1971 MODULE_DESCRIPTION("i.MX SPI Controller driver"); 1972 MODULE_AUTHOR("Sascha Hauer, Pengutronix"); 1973 MODULE_LICENSE("GPL"); 1974 MODULE_ALIAS("platform:" DRIVER_NAME); 1975