// SPDX-License-Identifier: GPL-2.0+
//
// Freescale i.MX7ULP LPSPI driver
//
// Copyright 2016 Freescale Semiconductor, Inc.
// Copyright 2018, 2023, 2025 NXP

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/dma/imx-dma.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/types.h>
#include <linux/minmax.h>

#define DRIVER_NAME "fsl_lpspi"

#define FSL_LPSPI_RPM_TIMEOUT 50 /* 50ms */

/* The maximum bytes that edma can transfer once. */
#define FSL_LPSPI_MAX_EDMA_BYTES  ((1 << 15) - 1)

/* i.MX7ULP LPSPI registers */
#define IMX7ULP_VERID	0x0
#define IMX7ULP_PARAM	0x4
#define IMX7ULP_CR	0x10
#define IMX7ULP_SR	0x14
#define IMX7ULP_IER	0x18
#define IMX7ULP_DER	0x1c
#define IMX7ULP_CFGR0	0x20
#define IMX7ULP_CFGR1	0x24
#define IMX7ULP_DMR0	0x30
#define IMX7ULP_DMR1	0x34
#define IMX7ULP_CCR	0x40
#define IMX7ULP_FCR	0x58
#define IMX7ULP_FSR	0x5c
#define IMX7ULP_TCR	0x60
#define IMX7ULP_TDR	0x64
#define IMX7ULP_RSR	0x70
#define IMX7ULP_RDR	0x74

/* General control register field define */
#define CR_RRF		BIT(9)
#define CR_RTF		BIT(8)
#define CR_RST		BIT(1)
#define CR_MEN		BIT(0)
#define SR_MBF		BIT(24)
#define SR_TCF		BIT(10)
#define SR_FCF		BIT(9)
#define SR_RDF		BIT(1)
#define SR_TDF		BIT(0)
#define IER_TCIE	BIT(10)
#define IER_FCIE	BIT(9)
#define IER_RDIE	BIT(1)
#define IER_TDIE	BIT(0)
#define DER_RDDE	BIT(1)
#define DER_TDDE	BIT(0)
#define CFGR1_PCSCFG	BIT(27)
#define CFGR1_PINCFG	(BIT(24)|BIT(25))
#define CFGR1_PCSPOL_MASK	GENMASK(11, 8)
#define CFGR1_NOSTALL	BIT(3)
#define CFGR1_HOST	BIT(0)
#define FSR_TXCOUNT	(0xFF)
#define RSR_RXEMPTY	BIT(1)
#define TCR_CPOL	BIT(31)
#define TCR_CPHA	BIT(30)
#define TCR_CONT	BIT(21)
#define TCR_CONTC	BIT(20)
#define TCR_RXMSK	BIT(19)
#define TCR_TXMSK	BIT(18)

#define SR_CLEAR_MASK	GENMASK(13, 8)

struct fsl_lpspi_devtype_data {
	u8 prescale_max : 3; /* 0 == no limit */
	bool query_hw_for_num_cs : 1;
};

struct lpspi_config {
	u8 bpw;
	u8 chip_select;
	u8 prescale;
	u16 mode;
	u32 speed_hz;
	u32 effective_speed_hz;
};

struct fsl_lpspi_data {
	struct device *dev;
	void __iomem *base;
	unsigned long base_phys;
	struct clk *clk_ipg;
	struct clk *clk_per;
	bool is_target;
	bool is_only_cs1;
	bool is_first_byte;

	void *rx_buf;
	const void *tx_buf;
	void (*tx)(struct fsl_lpspi_data *);
	void (*rx)(struct fsl_lpspi_data *);

	u32 remain;
	u8 watermark;
	u8 txfifosize;
	u8 rxfifosize;

	struct lpspi_config config;
	struct completion xfer_done;

	bool target_aborted;

	/* DMA */
	bool usedma;
	struct completion dma_rx_completion;
	struct completion dma_tx_completion;

	const struct fsl_lpspi_devtype_data *devtype_data;
};

/*
 * Devices with ERR051608 have a max TCR_PRESCALE value of 1, otherwise there is
 * no prescale limit: https://www.nxp.com/docs/en/errata/i.MX93_1P87f.pdf
 */
static const struct fsl_lpspi_devtype_data imx93_lpspi_devtype_data = {
	.prescale_max = 1,
	.query_hw_for_num_cs = true,
};

static const struct fsl_lpspi_devtype_data imx7ulp_lpspi_devtype_data = {
	/* All defaults */
};

static const struct fsl_lpspi_devtype_data s32g_lpspi_devtype_data = {
	.query_hw_for_num_cs = true,
};

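/*
 * Sketch of a device tree node consumed by this driver (illustrative only;
 * the unit address, interrupt specifier and clock/DMA phandle values below
 * are made-up placeholders and the DT binding document is authoritative).
 * Clock names "per" and "ipg", DMA names "tx" and "rx", and the optional
 * "spi-slave", "fsl,spi-only-use-cs1-sel" and "num-cs" properties match what
 * the probe code below looks up.
 *
 *	lpspi1: spi@44360000 {
 *		compatible = "fsl,imx93-spi";
 *		reg = <0x44360000 0x10000>;
 *		interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
 *		clocks = <&clks 101>, <&clks 102>;
 *		clock-names = "per", "ipg";
 *		dmas = <&edma1 8 0 0>, <&edma1 9 0 1>;
 *		dma-names = "tx", "rx";
 *		num-cs = <1>;
 *	};
 */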
static const struct of_device_id fsl_lpspi_dt_ids[] = {
	{ .compatible = "fsl,imx7ulp-spi", .data = &imx7ulp_lpspi_devtype_data,},
	{ .compatible = "fsl,imx93-spi", .data = &imx93_lpspi_devtype_data,},
	{ .compatible = "nxp,s32g2-lpspi", .data = &s32g_lpspi_devtype_data,},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_lpspi_dt_ids);

#define LPSPI_BUF_RX(type)						\
static void fsl_lpspi_buf_rx_##type(struct fsl_lpspi_data *fsl_lpspi)	\
{									\
	unsigned int val = readl(fsl_lpspi->base + IMX7ULP_RDR);	\
									\
	if (fsl_lpspi->rx_buf) {					\
		*(type *)fsl_lpspi->rx_buf = val;			\
		fsl_lpspi->rx_buf += sizeof(type);			\
	}								\
}

#define LPSPI_BUF_TX(type)						\
static void fsl_lpspi_buf_tx_##type(struct fsl_lpspi_data *fsl_lpspi)	\
{									\
	type val = 0;							\
									\
	if (fsl_lpspi->tx_buf) {					\
		val = *(type *)fsl_lpspi->tx_buf;			\
		fsl_lpspi->tx_buf += sizeof(type);			\
	}								\
									\
	fsl_lpspi->remain -= sizeof(type);				\
	writel(val, fsl_lpspi->base + IMX7ULP_TDR);			\
}

LPSPI_BUF_RX(u8)
LPSPI_BUF_TX(u8)
LPSPI_BUF_RX(u16)
LPSPI_BUF_TX(u16)
LPSPI_BUF_RX(u32)
LPSPI_BUF_TX(u32)

static void fsl_lpspi_intctrl(struct fsl_lpspi_data *fsl_lpspi,
			      unsigned int enable)
{
	writel(enable, fsl_lpspi->base + IMX7ULP_IER);
}

static int fsl_lpspi_bytes_per_word(const int bpw)
{
	return DIV_ROUND_UP(bpw, BITS_PER_BYTE);
}

static bool fsl_lpspi_can_dma(struct spi_controller *controller,
			      struct spi_device *spi,
			      struct spi_transfer *transfer)
{
	unsigned int bytes_per_word;

	if (!controller->dma_rx)
		return false;

	bytes_per_word = fsl_lpspi_bytes_per_word(transfer->bits_per_word);

	switch (bytes_per_word) {
	case 1:
	case 2:
	case 4:
		break;
	default:
		return false;
	}

	return true;
}

static int lpspi_prepare_xfer_hardware(struct spi_controller *controller)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);
	int ret;

	ret = pm_runtime_resume_and_get(fsl_lpspi->dev);
	if (ret < 0) {
		dev_err(fsl_lpspi->dev, "failed to enable clock\n");
		return ret;
	}

	return 0;
}

static int lpspi_unprepare_xfer_hardware(struct spi_controller *controller)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);

	pm_runtime_put_autosuspend(fsl_lpspi->dev);

	return 0;
}

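/*
 * Top up the TX FIFO from the current transfer. If the remaining data was all
 * written out before the FIFO filled up, clear TCR_CONTC in host mode so PCS
 * deasserts when the frame completes and wait for the frame-complete
 * interrupt; otherwise keep the TX-data interrupt armed so the FIFO is
 * refilled again when the watermark is reached.
 */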
static void fsl_lpspi_write_tx_fifo(struct fsl_lpspi_data *fsl_lpspi)
{
	u8 txfifo_cnt;
	u32 temp;

	txfifo_cnt = readl(fsl_lpspi->base + IMX7ULP_FSR) & 0xff;

	while (txfifo_cnt < fsl_lpspi->txfifosize) {
		if (!fsl_lpspi->remain)
			break;
		fsl_lpspi->tx(fsl_lpspi);
		txfifo_cnt++;
	}

	if (txfifo_cnt < fsl_lpspi->txfifosize) {
		if (!fsl_lpspi->is_target) {
			temp = readl(fsl_lpspi->base + IMX7ULP_TCR);
			temp &= ~TCR_CONTC;
			writel(temp, fsl_lpspi->base + IMX7ULP_TCR);
		}

		fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE);
	} else
		fsl_lpspi_intctrl(fsl_lpspi, IER_TDIE);
}

static void fsl_lpspi_read_rx_fifo(struct fsl_lpspi_data *fsl_lpspi)
{
	while (!(readl(fsl_lpspi->base + IMX7ULP_RSR) & RSR_RXEMPTY))
		fsl_lpspi->rx(fsl_lpspi);
}

static void fsl_lpspi_set_cmd(struct fsl_lpspi_data *fsl_lpspi,
			      struct spi_device *spi)
{
	u32 temp = 0;

	temp |= fsl_lpspi->config.bpw - 1;
	temp |= (fsl_lpspi->config.mode & 0x3) << 30;
	temp |= (fsl_lpspi->config.chip_select & 0x3) << 24;
	if (!fsl_lpspi->is_target) {
		temp |= fsl_lpspi->config.prescale << 27;
		/*
		 * Setting TCR_CONT keeps SS asserted after the current
		 * transfer. For the first transfer, clear TCR_CONTC to
		 * assert SS. For subsequent transfers, set TCR_CONTC to
		 * keep SS asserted.
		 */
		if (!fsl_lpspi->usedma) {
			temp |= TCR_CONT;
			if (fsl_lpspi->is_first_byte)
				temp &= ~TCR_CONTC;
			else
				temp |= TCR_CONTC;
		}
	}

	if (spi->mode & SPI_CPOL)
		temp |= TCR_CPOL;

	if (spi->mode & SPI_CPHA)
		temp |= TCR_CPHA;

	writel(temp, fsl_lpspi->base + IMX7ULP_TCR);

	dev_dbg(fsl_lpspi->dev, "TCR=0x%x\n", temp);
}

static void fsl_lpspi_set_watermark(struct fsl_lpspi_data *fsl_lpspi)
{
	u32 temp;

	if (!fsl_lpspi->usedma)
		temp = fsl_lpspi->watermark >> 1 |
		       (fsl_lpspi->watermark >> 1) << 16;
	else
		temp = fsl_lpspi->watermark >> 1;

	writel(temp, fsl_lpspi->base + IMX7ULP_FCR);

	dev_dbg(fsl_lpspi->dev, "FCR=0x%x\n", temp);
}

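/*
 * Derive the TCR prescaler and the SCK divider written to CCR from the
 * requested speed. As a worked example (numbers picked purely for
 * illustration): with a 24 MHz per-clock and a 1 MHz transfer,
 * div = DIV_ROUND_UP(24000000, 1000000) = 24, prescale 0 already gives
 * scldiv = 24 - 2 = 22 within [0, 255], and the resulting SCK is
 * 24 MHz / (22 + 2) = 1 MHz.
 */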
static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
{
	struct lpspi_config config = fsl_lpspi->config;
	unsigned int perclk_rate, div;
	u8 prescale_max;
	u8 prescale;
	int scldiv;

	perclk_rate = clk_get_rate(fsl_lpspi->clk_per);
	prescale_max = fsl_lpspi->devtype_data->prescale_max ?: 7;

	if (!config.speed_hz) {
		dev_err(fsl_lpspi->dev,
			"error: the transmission speed provided is 0!\n");
		return -EINVAL;
	}

	if (config.speed_hz > perclk_rate / 2) {
		div = 2;
	} else {
		div = DIV_ROUND_UP(perclk_rate, config.speed_hz);
	}

	for (prescale = 0; prescale <= prescale_max; prescale++) {
		scldiv = div / (1 << prescale) - 2;
		if (scldiv >= 0 && scldiv < 256) {
			fsl_lpspi->config.prescale = prescale;
			break;
		}
	}

	if (scldiv < 0 || scldiv >= 256)
		return -EINVAL;

	writel(scldiv | (scldiv << 8) | ((scldiv >> 1) << 16),
	       fsl_lpspi->base + IMX7ULP_CCR);

	fsl_lpspi->config.effective_speed_hz = perclk_rate / (scldiv + 2) *
					       (1 << prescale);

	dev_dbg(fsl_lpspi->dev, "perclk=%u, speed=%u, prescale=%u, scldiv=%d\n",
		perclk_rate, config.speed_hz, prescale, scldiv);

	return 0;
}

static int fsl_lpspi_dma_configure(struct spi_controller *controller)
{
	int ret;
	enum dma_slave_buswidth buswidth;
	struct dma_slave_config rx = {}, tx = {};
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);

	switch (fsl_lpspi_bytes_per_word(fsl_lpspi->config.bpw)) {
	case 4:
		buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
		break;
	case 2:
		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
		break;
	case 1:
		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
		break;
	default:
		return -EINVAL;
	}

	tx.direction = DMA_MEM_TO_DEV;
	tx.dst_addr = fsl_lpspi->base_phys + IMX7ULP_TDR;
	tx.dst_addr_width = buswidth;
	tx.dst_maxburst = 1;
	ret = dmaengine_slave_config(controller->dma_tx, &tx);
	if (ret) {
		dev_err(fsl_lpspi->dev, "TX dma configuration failed with %d\n",
			ret);
		return ret;
	}

	rx.direction = DMA_DEV_TO_MEM;
	rx.src_addr = fsl_lpspi->base_phys + IMX7ULP_RDR;
	rx.src_addr_width = buswidth;
	rx.src_maxburst = 1;
	ret = dmaengine_slave_config(controller->dma_rx, &rx);
	if (ret) {
		dev_err(fsl_lpspi->dev, "RX dma configuration failed with %d\n",
			ret);
		return ret;
	}

	return 0;
}

static int fsl_lpspi_config(struct fsl_lpspi_data *fsl_lpspi)
{
	u32 temp;
	int ret;

	if (!fsl_lpspi->is_target) {
		ret = fsl_lpspi_set_bitrate(fsl_lpspi);
		if (ret)
			return ret;
	}

	fsl_lpspi_set_watermark(fsl_lpspi);

	if (!fsl_lpspi->is_target)
		temp = CFGR1_HOST;
	else
		temp = CFGR1_PINCFG;
	if (fsl_lpspi->config.mode & SPI_CS_HIGH)
		temp |= FIELD_PREP(CFGR1_PCSPOL_MASK,
				   BIT(fsl_lpspi->config.chip_select));

	writel(temp, fsl_lpspi->base + IMX7ULP_CFGR1);

	temp = readl(fsl_lpspi->base + IMX7ULP_CR);
	temp |= CR_RRF | CR_RTF | CR_MEN;
	writel(temp, fsl_lpspi->base + IMX7ULP_CR);

	temp = 0;
	if (fsl_lpspi->usedma)
		temp = DER_TDDE | DER_RDDE;
	writel(temp, fsl_lpspi->base + IMX7ULP_DER);

	return 0;
}

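/*
 * Capture the per-transfer parameters (mode, word size, speed and chip
 * select), pick the matching PIO FIFO helpers, size the FIFO watermark and
 * push the result to the hardware via fsl_lpspi_config().
 */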
static int fsl_lpspi_setup_transfer(struct spi_controller *controller,
				    struct spi_device *spi,
				    struct spi_transfer *t)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(spi->controller);

	if (t == NULL)
		return -EINVAL;

	fsl_lpspi->config.mode = spi->mode;
	fsl_lpspi->config.bpw = t->bits_per_word;
	fsl_lpspi->config.speed_hz = t->speed_hz;
	if (fsl_lpspi->is_only_cs1)
		fsl_lpspi->config.chip_select = 1;
	else
		fsl_lpspi->config.chip_select = spi_get_chipselect(spi, 0);

	if (!fsl_lpspi->config.speed_hz)
		fsl_lpspi->config.speed_hz = spi->max_speed_hz;
	if (!fsl_lpspi->config.bpw)
		fsl_lpspi->config.bpw = spi->bits_per_word;

	/* Initialize the functions for transfer */
	if (fsl_lpspi->config.bpw <= 8) {
		fsl_lpspi->rx = fsl_lpspi_buf_rx_u8;
		fsl_lpspi->tx = fsl_lpspi_buf_tx_u8;
	} else if (fsl_lpspi->config.bpw <= 16) {
		fsl_lpspi->rx = fsl_lpspi_buf_rx_u16;
		fsl_lpspi->tx = fsl_lpspi_buf_tx_u16;
	} else {
		fsl_lpspi->rx = fsl_lpspi_buf_rx_u32;
		fsl_lpspi->tx = fsl_lpspi_buf_tx_u32;
	}

	fsl_lpspi->watermark = min(fsl_lpspi->txfifosize, t->len);

	return fsl_lpspi_config(fsl_lpspi);
}

static int fsl_lpspi_prepare_message(struct spi_controller *controller,
				     struct spi_message *msg)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);
	struct spi_device *spi = msg->spi;
	struct spi_transfer *t;
	int ret;

	t = list_first_entry_or_null(&msg->transfers, struct spi_transfer,
				     transfer_list);
	if (!t)
		return 0;

	fsl_lpspi->is_first_byte = true;
	fsl_lpspi->usedma = false;
	ret = fsl_lpspi_setup_transfer(controller, spi, t);

	if (fsl_lpspi_can_dma(controller, spi, t))
		fsl_lpspi->usedma = true;
	else
		fsl_lpspi->usedma = false;

	if (ret < 0)
		return ret;

	fsl_lpspi_set_cmd(fsl_lpspi, spi);

	/* No IRQs */
	writel(0, fsl_lpspi->base + IMX7ULP_IER);

	/* Controller disable, clear FIFOs, clear status */
	writel(CR_RRF | CR_RTF, fsl_lpspi->base + IMX7ULP_CR);
	writel(SR_CLEAR_MASK, fsl_lpspi->base + IMX7ULP_SR);

	return 0;
}

static int fsl_lpspi_target_abort(struct spi_controller *controller)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);

	fsl_lpspi->target_aborted = true;
	if (!fsl_lpspi->usedma)
		complete(&fsl_lpspi->xfer_done);
	else {
		complete(&fsl_lpspi->dma_tx_completion);
		complete(&fsl_lpspi->dma_rx_completion);
	}

	return 0;
}

static int fsl_lpspi_wait_for_completion(struct spi_controller *controller)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);

	if (fsl_lpspi->is_target) {
		if (wait_for_completion_interruptible(&fsl_lpspi->xfer_done) ||
		    fsl_lpspi->target_aborted) {
			dev_dbg(fsl_lpspi->dev, "interrupted\n");
			return -EINTR;
		}
	} else {
		if (!wait_for_completion_timeout(&fsl_lpspi->xfer_done, HZ)) {
			dev_dbg(fsl_lpspi->dev, "wait for completion timeout\n");
			return -ETIMEDOUT;
		}
	}

	return 0;
}

static int fsl_lpspi_reset(struct fsl_lpspi_data *fsl_lpspi)
{
	u32 temp;

	if (!fsl_lpspi->usedma) {
		/* Disable all interrupts */
		fsl_lpspi_intctrl(fsl_lpspi, 0);
	}

	/* Clear FIFOs and disable module */
	temp = CR_RRF | CR_RTF;
	writel(temp, fsl_lpspi->base + IMX7ULP_CR);

	/* W1C for all flags in SR */
	writel(SR_CLEAR_MASK, fsl_lpspi->base + IMX7ULP_SR);

	return 0;
}

static void fsl_lpspi_dma_rx_callback(void *cookie)
{
	struct fsl_lpspi_data *fsl_lpspi = (struct fsl_lpspi_data *)cookie;

	complete(&fsl_lpspi->dma_rx_completion);
}

static void fsl_lpspi_dma_tx_callback(void *cookie)
{
	struct fsl_lpspi_data *fsl_lpspi = (struct fsl_lpspi_data *)cookie;

	complete(&fsl_lpspi->dma_tx_completion);
}

static int fsl_lpspi_calculate_timeout(struct fsl_lpspi_data *fsl_lpspi,
				       int size)
{
	unsigned long timeout = 0;

	/* Time with actual data transfer and CS change delay related to HW */
	timeout = (8 + 4) * size / fsl_lpspi->config.speed_hz;

	/* Add extra second for scheduler related activities */
	timeout += 1;

	/* Double the calculated timeout */
	return secs_to_jiffies(2 * timeout);
}

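/*
 * Run one transfer through eDMA: configure both channels, queue the RX
 * descriptor before the TX descriptor, then wait for both completions. A
 * host waits with a timeout scaled to the transfer length, a target waits
 * interruptibly so fsl_lpspi_target_abort() can cancel it; in every case the
 * controller is reset before returning.
 */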
static int fsl_lpspi_dma_transfer(struct spi_controller *controller,
				  struct fsl_lpspi_data *fsl_lpspi,
				  struct spi_transfer *transfer)
{
	struct dma_async_tx_descriptor *desc_tx, *desc_rx;
	unsigned long transfer_timeout;
	unsigned long time_left;
	struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
	int ret;

	ret = fsl_lpspi_dma_configure(controller);
	if (ret)
		return ret;

	desc_rx = dmaengine_prep_slave_sg(controller->dma_rx,
					  rx->sgl, rx->nents, DMA_DEV_TO_MEM,
					  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_rx)
		return -EINVAL;

	desc_rx->callback = fsl_lpspi_dma_rx_callback;
	desc_rx->callback_param = (void *)fsl_lpspi;
	dmaengine_submit(desc_rx);
	reinit_completion(&fsl_lpspi->dma_rx_completion);
	dma_async_issue_pending(controller->dma_rx);

	desc_tx = dmaengine_prep_slave_sg(controller->dma_tx,
					  tx->sgl, tx->nents, DMA_MEM_TO_DEV,
					  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_tx) {
		dmaengine_terminate_all(controller->dma_tx);
		return -EINVAL;
	}

	desc_tx->callback = fsl_lpspi_dma_tx_callback;
	desc_tx->callback_param = (void *)fsl_lpspi;
	dmaengine_submit(desc_tx);
	reinit_completion(&fsl_lpspi->dma_tx_completion);
	dma_async_issue_pending(controller->dma_tx);

	fsl_lpspi->target_aborted = false;

	if (!fsl_lpspi->is_target) {
		transfer_timeout = fsl_lpspi_calculate_timeout(fsl_lpspi,
							       transfer->len);

		/* Wait for eDMA to finish the data transfer. */
		time_left = wait_for_completion_timeout(&fsl_lpspi->dma_tx_completion,
							transfer_timeout);
		if (!time_left) {
			dev_err(fsl_lpspi->dev, "I/O Error in DMA TX\n");
			dmaengine_terminate_all(controller->dma_tx);
			dmaengine_terminate_all(controller->dma_rx);
			fsl_lpspi_reset(fsl_lpspi);
			return -ETIMEDOUT;
		}

		time_left = wait_for_completion_timeout(&fsl_lpspi->dma_rx_completion,
							transfer_timeout);
		if (!time_left) {
			dev_err(fsl_lpspi->dev, "I/O Error in DMA RX\n");
			dmaengine_terminate_all(controller->dma_tx);
			dmaengine_terminate_all(controller->dma_rx);
			fsl_lpspi_reset(fsl_lpspi);
			return -ETIMEDOUT;
		}
	} else {
		if (wait_for_completion_interruptible(&fsl_lpspi->dma_tx_completion) ||
		    fsl_lpspi->target_aborted) {
			dev_dbg(fsl_lpspi->dev,
				"I/O Error in DMA TX interrupted\n");
			dmaengine_terminate_all(controller->dma_tx);
			dmaengine_terminate_all(controller->dma_rx);
			fsl_lpspi_reset(fsl_lpspi);
			return -EINTR;
		}

		if (wait_for_completion_interruptible(&fsl_lpspi->dma_rx_completion) ||
		    fsl_lpspi->target_aborted) {
			dev_dbg(fsl_lpspi->dev,
				"I/O Error in DMA RX interrupted\n");
			dmaengine_terminate_all(controller->dma_tx);
			dmaengine_terminate_all(controller->dma_rx);
			fsl_lpspi_reset(fsl_lpspi);
			return -EINTR;
		}
	}

	fsl_lpspi_reset(fsl_lpspi);

	return 0;
}

static void fsl_lpspi_dma_exit(struct spi_controller *controller)
{
	if (controller->dma_rx) {
		dma_release_channel(controller->dma_rx);
		controller->dma_rx = NULL;
	}

	if (controller->dma_tx) {
		dma_release_channel(controller->dma_tx);
		controller->dma_tx = NULL;
	}
}

static int fsl_lpspi_dma_init(struct device *dev,
			      struct fsl_lpspi_data *fsl_lpspi,
			      struct spi_controller *controller)
{
	int ret;

	/* Prepare for TX DMA: */
	controller->dma_tx = dma_request_chan(dev, "tx");
	if (IS_ERR(controller->dma_tx)) {
		ret = PTR_ERR(controller->dma_tx);
		dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
		controller->dma_tx = NULL;
		goto err;
	}

	/* Prepare for RX DMA: */
	controller->dma_rx = dma_request_chan(dev, "rx");
	if (IS_ERR(controller->dma_rx)) {
		ret = PTR_ERR(controller->dma_rx);
		dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
		controller->dma_rx = NULL;
		goto err;
	}

	init_completion(&fsl_lpspi->dma_rx_completion);
	init_completion(&fsl_lpspi->dma_tx_completion);
	controller->can_dma = fsl_lpspi_can_dma;
	controller->max_dma_len = FSL_LPSPI_MAX_EDMA_BYTES;

	return 0;
err:
	fsl_lpspi_dma_exit(controller);
	return ret;
}

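/*
 * PIO path: prime the TX FIFO, then let fsl_lpspi_isr() keep both FIFOs
 * serviced until the frame-complete interrupt signals xfer_done; the
 * controller is reset once the wait returns.
 */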
static int fsl_lpspi_pio_transfer(struct spi_controller *controller,
				  struct spi_transfer *t)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);
	int ret;

	fsl_lpspi->tx_buf = t->tx_buf;
	fsl_lpspi->rx_buf = t->rx_buf;
	fsl_lpspi->remain = t->len;

	reinit_completion(&fsl_lpspi->xfer_done);
	fsl_lpspi->target_aborted = false;

	fsl_lpspi_write_tx_fifo(fsl_lpspi);

	ret = fsl_lpspi_wait_for_completion(controller);

	fsl_lpspi_reset(fsl_lpspi);

	return ret;
}

static int fsl_lpspi_transfer_one(struct spi_controller *controller,
				  struct spi_device *spi,
				  struct spi_transfer *t)
{
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);
	int ret;

	if (fsl_lpspi_can_dma(controller, spi, t))
		fsl_lpspi->usedma = true;
	else
		fsl_lpspi->usedma = false;

	ret = fsl_lpspi_setup_transfer(controller, spi, t);
	if (ret < 0)
		return ret;

	t->effective_speed_hz = fsl_lpspi->config.effective_speed_hz;

	fsl_lpspi_set_cmd(fsl_lpspi, spi);
	fsl_lpspi->is_first_byte = false;

	if (fsl_lpspi->usedma)
		ret = fsl_lpspi_dma_transfer(controller, fsl_lpspi, t);
	else
		ret = fsl_lpspi_pio_transfer(controller, t);
	if (ret < 0)
		return ret;

	return 0;
}

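/*
 * Interrupt handler: interrupts are masked while the RX FIFO is drained, the
 * TX FIFO is refilled while data remains, and completion is signalled on the
 * frame-complete flag once the module is no longer busy and the TX FIFO has
 * drained.
 */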
static irqreturn_t fsl_lpspi_isr(int irq, void *dev_id)
{
	u32 temp_SR, temp_IER;
	struct fsl_lpspi_data *fsl_lpspi = dev_id;

	temp_IER = readl(fsl_lpspi->base + IMX7ULP_IER);
	fsl_lpspi_intctrl(fsl_lpspi, 0);
	temp_SR = readl(fsl_lpspi->base + IMX7ULP_SR);

	fsl_lpspi_read_rx_fifo(fsl_lpspi);

	if ((temp_SR & SR_TDF) && (temp_IER & IER_TDIE)) {
		fsl_lpspi_write_tx_fifo(fsl_lpspi);
		return IRQ_HANDLED;
	}

	if (temp_SR & SR_MBF ||
	    readl(fsl_lpspi->base + IMX7ULP_FSR) & FSR_TXCOUNT) {
		writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR);
		fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE | (temp_IER & IER_TDIE));
		return IRQ_HANDLED;
	}

	if (temp_SR & SR_FCF && (temp_IER & IER_FCIE)) {
		writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR);
		complete(&fsl_lpspi->xfer_done);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

#ifdef CONFIG_PM
static int fsl_lpspi_runtime_resume(struct device *dev)
{
	struct spi_controller *controller = dev_get_drvdata(dev);
	struct fsl_lpspi_data *fsl_lpspi;
	int ret;

	fsl_lpspi = spi_controller_get_devdata(controller);

	ret = clk_prepare_enable(fsl_lpspi->clk_per);
	if (ret)
		return ret;

	ret = clk_prepare_enable(fsl_lpspi->clk_ipg);
	if (ret) {
		clk_disable_unprepare(fsl_lpspi->clk_per);
		return ret;
	}

	return 0;
}

static int fsl_lpspi_runtime_suspend(struct device *dev)
{
	struct spi_controller *controller = dev_get_drvdata(dev);
	struct fsl_lpspi_data *fsl_lpspi;

	fsl_lpspi = spi_controller_get_devdata(controller);

	clk_disable_unprepare(fsl_lpspi->clk_per);
	clk_disable_unprepare(fsl_lpspi->clk_ipg);

	return 0;
}
#endif

static int fsl_lpspi_init_rpm(struct fsl_lpspi_data *fsl_lpspi)
{
	struct device *dev = fsl_lpspi->dev;

	pm_runtime_enable(dev);
	pm_runtime_set_autosuspend_delay(dev, FSL_LPSPI_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(dev);

	return 0;
}

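/*
 * Probe: pick host or target mode from the "spi-slave" property, map the
 * registers, grab the "per"/"ipg" clocks and the IRQ, read the FIFO sizes
 * (and optionally the chip-select count) from the PARAM register, try to set
 * up eDMA (falling back to PIO with the IRQ enabled) and register the
 * controller with runtime PM enabled.
 */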
static int fsl_lpspi_probe(struct platform_device *pdev)
{
	const struct fsl_lpspi_devtype_data *devtype_data;
	struct fsl_lpspi_data *fsl_lpspi;
	struct spi_controller *controller;
	struct resource *res;
	int ret, irq;
	u32 num_cs;
	u32 temp;
	bool is_target;

	devtype_data = of_device_get_match_data(&pdev->dev);
	if (!devtype_data)
		return -ENODEV;

	is_target = of_property_read_bool((&pdev->dev)->of_node, "spi-slave");
	if (is_target)
		controller = devm_spi_alloc_target(&pdev->dev,
						   sizeof(struct fsl_lpspi_data));
	else
		controller = devm_spi_alloc_host(&pdev->dev,
						 sizeof(struct fsl_lpspi_data));

	if (!controller)
		return -ENOMEM;

	platform_set_drvdata(pdev, controller);

	fsl_lpspi = spi_controller_get_devdata(controller);
	fsl_lpspi->dev = &pdev->dev;
	fsl_lpspi->is_target = is_target;
	fsl_lpspi->is_only_cs1 = of_property_read_bool((&pdev->dev)->of_node,
						       "fsl,spi-only-use-cs1-sel");
	fsl_lpspi->devtype_data = devtype_data;

	init_completion(&fsl_lpspi->xfer_done);

	fsl_lpspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(fsl_lpspi->base)) {
		ret = PTR_ERR(fsl_lpspi->base);
		return ret;
	}
	fsl_lpspi->base_phys = res->start;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		return ret;
	}

	ret = devm_request_irq(&pdev->dev, irq, fsl_lpspi_isr, IRQF_NO_AUTOEN,
			       dev_name(&pdev->dev), fsl_lpspi);
	if (ret) {
		dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
		return ret;
	}

	fsl_lpspi->clk_per = devm_clk_get(&pdev->dev, "per");
	if (IS_ERR(fsl_lpspi->clk_per)) {
		ret = PTR_ERR(fsl_lpspi->clk_per);
		return ret;
	}

	fsl_lpspi->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(fsl_lpspi->clk_ipg)) {
		ret = PTR_ERR(fsl_lpspi->clk_ipg);
		return ret;
	}

	/* enable the clock */
	ret = fsl_lpspi_init_rpm(fsl_lpspi);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(fsl_lpspi->dev);
	if (ret < 0) {
		dev_err(fsl_lpspi->dev, "failed to enable clock\n");
		goto out_pm_get;
	}

	temp = readl(fsl_lpspi->base + IMX7ULP_PARAM);
	fsl_lpspi->txfifosize = 1 << (temp & 0x0f);
	fsl_lpspi->rxfifosize = 1 << ((temp >> 8) & 0x0f);
	if (of_property_read_u32((&pdev->dev)->of_node, "num-cs",
				 &num_cs)) {
		if (devtype_data->query_hw_for_num_cs)
			num_cs = ((temp >> 16) & 0xf);
		else
			num_cs = 1;
	}

	controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
	controller->prepare_message = fsl_lpspi_prepare_message;
	controller->transfer_one = fsl_lpspi_transfer_one;
	controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
	controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
	controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	controller->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
	controller->bus_num = pdev->id;
	controller->num_chipselect = num_cs;
	controller->target_abort = fsl_lpspi_target_abort;
	if (!fsl_lpspi->is_target)
		controller->use_gpio_descriptors = true;

	ret = fsl_lpspi_dma_init(&pdev->dev, fsl_lpspi, controller);
	if (ret == -EPROBE_DEFER)
		goto out_pm_get;
	if (ret < 0) {
		dev_warn(&pdev->dev, "dma setup error %d, use pio\n", ret);
		enable_irq(irq);
	}

	ret = devm_spi_register_controller(&pdev->dev, controller);
	if (ret < 0) {
		dev_err_probe(&pdev->dev, ret, "spi_register_controller error\n");
		goto free_dma;
	}

	pm_runtime_put_autosuspend(fsl_lpspi->dev);

	return 0;

free_dma:
	fsl_lpspi_dma_exit(controller);
out_pm_get:
	pm_runtime_dont_use_autosuspend(fsl_lpspi->dev);
	pm_runtime_put_sync(fsl_lpspi->dev);
	pm_runtime_disable(fsl_lpspi->dev);

	return ret;
}

static void fsl_lpspi_remove(struct platform_device *pdev)
{
	struct spi_controller *controller = platform_get_drvdata(pdev);
	struct fsl_lpspi_data *fsl_lpspi =
				spi_controller_get_devdata(controller);

	fsl_lpspi_dma_exit(controller);

	pm_runtime_dont_use_autosuspend(fsl_lpspi->dev);
	pm_runtime_disable(fsl_lpspi->dev);
}

static int fsl_lpspi_suspend(struct device *dev)
{
	pinctrl_pm_select_sleep_state(dev);
	return pm_runtime_force_suspend(dev);
}

static int fsl_lpspi_resume(struct device *dev)
{
	int ret;

	ret = pm_runtime_force_resume(dev);
	if (ret) {
		dev_err(dev, "Error in resume: %d\n", ret);
		return ret;
	}

	pinctrl_pm_select_default_state(dev);

	return 0;
}

static const struct dev_pm_ops fsl_lpspi_pm_ops = {
	SET_RUNTIME_PM_OPS(fsl_lpspi_runtime_suspend,
			   fsl_lpspi_runtime_resume, NULL)
	SYSTEM_SLEEP_PM_OPS(fsl_lpspi_suspend, fsl_lpspi_resume)
};

static struct platform_driver fsl_lpspi_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = fsl_lpspi_dt_ids,
		.pm = pm_ptr(&fsl_lpspi_pm_ops),
	},
	.probe = fsl_lpspi_probe,
	.remove = fsl_lpspi_remove,
};
module_platform_driver(fsl_lpspi_driver);

MODULE_DESCRIPTION("LPSPI Controller driver");
MODULE_AUTHOR("Gao Pan <pandy.gao@nxp.com>");
MODULE_LICENSE("GPL");