// SPDX-License-Identifier: GPL-2.0+
//
// Copyright 2013 Freescale Semiconductor, Inc.
// Copyright 2020-2025 NXP
//
// Freescale DSPI driver
// This file contains a driver for the Freescale DSPI

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-fsl-dspi.h>

#define DRIVER_NAME			"fsl-dspi"

#define SPI_MCR				0x00
#define SPI_MCR_HOST			BIT(31)
#define SPI_MCR_MTFE			BIT(26)
#define SPI_MCR_PCSIS(x)		((x) << 16)
#define SPI_MCR_CLR_TXF			BIT(11)
#define SPI_MCR_CLR_RXF			BIT(10)
#define SPI_MCR_XSPI			BIT(3)
#define SPI_MCR_DIS_TXF			BIT(13)
#define SPI_MCR_DIS_RXF			BIT(12)
#define SPI_MCR_HALT			BIT(0)

#define SPI_TCR				0x08
#define SPI_TCR_GET_TCNT(x)		(((x) & GENMASK(31, 16)) >> 16)

#define SPI_CTAR(x)			(0x0c + (((x) & GENMASK(2, 0)) * 4))
#define SPI_CTAR_FMSZ(x)		(((x) << 27) & GENMASK(30, 27))
#define SPI_CTAR_DBR			BIT(31)
#define SPI_CTAR_CPOL			BIT(26)
#define SPI_CTAR_CPHA			BIT(25)
#define SPI_CTAR_LSBFE			BIT(24)
#define SPI_CTAR_PCSSCK(x)		(((x) << 22) & GENMASK(23, 22))
#define SPI_CTAR_PASC(x)		(((x) << 20) & GENMASK(21, 20))
#define SPI_CTAR_PDT(x)			(((x) << 18) & GENMASK(19, 18))
#define SPI_CTAR_PBR(x)			(((x) << 16) & GENMASK(17, 16))
#define SPI_CTAR_CSSCK(x)		(((x) << 12) & GENMASK(15, 12))
#define SPI_CTAR_ASC(x)			(((x) << 8) & GENMASK(11, 8))
#define SPI_CTAR_DT(x)			(((x) << 4) & GENMASK(7, 4))
#define SPI_CTAR_BR(x)			((x) & GENMASK(3, 0))
#define SPI_CTAR_SCALE_BITS		0xf

#define SPI_CTAR0_SLAVE			0x0c

#define SPI_SR				0x2c
#define SPI_SR_TCFQF			BIT(31)
#define SPI_SR_TFUF			BIT(27)
#define SPI_SR_TFFF			BIT(25)
#define SPI_SR_CMDTCF			BIT(23)
#define SPI_SR_SPEF			BIT(21)
#define SPI_SR_RFOF			BIT(19)
#define SPI_SR_TFIWF			BIT(18)
#define SPI_SR_RFDF			BIT(17)
#define SPI_SR_CMDFFF			BIT(16)
#define SPI_SR_TXRXS			BIT(30)
#define SPI_SR_CLEAR			(SPI_SR_TCFQF | \
					SPI_SR_TFUF | SPI_SR_TFFF | \
					SPI_SR_CMDTCF | SPI_SR_SPEF | \
					SPI_SR_RFOF | SPI_SR_TFIWF | \
					SPI_SR_RFDF | SPI_SR_CMDFFF)

#define SPI_RSER_TFFFE			BIT(25)
#define SPI_RSER_TFFFD			BIT(24)
#define SPI_RSER_RFDFE			BIT(17)
#define SPI_RSER_RFDFD			BIT(16)

#define SPI_RSER			0x30
#define SPI_RSER_TCFQE			BIT(31)
#define SPI_RSER_CMDTCFE		BIT(23)

#define SPI_PUSHR			0x34
#define SPI_PUSHR_CMD_CONT		BIT(15)
#define SPI_PUSHR_CMD_CTAS(x)		(((x) << 12 & GENMASK(14, 12)))
#define SPI_PUSHR_CMD_EOQ		BIT(11)
#define SPI_PUSHR_CMD_CTCNT		BIT(10)
#define SPI_PUSHR_CMD_PCS(x)		(BIT(x) & GENMASK(5, 0))

#define SPI_PUSHR_SLAVE			0x34

#define SPI_POPR			0x38

#define SPI_TXFR0			0x3c
#define SPI_TXFR1			0x40
#define SPI_TXFR2			0x44
#define SPI_TXFR3			0x48
#define SPI_TXFR4			0x4C
#define SPI_RXFR0			0x7c
#define SPI_RXFR1			0x80
#define SPI_RXFR2			0x84
#define SPI_RXFR3			0x88
#define SPI_RXFR4			0x8C

#define SPI_CTARE(x)			(0x11c + (((x) & GENMASK(2, 0)) * 4))
#define SPI_CTARE_FMSZE(x)		(((x) & 0x1) << 16)
#define SPI_CTARE_DTCP(x)		((x) & 0x7ff)

#define SPI_SREX			0x13c

#define SPI_FRAME_BITS(bits)		SPI_CTAR_FMSZ((bits) - 1)
#define SPI_FRAME_EBITS(bits)		SPI_CTARE_FMSZE(((bits) - 1) >> 4)
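
/*
 * Illustrative example (sketch only, not used by the code): with a 100 MHz
 * protocol clock, PBR = 0 selects the x2 prescaler and BR = 3 the x8
 * scaler, giving SCK = 100 MHz / (2 * 8) = 6.25 MHz. The CTAR value for an
 * 8-bit frame at that rate would be composed as
 * SPI_FRAME_BITS(8) | SPI_CTAR_PBR(0) | SPI_CTAR_BR(3); adding SPI_CTAR_DBR
 * doubles the rate (see hz_to_spi_baud() below).
 */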
#define DMA_COMPLETION_TIMEOUT		msecs_to_jiffies(3000)

#define SPI_25MHZ			25000000

struct chip_data {
	u32 ctar_val;
};

enum dspi_trans_mode {
	DSPI_XSPI_MODE,
	DSPI_DMA_MODE,
};

struct fsl_dspi_devtype_data {
	enum dspi_trans_mode trans_mode;
	u8 max_clock_factor;
	int fifo_size;
	const struct regmap_config *regmap;
};

enum {
	LS1021A,
	LS1012A,
	LS1028A,
	LS1043A,
	LS1046A,
	LS2080A,
	LS2085A,
	LX2160A,
	MCF5441X,
	VF610,
	S32G,
	S32G_TARGET,
};

static const struct regmap_range dspi_yes_ranges[] = {
	regmap_reg_range(SPI_MCR, SPI_MCR),
	regmap_reg_range(SPI_TCR, SPI_CTAR(3)),
	regmap_reg_range(SPI_SR, SPI_TXFR3),
	regmap_reg_range(SPI_RXFR0, SPI_RXFR3),
	regmap_reg_range(SPI_CTARE(0), SPI_CTARE(3)),
	regmap_reg_range(SPI_SREX, SPI_SREX),
};

static const struct regmap_range s32g_dspi_yes_ranges[] = {
	regmap_reg_range(SPI_MCR, SPI_MCR),
	regmap_reg_range(SPI_TCR, SPI_CTAR(5)),
	regmap_reg_range(SPI_SR, SPI_TXFR4),
	regmap_reg_range(SPI_RXFR0, SPI_RXFR4),
	regmap_reg_range(SPI_CTARE(0), SPI_CTARE(5)),
	regmap_reg_range(SPI_SREX, SPI_SREX),
};

static const struct regmap_access_table dspi_access_table = {
	.yes_ranges	= dspi_yes_ranges,
	.n_yes_ranges	= ARRAY_SIZE(dspi_yes_ranges),
};

static const struct regmap_access_table s32g_dspi_access_table = {
	.yes_ranges	= s32g_dspi_yes_ranges,
	.n_yes_ranges	= ARRAY_SIZE(s32g_dspi_yes_ranges),
};

static const struct regmap_range dspi_volatile_ranges[] = {
	regmap_reg_range(SPI_MCR, SPI_TCR),
	regmap_reg_range(SPI_SR, SPI_SR),
	regmap_reg_range(SPI_PUSHR, SPI_RXFR4),
	regmap_reg_range(SPI_SREX, SPI_SREX),
};

static const struct regmap_access_table dspi_volatile_table = {
	.yes_ranges	= dspi_volatile_ranges,
	.n_yes_ranges	= ARRAY_SIZE(dspi_volatile_ranges),
};

enum {
	DSPI_REGMAP,
	S32G_DSPI_REGMAP,
	DSPI_XSPI_REGMAP,
	S32G_DSPI_XSPI_REGMAP,
	DSPI_PUSHR,
};

static const struct regmap_config dspi_regmap_config[] = {
	[DSPI_REGMAP] = {
		.reg_bits	= 32,
		.val_bits	= 32,
		.reg_stride	= 4,
		.max_register	= SPI_RXFR3,
		.volatile_table	= &dspi_volatile_table,
		.rd_table	= &dspi_access_table,
		.wr_table	= &dspi_access_table,
	},
	[S32G_DSPI_REGMAP] = {
		.reg_bits	= 32,
		.val_bits	= 32,
		.reg_stride	= 4,
		.max_register	= SPI_RXFR4,
		.volatile_table	= &dspi_volatile_table,
		.wr_table	= &s32g_dspi_access_table,
		.rd_table	= &s32g_dspi_access_table,
	},
	[DSPI_XSPI_REGMAP] = {
		.reg_bits	= 32,
		.val_bits	= 32,
		.reg_stride	= 4,
		.max_register	= SPI_SREX,
		.volatile_table	= &dspi_volatile_table,
		.rd_table	= &dspi_access_table,
		.wr_table	= &dspi_access_table,
	},
	[S32G_DSPI_XSPI_REGMAP] = {
		.reg_bits	= 32,
		.val_bits	= 32,
		.reg_stride	= 4,
		.max_register	= SPI_SREX,
		.volatile_table	= &dspi_volatile_table,
		.wr_table	= &s32g_dspi_access_table,
		.rd_table	= &s32g_dspi_access_table,
	},
	[DSPI_PUSHR] = {
		.name		= "pushr",
		.reg_bits	= 16,
		.val_bits	= 16,
		.reg_stride	= 2,
		.max_register	= 0x2,
	},
};
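
/*
 * Note on the "pushr" config above: it exposes the 32-bit SPI_PUSHR
 * register as two halfword registers (offsets 0x0 and 0x2), which is what
 * lets XSPI mode write the CMD and TXDATA halves separately; see
 * dspi_pushr_cmd_write() and dspi_pushr_txdata_write(), and the
 * pushr_cmd/pushr_tx offsets chosen in dspi_probe() based on endianness.
 */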
static const struct fsl_dspi_devtype_data devtype_data[] = {
	[VF610] = {
		.trans_mode		= DSPI_DMA_MODE,
		.max_clock_factor	= 2,
		.fifo_size		= 4,
		.regmap			= &dspi_regmap_config[DSPI_REGMAP],
	},
	[LS1021A] = {
		/* Has A-011218 DMA erratum */
		.trans_mode		= DSPI_XSPI_MODE,
		.max_clock_factor	= 8,
		.fifo_size		= 4,
		.regmap			= &dspi_regmap_config[DSPI_XSPI_REGMAP],
	},
	[LS1012A] = {
		/* Has A-011218 DMA erratum */
		.trans_mode		= DSPI_XSPI_MODE,
		.max_clock_factor	= 8,
		.fifo_size		= 16,
		.regmap			= &dspi_regmap_config[DSPI_XSPI_REGMAP],
	},
	[LS1028A] = {
		.trans_mode		= DSPI_XSPI_MODE,
		.max_clock_factor	= 8,
		.fifo_size		= 4,
		.regmap			= &dspi_regmap_config[DSPI_XSPI_REGMAP],
	},
	[LS1043A] = {
		/* Has A-011218 DMA erratum */
		.trans_mode		= DSPI_XSPI_MODE,
		.max_clock_factor	= 8,
		.fifo_size		= 16,
		.regmap			= &dspi_regmap_config[DSPI_XSPI_REGMAP],
	},
	[LS1046A] = {
		/* Has A-011218 DMA erratum */
		.trans_mode		= DSPI_XSPI_MODE,
		.max_clock_factor	= 8,
		.fifo_size		= 16,
		.regmap			= &dspi_regmap_config[DSPI_XSPI_REGMAP],
	},
	[LS2080A] = {
		.trans_mode		= DSPI_XSPI_MODE,
		.max_clock_factor	= 8,
		.fifo_size		= 4,
		.regmap			= &dspi_regmap_config[DSPI_XSPI_REGMAP],
	},
	[LS2085A] = {
		.trans_mode		= DSPI_XSPI_MODE,
		.max_clock_factor	= 8,
		.fifo_size		= 4,
		.regmap			= &dspi_regmap_config[DSPI_XSPI_REGMAP],
	},
	[LX2160A] = {
		.trans_mode		= DSPI_XSPI_MODE,
		.max_clock_factor	= 8,
		.fifo_size		= 4,
		.regmap			= &dspi_regmap_config[DSPI_XSPI_REGMAP],
	},
	[MCF5441X] = {
		.trans_mode		= DSPI_DMA_MODE,
		.max_clock_factor	= 8,
		.fifo_size		= 16,
		.regmap			= &dspi_regmap_config[DSPI_REGMAP],
	},
	[S32G] = {
		.trans_mode		= DSPI_XSPI_MODE,
		.max_clock_factor	= 1,
		.fifo_size		= 5,
		.regmap			= &dspi_regmap_config[S32G_DSPI_XSPI_REGMAP],
	},
	[S32G_TARGET] = {
		.trans_mode		= DSPI_DMA_MODE,
		.max_clock_factor	= 1,
		.fifo_size		= 5,
		.regmap			= &dspi_regmap_config[S32G_DSPI_REGMAP],
	},
};

struct fsl_dspi_dma {
	u32 *tx_dma_buf;
	struct dma_chan *chan_tx;
	dma_addr_t tx_dma_phys;
	struct completion cmd_tx_complete;
	struct dma_async_tx_descriptor *tx_desc;

	u32 *rx_dma_buf;
	struct dma_chan *chan_rx;
	dma_addr_t rx_dma_phys;
	struct completion cmd_rx_complete;
	struct dma_async_tx_descriptor *rx_desc;

	size_t bufsize;
};

struct fsl_dspi {
	struct spi_controller *ctlr;
	struct platform_device *pdev;

	struct regmap *regmap;
	struct regmap *regmap_pushr;
	int irq;
	struct clk *clk;

	struct spi_transfer *cur_transfer;
	struct spi_message *cur_msg;
	struct chip_data *cur_chip;
	size_t progress;
	size_t len;
	const void *tx;
	void *rx;
	u16 tx_cmd;
	bool mtf_enabled;
	const struct fsl_dspi_devtype_data *devtype_data;

	struct completion xfer_done;

	struct fsl_dspi_dma *dma;

	int oper_word_size;
	int oper_bits_per_word;

	int words_in_flight;

	/*
	 * Offsets for CMD and TXDATA within SPI_PUSHR when accessed
	 * individually (in XSPI mode)
	 */
	int pushr_cmd;
	int pushr_tx;

	void (*host_to_dev)(struct fsl_dspi *dspi, u32 *txdata);
	void (*dev_to_host)(struct fsl_dspi *dspi, u32 rxdata);
};

static void dspi_setup_accel(struct fsl_dspi *dspi);

static bool is_s32g_dspi(struct fsl_dspi *data)
{
	return data->devtype_data == &devtype_data[S32G] ||
	       data->devtype_data == &devtype_data[S32G_TARGET];
}
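
/*
 * The host_to_dev/dev_to_host helpers below repack buffer words into the
 * word size that the FIFO is driven with. Worked example (illustrative
 * only): in "8on16" mode, the TX bytes {0x12, 0x34} are packed by
 * cpu_to_be16() into the single hardware word 0x1234, so the byte at the
 * lower memory address is shifted out first, exactly as two separate
 * 8-bit frames would have been.
 */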
static void dspi_native_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
{
	switch (dspi->oper_word_size) {
	case 1:
		*txdata = *(u8 *)dspi->tx;
		break;
	case 2:
		*txdata = *(u16 *)dspi->tx;
		break;
	case 4:
		*txdata = *(u32 *)dspi->tx;
		break;
	}
	dspi->tx += dspi->oper_word_size;
}

static void dspi_native_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
{
	switch (dspi->oper_word_size) {
	case 1:
		*(u8 *)dspi->rx = rxdata;
		break;
	case 2:
		*(u16 *)dspi->rx = rxdata;
		break;
	case 4:
		*(u32 *)dspi->rx = rxdata;
		break;
	}
	dspi->rx += dspi->oper_word_size;
}

static void dspi_8on32_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
{
	*txdata = (__force u32)cpu_to_be32(*(u32 *)dspi->tx);
	dspi->tx += sizeof(u32);
}

static void dspi_8on32_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
{
	*(u32 *)dspi->rx = be32_to_cpu((__force __be32)rxdata);
	dspi->rx += sizeof(u32);
}

static void dspi_8on16_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
{
	*txdata = (__force u32)cpu_to_be16(*(u16 *)dspi->tx);
	dspi->tx += sizeof(u16);
}

static void dspi_8on16_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
{
	*(u16 *)dspi->rx = be16_to_cpu((__force __be16)rxdata);
	dspi->rx += sizeof(u16);
}

static void dspi_16on32_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
{
	u16 hi = *(u16 *)dspi->tx;
	u16 lo = *(u16 *)(dspi->tx + 2);

	*txdata = (u32)hi << 16 | lo;
	dspi->tx += sizeof(u32);
}

static void dspi_16on32_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
{
	u16 hi = rxdata & 0xffff;
	u16 lo = rxdata >> 16;

	*(u16 *)dspi->rx = lo;
	*(u16 *)(dspi->rx + 2) = hi;
	dspi->rx += sizeof(u32);
}

/*
 * Pop one word from the TX buffer for pushing into the
 * PUSHR register (TX FIFO)
 */
static u32 dspi_pop_tx(struct fsl_dspi *dspi)
{
	u32 txdata = 0;

	if (dspi->tx)
		dspi->host_to_dev(dspi, &txdata);
	dspi->len -= dspi->oper_word_size;
	return txdata;
}

/* Push one word to the RX buffer from the POPR register (RX FIFO) */
static void dspi_push_rx(struct fsl_dspi *dspi, u32 rxdata)
{
	if (!dspi->rx)
		return;
	dspi->dev_to_host(dspi, rxdata);
}

static int dspi_fifo_error(struct fsl_dspi *dspi, u32 spi_sr)
{
	if (spi_sr & (SPI_SR_TFUF | SPI_SR_RFOF)) {
		dev_err_ratelimited(&dspi->pdev->dev, "FIFO errors:%s%s\n",
				    spi_sr & SPI_SR_TFUF ? " TX underflow," : "",
				    spi_sr & SPI_SR_RFOF ? " RX overflow," : "");
		return -EIO;
	}
	return 0;
}

#if IS_ENABLED(CONFIG_DMA_ENGINE)

/* Prepare one TX FIFO entry (txdata plus cmd) */
static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi)
{
	u16 cmd = dspi->tx_cmd, data = dspi_pop_tx(dspi);

	if (spi_controller_is_target(dspi->ctlr))
		return data;

	if (dspi->len > 0)
		cmd |= SPI_PUSHR_CMD_CONT;
	return cmd << 16 | data;
}

static size_t dspi_dma_max_datawords(struct fsl_dspi *dspi)
{
	/*
	 * Transfers look like one of these, so we always use a full DMA word
	 * regardless of SPI word size:
	 *
	 * 31              16 15                  0
	 * -----------------------------------------
	 * |  CONTROL WORD  |     16-bit DATA      |
	 * -----------------------------------------
	 * or
	 * -----------------------------------------
	 * |  CONTROL WORD  | UNUSED  | 8-bit DATA |
	 * -----------------------------------------
	 */
	return dspi->dma->bufsize / DMA_SLAVE_BUSWIDTH_4_BYTES;
}

static size_t dspi_dma_transfer_size(struct fsl_dspi *dspi)
{
	return dspi->words_in_flight * DMA_SLAVE_BUSWIDTH_4_BYTES;
}

static void dspi_tx_dma_callback(void *arg)
{
	struct fsl_dspi *dspi = arg;
	struct fsl_dspi_dma *dma = dspi->dma;
	struct device *dev = &dspi->pdev->dev;

	dma_sync_single_for_cpu(dev, dma->tx_dma_phys,
				dspi_dma_transfer_size(dspi), DMA_TO_DEVICE);
	complete(&dma->cmd_tx_complete);
}

static void dspi_rx_dma_callback(void *arg)
{
	struct fsl_dspi *dspi = arg;
	struct fsl_dspi_dma *dma = dspi->dma;
	struct device *dev = &dspi->pdev->dev;
	int i;

	if (dspi->rx) {
		dma_sync_single_for_cpu(dev, dma->rx_dma_phys,
					dspi_dma_transfer_size(dspi),
					DMA_FROM_DEVICE);
		for (i = 0; i < dspi->words_in_flight; i++)
			dspi_push_rx(dspi, dspi->dma->rx_dma_buf[i]);
	}

	complete(&dma->cmd_rx_complete);
}

static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
{
	size_t size = dspi_dma_transfer_size(dspi);
	struct device *dev = &dspi->pdev->dev;
	struct fsl_dspi_dma *dma = dspi->dma;
	int time_left;
	u32 spi_sr;
	int i;

	for (i = 0; i < dspi->words_in_flight; i++)
		dspi->dma->tx_dma_buf[i] = dspi_pop_tx_pushr(dspi);

	dma_sync_single_for_device(dev, dma->tx_dma_phys, size, DMA_TO_DEVICE);
	dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
						   dma->tx_dma_phys, size,
						   DMA_MEM_TO_DEV,
						   DMA_PREP_INTERRUPT |
						   DMA_CTRL_ACK);
	if (!dma->tx_desc) {
		dev_err(dev, "Not able to get desc for DMA xfer\n");
		return -EIO;
	}

	dma->tx_desc->callback = dspi_tx_dma_callback;
	dma->tx_desc->callback_param = dspi;
	if (dma_submit_error(dmaengine_submit(dma->tx_desc))) {
		dev_err(dev, "DMA submit failed\n");
		return -EINVAL;
	}

	dma_sync_single_for_device(dev, dma->rx_dma_phys, size,
				   DMA_FROM_DEVICE);
	dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx,
						   dma->rx_dma_phys, size,
						   DMA_DEV_TO_MEM,
						   DMA_PREP_INTERRUPT |
						   DMA_CTRL_ACK);
	if (!dma->rx_desc) {
		dev_err(dev, "Not able to get desc for DMA xfer\n");
		return -EIO;
	}

	dma->rx_desc->callback = dspi_rx_dma_callback;
	dma->rx_desc->callback_param = dspi;
	if (dma_submit_error(dmaengine_submit(dma->rx_desc))) {
		dev_err(dev, "DMA submit failed\n");
		return -EINVAL;
	}

	reinit_completion(&dspi->dma->cmd_rx_complete);
	reinit_completion(&dspi->dma->cmd_tx_complete);

	/*
	 * Both descriptors are already submitted; once the channels are
	 * kicked below, the callbacks above may run at any time, so
	 * synchronize only through the two completions.
	 */
	dma_async_issue_pending(dma->chan_rx);
	dma_async_issue_pending(dma->chan_tx);

	if (spi_controller_is_target(dspi->ctlr)) {
		wait_for_completion_interruptible(&dspi->dma->cmd_rx_complete);
		regmap_read(dspi->regmap, SPI_SR, &spi_sr);
		return dspi_fifo_error(dspi, spi_sr);
	}

	time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete,
						DMA_COMPLETION_TIMEOUT);
	if (time_left == 0) {
		dev_err(dev, "DMA tx timeout\n");
		dmaengine_terminate_all(dma->chan_tx);
		dmaengine_terminate_all(dma->chan_rx);
		return -ETIMEDOUT;
	}

	time_left = wait_for_completion_timeout(&dspi->dma->cmd_rx_complete,
						DMA_COMPLETION_TIMEOUT);
	if (time_left == 0) {
		dev_err(dev, "DMA rx timeout\n");
		dmaengine_terminate_all(dma->chan_tx);
		dmaengine_terminate_all(dma->chan_rx);
		return -ETIMEDOUT;
	}

	return 0;
}

static void dspi_dma_xfer(struct fsl_dspi *dspi)
{
	struct spi_message *message = dspi->cur_msg;
	struct device *dev = &dspi->pdev->dev;

	/*
	 * dspi->len gets decremented by dspi_pop_tx_pushr in
	 * dspi_next_xfer_dma_submit
	 */
	while (dspi->len) {
		/* Figure out operational bits-per-word for this chunk */
		dspi_setup_accel(dspi);

		dspi->words_in_flight = min(dspi->len / dspi->oper_word_size,
					    dspi_dma_max_datawords(dspi));

		message->actual_length += dspi->words_in_flight *
					  dspi->oper_word_size;

		message->status = dspi_next_xfer_dma_submit(dspi);
		if (message->status) {
			dev_err(dev, "DMA transfer failed\n");
			break;
		}
	}
}
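
/*
 * Worked example of the chunking above (sketch only): an 8-bit, 1000-byte
 * transfer uses one 32-bit DMA word per SPI word, so with the default
 * PAGE_SIZE buffer (1024 data words on 4K pages) it completes in a single
 * round with words_in_flight = 1000.
 */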
static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
{
	struct device *dev = &dspi->pdev->dev;
	struct dma_slave_config cfg;
	struct fsl_dspi_dma *dma;
	int ret;

	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->chan_rx = dma_request_chan(dev, "rx");
	if (IS_ERR(dma->chan_rx))
		return dev_err_probe(dev, PTR_ERR(dma->chan_rx), "rx dma channel not available\n");

	dma->chan_tx = dma_request_chan(dev, "tx");
	if (IS_ERR(dma->chan_tx)) {
		ret = dev_err_probe(dev, PTR_ERR(dma->chan_tx), "tx dma channel not available\n");
		goto err_tx_channel;
	}

	if (spi_controller_is_target(dspi->ctlr)) {
		/*
		 * In target mode we have to be ready to receive the maximum
		 * that can possibly be transferred at once by EDMA without any
		 * FIFO underflows.
		 */
		dma->bufsize = min(dma_get_max_seg_size(dma->chan_rx->device->dev),
				   dma_get_max_seg_size(dma->chan_tx->device->dev)) *
			       DMA_SLAVE_BUSWIDTH_4_BYTES;
	} else {
		dma->bufsize = PAGE_SIZE;
	}

	dma->tx_dma_buf = dma_alloc_noncoherent(dma->chan_tx->device->dev,
						dma->bufsize, &dma->tx_dma_phys,
						DMA_TO_DEVICE, GFP_KERNEL);
	if (!dma->tx_dma_buf) {
		ret = -ENOMEM;
		goto err_tx_dma_buf;
	}

	dma->rx_dma_buf = dma_alloc_noncoherent(dma->chan_rx->device->dev,
						dma->bufsize, &dma->rx_dma_phys,
						DMA_FROM_DEVICE, GFP_KERNEL);
	if (!dma->rx_dma_buf) {
		ret = -ENOMEM;
		goto err_rx_dma_buf;
	}

	memset(&cfg, 0, sizeof(cfg));
	cfg.src_addr = phy_addr + SPI_POPR;
	cfg.dst_addr = phy_addr + SPI_PUSHR;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = 1;
	cfg.dst_maxburst = 1;

	cfg.direction = DMA_DEV_TO_MEM;
	ret = dmaengine_slave_config(dma->chan_rx, &cfg);
	if (ret) {
		dev_err_probe(dev, ret, "can't configure rx dma channel\n");
		goto err_slave_config;
	}

	cfg.direction = DMA_MEM_TO_DEV;
	ret = dmaengine_slave_config(dma->chan_tx, &cfg);
	if (ret) {
		dev_err_probe(dev, ret, "can't configure tx dma channel\n");
		goto err_slave_config;
	}

	dspi->dma = dma;
	init_completion(&dma->cmd_tx_complete);
	init_completion(&dma->cmd_rx_complete);

	return 0;

err_slave_config:
	dma_free_noncoherent(dma->chan_rx->device->dev, dma->bufsize,
			     dma->rx_dma_buf, dma->rx_dma_phys,
			     DMA_FROM_DEVICE);
err_rx_dma_buf:
	dma_free_noncoherent(dma->chan_tx->device->dev, dma->bufsize,
			     dma->tx_dma_buf, dma->tx_dma_phys, DMA_TO_DEVICE);
err_tx_dma_buf:
	dma_release_channel(dma->chan_tx);
err_tx_channel:
	dma_release_channel(dma->chan_rx);

	devm_kfree(dev, dma);
	dspi->dma = NULL;

	return ret;
}

static void dspi_release_dma(struct fsl_dspi *dspi)
{
	struct fsl_dspi_dma *dma = dspi->dma;

	if (!dma)
		return;

	if (dma->chan_tx) {
		dma_free_noncoherent(dma->chan_tx->device->dev, dma->bufsize,
				     dma->tx_dma_buf, dma->tx_dma_phys,
				     DMA_TO_DEVICE);
		dma_release_channel(dma->chan_tx);
	}

	if (dma->chan_rx) {
		dma_free_noncoherent(dma->chan_rx->device->dev, dma->bufsize,
				     dma->rx_dma_buf, dma->rx_dma_phys,
				     DMA_FROM_DEVICE);
		dma_release_channel(dma->chan_rx);
	}
}
#else
static void dspi_dma_xfer(struct fsl_dspi *dspi)
{
	dspi->cur_msg->status = -EINVAL;
}
static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
{
	dev_err(&dspi->pdev->dev, "DMA support not enabled in kernel\n");
	return -EINVAL;
}
static void dspi_release_dma(struct fsl_dspi *dspi) {}
#endif

static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
			   unsigned long clkrate, bool mtf_enabled)
{
	/* Valid baud rate pre-scaler values */
	int pbr_tbl[4] = {2, 3, 5, 7};
	int brs[16] = {	2,	4,	6,	8,
			16,	32,	64,	128,
			256,	512,	1024,	2048,
			4096,	8192,	16384,	32768 };
	int scale_needed, scale, minscale = INT_MAX;
	int i, j;

	scale_needed = clkrate / speed_hz;
	if (clkrate % speed_hz)
		scale_needed++;

	for (i = 0; i < ARRAY_SIZE(brs); i++)
		for (j = 0; j < ARRAY_SIZE(pbr_tbl); j++) {
			if (mtf_enabled) {
				/* In MTF mode DBR=1 so frequency is doubled */
				scale = (brs[i] * pbr_tbl[j]) / 2;
			} else {
				scale = brs[i] * pbr_tbl[j];
			}

			if (scale >= scale_needed) {
				if (scale < minscale) {
					minscale = scale;
					*br = i;
					*pbr = j;
				}
				break;
			}
		}

	if (minscale == INT_MAX) {
		pr_warn("Cannot find valid baud rate: speed_hz is %d, clkrate is %ld; using max prescaler value\n",
			speed_hz, clkrate);
		*pbr = ARRAY_SIZE(pbr_tbl) - 1;
		*br = ARRAY_SIZE(brs) - 1;
	}
}
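
/*
 * Worked example (illustrative only): for speed_hz = 10 MHz on a 100 MHz
 * clkrate, scale_needed = 10, and the best match found by the search above
 * is pbr_tbl[2] * brs[0] = 5 * 2 = 10, i.e. *pbr = 2 and *br = 0, for an
 * SCK of exactly 10 MHz.
 */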
static void ns_delay_scale(char *psc, char *sc, int delay_ns,
			   unsigned long clkrate)
{
	int scale_needed, scale, minscale = INT_MAX;
	int pscale_tbl[4] = {1, 3, 5, 7};
	u32 remainder;
	int i, j;

	scale_needed = div_u64_rem((u64)delay_ns * clkrate, NSEC_PER_SEC,
				   &remainder);
	if (remainder)
		scale_needed++;

	for (i = 0; i < ARRAY_SIZE(pscale_tbl); i++)
		for (j = 0; j <= SPI_CTAR_SCALE_BITS; j++) {
			scale = pscale_tbl[i] * (2 << j);
			if (scale >= scale_needed) {
				if (scale < minscale) {
					minscale = scale;
					*psc = i;
					*sc = j;
				}
				break;
			}
		}

	if (minscale == INT_MAX) {
		pr_warn("Cannot find correct scale values for %dns delay at clkrate %ld, using max prescaler value\n",
			delay_ns, clkrate);
		*psc = ARRAY_SIZE(pscale_tbl) - 1;
		*sc = SPI_CTAR_SCALE_BITS;
	}
}

static void dspi_pushr_cmd_write(struct fsl_dspi *dspi, u16 cmd)
{
	/*
	 * The only time when the PCS doesn't need continuation after this word
	 * is when it's last. We need to look ahead, because we actually call
	 * dspi_pop_tx (the function that decrements dspi->len) _after_
	 * dspi_pushr_cmd_write with XSPI mode. As for how much in advance? One
	 * word is enough. If there's more to transmit than that,
	 * dspi_xspi_fifo_write() will know to split the FIFO writes in 2, and
	 * generate a new PUSHR command with the final word that will have PCS
	 * deasserted (not continued) here.
	 */
	if (dspi->len > dspi->oper_word_size)
		cmd |= SPI_PUSHR_CMD_CONT;
	regmap_write(dspi->regmap_pushr, dspi->pushr_cmd, cmd);
}

static void dspi_pushr_txdata_write(struct fsl_dspi *dspi, u16 txdata)
{
	regmap_write(dspi->regmap_pushr, dspi->pushr_tx, txdata);
}

static void dspi_xspi_fifo_write(struct fsl_dspi *dspi, int num_words)
{
	int num_bytes = num_words * dspi->oper_word_size;
	u16 tx_cmd = dspi->tx_cmd;

	/*
	 * If the PCS needs to de-assert (i.e. we're at the end of the buffer
	 * and cs_change does not want the PCS to stay on), then we need a new
	 * PUSHR command, since this one (for the body of the buffer)
	 * necessarily has the CONT bit set.
	 * So send one word less during this go, to force a split and a command
	 * with a single word next time, when CONT will be unset.
	 */
	if (!(dspi->tx_cmd & SPI_PUSHR_CMD_CONT) && num_bytes == dspi->len)
		tx_cmd |= SPI_PUSHR_CMD_EOQ;

	/* Update CTARE */
	regmap_write(dspi->regmap, SPI_CTARE(0),
		     SPI_FRAME_EBITS(dspi->oper_bits_per_word) |
		     SPI_CTARE_DTCP(num_words));

	/*
	 * Write the CMD FIFO entry first, and then the two
	 * corresponding TX FIFO entries (or one...).
	 */
	dspi_pushr_cmd_write(dspi, tx_cmd);

	/* Fill TX FIFO with as many transfers as possible */
	while (num_words--) {
		u32 data = dspi_pop_tx(dspi);

		dspi_pushr_txdata_write(dspi, data & 0xFFFF);
		if (dspi->oper_bits_per_word > 16)
			dspi_pushr_txdata_write(dspi, data >> 16);
	}
}

static u32 dspi_popr_read(struct fsl_dspi *dspi)
{
	u32 rxdata = 0;

	regmap_read(dspi->regmap, SPI_POPR, &rxdata);
	return rxdata;
}

static void dspi_fifo_read(struct fsl_dspi *dspi)
{
	int num_fifo_entries = dspi->words_in_flight;

	/* Read one FIFO entry and push to rx buffer */
	while (num_fifo_entries--)
		dspi_push_rx(dspi, dspi_popr_read(dspi));
}

static void dspi_setup_accel(struct fsl_dspi *dspi)
{
	struct spi_transfer *xfer = dspi->cur_transfer;
	bool odd = !!(dspi->len & 1);

	/*
	 * No accel for DMA transfers or frames not multiples of 8 bits at the
	 * moment.
	 */
	if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE ||
	    xfer->bits_per_word % 8)
		goto no_accel;

	if (!odd && dspi->len <= dspi->devtype_data->fifo_size * 2) {
		dspi->oper_bits_per_word = 16;
	} else if (odd && dspi->len <= dspi->devtype_data->fifo_size) {
		dspi->oper_bits_per_word = 8;
	} else {
		/* Start off with maximum supported by hardware */
		dspi->oper_bits_per_word = 32;

		/*
		 * And go down only if the buffer can't be sent with
		 * words this big
		 */
		do {
			if (dspi->len >= DIV_ROUND_UP(dspi->oper_bits_per_word, 8))
				break;

			dspi->oper_bits_per_word /= 2;
		} while (dspi->oper_bits_per_word > 8);
	}

	if (xfer->bits_per_word == 8 && dspi->oper_bits_per_word == 32) {
		dspi->dev_to_host = dspi_8on32_dev_to_host;
		dspi->host_to_dev = dspi_8on32_host_to_dev;
	} else if (xfer->bits_per_word == 8 && dspi->oper_bits_per_word == 16) {
		dspi->dev_to_host = dspi_8on16_dev_to_host;
		dspi->host_to_dev = dspi_8on16_host_to_dev;
	} else if (xfer->bits_per_word == 16 && dspi->oper_bits_per_word == 32) {
		dspi->dev_to_host = dspi_16on32_dev_to_host;
		dspi->host_to_dev = dspi_16on32_host_to_dev;
	} else {
no_accel:
		dspi->dev_to_host = dspi_native_dev_to_host;
		dspi->host_to_dev = dspi_native_host_to_dev;
		dspi->oper_bits_per_word = xfer->bits_per_word;
	}

	dspi->oper_word_size = DIV_ROUND_UP(dspi->oper_bits_per_word, 8);

	/*
	 * Update CTAR here (code is common for XSPI and DMA modes).
	 * We will update CTARE in the portion specific to XSPI, when we
	 * also know the preload value (DTCP).
	 */
	regmap_write(dspi->regmap, SPI_CTAR(0),
		     dspi->cur_chip->ctar_val |
		     SPI_FRAME_BITS(dspi->oper_bits_per_word));
}
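
/*
 * Sizing example for the above (illustrative only): with fifo_size = 4 and
 * an even 6-byte buffer at 8 bits-per-word, dspi_setup_accel() picks
 * 16-bit operational words (6 <= 4 * 2), so dspi_fifo_write() below queues
 * three FIFO entries in one go.
 */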
static void dspi_fifo_write(struct fsl_dspi *dspi)
{
	int num_fifo_entries = dspi->devtype_data->fifo_size;
	struct spi_transfer *xfer = dspi->cur_transfer;
	struct spi_message *msg = dspi->cur_msg;
	int num_words, num_bytes;

	dspi_setup_accel(dspi);

	/* In XSPI mode each 32-bit word occupies 2 TX FIFO entries */
	if (dspi->oper_word_size == 4)
		num_fifo_entries /= 2;

	/*
	 * Integer division intentionally trims off odd (or non-multiple of 4)
	 * numbers of bytes at the end of the buffer, which will be sent next
	 * time using a smaller oper_word_size.
	 */
	num_words = dspi->len / dspi->oper_word_size;
	if (num_words > num_fifo_entries)
		num_words = num_fifo_entries;

	/* Update total number of bytes that were transferred */
	num_bytes = num_words * dspi->oper_word_size;
	msg->actual_length += num_bytes;
	dspi->progress += num_bytes / DIV_ROUND_UP(xfer->bits_per_word, 8);

	/*
	 * Update shared variable for use in the next interrupt (both in
	 * dspi_fifo_read and in dspi_fifo_write).
	 */
	dspi->words_in_flight = num_words;

	spi_take_timestamp_pre(dspi->ctlr, xfer, dspi->progress, !dspi->irq);

	dspi_xspi_fifo_write(dspi, num_words);
	/*
	 * Everything after this point is in a potential race with the next
	 * interrupt, so we must never use dspi->words_in_flight again since it
	 * might already be modified by the next dspi_fifo_write.
	 */

	spi_take_timestamp_post(dspi->ctlr, dspi->cur_transfer,
				dspi->progress, !dspi->irq);
}

/*
 * Read the previous transfer from the FIFO and transmit the next one.
 *
 * Returns false if the buffer to be transmitted is empty, and true if there is
 * still data to transmit.
 */
static bool dspi_rxtx(struct fsl_dspi *dspi)
{
	dspi_fifo_read(dspi);

	if (!dspi->len)
		/* Success! */
		return false;

	dspi_fifo_write(dspi);

	return true;
}

static void dspi_poll(struct fsl_dspi *dspi)
{
	int tries;
	int err = 0;
	u32 spi_sr;

	do {
		for (tries = 1000; tries > 0; --tries) {
			regmap_read(dspi->regmap, SPI_SR, &spi_sr);
			regmap_write(dspi->regmap, SPI_SR, spi_sr);

			dspi->cur_msg->status = dspi_fifo_error(dspi, spi_sr);
			if (dspi->cur_msg->status)
				return;
			if (spi_sr & SPI_SR_CMDTCF)
				break;
		}
		if (!tries) {
			err = -ETIMEDOUT;
			break;
		}
	} while (dspi_rxtx(dspi));

	dspi->cur_msg->status = err;
}

static irqreturn_t dspi_interrupt(int irq, void *dev_id)
{
	struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id;
	int status;
	u32 spi_sr;

	regmap_read(dspi->regmap, SPI_SR, &spi_sr);
	regmap_write(dspi->regmap, SPI_SR, spi_sr);

	if (!(spi_sr & SPI_SR_CMDTCF))
		return IRQ_NONE;

	status = dspi_fifo_error(dspi, spi_sr);
	if (status) {
		if (dspi->cur_msg)
			WRITE_ONCE(dspi->cur_msg->status, status);
		complete(&dspi->xfer_done);
		return IRQ_HANDLED;
	}

	if (dspi_rxtx(dspi) == false) {
		if (dspi->cur_msg)
			WRITE_ONCE(dspi->cur_msg->status, 0);
		complete(&dspi->xfer_done);
	}

	return IRQ_HANDLED;
}

static void dspi_assert_cs(struct spi_device *spi, bool *cs)
{
	if (!spi_get_csgpiod(spi, 0) || *cs)
		return;

	gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), true);
	*cs = true;
}

static void dspi_deassert_cs(struct spi_device *spi, bool *cs)
{
	if (!spi_get_csgpiod(spi, 0) || !*cs)
		return;

	gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), false);
	*cs = false;
}

static int dspi_transfer_one_message(struct spi_controller *ctlr,
				     struct spi_message *message)
{
	struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
	struct spi_device *spi = message->spi;
	struct spi_transfer *transfer;
	bool cs = false;
	u32 val = 0;
	bool cs_change = false;

	message->actual_length = 0;

	/* Put DSPI in running mode if halted. */
	regmap_read(dspi->regmap, SPI_MCR, &val);
	if (val & SPI_MCR_HALT) {
		regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, 0);
		while (regmap_read(dspi->regmap, SPI_SR, &val) >= 0 &&
		       !(val & SPI_SR_TXRXS))
			;
	}

	list_for_each_entry(transfer, &message->transfers, transfer_list) {
		dspi->cur_transfer = transfer;
		dspi->cur_msg = message;
		dspi->cur_chip = spi_get_ctldata(spi);

		dspi_assert_cs(spi, &cs);

		/* Prepare command word for CMD FIFO */
		dspi->tx_cmd = SPI_PUSHR_CMD_CTAS(0);
		if (!spi_get_csgpiod(spi, 0))
			dspi->tx_cmd |= SPI_PUSHR_CMD_PCS(spi_get_chipselect(spi, 0));

		if (list_is_last(&dspi->cur_transfer->transfer_list,
				 &dspi->cur_msg->transfers)) {
			/* Leave PCS activated after last transfer when
			 * cs_change is set.
			 */
			if (transfer->cs_change)
				dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
		} else {
			/* Keep PCS active between transfers in same message
			 * when cs_change is not set, and de-activate PCS
			 * between transfers in the same message when
			 * cs_change is set.
			 */
			if (!transfer->cs_change)
				dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
		}

		cs_change = transfer->cs_change;
		dspi->tx = transfer->tx_buf;
		dspi->rx = transfer->rx_buf;
		dspi->len = transfer->len;
		dspi->progress = 0;

		regmap_update_bits(dspi->regmap, SPI_MCR,
				   SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
				   SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);

		regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);

		spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer,
				       dspi->progress, !dspi->irq);

		if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
			dspi_dma_xfer(dspi);
		} else {
			/*
			 * Reinitialize the completion before transferring data
			 * to avoid the case where it might remain in the done
			 * state due to a spurious interrupt from a previous
			 * transfer. This could falsely signal that the current
			 * transfer has completed.
			 */
			if (dspi->irq)
				reinit_completion(&dspi->xfer_done);

			dspi_fifo_write(dspi);

			if (dspi->irq)
				wait_for_completion(&dspi->xfer_done);
			else
				dspi_poll(dspi);
		}
		if (READ_ONCE(message->status))
			break;

		spi_transfer_delay_exec(transfer);

		if (!(dspi->tx_cmd & SPI_PUSHR_CMD_CONT))
			dspi_deassert_cs(spi, &cs);
	}

	dspi->cur_msg = NULL;
	if (message->status || !cs_change) {
		/* Put DSPI in stop mode */
		regmap_update_bits(dspi->regmap, SPI_MCR,
				   SPI_MCR_HALT, SPI_MCR_HALT);
		while (regmap_read(dspi->regmap, SPI_SR, &val) >= 0 &&
		       val & SPI_SR_TXRXS)
			;
	}

	spi_finalize_current_message(ctlr);

	return message->status;
}

static int dspi_set_mtf(struct fsl_dspi *dspi)
{
	if (spi_controller_is_target(dspi->ctlr))
		return 0;

	if (dspi->mtf_enabled)
		regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_MTFE,
				   SPI_MCR_MTFE);
	else
		regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_MTFE, 0);

	return 0;
}

static int dspi_setup(struct spi_device *spi)
{
	struct fsl_dspi *dspi = spi_controller_get_devdata(spi->controller);
	u32 period_ns = DIV_ROUND_UP(NSEC_PER_SEC, spi->max_speed_hz);
	unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0;
	u32 quarter_period_ns = DIV_ROUND_UP(period_ns, 4);
	u32 cs_sck_delay = 0, sck_cs_delay = 0;
	struct fsl_dspi_platform_data *pdata;
	unsigned char pasc = 0, asc = 0;
	struct gpio_desc *gpio_cs;
	struct chip_data *chip;
	unsigned long clkrate;
	bool cs = true;
	int val;

	/* Only alloc on first setup */
	chip = spi_get_ctldata(spi);
	if (chip == NULL) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;
	}

	pdata = dev_get_platdata(&dspi->pdev->dev);

	if (!pdata) {
		val = spi_delay_to_ns(&spi->cs_setup, NULL);
		cs_sck_delay = val >= 0 ? val : 0;
		if (!cs_sck_delay)
			of_property_read_u32(spi->dev.of_node,
					     "fsl,spi-cs-sck-delay",
					     &cs_sck_delay);

		val = spi_delay_to_ns(&spi->cs_hold, NULL);
		sck_cs_delay = val >= 0 ? val : 0;
		if (!sck_cs_delay)
			of_property_read_u32(spi->dev.of_node,
					     "fsl,spi-sck-cs-delay",
					     &sck_cs_delay);
	} else {
		cs_sck_delay = pdata->cs_sck_delay;
		sck_cs_delay = pdata->sck_cs_delay;
	}

	/* Since tCSC and tASC apply to continuous transfers too, avoid SCK
	 * glitches of half a cycle by never allowing tCSC + tASC to go below
	 * half a SCK period.
	 */
	if (cs_sck_delay < quarter_period_ns)
		cs_sck_delay = quarter_period_ns;
	if (sck_cs_delay < quarter_period_ns)
		sck_cs_delay = quarter_period_ns;

	dev_dbg(&spi->dev,
		"DSPI controller timing params: CS-to-SCK delay %u ns, SCK-to-CS delay %u ns\n",
		cs_sck_delay, sck_cs_delay);

	clkrate = clk_get_rate(dspi->clk);

	if (is_s32g_dspi(dspi) && spi->max_speed_hz > SPI_25MHZ)
		dspi->mtf_enabled = true;
	else
		dspi->mtf_enabled = false;

	dspi_set_mtf(dspi);

	hz_to_spi_baud(&pbr, &br, spi->max_speed_hz, clkrate,
		       dspi->mtf_enabled);

	/* Set PCS to SCK delay scale values */
	ns_delay_scale(&pcssck, &cssck, cs_sck_delay, clkrate);

	/* Set After SCK delay scale values */
	ns_delay_scale(&pasc, &asc, sck_cs_delay, clkrate);

	chip->ctar_val = 0;
	if (spi->mode & SPI_CPOL)
		chip->ctar_val |= SPI_CTAR_CPOL;
	if (spi->mode & SPI_CPHA)
		chip->ctar_val |= SPI_CTAR_CPHA;

	if (!spi_controller_is_target(dspi->ctlr)) {
		chip->ctar_val |= SPI_CTAR_PCSSCK(pcssck) |
				  SPI_CTAR_CSSCK(cssck) |
				  SPI_CTAR_PASC(pasc) |
				  SPI_CTAR_ASC(asc) |
				  SPI_CTAR_PBR(pbr) |
				  SPI_CTAR_BR(br);

		if (dspi->mtf_enabled)
			chip->ctar_val |= SPI_CTAR_DBR;

		if (spi->mode & SPI_LSB_FIRST)
			chip->ctar_val |= SPI_CTAR_LSBFE;
	}

	gpio_cs = spi_get_csgpiod(spi, 0);
	if (gpio_cs)
		gpiod_direction_output(gpio_cs, false);

	dspi_deassert_cs(spi, &cs);

	spi_set_ctldata(spi, chip);

	return 0;
}

static void dspi_cleanup(struct spi_device *spi)
{
	struct chip_data *chip = spi_get_ctldata(spi);

	dev_dbg(&spi->dev, "spi_device %u.%u cleanup\n",
		spi->controller->bus_num, spi_get_chipselect(spi, 0));

	kfree(chip);
}

static const struct of_device_id fsl_dspi_dt_ids[] = {
	{
		.compatible = "fsl,vf610-dspi",
		.data = &devtype_data[VF610],
	}, {
		.compatible = "fsl,ls1021a-v1.0-dspi",
		.data = &devtype_data[LS1021A],
	}, {
		.compatible = "fsl,ls1012a-dspi",
		.data = &devtype_data[LS1012A],
	}, {
		.compatible = "fsl,ls1028a-dspi",
		.data = &devtype_data[LS1028A],
	}, {
		.compatible = "fsl,ls1043a-dspi",
		.data = &devtype_data[LS1043A],
	}, {
		.compatible = "fsl,ls1046a-dspi",
		.data = &devtype_data[LS1046A],
	}, {
		.compatible = "fsl,ls2080a-dspi",
		.data = &devtype_data[LS2080A],
	}, {
		.compatible = "fsl,ls2085a-dspi",
		.data = &devtype_data[LS2085A],
	}, {
		.compatible = "fsl,lx2160a-dspi",
		.data = &devtype_data[LX2160A],
	}, {
		.compatible = "nxp,s32g2-dspi",
		.data = &devtype_data[S32G],
	},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids);
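
/*
 * Sketch of a device tree node matched by the table above (illustrative
 * only; see the DSPI DT bindings for the authoritative set of properties):
 *
 *	spi@<base> {
 *		compatible = "fsl,ls1028a-dspi";
 *		spi-num-chipselects = <4>;
 *		bus-num = <0>;
 *		// optional: spi-slave, fsl,spi-cs-sck-delay,
 *		// fsl,spi-sck-cs-delay
 *	};
 */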
static int dspi_init(struct fsl_dspi *dspi)
{
	unsigned int mcr;

	/* Set idle states for all chip select signals to high */
	mcr = SPI_MCR_PCSIS(GENMASK(dspi->ctlr->max_native_cs - 1, 0));

	if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
		mcr |= SPI_MCR_XSPI;
	if (!spi_controller_is_target(dspi->ctlr))
		mcr |= SPI_MCR_HOST;

	mcr |= SPI_MCR_HALT;

	regmap_write(dspi->regmap, SPI_MCR, mcr);
	regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);

	switch (dspi->devtype_data->trans_mode) {
	case DSPI_XSPI_MODE:
		regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_CMDTCFE);
		break;
	case DSPI_DMA_MODE:
		regmap_write(dspi->regmap, SPI_RSER,
			     SPI_RSER_TFFFE | SPI_RSER_TFFFD |
			     SPI_RSER_RFDFE | SPI_RSER_RFDFD);
		break;
	default:
		dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
			dspi->devtype_data->trans_mode);
		return -EINVAL;
	}

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int dspi_suspend(struct device *dev)
{
	struct fsl_dspi *dspi = dev_get_drvdata(dev);

	if (dspi->irq)
		disable_irq(dspi->irq);
	spi_controller_suspend(dspi->ctlr);
	clk_disable_unprepare(dspi->clk);

	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int dspi_resume(struct device *dev)
{
	struct fsl_dspi *dspi = dev_get_drvdata(dev);
	int ret;

	pinctrl_pm_select_default_state(dev);

	ret = clk_prepare_enable(dspi->clk);
	if (ret)
		return ret;
	spi_controller_resume(dspi->ctlr);

	ret = dspi_init(dspi);
	if (ret) {
		dev_err(dev, "failed to initialize dspi during resume\n");
		return ret;
	}

	dspi_set_mtf(dspi);

	if (dspi->irq)
		enable_irq(dspi->irq);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume);

static int dspi_target_abort(struct spi_controller *host)
{
	struct fsl_dspi *dspi = spi_controller_get_devdata(host);

	/*
	 * Terminate all pending DMA transactions for the SPI working
	 * in TARGET mode.
	 */
	if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
		dmaengine_terminate_sync(dspi->dma->chan_rx);
		dmaengine_terminate_sync(dspi->dma->chan_tx);
	}

	/* Clear the internal DSPI RX and TX FIFO buffers */
	regmap_update_bits(dspi->regmap, SPI_MCR,
			   SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
			   SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);

	return 0;
}

static int dspi_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct fsl_dspi_platform_data *pdata;
	struct spi_controller *ctlr;
	int ret, cs_num, bus_num = -1;
	struct fsl_dspi *dspi;
	struct resource *res;
	void __iomem *base;
	bool big_endian;

	dspi = devm_kzalloc(&pdev->dev, sizeof(*dspi), GFP_KERNEL);
	if (!dspi)
		return -ENOMEM;

	if (of_property_read_bool(np, "spi-slave"))
		ctlr = spi_alloc_target(&pdev->dev, 0);
	else
		ctlr = spi_alloc_host(&pdev->dev, 0);
	if (!ctlr)
		return -ENOMEM;

	spi_controller_set_devdata(ctlr, dspi);
	platform_set_drvdata(pdev, dspi);

	dspi->pdev = pdev;
	dspi->ctlr = ctlr;

	ctlr->setup = dspi_setup;
	ctlr->transfer_one_message = dspi_transfer_one_message;
	ctlr->dev.of_node = pdev->dev.of_node;

	ctlr->cleanup = dspi_cleanup;
	ctlr->target_abort = dspi_target_abort;
	ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
	ctlr->use_gpio_descriptors = true;

	pdata = dev_get_platdata(&pdev->dev);
	if (pdata) {
		ctlr->num_chipselect = ctlr->max_native_cs = pdata->cs_num;
		ctlr->bus_num = pdata->bus_num;

		/* Only Coldfire uses platform data */
		dspi->devtype_data = &devtype_data[MCF5441X];
		big_endian = true;
	} else {
		ret = of_property_read_u32(np, "spi-num-chipselects", &cs_num);
		if (ret < 0) {
			dev_err(&pdev->dev, "can't get spi-num-chipselects\n");
			goto out_ctlr_put;
		}
		ctlr->num_chipselect = ctlr->max_native_cs = cs_num;

		of_property_read_u32(np, "bus-num", &bus_num);
		ctlr->bus_num = bus_num;

		dspi->devtype_data = of_device_get_match_data(&pdev->dev);
		if (!dspi->devtype_data) {
			dev_err(&pdev->dev, "can't get devtype_data\n");
			ret = -EFAULT;
			goto out_ctlr_put;
		}

		big_endian = of_device_is_big_endian(np);
	}
	if (big_endian) {
		dspi->pushr_cmd = 0;
		dspi->pushr_tx = 2;
	} else {
		dspi->pushr_cmd = 2;
		dspi->pushr_tx = 0;
	}

	if (spi_controller_is_target(ctlr) && is_s32g_dspi(dspi))
		dspi->devtype_data = &devtype_data[S32G_TARGET];

	if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
		ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	else
		ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);

	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base)) {
		ret = PTR_ERR(base);
		goto out_ctlr_put;
	}

	dspi->regmap = devm_regmap_init_mmio(&pdev->dev, base,
					     dspi->devtype_data->regmap);
	if (IS_ERR(dspi->regmap)) {
		dev_err(&pdev->dev, "failed to init regmap: %ld\n",
			PTR_ERR(dspi->regmap));
		ret = PTR_ERR(dspi->regmap);
		goto out_ctlr_put;
	}

	if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE) {
		dspi->regmap_pushr = devm_regmap_init_mmio(
			&pdev->dev, base + SPI_PUSHR,
			&dspi_regmap_config[DSPI_PUSHR]);
		if (IS_ERR(dspi->regmap_pushr)) {
			dev_err(&pdev->dev,
				"failed to init pushr regmap: %ld\n",
				PTR_ERR(dspi->regmap_pushr));
			ret = PTR_ERR(dspi->regmap_pushr);
			goto out_ctlr_put;
		}
	}

	dspi->clk = devm_clk_get_enabled(&pdev->dev, "dspi");
	if (IS_ERR(dspi->clk)) {
		ret = PTR_ERR(dspi->clk);
		dev_err(&pdev->dev, "unable to get clock\n");
		goto out_ctlr_put;
	}

	ret = dspi_init(dspi);
	if (ret)
		goto out_ctlr_put;

	dspi->irq = platform_get_irq(pdev, 0);
	if (dspi->irq <= 0) {
		dev_info(&pdev->dev,
			 "can't get platform irq, using poll mode\n");
		dspi->irq = 0;
		goto poll_mode;
	}

	init_completion(&dspi->xfer_done);

	ret = request_threaded_irq(dspi->irq, dspi_interrupt, NULL,
				   IRQF_SHARED, pdev->name, dspi);
	if (ret < 0) {
		dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
		goto out_ctlr_put;
	}

poll_mode:

	if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
		ret = dspi_request_dma(dspi, res->start);
		if (ret < 0) {
			dev_err(&pdev->dev, "can't get dma channels\n");
			goto out_free_irq;
		}
	}

	ctlr->max_speed_hz =
		clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor;

	if (dspi->devtype_data->trans_mode != DSPI_DMA_MODE)
		ctlr->ptp_sts_supported = true;

	ret = spi_register_controller(ctlr);
	if (ret != 0) {
		dev_err(&pdev->dev, "Problem registering DSPI ctlr\n");
		goto out_release_dma;
	}

	return ret;

out_release_dma:
	dspi_release_dma(dspi);
out_free_irq:
	if (dspi->irq)
		free_irq(dspi->irq, dspi);
out_ctlr_put:
	spi_controller_put(ctlr);

	return ret;
}

static void dspi_remove(struct platform_device *pdev)
{
	struct fsl_dspi *dspi = platform_get_drvdata(pdev);

	/* Disconnect from the SPI framework */
	spi_unregister_controller(dspi->ctlr);

	/* Disable RX and TX */
	regmap_update_bits(dspi->regmap, SPI_MCR,
			   SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF,
			   SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF);

	/* Stop Running */
	regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, SPI_MCR_HALT);

	dspi_release_dma(dspi);
	if (dspi->irq)
		free_irq(dspi->irq, dspi);
}

static void dspi_shutdown(struct platform_device *pdev)
{
	dspi_remove(pdev);
}

static struct platform_driver fsl_dspi_driver = {
	.driver.name		= DRIVER_NAME,
	.driver.of_match_table	= fsl_dspi_dt_ids,
	.driver.pm		= &dspi_pm,
	.probe			= dspi_probe,
	.remove			= dspi_remove,
	.shutdown		= dspi_shutdown,
};
module_platform_driver(fsl_dspi_driver);

MODULE_DESCRIPTION("Freescale DSPI Controller Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);