1 // SPDX-License-Identifier: GPL-2.0+ 2 // 3 // Copyright 2013 Freescale Semiconductor, Inc. 4 // Copyright 2020-2025 NXP 5 // 6 // Freescale DSPI driver 7 // This file contains a driver for the Freescale DSPI 8 9 #include <linux/clk.h> 10 #include <linux/delay.h> 11 #include <linux/dmaengine.h> 12 #include <linux/dma-mapping.h> 13 #include <linux/interrupt.h> 14 #include <linux/kernel.h> 15 #include <linux/module.h> 16 #include <linux/of.h> 17 #include <linux/platform_device.h> 18 #include <linux/pinctrl/consumer.h> 19 #include <linux/regmap.h> 20 #include <linux/spi/spi.h> 21 #include <linux/spi/spi-fsl-dspi.h> 22 23 #define DRIVER_NAME "fsl-dspi" 24 25 #define SPI_MCR 0x00 26 #define SPI_MCR_HOST BIT(31) 27 #define SPI_MCR_PCSIS(x) ((x) << 16) 28 #define SPI_MCR_CLR_TXF BIT(11) 29 #define SPI_MCR_CLR_RXF BIT(10) 30 #define SPI_MCR_XSPI BIT(3) 31 #define SPI_MCR_DIS_TXF BIT(13) 32 #define SPI_MCR_DIS_RXF BIT(12) 33 #define SPI_MCR_HALT BIT(0) 34 35 #define SPI_TCR 0x08 36 #define SPI_TCR_GET_TCNT(x) (((x) & GENMASK(31, 16)) >> 16) 37 38 #define SPI_CTAR(x) (0x0c + (((x) & GENMASK(1, 0)) * 4)) 39 #define SPI_CTAR_FMSZ(x) (((x) << 27) & GENMASK(30, 27)) 40 #define SPI_CTAR_CPOL BIT(26) 41 #define SPI_CTAR_CPHA BIT(25) 42 #define SPI_CTAR_LSBFE BIT(24) 43 #define SPI_CTAR_PCSSCK(x) (((x) << 22) & GENMASK(23, 22)) 44 #define SPI_CTAR_PASC(x) (((x) << 20) & GENMASK(21, 20)) 45 #define SPI_CTAR_PDT(x) (((x) << 18) & GENMASK(19, 18)) 46 #define SPI_CTAR_PBR(x) (((x) << 16) & GENMASK(17, 16)) 47 #define SPI_CTAR_CSSCK(x) (((x) << 12) & GENMASK(15, 12)) 48 #define SPI_CTAR_ASC(x) (((x) << 8) & GENMASK(11, 8)) 49 #define SPI_CTAR_DT(x) (((x) << 4) & GENMASK(7, 4)) 50 #define SPI_CTAR_BR(x) ((x) & GENMASK(3, 0)) 51 #define SPI_CTAR_SCALE_BITS 0xf 52 53 #define SPI_CTAR0_SLAVE 0x0c 54 55 #define SPI_SR 0x2c 56 #define SPI_SR_TCFQF BIT(31) 57 #define SPI_SR_TFUF BIT(27) 58 #define SPI_SR_TFFF BIT(25) 59 #define SPI_SR_CMDTCF BIT(23) 60 #define SPI_SR_SPEF BIT(21) 61 #define SPI_SR_RFOF BIT(19) 62 #define SPI_SR_TFIWF BIT(18) 63 #define SPI_SR_RFDF BIT(17) 64 #define SPI_SR_CMDFFF BIT(16) 65 #define SPI_SR_TXRXS BIT(30) 66 #define SPI_SR_CLEAR (SPI_SR_TCFQF | \ 67 SPI_SR_TFUF | SPI_SR_TFFF | \ 68 SPI_SR_CMDTCF | SPI_SR_SPEF | \ 69 SPI_SR_RFOF | SPI_SR_TFIWF | \ 70 SPI_SR_RFDF | SPI_SR_CMDFFF) 71 72 #define SPI_RSER_TFFFE BIT(25) 73 #define SPI_RSER_TFFFD BIT(24) 74 #define SPI_RSER_RFDFE BIT(17) 75 #define SPI_RSER_RFDFD BIT(16) 76 77 #define SPI_RSER 0x30 78 #define SPI_RSER_TCFQE BIT(31) 79 #define SPI_RSER_CMDTCFE BIT(23) 80 81 #define SPI_PUSHR 0x34 82 #define SPI_PUSHR_CMD_CONT BIT(15) 83 #define SPI_PUSHR_CMD_CTAS(x) (((x) << 12 & GENMASK(14, 12))) 84 #define SPI_PUSHR_CMD_EOQ BIT(11) 85 #define SPI_PUSHR_CMD_CTCNT BIT(10) 86 #define SPI_PUSHR_CMD_PCS(x) (BIT(x) & GENMASK(5, 0)) 87 88 #define SPI_PUSHR_SLAVE 0x34 89 90 #define SPI_POPR 0x38 91 92 #define SPI_TXFR0 0x3c 93 #define SPI_TXFR1 0x40 94 #define SPI_TXFR2 0x44 95 #define SPI_TXFR3 0x48 96 #define SPI_RXFR0 0x7c 97 #define SPI_RXFR1 0x80 98 #define SPI_RXFR2 0x84 99 #define SPI_RXFR3 0x88 100 101 #define SPI_CTARE(x) (0x11c + (((x) & GENMASK(1, 0)) * 4)) 102 #define SPI_CTARE_FMSZE(x) (((x) & 0x1) << 16) 103 #define SPI_CTARE_DTCP(x) ((x) & 0x7ff) 104 105 #define SPI_SREX 0x13c 106 107 #define SPI_FRAME_BITS(bits) SPI_CTAR_FMSZ((bits) - 1) 108 #define SPI_FRAME_EBITS(bits) SPI_CTARE_FMSZE(((bits) - 1) >> 4) 109 110 #define DMA_COMPLETION_TIMEOUT msecs_to_jiffies(3000) 111 112 struct chip_data { 113 u32 ctar_val; 114 }; 115 116 enum dspi_trans_mode { 117 DSPI_XSPI_MODE, 118 DSPI_DMA_MODE, 119 }; 120 121 struct fsl_dspi_devtype_data { 122 enum dspi_trans_mode trans_mode; 123 u8 max_clock_factor; 124 int fifo_size; 125 }; 126 127 enum { 128 LS1021A, 129 LS1012A, 130 LS1028A, 131 LS1043A, 132 LS1046A, 133 LS2080A, 134 LS2085A, 135 LX2160A, 136 MCF5441X, 137 VF610, 138 }; 139 140 static const struct fsl_dspi_devtype_data devtype_data[] = { 141 [VF610] = { 142 .trans_mode = DSPI_DMA_MODE, 143 .max_clock_factor = 2, 144 .fifo_size = 4, 145 }, 146 [LS1021A] = { 147 /* Has A-011218 DMA erratum */ 148 .trans_mode = DSPI_XSPI_MODE, 149 .max_clock_factor = 8, 150 .fifo_size = 4, 151 }, 152 [LS1012A] = { 153 /* Has A-011218 DMA erratum */ 154 .trans_mode = DSPI_XSPI_MODE, 155 .max_clock_factor = 8, 156 .fifo_size = 16, 157 }, 158 [LS1028A] = { 159 .trans_mode = DSPI_XSPI_MODE, 160 .max_clock_factor = 8, 161 .fifo_size = 4, 162 }, 163 [LS1043A] = { 164 /* Has A-011218 DMA erratum */ 165 .trans_mode = DSPI_XSPI_MODE, 166 .max_clock_factor = 8, 167 .fifo_size = 16, 168 }, 169 [LS1046A] = { 170 /* Has A-011218 DMA erratum */ 171 .trans_mode = DSPI_XSPI_MODE, 172 .max_clock_factor = 8, 173 .fifo_size = 16, 174 }, 175 [LS2080A] = { 176 .trans_mode = DSPI_XSPI_MODE, 177 .max_clock_factor = 8, 178 .fifo_size = 4, 179 }, 180 [LS2085A] = { 181 .trans_mode = DSPI_XSPI_MODE, 182 .max_clock_factor = 8, 183 .fifo_size = 4, 184 }, 185 [LX2160A] = { 186 .trans_mode = DSPI_XSPI_MODE, 187 .max_clock_factor = 8, 188 .fifo_size = 4, 189 }, 190 [MCF5441X] = { 191 .trans_mode = DSPI_DMA_MODE, 192 .max_clock_factor = 8, 193 .fifo_size = 16, 194 }, 195 }; 196 197 struct fsl_dspi_dma { 198 u32 *tx_dma_buf; 199 struct dma_chan *chan_tx; 200 dma_addr_t tx_dma_phys; 201 struct completion cmd_tx_complete; 202 struct dma_async_tx_descriptor *tx_desc; 203 204 u32 *rx_dma_buf; 205 struct dma_chan *chan_rx; 206 dma_addr_t rx_dma_phys; 207 struct completion cmd_rx_complete; 208 struct dma_async_tx_descriptor *rx_desc; 209 }; 210 211 struct fsl_dspi { 212 struct spi_controller *ctlr; 213 struct platform_device *pdev; 214 215 struct regmap *regmap; 216 struct regmap *regmap_pushr; 217 int irq; 218 struct clk *clk; 219 220 struct spi_transfer *cur_transfer; 221 struct spi_message *cur_msg; 222 struct chip_data *cur_chip; 223 size_t progress; 224 size_t len; 225 const void *tx; 226 void *rx; 227 u16 tx_cmd; 228 const struct fsl_dspi_devtype_data *devtype_data; 229 230 struct completion xfer_done; 231 232 struct fsl_dspi_dma *dma; 233 234 int oper_word_size; 235 int oper_bits_per_word; 236 237 int words_in_flight; 238 239 /* 240 * Offsets for CMD and TXDATA within SPI_PUSHR when accessed 241 * individually (in XSPI mode) 242 */ 243 int pushr_cmd; 244 int pushr_tx; 245 246 void (*host_to_dev)(struct fsl_dspi *dspi, u32 *txdata); 247 void (*dev_to_host)(struct fsl_dspi *dspi, u32 rxdata); 248 }; 249 250 static void dspi_native_host_to_dev(struct fsl_dspi *dspi, u32 *txdata) 251 { 252 switch (dspi->oper_word_size) { 253 case 1: 254 *txdata = *(u8 *)dspi->tx; 255 break; 256 case 2: 257 *txdata = *(u16 *)dspi->tx; 258 break; 259 case 4: 260 *txdata = *(u32 *)dspi->tx; 261 break; 262 } 263 dspi->tx += dspi->oper_word_size; 264 } 265 266 static void dspi_native_dev_to_host(struct fsl_dspi *dspi, u32 rxdata) 267 { 268 switch (dspi->oper_word_size) { 269 case 1: 270 *(u8 *)dspi->rx = rxdata; 271 break; 272 case 2: 273 *(u16 *)dspi->rx = rxdata; 274 break; 275 case 4: 276 *(u32 *)dspi->rx = rxdata; 277 break; 278 } 279 dspi->rx += dspi->oper_word_size; 280 } 281 282 static void dspi_8on32_host_to_dev(struct fsl_dspi *dspi, u32 *txdata) 283 { 284 *txdata = (__force u32)cpu_to_be32(*(u32 *)dspi->tx); 285 dspi->tx += sizeof(u32); 286 } 287 288 static void dspi_8on32_dev_to_host(struct fsl_dspi *dspi, u32 rxdata) 289 { 290 *(u32 *)dspi->rx = be32_to_cpu((__force __be32)rxdata); 291 dspi->rx += sizeof(u32); 292 } 293 294 static void dspi_8on16_host_to_dev(struct fsl_dspi *dspi, u32 *txdata) 295 { 296 *txdata = (__force u32)cpu_to_be16(*(u16 *)dspi->tx); 297 dspi->tx += sizeof(u16); 298 } 299 300 static void dspi_8on16_dev_to_host(struct fsl_dspi *dspi, u32 rxdata) 301 { 302 *(u16 *)dspi->rx = be16_to_cpu((__force __be16)rxdata); 303 dspi->rx += sizeof(u16); 304 } 305 306 static void dspi_16on32_host_to_dev(struct fsl_dspi *dspi, u32 *txdata) 307 { 308 u16 hi = *(u16 *)dspi->tx; 309 u16 lo = *(u16 *)(dspi->tx + 2); 310 311 *txdata = (u32)hi << 16 | lo; 312 dspi->tx += sizeof(u32); 313 } 314 315 static void dspi_16on32_dev_to_host(struct fsl_dspi *dspi, u32 rxdata) 316 { 317 u16 hi = rxdata & 0xffff; 318 u16 lo = rxdata >> 16; 319 320 *(u16 *)dspi->rx = lo; 321 *(u16 *)(dspi->rx + 2) = hi; 322 dspi->rx += sizeof(u32); 323 } 324 325 /* 326 * Pop one word from the TX buffer for pushing into the 327 * PUSHR register (TX FIFO) 328 */ 329 static u32 dspi_pop_tx(struct fsl_dspi *dspi) 330 { 331 u32 txdata = 0; 332 333 if (dspi->tx) 334 dspi->host_to_dev(dspi, &txdata); 335 dspi->len -= dspi->oper_word_size; 336 return txdata; 337 } 338 339 /* Prepare one TX FIFO entry (txdata plus cmd) */ 340 static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi) 341 { 342 u16 cmd = dspi->tx_cmd, data = dspi_pop_tx(dspi); 343 344 if (spi_controller_is_target(dspi->ctlr)) 345 return data; 346 347 if (dspi->len > 0) 348 cmd |= SPI_PUSHR_CMD_CONT; 349 return cmd << 16 | data; 350 } 351 352 /* Push one word to the RX buffer from the POPR register (RX FIFO) */ 353 static void dspi_push_rx(struct fsl_dspi *dspi, u32 rxdata) 354 { 355 if (!dspi->rx) 356 return; 357 dspi->dev_to_host(dspi, rxdata); 358 } 359 360 static void dspi_tx_dma_callback(void *arg) 361 { 362 struct fsl_dspi *dspi = arg; 363 struct fsl_dspi_dma *dma = dspi->dma; 364 365 complete(&dma->cmd_tx_complete); 366 } 367 368 static void dspi_rx_dma_callback(void *arg) 369 { 370 struct fsl_dspi *dspi = arg; 371 struct fsl_dspi_dma *dma = dspi->dma; 372 int i; 373 374 if (dspi->rx) { 375 for (i = 0; i < dspi->words_in_flight; i++) 376 dspi_push_rx(dspi, dspi->dma->rx_dma_buf[i]); 377 } 378 379 complete(&dma->cmd_rx_complete); 380 } 381 382 static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi) 383 { 384 struct device *dev = &dspi->pdev->dev; 385 struct fsl_dspi_dma *dma = dspi->dma; 386 int time_left; 387 int i; 388 389 for (i = 0; i < dspi->words_in_flight; i++) 390 dspi->dma->tx_dma_buf[i] = dspi_pop_tx_pushr(dspi); 391 392 dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx, 393 dma->tx_dma_phys, 394 dspi->words_in_flight * 395 DMA_SLAVE_BUSWIDTH_4_BYTES, 396 DMA_MEM_TO_DEV, 397 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 398 if (!dma->tx_desc) { 399 dev_err(dev, "Not able to get desc for DMA xfer\n"); 400 return -EIO; 401 } 402 403 dma->tx_desc->callback = dspi_tx_dma_callback; 404 dma->tx_desc->callback_param = dspi; 405 if (dma_submit_error(dmaengine_submit(dma->tx_desc))) { 406 dev_err(dev, "DMA submit failed\n"); 407 return -EINVAL; 408 } 409 410 dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx, 411 dma->rx_dma_phys, 412 dspi->words_in_flight * 413 DMA_SLAVE_BUSWIDTH_4_BYTES, 414 DMA_DEV_TO_MEM, 415 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 416 if (!dma->rx_desc) { 417 dev_err(dev, "Not able to get desc for DMA xfer\n"); 418 return -EIO; 419 } 420 421 dma->rx_desc->callback = dspi_rx_dma_callback; 422 dma->rx_desc->callback_param = dspi; 423 if (dma_submit_error(dmaengine_submit(dma->rx_desc))) { 424 dev_err(dev, "DMA submit failed\n"); 425 return -EINVAL; 426 } 427 428 reinit_completion(&dspi->dma->cmd_rx_complete); 429 reinit_completion(&dspi->dma->cmd_tx_complete); 430 431 dma_async_issue_pending(dma->chan_rx); 432 dma_async_issue_pending(dma->chan_tx); 433 434 if (spi_controller_is_target(dspi->ctlr)) { 435 wait_for_completion_interruptible(&dspi->dma->cmd_rx_complete); 436 return 0; 437 } 438 439 time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete, 440 DMA_COMPLETION_TIMEOUT); 441 if (time_left == 0) { 442 dev_err(dev, "DMA tx timeout\n"); 443 dmaengine_terminate_all(dma->chan_tx); 444 dmaengine_terminate_all(dma->chan_rx); 445 return -ETIMEDOUT; 446 } 447 448 time_left = wait_for_completion_timeout(&dspi->dma->cmd_rx_complete, 449 DMA_COMPLETION_TIMEOUT); 450 if (time_left == 0) { 451 dev_err(dev, "DMA rx timeout\n"); 452 dmaengine_terminate_all(dma->chan_tx); 453 dmaengine_terminate_all(dma->chan_rx); 454 return -ETIMEDOUT; 455 } 456 457 return 0; 458 } 459 460 static void dspi_setup_accel(struct fsl_dspi *dspi); 461 462 static int dspi_dma_xfer(struct fsl_dspi *dspi) 463 { 464 struct spi_message *message = dspi->cur_msg; 465 struct device *dev = &dspi->pdev->dev; 466 int ret = 0; 467 468 /* 469 * dspi->len gets decremented by dspi_pop_tx_pushr in 470 * dspi_next_xfer_dma_submit 471 */ 472 while (dspi->len) { 473 /* Figure out operational bits-per-word for this chunk */ 474 dspi_setup_accel(dspi); 475 476 dspi->words_in_flight = dspi->len / dspi->oper_word_size; 477 if (dspi->words_in_flight > dspi->devtype_data->fifo_size) 478 dspi->words_in_flight = dspi->devtype_data->fifo_size; 479 480 message->actual_length += dspi->words_in_flight * 481 dspi->oper_word_size; 482 483 ret = dspi_next_xfer_dma_submit(dspi); 484 if (ret) { 485 dev_err(dev, "DMA transfer failed\n"); 486 break; 487 } 488 } 489 490 return ret; 491 } 492 493 static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr) 494 { 495 int dma_bufsize = dspi->devtype_data->fifo_size * 2; 496 struct device *dev = &dspi->pdev->dev; 497 struct dma_slave_config cfg; 498 struct fsl_dspi_dma *dma; 499 int ret; 500 501 dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL); 502 if (!dma) 503 return -ENOMEM; 504 505 dma->chan_rx = dma_request_chan(dev, "rx"); 506 if (IS_ERR(dma->chan_rx)) 507 return dev_err_probe(dev, PTR_ERR(dma->chan_rx), "rx dma channel not available\n"); 508 509 dma->chan_tx = dma_request_chan(dev, "tx"); 510 if (IS_ERR(dma->chan_tx)) { 511 ret = dev_err_probe(dev, PTR_ERR(dma->chan_tx), "tx dma channel not available\n"); 512 goto err_tx_channel; 513 } 514 515 dma->tx_dma_buf = dma_alloc_coherent(dma->chan_tx->device->dev, 516 dma_bufsize, &dma->tx_dma_phys, 517 GFP_KERNEL); 518 if (!dma->tx_dma_buf) { 519 ret = -ENOMEM; 520 goto err_tx_dma_buf; 521 } 522 523 dma->rx_dma_buf = dma_alloc_coherent(dma->chan_rx->device->dev, 524 dma_bufsize, &dma->rx_dma_phys, 525 GFP_KERNEL); 526 if (!dma->rx_dma_buf) { 527 ret = -ENOMEM; 528 goto err_rx_dma_buf; 529 } 530 531 memset(&cfg, 0, sizeof(cfg)); 532 cfg.src_addr = phy_addr + SPI_POPR; 533 cfg.dst_addr = phy_addr + SPI_PUSHR; 534 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 535 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 536 cfg.src_maxburst = 1; 537 cfg.dst_maxburst = 1; 538 539 cfg.direction = DMA_DEV_TO_MEM; 540 ret = dmaengine_slave_config(dma->chan_rx, &cfg); 541 if (ret) { 542 dev_err_probe(dev, ret, "can't configure rx dma channel\n"); 543 goto err_slave_config; 544 } 545 546 cfg.direction = DMA_MEM_TO_DEV; 547 ret = dmaengine_slave_config(dma->chan_tx, &cfg); 548 if (ret) { 549 dev_err_probe(dev, ret, "can't configure tx dma channel\n"); 550 goto err_slave_config; 551 } 552 553 dspi->dma = dma; 554 init_completion(&dma->cmd_tx_complete); 555 init_completion(&dma->cmd_rx_complete); 556 557 return 0; 558 559 err_slave_config: 560 dma_free_coherent(dma->chan_rx->device->dev, 561 dma_bufsize, dma->rx_dma_buf, dma->rx_dma_phys); 562 err_rx_dma_buf: 563 dma_free_coherent(dma->chan_tx->device->dev, 564 dma_bufsize, dma->tx_dma_buf, dma->tx_dma_phys); 565 err_tx_dma_buf: 566 dma_release_channel(dma->chan_tx); 567 err_tx_channel: 568 dma_release_channel(dma->chan_rx); 569 570 devm_kfree(dev, dma); 571 dspi->dma = NULL; 572 573 return ret; 574 } 575 576 static void dspi_release_dma(struct fsl_dspi *dspi) 577 { 578 int dma_bufsize = dspi->devtype_data->fifo_size * 2; 579 struct fsl_dspi_dma *dma = dspi->dma; 580 581 if (!dma) 582 return; 583 584 if (dma->chan_tx) { 585 dma_free_coherent(dma->chan_tx->device->dev, dma_bufsize, 586 dma->tx_dma_buf, dma->tx_dma_phys); 587 dma_release_channel(dma->chan_tx); 588 } 589 590 if (dma->chan_rx) { 591 dma_free_coherent(dma->chan_rx->device->dev, dma_bufsize, 592 dma->rx_dma_buf, dma->rx_dma_phys); 593 dma_release_channel(dma->chan_rx); 594 } 595 } 596 597 static void hz_to_spi_baud(char *pbr, char *br, int speed_hz, 598 unsigned long clkrate) 599 { 600 /* Valid baud rate pre-scaler values */ 601 int pbr_tbl[4] = {2, 3, 5, 7}; 602 int brs[16] = { 2, 4, 6, 8, 603 16, 32, 64, 128, 604 256, 512, 1024, 2048, 605 4096, 8192, 16384, 32768 }; 606 int scale_needed, scale, minscale = INT_MAX; 607 int i, j; 608 609 scale_needed = clkrate / speed_hz; 610 if (clkrate % speed_hz) 611 scale_needed++; 612 613 for (i = 0; i < ARRAY_SIZE(brs); i++) 614 for (j = 0; j < ARRAY_SIZE(pbr_tbl); j++) { 615 scale = brs[i] * pbr_tbl[j]; 616 if (scale >= scale_needed) { 617 if (scale < minscale) { 618 minscale = scale; 619 *br = i; 620 *pbr = j; 621 } 622 break; 623 } 624 } 625 626 if (minscale == INT_MAX) { 627 pr_warn("Can not find valid baud rate,speed_hz is %d,clkrate is %ld, we use the max prescaler value.\n", 628 speed_hz, clkrate); 629 *pbr = ARRAY_SIZE(pbr_tbl) - 1; 630 *br = ARRAY_SIZE(brs) - 1; 631 } 632 } 633 634 static void ns_delay_scale(char *psc, char *sc, int delay_ns, 635 unsigned long clkrate) 636 { 637 int scale_needed, scale, minscale = INT_MAX; 638 int pscale_tbl[4] = {1, 3, 5, 7}; 639 u32 remainder; 640 int i, j; 641 642 scale_needed = div_u64_rem((u64)delay_ns * clkrate, NSEC_PER_SEC, 643 &remainder); 644 if (remainder) 645 scale_needed++; 646 647 for (i = 0; i < ARRAY_SIZE(pscale_tbl); i++) 648 for (j = 0; j <= SPI_CTAR_SCALE_BITS; j++) { 649 scale = pscale_tbl[i] * (2 << j); 650 if (scale >= scale_needed) { 651 if (scale < minscale) { 652 minscale = scale; 653 *psc = i; 654 *sc = j; 655 } 656 break; 657 } 658 } 659 660 if (minscale == INT_MAX) { 661 pr_warn("Cannot find correct scale values for %dns delay at clkrate %ld, using max prescaler value", 662 delay_ns, clkrate); 663 *psc = ARRAY_SIZE(pscale_tbl) - 1; 664 *sc = SPI_CTAR_SCALE_BITS; 665 } 666 } 667 668 static void dspi_pushr_cmd_write(struct fsl_dspi *dspi, u16 cmd) 669 { 670 /* 671 * The only time when the PCS doesn't need continuation after this word 672 * is when it's last. We need to look ahead, because we actually call 673 * dspi_pop_tx (the function that decrements dspi->len) _after_ 674 * dspi_pushr_cmd_write with XSPI mode. As for how much in advance? One 675 * word is enough. If there's more to transmit than that, 676 * dspi_xspi_write will know to split the FIFO writes in 2, and 677 * generate a new PUSHR command with the final word that will have PCS 678 * deasserted (not continued) here. 679 */ 680 if (dspi->len > dspi->oper_word_size) 681 cmd |= SPI_PUSHR_CMD_CONT; 682 regmap_write(dspi->regmap_pushr, dspi->pushr_cmd, cmd); 683 } 684 685 static void dspi_pushr_txdata_write(struct fsl_dspi *dspi, u16 txdata) 686 { 687 regmap_write(dspi->regmap_pushr, dspi->pushr_tx, txdata); 688 } 689 690 static void dspi_xspi_fifo_write(struct fsl_dspi *dspi, int num_words) 691 { 692 int num_bytes = num_words * dspi->oper_word_size; 693 u16 tx_cmd = dspi->tx_cmd; 694 695 /* 696 * If the PCS needs to de-assert (i.e. we're at the end of the buffer 697 * and cs_change does not want the PCS to stay on), then we need a new 698 * PUSHR command, since this one (for the body of the buffer) 699 * necessarily has the CONT bit set. 700 * So send one word less during this go, to force a split and a command 701 * with a single word next time, when CONT will be unset. 702 */ 703 if (!(dspi->tx_cmd & SPI_PUSHR_CMD_CONT) && num_bytes == dspi->len) 704 tx_cmd |= SPI_PUSHR_CMD_EOQ; 705 706 /* Update CTARE */ 707 regmap_write(dspi->regmap, SPI_CTARE(0), 708 SPI_FRAME_EBITS(dspi->oper_bits_per_word) | 709 SPI_CTARE_DTCP(num_words)); 710 711 /* 712 * Write the CMD FIFO entry first, and then the two 713 * corresponding TX FIFO entries (or one...). 714 */ 715 dspi_pushr_cmd_write(dspi, tx_cmd); 716 717 /* Fill TX FIFO with as many transfers as possible */ 718 while (num_words--) { 719 u32 data = dspi_pop_tx(dspi); 720 721 dspi_pushr_txdata_write(dspi, data & 0xFFFF); 722 if (dspi->oper_bits_per_word > 16) 723 dspi_pushr_txdata_write(dspi, data >> 16); 724 } 725 } 726 727 static u32 dspi_popr_read(struct fsl_dspi *dspi) 728 { 729 u32 rxdata = 0; 730 731 regmap_read(dspi->regmap, SPI_POPR, &rxdata); 732 return rxdata; 733 } 734 735 static void dspi_fifo_read(struct fsl_dspi *dspi) 736 { 737 int num_fifo_entries = dspi->words_in_flight; 738 739 /* Read one FIFO entry and push to rx buffer */ 740 while (num_fifo_entries--) 741 dspi_push_rx(dspi, dspi_popr_read(dspi)); 742 } 743 744 static void dspi_setup_accel(struct fsl_dspi *dspi) 745 { 746 struct spi_transfer *xfer = dspi->cur_transfer; 747 bool odd = !!(dspi->len & 1); 748 749 /* No accel for frames not multiple of 8 bits at the moment */ 750 if (xfer->bits_per_word % 8) 751 goto no_accel; 752 753 if (!odd && dspi->len <= dspi->devtype_data->fifo_size * 2) { 754 dspi->oper_bits_per_word = 16; 755 } else if (odd && dspi->len <= dspi->devtype_data->fifo_size) { 756 dspi->oper_bits_per_word = 8; 757 } else { 758 /* Start off with maximum supported by hardware */ 759 if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE) 760 dspi->oper_bits_per_word = 32; 761 else 762 dspi->oper_bits_per_word = 16; 763 764 /* 765 * And go down only if the buffer can't be sent with 766 * words this big 767 */ 768 do { 769 if (dspi->len >= DIV_ROUND_UP(dspi->oper_bits_per_word, 8)) 770 break; 771 772 dspi->oper_bits_per_word /= 2; 773 } while (dspi->oper_bits_per_word > 8); 774 } 775 776 if (xfer->bits_per_word == 8 && dspi->oper_bits_per_word == 32) { 777 dspi->dev_to_host = dspi_8on32_dev_to_host; 778 dspi->host_to_dev = dspi_8on32_host_to_dev; 779 } else if (xfer->bits_per_word == 8 && dspi->oper_bits_per_word == 16) { 780 dspi->dev_to_host = dspi_8on16_dev_to_host; 781 dspi->host_to_dev = dspi_8on16_host_to_dev; 782 } else if (xfer->bits_per_word == 16 && dspi->oper_bits_per_word == 32) { 783 dspi->dev_to_host = dspi_16on32_dev_to_host; 784 dspi->host_to_dev = dspi_16on32_host_to_dev; 785 } else { 786 no_accel: 787 dspi->dev_to_host = dspi_native_dev_to_host; 788 dspi->host_to_dev = dspi_native_host_to_dev; 789 dspi->oper_bits_per_word = xfer->bits_per_word; 790 } 791 792 dspi->oper_word_size = DIV_ROUND_UP(dspi->oper_bits_per_word, 8); 793 794 /* 795 * Update CTAR here (code is common for XSPI and DMA modes). 796 * We will update CTARE in the portion specific to XSPI, when we 797 * also know the preload value (DTCP). 798 */ 799 regmap_write(dspi->regmap, SPI_CTAR(0), 800 dspi->cur_chip->ctar_val | 801 SPI_FRAME_BITS(dspi->oper_bits_per_word)); 802 } 803 804 static void dspi_fifo_write(struct fsl_dspi *dspi) 805 { 806 int num_fifo_entries = dspi->devtype_data->fifo_size; 807 struct spi_transfer *xfer = dspi->cur_transfer; 808 struct spi_message *msg = dspi->cur_msg; 809 int num_words, num_bytes; 810 811 dspi_setup_accel(dspi); 812 813 /* In XSPI mode each 32-bit word occupies 2 TX FIFO entries */ 814 if (dspi->oper_word_size == 4) 815 num_fifo_entries /= 2; 816 817 /* 818 * Integer division intentionally trims off odd (or non-multiple of 4) 819 * numbers of bytes at the end of the buffer, which will be sent next 820 * time using a smaller oper_word_size. 821 */ 822 num_words = dspi->len / dspi->oper_word_size; 823 if (num_words > num_fifo_entries) 824 num_words = num_fifo_entries; 825 826 /* Update total number of bytes that were transferred */ 827 num_bytes = num_words * dspi->oper_word_size; 828 msg->actual_length += num_bytes; 829 dspi->progress += num_bytes / DIV_ROUND_UP(xfer->bits_per_word, 8); 830 831 /* 832 * Update shared variable for use in the next interrupt (both in 833 * dspi_fifo_read and in dspi_fifo_write). 834 */ 835 dspi->words_in_flight = num_words; 836 837 spi_take_timestamp_pre(dspi->ctlr, xfer, dspi->progress, !dspi->irq); 838 839 dspi_xspi_fifo_write(dspi, num_words); 840 /* 841 * Everything after this point is in a potential race with the next 842 * interrupt, so we must never use dspi->words_in_flight again since it 843 * might already be modified by the next dspi_fifo_write. 844 */ 845 846 spi_take_timestamp_post(dspi->ctlr, dspi->cur_transfer, 847 dspi->progress, !dspi->irq); 848 } 849 850 static int dspi_rxtx(struct fsl_dspi *dspi) 851 { 852 dspi_fifo_read(dspi); 853 854 if (!dspi->len) 855 /* Success! */ 856 return 0; 857 858 dspi_fifo_write(dspi); 859 860 return -EINPROGRESS; 861 } 862 863 static int dspi_poll(struct fsl_dspi *dspi) 864 { 865 int tries = 1000; 866 u32 spi_sr; 867 868 do { 869 regmap_read(dspi->regmap, SPI_SR, &spi_sr); 870 regmap_write(dspi->regmap, SPI_SR, spi_sr); 871 872 if (spi_sr & SPI_SR_CMDTCF) 873 break; 874 } while (--tries); 875 876 if (!tries) 877 return -ETIMEDOUT; 878 879 return dspi_rxtx(dspi); 880 } 881 882 static irqreturn_t dspi_interrupt(int irq, void *dev_id) 883 { 884 struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id; 885 u32 spi_sr; 886 887 regmap_read(dspi->regmap, SPI_SR, &spi_sr); 888 regmap_write(dspi->regmap, SPI_SR, spi_sr); 889 890 if (!(spi_sr & SPI_SR_CMDTCF)) 891 return IRQ_NONE; 892 893 if (dspi_rxtx(dspi) == 0) 894 complete(&dspi->xfer_done); 895 896 return IRQ_HANDLED; 897 } 898 899 static void dspi_assert_cs(struct spi_device *spi, bool *cs) 900 { 901 if (!spi_get_csgpiod(spi, 0) || *cs) 902 return; 903 904 gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), true); 905 *cs = true; 906 } 907 908 static void dspi_deassert_cs(struct spi_device *spi, bool *cs) 909 { 910 if (!spi_get_csgpiod(spi, 0) || !*cs) 911 return; 912 913 gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), false); 914 *cs = false; 915 } 916 917 static int dspi_transfer_one_message(struct spi_controller *ctlr, 918 struct spi_message *message) 919 { 920 struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr); 921 struct spi_device *spi = message->spi; 922 struct spi_transfer *transfer; 923 bool cs = false; 924 int status = 0; 925 u32 val = 0; 926 bool cs_change = false; 927 928 message->actual_length = 0; 929 930 /* Put DSPI in running mode if halted. */ 931 regmap_read(dspi->regmap, SPI_MCR, &val); 932 if (val & SPI_MCR_HALT) { 933 regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, 0); 934 while (regmap_read(dspi->regmap, SPI_SR, &val) >= 0 && 935 !(val & SPI_SR_TXRXS)) 936 ; 937 } 938 939 list_for_each_entry(transfer, &message->transfers, transfer_list) { 940 dspi->cur_transfer = transfer; 941 dspi->cur_msg = message; 942 dspi->cur_chip = spi_get_ctldata(spi); 943 944 dspi_assert_cs(spi, &cs); 945 946 /* Prepare command word for CMD FIFO */ 947 dspi->tx_cmd = SPI_PUSHR_CMD_CTAS(0); 948 if (!spi_get_csgpiod(spi, 0)) 949 dspi->tx_cmd |= SPI_PUSHR_CMD_PCS(spi_get_chipselect(spi, 0)); 950 951 if (list_is_last(&dspi->cur_transfer->transfer_list, 952 &dspi->cur_msg->transfers)) { 953 /* Leave PCS activated after last transfer when 954 * cs_change is set. 955 */ 956 if (transfer->cs_change) 957 dspi->tx_cmd |= SPI_PUSHR_CMD_CONT; 958 } else { 959 /* Keep PCS active between transfers in same message 960 * when cs_change is not set, and de-activate PCS 961 * between transfers in the same message when 962 * cs_change is set. 963 */ 964 if (!transfer->cs_change) 965 dspi->tx_cmd |= SPI_PUSHR_CMD_CONT; 966 } 967 968 cs_change = transfer->cs_change; 969 dspi->tx = transfer->tx_buf; 970 dspi->rx = transfer->rx_buf; 971 dspi->len = transfer->len; 972 dspi->progress = 0; 973 974 regmap_update_bits(dspi->regmap, SPI_MCR, 975 SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF, 976 SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF); 977 978 regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR); 979 980 spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer, 981 dspi->progress, !dspi->irq); 982 983 if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) { 984 status = dspi_dma_xfer(dspi); 985 } else { 986 dspi_fifo_write(dspi); 987 988 if (dspi->irq) { 989 wait_for_completion(&dspi->xfer_done); 990 reinit_completion(&dspi->xfer_done); 991 } else { 992 do { 993 status = dspi_poll(dspi); 994 } while (status == -EINPROGRESS); 995 } 996 } 997 if (status) 998 break; 999 1000 spi_transfer_delay_exec(transfer); 1001 1002 if (!(dspi->tx_cmd & SPI_PUSHR_CMD_CONT)) 1003 dspi_deassert_cs(spi, &cs); 1004 } 1005 1006 if (status || !cs_change) { 1007 /* Put DSPI in stop mode */ 1008 regmap_update_bits(dspi->regmap, SPI_MCR, 1009 SPI_MCR_HALT, SPI_MCR_HALT); 1010 while (regmap_read(dspi->regmap, SPI_SR, &val) >= 0 && 1011 val & SPI_SR_TXRXS) 1012 ; 1013 } 1014 1015 message->status = status; 1016 spi_finalize_current_message(ctlr); 1017 1018 return status; 1019 } 1020 1021 static int dspi_setup(struct spi_device *spi) 1022 { 1023 struct fsl_dspi *dspi = spi_controller_get_devdata(spi->controller); 1024 u32 period_ns = DIV_ROUND_UP(NSEC_PER_SEC, spi->max_speed_hz); 1025 unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0; 1026 u32 quarter_period_ns = DIV_ROUND_UP(period_ns, 4); 1027 u32 cs_sck_delay = 0, sck_cs_delay = 0; 1028 struct fsl_dspi_platform_data *pdata; 1029 unsigned char pasc = 0, asc = 0; 1030 struct gpio_desc *gpio_cs; 1031 struct chip_data *chip; 1032 unsigned long clkrate; 1033 bool cs = true; 1034 int val; 1035 1036 /* Only alloc on first setup */ 1037 chip = spi_get_ctldata(spi); 1038 if (chip == NULL) { 1039 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); 1040 if (!chip) 1041 return -ENOMEM; 1042 } 1043 1044 pdata = dev_get_platdata(&dspi->pdev->dev); 1045 1046 if (!pdata) { 1047 val = spi_delay_to_ns(&spi->cs_setup, NULL); 1048 cs_sck_delay = val >= 0 ? val : 0; 1049 if (!cs_sck_delay) 1050 of_property_read_u32(spi->dev.of_node, 1051 "fsl,spi-cs-sck-delay", 1052 &cs_sck_delay); 1053 1054 val = spi_delay_to_ns(&spi->cs_hold, NULL); 1055 sck_cs_delay = val >= 0 ? val : 0; 1056 if (!sck_cs_delay) 1057 of_property_read_u32(spi->dev.of_node, 1058 "fsl,spi-sck-cs-delay", 1059 &sck_cs_delay); 1060 } else { 1061 cs_sck_delay = pdata->cs_sck_delay; 1062 sck_cs_delay = pdata->sck_cs_delay; 1063 } 1064 1065 /* Since tCSC and tASC apply to continuous transfers too, avoid SCK 1066 * glitches of half a cycle by never allowing tCSC + tASC to go below 1067 * half a SCK period. 1068 */ 1069 if (cs_sck_delay < quarter_period_ns) 1070 cs_sck_delay = quarter_period_ns; 1071 if (sck_cs_delay < quarter_period_ns) 1072 sck_cs_delay = quarter_period_ns; 1073 1074 dev_dbg(&spi->dev, 1075 "DSPI controller timing params: CS-to-SCK delay %u ns, SCK-to-CS delay %u ns\n", 1076 cs_sck_delay, sck_cs_delay); 1077 1078 clkrate = clk_get_rate(dspi->clk); 1079 hz_to_spi_baud(&pbr, &br, spi->max_speed_hz, clkrate); 1080 1081 /* Set PCS to SCK delay scale values */ 1082 ns_delay_scale(&pcssck, &cssck, cs_sck_delay, clkrate); 1083 1084 /* Set After SCK delay scale values */ 1085 ns_delay_scale(&pasc, &asc, sck_cs_delay, clkrate); 1086 1087 chip->ctar_val = 0; 1088 if (spi->mode & SPI_CPOL) 1089 chip->ctar_val |= SPI_CTAR_CPOL; 1090 if (spi->mode & SPI_CPHA) 1091 chip->ctar_val |= SPI_CTAR_CPHA; 1092 1093 if (!spi_controller_is_target(dspi->ctlr)) { 1094 chip->ctar_val |= SPI_CTAR_PCSSCK(pcssck) | 1095 SPI_CTAR_CSSCK(cssck) | 1096 SPI_CTAR_PASC(pasc) | 1097 SPI_CTAR_ASC(asc) | 1098 SPI_CTAR_PBR(pbr) | 1099 SPI_CTAR_BR(br); 1100 1101 if (spi->mode & SPI_LSB_FIRST) 1102 chip->ctar_val |= SPI_CTAR_LSBFE; 1103 } 1104 1105 gpio_cs = spi_get_csgpiod(spi, 0); 1106 if (gpio_cs) 1107 gpiod_direction_output(gpio_cs, false); 1108 1109 dspi_deassert_cs(spi, &cs); 1110 1111 spi_set_ctldata(spi, chip); 1112 1113 return 0; 1114 } 1115 1116 static void dspi_cleanup(struct spi_device *spi) 1117 { 1118 struct chip_data *chip = spi_get_ctldata(spi); 1119 1120 dev_dbg(&spi->dev, "spi_device %u.%u cleanup\n", 1121 spi->controller->bus_num, spi_get_chipselect(spi, 0)); 1122 1123 kfree(chip); 1124 } 1125 1126 static const struct of_device_id fsl_dspi_dt_ids[] = { 1127 { 1128 .compatible = "fsl,vf610-dspi", 1129 .data = &devtype_data[VF610], 1130 }, { 1131 .compatible = "fsl,ls1021a-v1.0-dspi", 1132 .data = &devtype_data[LS1021A], 1133 }, { 1134 .compatible = "fsl,ls1012a-dspi", 1135 .data = &devtype_data[LS1012A], 1136 }, { 1137 .compatible = "fsl,ls1028a-dspi", 1138 .data = &devtype_data[LS1028A], 1139 }, { 1140 .compatible = "fsl,ls1043a-dspi", 1141 .data = &devtype_data[LS1043A], 1142 }, { 1143 .compatible = "fsl,ls1046a-dspi", 1144 .data = &devtype_data[LS1046A], 1145 }, { 1146 .compatible = "fsl,ls2080a-dspi", 1147 .data = &devtype_data[LS2080A], 1148 }, { 1149 .compatible = "fsl,ls2085a-dspi", 1150 .data = &devtype_data[LS2085A], 1151 }, { 1152 .compatible = "fsl,lx2160a-dspi", 1153 .data = &devtype_data[LX2160A], 1154 }, 1155 { /* sentinel */ } 1156 }; 1157 MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids); 1158 1159 #ifdef CONFIG_PM_SLEEP 1160 static int dspi_suspend(struct device *dev) 1161 { 1162 struct fsl_dspi *dspi = dev_get_drvdata(dev); 1163 1164 if (dspi->irq) 1165 disable_irq(dspi->irq); 1166 spi_controller_suspend(dspi->ctlr); 1167 clk_disable_unprepare(dspi->clk); 1168 1169 pinctrl_pm_select_sleep_state(dev); 1170 1171 return 0; 1172 } 1173 1174 static int dspi_resume(struct device *dev) 1175 { 1176 struct fsl_dspi *dspi = dev_get_drvdata(dev); 1177 int ret; 1178 1179 pinctrl_pm_select_default_state(dev); 1180 1181 ret = clk_prepare_enable(dspi->clk); 1182 if (ret) 1183 return ret; 1184 spi_controller_resume(dspi->ctlr); 1185 if (dspi->irq) 1186 enable_irq(dspi->irq); 1187 1188 return 0; 1189 } 1190 #endif /* CONFIG_PM_SLEEP */ 1191 1192 static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume); 1193 1194 static const struct regmap_range dspi_yes_ranges[] = { 1195 regmap_reg_range(SPI_MCR, SPI_MCR), 1196 regmap_reg_range(SPI_TCR, SPI_CTAR(3)), 1197 regmap_reg_range(SPI_SR, SPI_TXFR3), 1198 regmap_reg_range(SPI_RXFR0, SPI_RXFR3), 1199 regmap_reg_range(SPI_CTARE(0), SPI_CTARE(3)), 1200 regmap_reg_range(SPI_SREX, SPI_SREX), 1201 }; 1202 1203 static const struct regmap_access_table dspi_access_table = { 1204 .yes_ranges = dspi_yes_ranges, 1205 .n_yes_ranges = ARRAY_SIZE(dspi_yes_ranges), 1206 }; 1207 1208 static const struct regmap_range dspi_volatile_ranges[] = { 1209 regmap_reg_range(SPI_MCR, SPI_TCR), 1210 regmap_reg_range(SPI_SR, SPI_SR), 1211 regmap_reg_range(SPI_PUSHR, SPI_RXFR3), 1212 }; 1213 1214 static const struct regmap_access_table dspi_volatile_table = { 1215 .yes_ranges = dspi_volatile_ranges, 1216 .n_yes_ranges = ARRAY_SIZE(dspi_volatile_ranges), 1217 }; 1218 1219 static const struct regmap_config dspi_regmap_config = { 1220 .reg_bits = 32, 1221 .val_bits = 32, 1222 .reg_stride = 4, 1223 .max_register = 0x88, 1224 .volatile_table = &dspi_volatile_table, 1225 .rd_table = &dspi_access_table, 1226 .wr_table = &dspi_access_table, 1227 }; 1228 1229 static const struct regmap_range dspi_xspi_volatile_ranges[] = { 1230 regmap_reg_range(SPI_MCR, SPI_TCR), 1231 regmap_reg_range(SPI_SR, SPI_SR), 1232 regmap_reg_range(SPI_PUSHR, SPI_RXFR3), 1233 regmap_reg_range(SPI_SREX, SPI_SREX), 1234 }; 1235 1236 static const struct regmap_access_table dspi_xspi_volatile_table = { 1237 .yes_ranges = dspi_xspi_volatile_ranges, 1238 .n_yes_ranges = ARRAY_SIZE(dspi_xspi_volatile_ranges), 1239 }; 1240 1241 static const struct regmap_config dspi_xspi_regmap_config[] = { 1242 { 1243 .reg_bits = 32, 1244 .val_bits = 32, 1245 .reg_stride = 4, 1246 .max_register = 0x13c, 1247 .volatile_table = &dspi_xspi_volatile_table, 1248 .rd_table = &dspi_access_table, 1249 .wr_table = &dspi_access_table, 1250 }, 1251 { 1252 .name = "pushr", 1253 .reg_bits = 16, 1254 .val_bits = 16, 1255 .reg_stride = 2, 1256 .max_register = 0x2, 1257 }, 1258 }; 1259 1260 static int dspi_init(struct fsl_dspi *dspi) 1261 { 1262 unsigned int mcr; 1263 1264 /* Set idle states for all chip select signals to high */ 1265 mcr = SPI_MCR_PCSIS(GENMASK(dspi->ctlr->max_native_cs - 1, 0)); 1266 1267 if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE) 1268 mcr |= SPI_MCR_XSPI; 1269 if (!spi_controller_is_target(dspi->ctlr)) 1270 mcr |= SPI_MCR_HOST; 1271 1272 mcr |= SPI_MCR_HALT; 1273 1274 regmap_write(dspi->regmap, SPI_MCR, mcr); 1275 regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR); 1276 1277 switch (dspi->devtype_data->trans_mode) { 1278 case DSPI_XSPI_MODE: 1279 regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_CMDTCFE); 1280 break; 1281 case DSPI_DMA_MODE: 1282 regmap_write(dspi->regmap, SPI_RSER, 1283 SPI_RSER_TFFFE | SPI_RSER_TFFFD | 1284 SPI_RSER_RFDFE | SPI_RSER_RFDFD); 1285 break; 1286 default: 1287 dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n", 1288 dspi->devtype_data->trans_mode); 1289 return -EINVAL; 1290 } 1291 1292 return 0; 1293 } 1294 1295 static int dspi_target_abort(struct spi_controller *host) 1296 { 1297 struct fsl_dspi *dspi = spi_controller_get_devdata(host); 1298 1299 /* 1300 * Terminate all pending DMA transactions for the SPI working 1301 * in TARGET mode. 1302 */ 1303 if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) { 1304 dmaengine_terminate_sync(dspi->dma->chan_rx); 1305 dmaengine_terminate_sync(dspi->dma->chan_tx); 1306 } 1307 1308 /* Clear the internal DSPI RX and TX FIFO buffers */ 1309 regmap_update_bits(dspi->regmap, SPI_MCR, 1310 SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF, 1311 SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF); 1312 1313 return 0; 1314 } 1315 1316 static int dspi_probe(struct platform_device *pdev) 1317 { 1318 struct device_node *np = pdev->dev.of_node; 1319 const struct regmap_config *regmap_config; 1320 struct fsl_dspi_platform_data *pdata; 1321 struct spi_controller *ctlr; 1322 int ret, cs_num, bus_num = -1; 1323 struct fsl_dspi *dspi; 1324 struct resource *res; 1325 void __iomem *base; 1326 bool big_endian; 1327 1328 dspi = devm_kzalloc(&pdev->dev, sizeof(*dspi), GFP_KERNEL); 1329 if (!dspi) 1330 return -ENOMEM; 1331 1332 ctlr = spi_alloc_host(&pdev->dev, 0); 1333 if (!ctlr) 1334 return -ENOMEM; 1335 1336 spi_controller_set_devdata(ctlr, dspi); 1337 platform_set_drvdata(pdev, dspi); 1338 1339 dspi->pdev = pdev; 1340 dspi->ctlr = ctlr; 1341 1342 ctlr->setup = dspi_setup; 1343 ctlr->transfer_one_message = dspi_transfer_one_message; 1344 ctlr->dev.of_node = pdev->dev.of_node; 1345 1346 ctlr->cleanup = dspi_cleanup; 1347 ctlr->target_abort = dspi_target_abort; 1348 ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST; 1349 ctlr->use_gpio_descriptors = true; 1350 1351 pdata = dev_get_platdata(&pdev->dev); 1352 if (pdata) { 1353 ctlr->num_chipselect = ctlr->max_native_cs = pdata->cs_num; 1354 ctlr->bus_num = pdata->bus_num; 1355 1356 /* Only Coldfire uses platform data */ 1357 dspi->devtype_data = &devtype_data[MCF5441X]; 1358 big_endian = true; 1359 } else { 1360 1361 ret = of_property_read_u32(np, "spi-num-chipselects", &cs_num); 1362 if (ret < 0) { 1363 dev_err(&pdev->dev, "can't get spi-num-chipselects\n"); 1364 goto out_ctlr_put; 1365 } 1366 ctlr->num_chipselect = ctlr->max_native_cs = cs_num; 1367 1368 of_property_read_u32(np, "bus-num", &bus_num); 1369 ctlr->bus_num = bus_num; 1370 1371 if (of_property_read_bool(np, "spi-slave")) 1372 ctlr->target = true; 1373 1374 dspi->devtype_data = of_device_get_match_data(&pdev->dev); 1375 if (!dspi->devtype_data) { 1376 dev_err(&pdev->dev, "can't get devtype_data\n"); 1377 ret = -EFAULT; 1378 goto out_ctlr_put; 1379 } 1380 1381 big_endian = of_device_is_big_endian(np); 1382 } 1383 if (big_endian) { 1384 dspi->pushr_cmd = 0; 1385 dspi->pushr_tx = 2; 1386 } else { 1387 dspi->pushr_cmd = 2; 1388 dspi->pushr_tx = 0; 1389 } 1390 1391 if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE) 1392 ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); 1393 else 1394 ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16); 1395 1396 base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); 1397 if (IS_ERR(base)) { 1398 ret = PTR_ERR(base); 1399 goto out_ctlr_put; 1400 } 1401 1402 if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE) 1403 regmap_config = &dspi_xspi_regmap_config[0]; 1404 else 1405 regmap_config = &dspi_regmap_config; 1406 dspi->regmap = devm_regmap_init_mmio(&pdev->dev, base, regmap_config); 1407 if (IS_ERR(dspi->regmap)) { 1408 dev_err(&pdev->dev, "failed to init regmap: %ld\n", 1409 PTR_ERR(dspi->regmap)); 1410 ret = PTR_ERR(dspi->regmap); 1411 goto out_ctlr_put; 1412 } 1413 1414 if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE) { 1415 dspi->regmap_pushr = devm_regmap_init_mmio( 1416 &pdev->dev, base + SPI_PUSHR, 1417 &dspi_xspi_regmap_config[1]); 1418 if (IS_ERR(dspi->regmap_pushr)) { 1419 dev_err(&pdev->dev, 1420 "failed to init pushr regmap: %ld\n", 1421 PTR_ERR(dspi->regmap_pushr)); 1422 ret = PTR_ERR(dspi->regmap_pushr); 1423 goto out_ctlr_put; 1424 } 1425 } 1426 1427 dspi->clk = devm_clk_get_enabled(&pdev->dev, "dspi"); 1428 if (IS_ERR(dspi->clk)) { 1429 ret = PTR_ERR(dspi->clk); 1430 dev_err(&pdev->dev, "unable to get clock\n"); 1431 goto out_ctlr_put; 1432 } 1433 1434 ret = dspi_init(dspi); 1435 if (ret) 1436 goto out_ctlr_put; 1437 1438 dspi->irq = platform_get_irq(pdev, 0); 1439 if (dspi->irq <= 0) { 1440 dev_info(&pdev->dev, 1441 "can't get platform irq, using poll mode\n"); 1442 dspi->irq = 0; 1443 goto poll_mode; 1444 } 1445 1446 init_completion(&dspi->xfer_done); 1447 1448 ret = request_threaded_irq(dspi->irq, dspi_interrupt, NULL, 1449 IRQF_SHARED, pdev->name, dspi); 1450 if (ret < 0) { 1451 dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n"); 1452 goto out_ctlr_put; 1453 } 1454 1455 poll_mode: 1456 1457 if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) { 1458 ret = dspi_request_dma(dspi, res->start); 1459 if (ret < 0) { 1460 dev_err(&pdev->dev, "can't get dma channels\n"); 1461 goto out_free_irq; 1462 } 1463 } 1464 1465 ctlr->max_speed_hz = 1466 clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor; 1467 1468 if (dspi->devtype_data->trans_mode != DSPI_DMA_MODE) 1469 ctlr->ptp_sts_supported = true; 1470 1471 ret = spi_register_controller(ctlr); 1472 if (ret != 0) { 1473 dev_err(&pdev->dev, "Problem registering DSPI ctlr\n"); 1474 goto out_release_dma; 1475 } 1476 1477 return ret; 1478 1479 out_release_dma: 1480 dspi_release_dma(dspi); 1481 out_free_irq: 1482 if (dspi->irq) 1483 free_irq(dspi->irq, dspi); 1484 out_ctlr_put: 1485 spi_controller_put(ctlr); 1486 1487 return ret; 1488 } 1489 1490 static void dspi_remove(struct platform_device *pdev) 1491 { 1492 struct fsl_dspi *dspi = platform_get_drvdata(pdev); 1493 1494 /* Disconnect from the SPI framework */ 1495 spi_unregister_controller(dspi->ctlr); 1496 1497 /* Disable RX and TX */ 1498 regmap_update_bits(dspi->regmap, SPI_MCR, 1499 SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF, 1500 SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF); 1501 1502 /* Stop Running */ 1503 regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, SPI_MCR_HALT); 1504 1505 dspi_release_dma(dspi); 1506 if (dspi->irq) 1507 free_irq(dspi->irq, dspi); 1508 } 1509 1510 static void dspi_shutdown(struct platform_device *pdev) 1511 { 1512 dspi_remove(pdev); 1513 } 1514 1515 static struct platform_driver fsl_dspi_driver = { 1516 .driver.name = DRIVER_NAME, 1517 .driver.of_match_table = fsl_dspi_dt_ids, 1518 .driver.pm = &dspi_pm, 1519 .probe = dspi_probe, 1520 .remove = dspi_remove, 1521 .shutdown = dspi_shutdown, 1522 }; 1523 module_platform_driver(fsl_dspi_driver); 1524 1525 MODULE_DESCRIPTION("Freescale DSPI Controller Driver"); 1526 MODULE_LICENSE("GPL"); 1527 MODULE_ALIAS("platform:" DRIVER_NAME); 1528