// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009 Texas Instruments.
 * Copyright (C) 2010 EF Johnson Technologies
 */

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/platform_data/edma.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/slab.h>

/* SPIDEF reset value: all chip-select lines deasserted (high) by default */
#define CS_DEFAULT	0xFF

/* SPIFMTn register bit definitions */
#define SPIFMT_PHASE_MASK	BIT(16)
#define SPIFMT_POLARITY_MASK	BIT(17)
#define SPIFMT_DISTIMER_MASK	BIT(18)
#define SPIFMT_SHIFTDIR_MASK	BIT(20)
#define SPIFMT_WAITENA_MASK	BIT(21)
#define SPIFMT_PARITYENA_MASK	BIT(22)
#define SPIFMT_ODD_PARITY_MASK	BIT(23)
#define SPIFMT_WDELAY_MASK	0x3f000000u
#define SPIFMT_WDELAY_SHIFT	24
#define SPIFMT_PRESCALE_SHIFT	8

/* SPIPC0: pin control — route each pin to its SPI function */
#define SPIPC0_DIFUN_MASK	BIT(11)		/* MISO */
#define SPIPC0_DOFUN_MASK	BIT(10)		/* MOSI */
#define SPIPC0_CLKFUN_MASK	BIT(9)		/* CLK */
#define SPIPC0_SPIENA_MASK	BIT(8)		/* nREADY */

/* SPIINT interrupt-enable masks and SPILVL interrupt-level values */
#define SPIINT_MASKALL		0x0101035F
#define SPIINT_MASKINT		0x0000015F
#define SPI_INTLVL_1		0x000001FF
#define SPI_INTLVL_0		0x00000000

/* SPIDAT1 (upper 16 bit defines) */
#define SPIDAT1_CSHOLD_MASK	BIT(12)
#define SPIDAT1_WDEL		BIT(10)

/* SPIGCR1 */
#define SPIGCR1_CLKMOD_MASK	BIT(1)
#define SPIGCR1_MASTER_MASK	BIT(0)
#define SPIGCR1_POWERDOWN_MASK	BIT(8)
#define SPIGCR1_LOOPBACK_MASK	BIT(16)
#define SPIGCR1_SPIENA_MASK	BIT(24)

/* SPIBUF */
#define SPIBUF_TXFULL_MASK	BIT(29)
#define SPIBUF_RXEMPTY_MASK	BIT(31)

/* SPIDELAY: chip-select setup/hold and ENA signal timing fields */
#define SPIDELAY_C2TDELAY_SHIFT	24
#define SPIDELAY_C2TDELAY_MASK	(0xFF << SPIDELAY_C2TDELAY_SHIFT)
#define SPIDELAY_T2CDELAY_SHIFT	16
#define SPIDELAY_T2CDELAY_MASK	(0xFF << SPIDELAY_T2CDELAY_SHIFT)
#define SPIDELAY_T2EDELAY_SHIFT	8
#define SPIDELAY_T2EDELAY_MASK	(0xFF << SPIDELAY_T2EDELAY_SHIFT)
#define SPIDELAY_C2EDELAY_SHIFT	0
#define SPIDELAY_C2EDELAY_MASK	0xFF

/* Error Masks (SPIFLG register) */
#define SPIFLG_DLEN_ERR_MASK		BIT(0)
#define SPIFLG_TIMEOUT_MASK		BIT(1)
#define SPIFLG_PARERR_MASK		BIT(2)
#define SPIFLG_DESYNC_MASK		BIT(3)
#define SPIFLG_BITERR_MASK		BIT(4)
#define SPIFLG_OVRRUN_MASK		BIT(6)
#define SPIFLG_BUF_INIT_ACTIVE_MASK	BIT(24)
#define SPIFLG_ERROR_MASK		(SPIFLG_DLEN_ERR_MASK \
				| SPIFLG_TIMEOUT_MASK | SPIFLG_PARERR_MASK \
				| SPIFLG_DESYNC_MASK | SPIFLG_BITERR_MASK \
				| SPIFLG_OVRRUN_MASK)

#define SPIINT_DMA_REQ_EN	BIT(16)

/* SPI Controller registers (offsets from the ioremapped base) */
#define SPIGCR0		0x00
#define SPIGCR1		0x04
#define SPIINT		0x08
#define SPILVL		0x0c
#define SPIFLG		0x10
#define SPIPC0		0x14
#define SPIDAT1		0x3c
#define SPIBUF		0x40
#define SPIDELAY	0x48
#define SPIDEF		0x4c
#define SPIFMT0		0x50

/* Per-device IO methods: busy-poll the FIFO, or use the DMA engine */
#define SPI_IO_TYPE_POLL	1
#define SPI_IO_TYPE_DMA		2

/* Transfers shorter than this are not worth the DMA setup overhead */
#define DMA_MIN_BYTES 16

enum {
	SPI_VERSION_1, /* For DM355/DM365/DM6467 */
	SPI_VERSION_2, /* For DA8xx */
};

/**
 * struct davinci_spi_platform_data - Platform data for SPI master device on DaVinci
 *
 * @version:	version of the SPI IP. Different DaVinci devices have slightly
 *		varying versions of the same IP.
 * @num_chipselect: number of chipselects supported by this SPI master
 * @intr_line:	interrupt line used to connect the SPI IP to the ARM interrupt
 *		controller within the SoC. Possible values are 0 and 1.
 * @prescaler_limit: max clock prescaler value
 * @cshold_bug:	set this to true if the SPI controller on your chip requires
 *		a write to CSHOLD bit in between transfers (like in DM355).
 * @dma_event_q: DMA event queue to use if SPI_IO_TYPE_DMA is used for any
 *		device on the bus.
 */
struct davinci_spi_platform_data {
	u8			version;
	u8			num_chipselect;
	u8			intr_line;
	u8			prescaler_limit;
	bool			cshold_bug;
	enum dma_event_q	dma_event_q;
};

/**
 * struct davinci_spi_config - Per-chip-select configuration for SPI slave devices
 *
 * @wdelay:	amount of delay between transmissions. Measured in number of
 *		SPI module clocks.
 * @odd_parity:	polarity of parity flag at the end of transmit data stream.
 *		0 - odd parity, 1 - even parity.
 * @parity_enable: enable transmission of parity at end of each transmit
 *		data stream.
 * @io_type:	type of IO transfer. Choose between polled, interrupt and DMA.
 * @timer_disable: disable chip-select timers (setup and hold)
 * @c2tdelay:	chip-select setup time. Measured in number of SPI module clocks.
 * @t2cdelay:	chip-select hold time. Measured in number of SPI module clocks.
 * @t2edelay:	transmit data finished to SPI ENAn pin inactive time. Measured
 *		in number of SPI clocks.
 * @c2edelay:	chip-select active to SPI ENAn signal active time. Measured in
 *		number of SPI clocks.
 */
struct davinci_spi_config {
	u8	wdelay;
	u8	odd_parity;
	u8	parity_enable;
	u8	io_type;
	u8	timer_disable;
	u8	c2tdelay;
	u8	t2cdelay;
	u8	t2edelay;
	u8	c2edelay;
};

/* SPI Controller driver's private data.
 */
struct davinci_spi {
	struct spi_bitbang	bitbang;	/* must be first: bitbang core casts back */
	struct clk		*clk;		/* module functional clock */

	u8			version;	/* SPI_VERSION_1 or SPI_VERSION_2 */
	resource_size_t		pbase;		/* physical base, used for DMA addresses */
	void __iomem		*base;		/* ioremapped register base */
	u32			irq;
	struct completion	done;		/* signalled when a transfer finishes */

	const void		*tx;		/* current tx buffer cursor (may be NULL) */
	void			*rx;		/* current rx buffer cursor (may be NULL) */
	int			rcount;		/* words left to receive */
	int			wcount;		/* words left to transmit */

	struct dma_chan		*dma_rx;
	struct dma_chan		*dma_tx;

	struct davinci_spi_platform_data pdata;

	/* per-word-size accessors, selected in davinci_spi_setup_transfer() */
	void			(*get_rx)(u32 rx_data, struct davinci_spi *);
	u32			(*get_tx)(struct davinci_spi *);

	/* per-chipselect word size in bytes (1 or 2), indexed by chipselect */
	u8			*bytes_per_word;

	u8			prescaler_limit;
};

/* All-zero config used when a device supplies no controller_data */
static struct davinci_spi_config davinci_spi_default_cfg;

/* Store one received byte; no-op for transmit-only transfers (rx == NULL). */
static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *dspi)
{
	if (dspi->rx) {
		u8 *rx = dspi->rx;
		*rx++ = (u8)data;
		dspi->rx = rx;
	}
}

/* Store one received 16-bit word; no-op for transmit-only transfers. */
static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *dspi)
{
	if (dspi->rx) {
		u16 *rx = dspi->rx;
		*rx++ = (u16)data;
		dspi->rx = rx;
	}
}

/* Fetch next byte to transmit; returns 0 for receive-only transfers. */
static u32 davinci_spi_tx_buf_u8(struct davinci_spi *dspi)
{
	u32 data = 0;

	if (dspi->tx) {
		const u8 *tx = dspi->tx;

		data = *tx++;
		dspi->tx = tx;
	}
	return data;
}

/* Fetch next 16-bit word to transmit; returns 0 for receive-only transfers. */
static u32 davinci_spi_tx_buf_u16(struct davinci_spi *dspi)
{
	u32 data = 0;

	if (dspi->tx) {
		const u16 *tx = dspi->tx;

		data = *tx++;
		dspi->tx = tx;
	}
	return data;
}

/* Read-modify-write helper: set @bits in the register at @addr. */
static inline void set_io_bits(void __iomem *addr, u32 bits)
{
	u32 v = ioread32(addr);

	v |= bits;
	iowrite32(v, addr);
}

/* Read-modify-write helper: clear @bits in the register at @addr. */
static inline void clear_io_bits(void __iomem *addr, u32 bits)
{
	u32 v = ioread32(addr);

	v &= ~bits;
	iowrite32(v, addr);
}

/*
 * Interface to control the chip select signal
 */
static void davinci_spi_chipselect(struct spi_device *spi, int value)
{
	struct davinci_spi *dspi;
	struct davinci_spi_config *spicfg = spi->controller_data;
	u8 chip_sel = spi_get_chipselect(spi, 0);
	/* CS_DEFAULT: start with every internal chip-select line deasserted */
	u16 spidat1 = CS_DEFAULT;

	dspi = spi_controller_get_devdata(spi->controller);

	/* program delay transfers if tx_delay is non zero */
	if (spicfg && spicfg->wdelay)
		spidat1 |= SPIDAT1_WDEL;

	/*
	 * Board specific chip select logic decides the polarity and cs
	 * line for the controller
	 */
	if (spi_get_csgpiod(spi, 0)) {
		/* GPIO chip-select: value is inverted by gpiod active-low handling */
		if (value == BITBANG_CS_ACTIVE)
			gpiod_set_value(spi_get_csgpiod(spi, 0), 1);
		else
			gpiod_set_value(spi_get_csgpiod(spi, 0), 0);
	} else {
		/* Internal chip-select: drive the per-CS bit low to assert */
		if (value == BITBANG_CS_ACTIVE) {
			if (!(spi->mode & SPI_CS_WORD))
				spidat1 |= SPIDAT1_CSHOLD_MASK;
			spidat1 &= ~(0x1 << chip_sel);
		}
	}

	/* Only the upper 16 bits of SPIDAT1 carry CS/format control */
	iowrite16(spidat1, dspi->base + SPIDAT1 + 2);
}

/**
 * davinci_spi_get_prescale - Calculates the correct prescale value
 * @dspi: the controller data
 * @max_speed_hz: the maximum rate the SPI clock can run at
 *
 * This function calculates the prescale value that generates a clock rate
 * less than or equal to the specified maximum.
 *
 * Returns: calculated prescale value for easy programming into SPI registers
 * or negative error number if valid prescalar cannot be updated.
 */
static inline int davinci_spi_get_prescale(struct davinci_spi *dspi,
							u32 max_speed_hz)
{
	int ret;

	/* Subtract 1 to match what will be programmed into SPI register. */
	ret = DIV_ROUND_UP(clk_get_rate(dspi->clk), max_speed_hz) - 1;

	/* Hardware field is 8 bits wide; some IP revisions forbid small values */
	if (ret < dspi->prescaler_limit || ret > 255)
		return -EINVAL;

	return ret;
}

/**
 * davinci_spi_setup_transfer - This functions will determine transfer method
 * @spi: spi device on which data transfer to be done
 * @t: spi transfer in which transfer info is filled
 *
 * This function determines data transfer method (8/16/32 bit transfer).
 * It will also set the SPI Clock Control register according to
 * SPI slave device freq.
 */
static int davinci_spi_setup_transfer(struct spi_device *spi,
		struct spi_transfer *t)
{

	struct davinci_spi *dspi;
	struct davinci_spi_config *spicfg;
	u8 bits_per_word = 0;
	u32 hz = 0, spifmt = 0;
	int prescale;

	dspi = spi_controller_get_devdata(spi->controller);
	spicfg = spi->controller_data;
	if (!spicfg)
		spicfg = &davinci_spi_default_cfg;

	if (t) {
		bits_per_word = t->bits_per_word;
		hz = t->speed_hz;
	}

	/* if bits_per_word is not set then set it default */
	if (!bits_per_word)
		bits_per_word = spi->bits_per_word;

	/*
	 * Assign function pointer to appropriate transfer method
	 * 8bit, 16bit or 32bit transfer
	 */
	if (bits_per_word <= 8) {
		dspi->get_rx = davinci_spi_rx_buf_u8;
		dspi->get_tx = davinci_spi_tx_buf_u8;
		dspi->bytes_per_word[spi_get_chipselect(spi, 0)] = 1;
	} else {
		dspi->get_rx = davinci_spi_rx_buf_u16;
		dspi->get_tx = davinci_spi_tx_buf_u16;
		dspi->bytes_per_word[spi_get_chipselect(spi, 0)] = 2;
	}

	if (!hz)
		hz = spi->max_speed_hz;

	/* Set up SPIFMTn register, unique to this chipselect. */

	prescale = davinci_spi_get_prescale(dspi, hz);
	if (prescale < 0)
		return prescale;

	spifmt = (prescale << SPIFMT_PRESCALE_SHIFT) | (bits_per_word & 0x1f);

	if (spi->mode & SPI_LSB_FIRST)
		spifmt |= SPIFMT_SHIFTDIR_MASK;

	if (spi->mode & SPI_CPOL)
		spifmt |= SPIFMT_POLARITY_MASK;

	/* Hardware PHASE bit is inverted relative to the SPI_CPHA convention */
	if (!(spi->mode & SPI_CPHA))
		spifmt |= SPIFMT_PHASE_MASK;

	/*
	 * Assume wdelay is used only on SPI peripherals that has this field
	 * in SPIFMTn register and when it's configured from board file or DT.
	 */
	if (spicfg->wdelay)
		spifmt |= ((spicfg->wdelay << SPIFMT_WDELAY_SHIFT)
				& SPIFMT_WDELAY_MASK);

	/*
	 * Version 1 hardware supports two basic SPI modes:
	 *  - Standard SPI mode uses 4 pins, with chipselect
	 *  - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS)
	 *	(distinct from SPI_3WIRE, with just one data wire;
	 *	or similar variants without MOSI or without MISO)
	 *
	 * Version 2 hardware supports an optional handshaking signal,
	 * so it can support two more modes:
	 *  - 5 pin SPI variant is standard SPI plus SPI_READY
	 *  - 4 pin with enable is (SPI_READY | SPI_NO_CS)
	 */

	if (dspi->version == SPI_VERSION_2) {

		u32 delay = 0;

		if (spicfg->odd_parity)
			spifmt |= SPIFMT_ODD_PARITY_MASK;

		if (spicfg->parity_enable)
			spifmt |= SPIFMT_PARITYENA_MASK;

		if (spicfg->timer_disable) {
			spifmt |= SPIFMT_DISTIMER_MASK;
		} else {
			delay |= (spicfg->c2tdelay << SPIDELAY_C2TDELAY_SHIFT)
				& SPIDELAY_C2TDELAY_MASK;
			delay |= (spicfg->t2cdelay << SPIDELAY_T2CDELAY_SHIFT)
				& SPIDELAY_T2CDELAY_MASK;
		}

		if (spi->mode & SPI_READY) {
			spifmt |= SPIFMT_WAITENA_MASK;
			delay |= (spicfg->t2edelay << SPIDELAY_T2EDELAY_SHIFT)
				& SPIDELAY_T2EDELAY_MASK;
			delay |= (spicfg->c2edelay << SPIDELAY_C2EDELAY_SHIFT)
				& SPIDELAY_C2EDELAY_MASK;
		}

		iowrite32(delay, dspi->base + SPIDELAY);
	}

	iowrite32(spifmt, dspi->base + SPIFMT0);

	return 0;
}

/*
 * Allocate a per-device config for DT-probed devices and apply DT overrides.
 * The allocation is paired with the kfree() in davinci_spi_cleanup().
 */
static int davinci_spi_of_setup(struct spi_device *spi)
{
	struct davinci_spi_config *spicfg = spi->controller_data;
	struct device_node *np = spi->dev.of_node;
	struct davinci_spi *dspi = spi_controller_get_devdata(spi->controller);
	u32 prop;

	if (spicfg == NULL && np) {
		spicfg = kzalloc(sizeof(*spicfg), GFP_KERNEL);
		if (!spicfg)
			return -ENOMEM;
		*spicfg = davinci_spi_default_cfg;
		/* override with dt configured values */
		if (!of_property_read_u32(np, "ti,spi-wdelay", &prop))
			spicfg->wdelay = (u8)prop;
		spi->controller_data = spicfg;

		/* prefer DMA whenever both channels were obtained at probe */
		if (dspi->dma_rx && dspi->dma_tx)
			spicfg->io_type = SPI_IO_TYPE_DMA;
	}

	return 0;
}

/**
 * davinci_spi_setup - This functions will set default transfer method
 * @spi: spi device on which data transfer to be done
 *
 * This functions sets the default transfer method.
 */
static int davinci_spi_setup(struct spi_device *spi)
{
	struct davinci_spi *dspi;
	struct device_node *np = spi->dev.of_node;
	bool internal_cs = true;

	dspi = spi_controller_get_devdata(spi->controller);

	if (!(spi->mode & SPI_NO_CS)) {
		/* a DT-provided CS GPIO overrides the internal chip-select */
		if (np && spi_get_csgpiod(spi, 0))
			internal_cs = false;

		if (internal_cs)
			set_io_bits(dspi->base + SPIPC0, 1 << spi_get_chipselect(spi, 0));
	}

	if (spi->mode & SPI_READY)
		set_io_bits(dspi->base + SPIPC0, SPIPC0_SPIENA_MASK);

	if (spi->mode & SPI_LOOP)
		set_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK);
	else
		clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK);

	return davinci_spi_of_setup(spi);
}

/* Release the per-device config allocated by davinci_spi_of_setup(). */
static void davinci_spi_cleanup(struct spi_device *spi)
{
	struct davinci_spi_config *spicfg = spi->controller_data;

	spi->controller_data = NULL;
	/* only DT-probed devices own a kzalloc'd config; board-file ones don't */
	if (spi->dev.of_node)
		kfree(spicfg);
}

/*
 * Decide whether a transfer should use DMA: requires DMA io_type, a payload
 * large enough to amortize setup, and buffers that can be DMA-mapped
 * (vmalloc memory cannot).
 */
static bool davinci_spi_can_dma(struct spi_controller *host,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct davinci_spi_config *spicfg = spi->controller_data;
	bool can_dma = false;

	if (spicfg)
		can_dma = (spicfg->io_type == SPI_IO_TYPE_DMA) &&
			(xfer->len >= DMA_MIN_BYTES) &&
			!is_vmalloc_addr(xfer->rx_buf) &&
			!is_vmalloc_addr(xfer->tx_buf);

	return can_dma;
}

/*
 * Translate SPIFLG error bits into a logged message and an errno.
 * Returns 0 when no recognized error bit is set.
 */
static int davinci_spi_check_error(struct davinci_spi *dspi, int int_status)
{
	struct device *sdev = dspi->bitbang.ctlr->dev.parent;

	if (int_status & SPIFLG_TIMEOUT_MASK) {
		dev_err(sdev, "SPI Time-out Error\n");
		return -ETIMEDOUT;
	}
	if (int_status & SPIFLG_DESYNC_MASK) {
		dev_err(sdev, "SPI Desynchronization Error\n");
		return -EIO;
	}
	if (int_status & SPIFLG_BITERR_MASK) {
		dev_err(sdev, "SPI Bit error\n");
		return -EIO;
	}

	/* these flags only exist on version-2 IP */
	if (dspi->version == SPI_VERSION_2) {
		if (int_status & SPIFLG_DLEN_ERR_MASK) {
			dev_err(sdev, "SPI Data Length Error\n");
			return -EIO;
		}
		if (int_status & SPIFLG_PARERR_MASK) {
			dev_err(sdev, "SPI Parity Error\n");
			return -EIO;
		}
		if (int_status & SPIFLG_OVRRUN_MASK) {
			dev_err(sdev, "SPI Data Overrun error\n");
			return -EIO;
		}
		if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) {
			dev_err(sdev, "SPI Buffer Init Active\n");
			return -EBUSY;
		}
	}

	return 0;
}

/**
 * davinci_spi_process_events - check for and handle any SPI controller events
 * @dspi: the controller data
 *
 * This function will check the SPIFLG register and handle any events that are
 * detected there
 */
static int davinci_spi_process_events(struct davinci_spi *dspi)
{
	u32 buf, status, errors = 0, spidat1;

	buf = ioread32(dspi->base + SPIBUF);

	/* drain one received word, if any is pending */
	if (dspi->rcount > 0 && !(buf & SPIBUF_RXEMPTY_MASK)) {
		dspi->get_rx(buf & 0xFFFF, dspi);
		dspi->rcount--;
	}

	status = ioread32(dspi->base + SPIFLG);

	if (unlikely(status & SPIFLG_ERROR_MASK)) {
		errors = status & SPIFLG_ERROR_MASK;
		goto out;
	}

	/* queue the next word; preserve the CS/format bits in the upper half */
	if (dspi->wcount > 0 && !(buf & SPIBUF_TXFULL_MASK)) {
		spidat1 = ioread32(dspi->base + SPIDAT1);
		dspi->wcount--;
		spidat1 &= ~0xFFFF;
		spidat1 |= 0xFFFF & dspi->get_tx(dspi);
		iowrite32(spidat1, dspi->base + SPIDAT1);
	}

out:
	return errors;
}

/* DMA completion callback for the rx channel: mark rx done, maybe finish. */
static void davinci_spi_dma_rx_callback(void *data)
{
	struct davinci_spi *dspi = (struct davinci_spi *)data;

	dspi->rcount = 0;

	if (!dspi->wcount && !dspi->rcount)
		complete(&dspi->done);
}

/* DMA completion callback for the tx channel: mark tx done, maybe finish. */
static void davinci_spi_dma_tx_callback(void *data)
{
	struct davinci_spi *dspi = (struct davinci_spi *)data;

	dspi->wcount = 0;

	if (!dspi->wcount && !dspi->rcount)
		complete(&dspi->done);
}

/**
 * davinci_spi_bufs - functions which will handle transfer data
 * @spi: spi device on which data transfer to be done
 * @t: spi transfer in which transfer info is filled
 *
 * This function will put data to be transferred into data register
 * of SPI controller and then wait until the completion will be marked
 * by the IRQ Handler.
 */
static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
{
	struct davinci_spi *dspi;
	int data_type, ret = -ENOMEM;
	u32 tx_data, spidat1;
	u32 errors = 0;
	struct davinci_spi_config *spicfg;
	struct davinci_spi_platform_data *pdata;
	unsigned long timeout;

	dspi = spi_controller_get_devdata(spi->controller);
	pdata = &dspi->pdata;
	spicfg = (struct davinci_spi_config *)spi->controller_data;
	if (!spicfg)
		spicfg = &davinci_spi_default_cfg;

	/* convert len to words based on bits_per_word */
	data_type = dspi->bytes_per_word[spi_get_chipselect(spi, 0)];

	dspi->tx = t->tx_buf;
	dspi->rx = t->rx_buf;
	dspi->wcount = t->len / data_type;
	dspi->rcount = dspi->wcount;

	spidat1 = ioread32(dspi->base + SPIDAT1);

	/* wake the module and enable transfers */
	clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);
	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);

	reinit_completion(&dspi->done);

	if (!davinci_spi_can_dma(spi->controller, spi, t)) {
		if (spicfg->io_type != SPI_IO_TYPE_POLL)
			set_io_bits(dspi->base + SPIINT, SPIINT_MASKINT);
		/* start the transfer */
		dspi->wcount--;
		tx_data = dspi->get_tx(dspi);
		spidat1 &= 0xFFFF0000;
		spidat1 |= tx_data & 0xFFFF;
		iowrite32(spidat1, dspi->base + SPIDAT1);
	} else {
		struct dma_slave_config dma_rx_conf = {
			.direction = DMA_DEV_TO_MEM,
			.src_addr = (unsigned long)dspi->pbase + SPIBUF,
			.src_addr_width = data_type,
			.src_maxburst = 1,
		};
		struct dma_slave_config dma_tx_conf = {
			.direction = DMA_MEM_TO_DEV,
			.dst_addr = (unsigned long)dspi->pbase + SPIDAT1,
			.dst_addr_width = data_type,
			.dst_maxburst = 1,
		};
		struct dma_async_tx_descriptor *rxdesc;
		struct dma_async_tx_descriptor *txdesc;

		dmaengine_slave_config(dspi->dma_rx, &dma_rx_conf);
		dmaengine_slave_config(dspi->dma_tx, &dma_tx_conf);

		rxdesc = dmaengine_prep_slave_sg(dspi->dma_rx,
				t->rx_sg.sgl, t->rx_sg.nents, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!rxdesc)
			goto err_desc;

		if (!t->tx_buf) {
			/* To avoid errors when doing rx-only transfers with
			 * many SG entries (> 20), use the rx buffer as the
			 * dummy tx buffer so that dma reloads are done at the
			 * same time for rx and tx.
			 */
			t->tx_sg.sgl = t->rx_sg.sgl;
			t->tx_sg.nents = t->rx_sg.nents;
		}

		txdesc = dmaengine_prep_slave_sg(dspi->dma_tx,
				t->tx_sg.sgl, t->tx_sg.nents, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!txdesc)
			goto err_desc;

		rxdesc->callback = davinci_spi_dma_rx_callback;
		rxdesc->callback_param = (void *)dspi;
		txdesc->callback = davinci_spi_dma_tx_callback;
		txdesc->callback_param = (void *)dspi;

		if (pdata->cshold_bug)
			iowrite16(spidat1 >> 16, dspi->base + SPIDAT1 + 2);

		/* rx must be submitted first so it is armed before tx drives data */
		dmaengine_submit(rxdesc);
		dmaengine_submit(txdesc);

		dma_async_issue_pending(dspi->dma_rx);
		dma_async_issue_pending(dspi->dma_tx);

		set_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);
	}

	/* Wait for the transfer to complete */
	if (spicfg->io_type != SPI_IO_TYPE_POLL) {
		/* expected transfer time in ms, assuming at most 2x slowdown */
		timeout = DIV_ROUND_UP(t->speed_hz, MSEC_PER_SEC);
		timeout = DIV_ROUND_UP(t->len * 8, timeout);
		/* Assume we are at most 2x slower than the nominal bus speed */
		timeout = 2 * msecs_to_jiffies(timeout);

		if (wait_for_completion_timeout(&dspi->done, timeout) == 0)
			errors = SPIFLG_TIMEOUT_MASK;
	} else {
		/* polled mode: spin on the FIFO until all words moved or error */
		while (dspi->rcount > 0 || dspi->wcount > 0) {
			errors = davinci_spi_process_events(dspi);
			if (errors)
				break;
			cpu_relax();
		}
	}

	clear_io_bits(dspi->base + SPIINT, SPIINT_MASKALL);
	if (davinci_spi_can_dma(spi->controller, spi, t))
		clear_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);

	clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);

	/*
	 * Check for bit error, desync error,parity error,timeout error and
	 * receive overflow errors
	 */
	if (errors) {
		ret = davinci_spi_check_error(dspi, errors);
		WARN(!ret, "%s: error reported but no error found!\n",
							dev_name(&spi->dev));
		return ret;
	}

	if (dspi->rcount != 0 || dspi->wcount != 0) {
		dev_err(&spi->dev, "SPI data transfer error\n");
		return -EIO;
	}

	return t->len;

err_desc:
	/*
	 * NOTE(review): on descriptor-prep failure the controller is left
	 * enabled (SPIENA set, POWERDOWN cleared) — presumably benign since
	 * the next transfer reprograms SPIGCR1, but verify on hardware.
	 */
	return ret;
}

/**
 * dummy_thread_fn - dummy thread function
 * @irq: IRQ number for this SPI Master
 * @data: structure for SPI Master controller davinci_spi
 *
 * This is to satisfy the request_threaded_irq() API so that the irq
 * handler is called in interrupt context.
 */
static irqreturn_t dummy_thread_fn(s32 irq, void *data)
{
	return IRQ_HANDLED;
}

/**
 * davinci_spi_irq - Interrupt handler for SPI Master Controller
 * @irq: IRQ number for this SPI Master
 * @data: structure for SPI Master controller davinci_spi
 *
 * ISR will determine that interrupt arrives either for READ or WRITE command.
 * According to command it will do the appropriate action. It will check
 * transfer length and if it is not zero then dispatch transfer command again.
 * If transfer length is zero then it will indicate the COMPLETION so that
 * davinci_spi_bufs function can go ahead.
793 */ 794 static irqreturn_t davinci_spi_irq(s32 irq, void *data) 795 { 796 struct davinci_spi *dspi = data; 797 int status; 798 799 status = davinci_spi_process_events(dspi); 800 if (unlikely(status != 0)) 801 clear_io_bits(dspi->base + SPIINT, SPIINT_MASKINT); 802 803 if ((!dspi->rcount && !dspi->wcount) || status) 804 complete(&dspi->done); 805 806 return IRQ_HANDLED; 807 } 808 809 static int davinci_spi_request_dma(struct davinci_spi *dspi) 810 { 811 struct device *sdev = dspi->bitbang.ctlr->dev.parent; 812 813 dspi->dma_rx = dma_request_chan(sdev, "rx"); 814 if (IS_ERR(dspi->dma_rx)) 815 return PTR_ERR(dspi->dma_rx); 816 817 dspi->dma_tx = dma_request_chan(sdev, "tx"); 818 if (IS_ERR(dspi->dma_tx)) { 819 dma_release_channel(dspi->dma_rx); 820 return PTR_ERR(dspi->dma_tx); 821 } 822 823 return 0; 824 } 825 826 #if defined(CONFIG_OF) 827 828 /* OF SPI data structure */ 829 struct davinci_spi_of_data { 830 u8 version; 831 u8 prescaler_limit; 832 }; 833 834 static const struct davinci_spi_of_data dm6441_spi_data = { 835 .version = SPI_VERSION_1, 836 .prescaler_limit = 2, 837 }; 838 839 static const struct davinci_spi_of_data da830_spi_data = { 840 .version = SPI_VERSION_2, 841 .prescaler_limit = 2, 842 }; 843 844 static const struct davinci_spi_of_data keystone_spi_data = { 845 .version = SPI_VERSION_1, 846 .prescaler_limit = 0, 847 }; 848 849 static const struct of_device_id davinci_spi_of_match[] = { 850 { 851 .compatible = "ti,dm6441-spi", 852 .data = &dm6441_spi_data, 853 }, 854 { 855 .compatible = "ti,da830-spi", 856 .data = &da830_spi_data, 857 }, 858 { 859 .compatible = "ti,keystone-spi", 860 .data = &keystone_spi_data, 861 }, 862 { }, 863 }; 864 MODULE_DEVICE_TABLE(of, davinci_spi_of_match); 865 866 /** 867 * spi_davinci_get_pdata - Get platform data from DTS binding 868 * @pdev: ptr to platform data 869 * @dspi: ptr to driver data 870 * 871 * Parses and populates pdata in dspi from device tree bindings. 
872 * 873 * NOTE: Not all platform data params are supported currently. 874 */ 875 static int spi_davinci_get_pdata(struct platform_device *pdev, 876 struct davinci_spi *dspi) 877 { 878 struct device_node *node = pdev->dev.of_node; 879 const struct davinci_spi_of_data *spi_data; 880 struct davinci_spi_platform_data *pdata; 881 unsigned int num_cs, intr_line = 0; 882 883 pdata = &dspi->pdata; 884 885 spi_data = device_get_match_data(&pdev->dev); 886 887 pdata->version = spi_data->version; 888 pdata->prescaler_limit = spi_data->prescaler_limit; 889 /* 890 * default num_cs is 1 and all chipsel are internal to the chip 891 * indicated by chip_sel being NULL or cs_gpios being NULL or 892 * set to -ENOENT. num-cs includes internal as well as gpios. 893 * indicated by chip_sel being NULL. GPIO based CS is not 894 * supported yet in DT bindings. 895 */ 896 num_cs = 1; 897 of_property_read_u32(node, "num-cs", &num_cs); 898 pdata->num_chipselect = num_cs; 899 of_property_read_u32(node, "ti,davinci-spi-intr-line", &intr_line); 900 pdata->intr_line = intr_line; 901 return 0; 902 } 903 #else 904 static int spi_davinci_get_pdata(struct platform_device *pdev, 905 struct davinci_spi *dspi) 906 { 907 return -ENODEV; 908 } 909 #endif 910 911 /** 912 * davinci_spi_probe - probe function for SPI Master Controller 913 * @pdev: platform_device structure which contains plateform specific data 914 * 915 * According to Linux Device Model this function will be invoked by Linux 916 * with platform_device struct which contains the device specific info. 917 * This function will map the SPI controller's memory, register IRQ, 918 * Reset SPI controller and setting its registers to default value. 919 * It will invoke spi_bitbang_start to create work queue so that client driver 920 * can register transfer method to work queue. 
921 */ 922 static int davinci_spi_probe(struct platform_device *pdev) 923 { 924 struct spi_controller *host; 925 struct davinci_spi *dspi; 926 struct davinci_spi_platform_data *pdata; 927 struct resource *r; 928 int ret = 0; 929 u32 spipc0; 930 931 host = spi_alloc_host(&pdev->dev, sizeof(struct davinci_spi)); 932 if (host == NULL) { 933 ret = -ENOMEM; 934 goto err; 935 } 936 937 platform_set_drvdata(pdev, host); 938 939 dspi = spi_controller_get_devdata(host); 940 941 if (dev_get_platdata(&pdev->dev)) { 942 pdata = dev_get_platdata(&pdev->dev); 943 dspi->pdata = *pdata; 944 } else { 945 /* update dspi pdata with that from the DT */ 946 ret = spi_davinci_get_pdata(pdev, dspi); 947 if (ret < 0) 948 goto free_host; 949 } 950 951 /* pdata in dspi is now updated and point pdata to that */ 952 pdata = &dspi->pdata; 953 954 dspi->bytes_per_word = devm_kcalloc(&pdev->dev, 955 pdata->num_chipselect, 956 sizeof(*dspi->bytes_per_word), 957 GFP_KERNEL); 958 if (dspi->bytes_per_word == NULL) { 959 ret = -ENOMEM; 960 goto free_host; 961 } 962 963 dspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r); 964 if (IS_ERR(dspi->base)) { 965 ret = PTR_ERR(dspi->base); 966 goto free_host; 967 } 968 dspi->pbase = r->start; 969 970 init_completion(&dspi->done); 971 972 ret = platform_get_irq(pdev, 0); 973 if (ret < 0) 974 goto free_host; 975 dspi->irq = ret; 976 977 ret = devm_request_threaded_irq(&pdev->dev, dspi->irq, davinci_spi_irq, 978 dummy_thread_fn, 0, dev_name(&pdev->dev), dspi); 979 if (ret) 980 goto free_host; 981 982 dspi->bitbang.ctlr = host; 983 984 dspi->clk = devm_clk_get_enabled(&pdev->dev, NULL); 985 if (IS_ERR(dspi->clk)) { 986 ret = -ENODEV; 987 goto free_host; 988 } 989 990 host->use_gpio_descriptors = true; 991 host->dev.of_node = pdev->dev.of_node; 992 host->bus_num = pdev->id; 993 host->num_chipselect = pdata->num_chipselect; 994 host->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 16); 995 host->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_GPIO_SS; 996 
host->setup = davinci_spi_setup; 997 host->cleanup = davinci_spi_cleanup; 998 host->can_dma = davinci_spi_can_dma; 999 1000 dspi->bitbang.chipselect = davinci_spi_chipselect; 1001 dspi->bitbang.setup_transfer = davinci_spi_setup_transfer; 1002 dspi->prescaler_limit = pdata->prescaler_limit; 1003 dspi->version = pdata->version; 1004 1005 dspi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP | SPI_CS_WORD; 1006 if (dspi->version == SPI_VERSION_2) 1007 dspi->bitbang.flags |= SPI_READY; 1008 1009 dspi->bitbang.txrx_bufs = davinci_spi_bufs; 1010 1011 ret = davinci_spi_request_dma(dspi); 1012 if (ret == -EPROBE_DEFER) { 1013 goto free_host; 1014 } else if (ret) { 1015 dev_info(&pdev->dev, "DMA is not supported (%d)\n", ret); 1016 dspi->dma_rx = NULL; 1017 dspi->dma_tx = NULL; 1018 } 1019 1020 dspi->get_rx = davinci_spi_rx_buf_u8; 1021 dspi->get_tx = davinci_spi_tx_buf_u8; 1022 1023 /* Reset In/OUT SPI module */ 1024 iowrite32(0, dspi->base + SPIGCR0); 1025 udelay(100); 1026 iowrite32(1, dspi->base + SPIGCR0); 1027 1028 /* Set up SPIPC0. 
CS and ENA init is done in davinci_spi_setup */ 1029 spipc0 = SPIPC0_DIFUN_MASK | SPIPC0_DOFUN_MASK | SPIPC0_CLKFUN_MASK; 1030 iowrite32(spipc0, dspi->base + SPIPC0); 1031 1032 if (pdata->intr_line) 1033 iowrite32(SPI_INTLVL_1, dspi->base + SPILVL); 1034 else 1035 iowrite32(SPI_INTLVL_0, dspi->base + SPILVL); 1036 1037 iowrite32(CS_DEFAULT, dspi->base + SPIDEF); 1038 1039 /* host mode default */ 1040 set_io_bits(dspi->base + SPIGCR1, SPIGCR1_CLKMOD_MASK); 1041 set_io_bits(dspi->base + SPIGCR1, SPIGCR1_MASTER_MASK); 1042 set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK); 1043 1044 ret = spi_bitbang_start(&dspi->bitbang); 1045 if (ret) 1046 goto free_dma; 1047 1048 dev_info(&pdev->dev, "Controller at 0x%p\n", dspi->base); 1049 1050 return ret; 1051 1052 free_dma: 1053 /* This bit needs to be cleared to disable dpsi->clk */ 1054 clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK); 1055 1056 if (dspi->dma_rx) { 1057 dma_release_channel(dspi->dma_rx); 1058 dma_release_channel(dspi->dma_tx); 1059 } 1060 free_host: 1061 spi_controller_put(host); 1062 err: 1063 return ret; 1064 } 1065 1066 /** 1067 * davinci_spi_remove - remove function for SPI Master Controller 1068 * @pdev: platform_device structure which contains plateform specific data 1069 * 1070 * This function will do the reverse action of davinci_spi_probe function 1071 * It will free the IRQ and SPI controller's memory region. 1072 * It will also call spi_bitbang_stop to destroy the work queue which was 1073 * created by spi_bitbang_start. 
 */
static void davinci_spi_remove(struct platform_device *pdev)
{
	struct davinci_spi *dspi;
	struct spi_controller *host;

	host = platform_get_drvdata(pdev);
	dspi = spi_controller_get_devdata(host);

	/* stop accepting new messages before touching the hardware */
	spi_bitbang_stop(&dspi->bitbang);

	/* This bit needs to be cleared to disable dspi->clk */
	clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);

	/* rx and tx channels are acquired as a pair; rx NULL implies tx NULL */
	if (dspi->dma_rx) {
		dma_release_channel(dspi->dma_rx);
		dma_release_channel(dspi->dma_tx);
	}

	spi_controller_put(host);
}

static struct platform_driver davinci_spi_driver = {
	.driver = {
		.name = "spi_davinci",
		.of_match_table = of_match_ptr(davinci_spi_of_match),
	},
	.probe = davinci_spi_probe,
	.remove = davinci_spi_remove,
};
module_platform_driver(davinci_spi_driver);

MODULE_DESCRIPTION("TI DaVinci SPI Master Controller Driver");
MODULE_LICENSE("GPL");