/*
 * Copyright (C) 2009 Texas Instruments.
 * Copyright (C) 2010 EF Johnson Technologies
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/slab.h>

#include <linux/platform_data/spi-davinci.h>

#define CS_DEFAULT	0xFF

#define SPIFMT_PHASE_MASK	BIT(16)
#define SPIFMT_POLARITY_MASK	BIT(17)
#define SPIFMT_DISTIMER_MASK	BIT(18)
#define SPIFMT_SHIFTDIR_MASK	BIT(20)
#define SPIFMT_WAITENA_MASK	BIT(21)
#define SPIFMT_PARITYENA_MASK	BIT(22)
#define SPIFMT_ODD_PARITY_MASK	BIT(23)
#define SPIFMT_WDELAY_MASK	0x3f000000u
#define SPIFMT_WDELAY_SHIFT	24
#define SPIFMT_PRESCALE_SHIFT	8

/* SPIPC0 */
#define SPIPC0_DIFUN_MASK	BIT(11)		/* MISO */
#define SPIPC0_DOFUN_MASK	BIT(10)		/* MOSI */
#define SPIPC0_CLKFUN_MASK	BIT(9)		/* CLK */
#define SPIPC0_SPIENA_MASK	BIT(8)		/* nREADY */

#define SPIINT_MASKALL		0x0101035F
#define SPIINT_MASKINT		0x0000015F
#define SPI_INTLVL_1		0x000001FF
#define SPI_INTLVL_0		0x00000000

/* SPIDAT1 (upper 16 bit defines) */
#define SPIDAT1_CSHOLD_MASK	BIT(12)
#define SPIDAT1_WDEL		BIT(10)

/* SPIGCR1 */
#define SPIGCR1_CLKMOD_MASK	BIT(1)
#define SPIGCR1_MASTER_MASK	BIT(0)
#define SPIGCR1_POWERDOWN_MASK	BIT(8)
#define SPIGCR1_LOOPBACK_MASK	BIT(16)
#define SPIGCR1_SPIENA_MASK	BIT(24)

/* SPIBUF */
#define SPIBUF_TXFULL_MASK	BIT(29)
#define SPIBUF_RXEMPTY_MASK	BIT(31)

/* SPIDELAY */
#define SPIDELAY_C2TDELAY_SHIFT	24
#define SPIDELAY_C2TDELAY_MASK	(0xFF << SPIDELAY_C2TDELAY_SHIFT)
#define SPIDELAY_T2CDELAY_SHIFT	16
#define SPIDELAY_T2CDELAY_MASK	(0xFF << SPIDELAY_T2CDELAY_SHIFT)
#define SPIDELAY_T2EDELAY_SHIFT	8
#define SPIDELAY_T2EDELAY_MASK	(0xFF << SPIDELAY_T2EDELAY_SHIFT)
#define SPIDELAY_C2EDELAY_SHIFT	0
#define SPIDELAY_C2EDELAY_MASK	0xFF

/* Error Masks */
#define SPIFLG_DLEN_ERR_MASK		BIT(0)
#define SPIFLG_TIMEOUT_MASK		BIT(1)
#define SPIFLG_PARERR_MASK		BIT(2)
#define SPIFLG_DESYNC_MASK		BIT(3)
#define SPIFLG_BITERR_MASK		BIT(4)
#define SPIFLG_OVRRUN_MASK		BIT(6)
#define SPIFLG_BUF_INIT_ACTIVE_MASK	BIT(24)
#define SPIFLG_ERROR_MASK		(SPIFLG_DLEN_ERR_MASK \
				| SPIFLG_TIMEOUT_MASK | SPIFLG_PARERR_MASK \
				| SPIFLG_DESYNC_MASK | SPIFLG_BITERR_MASK \
				| SPIFLG_OVRRUN_MASK)

#define SPIINT_DMA_REQ_EN	BIT(16)

/* SPI Controller registers */
#define SPIGCR0		0x00
#define SPIGCR1		0x04
#define SPIINT		0x08
#define SPILVL		0x0c
#define SPIFLG		0x10
#define SPIPC0		0x14
#define SPIDAT1		0x3c
#define SPIBUF		0x40
#define SPIDELAY	0x48
#define SPIDEF		0x4c
#define SPIFMT0		0x50

/* SPI Controller driver's private data. */
struct davinci_spi {
	struct spi_bitbang	bitbang;
	struct clk		*clk;

	u8			version;
	resource_size_t		pbase;
	void __iomem		*base;
	u32			irq;
	struct completion	done;

	const void		*tx;
	void			*rx;
	int			rcount;
	int			wcount;

	struct dma_chan		*dma_rx;
	struct dma_chan		*dma_tx;

	struct davinci_spi_platform_data pdata;

	void			(*get_rx)(u32 rx_data, struct davinci_spi *);
	u32			(*get_tx)(struct davinci_spi *);

	u8			*bytes_per_word;

	u8			prescaler_limit;
};

static struct davinci_spi_config davinci_spi_default_cfg;

static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *dspi)
{
	if (dspi->rx) {
		u8 *rx = dspi->rx;
		*rx++ = (u8)data;
		dspi->rx = rx;
	}
}

static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *dspi)
{
	if (dspi->rx) {
		u16 *rx = dspi->rx;
		*rx++ = (u16)data;
		dspi->rx = rx;
	}
}

static u32 davinci_spi_tx_buf_u8(struct davinci_spi *dspi)
{
	u32 data = 0;

	if (dspi->tx) {
		const u8 *tx = dspi->tx;

		data = *tx++;
		dspi->tx = tx;
	}
	return data;
}

static u32 davinci_spi_tx_buf_u16(struct davinci_spi *dspi)
{
	u32 data = 0;

	if (dspi->tx) {
		const u16 *tx = dspi->tx;

		data = *tx++;
		dspi->tx = tx;
	}
	return data;
}

static inline void set_io_bits(void __iomem *addr, u32 bits)
{
	u32 v = ioread32(addr);

	v |= bits;
	iowrite32(v, addr);
}

static inline void clear_io_bits(void __iomem *addr, u32 bits)
{
	u32 v = ioread32(addr);

	v &= ~bits;
	iowrite32(v, addr);
}

/*
 * Interface to control the chip select signal
 */
static void davinci_spi_chipselect(struct spi_device *spi, int value)
{
	struct davinci_spi *dspi;
	struct davinci_spi_platform_data *pdata;
	struct davinci_spi_config *spicfg = spi->controller_data;
	u8 chip_sel = spi->chip_select;
	u16 spidat1 = CS_DEFAULT;

	dspi = spi_master_get_devdata(spi->master);
	pdata = &dspi->pdata;

	/* program the inter-word delay if wdelay is non-zero */
	if (spicfg->wdelay)
		spidat1 |= SPIDAT1_WDEL;

	/*
	 * Board specific chip select logic decides the polarity and cs
	 * line for the controller
	 */
	if (spi->cs_gpio >= 0) {
		if (value == BITBANG_CS_ACTIVE)
			gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH);
		else
			gpio_set_value(spi->cs_gpio,
				!(spi->mode & SPI_CS_HIGH));
	} else {
		if (value == BITBANG_CS_ACTIVE) {
			spidat1 |= SPIDAT1_CSHOLD_MASK;
			spidat1 &= ~(0x1 << chip_sel);
		}
	}

	iowrite16(spidat1, dspi->base + SPIDAT1 + 2);
}

/**
 * davinci_spi_get_prescale - Calculates the correct prescale value
 * @dspi: the controller data
 * @max_speed_hz: the maximum rate the SPI clock can run at
 *
 * This function calculates the prescale value that generates a clock rate
 * less than or equal to the specified maximum.
 *
 * Returns: calculated prescale value for easy programming into SPI registers
 * or negative error number if a valid prescale value cannot be selected.
 */
static inline int davinci_spi_get_prescale(struct davinci_spi *dspi,
							u32 max_speed_hz)
{
	int ret;

	/* Subtract 1 to match what will be programmed into SPI register. */
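	/*
	 * Illustrative example (numbers assumed, not taken from any
	 * particular board): with a 150 MHz functional clock and
	 * max_speed_hz = 10 MHz, DIV_ROUND_UP(150 MHz, 10 MHz) - 1 = 14 is
	 * returned; the hardware divides by (prescale + 1), giving 10 MHz.
	 */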
	ret = DIV_ROUND_UP(clk_get_rate(dspi->clk), max_speed_hz) - 1;

	if (ret < dspi->prescaler_limit || ret > 255)
		return -EINVAL;

	return ret;
}

/**
 * davinci_spi_setup_transfer - This function determines the transfer method
 * @spi: spi device on which data transfer is to be done
 * @t: spi transfer in which transfer info is filled
 *
 * This function determines the data transfer method (8-bit or 16-bit
 * transfers). It also programs the SPIFMTn register according to the
 * SPI slave device's frequency.
 */
static int davinci_spi_setup_transfer(struct spi_device *spi,
		struct spi_transfer *t)
{

	struct davinci_spi *dspi;
	struct davinci_spi_config *spicfg;
	u8 bits_per_word = 0;
	u32 hz = 0, spifmt = 0;
	int prescale;

	dspi = spi_master_get_devdata(spi->master);
	spicfg = spi->controller_data;
	if (!spicfg)
		spicfg = &davinci_spi_default_cfg;

	if (t) {
		bits_per_word = t->bits_per_word;
		hz = t->speed_hz;
	}

	/* if bits_per_word is not set then use the default */
	if (!bits_per_word)
		bits_per_word = spi->bits_per_word;

	/*
	 * Assign function pointer to appropriate transfer method
	 * 8bit, 16bit or 32bit transfer
	 */
	if (bits_per_word <= 8) {
		dspi->get_rx = davinci_spi_rx_buf_u8;
		dspi->get_tx = davinci_spi_tx_buf_u8;
		dspi->bytes_per_word[spi->chip_select] = 1;
	} else {
		dspi->get_rx = davinci_spi_rx_buf_u16;
		dspi->get_tx = davinci_spi_tx_buf_u16;
		dspi->bytes_per_word[spi->chip_select] = 2;
	}

	if (!hz)
		hz = spi->max_speed_hz;

	/* Set up SPIFMTn register, unique to this chipselect. */

	prescale = davinci_spi_get_prescale(dspi, hz);
	if (prescale < 0)
		return prescale;

	spifmt = (prescale << SPIFMT_PRESCALE_SHIFT) | (bits_per_word & 0x1f);

	if (spi->mode & SPI_LSB_FIRST)
		spifmt |= SPIFMT_SHIFTDIR_MASK;

	if (spi->mode & SPI_CPOL)
		spifmt |= SPIFMT_POLARITY_MASK;

	if (!(spi->mode & SPI_CPHA))
		spifmt |= SPIFMT_PHASE_MASK;

	/*
	 * Assume wdelay is used only on SPI peripherals that have this field
	 * in the SPIFMTn register and when it's configured from board file or DT.
	 */
	if (spicfg->wdelay)
		spifmt |= ((spicfg->wdelay << SPIFMT_WDELAY_SHIFT)
				& SPIFMT_WDELAY_MASK);

	/*
	 * Version 1 hardware supports two basic SPI modes:
	 *  - Standard SPI mode uses 4 pins, with chipselect
	 *  - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS)
	 *	(distinct from SPI_3WIRE, with just one data wire;
	 *	or similar variants without MOSI or without MISO)
	 *
	 * Version 2 hardware supports an optional handshaking signal,
	 * so it can support two more modes:
	 *  - 5 pin SPI variant is standard SPI plus SPI_READY
	 *  - 4 pin with enable is (SPI_READY | SPI_NO_CS)
	 */

	if (dspi->version == SPI_VERSION_2) {

		u32 delay = 0;

		if (spicfg->odd_parity)
			spifmt |= SPIFMT_ODD_PARITY_MASK;

		if (spicfg->parity_enable)
			spifmt |= SPIFMT_PARITYENA_MASK;

		if (spicfg->timer_disable) {
			spifmt |= SPIFMT_DISTIMER_MASK;
		} else {
			delay |= (spicfg->c2tdelay << SPIDELAY_C2TDELAY_SHIFT)
				& SPIDELAY_C2TDELAY_MASK;
			delay |= (spicfg->t2cdelay << SPIDELAY_T2CDELAY_SHIFT)
				& SPIDELAY_T2CDELAY_MASK;
		}

		if (spi->mode & SPI_READY) {
			spifmt |= SPIFMT_WAITENA_MASK;
			delay |= (spicfg->t2edelay << SPIDELAY_T2EDELAY_SHIFT)
				& SPIDELAY_T2EDELAY_MASK;
			delay |= (spicfg->c2edelay << SPIDELAY_C2EDELAY_SHIFT)
				& SPIDELAY_C2EDELAY_MASK;
		}

		iowrite32(delay, dspi->base + SPIDELAY);
	}

	iowrite32(spifmt, dspi->base + SPIFMT0);

	return 0;
}

static int davinci_spi_of_setup(struct spi_device *spi)
{
	struct davinci_spi_config *spicfg = spi->controller_data;
	struct device_node *np = spi->dev.of_node;
	u32 prop;

	if (spicfg == NULL && np) {
		spicfg = kzalloc(sizeof(*spicfg), GFP_KERNEL);
		if (!spicfg)
			return -ENOMEM;
		*spicfg = davinci_spi_default_cfg;
		/* override with dt configured values */
		if (!of_property_read_u32(np, "ti,spi-wdelay", &prop))
			spicfg->wdelay = (u8)prop;
		spi->controller_data = spicfg;
	}

	return 0;
}
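
/*
 * The optional per-slave "ti,spi-wdelay" property parsed above could, for
 * example, appear in a slave node like this (illustrative values only):
 *
 *	flash@0 {
 *		compatible = "...";
 *		reg = <0>;
 *		spi-max-frequency = <20000000>;
 *		ti,spi-wdelay = <8>;
 *	};
 */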

/**
 * davinci_spi_setup - per-device setup for the SPI controller
 * @spi: spi device on which data transfer is to be done
 *
 * This function configures the chip select (GPIO based or internal),
 * the optional SPI_READY handshake and loopback mode for the device.
 */
static int davinci_spi_setup(struct spi_device *spi)
{
	int retval = 0;
	struct davinci_spi *dspi;
	struct davinci_spi_platform_data *pdata;
	struct spi_master *master = spi->master;
	struct device_node *np = spi->dev.of_node;
	bool internal_cs = true;

	dspi = spi_master_get_devdata(spi->master);
	pdata = &dspi->pdata;

	if (!(spi->mode & SPI_NO_CS)) {
		if (np && (master->cs_gpios != NULL) && (spi->cs_gpio >= 0)) {
			retval = gpio_direction_output(
				      spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
			internal_cs = false;
		} else if (pdata->chip_sel &&
			   spi->chip_select < pdata->num_chipselect &&
			   pdata->chip_sel[spi->chip_select] != SPI_INTERN_CS) {
			spi->cs_gpio = pdata->chip_sel[spi->chip_select];
			retval = gpio_direction_output(
				      spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
			internal_cs = false;
		}

		if (retval) {
			dev_err(&spi->dev, "GPIO %d setup failed (%d)\n",
				spi->cs_gpio, retval);
			return retval;
		}

		if (internal_cs)
			set_io_bits(dspi->base + SPIPC0, 1 << spi->chip_select);
	}

	if (spi->mode & SPI_READY)
		set_io_bits(dspi->base + SPIPC0, SPIPC0_SPIENA_MASK);

	if (spi->mode & SPI_LOOP)
		set_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK);
	else
		clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK);

	return davinci_spi_of_setup(spi);
}

static void davinci_spi_cleanup(struct spi_device *spi)
{
	struct davinci_spi_config *spicfg = spi->controller_data;

	spi->controller_data = NULL;
	if (spi->dev.of_node)
		kfree(spicfg);
}

static int davinci_spi_check_error(struct davinci_spi *dspi, int int_status)
{
	struct device *sdev = dspi->bitbang.master->dev.parent;

	if (int_status & SPIFLG_TIMEOUT_MASK) {
		dev_err(sdev, "SPI Time-out Error\n");
		return -ETIMEDOUT;
	}
	if (int_status & SPIFLG_DESYNC_MASK) {
		dev_err(sdev, "SPI Desynchronization Error\n");
		return -EIO;
	}
	if (int_status & SPIFLG_BITERR_MASK) {
		dev_err(sdev, "SPI Bit error\n");
		return -EIO;
	}

	if (dspi->version == SPI_VERSION_2) {
		if (int_status & SPIFLG_DLEN_ERR_MASK) {
			dev_err(sdev, "SPI Data Length Error\n");
			return -EIO;
		}
		if (int_status & SPIFLG_PARERR_MASK) {
			dev_err(sdev, "SPI Parity Error\n");
			return -EIO;
		}
		if (int_status & SPIFLG_OVRRUN_MASK) {
			dev_err(sdev, "SPI Data Overrun error\n");
			return -EIO;
		}
		if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) {
			dev_err(sdev, "SPI Buffer Init Active\n");
			return -EBUSY;
		}
	}

	return 0;
}

/**
 * davinci_spi_process_events - check for and handle any SPI controller events
 * @dspi: the controller data
 *
 * This function will check the SPIFLG register and handle any events that are
 * detected there.
 */
static int davinci_spi_process_events(struct davinci_spi *dspi)
{
	u32 buf, status, errors = 0, spidat1;

	buf = ioread32(dspi->base + SPIBUF);

	if (dspi->rcount > 0 && !(buf & SPIBUF_RXEMPTY_MASK)) {
		dspi->get_rx(buf & 0xFFFF, dspi);
		dspi->rcount--;
	}

	status = ioread32(dspi->base + SPIFLG);

	if (unlikely(status & SPIFLG_ERROR_MASK)) {
		errors = status & SPIFLG_ERROR_MASK;
		goto out;
	}

	if (dspi->wcount > 0 && !(buf & SPIBUF_TXFULL_MASK)) {
		spidat1 = ioread32(dspi->base + SPIDAT1);
		dspi->wcount--;
		spidat1 &= ~0xFFFF;
		spidat1 |= 0xFFFF & dspi->get_tx(dspi);
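		/* writing the data field of SPIDAT1 starts the next word transfer */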
		iowrite32(spidat1, dspi->base + SPIDAT1);
	}

out:
	return errors;
}

static void davinci_spi_dma_rx_callback(void *data)
{
	struct davinci_spi *dspi = (struct davinci_spi *)data;

	dspi->rcount = 0;

	if (!dspi->wcount && !dspi->rcount)
		complete(&dspi->done);
}

static void davinci_spi_dma_tx_callback(void *data)
{
	struct davinci_spi *dspi = (struct davinci_spi *)data;

	dspi->wcount = 0;

	if (!dspi->wcount && !dspi->rcount)
		complete(&dspi->done);
}

/**
 * davinci_spi_bufs - function which handles the data transfer
 * @spi: spi device on which data transfer is to be done
 * @t: spi transfer in which transfer info is filled
 *
 * This function puts the data to be transferred into the data register
 * of the SPI controller and then waits until completion is signalled by
 * the IRQ handler.
 */
static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
{
	struct davinci_spi *dspi;
	int data_type, ret = -ENOMEM;
	u32 tx_data, spidat1;
	u32 errors = 0;
	struct davinci_spi_config *spicfg;
	struct davinci_spi_platform_data *pdata;
	unsigned uninitialized_var(rx_buf_count);
	void *dummy_buf = NULL;
	struct scatterlist sg_rx, sg_tx;

	dspi = spi_master_get_devdata(spi->master);
	pdata = &dspi->pdata;
	spicfg = (struct davinci_spi_config *)spi->controller_data;
	if (!spicfg)
		spicfg = &davinci_spi_default_cfg;

	/* convert len to words based on bits_per_word */
	data_type = dspi->bytes_per_word[spi->chip_select];

	dspi->tx = t->tx_buf;
	dspi->rx = t->rx_buf;
	dspi->wcount = t->len / data_type;
	dspi->rcount = dspi->wcount;

	spidat1 = ioread32(dspi->base + SPIDAT1);

	clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);
	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);

	reinit_completion(&dspi->done);

	if (spicfg->io_type == SPI_IO_TYPE_INTR)
		set_io_bits(dspi->base + SPIINT, SPIINT_MASKINT);

	if (spicfg->io_type != SPI_IO_TYPE_DMA) {
		/* start the transfer */
		dspi->wcount--;
		tx_data = dspi->get_tx(dspi);
		spidat1 &= 0xFFFF0000;
		spidat1 |= tx_data & 0xFFFF;
		iowrite32(spidat1, dspi->base + SPIDAT1);
	} else {
		struct dma_slave_config dma_rx_conf = {
			.direction = DMA_DEV_TO_MEM,
			.src_addr = (unsigned long)dspi->pbase + SPIBUF,
			.src_addr_width = data_type,
			.src_maxburst = 1,
		};
		struct dma_slave_config dma_tx_conf = {
			.direction = DMA_MEM_TO_DEV,
			.dst_addr = (unsigned long)dspi->pbase + SPIDAT1,
			.dst_addr_width = data_type,
			.dst_maxburst = 1,
		};
		struct dma_async_tx_descriptor *rxdesc;
		struct dma_async_tx_descriptor *txdesc;
		void *buf;

		dummy_buf = kzalloc(t->len, GFP_KERNEL);
		if (!dummy_buf)
			goto err_alloc_dummy_buf;

		dmaengine_slave_config(dspi->dma_rx, &dma_rx_conf);
		dmaengine_slave_config(dspi->dma_tx, &dma_tx_conf);

		sg_init_table(&sg_rx, 1);
		if (!t->rx_buf)
			buf = dummy_buf;
		else
			buf = t->rx_buf;
		t->rx_dma = dma_map_single(&spi->dev, buf,
				t->len, DMA_FROM_DEVICE);
		if (dma_mapping_error(&spi->dev, t->rx_dma)) {
			ret = -EFAULT;
			goto err_rx_map;
		}
		sg_dma_address(&sg_rx) = t->rx_dma;
		sg_dma_len(&sg_rx) = t->len;

		sg_init_table(&sg_tx, 1);
		if (!t->tx_buf)
			buf = dummy_buf;
		else
			buf = (void *)t->tx_buf;
		t->tx_dma = dma_map_single(&spi->dev, buf,
				t->len, DMA_TO_DEVICE);
		if (dma_mapping_error(&spi->dev, t->tx_dma)) {
			ret = -EFAULT;
			goto err_tx_map;
		}
		sg_dma_address(&sg_tx) = t->tx_dma;
		sg_dma_len(&sg_tx) = t->len;

		rxdesc = dmaengine_prep_slave_sg(dspi->dma_rx,
				&sg_rx, 1, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!rxdesc)
			goto err_desc;

		txdesc = dmaengine_prep_slave_sg(dspi->dma_tx,
				&sg_tx, 1, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!txdesc)
			goto err_desc;

		rxdesc->callback = davinci_spi_dma_rx_callback;
		rxdesc->callback_param = (void *)dspi;
		txdesc->callback = davinci_spi_dma_tx_callback;
		txdesc->callback_param = (void *)dspi;

		if (pdata->cshold_bug)
			iowrite16(spidat1 >> 16, dspi->base + SPIDAT1 + 2);

		dmaengine_submit(rxdesc);
		dmaengine_submit(txdesc);

		dma_async_issue_pending(dspi->dma_rx);
		dma_async_issue_pending(dspi->dma_tx);

		set_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);
	}

	/* Wait for the transfer to complete */
	if (spicfg->io_type != SPI_IO_TYPE_POLL) {
		if (wait_for_completion_timeout(&dspi->done, HZ) == 0)
			errors = SPIFLG_TIMEOUT_MASK;
	} else {
		while (dspi->rcount > 0 || dspi->wcount > 0) {
			errors = davinci_spi_process_events(dspi);
			if (errors)
				break;
			cpu_relax();
		}
	}

	clear_io_bits(dspi->base + SPIINT, SPIINT_MASKALL);
	if (spicfg->io_type == SPI_IO_TYPE_DMA) {
		clear_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);

		dma_unmap_single(&spi->dev, t->rx_dma,
				t->len, DMA_FROM_DEVICE);
		dma_unmap_single(&spi->dev, t->tx_dma,
				t->len, DMA_TO_DEVICE);
		kfree(dummy_buf);
	}

	clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);

	/*
	 * Check for bit error, desync error, parity error, timeout error and
	 * receive overflow errors
	 */
	if (errors) {
		ret = davinci_spi_check_error(dspi, errors);
		WARN(!ret, "%s: error reported but no error found!\n",
							dev_name(&spi->dev));
		return ret;
	}

	if (dspi->rcount != 0 || dspi->wcount != 0) {
		dev_err(&spi->dev, "SPI data transfer error\n");
		return -EIO;
	}

	return t->len;

err_desc:
	dma_unmap_single(&spi->dev, t->tx_dma, t->len, DMA_TO_DEVICE);
err_tx_map:
	dma_unmap_single(&spi->dev, t->rx_dma, t->len, DMA_FROM_DEVICE);
err_rx_map:
	kfree(dummy_buf);
err_alloc_dummy_buf:
	return ret;
}

/**
 * dummy_thread_fn - dummy thread function
 * @irq: IRQ number for this SPI Master
 * @data: structure for SPI Master controller davinci_spi
 *
 * This is to satisfy the request_threaded_irq() API so that the irq
 * handler is called in interrupt context.
 */
static irqreturn_t dummy_thread_fn(s32 irq, void *data)
{
	return IRQ_HANDLED;
}

/**
 * davinci_spi_irq - Interrupt handler for SPI Master Controller
 * @irq: IRQ number for this SPI Master
 * @data: structure for SPI Master controller davinci_spi
 *
 * The ISR determines whether the interrupt arrived for a READ or a WRITE
 * and acts accordingly. It checks the remaining transfer length and, if it
 * is non-zero, dispatches the next word. Once the transfer length reaches
 * zero it signals completion so that davinci_spi_bufs() can proceed.
 */
static irqreturn_t davinci_spi_irq(s32 irq, void *data)
{
	struct davinci_spi *dspi = data;
	int status;

	status = davinci_spi_process_events(dspi);
	if (unlikely(status != 0))
		clear_io_bits(dspi->base + SPIINT, SPIINT_MASKINT);

	if ((!dspi->rcount && !dspi->wcount) || status)
		complete(&dspi->done);

	return IRQ_HANDLED;
}

static int davinci_spi_request_dma(struct davinci_spi *dspi)
{
	struct device *sdev = dspi->bitbang.master->dev.parent;

	dspi->dma_rx = dma_request_chan(sdev, "rx");
	if (IS_ERR(dspi->dma_rx))
		return PTR_ERR(dspi->dma_rx);

	dspi->dma_tx = dma_request_chan(sdev, "tx");
	if (IS_ERR(dspi->dma_tx)) {
		dma_release_channel(dspi->dma_rx);
		return PTR_ERR(dspi->dma_tx);
	}

	return 0;
}

#if defined(CONFIG_OF)

/* OF SPI data structure */
struct davinci_spi_of_data {
	u8	version;
	u8	prescaler_limit;
};

static const struct davinci_spi_of_data dm6441_spi_data = {
	.version = SPI_VERSION_1,
	.prescaler_limit = 2,
};

static const struct davinci_spi_of_data da830_spi_data = {
	.version = SPI_VERSION_2,
	.prescaler_limit = 2,
};

static const struct davinci_spi_of_data keystone_spi_data = {
	.version = SPI_VERSION_1,
	.prescaler_limit = 0,
};

static const struct of_device_id davinci_spi_of_match[] = {
	{
		.compatible = "ti,dm6441-spi",
		.data = &dm6441_spi_data,
	},
	{
		.compatible = "ti,da830-spi",
		.data = &da830_spi_data,
	},
	{
		.compatible = "ti,keystone-spi",
		.data = &keystone_spi_data,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, davinci_spi_of_match);

/**
 * spi_davinci_get_pdata - Get platform data from DTS binding
 * @pdev: ptr to platform device
 * @dspi: ptr to driver data
 *
 * Parses and populates pdata in dspi from device tree bindings.
 *
 * NOTE: Not all platform data params are supported currently.
 */
static int spi_davinci_get_pdata(struct platform_device *pdev,
			struct davinci_spi *dspi)
{
	struct device_node *node = pdev->dev.of_node;
	struct davinci_spi_of_data *spi_data;
	struct davinci_spi_platform_data *pdata;
	unsigned int num_cs, intr_line = 0;
	const struct of_device_id *match;

	pdata = &dspi->pdata;

	match = of_match_device(davinci_spi_of_match, &pdev->dev);
	if (!match)
		return -ENODEV;

	spi_data = (struct davinci_spi_of_data *)match->data;

	pdata->version = spi_data->version;
	pdata->prescaler_limit = spi_data->prescaler_limit;
	/*
	 * default num_cs is 1 and all chipsel are internal to the chip,
	 * indicated by chip_sel being NULL or cs_gpios being NULL or
	 * set to -ENOENT. num-cs includes internal as well as gpios.
	 */
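	/*
	 * For illustration only (values assumed): a controller node
	 * providing these optional properties might look like:
	 *
	 *	spi@... {
	 *		compatible = "ti,da830-spi";
	 *		num-cs = <4>;
	 *		ti,davinci-spi-intr-line = <1>;
	 *	};
	 */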
	num_cs = 1;
	of_property_read_u32(node, "num-cs", &num_cs);
	pdata->num_chipselect = num_cs;
	of_property_read_u32(node, "ti,davinci-spi-intr-line", &intr_line);
	pdata->intr_line = intr_line;
	return 0;
}
#else
static int spi_davinci_get_pdata(struct platform_device *pdev,
			struct davinci_spi *dspi)
{
	return -ENODEV;
}
#endif

/**
 * davinci_spi_probe - probe function for SPI Master Controller
 * @pdev: platform_device structure which contains platform specific data
 *
 * According to the Linux Device Model this function will be invoked by Linux
 * with a platform_device struct which contains the device specific info.
 * This function maps the SPI controller's memory region, registers the IRQ,
 * resets the SPI controller and programs its registers to default values.
 * It invokes spi_bitbang_start() to register the controller with the SPI
 * core so that client drivers can queue transfers against it.
 */
static int davinci_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct davinci_spi *dspi;
	struct davinci_spi_platform_data *pdata;
	struct resource *r;
	int ret = 0;
	u32 spipc0;

	master = spi_alloc_master(&pdev->dev, sizeof(struct davinci_spi));
	if (master == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	platform_set_drvdata(pdev, master);

	dspi = spi_master_get_devdata(master);

	if (dev_get_platdata(&pdev->dev)) {
		pdata = dev_get_platdata(&pdev->dev);
		dspi->pdata = *pdata;
	} else {
		/* update dspi pdata with that from the DT */
		ret = spi_davinci_get_pdata(pdev, dspi);
		if (ret < 0)
			goto free_master;
	}

	/* pdata in dspi is now updated; point pdata at it */
	pdata = &dspi->pdata;

	dspi->bytes_per_word = devm_kzalloc(&pdev->dev,
					    sizeof(*dspi->bytes_per_word) *
					    pdata->num_chipselect, GFP_KERNEL);
	if (dspi->bytes_per_word == NULL) {
		ret = -ENOMEM;
		goto free_master;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (r == NULL) {
		ret = -ENOENT;
		goto free_master;
	}

	dspi->pbase = r->start;

	dspi->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(dspi->base)) {
		ret = PTR_ERR(dspi->base);
		goto free_master;
	}

	ret = platform_get_irq(pdev, 0);
	if (ret == 0)
		ret = -EINVAL;
	if (ret < 0)
		goto free_master;
	dspi->irq = ret;

	ret = devm_request_threaded_irq(&pdev->dev, dspi->irq, davinci_spi_irq,
				dummy_thread_fn, 0, dev_name(&pdev->dev), dspi);
	if (ret)
		goto free_master;

	dspi->bitbang.master = master;

	dspi->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dspi->clk)) {
		ret = -ENODEV;
		goto free_master;
	}
	clk_prepare_enable(dspi->clk);

	master->dev.of_node = pdev->dev.of_node;
	master->bus_num = pdev->id;
	master->num_chipselect = pdata->num_chipselect;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 16);
	master->setup = davinci_spi_setup;
	master->cleanup = davinci_spi_cleanup;

	dspi->bitbang.chipselect = davinci_spi_chipselect;
	dspi->bitbang.setup_transfer = davinci_spi_setup_transfer;
	dspi->prescaler_limit = pdata->prescaler_limit;
	dspi->version = pdata->version;

	dspi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP;
	if (dspi->version == SPI_VERSION_2)
		dspi->bitbang.flags |= SPI_READY;

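	/* claim any chip select GPIOs listed in the DT "cs-gpios" property */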
	if (pdev->dev.of_node) {
		int i;

		for (i = 0; i < pdata->num_chipselect; i++) {
			int cs_gpio = of_get_named_gpio(pdev->dev.of_node,
							"cs-gpios", i);

			if (cs_gpio == -EPROBE_DEFER) {
				ret = cs_gpio;
				goto free_clk;
			}

			if (gpio_is_valid(cs_gpio)) {
				ret = devm_gpio_request(&pdev->dev, cs_gpio,
							dev_name(&pdev->dev));
				if (ret)
					goto free_clk;
			}
		}
	}

	dspi->bitbang.txrx_bufs = davinci_spi_bufs;

	ret = davinci_spi_request_dma(dspi);
	if (ret == -EPROBE_DEFER) {
		goto free_clk;
	} else if (ret) {
		dev_info(&pdev->dev, "DMA is not supported (%d)\n", ret);
		dspi->dma_rx = NULL;
		dspi->dma_tx = NULL;
	}

	dspi->get_rx = davinci_spi_rx_buf_u8;
	dspi->get_tx = davinci_spi_tx_buf_u8;

	init_completion(&dspi->done);

	/* Reset In/OUT SPI module */
	iowrite32(0, dspi->base + SPIGCR0);
	udelay(100);
	iowrite32(1, dspi->base + SPIGCR0);

	/* Set up SPIPC0.  CS and ENA init is done in davinci_spi_setup */
	spipc0 = SPIPC0_DIFUN_MASK | SPIPC0_DOFUN_MASK | SPIPC0_CLKFUN_MASK;
	iowrite32(spipc0, dspi->base + SPIPC0);

	if (pdata->intr_line)
		iowrite32(SPI_INTLVL_1, dspi->base + SPILVL);
	else
		iowrite32(SPI_INTLVL_0, dspi->base + SPILVL);

	iowrite32(CS_DEFAULT, dspi->base + SPIDEF);

	/* master mode default */
	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_CLKMOD_MASK);
	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_MASTER_MASK);
	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);

	ret = spi_bitbang_start(&dspi->bitbang);
	if (ret)
		goto free_dma;

	dev_info(&pdev->dev, "Controller at 0x%p\n", dspi->base);

	return ret;

free_dma:
	if (dspi->dma_rx) {
		dma_release_channel(dspi->dma_rx);
		dma_release_channel(dspi->dma_tx);
	}
free_clk:
	clk_disable_unprepare(dspi->clk);
free_master:
	spi_master_put(master);
err:
	return ret;
}

/**
 * davinci_spi_remove - remove function for SPI Master Controller
 * @pdev: platform_device structure which contains platform specific data
 *
 * This function does the reverse of davinci_spi_probe(). It frees the IRQ
 * and the SPI controller's memory region, and calls spi_bitbang_stop() to
 * undo the registration done by spi_bitbang_start().
 */
static int davinci_spi_remove(struct platform_device *pdev)
{
	struct davinci_spi *dspi;
	struct spi_master *master;

	master = platform_get_drvdata(pdev);
	dspi = spi_master_get_devdata(master);

	spi_bitbang_stop(&dspi->bitbang);

	clk_disable_unprepare(dspi->clk);
	spi_master_put(master);

	if (dspi->dma_rx) {
		dma_release_channel(dspi->dma_rx);
		dma_release_channel(dspi->dma_tx);
	}

	return 0;
}

static struct platform_driver davinci_spi_driver = {
	.driver = {
		.name = "spi_davinci",
		.of_match_table = of_match_ptr(davinci_spi_of_match),
	},
	.probe = davinci_spi_probe,
	.remove = davinci_spi_remove,
};
module_platform_driver(davinci_spi_driver);

MODULE_DESCRIPTION("TI DaVinci SPI Master Controller Driver");
MODULE_LICENSE("GPL");