/*
 * Driver for Cirrus Logic EP93xx SPI controller.
 *
 * Copyright (C) 2010-2011 Mika Westerberg
 *
 * Explicit FIFO handling code was inspired by amba-pl022 driver.
 *
 * Chip select support using other than built-in GPIOs by H. Hartley Sweeten.
 *
 * For more information about the SPI controller see documentation on Cirrus
 * Logic web site:
 *     http://www.cirrus.com/en/pubs/manual/EP93xx_Users_Guide_UM1.pdf
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/spi/spi.h>

#include <linux/platform_data/dma-ep93xx.h>
#include <linux/platform_data/spi-ep93xx.h>

#define SSPCR0			0x0000
#define SSPCR0_MODE_SHIFT	6
#define SSPCR0_SCR_SHIFT	8

#define SSPCR1			0x0004
#define SSPCR1_RIE		BIT(0)
#define SSPCR1_TIE		BIT(1)
#define SSPCR1_RORIE		BIT(2)
#define SSPCR1_LBM		BIT(3)
#define SSPCR1_SSE		BIT(4)
#define SSPCR1_MS		BIT(5)
#define SSPCR1_SOD		BIT(6)

#define SSPDR			0x0008

#define SSPSR			0x000c
#define SSPSR_TFE		BIT(0)
#define SSPSR_TNF		BIT(1)
#define SSPSR_RNE		BIT(2)
#define SSPSR_RFF		BIT(3)
#define SSPSR_BSY		BIT(4)

#define SSPCPSR			0x0010

#define SSPIIR			0x0014
#define SSPIIR_RIS		BIT(0)
#define SSPIIR_TIS		BIT(1)
#define SSPIIR_RORIS		BIT(2)
#define SSPICR			SSPIIR

/* timeout in milliseconds */
#define SPI_TIMEOUT		5
/* maximum depth of RX/TX FIFO */
#define SPI_FIFO_SIZE		8

/**
 * struct ep93xx_spi - EP93xx SPI controller structure
 * @lock: spinlock that protects concurrent accesses to fields @running,
 *        @current_msg and @msg_queue
 * @pdev: pointer to platform device
 * @clk: clock for the controller
 * @regs_base: pointer to ioremap()'d registers
 * @sspdr_phys: physical address of the SSPDR register
 * @min_rate: minimum clock rate (in Hz) supported by the controller
 * @max_rate: maximum clock rate (in Hz) supported by the controller
 * @running: is the queue running
 * @wq: workqueue used by the driver
 * @msg_work: work that is queued for the driver
 * @wait: wait here until given transfer is completed
 * @msg_queue: queue for the messages
 * @current_msg: message that is currently processed (or %NULL if none)
 * @tx: current byte in transfer to transmit
 * @rx: current byte in transfer to receive
 * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one
 *              frame decreases this level and sending one frame increases it.
 * @dma_rx: RX DMA channel
 * @dma_tx: TX DMA channel
 * @dma_rx_data: RX parameters passed to the DMA engine
 * @dma_tx_data: TX parameters passed to the DMA engine
 * @rx_sgt: sg table for RX transfers
 * @tx_sgt: sg table for TX transfers
 * @zeropage: dummy page used as RX buffer when only TX buffer is passed in by
 *            the client
 *
 * This structure holds EP93xx SPI controller specific information. When
 * @running is %true, the driver accepts transfer requests from protocol
 * drivers. @current_msg holds a pointer to the message that is currently
 * processed. If @current_msg is %NULL, no processing is going on.
 *
 * Most of the fields are only written once and they can be accessed without
 * taking the @lock. Fields that are accessed concurrently are: @current_msg,
 * @running, and @msg_queue.
 */
struct ep93xx_spi {
	spinlock_t			lock;
	const struct platform_device	*pdev;
	struct clk			*clk;
	void __iomem			*regs_base;
	unsigned long			sspdr_phys;
	unsigned long			min_rate;
	unsigned long			max_rate;
	bool				running;
	struct workqueue_struct		*wq;
	struct work_struct		msg_work;
	struct completion		wait;
	struct list_head		msg_queue;
	struct spi_message		*current_msg;
	size_t				tx;
	size_t				rx;
	size_t				fifo_level;
	struct dma_chan			*dma_rx;
	struct dma_chan			*dma_tx;
	struct ep93xx_dma_data		dma_rx_data;
	struct ep93xx_dma_data		dma_tx_data;
	struct sg_table			rx_sgt;
	struct sg_table			tx_sgt;
	void				*zeropage;
};

/**
 * struct ep93xx_spi_chip - SPI device hardware settings
 * @spi: back pointer to the SPI device
 * @rate: max rate in Hz this chip supports
 * @div_cpsr: cpsr (pre-scaler) divider
 * @div_scr: scr divider
 * @dss: bits per word (4 - 16 bits)
 * @ops: private chip operations
 *
 * This structure is used to store hardware register specific settings for each
 * SPI device. Settings are written to hardware by function
 * ep93xx_spi_chip_setup().
 */
struct ep93xx_spi_chip {
	const struct spi_device		*spi;
	unsigned long			rate;
	u8				div_cpsr;
	u8				div_scr;
	u8				dss;
	struct ep93xx_spi_chip_ops	*ops;
};

/* converts bits per word to CR0.DSS value */
#define bits_per_word_to_dss(bpw)	((bpw) - 1)

static inline void
ep93xx_spi_write_u8(const struct ep93xx_spi *espi, u16 reg, u8 value)
{
	__raw_writeb(value, espi->regs_base + reg);
}

static inline u8
ep93xx_spi_read_u8(const struct ep93xx_spi *espi, u16 reg)
{
	return __raw_readb(espi->regs_base + reg);
}

static inline void
ep93xx_spi_write_u16(const struct ep93xx_spi *espi, u16 reg, u16 value)
{
	__raw_writew(value, espi->regs_base + reg);
}

static inline u16
ep93xx_spi_read_u16(const struct ep93xx_spi *espi, u16 reg)
{
	return __raw_readw(espi->regs_base + reg);
}

static int ep93xx_spi_enable(const struct ep93xx_spi *espi)
{
	u8 regval;
	int err;

	err = clk_enable(espi->clk);
	if (err)
		return err;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval |= SSPCR1_SSE;
	ep93xx_spi_write_u8(espi, SSPCR1, regval);

	return 0;
}

static void ep93xx_spi_disable(const struct ep93xx_spi *espi)
{
	u8 regval;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval &= ~SSPCR1_SSE;
	ep93xx_spi_write_u8(espi, SSPCR1, regval);

	clk_disable(espi->clk);
}

static void ep93xx_spi_enable_interrupts(const struct ep93xx_spi *espi)
{
	u8 regval;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
	ep93xx_spi_write_u8(espi, SSPCR1, regval);
}

static void ep93xx_spi_disable_interrupts(const struct ep93xx_spi *espi)
{
	u8 regval;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval &= ~(SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
	ep93xx_spi_write_u8(espi, SSPCR1, regval);
}

/**
 * ep93xx_spi_calc_divisors() - calculates SPI clock divisors
 * @espi: ep93xx SPI controller struct
 * @chip: divisors are calculated for this chip
 * @rate: desired SPI output clock rate
 *
 * Function calculates cpsr (clock pre-scaler) and scr divisors based on
 * given @rate and stores them in @chip->div_cpsr and @chip->div_scr. If,
 * for some reason, divisors cannot be calculated, nothing is stored and
 * %-EINVAL is returned.
 */
static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi,
				    struct ep93xx_spi_chip *chip,
				    unsigned long rate)
{
	unsigned long spi_clk_rate = clk_get_rate(espi->clk);
	int cpsr, scr;

	/*
	 * Make sure that the requested rate is within the range supported by
	 * the controller. Note that the minimum value is already checked in
	 * ep93xx_spi_transfer().
	 */
	rate = clamp(rate, espi->min_rate, espi->max_rate);

	/*
	 * Calculate divisors so that we can get speed according to the
	 * following formula:
	 *	rate = spi_clock_rate / (cpsr * (1 + scr))
	 *
	 * cpsr must be an even number and starts from 2, scr can be any
	 * number between 0 and 255.
	 */
	for (cpsr = 2; cpsr <= 254; cpsr += 2) {
		for (scr = 0; scr <= 255; scr++) {
			if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) {
				chip->div_scr = (u8)scr;
				chip->div_cpsr = (u8)cpsr;
				return 0;
			}
		}
	}

	return -EINVAL;
}
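
/*
 * Worked example of the divisor search above (the clock and the requested
 * rate are assumptions for illustration only): with spi_clk_rate =
 * 7.3728 MHz and a requested rate of 1 MHz, the loop evaluates
 *
 *	cpsr = 2, scr = 0:	7.3728 MHz / (2 * 1) = 3.6864 MHz (too fast)
 *	cpsr = 2, scr = 1:	7.3728 MHz / (2 * 2) = 1.8432 MHz (too fast)
 *	cpsr = 2, scr = 2:	7.3728 MHz / (2 * 3) = 1.2288 MHz (too fast)
 *	cpsr = 2, scr = 3:	7.3728 MHz / (2 * 4) = 921.6 kHz  (accepted)
 *
 * and stores div_cpsr = 2, div_scr = 3. The resulting wire rate is
 * 921.6 kHz, the fastest supported rate that does not exceed the request.
 */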

static void ep93xx_spi_cs_control(struct spi_device *spi, bool control)
{
	struct ep93xx_spi_chip *chip = spi_get_ctldata(spi);
	int value = (spi->mode & SPI_CS_HIGH) ? control : !control;

	if (chip->ops && chip->ops->cs_control)
		chip->ops->cs_control(spi, value);
}

/**
 * ep93xx_spi_setup() - setup an SPI device
 * @spi: SPI device to setup
 *
 * This function sets up SPI device mode, speed etc. Can be called multiple
 * times for a single device. Returns %0 in case of success, negative error in
 * case of failure. When this function returns success, the device is
 * deselected.
 */
static int ep93xx_spi_setup(struct spi_device *spi)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
	struct ep93xx_spi_chip *chip;

	if (spi->bits_per_word < 4 || spi->bits_per_word > 16) {
		dev_err(&espi->pdev->dev, "invalid bits per word %d\n",
			spi->bits_per_word);
		return -EINVAL;
	}

	chip = spi_get_ctldata(spi);
	if (!chip) {
		dev_dbg(&espi->pdev->dev, "initial setup for %s\n",
			spi->modalias);

		chip = kzalloc(sizeof(*chip), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;

		chip->spi = spi;
		chip->ops = spi->controller_data;

		if (chip->ops && chip->ops->setup) {
			int ret = chip->ops->setup(spi);
			if (ret) {
				kfree(chip);
				return ret;
			}
		}

		spi_set_ctldata(spi, chip);
	}

	if (spi->max_speed_hz != chip->rate) {
		int err;

		err = ep93xx_spi_calc_divisors(espi, chip, spi->max_speed_hz);
		if (err != 0) {
			spi_set_ctldata(spi, NULL);
			kfree(chip);
			return err;
		}
		chip->rate = spi->max_speed_hz;
	}

	chip->dss = bits_per_word_to_dss(spi->bits_per_word);

	ep93xx_spi_cs_control(spi, false);
	return 0;
}

/**
 * ep93xx_spi_transfer() - queue message to be transferred
 * @spi: target SPI device
 * @msg: message to be transferred
 *
 * This function is called by SPI device drivers when they are going to
 * transfer a new message. It simply puts the message in the queue and
 * schedules the workqueue to perform the actual transfer later on.
 *
 * Returns %0 on success and negative error in case of failure.
 */
static int ep93xx_spi_transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
	struct spi_transfer *t;
	unsigned long flags;

	if (!msg || !msg->complete)
		return -EINVAL;

	/* first validate each transfer */
	list_for_each_entry(t, &msg->transfers, transfer_list) {
		if (t->bits_per_word) {
			if (t->bits_per_word < 4 || t->bits_per_word > 16)
				return -EINVAL;
		}
		if (t->speed_hz && t->speed_hz < espi->min_rate)
			return -EINVAL;
	}

	/*
	 * Now that we own the message, let's initialize it so that it is
	 * suitable for us. We use @msg->status to signal whether there was
	 * error in transfer and @msg->state is used to hold pointer to the
	 * current transfer (or %NULL if no active current transfer).
	 */
	msg->state = NULL;
	msg->status = 0;
	msg->actual_length = 0;

	spin_lock_irqsave(&espi->lock, flags);
	if (!espi->running) {
		spin_unlock_irqrestore(&espi->lock, flags);
		return -ESHUTDOWN;
	}
	list_add_tail(&msg->queue, &espi->msg_queue);
	queue_work(espi->wq, &espi->msg_work);
	spin_unlock_irqrestore(&espi->lock, flags);

	return 0;
}

/**
 * ep93xx_spi_cleanup() - cleans up master controller specific state
 * @spi: SPI device to cleanup
 *
 * This function releases master controller specific state for given @spi
 * device.
 */
static void ep93xx_spi_cleanup(struct spi_device *spi)
{
	struct ep93xx_spi_chip *chip;

	chip = spi_get_ctldata(spi);
	if (chip) {
		if (chip->ops && chip->ops->cleanup)
			chip->ops->cleanup(spi);
		spi_set_ctldata(spi, NULL);
		kfree(chip);
	}
}

/**
 * ep93xx_spi_chip_setup() - configures hardware according to given @chip
 * @espi: ep93xx SPI controller struct
 * @chip: chip specific settings
 *
 * This function sets up the actual hardware registers with settings given in
 * @chip. Note that no validation is done, so callers must validate the
 * settings before calling this function.
 */
static void ep93xx_spi_chip_setup(const struct ep93xx_spi *espi,
				  const struct ep93xx_spi_chip *chip)
{
	u16 cr0;

	cr0 = chip->div_scr << SSPCR0_SCR_SHIFT;
	cr0 |= (chip->spi->mode & (SPI_CPHA | SPI_CPOL)) << SSPCR0_MODE_SHIFT;
	cr0 |= chip->dss;

	dev_dbg(&espi->pdev->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n",
		chip->spi->mode, chip->div_cpsr, chip->div_scr, chip->dss);
	dev_dbg(&espi->pdev->dev, "setup: cr0 %#x\n", cr0);

	ep93xx_spi_write_u8(espi, SSPCPSR, chip->div_cpsr);
	ep93xx_spi_write_u16(espi, SSPCR0, cr0);
}
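
/*
 * Illustration of the SSPCR0 packing done in ep93xx_spi_chip_setup()
 * (the values are assumed for the sake of the example): an 8-bit device
 * in SPI mode 3 (SPI_CPOL | SPI_CPHA) with div_scr = 3 gives
 *
 *	cr0  = 3 << SSPCR0_SCR_SHIFT;		// 0x0300
 *	cr0 |= 0x3 << SSPCR0_MODE_SHIFT;	// 0x00c0
 *	cr0 |= bits_per_word_to_dss(8);		// 0x0007
 *						// cr0 == 0x03c7
 *
 * The pre-scaler div_cpsr is written separately to SSPCPSR.
 */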

static inline int bits_per_word(const struct ep93xx_spi *espi)
{
	struct spi_message *msg = espi->current_msg;
	struct spi_transfer *t = msg->state;

	return t->bits_per_word ? t->bits_per_word : msg->spi->bits_per_word;
}

static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t)
{
	if (bits_per_word(espi) > 8) {
		u16 tx_val = 0;

		/* @tx counts bytes, so convert it to a u16 frame index */
		if (t->tx_buf)
			tx_val = ((u16 *)t->tx_buf)[espi->tx / 2];
		ep93xx_spi_write_u16(espi, SSPDR, tx_val);
		espi->tx += sizeof(tx_val);
	} else {
		u8 tx_val = 0;

		if (t->tx_buf)
			tx_val = ((u8 *)t->tx_buf)[espi->tx];
		ep93xx_spi_write_u8(espi, SSPDR, tx_val);
		espi->tx += sizeof(tx_val);
	}
}

static void ep93xx_do_read(struct ep93xx_spi *espi, struct spi_transfer *t)
{
	if (bits_per_word(espi) > 8) {
		u16 rx_val;

		rx_val = ep93xx_spi_read_u16(espi, SSPDR);
		/* @rx counts bytes, so convert it to a u16 frame index */
		if (t->rx_buf)
			((u16 *)t->rx_buf)[espi->rx / 2] = rx_val;
		espi->rx += sizeof(rx_val);
	} else {
		u8 rx_val;

		rx_val = ep93xx_spi_read_u8(espi, SSPDR);
		if (t->rx_buf)
			((u8 *)t->rx_buf)[espi->rx] = rx_val;
		espi->rx += sizeof(rx_val);
	}
}

/**
 * ep93xx_spi_read_write() - perform next RX/TX transfer
 * @espi: ep93xx SPI controller struct
 *
 * This function transfers next bytes (or half-words) to/from RX/TX FIFOs. If
 * called several times, the whole transfer will be completed. Returns
 * %-EINPROGRESS when the current transfer was not yet completed, otherwise %0.
 *
 * When this function is finished, RX FIFO should be empty and TX FIFO should
 * be full.
 */
static int ep93xx_spi_read_write(struct ep93xx_spi *espi)
{
	struct spi_message *msg = espi->current_msg;
	struct spi_transfer *t = msg->state;

	/* read as long as RX FIFO has frames in it */
	while ((ep93xx_spi_read_u8(espi, SSPSR) & SSPSR_RNE)) {
		ep93xx_do_read(espi, t);
		espi->fifo_level--;
	}

	/* write as long as TX FIFO has room */
	while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < t->len) {
		ep93xx_do_write(espi, t);
		espi->fifo_level++;
	}

	if (espi->rx == t->len)
		return 0;

	return -EINPROGRESS;
}

static void ep93xx_spi_pio_transfer(struct ep93xx_spi *espi)
{
	/*
	 * Now everything is set up for the current transfer. We prime the TX
	 * FIFO, enable interrupts, and wait for the transfer to complete.
	 */
	if (ep93xx_spi_read_write(espi)) {
		ep93xx_spi_enable_interrupts(espi);
		wait_for_completion(&espi->wait);
	}
}
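
/*
 * Illustration of the FIFO level bookkeeping above, for a hypothetical
 * 12-byte transfer with 8 bits per word: the first call to
 * ep93xx_spi_read_write() finds the RX FIFO empty, fills the TX FIFO with
 * 8 bytes (fifo_level = 8) and returns %-EINPROGRESS, so interrupts get
 * enabled. Each subsequent call (from the interrupt handler) first drains
 * the frames that have arrived in the RX FIFO, decrementing @fifo_level,
 * and then tops the TX FIFO up with the remaining 4 bytes. Once @rx
 * reaches t->len, the function returns %0 and the completion is signalled.
 * Tracking the level explicitly avoids polling the %SSPSR_TNF bit, which
 * may cause RX FIFO overruns.
 */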

/**
 * ep93xx_spi_dma_prepare() - prepares a DMA transfer
 * @espi: ep93xx SPI controller struct
 * @dir: DMA transfer direction
 *
 * Function configures the DMA, maps the buffer and prepares the DMA
 * descriptor. Returns a valid DMA descriptor in case of success and an
 * ERR_PTR() in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_transfer_direction dir)
{
	struct spi_transfer *t = espi->current_msg->state;
	struct dma_async_tx_descriptor *txd;
	enum dma_slave_buswidth buswidth;
	struct dma_slave_config conf;
	struct scatterlist *sg;
	struct sg_table *sgt;
	struct dma_chan *chan;
	const void *buf, *pbuf;
	size_t len = t->len;
	int i, ret, nents;

	if (bits_per_word(espi) > 8)
		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
	else
		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;

	memset(&conf, 0, sizeof(conf));
	conf.direction = dir;

	if (dir == DMA_DEV_TO_MEM) {
		chan = espi->dma_rx;
		buf = t->rx_buf;
		sgt = &espi->rx_sgt;

		conf.src_addr = espi->sspdr_phys;
		conf.src_addr_width = buswidth;
	} else {
		chan = espi->dma_tx;
		buf = t->tx_buf;
		sgt = &espi->tx_sgt;

		conf.dst_addr = espi->sspdr_phys;
		conf.dst_addr_width = buswidth;
	}

	ret = dmaengine_slave_config(chan, &conf);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * We need to split the transfer into PAGE_SIZE'd chunks. This is
	 * because we are using @espi->zeropage to provide a zero RX buffer
	 * for the TX transfers and we have only allocated one page for that.
	 *
	 * For performance reasons we allocate a new sg_table only when
	 * needed. Otherwise we will re-use the current one. Eventually the
	 * last sg_table is released in ep93xx_spi_release_dma().
	 */

	nents = DIV_ROUND_UP(len, PAGE_SIZE);
	if (nents != sgt->nents) {
		sg_free_table(sgt);

		ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
		if (ret)
			return ERR_PTR(ret);
	}

	pbuf = buf;
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = min_t(size_t, len, PAGE_SIZE);

		if (buf) {
			sg_set_page(sg, virt_to_page(pbuf), bytes,
				    offset_in_page(pbuf));
		} else {
			sg_set_page(sg, virt_to_page(espi->zeropage),
				    bytes, 0);
		}

		pbuf += bytes;
		len -= bytes;
	}

	if (WARN_ON(len)) {
		dev_warn(&espi->pdev->dev, "len = %zu expected 0!\n", len);
		return ERR_PTR(-EINVAL);
	}

	nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
	if (!nents)
		return ERR_PTR(-ENOMEM);

	txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir, DMA_CTRL_ACK);
	if (!txd) {
		dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
		return ERR_PTR(-ENOMEM);
	}
	return txd;
}
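
/*
 * Example of the chunking done in ep93xx_spi_dma_prepare() (transfer
 * length assumed for illustration): with PAGE_SIZE = 4096, a 5000-byte
 * transfer yields nents = DIV_ROUND_UP(5000, 4096) = 2, i.e. one
 * 4096-byte chunk followed by one 904-byte chunk. For a TX-only transfer
 * every RX chunk points at the single shared @zeropage, which is why no
 * chunk may exceed PAGE_SIZE.
 */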

/**
 * ep93xx_spi_dma_finish() - finishes with a DMA transfer
 * @espi: ep93xx SPI controller struct
 * @dir: DMA transfer direction
 *
 * Function finishes with the DMA transfer. After this, the DMA buffer is
 * unmapped.
 */
static void ep93xx_spi_dma_finish(struct ep93xx_spi *espi,
				  enum dma_transfer_direction dir)
{
	struct dma_chan *chan;
	struct sg_table *sgt;

	if (dir == DMA_DEV_TO_MEM) {
		chan = espi->dma_rx;
		sgt = &espi->rx_sgt;
	} else {
		chan = espi->dma_tx;
		sgt = &espi->tx_sgt;
	}

	dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
}

static void ep93xx_spi_dma_callback(void *callback_param)
{
	complete(callback_param);
}

static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi)
{
	struct spi_message *msg = espi->current_msg;
	struct dma_async_tx_descriptor *rxd, *txd;

	rxd = ep93xx_spi_dma_prepare(espi, DMA_DEV_TO_MEM);
	if (IS_ERR(rxd)) {
		dev_err(&espi->pdev->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
		msg->status = PTR_ERR(rxd);
		return;
	}

	txd = ep93xx_spi_dma_prepare(espi, DMA_MEM_TO_DEV);
	if (IS_ERR(txd)) {
		ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
		dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(txd));
		msg->status = PTR_ERR(txd);
		return;
	}

	/* We are ready when RX is done */
	rxd->callback = ep93xx_spi_dma_callback;
	rxd->callback_param = &espi->wait;

	/* Now submit both descriptors and wait while they finish */
	dmaengine_submit(rxd);
	dmaengine_submit(txd);

	dma_async_issue_pending(espi->dma_rx);
	dma_async_issue_pending(espi->dma_tx);

	wait_for_completion(&espi->wait);

	ep93xx_spi_dma_finish(espi, DMA_MEM_TO_DEV);
	ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
}

/**
 * ep93xx_spi_process_transfer() - processes one SPI transfer
 * @espi: ep93xx SPI controller struct
 * @msg: current message
 * @t: transfer to process
 *
 * This function processes one SPI transfer given in @t. Function waits until
 * transfer is complete (may sleep) and updates @msg->status based on whether
 * transfer was successfully processed or not.
 */
static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
					struct spi_message *msg,
					struct spi_transfer *t)
{
	struct ep93xx_spi_chip *chip = spi_get_ctldata(msg->spi);

	msg->state = t;

	/*
	 * Handle any transfer specific settings if needed. We use
	 * temporary chip settings here and restore the original ones later
	 * when the transfer is finished.
	 */
	if (t->speed_hz || t->bits_per_word) {
		struct ep93xx_spi_chip tmp_chip = *chip;

		if (t->speed_hz) {
			int err;

			err = ep93xx_spi_calc_divisors(espi, &tmp_chip,
						       t->speed_hz);
			if (err) {
				dev_err(&espi->pdev->dev,
					"failed to adjust speed\n");
				msg->status = err;
				return;
			}
		}

		if (t->bits_per_word)
			tmp_chip.dss = bits_per_word_to_dss(t->bits_per_word);

		/*
		 * Set up temporary new hw settings for this transfer.
		 */
		ep93xx_spi_chip_setup(espi, &tmp_chip);
	}

	espi->rx = 0;
	espi->tx = 0;

	/*
	 * There is no point in setting up DMA for transfers which will fit
	 * into the FIFO and can be transferred with a single interrupt. So
	 * in these cases we use PIO and don't bother with DMA.
	 */
	if (espi->dma_rx && t->len > SPI_FIFO_SIZE)
		ep93xx_spi_dma_transfer(espi);
	else
		ep93xx_spi_pio_transfer(espi);

	/*
	 * In case of error during transmit, we bail out from processing
	 * the message.
	 */
	if (msg->status)
		return;

	msg->actual_length += t->len;

	/*
	 * After this transfer is finished, perform any possible
	 * post-transfer actions requested by the protocol driver.
	 */
	if (t->delay_usecs) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(t->delay_usecs));
	}
	if (t->cs_change) {
		if (!list_is_last(&t->transfer_list, &msg->transfers)) {
			/*
			 * In case the protocol driver is asking us to drop
			 * the chipselect briefly, we let the scheduler handle
			 * any "delay" here.
			 */
			ep93xx_spi_cs_control(msg->spi, false);
			cond_resched();
			ep93xx_spi_cs_control(msg->spi, true);
		}
	}

	if (t->speed_hz || t->bits_per_word)
		ep93xx_spi_chip_setup(espi, chip);
}

/*
 * ep93xx_spi_process_message() - process one SPI message
 * @espi: ep93xx SPI controller struct
 * @msg: message to process
 *
 * This function processes a single SPI message. We go through all transfers in
 * the message and pass them to ep93xx_spi_process_transfer(). Chipselect is
 * asserted during the whole message (unless per transfer cs_change is set).
 *
 * @msg->status contains %0 in case of success or negative error code in case
 * of failure.
 */
static void ep93xx_spi_process_message(struct ep93xx_spi *espi,
				       struct spi_message *msg)
{
	unsigned long timeout;
	struct spi_transfer *t;
	int err;

	/*
	 * Enable the SPI controller and its clock.
	 */
	err = ep93xx_spi_enable(espi);
	if (err) {
		dev_err(&espi->pdev->dev, "failed to enable SPI controller\n");
		msg->status = err;
		return;
	}

	/*
	 * Just to be sure: flush any data from RX FIFO.
	 */
	timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT);
	while (ep93xx_spi_read_u16(espi, SSPSR) & SSPSR_RNE) {
		if (time_after(jiffies, timeout)) {
			dev_warn(&espi->pdev->dev,
				 "timeout while flushing RX FIFO\n");
			msg->status = -ETIMEDOUT;
			/* don't leave the controller or its clock enabled */
			ep93xx_spi_disable(espi);
			return;
		}
		ep93xx_spi_read_u16(espi, SSPDR);
	}

	/*
	 * We explicitly handle FIFO level. This way we don't have to check TX
	 * FIFO status using the %SSPSR_TNF bit, which may cause RX FIFO
	 * overruns.
	 */
	espi->fifo_level = 0;

	/*
	 * Update SPI controller registers according to the spi device and
	 * assert the chipselect.
	 */
	ep93xx_spi_chip_setup(espi, spi_get_ctldata(msg->spi));
	ep93xx_spi_cs_control(msg->spi, true);

	list_for_each_entry(t, &msg->transfers, transfer_list) {
		ep93xx_spi_process_transfer(espi, msg, t);
		if (msg->status)
			break;
	}

	/*
	 * Now the whole message is transferred (or failed for some reason). We
	 * deselect the device and disable the SPI controller.
	 */
	ep93xx_spi_cs_control(msg->spi, false);
	ep93xx_spi_disable(espi);
}
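
/*
 * Example of the per-transfer overrides handled above (the device, sizes
 * and rate are hypothetical): a protocol driver may mix word sizes and
 * speeds within one message and drop the chipselect between transfers:
 *
 *	struct spi_transfer t[2] = {
 *		{ .tx_buf = cmd, .len = 2, .cs_change = 1 },
 *		{ .rx_buf = data, .len = 32,
 *		  .bits_per_word = 16, .speed_hz = 1000000 },
 *	};
 *
 * The first transfer uses the settings from ep93xx_spi_setup(). For the
 * second one, a temporary chip copy with recalculated divisors and a new
 * DSS value is written to the hardware; the original settings are
 * restored after the transfer finishes.
 */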

#define work_to_espi(work) (container_of((work), struct ep93xx_spi, msg_work))

/**
 * ep93xx_spi_work() - EP93xx SPI workqueue worker function
 * @work: work struct
 *
 * Workqueue worker function. This function is called when there are new
 * SPI messages to be processed. A message is taken out of the queue and then
 * passed to ep93xx_spi_process_message().
 *
 * After the message is transferred, the protocol driver is notified by
 * calling @msg->complete(). In case of error, @msg->status is set to a
 * negative error number, otherwise it contains zero (and @msg->actual_length
 * is updated).
 */
static void ep93xx_spi_work(struct work_struct *work)
{
	struct ep93xx_spi *espi = work_to_espi(work);
	struct spi_message *msg;

	spin_lock_irq(&espi->lock);
	if (!espi->running || espi->current_msg ||
	    list_empty(&espi->msg_queue)) {
		spin_unlock_irq(&espi->lock);
		return;
	}
	msg = list_first_entry(&espi->msg_queue, struct spi_message, queue);
	list_del_init(&msg->queue);
	espi->current_msg = msg;
	spin_unlock_irq(&espi->lock);

	ep93xx_spi_process_message(espi, msg);

	/*
	 * Update the current message and re-schedule ourselves if there are
	 * more messages in the queue.
	 */
	spin_lock_irq(&espi->lock);
	espi->current_msg = NULL;
	if (espi->running && !list_empty(&espi->msg_queue))
		queue_work(espi->wq, &espi->msg_work);
	spin_unlock_irq(&espi->lock);

	/* notify the protocol driver that we are done with this message */
	msg->complete(msg->context);
}
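
/*
 * Sketch of how a protocol driver ends up in the worker above (the
 * buffer and device are hypothetical):
 *
 *	struct spi_transfer t = { .tx_buf = buf, .len = sizeof(buf) };
 *	struct spi_message m;
 *
 *	spi_message_init(&m);
 *	spi_message_add_tail(&t, &m);
 *	spi_async(spi, &m);
 *
 * spi_async() ends up calling ep93xx_spi_transfer(), which queues @m and
 * schedules @msg_work; this worker then picks the message up, processes
 * it and finally invokes m.complete().
 */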

static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
{
	struct ep93xx_spi *espi = dev_id;
	u8 irq_status = ep93xx_spi_read_u8(espi, SSPIIR);

	/*
	 * If we got the ROR (receive overrun) interrupt we know that
	 * something is wrong. Just abort the message.
	 */
	if (unlikely(irq_status & SSPIIR_RORIS)) {
		/* clear the overrun interrupt */
		ep93xx_spi_write_u8(espi, SSPICR, 0);
		dev_warn(&espi->pdev->dev,
			 "receive overrun, aborting the message\n");
		espi->current_msg->status = -EIO;
	} else {
		/*
		 * Interrupt is either RX (RIS) or TX (TIS). For both cases we
		 * simply execute the next data transfer.
		 */
		if (ep93xx_spi_read_write(espi)) {
			/*
			 * In the normal case, there is still some processing
			 * left for the current transfer. Let's wait for the
			 * next interrupt then.
			 */
			return IRQ_HANDLED;
		}
	}

	/*
	 * The current transfer is finished, either with error or with
	 * success. In any case we disable interrupts and notify the worker
	 * to handle any post-processing of the message.
	 */
	ep93xx_spi_disable_interrupts(espi);
	complete(&espi->wait);
	return IRQ_HANDLED;
}

static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param)
{
	if (ep93xx_dma_chan_is_m2p(chan))
		return false;

	chan->private = filter_param;
	return true;
}

static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
{
	dma_cap_mask_t mask;
	int ret;

	espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL);
	if (!espi->zeropage)
		return -ENOMEM;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	espi->dma_rx_data.port = EP93XX_DMA_SSP;
	espi->dma_rx_data.direction = DMA_DEV_TO_MEM;
	espi->dma_rx_data.name = "ep93xx-spi-rx";

	espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
					   &espi->dma_rx_data);
	if (!espi->dma_rx) {
		ret = -ENODEV;
		goto fail_free_page;
	}

	espi->dma_tx_data.port = EP93XX_DMA_SSP;
	espi->dma_tx_data.direction = DMA_MEM_TO_DEV;
	espi->dma_tx_data.name = "ep93xx-spi-tx";

	espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
					   &espi->dma_tx_data);
	if (!espi->dma_tx) {
		ret = -ENODEV;
		goto fail_release_rx;
	}

	return 0;

fail_release_rx:
	dma_release_channel(espi->dma_rx);
	espi->dma_rx = NULL;
fail_free_page:
	free_page((unsigned long)espi->zeropage);

	return ret;
}

static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
{
	if (espi->dma_rx) {
		dma_release_channel(espi->dma_rx);
		sg_free_table(&espi->rx_sgt);
	}
	if (espi->dma_tx) {
		dma_release_channel(espi->dma_tx);
		sg_free_table(&espi->tx_sgt);
	}

	if (espi->zeropage)
		free_page((unsigned long)espi->zeropage);
}

static int __devinit ep93xx_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct ep93xx_spi_info *info;
	struct ep93xx_spi *espi;
	struct resource *res;
	int irq;
	int error;

	info = pdev->dev.platform_data;

	master = spi_alloc_master(&pdev->dev, sizeof(*espi));
	if (!master) {
		dev_err(&pdev->dev, "failed to allocate spi master\n");
		return -ENOMEM;
	}

	master->setup = ep93xx_spi_setup;
	master->transfer = ep93xx_spi_transfer;
	master->cleanup = ep93xx_spi_cleanup;
	master->bus_num = pdev->id;
	master->num_chipselect = info->num_chipselect;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;

	platform_set_drvdata(pdev, master);

	espi = spi_master_get_devdata(master);

	espi->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(espi->clk)) {
		dev_err(&pdev->dev, "unable to get spi clock\n");
		error = PTR_ERR(espi->clk);
		goto fail_release_master;
	}

	spin_lock_init(&espi->lock);
	init_completion(&espi->wait);

	/*
	 * Calculate maximum and minimum supported clock rates
	 * for the controller.
	 */
	espi->max_rate = clk_get_rate(espi->clk) / 2;
	espi->min_rate = clk_get_rate(espi->clk) / (254 * 256);
	espi->pdev = pdev;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		error = -EBUSY;
		dev_err(&pdev->dev, "failed to get irq resources\n");
		goto fail_put_clock;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "unable to get iomem resource\n");
		error = -ENODEV;
		goto fail_put_clock;
	}

	espi->sspdr_phys = res->start + SSPDR;

	espi->regs_base = devm_request_and_ioremap(&pdev->dev, res);
	if (!espi->regs_base) {
		dev_err(&pdev->dev, "failed to map resources\n");
		error = -ENODEV;
		goto fail_put_clock;
	}

	error = devm_request_irq(&pdev->dev, irq, ep93xx_spi_interrupt,
				 0, "ep93xx-spi", espi);
	if (error) {
		dev_err(&pdev->dev, "failed to request irq\n");
		goto fail_put_clock;
	}

	if (info->use_dma && ep93xx_spi_setup_dma(espi))
		dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n");

	espi->wq = create_singlethread_workqueue("ep93xx_spid");
	if (!espi->wq) {
		dev_err(&pdev->dev, "unable to create workqueue\n");
		error = -ENOMEM;
		goto fail_free_dma;
	}
	INIT_WORK(&espi->msg_work, ep93xx_spi_work);
	INIT_LIST_HEAD(&espi->msg_queue);
	espi->running = true;

	/* make sure that the hardware is disabled */
	ep93xx_spi_write_u8(espi, SSPCR1, 0);

	error = spi_register_master(master);
	if (error) {
		dev_err(&pdev->dev, "failed to register SPI master\n");
		goto fail_free_queue;
	}

	dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n",
		 (unsigned long)res->start, irq);

	return 0;

fail_free_queue:
	destroy_workqueue(espi->wq);
fail_free_dma:
	ep93xx_spi_release_dma(espi);
fail_put_clock:
	clk_put(espi->clk);
fail_release_master:
	spi_master_put(master);
	platform_set_drvdata(pdev, NULL);

	return error;
}
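
/*
 * Worked example for the rate limits computed in probe (the 14.7456 MHz
 * SSP clock is an assumption for illustration):
 *
 *	max_rate = 14745600 / 2           = 7372800 Hz (cpsr = 2,   scr = 0)
 *	min_rate = 14745600 / (254 * 256) ~=    227 Hz (cpsr = 254, scr = 255)
 *
 * matching the divisor limits used in ep93xx_spi_calc_divisors().
 */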

static int __devexit ep93xx_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct ep93xx_spi *espi = spi_master_get_devdata(master);

	spin_lock_irq(&espi->lock);
	espi->running = false;
	spin_unlock_irq(&espi->lock);

	destroy_workqueue(espi->wq);

	/*
	 * Complete remaining messages with %-ESHUTDOWN status.
	 */
	spin_lock_irq(&espi->lock);
	while (!list_empty(&espi->msg_queue)) {
		struct spi_message *msg;

		msg = list_first_entry(&espi->msg_queue,
				       struct spi_message, queue);
		list_del_init(&msg->queue);
		msg->status = -ESHUTDOWN;
		spin_unlock_irq(&espi->lock);
		msg->complete(msg->context);
		spin_lock_irq(&espi->lock);
	}
	spin_unlock_irq(&espi->lock);

	ep93xx_spi_release_dma(espi);
	clk_put(espi->clk);
	platform_set_drvdata(pdev, NULL);

	spi_unregister_master(master);
	return 0;
}

static struct platform_driver ep93xx_spi_driver = {
	.driver		= {
		.name	= "ep93xx-spi",
		.owner	= THIS_MODULE,
	},
	.probe		= ep93xx_spi_probe,
	.remove		= __devexit_p(ep93xx_spi_remove),
};
module_platform_driver(ep93xx_spi_driver);

MODULE_DESCRIPTION("EP93xx SPI Controller driver");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-spi");
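
/*
 * Hypothetical board support sketch (all names and values below are
 * assumptions for illustration; they are not part of this driver):
 *
 *	static void my_board_cs_control(struct spi_device *spi, int value)
 *	{
 *		gpio_set_value(MY_BOARD_SPI_CS_GPIO, value);
 *	}
 *
 *	static struct ep93xx_spi_chip_ops my_board_chip_ops = {
 *		.cs_control	= my_board_cs_control,
 *	};
 *
 *	static struct ep93xx_spi_info my_board_spi_info = {
 *		.num_chipselect	= 1,
 *		.use_dma	= true,
 *	};
 *
 * Board code registers the "ep93xx-spi" platform device with
 * my_board_spi_info as its platform data and points the SPI device's
 * controller_data at my_board_chip_ops; ep93xx_spi_setup() then picks
 * the ops up via spi->controller_data.
 */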