/*
 * Driver for Cirrus Logic EP93xx SPI controller.
 *
 * Copyright (C) 2010-2011 Mika Westerberg
 *
 * Explicit FIFO handling code was inspired by amba-pl022 driver.
 *
 * Chip select support using other than built-in GPIOs by H. Hartley Sweeten.
 *
 * For more information about the SPI controller see documentation on Cirrus
 * Logic web site:
 *     http://www.cirrus.com/en/pubs/manual/EP93xx_Users_Guide_UM1.pdf
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/spi/spi.h>

#include <linux/platform_data/dma-ep93xx.h>
#include <linux/platform_data/spi-ep93xx.h>

#define SSPCR0			0x0000
#define SSPCR0_MODE_SHIFT	6
#define SSPCR0_SCR_SHIFT	8

#define SSPCR1			0x0004
#define SSPCR1_RIE		BIT(0)
#define SSPCR1_TIE		BIT(1)
#define SSPCR1_RORIE		BIT(2)
#define SSPCR1_LBM		BIT(3)
#define SSPCR1_SSE		BIT(4)
#define SSPCR1_MS		BIT(5)
#define SSPCR1_SOD		BIT(6)

#define SSPDR			0x0008

#define SSPSR			0x000c
#define SSPSR_TFE		BIT(0)
#define SSPSR_TNF		BIT(1)
#define SSPSR_RNE		BIT(2)
#define SSPSR_RFF		BIT(3)
#define SSPSR_BSY		BIT(4)

#define SSPCPSR			0x0010

#define SSPIIR			0x0014
#define SSPIIR_RIS		BIT(0)
#define SSPIIR_TIS		BIT(1)
#define SSPIIR_RORIS		BIT(2)
#define SSPICR			SSPIIR

/* timeout in milliseconds */
#define SPI_TIMEOUT		5
/* maximum depth of RX/TX FIFO */
#define SPI_FIFO_SIZE		8
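/*
 * Illustrative SSPCR0 layout, as implied by the SSPCR0_MODE_SHIFT and
 * SSPCR0_SCR_SHIFT values above and the DSS encoding used by
 * ep93xx_spi_chip_setup() (see the EP93xx User's Guide for the
 * authoritative definition):
 *
 *	[15:8]	SCR  - serial clock rate divisor
 *	[7:6]	mode - SPI_CPOL/SPI_CPHA clock mode bits
 *	[3:0]	DSS  - data size select (bits per word - 1)
 */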
/**
 * struct ep93xx_spi - EP93xx SPI controller structure
 * @lock: spinlock that protects concurrent accesses to fields @running,
 *        @current_msg and @msg_queue
 * @pdev: pointer to platform device
 * @clk: clock for the controller
 * @regs_base: pointer to ioremap()'d registers
 * @sspdr_phys: physical address of the SSPDR register
 * @min_rate: minimum clock rate (in Hz) supported by the controller
 * @max_rate: maximum clock rate (in Hz) supported by the controller
 * @running: is the queue running
 * @wq: workqueue used by the driver
 * @msg_work: work that is queued for the driver
 * @wait: wait here until given transfer is completed
 * @msg_queue: queue for the messages
 * @current_msg: message that is currently processed (or %NULL if none)
 * @tx: current byte in transfer to transmit
 * @rx: current byte in transfer to receive
 * @fifo_level: how full is the FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one
 *              frame decreases this level and sending one frame increases it.
 * @dma_rx: RX DMA channel
 * @dma_tx: TX DMA channel
 * @dma_rx_data: RX parameters passed to the DMA engine
 * @dma_tx_data: TX parameters passed to the DMA engine
 * @rx_sgt: sg table for RX transfers
 * @tx_sgt: sg table for TX transfers
 * @zeropage: dummy page used as RX buffer when only a TX buffer is passed in
 *            by the client
 *
 * This structure holds EP93xx SPI controller specific information. When
 * @running is %true, the driver accepts transfer requests from protocol
 * drivers. @current_msg holds a pointer to the message that is currently
 * processed. If @current_msg is %NULL, no processing is going on.
 *
 * Most of the fields are only written once and they can be accessed without
 * taking the @lock. Fields that are accessed concurrently are: @current_msg,
 * @running, and @msg_queue.
 */
struct ep93xx_spi {
	spinlock_t			lock;
	const struct platform_device	*pdev;
	struct clk			*clk;
	void __iomem			*regs_base;
	unsigned long			sspdr_phys;
	unsigned long			min_rate;
	unsigned long			max_rate;
	bool				running;
	struct workqueue_struct		*wq;
	struct work_struct		msg_work;
	struct completion		wait;
	struct list_head		msg_queue;
	struct spi_message		*current_msg;
	size_t				tx;
	size_t				rx;
	size_t				fifo_level;
	struct dma_chan			*dma_rx;
	struct dma_chan			*dma_tx;
	struct ep93xx_dma_data		dma_rx_data;
	struct ep93xx_dma_data		dma_tx_data;
	struct sg_table			rx_sgt;
	struct sg_table			tx_sgt;
	void				*zeropage;
};

/**
 * struct ep93xx_spi_chip - SPI device hardware settings
 * @spi: back pointer to the SPI device
 * @rate: max rate in Hz this chip supports
 * @div_cpsr: cpsr (pre-scaler) divider
 * @div_scr: scr divider
 * @dss: bits per word (4 - 16 bits)
 * @ops: private chip operations
 *
 * This structure is used to store hardware register specific settings for
 * each SPI device. Settings are written to hardware by function
 * ep93xx_spi_chip_setup().
 */
struct ep93xx_spi_chip {
	const struct spi_device		*spi;
	unsigned long			rate;
	u8				div_cpsr;
	u8				div_scr;
	u8				dss;
	struct ep93xx_spi_chip_ops	*ops;
};

/* converts bits per word to CR0.DSS value */
#define bits_per_word_to_dss(bpw)	((bpw) - 1)

static inline void
ep93xx_spi_write_u8(const struct ep93xx_spi *espi, u16 reg, u8 value)
{
	__raw_writeb(value, espi->regs_base + reg);
}

static inline u8
ep93xx_spi_read_u8(const struct ep93xx_spi *spi, u16 reg)
{
	return __raw_readb(spi->regs_base + reg);
}

static inline void
ep93xx_spi_write_u16(const struct ep93xx_spi *espi, u16 reg, u16 value)
{
	__raw_writew(value, espi->regs_base + reg);
}

static inline u16
ep93xx_spi_read_u16(const struct ep93xx_spi *spi, u16 reg)
{
	return __raw_readw(spi->regs_base + reg);
}

static int ep93xx_spi_enable(const struct ep93xx_spi *espi)
{
	u8 regval;
	int err;

	err = clk_enable(espi->clk);
	if (err)
		return err;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval |= SSPCR1_SSE;
	ep93xx_spi_write_u8(espi, SSPCR1, regval);

	return 0;
}

static void ep93xx_spi_disable(const struct ep93xx_spi *espi)
{
	u8 regval;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval &= ~SSPCR1_SSE;
	ep93xx_spi_write_u8(espi, SSPCR1, regval);

	clk_disable(espi->clk);
}

static void ep93xx_spi_enable_interrupts(const struct ep93xx_spi *espi)
{
	u8 regval;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
	ep93xx_spi_write_u8(espi, SSPCR1, regval);
}

static void ep93xx_spi_disable_interrupts(const struct ep93xx_spi *espi)
{
	u8 regval;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval &= ~(SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
	ep93xx_spi_write_u8(espi, SSPCR1, regval);
}
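/*
 * Worked example for the divisor search below (the clock value is
 * hypothetical): with a 14.7456 MHz sspclk and a requested rate of 1 MHz,
 * the first combination satisfying rate <= requested is cpsr = 2, scr = 7,
 * giving 14745600 / (2 * (1 + 7)) = 921600 Hz.
 */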
/**
 * ep93xx_spi_calc_divisors() - calculates SPI clock divisors
 * @espi: ep93xx SPI controller struct
 * @chip: divisors are calculated for this chip
 * @rate: desired SPI output clock rate
 *
 * Function calculates cpsr (clock pre-scaler) and scr divisors based on
 * given @rate and places them in @chip->div_cpsr and @chip->div_scr. If,
 * for some reason, divisors cannot be calculated, nothing is stored and
 * %-EINVAL is returned.
 */
static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi,
				    struct ep93xx_spi_chip *chip,
				    unsigned long rate)
{
	unsigned long spi_clk_rate = clk_get_rate(espi->clk);
	int cpsr, scr;

	/*
	 * Make sure that the max value is within the range supported by the
	 * controller. Note that the minimum value is already checked in
	 * ep93xx_spi_transfer().
	 */
	rate = clamp(rate, espi->min_rate, espi->max_rate);

	/*
	 * Calculate divisors so that we can get speed according to the
	 * following formula:
	 *	rate = spi_clock_rate / (cpsr * (1 + scr))
	 *
	 * cpsr must be an even number between 2 and 254, scr can be any
	 * number between 0 and 255.
	 */
	for (cpsr = 2; cpsr <= 254; cpsr += 2) {
		for (scr = 0; scr <= 255; scr++) {
			if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) {
				chip->div_scr = (u8)scr;
				chip->div_cpsr = (u8)cpsr;
				return 0;
			}
		}
	}

	return -EINVAL;
}

static void ep93xx_spi_cs_control(struct spi_device *spi, bool control)
{
	struct ep93xx_spi_chip *chip = spi_get_ctldata(spi);
	int value = (spi->mode & SPI_CS_HIGH) ? control : !control;

	if (chip->ops && chip->ops->cs_control)
		chip->ops->cs_control(spi, value);
}

/**
 * ep93xx_spi_setup() - setup an SPI device
 * @spi: SPI device to setup
 *
 * This function sets up SPI device mode, speed etc. Can be called multiple
 * times for a single device. Returns %0 in case of success, negative error
 * in case of failure. When this function returns success, the device is
 * deselected.
 */
static int ep93xx_spi_setup(struct spi_device *spi)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
	struct ep93xx_spi_chip *chip;

	chip = spi_get_ctldata(spi);
	if (!chip) {
		dev_dbg(&espi->pdev->dev, "initial setup for %s\n",
			spi->modalias);

		chip = kzalloc(sizeof(*chip), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;

		chip->spi = spi;
		chip->ops = spi->controller_data;

		if (chip->ops && chip->ops->setup) {
			int ret = chip->ops->setup(spi);

			if (ret) {
				kfree(chip);
				return ret;
			}
		}

		spi_set_ctldata(spi, chip);
	}

	if (spi->max_speed_hz != chip->rate) {
		int err;

		err = ep93xx_spi_calc_divisors(espi, chip, spi->max_speed_hz);
		if (err != 0) {
			spi_set_ctldata(spi, NULL);
			kfree(chip);
			return err;
		}
		chip->rate = spi->max_speed_hz;
	}

	chip->dss = bits_per_word_to_dss(spi->bits_per_word);

	ep93xx_spi_cs_control(spi, false);
	return 0;
}
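/*
 * Illustrative sketch of the @controller_data a board file might supply so
 * that the ->setup()/->cs_control() hooks above have something to call. The
 * GPIO number and names are hypothetical; the callback signatures come from
 * <linux/platform_data/spi-ep93xx.h>:
 *
 *	static void my_board_cs_control(struct spi_device *spi, int value)
 *	{
 *		gpio_set_value(MY_BOARD_SPI_CS_GPIO, value);
 *	}
 *
 *	static struct ep93xx_spi_chip_ops my_board_spi_ops = {
 *		.cs_control	= my_board_cs_control,
 *	};
 *
 * with .controller_data = &my_board_spi_ops in the spi_board_info entry.
 */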
/**
 * ep93xx_spi_transfer() - queue message to be transferred
 * @spi: target SPI device
 * @msg: message to be transferred
 *
 * This function is called by SPI device drivers when they are going to
 * transfer a new message. It simply puts the message in the queue and
 * schedules the workqueue to perform the actual transfer later on.
 *
 * Returns %0 on success and negative error in case of failure.
 */
static int ep93xx_spi_transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
	struct spi_transfer *t;
	unsigned long flags;

	if (!msg || !msg->complete)
		return -EINVAL;

	/* first validate each transfer */
	list_for_each_entry(t, &msg->transfers, transfer_list) {
		if (t->speed_hz && t->speed_hz < espi->min_rate)
			return -EINVAL;
	}

	/*
	 * Now that we own the message, let's initialize it so that it is
	 * suitable for us. We use @msg->status to signal whether there was
	 * an error in the transfer and @msg->state is used to hold a pointer
	 * to the current transfer (or %NULL if there is no active current
	 * transfer).
	 */
	msg->state = NULL;
	msg->status = 0;
	msg->actual_length = 0;

	spin_lock_irqsave(&espi->lock, flags);
	if (!espi->running) {
		spin_unlock_irqrestore(&espi->lock, flags);
		return -ESHUTDOWN;
	}
	list_add_tail(&msg->queue, &espi->msg_queue);
	queue_work(espi->wq, &espi->msg_work);
	spin_unlock_irqrestore(&espi->lock, flags);

	return 0;
}

/**
 * ep93xx_spi_cleanup() - cleans up master controller specific state
 * @spi: SPI device to cleanup
 *
 * This function releases master controller specific state for given @spi
 * device.
 */
static void ep93xx_spi_cleanup(struct spi_device *spi)
{
	struct ep93xx_spi_chip *chip;

	chip = spi_get_ctldata(spi);
	if (chip) {
		if (chip->ops && chip->ops->cleanup)
			chip->ops->cleanup(spi);
		spi_set_ctldata(spi, NULL);
		kfree(chip);
	}
}
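/*
 * Worked example for ep93xx_spi_chip_setup() below: an SPI mode 3 device
 * (SPI_CPOL | SPI_CPHA = 0x3) with div_scr = 7 and 8 bits per word
 * (dss = 7) yields
 *
 *	cr0 = (7 << 8) | (0x3 << 6) | 7 = 0x7c7
 */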
/**
 * ep93xx_spi_chip_setup() - configures hardware according to given @chip
 * @espi: ep93xx SPI controller struct
 * @chip: chip specific settings
 *
 * This function sets up the actual hardware registers with settings given in
 * @chip. Note that no validation is done so make sure that callers validate
 * settings before calling this.
 */
static void ep93xx_spi_chip_setup(const struct ep93xx_spi *espi,
				  const struct ep93xx_spi_chip *chip)
{
	u16 cr0;

	cr0 = chip->div_scr << SSPCR0_SCR_SHIFT;
	cr0 |= (chip->spi->mode & (SPI_CPHA | SPI_CPOL)) << SSPCR0_MODE_SHIFT;
	cr0 |= chip->dss;

	dev_dbg(&espi->pdev->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n",
		chip->spi->mode, chip->div_cpsr, chip->div_scr, chip->dss);
	dev_dbg(&espi->pdev->dev, "setup: cr0 %#x\n", cr0);

	ep93xx_spi_write_u8(espi, SSPCPSR, chip->div_cpsr);
	ep93xx_spi_write_u16(espi, SSPCR0, cr0);
}

static inline int bits_per_word(const struct ep93xx_spi *espi)
{
	struct spi_message *msg = espi->current_msg;
	struct spi_transfer *t = msg->state;

	return t->bits_per_word;
}

static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t)
{
	if (bits_per_word(espi) > 8) {
		u16 tx_val = 0;

		/* @tx counts bytes, so convert it to a halfword index */
		if (t->tx_buf)
			tx_val = ((u16 *)t->tx_buf)[espi->tx / 2];
		ep93xx_spi_write_u16(espi, SSPDR, tx_val);
		espi->tx += sizeof(tx_val);
	} else {
		u8 tx_val = 0;

		if (t->tx_buf)
			tx_val = ((u8 *)t->tx_buf)[espi->tx];
		ep93xx_spi_write_u8(espi, SSPDR, tx_val);
		espi->tx += sizeof(tx_val);
	}
}

static void ep93xx_do_read(struct ep93xx_spi *espi, struct spi_transfer *t)
{
	if (bits_per_word(espi) > 8) {
		u16 rx_val;

		rx_val = ep93xx_spi_read_u16(espi, SSPDR);
		/* @rx counts bytes, so convert it to a halfword index */
		if (t->rx_buf)
			((u16 *)t->rx_buf)[espi->rx / 2] = rx_val;
		espi->rx += sizeof(rx_val);
	} else {
		u8 rx_val;

		rx_val = ep93xx_spi_read_u8(espi, SSPDR);
		if (t->rx_buf)
			((u8 *)t->rx_buf)[espi->rx] = rx_val;
		espi->rx += sizeof(rx_val);
	}
}

/**
 * ep93xx_spi_read_write() - perform next RX/TX transfer
 * @espi: ep93xx SPI controller struct
 *
 * This function transfers the next bytes (or half-words) to/from the RX/TX
 * FIFOs. If called several times, the whole transfer will be completed.
 * Returns %-EINPROGRESS if the current transfer is not yet complete,
 * otherwise %0.
 *
 * When this function is finished, RX FIFO should be empty and TX FIFO should
 * be full.
 */
static int ep93xx_spi_read_write(struct ep93xx_spi *espi)
{
	struct spi_message *msg = espi->current_msg;
	struct spi_transfer *t = msg->state;

	/* read as long as RX FIFO has frames in it */
	while ((ep93xx_spi_read_u8(espi, SSPSR) & SSPSR_RNE)) {
		ep93xx_do_read(espi, t);
		espi->fifo_level--;
	}

	/* write as long as TX FIFO has room */
	while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < t->len) {
		ep93xx_do_write(espi, t);
		espi->fifo_level++;
	}

	if (espi->rx == t->len)
		return 0;

	return -EINPROGRESS;
}

static void ep93xx_spi_pio_transfer(struct ep93xx_spi *espi)
{
	/*
	 * Now everything is set up for the current transfer. We prime the TX
	 * FIFO, enable interrupts, and wait for the transfer to complete.
	 */
	if (ep93xx_spi_read_write(espi)) {
		ep93xx_spi_enable_interrupts(espi);
		wait_for_completion(&espi->wait);
	}
}
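/*
 * Example of the PAGE_SIZE chunking done in ep93xx_spi_dma_prepare() below
 * (page size hypothetical): with 4096-byte pages, a 10000-byte transfer is
 * described by nents = DIV_ROUND_UP(10000, 4096) = 3 scatterlist entries of
 * 4096, 4096 and 1808 bytes.
 */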
/**
 * ep93xx_spi_dma_prepare() - prepares a DMA transfer
 * @espi: ep93xx SPI controller struct
 * @dir: DMA transfer direction
 *
 * Function configures the DMA, maps the buffer and prepares the DMA
 * descriptor. Returns a valid DMA descriptor in case of success and ERR_PTR
 * in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_transfer_direction dir)
{
	struct spi_transfer *t = espi->current_msg->state;
	struct dma_async_tx_descriptor *txd;
	enum dma_slave_buswidth buswidth;
	struct dma_slave_config conf;
	struct scatterlist *sg;
	struct sg_table *sgt;
	struct dma_chan *chan;
	const void *buf, *pbuf;
	size_t len = t->len;
	int i, ret, nents;

	if (bits_per_word(espi) > 8)
		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
	else
		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;

	memset(&conf, 0, sizeof(conf));
	conf.direction = dir;

	if (dir == DMA_DEV_TO_MEM) {
		chan = espi->dma_rx;
		buf = t->rx_buf;
		sgt = &espi->rx_sgt;

		conf.src_addr = espi->sspdr_phys;
		conf.src_addr_width = buswidth;
	} else {
		chan = espi->dma_tx;
		buf = t->tx_buf;
		sgt = &espi->tx_sgt;

		conf.dst_addr = espi->sspdr_phys;
		conf.dst_addr_width = buswidth;
	}

	ret = dmaengine_slave_config(chan, &conf);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * We need to split the transfer into PAGE_SIZE'd chunks. This is
	 * because we are using @espi->zeropage to provide a zero RX buffer
	 * for the TX transfers and we have only allocated one page for that.
	 *
	 * For performance reasons we allocate a new sg_table only when
	 * needed. Otherwise we will re-use the current one. Eventually the
	 * last sg_table is released in ep93xx_spi_release_dma().
	 */

	nents = DIV_ROUND_UP(len, PAGE_SIZE);
	if (nents != sgt->nents) {
		sg_free_table(sgt);

		ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
		if (ret)
			return ERR_PTR(ret);
	}

	pbuf = buf;
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = min_t(size_t, len, PAGE_SIZE);

		if (buf) {
			sg_set_page(sg, virt_to_page(pbuf), bytes,
				    offset_in_page(pbuf));
		} else {
			sg_set_page(sg, virt_to_page(espi->zeropage),
				    bytes, 0);
		}

		pbuf += bytes;
		len -= bytes;
	}

	if (WARN_ON(len)) {
		dev_warn(&espi->pdev->dev, "len = %zu expected 0!\n", len);
		return ERR_PTR(-EINVAL);
	}

	nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
	if (!nents)
		return ERR_PTR(-ENOMEM);

	txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir, DMA_CTRL_ACK);
	if (!txd) {
		dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
		return ERR_PTR(-ENOMEM);
	}
	return txd;
}
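/*
 * Note that @t->len above is always in bytes: a transfer of N 16-bit frames
 * has t->len == 2 * N. With DMA_SLAVE_BUSWIDTH_2_BYTES the DMA engine then
 * moves one complete frame per bus transaction.
 */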
/**
 * ep93xx_spi_dma_finish() - finishes with a DMA transfer
 * @espi: ep93xx SPI controller struct
 * @dir: DMA transfer direction
 *
 * Function finishes with the DMA transfer. After this, the DMA buffer is
 * unmapped.
 */
static void ep93xx_spi_dma_finish(struct ep93xx_spi *espi,
				  enum dma_transfer_direction dir)
{
	struct dma_chan *chan;
	struct sg_table *sgt;

	if (dir == DMA_DEV_TO_MEM) {
		chan = espi->dma_rx;
		sgt = &espi->rx_sgt;
	} else {
		chan = espi->dma_tx;
		sgt = &espi->tx_sgt;
	}

	dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
}

static void ep93xx_spi_dma_callback(void *callback_param)
{
	complete(callback_param);
}

static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi)
{
	struct spi_message *msg = espi->current_msg;
	struct dma_async_tx_descriptor *rxd, *txd;

	rxd = ep93xx_spi_dma_prepare(espi, DMA_DEV_TO_MEM);
	if (IS_ERR(rxd)) {
		dev_err(&espi->pdev->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
		msg->status = PTR_ERR(rxd);
		return;
	}

	txd = ep93xx_spi_dma_prepare(espi, DMA_MEM_TO_DEV);
	if (IS_ERR(txd)) {
		ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
		dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(txd));
		msg->status = PTR_ERR(txd);
		return;
	}

	/*
	 * We are ready when RX is done: the RX descriptor completes only
	 * after the last frame has been clocked in, which implies that TX
	 * has finished as well, so completing on the RX callback is enough.
	 */
	rxd->callback = ep93xx_spi_dma_callback;
	rxd->callback_param = &espi->wait;

	/* Now submit both descriptors and wait while they finish */
	dmaengine_submit(rxd);
	dmaengine_submit(txd);

	dma_async_issue_pending(espi->dma_rx);
	dma_async_issue_pending(espi->dma_tx);

	wait_for_completion(&espi->wait);

	ep93xx_spi_dma_finish(espi, DMA_MEM_TO_DEV);
	ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
}
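/*
 * The per-transfer overrides handled below can be exercised from a protocol
 * driver roughly as follows (illustrative sketch; the device handle and
 * buffer are hypothetical). Here one transfer temporarily drops to 1 MHz and
 * 16 bits per word, and requests a chipselect toggle between transfers
 * (honoured only when the transfer is not the last one in the message):
 *
 *	struct spi_transfer t = {
 *		.tx_buf		= cmd_buf,
 *		.len		= sizeof(cmd_buf),
 *		.speed_hz	= 1000000,
 *		.bits_per_word	= 16,
 *		.cs_change	= 1,
 *	};
 *	struct spi_message m;
 *
 *	spi_message_init(&m);
 *	spi_message_add_tail(&t, &m);
 *	spi_sync(spi, &m);
 */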
/**
 * ep93xx_spi_process_transfer() - processes one SPI transfer
 * @espi: ep93xx SPI controller struct
 * @msg: current message
 * @t: transfer to process
 *
 * This function processes one SPI transfer given in @t. Function waits until
 * transfer is complete (may sleep) and updates @msg->status based on whether
 * transfer was successfully processed or not.
 */
static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
					struct spi_message *msg,
					struct spi_transfer *t)
{
	struct ep93xx_spi_chip *chip = spi_get_ctldata(msg->spi);

	msg->state = t;

	/*
	 * Handle any transfer specific settings if needed. We use
	 * temporary chip settings here and restore the original ones later
	 * when the transfer is finished.
	 */
	if (t->speed_hz || t->bits_per_word) {
		struct ep93xx_spi_chip tmp_chip = *chip;

		if (t->speed_hz) {
			int err;

			err = ep93xx_spi_calc_divisors(espi, &tmp_chip,
						       t->speed_hz);
			if (err) {
				dev_err(&espi->pdev->dev,
					"failed to adjust speed\n");
				msg->status = err;
				return;
			}
		}

		if (t->bits_per_word)
			tmp_chip.dss = bits_per_word_to_dss(t->bits_per_word);

		/*
		 * Set up temporary new hw settings for this transfer.
		 */
		ep93xx_spi_chip_setup(espi, &tmp_chip);
	}

	espi->rx = 0;
	espi->tx = 0;

	/*
	 * There is no point in setting up DMA for transfers which will fit
	 * into the FIFO and can be transferred with a single interrupt.
	 * In these cases we use PIO and don't bother with DMA.
	 */
	if (espi->dma_rx && t->len > SPI_FIFO_SIZE)
		ep93xx_spi_dma_transfer(espi);
	else
		ep93xx_spi_pio_transfer(espi);

	/*
	 * In case of error during transmit, we bail out from processing
	 * the message.
	 */
	if (msg->status)
		return;

	msg->actual_length += t->len;

	/*
	 * After this transfer is finished, perform any possible
	 * post-transfer actions requested by the protocol driver.
	 */
	if (t->delay_usecs) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(t->delay_usecs));
	}
	if (t->cs_change) {
		if (!list_is_last(&t->transfer_list, &msg->transfers)) {
			/*
			 * In case the protocol driver is asking us to drop
			 * the chipselect briefly, we let the scheduler
			 * handle any "delay" here.
			 */
			ep93xx_spi_cs_control(msg->spi, false);
			cond_resched();
			ep93xx_spi_cs_control(msg->spi, true);
		}
	}

	if (t->speed_hz || t->bits_per_word)
		ep93xx_spi_chip_setup(espi, chip);
}

/*
 * ep93xx_spi_process_message() - process one SPI message
 * @espi: ep93xx SPI controller struct
 * @msg: message to process
 *
 * This function processes a single SPI message. We go through all transfers
 * in the message and pass them to ep93xx_spi_process_transfer(). Chipselect
 * is asserted during the whole message (unless per transfer cs_change is
 * set).
 *
 * @msg->status contains %0 in case of success or a negative error code in
 * case of failure.
 */
static void ep93xx_spi_process_message(struct ep93xx_spi *espi,
				       struct spi_message *msg)
{
	unsigned long timeout;
	struct spi_transfer *t;
	int err;

	/*
	 * Enable the SPI controller and its clock.
	 */
	err = ep93xx_spi_enable(espi);
	if (err) {
		dev_err(&espi->pdev->dev, "failed to enable SPI controller\n");
		msg->status = err;
		return;
	}

	/*
	 * Just to be sure: flush any data from RX FIFO.
	 */
	timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT);
	while (ep93xx_spi_read_u16(espi, SSPSR) & SSPSR_RNE) {
		if (time_after(jiffies, timeout)) {
			dev_warn(&espi->pdev->dev,
				 "timeout while flushing RX FIFO\n");
			msg->status = -ETIMEDOUT;
			return;
		}
		ep93xx_spi_read_u16(espi, SSPDR);
	}

	/*
	 * We explicitly handle FIFO level. This way we don't have to check TX
	 * FIFO status using the %SSPSR_TNF bit which may cause RX FIFO
	 * overruns.
	 */
	espi->fifo_level = 0;

	/*
	 * Update SPI controller registers according to the SPI device and
	 * assert the chipselect.
	 */
	ep93xx_spi_chip_setup(espi, spi_get_ctldata(msg->spi));
	ep93xx_spi_cs_control(msg->spi, true);

	list_for_each_entry(t, &msg->transfers, transfer_list) {
		ep93xx_spi_process_transfer(espi, msg, t);
		if (msg->status)
			break;
	}

	/*
	 * Now the whole message is transferred (or failed for some reason).
	 * We deselect the device and disable the SPI controller.
	 */
	ep93xx_spi_cs_control(msg->spi, false);
	ep93xx_spi_disable(espi);
}

#define work_to_espi(work) (container_of((work), struct ep93xx_spi, msg_work))
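/*
 * Design note: the driver deliberately uses a single-threaded workqueue
 * (created in ep93xx_spi_probe()), so instances of ep93xx_spi_work() are
 * serialized and only one message is ever processed at a time.
 */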
/**
 * ep93xx_spi_work() - EP93xx SPI workqueue worker function
 * @work: work struct
 *
 * Workqueue worker function. This function is called when there are new
 * SPI messages to be processed. The message is taken out of the queue and
 * then passed to ep93xx_spi_process_message().
 *
 * After a message is transferred, the protocol driver is notified by calling
 * @msg->complete(). In case of error, @msg->status is set to a negative
 * error number, otherwise it contains zero (and @msg->actual_length is
 * updated).
 */
static void ep93xx_spi_work(struct work_struct *work)
{
	struct ep93xx_spi *espi = work_to_espi(work);
	struct spi_message *msg;

	spin_lock_irq(&espi->lock);
	if (!espi->running || espi->current_msg ||
	    list_empty(&espi->msg_queue)) {
		spin_unlock_irq(&espi->lock);
		return;
	}
	msg = list_first_entry(&espi->msg_queue, struct spi_message, queue);
	list_del_init(&msg->queue);
	espi->current_msg = msg;
	spin_unlock_irq(&espi->lock);

	ep93xx_spi_process_message(espi, msg);

	/*
	 * Update the current message and re-schedule ourselves if there are
	 * more messages in the queue.
	 */
	spin_lock_irq(&espi->lock);
	espi->current_msg = NULL;
	if (espi->running && !list_empty(&espi->msg_queue))
		queue_work(espi->wq, &espi->msg_work);
	spin_unlock_irq(&espi->lock);

	/* notify the protocol driver that we are done with this message */
	msg->complete(msg->context);
}

static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
{
	struct ep93xx_spi *espi = dev_id;
	u8 irq_status = ep93xx_spi_read_u8(espi, SSPIIR);

	/*
	 * If we got an ROR (receive overrun) interrupt we know that something
	 * is wrong. Just abort the message.
	 */
	if (unlikely(irq_status & SSPIIR_RORIS)) {
		/* clear the overrun interrupt */
		ep93xx_spi_write_u8(espi, SSPICR, 0);
		dev_warn(&espi->pdev->dev,
			 "receive overrun, aborting the message\n");
		espi->current_msg->status = -EIO;
	} else {
		/*
		 * Interrupt is either RX (RIS) or TX (TIS). For both cases we
		 * simply execute the next data transfer.
		 */
		if (ep93xx_spi_read_write(espi)) {
			/*
			 * In the normal case there is still some processing
			 * left for the current transfer. Let's wait for the
			 * next interrupt then.
			 */
			return IRQ_HANDLED;
		}
	}

	/*
	 * The current transfer is finished, either with error or with
	 * success. In any case we disable interrupts and notify the worker
	 * to handle any post-processing of the message.
	 */
	ep93xx_spi_disable_interrupts(espi);
	complete(&espi->wait);
	return IRQ_HANDLED;
}
static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param)
{
	if (ep93xx_dma_chan_is_m2p(chan))
		return false;

	chan->private = filter_param;
	return true;
}

static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
{
	dma_cap_mask_t mask;
	int ret;

	espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL);
	if (!espi->zeropage)
		return -ENOMEM;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	espi->dma_rx_data.port = EP93XX_DMA_SSP;
	espi->dma_rx_data.direction = DMA_DEV_TO_MEM;
	espi->dma_rx_data.name = "ep93xx-spi-rx";

	espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
					   &espi->dma_rx_data);
	if (!espi->dma_rx) {
		ret = -ENODEV;
		goto fail_free_page;
	}

	espi->dma_tx_data.port = EP93XX_DMA_SSP;
	espi->dma_tx_data.direction = DMA_MEM_TO_DEV;
	espi->dma_tx_data.name = "ep93xx-spi-tx";

	espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
					   &espi->dma_tx_data);
	if (!espi->dma_tx) {
		ret = -ENODEV;
		goto fail_release_rx;
	}

	return 0;

fail_release_rx:
	dma_release_channel(espi->dma_rx);
	espi->dma_rx = NULL;
fail_free_page:
	free_page((unsigned long)espi->zeropage);

	return ret;
}

static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
{
	if (espi->dma_rx) {
		dma_release_channel(espi->dma_rx);
		sg_free_table(&espi->rx_sgt);
	}
	if (espi->dma_tx) {
		dma_release_channel(espi->dma_tx);
		sg_free_table(&espi->tx_sgt);
	}

	if (espi->zeropage)
		free_page((unsigned long)espi->zeropage);
}
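/*
 * The rate limits computed in ep93xx_spi_probe() below follow from the
 * divisor formula rate = sspclk / (cpsr * (1 + scr)): the fastest setting is
 * cpsr = 2, scr = 0 (sspclk / 2) and the slowest is cpsr = 254, scr = 255
 * (sspclk / 65024). With a hypothetical 14.7456 MHz sspclk that is roughly
 * 7.37 MHz and 227 Hz respectively.
 */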
static int ep93xx_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct ep93xx_spi_info *info;
	struct ep93xx_spi *espi;
	struct resource *res;
	int irq;
	int error;

	info = pdev->dev.platform_data;

	master = spi_alloc_master(&pdev->dev, sizeof(*espi));
	if (!master) {
		dev_err(&pdev->dev, "failed to allocate spi master\n");
		return -ENOMEM;
	}

	master->setup = ep93xx_spi_setup;
	master->transfer = ep93xx_spi_transfer;
	master->cleanup = ep93xx_spi_cleanup;
	master->bus_num = pdev->id;
	master->num_chipselect = info->num_chipselect;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);

	platform_set_drvdata(pdev, master);

	espi = spi_master_get_devdata(master);

	espi->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(espi->clk)) {
		dev_err(&pdev->dev, "unable to get spi clock\n");
		error = PTR_ERR(espi->clk);
		goto fail_release_master;
	}

	spin_lock_init(&espi->lock);
	init_completion(&espi->wait);

	/*
	 * Calculate maximum and minimum supported clock rates
	 * for the controller.
	 */
	espi->max_rate = clk_get_rate(espi->clk) / 2;
	espi->min_rate = clk_get_rate(espi->clk) / (254 * 256);
	espi->pdev = pdev;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		error = -EBUSY;
		dev_err(&pdev->dev, "failed to get irq resources\n");
		goto fail_put_clock;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "unable to get iomem resource\n");
		error = -ENODEV;
		goto fail_put_clock;
	}

	espi->sspdr_phys = res->start + SSPDR;

	espi->regs_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(espi->regs_base)) {
		error = PTR_ERR(espi->regs_base);
		goto fail_put_clock;
	}

	error = devm_request_irq(&pdev->dev, irq, ep93xx_spi_interrupt,
				 0, "ep93xx-spi", espi);
	if (error) {
		dev_err(&pdev->dev, "failed to request irq\n");
		goto fail_put_clock;
	}

	if (info->use_dma && ep93xx_spi_setup_dma(espi))
		dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n");

	espi->wq = create_singlethread_workqueue("ep93xx_spid");
	if (!espi->wq) {
		dev_err(&pdev->dev, "unable to create workqueue\n");
		error = -ENOMEM;
		goto fail_free_dma;
	}
	INIT_WORK(&espi->msg_work, ep93xx_spi_work);
	INIT_LIST_HEAD(&espi->msg_queue);
	espi->running = true;

	/* make sure that the hardware is disabled */
	ep93xx_spi_write_u8(espi, SSPCR1, 0);

	error = spi_register_master(master);
	if (error) {
		dev_err(&pdev->dev, "failed to register SPI master\n");
		goto fail_free_queue;
	}

	dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n",
		 (unsigned long)res->start, irq);

	return 0;

fail_free_queue:
	destroy_workqueue(espi->wq);
fail_free_dma:
	ep93xx_spi_release_dma(espi);
fail_put_clock:
	clk_put(espi->clk);
fail_release_master:
	spi_master_put(master);

	return error;
}

static int ep93xx_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct ep93xx_spi *espi = spi_master_get_devdata(master);

	spin_lock_irq(&espi->lock);
	espi->running = false;
	spin_unlock_irq(&espi->lock);

	destroy_workqueue(espi->wq);

	/*
	 * Complete remaining messages with %-ESHUTDOWN status.
	 */
	spin_lock_irq(&espi->lock);
	while (!list_empty(&espi->msg_queue)) {
		struct spi_message *msg;

		msg = list_first_entry(&espi->msg_queue,
				       struct spi_message, queue);
		list_del_init(&msg->queue);
		msg->status = -ESHUTDOWN;
		spin_unlock_irq(&espi->lock);
		msg->complete(msg->context);
		spin_lock_irq(&espi->lock);
	}
	spin_unlock_irq(&espi->lock);

	ep93xx_spi_release_dma(espi);
	clk_put(espi->clk);

	spi_unregister_master(master);
	return 0;
}

static struct platform_driver ep93xx_spi_driver = {
	.driver		= {
		.name	= "ep93xx-spi",
		.owner	= THIS_MODULE,
	},
	.probe		= ep93xx_spi_probe,
	.remove		= ep93xx_spi_remove,
};
module_platform_driver(ep93xx_spi_driver);

MODULE_DESCRIPTION("EP93xx SPI Controller driver");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-spi");