/*
 * Driver for Atmel AT32 and AT91 SPI Controllers
 *
 * Copyright (C) 2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/platform_data/atmel.h>
#include <linux/platform_data/dma-atmel.h>
#include <linux/of.h>

#include <linux/io.h>
#include <linux/gpio.h>

/* SPI register offsets */
#define SPI_CR			0x0000
#define SPI_MR			0x0004
#define SPI_RDR			0x0008
#define SPI_TDR			0x000c
#define SPI_SR			0x0010
#define SPI_IER			0x0014
#define SPI_IDR			0x0018
#define SPI_IMR			0x001c
#define SPI_CSR0		0x0030
#define SPI_CSR1		0x0034
#define SPI_CSR2		0x0038
#define SPI_CSR3		0x003c
#define SPI_VERSION		0x00fc
#define SPI_RPR			0x0100
#define SPI_RCR			0x0104
#define SPI_TPR			0x0108
#define SPI_TCR			0x010c
#define SPI_RNPR		0x0110
#define SPI_RNCR		0x0114
#define SPI_TNPR		0x0118
#define SPI_TNCR		0x011c
#define SPI_PTCR		0x0120
#define SPI_PTSR		0x0124

/* Bitfields in CR */
#define SPI_SPIEN_OFFSET	0
#define SPI_SPIEN_SIZE		1
#define SPI_SPIDIS_OFFSET	1
#define SPI_SPIDIS_SIZE		1
#define SPI_SWRST_OFFSET	7
#define SPI_SWRST_SIZE		1
#define SPI_LASTXFER_OFFSET	24
#define SPI_LASTXFER_SIZE	1

/* Bitfields in MR */
#define SPI_MSTR_OFFSET		0
#define SPI_MSTR_SIZE		1
#define SPI_PS_OFFSET		1
#define SPI_PS_SIZE		1
#define SPI_PCSDEC_OFFSET	2
#define SPI_PCSDEC_SIZE		1
#define SPI_FDIV_OFFSET		3
#define SPI_FDIV_SIZE		1
#define SPI_MODFDIS_OFFSET	4
#define SPI_MODFDIS_SIZE	1
#define SPI_WDRBT_OFFSET	5
#define SPI_WDRBT_SIZE		1
#define SPI_LLB_OFFSET		7
#define SPI_LLB_SIZE		1
#define SPI_PCS_OFFSET		16
#define SPI_PCS_SIZE		4
#define SPI_DLYBCS_OFFSET	24
#define SPI_DLYBCS_SIZE		8

/* Bitfields in RDR */
#define SPI_RD_OFFSET		0
#define SPI_RD_SIZE		16

/* Bitfields in TDR */
#define SPI_TD_OFFSET		0
#define SPI_TD_SIZE		16

/* Bitfields in SR */
#define SPI_RDRF_OFFSET		0
#define SPI_RDRF_SIZE		1
#define SPI_TDRE_OFFSET		1
#define SPI_TDRE_SIZE		1
#define SPI_MODF_OFFSET		2
#define SPI_MODF_SIZE		1
#define SPI_OVRES_OFFSET	3
#define SPI_OVRES_SIZE		1
#define SPI_ENDRX_OFFSET	4
#define SPI_ENDRX_SIZE		1
#define SPI_ENDTX_OFFSET	5
#define SPI_ENDTX_SIZE		1
#define SPI_RXBUFF_OFFSET	6
#define SPI_RXBUFF_SIZE		1
#define SPI_TXBUFE_OFFSET	7
#define SPI_TXBUFE_SIZE		1
#define SPI_NSSR_OFFSET		8
#define SPI_NSSR_SIZE		1
#define SPI_TXEMPTY_OFFSET	9
#define SPI_TXEMPTY_SIZE	1
#define SPI_SPIENS_OFFSET	16
#define SPI_SPIENS_SIZE		1

/* Bitfields in CSR0 */
#define SPI_CPOL_OFFSET		0
#define SPI_CPOL_SIZE		1
#define SPI_NCPHA_OFFSET	1
#define SPI_NCPHA_SIZE		1
#define SPI_CSAAT_OFFSET	3
#define SPI_CSAAT_SIZE		1
#define SPI_BITS_OFFSET		4
#define SPI_BITS_SIZE		4
#define SPI_SCBR_OFFSET		8
#define SPI_SCBR_SIZE		8
#define SPI_DLYBS_OFFSET	16
#define SPI_DLYBS_SIZE		8
#define SPI_DLYBCT_OFFSET	24
#define SPI_DLYBCT_SIZE		8
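/*
 * Note that NCPHA is the *inverse* of the usual CPHA convention: SPI
 * mode 0 (CPOL=0, CPHA=0) is programmed with NCPHA set.  An
 * illustrative CSR value for mode 0 with 8 bits per word (not taken
 * verbatim from the code below) would be:
 *
 *	csr = SPI_BIT(NCPHA) | SPI_BF(BITS, SPI_BITS_8_BPT);
 *
 * atmel_spi_setup() builds its CSR values the same way.
 */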

/* Bitfields in RCR */
#define SPI_RXCTR_OFFSET	0
#define SPI_RXCTR_SIZE		16

/* Bitfields in TCR */
#define SPI_TXCTR_OFFSET	0
#define SPI_TXCTR_SIZE		16

/* Bitfields in RNCR */
#define SPI_RXNCR_OFFSET	0
#define SPI_RXNCR_SIZE		16

/* Bitfields in TNCR */
#define SPI_TXNCR_OFFSET	0
#define SPI_TXNCR_SIZE		16

/* Bitfields in PTCR */
#define SPI_RXTEN_OFFSET	0
#define SPI_RXTEN_SIZE		1
#define SPI_RXTDIS_OFFSET	1
#define SPI_RXTDIS_SIZE		1
#define SPI_TXTEN_OFFSET	8
#define SPI_TXTEN_SIZE		1
#define SPI_TXTDIS_OFFSET	9
#define SPI_TXTDIS_SIZE		1

/* Constants for BITS */
#define SPI_BITS_8_BPT		0
#define SPI_BITS_9_BPT		1
#define SPI_BITS_10_BPT		2
#define SPI_BITS_11_BPT		3
#define SPI_BITS_12_BPT		4
#define SPI_BITS_13_BPT		5
#define SPI_BITS_14_BPT		6
#define SPI_BITS_15_BPT		7
#define SPI_BITS_16_BPT		8

/* Bit manipulation macros */
#define SPI_BIT(name) \
	(1 << SPI_##name##_OFFSET)
#define SPI_BF(name, value) \
	(((value) & ((1 << SPI_##name##_SIZE) - 1)) << SPI_##name##_OFFSET)
#define SPI_BFEXT(name, value) \
	(((value) >> SPI_##name##_OFFSET) & ((1 << SPI_##name##_SIZE) - 1))
#define SPI_BFINS(name, value, old) \
	(((old) & ~(((1 << SPI_##name##_SIZE) - 1) << SPI_##name##_OFFSET)) \
	  | SPI_BF(name, value))

/* Register access macros */
#define spi_readl(port, reg) \
	__raw_readl((port)->regs + SPI_##reg)
#define spi_writel(port, reg, value) \
	__raw_writel((value), (port)->regs + SPI_##reg)
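/*
 * Example (illustrative only): SPI_BF(SCBR, 4) expands to
 * ((4 & 0xff) << 8) == 0x400, and SPI_BFEXT(SCBR, 0x400) recovers 4.
 * SPI_BF() masks the value to the field width before shifting, so an
 * out-of-range value is truncated rather than corrupting neighbouring
 * fields.  A typical read-modify-write using these helpers:
 *
 *	u32 mr = spi_readl(as, MR);
 *	spi_writel(as, MR, SPI_BFINS(PCS, 0xf, mr));
 */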

/* use PIO for small transfers, avoiding DMA setup/teardown overhead and
 * cache operations; better heuristics consider wordsize and bitrate.
 */
#define DMA_MIN_BYTES	16

struct atmel_spi_dma {
	struct dma_chan			*chan_rx;
	struct dma_chan			*chan_tx;
	struct scatterlist		sgrx;
	struct scatterlist		sgtx;
	struct dma_async_tx_descriptor	*data_desc_rx;
	struct dma_async_tx_descriptor	*data_desc_tx;

	struct at_dma_slave	dma_slave;
};

struct atmel_spi_caps {
	bool	is_spi2;
	bool	has_wdrbt;
	bool	has_dma_support;
};

/*
 * The core SPI transfer engine just talks to a register bank to set up
 * DMA transfers; transfer queue progress is driven by IRQs.  The clock
 * framework provides the base clock, subdivided for each spi_device.
 */
struct atmel_spi {
	spinlock_t		lock;
	unsigned long		flags;

	phys_addr_t		phybase;
	void __iomem		*regs;
	int			irq;
	struct clk		*clk;
	struct platform_device	*pdev;
	struct spi_device	*stay;

	u8			stopping;
	struct list_head	queue;
	struct tasklet_struct	tasklet;
	struct spi_transfer	*current_transfer;
	unsigned long		current_remaining_bytes;
	struct spi_transfer	*next_transfer;
	unsigned long		next_remaining_bytes;
	int			done_status;

	/* scratch buffer */
	void			*buffer;
	dma_addr_t		buffer_dma;

	struct atmel_spi_caps	caps;

	bool			use_dma;
	bool			use_pdc;
	/* dmaengine data */
	struct atmel_spi_dma	dma;
};

/* Controller-specific per-slave state */
struct atmel_spi_device {
	unsigned int		npcs_pin;
	u32			csr;
};

#define BUFFER_SIZE		PAGE_SIZE
#define INVALID_DMA_ADDRESS	0xffffffff

/*
 * Version 2 of the SPI controller has
 *  - CR.LASTXFER
 *  - SPI_MR.DIV32 may become FDIV or must-be-zero (here: always zero)
 *  - SPI_SR.TXEMPTY, SPI_SR.NSSR (and corresponding irqs)
 *  - SPI_CSRx.CSAAT
 *  - SPI_CSRx.SBCR allows faster clocking
 */
static bool atmel_spi_is_v2(struct atmel_spi *as)
{
	return as->caps.is_spi2;
}

/*
 * Earlier SPI controllers (e.g. on at91rm9200) have a design bug whereby
 * they assume that spi slave device state will not change on deselect, so
 * that automagic deselection is OK.  ("NPCSx rises if no data is to be
 * transmitted")  Not so!  Workaround uses nCSx pins as GPIOs; or newer
 * controllers have CSAAT and friends.
 *
 * Since the CSAAT functionality is a bit weird on newer controllers as
 * well, we use GPIO to control nCSx pins on all controllers, updating
 * MR.PCS to avoid confusing the controller.  Using GPIOs also lets us
 * support active-high chipselects despite the controller's belief that
 * only active-low devices/systems exist.
 *
 * However, at91rm9200 has a second erratum whereby nCS0 doesn't work
 * right when driven with GPIO.  ("Mode Fault does not allow more than one
 * Master on Chip Select 0.")  No workaround exists for that ... so for
 * nCS0 on that chip, we (a) don't use the GPIO, (b) can't support CS_HIGH,
 * and (c) will trigger that first erratum in some cases.
 */

static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
{
	struct atmel_spi_device *asd = spi->controller_state;
	unsigned active = spi->mode & SPI_CS_HIGH;
	u32 mr;

	if (atmel_spi_is_v2(as)) {
		spi_writel(as, CSR0 + 4 * spi->chip_select, asd->csr);
		/* For lower SPI versions there is an issue: PDC transfers
		 * on CS1,2,3 need SPI_CSR0.BITS configured the same as
		 * SPI_CSR1,2,3.BITS.
		 */
		spi_writel(as, CSR0, asd->csr);
		if (as->caps.has_wdrbt) {
			spi_writel(as, MR,
					SPI_BF(PCS, ~(0x01 << spi->chip_select))
					| SPI_BIT(WDRBT)
					| SPI_BIT(MODFDIS)
					| SPI_BIT(MSTR));
		} else {
			spi_writel(as, MR,
					SPI_BF(PCS, ~(0x01 << spi->chip_select))
					| SPI_BIT(MODFDIS)
					| SPI_BIT(MSTR));
		}

		mr = spi_readl(as, MR);
		gpio_set_value(asd->npcs_pin, active);
	} else {
		u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0;
		int i;
		u32 csr;

		/* Make sure clock polarity is correct */
		for (i = 0; i < spi->master->num_chipselect; i++) {
			csr = spi_readl(as, CSR0 + 4 * i);
			if ((csr ^ cpol) & SPI_BIT(CPOL))
				spi_writel(as, CSR0 + 4 * i,
						csr ^ SPI_BIT(CPOL));
		}

		mr = spi_readl(as, MR);
		mr = SPI_BFINS(PCS, ~(1 << spi->chip_select), mr);
		if (spi->chip_select != 0)
			gpio_set_value(asd->npcs_pin, active);
		spi_writel(as, MR, mr);
	}

	dev_dbg(&spi->dev, "activate %u%s, mr %08x\n",
			asd->npcs_pin, active ? " (high)" : "",
			mr);
}
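/*
 * For reference: with PCSDEC clear, MR.PCS is one-cold encoded, so
 * chip select n is addressed by clearing bit n.  For example,
 * chip_select 1 gives PCS = ~(1 << 1) & 0xf = 0b1101; PCS = 0xf
 * (all bits set) deselects everything, which is what cs_deactivate()
 * below writes back.
 */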
static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
{
	struct atmel_spi_device *asd = spi->controller_state;
	unsigned active = spi->mode & SPI_CS_HIGH;
	u32 mr;

	/* only deactivate *this* device; sometimes transfers to
	 * another device may be active when this routine is called.
	 */
	mr = spi_readl(as, MR);
	if (~SPI_BFEXT(PCS, mr) & (1 << spi->chip_select)) {
		mr = SPI_BFINS(PCS, 0xf, mr);
		spi_writel(as, MR, mr);
	}

	dev_dbg(&spi->dev, "DEactivate %u%s, mr %08x\n",
			asd->npcs_pin, active ? " (low)" : "",
			mr);

	if (atmel_spi_is_v2(as) || spi->chip_select != 0)
		gpio_set_value(asd->npcs_pin, !active);
}

static void atmel_spi_lock(struct atmel_spi *as)
{
	spin_lock_irqsave(&as->lock, as->flags);
}

static void atmel_spi_unlock(struct atmel_spi *as)
{
	spin_unlock_irqrestore(&as->lock, as->flags);
}

static inline bool atmel_spi_use_dma(struct atmel_spi *as,
				struct spi_transfer *xfer)
{
	return as->use_dma && xfer->len >= DMA_MIN_BYTES;
}

static inline int atmel_spi_xfer_is_last(struct spi_message *msg,
					struct spi_transfer *xfer)
{
	return msg->transfers.prev == &xfer->transfer_list;
}

static inline int atmel_spi_xfer_can_be_chained(struct spi_transfer *xfer)
{
	return xfer->delay_usecs == 0 && !xfer->cs_change;
}

static int atmel_spi_dma_slave_config(struct atmel_spi *as,
				struct dma_slave_config *slave_config,
				u8 bits_per_word)
{
	int err = 0;

	if (bits_per_word > 8) {
		slave_config->dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		slave_config->src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	} else {
		slave_config->dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		slave_config->src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	}

	slave_config->dst_addr = (dma_addr_t)as->phybase + SPI_TDR;
	slave_config->src_addr = (dma_addr_t)as->phybase + SPI_RDR;
	slave_config->src_maxburst = 1;
	slave_config->dst_maxburst = 1;
	slave_config->device_fc = false;

	slave_config->direction = DMA_MEM_TO_DEV;
	if (dmaengine_slave_config(as->dma.chan_tx, slave_config)) {
		dev_err(&as->pdev->dev,
			"failed to configure tx dma channel\n");
		err = -EINVAL;
	}

	slave_config->direction = DMA_DEV_TO_MEM;
	if (dmaengine_slave_config(as->dma.chan_rx, slave_config)) {
		dev_err(&as->pdev->dev,
			"failed to configure rx dma channel\n");
		err = -EINVAL;
	}

	return err;
}
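/*
 * A caller-side sketch (illustrative; both call sites below actually
 * pass 8, i.e. byte-wide DMA accesses):
 *
 *	struct dma_slave_config cfg;
 *
 *	if (atmel_spi_dma_slave_config(as, &cfg, 8))
 *		;	/* fall back to PIO */
 *
 * src/dst_maxburst stay at 1 because RDR/TDR hold a single data word
 * at a time.
 */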
static bool filter(struct dma_chan *chan, void *pdata)
{
	struct atmel_spi_dma *sl_pdata = pdata;
	struct at_dma_slave *sl;

	if (!sl_pdata)
		return false;

	sl = &sl_pdata->dma_slave;
	if (sl->dma_dev == chan->device->dev) {
		chan->private = sl;
		return true;
	} else {
		return false;
	}
}

static int atmel_spi_configure_dma(struct atmel_spi *as)
{
	struct dma_slave_config slave_config;
	struct device *dev = &as->pdev->dev;
	int err;

	dma_cap_mask_t mask;
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	as->dma.chan_tx = dma_request_slave_channel_compat(mask, filter,
							   &as->dma,
							   dev, "tx");
	if (!as->dma.chan_tx) {
		dev_err(dev,
			"DMA TX channel not available, SPI unable to use DMA\n");
		err = -EBUSY;
		goto error;
	}

	as->dma.chan_rx = dma_request_slave_channel_compat(mask, filter,
							   &as->dma,
							   dev, "rx");

	if (!as->dma.chan_rx) {
		dev_err(dev,
			"DMA RX channel not available, SPI unable to use DMA\n");
		err = -EBUSY;
		goto error;
	}

	err = atmel_spi_dma_slave_config(as, &slave_config, 8);
	if (err)
		goto error;

	dev_info(&as->pdev->dev,
			"Using %s (tx) and %s (rx) for DMA transfers\n",
			dma_chan_name(as->dma.chan_tx),
			dma_chan_name(as->dma.chan_rx));
	return 0;
error:
	if (as->dma.chan_rx)
		dma_release_channel(as->dma.chan_rx);
	if (as->dma.chan_tx)
		dma_release_channel(as->dma.chan_tx);
	return err;
}

static void atmel_spi_stop_dma(struct atmel_spi *as)
{
	if (as->dma.chan_rx)
		as->dma.chan_rx->device->device_control(as->dma.chan_rx,
							DMA_TERMINATE_ALL, 0);
	if (as->dma.chan_tx)
		as->dma.chan_tx->device->device_control(as->dma.chan_tx,
							DMA_TERMINATE_ALL, 0);
}

static void atmel_spi_release_dma(struct atmel_spi *as)
{
	if (as->dma.chan_rx)
		dma_release_channel(as->dma.chan_rx);
	if (as->dma.chan_tx)
		dma_release_channel(as->dma.chan_tx);
}

/* This function is called by the DMA driver from tasklet context */
static void dma_callback(void *data)
{
	struct spi_master *master = data;
	struct atmel_spi *as = spi_master_get_devdata(master);

	/* trigger SPI tasklet */
	tasklet_schedule(&as->tasklet);
}

/*
 * Next transfer using PIO.
 * lock is held, spi tasklet is blocked
 */
static void atmel_spi_next_xfer_pio(struct spi_master *master,
				struct spi_transfer *xfer)
{
	struct atmel_spi *as = spi_master_get_devdata(master);

	dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_pio\n");

	as->current_remaining_bytes = xfer->len;

	/* Make sure data is not remaining in RDR */
	spi_readl(as, RDR);
	while (spi_readl(as, SR) & SPI_BIT(RDRF)) {
		spi_readl(as, RDR);
		cpu_relax();
	}

	if (xfer->tx_buf) {
		if (xfer->bits_per_word > 8)
			spi_writel(as, TDR, *(u16 *)(xfer->tx_buf));
		else
			spi_writel(as, TDR, *(u8 *)(xfer->tx_buf));
	} else {
		spi_writel(as, TDR, 0);
	}

	dev_dbg(master->dev.parent,
		"  start pio xfer %p: len %u tx %p rx %p bitpw %d\n",
		xfer, xfer->len, xfer->tx_buf, xfer->rx_buf,
		xfer->bits_per_word);

	/* Enable relevant interrupts */
	spi_writel(as, IER, SPI_BIT(RDRF) | SPI_BIT(OVRES));
}
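/*
 * PIO transfers are interrupt-paced: only the first word is written
 * above, and atmel_spi_pump_pio_data() (called from the RDRF interrupt)
 * then reads each received word and pushes the next outgoing one,
 * keeping exactly one word in flight at a time.
 */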
/*
 * Submit next transfer for DMA.
 * lock is held, spi tasklet is blocked
 */
static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
				struct spi_transfer *xfer,
				u32 *plen)
{
	struct atmel_spi *as = spi_master_get_devdata(master);
	struct dma_chan *rxchan = as->dma.chan_rx;
	struct dma_chan *txchan = as->dma.chan_tx;
	struct dma_async_tx_descriptor *rxdesc;
	struct dma_async_tx_descriptor *txdesc;
	struct dma_slave_config slave_config;
	dma_cookie_t cookie;
	u32 len = *plen;

	dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_dma_submit\n");

	/* Check that the channels are available */
	if (!rxchan || !txchan)
		return -ENODEV;

	/* release lock for DMA operations */
	atmel_spi_unlock(as);

	/* prepare the RX dma transfer */
	sg_init_table(&as->dma.sgrx, 1);
	if (xfer->rx_buf) {
		as->dma.sgrx.dma_address = xfer->rx_dma + xfer->len - *plen;
	} else {
		as->dma.sgrx.dma_address = as->buffer_dma;
		if (len > BUFFER_SIZE)
			len = BUFFER_SIZE;
	}

	/* prepare the TX dma transfer */
	sg_init_table(&as->dma.sgtx, 1);
	if (xfer->tx_buf) {
		as->dma.sgtx.dma_address = xfer->tx_dma + xfer->len - *plen;
	} else {
		as->dma.sgtx.dma_address = as->buffer_dma;
		if (len > BUFFER_SIZE)
			len = BUFFER_SIZE;
		memset(as->buffer, 0, len);
	}

	sg_dma_len(&as->dma.sgtx) = len;
	sg_dma_len(&as->dma.sgrx) = len;

	*plen = len;

	if (atmel_spi_dma_slave_config(as, &slave_config, 8))
		goto err_exit;

	/* Send both scatterlists */
	rxdesc = rxchan->device->device_prep_slave_sg(rxchan,
					&as->dma.sgrx,
					1,
					DMA_FROM_DEVICE,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK,
					NULL);
	if (!rxdesc)
		goto err_dma;

	txdesc = txchan->device->device_prep_slave_sg(txchan,
					&as->dma.sgtx,
					1,
					DMA_TO_DEVICE,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK,
					NULL);
	if (!txdesc)
		goto err_dma;

	dev_dbg(master->dev.parent,
		"  start dma xfer %p: len %u tx %p/%08x rx %p/%08x\n",
		xfer, xfer->len, xfer->tx_buf, xfer->tx_dma,
		xfer->rx_buf, xfer->rx_dma);

	/* Enable relevant interrupts */
	spi_writel(as, IER, SPI_BIT(OVRES));

	/* Put the callback on the RX transfer only, that should finish last */
	rxdesc->callback = dma_callback;
	rxdesc->callback_param = master;

	/* Submit and fire RX and TX with TX last so we're ready to read! */
	cookie = rxdesc->tx_submit(rxdesc);
	if (dma_submit_error(cookie))
		goto err_dma;
	cookie = txdesc->tx_submit(txdesc);
	if (dma_submit_error(cookie))
		goto err_dma;
	rxchan->device->device_issue_pending(rxchan);
	txchan->device->device_issue_pending(txchan);

	/* take back lock */
	atmel_spi_lock(as);
	return 0;

err_dma:
	spi_writel(as, IDR, SPI_BIT(OVRES));
	atmel_spi_stop_dma(as);
err_exit:
	atmel_spi_lock(as);
	return -ENOMEM;
}
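/*
 * Both scatterlists above are single-entry: half-duplex transfers that
 * lack an rx_buf or tx_buf are pointed at the shared scratch buffer
 * instead, with the length capped at BUFFER_SIZE, so such transfers
 * proceed in chunks and *plen reports how much was actually queued.
 */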
static void atmel_spi_next_xfer_data(struct spi_master *master,
				struct spi_transfer *xfer,
				dma_addr_t *tx_dma,
				dma_addr_t *rx_dma,
				u32 *plen)
{
	struct atmel_spi *as = spi_master_get_devdata(master);
	u32 len = *plen;

	/* use scratch buffer only when rx or tx data is unspecified */
	if (xfer->rx_buf)
		*rx_dma = xfer->rx_dma + xfer->len - *plen;
	else {
		*rx_dma = as->buffer_dma;
		if (len > BUFFER_SIZE)
			len = BUFFER_SIZE;
	}

	if (xfer->tx_buf)
		*tx_dma = xfer->tx_dma + xfer->len - *plen;
	else {
		*tx_dma = as->buffer_dma;
		if (len > BUFFER_SIZE)
			len = BUFFER_SIZE;
		memset(as->buffer, 0, len);
		dma_sync_single_for_device(&as->pdev->dev,
				as->buffer_dma, len, DMA_TO_DEVICE);
	}

	*plen = len;
}
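/*
 * atmel_spi_next_xfer_data() is the PDC-path analogue of the sgrx/sgtx
 * setup above: the same scratch-buffer substitution and BUFFER_SIZE
 * capping, but it produces plain bus addresses for the RPR/TPR (and
 * RNPR/TNPR) pointer registers instead of scatterlists.
 */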
/*
 * Submit next transfer for PDC.
 * lock is held, spi irq is blocked
 */
static void atmel_spi_pdc_next_xfer(struct spi_master *master,
				struct spi_message *msg)
{
	struct atmel_spi *as = spi_master_get_devdata(master);
	struct spi_transfer *xfer;
	u32 len, remaining;
	u32 ieval;
	dma_addr_t tx_dma, rx_dma;

	if (!as->current_transfer)
		xfer = list_entry(msg->transfers.next,
				struct spi_transfer, transfer_list);
	else if (!as->next_transfer)
		xfer = list_entry(as->current_transfer->transfer_list.next,
				struct spi_transfer, transfer_list);
	else
		xfer = NULL;

	if (xfer) {
		spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));

		len = xfer->len;
		atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
		remaining = xfer->len - len;

		spi_writel(as, RPR, rx_dma);
		spi_writel(as, TPR, tx_dma);

		if (msg->spi->bits_per_word > 8)
			len >>= 1;
		spi_writel(as, RCR, len);
		spi_writel(as, TCR, len);

		dev_dbg(&msg->spi->dev,
			"  start xfer %p: len %u tx %p/%08x rx %p/%08x\n",
			xfer, xfer->len, xfer->tx_buf, xfer->tx_dma,
			xfer->rx_buf, xfer->rx_dma);
	} else {
		xfer = as->next_transfer;
		remaining = as->next_remaining_bytes;
	}

	as->current_transfer = xfer;
	as->current_remaining_bytes = remaining;

	if (remaining > 0)
		len = remaining;
	else if (!atmel_spi_xfer_is_last(msg, xfer)
			&& atmel_spi_xfer_can_be_chained(xfer)) {
		xfer = list_entry(xfer->transfer_list.next,
				struct spi_transfer, transfer_list);
		len = xfer->len;
	} else
		xfer = NULL;

	as->next_transfer = xfer;

	if (xfer) {
		u32 total;

		total = len;
		atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
		as->next_remaining_bytes = total - len;

		spi_writel(as, RNPR, rx_dma);
		spi_writel(as, TNPR, tx_dma);

		if (msg->spi->bits_per_word > 8)
			len >>= 1;
		spi_writel(as, RNCR, len);
		spi_writel(as, TNCR, len);

		dev_dbg(&msg->spi->dev,
			"  next xfer %p: len %u tx %p/%08x rx %p/%08x\n",
			xfer, xfer->len, xfer->tx_buf, xfer->tx_dma,
			xfer->rx_buf, xfer->rx_dma);
		ieval = SPI_BIT(ENDRX) | SPI_BIT(OVRES);
	} else {
		spi_writel(as, RNCR, 0);
		spi_writel(as, TNCR, 0);
		ieval = SPI_BIT(RXBUFF) | SPI_BIT(ENDRX) | SPI_BIT(OVRES);
	}

	/* REVISIT: We're waiting for ENDRX before we start the next
	 * transfer because we need to handle some difficult timing
	 * issues otherwise. If we wait for ENDTX in one transfer and
	 * then start waiting for ENDRX in the next, it's difficult
	 * to tell the difference between the ENDRX interrupt we're
	 * actually waiting for and the ENDRX interrupt of the
	 * previous transfer.
	 *
	 * It should be doable, though. Just not now...
	 */
	spi_writel(as, IER, ieval);
	spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN));
}

/*
 * Choose the way to submit the next transfer and start it.
 * lock is held, spi tasklet is blocked
 */
static void atmel_spi_dma_next_xfer(struct spi_master *master,
				struct spi_message *msg)
{
	struct atmel_spi *as = spi_master_get_devdata(master);
	struct spi_transfer *xfer;
	u32 remaining, len;

	remaining = as->current_remaining_bytes;
	if (remaining) {
		xfer = as->current_transfer;
		len = remaining;
	} else {
		if (!as->current_transfer)
			xfer = list_entry(msg->transfers.next,
				struct spi_transfer, transfer_list);
		else
			xfer = list_entry(
				as->current_transfer->transfer_list.next,
				struct spi_transfer, transfer_list);

		as->current_transfer = xfer;
		len = xfer->len;
	}

	if (atmel_spi_use_dma(as, xfer)) {
		u32 total = len;
		if (!atmel_spi_next_xfer_dma_submit(master, xfer, &len)) {
			as->current_remaining_bytes = total - len;
			return;
		} else {
			dev_err(&msg->spi->dev,
				"unable to use DMA, falling back to PIO\n");
		}
	}

	/* use PIO if an error occurred while using DMA */
	atmel_spi_next_xfer_pio(master, xfer);
}

static void atmel_spi_next_message(struct spi_master *master)
{
	struct atmel_spi *as = spi_master_get_devdata(master);
	struct spi_message *msg;
	struct spi_device *spi;

	BUG_ON(as->current_transfer);

	msg = list_entry(as->queue.next, struct spi_message, queue);
	spi = msg->spi;

	dev_dbg(master->dev.parent, "start message %p for %s\n",
			msg, dev_name(&spi->dev));

	/* select chip if it's not still active */
	if (as->stay) {
		if (as->stay != spi) {
			cs_deactivate(as, as->stay);
			cs_activate(as, spi);
		}
		as->stay = NULL;
	} else
		cs_activate(as, spi);

	if (as->use_pdc)
		atmel_spi_pdc_next_xfer(master, msg);
	else
		atmel_spi_dma_next_xfer(master, msg);
}
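/*
 * "as->stay" records a chip select deliberately left active across
 * messages (cs_change semantics): when the next message targets the
 * same device, the deactivate/activate glitch between messages is
 * skipped entirely.
 */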
/*
 * For DMA, tx_buf/tx_dma have the same relationship as rx_buf/rx_dma:
 *  - The buffer is either valid for CPU access, else NULL
 *  - If the buffer is valid, so is its DMA address
 *
 * This driver manages the dma address unless message->is_dma_mapped.
 */
static int
atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer)
{
	struct device *dev = &as->pdev->dev;

	xfer->tx_dma = xfer->rx_dma = INVALID_DMA_ADDRESS;
	if (xfer->tx_buf) {
		/* tx_buf is a const void* where we need a void * for the dma
		 * mapping */
		void *nonconst_tx = (void *)xfer->tx_buf;

		xfer->tx_dma = dma_map_single(dev,
				nonconst_tx, xfer->len,
				DMA_TO_DEVICE);
		if (dma_mapping_error(dev, xfer->tx_dma))
			return -ENOMEM;
	}
	if (xfer->rx_buf) {
		xfer->rx_dma = dma_map_single(dev,
				xfer->rx_buf, xfer->len,
				DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, xfer->rx_dma)) {
			if (xfer->tx_buf)
				dma_unmap_single(dev,
						xfer->tx_dma, xfer->len,
						DMA_TO_DEVICE);
			return -ENOMEM;
		}
	}
	return 0;
}

static void atmel_spi_dma_unmap_xfer(struct spi_master *master,
				     struct spi_transfer *xfer)
{
	if (xfer->tx_dma != INVALID_DMA_ADDRESS)
		dma_unmap_single(master->dev.parent, xfer->tx_dma,
				 xfer->len, DMA_TO_DEVICE);
	if (xfer->rx_dma != INVALID_DMA_ADDRESS)
		dma_unmap_single(master->dev.parent, xfer->rx_dma,
				 xfer->len, DMA_FROM_DEVICE);
}

static void atmel_spi_disable_pdc_transfer(struct atmel_spi *as)
{
	spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
}

static void
atmel_spi_msg_done(struct spi_master *master, struct atmel_spi *as,
		struct spi_message *msg, int stay)
{
	if (!stay || as->done_status < 0)
		cs_deactivate(as, msg->spi);
	else
		as->stay = msg->spi;

	list_del(&msg->queue);
	msg->status = as->done_status;

	dev_dbg(master->dev.parent,
		"xfer complete: %u bytes transferred\n",
		msg->actual_length);

	atmel_spi_unlock(as);
	msg->complete(msg->context);
	atmel_spi_lock(as);

	as->current_transfer = NULL;
	as->next_transfer = NULL;
	as->done_status = 0;

	/* continue if needed */
	if (list_empty(&as->queue) || as->stopping) {
		if (as->use_pdc)
			atmel_spi_disable_pdc_transfer(as);
	} else {
		atmel_spi_next_message(master);
	}
}

/* Called from IRQ
 * lock is held
 *
 * Must update "current_remaining_bytes" to keep track of data
 * to transfer.
 */
static void
atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
{
	u8 *txp;
	u8 *rxp;
	u16 *txp16;
	u16 *rxp16;
	unsigned long xfer_pos = xfer->len - as->current_remaining_bytes;

	if (xfer->rx_buf) {
		if (xfer->bits_per_word > 8) {
			rxp16 = (u16 *)(((u8 *)xfer->rx_buf) + xfer_pos);
			*rxp16 = spi_readl(as, RDR);
		} else {
			rxp = ((u8 *)xfer->rx_buf) + xfer_pos;
			*rxp = spi_readl(as, RDR);
		}
	} else {
		spi_readl(as, RDR);
	}
	if (xfer->bits_per_word > 8) {
		/* current_remaining_bytes is unsigned, so clamp before
		 * subtracting rather than testing for "< 0" afterwards.
		 */
		if (as->current_remaining_bytes > 2)
			as->current_remaining_bytes -= 2;
		else
			as->current_remaining_bytes = 0;
	} else {
		as->current_remaining_bytes--;
	}

	if (as->current_remaining_bytes) {
		if (xfer->tx_buf) {
			if (xfer->bits_per_word > 8) {
				txp16 = (u16 *)(((u8 *)xfer->tx_buf)
							+ xfer_pos + 2);
				spi_writel(as, TDR, *txp16);
			} else {
				txp = ((u8 *)xfer->tx_buf) + xfer_pos + 1;
				spi_writel(as, TDR, *txp);
			}
		} else {
			spi_writel(as, TDR, 0);
		}
	}
}
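/*
 * Note the read/write skew in atmel_spi_pump_pio_data(): the word read
 * from RDR belongs at xfer_pos, while the word written to TDR is the
 * *next* one (xfer_pos + 1 or + 2), which maintains the one-word
 * pipeline primed by atmel_spi_next_xfer_pio().
 */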
/* Tasklet
 * Called from the DMA callback, and from the PIO-transfer and
 * overrun IRQs.
 */
static void atmel_spi_tasklet_func(unsigned long data)
{
	struct spi_master *master = (struct spi_master *)data;
	struct atmel_spi *as = spi_master_get_devdata(master);
	struct spi_message *msg;
	struct spi_transfer *xfer;

	dev_vdbg(master->dev.parent, "atmel_spi_tasklet_func\n");

	atmel_spi_lock(as);

	xfer = as->current_transfer;

	if (xfer == NULL)
		/* already been there */
		goto tasklet_out;

	msg = list_entry(as->queue.next, struct spi_message, queue);

	if (as->current_remaining_bytes == 0) {
		if (as->done_status < 0) {
			/* error happened (overrun) */
			if (atmel_spi_use_dma(as, xfer))
				atmel_spi_stop_dma(as);
		} else {
			/* only update length if no error */
			msg->actual_length += xfer->len;
		}

		if (atmel_spi_use_dma(as, xfer))
			if (!msg->is_dma_mapped)
				atmel_spi_dma_unmap_xfer(master, xfer);

		if (xfer->delay_usecs)
			udelay(xfer->delay_usecs);

		if (atmel_spi_xfer_is_last(msg, xfer) || as->done_status < 0) {
			/* report completed (or erroneous) message */
			atmel_spi_msg_done(master, as, msg, xfer->cs_change);
		} else {
			if (xfer->cs_change) {
				cs_deactivate(as, msg->spi);
				udelay(1);
				cs_activate(as, msg->spi);
			}

			/*
			 * Not done yet. Submit the next transfer.
			 *
			 * FIXME handle protocol options for xfer
			 */
			atmel_spi_dma_next_xfer(master, msg);
		}
	} else {
		/*
		 * Keep going, we still have data to send in
		 * the current transfer.
		 */
		atmel_spi_dma_next_xfer(master, msg);
	}

tasklet_out:
	atmel_spi_unlock(as);
}

/* Interrupt
 *
 * No need for locking in this interrupt handler: done_status is the
 * only information modified.  All we need is that it is updated before
 * the tasklet runs, which is ensured by the memory barrier below.
 */
static irqreturn_t
atmel_spi_pio_interrupt(int irq, void *dev_id)
{
	struct spi_master *master = dev_id;
	struct atmel_spi *as = spi_master_get_devdata(master);
	u32 status, pending, imr;
	struct spi_transfer *xfer;
	int ret = IRQ_NONE;

	imr = spi_readl(as, IMR);
	status = spi_readl(as, SR);
	pending = status & imr;

	if (pending & SPI_BIT(OVRES)) {
		ret = IRQ_HANDLED;
		spi_writel(as, IDR, SPI_BIT(OVRES));
		dev_warn(master->dev.parent, "overrun\n");

		/*
		 * When we get an overrun, we disregard the current
		 * transfer. Data will not be copied back from any
		 * bounce buffer and msg->actual_length will not be
		 * updated with the last xfer.
		 *
		 * We will also not process any remaining transfers in
		 * the message.
		 *
		 * All actions are done in the tasklet, with done_status
		 * as the indication.
		 */
		as->done_status = -EIO;
		smp_wmb();

		/* Clear any overrun happening while cleaning up */
		spi_readl(as, SR);

		tasklet_schedule(&as->tasklet);

	} else if (pending & SPI_BIT(RDRF)) {
		atmel_spi_lock(as);

		if (as->current_remaining_bytes) {
			ret = IRQ_HANDLED;
			xfer = as->current_transfer;
			atmel_spi_pump_pio_data(as, xfer);
			if (!as->current_remaining_bytes) {
				/* no more data to xfer, kick tasklet */
				spi_writel(as, IDR, pending);
				tasklet_schedule(&as->tasklet);
			}
		}

		atmel_spi_unlock(as);
	} else {
		WARN_ONCE(pending, "IRQ not handled, pending = %x\n", pending);
		ret = IRQ_HANDLED;
		spi_writel(as, IDR, pending);
	}

	return ret;
}
1108 * 1109 * All actions are done in tasklet with done_status indication 1110 */ 1111 as->done_status = -EIO; 1112 smp_wmb(); 1113 1114 /* Clear any overrun happening while cleaning up */ 1115 spi_readl(as, SR); 1116 1117 tasklet_schedule(&as->tasklet); 1118 1119 } else if (pending & SPI_BIT(RDRF)) { 1120 atmel_spi_lock(as); 1121 1122 if (as->current_remaining_bytes) { 1123 ret = IRQ_HANDLED; 1124 xfer = as->current_transfer; 1125 atmel_spi_pump_pio_data(as, xfer); 1126 if (!as->current_remaining_bytes) { 1127 /* no more data to xfer, kick tasklet */ 1128 spi_writel(as, IDR, pending); 1129 tasklet_schedule(&as->tasklet); 1130 } 1131 } 1132 1133 atmel_spi_unlock(as); 1134 } else { 1135 WARN_ONCE(pending, "IRQ not handled, pending = %x\n", pending); 1136 ret = IRQ_HANDLED; 1137 spi_writel(as, IDR, pending); 1138 } 1139 1140 return ret; 1141 } 1142 1143 static irqreturn_t 1144 atmel_spi_pdc_interrupt(int irq, void *dev_id) 1145 { 1146 struct spi_master *master = dev_id; 1147 struct atmel_spi *as = spi_master_get_devdata(master); 1148 struct spi_message *msg; 1149 struct spi_transfer *xfer; 1150 u32 status, pending, imr; 1151 int ret = IRQ_NONE; 1152 1153 atmel_spi_lock(as); 1154 1155 xfer = as->current_transfer; 1156 msg = list_entry(as->queue.next, struct spi_message, queue); 1157 1158 imr = spi_readl(as, IMR); 1159 status = spi_readl(as, SR); 1160 pending = status & imr; 1161 1162 if (pending & SPI_BIT(OVRES)) { 1163 int timeout; 1164 1165 ret = IRQ_HANDLED; 1166 1167 spi_writel(as, IDR, (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX) 1168 | SPI_BIT(OVRES))); 1169 1170 /* 1171 * When we get an overrun, we disregard the current 1172 * transfer. Data will not be copied back from any 1173 * bounce buffer and msg->actual_len will not be 1174 * updated with the last xfer. 1175 * 1176 * We will also not process any remaning transfers in 1177 * the message. 1178 * 1179 * First, stop the transfer and unmap the DMA buffers. 1180 */ 1181 spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS)); 1182 if (!msg->is_dma_mapped) 1183 atmel_spi_dma_unmap_xfer(master, xfer); 1184 1185 /* REVISIT: udelay in irq is unfriendly */ 1186 if (xfer->delay_usecs) 1187 udelay(xfer->delay_usecs); 1188 1189 dev_warn(master->dev.parent, "overrun (%u/%u remaining)\n", 1190 spi_readl(as, TCR), spi_readl(as, RCR)); 1191 1192 /* 1193 * Clean up DMA registers and make sure the data 1194 * registers are empty. 
1195 */ 1196 spi_writel(as, RNCR, 0); 1197 spi_writel(as, TNCR, 0); 1198 spi_writel(as, RCR, 0); 1199 spi_writel(as, TCR, 0); 1200 for (timeout = 1000; timeout; timeout--) 1201 if (spi_readl(as, SR) & SPI_BIT(TXEMPTY)) 1202 break; 1203 if (!timeout) 1204 dev_warn(master->dev.parent, 1205 "timeout waiting for TXEMPTY"); 1206 while (spi_readl(as, SR) & SPI_BIT(RDRF)) 1207 spi_readl(as, RDR); 1208 1209 /* Clear any overrun happening while cleaning up */ 1210 spi_readl(as, SR); 1211 1212 as->done_status = -EIO; 1213 atmel_spi_msg_done(master, as, msg, 0); 1214 } else if (pending & (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX))) { 1215 ret = IRQ_HANDLED; 1216 1217 spi_writel(as, IDR, pending); 1218 1219 if (as->current_remaining_bytes == 0) { 1220 msg->actual_length += xfer->len; 1221 1222 if (!msg->is_dma_mapped) 1223 atmel_spi_dma_unmap_xfer(master, xfer); 1224 1225 /* REVISIT: udelay in irq is unfriendly */ 1226 if (xfer->delay_usecs) 1227 udelay(xfer->delay_usecs); 1228 1229 if (atmel_spi_xfer_is_last(msg, xfer)) { 1230 /* report completed message */ 1231 atmel_spi_msg_done(master, as, msg, 1232 xfer->cs_change); 1233 } else { 1234 if (xfer->cs_change) { 1235 cs_deactivate(as, msg->spi); 1236 udelay(1); 1237 cs_activate(as, msg->spi); 1238 } 1239 1240 /* 1241 * Not done yet. Submit the next transfer. 1242 * 1243 * FIXME handle protocol options for xfer 1244 */ 1245 atmel_spi_pdc_next_xfer(master, msg); 1246 } 1247 } else { 1248 /* 1249 * Keep going, we still have data to send in 1250 * the current transfer. 1251 */ 1252 atmel_spi_pdc_next_xfer(master, msg); 1253 } 1254 } 1255 1256 atmel_spi_unlock(as); 1257 1258 return ret; 1259 } 1260 1261 static int atmel_spi_setup(struct spi_device *spi) 1262 { 1263 struct atmel_spi *as; 1264 struct atmel_spi_device *asd; 1265 u32 scbr, csr; 1266 unsigned int bits = spi->bits_per_word; 1267 unsigned long bus_hz; 1268 unsigned int npcs_pin; 1269 int ret; 1270 1271 as = spi_master_get_devdata(spi->master); 1272 1273 if (as->stopping) 1274 return -ESHUTDOWN; 1275 1276 if (spi->chip_select > spi->master->num_chipselect) { 1277 dev_dbg(&spi->dev, 1278 "setup: invalid chipselect %u (%u defined)\n", 1279 spi->chip_select, spi->master->num_chipselect); 1280 return -EINVAL; 1281 } 1282 1283 /* see notes above re chipselect */ 1284 if (!atmel_spi_is_v2(as) 1285 && spi->chip_select == 0 1286 && (spi->mode & SPI_CS_HIGH)) { 1287 dev_dbg(&spi->dev, "setup: can't be active-high\n"); 1288 return -EINVAL; 1289 } 1290 1291 /* v1 chips start out at half the peripheral bus speed. */ 1292 bus_hz = clk_get_rate(as->clk); 1293 if (!atmel_spi_is_v2(as)) 1294 bus_hz /= 2; 1295 1296 if (spi->max_speed_hz) { 1297 /* 1298 * Calculate the lowest divider that satisfies the 1299 * constraint, assuming div32/fdiv/mbz == 0. 1300 */ 1301 scbr = DIV_ROUND_UP(bus_hz, spi->max_speed_hz); 1302 1303 /* 1304 * If the resulting divider doesn't fit into the 1305 * register bitfield, we can't satisfy the constraint. 1306 */ 1307 if (scbr >= (1 << SPI_SCBR_SIZE)) { 1308 dev_dbg(&spi->dev, 1309 "setup: %d Hz too slow, scbr %u; min %ld Hz\n", 1310 spi->max_speed_hz, scbr, bus_hz/255); 1311 return -EINVAL; 1312 } 1313 } else 1314 /* speed zero means "as slow as possible" */ 1315 scbr = 0xff; 1316 1317 csr = SPI_BF(SCBR, scbr) | SPI_BF(BITS, bits - 8); 1318 if (spi->mode & SPI_CPOL) 1319 csr |= SPI_BIT(CPOL); 1320 if (!(spi->mode & SPI_CPHA)) 1321 csr |= SPI_BIT(NCPHA); 1322 1323 /* DLYBS is mostly irrelevant since we manage chipselect using GPIOs. 
1324 * 1325 * DLYBCT would add delays between words, slowing down transfers. 1326 * It could potentially be useful to cope with DMA bottlenecks, but 1327 * in those cases it's probably best to just use a lower bitrate. 1328 */ 1329 csr |= SPI_BF(DLYBS, 0); 1330 csr |= SPI_BF(DLYBCT, 0); 1331 1332 /* chipselect must have been muxed as GPIO (e.g. in board setup) */ 1333 npcs_pin = (unsigned int)spi->controller_data; 1334 1335 if (gpio_is_valid(spi->cs_gpio)) 1336 npcs_pin = spi->cs_gpio; 1337 1338 asd = spi->controller_state; 1339 if (!asd) { 1340 asd = kzalloc(sizeof(struct atmel_spi_device), GFP_KERNEL); 1341 if (!asd) 1342 return -ENOMEM; 1343 1344 ret = gpio_request(npcs_pin, dev_name(&spi->dev)); 1345 if (ret) { 1346 kfree(asd); 1347 return ret; 1348 } 1349 1350 asd->npcs_pin = npcs_pin; 1351 spi->controller_state = asd; 1352 gpio_direction_output(npcs_pin, !(spi->mode & SPI_CS_HIGH)); 1353 } else { 1354 atmel_spi_lock(as); 1355 if (as->stay == spi) 1356 as->stay = NULL; 1357 cs_deactivate(as, spi); 1358 atmel_spi_unlock(as); 1359 } 1360 1361 asd->csr = csr; 1362 1363 dev_dbg(&spi->dev, 1364 "setup: %lu Hz bpw %u mode 0x%x -> csr%d %08x\n", 1365 bus_hz / scbr, bits, spi->mode, spi->chip_select, csr); 1366 1367 if (!atmel_spi_is_v2(as)) 1368 spi_writel(as, CSR0 + 4 * spi->chip_select, csr); 1369 1370 return 0; 1371 } 1372 1373 static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg) 1374 { 1375 struct atmel_spi *as; 1376 struct spi_transfer *xfer; 1377 struct device *controller = spi->master->dev.parent; 1378 u8 bits; 1379 struct atmel_spi_device *asd; 1380 1381 as = spi_master_get_devdata(spi->master); 1382 1383 dev_dbg(controller, "new message %p submitted for %s\n", 1384 msg, dev_name(&spi->dev)); 1385 1386 if (unlikely(list_empty(&msg->transfers))) 1387 return -EINVAL; 1388 1389 if (as->stopping) 1390 return -ESHUTDOWN; 1391 1392 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 1393 if (!(xfer->tx_buf || xfer->rx_buf) && xfer->len) { 1394 dev_dbg(&spi->dev, "missing rx or tx buf\n"); 1395 return -EINVAL; 1396 } 1397 1398 if (xfer->bits_per_word) { 1399 asd = spi->controller_state; 1400 bits = (asd->csr >> 4) & 0xf; 1401 if (bits != xfer->bits_per_word - 8) { 1402 dev_dbg(&spi->dev, "you can't yet change " 1403 "bits_per_word in transfers\n"); 1404 return -ENOPROTOOPT; 1405 } 1406 } 1407 1408 if (xfer->bits_per_word > 8) { 1409 if (xfer->len % 2) { 1410 dev_dbg(&spi->dev, "buffer len should be 16 bits aligned\n"); 1411 return -EINVAL; 1412 } 1413 } 1414 1415 /* FIXME implement these protocol options!! */ 1416 if (xfer->speed_hz < spi->max_speed_hz) { 1417 dev_dbg(&spi->dev, "can't change speed in transfer\n"); 1418 return -ENOPROTOOPT; 1419 } 1420 1421 /* 1422 * DMA map early, for performance (empties dcache ASAP) and 1423 * better fault reporting. 
1424 */ 1425 if ((!msg->is_dma_mapped) && (atmel_spi_use_dma(as, xfer) 1426 || as->use_pdc)) { 1427 if (atmel_spi_dma_map_xfer(as, xfer) < 0) 1428 return -ENOMEM; 1429 } 1430 } 1431 1432 #ifdef VERBOSE 1433 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 1434 dev_dbg(controller, 1435 " xfer %p: len %u tx %p/%08x rx %p/%08x\n", 1436 xfer, xfer->len, 1437 xfer->tx_buf, xfer->tx_dma, 1438 xfer->rx_buf, xfer->rx_dma); 1439 } 1440 #endif 1441 1442 msg->status = -EINPROGRESS; 1443 msg->actual_length = 0; 1444 1445 atmel_spi_lock(as); 1446 list_add_tail(&msg->queue, &as->queue); 1447 if (!as->current_transfer) 1448 atmel_spi_next_message(spi->master); 1449 atmel_spi_unlock(as); 1450 1451 return 0; 1452 } 1453 1454 static void atmel_spi_cleanup(struct spi_device *spi) 1455 { 1456 struct atmel_spi *as = spi_master_get_devdata(spi->master); 1457 struct atmel_spi_device *asd = spi->controller_state; 1458 unsigned gpio = (unsigned) spi->controller_data; 1459 1460 if (!asd) 1461 return; 1462 1463 atmel_spi_lock(as); 1464 if (as->stay == spi) { 1465 as->stay = NULL; 1466 cs_deactivate(as, spi); 1467 } 1468 atmel_spi_unlock(as); 1469 1470 spi->controller_state = NULL; 1471 gpio_free(gpio); 1472 kfree(asd); 1473 } 1474 1475 static inline unsigned int atmel_get_version(struct atmel_spi *as) 1476 { 1477 return spi_readl(as, VERSION) & 0x00000fff; 1478 } 1479 1480 static void atmel_get_caps(struct atmel_spi *as) 1481 { 1482 unsigned int version; 1483 1484 version = atmel_get_version(as); 1485 dev_info(&as->pdev->dev, "version: 0x%x\n", version); 1486 1487 as->caps.is_spi2 = version > 0x121; 1488 as->caps.has_wdrbt = version >= 0x210; 1489 as->caps.has_dma_support = version >= 0x212; 1490 } 1491 1492 /*-------------------------------------------------------------------------*/ 1493 1494 static int atmel_spi_probe(struct platform_device *pdev) 1495 { 1496 struct resource *regs; 1497 int irq; 1498 struct clk *clk; 1499 int ret; 1500 struct spi_master *master; 1501 struct atmel_spi *as; 1502 1503 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1504 if (!regs) 1505 return -ENXIO; 1506 1507 irq = platform_get_irq(pdev, 0); 1508 if (irq < 0) 1509 return irq; 1510 1511 clk = clk_get(&pdev->dev, "spi_clk"); 1512 if (IS_ERR(clk)) 1513 return PTR_ERR(clk); 1514 1515 /* setup spi core then atmel-specific driver state */ 1516 ret = -ENOMEM; 1517 master = spi_alloc_master(&pdev->dev, sizeof *as); 1518 if (!master) 1519 goto out_free; 1520 1521 /* the spi->mode bits understood by this driver: */ 1522 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; 1523 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16); 1524 master->dev.of_node = pdev->dev.of_node; 1525 master->bus_num = pdev->id; 1526 master->num_chipselect = master->dev.of_node ? 0 : 4; 1527 master->setup = atmel_spi_setup; 1528 master->transfer = atmel_spi_transfer; 1529 master->cleanup = atmel_spi_cleanup; 1530 platform_set_drvdata(pdev, master); 1531 1532 as = spi_master_get_devdata(master); 1533 1534 /* 1535 * Scratch buffer is used for throwaway rx and tx data. 1536 * It's coherent to minimize dcache pollution. 
1537 */ 1538 as->buffer = dma_alloc_coherent(&pdev->dev, BUFFER_SIZE, 1539 &as->buffer_dma, GFP_KERNEL); 1540 if (!as->buffer) 1541 goto out_free; 1542 1543 spin_lock_init(&as->lock); 1544 INIT_LIST_HEAD(&as->queue); 1545 1546 as->pdev = pdev; 1547 as->regs = ioremap(regs->start, resource_size(regs)); 1548 if (!as->regs) 1549 goto out_free_buffer; 1550 as->phybase = regs->start; 1551 as->irq = irq; 1552 as->clk = clk; 1553 1554 atmel_get_caps(as); 1555 1556 as->use_dma = false; 1557 as->use_pdc = false; 1558 if (as->caps.has_dma_support) { 1559 if (atmel_spi_configure_dma(as) == 0) 1560 as->use_dma = true; 1561 } else { 1562 as->use_pdc = true; 1563 } 1564 1565 if (as->caps.has_dma_support && !as->use_dma) 1566 dev_info(&pdev->dev, "Atmel SPI Controller using PIO only\n"); 1567 1568 if (as->use_pdc) { 1569 ret = request_irq(irq, atmel_spi_pdc_interrupt, 0, 1570 dev_name(&pdev->dev), master); 1571 } else { 1572 tasklet_init(&as->tasklet, atmel_spi_tasklet_func, 1573 (unsigned long)master); 1574 1575 ret = request_irq(irq, atmel_spi_pio_interrupt, 0, 1576 dev_name(&pdev->dev), master); 1577 } 1578 if (ret) 1579 goto out_unmap_regs; 1580 1581 /* Initialize the hardware */ 1582 clk_enable(clk); 1583 spi_writel(as, CR, SPI_BIT(SWRST)); 1584 spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ 1585 if (as->caps.has_wdrbt) { 1586 spi_writel(as, MR, SPI_BIT(WDRBT) | SPI_BIT(MODFDIS) 1587 | SPI_BIT(MSTR)); 1588 } else { 1589 spi_writel(as, MR, SPI_BIT(MSTR) | SPI_BIT(MODFDIS)); 1590 } 1591 1592 if (as->use_pdc) 1593 spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS)); 1594 spi_writel(as, CR, SPI_BIT(SPIEN)); 1595 1596 /* go! */ 1597 dev_info(&pdev->dev, "Atmel SPI Controller at 0x%08lx (irq %d)\n", 1598 (unsigned long)regs->start, irq); 1599 1600 ret = spi_register_master(master); 1601 if (ret) 1602 goto out_free_dma; 1603 1604 return 0; 1605 1606 out_free_dma: 1607 if (as->use_dma) 1608 atmel_spi_release_dma(as); 1609 1610 spi_writel(as, CR, SPI_BIT(SWRST)); 1611 spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ 1612 clk_disable(clk); 1613 free_irq(irq, master); 1614 out_unmap_regs: 1615 iounmap(as->regs); 1616 out_free_buffer: 1617 if (!as->use_pdc) 1618 tasklet_kill(&as->tasklet); 1619 dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer, 1620 as->buffer_dma); 1621 out_free: 1622 clk_put(clk); 1623 spi_master_put(master); 1624 return ret; 1625 } 1626 1627 static int atmel_spi_remove(struct platform_device *pdev) 1628 { 1629 struct spi_master *master = platform_get_drvdata(pdev); 1630 struct atmel_spi *as = spi_master_get_devdata(master); 1631 struct spi_message *msg; 1632 struct spi_transfer *xfer; 1633 1634 /* reset the hardware and block queue progress */ 1635 spin_lock_irq(&as->lock); 1636 as->stopping = 1; 1637 if (as->use_dma) { 1638 atmel_spi_stop_dma(as); 1639 atmel_spi_release_dma(as); 1640 } 1641 1642 spi_writel(as, CR, SPI_BIT(SWRST)); 1643 spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ 1644 spi_readl(as, SR); 1645 spin_unlock_irq(&as->lock); 1646 1647 /* Terminate remaining queued transfers */ 1648 list_for_each_entry(msg, &as->queue, queue) { 1649 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 1650 if (!msg->is_dma_mapped 1651 && (atmel_spi_use_dma(as, xfer) 1652 || as->use_pdc)) 1653 atmel_spi_dma_unmap_xfer(master, xfer); 1654 } 1655 msg->status = -ESHUTDOWN; 1656 msg->complete(msg->context); 1657 } 1658 1659 if (!as->use_pdc) 1660 tasklet_kill(&as->tasklet); 1661 dma_free_coherent(&pdev->dev, 
#ifdef CONFIG_PM

static int atmel_spi_suspend(struct platform_device *pdev, pm_message_t mesg)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct atmel_spi *as = spi_master_get_devdata(master);

	clk_disable(as->clk);
	return 0;
}

static int atmel_spi_resume(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct atmel_spi *as = spi_master_get_devdata(master);

	clk_enable(as->clk);
	return 0;
}

#else
#define atmel_spi_suspend	NULL
#define atmel_spi_resume	NULL
#endif

#if defined(CONFIG_OF)
static const struct of_device_id atmel_spi_dt_ids[] = {
	{ .compatible = "atmel,at91rm9200-spi" },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, atmel_spi_dt_ids);
#endif

static struct platform_driver atmel_spi_driver = {
	.driver		= {
		.name	= "atmel_spi",
		.owner	= THIS_MODULE,
		.of_match_table	= of_match_ptr(atmel_spi_dt_ids),
	},
	.suspend	= atmel_spi_suspend,
	.resume		= atmel_spi_resume,
	.probe		= atmel_spi_probe,
	.remove		= atmel_spi_remove,
};
module_platform_driver(atmel_spi_driver);

MODULE_DESCRIPTION("Atmel AT32/AT91 SPI Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:atmel_spi");