/*
 * Driver for Atmel AT32 and AT91 SPI Controllers
 *
 * Copyright (C) 2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/platform_data/atmel.h>
#include <linux/platform_data/dma-atmel.h>
#include <linux/of.h>

#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>

/* SPI register offsets */
#define SPI_CR					0x0000
#define SPI_MR					0x0004
#define SPI_RDR					0x0008
#define SPI_TDR					0x000c
#define SPI_SR					0x0010
#define SPI_IER					0x0014
#define SPI_IDR					0x0018
#define SPI_IMR					0x001c
#define SPI_CSR0				0x0030
#define SPI_CSR1				0x0034
#define SPI_CSR2				0x0038
#define SPI_CSR3				0x003c
#define SPI_VERSION				0x00fc
#define SPI_RPR					0x0100
#define SPI_RCR					0x0104
#define SPI_TPR					0x0108
#define SPI_TCR					0x010c
#define SPI_RNPR				0x0110
#define SPI_RNCR				0x0114
#define SPI_TNPR				0x0118
#define SPI_TNCR				0x011c
#define SPI_PTCR				0x0120
#define SPI_PTSR				0x0124

/* Bitfields in CR */
#define SPI_SPIEN_OFFSET			0
#define SPI_SPIEN_SIZE				1
#define SPI_SPIDIS_OFFSET			1
#define SPI_SPIDIS_SIZE				1
#define SPI_SWRST_OFFSET			7
#define SPI_SWRST_SIZE				1
#define SPI_LASTXFER_OFFSET			24
#define SPI_LASTXFER_SIZE			1

/* Bitfields in MR */
#define SPI_MSTR_OFFSET				0
#define SPI_MSTR_SIZE				1
#define SPI_PS_OFFSET				1
#define SPI_PS_SIZE				1
#define SPI_PCSDEC_OFFSET			2
#define SPI_PCSDEC_SIZE				1
#define SPI_FDIV_OFFSET				3
#define SPI_FDIV_SIZE				1
#define SPI_MODFDIS_OFFSET			4
#define SPI_MODFDIS_SIZE			1
#define SPI_WDRBT_OFFSET			5
#define SPI_WDRBT_SIZE				1
#define SPI_LLB_OFFSET				7
#define SPI_LLB_SIZE				1
#define SPI_PCS_OFFSET				16
#define SPI_PCS_SIZE				4
#define SPI_DLYBCS_OFFSET			24
#define SPI_DLYBCS_SIZE				8

/* Bitfields in RDR */
#define SPI_RD_OFFSET				0
#define SPI_RD_SIZE				16

/* Bitfields in TDR */
#define SPI_TD_OFFSET				0
#define SPI_TD_SIZE				16

/* Bitfields in SR */
#define SPI_RDRF_OFFSET				0
#define SPI_RDRF_SIZE				1
#define SPI_TDRE_OFFSET				1
#define SPI_TDRE_SIZE				1
#define SPI_MODF_OFFSET				2
#define SPI_MODF_SIZE				1
#define SPI_OVRES_OFFSET			3
#define SPI_OVRES_SIZE				1
#define SPI_ENDRX_OFFSET			4
#define SPI_ENDRX_SIZE				1
#define SPI_ENDTX_OFFSET			5
#define SPI_ENDTX_SIZE				1
#define SPI_RXBUFF_OFFSET			6
#define SPI_RXBUFF_SIZE				1
#define SPI_TXBUFE_OFFSET			7
#define SPI_TXBUFE_SIZE				1
#define SPI_NSSR_OFFSET				8
#define SPI_NSSR_SIZE				1
#define SPI_TXEMPTY_OFFSET			9
#define SPI_TXEMPTY_SIZE			1
#define SPI_SPIENS_OFFSET			16
#define SPI_SPIENS_SIZE				1

/* Bitfields in CSR0 */
#define SPI_CPOL_OFFSET				0
#define SPI_CPOL_SIZE				1
#define SPI_NCPHA_OFFSET			1
#define SPI_NCPHA_SIZE				1
#define SPI_CSAAT_OFFSET			3
#define SPI_CSAAT_SIZE				1
#define SPI_BITS_OFFSET				4
#define SPI_BITS_SIZE				4
#define SPI_SCBR_OFFSET				8
#define SPI_SCBR_SIZE				8
#define SPI_DLYBS_OFFSET			16
#define SPI_DLYBS_SIZE				8
#define SPI_DLYBCT_OFFSET			24
#define SPI_DLYBCT_SIZE				8

/* Bitfields in RCR */
#define SPI_RXCTR_OFFSET			0
#define SPI_RXCTR_SIZE				16

/* Bitfields in TCR */
#define SPI_TXCTR_OFFSET			0
#define SPI_TXCTR_SIZE				16

/* Bitfields in RNCR */
#define SPI_RXNCR_OFFSET			0
#define SPI_RXNCR_SIZE				16

/* Bitfields in TNCR */
#define SPI_TXNCR_OFFSET			0
#define SPI_TXNCR_SIZE				16

/* Bitfields in PTCR */
#define SPI_RXTEN_OFFSET			0
#define SPI_RXTEN_SIZE				1
#define SPI_RXTDIS_OFFSET			1
#define SPI_RXTDIS_SIZE				1
#define SPI_TXTEN_OFFSET			8
#define SPI_TXTEN_SIZE				1
#define SPI_TXTDIS_OFFSET			9
#define SPI_TXTDIS_SIZE				1

/* Constants for BITS */
#define SPI_BITS_8_BPT				0
#define SPI_BITS_9_BPT				1
#define SPI_BITS_10_BPT				2
#define SPI_BITS_11_BPT				3
#define SPI_BITS_12_BPT				4
#define SPI_BITS_13_BPT				5
#define SPI_BITS_14_BPT				6
#define SPI_BITS_15_BPT				7
#define SPI_BITS_16_BPT				8

/* Bit manipulation macros */
#define SPI_BIT(name) \
	(1 << SPI_##name##_OFFSET)
#define SPI_BF(name, value) \
	(((value) & ((1 << SPI_##name##_SIZE) - 1)) << SPI_##name##_OFFSET)
#define SPI_BFEXT(name, value) \
	(((value) >> SPI_##name##_OFFSET) & ((1 << SPI_##name##_SIZE) - 1))
#define SPI_BFINS(name, value, old) \
	(((old) & ~(((1 << SPI_##name##_SIZE) - 1) << SPI_##name##_OFFSET)) \
	  | SPI_BF(name, value))

/* Register access macros */
#define spi_readl(port, reg) \
	__raw_readl((port)->regs + SPI_##reg)
#define spi_writel(port, reg, value) \
	__raw_writel((value), (port)->regs + SPI_##reg)
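
/*
 * Example (illustrative commentary, not part of the original driver): the
 * token-pasting macros above expand against the *_OFFSET/*_SIZE pairs, so
 *
 *	SPI_BIT(MODFDIS)	== 1 << 4	   == 0x00000010
 *	SPI_BF(SCBR, 6)		== (6 & 0xff) << 8 == 0x00000600
 *	SPI_BFEXT(BITS, csr)	== (csr >> 4) & 0xf
 *
 * and spi_writel(as, MR, val) is a raw write of val to as->regs + 0x0004.
 */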

/* use PIO for small transfers, avoiding DMA setup/teardown overhead and
 * cache operations; better heuristics consider wordsize and bitrate.
 */
#define DMA_MIN_BYTES	16

#define SPI_DMA_TIMEOUT		(msecs_to_jiffies(1000))

#define AUTOSUSPEND_TIMEOUT	2000

struct atmel_spi_dma {
	struct dma_chan			*chan_rx;
	struct dma_chan			*chan_tx;
	struct scatterlist		sgrx;
	struct scatterlist		sgtx;
	struct dma_async_tx_descriptor	*data_desc_rx;
	struct dma_async_tx_descriptor	*data_desc_tx;

	struct at_dma_slave	dma_slave;
};

struct atmel_spi_caps {
	bool	is_spi2;
	bool	has_wdrbt;
	bool	has_dma_support;
};

/*
 * The core SPI transfer engine just talks to a register bank to set up
 * DMA transfers; transfer queue progress is driven by IRQs.  The clock
 * framework provides the base clock, subdivided for each spi_device.
 */
struct atmel_spi {
	spinlock_t		lock;
	unsigned long		flags;

	phys_addr_t		phybase;
	void __iomem		*regs;
	int			irq;
	struct clk		*clk;
	struct platform_device	*pdev;

	struct spi_transfer	*current_transfer;
	int			current_remaining_bytes;
	int			done_status;

	struct completion	xfer_completion;

	/* scratch buffer */
	void			*buffer;
	dma_addr_t		buffer_dma;

	struct atmel_spi_caps	caps;

	bool			use_dma;
	bool			use_pdc;
	/* dmaengine data */
	struct atmel_spi_dma	dma;

	bool			keep_cs;
	bool			cs_active;
};

/* Controller-specific per-slave state */
struct atmel_spi_device {
	unsigned int		npcs_pin;
	u32			csr;
};

#define BUFFER_SIZE		PAGE_SIZE
#define INVALID_DMA_ADDRESS	0xffffffff

/*
 * Version 2 of the SPI controller has
 *  - CR.LASTXFER
 *  - SPI_MR.DIV32 may become FDIV or must-be-zero (here: always zero)
 *  - SPI_SR.TXEMPTY, SPI_SR.NSSR (and corresponding irqs)
 *  - SPI_CSRx.CSAAT
 *  - SPI_CSRx.SCBR allows faster clocking
 */
static bool atmel_spi_is_v2(struct atmel_spi *as)
{
	return as->caps.is_spi2;
}

/*
 * Earlier SPI controllers (e.g. on at91rm9200) have a design bug whereby
 * they assume that spi slave device state will not change on deselect, so
 * that automagic deselection is OK.  ("NPCSx rises if no data is to be
 * transmitted")  Not so!  Workaround uses nCSx pins as GPIOs; or newer
 * controllers have CSAAT and friends.
 *
 * Since the CSAAT functionality is a bit weird on newer controllers as
 * well, we use GPIO to control nCSx pins on all controllers, updating
 * MR.PCS to avoid confusing the controller.  Using GPIOs also lets us
 * support active-high chipselects despite the controller's belief that
 * only active-low devices/systems exist.
 *
 * However, at91rm9200 has a second erratum whereby nCS0 doesn't work
 * right when driven with GPIO.  ("Mode Fault does not allow more than one
 * Master on Chip Select 0.")  No workaround exists for that ... so for
 * nCS0 on that chip, we (a) don't use the GPIO, (b) can't support CS_HIGH,
 * and (c) will trigger that first erratum in some cases.
 */
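
/*
 * Note (illustrative, not from the original source): MR.PCS is a four-bit
 * active-low select mask, so the cs_activate()/cs_deactivate() helpers
 * below program it as ~(1 << chip_select).  For chip_select 1, for
 * example, SPI_BF(PCS, ~(1 << 1)) yields PCS = 0xd (0b1101), and
 * deselection writes the "no device selected" value PCS = 0xf.
 */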

static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
{
	struct atmel_spi_device *asd = spi->controller_state;
	unsigned active = spi->mode & SPI_CS_HIGH;
	u32 mr;

	if (atmel_spi_is_v2(as)) {
		spi_writel(as, CSR0 + 4 * spi->chip_select, asd->csr);
		/* For the low SPI version, there is an issue where PDC
		 * transfers on CS1,2,3 need SPI_CSR0.BITS configured the
		 * same as SPI_CSR1,2,3.BITS.
		 */
		spi_writel(as, CSR0, asd->csr);
		if (as->caps.has_wdrbt) {
			spi_writel(as, MR,
					SPI_BF(PCS, ~(0x01 << spi->chip_select))
					| SPI_BIT(WDRBT)
					| SPI_BIT(MODFDIS)
					| SPI_BIT(MSTR));
		} else {
			spi_writel(as, MR,
					SPI_BF(PCS, ~(0x01 << spi->chip_select))
					| SPI_BIT(MODFDIS)
					| SPI_BIT(MSTR));
		}

		mr = spi_readl(as, MR);
		gpio_set_value(asd->npcs_pin, active);
	} else {
		u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0;
		int i;
		u32 csr;

		/* Make sure clock polarity is correct */
		for (i = 0; i < spi->master->num_chipselect; i++) {
			csr = spi_readl(as, CSR0 + 4 * i);
			if ((csr ^ cpol) & SPI_BIT(CPOL))
				spi_writel(as, CSR0 + 4 * i,
						csr ^ SPI_BIT(CPOL));
		}

		mr = spi_readl(as, MR);
		mr = SPI_BFINS(PCS, ~(1 << spi->chip_select), mr);
		if (spi->chip_select != 0)
			gpio_set_value(asd->npcs_pin, active);
		spi_writel(as, MR, mr);
	}

	dev_dbg(&spi->dev, "activate %u%s, mr %08x\n",
			asd->npcs_pin, active ? " (high)" : "",
			mr);
}

static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
{
	struct atmel_spi_device *asd = spi->controller_state;
	unsigned active = spi->mode & SPI_CS_HIGH;
	u32 mr;

	/* only deactivate *this* device; sometimes transfers to
	 * another device may be active when this routine is called.
	 */
	mr = spi_readl(as, MR);
	if (~SPI_BFEXT(PCS, mr) & (1 << spi->chip_select)) {
		mr = SPI_BFINS(PCS, 0xf, mr);
		spi_writel(as, MR, mr);
	}

	dev_dbg(&spi->dev, "DEactivate %u%s, mr %08x\n",
			asd->npcs_pin, active ? " (low)" : "",
			mr);

	if (atmel_spi_is_v2(as) || spi->chip_select != 0)
		gpio_set_value(asd->npcs_pin, !active);
}

static void atmel_spi_lock(struct atmel_spi *as) __acquires(&as->lock)
{
	spin_lock_irqsave(&as->lock, as->flags);
}

static void atmel_spi_unlock(struct atmel_spi *as) __releases(&as->lock)
{
	spin_unlock_irqrestore(&as->lock, as->flags);
}

static inline bool atmel_spi_use_dma(struct atmel_spi *as,
				struct spi_transfer *xfer)
{
	return as->use_dma && xfer->len >= DMA_MIN_BYTES;
}

static int atmel_spi_dma_slave_config(struct atmel_spi *as,
				struct dma_slave_config *slave_config,
				u8 bits_per_word)
{
	int err = 0;

	if (bits_per_word > 8) {
		slave_config->dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		slave_config->src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	} else {
		slave_config->dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		slave_config->src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	}

	slave_config->dst_addr = (dma_addr_t)as->phybase + SPI_TDR;
	slave_config->src_addr = (dma_addr_t)as->phybase + SPI_RDR;
	slave_config->src_maxburst = 1;
	slave_config->dst_maxburst = 1;
	slave_config->device_fc = false;

	slave_config->direction = DMA_MEM_TO_DEV;
	if (dmaengine_slave_config(as->dma.chan_tx, slave_config)) {
		dev_err(&as->pdev->dev,
			"failed to configure tx dma channel\n");
		err = -EINVAL;
	}

	slave_config->direction = DMA_DEV_TO_MEM;
	if (dmaengine_slave_config(as->dma.chan_rx, slave_config)) {
		dev_err(&as->pdev->dev,
			"failed to configure rx dma channel\n");
		err = -EINVAL;
	}

	return err;
}
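
/*
 * Illustration (added commentary, not from the original driver): the slave
 * config above ties the DMA access width to the word size.  A transfer with
 * bits_per_word = 16 is configured for DMA_SLAVE_BUSWIDTH_2_BYTES, so the
 * DMA controller moves one 16-bit word per beat between memory and the
 * TDR/RDR data registers at phybase + 0x000c / phybase + 0x0008.
 */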

static int atmel_spi_configure_dma(struct atmel_spi *as)
{
	struct dma_slave_config slave_config;
	struct device *dev = &as->pdev->dev;
	int err;

	dma_cap_mask_t mask;
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	as->dma.chan_tx = dma_request_slave_channel_reason(dev, "tx");
	if (IS_ERR(as->dma.chan_tx)) {
		err = PTR_ERR(as->dma.chan_tx);
		if (err == -EPROBE_DEFER) {
			dev_warn(dev, "no DMA channel available at the moment\n");
			return err;
		}
		dev_err(dev,
			"DMA TX channel not available, SPI unable to use DMA\n");
		err = -EBUSY;
		goto error;
	}

	/*
	 * No reason to check EPROBE_DEFER here since we have already
	 * requested the tx channel.  If it fails here, it's for another
	 * reason.
	 */
	as->dma.chan_rx = dma_request_slave_channel(dev, "rx");

	if (!as->dma.chan_rx) {
		dev_err(dev,
			"DMA RX channel not available, SPI unable to use DMA\n");
		err = -EBUSY;
		goto error;
	}

	err = atmel_spi_dma_slave_config(as, &slave_config, 8);
	if (err)
		goto error;

	dev_info(&as->pdev->dev,
			"Using %s (tx) and %s (rx) for DMA transfers\n",
			dma_chan_name(as->dma.chan_tx),
			dma_chan_name(as->dma.chan_rx));
	return 0;
error:
	if (as->dma.chan_rx)
		dma_release_channel(as->dma.chan_rx);
	if (!IS_ERR(as->dma.chan_tx))
		dma_release_channel(as->dma.chan_tx);
	return err;
}

static void atmel_spi_stop_dma(struct atmel_spi *as)
{
	if (as->dma.chan_rx)
		dmaengine_terminate_all(as->dma.chan_rx);
	if (as->dma.chan_tx)
		dmaengine_terminate_all(as->dma.chan_tx);
}

static void atmel_spi_release_dma(struct atmel_spi *as)
{
	if (as->dma.chan_rx)
		dma_release_channel(as->dma.chan_rx);
	if (as->dma.chan_tx)
		dma_release_channel(as->dma.chan_tx);
}

/* This function is called by the DMA driver from tasklet context */
static void dma_callback(void *data)
{
	struct spi_master	*master = data;
	struct atmel_spi	*as = spi_master_get_devdata(master);

	complete(&as->xfer_completion);
}

/*
 * Next transfer using PIO.
 */
static void atmel_spi_next_xfer_pio(struct spi_master *master,
				struct spi_transfer *xfer)
{
	struct atmel_spi	*as = spi_master_get_devdata(master);
	unsigned long		xfer_pos = xfer->len - as->current_remaining_bytes;

	dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_pio\n");

	/* Make sure no stale data is left in the RDR */
	spi_readl(as, RDR);
	while (spi_readl(as, SR) & SPI_BIT(RDRF)) {
		spi_readl(as, RDR);
		cpu_relax();
	}

	if (xfer->tx_buf) {
		if (xfer->bits_per_word > 8)
			spi_writel(as, TDR, *(u16 *)(xfer->tx_buf + xfer_pos));
		else
			spi_writel(as, TDR, *(u8 *)(xfer->tx_buf + xfer_pos));
	} else {
		spi_writel(as, TDR, 0);
	}

	dev_dbg(master->dev.parent,
		"  start pio xfer %p: len %u tx %p rx %p bitpw %d\n",
		xfer, xfer->len, xfer->tx_buf, xfer->rx_buf,
		xfer->bits_per_word);

	/* Enable relevant interrupts */
	spi_writel(as, IER, SPI_BIT(RDRF) | SPI_BIT(OVRES));
}
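
/*
 * Back-of-the-envelope note (added, hedged): the PIO path above takes one
 * RDRF interrupt per word.  At, say, a 1 MHz bit clock with 8-bit words,
 * that is an interrupt roughly every 8 us, which is why transfers of
 * DMA_MIN_BYTES (16) or more prefer the dmaengine path when available.
 */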
536 */ 537 static int atmel_spi_next_xfer_dma_submit(struct spi_master *master, 538 struct spi_transfer *xfer, 539 u32 *plen) 540 { 541 struct atmel_spi *as = spi_master_get_devdata(master); 542 struct dma_chan *rxchan = as->dma.chan_rx; 543 struct dma_chan *txchan = as->dma.chan_tx; 544 struct dma_async_tx_descriptor *rxdesc; 545 struct dma_async_tx_descriptor *txdesc; 546 struct dma_slave_config slave_config; 547 dma_cookie_t cookie; 548 u32 len = *plen; 549 550 dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_dma_submit\n"); 551 552 /* Check that the channels are available */ 553 if (!rxchan || !txchan) 554 return -ENODEV; 555 556 /* release lock for DMA operations */ 557 atmel_spi_unlock(as); 558 559 /* prepare the RX dma transfer */ 560 sg_init_table(&as->dma.sgrx, 1); 561 if (xfer->rx_buf) { 562 as->dma.sgrx.dma_address = xfer->rx_dma + xfer->len - *plen; 563 } else { 564 as->dma.sgrx.dma_address = as->buffer_dma; 565 if (len > BUFFER_SIZE) 566 len = BUFFER_SIZE; 567 } 568 569 /* prepare the TX dma transfer */ 570 sg_init_table(&as->dma.sgtx, 1); 571 if (xfer->tx_buf) { 572 as->dma.sgtx.dma_address = xfer->tx_dma + xfer->len - *plen; 573 } else { 574 as->dma.sgtx.dma_address = as->buffer_dma; 575 if (len > BUFFER_SIZE) 576 len = BUFFER_SIZE; 577 memset(as->buffer, 0, len); 578 } 579 580 sg_dma_len(&as->dma.sgtx) = len; 581 sg_dma_len(&as->dma.sgrx) = len; 582 583 *plen = len; 584 585 if (atmel_spi_dma_slave_config(as, &slave_config, 8)) 586 goto err_exit; 587 588 /* Send both scatterlists */ 589 rxdesc = dmaengine_prep_slave_sg(rxchan, &as->dma.sgrx, 1, 590 DMA_FROM_DEVICE, 591 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 592 if (!rxdesc) 593 goto err_dma; 594 595 txdesc = dmaengine_prep_slave_sg(txchan, &as->dma.sgtx, 1, 596 DMA_TO_DEVICE, 597 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 598 if (!txdesc) 599 goto err_dma; 600 601 dev_dbg(master->dev.parent, 602 " start dma xfer %p: len %u tx %p/%08llx rx %p/%08llx\n", 603 xfer, xfer->len, xfer->tx_buf, (unsigned long long)xfer->tx_dma, 604 xfer->rx_buf, (unsigned long long)xfer->rx_dma); 605 606 /* Enable relevant interrupts */ 607 spi_writel(as, IER, SPI_BIT(OVRES)); 608 609 /* Put the callback on the RX transfer only, that should finish last */ 610 rxdesc->callback = dma_callback; 611 rxdesc->callback_param = master; 612 613 /* Submit and fire RX and TX with TX last so we're ready to read! 

static void atmel_spi_next_xfer_data(struct spi_master *master,
				struct spi_transfer *xfer,
				dma_addr_t *tx_dma,
				dma_addr_t *rx_dma,
				u32 *plen)
{
	struct atmel_spi	*as = spi_master_get_devdata(master);
	u32			len = *plen;

	/* use scratch buffer only when rx or tx data is unspecified */
	if (xfer->rx_buf)
		*rx_dma = xfer->rx_dma + xfer->len - *plen;
	else {
		*rx_dma = as->buffer_dma;
		if (len > BUFFER_SIZE)
			len = BUFFER_SIZE;
	}

	if (xfer->tx_buf)
		*tx_dma = xfer->tx_dma + xfer->len - *plen;
	else {
		*tx_dma = as->buffer_dma;
		if (len > BUFFER_SIZE)
			len = BUFFER_SIZE;
		memset(as->buffer, 0, len);
		dma_sync_single_for_device(&as->pdev->dev,
				as->buffer_dma, len, DMA_TO_DEVICE);
	}

	*plen = len;
}

static int atmel_spi_set_xfer_speed(struct atmel_spi *as,
				    struct spi_device *spi,
				    struct spi_transfer *xfer)
{
	u32			scbr, csr;
	unsigned long		bus_hz;

	/* v1 chips start out at half the peripheral bus speed. */
	bus_hz = clk_get_rate(as->clk);
	if (!atmel_spi_is_v2(as))
		bus_hz /= 2;

	/*
	 * Calculate the lowest divider that satisfies the
	 * constraint, assuming div32/fdiv/mbz == 0.
	 */
	if (xfer->speed_hz)
		scbr = DIV_ROUND_UP(bus_hz, xfer->speed_hz);
	else
		/*
		 * This can happen if max_speed is zero.
		 * In that case, set the lowest possible speed.
		 */
		scbr = 0xff;

	/*
	 * If the resulting divider doesn't fit into the
	 * register bitfield, we can't satisfy the constraint.
	 */
	if (scbr >= (1 << SPI_SCBR_SIZE)) {
		dev_err(&spi->dev,
			"setup: %d Hz too slow, scbr %u; min %ld Hz\n",
			xfer->speed_hz, scbr, bus_hz/255);
		return -EINVAL;
	}
	if (scbr == 0) {
		dev_err(&spi->dev,
			"setup: %d Hz too high, scbr %u; max %ld Hz\n",
			xfer->speed_hz, scbr, bus_hz);
		return -EINVAL;
	}
	csr = spi_readl(as, CSR0 + 4 * spi->chip_select);
	csr = SPI_BFINS(SCBR, scbr, csr);
	spi_writel(as, CSR0 + 4 * spi->chip_select, csr);

	return 0;
}
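
/*
 * Worked example (added commentary): with a 133 MHz bus clock and a
 * requested 10 MHz transfer, scbr = DIV_ROUND_UP(133000000, 10000000) = 14,
 * giving an actual bit clock of 133 MHz / 14 ~= 9.5 MHz -- the divider is
 * rounded up so the device's speed_hz limit is never exceeded.
 */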

/*
 * Submit next transfer for PDC.
 * lock is held, spi irq is blocked
 */
static void atmel_spi_pdc_next_xfer(struct spi_master *master,
				struct spi_message *msg,
				struct spi_transfer *xfer)
{
	struct atmel_spi	*as = spi_master_get_devdata(master);
	u32			len;
	dma_addr_t		tx_dma, rx_dma;

	spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));

	len = as->current_remaining_bytes;
	atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
	as->current_remaining_bytes -= len;

	spi_writel(as, RPR, rx_dma);
	spi_writel(as, TPR, tx_dma);

	if (msg->spi->bits_per_word > 8)
		len >>= 1;
	spi_writel(as, RCR, len);
	spi_writel(as, TCR, len);

	dev_dbg(&msg->spi->dev,
		"  start xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
		xfer, xfer->len, xfer->tx_buf,
		(unsigned long long)xfer->tx_dma, xfer->rx_buf,
		(unsigned long long)xfer->rx_dma);

	if (as->current_remaining_bytes) {
		len = as->current_remaining_bytes;
		atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
		as->current_remaining_bytes -= len;

		spi_writel(as, RNPR, rx_dma);
		spi_writel(as, TNPR, tx_dma);

		if (msg->spi->bits_per_word > 8)
			len >>= 1;
		spi_writel(as, RNCR, len);
		spi_writel(as, TNCR, len);

		dev_dbg(&msg->spi->dev,
			"  next xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
			xfer, xfer->len, xfer->tx_buf,
			(unsigned long long)xfer->tx_dma, xfer->rx_buf,
			(unsigned long long)xfer->rx_dma);
	}

	/* REVISIT: We're waiting for RXBUFF before we start the next
	 * transfer because we need to handle some difficult timing
	 * issues otherwise. If we wait for TXBUFE in one transfer and
	 * then start waiting for RXBUFF in the next, it's difficult
	 * to tell the difference between the RXBUFF interrupt we're
	 * actually waiting for and the RXBUFF interrupt of the
	 * previous transfer.
	 *
	 * It should be doable, though. Just not now...
	 */
	spi_writel(as, IER, SPI_BIT(RXBUFF) | SPI_BIT(OVRES));
	spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN));
}
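
/*
 * Note (illustrative, not from the original source): the PDC is double
 * buffered -- RPR/RCR describe the buffer currently draining and RNPR/RNCR
 * the one queued behind it.  When a scratch-buffer chunked transfer has,
 * say, 8 KiB remaining and BUFFER_SIZE is a typical 4 KiB page, the
 * function above programs both register pairs at once, so the controller
 * flows from the first chunk into the second without software intervention.
 */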

/*
 * For DMA, tx_buf/tx_dma have the same relationship as rx_buf/rx_dma:
 *  - The buffer is either valid for CPU access, else NULL
 *  - If the buffer is valid, so is its DMA address
 *
 * This driver manages the dma address unless message->is_dma_mapped.
 */
static int
atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer)
{
	struct device	*dev = &as->pdev->dev;

	xfer->tx_dma = xfer->rx_dma = INVALID_DMA_ADDRESS;
	if (xfer->tx_buf) {
		/* tx_buf is a const void* where we need a void * for the dma
		 * mapping */
		void *nonconst_tx = (void *)xfer->tx_buf;

		xfer->tx_dma = dma_map_single(dev,
				nonconst_tx, xfer->len,
				DMA_TO_DEVICE);
		if (dma_mapping_error(dev, xfer->tx_dma))
			return -ENOMEM;
	}
	if (xfer->rx_buf) {
		xfer->rx_dma = dma_map_single(dev,
				xfer->rx_buf, xfer->len,
				DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, xfer->rx_dma)) {
			if (xfer->tx_buf)
				dma_unmap_single(dev,
						xfer->tx_dma, xfer->len,
						DMA_TO_DEVICE);
			return -ENOMEM;
		}
	}
	return 0;
}

static void atmel_spi_dma_unmap_xfer(struct spi_master *master,
				     struct spi_transfer *xfer)
{
	if (xfer->tx_dma != INVALID_DMA_ADDRESS)
		dma_unmap_single(master->dev.parent, xfer->tx_dma,
				 xfer->len, DMA_TO_DEVICE);
	if (xfer->rx_dma != INVALID_DMA_ADDRESS)
		dma_unmap_single(master->dev.parent, xfer->rx_dma,
				 xfer->len, DMA_FROM_DEVICE);
}

static void atmel_spi_disable_pdc_transfer(struct atmel_spi *as)
{
	spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
}

/* Called from IRQ
 *
 * Must update "current_remaining_bytes" to keep track of data
 * to transfer.
 */
static void
atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
{
	u8		*rxp;
	u16		*rxp16;
	unsigned long	xfer_pos = xfer->len - as->current_remaining_bytes;

	if (xfer->rx_buf) {
		if (xfer->bits_per_word > 8) {
			rxp16 = (u16 *)(((u8 *)xfer->rx_buf) + xfer_pos);
			*rxp16 = spi_readl(as, RDR);
		} else {
			rxp = ((u8 *)xfer->rx_buf) + xfer_pos;
			*rxp = spi_readl(as, RDR);
		}
	} else {
		spi_readl(as, RDR);
	}
	if (xfer->bits_per_word > 8) {
		if (as->current_remaining_bytes > 2)
			as->current_remaining_bytes -= 2;
		else
			as->current_remaining_bytes = 0;
	} else {
		as->current_remaining_bytes--;
	}
}
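
/*
 * Example (added note): words wider than 8 bits occupy two bytes each in
 * the caller's buffer, so a 12-bit-per-word transfer stores one sample per
 * u16 and the helper above decrements current_remaining_bytes by 2 per
 * RDR read; xfer->len is always counted in bytes, never in words.
 */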

/* Interrupt
 *
 * No need for locking in this interrupt handler: done_status is the
 * only information modified.
 */
static irqreturn_t
atmel_spi_pio_interrupt(int irq, void *dev_id)
{
	struct spi_master	*master = dev_id;
	struct atmel_spi	*as = spi_master_get_devdata(master);
	u32			status, pending, imr;
	struct spi_transfer	*xfer;
	int			ret = IRQ_NONE;

	imr = spi_readl(as, IMR);
	status = spi_readl(as, SR);
	pending = status & imr;

	if (pending & SPI_BIT(OVRES)) {
		ret = IRQ_HANDLED;
		spi_writel(as, IDR, SPI_BIT(OVRES));
		dev_warn(master->dev.parent, "overrun\n");

		/*
		 * When we get an overrun, we disregard the current
		 * transfer. Data will not be copied back from any
		 * bounce buffer and msg->actual_len will not be
		 * updated with the last xfer.
		 *
		 * We will also not process any remaining transfers in
		 * the message.
		 */
		as->done_status = -EIO;
		smp_wmb();

		/* Clear any overrun happening while cleaning up */
		spi_readl(as, SR);

		complete(&as->xfer_completion);

	} else if (pending & SPI_BIT(RDRF)) {
		atmel_spi_lock(as);

		if (as->current_remaining_bytes) {
			ret = IRQ_HANDLED;
			xfer = as->current_transfer;
			atmel_spi_pump_pio_data(as, xfer);
			if (!as->current_remaining_bytes)
				spi_writel(as, IDR, pending);

			complete(&as->xfer_completion);
		}

		atmel_spi_unlock(as);
	} else {
		WARN_ONCE(pending, "IRQ not handled, pending = %x\n", pending);
		ret = IRQ_HANDLED;
		spi_writel(as, IDR, pending);
	}

	return ret;
}

static irqreturn_t
atmel_spi_pdc_interrupt(int irq, void *dev_id)
{
	struct spi_master	*master = dev_id;
	struct atmel_spi	*as = spi_master_get_devdata(master);
	u32			status, pending, imr;
	int			ret = IRQ_NONE;

	imr = spi_readl(as, IMR);
	status = spi_readl(as, SR);
	pending = status & imr;

	if (pending & SPI_BIT(OVRES)) {

		ret = IRQ_HANDLED;

		spi_writel(as, IDR, (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX)
				     | SPI_BIT(OVRES)));

		/* Clear any overrun happening while cleaning up */
		spi_readl(as, SR);

		as->done_status = -EIO;

		complete(&as->xfer_completion);

	} else if (pending & (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX))) {
		ret = IRQ_HANDLED;

		spi_writel(as, IDR, pending);

		complete(&as->xfer_completion);
	}

	return ret;
}

static int atmel_spi_setup(struct spi_device *spi)
{
	struct atmel_spi	*as;
	struct atmel_spi_device	*asd;
	u32			csr;
	unsigned int		bits = spi->bits_per_word;
	unsigned int		npcs_pin;
	int			ret;

	as = spi_master_get_devdata(spi->master);

	/* see notes above re chipselect */
	if (!atmel_spi_is_v2(as)
			&& spi->chip_select == 0
			&& (spi->mode & SPI_CS_HIGH)) {
		dev_dbg(&spi->dev, "setup: can't be active-high\n");
		return -EINVAL;
	}

	csr = SPI_BF(BITS, bits - 8);
	if (spi->mode & SPI_CPOL)
		csr |= SPI_BIT(CPOL);
	if (!(spi->mode & SPI_CPHA))
		csr |= SPI_BIT(NCPHA);

	/* DLYBS is mostly irrelevant since we manage chipselect using GPIOs.
	 *
	 * DLYBCT would add delays between words, slowing down transfers.
	 * It could potentially be useful to cope with DMA bottlenecks, but
	 * in those cases it's probably best to just use a lower bitrate.
	 */
	csr |= SPI_BF(DLYBS, 0);
	csr |= SPI_BF(DLYBCT, 0);

	/* chipselect must have been muxed as GPIO (e.g. in board setup) */
	npcs_pin = (unsigned long)spi->controller_data;

	if (gpio_is_valid(spi->cs_gpio))
		npcs_pin = spi->cs_gpio;

	asd = spi->controller_state;
	if (!asd) {
		asd = kzalloc(sizeof(struct atmel_spi_device), GFP_KERNEL);
		if (!asd)
			return -ENOMEM;

		ret = gpio_request(npcs_pin, dev_name(&spi->dev));
		if (ret) {
			kfree(asd);
			return ret;
		}

		asd->npcs_pin = npcs_pin;
		spi->controller_state = asd;
		gpio_direction_output(npcs_pin, !(spi->mode & SPI_CS_HIGH));
	}

	asd->csr = csr;

	dev_dbg(&spi->dev,
		"setup: bpw %u mode 0x%x -> csr%d %08x\n",
		bits, spi->mode, spi->chip_select, csr);

	if (!atmel_spi_is_v2(as))
		spi_writel(as, CSR0 + 4 * spi->chip_select, csr);

	return 0;
}
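
/*
 * Worked example (added commentary): a device configured for SPI mode 3
 * (CPOL | CPHA) with 16 bits per word gets csr = SPI_BIT(CPOL)
 * | SPI_BF(BITS, 16 - 8) = 0x00000081; NCPHA stays clear because CPHA is
 * set, and SCBR is filled in per transfer by atmel_spi_set_xfer_speed().
 */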

static int atmel_spi_one_transfer(struct spi_master *master,
					struct spi_message *msg,
					struct spi_transfer *xfer)
{
	struct atmel_spi	*as;
	struct spi_device	*spi = msg->spi;
	u8			bits;
	u32			len;
	struct atmel_spi_device	*asd;
	int			timeout;
	int			ret;
	unsigned long		dma_timeout;

	as = spi_master_get_devdata(master);

	if (!(xfer->tx_buf || xfer->rx_buf) && xfer->len) {
		dev_dbg(&spi->dev, "missing rx or tx buf\n");
		return -EINVAL;
	}

	if (xfer->bits_per_word) {
		asd = spi->controller_state;
		bits = (asd->csr >> 4) & 0xf;
		if (bits != xfer->bits_per_word - 8) {
			dev_dbg(&spi->dev,
				"you can't yet change bits_per_word in transfers\n");
			return -ENOPROTOOPT;
		}
	}

	/*
	 * DMA map early, for performance (empties dcache ASAP) and
	 * better fault reporting.
	 */
	if ((!msg->is_dma_mapped)
		&& (atmel_spi_use_dma(as, xfer) || as->use_pdc)) {
		if (atmel_spi_dma_map_xfer(as, xfer) < 0)
			return -ENOMEM;
	}

	atmel_spi_set_xfer_speed(as, msg->spi, xfer);

	as->done_status = 0;
	as->current_transfer = xfer;
	as->current_remaining_bytes = xfer->len;
	while (as->current_remaining_bytes) {
		reinit_completion(&as->xfer_completion);

		if (as->use_pdc) {
			atmel_spi_pdc_next_xfer(master, msg, xfer);
		} else if (atmel_spi_use_dma(as, xfer)) {
			len = as->current_remaining_bytes;
			ret = atmel_spi_next_xfer_dma_submit(master,
								xfer, &len);
			if (ret) {
				dev_err(&spi->dev,
					"unable to use DMA, falling back to PIO\n");
				atmel_spi_next_xfer_pio(master, xfer);
			} else {
				as->current_remaining_bytes -= len;
				if (as->current_remaining_bytes < 0)
					as->current_remaining_bytes = 0;
			}
		} else {
			atmel_spi_next_xfer_pio(master, xfer);
		}

		/* interrupts are disabled, so free the lock for schedule */
		atmel_spi_unlock(as);
		dma_timeout = wait_for_completion_timeout(&as->xfer_completion,
							  SPI_DMA_TIMEOUT);
		atmel_spi_lock(as);
		if (WARN_ON(dma_timeout == 0)) {
			dev_err(&spi->dev, "spi transfer timeout\n");
			as->done_status = -EIO;
		}

		if (as->done_status)
			break;
	}

	if (as->done_status) {
		if (as->use_pdc) {
			dev_warn(master->dev.parent,
				"overrun (%u/%u remaining)\n",
				spi_readl(as, TCR), spi_readl(as, RCR));

			/*
			 * Clean up DMA registers and make sure the data
			 * registers are empty.
			 */
			spi_writel(as, RNCR, 0);
			spi_writel(as, TNCR, 0);
			spi_writel(as, RCR, 0);
			spi_writel(as, TCR, 0);
			for (timeout = 1000; timeout; timeout--)
				if (spi_readl(as, SR) & SPI_BIT(TXEMPTY))
					break;
			if (!timeout)
				dev_warn(master->dev.parent,
					 "timeout waiting for TXEMPTY\n");
			while (spi_readl(as, SR) & SPI_BIT(RDRF))
				spi_readl(as, RDR);

			/* Clear any overrun happening while cleaning up */
			spi_readl(as, SR);

		} else if (atmel_spi_use_dma(as, xfer)) {
			atmel_spi_stop_dma(as);
		}

		if (!msg->is_dma_mapped
			&& (atmel_spi_use_dma(as, xfer) || as->use_pdc))
			atmel_spi_dma_unmap_xfer(master, xfer);

		return 0;

	} else {
		/* only update length if no error */
		msg->actual_length += xfer->len;
	}

	if (!msg->is_dma_mapped
		&& (atmel_spi_use_dma(as, xfer) || as->use_pdc))
		atmel_spi_dma_unmap_xfer(master, xfer);

	if (xfer->delay_usecs)
		udelay(xfer->delay_usecs);

	if (xfer->cs_change) {
		if (list_is_last(&xfer->transfer_list,
				 &msg->transfers)) {
			as->keep_cs = true;
		} else {
			as->cs_active = !as->cs_active;
			if (as->cs_active)
				cs_activate(as, msg->spi);
			else
				cs_deactivate(as, msg->spi);
		}
	}

	return 0;
}

static int atmel_spi_transfer_one_message(struct spi_master *master,
						struct spi_message *msg)
{
	struct atmel_spi *as;
	struct spi_transfer *xfer;
	struct spi_device *spi = msg->spi;
	int ret = 0;

	as = spi_master_get_devdata(master);

	dev_dbg(&spi->dev, "new message %p submitted for %s\n",
		msg, dev_name(&spi->dev));

	atmel_spi_lock(as);
	cs_activate(as, spi);

	as->cs_active = true;
	as->keep_cs = false;

	msg->status = 0;
	msg->actual_length = 0;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		ret = atmel_spi_one_transfer(master, msg, xfer);
		if (ret)
			goto msg_done;
	}

	if (as->use_pdc)
		atmel_spi_disable_pdc_transfer(as);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		dev_dbg(&spi->dev,
			"  xfer %p: len %u tx %p/%pad rx %p/%pad\n",
			xfer, xfer->len,
			xfer->tx_buf, &xfer->tx_dma,
			xfer->rx_buf, &xfer->rx_dma);
	}

msg_done:
	if (!as->keep_cs)
		cs_deactivate(as, msg->spi);

	atmel_spi_unlock(as);

	msg->status = as->done_status;
	spi_finalize_current_message(spi->master);

	return ret;
}
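
/*
 * Usage sketch (hypothetical client code, not part of this driver): the
 * cs_change/keep_cs handling above is exercised by messages such as
 *
 *	struct spi_transfer t[2] = {
 *		{ .tx_buf = cmd,  .len = 1, .cs_change = 1 },
 *		{ .rx_buf = resp, .len = 4 },
 *	};
 *	struct spi_message m;
 *
 *	spi_message_init(&m);
 *	spi_message_add_tail(&t[0], &m);
 *	spi_message_add_tail(&t[1], &m);
 *	ret = spi_sync(spi, &m);
 *
 * where the chipselect is toggled between the two transfers because
 * cs_change is set on a transfer that is not the last in the message.
 */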

static void atmel_spi_cleanup(struct spi_device *spi)
{
	struct atmel_spi_device	*asd = spi->controller_state;

	if (!asd)
		return;

	spi->controller_state = NULL;
	/* free the pin actually requested in setup(), which may be
	 * spi->cs_gpio rather than the controller_data pin */
	gpio_free(asd->npcs_pin);
	kfree(asd);
}

static inline unsigned int atmel_get_version(struct atmel_spi *as)
{
	return spi_readl(as, VERSION) & 0x00000fff;
}

static void atmel_get_caps(struct atmel_spi *as)
{
	unsigned int version;

	version = atmel_get_version(as);
	dev_info(&as->pdev->dev, "version: 0x%x\n", version);

	as->caps.is_spi2 = version > 0x121;
	as->caps.has_wdrbt = version >= 0x210;
	as->caps.has_dma_support = version >= 0x212;
}

/*-------------------------------------------------------------------------*/
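
/*
 * Probe-time wiring (illustrative device-tree fragment, assumed typical
 * rather than copied from a specific board): the probe path below expects
 * a register window, an interrupt, a clock named "spi_clk", and -- for
 * controllers with version >= 0x212 -- optional "tx"/"rx" dmaengine
 * channels:
 *
 *	spi0: spi@f0000000 {
 *		compatible = "atmel,at91rm9200-spi";
 *		reg = <0xf0000000 0x100>;
 *		interrupts = <...>;
 *		clocks = <&spi0_clk>;
 *		clock-names = "spi_clk";
 *		dmas = <...>, <...>;
 *		dma-names = "tx", "rx";
 *		cs-gpios = <&pioA 14 0>;
 *	};
 */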

static int atmel_spi_probe(struct platform_device *pdev)
{
	struct resource		*regs;
	int			irq;
	struct clk		*clk;
	int			ret;
	struct spi_master	*master;
	struct atmel_spi	*as;

	/* Select default pin state */
	pinctrl_pm_select_default_state(&pdev->dev);

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!regs)
		return -ENXIO;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	clk = devm_clk_get(&pdev->dev, "spi_clk");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* setup spi core then atmel-specific driver state */
	ret = -ENOMEM;
	master = spi_alloc_master(&pdev->dev, sizeof(*as));
	if (!master)
		goto out_free;

	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16);
	master->dev.of_node = pdev->dev.of_node;
	master->bus_num = pdev->id;
	master->num_chipselect = master->dev.of_node ? 0 : 4;
	master->setup = atmel_spi_setup;
	master->transfer_one_message = atmel_spi_transfer_one_message;
	master->cleanup = atmel_spi_cleanup;
	master->auto_runtime_pm = true;
	platform_set_drvdata(pdev, master);

	as = spi_master_get_devdata(master);

	/*
	 * Scratch buffer is used for throwaway rx and tx data.
	 * It's coherent to minimize dcache pollution.
	 */
	as->buffer = dma_alloc_coherent(&pdev->dev, BUFFER_SIZE,
					&as->buffer_dma, GFP_KERNEL);
	if (!as->buffer)
		goto out_free;

	spin_lock_init(&as->lock);

	as->pdev = pdev;
	as->regs = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(as->regs)) {
		ret = PTR_ERR(as->regs);
		goto out_free_buffer;
	}
	as->phybase = regs->start;
	as->irq = irq;
	as->clk = clk;

	init_completion(&as->xfer_completion);

	atmel_get_caps(as);

	as->use_dma = false;
	as->use_pdc = false;
	if (as->caps.has_dma_support) {
		ret = atmel_spi_configure_dma(as);
		if (ret == 0)
			as->use_dma = true;
		else if (ret == -EPROBE_DEFER)
			/* don't leak the scratch buffer on deferral */
			goto out_free_buffer;
	} else {
		as->use_pdc = true;
	}

	if (as->caps.has_dma_support && !as->use_dma)
		dev_info(&pdev->dev, "Atmel SPI Controller using PIO only\n");

	if (as->use_pdc) {
		ret = devm_request_irq(&pdev->dev, irq, atmel_spi_pdc_interrupt,
					0, dev_name(&pdev->dev), master);
	} else {
		ret = devm_request_irq(&pdev->dev, irq, atmel_spi_pio_interrupt,
					0, dev_name(&pdev->dev), master);
	}
	if (ret)
		goto out_unmap_regs;

	/* Initialize the hardware */
	ret = clk_prepare_enable(clk);
	if (ret)
		goto out_free_irq;
	spi_writel(as, CR, SPI_BIT(SWRST));
	spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
	if (as->caps.has_wdrbt) {
		spi_writel(as, MR, SPI_BIT(WDRBT) | SPI_BIT(MODFDIS)
				| SPI_BIT(MSTR));
	} else {
		spi_writel(as, MR, SPI_BIT(MSTR) | SPI_BIT(MODFDIS));
	}

	if (as->use_pdc)
		spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
	spi_writel(as, CR, SPI_BIT(SPIEN));

	/* go! */
	dev_info(&pdev->dev, "Atmel SPI Controller at 0x%08lx (irq %d)\n",
			(unsigned long)regs->start, irq);

	pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret)
		goto out_free_dma;

	return 0;

out_free_dma:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);

	if (as->use_dma)
		atmel_spi_release_dma(as);

	spi_writel(as, CR, SPI_BIT(SWRST));
	spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
	clk_disable_unprepare(clk);
out_free_irq:
out_unmap_regs:
out_free_buffer:
	dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer,
			as->buffer_dma);
out_free:
	spi_master_put(master);
	return ret;
}

static int atmel_spi_remove(struct platform_device *pdev)
{
	struct spi_master	*master = platform_get_drvdata(pdev);
	struct atmel_spi	*as = spi_master_get_devdata(master);

	pm_runtime_get_sync(&pdev->dev);

	/* reset the hardware and block queue progress */
	spin_lock_irq(&as->lock);
	if (as->use_dma) {
		atmel_spi_stop_dma(as);
		atmel_spi_release_dma(as);
	}

	spi_writel(as, CR, SPI_BIT(SWRST));
	spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
	spi_readl(as, SR);
	spin_unlock_irq(&as->lock);

	dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer,
			as->buffer_dma);

	clk_disable_unprepare(as->clk);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

#ifdef CONFIG_PM
static int atmel_spi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct atmel_spi *as = spi_master_get_devdata(master);

	clk_disable_unprepare(as->clk);
	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int atmel_spi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct atmel_spi *as = spi_master_get_devdata(master);

	pinctrl_pm_select_default_state(dev);

	return clk_prepare_enable(as->clk);
}

static int atmel_spi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	int ret;

	/* Stop the queue running */
	ret = spi_master_suspend(master);
	if (ret) {
		dev_warn(dev, "cannot suspend master\n");
		return ret;
	}

	if (!pm_runtime_suspended(dev))
		atmel_spi_runtime_suspend(dev);

	return 0;
}

static int atmel_spi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	int ret;

	if (!pm_runtime_suspended(dev)) {
		ret = atmel_spi_runtime_resume(dev);
		if (ret)
			return ret;
	}

	/* Start the queue running */
	ret = spi_master_resume(master);
	if (ret)
		dev_err(dev, "problem starting queue (%d)\n", ret);

	return ret;
}

static const struct dev_pm_ops atmel_spi_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(atmel_spi_suspend, atmel_spi_resume)
	SET_RUNTIME_PM_OPS(atmel_spi_runtime_suspend,
			   atmel_spi_runtime_resume, NULL)
};
#define ATMEL_SPI_PM_OPS	(&atmel_spi_pm_ops)
#else
#define ATMEL_SPI_PM_OPS	NULL
#endif

#if defined(CONFIG_OF)
static const struct of_device_id atmel_spi_dt_ids[] = {
	{ .compatible = "atmel,at91rm9200-spi" },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, atmel_spi_dt_ids);
#endif

static struct platform_driver atmel_spi_driver = {
	.driver		= {
		.name	= "atmel_spi",
		.pm	= ATMEL_SPI_PM_OPS,
		.of_match_table	= of_match_ptr(atmel_spi_dt_ids),
	},
	.probe		= atmel_spi_probe,
	.remove		= atmel_spi_remove,
};
module_platform_driver(atmel_spi_driver);

MODULE_DESCRIPTION("Atmel AT32/AT91 SPI Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:atmel_spi");