/*
 * Driver for Atmel AT32 and AT91 SPI Controllers
 *
 * Copyright (C) 2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/platform_data/atmel.h>

#include <asm/io.h>
#include <asm/gpio.h>
#include <mach/cpu.h>

/* SPI register offsets */
#define SPI_CR			0x0000
#define SPI_MR			0x0004
#define SPI_RDR			0x0008
#define SPI_TDR			0x000c
#define SPI_SR			0x0010
#define SPI_IER			0x0014
#define SPI_IDR			0x0018
#define SPI_IMR			0x001c
#define SPI_CSR0		0x0030
#define SPI_CSR1		0x0034
#define SPI_CSR2		0x0038
#define SPI_CSR3		0x003c
#define SPI_RPR			0x0100
#define SPI_RCR			0x0104
#define SPI_TPR			0x0108
#define SPI_TCR			0x010c
#define SPI_RNPR		0x0110
#define SPI_RNCR		0x0114
#define SPI_TNPR		0x0118
#define SPI_TNCR		0x011c
#define SPI_PTCR		0x0120
#define SPI_PTSR		0x0124

/* Bitfields in CR */
#define SPI_SPIEN_OFFSET	0
#define SPI_SPIEN_SIZE		1
#define SPI_SPIDIS_OFFSET	1
#define SPI_SPIDIS_SIZE		1
#define SPI_SWRST_OFFSET	7
#define SPI_SWRST_SIZE		1
#define SPI_LASTXFER_OFFSET	24
#define SPI_LASTXFER_SIZE	1

/* Bitfields in MR */
#define SPI_MSTR_OFFSET		0
#define SPI_MSTR_SIZE		1
#define SPI_PS_OFFSET		1
#define SPI_PS_SIZE		1
#define SPI_PCSDEC_OFFSET	2
#define SPI_PCSDEC_SIZE		1
#define SPI_FDIV_OFFSET		3
#define SPI_FDIV_SIZE		1
#define SPI_MODFDIS_OFFSET	4
#define SPI_MODFDIS_SIZE	1
#define SPI_LLB_OFFSET		7
#define SPI_LLB_SIZE		1
#define SPI_PCS_OFFSET		16
#define SPI_PCS_SIZE		4
#define SPI_DLYBCS_OFFSET	24
#define SPI_DLYBCS_SIZE		8

/* Bitfields in RDR */
#define SPI_RD_OFFSET		0
#define SPI_RD_SIZE		16

/* Bitfields in TDR */
#define SPI_TD_OFFSET		0
#define SPI_TD_SIZE		16

/* Bitfields in SR */
#define SPI_RDRF_OFFSET		0
#define SPI_RDRF_SIZE		1
#define SPI_TDRE_OFFSET		1
#define SPI_TDRE_SIZE		1
#define SPI_MODF_OFFSET		2
#define SPI_MODF_SIZE		1
#define SPI_OVRES_OFFSET	3
#define SPI_OVRES_SIZE		1
#define SPI_ENDRX_OFFSET	4
#define SPI_ENDRX_SIZE		1
#define SPI_ENDTX_OFFSET	5
#define SPI_ENDTX_SIZE		1
#define SPI_RXBUFF_OFFSET	6
#define SPI_RXBUFF_SIZE		1
#define SPI_TXBUFE_OFFSET	7
#define SPI_TXBUFE_SIZE		1
#define SPI_NSSR_OFFSET		8
#define SPI_NSSR_SIZE		1
#define SPI_TXEMPTY_OFFSET	9
#define SPI_TXEMPTY_SIZE	1
#define SPI_SPIENS_OFFSET	16
#define SPI_SPIENS_SIZE		1

/* Bitfields in CSR0 */
#define SPI_CPOL_OFFSET		0
#define SPI_CPOL_SIZE		1
#define SPI_NCPHA_OFFSET	1
#define SPI_NCPHA_SIZE		1
#define SPI_CSAAT_OFFSET	3
#define SPI_CSAAT_SIZE		1
#define SPI_BITS_OFFSET		4
#define SPI_BITS_SIZE		4
#define SPI_SCBR_OFFSET		8
#define SPI_SCBR_SIZE		8
#define SPI_DLYBS_OFFSET	16
#define SPI_DLYBS_SIZE		8
#define SPI_DLYBCT_OFFSET	24
#define SPI_DLYBCT_SIZE		8

/* Bitfields in RCR */
#define SPI_RXCTR_OFFSET	0
#define SPI_RXCTR_SIZE		16

/* Bitfields in TCR */
#define SPI_TXCTR_OFFSET	0
#define SPI_TXCTR_SIZE		16

/* Bitfields in RNCR */
#define SPI_RXNCR_OFFSET	0
#define SPI_RXNCR_SIZE		16

/* Bitfields in TNCR */
#define SPI_TXNCR_OFFSET	0
#define SPI_TXNCR_SIZE		16

/* Bitfields in PTCR */
#define SPI_RXTEN_OFFSET	0
#define SPI_RXTEN_SIZE		1
#define SPI_RXTDIS_OFFSET	1
#define SPI_RXTDIS_SIZE		1
#define SPI_TXTEN_OFFSET	8
#define SPI_TXTEN_SIZE		1
#define SPI_TXTDIS_OFFSET	9
#define SPI_TXTDIS_SIZE		1

/* Constants for BITS */
#define SPI_BITS_8_BPT		0
#define SPI_BITS_9_BPT		1
#define SPI_BITS_10_BPT		2
#define SPI_BITS_11_BPT		3
#define SPI_BITS_12_BPT		4
#define SPI_BITS_13_BPT		5
#define SPI_BITS_14_BPT		6
#define SPI_BITS_15_BPT		7
#define SPI_BITS_16_BPT		8

/* Bit manipulation macros */
#define SPI_BIT(name) \
	(1 << SPI_##name##_OFFSET)
#define SPI_BF(name,value) \
	(((value) & ((1 << SPI_##name##_SIZE) - 1)) << SPI_##name##_OFFSET)
#define SPI_BFEXT(name,value) \
	(((value) >> SPI_##name##_OFFSET) & ((1 << SPI_##name##_SIZE) - 1))
#define SPI_BFINS(name,value,old) \
	( ((old) & ~(((1 << SPI_##name##_SIZE) - 1) << SPI_##name##_OFFSET)) \
	  | SPI_BF(name,value))
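
/*
 * Example (illustrative values): SPI_BF(PCS, 0x0e) places 0xe in MR
 * bits 19:16, SPI_BFEXT(PCS, mr) reads the field back out, and
 * SPI_BFINS(PCS, 0xf, mr) rewrites just that field while preserving
 * the rest of the register value.
 */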

/* Register access macros */
#define spi_readl(port,reg) \
	__raw_readl((port)->regs + SPI_##reg)
#define spi_writel(port,reg,value) \
	__raw_writel((value), (port)->regs + SPI_##reg)


/*
 * The core SPI transfer engine just talks to a register bank to set up
 * DMA transfers; transfer queue progress is driven by IRQs.  The clock
 * framework provides the base clock, subdivided for each spi_device.
 */
struct atmel_spi {
	spinlock_t		lock;

	void __iomem		*regs;
	int			irq;
	struct clk		*clk;
	struct platform_device	*pdev;
	struct spi_device	*stay;

	u8			stopping;
	struct list_head	queue;
	struct spi_transfer	*current_transfer;
	unsigned long		current_remaining_bytes;
	struct spi_transfer	*next_transfer;
	unsigned long		next_remaining_bytes;

	void			*buffer;
	dma_addr_t		buffer_dma;
};

/* Controller-specific per-slave state */
struct atmel_spi_device {
	unsigned int		npcs_pin;
	u32			csr;
};

#define BUFFER_SIZE		PAGE_SIZE
#define INVALID_DMA_ADDRESS	0xffffffff

/*
 * Version 2 of the SPI controller has
 *  - CR.LASTXFER
 *  - SPI_MR.DIV32 may become FDIV or must-be-zero (here: always zero)
 *  - SPI_SR.TXEMPTY, SPI_SR.NSSR (and corresponding irqs)
 *  - SPI_CSRx.CSAAT
 *  - SPI_CSRx.SCBR allows faster clocking
 *
 * We can determine the controller version by reading the VERSION
 * register, but I haven't checked that it exists on all chips, and
 * this is cheaper anyway.
 */
static bool atmel_spi_is_v2(void)
{
	return !cpu_is_at91rm9200();
}

/*
 * Earlier SPI controllers (e.g. on at91rm9200) have a design bug whereby
 * they assume that spi slave device state will not change on deselect, so
 * that automagic deselection is OK.  ("NPCSx rises if no data is to be
 * transmitted")  Not so!  Workaround uses nCSx pins as GPIOs; or newer
 * controllers have CSAAT and friends.
 *
 * Since the CSAAT functionality is a bit weird on newer controllers as
 * well, we use GPIO to control nCSx pins on all controllers, updating
 * MR.PCS to avoid confusing the controller.  Using GPIOs also lets us
 * support active-high chipselects despite the controller's belief that
 * only active-low devices/systems exist.
 *
 * However, at91rm9200 has a second erratum whereby nCS0 doesn't work
 * right when driven with GPIO.  ("Mode Fault does not allow more than one
 * Master on Chip Select 0.")  No workaround exists for that ... so for
 * nCS0 on that chip, we (a) don't use the GPIO, (b) can't support CS_HIGH,
 * and (c) will trigger that first erratum in some cases.
 *
 * TODO: Test if the atmel_spi_is_v2() branch below works on
 * AT91RM9200 if we use some other register than CSR0.  However, don't
 * do this unconditionally since AP7000 has an erratum where the BITS
 * field in CSR0 overrides all other CSRs.
 */

static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
{
	struct atmel_spi_device *asd = spi->controller_state;
	unsigned active = spi->mode & SPI_CS_HIGH;
	u32 mr;

	if (atmel_spi_is_v2()) {
		/*
		 * Always use CSR0.  This ensures that the clock
		 * switches to the correct idle polarity before we
		 * toggle the CS.
		 */
		spi_writel(as, CSR0, asd->csr);
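		/*
		 * PCS is the active-low peripheral chip select field:
		 * 0x0e (1110b) points the controller at CS0, even
		 * though the actual chipselect line is driven as a
		 * GPIO below.
		 */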
		spi_writel(as, MR, SPI_BF(PCS, 0x0e) | SPI_BIT(MODFDIS)
				| SPI_BIT(MSTR));
		mr = spi_readl(as, MR);
		gpio_set_value(asd->npcs_pin, active);
	} else {
		u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0;
		int i;
		u32 csr;

		/* Make sure clock polarity is correct */
		for (i = 0; i < spi->master->num_chipselect; i++) {
			csr = spi_readl(as, CSR0 + 4 * i);
			if ((csr ^ cpol) & SPI_BIT(CPOL))
				spi_writel(as, CSR0 + 4 * i,
						csr ^ SPI_BIT(CPOL));
		}

		mr = spi_readl(as, MR);
		mr = SPI_BFINS(PCS, ~(1 << spi->chip_select), mr);
		if (spi->chip_select != 0)
			gpio_set_value(asd->npcs_pin, active);
		spi_writel(as, MR, mr);
	}

	dev_dbg(&spi->dev, "activate %u%s, mr %08x\n",
			asd->npcs_pin, active ? " (high)" : "",
			mr);
}

static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
{
	struct atmel_spi_device *asd = spi->controller_state;
	unsigned active = spi->mode & SPI_CS_HIGH;
	u32 mr;

	/* only deactivate *this* device; sometimes transfers to
	 * another device may be active when this routine is called.
	 */
	mr = spi_readl(as, MR);
	if (~SPI_BFEXT(PCS, mr) & (1 << spi->chip_select)) {
		mr = SPI_BFINS(PCS, 0xf, mr);
		spi_writel(as, MR, mr);
	}

	dev_dbg(&spi->dev, "DEactivate %u%s, mr %08x\n",
			asd->npcs_pin, active ? " (low)" : "",
			mr);

	if (atmel_spi_is_v2() || spi->chip_select != 0)
		gpio_set_value(asd->npcs_pin, !active);
}

static inline int atmel_spi_xfer_is_last(struct spi_message *msg,
					struct spi_transfer *xfer)
{
	return msg->transfers.prev == &xfer->transfer_list;
}

static inline int atmel_spi_xfer_can_be_chained(struct spi_transfer *xfer)
{
	return xfer->delay_usecs == 0 && !xfer->cs_change;
}

static void atmel_spi_next_xfer_data(struct spi_master *master,
				struct spi_transfer *xfer,
				dma_addr_t *tx_dma,
				dma_addr_t *rx_dma,
				u32 *plen)
{
	struct atmel_spi *as = spi_master_get_devdata(master);
	u32 len = *plen;

	/* use scratch buffer only when rx or tx data is unspecified */
	if (xfer->rx_buf)
		*rx_dma = xfer->rx_dma + xfer->len - *plen;
	else {
		*rx_dma = as->buffer_dma;
		if (len > BUFFER_SIZE)
			len = BUFFER_SIZE;
	}
	if (xfer->tx_buf)
		*tx_dma = xfer->tx_dma + xfer->len - *plen;
	else {
		*tx_dma = as->buffer_dma;
		if (len > BUFFER_SIZE)
			len = BUFFER_SIZE;
		memset(as->buffer, 0, len);
		dma_sync_single_for_device(&as->pdev->dev,
				as->buffer_dma, len, DMA_TO_DEVICE);
	}

	*plen = len;
}

/*
 * Submit next transfer for DMA.
 * lock is held, spi irq is blocked
 */
static void atmel_spi_next_xfer(struct spi_master *master,
				struct spi_message *msg)
{
	struct atmel_spi *as = spi_master_get_devdata(master);
	struct spi_transfer *xfer;
	u32 len, remaining;
	u32 ieval;
	dma_addr_t tx_dma, rx_dma;

	if (!as->current_transfer)
		xfer = list_entry(msg->transfers.next,
				struct spi_transfer, transfer_list);
	else if (!as->next_transfer)
		xfer = list_entry(as->current_transfer->transfer_list.next,
				struct spi_transfer, transfer_list);
	else
		xfer = NULL;

	if (xfer) {
		spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));

		len = xfer->len;
		atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
		remaining = xfer->len - len;

		spi_writel(as, RPR, rx_dma);
		spi_writel(as, TPR, tx_dma);

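		/*
		 * The PDC counters count transfers, not bytes: with
		 * 9..16 bits per word each transfer moves two bytes,
		 * so halve the byte count.
		 */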
		if (msg->spi->bits_per_word > 8)
			len >>= 1;
		spi_writel(as, RCR, len);
		spi_writel(as, TCR, len);

		dev_dbg(&msg->spi->dev,
			"  start xfer %p: len %u tx %p/%08x rx %p/%08x\n",
			xfer, xfer->len, xfer->tx_buf, xfer->tx_dma,
			xfer->rx_buf, xfer->rx_dma);
	} else {
		xfer = as->next_transfer;
		remaining = as->next_remaining_bytes;
	}

	as->current_transfer = xfer;
	as->current_remaining_bytes = remaining;

	if (remaining > 0)
		len = remaining;
	else if (!atmel_spi_xfer_is_last(msg, xfer)
			&& atmel_spi_xfer_can_be_chained(xfer)) {
		xfer = list_entry(xfer->transfer_list.next,
				struct spi_transfer, transfer_list);
		len = xfer->len;
	} else
		xfer = NULL;

	as->next_transfer = xfer;

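	/*
	 * The PDC provides a second ("next") pointer/counter register
	 * pair (RNPR/RNCR, TNPR/TNCR); it switches to those
	 * automatically when the current buffer completes, letting us
	 * queue the following transfer without a gap on the bus.
	 */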
	if (xfer) {
		u32 total;

		total = len;
		atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
		as->next_remaining_bytes = total - len;

		spi_writel(as, RNPR, rx_dma);
		spi_writel(as, TNPR, tx_dma);

		if (msg->spi->bits_per_word > 8)
			len >>= 1;
		spi_writel(as, RNCR, len);
		spi_writel(as, TNCR, len);

		dev_dbg(&msg->spi->dev,
			"  next xfer %p: len %u tx %p/%08x rx %p/%08x\n",
			xfer, xfer->len, xfer->tx_buf, xfer->tx_dma,
			xfer->rx_buf, xfer->rx_dma);
		ieval = SPI_BIT(ENDRX) | SPI_BIT(OVRES);
	} else {
		spi_writel(as, RNCR, 0);
		spi_writel(as, TNCR, 0);
		ieval = SPI_BIT(RXBUFF) | SPI_BIT(ENDRX) | SPI_BIT(OVRES);
	}

	/* REVISIT: We're waiting for ENDRX before we start the next
	 * transfer because we need to handle some difficult timing
	 * issues otherwise.  If we wait for ENDTX in one transfer and
	 * then start waiting for ENDRX in the next, it's difficult
	 * to tell the difference between the ENDRX interrupt we're
	 * actually waiting for and the ENDRX interrupt of the
	 * previous transfer.
	 *
	 * It should be doable, though.  Just not now...
	 */
	spi_writel(as, IER, ieval);
	spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN));
}

static void atmel_spi_next_message(struct spi_master *master)
{
	struct atmel_spi *as = spi_master_get_devdata(master);
	struct spi_message *msg;
	struct spi_device *spi;

	BUG_ON(as->current_transfer);

	msg = list_entry(as->queue.next, struct spi_message, queue);
	spi = msg->spi;

	dev_dbg(master->dev.parent, "start message %p for %s\n",
			msg, dev_name(&spi->dev));

	/* select chip if it's not still active */
	if (as->stay) {
		if (as->stay != spi) {
			cs_deactivate(as, as->stay);
			cs_activate(as, spi);
		}
		as->stay = NULL;
	} else
		cs_activate(as, spi);

	atmel_spi_next_xfer(master, msg);
}

/*
 * For DMA, tx_buf/tx_dma have the same relationship as rx_buf/rx_dma:
 *  - The buffer is either valid for CPU access, else NULL
 *  - If the buffer is valid, so is its DMA address
 *
 * This driver manages the dma address unless message->is_dma_mapped.
 */
static int
atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer)
{
	struct device *dev = &as->pdev->dev;

	xfer->tx_dma = xfer->rx_dma = INVALID_DMA_ADDRESS;
	if (xfer->tx_buf) {
		/* tx_buf is a const void* where we need a void * for the dma
		 * mapping */
		void *nonconst_tx = (void *)xfer->tx_buf;

		xfer->tx_dma = dma_map_single(dev,
				nonconst_tx, xfer->len,
				DMA_TO_DEVICE);
		if (dma_mapping_error(dev, xfer->tx_dma))
			return -ENOMEM;
	}
	if (xfer->rx_buf) {
		xfer->rx_dma = dma_map_single(dev,
				xfer->rx_buf, xfer->len,
				DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, xfer->rx_dma)) {
			if (xfer->tx_buf)
				dma_unmap_single(dev,
						xfer->tx_dma, xfer->len,
						DMA_TO_DEVICE);
			return -ENOMEM;
		}
	}
	return 0;
}

static void atmel_spi_dma_unmap_xfer(struct spi_master *master,
				     struct spi_transfer *xfer)
{
	if (xfer->tx_dma != INVALID_DMA_ADDRESS)
		dma_unmap_single(master->dev.parent, xfer->tx_dma,
				 xfer->len, DMA_TO_DEVICE);
	if (xfer->rx_dma != INVALID_DMA_ADDRESS)
		dma_unmap_single(master->dev.parent, xfer->rx_dma,
				 xfer->len, DMA_FROM_DEVICE);
}

static void
atmel_spi_msg_done(struct spi_master *master, struct atmel_spi *as,
		struct spi_message *msg, int status, int stay)
{
	if (!stay || status < 0)
		cs_deactivate(as, msg->spi);
	else
		as->stay = msg->spi;

	list_del(&msg->queue);
	msg->status = status;

	dev_dbg(master->dev.parent,
		"xfer complete: %u bytes transferred\n",
		msg->actual_length);

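	/*
	 * Drop the lock while calling the completion callback: it may
	 * well submit a new message to this same controller, which
	 * would deadlock if the lock were still held.
	 */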
	spin_unlock(&as->lock);
	msg->complete(msg->context);
	spin_lock(&as->lock);

	as->current_transfer = NULL;
	as->next_transfer = NULL;

	/* continue if needed */
	if (list_empty(&as->queue) || as->stopping)
		spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
	else
		atmel_spi_next_message(master);
}

static irqreturn_t
atmel_spi_interrupt(int irq, void *dev_id)
{
	struct spi_master *master = dev_id;
	struct atmel_spi *as = spi_master_get_devdata(master);
	struct spi_message *msg;
	struct spi_transfer *xfer;
	u32 status, pending, imr;
	int ret = IRQ_NONE;

	spin_lock(&as->lock);

	xfer = as->current_transfer;
	msg = list_entry(as->queue.next, struct spi_message, queue);

	imr = spi_readl(as, IMR);
	status = spi_readl(as, SR);
	pending = status & imr;

	if (pending & SPI_BIT(OVRES)) {
		int timeout;

		ret = IRQ_HANDLED;

		spi_writel(as, IDR, (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX)
				| SPI_BIT(OVRES)));

		/*
		 * When we get an overrun, we disregard the current
		 * transfer.  Data will not be copied back from any
		 * bounce buffer and msg->actual_length will not be
		 * updated with the last xfer.
		 *
		 * We will also not process any remaining transfers in
		 * the message.
		 *
		 * First, stop the transfer and unmap the DMA buffers.
		 */
		spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
		if (!msg->is_dma_mapped)
			atmel_spi_dma_unmap_xfer(master, xfer);

		/* REVISIT: udelay in irq is unfriendly */
		if (xfer->delay_usecs)
			udelay(xfer->delay_usecs);

		dev_warn(master->dev.parent, "overrun (%u/%u remaining)\n",
			 spi_readl(as, TCR), spi_readl(as, RCR));

		/*
		 * Clean up DMA registers and make sure the data
		 * registers are empty.
		 */
		spi_writel(as, RNCR, 0);
		spi_writel(as, TNCR, 0);
		spi_writel(as, RCR, 0);
		spi_writel(as, TCR, 0);
		for (timeout = 1000; timeout; timeout--)
			if (spi_readl(as, SR) & SPI_BIT(TXEMPTY))
				break;
		if (!timeout)
			dev_warn(master->dev.parent,
				 "timeout waiting for TXEMPTY");
		while (spi_readl(as, SR) & SPI_BIT(RDRF))
			spi_readl(as, RDR);

		/* Clear any overrun happening while cleaning up */
		spi_readl(as, SR);

		atmel_spi_msg_done(master, as, msg, -EIO, 0);
	} else if (pending & (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX))) {
		ret = IRQ_HANDLED;

		spi_writel(as, IDR, pending);

		if (as->current_remaining_bytes == 0) {
			msg->actual_length += xfer->len;

			if (!msg->is_dma_mapped)
				atmel_spi_dma_unmap_xfer(master, xfer);

			/* REVISIT: udelay in irq is unfriendly */
			if (xfer->delay_usecs)
				udelay(xfer->delay_usecs);

			if (atmel_spi_xfer_is_last(msg, xfer)) {
				/* report completed message */
				atmel_spi_msg_done(master, as, msg, 0,
						xfer->cs_change);
			} else {
				if (xfer->cs_change) {
					cs_deactivate(as, msg->spi);
					udelay(1);
					cs_activate(as, msg->spi);
				}

				/*
				 * Not done yet.  Submit the next transfer.
				 *
				 * FIXME handle protocol options for xfer
				 */
				atmel_spi_next_xfer(master, msg);
			}
		} else {
			/*
			 * Keep going, we still have data to send in
			 * the current transfer.
			 */
			atmel_spi_next_xfer(master, msg);
		}
	}

	spin_unlock(&as->lock);

	return ret;
}

static int atmel_spi_setup(struct spi_device *spi)
{
	struct atmel_spi *as;
	struct atmel_spi_device *asd;
	u32 scbr, csr;
	unsigned int bits = spi->bits_per_word;
	unsigned long bus_hz;
	unsigned int npcs_pin;
	int ret;

	as = spi_master_get_devdata(spi->master);

	if (as->stopping)
		return -ESHUTDOWN;

	if (spi->chip_select >= spi->master->num_chipselect) {
		dev_dbg(&spi->dev,
			"setup: invalid chipselect %u (%u defined)\n",
			spi->chip_select, spi->master->num_chipselect);
		return -EINVAL;
	}

	if (bits < 8 || bits > 16) {
		dev_dbg(&spi->dev,
			"setup: invalid bits_per_word %u (8 to 16)\n",
			bits);
		return -EINVAL;
	}

	/* see notes above re chipselect */
	if (!atmel_spi_is_v2()
			&& spi->chip_select == 0
			&& (spi->mode & SPI_CS_HIGH)) {
		dev_dbg(&spi->dev, "setup: can't be active-high\n");
		return -EINVAL;
	}

	/* v1 chips start out at half the peripheral bus speed. */
	bus_hz = clk_get_rate(as->clk);
	if (!atmel_spi_is_v2())
		bus_hz /= 2;

	if (spi->max_speed_hz) {
		/*
		 * Calculate the lowest divider that satisfies the
		 * constraint, assuming div32/fdiv/mbz == 0.
		 */
		scbr = DIV_ROUND_UP(bus_hz, spi->max_speed_hz);
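
		/*
		 * Example (illustrative numbers): with a 60 MHz bus
		 * clock, max_speed_hz = 10000000 gives scbr = 6 and an
		 * actual rate of exactly 10 MHz, while 8000000 gives
		 * scbr = 8 and an actual rate of 7.5 MHz.
		 */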

		/*
		 * If the resulting divider doesn't fit into the
		 * register bitfield, we can't satisfy the constraint.
		 */
		if (scbr >= (1 << SPI_SCBR_SIZE)) {
			dev_dbg(&spi->dev,
				"setup: %d Hz too slow, scbr %u; min %ld Hz\n",
				spi->max_speed_hz, scbr, bus_hz/255);
			return -EINVAL;
		}
	} else
		/* speed zero means "as slow as possible" */
		scbr = 0xff;
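
	/*
	 * Note the inverted phase bit: the controller's NCPHA is the
	 * complement of the usual CPHA, so it is set for SPI modes 0
	 * and 2 (data captured on the leading clock edge).
	 */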
	csr = SPI_BF(SCBR, scbr) | SPI_BF(BITS, bits - 8);
	if (spi->mode & SPI_CPOL)
		csr |= SPI_BIT(CPOL);
	if (!(spi->mode & SPI_CPHA))
		csr |= SPI_BIT(NCPHA);

	/* DLYBS is mostly irrelevant since we manage chipselect using GPIOs.
	 *
	 * DLYBCT would add delays between words, slowing down transfers.
	 * It could potentially be useful to cope with DMA bottlenecks, but
	 * in those cases it's probably best to just use a lower bitrate.
	 */
	csr |= SPI_BF(DLYBS, 0);
	csr |= SPI_BF(DLYBCT, 0);

	/* chipselect must have been muxed as GPIO (e.g. in board setup) */
	npcs_pin = (unsigned int)spi->controller_data;
	asd = spi->controller_state;
	if (!asd) {
		asd = kzalloc(sizeof(struct atmel_spi_device), GFP_KERNEL);
		if (!asd)
			return -ENOMEM;

		ret = gpio_request(npcs_pin, dev_name(&spi->dev));
		if (ret) {
			kfree(asd);
			return ret;
		}

		asd->npcs_pin = npcs_pin;
		spi->controller_state = asd;
		gpio_direction_output(npcs_pin, !(spi->mode & SPI_CS_HIGH));
	} else {
		unsigned long flags;

		spin_lock_irqsave(&as->lock, flags);
		if (as->stay == spi)
			as->stay = NULL;
		cs_deactivate(as, spi);
		spin_unlock_irqrestore(&as->lock, flags);
	}

	asd->csr = csr;

	dev_dbg(&spi->dev,
		"setup: %lu Hz bpw %u mode 0x%x -> csr%d %08x\n",
		bus_hz / scbr, bits, spi->mode, spi->chip_select, csr);

	if (!atmel_spi_is_v2())
		spi_writel(as, CSR0 + 4 * spi->chip_select, csr);

	return 0;
}

static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct atmel_spi *as;
	struct spi_transfer *xfer;
	unsigned long flags;
	struct device *controller = spi->master->dev.parent;
	u8 bits;
	struct atmel_spi_device *asd;

	as = spi_master_get_devdata(spi->master);

	dev_dbg(controller, "new message %p submitted for %s\n",
			msg, dev_name(&spi->dev));

	if (unlikely(list_empty(&msg->transfers)))
		return -EINVAL;

	if (as->stopping)
		return -ESHUTDOWN;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!(xfer->tx_buf || xfer->rx_buf) && xfer->len) {
			dev_dbg(&spi->dev, "missing rx or tx buf\n");
			return -EINVAL;
		}

		if (xfer->bits_per_word) {
			asd = spi->controller_state;
			bits = SPI_BFEXT(BITS, asd->csr);
			if (bits != xfer->bits_per_word - 8) {
				dev_dbg(&spi->dev,
					"you can't yet change bits_per_word in transfers\n");
				return -ENOPROTOOPT;
			}
		}

		/* FIXME implement these protocol options!! */
		if (xfer->speed_hz) {
			dev_dbg(&spi->dev, "no protocol options yet\n");
			return -ENOPROTOOPT;
		}

		/*
		 * DMA map early, for performance (empties dcache ASAP) and
		 * better fault reporting.  This is a DMA-only driver.
		 *
		 * NOTE that if dma_unmap_single() ever starts to do work on
		 * platforms supported by this driver, we would need to clean
		 * up mappings for previously-mapped transfers.
		 */
		if (!msg->is_dma_mapped) {
			if (atmel_spi_dma_map_xfer(as, xfer) < 0)
				return -ENOMEM;
		}
	}

#ifdef VERBOSE
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		dev_dbg(controller,
			"  xfer %p: len %u tx %p/%08x rx %p/%08x\n",
			xfer, xfer->len,
			xfer->tx_buf, xfer->tx_dma,
			xfer->rx_buf, xfer->rx_dma);
	}
#endif

	msg->status = -EINPROGRESS;
	msg->actual_length = 0;

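	/*
	 * Queue the message; if nothing is currently in flight, kick
	 * off processing immediately, otherwise the interrupt handler
	 * will pick it up from the queue when the current message
	 * completes.
	 */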
	spin_lock_irqsave(&as->lock, flags);
	list_add_tail(&msg->queue, &as->queue);
	if (!as->current_transfer)
		atmel_spi_next_message(spi->master);
	spin_unlock_irqrestore(&as->lock, flags);

	return 0;
}

static void atmel_spi_cleanup(struct spi_device *spi)
{
	struct atmel_spi *as = spi_master_get_devdata(spi->master);
	struct atmel_spi_device *asd = spi->controller_state;
	unsigned gpio = (unsigned) spi->controller_data;
	unsigned long flags;

	if (!asd)
		return;

	spin_lock_irqsave(&as->lock, flags);
	if (as->stay == spi) {
		as->stay = NULL;
		cs_deactivate(as, spi);
	}
	spin_unlock_irqrestore(&as->lock, flags);

	spi->controller_state = NULL;
	gpio_free(gpio);
	kfree(asd);
}

/*-------------------------------------------------------------------------*/

static int atmel_spi_probe(struct platform_device *pdev)
{
	struct resource *regs;
	int irq;
	struct clk *clk;
	int ret;
	struct spi_master *master;
	struct atmel_spi *as;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!regs)
		return -ENXIO;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	clk = clk_get(&pdev->dev, "spi_clk");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* setup spi core then atmel-specific driver state */
	ret = -ENOMEM;
	master = spi_alloc_master(&pdev->dev, sizeof *as);
	if (!master)
		goto out_free;

	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;

	master->bus_num = pdev->id;
	master->num_chipselect = 4;
	master->setup = atmel_spi_setup;
	master->transfer = atmel_spi_transfer;
	master->cleanup = atmel_spi_cleanup;
	platform_set_drvdata(pdev, master);

	as = spi_master_get_devdata(master);

	/*
	 * Scratch buffer is used for throwaway rx and tx data.
	 * It's coherent to minimize dcache pollution.
	 */
	as->buffer = dma_alloc_coherent(&pdev->dev, BUFFER_SIZE,
					&as->buffer_dma, GFP_KERNEL);
	if (!as->buffer)
		goto out_free;

	spin_lock_init(&as->lock);
	INIT_LIST_HEAD(&as->queue);
	as->pdev = pdev;
	as->regs = ioremap(regs->start, resource_size(regs));
	if (!as->regs)
		goto out_free_buffer;
	as->irq = irq;
	as->clk = clk;

	ret = request_irq(irq, atmel_spi_interrupt, 0,
			dev_name(&pdev->dev), master);
	if (ret)
		goto out_unmap_regs;

	/* Initialize the hardware */
	clk_enable(clk);
	spi_writel(as, CR, SPI_BIT(SWRST));
	spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
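	/*
	 * Master mode with mode-fault detection disabled: this is a
	 * single-master driver, so MODF would only report spurious
	 * faults.
	 */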
	spi_writel(as, MR, SPI_BIT(MSTR) | SPI_BIT(MODFDIS));
	spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
	spi_writel(as, CR, SPI_BIT(SPIEN));

	/* go! */
	dev_info(&pdev->dev, "Atmel SPI Controller at 0x%08lx (irq %d)\n",
			(unsigned long)regs->start, irq);

	ret = spi_register_master(master);
	if (ret)
		goto out_reset_hw;

	return 0;

out_reset_hw:
	spi_writel(as, CR, SPI_BIT(SWRST));
	spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
	clk_disable(clk);
	free_irq(irq, master);
out_unmap_regs:
	iounmap(as->regs);
out_free_buffer:
	dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer,
			as->buffer_dma);
out_free:
	clk_put(clk);
	spi_master_put(master);
	return ret;
}

static int atmel_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct atmel_spi *as = spi_master_get_devdata(master);
	struct spi_message *msg;

	/* reset the hardware and block queue progress */
	spin_lock_irq(&as->lock);
	as->stopping = 1;
	spi_writel(as, CR, SPI_BIT(SWRST));
	spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
	spi_readl(as, SR);
	spin_unlock_irq(&as->lock);

	/* Terminate remaining queued transfers */
	list_for_each_entry(msg, &as->queue, queue) {
		/* REVISIT unmapping the dma is a NOP on ARM and AVR32
		 * but we shouldn't depend on that...
		 */
		msg->status = -ESHUTDOWN;
		msg->complete(msg->context);
	}

	dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer,
			as->buffer_dma);

	clk_disable(as->clk);
	clk_put(as->clk);
	free_irq(as->irq, master);
	iounmap(as->regs);

	spi_unregister_master(master);

	return 0;
}

#ifdef CONFIG_PM

static int atmel_spi_suspend(struct platform_device *pdev, pm_message_t mesg)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct atmel_spi *as = spi_master_get_devdata(master);

	clk_disable(as->clk);
	return 0;
}

static int atmel_spi_resume(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct atmel_spi *as = spi_master_get_devdata(master);

	clk_enable(as->clk);
	return 0;
}

#else
#define atmel_spi_suspend	NULL
#define atmel_spi_resume	NULL
#endif


static struct platform_driver atmel_spi_driver = {
	.driver = {
		.name	= "atmel_spi",
		.owner	= THIS_MODULE,
	},
	.suspend	= atmel_spi_suspend,
	.resume		= atmel_spi_resume,
	.probe		= atmel_spi_probe,
	.remove		= atmel_spi_remove,
};
module_platform_driver(atmel_spi_driver);

MODULE_DESCRIPTION("Atmel AT32/AT91 SPI Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:atmel_spi");