// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for Broadcom BCM2835 SPI Controllers
 *
 * Copyright (C) 2012 Chris Boot
 * Copyright (C) 2013 Stephen Warren
 * Copyright (C) 2015 Martin Sperl
 *
 * This driver is inspired by:
 * spi-ath79.c, Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org>
 * spi-atmel.c, Copyright (C) 2006 Atmel Corporation
 */

#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/machine.h> /* FIXME: using GPIO lookup tables */
#include <linux/of_irq.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>

/* SPI register offsets */
#define BCM2835_SPI_CS			0x00
#define BCM2835_SPI_FIFO		0x04
#define BCM2835_SPI_CLK			0x08
#define BCM2835_SPI_DLEN		0x0c
#define BCM2835_SPI_LTOH		0x10
#define BCM2835_SPI_DC			0x14

/* Bitfields in CS */
#define BCM2835_SPI_CS_LEN_LONG		0x02000000
#define BCM2835_SPI_CS_DMA_LEN		0x01000000
#define BCM2835_SPI_CS_CSPOL2		0x00800000
#define BCM2835_SPI_CS_CSPOL1		0x00400000
#define BCM2835_SPI_CS_CSPOL0		0x00200000
#define BCM2835_SPI_CS_RXF		0x00100000
#define BCM2835_SPI_CS_RXR		0x00080000
#define BCM2835_SPI_CS_TXD		0x00040000
#define BCM2835_SPI_CS_RXD		0x00020000
#define BCM2835_SPI_CS_DONE		0x00010000
#define BCM2835_SPI_CS_LEN		0x00002000
#define BCM2835_SPI_CS_REN		0x00001000
#define BCM2835_SPI_CS_ADCS		0x00000800
#define BCM2835_SPI_CS_INTR		0x00000400
#define BCM2835_SPI_CS_INTD		0x00000200
#define BCM2835_SPI_CS_DMAEN		0x00000100
#define BCM2835_SPI_CS_TA		0x00000080
#define BCM2835_SPI_CS_CSPOL		0x00000040
#define BCM2835_SPI_CS_CLEAR_RX		0x00000020
#define BCM2835_SPI_CS_CLEAR_TX		0x00000010
#define BCM2835_SPI_CS_CPOL		0x00000008
#define BCM2835_SPI_CS_CPHA		0x00000004
#define BCM2835_SPI_CS_CS_10		0x00000002
#define BCM2835_SPI_CS_CS_01		0x00000001

#define BCM2835_SPI_FIFO_SIZE		64
#define BCM2835_SPI_FIFO_SIZE_3_4	48
#define BCM2835_SPI_DMA_MIN_LENGTH	96
#define BCM2835_SPI_MODE_BITS	(SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
				| SPI_NO_CS | SPI_3WIRE)

#define DRV_NAME	"spi-bcm2835"

/* define polling limits */
static unsigned int polling_limit_us = 30;
module_param(polling_limit_us, uint, 0664);
MODULE_PARM_DESC(polling_limit_us,
		 "time in us to run a transfer in polling mode\n");

/**
 * struct bcm2835_spi - BCM2835 SPI controller
 * @regs: base address of register map
 * @clk: core clock, divided to calculate serial clock
 * @cs_gpio: chip-select GPIO descriptor
 * @clk_hz: core clock cached speed
 * @irq: interrupt, signals TX FIFO empty or RX FIFO ¾ full
 * @tfr: SPI transfer currently processed
 * @ctlr: SPI controller reverse lookup
 * @tx_buf: pointer whence next transmitted byte is read
 * @rx_buf: pointer where next received byte is written
 * @tx_len: remaining bytes to transmit
 * @rx_len: remaining bytes to receive
 * @tx_prologue: bytes transmitted without DMA if first TX sglist entry's
 *	length is not a multiple of 4 (to overcome hardware limitation)
 * @rx_prologue: bytes received without DMA if first RX sglist entry's
 *	length is not a multiple of 4 (to overcome hardware limitation)
 * @tx_spillover: whether @tx_prologue spills over to second TX sglist entry
 * @debugfs_dir: the debugfs directory - needed to remove debugfs when
 *	unloading the module
 * @count_transfer_polling: count of how often polling mode is used
 * @count_transfer_irq: count of how often interrupt mode is used
 * @count_transfer_irq_after_polling: count of how often we fall back to
 *	interrupt mode after starting in polling mode.
 *	These are counted as well in @count_transfer_polling and
 *	@count_transfer_irq
 * @count_transfer_dma: count how often dma mode is used
 * @target: SPI target currently selected
 *	(used by bcm2835_spi_dma_tx_done() to write @clear_rx_cs)
 * @tx_dma_active: whether a TX DMA descriptor is in progress
 * @rx_dma_active: whether a RX DMA descriptor is in progress
 *	(used by bcm2835_spi_dma_tx_done() to handle a race)
 * @fill_tx_desc: preallocated TX DMA descriptor used for RX-only transfers
 *	(cyclically copies from zero page to TX FIFO)
 * @fill_tx_addr: bus address of zero page
 */
struct bcm2835_spi {
	void __iomem *regs;
	struct clk *clk;
	struct gpio_desc *cs_gpio;
	unsigned long clk_hz;
	int irq;
	struct spi_transfer *tfr;
	struct spi_controller *ctlr;
	const u8 *tx_buf;
	u8 *rx_buf;
	int tx_len;
	int rx_len;
	int tx_prologue;
	int rx_prologue;
	unsigned int tx_spillover;

	struct dentry *debugfs_dir;
	u64 count_transfer_polling;
	u64 count_transfer_irq;
	u64 count_transfer_irq_after_polling;
	u64 count_transfer_dma;

	struct bcm2835_spidev *target;
	unsigned int tx_dma_active;
	unsigned int rx_dma_active;
	struct dma_async_tx_descriptor *fill_tx_desc;
	dma_addr_t fill_tx_addr;
};

/**
 * struct bcm2835_spidev - BCM2835 SPI target
 * @prepare_cs: precalculated CS register value for ->prepare_message()
 *	(uses target-specific clock polarity and phase settings)
 * @clear_rx_desc: preallocated RX DMA descriptor used for TX-only transfers
 *	(cyclically clears RX FIFO by writing @clear_rx_cs to CS register)
 * @clear_rx_addr: bus address of @clear_rx_cs
 * @clear_rx_cs: precalculated CS register value to clear RX FIFO
 *	(uses target-specific clock polarity and phase settings)
 */
struct bcm2835_spidev {
	u32 prepare_cs;
	struct dma_async_tx_descriptor *clear_rx_desc;
	dma_addr_t clear_rx_addr;
	u32 clear_rx_cs ____cacheline_aligned;
};

#if defined(CONFIG_DEBUG_FS)
static void bcm2835_debugfs_create(struct bcm2835_spi *bs,
				   const char *dname)
{
	char name[64];
	struct dentry *dir;

	/* get full name */
	snprintf(name, sizeof(name), "spi-bcm2835-%s", dname);

	/* the base directory */
	dir = debugfs_create_dir(name, NULL);
	bs->debugfs_dir = dir;

	/* the counters */
	debugfs_create_u64("count_transfer_polling", 0444, dir,
			   &bs->count_transfer_polling);
	debugfs_create_u64("count_transfer_irq", 0444, dir,
			   &bs->count_transfer_irq);
	debugfs_create_u64("count_transfer_irq_after_polling", 0444, dir,
			   &bs->count_transfer_irq_after_polling);
	debugfs_create_u64("count_transfer_dma", 0444, dir,
			   &bs->count_transfer_dma);
}

static void bcm2835_debugfs_remove(struct bcm2835_spi *bs)
{
	debugfs_remove_recursive(bs->debugfs_dir);
	bs->debugfs_dir = NULL;
}
#else
static void bcm2835_debugfs_create(struct bcm2835_spi *bs,
				   const char *dname)
{
}

static void bcm2835_debugfs_remove(struct bcm2835_spi *bs)
{
}
#endif /* CONFIG_DEBUG_FS */

static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned int reg)
{
	return readl(bs->regs + reg);
}

static inline void bcm2835_wr(struct bcm2835_spi *bs, unsigned int reg, u32 val)
{
	writel(val, bs->regs + reg);
}

static inline void bcm2835_rd_fifo(struct bcm2835_spi *bs)
{
	u8 byte;

	while ((bs->rx_len) &&
	       (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_RXD)) {
		byte = bcm2835_rd(bs, BCM2835_SPI_FIFO);
		if (bs->rx_buf)
			*bs->rx_buf++ = byte;
		bs->rx_len--;
	}
}

static inline void bcm2835_wr_fifo(struct bcm2835_spi *bs)
{
	u8 byte;

	while ((bs->tx_len) &&
	       (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_TXD)) {
		byte = bs->tx_buf ? *bs->tx_buf++ : 0;
		bcm2835_wr(bs, BCM2835_SPI_FIFO, byte);
		bs->tx_len--;
	}
}

/**
 * bcm2835_rd_fifo_count() - blindly read exactly @count bytes from RX FIFO
 * @bs: BCM2835 SPI controller
 * @count: bytes to read from RX FIFO
 *
 * The caller must ensure that @bs->rx_len is greater than or equal to @count,
 * that the RX FIFO contains at least @count bytes and that the DMA Enable flag
 * in the CS register is set (such that a read from the FIFO register receives
 * 32-bit instead of just 8-bit). Moreover @bs->rx_buf must not be %NULL.
 */
static inline void bcm2835_rd_fifo_count(struct bcm2835_spi *bs, int count)
{
	u32 val;
	int len;

	bs->rx_len -= count;

	do {
		val = bcm2835_rd(bs, BCM2835_SPI_FIFO);
		len = min(count, 4);
		memcpy(bs->rx_buf, &val, len);
		bs->rx_buf += len;
		count -= 4;
	} while (count > 0);
}

/**
 * bcm2835_wr_fifo_count() - blindly write exactly @count bytes to TX FIFO
 * @bs: BCM2835 SPI controller
 * @count: bytes to write to TX FIFO
 *
 * The caller must ensure that @bs->tx_len is greater than or equal to @count,
 * that the TX FIFO can accommodate @count bytes and that the DMA Enable flag
 * in the CS register is set (such that a write to the FIFO register transmits
 * 32-bit instead of just 8-bit).
 */
static inline void bcm2835_wr_fifo_count(struct bcm2835_spi *bs, int count)
{
	u32 val;
	int len;

	bs->tx_len -= count;

	do {
		if (bs->tx_buf) {
			len = min(count, 4);
			memcpy(&val, bs->tx_buf, len);
			bs->tx_buf += len;
		} else {
			val = 0;
		}
		bcm2835_wr(bs, BCM2835_SPI_FIFO, val);
		count -= 4;
	} while (count > 0);
}

/**
 * bcm2835_wait_tx_fifo_empty() - busy-wait for TX FIFO to empty
 * @bs: BCM2835 SPI controller
 *
 * The caller must ensure that the RX FIFO can accommodate as many bytes
 * as have been written to the TX FIFO: Transmission is halted once the
 * RX FIFO is full, causing this function to spin forever.
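 *
 * Completion is detected via the DONE flag in the CS register, which the
 * hardware sets once TA is asserted and the TX FIFO has drained.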
 */
static inline void bcm2835_wait_tx_fifo_empty(struct bcm2835_spi *bs)
{
	while (!(bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE))
		cpu_relax();
}

/**
 * bcm2835_rd_fifo_blind() - blindly read up to @count bytes from RX FIFO
 * @bs: BCM2835 SPI controller
 * @count: bytes available for reading in RX FIFO
 */
static inline void bcm2835_rd_fifo_blind(struct bcm2835_spi *bs, int count)
{
	u8 val;

	count = min(count, bs->rx_len);
	bs->rx_len -= count;

	do {
		val = bcm2835_rd(bs, BCM2835_SPI_FIFO);
		if (bs->rx_buf)
			*bs->rx_buf++ = val;
	} while (--count);
}

/**
 * bcm2835_wr_fifo_blind() - blindly write up to @count bytes to TX FIFO
 * @bs: BCM2835 SPI controller
 * @count: bytes available for writing in TX FIFO
 */
static inline void bcm2835_wr_fifo_blind(struct bcm2835_spi *bs, int count)
{
	u8 val;

	count = min(count, bs->tx_len);
	bs->tx_len -= count;

	do {
		val = bs->tx_buf ? *bs->tx_buf++ : 0;
		bcm2835_wr(bs, BCM2835_SPI_FIFO, val);
	} while (--count);
}

static void bcm2835_spi_reset_hw(struct bcm2835_spi *bs)
{
	u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);

	/* Disable SPI interrupts and transfer */
	cs &= ~(BCM2835_SPI_CS_INTR |
		BCM2835_SPI_CS_INTD |
		BCM2835_SPI_CS_DMAEN |
		BCM2835_SPI_CS_TA);
	/*
	 * Transmission sometimes breaks unless the DONE bit is written at the
	 * end of every transfer. The spec says it's a RO bit. Either the
	 * spec is wrong and the bit is actually of type RW1C, or it's a
	 * hardware erratum.
	 */
	cs |= BCM2835_SPI_CS_DONE;
	/* and reset RX/TX FIFOS */
	cs |= BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX;

	/* and reset the SPI_HW */
	bcm2835_wr(bs, BCM2835_SPI_CS, cs);
	/* as well as DLEN */
	bcm2835_wr(bs, BCM2835_SPI_DLEN, 0);
}

static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
{
	struct bcm2835_spi *bs = dev_id;
	u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);

	/* Bail out early if interrupts are not enabled */
	if (!(cs & BCM2835_SPI_CS_INTR))
		return IRQ_NONE;

	/*
	 * An interrupt is signaled either if DONE is set (TX FIFO empty)
	 * or if RXR is set (RX FIFO >= ¾ full).
	 */
	if (cs & BCM2835_SPI_CS_RXF)
		bcm2835_rd_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
	else if (cs & BCM2835_SPI_CS_RXR)
		bcm2835_rd_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE_3_4);

	if (bs->tx_len && cs & BCM2835_SPI_CS_DONE)
		bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);

	/* Read as many bytes as possible from FIFO */
	bcm2835_rd_fifo(bs);
	/* Write as many bytes as possible to FIFO */
	bcm2835_wr_fifo(bs);

	if (!bs->rx_len) {
		/* Transfer complete - reset SPI HW */
		bcm2835_spi_reset_hw(bs);
		/* wake up the framework */
		spi_finalize_current_transfer(bs->ctlr);
	}

	return IRQ_HANDLED;
}

static int bcm2835_spi_transfer_one_irq(struct spi_controller *ctlr,
					struct spi_device *spi,
					struct spi_transfer *tfr,
					u32 cs, bool fifo_empty)
{
	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);

	/* update usage statistics */
	bs->count_transfer_irq++;

	/*
	 * Enable HW block, but with interrupts still disabled.
	 * Otherwise the empty TX FIFO would immediately trigger an interrupt.
	 */
	bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA);

	/* fill TX FIFO as much as possible */
	if (fifo_empty)
		bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
	bcm2835_wr_fifo(bs);

	/* enable interrupts */
	cs |= BCM2835_SPI_CS_INTR | BCM2835_SPI_CS_INTD | BCM2835_SPI_CS_TA;
	bcm2835_wr(bs, BCM2835_SPI_CS, cs);

	/* signal that we need to wait for completion */
	return 1;
}

/**
 * bcm2835_spi_transfer_prologue() - transfer first few bytes without DMA
 * @ctlr: SPI host controller
 * @tfr: SPI transfer
 * @bs: BCM2835 SPI controller
 * @cs: CS register
 *
 * A limitation in DMA mode is that the FIFO must be accessed in 4 byte chunks.
 * Only the final write access is permitted to transmit less than 4 bytes, the
 * SPI controller deduces its intended size from the DLEN register.
 *
 * If a TX or RX sglist contains multiple entries, one per page, and the first
 * entry starts in the middle of a page, that first entry's length may not be
 * a multiple of 4. Subsequent entries are fine because they span an entire
 * page, hence do have a length that's a multiple of 4.
 *
 * This cannot happen with kmalloc'ed buffers (which is what most clients use)
 * because they are contiguous in physical memory and therefore not split on
 * page boundaries by spi_map_buf(). But it *can* happen with vmalloc'ed
 * buffers.
 *
 * The DMA engine is incapable of combining sglist entries into a continuous
 * stream of 4 byte chunks, it treats every entry separately: A TX entry is
 * rounded up to a multiple of 4 bytes by transmitting surplus bytes, an RX
 * entry is rounded up by throwing away received bytes.
 *
 * Overcome this limitation by transferring the first few bytes without DMA:
 * E.g. if the first TX sglist entry's length is 23 and the first RX's is 42,
 * write 3 bytes to the TX FIFO but read only 2 bytes from the RX FIFO.
 * The residue of 1 byte in the RX FIFO is picked up by DMA. Together with
 * the rest of the first RX sglist entry it makes up a multiple of 4 bytes.
 *
 * Should the RX prologue be larger, say, 3 vis-à-vis a TX prologue of 1,
 * write 1 + 4 = 5 bytes to the TX FIFO and read 3 bytes from the RX FIFO.
 * Caution, the additional 4 bytes spill over to the second TX sglist entry
 * if the length of the first is *exactly* 1.
 *
 * At most 6 bytes are written and at most 3 bytes read. Do we know the
 * transfer has this many bytes? Yes, see BCM2835_SPI_DMA_MIN_LENGTH.
 *
 * The FIFO is normally accessed with 8-bit width by the CPU and 32-bit width
 * by the DMA engine. Toggling the DMA Enable flag in the CS register switches
 * the width but also garbles the FIFO's contents. The prologue must therefore
 * be transmitted in 32-bit width to ensure that the following DMA transfer can
 * pick up the residue in the RX FIFO in ungarbled form.
 */
static void bcm2835_spi_transfer_prologue(struct spi_controller *ctlr,
					  struct spi_transfer *tfr,
					  struct bcm2835_spi *bs,
					  u32 cs)
{
	int tx_remaining;

	bs->tfr = tfr;
	bs->tx_prologue = 0;
	bs->rx_prologue = 0;
	bs->tx_spillover = false;

	if (bs->tx_buf && !sg_is_last(&tfr->tx_sg.sgl[0]))
		bs->tx_prologue = sg_dma_len(&tfr->tx_sg.sgl[0]) & 3;

	if (bs->rx_buf && !sg_is_last(&tfr->rx_sg.sgl[0])) {
		bs->rx_prologue = sg_dma_len(&tfr->rx_sg.sgl[0]) & 3;

		if (bs->rx_prologue > bs->tx_prologue) {
			if (!bs->tx_buf || sg_is_last(&tfr->tx_sg.sgl[0])) {
				bs->tx_prologue = bs->rx_prologue;
			} else {
				bs->tx_prologue += 4;
				bs->tx_spillover =
					!(sg_dma_len(&tfr->tx_sg.sgl[0]) & ~3);
			}
		}
	}

	/* rx_prologue > 0 implies tx_prologue > 0, so check only the latter */
	if (!bs->tx_prologue)
		return;

	/* Write and read RX prologue. Adjust first entry in RX sglist. */
	if (bs->rx_prologue) {
		bcm2835_wr(bs, BCM2835_SPI_DLEN, bs->rx_prologue);
		bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA
						  | BCM2835_SPI_CS_DMAEN);
		bcm2835_wr_fifo_count(bs, bs->rx_prologue);
		bcm2835_wait_tx_fifo_empty(bs);
		bcm2835_rd_fifo_count(bs, bs->rx_prologue);
		bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_CLEAR_RX
						  | BCM2835_SPI_CS_CLEAR_TX
						  | BCM2835_SPI_CS_DONE);

		dma_sync_single_for_device(ctlr->dma_rx->device->dev,
					   sg_dma_address(&tfr->rx_sg.sgl[0]),
					   bs->rx_prologue, DMA_FROM_DEVICE);

		sg_dma_address(&tfr->rx_sg.sgl[0]) += bs->rx_prologue;
		sg_dma_len(&tfr->rx_sg.sgl[0]) -= bs->rx_prologue;
	}

	if (!bs->tx_buf)
		return;

	/*
	 * Write remaining TX prologue. Adjust first entry in TX sglist.
	 * Also adjust second entry if prologue spills over to it.
	 */
	tx_remaining = bs->tx_prologue - bs->rx_prologue;
	if (tx_remaining) {
		bcm2835_wr(bs, BCM2835_SPI_DLEN, tx_remaining);
		bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA
						  | BCM2835_SPI_CS_DMAEN);
		bcm2835_wr_fifo_count(bs, tx_remaining);
		bcm2835_wait_tx_fifo_empty(bs);
		bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_CLEAR_TX
						  | BCM2835_SPI_CS_DONE);
	}

	if (likely(!bs->tx_spillover)) {
		sg_dma_address(&tfr->tx_sg.sgl[0]) += bs->tx_prologue;
		sg_dma_len(&tfr->tx_sg.sgl[0]) -= bs->tx_prologue;
	} else {
		sg_dma_len(&tfr->tx_sg.sgl[0]) = 0;
		sg_dma_address(&tfr->tx_sg.sgl[1]) += 4;
		sg_dma_len(&tfr->tx_sg.sgl[1]) -= 4;
	}
}

/**
 * bcm2835_spi_undo_prologue() - reconstruct original sglist state
 * @bs: BCM2835 SPI controller
 *
 * Undo changes which were made to an SPI transfer's sglist when transmitting
 * the prologue. This is necessary to ensure the same memory ranges are
 * unmapped that were originally mapped.
 */
static void bcm2835_spi_undo_prologue(struct bcm2835_spi *bs)
{
	struct spi_transfer *tfr = bs->tfr;

	if (!bs->tx_prologue)
		return;

	if (bs->rx_prologue) {
		sg_dma_address(&tfr->rx_sg.sgl[0]) -= bs->rx_prologue;
		sg_dma_len(&tfr->rx_sg.sgl[0]) += bs->rx_prologue;
	}

	if (!bs->tx_buf)
		goto out;

	if (likely(!bs->tx_spillover)) {
		sg_dma_address(&tfr->tx_sg.sgl[0]) -= bs->tx_prologue;
		sg_dma_len(&tfr->tx_sg.sgl[0]) += bs->tx_prologue;
	} else {
		sg_dma_len(&tfr->tx_sg.sgl[0]) = bs->tx_prologue - 4;
		sg_dma_address(&tfr->tx_sg.sgl[1]) -= 4;
		sg_dma_len(&tfr->tx_sg.sgl[1]) += 4;
	}
out:
	bs->tx_prologue = 0;
}

/**
 * bcm2835_spi_dma_rx_done() - callback for DMA RX channel
 * @data: SPI host controller
 *
 * Used for bidirectional and RX-only transfers.
 */
static void bcm2835_spi_dma_rx_done(void *data)
{
	struct spi_controller *ctlr = data;
	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);

	/* terminate tx-dma as we do not have an irq for it:
	 * by the time the rx-dma terminates and this callback is
	 * called, the tx-dma must already have finished - we cannot
	 * get into this situation otherwise...
	 */
	dmaengine_terminate_async(ctlr->dma_tx);
	bs->tx_dma_active = false;
	bs->rx_dma_active = false;
	bcm2835_spi_undo_prologue(bs);

	/* reset fifo and HW */
	bcm2835_spi_reset_hw(bs);

	/* and mark as completed */
	spi_finalize_current_transfer(ctlr);
}

/**
 * bcm2835_spi_dma_tx_done() - callback for DMA TX channel
 * @data: SPI host controller
 *
 * Used for TX-only transfers.
 */
static void bcm2835_spi_dma_tx_done(void *data)
{
	struct spi_controller *ctlr = data;
	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);

	/* busy-wait for TX FIFO to empty */
	while (!(bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE))
		bcm2835_wr(bs, BCM2835_SPI_CS, bs->target->clear_rx_cs);

	bs->tx_dma_active = false;
	smp_wmb();

	/*
	 * In case of a very short transfer, RX DMA may not have been
	 * issued yet. The onus is then on bcm2835_spi_transfer_one_dma()
	 * to terminate it immediately after issuing.
	 */
	if (cmpxchg(&bs->rx_dma_active, true, false))
		dmaengine_terminate_async(ctlr->dma_rx);

	bcm2835_spi_undo_prologue(bs);
	bcm2835_spi_reset_hw(bs);
	spi_finalize_current_transfer(ctlr);
}

/**
 * bcm2835_spi_prepare_sg() - prepare and submit DMA descriptor for sglist
 * @ctlr: SPI host controller
 * @tfr: SPI transfer
 * @bs: BCM2835 SPI controller
 * @target: BCM2835 SPI target
 * @is_tx: whether to submit DMA descriptor for TX or RX sglist
 *
 * Prepare and submit a DMA descriptor for the TX or RX sglist of @tfr.
 * Return 0 on success or a negative error number.
 */
static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
				  struct spi_transfer *tfr,
				  struct bcm2835_spi *bs,
				  struct bcm2835_spidev *target,
				  bool is_tx)
{
	struct dma_chan *chan;
	struct scatterlist *sgl;
	unsigned int nents;
	enum dma_transfer_direction dir;
	unsigned long flags;

	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	if (is_tx) {
		dir = DMA_MEM_TO_DEV;
		chan = ctlr->dma_tx;
		nents = tfr->tx_sg.nents;
		sgl = tfr->tx_sg.sgl;
		flags = tfr->rx_buf ? 0 : DMA_PREP_INTERRUPT;
	} else {
		dir = DMA_DEV_TO_MEM;
		chan = ctlr->dma_rx;
		nents = tfr->rx_sg.nents;
		sgl = tfr->rx_sg.sgl;
		flags = DMA_PREP_INTERRUPT;
	}
	/* prepare the channel */
	desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
	if (!desc)
		return -EINVAL;

	/*
	 * Completion is signaled by the RX channel for bidirectional and
	 * RX-only transfers; else by the TX channel for TX-only transfers.
	 */
	if (!is_tx) {
		desc->callback = bcm2835_spi_dma_rx_done;
		desc->callback_param = ctlr;
	} else if (!tfr->rx_buf) {
		desc->callback = bcm2835_spi_dma_tx_done;
		desc->callback_param = ctlr;
		bs->target = target;
	}

	/* submit it to DMA-engine */
	cookie = dmaengine_submit(desc);

	return dma_submit_error(cookie);
}

/**
 * bcm2835_spi_transfer_one_dma() - perform SPI transfer using DMA engine
 * @ctlr: SPI host controller
 * @tfr: SPI transfer
 * @target: BCM2835 SPI target
 * @cs: CS register
 *
 * For *bidirectional* transfers (both tx_buf and rx_buf are non-%NULL), set up
 * the TX and RX DMA channel to copy between memory and FIFO register.
 *
 * For *TX-only* transfers (rx_buf is %NULL), copying the RX FIFO's contents to
 * memory is pointless. However not reading the RX FIFO isn't an option either
 * because transmission is halted once it's full. As a workaround, cyclically
 * clear the RX FIFO by setting the CLEAR_RX bit in the CS register.
 *
 * The CS register value is precalculated in bcm2835_spi_setup(). Normally
 * this is called only once, on target registration. A DMA descriptor to write
 * this value is preallocated in bcm2835_dma_init(). All that's left to do
 * when performing a TX-only transfer is to submit this descriptor to the RX
 * DMA channel. Latency is thereby minimized. The descriptor does not
 * generate any interrupts while running. It must be terminated once the
 * TX DMA channel is done.
 *
 * Clearing the RX FIFO is paced by the DREQ signal. The signal is asserted
 * when the RX FIFO becomes half full, i.e. 32 bytes. (Tuneable with the DC
 * register.) Reading 32 bytes from the RX FIFO would normally require 8 bus
 * accesses, whereas clearing it requires only 1 bus access. So an 8-fold
 * reduction in bus traffic and thus energy consumption is achieved.
 *
 * For *RX-only* transfers (tx_buf is %NULL), fill the TX FIFO by cyclically
 * copying from the zero page. The DMA descriptor to do this is preallocated
 * in bcm2835_dma_init(). It must be terminated once the RX DMA channel is
 * done and can then be reused.
 *
 * The BCM2835 DMA driver autodetects when a transaction copies from the zero
 * page and utilizes the DMA controller's ability to synthesize zeroes instead
 * of copying them from memory. This reduces traffic on the memory bus. The
 * feature is not available on so-called "lite" channels, but normally TX DMA
 * is backed by a full-featured channel.
 *
 * Zero-filling the TX FIFO is paced by the DREQ signal. Unfortunately the
 * BCM2835 SPI controller continues to assert DREQ even after the DLEN register
 * has been counted down to zero (hardware erratum). Thus, when the transfer
 * has finished, the DMA engine zero-fills the TX FIFO until it is half full.
 * (Tuneable with the DC register.) So up to 9 gratuitous bus accesses are
 * performed at the end of an RX-only transfer.
 */
static int bcm2835_spi_transfer_one_dma(struct spi_controller *ctlr,
					struct spi_transfer *tfr,
					struct bcm2835_spidev *target,
					u32 cs)
{
	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
	dma_cookie_t cookie;
	int ret;

	/* update usage statistics */
	bs->count_transfer_dma++;

	/*
	 * Transfer first few bytes without DMA if length of first TX or RX
	 * sglist entry is not a multiple of 4 bytes (hardware limitation).
	 */
	bcm2835_spi_transfer_prologue(ctlr, tfr, bs, cs);

	/* setup tx-DMA */
	if (bs->tx_buf) {
		ret = bcm2835_spi_prepare_sg(ctlr, tfr, bs, target, true);
	} else {
		cookie = dmaengine_submit(bs->fill_tx_desc);
		ret = dma_submit_error(cookie);
	}
	if (ret)
		goto err_reset_hw;

	/* set the DMA length */
	bcm2835_wr(bs, BCM2835_SPI_DLEN, bs->tx_len);

	/* start the HW */
	bcm2835_wr(bs, BCM2835_SPI_CS,
		   cs | BCM2835_SPI_CS_TA | BCM2835_SPI_CS_DMAEN);

	bs->tx_dma_active = true;
	smp_wmb();

	/* start TX early */
	dma_async_issue_pending(ctlr->dma_tx);

	/* set up rx-DMA late - to run transfers while the mapping of the
	 * rx buffers still takes place; this saves 10us or more.
	 */
	if (bs->rx_buf) {
		ret = bcm2835_spi_prepare_sg(ctlr, tfr, bs, target, false);
	} else {
		cookie = dmaengine_submit(target->clear_rx_desc);
		ret = dma_submit_error(cookie);
	}
	if (ret) {
		/* need to reset on errors */
		dmaengine_terminate_sync(ctlr->dma_tx);
		bs->tx_dma_active = false;
		goto err_reset_hw;
	}

	/* start rx dma late */
	dma_async_issue_pending(ctlr->dma_rx);
	bs->rx_dma_active = true;
	smp_mb();

	/*
	 * In case of a very short TX-only transfer, bcm2835_spi_dma_tx_done()
	 * may run before RX DMA is issued. Terminate RX DMA if so.
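	 * The cmpxchg() on rx_dma_active ensures that only one of the two
	 * paths - this one or the TX done callback - actually terminates
	 * the RX descriptor.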
	 */
	if (!bs->rx_buf && !bs->tx_dma_active &&
	    cmpxchg(&bs->rx_dma_active, true, false)) {
		dmaengine_terminate_async(ctlr->dma_rx);
		bcm2835_spi_reset_hw(bs);
	}

	/* wait for wakeup in framework */
	return 1;

err_reset_hw:
	bcm2835_spi_reset_hw(bs);
	bcm2835_spi_undo_prologue(bs);
	return ret;
}

static bool bcm2835_spi_can_dma(struct spi_controller *ctlr,
				struct spi_device *spi,
				struct spi_transfer *tfr)
{
	/* we start DMA efforts only on bigger transfers */
	if (tfr->len < BCM2835_SPI_DMA_MIN_LENGTH)
		return false;

	/* return OK */
	return true;
}

static void bcm2835_dma_release(struct spi_controller *ctlr,
				struct bcm2835_spi *bs)
{
	if (ctlr->dma_tx) {
		dmaengine_terminate_sync(ctlr->dma_tx);

		if (bs->fill_tx_desc)
			dmaengine_desc_free(bs->fill_tx_desc);

		if (bs->fill_tx_addr)
			dma_unmap_page_attrs(ctlr->dma_tx->device->dev,
					     bs->fill_tx_addr, sizeof(u32),
					     DMA_TO_DEVICE,
					     DMA_ATTR_SKIP_CPU_SYNC);

		dma_release_channel(ctlr->dma_tx);
		ctlr->dma_tx = NULL;
	}

	if (ctlr->dma_rx) {
		dmaengine_terminate_sync(ctlr->dma_rx);
		dma_release_channel(ctlr->dma_rx);
		ctlr->dma_rx = NULL;
	}
}

static int bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
			    struct bcm2835_spi *bs)
{
	struct dma_slave_config slave_config;
	const __be32 *addr;
	dma_addr_t dma_reg_base;
	int ret;

	/* base address in dma-space */
	addr = of_get_address(ctlr->dev.of_node, 0, NULL, NULL);
	if (!addr) {
		dev_err(dev, "could not get DMA-register address - not using dma mode\n");
		/* Fall back to interrupt mode */
		return 0;
	}
	dma_reg_base = be32_to_cpup(addr);

	/* get tx/rx dma */
	ctlr->dma_tx = dma_request_chan(dev, "tx");
	if (IS_ERR(ctlr->dma_tx)) {
		ret = dev_err_probe(dev, PTR_ERR(ctlr->dma_tx),
				    "no tx-dma configuration found - not using dma mode\n");
		ctlr->dma_tx = NULL;
		goto err;
	}
	ctlr->dma_rx = dma_request_chan(dev, "rx");
	if (IS_ERR(ctlr->dma_rx)) {
		ret = dev_err_probe(dev, PTR_ERR(ctlr->dma_rx),
				    "no rx-dma configuration found - not using dma mode\n");
		ctlr->dma_rx = NULL;
		goto err_release;
	}

	/*
	 * The TX DMA channel either copies a transfer's TX buffer to the FIFO
	 * or, in case of an RX-only transfer, cyclically copies from the zero
	 * page to the FIFO using a preallocated, reusable descriptor.
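	 * Only the destination side of the slave config below is relevant
	 * for this mem-to-dev direction; the source is taken from each
	 * descriptor's scatterlist or from the zero page mapping.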
	 */
	slave_config.dst_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
	slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	ret = dmaengine_slave_config(ctlr->dma_tx, &slave_config);
	if (ret)
		goto err_config;

	bs->fill_tx_addr = dma_map_page_attrs(ctlr->dma_tx->device->dev,
					      ZERO_PAGE(0), 0, sizeof(u32),
					      DMA_TO_DEVICE,
					      DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctlr->dma_tx->device->dev, bs->fill_tx_addr)) {
		dev_err(dev, "cannot map zero page - not using DMA mode\n");
		bs->fill_tx_addr = 0;
		ret = -ENOMEM;
		goto err_release;
	}

	bs->fill_tx_desc = dmaengine_prep_dma_cyclic(ctlr->dma_tx,
						     bs->fill_tx_addr,
						     sizeof(u32), 0,
						     DMA_MEM_TO_DEV, 0);
	if (!bs->fill_tx_desc) {
		dev_err(dev, "cannot prepare fill_tx_desc - not using DMA mode\n");
		ret = -ENOMEM;
		goto err_release;
	}

	ret = dmaengine_desc_set_reuse(bs->fill_tx_desc);
	if (ret) {
		dev_err(dev, "cannot reuse fill_tx_desc - not using DMA mode\n");
		goto err_release;
	}

	/*
	 * The RX DMA channel is used bidirectionally: It either reads the
	 * RX FIFO or, in case of a TX-only transfer, cyclically writes a
	 * precalculated value to the CS register to clear the RX FIFO.
	 */
	slave_config.src_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
	slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	slave_config.dst_addr = (u32)(dma_reg_base + BCM2835_SPI_CS);
	slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	ret = dmaengine_slave_config(ctlr->dma_rx, &slave_config);
	if (ret)
		goto err_config;

	/* all went well, so set can_dma */
	ctlr->can_dma = bcm2835_spi_can_dma;

	return 0;

err_config:
	dev_err(dev, "issue configuring dma: %d - not using DMA mode\n",
		ret);
err_release:
	bcm2835_dma_release(ctlr, bs);
err:
	/*
	 * Only report error for deferred probing, otherwise fall back to
	 * interrupt mode
	 */
	if (ret != -EPROBE_DEFER)
		ret = 0;

	return ret;
}

static int bcm2835_spi_transfer_one_poll(struct spi_controller *ctlr,
					 struct spi_device *spi,
					 struct spi_transfer *tfr,
					 u32 cs)
{
	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
	unsigned long timeout;

	/* update usage statistics */
	bs->count_transfer_polling++;

	/* enable HW block without interrupts */
	bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA);

	/* fill in the fifo before timeout calculations
	 * if we are interrupted here, then the data is
	 * getting transferred by the HW while we are interrupted
	 */
	bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);

	/* set the timeout to at least 2 jiffies */
	timeout = jiffies + 2 + HZ * polling_limit_us / 1000000;

	/* loop until finished the transfer */
	while (bs->rx_len) {
		/* fill in tx fifo with remaining data */
		bcm2835_wr_fifo(bs);

		/* read from fifo as much as possible */
		bcm2835_rd_fifo(bs);

		/* if there is still data pending to read
		 * then check the timeout
		 */
		if (bs->rx_len && time_after(jiffies, timeout)) {
			dev_dbg_ratelimited(&spi->dev,
					    "timeout period reached: jiffies: %lu remaining tx/rx: %d/%d - falling back to interrupt mode\n",
					    jiffies - timeout,
					    bs->tx_len, bs->rx_len);
			/* fall back to interrupt mode */

			/* update usage statistics */
			bs->count_transfer_irq_after_polling++;

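			/*
			 * The hardware stays enabled (TA remains set) and the
			 * FIFOs are not cleared, so the transfer keeps
			 * running while the interrupt path takes over -
			 * hence fifo_empty is false below.
			 */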
			return bcm2835_spi_transfer_one_irq(ctlr, spi,
							    tfr, cs, false);
		}
	}

	/* Transfer complete - reset SPI HW */
	bcm2835_spi_reset_hw(bs);
	/* and return without waiting for completion */
	return 0;
}

static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
				    struct spi_device *spi,
				    struct spi_transfer *tfr)
{
	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
	struct bcm2835_spidev *target = spi_get_ctldata(spi);
	unsigned long spi_hz, cdiv;
	unsigned long hz_per_byte, byte_limit;
	u32 cs = target->prepare_cs;

	/* set clock */
	spi_hz = tfr->speed_hz;

	if (spi_hz >= bs->clk_hz / 2) {
		cdiv = 2; /* clk_hz/2 is the fastest we can go */
	} else if (spi_hz) {
		/* CDIV must be a multiple of two */
		cdiv = DIV_ROUND_UP(bs->clk_hz, spi_hz);
		cdiv += (cdiv % 2);

		if (cdiv >= 65536)
			cdiv = 0; /* 0 is the slowest we can go */
	} else {
		cdiv = 0; /* 0 is the slowest we can go */
	}
	tfr->effective_speed_hz = cdiv ? (bs->clk_hz / cdiv) : (bs->clk_hz / 65536);
	bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);

	/* handle all the 3-wire mode */
	if (spi->mode & SPI_3WIRE && tfr->rx_buf)
		cs |= BCM2835_SPI_CS_REN;

	/* set transmit buffers and length */
	bs->tx_buf = tfr->tx_buf;
	bs->rx_buf = tfr->rx_buf;
	bs->tx_len = tfr->len;
	bs->rx_len = tfr->len;

	/* Calculate the estimated time in us the transfer runs. Note that
	 * there is 1 idle clock cycle after each byte getting transferred,
	 * so we have 9 cycles/byte. This is used to find the number of Hz
	 * per byte per polling limit. E.g., we can transfer 1 byte in 30 us
	 * per 300,000 Hz of bus clock.
	 */
	hz_per_byte = polling_limit_us ? (9 * 1000000) / polling_limit_us : 0;
	byte_limit = hz_per_byte ? tfr->effective_speed_hz / hz_per_byte : 1;

	/* run in polling mode for short transfers */
	if (tfr->len < byte_limit)
		return bcm2835_spi_transfer_one_poll(ctlr, spi, tfr, cs);

	/* run in dma mode if conditions are right
	 * Note that unlike poll or interrupt mode DMA mode does not have
	 * this 1 idle clock cycle pattern but runs the spi clock without gaps
	 */
	if (ctlr->can_dma && bcm2835_spi_can_dma(ctlr, spi, tfr))
		return bcm2835_spi_transfer_one_dma(ctlr, tfr, target, cs);

	/* run in interrupt-mode */
	return bcm2835_spi_transfer_one_irq(ctlr, spi, tfr, cs, true);
}

static int bcm2835_spi_prepare_message(struct spi_controller *ctlr,
				       struct spi_message *msg)
{
	struct spi_device *spi = msg->spi;
	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
	struct bcm2835_spidev *target = spi_get_ctldata(spi);
	int ret;

	if (ctlr->can_dma) {
		/*
		 * DMA transfers are limited to 16 bit (0 to 65535 bytes) by
		 * the SPI HW due to DLEN. Split up transfers (32-bit FIFO
		 * aligned) if the limit is exceeded.
		 */
		ret = spi_split_transfers_maxsize(ctlr, msg, 65532,
						  GFP_KERNEL | GFP_DMA);
		if (ret)
			return ret;
	}

	/*
	 * Set up clock polarity before spi_transfer_one_message() asserts
	 * chip select to avoid a gratuitous clock signal edge.
	 */
	bcm2835_wr(bs, BCM2835_SPI_CS, target->prepare_cs);

	return 0;
}

static void bcm2835_spi_handle_err(struct spi_controller *ctlr,
				   struct spi_message *msg)
{
	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);

	/* if an error occurred and we have an active dma, then terminate */
	if (ctlr->dma_tx) {
		dmaengine_terminate_sync(ctlr->dma_tx);
		bs->tx_dma_active = false;
	}
	if (ctlr->dma_rx) {
		dmaengine_terminate_sync(ctlr->dma_rx);
		bs->rx_dma_active = false;
	}
	bcm2835_spi_undo_prologue(bs);

	/* and reset */
	bcm2835_spi_reset_hw(bs);
}

static void bcm2835_spi_cleanup(struct spi_device *spi)
{
	struct bcm2835_spidev *target = spi_get_ctldata(spi);
	struct spi_controller *ctlr = spi->controller;
	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);

	if (target->clear_rx_desc)
		dmaengine_desc_free(target->clear_rx_desc);

	if (target->clear_rx_addr)
		dma_unmap_single(ctlr->dma_rx->device->dev,
				 target->clear_rx_addr,
				 sizeof(u32),
				 DMA_TO_DEVICE);

	gpiod_put(bs->cs_gpio);
	spi_set_csgpiod(spi, 0, NULL);

	kfree(target);
}

static int bcm2835_spi_setup_dma(struct spi_controller *ctlr,
				 struct spi_device *spi,
				 struct bcm2835_spi *bs,
				 struct bcm2835_spidev *target)
{
	int ret;

	if (!ctlr->dma_rx)
		return 0;

	target->clear_rx_addr = dma_map_single(ctlr->dma_rx->device->dev,
					       &target->clear_rx_cs,
					       sizeof(u32),
					       DMA_TO_DEVICE);
	if (dma_mapping_error(ctlr->dma_rx->device->dev, target->clear_rx_addr)) {
		dev_err(&spi->dev, "cannot map clear_rx_cs\n");
		target->clear_rx_addr = 0;
		return -ENOMEM;
	}

	target->clear_rx_desc = dmaengine_prep_dma_cyclic(ctlr->dma_rx,
							  target->clear_rx_addr,
							  sizeof(u32), 0,
							  DMA_MEM_TO_DEV, 0);
	if (!target->clear_rx_desc) {
		dev_err(&spi->dev, "cannot prepare clear_rx_desc\n");
		return -ENOMEM;
	}

	ret = dmaengine_desc_set_reuse(target->clear_rx_desc);
	if (ret) {
		dev_err(&spi->dev, "cannot reuse clear_rx_desc\n");
		return ret;
	}

	return 0;
}

static int bcm2835_spi_setup(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
	struct bcm2835_spidev *target = spi_get_ctldata(spi);
	struct gpiod_lookup_table *lookup __free(kfree) = NULL;
	int ret;
	u32 cs;

	if (!target) {
		target = kzalloc(ALIGN(sizeof(*target), dma_get_cache_alignment()),
				 GFP_KERNEL);
		if (!target)
			return -ENOMEM;

		spi_set_ctldata(spi, target);

		ret = bcm2835_spi_setup_dma(ctlr, spi, bs, target);
		if (ret)
			goto err_cleanup;
	}

	/*
	 * Precalculate SPI target's CS register value for ->prepare_message():
	 * The driver always uses software-controlled GPIO chip select, hence
	 * set the hardware-controlled native chip select to an invalid value
	 * to prevent it from interfering.
	 */
	cs = BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01;
	if (spi->mode & SPI_CPOL)
		cs |= BCM2835_SPI_CS_CPOL;
	if (spi->mode & SPI_CPHA)
		cs |= BCM2835_SPI_CS_CPHA;
	target->prepare_cs = cs;

	/*
	 * Precalculate SPI target's CS register value to clear RX FIFO
	 * in case of a TX-only DMA transfer.
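	 * The value is fetched from memory by the reusable clear_rx_desc
	 * descriptor, so it must be synced out to the device after being
	 * updated here.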
	 */
	if (ctlr->dma_rx) {
		target->clear_rx_cs = cs | BCM2835_SPI_CS_TA |
				      BCM2835_SPI_CS_DMAEN |
				      BCM2835_SPI_CS_CLEAR_RX;
		dma_sync_single_for_device(ctlr->dma_rx->device->dev,
					   target->clear_rx_addr,
					   sizeof(u32),
					   DMA_TO_DEVICE);
	}

	/*
	 * sanity checking the native-chipselects
	 */
	if (spi->mode & SPI_NO_CS)
		return 0;
	/*
	 * The SPI core has successfully requested the CS GPIO line from the
	 * device tree, so we are done.
	 */
	if (spi_get_csgpiod(spi, 0))
		return 0;
	if (spi_get_chipselect(spi, 0) > 1) {
		/* error in the case of native CS requested with CS > 1
		 * officially there is a CS2, but it is not documented
		 * which GPIO is connected with that...
		 */
		dev_err(&spi->dev,
			"setup: only two native chip-selects are supported\n");
		ret = -EINVAL;
		goto err_cleanup;
	}

	/*
	 * TODO: The code below is a slightly better alternative to the utter
	 * abuse of the GPIO API that I found here before. It creates a
	 * temporary lookup table, assigns it to the SPI device, gets the GPIO
	 * descriptor and then releases the lookup table.
	 *
	 * More on the problem that it addresses:
	 * https://www.spinics.net/lists/linux-gpio/msg36218.html
	 */
	lookup = kzalloc(struct_size(lookup, table, 2), GFP_KERNEL);
	if (!lookup) {
		ret = -ENOMEM;
		goto err_cleanup;
	}

	lookup->dev_id = dev_name(&spi->dev);
	lookup->table[0] = GPIO_LOOKUP("pinctrl-bcm2835",
				       8 - (spi_get_chipselect(spi, 0)),
				       "cs", GPIO_LOOKUP_FLAGS_DEFAULT);

	gpiod_add_lookup_table(lookup);

	bs->cs_gpio = gpiod_get(&spi->dev, "cs", GPIOD_OUT_LOW);
	gpiod_remove_lookup_table(lookup);
	if (IS_ERR(bs->cs_gpio)) {
		ret = PTR_ERR(bs->cs_gpio);
		goto err_cleanup;
	}

	spi_set_csgpiod(spi, 0, bs->cs_gpio);

	/* and set up the "mode" and level */
	dev_info(&spi->dev, "setting up native-CS%i to use GPIO\n",
		 spi_get_chipselect(spi, 0));

	return 0;

err_cleanup:
	bcm2835_spi_cleanup(spi);
	return ret;
}

static int bcm2835_spi_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	struct bcm2835_spi *bs;
	int err;

	ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(*bs));
	if (!ctlr)
		return -ENOMEM;

	platform_set_drvdata(pdev, ctlr);

	ctlr->use_gpio_descriptors = true;
	ctlr->mode_bits = BCM2835_SPI_MODE_BITS;
	ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
	ctlr->num_chipselect = 3;
	ctlr->setup = bcm2835_spi_setup;
	ctlr->cleanup = bcm2835_spi_cleanup;
	ctlr->transfer_one = bcm2835_spi_transfer_one;
	ctlr->handle_err = bcm2835_spi_handle_err;
	ctlr->prepare_message = bcm2835_spi_prepare_message;
	ctlr->dev.of_node = pdev->dev.of_node;

	bs = spi_controller_get_devdata(ctlr);
	bs->ctlr = ctlr;

	bs->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(bs->regs))
		return PTR_ERR(bs->regs);

	bs->clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(bs->clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(bs->clk),
				     "could not get clk\n");

	ctlr->max_speed_hz = clk_get_rate(bs->clk) / 2;

	bs->irq = platform_get_irq(pdev, 0);
	if (bs->irq < 0)
		return bs->irq;

	bs->clk_hz = clk_get_rate(bs->clk);

	err = bcm2835_dma_init(ctlr, &pdev->dev, bs);
	if (err)
		return err;

	/* initialise the hardware with the default polarities */
	bcm2835_wr(bs, BCM2835_SPI_CS,
		   BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);

	err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt,
			       IRQF_SHARED, dev_name(&pdev->dev), bs);
	if (err) {
		dev_err(&pdev->dev, "could not request IRQ: %d\n", err);
		goto out_dma_release;
	}

	err = spi_register_controller(ctlr);
	if (err) {
		dev_err(&pdev->dev, "could not register SPI controller: %d\n",
			err);
		goto out_dma_release;
	}

	bcm2835_debugfs_create(bs, dev_name(&pdev->dev));

	return 0;

out_dma_release:
	bcm2835_dma_release(ctlr, bs);
	return err;
}

static void bcm2835_spi_remove(struct platform_device *pdev)
{
	struct spi_controller *ctlr = platform_get_drvdata(pdev);
	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);

	bcm2835_debugfs_remove(bs);

	spi_unregister_controller(ctlr);

	bcm2835_dma_release(ctlr, bs);

	/* Clear FIFOs, and disable the HW block */
	bcm2835_wr(bs, BCM2835_SPI_CS,
		   BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
}

static const struct of_device_id bcm2835_spi_match[] = {
	{ .compatible = "brcm,bcm2835-spi", },
	{}
};
MODULE_DEVICE_TABLE(of, bcm2835_spi_match);

static struct platform_driver bcm2835_spi_driver = {
	.driver		= {
		.name		= DRV_NAME,
		.of_match_table	= bcm2835_spi_match,
	},
	.probe		= bcm2835_spi_probe,
	.remove_new	= bcm2835_spi_remove,
	.shutdown	= bcm2835_spi_remove,
};
module_platform_driver(bcm2835_spi_driver);

MODULE_DESCRIPTION("SPI controller driver for Broadcom BCM2835");
MODULE_AUTHOR("Chris Boot <bootc@bootc.net>");
MODULE_LICENSE("GPL");