// SPDX-License-Identifier: GPL-2.0+
/*
 * BCM2835 DMA engine support
 *
 * Author: Florian Meier <florian.meier@koalo.de>
 *         Copyright 2013
 *
 * Based on
 *	OMAP DMAengine support by Russell King
 *
 *	BCM2708 DMA Driver
 *	Copyright (C) 2010 Broadcom
 *
 *	Raspberry Pi PCM I2S ALSA Driver
 *	Copyright (c) by Phil Poole 2013
 *
 *	MARVELL MMP Peripheral DMA Driver
 *	Copyright 2012 Marvell International Ltd.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_dma.h>

#include "virt-dma.h"

#define BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED 14
#define BCM2835_DMA_CHAN_NAME_SIZE 8

/**
 * struct bcm2835_dmadev - BCM2835 DMA controller
 * @ddev: DMA device
 * @base: base address of register map
 * @zero_page: bus address of zero page (to detect transactions copying from
 *	zero page and avoid accessing memory if so)
 */
struct bcm2835_dmadev {
	struct dma_device ddev;
	void __iomem *base;
	dma_addr_t zero_page;
};

struct bcm2835_dma_cb {
	uint32_t info;
	uint32_t src;
	uint32_t dst;
	uint32_t length;
	uint32_t stride;
	uint32_t next;
	uint32_t pad[2];
};

struct bcm2835_cb_entry {
	struct bcm2835_dma_cb *cb;
	dma_addr_t paddr;
};

struct bcm2835_chan {
	struct virt_dma_chan vc;

	struct dma_slave_config	cfg;
	unsigned int dreq;

	int ch;
	struct bcm2835_desc *desc;
	struct dma_pool *cb_pool;

	void __iomem *chan_base;
	int irq_number;
	unsigned int irq_flags;

	bool is_lite_channel;
};

struct bcm2835_desc {
	struct bcm2835_chan *c;
	struct virt_dma_desc vd;
	enum dma_transfer_direction dir;

	unsigned int frames;
	size_t size;

	bool cyclic;

	struct bcm2835_cb_entry cb_list[];
};

#define BCM2835_DMA_CS		0x00
#define BCM2835_DMA_ADDR	0x04
#define BCM2835_DMA_TI		0x08
#define BCM2835_DMA_SOURCE_AD	0x0c
#define BCM2835_DMA_DEST_AD	0x10
#define BCM2835_DMA_LEN		0x14
#define BCM2835_DMA_STRIDE	0x18
#define BCM2835_DMA_NEXTCB	0x1c
#define BCM2835_DMA_DEBUG	0x20

/* DMA CS Control and Status bits */
#define BCM2835_DMA_ACTIVE	BIT(0)  /* activate the DMA */
#define BCM2835_DMA_END		BIT(1)  /* current CB has ended */
#define BCM2835_DMA_INT		BIT(2)  /* interrupt status */
#define BCM2835_DMA_DREQ	BIT(3)  /* DREQ state */
#define BCM2835_DMA_ISPAUSED	BIT(4)  /* Pause requested or not active */
#define BCM2835_DMA_ISHELD	BIT(5)  /* Is held by DREQ flow control */
#define BCM2835_DMA_WAITING_FOR_WRITES BIT(6) /* waiting for last
					       * AXI-write to ack
					       */
#define BCM2835_DMA_ERR		BIT(8)
#define BCM2835_DMA_PRIORITY(x) ((x & 15) << 16) /* AXI priority */
#define BCM2835_DMA_PANIC_PRIORITY(x) ((x & 15) << 20) /* panic priority */
/* current value of TI.BCM2835_DMA_WAIT_RESP */
#define BCM2835_DMA_WAIT_FOR_WRITES BIT(28)
#define BCM2835_DMA_DIS_DEBUG	BIT(29) /* disable debug pause signal */
#define BCM2835_DMA_ABORT	BIT(30) /* Stop current CB, go to next, WO */
#define BCM2835_DMA_RESET	BIT(31) /* WO, self clearing */

/* Transfer information bits - also bcm2835_cb.info field */
#define BCM2835_DMA_INT_EN	BIT(0)
#define BCM2835_DMA_TDMODE	BIT(1) /* 2D-Mode */
#define BCM2835_DMA_WAIT_RESP	BIT(3) /* wait for AXI-write to be acked */
#define BCM2835_DMA_D_INC	BIT(4)
#define BCM2835_DMA_D_WIDTH	BIT(5) /* 128bit writes if set */
#define BCM2835_DMA_D_DREQ	BIT(6) /* enable DREQ for destination */
#define BCM2835_DMA_D_IGNORE	BIT(7) /* ignore destination writes */
#define BCM2835_DMA_S_INC	BIT(8)
#define BCM2835_DMA_S_WIDTH	BIT(9) /* 128bit writes if set */
#define BCM2835_DMA_S_DREQ	BIT(10) /* enable SREQ for source */
#define BCM2835_DMA_S_IGNORE	BIT(11) /* ignore source reads - read 0 */
#define BCM2835_DMA_BURST_LENGTH(x) ((x & 15) << 12)
#define BCM2835_DMA_PER_MAP(x)	((x & 31) << 16) /* REQ source */
#define BCM2835_DMA_WAIT(x)	((x & 31) << 21) /* add DMA-wait cycles */
#define BCM2835_DMA_NO_WIDE_BURSTS BIT(26) /* no 2 beat write bursts */

/* debug register bits */
#define BCM2835_DMA_DEBUG_LAST_NOT_SET_ERR	BIT(0)
#define BCM2835_DMA_DEBUG_FIFO_ERR		BIT(1)
#define BCM2835_DMA_DEBUG_READ_ERR		BIT(2)
#define BCM2835_DMA_DEBUG_OUTSTANDING_WRITES_SHIFT 4
#define BCM2835_DMA_DEBUG_OUTSTANDING_WRITES_BITS 4
#define BCM2835_DMA_DEBUG_ID_SHIFT 16
#define BCM2835_DMA_DEBUG_ID_BITS 9
#define BCM2835_DMA_DEBUG_STATE_SHIFT 16
#define BCM2835_DMA_DEBUG_STATE_BITS 9
#define BCM2835_DMA_DEBUG_VERSION_SHIFT 25
#define BCM2835_DMA_DEBUG_VERSION_BITS 3
#define BCM2835_DMA_DEBUG_LITE BIT(28)

/* shared registers for all dma channels */
#define BCM2835_DMA_INT_STATUS	0xfe0
#define BCM2835_DMA_ENABLE	0xff0

#define BCM2835_DMA_DATA_TYPE_S8	1
#define BCM2835_DMA_DATA_TYPE_S16	2
#define BCM2835_DMA_DATA_TYPE_S32	4
#define BCM2835_DMA_DATA_TYPE_S128	16

/* Valid only for channels 0 - 14, 15 has its own base address */
#define BCM2835_DMA_CHAN(n)	((n) << 8) /* Base address */
#define BCM2835_DMA_CHANIO(base, n) ((base) + BCM2835_DMA_CHAN(n))

/* the max dma length for different channels */
#define MAX_DMA_LEN	SZ_1G
#define MAX_LITE_DMA_LEN	(SZ_64K - 4)

static inline size_t bcm2835_dma_max_frame_length(struct bcm2835_chan *c)
{
	/* lite and normal channels have different max frame length */
	return c->is_lite_channel ? MAX_LITE_DMA_LEN : MAX_DMA_LEN;
}

/* how many frames of max_len size do we need to transfer len bytes */
static inline size_t bcm2835_dma_frames_for_length(size_t len,
						   size_t max_len)
{
	return DIV_ROUND_UP(len, max_len);
}

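/*
 * Worked example (illustrative numbers, not from the datasheet): copying
 * 96 KiB (98304 bytes) fits into a single control block on a normal
 * channel (98304 <= MAX_DMA_LEN), but a lite channel is limited to 65532
 * bytes per frame, so bcm2835_dma_frames_for_length(98304, 65532) =
 * DIV_ROUND_UP(98304, 65532) = 2 control blocks.
 */
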
/* how many frames are needed to transfer the given sg list */
static inline size_t bcm2835_dma_count_frames_for_sg(
	struct bcm2835_chan *c,
	struct scatterlist *sgl,
	unsigned int sg_len)
{
	size_t frames = 0;
	struct scatterlist *sgent;
	unsigned int i;
	size_t plength = bcm2835_dma_max_frame_length(c);

	for_each_sg(sgl, sgent, sg_len, i)
		frames += bcm2835_dma_frames_for_length(
			sg_dma_len(sgent), plength);

	return frames;
}

static inline struct bcm2835_dmadev *to_bcm2835_dma_dev(struct dma_device *d)
{
	return container_of(d, struct bcm2835_dmadev, ddev);
}

static inline struct bcm2835_chan *to_bcm2835_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct bcm2835_chan, vc.chan);
}

static inline struct bcm2835_desc *to_bcm2835_dma_desc(
		struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct bcm2835_desc, vd.tx);
}

static void bcm2835_dma_free_cb_chain(struct bcm2835_desc *desc)
{
	size_t i;

	for (i = 0; i < desc->frames; i++)
		dma_pool_free(desc->c->cb_pool, desc->cb_list[i].cb,
			      desc->cb_list[i].paddr);

	kfree(desc);
}

static void bcm2835_dma_desc_free(struct virt_dma_desc *vd)
{
	bcm2835_dma_free_cb_chain(
		container_of(vd, struct bcm2835_desc, vd));
}

static void bcm2835_dma_create_cb_set_length(
	struct bcm2835_chan *chan,
	struct bcm2835_dma_cb *control_block,
	size_t len,
	size_t period_len,
	size_t *total_len,
	u32 finalextrainfo)
{
	size_t max_len = bcm2835_dma_max_frame_length(chan);

	/* set the length taking lite-channel limitations into account */
	control_block->length = min_t(u32, len, max_len);

	/* finished if we have no period_length */
	if (!period_len)
		return;

	/*
	 * period_len means that we need to generate transfers that
	 * terminate at every multiple of period_len - this is typically
	 * used to set the interrupt flag in info, which is required
	 * during cyclic transfers
	 */

	/* have we filled in period_length yet? */
	if (*total_len + control_block->length < period_len) {
		/* update number of bytes in this period so far */
		*total_len += control_block->length;
		return;
	}

	/* calculate the length that remains to reach period_length */
	control_block->length = period_len - *total_len;

	/* reset total_length for next period */
	*total_len = 0;

	/* add extrainfo bits in info */
	control_block->info |= finalextrainfo;
}

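/*
 * Worked example for the period handling above (illustrative numbers):
 * a cyclic transfer with buf_len = 64 KiB and period_len = 32 KiB on a
 * lite channel (at most 65532 bytes per frame) is built from two control
 * blocks.  The first could carry 65532 bytes, but is cut back to 32768
 * bytes so that it ends exactly on the period boundary and gets the
 * finalextrainfo bits (typically BCM2835_DMA_INT_EN) set; the second
 * covers the remaining 32768 bytes and is handled the same way.
 */
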
/**
 * bcm2835_dma_create_cb_chain - create a chain of control blocks and fill in the data
 *
 * @chan: the @dma_chan for which we run this
 * @direction: the direction in which we transfer
 * @cyclic: it is a cyclic transfer
 * @info: the default info bits to apply per controlblock
 * @frames: number of controlblocks to allocate
 * @src: the src address to assign (if the S_INC bit is set
 *       in @info, then it gets incremented)
 * @dst: the dst address to assign (if the D_INC bit is set
 *       in @info, then it gets incremented)
 * @buf_len: the full buffer length (may also be 0)
 * @period_len: the period length when to apply @finalextrainfo
 *              in addition to the last transfer
 *              this will also break some control-blocks early
 * @finalextrainfo: additional bits in last controlblock
 *                  (or when period_len is reached in case of cyclic)
 * @gfp: the GFP flag to use for allocation
 */
static struct bcm2835_desc *bcm2835_dma_create_cb_chain(
	struct dma_chan *chan, enum dma_transfer_direction direction,
	bool cyclic, u32 info, u32 finalextrainfo, size_t frames,
	dma_addr_t src, dma_addr_t dst, size_t buf_len,
	size_t period_len, gfp_t gfp)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	size_t len = buf_len, total_len;
	size_t frame;
	struct bcm2835_desc *d;
	struct bcm2835_cb_entry *cb_entry;
	struct bcm2835_dma_cb *control_block;

	if (!frames)
		return NULL;

	/* allocate and setup the descriptor. */
	d = kzalloc(struct_size(d, cb_list, frames), gfp);
	if (!d)
		return NULL;

	d->c = c;
	d->dir = direction;
	d->cyclic = cyclic;

	/*
	 * Iterate over all frames, create a control block
	 * for each frame and link them together.
	 */
	for (frame = 0, total_len = 0; frame < frames; d->frames++, frame++) {
		cb_entry = &d->cb_list[frame];
		cb_entry->cb = dma_pool_alloc(c->cb_pool, gfp,
					      &cb_entry->paddr);
		if (!cb_entry->cb)
			goto error_cb;

		/* fill in the control block */
		control_block = cb_entry->cb;
		control_block->info = info;
		control_block->src = src;
		control_block->dst = dst;
		control_block->stride = 0;
		control_block->next = 0;
		/* set up length in control_block if requested */
		if (buf_len) {
			/* calculate length honoring period_length */
			bcm2835_dma_create_cb_set_length(
				c, control_block,
				len, period_len, &total_len,
				cyclic ? finalextrainfo : 0);

			/* calculate new remaining length */
			len -= control_block->length;
		}

		/* link this control block to the previous one */
		if (frame)
			d->cb_list[frame - 1].cb->next = cb_entry->paddr;

		/* update src and dst and length */
		if (src && (info & BCM2835_DMA_S_INC))
			src += control_block->length;
		if (dst && (info & BCM2835_DMA_D_INC))
			dst += control_block->length;

		/* Length of total transfer */
		d->size += control_block->length;
	}

	/* the last frame requires extra flags */
	d->cb_list[d->frames - 1].cb->info |= finalextrainfo;

	/* detect a size mismatch */
	if (buf_len && (d->size != buf_len))
		goto error_cb;

	return d;
error_cb:
	bcm2835_dma_free_cb_chain(d);

	return NULL;
}

static void bcm2835_dma_fill_cb_chain_with_sg(
	struct dma_chan *chan,
	enum dma_transfer_direction direction,
	struct bcm2835_cb_entry *cb,
	struct scatterlist *sgl,
	unsigned int sg_len)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	size_t len, max_len;
	unsigned int i;
	dma_addr_t addr;
	struct scatterlist *sgent;

	max_len = bcm2835_dma_max_frame_length(c);
	for_each_sg(sgl, sgent, sg_len, i) {
		for (addr = sg_dma_address(sgent), len = sg_dma_len(sgent);
		     len > 0;
		     addr += cb->cb->length, len -= cb->cb->length, cb++) {
			if (direction == DMA_DEV_TO_MEM)
				cb->cb->dst = addr;
			else
				cb->cb->src = addr;
			cb->cb->length = min(len, max_len);
		}
	}
}

static void bcm2835_dma_abort(struct bcm2835_chan *c)
{
	void __iomem *chan_base = c->chan_base;
	long int timeout = 10000;

	/*
	 * A zero control block address means the channel is idle.
	 * (The ACTIVE flag in the CS register is not a reliable indicator.)
	 */
	if (!readl(chan_base + BCM2835_DMA_ADDR))
		return;

	/* Write 0 to the active bit - Pause the DMA */
	writel(0, chan_base + BCM2835_DMA_CS);

	/* Wait for any current AXI transfer to complete */
	while ((readl(chan_base + BCM2835_DMA_CS) &
		BCM2835_DMA_WAITING_FOR_WRITES) && --timeout)
		cpu_relax();

	/* Peripheral might be stuck and fail to signal AXI write responses */
	if (!timeout)
		dev_err(c->vc.chan.device->dev,
			"failed to complete outstanding writes\n");

	writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS);
}

static void bcm2835_dma_start_desc(struct bcm2835_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
	struct bcm2835_desc *d;

	if (!vd) {
		c->desc = NULL;
		return;
	}

	list_del(&vd->node);

	c->desc = d = to_bcm2835_dma_desc(&vd->tx);

	writel(d->cb_list[0].paddr, c->chan_base + BCM2835_DMA_ADDR);
	writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
}

static irqreturn_t bcm2835_dma_callback(int irq, void *data)
{
	struct bcm2835_chan *c = data;
	struct bcm2835_desc *d;
	unsigned long flags;

	/* check the shared interrupt */
	if (c->irq_flags & IRQF_SHARED) {
		/* check if the interrupt is enabled */
		flags = readl(c->chan_base + BCM2835_DMA_CS);
		/* if not set then we are not the reason for the irq */
		if (!(flags & BCM2835_DMA_INT))
			return IRQ_NONE;
	}

	spin_lock_irqsave(&c->vc.lock, flags);

	/*
	 * Clear the INT flag to receive further interrupts. Keep the channel
	 * active in case the descriptor is cyclic or in case the client has
	 * already terminated the descriptor and issued a new one. (May happen
	 * if this IRQ handler is threaded.) If the channel is finished, it
	 * will remain idle despite the ACTIVE flag being set.
	 */
	writel(BCM2835_DMA_INT | BCM2835_DMA_ACTIVE,
	       c->chan_base + BCM2835_DMA_CS);

	d = c->desc;

	if (d) {
		if (d->cyclic) {
			/* call the cyclic callback */
			vchan_cyclic_callback(&d->vd);
		} else if (!readl(c->chan_base + BCM2835_DMA_ADDR)) {
			vchan_cookie_complete(&c->desc->vd);
			bcm2835_dma_start_desc(c);
		}
	}

	spin_unlock_irqrestore(&c->vc.lock, flags);

	return IRQ_HANDLED;
}

static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	struct device *dev = c->vc.chan.device->dev;

	dev_dbg(dev, "Allocating DMA channel %d\n", c->ch);

	/*
	 * Control blocks are 256 bit in length and must start at a 256 bit
	 * (32 byte) aligned address (BCM2835 ARM Peripherals, sec. 4.2.1.1).
	 */
	c->cb_pool = dma_pool_create(dev_name(dev), dev,
				     sizeof(struct bcm2835_dma_cb), 32, 0);
	if (!c->cb_pool) {
		dev_err(dev, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	return request_irq(c->irq_number, bcm2835_dma_callback,
			   c->irq_flags, "DMA IRQ", c);
}

static void bcm2835_dma_free_chan_resources(struct dma_chan *chan)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);

	vchan_free_chan_resources(&c->vc);
	free_irq(c->irq_number, c);
	dma_pool_destroy(c->cb_pool);

	dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch);
}

static size_t bcm2835_dma_desc_size(struct bcm2835_desc *d)
{
	return d->size;
}

static size_t bcm2835_dma_desc_size_pos(struct bcm2835_desc *d, dma_addr_t addr)
{
	unsigned int i;
	size_t size;

	for (size = i = 0; i < d->frames; i++) {
		struct bcm2835_dma_cb *control_block = d->cb_list[i].cb;
		size_t this_size = control_block->length;
		dma_addr_t dma;

		if (d->dir == DMA_DEV_TO_MEM)
			dma = control_block->dst;
		else
			dma = control_block->src;

		if (size)
			size += this_size;
		else if (addr >= dma && addr < dma + this_size)
			size += dma + this_size - addr;
	}

	return size;
}

static enum dma_status bcm2835_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		txstate->residue =
			bcm2835_dma_desc_size(to_bcm2835_dma_desc(&vd->tx));
	} else if (c->desc && c->desc->vd.tx.cookie == cookie) {
		struct bcm2835_desc *d = c->desc;
		dma_addr_t pos;

		if (d->dir == DMA_MEM_TO_DEV)
			pos = readl(c->chan_base + BCM2835_DMA_SOURCE_AD);
		else if (d->dir == DMA_DEV_TO_MEM)
			pos = readl(c->chan_base + BCM2835_DMA_DEST_AD);
		else
			pos = 0;

		txstate->residue = bcm2835_dma_desc_size_pos(d, pos);
	} else {
		txstate->residue = 0;
	}

	spin_unlock_irqrestore(&c->vc.lock, flags);

	return ret;
}

static void bcm2835_dma_issue_pending(struct dma_chan *chan)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc)
		bcm2835_dma_start_desc(c);

	spin_unlock_irqrestore(&c->vc.lock, flags);
}

static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	struct bcm2835_desc *d;
	u32 info = BCM2835_DMA_D_INC | BCM2835_DMA_S_INC;
	u32 extra = BCM2835_DMA_INT_EN | BCM2835_DMA_WAIT_RESP;
	size_t max_len = bcm2835_dma_max_frame_length(c);
	size_t frames;

	/* if src, dst or len is not given return with an error */
	if (!src || !dst || !len)
		return NULL;

	/* calculate number of frames */
	frames = bcm2835_dma_frames_for_length(len, max_len);

	/* allocate the CB chain - this also fills in the pointers */
	d = bcm2835_dma_create_cb_chain(chan, DMA_MEM_TO_MEM, false,
					info, extra, frames,
					src, dst, len, 0, GFP_KERNEL);
	if (!d)
		return NULL;

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}

static struct dma_async_tx_descriptor *bcm2835_dma_prep_slave_sg(
	struct dma_chan *chan,
	struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	struct bcm2835_desc *d;
	dma_addr_t src = 0, dst = 0;
	u32 info = BCM2835_DMA_WAIT_RESP;
	u32 extra = BCM2835_DMA_INT_EN;
	size_t frames;

	if (!is_slave_direction(direction)) {
		dev_err(chan->device->dev,
			"%s: bad direction?\n", __func__);
		return NULL;
	}

	if (c->dreq != 0)
		info |= BCM2835_DMA_PER_MAP(c->dreq);

	if (direction == DMA_DEV_TO_MEM) {
		if (c->cfg.src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)
			return NULL;
		src = c->cfg.src_addr;
		info |= BCM2835_DMA_S_DREQ | BCM2835_DMA_D_INC;
	} else {
		if (c->cfg.dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)
			return NULL;
		dst = c->cfg.dst_addr;
		info |= BCM2835_DMA_D_DREQ | BCM2835_DMA_S_INC;
	}

	/* count frames in sg list */
	frames = bcm2835_dma_count_frames_for_sg(c, sgl, sg_len);

	/* allocate the CB chain */
	d = bcm2835_dma_create_cb_chain(chan, direction, false,
					info, extra,
					frames, src, dst, 0, 0,
					GFP_NOWAIT);
	if (!d)
		return NULL;

	/* fill in frames with scatterlist pointers */
	bcm2835_dma_fill_cb_chain_with_sg(chan, direction, d->cb_list,
					  sgl, sg_len);

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}

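/*
 * Client-side sketch (illustrative only, not part of this driver): a
 * typical cyclic user such as the ALSA PCM dmaengine glue configures the
 * channel and submits a cyclic descriptor roughly like this, where
 * period_done, buf, buf_len and period_len are placeholders:
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = <FIFO bus address>,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
 *					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	desc->callback = period_done;	// invoked once per period
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * Bus widths other than 4 bytes are rejected by the prep functions in
 * this driver.
 */
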
static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct bcm2835_dmadev *od = to_bcm2835_dma_dev(chan->device);
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	struct bcm2835_desc *d;
	dma_addr_t src, dst;
	u32 info = BCM2835_DMA_WAIT_RESP;
	u32 extra = 0;
	size_t max_len = bcm2835_dma_max_frame_length(c);
	size_t frames;

	/* Grab configuration */
	if (!is_slave_direction(direction)) {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	if (!buf_len) {
		dev_err(chan->device->dev,
			"%s: bad buffer length (= 0)\n", __func__);
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		extra |= BCM2835_DMA_INT_EN;
	else
		period_len = buf_len;

	/*
	 * warn if buf_len is not a multiple of period_len - this may lead
	 * to unexpected latencies for interrupts and thus audible clicks
	 */
	if (buf_len % period_len)
		dev_warn_once(chan->device->dev,
			      "%s: buffer_length (%zd) is not a multiple of period_len (%zd)\n",
			      __func__, buf_len, period_len);

	/* Setup DREQ channel */
	if (c->dreq != 0)
		info |= BCM2835_DMA_PER_MAP(c->dreq);

	if (direction == DMA_DEV_TO_MEM) {
		if (c->cfg.src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)
			return NULL;
		src = c->cfg.src_addr;
		dst = buf_addr;
		info |= BCM2835_DMA_S_DREQ | BCM2835_DMA_D_INC;
	} else {
		if (c->cfg.dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)
			return NULL;
		dst = c->cfg.dst_addr;
		src = buf_addr;
		info |= BCM2835_DMA_D_DREQ | BCM2835_DMA_S_INC;

		/* non-lite channels can write zeroes w/o accessing memory */
		if (buf_addr == od->zero_page && !c->is_lite_channel)
			info |= BCM2835_DMA_S_IGNORE;
	}

	/* calculate number of frames */
	frames = /* number of periods */
		 DIV_ROUND_UP(buf_len, period_len) *
		 /* number of frames per period */
		 bcm2835_dma_frames_for_length(period_len, max_len);

	/*
	 * allocate the CB chain
	 * note that we need to use GFP_NOWAIT, as the ALSA i2s dmaengine
	 * implementation calls prep_dma_cyclic with interrupts disabled.
	 */
	d = bcm2835_dma_create_cb_chain(chan, direction, true,
					info, extra,
					frames, src, dst, buf_len,
					period_len, GFP_NOWAIT);
	if (!d)
		return NULL;

	/* wrap around into a loop */
	d->cb_list[d->frames - 1].cb->next = d->cb_list[0].paddr;

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}

static int bcm2835_dma_slave_config(struct dma_chan *chan,
				    struct dma_slave_config *cfg)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);

	c->cfg = *cfg;

	return 0;
}

static int bcm2835_dma_terminate_all(struct dma_chan *chan)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);

	/* stop DMA activity */
	if (c->desc) {
		vchan_terminate_vdesc(&c->desc->vd);
		c->desc = NULL;
		bcm2835_dma_abort(c);
	}

	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

static void bcm2835_dma_synchronize(struct dma_chan *chan)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);

	vchan_synchronize(&c->vc);
}

static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id,
				 int irq, unsigned int irq_flags)
{
	struct bcm2835_chan *c;

	c = devm_kzalloc(d->ddev.dev, sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	c->vc.desc_free = bcm2835_dma_desc_free;
	vchan_init(&c->vc, &d->ddev);

	c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id);
	c->ch = chan_id;
	c->irq_number = irq;
	c->irq_flags = irq_flags;

	/* check in DEBUG register if this is a LITE channel */
	if (readl(c->chan_base + BCM2835_DMA_DEBUG) &
		BCM2835_DMA_DEBUG_LITE)
		c->is_lite_channel = true;

	return 0;
}

static void bcm2835_dma_free(struct bcm2835_dmadev *od)
{
	struct bcm2835_chan *c, *next;

	list_for_each_entry_safe(c, next, &od->ddev.channels,
				 vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}

	dma_unmap_page_attrs(od->ddev.dev, od->zero_page, PAGE_SIZE,
			     DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
}

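/*
 * Illustrative device-tree usage (a sketch, not copied from the binding
 * document; addresses, the channel mask and DREQ numbers are examples
 * only).  The controller node carries one cell per channel request, which
 * bcm2835_dma_xlate() below stores as the DREQ number; the
 * "brcm,dma-channel-mask" property selects the channels the driver may
 * use, and interrupts are looked up by the "dma%i" names:
 *
 *	dma: dma@7e007000 {
 *		compatible = "brcm,bcm2835-dma";
 *		reg = <0x7e007000 0xf00>;
 *		interrupts = <...>;
 *		interrupt-names = "dma0", "dma1", ... ;
 *		#dma-cells = <1>;
 *		brcm,dma-channel-mask = <0x7f35>;
 *	};
 *
 *	i2s@7e203000 {
 *		...
 *		dmas = <&dma 2>, <&dma 3>;
 *		dma-names = "tx", "rx";
 *	};
 */
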
static const struct of_device_id bcm2835_dma_of_match[] = {
	{ .compatible = "brcm,bcm2835-dma", },
	{},
};
MODULE_DEVICE_TABLE(of, bcm2835_dma_of_match);

static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec,
					   struct of_dma *ofdma)
{
	struct bcm2835_dmadev *d = ofdma->of_dma_data;
	struct dma_chan *chan;

	chan = dma_get_any_slave_channel(&d->ddev);
	if (!chan)
		return NULL;

	/* Set DREQ from param */
	to_bcm2835_dma_chan(chan)->dreq = spec->args[0];

	return chan;
}

static int bcm2835_dma_suspend_late(struct device *dev)
{
	struct bcm2835_dmadev *od = dev_get_drvdata(dev);
	struct bcm2835_chan *c, *next;

	list_for_each_entry_safe(c, next, &od->ddev.channels,
				 vc.chan.device_node) {
		void __iomem *chan_base = c->chan_base;

		/* Check if DMA channel is busy */
		if (readl(chan_base + BCM2835_DMA_ADDR))
			return -EBUSY;
	}

	return 0;
}

static const struct dev_pm_ops bcm2835_dma_pm_ops = {
	LATE_SYSTEM_SLEEP_PM_OPS(bcm2835_dma_suspend_late, NULL)
};

static int bcm2835_dma_probe(struct platform_device *pdev)
{
	struct bcm2835_dmadev *od;
	void __iomem *base;
	int rc;
	int i, j;
	int irq[BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED + 1];
	int irq_flags;
	uint32_t chans_available;
	char chan_name[BCM2835_DMA_CHAN_NAME_SIZE];

	if (!pdev->dev.dma_mask)
		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(&pdev->dev, "Unable to set DMA mask\n");
		return rc;
	}

	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	dma_set_max_seg_size(&pdev->dev, 0x3FFFFFFF);

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	od->base = base;

	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
	dma_cap_set(DMA_PRIVATE, od->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
	dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask);
	od->ddev.device_alloc_chan_resources = bcm2835_dma_alloc_chan_resources;
	od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources;
	od->ddev.device_tx_status = bcm2835_dma_tx_status;
	od->ddev.device_issue_pending = bcm2835_dma_issue_pending;
	od->ddev.device_prep_dma_cyclic = bcm2835_dma_prep_dma_cyclic;
	od->ddev.device_prep_slave_sg = bcm2835_dma_prep_slave_sg;
	od->ddev.device_prep_dma_memcpy = bcm2835_dma_prep_dma_memcpy;
	od->ddev.device_config = bcm2835_dma_slave_config;
	od->ddev.device_terminate_all = bcm2835_dma_terminate_all;
	od->ddev.device_synchronize = bcm2835_dma_synchronize;
	od->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	od->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
			      BIT(DMA_MEM_TO_MEM);
	od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	od->ddev.descriptor_reuse = true;
	od->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&od->ddev.channels);

	platform_set_drvdata(pdev, od);

	od->zero_page = dma_map_page_attrs(od->ddev.dev, ZERO_PAGE(0), 0,
					   PAGE_SIZE, DMA_TO_DEVICE,
					   DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(od->ddev.dev, od->zero_page)) {
		dev_err(&pdev->dev, "Failed to map zero page\n");
		return -ENOMEM;
	}

	/* Request DMA channel mask from device tree */
	if (of_property_read_u32(pdev->dev.of_node,
				 "brcm,dma-channel-mask",
				 &chans_available)) {
		dev_err(&pdev->dev, "Failed to get channel mask\n");
		rc = -EINVAL;
		goto err_no_dma;
	}

	/* get irqs for each channel that we support */
	for (i = 0; i <= BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED; i++) {
		/* skip masked out channels */
		if (!(chans_available & (1 << i))) {
			irq[i] = -1;
			continue;
		}

		/* get the named irq */
		snprintf(chan_name, sizeof(chan_name), "dma%i", i);
		irq[i] = platform_get_irq_byname(pdev, chan_name);
		if (irq[i] >= 0)
			continue;

		/* legacy device tree case handling */
		dev_warn_once(&pdev->dev,
			      "missing interrupt-names property in device tree - legacy interpretation is used\n");
		/*
		 * in case of channel >= 11
		 * use the 11th interrupt and that is shared
		 */
		irq[i] = platform_get_irq(pdev, i < 11 ? i : 11);
	}

	/* get irqs for each channel */
	for (i = 0; i <= BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED; i++) {
		/* skip channels without irq */
		if (irq[i] < 0)
			continue;

		/* check if there are other channels that also use this irq */
		irq_flags = 0;
		for (j = 0; j <= BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED; j++)
			if ((i != j) && (irq[j] == irq[i])) {
				irq_flags = IRQF_SHARED;
				break;
			}

		/* initialize the channel */
		rc = bcm2835_dma_chan_init(od, i, irq[i], irq_flags);
		if (rc)
			goto err_no_dma;
	}

	dev_dbg(&pdev->dev, "Initialized %i DMA channels\n", i);

	/* Device-tree DMA controller registration */
	rc = of_dma_controller_register(pdev->dev.of_node,
					bcm2835_dma_xlate, od);
	if (rc) {
		dev_err(&pdev->dev, "Failed to register DMA controller\n");
		goto err_no_dma;
	}

	rc = dma_async_device_register(&od->ddev);
	if (rc) {
		dev_err(&pdev->dev,
			"Failed to register slave DMA engine device: %d\n", rc);
		goto err_no_dma;
	}

	dev_dbg(&pdev->dev, "Load BCM2835 DMA engine driver\n");

	return 0;

err_no_dma:
	bcm2835_dma_free(od);
	return rc;
}

static void bcm2835_dma_remove(struct platform_device *pdev)
{
	struct bcm2835_dmadev *od = platform_get_drvdata(pdev);

	dma_async_device_unregister(&od->ddev);
	bcm2835_dma_free(od);
}

static struct platform_driver bcm2835_dma_driver = {
	.probe	= bcm2835_dma_probe,
	.remove	= bcm2835_dma_remove,
	.driver = {
		.name = "bcm2835-dma",
		.of_match_table = of_match_ptr(bcm2835_dma_of_match),
		.pm = pm_ptr(&bcm2835_dma_pm_ops),
	},
};

module_platform_driver(bcm2835_dma_driver);

MODULE_DESCRIPTION("BCM2835 DMA engine driver");
MODULE_AUTHOR("Florian Meier <florian.meier@koalo.de>");
MODULE_LICENSE("GPL");