1 /* 2 * Core driver for the Synopsys DesignWare DMA Controller 3 * 4 * Copyright (C) 2007-2008 Atmel Corporation 5 * Copyright (C) 2010-2011 ST Microelectronics 6 * Copyright (C) 2013 Intel Corporation 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License version 2 as 10 * published by the Free Software Foundation. 11 */ 12 13 #include <linux/bitops.h> 14 #include <linux/delay.h> 15 #include <linux/dmaengine.h> 16 #include <linux/dma-mapping.h> 17 #include <linux/dmapool.h> 18 #include <linux/err.h> 19 #include <linux/init.h> 20 #include <linux/interrupt.h> 21 #include <linux/io.h> 22 #include <linux/mm.h> 23 #include <linux/module.h> 24 #include <linux/slab.h> 25 #include <linux/pm_runtime.h> 26 27 #include "../dmaengine.h" 28 #include "internal.h" 29 30 /* 31 * This supports the Synopsys "DesignWare AHB Central DMA Controller", 32 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all 33 * of which use ARM any more). See the "Databook" from Synopsys for 34 * information beyond what licensees probably provide. 35 * 36 * The driver has been tested with the Atmel AT32AP7000, which does not 37 * support descriptor writeback. 38 */ 39 40 #define DWC_DEFAULT_CTLLO(_chan) ({ \ 41 struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \ 42 struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \ 43 bool _is_slave = is_slave_direction(_dwc->direction); \ 44 u8 _smsize = _is_slave ? _sconfig->src_maxburst : \ 45 DW_DMA_MSIZE_16; \ 46 u8 _dmsize = _is_slave ? _sconfig->dst_maxburst : \ 47 DW_DMA_MSIZE_16; \ 48 u8 _dms = (_dwc->direction == DMA_MEM_TO_DEV) ? \ 49 _dwc->dws.p_master : _dwc->dws.m_master; \ 50 u8 _sms = (_dwc->direction == DMA_DEV_TO_MEM) ? \ 51 _dwc->dws.p_master : _dwc->dws.m_master; \ 52 \ 53 (DWC_CTLL_DST_MSIZE(_dmsize) \ 54 | DWC_CTLL_SRC_MSIZE(_smsize) \ 55 | DWC_CTLL_LLP_D_EN \ 56 | DWC_CTLL_LLP_S_EN \ 57 | DWC_CTLL_DMS(_dms) \ 58 | DWC_CTLL_SMS(_sms)); \ 59 }) 60 61 /* The set of bus widths supported by the DMA controller */ 62 #define DW_DMA_BUSWIDTHS \ 63 BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \ 64 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ 65 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ 66 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) 67 68 /*----------------------------------------------------------------------*/ 69 70 static struct device *chan2dev(struct dma_chan *chan) 71 { 72 return &chan->dev->device; 73 } 74 75 static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) 76 { 77 return to_dw_desc(dwc->active_list.next); 78 } 79 80 static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) 81 { 82 struct dw_desc *desc = txd_to_dw_desc(tx); 83 struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan); 84 dma_cookie_t cookie; 85 unsigned long flags; 86 87 spin_lock_irqsave(&dwc->lock, flags); 88 cookie = dma_cookie_assign(tx); 89 90 /* 91 * REVISIT: We should attempt to chain as many descriptors as 92 * possible, perhaps even appending to those already submitted 93 * for DMA. But this is hard to do in a race-free manner. 94 */ 95 96 list_add_tail(&desc->desc_node, &dwc->queue); 97 spin_unlock_irqrestore(&dwc->lock, flags); 98 dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", 99 __func__, desc->txd.cookie); 100 101 return cookie; 102 } 103 104 static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) 105 { 106 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 107 struct dw_desc *desc; 108 dma_addr_t phys; 109 110 desc = dma_pool_zalloc(dw->desc_pool, GFP_ATOMIC, &phys); 111 if (!desc) 112 return NULL; 113 114 dwc->descs_allocated++; 115 INIT_LIST_HEAD(&desc->tx_list); 116 dma_async_tx_descriptor_init(&desc->txd, &dwc->chan); 117 desc->txd.tx_submit = dwc_tx_submit; 118 desc->txd.flags = DMA_CTRL_ACK; 119 desc->txd.phys = phys; 120 return desc; 121 } 122 123 static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) 124 { 125 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 126 struct dw_desc *child, *_next; 127 128 if (unlikely(!desc)) 129 return; 130 131 list_for_each_entry_safe(child, _next, &desc->tx_list, desc_node) { 132 list_del(&child->desc_node); 133 dma_pool_free(dw->desc_pool, child, child->txd.phys); 134 dwc->descs_allocated--; 135 } 136 137 dma_pool_free(dw->desc_pool, desc, desc->txd.phys); 138 dwc->descs_allocated--; 139 } 140 141 static void dwc_initialize(struct dw_dma_chan *dwc) 142 { 143 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 144 u32 cfghi = DWC_CFGH_FIFO_MODE; 145 u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority); 146 bool hs_polarity = dwc->dws.hs_polarity; 147 148 if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags)) 149 return; 150 151 cfghi |= DWC_CFGH_DST_PER(dwc->dws.dst_id); 152 cfghi |= DWC_CFGH_SRC_PER(dwc->dws.src_id); 153 154 /* Set polarity of handshake interface */ 155 cfglo |= hs_polarity ? DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL : 0; 156 157 channel_writel(dwc, CFG_LO, cfglo); 158 channel_writel(dwc, CFG_HI, cfghi); 159 160 /* Enable interrupts */ 161 channel_set_bit(dw, MASK.XFER, dwc->mask); 162 channel_set_bit(dw, MASK.ERROR, dwc->mask); 163 164 set_bit(DW_DMA_IS_INITIALIZED, &dwc->flags); 165 } 166 167 /*----------------------------------------------------------------------*/ 168 169 static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc) 170 { 171 dev_err(chan2dev(&dwc->chan), 172 " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", 173 channel_readl(dwc, SAR), 174 channel_readl(dwc, DAR), 175 channel_readl(dwc, LLP), 176 channel_readl(dwc, CTL_HI), 177 channel_readl(dwc, CTL_LO)); 178 } 179 180 static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc) 181 { 182 channel_clear_bit(dw, CH_EN, dwc->mask); 183 while (dma_readl(dw, CH_EN) & dwc->mask) 184 cpu_relax(); 185 } 186 187 /*----------------------------------------------------------------------*/ 188 189 /* Perform single block transfer */ 190 static inline void dwc_do_single_block(struct dw_dma_chan *dwc, 191 struct dw_desc *desc) 192 { 193 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 194 u32 ctllo; 195 196 /* 197 * Software emulation of LLP mode relies on interrupts to continue 198 * multi block transfer. 199 */ 200 ctllo = lli_read(desc, ctllo) | DWC_CTLL_INT_EN; 201 202 channel_writel(dwc, SAR, lli_read(desc, sar)); 203 channel_writel(dwc, DAR, lli_read(desc, dar)); 204 channel_writel(dwc, CTL_LO, ctllo); 205 channel_writel(dwc, CTL_HI, lli_read(desc, ctlhi)); 206 channel_set_bit(dw, CH_EN, dwc->mask); 207 208 /* Move pointer to next descriptor */ 209 dwc->tx_node_active = dwc->tx_node_active->next; 210 } 211 212 /* Called with dwc->lock held and bh disabled */ 213 static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) 214 { 215 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 216 u8 lms = DWC_LLP_LMS(dwc->dws.m_master); 217 unsigned long was_soft_llp; 218 219 /* ASSERT: channel is idle */ 220 if (dma_readl(dw, CH_EN) & dwc->mask) { 221 dev_err(chan2dev(&dwc->chan), 222 "%s: BUG: Attempted to start non-idle channel\n", 223 __func__); 224 dwc_dump_chan_regs(dwc); 225 226 /* The tasklet will hopefully advance the queue... */ 227 return; 228 } 229 230 if (dwc->nollp) { 231 was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP, 232 &dwc->flags); 233 if (was_soft_llp) { 234 dev_err(chan2dev(&dwc->chan), 235 "BUG: Attempted to start new LLP transfer inside ongoing one\n"); 236 return; 237 } 238 239 dwc_initialize(dwc); 240 241 first->residue = first->total_len; 242 dwc->tx_node_active = &first->tx_list; 243 244 /* Submit first block */ 245 dwc_do_single_block(dwc, first); 246 247 return; 248 } 249 250 dwc_initialize(dwc); 251 252 channel_writel(dwc, LLP, first->txd.phys | lms); 253 channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); 254 channel_writel(dwc, CTL_HI, 0); 255 channel_set_bit(dw, CH_EN, dwc->mask); 256 } 257 258 static void dwc_dostart_first_queued(struct dw_dma_chan *dwc) 259 { 260 struct dw_desc *desc; 261 262 if (list_empty(&dwc->queue)) 263 return; 264 265 list_move(dwc->queue.next, &dwc->active_list); 266 desc = dwc_first_active(dwc); 267 dev_vdbg(chan2dev(&dwc->chan), "%s: started %u\n", __func__, desc->txd.cookie); 268 dwc_dostart(dwc, desc); 269 } 270 271 /*----------------------------------------------------------------------*/ 272 273 static void 274 dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, 275 bool callback_required) 276 { 277 struct dma_async_tx_descriptor *txd = &desc->txd; 278 struct dw_desc *child; 279 unsigned long flags; 280 struct dmaengine_desc_callback cb; 281 282 dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); 283 284 spin_lock_irqsave(&dwc->lock, flags); 285 dma_cookie_complete(txd); 286 if (callback_required) 287 dmaengine_desc_get_callback(txd, &cb); 288 else 289 memset(&cb, 0, sizeof(cb)); 290 291 /* async_tx_ack */ 292 list_for_each_entry(child, &desc->tx_list, desc_node) 293 async_tx_ack(&child->txd); 294 async_tx_ack(&desc->txd); 295 dwc_desc_put(dwc, desc); 296 spin_unlock_irqrestore(&dwc->lock, flags); 297 298 dmaengine_desc_callback_invoke(&cb, NULL); 299 } 300 301 static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) 302 { 303 struct dw_desc *desc, *_desc; 304 LIST_HEAD(list); 305 unsigned long flags; 306 307 spin_lock_irqsave(&dwc->lock, flags); 308 if (dma_readl(dw, CH_EN) & dwc->mask) { 309 dev_err(chan2dev(&dwc->chan), 310 "BUG: XFER bit set, but channel not idle!\n"); 311 312 /* Try to continue after resetting the channel... */ 313 dwc_chan_disable(dw, dwc); 314 } 315 316 /* 317 * Submit queued descriptors ASAP, i.e. before we go through 318 * the completed ones. 319 */ 320 list_splice_init(&dwc->active_list, &list); 321 dwc_dostart_first_queued(dwc); 322 323 spin_unlock_irqrestore(&dwc->lock, flags); 324 325 list_for_each_entry_safe(desc, _desc, &list, desc_node) 326 dwc_descriptor_complete(dwc, desc, true); 327 } 328 329 /* Returns how many bytes were already received from source */ 330 static inline u32 dwc_get_sent(struct dw_dma_chan *dwc) 331 { 332 u32 ctlhi = channel_readl(dwc, CTL_HI); 333 u32 ctllo = channel_readl(dwc, CTL_LO); 334 335 return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7)); 336 } 337 338 static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) 339 { 340 dma_addr_t llp; 341 struct dw_desc *desc, *_desc; 342 struct dw_desc *child; 343 u32 status_xfer; 344 unsigned long flags; 345 346 spin_lock_irqsave(&dwc->lock, flags); 347 llp = channel_readl(dwc, LLP); 348 status_xfer = dma_readl(dw, RAW.XFER); 349 350 if (status_xfer & dwc->mask) { 351 /* Everything we've submitted is done */ 352 dma_writel(dw, CLEAR.XFER, dwc->mask); 353 354 if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) { 355 struct list_head *head, *active = dwc->tx_node_active; 356 357 /* 358 * We are inside first active descriptor. 359 * Otherwise something is really wrong. 360 */ 361 desc = dwc_first_active(dwc); 362 363 head = &desc->tx_list; 364 if (active != head) { 365 /* Update residue to reflect last sent descriptor */ 366 if (active == head->next) 367 desc->residue -= desc->len; 368 else 369 desc->residue -= to_dw_desc(active->prev)->len; 370 371 child = to_dw_desc(active); 372 373 /* Submit next block */ 374 dwc_do_single_block(dwc, child); 375 376 spin_unlock_irqrestore(&dwc->lock, flags); 377 return; 378 } 379 380 /* We are done here */ 381 clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); 382 } 383 384 spin_unlock_irqrestore(&dwc->lock, flags); 385 386 dwc_complete_all(dw, dwc); 387 return; 388 } 389 390 if (list_empty(&dwc->active_list)) { 391 spin_unlock_irqrestore(&dwc->lock, flags); 392 return; 393 } 394 395 if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) { 396 dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__); 397 spin_unlock_irqrestore(&dwc->lock, flags); 398 return; 399 } 400 401 dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp); 402 403 list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { 404 /* Initial residue value */ 405 desc->residue = desc->total_len; 406 407 /* Check first descriptors addr */ 408 if (desc->txd.phys == DWC_LLP_LOC(llp)) { 409 spin_unlock_irqrestore(&dwc->lock, flags); 410 return; 411 } 412 413 /* Check first descriptors llp */ 414 if (lli_read(desc, llp) == llp) { 415 /* This one is currently in progress */ 416 desc->residue -= dwc_get_sent(dwc); 417 spin_unlock_irqrestore(&dwc->lock, flags); 418 return; 419 } 420 421 desc->residue -= desc->len; 422 list_for_each_entry(child, &desc->tx_list, desc_node) { 423 if (lli_read(child, llp) == llp) { 424 /* Currently in progress */ 425 desc->residue -= dwc_get_sent(dwc); 426 spin_unlock_irqrestore(&dwc->lock, flags); 427 return; 428 } 429 desc->residue -= child->len; 430 } 431 432 /* 433 * No descriptors so far seem to be in progress, i.e. 434 * this one must be done. 435 */ 436 spin_unlock_irqrestore(&dwc->lock, flags); 437 dwc_descriptor_complete(dwc, desc, true); 438 spin_lock_irqsave(&dwc->lock, flags); 439 } 440 441 dev_err(chan2dev(&dwc->chan), 442 "BUG: All descriptors done, but channel not idle!\n"); 443 444 /* Try to continue after resetting the channel... */ 445 dwc_chan_disable(dw, dwc); 446 447 dwc_dostart_first_queued(dwc); 448 spin_unlock_irqrestore(&dwc->lock, flags); 449 } 450 451 static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_desc *desc) 452 { 453 dev_crit(chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", 454 lli_read(desc, sar), 455 lli_read(desc, dar), 456 lli_read(desc, llp), 457 lli_read(desc, ctlhi), 458 lli_read(desc, ctllo)); 459 } 460 461 static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) 462 { 463 struct dw_desc *bad_desc; 464 struct dw_desc *child; 465 unsigned long flags; 466 467 dwc_scan_descriptors(dw, dwc); 468 469 spin_lock_irqsave(&dwc->lock, flags); 470 471 /* 472 * The descriptor currently at the head of the active list is 473 * borked. Since we don't have any way to report errors, we'll 474 * just have to scream loudly and try to carry on. 475 */ 476 bad_desc = dwc_first_active(dwc); 477 list_del_init(&bad_desc->desc_node); 478 list_move(dwc->queue.next, dwc->active_list.prev); 479 480 /* Clear the error flag and try to restart the controller */ 481 dma_writel(dw, CLEAR.ERROR, dwc->mask); 482 if (!list_empty(&dwc->active_list)) 483 dwc_dostart(dwc, dwc_first_active(dwc)); 484 485 /* 486 * WARN may seem harsh, but since this only happens 487 * when someone submits a bad physical address in a 488 * descriptor, we should consider ourselves lucky that the 489 * controller flagged an error instead of scribbling over 490 * random memory locations. 491 */ 492 dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n" 493 " cookie: %d\n", bad_desc->txd.cookie); 494 dwc_dump_lli(dwc, bad_desc); 495 list_for_each_entry(child, &bad_desc->tx_list, desc_node) 496 dwc_dump_lli(dwc, child); 497 498 spin_unlock_irqrestore(&dwc->lock, flags); 499 500 /* Pretend the descriptor completed successfully */ 501 dwc_descriptor_complete(dwc, bad_desc, true); 502 } 503 504 /* --------------------- Cyclic DMA API extensions -------------------- */ 505 506 dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan) 507 { 508 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 509 return channel_readl(dwc, SAR); 510 } 511 EXPORT_SYMBOL(dw_dma_get_src_addr); 512 513 dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan) 514 { 515 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 516 return channel_readl(dwc, DAR); 517 } 518 EXPORT_SYMBOL(dw_dma_get_dst_addr); 519 520 /* Called with dwc->lock held and all DMAC interrupts disabled */ 521 static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, 522 u32 status_block, u32 status_err, u32 status_xfer) 523 { 524 unsigned long flags; 525 526 if (status_block & dwc->mask) { 527 void (*callback)(void *param); 528 void *callback_param; 529 530 dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n", 531 channel_readl(dwc, LLP)); 532 dma_writel(dw, CLEAR.BLOCK, dwc->mask); 533 534 callback = dwc->cdesc->period_callback; 535 callback_param = dwc->cdesc->period_callback_param; 536 537 if (callback) 538 callback(callback_param); 539 } 540 541 /* 542 * Error and transfer complete are highly unlikely, and will most 543 * likely be due to a configuration error by the user. 544 */ 545 if (unlikely(status_err & dwc->mask) || 546 unlikely(status_xfer & dwc->mask)) { 547 unsigned int i; 548 549 dev_err(chan2dev(&dwc->chan), 550 "cyclic DMA unexpected %s interrupt, stopping DMA transfer\n", 551 status_xfer ? "xfer" : "error"); 552 553 spin_lock_irqsave(&dwc->lock, flags); 554 555 dwc_dump_chan_regs(dwc); 556 557 dwc_chan_disable(dw, dwc); 558 559 /* Make sure DMA does not restart by loading a new list */ 560 channel_writel(dwc, LLP, 0); 561 channel_writel(dwc, CTL_LO, 0); 562 channel_writel(dwc, CTL_HI, 0); 563 564 dma_writel(dw, CLEAR.BLOCK, dwc->mask); 565 dma_writel(dw, CLEAR.ERROR, dwc->mask); 566 dma_writel(dw, CLEAR.XFER, dwc->mask); 567 568 for (i = 0; i < dwc->cdesc->periods; i++) 569 dwc_dump_lli(dwc, dwc->cdesc->desc[i]); 570 571 spin_unlock_irqrestore(&dwc->lock, flags); 572 } 573 574 /* Re-enable interrupts */ 575 channel_set_bit(dw, MASK.BLOCK, dwc->mask); 576 } 577 578 /* ------------------------------------------------------------------------- */ 579 580 static void dw_dma_tasklet(unsigned long data) 581 { 582 struct dw_dma *dw = (struct dw_dma *)data; 583 struct dw_dma_chan *dwc; 584 u32 status_block; 585 u32 status_xfer; 586 u32 status_err; 587 unsigned int i; 588 589 status_block = dma_readl(dw, RAW.BLOCK); 590 status_xfer = dma_readl(dw, RAW.XFER); 591 status_err = dma_readl(dw, RAW.ERROR); 592 593 dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err); 594 595 for (i = 0; i < dw->dma.chancnt; i++) { 596 dwc = &dw->chan[i]; 597 if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) 598 dwc_handle_cyclic(dw, dwc, status_block, status_err, 599 status_xfer); 600 else if (status_err & (1 << i)) 601 dwc_handle_error(dw, dwc); 602 else if (status_xfer & (1 << i)) 603 dwc_scan_descriptors(dw, dwc); 604 } 605 606 /* Re-enable interrupts */ 607 channel_set_bit(dw, MASK.XFER, dw->all_chan_mask); 608 channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask); 609 } 610 611 static irqreturn_t dw_dma_interrupt(int irq, void *dev_id) 612 { 613 struct dw_dma *dw = dev_id; 614 u32 status; 615 616 /* Check if we have any interrupt from the DMAC which is not in use */ 617 if (!dw->in_use) 618 return IRQ_NONE; 619 620 status = dma_readl(dw, STATUS_INT); 621 dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status); 622 623 /* Check if we have any interrupt from the DMAC */ 624 if (!status) 625 return IRQ_NONE; 626 627 /* 628 * Just disable the interrupts. We'll turn them back on in the 629 * softirq handler. 630 */ 631 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); 632 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); 633 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); 634 635 status = dma_readl(dw, STATUS_INT); 636 if (status) { 637 dev_err(dw->dma.dev, 638 "BUG: Unexpected interrupts pending: 0x%x\n", 639 status); 640 641 /* Try to recover */ 642 channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1); 643 channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1); 644 channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1); 645 channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1); 646 channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1); 647 } 648 649 tasklet_schedule(&dw->tasklet); 650 651 return IRQ_HANDLED; 652 } 653 654 /*----------------------------------------------------------------------*/ 655 656 static struct dma_async_tx_descriptor * 657 dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 658 size_t len, unsigned long flags) 659 { 660 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 661 struct dw_dma *dw = to_dw_dma(chan->device); 662 struct dw_desc *desc; 663 struct dw_desc *first; 664 struct dw_desc *prev; 665 size_t xfer_count; 666 size_t offset; 667 u8 m_master = dwc->dws.m_master; 668 unsigned int src_width; 669 unsigned int dst_width; 670 unsigned int data_width = dw->pdata->data_width[m_master]; 671 u32 ctllo; 672 u8 lms = DWC_LLP_LMS(m_master); 673 674 dev_vdbg(chan2dev(chan), 675 "%s: d%pad s%pad l0x%zx f0x%lx\n", __func__, 676 &dest, &src, len, flags); 677 678 if (unlikely(!len)) { 679 dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__); 680 return NULL; 681 } 682 683 dwc->direction = DMA_MEM_TO_MEM; 684 685 src_width = dst_width = __ffs(data_width | src | dest | len); 686 687 ctllo = DWC_DEFAULT_CTLLO(chan) 688 | DWC_CTLL_DST_WIDTH(dst_width) 689 | DWC_CTLL_SRC_WIDTH(src_width) 690 | DWC_CTLL_DST_INC 691 | DWC_CTLL_SRC_INC 692 | DWC_CTLL_FC_M2M; 693 prev = first = NULL; 694 695 for (offset = 0; offset < len; offset += xfer_count << src_width) { 696 xfer_count = min_t(size_t, (len - offset) >> src_width, 697 dwc->block_size); 698 699 desc = dwc_desc_get(dwc); 700 if (!desc) 701 goto err_desc_get; 702 703 lli_write(desc, sar, src + offset); 704 lli_write(desc, dar, dest + offset); 705 lli_write(desc, ctllo, ctllo); 706 lli_write(desc, ctlhi, xfer_count); 707 desc->len = xfer_count << src_width; 708 709 if (!first) { 710 first = desc; 711 } else { 712 lli_write(prev, llp, desc->txd.phys | lms); 713 list_add_tail(&desc->desc_node, &first->tx_list); 714 } 715 prev = desc; 716 } 717 718 if (flags & DMA_PREP_INTERRUPT) 719 /* Trigger interrupt after last block */ 720 lli_set(prev, ctllo, DWC_CTLL_INT_EN); 721 722 prev->lli.llp = 0; 723 lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); 724 first->txd.flags = flags; 725 first->total_len = len; 726 727 return &first->txd; 728 729 err_desc_get: 730 dwc_desc_put(dwc, first); 731 return NULL; 732 } 733 734 static struct dma_async_tx_descriptor * 735 dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 736 unsigned int sg_len, enum dma_transfer_direction direction, 737 unsigned long flags, void *context) 738 { 739 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 740 struct dw_dma *dw = to_dw_dma(chan->device); 741 struct dma_slave_config *sconfig = &dwc->dma_sconfig; 742 struct dw_desc *prev; 743 struct dw_desc *first; 744 u32 ctllo; 745 u8 m_master = dwc->dws.m_master; 746 u8 lms = DWC_LLP_LMS(m_master); 747 dma_addr_t reg; 748 unsigned int reg_width; 749 unsigned int mem_width; 750 unsigned int data_width = dw->pdata->data_width[m_master]; 751 unsigned int i; 752 struct scatterlist *sg; 753 size_t total_len = 0; 754 755 dev_vdbg(chan2dev(chan), "%s\n", __func__); 756 757 if (unlikely(!is_slave_direction(direction) || !sg_len)) 758 return NULL; 759 760 dwc->direction = direction; 761 762 prev = first = NULL; 763 764 switch (direction) { 765 case DMA_MEM_TO_DEV: 766 reg_width = __ffs(sconfig->dst_addr_width); 767 reg = sconfig->dst_addr; 768 ctllo = (DWC_DEFAULT_CTLLO(chan) 769 | DWC_CTLL_DST_WIDTH(reg_width) 770 | DWC_CTLL_DST_FIX 771 | DWC_CTLL_SRC_INC); 772 773 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) : 774 DWC_CTLL_FC(DW_DMA_FC_D_M2P); 775 776 for_each_sg(sgl, sg, sg_len, i) { 777 struct dw_desc *desc; 778 u32 len, dlen, mem; 779 780 mem = sg_dma_address(sg); 781 len = sg_dma_len(sg); 782 783 mem_width = __ffs(data_width | mem | len); 784 785 slave_sg_todev_fill_desc: 786 desc = dwc_desc_get(dwc); 787 if (!desc) 788 goto err_desc_get; 789 790 lli_write(desc, sar, mem); 791 lli_write(desc, dar, reg); 792 if ((len >> mem_width) > dwc->block_size) { 793 dlen = dwc->block_size << mem_width; 794 } else { 795 dlen = len; 796 } 797 lli_write(desc, ctlhi, dlen >> mem_width); 798 lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width)); 799 desc->len = dlen; 800 801 if (!first) { 802 first = desc; 803 } else { 804 lli_write(prev, llp, desc->txd.phys | lms); 805 list_add_tail(&desc->desc_node, &first->tx_list); 806 } 807 prev = desc; 808 809 mem += dlen; 810 len -= dlen; 811 total_len += dlen; 812 813 if (len) 814 goto slave_sg_todev_fill_desc; 815 } 816 break; 817 case DMA_DEV_TO_MEM: 818 reg_width = __ffs(sconfig->src_addr_width); 819 reg = sconfig->src_addr; 820 ctllo = (DWC_DEFAULT_CTLLO(chan) 821 | DWC_CTLL_SRC_WIDTH(reg_width) 822 | DWC_CTLL_DST_INC 823 | DWC_CTLL_SRC_FIX); 824 825 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) : 826 DWC_CTLL_FC(DW_DMA_FC_D_P2M); 827 828 for_each_sg(sgl, sg, sg_len, i) { 829 struct dw_desc *desc; 830 u32 len, dlen, mem; 831 832 mem = sg_dma_address(sg); 833 len = sg_dma_len(sg); 834 835 slave_sg_fromdev_fill_desc: 836 desc = dwc_desc_get(dwc); 837 if (!desc) 838 goto err_desc_get; 839 840 lli_write(desc, sar, reg); 841 lli_write(desc, dar, mem); 842 if ((len >> reg_width) > dwc->block_size) { 843 dlen = dwc->block_size << reg_width; 844 } else { 845 dlen = len; 846 } 847 lli_write(desc, ctlhi, dlen >> reg_width); 848 mem_width = __ffs(data_width | mem | dlen); 849 lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width)); 850 desc->len = dlen; 851 852 if (!first) { 853 first = desc; 854 } else { 855 lli_write(prev, llp, desc->txd.phys | lms); 856 list_add_tail(&desc->desc_node, &first->tx_list); 857 } 858 prev = desc; 859 860 mem += dlen; 861 len -= dlen; 862 total_len += dlen; 863 864 if (len) 865 goto slave_sg_fromdev_fill_desc; 866 } 867 break; 868 default: 869 return NULL; 870 } 871 872 if (flags & DMA_PREP_INTERRUPT) 873 /* Trigger interrupt after last block */ 874 lli_set(prev, ctllo, DWC_CTLL_INT_EN); 875 876 prev->lli.llp = 0; 877 lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); 878 first->total_len = total_len; 879 880 return &first->txd; 881 882 err_desc_get: 883 dev_err(chan2dev(chan), 884 "not enough descriptors available. Direction %d\n", direction); 885 dwc_desc_put(dwc, first); 886 return NULL; 887 } 888 889 bool dw_dma_filter(struct dma_chan *chan, void *param) 890 { 891 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 892 struct dw_dma_slave *dws = param; 893 894 if (dws->dma_dev != chan->device->dev) 895 return false; 896 897 /* We have to copy data since dws can be temporary storage */ 898 memcpy(&dwc->dws, dws, sizeof(struct dw_dma_slave)); 899 900 return true; 901 } 902 EXPORT_SYMBOL_GPL(dw_dma_filter); 903 904 /* 905 * Fix sconfig's burst size according to dw_dmac. We need to convert them as: 906 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3. 907 * 908 * NOTE: burst size 2 is not supported by controller. 909 * 910 * This can be done by finding least significant bit set: n & (n - 1) 911 */ 912 static inline void convert_burst(u32 *maxburst) 913 { 914 if (*maxburst > 1) 915 *maxburst = fls(*maxburst) - 2; 916 else 917 *maxburst = 0; 918 } 919 920 static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig) 921 { 922 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 923 924 /* Check if chan will be configured for slave transfers */ 925 if (!is_slave_direction(sconfig->direction)) 926 return -EINVAL; 927 928 memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig)); 929 dwc->direction = sconfig->direction; 930 931 convert_burst(&dwc->dma_sconfig.src_maxburst); 932 convert_burst(&dwc->dma_sconfig.dst_maxburst); 933 934 return 0; 935 } 936 937 static int dwc_pause(struct dma_chan *chan) 938 { 939 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 940 unsigned long flags; 941 unsigned int count = 20; /* timeout iterations */ 942 u32 cfglo; 943 944 spin_lock_irqsave(&dwc->lock, flags); 945 946 cfglo = channel_readl(dwc, CFG_LO); 947 channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP); 948 while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--) 949 udelay(2); 950 951 set_bit(DW_DMA_IS_PAUSED, &dwc->flags); 952 953 spin_unlock_irqrestore(&dwc->lock, flags); 954 955 return 0; 956 } 957 958 static inline void dwc_chan_resume(struct dw_dma_chan *dwc) 959 { 960 u32 cfglo = channel_readl(dwc, CFG_LO); 961 962 channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP); 963 964 clear_bit(DW_DMA_IS_PAUSED, &dwc->flags); 965 } 966 967 static int dwc_resume(struct dma_chan *chan) 968 { 969 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 970 unsigned long flags; 971 972 spin_lock_irqsave(&dwc->lock, flags); 973 974 if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags)) 975 dwc_chan_resume(dwc); 976 977 spin_unlock_irqrestore(&dwc->lock, flags); 978 979 return 0; 980 } 981 982 static int dwc_terminate_all(struct dma_chan *chan) 983 { 984 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 985 struct dw_dma *dw = to_dw_dma(chan->device); 986 struct dw_desc *desc, *_desc; 987 unsigned long flags; 988 LIST_HEAD(list); 989 990 spin_lock_irqsave(&dwc->lock, flags); 991 992 clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); 993 994 dwc_chan_disable(dw, dwc); 995 996 dwc_chan_resume(dwc); 997 998 /* active_list entries will end up before queued entries */ 999 list_splice_init(&dwc->queue, &list); 1000 list_splice_init(&dwc->active_list, &list); 1001 1002 spin_unlock_irqrestore(&dwc->lock, flags); 1003 1004 /* Flush all pending and queued descriptors */ 1005 list_for_each_entry_safe(desc, _desc, &list, desc_node) 1006 dwc_descriptor_complete(dwc, desc, false); 1007 1008 return 0; 1009 } 1010 1011 static struct dw_desc *dwc_find_desc(struct dw_dma_chan *dwc, dma_cookie_t c) 1012 { 1013 struct dw_desc *desc; 1014 1015 list_for_each_entry(desc, &dwc->active_list, desc_node) 1016 if (desc->txd.cookie == c) 1017 return desc; 1018 1019 return NULL; 1020 } 1021 1022 static u32 dwc_get_residue(struct dw_dma_chan *dwc, dma_cookie_t cookie) 1023 { 1024 struct dw_desc *desc; 1025 unsigned long flags; 1026 u32 residue; 1027 1028 spin_lock_irqsave(&dwc->lock, flags); 1029 1030 desc = dwc_find_desc(dwc, cookie); 1031 if (desc) { 1032 if (desc == dwc_first_active(dwc)) { 1033 residue = desc->residue; 1034 if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue) 1035 residue -= dwc_get_sent(dwc); 1036 } else { 1037 residue = desc->total_len; 1038 } 1039 } else { 1040 residue = 0; 1041 } 1042 1043 spin_unlock_irqrestore(&dwc->lock, flags); 1044 return residue; 1045 } 1046 1047 static enum dma_status 1048 dwc_tx_status(struct dma_chan *chan, 1049 dma_cookie_t cookie, 1050 struct dma_tx_state *txstate) 1051 { 1052 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1053 enum dma_status ret; 1054 1055 ret = dma_cookie_status(chan, cookie, txstate); 1056 if (ret == DMA_COMPLETE) 1057 return ret; 1058 1059 dwc_scan_descriptors(to_dw_dma(chan->device), dwc); 1060 1061 ret = dma_cookie_status(chan, cookie, txstate); 1062 if (ret == DMA_COMPLETE) 1063 return ret; 1064 1065 dma_set_residue(txstate, dwc_get_residue(dwc, cookie)); 1066 1067 if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags) && ret == DMA_IN_PROGRESS) 1068 return DMA_PAUSED; 1069 1070 return ret; 1071 } 1072 1073 static void dwc_issue_pending(struct dma_chan *chan) 1074 { 1075 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1076 unsigned long flags; 1077 1078 spin_lock_irqsave(&dwc->lock, flags); 1079 if (list_empty(&dwc->active_list)) 1080 dwc_dostart_first_queued(dwc); 1081 spin_unlock_irqrestore(&dwc->lock, flags); 1082 } 1083 1084 /*----------------------------------------------------------------------*/ 1085 1086 static void dw_dma_off(struct dw_dma *dw) 1087 { 1088 unsigned int i; 1089 1090 dma_writel(dw, CFG, 0); 1091 1092 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); 1093 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); 1094 channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); 1095 channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); 1096 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); 1097 1098 while (dma_readl(dw, CFG) & DW_CFG_DMA_EN) 1099 cpu_relax(); 1100 1101 for (i = 0; i < dw->dma.chancnt; i++) 1102 clear_bit(DW_DMA_IS_INITIALIZED, &dw->chan[i].flags); 1103 } 1104 1105 static void dw_dma_on(struct dw_dma *dw) 1106 { 1107 dma_writel(dw, CFG, DW_CFG_DMA_EN); 1108 } 1109 1110 static int dwc_alloc_chan_resources(struct dma_chan *chan) 1111 { 1112 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1113 struct dw_dma *dw = to_dw_dma(chan->device); 1114 1115 dev_vdbg(chan2dev(chan), "%s\n", __func__); 1116 1117 /* ASSERT: channel is idle */ 1118 if (dma_readl(dw, CH_EN) & dwc->mask) { 1119 dev_dbg(chan2dev(chan), "DMA channel not idle?\n"); 1120 return -EIO; 1121 } 1122 1123 dma_cookie_init(chan); 1124 1125 /* 1126 * NOTE: some controllers may have additional features that we 1127 * need to initialize here, like "scatter-gather" (which 1128 * doesn't mean what you think it means), and status writeback. 1129 */ 1130 1131 /* 1132 * We need controller-specific data to set up slave transfers. 1133 */ 1134 if (chan->private && !dw_dma_filter(chan, chan->private)) { 1135 dev_warn(chan2dev(chan), "Wrong controller-specific data\n"); 1136 return -EINVAL; 1137 } 1138 1139 /* Enable controller here if needed */ 1140 if (!dw->in_use) 1141 dw_dma_on(dw); 1142 dw->in_use |= dwc->mask; 1143 1144 return 0; 1145 } 1146 1147 static void dwc_free_chan_resources(struct dma_chan *chan) 1148 { 1149 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1150 struct dw_dma *dw = to_dw_dma(chan->device); 1151 unsigned long flags; 1152 LIST_HEAD(list); 1153 1154 dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__, 1155 dwc->descs_allocated); 1156 1157 /* ASSERT: channel is idle */ 1158 BUG_ON(!list_empty(&dwc->active_list)); 1159 BUG_ON(!list_empty(&dwc->queue)); 1160 BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask); 1161 1162 spin_lock_irqsave(&dwc->lock, flags); 1163 1164 /* Clear custom channel configuration */ 1165 memset(&dwc->dws, 0, sizeof(struct dw_dma_slave)); 1166 1167 clear_bit(DW_DMA_IS_INITIALIZED, &dwc->flags); 1168 1169 /* Disable interrupts */ 1170 channel_clear_bit(dw, MASK.XFER, dwc->mask); 1171 channel_clear_bit(dw, MASK.BLOCK, dwc->mask); 1172 channel_clear_bit(dw, MASK.ERROR, dwc->mask); 1173 1174 spin_unlock_irqrestore(&dwc->lock, flags); 1175 1176 /* Disable controller in case it was a last user */ 1177 dw->in_use &= ~dwc->mask; 1178 if (!dw->in_use) 1179 dw_dma_off(dw); 1180 1181 dev_vdbg(chan2dev(chan), "%s: done\n", __func__); 1182 } 1183 1184 /* --------------------- Cyclic DMA API extensions -------------------- */ 1185 1186 /** 1187 * dw_dma_cyclic_start - start the cyclic DMA transfer 1188 * @chan: the DMA channel to start 1189 * 1190 * Must be called with soft interrupts disabled. Returns zero on success or 1191 * -errno on failure. 1192 */ 1193 int dw_dma_cyclic_start(struct dma_chan *chan) 1194 { 1195 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1196 struct dw_dma *dw = to_dw_dma(chan->device); 1197 unsigned long flags; 1198 1199 if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) { 1200 dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n"); 1201 return -ENODEV; 1202 } 1203 1204 spin_lock_irqsave(&dwc->lock, flags); 1205 1206 /* Enable interrupts to perform cyclic transfer */ 1207 channel_set_bit(dw, MASK.BLOCK, dwc->mask); 1208 1209 dwc_dostart(dwc, dwc->cdesc->desc[0]); 1210 1211 spin_unlock_irqrestore(&dwc->lock, flags); 1212 1213 return 0; 1214 } 1215 EXPORT_SYMBOL(dw_dma_cyclic_start); 1216 1217 /** 1218 * dw_dma_cyclic_stop - stop the cyclic DMA transfer 1219 * @chan: the DMA channel to stop 1220 * 1221 * Must be called with soft interrupts disabled. 1222 */ 1223 void dw_dma_cyclic_stop(struct dma_chan *chan) 1224 { 1225 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1226 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 1227 unsigned long flags; 1228 1229 spin_lock_irqsave(&dwc->lock, flags); 1230 1231 dwc_chan_disable(dw, dwc); 1232 1233 spin_unlock_irqrestore(&dwc->lock, flags); 1234 } 1235 EXPORT_SYMBOL(dw_dma_cyclic_stop); 1236 1237 /** 1238 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer 1239 * @chan: the DMA channel to prepare 1240 * @buf_addr: physical DMA address where the buffer starts 1241 * @buf_len: total number of bytes for the entire buffer 1242 * @period_len: number of bytes for each period 1243 * @direction: transfer direction, to or from device 1244 * 1245 * Must be called before trying to start the transfer. Returns a valid struct 1246 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful. 1247 */ 1248 struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, 1249 dma_addr_t buf_addr, size_t buf_len, size_t period_len, 1250 enum dma_transfer_direction direction) 1251 { 1252 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1253 struct dma_slave_config *sconfig = &dwc->dma_sconfig; 1254 struct dw_cyclic_desc *cdesc; 1255 struct dw_cyclic_desc *retval = NULL; 1256 struct dw_desc *desc; 1257 struct dw_desc *last = NULL; 1258 u8 lms = DWC_LLP_LMS(dwc->dws.m_master); 1259 unsigned long was_cyclic; 1260 unsigned int reg_width; 1261 unsigned int periods; 1262 unsigned int i; 1263 unsigned long flags; 1264 1265 spin_lock_irqsave(&dwc->lock, flags); 1266 if (dwc->nollp) { 1267 spin_unlock_irqrestore(&dwc->lock, flags); 1268 dev_dbg(chan2dev(&dwc->chan), 1269 "channel doesn't support LLP transfers\n"); 1270 return ERR_PTR(-EINVAL); 1271 } 1272 1273 if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) { 1274 spin_unlock_irqrestore(&dwc->lock, flags); 1275 dev_dbg(chan2dev(&dwc->chan), 1276 "queue and/or active list are not empty\n"); 1277 return ERR_PTR(-EBUSY); 1278 } 1279 1280 was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags); 1281 spin_unlock_irqrestore(&dwc->lock, flags); 1282 if (was_cyclic) { 1283 dev_dbg(chan2dev(&dwc->chan), 1284 "channel already prepared for cyclic DMA\n"); 1285 return ERR_PTR(-EBUSY); 1286 } 1287 1288 retval = ERR_PTR(-EINVAL); 1289 1290 if (unlikely(!is_slave_direction(direction))) 1291 goto out_err; 1292 1293 dwc->direction = direction; 1294 1295 if (direction == DMA_MEM_TO_DEV) 1296 reg_width = __ffs(sconfig->dst_addr_width); 1297 else 1298 reg_width = __ffs(sconfig->src_addr_width); 1299 1300 periods = buf_len / period_len; 1301 1302 /* Check for too big/unaligned periods and unaligned DMA buffer. */ 1303 if (period_len > (dwc->block_size << reg_width)) 1304 goto out_err; 1305 if (unlikely(period_len & ((1 << reg_width) - 1))) 1306 goto out_err; 1307 if (unlikely(buf_addr & ((1 << reg_width) - 1))) 1308 goto out_err; 1309 1310 retval = ERR_PTR(-ENOMEM); 1311 1312 cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL); 1313 if (!cdesc) 1314 goto out_err; 1315 1316 cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL); 1317 if (!cdesc->desc) 1318 goto out_err_alloc; 1319 1320 for (i = 0; i < periods; i++) { 1321 desc = dwc_desc_get(dwc); 1322 if (!desc) 1323 goto out_err_desc_get; 1324 1325 switch (direction) { 1326 case DMA_MEM_TO_DEV: 1327 lli_write(desc, dar, sconfig->dst_addr); 1328 lli_write(desc, sar, buf_addr + period_len * i); 1329 lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan) 1330 | DWC_CTLL_DST_WIDTH(reg_width) 1331 | DWC_CTLL_SRC_WIDTH(reg_width) 1332 | DWC_CTLL_DST_FIX 1333 | DWC_CTLL_SRC_INC 1334 | DWC_CTLL_INT_EN)); 1335 1336 lli_set(desc, ctllo, sconfig->device_fc ? 1337 DWC_CTLL_FC(DW_DMA_FC_P_M2P) : 1338 DWC_CTLL_FC(DW_DMA_FC_D_M2P)); 1339 1340 break; 1341 case DMA_DEV_TO_MEM: 1342 lli_write(desc, dar, buf_addr + period_len * i); 1343 lli_write(desc, sar, sconfig->src_addr); 1344 lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan) 1345 | DWC_CTLL_SRC_WIDTH(reg_width) 1346 | DWC_CTLL_DST_WIDTH(reg_width) 1347 | DWC_CTLL_DST_INC 1348 | DWC_CTLL_SRC_FIX 1349 | DWC_CTLL_INT_EN)); 1350 1351 lli_set(desc, ctllo, sconfig->device_fc ? 1352 DWC_CTLL_FC(DW_DMA_FC_P_P2M) : 1353 DWC_CTLL_FC(DW_DMA_FC_D_P2M)); 1354 1355 break; 1356 default: 1357 break; 1358 } 1359 1360 lli_write(desc, ctlhi, period_len >> reg_width); 1361 cdesc->desc[i] = desc; 1362 1363 if (last) 1364 lli_write(last, llp, desc->txd.phys | lms); 1365 1366 last = desc; 1367 } 1368 1369 /* Let's make a cyclic list */ 1370 lli_write(last, llp, cdesc->desc[0]->txd.phys | lms); 1371 1372 dev_dbg(chan2dev(&dwc->chan), 1373 "cyclic prepared buf %pad len %zu period %zu periods %d\n", 1374 &buf_addr, buf_len, period_len, periods); 1375 1376 cdesc->periods = periods; 1377 dwc->cdesc = cdesc; 1378 1379 return cdesc; 1380 1381 out_err_desc_get: 1382 while (i--) 1383 dwc_desc_put(dwc, cdesc->desc[i]); 1384 out_err_alloc: 1385 kfree(cdesc); 1386 out_err: 1387 clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); 1388 return (struct dw_cyclic_desc *)retval; 1389 } 1390 EXPORT_SYMBOL(dw_dma_cyclic_prep); 1391 1392 /** 1393 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer 1394 * @chan: the DMA channel to free 1395 */ 1396 void dw_dma_cyclic_free(struct dma_chan *chan) 1397 { 1398 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1399 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 1400 struct dw_cyclic_desc *cdesc = dwc->cdesc; 1401 unsigned int i; 1402 unsigned long flags; 1403 1404 dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__); 1405 1406 if (!cdesc) 1407 return; 1408 1409 spin_lock_irqsave(&dwc->lock, flags); 1410 1411 dwc_chan_disable(dw, dwc); 1412 1413 dma_writel(dw, CLEAR.BLOCK, dwc->mask); 1414 dma_writel(dw, CLEAR.ERROR, dwc->mask); 1415 dma_writel(dw, CLEAR.XFER, dwc->mask); 1416 1417 spin_unlock_irqrestore(&dwc->lock, flags); 1418 1419 for (i = 0; i < cdesc->periods; i++) 1420 dwc_desc_put(dwc, cdesc->desc[i]); 1421 1422 kfree(cdesc->desc); 1423 kfree(cdesc); 1424 1425 dwc->cdesc = NULL; 1426 1427 clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); 1428 } 1429 EXPORT_SYMBOL(dw_dma_cyclic_free); 1430 1431 /*----------------------------------------------------------------------*/ 1432 1433 int dw_dma_probe(struct dw_dma_chip *chip) 1434 { 1435 struct dw_dma_platform_data *pdata; 1436 struct dw_dma *dw; 1437 bool autocfg = false; 1438 unsigned int dw_params; 1439 unsigned int i; 1440 int err; 1441 1442 dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL); 1443 if (!dw) 1444 return -ENOMEM; 1445 1446 dw->pdata = devm_kzalloc(chip->dev, sizeof(*dw->pdata), GFP_KERNEL); 1447 if (!dw->pdata) 1448 return -ENOMEM; 1449 1450 dw->regs = chip->regs; 1451 chip->dw = dw; 1452 1453 pm_runtime_get_sync(chip->dev); 1454 1455 if (!chip->pdata) { 1456 dw_params = dma_readl(dw, DW_PARAMS); 1457 dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params); 1458 1459 autocfg = dw_params >> DW_PARAMS_EN & 1; 1460 if (!autocfg) { 1461 err = -EINVAL; 1462 goto err_pdata; 1463 } 1464 1465 /* Reassign the platform data pointer */ 1466 pdata = dw->pdata; 1467 1468 /* Get hardware configuration parameters */ 1469 pdata->nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 7) + 1; 1470 pdata->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1; 1471 for (i = 0; i < pdata->nr_masters; i++) { 1472 pdata->data_width[i] = 1473 4 << (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3); 1474 } 1475 pdata->block_size = dma_readl(dw, MAX_BLK_SIZE); 1476 1477 /* Fill platform data with the default values */ 1478 pdata->is_private = true; 1479 pdata->is_memcpy = true; 1480 pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING; 1481 pdata->chan_priority = CHAN_PRIORITY_ASCENDING; 1482 } else if (chip->pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) { 1483 err = -EINVAL; 1484 goto err_pdata; 1485 } else { 1486 memcpy(dw->pdata, chip->pdata, sizeof(*dw->pdata)); 1487 1488 /* Reassign the platform data pointer */ 1489 pdata = dw->pdata; 1490 } 1491 1492 dw->chan = devm_kcalloc(chip->dev, pdata->nr_channels, sizeof(*dw->chan), 1493 GFP_KERNEL); 1494 if (!dw->chan) { 1495 err = -ENOMEM; 1496 goto err_pdata; 1497 } 1498 1499 /* Calculate all channel mask before DMA setup */ 1500 dw->all_chan_mask = (1 << pdata->nr_channels) - 1; 1501 1502 /* Force dma off, just in case */ 1503 dw_dma_off(dw); 1504 1505 /* Create a pool of consistent memory blocks for hardware descriptors */ 1506 dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev, 1507 sizeof(struct dw_desc), 4, 0); 1508 if (!dw->desc_pool) { 1509 dev_err(chip->dev, "No memory for descriptors dma pool\n"); 1510 err = -ENOMEM; 1511 goto err_pdata; 1512 } 1513 1514 tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); 1515 1516 err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED, 1517 "dw_dmac", dw); 1518 if (err) 1519 goto err_pdata; 1520 1521 INIT_LIST_HEAD(&dw->dma.channels); 1522 for (i = 0; i < pdata->nr_channels; i++) { 1523 struct dw_dma_chan *dwc = &dw->chan[i]; 1524 1525 dwc->chan.device = &dw->dma; 1526 dma_cookie_init(&dwc->chan); 1527 if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING) 1528 list_add_tail(&dwc->chan.device_node, 1529 &dw->dma.channels); 1530 else 1531 list_add(&dwc->chan.device_node, &dw->dma.channels); 1532 1533 /* 7 is highest priority & 0 is lowest. */ 1534 if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) 1535 dwc->priority = pdata->nr_channels - i - 1; 1536 else 1537 dwc->priority = i; 1538 1539 dwc->ch_regs = &__dw_regs(dw)->CHAN[i]; 1540 spin_lock_init(&dwc->lock); 1541 dwc->mask = 1 << i; 1542 1543 INIT_LIST_HEAD(&dwc->active_list); 1544 INIT_LIST_HEAD(&dwc->queue); 1545 1546 channel_clear_bit(dw, CH_EN, dwc->mask); 1547 1548 dwc->direction = DMA_TRANS_NONE; 1549 1550 /* Hardware configuration */ 1551 if (autocfg) { 1552 unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1; 1553 void __iomem *addr = &__dw_regs(dw)->DWC_PARAMS[r]; 1554 unsigned int dwc_params = dma_readl_native(addr); 1555 1556 dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i, 1557 dwc_params); 1558 1559 /* 1560 * Decode maximum block size for given channel. The 1561 * stored 4 bit value represents blocks from 0x00 for 3 1562 * up to 0x0a for 4095. 1563 */ 1564 dwc->block_size = 1565 (4 << ((pdata->block_size >> 4 * i) & 0xf)) - 1; 1566 dwc->nollp = 1567 (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0; 1568 } else { 1569 dwc->block_size = pdata->block_size; 1570 dwc->nollp = !pdata->multi_block[i]; 1571 } 1572 } 1573 1574 /* Clear all interrupts on all channels. */ 1575 dma_writel(dw, CLEAR.XFER, dw->all_chan_mask); 1576 dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask); 1577 dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask); 1578 dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask); 1579 dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask); 1580 1581 /* Set capabilities */ 1582 dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); 1583 if (pdata->is_private) 1584 dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask); 1585 if (pdata->is_memcpy) 1586 dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); 1587 1588 dw->dma.dev = chip->dev; 1589 dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; 1590 dw->dma.device_free_chan_resources = dwc_free_chan_resources; 1591 1592 dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy; 1593 dw->dma.device_prep_slave_sg = dwc_prep_slave_sg; 1594 1595 dw->dma.device_config = dwc_config; 1596 dw->dma.device_pause = dwc_pause; 1597 dw->dma.device_resume = dwc_resume; 1598 dw->dma.device_terminate_all = dwc_terminate_all; 1599 1600 dw->dma.device_tx_status = dwc_tx_status; 1601 dw->dma.device_issue_pending = dwc_issue_pending; 1602 1603 /* DMA capabilities */ 1604 dw->dma.src_addr_widths = DW_DMA_BUSWIDTHS; 1605 dw->dma.dst_addr_widths = DW_DMA_BUSWIDTHS; 1606 dw->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) | 1607 BIT(DMA_MEM_TO_MEM); 1608 dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; 1609 1610 err = dma_async_device_register(&dw->dma); 1611 if (err) 1612 goto err_dma_register; 1613 1614 dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n", 1615 pdata->nr_channels); 1616 1617 pm_runtime_put_sync_suspend(chip->dev); 1618 1619 return 0; 1620 1621 err_dma_register: 1622 free_irq(chip->irq, dw); 1623 err_pdata: 1624 pm_runtime_put_sync_suspend(chip->dev); 1625 return err; 1626 } 1627 EXPORT_SYMBOL_GPL(dw_dma_probe); 1628 1629 int dw_dma_remove(struct dw_dma_chip *chip) 1630 { 1631 struct dw_dma *dw = chip->dw; 1632 struct dw_dma_chan *dwc, *_dwc; 1633 1634 pm_runtime_get_sync(chip->dev); 1635 1636 dw_dma_off(dw); 1637 dma_async_device_unregister(&dw->dma); 1638 1639 free_irq(chip->irq, dw); 1640 tasklet_kill(&dw->tasklet); 1641 1642 list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels, 1643 chan.device_node) { 1644 list_del(&dwc->chan.device_node); 1645 channel_clear_bit(dw, CH_EN, dwc->mask); 1646 } 1647 1648 pm_runtime_put_sync_suspend(chip->dev); 1649 return 0; 1650 } 1651 EXPORT_SYMBOL_GPL(dw_dma_remove); 1652 1653 int dw_dma_disable(struct dw_dma_chip *chip) 1654 { 1655 struct dw_dma *dw = chip->dw; 1656 1657 dw_dma_off(dw); 1658 return 0; 1659 } 1660 EXPORT_SYMBOL_GPL(dw_dma_disable); 1661 1662 int dw_dma_enable(struct dw_dma_chip *chip) 1663 { 1664 struct dw_dma *dw = chip->dw; 1665 1666 dw_dma_on(dw); 1667 return 0; 1668 } 1669 EXPORT_SYMBOL_GPL(dw_dma_enable); 1670 1671 MODULE_LICENSE("GPL v2"); 1672 MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver"); 1673 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); 1674 MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>"); 1675