// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP DMAengine support
 */
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>

#include "../virt-dma.h"

#define OMAP_SDMA_REQUESTS	127
#define OMAP_SDMA_CHANNELS	32

struct omap_dma_config {
	int lch_end;
	unsigned int rw_priority:1;
	unsigned int needs_busy_check:1;
	unsigned int may_lose_context:1;
	unsigned int needs_lch_clear:1;
};

struct omap_dma_context {
	u32 irqenable_l0;
	u32 irqenable_l1;
	u32 ocp_sysconfig;
	u32 gcr;
};

struct omap_dmadev {
	struct dma_device ddev;
	spinlock_t lock;
	void __iomem *base;
	const struct omap_dma_reg *reg_map;
	struct omap_system_dma_plat_info *plat;
	const struct omap_dma_config *cfg;
	struct notifier_block nb;
	struct omap_dma_context context;
	int lch_count;
	DECLARE_BITMAP(lch_bitmap, OMAP_SDMA_CHANNELS);
	struct mutex lch_lock;		/* for assigning logical channels */
	bool legacy;
	bool ll123_supported;
	struct dma_pool *desc_pool;
	unsigned dma_requests;
	spinlock_t irq_lock;
	uint32_t irq_enable_mask;
	struct omap_chan **lch_map;
};

struct omap_chan {
	struct virt_dma_chan vc;
	void __iomem *channel_base;
	const struct omap_dma_reg *reg_map;
	uint32_t ccr;

	struct dma_slave_config	cfg;
	unsigned dma_sig;
	bool cyclic;
	bool paused;
	bool running;

	int dma_ch;
	struct omap_desc *desc;
	unsigned sgidx;
};

#define DESC_NXT_SV_REFRESH	(0x1 << 24)
#define DESC_NXT_SV_REUSE	(0x2 << 24)
#define DESC_NXT_DV_REFRESH	(0x1 << 26)
#define DESC_NXT_DV_REUSE	(0x2 << 26)
#define DESC_NTYPE_TYPE2	(0x2 << 29)

/* Type 2 descriptor with Source or Destination address update */
struct omap_type2_desc {
	uint32_t next_desc;
	uint32_t en;
	uint32_t addr; /* src or dst */
	uint16_t fn;
	uint16_t cicr;
	int16_t cdei;
	int16_t csei;
	int32_t cdfi;
	int32_t csfi;
} __packed;

struct omap_sg {
	dma_addr_t addr;
	uint32_t en;		/* number of elements (24-bit) */
	uint32_t fn;		/* number of frames (16-bit) */
	int32_t fi;		/* for double indexing */
	int16_t ei;		/* for double indexing */

	/* Linked list */
	struct omap_type2_desc *t2_desc;
	dma_addr_t t2_desc_paddr;
};

struct omap_desc {
	struct virt_dma_desc vd;
	bool using_ll;
	enum dma_transfer_direction dir;
	dma_addr_t dev_addr;
	bool polled;

	int32_t fi;		/* for OMAP_DMA_SYNC_PACKET / double indexing */
	int16_t ei;		/* for double indexing */
	uint8_t es;		/* CSDP_DATA_TYPE_xxx */
	uint32_t ccr;		/* CCR value */
	uint16_t clnk_ctrl;	/* CLNK_CTRL value */
	uint16_t cicr;		/* CICR value */
	uint32_t csdp;		/* CSDP value */

	unsigned sglen;
	struct omap_sg sg[];
};

enum {
	CAPS_0_SUPPORT_LL123	= BIT(20),	/* Linked List type1/2/3 */
	CAPS_0_SUPPORT_LL4	= BIT(21),	/* Linked List type4 */

	CCR_FS			= BIT(5),
	CCR_READ_PRIORITY	= BIT(6),
	CCR_ENABLE		= BIT(7),
	CCR_AUTO_INIT		= BIT(8),	/* OMAP1 only */
	CCR_REPEAT		= BIT(9),	/* OMAP1 only */
	CCR_OMAP31_DISABLE	= BIT(10),	/* OMAP1 only */
	CCR_SUSPEND_SENSITIVE	= BIT(8),	/* OMAP2+ only */
	CCR_RD_ACTIVE		= BIT(9),	/* OMAP2+ only */
	CCR_WR_ACTIVE		= BIT(10),	/* OMAP2+ only */
	CCR_SRC_AMODE_CONSTANT	= 0 << 12,
	CCR_SRC_AMODE_POSTINC	= 1 << 12,
	CCR_SRC_AMODE_SGLIDX	= 2 << 12,
	CCR_SRC_AMODE_DBLIDX	= 3 << 12,
	CCR_DST_AMODE_CONSTANT	= 0 << 14,
	CCR_DST_AMODE_POSTINC	= 1 << 14,
	CCR_DST_AMODE_SGLIDX	= 2 << 14,
	CCR_DST_AMODE_DBLIDX	= 3 << 14,
	CCR_CONSTANT_FILL	= BIT(16),
	CCR_TRANSPARENT_COPY	= BIT(17),
	CCR_BS			= BIT(18),
	CCR_SUPERVISOR		= BIT(22),
	CCR_PREFETCH		= BIT(23),
	CCR_TRIGGER_SRC		= BIT(24),
	CCR_BUFFERING_DISABLE	= BIT(25),
	CCR_WRITE_PRIORITY	= BIT(26),
	CCR_SYNC_ELEMENT	= 0,
	CCR_SYNC_FRAME		= CCR_FS,
	CCR_SYNC_BLOCK		= CCR_BS,
	CCR_SYNC_PACKET		= CCR_BS | CCR_FS,

	CSDP_DATA_TYPE_8	= 0,
	CSDP_DATA_TYPE_16	= 1,
	CSDP_DATA_TYPE_32	= 2,
	CSDP_SRC_PORT_EMIFF	= 0 << 2, /* OMAP1 only */
	CSDP_SRC_PORT_EMIFS	= 1 << 2, /* OMAP1 only */
	CSDP_SRC_PORT_OCP_T1	= 2 << 2, /* OMAP1 only */
	CSDP_SRC_PORT_TIPB	= 3 << 2, /* OMAP1 only */
	CSDP_SRC_PORT_OCP_T2	= 4 << 2, /* OMAP1 only */
	CSDP_SRC_PORT_MPUI	= 5 << 2, /* OMAP1 only */
	CSDP_SRC_PACKED		= BIT(6),
	CSDP_SRC_BURST_1	= 0 << 7,
	CSDP_SRC_BURST_16	= 1 << 7,
	CSDP_SRC_BURST_32	= 2 << 7,
	CSDP_SRC_BURST_64	= 3 << 7,
	CSDP_DST_PORT_EMIFF	= 0 << 9, /* OMAP1 only */
	CSDP_DST_PORT_EMIFS	= 1 << 9, /* OMAP1 only */
	CSDP_DST_PORT_OCP_T1	= 2 << 9, /* OMAP1 only */
	CSDP_DST_PORT_TIPB	= 3 << 9, /* OMAP1 only */
	CSDP_DST_PORT_OCP_T2	= 4 << 9, /* OMAP1 only */
	CSDP_DST_PORT_MPUI	= 5 << 9, /* OMAP1 only */
	CSDP_DST_PACKED		= BIT(13),
	CSDP_DST_BURST_1	= 0 << 14,
	CSDP_DST_BURST_16	= 1 << 14,
	CSDP_DST_BURST_32	= 2 << 14,
	CSDP_DST_BURST_64	= 3 << 14,
	CSDP_WRITE_NON_POSTED	= 0 << 16,
	CSDP_WRITE_POSTED	= 1 << 16,
	CSDP_WRITE_LAST_NON_POSTED = 2 << 16,

	CICR_TOUT_IE		= BIT(0),	/* OMAP1 only */
	CICR_DROP_IE		= BIT(1),
	CICR_HALF_IE		= BIT(2),
	CICR_FRAME_IE		= BIT(3),
	CICR_LAST_IE		= BIT(4),
	CICR_BLOCK_IE		= BIT(5),
	CICR_PKT_IE		= BIT(7),	/* OMAP2+ only */
	CICR_TRANS_ERR_IE	= BIT(8),	/* OMAP2+ only */
	CICR_SUPERVISOR_ERR_IE	= BIT(10),	/* OMAP2+ only */
	CICR_MISALIGNED_ERR_IE	= BIT(11),	/* OMAP2+ only */
	CICR_DRAIN_IE		= BIT(12),	/* OMAP2+ only */
	CICR_SUPER_BLOCK_IE	= BIT(14),	/* OMAP2+ only */

	CLNK_CTRL_ENABLE_LNK	= BIT(15),

	CDP_DST_VALID_INC	= 0 << 0,
	CDP_DST_VALID_RELOAD	= 1 << 0,
	CDP_DST_VALID_REUSE	= 2 << 0,
	CDP_SRC_VALID_INC	= 0 << 2,
	CDP_SRC_VALID_RELOAD	= 1 << 2,
	CDP_SRC_VALID_REUSE	= 2 << 2,
	CDP_NTYPE_TYPE1		= 1 << 4,
	CDP_NTYPE_TYPE2		= 2 << 4,
	CDP_NTYPE_TYPE3		= 3 << 4,
	CDP_TMODE_NORMAL	= 0 << 8,
	CDP_TMODE_LLIST		= 1 << 8,
	CDP_FAST		= BIT(10),
};

static const unsigned es_bytes[] = {
	[CSDP_DATA_TYPE_8] = 1,
	[CSDP_DATA_TYPE_16] = 2,
	[CSDP_DATA_TYPE_32] = 4,
};

static bool omap_dma_filter_fn(struct dma_chan *chan, void *param);
static struct of_dma_filter_info omap_dma_info = {
	.filter_fn = omap_dma_filter_fn,
};

static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
	return container_of(d, struct omap_dmadev, ddev);
}

static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct omap_chan, vc.chan);
}

static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct omap_desc, vd.tx);
}

static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
	struct omap_desc *d = to_omap_dma_desc(&vd->tx);

	if (d->using_ll) {
		struct omap_dmadev *od = to_omap_dma_dev(vd->tx.chan->device);
		int i;

		for (i = 0; i < d->sglen; i++) {
			if (d->sg[i].t2_desc)
				dma_pool_free(od->desc_pool, d->sg[i].t2_desc,
					      d->sg[i].t2_desc_paddr);
		}
	}

	kfree(d);
}

static void omap_dma_fill_type2_desc(struct omap_desc *d, int idx,
				     enum dma_transfer_direction dir, bool last)
{
	struct omap_sg *sg = &d->sg[idx];
	struct omap_type2_desc *t2_desc = sg->t2_desc;

	if (idx)
		d->sg[idx - 1].t2_desc->next_desc = sg->t2_desc_paddr;
	if (last)
		t2_desc->next_desc = 0xfffffffc;

	t2_desc->en = sg->en;
	t2_desc->addr = sg->addr;
	t2_desc->fn = sg->fn & 0xffff;
	t2_desc->cicr = d->cicr;
	if (!last)
		t2_desc->cicr &= ~CICR_BLOCK_IE;

	switch (dir) {
	case DMA_DEV_TO_MEM:
		t2_desc->cdei = sg->ei;
		t2_desc->csei = d->ei;
		t2_desc->cdfi = sg->fi;
		t2_desc->csfi = d->fi;

		t2_desc->en |= DESC_NXT_DV_REFRESH;
		t2_desc->en |= DESC_NXT_SV_REUSE;
		break;
	case DMA_MEM_TO_DEV:
		t2_desc->cdei = d->ei;
		t2_desc->csei = sg->ei;
		t2_desc->cdfi = d->fi;
		t2_desc->csfi = sg->fi;

		t2_desc->en |= DESC_NXT_SV_REFRESH;
		t2_desc->en |= DESC_NXT_DV_REUSE;
		break;
	default:
		return;
	}

	t2_desc->en |= DESC_NTYPE_TYPE2;
}

static void omap_dma_write(uint32_t val, unsigned type, void __iomem *addr)
{
	switch (type) {
	case OMAP_DMA_REG_16BIT:
		writew_relaxed(val, addr);
		break;
	case OMAP_DMA_REG_2X16BIT:
		writew_relaxed(val, addr);
		writew_relaxed(val >> 16, addr + 2);
		break;
	case OMAP_DMA_REG_32BIT:
		writel_relaxed(val, addr);
		break;
	default:
		WARN_ON(1);
	}
}

static unsigned omap_dma_read(unsigned type, void __iomem *addr)
{
	unsigned val;

	switch (type) {
	case OMAP_DMA_REG_16BIT:
		val = readw_relaxed(addr);
		break;
	case OMAP_DMA_REG_2X16BIT:
		val = readw_relaxed(addr);
		val |= readw_relaxed(addr + 2) << 16;
		break;
	case OMAP_DMA_REG_32BIT:
		val = readl_relaxed(addr);
		break;
	default:
		WARN_ON(1);
		val = 0;
	}

	return val;
}

static void omap_dma_glbl_write(struct omap_dmadev *od, unsigned reg, unsigned val)
{
	const struct omap_dma_reg *r = od->reg_map + reg;

	WARN_ON(r->stride);

	omap_dma_write(val, r->type, od->base + r->offset);
}

static unsigned omap_dma_glbl_read(struct omap_dmadev *od, unsigned reg)
{
	const struct omap_dma_reg *r = od->reg_map + reg;

	WARN_ON(r->stride);

	return omap_dma_read(r->type, od->base + r->offset);
}

static void omap_dma_chan_write(struct omap_chan *c, unsigned reg, unsigned val)
{
	const struct omap_dma_reg *r = c->reg_map + reg;

	omap_dma_write(val, r->type, c->channel_base + r->offset);
}

static unsigned omap_dma_chan_read(struct omap_chan *c, unsigned reg)
{
	const struct omap_dma_reg *r = c->reg_map + reg;

	return omap_dma_read(r->type, c->channel_base + r->offset);
}

static void omap_dma_clear_csr(struct omap_chan *c)
{
	if (dma_omap1())
		omap_dma_chan_read(c, CSR);
	else
		omap_dma_chan_write(c, CSR, ~0);
}

static unsigned omap_dma_get_csr(struct omap_chan *c)
{
	unsigned val = omap_dma_chan_read(c, CSR);

	if (!dma_omap1())
		omap_dma_chan_write(c, CSR, val);

	return val;
}

static void omap_dma_clear_lch(struct omap_dmadev *od, int lch)
{
	struct omap_chan *c;
	int i;

	c = od->lch_map[lch];
	if (!c)
		return;

	for (i = CSDP; i <= od->cfg->lch_end; i++)
		omap_dma_chan_write(c, i, 0);
}

static void omap_dma_assign(struct omap_dmadev *od, struct omap_chan *c,
			    unsigned lch)
{
	c->channel_base = od->base + od->plat->channel_stride * lch;

	od->lch_map[lch] = c;
}

static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint16_t cicr = d->cicr;

	if (__dma_omap15xx(od->plat->dma_attr))
		omap_dma_chan_write(c, CPC, 0);
	else
		omap_dma_chan_write(c, CDAC, 0);

	omap_dma_clear_csr(c);

	if (d->using_ll) {
		uint32_t cdp = CDP_TMODE_LLIST | CDP_NTYPE_TYPE2 | CDP_FAST;

		if (d->dir == DMA_DEV_TO_MEM)
			cdp |= (CDP_DST_VALID_RELOAD | CDP_SRC_VALID_REUSE);
		else
			cdp |= (CDP_DST_VALID_REUSE | CDP_SRC_VALID_RELOAD);
		omap_dma_chan_write(c, CDP, cdp);

		omap_dma_chan_write(c, CNDP, d->sg[0].t2_desc_paddr);
		omap_dma_chan_write(c, CCDN, 0);
		omap_dma_chan_write(c, CCFN, 0xffff);
		omap_dma_chan_write(c, CCEN, 0xffffff);

		cicr &= ~CICR_BLOCK_IE;
	} else if (od->ll123_supported) {
		omap_dma_chan_write(c, CDP, 0);
	}

	/* Enable interrupts */
	omap_dma_chan_write(c, CICR, cicr);

	/* Enable channel */
	omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);

	c->running = true;
}

static void omap_dma_drain_chan(struct omap_chan *c)
{
	int i;
	u32 val;

	/* Wait for sDMA FIFO to drain */
	for (i = 0; ; i++) {
		val = omap_dma_chan_read(c, CCR);
		if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
			break;

		if (i > 100)
			break;

		udelay(5);
	}

	if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
		dev_err(c->vc.chan.device->dev,
			"DMA drain did not complete on lch %d\n",
			c->dma_ch);
}

static int omap_dma_stop(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint32_t val;

	/* disable irq */
	omap_dma_chan_write(c, CICR, 0);

	omap_dma_clear_csr(c);

	val = omap_dma_chan_read(c, CCR);
	if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) {
		uint32_t sysconfig;

		sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
		val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
		val |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
		omap_dma_glbl_write(od, OCP_SYSCONFIG, val);

		val = omap_dma_chan_read(c, CCR);
		val &= ~CCR_ENABLE;
		omap_dma_chan_write(c, CCR, val);

		if (!(c->ccr & CCR_BUFFERING_DISABLE))
			omap_dma_drain_chan(c);

		omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig);
	} else {
		if (!(val & CCR_ENABLE))
			return -EINVAL;

		val &= ~CCR_ENABLE;
		omap_dma_chan_write(c, CCR, val);

		if (!(c->ccr & CCR_BUFFERING_DISABLE))
			omap_dma_drain_chan(c);
	}

	mb();

	if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
		val = omap_dma_chan_read(c, CLNK_CTRL);

		if (dma_omap1())
			val |= 1 << 14; /* set the STOP_LNK bit */
		else
			val &= ~CLNK_CTRL_ENABLE_LNK;

		omap_dma_chan_write(c, CLNK_CTRL, val);
	}
	c->running = false;
	return 0;
}

static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d)
{
	struct omap_sg *sg = d->sg + c->sgidx;
	unsigned cxsa, cxei, cxfi;

	if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) {
		cxsa = CDSA;
		cxei = CDEI;
		cxfi = CDFI;
	} else {
		cxsa = CSSA;
		cxei = CSEI;
		cxfi = CSFI;
	}

	omap_dma_chan_write(c, cxsa, sg->addr);
	omap_dma_chan_write(c, cxei, sg->ei);
	omap_dma_chan_write(c, cxfi, sg->fi);
	omap_dma_chan_write(c, CEN, sg->en);
	omap_dma_chan_write(c, CFN, sg->fn);

	omap_dma_start(c, d);
	c->sgidx++;
}

static void omap_dma_start_desc(struct omap_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
	struct omap_desc *d;
	unsigned cxsa, cxei, cxfi;

	if (!vd) {
		c->desc = NULL;
		return;
	}

	list_del(&vd->node);

	c->desc = d = to_omap_dma_desc(&vd->tx);
	c->sgidx = 0;

	/*
	 * This provides the necessary barrier to ensure data held in
	 * DMA coherent memory is visible to the DMA engine prior to
	 * the transfer starting.
	 */
	mb();

	omap_dma_chan_write(c, CCR, d->ccr);
	if (dma_omap1())
		omap_dma_chan_write(c, CCR2, d->ccr >> 16);

	if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) {
		cxsa = CSSA;
		cxei = CSEI;
		cxfi = CSFI;
	} else {
		cxsa = CDSA;
		cxei = CDEI;
		cxfi = CDFI;
	}

	omap_dma_chan_write(c, cxsa, d->dev_addr);
	omap_dma_chan_write(c, cxei, d->ei);
	omap_dma_chan_write(c, cxfi, d->fi);
	omap_dma_chan_write(c, CSDP, d->csdp);
	omap_dma_chan_write(c, CLNK_CTRL, d->clnk_ctrl);

	omap_dma_start_sg(c, d);
}

static void omap_dma_callback(int ch, u16 status, void *data)
{
	struct omap_chan *c = data;
	struct omap_desc *d;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	d = c->desc;
	if (d) {
		if (c->cyclic) {
			vchan_cyclic_callback(&d->vd);
		} else if (d->using_ll || c->sgidx == d->sglen) {
			omap_dma_start_desc(c);
			vchan_cookie_complete(&d->vd);
		} else {
			omap_dma_start_sg(c, d);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

static irqreturn_t omap_dma_irq(int irq, void *devid)
{
	struct omap_dmadev *od = devid;
	unsigned status, channel;

	spin_lock(&od->irq_lock);

	status = omap_dma_glbl_read(od, IRQSTATUS_L1);
	status &= od->irq_enable_mask;
	if (status == 0) {
		spin_unlock(&od->irq_lock);
		return IRQ_NONE;
	}

	while ((channel = ffs(status)) != 0) {
		unsigned mask, csr;
		struct omap_chan *c;

		channel -= 1;
		mask = BIT(channel);
		status &= ~mask;

		c = od->lch_map[channel];
		if (c == NULL) {
			/* This should never happen */
			dev_err(od->ddev.dev, "invalid channel %u\n", channel);
			continue;
		}

		csr = omap_dma_get_csr(c);
		omap_dma_glbl_write(od, IRQSTATUS_L1, mask);

		omap_dma_callback(channel, csr, c);
	}

	spin_unlock(&od->irq_lock);

	return IRQ_HANDLED;
}

static int omap_dma_get_lch(struct omap_dmadev *od, int *lch)
{
	int channel;

	mutex_lock(&od->lch_lock);
	channel = find_first_zero_bit(od->lch_bitmap, od->lch_count);
	if (channel >= od->lch_count)
		goto out_busy;
	set_bit(channel, od->lch_bitmap);
	mutex_unlock(&od->lch_lock);

	omap_dma_clear_lch(od, channel);
	*lch = channel;

	return 0;

out_busy:
	mutex_unlock(&od->lch_lock);
	*lch = -EINVAL;

	return -EBUSY;
}

static void omap_dma_put_lch(struct omap_dmadev *od, int lch)
{
	omap_dma_clear_lch(od, lch);
	mutex_lock(&od->lch_lock);
	clear_bit(lch, od->lch_bitmap);
	mutex_unlock(&od->lch_lock);
}

static inline bool omap_dma_legacy(struct omap_dmadev *od)
{
	return IS_ENABLED(CONFIG_ARCH_OMAP1) && od->legacy;
}

static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct device *dev = od->ddev.dev;
	int ret;

	if (omap_dma_legacy(od)) {
		ret = omap_request_dma(c->dma_sig, "DMA engine",
				       omap_dma_callback, c, &c->dma_ch);
	} else {
		ret = omap_dma_get_lch(od, &c->dma_ch);
	}

	dev_dbg(dev, "allocating channel %u for %u\n", c->dma_ch, c->dma_sig);

	if (ret >= 0) {
		omap_dma_assign(od, c, c->dma_ch);

		if (!omap_dma_legacy(od)) {
			unsigned val;

			spin_lock_irq(&od->irq_lock);
			val = BIT(c->dma_ch);
			omap_dma_glbl_write(od, IRQSTATUS_L1, val);
			od->irq_enable_mask |= val;
			omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);

			val = omap_dma_glbl_read(od, IRQENABLE_L0);
			val &= ~BIT(c->dma_ch);
			omap_dma_glbl_write(od, IRQENABLE_L0, val);
			spin_unlock_irq(&od->irq_lock);
		}
	}

	if (dma_omap1()) {
		if (__dma_omap16xx(od->plat->dma_attr)) {
			c->ccr = CCR_OMAP31_DISABLE;
			/* Duplicate what plat-omap/dma.c does */
			c->ccr |= c->dma_ch + 1;
		} else {
			c->ccr = c->dma_sig & 0x1f;
		}
	} else {
		c->ccr = c->dma_sig & 0x1f;
		c->ccr |= (c->dma_sig & ~0x1f) << 14;
	}
	if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING)
		c->ccr |= CCR_BUFFERING_DISABLE;

	return ret;
}

static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);

	if (!omap_dma_legacy(od)) {
		spin_lock_irq(&od->irq_lock);
		od->irq_enable_mask &= ~BIT(c->dma_ch);
		omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);
		spin_unlock_irq(&od->irq_lock);
	}

	c->channel_base = NULL;
	od->lch_map[c->dma_ch] = NULL;
	vchan_free_chan_resources(&c->vc);

	if (omap_dma_legacy(od))
		omap_free_dma(c->dma_ch);
	else
		omap_dma_put_lch(od, c->dma_ch);

	dev_dbg(od->ddev.dev, "freeing channel %u used for %u\n", c->dma_ch,
		c->dma_sig);
	c->dma_sig = 0;
}

static size_t omap_dma_sg_size(struct omap_sg *sg)
{
	return sg->en * sg->fn;
}

static size_t omap_dma_desc_size(struct omap_desc *d)
{
	unsigned i;
	size_t size;

	for (size = i = 0; i < d->sglen; i++)
		size += omap_dma_sg_size(&d->sg[i]);

	return size * es_bytes[d->es];
}

static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
{
	unsigned i;
	size_t size, es_size = es_bytes[d->es];

	for (size = i = 0; i < d->sglen; i++) {
		size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;

		if (size)
			size += this_size;
		else if (addr >= d->sg[i].addr &&
			 addr < d->sg[i].addr + this_size)
			size += d->sg[i].addr + this_size - addr;
	}
	return size;
}

/*
 * OMAP 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
 * read before the DMA controller finished disabling the channel.
 */
static uint32_t omap_dma_chan_read_3_3(struct omap_chan *c, unsigned reg)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint32_t val;

	val = omap_dma_chan_read(c, reg);
	if (val == 0 && od->plat->errata & DMA_ERRATA_3_3)
		val = omap_dma_chan_read(c, reg);

	return val;
}

static dma_addr_t omap_dma_get_src_pos(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	dma_addr_t addr, cdac;

	if (__dma_omap15xx(od->plat->dma_attr)) {
		addr = omap_dma_chan_read(c, CPC);
	} else {
		addr = omap_dma_chan_read_3_3(c, CSAC);
		cdac = omap_dma_chan_read_3_3(c, CDAC);

		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel has
		 * not been started (no data has been transferred so far).
		 * Return the programmed source start address in this case.
		 */
		if (cdac == 0)
			addr = omap_dma_chan_read(c, CSSA);
	}

	if (dma_omap1())
		addr |= omap_dma_chan_read(c, CSSA) & 0xffff0000;

	return addr;
}

static dma_addr_t omap_dma_get_dst_pos(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	dma_addr_t addr;

	if (__dma_omap15xx(od->plat->dma_attr)) {
		addr = omap_dma_chan_read(c, CPC);
	} else {
		addr = omap_dma_chan_read_3_3(c, CDAC);

		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel
		 * has not been started (no data has been transferred so
		 * far). Return the programmed destination start address in
		 * this case.
		 */
		if (addr == 0)
			addr = omap_dma_chan_read(c, CDSA);
	}

	if (dma_omap1())
		addr |= omap_dma_chan_read(c, CDSA) & 0xffff0000;

	return addr;
}

static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_status ret;
	unsigned long flags;
	struct omap_desc *d = NULL;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (c->desc && c->desc->vd.tx.cookie == cookie)
		d = c->desc;

	if (!txstate)
		goto out;

	if (d) {
		dma_addr_t pos;

		if (d->dir == DMA_MEM_TO_DEV)
			pos = omap_dma_get_src_pos(c);
		else if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM)
			pos = omap_dma_get_dst_pos(c);
		else
			pos = 0;

		txstate->residue = omap_dma_desc_size_pos(d, pos);
	} else {
		struct virt_dma_desc *vd = vchan_find_desc(&c->vc, cookie);

		if (vd)
			txstate->residue = omap_dma_desc_size(
				to_omap_dma_desc(&vd->tx));
		else
			txstate->residue = 0;
	}

out:
	if (ret == DMA_IN_PROGRESS && c->paused) {
		ret = DMA_PAUSED;
	} else if (d && d->polled && c->running) {
		uint32_t ccr = omap_dma_chan_read(c, CCR);
		/*
		 * The channel is no longer active, set the return value
		 * accordingly and mark it as completed
		 */
		if (!(ccr & CCR_ENABLE)) {
			ret = DMA_COMPLETE;
			omap_dma_start_desc(c);
			vchan_cookie_complete(&d->vd);
		}
	}

	spin_unlock_irqrestore(&c->vc.lock, flags);

	return ret;
}

static void omap_dma_issue_pending(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc)
		omap_dma_start_desc(c);
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
	enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct scatterlist *sgent;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned i, es, en, frame_bytes;
	bool ll_failed = false;
	u32 burst;
	u32 port_window, port_window_bytes;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
		port_window = c->cfg.src_port_window_size;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
		port_window = c->cfg.dst_port_window_size;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = CSDP_DATA_TYPE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = CSDP_DATA_TYPE_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = CSDP_DATA_TYPE_32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(struct_size(d, sg, sglen), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->es = es;

	/* When the port_window is used, one frame must cover the window */
	if (port_window) {
		burst = port_window;
		port_window_bytes = port_window * es_bytes[es];

		d->ei = 1;
		/*
		 * One frame covers the port_window and by configure
		 * the source frame index to be -1 * (port_window - 1)
		 * we instruct the sDMA that after a frame is processed
		 * it should move back to the start of the window.
		 */
		d->fi = -(port_window_bytes - 1);
	}

	d->ccr = c->ccr | CCR_SYNC_FRAME;
	if (dir == DMA_DEV_TO_MEM) {
		d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED;

		d->ccr |= CCR_DST_AMODE_POSTINC;
		if (port_window) {
			d->ccr |= CCR_SRC_AMODE_DBLIDX;

			if (port_window_bytes >= 64)
				d->csdp |= CSDP_SRC_BURST_64;
			else if (port_window_bytes >= 32)
				d->csdp |= CSDP_SRC_BURST_32;
			else if (port_window_bytes >= 16)
				d->csdp |= CSDP_SRC_BURST_16;

		} else {
			d->ccr |= CCR_SRC_AMODE_CONSTANT;
		}
	} else {
		d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED;

		d->ccr |= CCR_SRC_AMODE_POSTINC;
		if (port_window) {
			d->ccr |= CCR_DST_AMODE_DBLIDX;

			if (port_window_bytes >= 64)
				d->csdp |= CSDP_DST_BURST_64;
			else if (port_window_bytes >= 32)
				d->csdp |= CSDP_DST_BURST_32;
			else if (port_window_bytes >= 16)
				d->csdp |= CSDP_DST_BURST_16;
		} else {
			d->ccr |= CCR_DST_AMODE_CONSTANT;
		}
	}

	d->cicr = CICR_DROP_IE | CICR_BLOCK_IE;
	d->csdp |= es;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;

		if (dir == DMA_DEV_TO_MEM)
			d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_TIPB;
		else
			d->csdp |= CSDP_DST_PORT_TIPB | CSDP_SRC_PORT_EMIFF;
	} else {
		if (dir == DMA_DEV_TO_MEM)
			d->ccr |= CCR_TRIGGER_SRC;

		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;

		if (port_window)
			d->csdp |= CSDP_WRITE_LAST_NON_POSTED;
	}
	if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS)
		d->clnk_ctrl = c->dma_ch;

	/*
	 * Build our scatterlist entries: each contains the address,
	 * the number of elements (EN) in each frame, and the number of
	 * frames (FN).  Number of bytes for this entry = ES * EN * FN.
	 *
	 * Burst size translates to number of elements with frame sync.
	 * Note: DMA engine defines burst to be the number of dev-width
	 * transfers.
	 */
	en = burst;
	frame_bytes = es_bytes[es] * en;

	if (sglen >= 2)
		d->using_ll = od->ll123_supported;

	for_each_sg(sgl, sgent, sglen, i) {
		struct omap_sg *osg = &d->sg[i];

		osg->addr = sg_dma_address(sgent);
		osg->en = en;
		osg->fn = sg_dma_len(sgent) / frame_bytes;

		if (d->using_ll) {
			osg->t2_desc = dma_pool_alloc(od->desc_pool, GFP_ATOMIC,
						      &osg->t2_desc_paddr);
			if (!osg->t2_desc) {
				dev_err(chan->device->dev,
					"t2_desc[%d] allocation failed\n", i);
				ll_failed = true;
				d->using_ll = false;
				continue;
			}

			omap_dma_fill_type2_desc(d, i, dir, (i == sglen - 1));
		}
	}

	d->sglen = sglen;

	/* Release the dma_pool entries if one allocation failed */
	if (ll_failed) {
		for (i = 0; i < d->sglen; i++) {
			struct omap_sg *osg = &d->sg[i];

			if (osg->t2_desc) {
				dma_pool_free(od->desc_pool, osg->t2_desc,
					      osg->t2_desc_paddr);
				osg->t2_desc = NULL;
			}
		}
	}

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}

static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir, unsigned long flags)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned es;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = CSDP_DATA_TYPE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = CSDP_DATA_TYPE_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = CSDP_DATA_TYPE_32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->fi = burst;
	d->es = es;
	d->sg[0].addr = buf_addr;
	d->sg[0].en = period_len / es_bytes[es];
	d->sg[0].fn = buf_len / period_len;
	d->sglen = 1;

	d->ccr = c->ccr;
	if (dir == DMA_DEV_TO_MEM)
		d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
	else
		d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;

	d->cicr = CICR_DROP_IE;
	if (flags & DMA_PREP_INTERRUPT)
		d->cicr |= CICR_FRAME_IE;

	d->csdp = es;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;

		if (dir == DMA_DEV_TO_MEM)
			d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_MPUI;
		else
			d->csdp |= CSDP_DST_PORT_MPUI | CSDP_SRC_PORT_EMIFF;
	} else {
		if (burst)
			d->ccr |= CCR_SYNC_PACKET;
		else
			d->ccr |= CCR_SYNC_ELEMENT;

		if (dir == DMA_DEV_TO_MEM) {
			d->ccr |= CCR_TRIGGER_SRC;
			d->csdp |= CSDP_DST_PACKED;
		} else {
			d->csdp |= CSDP_SRC_PACKED;
		}

		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;

		d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
	}

	if (__dma_omap15xx(od->plat->dma_attr))
		d->ccr |= CCR_AUTO_INIT | CCR_REPEAT;
	else
		d->clnk_ctrl = c->dma_ch | CLNK_CTRL_ENABLE_LNK;

	c->cyclic = true;

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}

static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long tx_flags)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct omap_desc *d;
	uint8_t data_type;

	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	data_type = __ffs((src | dest | len));
	if (data_type > CSDP_DATA_TYPE_32)
		data_type = CSDP_DATA_TYPE_32;

	d->dir = DMA_MEM_TO_MEM;
	d->dev_addr = src;
	d->fi = 0;
	d->es = data_type;
	d->sg[0].en = len / BIT(data_type);
	d->sg[0].fn = 1;
	d->sg[0].addr = dest;
	d->sglen = 1;
	d->ccr = c->ccr;
	d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC;

	if (tx_flags & DMA_PREP_INTERRUPT)
		d->cicr |= CICR_FRAME_IE;
	else
		d->polled = true;

	d->csdp = data_type;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;
		d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF;
	} else {
		d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED;
		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
		d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
	}

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}

static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct omap_desc *d;
	struct omap_sg *sg;
	uint8_t data_type;
	size_t src_icg, dst_icg;

	/* Slave mode is not supported */
	if (is_slave_direction(xt->dir))
		return NULL;

	if (xt->frame_size != 1 || xt->numf == 0)
		return NULL;

	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	data_type = __ffs((xt->src_start | xt->dst_start | xt->sgl[0].size));
	if (data_type > CSDP_DATA_TYPE_32)
		data_type = CSDP_DATA_TYPE_32;

	sg = &d->sg[0];
	d->dir = DMA_MEM_TO_MEM;
	d->dev_addr = xt->src_start;
	d->es = data_type;
	sg->en = xt->sgl[0].size / BIT(data_type);
	sg->fn = xt->numf;
	sg->addr = xt->dst_start;
	d->sglen = 1;
	d->ccr = c->ccr;

	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
	if (src_icg) {
		d->ccr |= CCR_SRC_AMODE_DBLIDX;
		d->ei = 1;
		d->fi = src_icg + 1;
	} else if (xt->src_inc) {
		d->ccr |= CCR_SRC_AMODE_POSTINC;
		d->fi = 0;
	} else {
		dev_err(chan->device->dev,
			"%s: SRC constant addressing is not supported\n",
			__func__);
		kfree(d);
		return NULL;
	}

	if (dst_icg) {
		d->ccr |= CCR_DST_AMODE_DBLIDX;
		sg->ei = 1;
		sg->fi = dst_icg + 1;
	} else if (xt->dst_inc) {
		d->ccr |= CCR_DST_AMODE_POSTINC;
		sg->fi = 0;
	} else {
		dev_err(chan->device->dev,
			"%s: DST constant addressing is not supported\n",
			__func__);
		kfree(d);
		return NULL;
	}

	d->cicr = CICR_DROP_IE | CICR_FRAME_IE;

	d->csdp = data_type;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;
		d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF;
	} else {
		d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED;
		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
		d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
	}

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}

static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	if (cfg->src_maxburst > chan->device->max_burst ||
	    cfg->dst_maxburst > chan->device->max_burst)
		return -EINVAL;

	memcpy(&c->cfg, cfg, sizeof(c->cfg));

	return 0;
}

static int omap_dma_terminate_all(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after omap_dma_stop() returns (even if it does, it will see
	 * c->desc is NULL and exit.)
	 */
	if (c->desc) {
		vchan_terminate_vdesc(&c->desc->vd);
		c->desc = NULL;
		/* Avoid stopping the dma twice */
		if (!c->paused)
			omap_dma_stop(c);
	}

	c->cyclic = false;
	c->paused = false;

	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

static void omap_dma_synchronize(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	vchan_synchronize(&c->vc);
}

static int omap_dma_pause(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	unsigned long flags;
	int ret = -EINVAL;
	bool can_pause = false;

	spin_lock_irqsave(&od->irq_lock, flags);

	if (!c->desc)
		goto out;

	if (c->cyclic)
		can_pause = true;

	/*
	 * We do not allow DMA_MEM_TO_DEV transfers to be paused.
	 * From the AM572x TRM, 16.1.4.18 Disabling a Channel During Transfer:
	 * "When a channel is disabled during a transfer, the channel undergoes
	 * an abort, unless it is hardware-source-synchronized …".
	 * A source-synchronised channel is one where the fetching of data is
	 * under control of the device. In other words, a device-to-memory
	 * transfer. So, a destination-synchronised channel (which would be a
	 * memory-to-device transfer) undergoes an abort if the CCR_ENABLE
	 * bit is cleared.
	 * From 16.1.4.20.4.6.2 Abort: "If an abort trigger occurs, the channel
	 * aborts immediately after completion of current read/write
	 * transactions and then the FIFO is cleaned up." The term "cleaned up"
	 * is not defined. TI recommends to check that RD_ACTIVE and WR_ACTIVE
	 * are both clear _before_ disabling the channel, otherwise data loss
	 * will occur.
	 * The problem is that if the channel is active, then device activity
	 * can result in DMA activity starting between reading those as both
	 * clear and the write to DMA_CCR to clear the enable bit hitting the
	 * hardware. If the DMA hardware can't drain the data in its FIFO to the
	 * destination, then data loss "might" occur (say if we write to an UART
	 * and the UART is not accepting any further data).
	 */
	else if (c->desc->dir == DMA_DEV_TO_MEM)
		can_pause = true;

	if (can_pause && !c->paused) {
		ret = omap_dma_stop(c);
		if (!ret)
			c->paused = true;
	}
out:
	spin_unlock_irqrestore(&od->irq_lock, flags);

	return ret;
}

static int omap_dma_resume(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&od->irq_lock, flags);

	if (c->paused && c->desc) {
		mb();

		/* Restore channel link register */
		omap_dma_chan_write(c, CLNK_CTRL, c->desc->clnk_ctrl);

		omap_dma_start(c, c->desc);
		c->paused = false;
		ret = 0;
	}
	spin_unlock_irqrestore(&od->irq_lock, flags);

	return ret;
}

static int omap_dma_chan_init(struct omap_dmadev *od)
{
	struct omap_chan *c;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	c->reg_map = od->reg_map;
	c->vc.desc_free = omap_dma_desc_free;
	vchan_init(&c->vc, &od->ddev);

	return 0;
}

static void omap_dma_free(struct omap_dmadev *od)
{
	while (!list_empty(&od->ddev.channels)) {
		struct omap_chan *c = list_first_entry(&od->ddev.channels,
			struct omap_chan, vc.chan.device_node);

		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
	}
}

/* Currently used by omap2 & 3 to block deeper SoC idle states */
static bool omap_dma_busy(struct omap_dmadev *od)
{
	struct omap_chan *c;
	int lch = -1;

	while (1) {
		lch = find_next_bit(od->lch_bitmap, od->lch_count, lch + 1);
		if (lch >= od->lch_count)
			break;
		c = od->lch_map[lch];
		if (!c)
			continue;
		if (omap_dma_chan_read(c, CCR) & CCR_ENABLE)
			return true;
	}

	return false;
}

/* Currently only used for omap2. For omap1, also a check for lcd_dma is needed */
static int omap_dma_busy_notifier(struct notifier_block *nb,
				  unsigned long cmd, void *v)
{
	struct omap_dmadev *od;

	od = container_of(nb, struct omap_dmadev, nb);

	switch (cmd) {
	case CPU_CLUSTER_PM_ENTER:
		if (omap_dma_busy(od))
			return NOTIFY_BAD;
		break;
	case CPU_CLUSTER_PM_ENTER_FAILED:
	case CPU_CLUSTER_PM_EXIT:
		break;
	}

	return NOTIFY_OK;
}

/*
 * We are using IRQENABLE_L1, and legacy DMA code was using IRQENABLE_L0.
 * As the DSP may be using IRQENABLE_L2 and L3, let's not touch those for
 * now. Context save seems to be only currently needed on omap3.
 */
static void omap_dma_context_save(struct omap_dmadev *od)
{
	od->context.irqenable_l0 = omap_dma_glbl_read(od, IRQENABLE_L0);
	od->context.irqenable_l1 = omap_dma_glbl_read(od, IRQENABLE_L1);
	od->context.ocp_sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
	od->context.gcr = omap_dma_glbl_read(od, GCR);
}

static void omap_dma_context_restore(struct omap_dmadev *od)
{
	int i;

	omap_dma_glbl_write(od, GCR, od->context.gcr);
	omap_dma_glbl_write(od, OCP_SYSCONFIG, od->context.ocp_sysconfig);
	omap_dma_glbl_write(od, IRQENABLE_L0, od->context.irqenable_l0);
	omap_dma_glbl_write(od, IRQENABLE_L1, od->context.irqenable_l1);

	/* Clear IRQSTATUS_L0 as legacy DMA code is no longer doing it */
	if (od->plat->errata & DMA_ROMCODE_BUG)
		omap_dma_glbl_write(od, IRQSTATUS_L0, 0);

	/* Clear dma channels */
	for (i = 0; i < od->lch_count; i++)
		omap_dma_clear_lch(od, i);
}

/* Currently only used for omap3 */
static int omap_dma_context_notifier(struct notifier_block *nb,
				     unsigned long cmd, void *v)
{
	struct omap_dmadev *od;

	od = container_of(nb, struct omap_dmadev, nb);

	switch (cmd) {
	case CPU_CLUSTER_PM_ENTER:
		if (omap_dma_busy(od))
			return NOTIFY_BAD;
		omap_dma_context_save(od);
		break;
	case CPU_CLUSTER_PM_ENTER_FAILED:	/* No need to restore context */
		break;
	case CPU_CLUSTER_PM_EXIT:
		omap_dma_context_restore(od);
		break;
	}

	return NOTIFY_OK;
}

static void omap_dma_init_gcr(struct omap_dmadev *od, int arb_rate,
			      int max_fifo_depth, int tparams)
{
	u32 val;

	/* Set only for omap2430 and later */
	if (!od->cfg->rw_priority)
		return;

	if (max_fifo_depth == 0)
		max_fifo_depth = 1;
	if (arb_rate == 0)
		arb_rate = 1;

	val = 0xff & max_fifo_depth;
	val |= (0x3 & tparams) << 12;
	val |= (arb_rate & 0xff) << 16;

	omap_dma_glbl_write(od, GCR, val);
}

#define OMAP_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

/*
 * No flags currently set for default configuration as omap1 is still
 * using platform data.
 */
static const struct omap_dma_config default_cfg;

static int omap_dma_probe(struct platform_device *pdev)
{
	const struct omap_dma_config *conf;
	struct omap_dmadev *od;
	struct resource *res;
	int rc, i, irq;
	u32 val;

	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	od->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(od->base))
		return PTR_ERR(od->base);

	conf = of_device_get_match_data(&pdev->dev);
	if (conf) {
		od->cfg = conf;
		od->plat = dev_get_platdata(&pdev->dev);
		if (!od->plat) {
			dev_err(&pdev->dev, "omap_system_dma_plat_info is missing");
			return -ENODEV;
		}
	} else if (IS_ENABLED(CONFIG_ARCH_OMAP1)) {
		od->cfg = &default_cfg;

		od->plat = omap_get_plat_info();
		if (!od->plat)
			return -EPROBE_DEFER;
	} else {
		return -ENODEV;
	}

	od->reg_map = od->plat->reg_map;

	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
	dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, od->ddev.cap_mask);
	od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
	od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
	od->ddev.device_tx_status = omap_dma_tx_status;
	od->ddev.device_issue_pending = omap_dma_issue_pending;
	od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
	od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
	od->ddev.device_prep_dma_memcpy = omap_dma_prep_dma_memcpy;
	od->ddev.device_prep_interleaved_dma = omap_dma_prep_dma_interleaved;
	od->ddev.device_config = omap_dma_slave_config;
	od->ddev.device_pause = omap_dma_pause;
	od->ddev.device_resume = omap_dma_resume;
	od->ddev.device_terminate_all = omap_dma_terminate_all;
	od->ddev.device_synchronize = omap_dma_synchronize;
	od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS;
	od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS;
	od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	if (__dma_omap15xx(od->plat->dma_attr))
		od->ddev.residue_granularity =
				DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	else
		od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	od->ddev.max_burst = SZ_16M - 1; /* CCEN: 24bit unsigned */
	od->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&od->ddev.channels);
	mutex_init(&od->lch_lock);
	spin_lock_init(&od->lock);
	spin_lock_init(&od->irq_lock);

	/* Number of DMA requests */
	od->dma_requests = OMAP_SDMA_REQUESTS;
	if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
						      "dma-requests",
						      &od->dma_requests)) {
		dev_info(&pdev->dev,
			 "Missing dma-requests property, using %u.\n",
			 OMAP_SDMA_REQUESTS);
	}

	/* Number of available logical channels */
	if (!pdev->dev.of_node) {
		od->lch_count = od->plat->dma_attr->lch_count;
		if (unlikely(!od->lch_count))
			od->lch_count = OMAP_SDMA_CHANNELS;
	} else if (of_property_read_u32(pdev->dev.of_node, "dma-channels",
					&od->lch_count)) {
		dev_info(&pdev->dev,
			 "Missing dma-channels property, using %u.\n",
			 OMAP_SDMA_CHANNELS);
		od->lch_count = OMAP_SDMA_CHANNELS;
	}

	/* Mask of allowed logical channels */
	if (pdev->dev.of_node && !of_property_read_u32(pdev->dev.of_node,
						       "dma-channel-mask",
						       &val)) {
		/* Tag channels not in mask as reserved */
		val = ~val;
		bitmap_from_arr32(od->lch_bitmap, &val, od->lch_count);
	}
	if (od->plat->dma_attr->dev_caps & HS_CHANNELS_RESERVED)
		bitmap_set(od->lch_bitmap, 0, 2);

	od->lch_map = devm_kcalloc(&pdev->dev, od->lch_count,
				   sizeof(*od->lch_map),
				   GFP_KERNEL);
	if (!od->lch_map)
		return -ENOMEM;

	for (i = 0; i < od->dma_requests; i++) {
		rc = omap_dma_chan_init(od);
		if (rc) {
			omap_dma_free(od);
			return rc;
		}
	}

	irq = platform_get_irq(pdev, 1);
	if (irq <= 0) {
		dev_info(&pdev->dev, "failed to get L1 IRQ: %d\n", irq);
		od->legacy = true;
	} else {
		/* Disable all interrupts */
		od->irq_enable_mask = 0;
		omap_dma_glbl_write(od, IRQENABLE_L1, 0);

		rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
				      IRQF_SHARED, "omap-dma-engine", od);
		if (rc) {
			omap_dma_free(od);
			return rc;
		}
	}

	if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123)
		od->ll123_supported = true;

	od->ddev.filter.map = od->plat->slave_map;
	od->ddev.filter.mapcnt = od->plat->slavecnt;
	od->ddev.filter.fn = omap_dma_filter_fn;

	if (od->ll123_supported) {
		od->desc_pool = dma_pool_create(dev_name(&pdev->dev),
						&pdev->dev,
						sizeof(struct omap_type2_desc),
						4, 0);
		if (!od->desc_pool) {
			dev_err(&pdev->dev,
				"unable to allocate descriptor pool\n");
			od->ll123_supported = false;
		}
	}

	rc = dma_async_device_register(&od->ddev);
	if (rc) {
		pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
			rc);
		omap_dma_free(od);
		return rc;
	}

	platform_set_drvdata(pdev, od);

	if (pdev->dev.of_node) {
		omap_dma_info.dma_cap = od->ddev.cap_mask;

		/* Device-tree DMA controller registration */
		rc = of_dma_controller_register(pdev->dev.of_node,
				of_dma_simple_xlate, &omap_dma_info);
		if (rc) {
			pr_warn("OMAP-DMA: failed to register DMA controller\n");
			dma_async_device_unregister(&od->ddev);
			omap_dma_free(od);
		}
	}

	omap_dma_init_gcr(od, DMA_DEFAULT_ARB_RATE, DMA_DEFAULT_FIFO_DEPTH, 0);

	if (od->cfg->needs_busy_check) {
		od->nb.notifier_call = omap_dma_busy_notifier;
		cpu_pm_register_notifier(&od->nb);
	} else if (od->cfg->may_lose_context) {
		od->nb.notifier_call = omap_dma_context_notifier;
		cpu_pm_register_notifier(&od->nb);
	}

	dev_info(&pdev->dev, "OMAP DMA engine driver%s\n",
		 od->ll123_supported ? " (LinkedList1/2/3 supported)" : "");

	return rc;
}

static int omap_dma_remove(struct platform_device *pdev)
{
	struct omap_dmadev *od = platform_get_drvdata(pdev);
	int irq;

	if (od->cfg->may_lose_context)
		cpu_pm_unregister_notifier(&od->nb);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	irq = platform_get_irq(pdev, 1);
	devm_free_irq(&pdev->dev, irq, od);

	dma_async_device_unregister(&od->ddev);

	if (!omap_dma_legacy(od)) {
		/* Disable all interrupts */
		omap_dma_glbl_write(od, IRQENABLE_L0, 0);
	}

	if (od->ll123_supported)
		dma_pool_destroy(od->desc_pool);

	omap_dma_free(od);

	return 0;
}

static const struct omap_dma_config omap2420_data = {
	.lch_end = CCFN,
	.rw_priority = true,
	.needs_lch_clear = true,
	.needs_busy_check = true,
};

static const struct omap_dma_config omap2430_data = {
	.lch_end = CCFN,
	.rw_priority = true,
	.needs_lch_clear = true,
};

static const struct omap_dma_config omap3430_data = {
	.lch_end = CCFN,
	.rw_priority = true,
	.needs_lch_clear = true,
	.may_lose_context = true,
};

static const struct omap_dma_config omap3630_data = {
	.lch_end = CCDN,
	.rw_priority = true,
	.needs_lch_clear = true,
	.may_lose_context = true,
};

static const struct omap_dma_config omap4_data = {
	.lch_end = CCDN,
	.rw_priority = true,
	.needs_lch_clear = true,
};

static const struct of_device_id omap_dma_match[] = {
	{ .compatible = "ti,omap2420-sdma", .data = &omap2420_data, },
	{ .compatible = "ti,omap2430-sdma", .data = &omap2430_data, },
	{ .compatible = "ti,omap3430-sdma", .data = &omap3430_data, },
	{ .compatible = "ti,omap3630-sdma", .data = &omap3630_data, },
	{ .compatible = "ti,omap4430-sdma", .data = &omap4_data, },
	{},
};
MODULE_DEVICE_TABLE(of, omap_dma_match);

static struct platform_driver omap_dma_driver = {
	.probe	= omap_dma_probe,
	.remove	= omap_dma_remove,
	.driver = {
		.name = "omap-dma-engine",
		.of_match_table = omap_dma_match,
	},
};

static bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &omap_dma_driver.driver) {
		struct omap_dmadev *od = to_omap_dma_dev(chan->device);
		struct omap_chan *c = to_omap_dma_chan(chan);
		unsigned req = *(unsigned *)param;

		if (req <= od->dma_requests) {
			c->dma_sig = req;
			return true;
		}
	}
	return false;
}

static int omap_dma_init(void)
{
	return platform_driver_register(&omap_dma_driver);
}
subsys_initcall(omap_dma_init);

static void __exit omap_dma_exit(void)
{
	platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");