// SPDX-License-Identifier: GPL-2.0+
//
// drivers/dma/imx-dma.c
//
// This file contains a driver for the Freescale i.MX DMA engine
// found on i.MX1/21/27
//
// Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
// Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>

#include <linux/err.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#include <asm/irq.h>
#include <linux/platform_data/dma-imx.h>

#include "dmaengine.h"
#define IMXDMA_MAX_CHAN_DESCRIPTORS	16
#define IMX_DMA_CHANNELS	16

#define IMX_DMA_2D_SLOTS	2
#define IMX_DMA_2D_SLOT_A	0
#define IMX_DMA_2D_SLOT_B	1

#define IMX_DMA_LENGTH_LOOP	((unsigned int)-1)
#define IMX_DMA_MEMSIZE_32	(0 << 4)
#define IMX_DMA_MEMSIZE_8	(1 << 4)
#define IMX_DMA_MEMSIZE_16	(2 << 4)
#define IMX_DMA_TYPE_LINEAR	(0 << 10)
#define IMX_DMA_TYPE_2D		(1 << 10)
#define IMX_DMA_TYPE_FIFO	(2 << 10)

#define IMX_DMA_ERR_BURST	(1 << 0)
#define IMX_DMA_ERR_REQUEST	(1 << 1)
#define IMX_DMA_ERR_TRANSFER	(1 << 2)
#define IMX_DMA_ERR_BUFFER	(1 << 3)
#define IMX_DMA_ERR_TIMEOUT	(1 << 4)

#define DMA_DCR		0x00		/* Control Register */
#define DMA_DISR	0x04		/* Interrupt status Register */
#define DMA_DIMR	0x08		/* Interrupt mask Register */
#define DMA_DBTOSR	0x0c		/* Burst timeout status Register */
#define DMA_DRTOSR	0x10		/* Request timeout Register */
#define DMA_DSESR	0x14		/* Transfer Error Status Register */
#define DMA_DBOSR	0x18		/* Buffer overflow status Register */
#define DMA_DBTOCR	0x1c		/* Burst timeout control Register */
#define DMA_WSRA	0x40		/* W-Size Register A */
#define DMA_XSRA	0x44		/* X-Size Register A */
#define DMA_YSRA	0x48		/* Y-Size Register A */
#define DMA_WSRB	0x4c		/* W-Size Register B */
#define DMA_XSRB	0x50		/* X-Size Register B */
#define DMA_YSRB	0x54		/* Y-Size Register B */
#define DMA_SAR(x)	(0x80 + ((x) << 6))	/* Source Address Registers */
#define DMA_DAR(x)	(0x84 + ((x) << 6))	/* Destination Address Registers */
#define DMA_CNTR(x)	(0x88 + ((x) << 6))	/* Count Registers */
#define DMA_CCR(x)	(0x8c + ((x) << 6))	/* Control Registers */
#define DMA_RSSR(x)	(0x90 + ((x) << 6))	/* Request source select Registers */
#define DMA_BLR(x)	(0x94 + ((x) << 6))	/* Burst length Registers */
#define DMA_RTOR(x)	(0x98 + ((x) << 6))	/* Request timeout Registers */
#define DMA_BUCR(x)	(0x98 + ((x) << 6))	/* Bus Utilization Registers */
#define DMA_CCNR(x)	(0x9C + ((x) << 6))	/* Channel counter Registers */
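/*
 * Per-channel registers live in 0x40-byte blocks starting at 0x80, hence
 * the ((x) << 6) stride above. Note that there are only two sets of 2D
 * dimension registers (W/X/Y-Size sets A and B) shared by all 16 channels;
 * the driver arbitrates them through the slots_2d[] reference counts
 * defined further down.
 */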
#define DCR_DRST	(1<<1)
#define DCR_DEN		(1<<0)
#define DBTOCR_EN	(1<<15)
#define DBTOCR_CNT(x)	((x) & 0x7fff)
#define CNTR_CNT(x)	((x) & 0xffffff)
#define CCR_ACRPT	(1<<14)
#define CCR_DMOD_LINEAR	(0x0 << 12)
#define CCR_DMOD_2D	(0x1 << 12)
#define CCR_DMOD_FIFO	(0x2 << 12)
#define CCR_DMOD_EOBFIFO	(0x3 << 12)
#define CCR_SMOD_LINEAR	(0x0 << 10)
#define CCR_SMOD_2D	(0x1 << 10)
#define CCR_SMOD_FIFO	(0x2 << 10)
#define CCR_SMOD_EOBFIFO	(0x3 << 10)
#define CCR_MDIR_DEC	(1<<9)
#define CCR_MSEL_B	(1<<8)
#define CCR_DSIZ_32	(0x0 << 6)
#define CCR_DSIZ_8	(0x1 << 6)
#define CCR_DSIZ_16	(0x2 << 6)
#define CCR_SSIZ_32	(0x0 << 4)
#define CCR_SSIZ_8	(0x1 << 4)
#define CCR_SSIZ_16	(0x2 << 4)
#define CCR_REN		(1<<3)
#define CCR_RPT		(1<<2)
#define CCR_FRC		(1<<1)
#define CCR_CEN		(1<<0)
#define RTOR_EN		(1<<15)
#define RTOR_CLK	(1<<14)
#define RTOR_PSC	(1<<13)

enum imxdma_prep_type {
	IMXDMA_DESC_MEMCPY,
	IMXDMA_DESC_INTERLEAVED,
	IMXDMA_DESC_SLAVE_SG,
	IMXDMA_DESC_CYCLIC,
};

struct imx_dma_2d_config {
	u16		xsr;
	u16		ysr;
	u16		wsr;
	int		count;
};

struct imxdma_desc {
	struct list_head		node;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	dma_addr_t			src;
	dma_addr_t			dest;
	size_t				len;
	enum dma_transfer_direction	direction;
	enum imxdma_prep_type		type;
	/* For memcpy and interleaved */
	unsigned int			config_port;
	unsigned int			config_mem;
	/* For interleaved transfers */
	unsigned int			x;
	unsigned int			y;
	unsigned int			w;
	/* For slave sg and cyclic */
	struct scatterlist		*sg;
	unsigned int			sgcount;
};

struct imxdma_channel {
	int				hw_chaining;
	struct timer_list		watchdog;
	struct imxdma_engine		*imxdma;
	unsigned int			channel;

	struct tasklet_struct		dma_tasklet;
	struct list_head		ld_free;
	struct list_head		ld_queue;
	struct list_head		ld_active;
	int				descs_allocated;
	enum dma_slave_buswidth		word_size;
	dma_addr_t			per_address;
	u32				watermark_level;
	struct dma_chan			chan;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	int				dma_request;
	struct scatterlist		*sg_list;
	u32				ccr_from_device;
	u32				ccr_to_device;
	bool				enabled_2d;
	int				slot_2d;
	unsigned int			irq;
	struct dma_slave_config		config;
};

enum imx_dma_type {
	IMX1_DMA,
	IMX21_DMA,
	IMX27_DMA,
};

struct imxdma_engine {
	struct device			*dev;
	struct device_dma_parameters	dma_parms;
	struct dma_device		dma_device;
	void __iomem			*base;
	struct clk			*dma_ahb;
	struct clk			*dma_ipg;
	spinlock_t			lock;
	struct imx_dma_2d_config	slots_2d[IMX_DMA_2D_SLOTS];
	struct imxdma_channel		channel[IMX_DMA_CHANNELS];
	enum imx_dma_type		devtype;
	unsigned int			irq;
	unsigned int			irq_err;
};

struct imxdma_filter_data {
	struct imxdma_engine	*imxdma;
	int			request;
};

static const struct platform_device_id imx_dma_devtype[] = {
	{
		.name = "imx1-dma",
		.driver_data = IMX1_DMA,
	}, {
		.name = "imx21-dma",
		.driver_data = IMX21_DMA,
	}, {
		.name = "imx27-dma",
		.driver_data = IMX27_DMA,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, imx_dma_devtype);

static const struct of_device_id imx_dma_of_dev_id[] = {
	{
		.compatible = "fsl,imx1-dma",
		.data = &imx_dma_devtype[IMX1_DMA],
	}, {
		.compatible = "fsl,imx21-dma",
		.data = &imx_dma_devtype[IMX21_DMA],
	}, {
		.compatible = "fsl,imx27-dma",
		.data = &imx_dma_devtype[IMX27_DMA],
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, imx_dma_of_dev_id);

static inline int is_imx1_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX1_DMA;
}

static inline int is_imx27_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX27_DMA;
}

static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct imxdma_channel, chan);
}

static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
{
	struct imxdma_desc *desc;

	if (!list_empty(&imxdmac->ld_active)) {
		desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc,
					node);
		if (desc->type == IMXDMA_DESC_CYCLIC)
			return true;
	}
	return false;
}

static void imx_dmav1_writel(struct imxdma_engine *imxdma, unsigned val,
			     unsigned offset)
{
	__raw_writel(val, imxdma->base + offset);
}

static unsigned imx_dmav1_readl(struct imxdma_engine *imxdma, unsigned offset)
{
	return __raw_readl(imxdma->base + offset);
}
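/*
 * On i.MX27 the driver can use hardware chaining (CCR_RPT | CCR_ACRPT):
 * the controller reloads the channel registers itself at the end of a
 * transfer, so the next scatter-gather chunk can be programmed while the
 * current one is still running. On i.MX1/21 there is no such support and
 * every chunk has to be set up from the interrupt handler instead.
 */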
static int imxdma_hw_chain(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;

	if (is_imx27_dma(imxdma))
		return imxdmac->hw_chaining;
	else
		return 0;
}

/*
 * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
 */
static inline void imxdma_sg_next(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct scatterlist *sg = d->sg;
	size_t now;

	now = min_t(size_t, d->len, sg_dma_len(sg));
	if (d->len != IMX_DMA_LENGTH_LOOP)
		d->len -= now;

	if (d->direction == DMA_DEV_TO_MEM)
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_DAR(imxdmac->channel));
	else
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_SAR(imxdmac->channel));

	imx_dmav1_writel(imxdma, now, DMA_CNTR(imxdmac->channel));

	dev_dbg(imxdma->dev, " %s channel: %d dst 0x%08x, src 0x%08x, "
		"size 0x%08x\n", __func__, imxdmac->channel,
		imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)),
		imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)),
		imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel)));
}

static void imxdma_enable_hw(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	local_irq_save(flags);

	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) &
			 ~(1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) |
			 CCR_CEN | CCR_ACRPT, DMA_CCR(channel));

	if (!is_imx1_dma(imxdma) &&
			d->sg && imxdma_hw_chain(imxdmac)) {
		d->sg = sg_next(d->sg);
		if (d->sg) {
			u32 tmp;
			imxdma_sg_next(d);
			tmp = imx_dmav1_readl(imxdma, DMA_CCR(channel));
			imx_dmav1_writel(imxdma, tmp | CCR_RPT | CCR_ACRPT,
					 DMA_CCR(channel));
		}
	}

	local_irq_restore(flags);
}

static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	if (imxdma_hw_chain(imxdmac))
		del_timer(&imxdmac->watchdog);

	local_irq_save(flags);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) |
			 (1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) &
			 ~CCR_CEN, DMA_CCR(channel));
	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	local_irq_restore(flags);
}

static void imxdma_watchdog(struct timer_list *t)
{
	struct imxdma_channel *imxdmac = from_timer(imxdmac, t, watchdog);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;

	imx_dmav1_writel(imxdma, 0, DMA_CCR(channel));

	/* Tasklet watchdog error handler */
	tasklet_schedule(&imxdmac->dma_tasklet);
	dev_dbg(imxdma->dev, "channel %d: watchdog timeout!\n",
		imxdmac->channel);
}
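/*
 * The four error status registers (DBTOSR, DRTOSR, DSESR and DBOSR) each
 * carry one bit per channel and are write-one-to-clear: the handler below
 * acknowledges exactly the bits it has seen, decodes them into an
 * IMX_DMA_ERR_* code for the diagnostic message, and defers the actual
 * descriptor cleanup to the channel tasklet.
 */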
static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	unsigned int err_mask;
	int i, disr;
	int errcode;

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	err_mask = imx_dmav1_readl(imxdma, DMA_DBTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DRTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DSESR)  |
		   imx_dmav1_readl(imxdma, DMA_DBOSR);

	if (!err_mask)
		return IRQ_HANDLED;

	imx_dmav1_writel(imxdma, disr & err_mask, DMA_DISR);

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (!(err_mask & (1 << i)))
			continue;
		errcode = 0;

		if (imx_dmav1_readl(imxdma, DMA_DBTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBTOSR);
			errcode |= IMX_DMA_ERR_BURST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DRTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DRTOSR);
			errcode |= IMX_DMA_ERR_REQUEST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DSESR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DSESR);
			errcode |= IMX_DMA_ERR_TRANSFER;
		}
		if (imx_dmav1_readl(imxdma, DMA_DBOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBOSR);
			errcode |= IMX_DMA_ERR_BUFFER;
		}
		/* Tasklet error handler */
		tasklet_schedule(&imxdma->channel[i].dma_tasklet);

		dev_warn(imxdma->dev,
			 "DMA timeout on channel %d -%s%s%s%s\n", i,
			 errcode & IMX_DMA_ERR_BURST ?    " burst" : "",
			 errcode & IMX_DMA_ERR_REQUEST ?  " request" : "",
			 errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
			 errcode & IMX_DMA_ERR_BUFFER ?   " buffer" : "");
	}
	return IRQ_HANDLED;
}

static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int chno = imxdmac->channel;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	if (list_empty(&imxdmac->ld_active)) {
		spin_unlock_irqrestore(&imxdma->lock, flags);
		goto out;
	}

	desc = list_first_entry(&imxdmac->ld_active,
				struct imxdma_desc,
				node);
	spin_unlock_irqrestore(&imxdma->lock, flags);

	if (desc->sg) {
		u32 tmp;
		desc->sg = sg_next(desc->sg);

		if (desc->sg) {
			imxdma_sg_next(desc);

			tmp = imx_dmav1_readl(imxdma, DMA_CCR(chno));

			if (imxdma_hw_chain(imxdmac)) {
				/* FIXME: The timeout should probably be
				 * configurable
				 */
				mod_timer(&imxdmac->watchdog,
					  jiffies + msecs_to_jiffies(500));

				tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
				imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
			} else {
				imx_dmav1_writel(imxdma, tmp & ~CCR_CEN,
						 DMA_CCR(chno));
				tmp |= CCR_CEN;
			}

			imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));

			if (imxdma_chan_is_doing_cyclic(imxdmac))
				/* Tasklet progression */
				tasklet_schedule(&imxdmac->dma_tasklet);

			return;
		}

		if (imxdma_hw_chain(imxdmac)) {
			del_timer(&imxdmac->watchdog);
			return;
		}
	}

out:
	imx_dmav1_writel(imxdma, 0, DMA_CCR(chno));
	/* Tasklet irq */
	tasklet_schedule(&imxdmac->dma_tasklet);
}

static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	int i, disr;

	if (!is_imx1_dma(imxdma))
		imxdma_err_handler(irq, dev_id);

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	dev_dbg(imxdma->dev, "%s called, disr=0x%08x\n", __func__, disr);

	imx_dmav1_writel(imxdma, disr, DMA_DISR);
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (disr & (1 << i))
			dma_irq_handle_channel(&imxdma->channel[i]);
	}

	return IRQ_HANDLED;
}
static int imxdma_xfer_desc(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int slot = -1;
	int i;

	/* Configure and enable */
	switch (d->type) {
	case IMXDMA_DESC_INTERLEAVED:
		/* Try to get a free 2D slot */
		for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
			if ((imxdma->slots_2d[i].count > 0) &&
			((imxdma->slots_2d[i].xsr != d->x) ||
			(imxdma->slots_2d[i].ysr != d->y) ||
			(imxdma->slots_2d[i].wsr != d->w)))
				continue;
			slot = i;
			break;
		}
		if (slot < 0)
			return -EBUSY;

		imxdma->slots_2d[slot].xsr = d->x;
		imxdma->slots_2d[slot].ysr = d->y;
		imxdma->slots_2d[slot].wsr = d->w;
		imxdma->slots_2d[slot].count++;

		imxdmac->slot_2d = slot;
		imxdmac->enabled_2d = true;

		if (slot == IMX_DMA_2D_SLOT_A) {
			d->config_mem &= ~CCR_MSEL_B;
			d->config_port &= ~CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRA);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRA);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRA);
		} else {
			d->config_mem |= CCR_MSEL_B;
			d->config_port |= CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRB);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRB);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRB);
		}
		/*
		 * We fall through here intentionally, since a 2D transfer is
		 * similar to MEMCPY, just adding the 2D slot configuration.
		 */
		/* Fall through */
	case IMXDMA_DESC_MEMCPY:
		imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->config_mem | (d->config_port << 2),
				 DMA_CCR(imxdmac->channel));

		imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel));

		dev_dbg(imxdma->dev,
			"%s channel: %d dest=0x%08llx src=0x%08llx dma_length=%zu\n",
			__func__, imxdmac->channel,
			(unsigned long long)d->dest,
			(unsigned long long)d->src, d->len);

		break;
	/* Cyclic transfer is the same as slave_sg with special sg configuration. */
	case IMXDMA_DESC_CYCLIC:
	case IMXDMA_DESC_SLAVE_SG:
		if (d->direction == DMA_DEV_TO_MEM) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_SAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_from_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev,
				"%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (dev2mem)\n",
				__func__, imxdmac->channel,
				d->sg, d->sgcount, d->len,
				(unsigned long long)imxdmac->per_address);
		} else if (d->direction == DMA_MEM_TO_DEV) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_DAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_to_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev,
				"%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (mem2dev)\n",
				__func__, imxdmac->channel,
				d->sg, d->sgcount, d->len,
				(unsigned long long)imxdmac->per_address);
		} else {
			dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
				__func__, imxdmac->channel);
			return -EINVAL;
		}

		imxdma_sg_next(d);

		break;
	default:
		return -EINVAL;
	}
	imxdma_enable_hw(d);
	return 0;
}
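/*
 * Completion tasklet: it completes the cookie of the head descriptor on
 * ld_active, releases the shared 2D slot taken in imxdma_xfer_desc() (if
 * any), recycles the descriptor to ld_free and starts the next queued
 * one. Cyclic descriptors (built over a scatterlist closed into a ring
 * with sg_chain() in imxdma_prep_dma_cyclic()) are the exception: they
 * stay on ld_active and are never completed; only their callback runs,
 * once per period.
 */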
static void imxdma_tasklet(unsigned long data)
{
	struct imxdma_channel *imxdmac = (void *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc, *next_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);

	if (list_empty(&imxdmac->ld_active)) {
		/* Someone might have called terminate all */
		spin_unlock_irqrestore(&imxdma->lock, flags);
		return;
	}
	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

	/* If we are dealing with a cyclic descriptor, keep it on ld_active
	 * and don't mark it as complete. Only non-cyclic descriptors are
	 * ever marked as complete.
	 */
	if (imxdma_chan_is_doing_cyclic(imxdmac))
		goto out;
	else
		dma_cookie_complete(&desc->desc);

	/* Free 2D slot if it was an interleaved transfer */
	if (imxdmac->enabled_2d) {
		imxdma->slots_2d[imxdmac->slot_2d].count--;
		imxdmac->enabled_2d = false;
	}

	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

	if (!list_empty(&imxdmac->ld_queue)) {
		next_desc = list_first_entry(&imxdmac->ld_queue,
					     struct imxdma_desc, node);
		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
		if (imxdma_xfer_desc(next_desc) < 0)
			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
				 __func__, imxdmac->channel);
	}
out:
	spin_unlock_irqrestore(&imxdma->lock, flags);

	dmaengine_desc_get_callback_invoke(&desc->desc, NULL);
}

static int imxdma_terminate_all(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned long flags;

	imxdma_disable_hw(imxdmac);

	spin_lock_irqsave(&imxdma->lock, flags);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
	spin_unlock_irqrestore(&imxdma->lock, flags);
	return 0;
}
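/*
 * The per-channel CCR encodes the source side in bits [5:4] (size) and
 * [11:10] (mode), and the destination side two bits higher, in [7:6] and
 * [13:12]. The IMX_DMA_MEMSIZE_* and IMX_DMA_TYPE_* values used here are
 * defined at the source positions, so shifting them left by two turns a
 * source configuration into the equivalent destination configuration.
 * For example, a 16-bit peripheral written from memory ends up with
 *
 *	ccr_to_device = (IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
 *			((IMX_DMA_MEMSIZE_16 | IMX_DMA_TYPE_FIFO) << 2) |
 *			CCR_REN;
 *
 * i.e. a linear 32-bit read side, a 16-bit FIFO write side, and the DMA
 * request line enabled.
 */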
static int imxdma_config_write(struct dma_chan *chan,
			       struct dma_slave_config *dmaengine_cfg,
			       enum dma_transfer_direction direction)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned int mode = 0;

	if (direction == DMA_DEV_TO_MEM) {
		imxdmac->per_address = dmaengine_cfg->src_addr;
		imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
		imxdmac->word_size = dmaengine_cfg->src_addr_width;
	} else {
		imxdmac->per_address = dmaengine_cfg->dst_addr;
		imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
		imxdmac->word_size = dmaengine_cfg->dst_addr_width;
	}

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		mode = IMX_DMA_MEMSIZE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		mode = IMX_DMA_MEMSIZE_16;
		break;
	default:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		mode = IMX_DMA_MEMSIZE_32;
		break;
	}

	imxdmac->hw_chaining = 0;

	imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
		((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
		CCR_REN;
	imxdmac->ccr_to_device =
		(IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
		((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
	imx_dmav1_writel(imxdma, imxdmac->dma_request,
			 DMA_RSSR(imxdmac->channel));

	/* Set burst length */
	imx_dmav1_writel(imxdma, imxdmac->watermark_level *
			 imxdmac->word_size, DMA_BLR(imxdmac->channel));

	return 0;
}

static int imxdma_config(struct dma_chan *chan,
			 struct dma_slave_config *dmaengine_cfg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);

	memcpy(&imxdmac->config, dmaengine_cfg, sizeof(*dmaengine_cfg));

	return 0;
}

static enum dma_status imxdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
	cookie = dma_cookie_assign(tx);
	spin_unlock_irqrestore(&imxdma->lock, flags);

	return cookie;
}
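/*
 * Descriptor lifecycle: alloc_chan_resources() fills ld_free with up to
 * IMXDMA_MAX_CHAN_DESCRIPTORS preallocated descriptors. The prep_*()
 * callbacks fill in the head of ld_free in place, tx_submit() moves that
 * head to ld_queue, and issue_pending() (or the completion tasklet) moves
 * it on to ld_active and programs the hardware. Since tx_submit() always
 * takes the head of ld_free, at most one descriptor can be prepared but
 * not yet submitted at any time.
 */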
static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imx_dma_data *data = chan->private;

	if (data != NULL)
		imxdmac->dma_request = data->dma_request;

	while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
		struct imxdma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;
		memset(&desc->desc, 0, sizeof(struct dma_async_tx_descriptor));
		dma_async_tx_descriptor_init(&desc->desc, chan);
		desc->desc.tx_submit = imxdma_tx_submit;
		/* txd.flags will be overwritten in prep funcs */
		desc->desc.flags = DMA_CTRL_ACK;
		desc->status = DMA_COMPLETE;

		list_add_tail(&desc->node, &imxdmac->ld_free);
		imxdmac->descs_allocated++;
	}

	if (!imxdmac->descs_allocated)
		return -ENOMEM;

	return imxdmac->descs_allocated;
}

static void imxdma_free_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);

	imxdma_disable_hw(imxdmac);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

	spin_unlock_irqrestore(&imxdma->lock, flags);

	list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
		kfree(desc);
		imxdmac->descs_allocated--;
	}
	INIT_LIST_HEAD(&imxdmac->ld_free);

	kfree(imxdmac->sg_list);
	imxdmac->sg_list = NULL;
}

static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct scatterlist *sg;
	int i, dma_length = 0;
	struct imxdma_desc *desc;

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg_dma_len(sg);
	}

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		break;
	default:
		return NULL;
	}

	desc->type = IMXDMA_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	int i;
	unsigned int periods = buf_len / period_len;

	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
		__func__, imxdmac->channel, buf_len, period_len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	kfree(imxdmac->sg_list);

	imxdmac->sg_list = kcalloc(periods + 1,
			sizeof(struct scatterlist), GFP_ATOMIC);
	if (!imxdmac->sg_list)
		return NULL;

	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		sg_assign_page(&imxdmac->sg_list[i], NULL);
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		sg_dma_len(&imxdmac->sg_list[i]) = period_len;
		dma_addr += period_len;
	}

	/* close the loop */
	sg_chain(imxdmac->sg_list, periods + 1, imxdmac->sg_list);

	desc->type = IMXDMA_DESC_CYCLIC;
	desc->sg = imxdmac->sg_list;
	desc->sgcount = periods;
	desc->len = IMX_DMA_LENGTH_LOOP;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	imxdma_config_write(chan, &imxdmac->config, direction);

	return &desc->desc;
}

static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src=0x%llx dst=0x%llx len=%zu\n",
		__func__, imxdmac->channel, (unsigned long long)src,
		(unsigned long long)dest, len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%llx dst_start=0x%llx\n"
		"   src_sgl=%s dst_sgl=%s numf=%zu frame_size=%zu\n", __func__,
		imxdmac->channel, (unsigned long long)xt->src_start,
		(unsigned long long) xt->dst_start,
		xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false",
		xt->numf, xt->frame_size);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	if (xt->frame_size != 1 || xt->numf <= 0 || xt->dir != DMA_MEM_TO_MEM)
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_INTERLEAVED;
	desc->src = xt->src_start;
	desc->dest = xt->dst_start;
	desc->x = xt->sgl[0].size;
	desc->y = xt->numf;
	desc->w = xt->sgl[0].icg + desc->x;
	desc->len = desc->x * desc->y;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32;
	desc->config_mem = IMX_DMA_MEMSIZE_32;
	if (xt->src_sgl)
		desc->config_mem |= IMX_DMA_TYPE_2D;
	if (xt->dst_sgl)
		desc->config_port |= IMX_DMA_TYPE_2D;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

static void imxdma_issue_pending(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	if (list_empty(&imxdmac->ld_active) &&
	    !list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue,
					struct imxdma_desc, node);

		if (imxdma_xfer_desc(desc) < 0) {
			dev_warn(imxdma->dev,
				 "%s: channel: %d couldn't issue DMA xfer\n",
				 __func__, imxdmac->channel);
		} else {
			list_move_tail(imxdmac->ld_queue.next,
				       &imxdmac->ld_active);
		}
	}
	spin_unlock_irqrestore(&imxdma->lock, flags);
}

static bool imxdma_filter_fn(struct dma_chan *chan, void *param)
{
	struct imxdma_filter_data *fdata = param;
	struct imxdma_channel *imxdma_chan = to_imxdma_chan(chan);

	if (chan->device->dev != fdata->imxdma->dev)
		return false;

	imxdma_chan->dma_request = fdata->request;
	chan->private = NULL;

	return true;
}

static struct dma_chan *imxdma_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *ofdma)
{
	int count = dma_spec->args_count;
	struct imxdma_engine *imxdma = ofdma->of_dma_data;
	struct imxdma_filter_data fdata = {
		.imxdma = imxdma,
	};

	if (count != 1)
		return NULL;

	fdata.request = dma_spec->args[0];

	return dma_request_channel(imxdma->dma_device.cap_mask,
				   imxdma_filter_fn, &fdata);
}
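/*
 * On i.MX1 the DMA engine exposes one shared interrupt for all channels
 * plus a dedicated error interrupt, so probe requests exactly two IRQs
 * and dma_irq_handler() demultiplexes DISR itself. On i.MX21/27 each of
 * the 16 channels has its own interrupt line (irq + i in the channel loop
 * below), and dma_irq_handler() additionally calls imxdma_err_handler()
 * to pick up error status, since there is no separate error IRQ there.
 */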
static int __init imxdma_probe(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma;
	struct resource *res;
	const struct of_device_id *of_id;
	int ret, i;
	int irq, irq_err;

	of_id = of_match_device(imx_dma_of_dev_id, &pdev->dev);
	if (of_id)
		pdev->id_entry = of_id->data;

	imxdma = devm_kzalloc(&pdev->dev, sizeof(*imxdma), GFP_KERNEL);
	if (!imxdma)
		return -ENOMEM;

	imxdma->dev = &pdev->dev;
	imxdma->devtype = pdev->id_entry->driver_data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	imxdma->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(imxdma->base))
		return PTR_ERR(imxdma->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(imxdma->dma_ipg))
		return PTR_ERR(imxdma->dma_ipg);

	imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(imxdma->dma_ahb))
		return PTR_ERR(imxdma->dma_ahb);

	ret = clk_prepare_enable(imxdma->dma_ipg);
	if (ret)
		return ret;
	ret = clk_prepare_enable(imxdma->dma_ahb);
	if (ret)
		goto disable_dma_ipg_clk;

	/* reset DMA module */
	imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);

	if (is_imx1_dma(imxdma)) {
		ret = devm_request_irq(&pdev->dev, irq,
				       dma_irq_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
			goto disable_dma_ahb_clk;
		}
		imxdma->irq = irq;

		irq_err = platform_get_irq(pdev, 1);
		if (irq_err < 0) {
			ret = irq_err;
			goto disable_dma_ahb_clk;
		}

		ret = devm_request_irq(&pdev->dev, irq_err,
				       imxdma_err_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
			goto disable_dma_ahb_clk;
		}
		imxdma->irq_err = irq_err;
	}

	/* enable DMA module */
	imx_dmav1_writel(imxdma, DCR_DEN, DMA_DCR);

	/* clear all interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);

	/* disable interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);

	INIT_LIST_HEAD(&imxdma->dma_device.channels);

	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, imxdma->dma_device.cap_mask);

	/* Initialize 2D global parameters */
	for (i = 0; i < IMX_DMA_2D_SLOTS; i++)
		imxdma->slots_2d[i].count = 0;

	spin_lock_init(&imxdma->lock);

	/* Initialize channel parameters */
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		if (!is_imx1_dma(imxdma)) {
			ret = devm_request_irq(&pdev->dev, irq + i,
					       dma_irq_handler, 0, "DMA", imxdma);
			if (ret) {
				dev_warn(imxdma->dev, "Can't register IRQ %d "
					 "for DMA channel %d\n",
					 irq + i, i);
				goto disable_dma_ahb_clk;
			}

			imxdmac->irq = irq + i;
			timer_setup(&imxdmac->watchdog, imxdma_watchdog, 0);
		}

		imxdmac->imxdma = imxdma;

		INIT_LIST_HEAD(&imxdmac->ld_queue);
		INIT_LIST_HEAD(&imxdmac->ld_free);
		INIT_LIST_HEAD(&imxdmac->ld_active);

		tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
			     (unsigned long)imxdmac);
		imxdmac->chan.device = &imxdma->dma_device;
		dma_cookie_init(&imxdmac->chan);
		imxdmac->channel = i;

		/* Add the channel to the DMAC list */
		list_add_tail(&imxdmac->chan.device_node,
			      &imxdma->dma_device.channels);
	}

	imxdma->dma_device.dev = &pdev->dev;

	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
	imxdma->dma_device.device_tx_status = imxdma_tx_status;
	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
	imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved;
	imxdma->dma_device.device_config = imxdma_config;
	imxdma->dma_device.device_terminate_all = imxdma_terminate_all;
	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

	platform_set_drvdata(pdev, imxdma);

	imxdma->dma_device.copy_align = DMAENGINE_ALIGN_4_BYTES;
	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

	ret = dma_async_device_register(&imxdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto disable_dma_ahb_clk;
	}

	if (pdev->dev.of_node) {
		ret = of_dma_controller_register(pdev->dev.of_node,
				imxdma_xlate, imxdma);
		if (ret) {
			dev_err(&pdev->dev, "unable to register of_dma_controller\n");
			goto err_of_dma_controller;
		}
	}

	return 0;

err_of_dma_controller:
	dma_async_device_unregister(&imxdma->dma_device);
disable_dma_ahb_clk:
	clk_disable_unprepare(imxdma->dma_ahb);
disable_dma_ipg_clk:
	clk_disable_unprepare(imxdma->dma_ipg);
	return ret;
}

static void imxdma_free_irq(struct platform_device *pdev, struct imxdma_engine *imxdma)
{
	int i;

	if (is_imx1_dma(imxdma)) {
		disable_irq(imxdma->irq);
		disable_irq(imxdma->irq_err);
	}

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		if (!is_imx1_dma(imxdma))
			disable_irq(imxdmac->irq);

		tasklet_kill(&imxdmac->dma_tasklet);
	}
}

static int imxdma_remove(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);

	imxdma_free_irq(pdev, imxdma);

	dma_async_device_unregister(&imxdma->dma_device);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	clk_disable_unprepare(imxdma->dma_ipg);
	clk_disable_unprepare(imxdma->dma_ahb);

	return 0;
}

static struct platform_driver imxdma_driver = {
	.driver		= {
		.name	= "imx-dma",
		.of_match_table = imx_dma_of_dev_id,
	},
	.id_table	= imx_dma_devtype,
	.remove		= imxdma_remove,
};

static int __init imxdma_module_init(void)
{
	return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");
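/*
 * Illustrative client usage (not part of this driver): a slave-DMA
 * consumer would typically obtain and drive one of these channels through
 * the generic dmaengine API, roughly as sketched below. The "rx" channel
 * name, fifo_phys_addr, dma_buf, len and done_fn are made-up example
 * values, not identifiers from this file:
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "rx");
 *	struct dma_slave_config cfg = {
 *		.src_addr	= fifo_phys_addr,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
 *		.src_maxburst	= 8,
 *	};
 *	struct dma_async_tx_descriptor *desc;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_single(chan, dma_buf, len,
 *					   DMA_DEV_TO_MEM,
 *					   DMA_PREP_INTERRUPT);
 *	desc->callback = done_fn;
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */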