// SPDX-License-Identifier: GPL-2.0+
//
// drivers/dma/imx-dma.c
//
// This file contains a driver for the Freescale i.MX DMA engine
// found on i.MX1/21/27
//
// Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
// Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>

#include <linux/err.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/string_choices.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>

#include <asm/irq.h>
#include <linux/dma/imx-dma.h>

#include "dmaengine.h"

#define IMXDMA_MAX_CHAN_DESCRIPTORS	16
#define IMX_DMA_CHANNELS	16

#define IMX_DMA_2D_SLOTS	2
#define IMX_DMA_2D_SLOT_A	0
#define IMX_DMA_2D_SLOT_B	1

#define IMX_DMA_LENGTH_LOOP	((unsigned int)-1)
#define IMX_DMA_MEMSIZE_32	(0 << 4)
#define IMX_DMA_MEMSIZE_8	(1 << 4)
#define IMX_DMA_MEMSIZE_16	(2 << 4)
#define IMX_DMA_TYPE_LINEAR	(0 << 10)
#define IMX_DMA_TYPE_2D		(1 << 10)
#define IMX_DMA_TYPE_FIFO	(2 << 10)

#define IMX_DMA_ERR_BURST	(1 << 0)
#define IMX_DMA_ERR_REQUEST	(1 << 1)
#define IMX_DMA_ERR_TRANSFER	(1 << 2)
#define IMX_DMA_ERR_BUFFER	(1 << 3)
#define IMX_DMA_ERR_TIMEOUT	(1 << 4)

#define DMA_DCR     0x00		/* Control Register */
#define DMA_DISR    0x04		/* Interrupt status Register */
#define DMA_DIMR    0x08		/* Interrupt mask Register */
#define DMA_DBTOSR  0x0c		/* Burst timeout status Register */
#define DMA_DRTOSR  0x10		/* Request timeout Register */
#define DMA_DSESR   0x14		/* Transfer Error Status Register */
#define DMA_DBOSR   0x18		/* Buffer overflow status Register */
#define DMA_DBTOCR  0x1c		/* Burst timeout control Register */
#define DMA_WSRA    0x40		/* W-Size Register A */
#define DMA_XSRA    0x44		/* X-Size Register A */
#define DMA_YSRA    0x48		/* Y-Size Register A */
#define DMA_WSRB    0x4c		/* W-Size Register B */
#define DMA_XSRB    0x50		/* X-Size Register B */
#define DMA_YSRB    0x54		/* Y-Size Register B */
#define DMA_SAR(x)  (0x80 + ((x) << 6))	/* Source Address Registers */
#define DMA_DAR(x)  (0x84 + ((x) << 6))	/* Destination Address Registers */
#define DMA_CNTR(x) (0x88 + ((x) << 6))	/* Count Registers */
#define DMA_CCR(x)  (0x8c + ((x) << 6))	/* Control Registers */
#define DMA_RSSR(x) (0x90 + ((x) << 6))	/* Request source select Registers */
#define DMA_BLR(x)  (0x94 + ((x) << 6))	/* Burst length Registers */
#define DMA_RTOR(x) (0x98 + ((x) << 6))	/* Request timeout Registers */
#define DMA_BUCR(x) (0x98 + ((x) << 6))	/* Bus Utilization Registers */
#define DMA_CCNR(x) (0x9C + ((x) << 6))	/* Channel counter Registers */

#define DCR_DRST           (1<<1)
#define DCR_DEN            (1<<0)
#define DBTOCR_EN          (1<<15)
#define DBTOCR_CNT(x)      ((x) & 0x7fff)
#define CNTR_CNT(x)        ((x) & 0xffffff)
#define CCR_ACRPT          (1<<14)
#define CCR_DMOD_LINEAR    (0x0 << 12)
#define CCR_DMOD_2D        (0x1 << 12)
#define CCR_DMOD_FIFO      (0x2 << 12)
#define CCR_DMOD_EOBFIFO   (0x3 << 12)
#define CCR_SMOD_LINEAR    (0x0 << 10)
#define CCR_SMOD_2D        (0x1 << 10)
#define CCR_SMOD_FIFO      (0x2 << 10)
#define CCR_SMOD_EOBFIFO   (0x3 << 10)
#define CCR_MDIR_DEC       (1<<9)
#define CCR_MSEL_B         (1<<8)
#define CCR_DSIZ_32        (0x0 << 6)
#define CCR_DSIZ_8         (0x1 << 6)
#define CCR_DSIZ_16        (0x2 << 6)
#define CCR_SSIZ_32        (0x0 << 4)
#define CCR_SSIZ_8         (0x1 << 4)
#define CCR_SSIZ_16        (0x2 << 4)
#define CCR_REN            (1<<3)
#define CCR_RPT            (1<<2)
#define CCR_FRC            (1<<1)
#define CCR_CEN            (1<<0)
#define RTOR_EN            (1<<15)
#define RTOR_CLK           (1<<14)
#define RTOR_PSC           (1<<13)

enum imxdma_prep_type {
	IMXDMA_DESC_MEMCPY,
	IMXDMA_DESC_INTERLEAVED,
	IMXDMA_DESC_SLAVE_SG,
	IMXDMA_DESC_CYCLIC,
};

struct imx_dma_2d_config {
	u16		xsr;
	u16		ysr;
	u16		wsr;
	int		count;
};

struct imxdma_desc {
	struct list_head		node;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	dma_addr_t			src;
	dma_addr_t			dest;
	size_t				len;
	enum dma_transfer_direction	direction;
	enum imxdma_prep_type		type;
	/* For memcpy and interleaved */
	unsigned int			config_port;
	unsigned int			config_mem;
	/* For interleaved transfers */
	unsigned int			x;
	unsigned int			y;
	unsigned int			w;
	/* For slave sg and cyclic */
	struct scatterlist		*sg;
	unsigned int			sgcount;
};

struct imxdma_channel {
	int				hw_chaining;
	struct timer_list		watchdog;
	struct imxdma_engine		*imxdma;
	unsigned int			channel;

	struct tasklet_struct		dma_tasklet;
	struct list_head		ld_free;
	struct list_head		ld_queue;
	struct list_head		ld_active;
	int				descs_allocated;
	enum dma_slave_buswidth		word_size;
	dma_addr_t			per_address;
	u32				watermark_level;
	struct dma_chan			chan;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	int				dma_request;
	struct scatterlist		*sg_list;
	u32				ccr_from_device;
	u32				ccr_to_device;
	bool				enabled_2d;
	int				slot_2d;
	unsigned int			irq;
	struct dma_slave_config		config;
};

enum imx_dma_type {
	IMX1_DMA,
	IMX27_DMA,
};

struct imxdma_engine {
	struct device			*dev;
	struct dma_device		dma_device;
	void __iomem			*base;
	struct clk			*dma_ahb;
	struct clk			*dma_ipg;
	spinlock_t			lock;
	struct imx_dma_2d_config	slots_2d[IMX_DMA_2D_SLOTS];
	struct imxdma_channel		channel[IMX_DMA_CHANNELS];
	enum imx_dma_type		devtype;
	unsigned int			irq;
	unsigned int			irq_err;
};

struct imxdma_filter_data {
	struct imxdma_engine	*imxdma;
	int			request;
};

static const struct of_device_id imx_dma_of_dev_id[] = {
	{
		.compatible = "fsl,imx1-dma", .data = (const void *)IMX1_DMA,
	}, {
		.compatible = "fsl,imx27-dma", .data = (const void *)IMX27_DMA,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, imx_dma_of_dev_id);

static inline int is_imx1_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX1_DMA;
}

static inline int is_imx27_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX27_DMA;
}

static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct imxdma_channel, chan);
}

static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
{
	struct imxdma_desc *desc;

	if (!list_empty(&imxdmac->ld_active)) {
		desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc,
					node);
		if (desc->type == IMXDMA_DESC_CYCLIC)
			return true;
	}
	return false;
}

static void imx_dmav1_writel(struct imxdma_engine *imxdma, unsigned val,
			     unsigned offset)
{
	__raw_writel(val, imxdma->base + offset);
}

static unsigned imx_dmav1_readl(struct imxdma_engine *imxdma, unsigned offset)
{
	return __raw_readl(imxdma->base + offset);
}

static int imxdma_hw_chain(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;

	if (is_imx27_dma(imxdma))
		return imxdmac->hw_chaining;
	else
		return 0;
}

/*
 * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
 */
static inline void imxdma_sg_next(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct scatterlist *sg = d->sg;
	size_t now;

	now = min_t(size_t, d->len, sg_dma_len(sg));
	if (d->len != IMX_DMA_LENGTH_LOOP)
		d->len -= now;

	if (d->direction == DMA_DEV_TO_MEM)
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_DAR(imxdmac->channel));
	else
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_SAR(imxdmac->channel));

	imx_dmav1_writel(imxdma, now, DMA_CNTR(imxdmac->channel));

	dev_dbg(imxdma->dev, " %s channel: %d dst 0x%08x, src 0x%08x, "
		"size 0x%08x\n", __func__, imxdmac->channel,
		imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)),
		imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)),
		imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel)));
}

static void imxdma_enable_hw(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	local_irq_save(flags);

	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) &
			 ~(1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) |
			 CCR_CEN | CCR_ACRPT, DMA_CCR(channel));

	if (!is_imx1_dma(imxdma) &&
	    d->sg && imxdma_hw_chain(imxdmac)) {
		d->sg = sg_next(d->sg);
		if (d->sg) {
			u32 tmp;
			imxdma_sg_next(d);
			tmp = imx_dmav1_readl(imxdma, DMA_CCR(channel));
			imx_dmav1_writel(imxdma, tmp | CCR_RPT | CCR_ACRPT,
					 DMA_CCR(channel));
		}
	}

	local_irq_restore(flags);
}

static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	if (imxdma_hw_chain(imxdmac))
		timer_delete(&imxdmac->watchdog);

	local_irq_save(flags);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) |
			 (1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) &
			 ~CCR_CEN, DMA_CCR(channel));
	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	local_irq_restore(flags);
}

static void imxdma_watchdog(struct timer_list *t)
{
	struct imxdma_channel *imxdmac = from_timer(imxdmac, t, watchdog);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;

	imx_dmav1_writel(imxdma, 0, DMA_CCR(channel));

	/* Tasklet watchdog error handler */
	tasklet_schedule(&imxdmac->dma_tasklet);
	dev_dbg(imxdma->dev, "channel %d: watchdog timeout!\n",
		imxdmac->channel);
}

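/*
 * Error interrupt handler: collect the per-channel burst timeout, request
 * timeout, transfer error and buffer overflow status bits, acknowledge them
 * by writing them back, and schedule the affected channel's tasklet so the
 * error gets reported. On i.MX21/27 this is also called from
 * dma_irq_handler() before the normal completion handling.
 */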
static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	unsigned int err_mask;
	int i, disr;
	int errcode;

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	err_mask = imx_dmav1_readl(imxdma, DMA_DBTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DRTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DSESR) |
		   imx_dmav1_readl(imxdma, DMA_DBOSR);

	if (!err_mask)
		return IRQ_HANDLED;

	imx_dmav1_writel(imxdma, disr & err_mask, DMA_DISR);

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (!(err_mask & (1 << i)))
			continue;
		errcode = 0;

		if (imx_dmav1_readl(imxdma, DMA_DBTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBTOSR);
			errcode |= IMX_DMA_ERR_BURST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DRTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DRTOSR);
			errcode |= IMX_DMA_ERR_REQUEST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DSESR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DSESR);
			errcode |= IMX_DMA_ERR_TRANSFER;
		}
		if (imx_dmav1_readl(imxdma, DMA_DBOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBOSR);
			errcode |= IMX_DMA_ERR_BUFFER;
		}
		/* Tasklet error handler */
		tasklet_schedule(&imxdma->channel[i].dma_tasklet);

		dev_warn(imxdma->dev,
			 "DMA timeout on channel %d -%s%s%s%s\n", i,
			 errcode & IMX_DMA_ERR_BURST ? " burst" : "",
			 errcode & IMX_DMA_ERR_REQUEST ? " request" : "",
			 errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
			 errcode & IMX_DMA_ERR_BUFFER ? " buffer" : "");
	}
	return IRQ_HANDLED;
}

static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int chno = imxdmac->channel;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	if (list_empty(&imxdmac->ld_active)) {
		spin_unlock_irqrestore(&imxdma->lock, flags);
		goto out;
	}

	desc = list_first_entry(&imxdmac->ld_active,
				struct imxdma_desc,
				node);
	spin_unlock_irqrestore(&imxdma->lock, flags);

	if (desc->sg) {
		u32 tmp;
		desc->sg = sg_next(desc->sg);

		if (desc->sg) {
			imxdma_sg_next(desc);

			tmp = imx_dmav1_readl(imxdma, DMA_CCR(chno));

			if (imxdma_hw_chain(imxdmac)) {
				/* FIXME: The timeout should probably be
				 * configurable
				 */
				mod_timer(&imxdmac->watchdog,
					  jiffies + msecs_to_jiffies(500));

				tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
				imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
			} else {
				imx_dmav1_writel(imxdma, tmp & ~CCR_CEN,
						 DMA_CCR(chno));
				tmp |= CCR_CEN;
			}

			imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));

			if (imxdma_chan_is_doing_cyclic(imxdmac))
				/* Tasklet progression */
				tasklet_schedule(&imxdmac->dma_tasklet);

			return;
		}

		if (imxdma_hw_chain(imxdmac)) {
			timer_delete(&imxdmac->watchdog);
			return;
		}
	}

out:
	imx_dmav1_writel(imxdma, 0, DMA_CCR(chno));
	/* Tasklet irq */
	tasklet_schedule(&imxdmac->dma_tasklet);
}

static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	int i, disr;

	if (!is_imx1_dma(imxdma))
		imxdma_err_handler(irq, dev_id);

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	dev_dbg(imxdma->dev, "%s called, disr=0x%08x\n", __func__, disr);

	imx_dmav1_writel(imxdma, disr, DMA_DISR);
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (disr & (1 << i))
			dma_irq_handle_channel(&imxdma->channel[i]);
	}

	return IRQ_HANDLED;
}

static int imxdma_xfer_desc(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int slot = -1;
	int i;

	/* Configure and enable */
	switch (d->type) {
	case IMXDMA_DESC_INTERLEAVED:
		/* Try to get a free 2D slot */
		for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
			if ((imxdma->slots_2d[i].count > 0) &&
			    ((imxdma->slots_2d[i].xsr != d->x) ||
			     (imxdma->slots_2d[i].ysr != d->y) ||
			     (imxdma->slots_2d[i].wsr != d->w)))
				continue;
			slot = i;
			break;
		}
		if (slot < 0)
			return -EBUSY;

		imxdma->slots_2d[slot].xsr = d->x;
		imxdma->slots_2d[slot].ysr = d->y;
		imxdma->slots_2d[slot].wsr = d->w;
		imxdma->slots_2d[slot].count++;

		imxdmac->slot_2d = slot;
		imxdmac->enabled_2d = true;

		if (slot == IMX_DMA_2D_SLOT_A) {
			d->config_mem &= ~CCR_MSEL_B;
			d->config_port &= ~CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRA);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRA);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRA);
		} else {
			d->config_mem |= CCR_MSEL_B;
			d->config_port |= CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRB);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRB);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRB);
		}
		/*
		 * We fall-through here intentionally, since a 2D transfer is
		 * similar to MEMCPY just adding the 2D slot configuration.
		 */
		fallthrough;
	case IMXDMA_DESC_MEMCPY:
		imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->config_mem | (d->config_port << 2),
				 DMA_CCR(imxdmac->channel));

		imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel));

		dev_dbg(imxdma->dev,
			"%s channel: %d dest=0x%08llx src=0x%08llx dma_length=%zu\n",
			__func__, imxdmac->channel,
			(unsigned long long)d->dest,
			(unsigned long long)d->src, d->len);

		break;
	/* Cyclic transfer is the same as slave_sg with special sg configuration. */
	case IMXDMA_DESC_CYCLIC:
	case IMXDMA_DESC_SLAVE_SG:
		if (d->direction == DMA_DEV_TO_MEM) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_SAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_from_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev,
				"%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (dev2mem)\n",
				__func__, imxdmac->channel,
				d->sg, d->sgcount, d->len,
				(unsigned long long)imxdmac->per_address);
		} else if (d->direction == DMA_MEM_TO_DEV) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_DAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_to_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev,
				"%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (mem2dev)\n",
				__func__, imxdmac->channel,
				d->sg, d->sgcount, d->len,
				(unsigned long long)imxdmac->per_address);
		} else {
			dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
				__func__, imxdmac->channel);
			return -EINVAL;
		}

		imxdma_sg_next(d);

		break;
	default:
		return -EINVAL;
	}
	imxdma_enable_hw(d);
	return 0;
}

static void imxdma_tasklet(struct tasklet_struct *t)
{
	struct imxdma_channel *imxdmac = from_tasklet(imxdmac, t, dma_tasklet);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc, *next_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);

	if (list_empty(&imxdmac->ld_active)) {
		/* Someone might have called terminate all */
		spin_unlock_irqrestore(&imxdma->lock, flags);
		return;
	}
	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

	/* If we are dealing with a cyclic descriptor, keep it on ld_active
	 * and don't mark the descriptor as complete.
	 * Only in non-cyclic cases it would be marked as complete
	 */
	if (imxdma_chan_is_doing_cyclic(imxdmac))
		goto out;
	else
		dma_cookie_complete(&desc->desc);

	/* Free 2D slot if it was an interleaved transfer */
	if (imxdmac->enabled_2d) {
		imxdma->slots_2d[imxdmac->slot_2d].count--;
		imxdmac->enabled_2d = false;
	}

	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

	if (!list_empty(&imxdmac->ld_queue)) {
		next_desc = list_first_entry(&imxdmac->ld_queue,
					     struct imxdma_desc, node);
		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
		if (imxdma_xfer_desc(next_desc) < 0)
			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
				 __func__, imxdmac->channel);
	}
out:
	spin_unlock_irqrestore(&imxdma->lock, flags);

	dmaengine_desc_get_callback_invoke(&desc->desc, NULL);
}

static int imxdma_terminate_all(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned long flags;

	imxdma_disable_hw(imxdmac);

	spin_lock_irqsave(&imxdma->lock, flags);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
	spin_unlock_irqrestore(&imxdma->lock, flags);
	return 0;
}

static int imxdma_config_write(struct dma_chan *chan,
			       struct dma_slave_config *dmaengine_cfg,
			       enum dma_transfer_direction direction)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned int mode = 0;

	if (direction == DMA_DEV_TO_MEM) {
		imxdmac->per_address = dmaengine_cfg->src_addr;
		imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
		imxdmac->word_size = dmaengine_cfg->src_addr_width;
	} else {
		imxdmac->per_address = dmaengine_cfg->dst_addr;
		imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
		imxdmac->word_size = dmaengine_cfg->dst_addr_width;
	}

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		mode = IMX_DMA_MEMSIZE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		mode = IMX_DMA_MEMSIZE_16;
		break;
	default:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		mode = IMX_DMA_MEMSIZE_32;
		break;
	}

	imxdmac->hw_chaining = 0;

	imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
		((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
		CCR_REN;
	imxdmac->ccr_to_device =
		(IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
		((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
	imx_dmav1_writel(imxdma, imxdmac->dma_request,
			 DMA_RSSR(imxdmac->channel));

	/* Set burst length */
	imx_dmav1_writel(imxdma, imxdmac->watermark_level *
			 imxdmac->word_size, DMA_BLR(imxdmac->channel));

	return 0;
}

static int imxdma_config(struct dma_chan *chan,
			 struct dma_slave_config *dmaengine_cfg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);

	memcpy(&imxdmac->config, dmaengine_cfg, sizeof(*dmaengine_cfg));

	return 0;
}

static enum dma_status imxdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
	cookie = dma_cookie_assign(tx);
	spin_unlock_irqrestore(&imxdma->lock, flags);

	return cookie;
}

static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imx_dma_data *data = chan->private;

	if (data != NULL)
		imxdmac->dma_request = data->dma_request;

	while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
		struct imxdma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;
		dma_async_tx_descriptor_init(&desc->desc, chan);
		desc->desc.tx_submit = imxdma_tx_submit;
		/* txd.flags will be overwritten in prep funcs */
		desc->desc.flags = DMA_CTRL_ACK;
		desc->status = DMA_COMPLETE;

		list_add_tail(&desc->node, &imxdmac->ld_free);
		imxdmac->descs_allocated++;
	}

	if (!imxdmac->descs_allocated)
		return -ENOMEM;

	return imxdmac->descs_allocated;
}

static void imxdma_free_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);

	imxdma_disable_hw(imxdmac);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

	spin_unlock_irqrestore(&imxdma->lock, flags);

	list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
		kfree(desc);
		imxdmac->descs_allocated--;
	}
	INIT_LIST_HEAD(&imxdmac->ld_free);

	kfree(imxdmac->sg_list);
	imxdmac->sg_list = NULL;
}

static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct scatterlist *sg;
	int i, dma_length = 0;
	struct imxdma_desc *desc;

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg_dma_len(sg);
	}

	imxdma_config_write(chan, &imxdmac->config, direction);

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		break;
	default:
		return NULL;
	}

	desc->type = IMXDMA_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

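/*
 * Cyclic transfers are emulated with a software scatterlist: one entry per
 * period pointing into the DMA buffer, with the list chained back onto
 * itself so that walking it with sg_next() wraps around indefinitely.
 * desc->len is set to IMX_DMA_LENGTH_LOOP so imxdma_sg_next() never counts
 * the transfer down to completion.
 */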
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	int i;
	unsigned int periods = buf_len / period_len;

	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
		__func__, imxdmac->channel, buf_len, period_len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	kfree(imxdmac->sg_list);

	imxdmac->sg_list = kcalloc(periods + 1,
			sizeof(struct scatterlist), GFP_ATOMIC);
	if (!imxdmac->sg_list)
		return NULL;

	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		sg_assign_page(&imxdmac->sg_list[i], NULL);
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		sg_dma_len(&imxdmac->sg_list[i]) = period_len;
		dma_addr += period_len;
	}

	/* close the loop */
	sg_chain(imxdmac->sg_list, periods + 1, imxdmac->sg_list);

	desc->type = IMXDMA_DESC_CYCLIC;
	desc->sg = imxdmac->sg_list;
	desc->sgcount = periods;
	desc->len = IMX_DMA_LENGTH_LOOP;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	imxdma_config_write(chan, &imxdmac->config, direction);

	return &desc->desc;
}

static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src=0x%llx dst=0x%llx len=%zu\n",
		__func__, imxdmac->channel, (unsigned long long)src,
		(unsigned long long)dest, len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%llx dst_start=0x%llx\n"
		" src_sgl=%s dst_sgl=%s numf=%zu frame_size=%zu\n", __func__,
		imxdmac->channel, (unsigned long long)xt->src_start,
		(unsigned long long) xt->dst_start,
		str_true_false(xt->src_sgl), str_true_false(xt->dst_sgl),
		xt->numf, xt->frame_size);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	if (xt->frame_size != 1 || xt->numf <= 0 || xt->dir != DMA_MEM_TO_MEM)
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_INTERLEAVED;
	desc->src = xt->src_start;
	desc->dest = xt->dst_start;
	desc->x = xt->sgl[0].size;
	desc->y = xt->numf;
	desc->w = xt->sgl[0].icg + desc->x;
	desc->len = desc->x * desc->y;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32;
	desc->config_mem = IMX_DMA_MEMSIZE_32;
	if (xt->src_sgl)
		desc->config_mem |= IMX_DMA_TYPE_2D;
	if (xt->dst_sgl)
		desc->config_port |= IMX_DMA_TYPE_2D;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

static void imxdma_issue_pending(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	if (list_empty(&imxdmac->ld_active) &&
	    !list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue,
					struct imxdma_desc, node);

		if (imxdma_xfer_desc(desc) < 0) {
			dev_warn(imxdma->dev,
				 "%s: channel: %d couldn't issue DMA xfer\n",
				 __func__, imxdmac->channel);
		} else {
			list_move_tail(imxdmac->ld_queue.next,
				       &imxdmac->ld_active);
		}
	}
	spin_unlock_irqrestore(&imxdma->lock, flags);
}

static bool imxdma_filter_fn(struct dma_chan *chan, void *param)
{
	struct imxdma_filter_data *fdata = param;
	struct imxdma_channel *imxdma_chan = to_imxdma_chan(chan);

	if (chan->device->dev != fdata->imxdma->dev)
		return false;

	imxdma_chan->dma_request = fdata->request;
	chan->private = NULL;

	return true;
}

static struct dma_chan *imxdma_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *ofdma)
{
	int count = dma_spec->args_count;
	struct imxdma_engine *imxdma = ofdma->of_dma_data;
	struct imxdma_filter_data fdata = {
		.imxdma = imxdma,
	};

	if (count != 1)
		return NULL;

	fdata.request = dma_spec->args[0];

	return dma_request_channel(imxdma->dma_device.cap_mask,
				   imxdma_filter_fn, &fdata);
}

static int __init imxdma_probe(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma;
	int ret, i;
	int irq, irq_err;

	imxdma = devm_kzalloc(&pdev->dev, sizeof(*imxdma), GFP_KERNEL);
	if (!imxdma)
		return -ENOMEM;

	imxdma->dev = &pdev->dev;
	imxdma->devtype = (uintptr_t)of_device_get_match_data(&pdev->dev);

	imxdma->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(imxdma->base))
		return PTR_ERR(imxdma->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(imxdma->dma_ipg))
		return PTR_ERR(imxdma->dma_ipg);

	imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(imxdma->dma_ahb))
		return PTR_ERR(imxdma->dma_ahb);

	ret = clk_prepare_enable(imxdma->dma_ipg);
	if (ret)
		return ret;
	ret = clk_prepare_enable(imxdma->dma_ahb);
	if (ret)
		goto disable_dma_ipg_clk;

	/* reset DMA module */
	imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);

	if (is_imx1_dma(imxdma)) {
		ret = devm_request_irq(&pdev->dev, irq,
				       dma_irq_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
			goto disable_dma_ahb_clk;
		}
		imxdma->irq = irq;

		irq_err = platform_get_irq(pdev, 1);
		if (irq_err < 0) {
			ret = irq_err;
			goto disable_dma_ahb_clk;
		}

		ret = devm_request_irq(&pdev->dev, irq_err,
				       imxdma_err_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
			goto disable_dma_ahb_clk;
		}
		imxdma->irq_err = irq_err;
	}

	/* enable DMA module */
	imx_dmav1_writel(imxdma, DCR_DEN, DMA_DCR);

	/* clear all interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);

	/* disable interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);

	INIT_LIST_HEAD(&imxdma->dma_device.channels);

	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, imxdma->dma_device.cap_mask);

	/* Initialize 2D global parameters */
	for (i = 0; i < IMX_DMA_2D_SLOTS; i++)
		imxdma->slots_2d[i].count = 0;

	spin_lock_init(&imxdma->lock);

	/* Initialize channel parameters */
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		if (!is_imx1_dma(imxdma)) {
			ret = devm_request_irq(&pdev->dev, irq + i,
					       dma_irq_handler, 0, "DMA", imxdma);
			if (ret) {
				dev_warn(imxdma->dev, "Can't register IRQ %d "
					 "for DMA channel %d\n",
					 irq + i, i);
				goto disable_dma_ahb_clk;
			}

			imxdmac->irq = irq + i;
			timer_setup(&imxdmac->watchdog, imxdma_watchdog, 0);
		}

		imxdmac->imxdma = imxdma;

		INIT_LIST_HEAD(&imxdmac->ld_queue);
		INIT_LIST_HEAD(&imxdmac->ld_free);
		INIT_LIST_HEAD(&imxdmac->ld_active);

		tasklet_setup(&imxdmac->dma_tasklet, imxdma_tasklet);
		imxdmac->chan.device = &imxdma->dma_device;
		dma_cookie_init(&imxdmac->chan);
		imxdmac->channel = i;

		/* Add the channel to the DMAC list */
		list_add_tail(&imxdmac->chan.device_node,
			      &imxdma->dma_device.channels);
	}

	imxdma->dma_device.dev = &pdev->dev;

	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
	imxdma->dma_device.device_tx_status = imxdma_tx_status;
	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
	imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved;
	imxdma->dma_device.device_config = imxdma_config;
	imxdma->dma_device.device_terminate_all = imxdma_terminate_all;
	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

	platform_set_drvdata(pdev, imxdma);

	imxdma->dma_device.copy_align = DMAENGINE_ALIGN_4_BYTES;
	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

	ret = dma_async_device_register(&imxdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto disable_dma_ahb_clk;
	}

	if (pdev->dev.of_node) {
		ret = of_dma_controller_register(pdev->dev.of_node,
						 imxdma_xlate, imxdma);
		if (ret) {
			dev_err(&pdev->dev, "unable to register of_dma_controller\n");
			goto err_of_dma_controller;
		}
	}

	return 0;

err_of_dma_controller:
	dma_async_device_unregister(&imxdma->dma_device);
disable_dma_ahb_clk:
	clk_disable_unprepare(imxdma->dma_ahb);
disable_dma_ipg_clk:
	clk_disable_unprepare(imxdma->dma_ipg);
	return ret;
}

static void imxdma_free_irq(struct platform_device *pdev, struct imxdma_engine *imxdma)
{
	int i;

	if (is_imx1_dma(imxdma)) {
		disable_irq(imxdma->irq);
		disable_irq(imxdma->irq_err);
	}

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		if (!is_imx1_dma(imxdma))
			disable_irq(imxdmac->irq);

		tasklet_kill(&imxdmac->dma_tasklet);
	}
}

static void imxdma_remove(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);

	imxdma_free_irq(pdev, imxdma);

	dma_async_device_unregister(&imxdma->dma_device);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	clk_disable_unprepare(imxdma->dma_ipg);
	clk_disable_unprepare(imxdma->dma_ahb);
}

static struct platform_driver imxdma_driver = {
	.driver		= {
		.name	= "imx-dma",
		.of_match_table = imx_dma_of_dev_id,
	},
	.remove		= imxdma_remove,
};

static int __init imxdma_module_init(void)
{
	return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");