/*
 * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
 *
 * Refer to drivers/dma/imx-sdma.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>

#include <asm/irq.h>
#include <mach/mxs.h>
#include <mach/dma.h>
#include <mach/common.h>

/*
 * NOTE: The term "PIO" throughout the mxs-dma implementation means the
 * PIO mode of mxs apbh-dma and apbx-dma.  In this mode, the DMA
 * controller programs the registers of the peripheral device on the
 * client's behalf.
 */
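/*
 * Illustrative sketch only, not part of the driver: a client driver
 * would typically use PIO mode by handing prep_slave_sg() an array of
 * u32 PIO words cast to a scatterlist pointer, with DMA_NONE as the
 * direction (the register values below are hypothetical):
 *
 *	u32 pio[3] = { ctrl0_val, cmd0_val, cmd1_val };
 *	desc = chan->device->device_prep_slave_sg(chan,
 *			(struct scatterlist *) pio, ARRAY_SIZE(pio),
 *			DMA_NONE, 0);
 *
 * mxs_dma_prep_slave_sg() below copies such words into ccw->pio_words[]
 * and marks the descriptor with PIO_NUM.
 */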
#define MXS_DMA_APBH		0
#define MXS_DMA_APBX		1
#define dma_is_apbh()		(mxs_dma->dev_id == MXS_DMA_APBH)

#define APBH_VERSION_LATEST	3
#define apbh_is_old()		(mxs_dma->version < APBH_VERSION_LATEST)

#define HW_APBHX_CTRL0				0x000
#define BM_APBH_CTRL0_APB_BURST8_EN		(1 << 29)
#define BM_APBH_CTRL0_APB_BURST_EN		(1 << 28)
#define BP_APBH_CTRL0_CLKGATE_CHANNEL		8
#define BP_APBH_CTRL0_RESET_CHANNEL		16
#define HW_APBHX_CTRL1				0x010
#define HW_APBHX_CTRL2				0x020
#define HW_APBHX_CHANNEL_CTRL			0x030
#define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL	16
#define HW_APBH_VERSION				(cpu_is_mx23() ? 0x3f0 : 0x800)
#define HW_APBX_VERSION				0x800
#define BP_APBHX_VERSION_MAJOR			24
#define HW_APBHX_CHn_NXTCMDAR(n) \
	(((dma_is_apbh() && apbh_is_old()) ? 0x050 : 0x110) + (n) * 0x70)
#define HW_APBHX_CHn_SEMA(n) \
	(((dma_is_apbh() && apbh_is_old()) ? 0x080 : 0x140) + (n) * 0x70)

/*
 * ccw bits definitions
 *
 * COMMAND:		0..1	(2)
 * CHAIN:		2	(1)
 * IRQ:			3	(1)
 * NAND_LOCK:		4	(1) - not implemented
 * NAND_WAIT4READY:	5	(1) - not implemented
 * DEC_SEM:		6	(1)
 * WAIT4END:		7	(1)
 * HALT_ON_TERMINATE:	8	(1)
 * TERMINATE_FLUSH:	9	(1)
 * RESERVED:		10..11	(2)
 * PIO_NUM:		12..15	(4)
 */
#define BP_CCW_COMMAND		0
#define BM_CCW_COMMAND		(3 << 0)
#define CCW_CHAIN		(1 << 2)
#define CCW_IRQ			(1 << 3)
#define CCW_DEC_SEM		(1 << 6)
#define CCW_WAIT4END		(1 << 7)
#define CCW_HALT_ON_TERM	(1 << 8)
#define CCW_TERM_FLUSH		(1 << 9)
#define BP_CCW_PIO_NUM		12
#define BM_CCW_PIO_NUM		(0xf << 12)

#define BF_CCW(value, field)	(((value) << BP_CCW_##field) & BM_CCW_##field)

#define MXS_DMA_CMD_NO_XFER	0
#define MXS_DMA_CMD_WRITE	1
#define MXS_DMA_CMD_READ	2
#define MXS_DMA_CMD_DMA_SENSE	3	/* not implemented */
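/*
 * Illustrative example of how these bits compose (it mirrors what the
 * prep routines below actually do, and is not additional driver code):
 * the last descriptor of a device-to-memory sg transfer ends up with
 *
 *	ccw->bits = CCW_IRQ | CCW_DEC_SEM | CCW_WAIT4END |
 *		    CCW_HALT_ON_TERM | CCW_TERM_FLUSH |
 *		    BF_CCW(MXS_DMA_CMD_WRITE, COMMAND);
 *
 * i.e. write to memory, raise an interrupt on completion, decrement the
 * channel semaphore and wait for the peripheral to signal end of
 * transfer.
 */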
struct mxs_dma_ccw {
	u32		next;
	u16		bits;
	u16		xfer_bytes;
#define MAX_XFER_BYTES	0xff00
	u32		bufaddr;
#define MXS_PIO_WORDS	16
	u32		pio_words[MXS_PIO_WORDS];
};

#define NUM_CCW	(int)(PAGE_SIZE / sizeof(struct mxs_dma_ccw))

struct mxs_dma_chan {
	struct mxs_dma_engine		*mxs_dma;
	struct dma_chan			chan;
	struct dma_async_tx_descriptor	desc;
	struct tasklet_struct		tasklet;
	int				chan_irq;
	struct mxs_dma_ccw		*ccw;
	dma_addr_t			ccw_phys;
	dma_cookie_t			last_completed;
	enum dma_status			status;
	unsigned int			flags;
#define MXS_DMA_SG_LOOP			(1 << 0)
};

#define MXS_DMA_CHANNELS		16
#define MXS_DMA_CHANNELS_MASK		0xffff

struct mxs_dma_engine {
	int				dev_id;
	unsigned int			version;
	void __iomem			*base;
	struct clk			*clk;
	struct dma_device		dma_device;
	struct device_dma_parameters	dma_parms;
	struct mxs_dma_chan		mxs_chans[MXS_DMA_CHANNELS];
};

static inline void mxs_dma_clkgate(struct mxs_dma_chan *mxs_chan, int enable)
{
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;
	/* a set CLKGATE bit gates the clock off, so enabling clears it */
	int set_clr = enable ? MXS_CLR_ADDR : MXS_SET_ADDR;

	/* enable apbh channel clock */
	if (dma_is_apbh()) {
		if (apbh_is_old())
			writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
				mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
		else
			writel(1 << chan_id,
				mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
	}
}

static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
{
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	if (dma_is_apbh() && apbh_is_old())
		writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL),
			mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
	else
		writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL),
			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR);
}

static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
{
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	/* clkgate needs to be enabled before writing other registers */
	mxs_dma_clkgate(mxs_chan, 1);

	/* set cmd_addr up */
	writel(mxs_chan->ccw_phys,
		mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id));

	/* write 1 to SEMA to kick off the channel */
	writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(chan_id));
}

static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
{
	/* disable apbh channel clock */
	mxs_dma_clkgate(mxs_chan, 0);

	mxs_chan->status = DMA_SUCCESS;
}

static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan)
{
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	/* freeze the channel */
	if (dma_is_apbh() && apbh_is_old())
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
	else
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR);

	mxs_chan->status = DMA_PAUSED;
}

static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan)
{
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	/* unfreeze the channel */
	if (dma_is_apbh() && apbh_is_old())
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
	else
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_CLR_ADDR);

	mxs_chan->status = DMA_IN_PROGRESS;
}

static dma_cookie_t mxs_dma_assign_cookie(struct mxs_dma_chan *mxs_chan)
{
	dma_cookie_t cookie = mxs_chan->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	mxs_chan->chan.cookie = cookie;
	mxs_chan->desc.cookie = cookie;

	return cookie;
}

static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct mxs_dma_chan, chan);
}

static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(tx->chan);

	mxs_dma_enable_chan(mxs_chan);

	return mxs_dma_assign_cookie(mxs_chan);
}

static void mxs_dma_tasklet(unsigned long data)
{
	struct mxs_dma_chan *mxs_chan = (struct mxs_dma_chan *) data;

	if (mxs_chan->desc.callback)
		mxs_chan->desc.callback(mxs_chan->desc.callback_param);
}

static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
{
	struct mxs_dma_engine *mxs_dma = dev_id;
	u32 stat1, stat2;

	/* completion status */
	stat1 = readl(mxs_dma->base + HW_APBHX_CTRL1);
	stat1 &= MXS_DMA_CHANNELS_MASK;
	writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + MXS_CLR_ADDR);

	/* error status */
	stat2 = readl(mxs_dma->base + HW_APBHX_CTRL2);
	writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + MXS_CLR_ADDR);

	/*
	 * When both the completion and error-of-termination bits are set
	 * at the same time, we do not take it as an error.  IOW, it only
	 * becomes an error we need to handle here if it is either (1) a
	 * bus error or (2) a termination error with no completion.
	 */
	stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */
		(~(stat2 >> MXS_DMA_CHANNELS) & stat2 & ~stat1); /* (2) */

	/* combine error and completion status for checking */
	stat1 = (stat2 << MXS_DMA_CHANNELS) | stat1;
	while (stat1) {
		int channel = fls(stat1) - 1;
		struct mxs_dma_chan *mxs_chan =
			&mxs_dma->mxs_chans[channel % MXS_DMA_CHANNELS];

		if (channel >= MXS_DMA_CHANNELS) {
			dev_dbg(mxs_dma->dma_device.dev,
				"%s: error in channel %d\n", __func__,
				channel - MXS_DMA_CHANNELS);
			mxs_chan->status = DMA_ERROR;
			mxs_dma_reset_chan(mxs_chan);
		} else {
			if (mxs_chan->flags & MXS_DMA_SG_LOOP)
				mxs_chan->status = DMA_IN_PROGRESS;
			else
				mxs_chan->status = DMA_SUCCESS;
		}

		stat1 &= ~(1 << channel);

		if (mxs_chan->status == DMA_SUCCESS)
			mxs_chan->last_completed = mxs_chan->desc.cookie;

		/* schedule tasklet on this channel */
		tasklet_schedule(&mxs_chan->tasklet);
	}

	return IRQ_HANDLED;
}

static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_data *data = chan->private;
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int ret;

	if (!data)
		return -EINVAL;

	mxs_chan->chan_irq = data->chan_irq;

	mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
				&mxs_chan->ccw_phys, GFP_KERNEL);
	if (!mxs_chan->ccw) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	memset(mxs_chan->ccw, 0, PAGE_SIZE);

	if (mxs_chan->chan_irq != NO_IRQ) {
		ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
					0, "mxs-dma", mxs_dma);
		if (ret)
			goto err_irq;
	}

	ret = clk_enable(mxs_dma->clk);
	if (ret)
		goto err_clk;

	/* clkgate needs to be enabled for reset to finish */
	mxs_dma_clkgate(mxs_chan, 1);
	mxs_dma_reset_chan(mxs_chan);
	mxs_dma_clkgate(mxs_chan, 0);

	dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
	mxs_chan->desc.tx_submit = mxs_dma_tx_submit;

	/* the descriptor is ready */
	async_tx_ack(&mxs_chan->desc);

	return 0;

err_clk:
	free_irq(mxs_chan->chan_irq, mxs_dma);
err_irq:
	dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
			mxs_chan->ccw, mxs_chan->ccw_phys);
err_alloc:
	return ret;
}

static void mxs_dma_free_chan_resources(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;

	mxs_dma_disable_chan(mxs_chan);

	free_irq(mxs_chan->chan_irq, mxs_dma);

	dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
			mxs_chan->ccw, mxs_chan->ccw_phys);

	clk_disable(mxs_dma->clk);
}
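/*
 * Illustrative sketch of the normal (non-PIO) client flow, not part of
 * the driver; "sgl", "nents", "done_fn" and "ctx" are placeholders, and
 * the exact dmaengine helpers available depend on the kernel version:
 *
 *	nents = dma_map_sg(dev, sgl, sg_len, DMA_FROM_DEVICE);
 *	desc = chan->device->device_prep_slave_sg(chan, sgl, nents,
 *						   DMA_FROM_DEVICE, 0);
 *	desc->callback = done_fn;
 *	desc->callback_param = ctx;
 *	cookie = desc->tx_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * Note that tx_submit() already kicks the channel (see
 * mxs_dma_tx_submit() above), which is why mxs_dma_issue_pending()
 * below has nothing left to do.
 */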
static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long append)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	struct mxs_dma_ccw *ccw;
	struct scatterlist *sg;
	int i, j;
	u32 *pio;
	static int idx;

	if (mxs_chan->status == DMA_IN_PROGRESS && !append)
		return NULL;

	if (sg_len + (append ? idx : 0) > NUM_CCW) {
		dev_err(mxs_dma->dma_device.dev,
			"maximum number of sg exceeded: %d > %d\n",
			sg_len, NUM_CCW);
		goto err_out;
	}

	mxs_chan->status = DMA_IN_PROGRESS;
	mxs_chan->flags = 0;

	/*
	 * If the sg is prepared with append flag set, the sg
	 * will be appended to the last prepared sg.
	 */
	if (append) {
		BUG_ON(idx < 1);
		ccw = &mxs_chan->ccw[idx - 1];
		ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
		ccw->bits |= CCW_CHAIN;
		ccw->bits &= ~CCW_IRQ;
		ccw->bits &= ~CCW_DEC_SEM;
		ccw->bits &= ~CCW_WAIT4END;
	} else {
		idx = 0;
	}

	if (direction == DMA_NONE) {
		ccw = &mxs_chan->ccw[idx++];
		pio = (u32 *) sgl;

		for (j = 0; j < sg_len;)
			ccw->pio_words[j++] = *pio++;

		ccw->bits = 0;
		ccw->bits |= CCW_IRQ;
		ccw->bits |= CCW_DEC_SEM;
		ccw->bits |= CCW_WAIT4END;
		ccw->bits |= CCW_HALT_ON_TERM;
		ccw->bits |= CCW_TERM_FLUSH;
		ccw->bits |= BF_CCW(sg_len, PIO_NUM);
		ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND);
	} else {
		for_each_sg(sgl, sg, sg_len, i) {
			if (sg->length > MAX_XFER_BYTES) {
				dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n",
						sg->length, MAX_XFER_BYTES);
				goto err_out;
			}

			ccw = &mxs_chan->ccw[idx++];

			ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
			ccw->bufaddr = sg->dma_address;
			ccw->xfer_bytes = sg->length;

			ccw->bits = 0;
			ccw->bits |= CCW_CHAIN;
			ccw->bits |= CCW_HALT_ON_TERM;
			ccw->bits |= CCW_TERM_FLUSH;
			ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ?
					MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ,
					COMMAND);

			if (i + 1 == sg_len) {
				ccw->bits &= ~CCW_CHAIN;
				ccw->bits |= CCW_IRQ;
				ccw->bits |= CCW_DEC_SEM;
				ccw->bits |= CCW_WAIT4END;
			}
		}
	}

	return &mxs_chan->desc;

err_out:
	mxs_chan->status = DMA_ERROR;
	return NULL;
}
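/*
 * Cyclic transfers (typically audio) build a closed ring: the last ccw
 * points back to the first, and every period sets CCW_IRQ so the client
 * callback fires once per period.  The sketch below is only an
 * illustration of how a client might set this up; "buf_phys", "periods",
 * "period" and "period_elapsed" are hypothetical:
 *
 *	desc = chan->device->device_prep_dma_cyclic(chan, buf_phys,
 *			periods * period, period, DMA_FROM_DEVICE);
 *	desc->callback = period_elapsed;
 *	desc->tx_submit(desc);
 */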
static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_data_direction direction)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int num_periods = buf_len / period_len;
	int i = 0, buf = 0;

	if (mxs_chan->status == DMA_IN_PROGRESS)
		return NULL;

	mxs_chan->status = DMA_IN_PROGRESS;
	mxs_chan->flags |= MXS_DMA_SG_LOOP;

	if (num_periods > NUM_CCW) {
		dev_err(mxs_dma->dma_device.dev,
			"maximum number of sg exceeded: %d > %d\n",
			num_periods, NUM_CCW);
		goto err_out;
	}

	if (period_len > MAX_XFER_BYTES) {
		dev_err(mxs_dma->dma_device.dev,
			"maximum period size exceeded: %zu > %d\n",
			period_len, MAX_XFER_BYTES);
		goto err_out;
	}

	while (buf < buf_len) {
		struct mxs_dma_ccw *ccw = &mxs_chan->ccw[i];

		if (i + 1 == num_periods)
			ccw->next = mxs_chan->ccw_phys;
		else
			ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * (i + 1);

		ccw->bufaddr = dma_addr;
		ccw->xfer_bytes = period_len;

		ccw->bits = 0;
		ccw->bits |= CCW_CHAIN;
		ccw->bits |= CCW_IRQ;
		ccw->bits |= CCW_HALT_ON_TERM;
		ccw->bits |= CCW_TERM_FLUSH;
		ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ?
				MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND);

		dma_addr += period_len;
		buf += period_len;

		i++;
	}

	return &mxs_chan->desc;

err_out:
	mxs_chan->status = DMA_ERROR;
	return NULL;
}

static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	int ret = 0;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		mxs_dma_disable_chan(mxs_chan);
		mxs_dma_reset_chan(mxs_chan);
		break;
	case DMA_PAUSE:
		mxs_dma_pause_chan(mxs_chan);
		break;
	case DMA_RESUME:
		mxs_dma_resume_chan(mxs_chan);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
			dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	dma_cookie_t last_used;

	last_used = chan->cookie;
	dma_set_tx_state(txstate, mxs_chan->last_completed, last_used, 0);

	return mxs_chan->status;
}

static void mxs_dma_issue_pending(struct dma_chan *chan)
{
	/*
	 * Nothing to do.  The transfer was already started by tx_submit(),
	 * and we only have a single descriptor per channel.
	 */
}

static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
{
	int ret;

	ret = clk_enable(mxs_dma->clk);
	if (ret)
		goto err_out;

	ret = mxs_reset_block(mxs_dma->base);
	if (ret)
		goto err_out;

	/* only major version matters */
	mxs_dma->version = readl(mxs_dma->base +
				((mxs_dma->dev_id == MXS_DMA_APBX) ?
				HW_APBX_VERSION : HW_APBH_VERSION)) >>
				BP_APBHX_VERSION_MAJOR;

	/* enable apbh burst */
	if (dma_is_apbh()) {
		writel(BM_APBH_CTRL0_APB_BURST_EN,
			mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
		writel(BM_APBH_CTRL0_APB_BURST8_EN,
			mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
	}

	/* enable irq for all the channels */
	writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS,
		mxs_dma->base + HW_APBHX_CTRL1 + MXS_SET_ADDR);

	clk_disable(mxs_dma->clk);

	return 0;

err_out:
	return ret;
}

static int __init mxs_dma_probe(struct platform_device *pdev)
{
	const struct platform_device_id *id_entry =
				platform_get_device_id(pdev);
	struct mxs_dma_engine *mxs_dma;
	struct resource *iores;
	int ret, i;

	mxs_dma = kzalloc(sizeof(*mxs_dma), GFP_KERNEL);
	if (!mxs_dma)
		return -ENOMEM;

	mxs_dma->dev_id = id_entry->driver_data;

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!request_mem_region(iores->start, resource_size(iores),
				pdev->name)) {
		ret = -EBUSY;
		goto err_request_region;
	}

	mxs_dma->base = ioremap(iores->start, resource_size(iores));
	if (!mxs_dma->base) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	mxs_dma->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(mxs_dma->clk)) {
		ret = PTR_ERR(mxs_dma->clk);
		goto err_clk;
	}

	dma_cap_set(DMA_SLAVE, mxs_dma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, mxs_dma->dma_device.cap_mask);

	INIT_LIST_HEAD(&mxs_dma->dma_device.channels);

	/* Initialize channel parameters */
	for (i = 0; i < MXS_DMA_CHANNELS; i++) {
		struct mxs_dma_chan *mxs_chan = &mxs_dma->mxs_chans[i];

		mxs_chan->mxs_dma = mxs_dma;
		mxs_chan->chan.device = &mxs_dma->dma_device;

		tasklet_init(&mxs_chan->tasklet, mxs_dma_tasklet,
			     (unsigned long) mxs_chan);

		/* Add the channel to mxs_chan list */
		list_add_tail(&mxs_chan->chan.device_node,
			&mxs_dma->dma_device.channels);
	}

	ret = mxs_dma_init(mxs_dma);
	if (ret)
		goto err_init;

	mxs_dma->dma_device.dev = &pdev->dev;

	/* mxs_dma gets a 65280 byte (0xff00) maximum sg segment size */
	mxs_dma->dma_device.dev->dma_parms = &mxs_dma->dma_parms;
	dma_set_max_seg_size(mxs_dma->dma_device.dev, MAX_XFER_BYTES);

	mxs_dma->dma_device.device_alloc_chan_resources = mxs_dma_alloc_chan_resources;
	mxs_dma->dma_device.device_free_chan_resources = mxs_dma_free_chan_resources;
	mxs_dma->dma_device.device_tx_status = mxs_dma_tx_status;
	mxs_dma->dma_device.device_prep_slave_sg = mxs_dma_prep_slave_sg;
	mxs_dma->dma_device.device_prep_dma_cyclic = mxs_dma_prep_dma_cyclic;
	mxs_dma->dma_device.device_control = mxs_dma_control;
	mxs_dma->dma_device.device_issue_pending = mxs_dma_issue_pending;

	ret = dma_async_device_register(&mxs_dma->dma_device);
	if (ret) {
		dev_err(mxs_dma->dma_device.dev, "unable to register\n");
		goto err_init;
	}

	dev_info(mxs_dma->dma_device.dev, "initialized\n");

	return 0;

err_init:
	clk_put(mxs_dma->clk);
err_clk:
	iounmap(mxs_dma->base);
err_ioremap:
	release_mem_region(iores->start, resource_size(iores));
err_request_region:
	kfree(mxs_dma);
	return ret;
}

static struct platform_device_id mxs_dma_type[] = {
	{
		.name = "mxs-dma-apbh",
		.driver_data = MXS_DMA_APBH,
	}, {
		.name = "mxs-dma-apbx",
		.driver_data = MXS_DMA_APBX,
	}, {
		/* end of list */
	}
};

static struct platform_driver mxs_dma_driver = {
	.driver		= {
		.name	= "mxs-dma",
	},
	.id_table	= mxs_dma_type,
};

static int __init mxs_dma_module_init(void)
{
	return platform_driver_probe(&mxs_dma_driver, mxs_dma_probe);
}
subsys_initcall(mxs_dma_module_init);