#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/sizes.h>
#include <linux/platform_device.h>
#include <linux/of.h>

#include "cppi_dma.h"
#include "musb_core.h"
#include "musb_trace.h"

#define RNDIS_REG(x) (0x80 + ((x - 1) * 4))

#define EP_MODE_AUTOREQ_NONE		0
#define EP_MODE_AUTOREQ_ALL_NEOP	1
#define EP_MODE_AUTOREQ_ALWAYS		3

#define EP_MODE_DMA_TRANSPARENT		0
#define EP_MODE_DMA_RNDIS		1
#define EP_MODE_DMA_GEN_RNDIS		3

#define USB_CTRL_TX_MODE	0x70
#define USB_CTRL_RX_MODE	0x74
#define USB_CTRL_AUTOREQ	0xd0
#define USB_TDOWN		0xd8

#define MUSB_DMA_NUM_CHANNELS 15

struct cppi41_dma_controller {
	struct dma_controller controller;
	struct cppi41_dma_channel rx_channel[MUSB_DMA_NUM_CHANNELS];
	struct cppi41_dma_channel tx_channel[MUSB_DMA_NUM_CHANNELS];
	struct musb *musb;
	struct hrtimer early_tx;
	struct list_head early_tx_list;
	u32 rx_mode;
	u32 tx_mode;
	u32 auto_req;
};

static void save_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
	u16 csr;
	u8 toggle;

	if (cppi41_channel->is_tx)
		return;
	if (!is_host_active(cppi41_channel->controller->musb))
		return;

	csr = musb_readw(cppi41_channel->hw_ep->regs, MUSB_RXCSR);
	toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

	cppi41_channel->usb_toggle = toggle;
}

static void update_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct musb *musb = hw_ep->musb;
	u16 csr;
	u8 toggle;

	if (cppi41_channel->is_tx)
		return;
	if (!is_host_active(musb))
		return;

	musb_ep_select(musb->mregs, hw_ep->epnum);
	csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

	/*
	 * AM335x Advisory 1.0.13: due to an internal synchronisation error,
	 * the data toggle may reset from DATA1 to DATA0 while receiving data
	 * from more than one endpoint.
	 */
	if (!toggle && toggle == cppi41_channel->usb_toggle) {
		csr |= MUSB_RXCSR_H_DATATOGGLE | MUSB_RXCSR_H_WR_DATATOGGLE;
		musb_writew(cppi41_channel->hw_ep->regs, MUSB_RXCSR, csr);
		musb_dbg(cppi41_channel->controller->musb,
				"Restoring DATA1 toggle.");
	}

	cppi41_channel->usb_toggle = toggle;
}

static bool musb_is_tx_fifo_empty(struct musb_hw_ep *hw_ep)
{
	u8 epnum = hw_ep->epnum;
	struct musb *musb = hw_ep->musb;
	void __iomem *epio = musb->endpoints[epnum].regs;
	u16 csr;

	musb_ep_select(musb->mregs, hw_ep->epnum);
	csr = musb_readw(epio, MUSB_TXCSR);
	if (csr & MUSB_TXCSR_TXPKTRDY)
		return false;
	return true;
}

static void cppi41_dma_callback(void *private_data);

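/*
 * cppi41_trans_done - finish the current DMA segment.
 *
 * Either the whole request is complete (report it to the MUSB core and, for
 * TX transfers that ended on a packet-size boundary, send a ZLP via PIO), or
 * the next packet-sized segment is queued on the dmaengine channel.
 * Called with musb->lock held.
 */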
static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
{
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct musb *musb = hw_ep->musb;
	void __iomem *epio = hw_ep->regs;
	u16 csr;

	if (!cppi41_channel->prog_len ||
	    (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)) {

		/* done, complete */
		cppi41_channel->channel.actual_len =
			cppi41_channel->transferred;
		cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
		cppi41_channel->channel.rx_packet_done = true;

		/*
		 * transmit ZLP using PIO mode for transfers whose size is a
		 * multiple of the EP packet size.
		 */
		if (cppi41_channel->tx_zlp && (cppi41_channel->transferred %
					cppi41_channel->packet_sz) == 0) {
			musb_ep_select(musb->mregs, hw_ep->epnum);
			csr = MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY;
			musb_writew(epio, MUSB_TXCSR, csr);
		}

		trace_musb_cppi41_done(cppi41_channel);
		musb_dma_completion(musb, hw_ep->epnum, cppi41_channel->is_tx);
	} else {
		/* next iteration, reload */
		struct dma_chan *dc = cppi41_channel->dc;
		struct dma_async_tx_descriptor *dma_desc;
		enum dma_transfer_direction direction;
		u32 remain_bytes;

		cppi41_channel->buf_addr += cppi41_channel->packet_sz;

		remain_bytes = cppi41_channel->total_len;
		remain_bytes -= cppi41_channel->transferred;
		remain_bytes = min(remain_bytes, cppi41_channel->packet_sz);
		cppi41_channel->prog_len = remain_bytes;

		direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV
			: DMA_DEV_TO_MEM;
		dma_desc = dmaengine_prep_slave_single(dc,
				cppi41_channel->buf_addr,
				remain_bytes,
				direction,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (WARN_ON(!dma_desc))
			return;

		dma_desc->callback = cppi41_dma_callback;
		dma_desc->callback_param = &cppi41_channel->channel;
		cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
		trace_musb_cppi41_cont(cppi41_channel);
		dma_async_issue_pending(dc);

		if (!cppi41_channel->is_tx) {
			musb_ep_select(musb->mregs, hw_ep->epnum);
			csr = musb_readw(epio, MUSB_RXCSR);
			csr |= MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}
}

static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
{
	struct cppi41_dma_controller *controller;
	struct cppi41_dma_channel *cppi41_channel, *n;
	struct musb *musb;
	unsigned long flags;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	controller = container_of(timer, struct cppi41_dma_controller,
			early_tx);
	musb = controller->musb;

	spin_lock_irqsave(&musb->lock, flags);
	list_for_each_entry_safe(cppi41_channel, n, &controller->early_tx_list,
			tx_check) {
		bool empty;
		struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;

		empty = musb_is_tx_fifo_empty(hw_ep);
		if (empty) {
			list_del_init(&cppi41_channel->tx_check);
			cppi41_trans_done(cppi41_channel);
		}
	}

	if (!list_empty(&controller->early_tx_list) &&
	    !hrtimer_is_queued(&controller->early_tx)) {
		ret = HRTIMER_RESTART;
		hrtimer_forward_now(&controller->early_tx, 20 * NSEC_PER_USEC);
	}

	spin_unlock_irqrestore(&musb->lock, flags);
	return ret;
}

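/*
 * cppi41_dma_callback - dmaengine completion callback.
 *
 * Reads the transfer residue to work out how much was actually moved.  RX
 * channels, and TX channels whose FIFO is already empty, complete right
 * away; otherwise TX completion is deferred until the FIFO drains
 * (busy-wait on high speed, early_tx hrtimer on full speed).
 */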
static void cppi41_dma_callback(void *private_data)
{
	struct dma_channel *channel = private_data;
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct cppi41_dma_controller *controller;
	struct musb *musb = hw_ep->musb;
	unsigned long flags;
	struct dma_tx_state txstate;
	u32 transferred;
	int is_hs = 0;
	bool empty;

	spin_lock_irqsave(&musb->lock, flags);

	dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie,
			&txstate);
	transferred = cppi41_channel->prog_len - txstate.residue;
	cppi41_channel->transferred += transferred;

	trace_musb_cppi41_gb(cppi41_channel);
	update_rx_toggle(cppi41_channel);

	if (cppi41_channel->transferred == cppi41_channel->total_len ||
	    transferred < cppi41_channel->packet_sz)
		cppi41_channel->prog_len = 0;

	if (cppi41_channel->is_tx)
		empty = musb_is_tx_fifo_empty(hw_ep);

	if (!cppi41_channel->is_tx || empty) {
		cppi41_trans_done(cppi41_channel);
		goto out;
	}

	/*
	 * On AM335x it has been observed that the TX interrupt fires too
	 * early, that is the TXFIFO is not yet empty but the DMA engine says
	 * that it is done with the transfer. We don't receive a FIFO-empty
	 * interrupt, so the only thing we can do is poll for the bit. On HS
	 * it usually takes 2us, on FS around 110us - 150us depending on the
	 * transfer size. We spin on HS (no longer than 25us) and set up a
	 * timer on FS to check for the bit and complete the transfer.
	 */
	controller = cppi41_channel->controller;

	if (is_host_active(musb)) {
		if (musb->port1_status & USB_PORT_STAT_HIGH_SPEED)
			is_hs = 1;
	} else {
		if (musb->g.speed == USB_SPEED_HIGH)
			is_hs = 1;
	}
	if (is_hs) {
		unsigned wait = 25;

		do {
			empty = musb_is_tx_fifo_empty(hw_ep);
			if (empty) {
				cppi41_trans_done(cppi41_channel);
				goto out;
			}
			wait--;
			if (!wait)
				break;
			cpu_relax();
		} while (1);
	}
	list_add_tail(&cppi41_channel->tx_check,
			&controller->early_tx_list);
	if (!hrtimer_is_queued(&controller->early_tx)) {
		unsigned long usecs = cppi41_channel->total_len / 10;

		hrtimer_start_range_ns(&controller->early_tx,
				usecs * NSEC_PER_USEC,
				20 * NSEC_PER_USEC,
				HRTIMER_MODE_REL);
	}

out:
	spin_unlock_irqrestore(&musb->lock, flags);
}

static u32 update_ep_mode(unsigned ep, unsigned mode, u32 old)
{
	unsigned shift;

	shift = (ep - 1) * 2;
	old &= ~(3 << shift);
	old |= mode << shift;
	return old;
}

static void cppi41_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
		unsigned mode)
{
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	u32 port;
	u32 new_mode;
	u32 old_mode;

	if (cppi41_channel->is_tx)
		old_mode = controller->tx_mode;
	else
		old_mode = controller->rx_mode;
	port = cppi41_channel->port_num;
	new_mode = update_ep_mode(port, mode, old_mode);

	if (new_mode == old_mode)
		return;
	if (cppi41_channel->is_tx) {
		controller->tx_mode = new_mode;
		musb_writel(controller->musb->ctrl_base, USB_CTRL_TX_MODE,
				new_mode);
	} else {
		controller->rx_mode = new_mode;
		musb_writel(controller->musb->ctrl_base, USB_CTRL_RX_MODE,
				new_mode);
	}
}

static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel,
		unsigned mode)
{
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	u32 port;
	u32 new_mode;
	u32 old_mode;

	old_mode = controller->auto_req;
	port = cppi41_channel->port_num;
	new_mode = update_ep_mode(port, mode, old_mode);

	if (new_mode == old_mode)
		return;
	controller->auto_req = new_mode;
	musb_writel(controller->musb->ctrl_base, USB_CTRL_AUTOREQ, new_mode);
}

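/*
 * cppi41_configure_channel - program one DMA transfer.
 *
 * TX transfers larger than one packet use generic RNDIS mode so the hardware
 * splits them into max-packet-size segments.  RX transfers (and short TX
 * transfers) use transparent mode; RX is limited to one packet at a time and
 * reloaded from cppi41_trans_done() (AM335x Advisory 1.0.13).
 */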
static bool cppi41_configure_channel(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct dma_chan *dc = cppi41_channel->dc;
	struct dma_async_tx_descriptor *dma_desc;
	enum dma_transfer_direction direction;
	struct musb *musb = cppi41_channel->controller->musb;
	unsigned use_gen_rndis = 0;

	cppi41_channel->buf_addr = dma_addr;
	cppi41_channel->total_len = len;
	cppi41_channel->transferred = 0;
	cppi41_channel->packet_sz = packet_sz;
	cppi41_channel->tx_zlp = (cppi41_channel->is_tx && mode) ? 1 : 0;

	/*
	 * Due to AM335x' Advisory 1.0.13 we are not allowed to transfer more
	 * than max packet size at a time.
	 */
	if (cppi41_channel->is_tx)
		use_gen_rndis = 1;

	if (use_gen_rndis) {
		/* RNDIS mode */
		if (len > packet_sz) {
			musb_writel(musb->ctrl_base,
				RNDIS_REG(cppi41_channel->port_num), len);
			/* gen rndis */
			cppi41_set_dma_mode(cppi41_channel,
					EP_MODE_DMA_GEN_RNDIS);

			/* auto req */
			cppi41_set_autoreq_mode(cppi41_channel,
					EP_MODE_AUTOREQ_ALL_NEOP);
		} else {
			musb_writel(musb->ctrl_base,
					RNDIS_REG(cppi41_channel->port_num), 0);
			cppi41_set_dma_mode(cppi41_channel,
					EP_MODE_DMA_TRANSPARENT);
			cppi41_set_autoreq_mode(cppi41_channel,
					EP_MODE_AUTOREQ_NONE);
		}
	} else {
		/* fallback mode */
		cppi41_set_dma_mode(cppi41_channel, EP_MODE_DMA_TRANSPARENT);
		cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);
		len = min_t(u32, packet_sz, len);
	}
	cppi41_channel->prog_len = len;
	direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	dma_desc = dmaengine_prep_slave_single(dc, dma_addr, len, direction,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc)
		return false;

	dma_desc->callback = cppi41_dma_callback;
	dma_desc->callback_param = channel;
	cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
	cppi41_channel->channel.rx_packet_done = false;

	trace_musb_cppi41_config(cppi41_channel);

	save_rx_toggle(cppi41_channel);
	dma_async_issue_pending(dc);
	return true;
}

static struct dma_channel *cppi41_dma_channel_allocate(struct dma_controller *c,
				struct musb_hw_ep *hw_ep, u8 is_tx)
{
	struct cppi41_dma_controller *controller = container_of(c,
			struct cppi41_dma_controller, controller);
	struct cppi41_dma_channel *cppi41_channel = NULL;
	u8 ch_num = hw_ep->epnum - 1;

	if (ch_num >= MUSB_DMA_NUM_CHANNELS)
		return NULL;

	if (is_tx)
		cppi41_channel = &controller->tx_channel[ch_num];
	else
		cppi41_channel = &controller->rx_channel[ch_num];

	if (!cppi41_channel->dc)
		return NULL;

	if (cppi41_channel->is_allocated)
		return NULL;

	cppi41_channel->hw_ep = hw_ep;
	cppi41_channel->is_allocated = 1;

	trace_musb_cppi41_alloc(cppi41_channel);
	return &cppi41_channel->channel;
}

static void cppi41_dma_channel_release(struct dma_channel *channel)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;

	trace_musb_cppi41_free(cppi41_channel);
	if (cppi41_channel->is_allocated) {
		cppi41_channel->is_allocated = 0;
		channel->status = MUSB_DMA_STATUS_FREE;
		channel->actual_len = 0;
	}
}

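/*
 * For high-bandwidth host endpoints the queue's hb_mult is folded into
 * packet_sz below, so the segmenting logic in cppi41_configure_channel()
 * sees the full per-(micro)frame payload (hb_mult * base wMaxPacketSize)
 * rather than the base packet size alone.
 */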
static int cppi41_dma_channel_program(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	int ret;
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	int hb_mult = 0;

	BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
		channel->status == MUSB_DMA_STATUS_BUSY);

	if (is_host_active(cppi41_channel->controller->musb)) {
		if (cppi41_channel->is_tx)
			hb_mult = cppi41_channel->hw_ep->out_qh->hb_mult;
		else
			hb_mult = cppi41_channel->hw_ep->in_qh->hb_mult;
	}

	channel->status = MUSB_DMA_STATUS_BUSY;
	channel->actual_len = 0;

	if (hb_mult)
		packet_sz = hb_mult * (packet_sz & 0x7FF);

	ret = cppi41_configure_channel(channel, packet_sz, mode, dma_addr, len);
	if (!ret)
		channel->status = MUSB_DMA_STATUS_FREE;

	return ret;
}

static int cppi41_is_compatible(struct dma_channel *channel, u16 maxpacket,
		void *buf, u32 length)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->musb;

	if (is_host_active(musb)) {
		WARN_ON(1);
		return 1;
	}
	if (cppi41_channel->hw_ep->ep_in.type != USB_ENDPOINT_XFER_BULK)
		return 0;
	if (cppi41_channel->is_tx)
		return 1;
	/* AM335x Advisory 1.0.13. No workaround for device RX mode */
	return 0;
}

static int cppi41_dma_channel_abort(struct dma_channel *channel)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->musb;
	void __iomem *epio = cppi41_channel->hw_ep->regs;
	int tdbit;
	int ret;
	unsigned is_tx;
	u16 csr;

	is_tx = cppi41_channel->is_tx;
	trace_musb_cppi41_abort(cppi41_channel);

	if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)
		return 0;

	list_del_init(&cppi41_channel->tx_check);
	if (is_tx) {
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR, csr);
	} else {
		cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);

		/* delay to drain the cppi dma pipeline for isoch */
		udelay(250);

		csr = musb_readw(epio, MUSB_RXCSR);
		csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB);
		musb_writew(epio, MUSB_RXCSR, csr);

		/* wait to drain the cppi dma pipeline */
		udelay(50);

		csr = musb_readw(epio, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY) {
			csr |= MUSB_RXCSR_FLUSHFIFO;
			musb_writew(epio, MUSB_RXCSR, csr);
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}

	tdbit = 1 << cppi41_channel->port_num;
	if (is_tx)
		tdbit <<= 16;

	do {
		if (is_tx)
			musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
		ret = dmaengine_terminate_all(cppi41_channel->dc);
	} while (ret == -EAGAIN);

	if (is_tx) {
		musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);

		csr = musb_readw(epio, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_TXPKTRDY) {
			csr |= MUSB_TXCSR_FLUSHFIFO;
			musb_writew(epio, MUSB_TXCSR, csr);
		}
	}

	cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
	return 0;
}

static void cppi41_release_all_dma_chans(struct cppi41_dma_controller *ctrl)
{
	struct dma_chan *dc;
	int i;

	for (i = 0; i < MUSB_DMA_NUM_CHANNELS; i++) {
		dc = ctrl->tx_channel[i].dc;
		if (dc)
			dma_release_channel(dc);
		dc = ctrl->rx_channel[i].dc;
		if (dc)
			dma_release_channel(dc);
	}
}

static void cppi41_dma_controller_stop(struct cppi41_dma_controller *controller)
{
	cppi41_release_all_dma_chans(controller);
}

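/*
 * cppi41_dma_controller_start - bind dmaengine channels to MUSB endpoints.
 *
 * The "dma-names" DT property is expected to list channels as "txN"/"rxN",
 * where N is the 1-based endpoint/port number; each name is mapped onto the
 * matching tx_channel[]/rx_channel[] slot.
 */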
static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
{
	struct musb *musb = controller->musb;
	struct device *dev = musb->controller;
	struct device_node *np = dev->parent->of_node;
	struct cppi41_dma_channel *cppi41_channel;
	int count;
	int i;
	int ret;

	count = of_property_count_strings(np, "dma-names");
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		struct dma_chan *dc;
		struct dma_channel *musb_dma;
		const char *str;
		unsigned is_tx;
		unsigned int port;

		ret = of_property_read_string_index(np, "dma-names", i, &str);
		if (ret)
			goto err;
		if (strstarts(str, "tx"))
			is_tx = 1;
		else if (strstarts(str, "rx"))
			is_tx = 0;
		else {
			dev_err(dev, "Wrong dmatype %s\n", str);
			goto err;
		}
		ret = kstrtouint(str + 2, 0, &port);
		if (ret)
			goto err;

		ret = -EINVAL;
		if (port > MUSB_DMA_NUM_CHANNELS || !port)
			goto err;
		if (is_tx)
			cppi41_channel = &controller->tx_channel[port - 1];
		else
			cppi41_channel = &controller->rx_channel[port - 1];

		cppi41_channel->controller = controller;
		cppi41_channel->port_num = port;
		cppi41_channel->is_tx = is_tx;
		INIT_LIST_HEAD(&cppi41_channel->tx_check);

		musb_dma = &cppi41_channel->channel;
		musb_dma->private_data = cppi41_channel;
		musb_dma->status = MUSB_DMA_STATUS_FREE;
		musb_dma->max_len = SZ_4M;

		dc = dma_request_slave_channel(dev->parent, str);
		if (!dc) {
			dev_err(dev, "Failed to request %s.\n", str);
			ret = -EPROBE_DEFER;
			goto err;
		}
		cppi41_channel->dc = dc;
	}
	return 0;
err:
	cppi41_release_all_dma_chans(controller);
	return ret;
}

void cppi41_dma_controller_destroy(struct dma_controller *c)
{
	struct cppi41_dma_controller *controller = container_of(c,
			struct cppi41_dma_controller, controller);

	hrtimer_cancel(&controller->early_tx);
	cppi41_dma_controller_stop(controller);
	kfree(controller);
}
EXPORT_SYMBOL_GPL(cppi41_dma_controller_destroy);

struct dma_controller *
cppi41_dma_controller_create(struct musb *musb, void __iomem *base)
{
	struct cppi41_dma_controller *controller;
	int ret = 0;

	if (!musb->controller->parent->of_node) {
		dev_err(musb->controller, "Need DT for the DMA engine.\n");
		return NULL;
	}

	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		goto kzalloc_fail;

	hrtimer_init(&controller->early_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	controller->early_tx.function = cppi41_recheck_tx_req;
	INIT_LIST_HEAD(&controller->early_tx_list);
	controller->musb = musb;

	controller->controller.channel_alloc = cppi41_dma_channel_allocate;
	controller->controller.channel_release = cppi41_dma_channel_release;
	controller->controller.channel_program = cppi41_dma_channel_program;
	controller->controller.channel_abort = cppi41_dma_channel_abort;
	controller->controller.is_compatible = cppi41_is_compatible;

	ret = cppi41_dma_controller_start(controller);
	if (ret)
		goto plat_get_fail;
	return &controller->controller;

plat_get_fail:
	kfree(controller);
kzalloc_fail:
	if (ret == -EPROBE_DEFER)
		return ERR_PTR(ret);
	return NULL;
}
EXPORT_SYMBOL_GPL(cppi41_dma_controller_create);