/*
 * hcd.c - DesignWare HS OTG Controller host-mode routines
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains the core HCD code, and implements the Linux hc_driver
 * API
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/usb.h>

#include <linux/usb/hcd.h>
#include <linux/usb/ch11.h>

#include "core.h"
#include "hcd.h"

/**
 * dwc2_dump_channel_info() - Prints the state of a host channel
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Pointer to the channel to dump
 *
 * Must be called with interrupt disabled and spinlock held
 *
 * NOTE: This function will be removed once the peripheral controller code
 * is integrated and the driver is stable
 */
static void dwc2_dump_channel_info(struct dwc2_hsotg *hsotg,
				   struct dwc2_host_chan *chan)
{
#ifdef VERBOSE_DEBUG
	int num_channels = hsotg->core_params->host_channels;
	struct dwc2_qh *qh;
	u32 hcchar;
	u32 hcsplt;
	u32 hctsiz;
	u32 hc_dma;
	int i;

	if (chan == NULL)
		return;

	hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
	hcsplt = readl(hsotg->regs + HCSPLT(chan->hc_num));
	hctsiz = readl(hsotg->regs + HCTSIZ(chan->hc_num));
	hc_dma = readl(hsotg->regs + HCDMA(chan->hc_num));

	dev_dbg(hsotg->dev, "  Assigned to channel %p:\n", chan);
	dev_dbg(hsotg->dev, "    hcchar 0x%08x, hcsplt 0x%08x\n",
		hcchar, hcsplt);
	dev_dbg(hsotg->dev, "    hctsiz 0x%08x, hc_dma 0x%08x\n",
		hctsiz, hc_dma);
	dev_dbg(hsotg->dev, "  dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
		chan->dev_addr, chan->ep_num, chan->ep_is_in);
	dev_dbg(hsotg->dev, "  ep_type: %d\n", chan->ep_type);
	dev_dbg(hsotg->dev, "  max_packet: %d\n", chan->max_packet);
	dev_dbg(hsotg->dev, "  data_pid_start: %d\n", chan->data_pid_start);
	dev_dbg(hsotg->dev, "  xfer_started: %d\n", chan->xfer_started);
	dev_dbg(hsotg->dev, "  halt_status: %d\n", chan->halt_status);
	dev_dbg(hsotg->dev, "  xfer_buf: %p\n", chan->xfer_buf);
	dev_dbg(hsotg->dev, "  xfer_dma: %08lx\n",
		(unsigned long)chan->xfer_dma);
	dev_dbg(hsotg->dev, "  xfer_len: %d\n", chan->xfer_len);
	dev_dbg(hsotg->dev, "  qh: %p\n", chan->qh);
	dev_dbg(hsotg->dev, "  NP inactive sched:\n");
	list_for_each_entry(qh, &hsotg->non_periodic_sched_inactive,
			    qh_list_entry)
		dev_dbg(hsotg->dev, "    %p\n", qh);
	dev_dbg(hsotg->dev, "  NP active sched:\n");
	list_for_each_entry(qh, &hsotg->non_periodic_sched_active,
			    qh_list_entry)
		dev_dbg(hsotg->dev, "    %p\n", qh);
	dev_dbg(hsotg->dev, "  Channels:\n");
	for (i = 0; i < num_channels; i++) {
		struct dwc2_host_chan *chan = hsotg->hc_ptr_array[i];

		dev_dbg(hsotg->dev, "    %2d: %p\n", i, chan);
	}
#endif /* VERBOSE_DEBUG */
}

/*
 * Processes all the URBs in a single list of QHs. Completes them with
 * -ETIMEDOUT and frees the QTD.
 *
 * Must be called with interrupt disabled and spinlock held
 */
static void dwc2_kill_urbs_in_qh_list(struct dwc2_hsotg *hsotg,
				      struct list_head *qh_list)
{
	struct dwc2_qh *qh, *qh_tmp;
	struct dwc2_qtd *qtd, *qtd_tmp;

	list_for_each_entry_safe(qh, qh_tmp, qh_list, qh_list_entry) {
		list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
					 qtd_list_entry) {
			dwc2_host_complete(hsotg, qtd, -ETIMEDOUT);
			dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
		}
	}
}

static void dwc2_qh_list_free(struct dwc2_hsotg *hsotg,
			      struct list_head *qh_list)
{
	struct dwc2_qtd *qtd, *qtd_tmp;
	struct dwc2_qh *qh, *qh_tmp;
	unsigned long flags;

	if (!qh_list->next)
		/* The list hasn't been initialized yet */
		return;

	spin_lock_irqsave(&hsotg->lock, flags);

	/* Ensure there are no QTDs or URBs left */
	dwc2_kill_urbs_in_qh_list(hsotg, qh_list);

	list_for_each_entry_safe(qh, qh_tmp, qh_list, qh_list_entry) {
		dwc2_hcd_qh_unlink(hsotg, qh);

		/* Free each QTD in the QH's QTD list */
		list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
					 qtd_list_entry)
			dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);

		spin_unlock_irqrestore(&hsotg->lock, flags);
		dwc2_hcd_qh_free(hsotg, qh);
		spin_lock_irqsave(&hsotg->lock, flags);
	}

	spin_unlock_irqrestore(&hsotg->lock, flags);
}

/*
 * Responds with an error status of -ETIMEDOUT to all URBs in the non-periodic
 * and periodic schedules. The QTD associated with each URB is removed from
 * the schedule and freed. This function may be called when a disconnect is
 * detected or when the HCD is being stopped.
 *
 * Must be called with interrupt disabled and spinlock held
 */
static void dwc2_kill_all_urbs(struct dwc2_hsotg *hsotg)
{
	dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->non_periodic_sched_inactive);
	dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->non_periodic_sched_active);
	dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_inactive);
	dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_ready);
	dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_assigned);
	dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_queued);
}

/**
 * dwc2_hcd_start() - Starts the HCD when switching to Host mode
 *
 * @hsotg: Pointer to struct dwc2_hsotg
 */
void dwc2_hcd_start(struct dwc2_hsotg *hsotg)
{
	u32 hprt0;

	if (hsotg->op_state == OTG_STATE_B_HOST) {
		/*
		 * Reset the port. During a HNP mode switch the reset
		 * needs to occur within 1ms and have a duration of at
		 * least 50ms.
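		 * (The start_work queued below is delayed by 50 ms, which
		 * appears intended to let that minimum reset duration elapse
		 * before host operation continues.)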
		 */
		hprt0 = dwc2_read_hprt0(hsotg);
		hprt0 |= HPRT0_RST;
		writel(hprt0, hsotg->regs + HPRT0);
	}

	queue_delayed_work(hsotg->wq_otg, &hsotg->start_work,
			   msecs_to_jiffies(50));
}

/* Must be called with interrupt disabled and spinlock held */
static void dwc2_hcd_cleanup_channels(struct dwc2_hsotg *hsotg)
{
	int num_channels = hsotg->core_params->host_channels;
	struct dwc2_host_chan *channel;
	u32 hcchar;
	int i;

	if (hsotg->core_params->dma_enable <= 0) {
		/* Flush out any channel requests in slave mode */
		for (i = 0; i < num_channels; i++) {
			channel = hsotg->hc_ptr_array[i];
			if (!list_empty(&channel->hc_list_entry))
				continue;
			hcchar = readl(hsotg->regs + HCCHAR(i));
			if (hcchar & HCCHAR_CHENA) {
				hcchar &= ~(HCCHAR_CHENA | HCCHAR_EPDIR);
				hcchar |= HCCHAR_CHDIS;
				writel(hcchar, hsotg->regs + HCCHAR(i));
			}
		}
	}

	for (i = 0; i < num_channels; i++) {
		channel = hsotg->hc_ptr_array[i];
		if (!list_empty(&channel->hc_list_entry))
			continue;
		hcchar = readl(hsotg->regs + HCCHAR(i));
		if (hcchar & HCCHAR_CHENA) {
			/* Halt the channel */
			hcchar |= HCCHAR_CHDIS;
			writel(hcchar, hsotg->regs + HCCHAR(i));
		}

		dwc2_hc_cleanup(hsotg, channel);
		list_add_tail(&channel->hc_list_entry, &hsotg->free_hc_list);
		/*
		 * Added for Descriptor DMA to prevent channel double cleanup in
		 * release_channel_ddma(), which is called from ep_disable when
		 * device disconnects
		 */
		channel->qh = NULL;
	}
}

/**
 * dwc2_hcd_disconnect() - Handles disconnect of the HCD
 *
 * @hsotg: Pointer to struct dwc2_hsotg
 *
 * Must be called with interrupt disabled and spinlock held
 */
void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg)
{
	u32 intr;

	/* Set status flags for the hub driver */
	hsotg->flags.b.port_connect_status_change = 1;
	hsotg->flags.b.port_connect_status = 0;

	/*
	 * Shutdown any transfers in process by clearing the Tx FIFO Empty
	 * interrupt mask and status bits and disabling subsequent host
	 * channel interrupts.
	 */
	intr = readl(hsotg->regs + GINTMSK);
	intr &= ~(GINTSTS_NPTXFEMP | GINTSTS_PTXFEMP | GINTSTS_HCHINT);
	writel(intr, hsotg->regs + GINTMSK);
	intr = GINTSTS_NPTXFEMP | GINTSTS_PTXFEMP | GINTSTS_HCHINT;
	writel(intr, hsotg->regs + GINTSTS);

	/*
	 * Turn off the vbus power only if the core has transitioned to device
	 * mode. If still in host mode, need to keep power on to detect a
	 * reconnection.
	 */
	if (dwc2_is_device_mode(hsotg)) {
		if (hsotg->op_state != OTG_STATE_A_SUSPEND) {
			dev_dbg(hsotg->dev, "Disconnect: PortPower off\n");
			writel(0, hsotg->regs + HPRT0);
		}

		dwc2_disable_host_interrupts(hsotg);
	}

	/* Respond with an error status to all URBs in the schedule */
	dwc2_kill_all_urbs(hsotg);

	if (dwc2_is_host_mode(hsotg))
		/* Clean up any host channels that were in use */
		dwc2_hcd_cleanup_channels(hsotg);

	dwc2_host_disconnect(hsotg);
}

/**
 * dwc2_hcd_rem_wakeup() - Handles Remote Wakeup
 *
 * @hsotg: Pointer to struct dwc2_hsotg
 */
static void dwc2_hcd_rem_wakeup(struct dwc2_hsotg *hsotg)
{
	if (hsotg->lx_state == DWC2_L2) {
		hsotg->flags.b.port_suspend_change = 1;
		usb_hcd_resume_root_hub(hsotg->priv);
	} else {
		hsotg->flags.b.port_l1_change = 1;
	}
}

/**
 * dwc2_hcd_stop() - Halts the DWC_otg host mode operations in a clean manner
 *
 * @hsotg: Pointer to struct dwc2_hsotg
 *
 * Must be called with interrupt disabled and spinlock held
 */
void dwc2_hcd_stop(struct dwc2_hsotg *hsotg)
{
	dev_dbg(hsotg->dev, "DWC OTG HCD STOP\n");

	/*
	 * The root hub should be disconnected before this function is called.
	 * The disconnect will clear the QTD lists (via ..._hcd_urb_dequeue)
	 * and the QH lists (via ..._hcd_endpoint_disable).
	 */

	/* Turn off all host-specific interrupts */
	dwc2_disable_host_interrupts(hsotg);

	/* Turn off the vbus power */
	dev_dbg(hsotg->dev, "PortPower off\n");
	writel(0, hsotg->regs + HPRT0);
}

static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg,
				struct dwc2_hcd_urb *urb, void **ep_handle,
				gfp_t mem_flags)
{
	struct dwc2_qtd *qtd;
	unsigned long flags;
	u32 intr_mask;
	int retval;
	int dev_speed;

	if (!hsotg->flags.b.port_connect_status) {
		/* No longer connected */
		dev_err(hsotg->dev, "Not connected\n");
		return -ENODEV;
	}

	dev_speed = dwc2_host_get_speed(hsotg, urb->priv);

	/* Some configurations cannot support LS traffic on a FS root port */
	if ((dev_speed == USB_SPEED_LOW) &&
	    (hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED) &&
	    (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI)) {
		u32 hprt0 = readl(hsotg->regs + HPRT0);
		u32 prtspd = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;

		if (prtspd == HPRT0_SPD_FULL_SPEED)
			return -ENODEV;
	}

	qtd = kzalloc(sizeof(*qtd), mem_flags);
	if (!qtd)
		return -ENOMEM;

	dwc2_hcd_qtd_init(qtd, urb);
	retval = dwc2_hcd_qtd_add(hsotg, qtd, (struct dwc2_qh **)ep_handle,
				  mem_flags);
	if (retval) {
		dev_err(hsotg->dev,
			"DWC OTG HCD URB Enqueue failed adding QTD. Error status %d\n",
			retval);
		kfree(qtd);
		return retval;
	}

	intr_mask = readl(hsotg->regs + GINTMSK);
	if (!(intr_mask & GINTSTS_SOF)) {
		enum dwc2_transaction_type tr_type;

		if (qtd->qh->ep_type == USB_ENDPOINT_XFER_BULK &&
		    !(qtd->urb->flags & URB_GIVEBACK_ASAP))
			/*
			 * Do not schedule SG transactions until qtd has
			 * URB_GIVEBACK_ASAP set
			 */
			return 0;

		spin_lock_irqsave(&hsotg->lock, flags);
		tr_type = dwc2_hcd_select_transactions(hsotg);
		if (tr_type != DWC2_TRANSACTION_NONE)
			dwc2_hcd_queue_transactions(hsotg, tr_type);
		spin_unlock_irqrestore(&hsotg->lock, flags);
	}

	return 0;
}

/* Must be called with interrupt disabled and spinlock held */
static int dwc2_hcd_urb_dequeue(struct dwc2_hsotg *hsotg,
				struct dwc2_hcd_urb *urb)
{
	struct dwc2_qh *qh;
	struct dwc2_qtd *urb_qtd;

	urb_qtd = urb->qtd;
	if (!urb_qtd) {
		dev_dbg(hsotg->dev, "## Urb QTD is NULL ##\n");
		return -EINVAL;
	}

	qh = urb_qtd->qh;
	if (!qh) {
		dev_dbg(hsotg->dev, "## Urb QTD QH is NULL ##\n");
		return -EINVAL;
	}

	urb->priv = NULL;

	if (urb_qtd->in_process && qh->channel) {
		dwc2_dump_channel_info(hsotg, qh->channel);

		/* The QTD is in process (it has been assigned to a channel) */
		if (hsotg->flags.b.port_connect_status)
			/*
			 * If still connected (i.e. in host mode), halt the
			 * channel so it can be used for other transfers. If
			 * no longer connected, the host registers can't be
			 * written to halt the channel since the core is in
			 * device mode.
			 */
			dwc2_hc_halt(hsotg, qh->channel,
				     DWC2_HC_XFER_URB_DEQUEUE);
	}

	/*
	 * Free the QTD and clean up the associated QH. Leave the QH in the
	 * schedule if it has any remaining QTDs.
	 */
	if (hsotg->core_params->dma_desc_enable <= 0) {
		u8 in_process = urb_qtd->in_process;

		dwc2_hcd_qtd_unlink_and_free(hsotg, urb_qtd, qh);
		if (in_process) {
			dwc2_hcd_qh_deactivate(hsotg, qh, 0);
			qh->channel = NULL;
		} else if (list_empty(&qh->qtd_list)) {
			dwc2_hcd_qh_unlink(hsotg, qh);
		}
	} else {
		dwc2_hcd_qtd_unlink_and_free(hsotg, urb_qtd, qh);
	}

	return 0;
}

/* Must NOT be called with interrupt disabled or spinlock held */
static int dwc2_hcd_endpoint_disable(struct dwc2_hsotg *hsotg,
				     struct usb_host_endpoint *ep, int retry)
{
	struct dwc2_qtd *qtd, *qtd_tmp;
	struct dwc2_qh *qh;
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&hsotg->lock, flags);

	qh = ep->hcpriv;
	if (!qh) {
		rc = -EINVAL;
		goto err;
	}

	while (!list_empty(&qh->qtd_list) && retry--) {
		if (retry == 0) {
			dev_err(hsotg->dev,
				"## timeout in dwc2_hcd_endpoint_disable() ##\n");
			rc = -EBUSY;
			goto err;
		}

		spin_unlock_irqrestore(&hsotg->lock, flags);
		usleep_range(20000, 40000);
		spin_lock_irqsave(&hsotg->lock, flags);
		qh = ep->hcpriv;
		if (!qh) {
			rc = -EINVAL;
			goto err;
		}
	}

	dwc2_hcd_qh_unlink(hsotg, qh);

	/* Free each QTD in the QH's QTD list */
	list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry)
		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);

	ep->hcpriv = NULL;
	spin_unlock_irqrestore(&hsotg->lock, flags);
	dwc2_hcd_qh_free(hsotg, qh);

	return 0;

err:
	ep->hcpriv = NULL;
	spin_unlock_irqrestore(&hsotg->lock, flags);

	return rc;
}

/* Must be called with interrupt disabled and spinlock held */
static int dwc2_hcd_endpoint_reset(struct dwc2_hsotg *hsotg,
				   struct usb_host_endpoint *ep)
{
	struct dwc2_qh *qh = ep->hcpriv;

	if (!qh)
		return -EINVAL;

	qh->data_toggle = DWC2_HC_PID_DATA0;

	return 0;
}

/*
 * Initializes dynamic portions of the DWC_otg HCD state
 *
 * Must be called with interrupt disabled and spinlock held
 */
static void dwc2_hcd_reinit(struct dwc2_hsotg *hsotg)
{
	struct dwc2_host_chan *chan, *chan_tmp;
	int num_channels;
	int i;

	hsotg->flags.d32 = 0;
	hsotg->non_periodic_qh_ptr = &hsotg->non_periodic_sched_active;

	if (hsotg->core_params->uframe_sched > 0) {
		hsotg->available_host_channels =
			hsotg->core_params->host_channels;
	} else {
		hsotg->non_periodic_channels = 0;
		hsotg->periodic_channels = 0;
	}

	/*
	 * Put all channels in the free channel list and clean up channel
	 * states
	 */
	list_for_each_entry_safe(chan, chan_tmp, &hsotg->free_hc_list,
				 hc_list_entry)
		list_del_init(&chan->hc_list_entry);

	num_channels = hsotg->core_params->host_channels;
	for (i = 0; i < num_channels; i++) {
		chan = hsotg->hc_ptr_array[i];
		list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
		dwc2_hc_cleanup(hsotg, chan);
	}

	/* Initialize the DWC core for host mode operation */
	dwc2_core_host_init(hsotg);
}

static void dwc2_hc_init_split(struct dwc2_hsotg *hsotg,
			       struct dwc2_host_chan *chan,
			       struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb)
{
	int hub_addr, hub_port;

	chan->do_split = 1;
	chan->xact_pos = qtd->isoc_split_pos;
	chan->complete_split = qtd->complete_split;
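	/*
	 * For a split transaction the request is routed through the
	 * transaction translator of the USB 2.0 hub in front of the FS/LS
	 * device; dwc2_host_hub_info() reports that hub's address and the
	 * port the device hangs off of.
	 */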
	dwc2_host_hub_info(hsotg, urb->priv, &hub_addr, &hub_port);
	chan->hub_addr = (u8)hub_addr;
	chan->hub_port = (u8)hub_port;
}

static void *dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
			       struct dwc2_host_chan *chan,
			       struct dwc2_qtd *qtd, void *bufptr)
{
	struct dwc2_hcd_urb *urb = qtd->urb;
	struct dwc2_hcd_iso_packet_desc *frame_desc;

	switch (dwc2_hcd_get_pipe_type(&urb->pipe_info)) {
	case USB_ENDPOINT_XFER_CONTROL:
		chan->ep_type = USB_ENDPOINT_XFER_CONTROL;

		switch (qtd->control_phase) {
		case DWC2_CONTROL_SETUP:
			dev_vdbg(hsotg->dev, "  Control setup transaction\n");
			chan->do_ping = 0;
			chan->ep_is_in = 0;
			chan->data_pid_start = DWC2_HC_PID_SETUP;
			if (hsotg->core_params->dma_enable > 0)
				chan->xfer_dma = urb->setup_dma;
			else
				chan->xfer_buf = urb->setup_packet;
			chan->xfer_len = 8;
			bufptr = NULL;
			break;

		case DWC2_CONTROL_DATA:
			dev_vdbg(hsotg->dev, "  Control data transaction\n");
			chan->data_pid_start = qtd->data_toggle;
			break;

		case DWC2_CONTROL_STATUS:
			/*
			 * Direction is opposite of data direction or IN if no
			 * data
			 */
			dev_vdbg(hsotg->dev, "  Control status transaction\n");
			if (urb->length == 0)
				chan->ep_is_in = 1;
			else
				chan->ep_is_in =
					dwc2_hcd_is_pipe_out(&urb->pipe_info);
			if (chan->ep_is_in)
				chan->do_ping = 0;
			chan->data_pid_start = DWC2_HC_PID_DATA1;
			chan->xfer_len = 0;
			if (hsotg->core_params->dma_enable > 0)
				chan->xfer_dma = hsotg->status_buf_dma;
			else
				chan->xfer_buf = hsotg->status_buf;
			bufptr = NULL;
			break;
		}
		break;

	case USB_ENDPOINT_XFER_BULK:
		chan->ep_type = USB_ENDPOINT_XFER_BULK;
		break;

	case USB_ENDPOINT_XFER_INT:
		chan->ep_type = USB_ENDPOINT_XFER_INT;
		break;

	case USB_ENDPOINT_XFER_ISOC:
		chan->ep_type = USB_ENDPOINT_XFER_ISOC;
		if (hsotg->core_params->dma_desc_enable > 0)
			break;

		frame_desc = &urb->iso_descs[qtd->isoc_frame_index];
		frame_desc->status = 0;

		if (hsotg->core_params->dma_enable > 0) {
			chan->xfer_dma = urb->dma;
			chan->xfer_dma += frame_desc->offset +
					qtd->isoc_split_offset;
		} else {
			chan->xfer_buf = urb->buf;
			chan->xfer_buf += frame_desc->offset +
					qtd->isoc_split_offset;
		}

		chan->xfer_len = frame_desc->length - qtd->isoc_split_offset;

		/* For non-dword aligned buffers */
		if (hsotg->core_params->dma_enable > 0 &&
		    (chan->xfer_dma & 0x3))
			bufptr = (u8 *)urb->buf + frame_desc->offset +
					qtd->isoc_split_offset;
		else
			bufptr = NULL;

		if (chan->xact_pos == DWC2_HCSPLT_XACTPOS_ALL) {
			if (chan->xfer_len <= 188)
				chan->xact_pos = DWC2_HCSPLT_XACTPOS_ALL;
			else
				chan->xact_pos = DWC2_HCSPLT_XACTPOS_BEGIN;
		}
		break;
	}

	return bufptr;
}

static int dwc2_hc_setup_align_buf(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
				   struct dwc2_host_chan *chan,
				   struct dwc2_hcd_urb *urb, void *bufptr)
{
	u32 buf_size;
	struct urb *usb_urb;
	struct usb_hcd *hcd;

	if (!qh->dw_align_buf) {
		if (chan->ep_type != USB_ENDPOINT_XFER_ISOC)
			buf_size = hsotg->core_params->max_transfer_size;
		else
			/* 3072 = 3 max-size Isoc packets */
			buf_size = 3072;

		qh->dw_align_buf = dma_alloc_coherent(hsotg->dev, buf_size,
						      &qh->dw_align_buf_dma,
						      GFP_ATOMIC);
		if (!qh->dw_align_buf)
			return -ENOMEM;
		qh->dw_align_buf_size = buf_size;
	}

	if (chan->xfer_len) {
		dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n",
			 __func__);
		usb_urb = urb->priv;

		if (usb_urb) {
			if (usb_urb->transfer_flags &
			    (URB_SETUP_MAP_SINGLE | URB_DMA_MAP_SG |
			     URB_DMA_MAP_PAGE | URB_DMA_MAP_SINGLE)) {
				hcd = dwc2_hsotg_to_hcd(hsotg);
				usb_hcd_unmap_urb_for_dma(hcd, usb_urb);
			}
			if (!chan->ep_is_in)
				memcpy(qh->dw_align_buf, bufptr,
				       chan->xfer_len);
		} else {
			dev_warn(hsotg->dev, "no URB in dwc2_urb\n");
		}
	}

	chan->align_buf = qh->dw_align_buf_dma;
	return 0;
}

/**
 * dwc2_assign_and_init_hc() - Assigns transactions from a QTD to a free host
 * channel and initializes the host channel to perform the transactions. The
 * host channel is removed from the free list.
 *
 * @hsotg: The HCD state structure
 * @qh: Transactions from the first QTD for this QH are selected and assigned
 *      to a free host channel
 */
static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	struct dwc2_host_chan *chan;
	struct dwc2_hcd_urb *urb;
	struct dwc2_qtd *qtd;
	void *bufptr = NULL;

	if (dbg_qh(qh))
		dev_vdbg(hsotg->dev, "%s(%p,%p)\n", __func__, hsotg, qh);

	if (list_empty(&qh->qtd_list)) {
		dev_dbg(hsotg->dev, "No QTDs in QH list\n");
		return -ENOMEM;
	}

	if (list_empty(&hsotg->free_hc_list)) {
		dev_dbg(hsotg->dev, "No free channel to assign\n");
		return -ENOMEM;
	}

	chan = list_first_entry(&hsotg->free_hc_list, struct dwc2_host_chan,
				hc_list_entry);

	/* Remove host channel from free list */
	list_del_init(&chan->hc_list_entry);

	qtd = list_first_entry(&qh->qtd_list, struct dwc2_qtd, qtd_list_entry);
	urb = qtd->urb;
	qh->channel = chan;
	qtd->in_process = 1;

	/*
	 * Use usb_pipedevice to determine device address. This address is
	 * 0 before the SET_ADDRESS command and the correct address afterward.
	 */
	chan->dev_addr = dwc2_hcd_get_dev_addr(&urb->pipe_info);
	chan->ep_num = dwc2_hcd_get_ep_num(&urb->pipe_info);
	chan->speed = qh->dev_speed;
	chan->max_packet = dwc2_max_packet(qh->maxp);

	chan->xfer_started = 0;
	chan->halt_status = DWC2_HC_XFER_NO_HALT_STATUS;
	chan->error_state = (qtd->error_count > 0);
	chan->halt_on_queue = 0;
	chan->halt_pending = 0;
	chan->requests = 0;

	/*
	 * The following values may be modified in the transfer type section
	 * below. The xfer_len value may be reduced when the transfer is
	 * started to accommodate the max widths of the XferSize and PktCnt
	 * fields in the HCTSIZn register.
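	 * (In this driver those field limits correspond to the core's
	 * max_transfer_size and max_packet_count parameters.)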
	 */

	chan->ep_is_in = (dwc2_hcd_is_pipe_in(&urb->pipe_info) != 0);
	if (chan->ep_is_in)
		chan->do_ping = 0;
	else
		chan->do_ping = qh->ping_state;

	chan->data_pid_start = qh->data_toggle;
	chan->multi_count = 1;

	if (urb->actual_length > urb->length &&
	    !dwc2_hcd_is_pipe_in(&urb->pipe_info))
		urb->actual_length = urb->length;

	if (hsotg->core_params->dma_enable > 0) {
		chan->xfer_dma = urb->dma + urb->actual_length;

		/* For non-dword aligned case */
		if (hsotg->core_params->dma_desc_enable <= 0 &&
		    (chan->xfer_dma & 0x3))
			bufptr = (u8 *)urb->buf + urb->actual_length;
	} else {
		chan->xfer_buf = (u8 *)urb->buf + urb->actual_length;
	}

	chan->xfer_len = urb->length - urb->actual_length;
	chan->xfer_count = 0;

	/* Set the split attributes if required */
	if (qh->do_split)
		dwc2_hc_init_split(hsotg, chan, qtd, urb);
	else
		chan->do_split = 0;

	/* Set the transfer attributes */
	bufptr = dwc2_hc_init_xfer(hsotg, chan, qtd, bufptr);

	/* Non DWORD-aligned buffer case */
	if (bufptr) {
		dev_vdbg(hsotg->dev, "Non-aligned buffer\n");
		if (dwc2_hc_setup_align_buf(hsotg, qh, chan, urb, bufptr)) {
			dev_err(hsotg->dev,
				"%s: Failed to allocate memory to handle non-dword aligned buffer\n",
				__func__);
			/* Add channel back to free list */
			chan->align_buf = 0;
			chan->multi_count = 0;
			list_add_tail(&chan->hc_list_entry,
				      &hsotg->free_hc_list);
			qtd->in_process = 0;
			qh->channel = NULL;
			return -ENOMEM;
		}
	} else {
		chan->align_buf = 0;
	}

	if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
	    chan->ep_type == USB_ENDPOINT_XFER_ISOC)
		/*
		 * This value may be modified when the transfer is started
		 * to reflect the actual transfer length
		 */
		chan->multi_count = dwc2_hb_mult(qh->maxp);

	if (hsotg->core_params->dma_desc_enable > 0)
		chan->desc_list_addr = qh->desc_list_dma;

	dwc2_hc_init(hsotg, chan);
	chan->qh = qh;

	return 0;
}

/**
 * dwc2_hcd_select_transactions() - Selects transactions from the HCD transfer
 * schedule and assigns them to available host channels. Called from the HCD
 * interrupt handler functions.
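 *
 * QHs on the periodic ready list are given channels first; any free channels
 * that remain (less those reserved for periodic traffic when the microframe
 * scheduler is disabled) are then offered to QHs on the non-periodic
 * inactive list.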
 *
 * @hsotg: The HCD state structure
 *
 * Return: The types of new transactions that were assigned to host channels
 */
enum dwc2_transaction_type dwc2_hcd_select_transactions(
		struct dwc2_hsotg *hsotg)
{
	enum dwc2_transaction_type ret_val = DWC2_TRANSACTION_NONE;
	struct list_head *qh_ptr;
	struct dwc2_qh *qh;
	int num_channels;

#ifdef DWC2_DEBUG_SOF
	dev_vdbg(hsotg->dev, "  Select Transactions\n");
#endif

	/* Process entries in the periodic ready list */
	qh_ptr = hsotg->periodic_sched_ready.next;
	while (qh_ptr != &hsotg->periodic_sched_ready) {
		if (list_empty(&hsotg->free_hc_list))
			break;
		if (hsotg->core_params->uframe_sched > 0) {
			if (hsotg->available_host_channels <= 1)
				break;
			hsotg->available_host_channels--;
		}
		qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
		if (dwc2_assign_and_init_hc(hsotg, qh))
			break;

		/*
		 * Move the QH from the periodic ready schedule to the
		 * periodic assigned schedule
		 */
		qh_ptr = qh_ptr->next;
		list_move(&qh->qh_list_entry, &hsotg->periodic_sched_assigned);
		ret_val = DWC2_TRANSACTION_PERIODIC;
	}

	/*
	 * Process entries in the inactive portion of the non-periodic
	 * schedule. Some free host channels may not be used if they are
	 * reserved for periodic transfers.
	 */
	num_channels = hsotg->core_params->host_channels;
	qh_ptr = hsotg->non_periodic_sched_inactive.next;
	while (qh_ptr != &hsotg->non_periodic_sched_inactive) {
		if (hsotg->core_params->uframe_sched <= 0 &&
		    hsotg->non_periodic_channels >= num_channels -
						hsotg->periodic_channels)
			break;
		if (list_empty(&hsotg->free_hc_list))
			break;
		qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
		if (hsotg->core_params->uframe_sched > 0) {
			if (hsotg->available_host_channels < 1)
				break;
			hsotg->available_host_channels--;
		}

		if (dwc2_assign_and_init_hc(hsotg, qh))
			break;

		/*
		 * Move the QH from the non-periodic inactive schedule to the
		 * non-periodic active schedule
		 */
		qh_ptr = qh_ptr->next;
		list_move(&qh->qh_list_entry,
			  &hsotg->non_periodic_sched_active);

		if (ret_val == DWC2_TRANSACTION_NONE)
			ret_val = DWC2_TRANSACTION_NON_PERIODIC;
		else
			ret_val = DWC2_TRANSACTION_ALL;

		if (hsotg->core_params->uframe_sched <= 0)
			hsotg->non_periodic_channels++;
	}

	return ret_val;
}

/**
 * dwc2_queue_transaction() - Attempts to queue a single transaction request for
 * a host channel associated with either a periodic or non-periodic transfer
 *
 * @hsotg: The HCD state structure
 * @chan: Host channel descriptor associated with either a periodic or
 *        non-periodic transfer
 * @fifo_dwords_avail: Number of DWORDs available in the periodic Tx FIFO
 *                     for periodic transfers or the non-periodic Tx FIFO
 *                     for non-periodic transfers
 *
 * Return: 1 if a request is queued and more requests may be needed to
 * complete the transfer, 0 if no more requests are required for this
 * transfer, -1 if there is insufficient space in the Tx FIFO
 *
 * This function assumes that there is space available in the appropriate
 * request queue. For an OUT transfer or SETUP transaction in Slave mode,
 * it checks whether space is available in the appropriate Tx FIFO.
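 *
 * For example, in Slave mode an OUT endpoint with max_packet = 512 needs at
 * least 512 / 4 = 128 free DWORDs reported in fifo_dwords_avail before a
 * request is queued; with less than that, -1 is returned so the caller can
 * enable the Tx FIFO empty interrupt and try again later.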
 *
 * Must be called with interrupt disabled and spinlock held
 */
static int dwc2_queue_transaction(struct dwc2_hsotg *hsotg,
				  struct dwc2_host_chan *chan,
				  u16 fifo_dwords_avail)
{
	int retval = 0;

	if (hsotg->core_params->dma_enable > 0) {
		if (hsotg->core_params->dma_desc_enable > 0) {
			if (!chan->xfer_started ||
			    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
				dwc2_hcd_start_xfer_ddma(hsotg, chan->qh);
				chan->qh->ping_state = 0;
			}
		} else if (!chan->xfer_started) {
			dwc2_hc_start_transfer(hsotg, chan);
			chan->qh->ping_state = 0;
		}
	} else if (chan->halt_pending) {
		/* Don't queue a request if the channel has been halted */
	} else if (chan->halt_on_queue) {
		dwc2_hc_halt(hsotg, chan, chan->halt_status);
	} else if (chan->do_ping) {
		if (!chan->xfer_started)
			dwc2_hc_start_transfer(hsotg, chan);
	} else if (!chan->ep_is_in ||
		   chan->data_pid_start == DWC2_HC_PID_SETUP) {
		if ((fifo_dwords_avail * 4) >= chan->max_packet) {
			if (!chan->xfer_started) {
				dwc2_hc_start_transfer(hsotg, chan);
				retval = 1;
			} else {
				retval = dwc2_hc_continue_transfer(hsotg, chan);
			}
		} else {
			retval = -1;
		}
	} else {
		if (!chan->xfer_started) {
			dwc2_hc_start_transfer(hsotg, chan);
			retval = 1;
		} else {
			retval = dwc2_hc_continue_transfer(hsotg, chan);
		}
	}

	return retval;
}

/*
 * Processes periodic channels for the next frame and queues transactions for
 * these channels to the DWC_otg controller. After queueing transactions, the
 * Periodic Tx FIFO Empty interrupt is enabled if there are more transactions
 * to queue as Periodic Tx FIFO or request queue space becomes available.
 * Otherwise, the Periodic Tx FIFO Empty interrupt is disabled.
 *
 * Must be called with interrupt disabled and spinlock held
 */
static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg)
{
	struct list_head *qh_ptr;
	struct dwc2_qh *qh;
	u32 tx_status;
	u32 fspcavail;
	u32 gintmsk;
	int status;
	int no_queue_space = 0;
	int no_fifo_space = 0;
	u32 qspcavail;

	if (dbg_perio())
		dev_vdbg(hsotg->dev, "Queue periodic transactions\n");

	tx_status = readl(hsotg->regs + HPTXSTS);
	qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
		    TXSTS_QSPCAVAIL_SHIFT;
	fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
		    TXSTS_FSPCAVAIL_SHIFT;

	if (dbg_perio()) {
		dev_vdbg(hsotg->dev, "  P Tx Req Queue Space Avail (before queue): %d\n",
			 qspcavail);
		dev_vdbg(hsotg->dev, "  P Tx FIFO Space Avail (before queue): %d\n",
			 fspcavail);
	}

	qh_ptr = hsotg->periodic_sched_assigned.next;
	while (qh_ptr != &hsotg->periodic_sched_assigned) {
		tx_status = readl(hsotg->regs + HPTXSTS);
		qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
			    TXSTS_QSPCAVAIL_SHIFT;
		if (qspcavail == 0) {
			no_queue_space = 1;
			break;
		}

		qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
		if (!qh->channel) {
			qh_ptr = qh_ptr->next;
			continue;
		}

		/* Make sure EP's TT buffer is clean before queueing qtds */
		if (qh->tt_buffer_dirty) {
			qh_ptr = qh_ptr->next;
			continue;
		}

		/*
		 * Set a flag if we're queuing high-bandwidth in slave mode.
		 * The flag prevents any halts from getting into the request
		 * queue while multiple high-bandwidth packets are being
		 * queued.
		 */
		if (hsotg->core_params->dma_enable <= 0 &&
		    qh->channel->multi_count > 1)
			hsotg->queuing_high_bandwidth = 1;

		fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
			    TXSTS_FSPCAVAIL_SHIFT;
		status = dwc2_queue_transaction(hsotg, qh->channel, fspcavail);
		if (status < 0) {
			no_fifo_space = 1;
			break;
		}

		/*
		 * In Slave mode, stay on the current transfer until there is
		 * nothing more to do or the high-bandwidth request count is
		 * reached. In DMA mode, only need to queue one request. The
		 * controller automatically handles multiple packets for
		 * high-bandwidth transfers.
		 */
		if (hsotg->core_params->dma_enable > 0 || status == 0 ||
		    qh->channel->requests == qh->channel->multi_count) {
			qh_ptr = qh_ptr->next;
			/*
			 * Move the QH from the periodic assigned schedule to
			 * the periodic queued schedule
			 */
			list_move(&qh->qh_list_entry,
				  &hsotg->periodic_sched_queued);

			/* done queuing high bandwidth */
			hsotg->queuing_high_bandwidth = 0;
		}
	}

	if (hsotg->core_params->dma_enable <= 0) {
		tx_status = readl(hsotg->regs + HPTXSTS);
		qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
			    TXSTS_QSPCAVAIL_SHIFT;
		fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
			    TXSTS_FSPCAVAIL_SHIFT;
		if (dbg_perio()) {
			dev_vdbg(hsotg->dev,
				 "  P Tx Req Queue Space Avail (after queue): %d\n",
				 qspcavail);
			dev_vdbg(hsotg->dev,
				 "  P Tx FIFO Space Avail (after queue): %d\n",
				 fspcavail);
		}

		if (!list_empty(&hsotg->periodic_sched_assigned) ||
		    no_queue_space || no_fifo_space) {
			/*
			 * May need to queue more transactions as the request
			 * queue or Tx FIFO empties. Enable the periodic Tx
			 * FIFO empty interrupt. (Always use the half-empty
			 * level to ensure that new requests are loaded as
			 * soon as possible.)
			 */
			gintmsk = readl(hsotg->regs + GINTMSK);
			gintmsk |= GINTSTS_PTXFEMP;
			writel(gintmsk, hsotg->regs + GINTMSK);
		} else {
			/*
			 * Disable the Tx FIFO empty interrupt since there are
			 * no more transactions that need to be queued right
			 * now. This function is called from interrupt
			 * handlers to queue more transactions as transfer
			 * states change.
			 */
			gintmsk = readl(hsotg->regs + GINTMSK);
			gintmsk &= ~GINTSTS_PTXFEMP;
			writel(gintmsk, hsotg->regs + GINTMSK);
		}
	}
}

/*
 * Processes active non-periodic channels and queues transactions for these
 * channels to the DWC_otg controller. After queueing transactions, the NP Tx
 * FIFO Empty interrupt is enabled if there are more transactions to queue as
 * NP Tx FIFO or request queue space becomes available. Otherwise, the NP Tx
 * FIFO Empty interrupt is disabled.
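 *
 * The active list is walked round-robin, picking up where the previous pass
 * left off (hsotg->non_periodic_qh_ptr), so a single endpoint cannot
 * monopolize the non-periodic request queue.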
 *
 * Must be called with interrupt disabled and spinlock held
 */
static void dwc2_process_non_periodic_channels(struct dwc2_hsotg *hsotg)
{
	struct list_head *orig_qh_ptr;
	struct dwc2_qh *qh;
	u32 tx_status;
	u32 qspcavail;
	u32 fspcavail;
	u32 gintmsk;
	int status;
	int no_queue_space = 0;
	int no_fifo_space = 0;
	int more_to_do = 0;

	dev_vdbg(hsotg->dev, "Queue non-periodic transactions\n");

	tx_status = readl(hsotg->regs + GNPTXSTS);
	qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
		    TXSTS_QSPCAVAIL_SHIFT;
	fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
		    TXSTS_FSPCAVAIL_SHIFT;
	dev_vdbg(hsotg->dev, "  NP Tx Req Queue Space Avail (before queue): %d\n",
		 qspcavail);
	dev_vdbg(hsotg->dev, "  NP Tx FIFO Space Avail (before queue): %d\n",
		 fspcavail);

	/*
	 * Keep track of the starting point. Skip over the start-of-list
	 * entry.
	 */
	if (hsotg->non_periodic_qh_ptr == &hsotg->non_periodic_sched_active)
		hsotg->non_periodic_qh_ptr = hsotg->non_periodic_qh_ptr->next;
	orig_qh_ptr = hsotg->non_periodic_qh_ptr;

	/*
	 * Process once through the active list or until no more space is
	 * available in the request queue or the Tx FIFO
	 */
	do {
		tx_status = readl(hsotg->regs + GNPTXSTS);
		qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
			    TXSTS_QSPCAVAIL_SHIFT;
		if (hsotg->core_params->dma_enable <= 0 && qspcavail == 0) {
			no_queue_space = 1;
			break;
		}

		qh = list_entry(hsotg->non_periodic_qh_ptr, struct dwc2_qh,
				qh_list_entry);
		if (!qh->channel)
			goto next;

		/* Make sure EP's TT buffer is clean before queueing qtds */
		if (qh->tt_buffer_dirty)
			goto next;

		fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
			    TXSTS_FSPCAVAIL_SHIFT;
		status = dwc2_queue_transaction(hsotg, qh->channel, fspcavail);

		if (status > 0) {
			more_to_do = 1;
		} else if (status < 0) {
			no_fifo_space = 1;
			break;
		}
next:
		/* Advance to next QH, skipping start-of-list entry */
		hsotg->non_periodic_qh_ptr = hsotg->non_periodic_qh_ptr->next;
		if (hsotg->non_periodic_qh_ptr ==
				&hsotg->non_periodic_sched_active)
			hsotg->non_periodic_qh_ptr =
					hsotg->non_periodic_qh_ptr->next;
	} while (hsotg->non_periodic_qh_ptr != orig_qh_ptr);

	if (hsotg->core_params->dma_enable <= 0) {
		tx_status = readl(hsotg->regs + GNPTXSTS);
		qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
			    TXSTS_QSPCAVAIL_SHIFT;
		fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
			    TXSTS_FSPCAVAIL_SHIFT;
		dev_vdbg(hsotg->dev,
			 "  NP Tx Req Queue Space Avail (after queue): %d\n",
			 qspcavail);
		dev_vdbg(hsotg->dev,
			 "  NP Tx FIFO Space Avail (after queue): %d\n",
			 fspcavail);

		if (more_to_do || no_queue_space || no_fifo_space) {
			/*
			 * May need to queue more transactions as the request
			 * queue or Tx FIFO empties. Enable the non-periodic
			 * Tx FIFO empty interrupt. (Always use the half-empty
			 * level to ensure that new requests are loaded as
			 * soon as possible.)
			 */
			gintmsk = readl(hsotg->regs + GINTMSK);
			gintmsk |= GINTSTS_NPTXFEMP;
			writel(gintmsk, hsotg->regs + GINTMSK);
		} else {
			/*
			 * Disable the Tx FIFO empty interrupt since there are
			 * no more transactions that need to be queued right
			 * now. This function is called from interrupt
			 * handlers to queue more transactions as transfer
			 * states change.
			 */
			gintmsk = readl(hsotg->regs + GINTMSK);
			gintmsk &= ~GINTSTS_NPTXFEMP;
			writel(gintmsk, hsotg->regs + GINTMSK);
		}
	}
}

/**
 * dwc2_hcd_queue_transactions() - Processes the currently active host channels
 * and queues transactions for these channels to the DWC_otg controller. Called
 * from the HCD interrupt handler functions.
 *
 * @hsotg: The HCD state structure
 * @tr_type: The type(s) of transactions to queue (non-periodic, periodic,
 *           or both)
 *
 * Must be called with interrupt disabled and spinlock held
 */
void dwc2_hcd_queue_transactions(struct dwc2_hsotg *hsotg,
				 enum dwc2_transaction_type tr_type)
{
#ifdef DWC2_DEBUG_SOF
	dev_vdbg(hsotg->dev, "Queue Transactions\n");
#endif
	/* Process host channels associated with periodic transfers */
	if ((tr_type == DWC2_TRANSACTION_PERIODIC ||
	     tr_type == DWC2_TRANSACTION_ALL) &&
	    !list_empty(&hsotg->periodic_sched_assigned))
		dwc2_process_periodic_channels(hsotg);

	/* Process host channels associated with non-periodic transfers */
	if (tr_type == DWC2_TRANSACTION_NON_PERIODIC ||
	    tr_type == DWC2_TRANSACTION_ALL) {
		if (!list_empty(&hsotg->non_periodic_sched_active)) {
			dwc2_process_non_periodic_channels(hsotg);
		} else {
			/*
			 * Ensure NP Tx FIFO empty interrupt is disabled when
			 * there are no non-periodic transfers to process
			 */
			u32 gintmsk = readl(hsotg->regs + GINTMSK);

			gintmsk &= ~GINTSTS_NPTXFEMP;
			writel(gintmsk, hsotg->regs + GINTMSK);
		}
	}
}

static void dwc2_conn_id_status_change(struct work_struct *work)
{
	struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg,
						wf_otg);
	u32 count = 0;
	u32 gotgctl;

	dev_dbg(hsotg->dev, "%s()\n", __func__);

	gotgctl = readl(hsotg->regs + GOTGCTL);
	dev_dbg(hsotg->dev, "gotgctl=%0x\n", gotgctl);
	dev_dbg(hsotg->dev, "gotgctl.b.conidsts=%d\n",
		!!(gotgctl & GOTGCTL_CONID_B));

	/* B-Device connector (Device Mode) */
	if (gotgctl & GOTGCTL_CONID_B) {
		/* Wait for switch to device mode */
		dev_dbg(hsotg->dev, "connId B\n");
		while (!dwc2_is_device_mode(hsotg)) {
			dev_info(hsotg->dev,
				 "Waiting for Peripheral Mode, Mode=%s\n",
				 dwc2_is_host_mode(hsotg) ? "Host" :
				 "Peripheral");
			usleep_range(20000, 40000);
			if (++count > 250)
				break;
		}
		if (count > 250)
			dev_err(hsotg->dev,
				"Connection id status change timed out\n");
		hsotg->op_state = OTG_STATE_B_PERIPHERAL;
		dwc2_core_init(hsotg, false, -1);
		dwc2_enable_global_interrupts(hsotg);
		s3c_hsotg_core_init_disconnected(hsotg, false);
		s3c_hsotg_core_connect(hsotg);
	} else {
		/* A-Device connector (Host Mode) */
		dev_dbg(hsotg->dev, "connId A\n");
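		/*
		 * As in the B-Device case above, poll every 20-40 ms for the
		 * core to report the expected mode; the 250-iteration limit
		 * bounds the wait to roughly 5-10 seconds before the change
		 * is reported as timed out.
		 */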
		while (!dwc2_is_host_mode(hsotg)) {
			dev_info(hsotg->dev, "Waiting for Host Mode, Mode=%s\n",
				 dwc2_is_host_mode(hsotg) ?
				 "Host" : "Peripheral");
			usleep_range(20000, 40000);
			if (++count > 250)
				break;
		}
		if (count > 250)
			dev_err(hsotg->dev,
				"Connection id status change timed out\n");
		hsotg->op_state = OTG_STATE_A_HOST;

		/* Initialize the Core for Host mode */
		dwc2_core_init(hsotg, false, -1);
		dwc2_enable_global_interrupts(hsotg);
		dwc2_hcd_start(hsotg);
	}
}

static void dwc2_wakeup_detected(unsigned long data)
{
	struct dwc2_hsotg *hsotg = (struct dwc2_hsotg *)data;
	u32 hprt0;

	dev_dbg(hsotg->dev, "%s()\n", __func__);

	/*
	 * Clear the Resume after 70ms. (Need 20 ms minimum. Use 70 ms
	 * so that OPT tests pass with all PHYs.)
	 */
	hprt0 = dwc2_read_hprt0(hsotg);
	dev_dbg(hsotg->dev, "Resume: HPRT0=%0x\n", hprt0);
	hprt0 &= ~HPRT0_RES;
	writel(hprt0, hsotg->regs + HPRT0);
	dev_dbg(hsotg->dev, "Clear Resume: HPRT0=%0x\n",
		readl(hsotg->regs + HPRT0));

	dwc2_hcd_rem_wakeup(hsotg);

	/* Change to L0 state */
	hsotg->lx_state = DWC2_L0;
}

static int dwc2_host_is_b_hnp_enabled(struct dwc2_hsotg *hsotg)
{
	struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);

	return hcd->self.b_hnp_enable;
}

/* Must NOT be called with interrupt disabled or spinlock held */
static void dwc2_port_suspend(struct dwc2_hsotg *hsotg, u16 windex)
{
	unsigned long flags;
	u32 hprt0;
	u32 pcgctl;
	u32 gotgctl;

	dev_dbg(hsotg->dev, "%s()\n", __func__);

	spin_lock_irqsave(&hsotg->lock, flags);

	if (windex == hsotg->otg_port && dwc2_host_is_b_hnp_enabled(hsotg)) {
		gotgctl = readl(hsotg->regs + GOTGCTL);
		gotgctl |= GOTGCTL_HSTSETHNPEN;
		writel(gotgctl, hsotg->regs + GOTGCTL);
		hsotg->op_state = OTG_STATE_A_SUSPEND;
	}

	hprt0 = dwc2_read_hprt0(hsotg);
	hprt0 |= HPRT0_SUSP;
	writel(hprt0, hsotg->regs + HPRT0);

	/* Update lx_state */
	hsotg->lx_state = DWC2_L2;

	/* Suspend the Phy Clock */
	pcgctl = readl(hsotg->regs + PCGCTL);
	pcgctl |= PCGCTL_STOPPCLK;
	writel(pcgctl, hsotg->regs + PCGCTL);
	udelay(10);

	/* For HNP the bus must be suspended for at least 200ms */
	if (dwc2_host_is_b_hnp_enabled(hsotg)) {
		pcgctl = readl(hsotg->regs + PCGCTL);
		pcgctl &= ~PCGCTL_STOPPCLK;
		writel(pcgctl, hsotg->regs + PCGCTL);

		spin_unlock_irqrestore(&hsotg->lock, flags);

		usleep_range(200000, 250000);
	} else {
		spin_unlock_irqrestore(&hsotg->lock, flags);
	}
}

/* Handles hub class-specific requests */
static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
				u16 wvalue, u16 windex, char *buf, u16 wlength)
{
	struct usb_hub_descriptor *hub_desc;
	int retval = 0;
	u32 hprt0;
	u32 port_status;
	u32 speed;
	u32 pcgctl;

	switch (typereq) {
	case ClearHubFeature:
		dev_dbg(hsotg->dev, "ClearHubFeature %1xh\n", wvalue);

		switch (wvalue) {
		case C_HUB_LOCAL_POWER:
		case C_HUB_OVER_CURRENT:
			/* Nothing required here */
			break;

		default:
			retval = -EINVAL;
			dev_err(hsotg->dev,
				"ClearHubFeature request %1xh unknown\n",
				wvalue);
		}
		break;

	case ClearPortFeature:
		if (wvalue != USB_PORT_FEAT_L1)
			if (!windex || windex > 1)
				goto error;
		switch (wvalue) {
		case USB_PORT_FEAT_ENABLE:
			dev_dbg(hsotg->dev,
				"ClearPortFeature USB_PORT_FEAT_ENABLE\n");
			hprt0 = dwc2_read_hprt0(hsotg);
			hprt0 |= HPRT0_ENA;
			writel(hprt0, hsotg->regs + HPRT0);
			break;

		case USB_PORT_FEAT_SUSPEND:
			dev_dbg(hsotg->dev,
				"ClearPortFeature USB_PORT_FEAT_SUSPEND\n");
			writel(0, hsotg->regs + PCGCTL);
			usleep_range(20000, 40000);

			hprt0 = dwc2_read_hprt0(hsotg);
			hprt0 |= HPRT0_RES;
			writel(hprt0, hsotg->regs + HPRT0);
			hprt0 &= ~HPRT0_SUSP;
			usleep_range(100000, 150000);

			hprt0 &= ~HPRT0_RES;
			writel(hprt0, hsotg->regs + HPRT0);
			break;

		case USB_PORT_FEAT_POWER:
			dev_dbg(hsotg->dev,
				"ClearPortFeature USB_PORT_FEAT_POWER\n");
			hprt0 = dwc2_read_hprt0(hsotg);
			hprt0 &= ~HPRT0_PWR;
			writel(hprt0, hsotg->regs + HPRT0);
			break;

		case USB_PORT_FEAT_INDICATOR:
			dev_dbg(hsotg->dev,
				"ClearPortFeature USB_PORT_FEAT_INDICATOR\n");
			/* Port indicator not supported */
			break;

		case USB_PORT_FEAT_C_CONNECTION:
			/*
			 * Clears driver's internal Connect Status Change flag
			 */
			dev_dbg(hsotg->dev,
				"ClearPortFeature USB_PORT_FEAT_C_CONNECTION\n");
			hsotg->flags.b.port_connect_status_change = 0;
			break;

		case USB_PORT_FEAT_C_RESET:
			/* Clears driver's internal Port Reset Change flag */
			dev_dbg(hsotg->dev,
				"ClearPortFeature USB_PORT_FEAT_C_RESET\n");
			hsotg->flags.b.port_reset_change = 0;
			break;

		case USB_PORT_FEAT_C_ENABLE:
			/*
			 * Clears the driver's internal Port Enable/Disable
			 * Change flag
			 */
			dev_dbg(hsotg->dev,
				"ClearPortFeature USB_PORT_FEAT_C_ENABLE\n");
			hsotg->flags.b.port_enable_change = 0;
			break;

		case USB_PORT_FEAT_C_SUSPEND:
			/*
			 * Clears the driver's internal Port Suspend Change
			 * flag, which is set when resume signaling on the host
			 * port is complete
			 */
			dev_dbg(hsotg->dev,
				"ClearPortFeature USB_PORT_FEAT_C_SUSPEND\n");
			hsotg->flags.b.port_suspend_change = 0;
			break;

		case USB_PORT_FEAT_C_PORT_L1:
			dev_dbg(hsotg->dev,
				"ClearPortFeature USB_PORT_FEAT_C_PORT_L1\n");
			hsotg->flags.b.port_l1_change = 0;
			break;

		case USB_PORT_FEAT_C_OVER_CURRENT:
			dev_dbg(hsotg->dev,
				"ClearPortFeature USB_PORT_FEAT_C_OVER_CURRENT\n");
			hsotg->flags.b.port_over_current_change = 0;
			break;

		default:
			retval = -EINVAL;
			dev_err(hsotg->dev,
				"ClearPortFeature request %1xh unknown or unsupported\n",
				wvalue);
		}
		break;

	case GetHubDescriptor:
		dev_dbg(hsotg->dev, "GetHubDescriptor\n");
		hub_desc = (struct usb_hub_descriptor *)buf;
		hub_desc->bDescLength = 9;
		hub_desc->bDescriptorType = 0x29;
		hub_desc->bNbrPorts = 1;
		hub_desc->wHubCharacteristics =
			cpu_to_le16(HUB_CHAR_COMMON_LPSM |
				    HUB_CHAR_INDV_PORT_OCPM);
		hub_desc->bPwrOn2PwrGood = 1;
		hub_desc->bHubContrCurrent = 0;
		hub_desc->u.hs.DeviceRemovable[0] = 0;
		hub_desc->u.hs.DeviceRemovable[1] = 0xff;
		break;

	case GetHubStatus:
		dev_dbg(hsotg->dev, "GetHubStatus\n");
		memset(buf, 0, 4);
		break;

	case GetPortStatus:
		dev_vdbg(hsotg->dev,
			 "GetPortStatus wIndex=0x%04x flags=0x%08x\n", windex,
			 hsotg->flags.d32);
		if (!windex || windex > 1)
			goto error;

		port_status = 0;
		if (hsotg->flags.b.port_connect_status_change)
			port_status |= USB_PORT_STAT_C_CONNECTION << 16;
		if (hsotg->flags.b.port_enable_change)
			port_status |= USB_PORT_STAT_C_ENABLE << 16;
		if (hsotg->flags.b.port_suspend_change)
			port_status |= USB_PORT_STAT_C_SUSPEND << 16;
		if (hsotg->flags.b.port_l1_change)
			port_status |= USB_PORT_STAT_C_L1 << 16;
		if (hsotg->flags.b.port_reset_change)
			port_status |= USB_PORT_STAT_C_RESET << 16;
		if (hsotg->flags.b.port_over_current_change) {
			dev_warn(hsotg->dev, "Overcurrent change detected\n");
			port_status |= USB_PORT_STAT_C_OVERCURRENT << 16;
		}

		if (!hsotg->flags.b.port_connect_status) {
			/*
			 * The port is disconnected, which means the core is
			 * either in device mode or it soon will be. Just
			 * return 0's for the remainder of the port status
			 * since the port register can't be read if the core
			 * is in device mode.
			 */
			*(__le32 *)buf = cpu_to_le32(port_status);
			break;
		}

		hprt0 = readl(hsotg->regs + HPRT0);
		dev_vdbg(hsotg->dev, "  HPRT0: 0x%08x\n", hprt0);

		if (hprt0 & HPRT0_CONNSTS)
			port_status |= USB_PORT_STAT_CONNECTION;
		if (hprt0 & HPRT0_ENA)
			port_status |= USB_PORT_STAT_ENABLE;
		if (hprt0 & HPRT0_SUSP)
			port_status |= USB_PORT_STAT_SUSPEND;
		if (hprt0 & HPRT0_OVRCURRACT)
			port_status |= USB_PORT_STAT_OVERCURRENT;
		if (hprt0 & HPRT0_RST)
			port_status |= USB_PORT_STAT_RESET;
		if (hprt0 & HPRT0_PWR)
			port_status |= USB_PORT_STAT_POWER;

		speed = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
		if (speed == HPRT0_SPD_HIGH_SPEED)
			port_status |= USB_PORT_STAT_HIGH_SPEED;
		else if (speed == HPRT0_SPD_LOW_SPEED)
			port_status |= USB_PORT_STAT_LOW_SPEED;

		if (hprt0 & HPRT0_TSTCTL_MASK)
			port_status |= USB_PORT_STAT_TEST;
		/* USB_PORT_FEAT_INDICATOR unsupported always 0 */

		dev_vdbg(hsotg->dev, "port_status=%08x\n", port_status);
		*(__le32 *)buf = cpu_to_le32(port_status);
		break;

	case SetHubFeature:
		dev_dbg(hsotg->dev, "SetHubFeature\n");
		/* No HUB features supported */
		break;

	case SetPortFeature:
		dev_dbg(hsotg->dev, "SetPortFeature\n");
		if (wvalue != USB_PORT_FEAT_TEST && (!windex || windex > 1))
			goto error;

		if (!hsotg->flags.b.port_connect_status) {
			/*
			 * The port is disconnected, which means the core is
			 * either in device mode or it soon will be. Just
			 * return without doing anything since the port
			 * register can't be written if the core is in device
			 * mode.
			 */
			break;
		}

		switch (wvalue) {
		case USB_PORT_FEAT_SUSPEND:
			dev_dbg(hsotg->dev,
				"SetPortFeature - USB_PORT_FEAT_SUSPEND\n");
			if (windex != hsotg->otg_port)
				goto error;
			dwc2_port_suspend(hsotg, windex);
			break;

		case USB_PORT_FEAT_POWER:
			dev_dbg(hsotg->dev,
				"SetPortFeature - USB_PORT_FEAT_POWER\n");
			hprt0 = dwc2_read_hprt0(hsotg);
			hprt0 |= HPRT0_PWR;
			writel(hprt0, hsotg->regs + HPRT0);
			break;

		case USB_PORT_FEAT_RESET:
			hprt0 = dwc2_read_hprt0(hsotg);
			dev_dbg(hsotg->dev,
				"SetPortFeature - USB_PORT_FEAT_RESET\n");
			pcgctl = readl(hsotg->regs + PCGCTL);
			pcgctl &= ~(PCGCTL_ENBL_SLEEP_GATING | PCGCTL_STOPPCLK);
			writel(pcgctl, hsotg->regs + PCGCTL);
			/* ??? Original driver does this */
			writel(0, hsotg->regs + PCGCTL);

			hprt0 = dwc2_read_hprt0(hsotg);
			/* Clear suspend bit if resetting from suspend state */
			hprt0 &= ~HPRT0_SUSP;

			/*
			 * When B-Host the Port reset bit is set in the Start
			 * HCD Callback function, so that the reset is started
			 * within 1ms of the HNP success interrupt
			 */
			if (!dwc2_hcd_is_b_host(hsotg)) {
				hprt0 |= HPRT0_PWR | HPRT0_RST;
				dev_dbg(hsotg->dev,
					"In host mode, hprt0=%08x\n", hprt0);
				writel(hprt0, hsotg->regs + HPRT0);
			}

			/* Clear reset bit in 10ms (FS/LS) or 50ms (HS) */
			usleep_range(50000, 70000);
			hprt0 &= ~HPRT0_RST;
			writel(hprt0, hsotg->regs + HPRT0);
			hsotg->lx_state = DWC2_L0; /* Now back to On state */
			break;

		case USB_PORT_FEAT_INDICATOR:
			dev_dbg(hsotg->dev,
				"SetPortFeature - USB_PORT_FEAT_INDICATOR\n");
			/* Not supported */
			break;

		default:
			retval = -EINVAL;
			dev_err(hsotg->dev,
				"SetPortFeature %1xh unknown or unsupported\n",
				wvalue);
			break;
		}
		break;

	default:
error:
		retval = -EINVAL;
		dev_dbg(hsotg->dev,
			"Unknown hub control request: %1xh wIndex: %1xh wValue: %1xh\n",
			typereq, windex, wvalue);
		break;
	}

	return retval;
}

static int dwc2_hcd_is_status_changed(struct dwc2_hsotg *hsotg, int port)
{
	int retval;

	if (port != 1)
		return -EINVAL;

	retval = (hsotg->flags.b.port_connect_status_change ||
		  hsotg->flags.b.port_reset_change ||
		  hsotg->flags.b.port_enable_change ||
		  hsotg->flags.b.port_suspend_change ||
		  hsotg->flags.b.port_over_current_change);

	if (retval) {
		dev_dbg(hsotg->dev,
			"DWC OTG HCD HUB STATUS DATA: Root port status changed\n");
		dev_dbg(hsotg->dev, "  port_connect_status_change: %d\n",
			hsotg->flags.b.port_connect_status_change);
		dev_dbg(hsotg->dev, "  port_reset_change: %d\n",
			hsotg->flags.b.port_reset_change);
		dev_dbg(hsotg->dev, "  port_enable_change: %d\n",
			hsotg->flags.b.port_enable_change);
		dev_dbg(hsotg->dev, "  port_suspend_change: %d\n",
			hsotg->flags.b.port_suspend_change);
		dev_dbg(hsotg->dev, "  port_over_current_change: %d\n",
			hsotg->flags.b.port_over_current_change);
	}

	return retval;
}

int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg)
{
	u32 hfnum = readl(hsotg->regs + HFNUM);

#ifdef DWC2_DEBUG_SOF
	dev_vdbg(hsotg->dev, "DWC OTG HCD GET FRAME NUMBER %d\n",
		 (hfnum & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT);
#endif
	return (hfnum & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT;
}

int dwc2_hcd_is_b_host(struct dwc2_hsotg *hsotg)
{
	return hsotg->op_state == OTG_STATE_B_HOST;
}

static struct dwc2_hcd_urb *dwc2_hcd_urb_alloc(struct dwc2_hsotg *hsotg,
					       int iso_desc_count,
					       gfp_t mem_flags)
{
	struct dwc2_hcd_urb *urb;
	u32 size = sizeof(*urb) + iso_desc_count *
		sizeof(struct dwc2_hcd_iso_packet_desc);

	urb = kzalloc(size, mem_flags);
	if (urb)
		urb->packet_count = iso_desc_count;
	return urb;
}
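/*
 * Illustrative sketch only (the Linux-side urb_enqueue glue is not part of
 * this excerpt): a caller allocates one iso descriptor per packet and fills
 * in the addressing information with the helper below before handing the
 * URB to dwc2_hcd_urb_enqueue(), roughly:
 *
 *	dwc2_urb = dwc2_hcd_urb_alloc(hsotg, urb->number_of_packets,
 *				      mem_flags);
 *	if (!dwc2_urb)
 *		return -ENOMEM;
 *	dwc2_hcd_urb_set_pipeinfo(hsotg, dwc2_urb, usb_pipedevice(urb->pipe),
 *				  usb_pipeendpoint(urb->pipe), ep_type,
 *				  usb_pipein(urb->pipe),
 *				  usb_maxpacket(urb->dev, urb->pipe,
 *						!(usb_pipein(urb->pipe))));
 */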
dev_vdbg(hsotg->dev, 1859 "addr=%d, ep_num=%d, ep_dir=%1x, ep_type=%1x, mps=%d\n", 1860 dev_addr, ep_num, ep_dir, ep_type, mps); 1861 urb->pipe_info.dev_addr = dev_addr; 1862 urb->pipe_info.ep_num = ep_num; 1863 urb->pipe_info.pipe_type = ep_type; 1864 urb->pipe_info.pipe_dir = ep_dir; 1865 urb->pipe_info.mps = mps; 1866 } 1867 1868 /* 1869 * NOTE: This function will be removed once the peripheral controller code 1870 * is integrated and the driver is stable 1871 */ 1872 void dwc2_hcd_dump_state(struct dwc2_hsotg *hsotg) 1873 { 1874 #ifdef DEBUG 1875 struct dwc2_host_chan *chan; 1876 struct dwc2_hcd_urb *urb; 1877 struct dwc2_qtd *qtd; 1878 int num_channels; 1879 u32 np_tx_status; 1880 u32 p_tx_status; 1881 int i; 1882 1883 num_channels = hsotg->core_params->host_channels; 1884 dev_dbg(hsotg->dev, "\n"); 1885 dev_dbg(hsotg->dev, 1886 "************************************************************\n"); 1887 dev_dbg(hsotg->dev, "HCD State:\n"); 1888 dev_dbg(hsotg->dev, " Num channels: %d\n", num_channels); 1889 1890 for (i = 0; i < num_channels; i++) { 1891 chan = hsotg->hc_ptr_array[i]; 1892 dev_dbg(hsotg->dev, " Channel %d:\n", i); 1893 dev_dbg(hsotg->dev, 1894 " dev_addr: %d, ep_num: %d, ep_is_in: %d\n", 1895 chan->dev_addr, chan->ep_num, chan->ep_is_in); 1896 dev_dbg(hsotg->dev, " speed: %d\n", chan->speed); 1897 dev_dbg(hsotg->dev, " ep_type: %d\n", chan->ep_type); 1898 dev_dbg(hsotg->dev, " max_packet: %d\n", chan->max_packet); 1899 dev_dbg(hsotg->dev, " data_pid_start: %d\n", 1900 chan->data_pid_start); 1901 dev_dbg(hsotg->dev, " multi_count: %d\n", chan->multi_count); 1902 dev_dbg(hsotg->dev, " xfer_started: %d\n", 1903 chan->xfer_started); 1904 dev_dbg(hsotg->dev, " xfer_buf: %p\n", chan->xfer_buf); 1905 dev_dbg(hsotg->dev, " xfer_dma: %08lx\n", 1906 (unsigned long)chan->xfer_dma); 1907 dev_dbg(hsotg->dev, " xfer_len: %d\n", chan->xfer_len); 1908 dev_dbg(hsotg->dev, " xfer_count: %d\n", chan->xfer_count); 1909 dev_dbg(hsotg->dev, " halt_on_queue: %d\n", 1910 chan->halt_on_queue); 1911 dev_dbg(hsotg->dev, " halt_pending: %d\n", 1912 chan->halt_pending); 1913 dev_dbg(hsotg->dev, " halt_status: %d\n", chan->halt_status); 1914 dev_dbg(hsotg->dev, " do_split: %d\n", chan->do_split); 1915 dev_dbg(hsotg->dev, " complete_split: %d\n", 1916 chan->complete_split); 1917 dev_dbg(hsotg->dev, " hub_addr: %d\n", chan->hub_addr); 1918 dev_dbg(hsotg->dev, " hub_port: %d\n", chan->hub_port); 1919 dev_dbg(hsotg->dev, " xact_pos: %d\n", chan->xact_pos); 1920 dev_dbg(hsotg->dev, " requests: %d\n", chan->requests); 1921 dev_dbg(hsotg->dev, " qh: %p\n", chan->qh); 1922 1923 if (chan->xfer_started) { 1924 u32 hfnum, hcchar, hctsiz, hcint, hcintmsk; 1925 1926 hfnum = readl(hsotg->regs + HFNUM); 1927 hcchar = readl(hsotg->regs + HCCHAR(i)); 1928 hctsiz = readl(hsotg->regs + HCTSIZ(i)); 1929 hcint = readl(hsotg->regs + HCINT(i)); 1930 hcintmsk = readl(hsotg->regs + HCINTMSK(i)); 1931 dev_dbg(hsotg->dev, " hfnum: 0x%08x\n", hfnum); 1932 dev_dbg(hsotg->dev, " hcchar: 0x%08x\n", hcchar); 1933 dev_dbg(hsotg->dev, " hctsiz: 0x%08x\n", hctsiz); 1934 dev_dbg(hsotg->dev, " hcint: 0x%08x\n", hcint); 1935 dev_dbg(hsotg->dev, " hcintmsk: 0x%08x\n", hcintmsk); 1936 } 1937 1938 if (!(chan->xfer_started && chan->qh)) 1939 continue; 1940 1941 list_for_each_entry(qtd, &chan->qh->qtd_list, qtd_list_entry) { 1942 if (!qtd->in_process) 1943 break; 1944 urb = qtd->urb; 1945 dev_dbg(hsotg->dev, " URB Info:\n"); 1946 dev_dbg(hsotg->dev, " qtd: %p, urb: %p\n", 1947 qtd, urb); 1948 if (urb) { 1949 dev_dbg(hsotg->dev, 1950 " Dev: %d, 
EP: %d %s\n", 1951 dwc2_hcd_get_dev_addr(&urb->pipe_info), 1952 dwc2_hcd_get_ep_num(&urb->pipe_info), 1953 dwc2_hcd_is_pipe_in(&urb->pipe_info) ? 1954 "IN" : "OUT"); 1955 dev_dbg(hsotg->dev, 1956 " Max packet size: %d\n", 1957 dwc2_hcd_get_mps(&urb->pipe_info)); 1958 dev_dbg(hsotg->dev, 1959 " transfer_buffer: %p\n", 1960 urb->buf); 1961 dev_dbg(hsotg->dev, 1962 " transfer_dma: %08lx\n", 1963 (unsigned long)urb->dma); 1964 dev_dbg(hsotg->dev, 1965 " transfer_buffer_length: %d\n", 1966 urb->length); 1967 dev_dbg(hsotg->dev, " actual_length: %d\n", 1968 urb->actual_length); 1969 } 1970 } 1971 } 1972 1973 dev_dbg(hsotg->dev, " non_periodic_channels: %d\n", 1974 hsotg->non_periodic_channels); 1975 dev_dbg(hsotg->dev, " periodic_channels: %d\n", 1976 hsotg->periodic_channels); 1977 dev_dbg(hsotg->dev, " periodic_usecs: %d\n", hsotg->periodic_usecs); 1978 np_tx_status = readl(hsotg->regs + GNPTXSTS); 1979 dev_dbg(hsotg->dev, " NP Tx Req Queue Space Avail: %d\n", 1980 (np_tx_status & TXSTS_QSPCAVAIL_MASK) >> TXSTS_QSPCAVAIL_SHIFT); 1981 dev_dbg(hsotg->dev, " NP Tx FIFO Space Avail: %d\n", 1982 (np_tx_status & TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT); 1983 p_tx_status = readl(hsotg->regs + HPTXSTS); 1984 dev_dbg(hsotg->dev, " P Tx Req Queue Space Avail: %d\n", 1985 (p_tx_status & TXSTS_QSPCAVAIL_MASK) >> TXSTS_QSPCAVAIL_SHIFT); 1986 dev_dbg(hsotg->dev, " P Tx FIFO Space Avail: %d\n", 1987 (p_tx_status & TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT); 1988 dwc2_hcd_dump_frrem(hsotg); 1989 dwc2_dump_global_registers(hsotg); 1990 dwc2_dump_host_registers(hsotg); 1991 dev_dbg(hsotg->dev, 1992 "************************************************************\n"); 1993 dev_dbg(hsotg->dev, "\n"); 1994 #endif 1995 } 1996 1997 /* 1998 * NOTE: This function will be removed once the peripheral controller code 1999 * is integrated and the driver is stable 2000 */ 2001 void dwc2_hcd_dump_frrem(struct dwc2_hsotg *hsotg) 2002 { 2003 #ifdef DWC2_DUMP_FRREM 2004 dev_dbg(hsotg->dev, "Frame remaining at SOF:\n"); 2005 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n", 2006 hsotg->frrem_samples, hsotg->frrem_accum, 2007 hsotg->frrem_samples > 0 ? 2008 hsotg->frrem_accum / hsotg->frrem_samples : 0); 2009 dev_dbg(hsotg->dev, "\n"); 2010 dev_dbg(hsotg->dev, "Frame remaining at start_transfer (uframe 7):\n"); 2011 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n", 2012 hsotg->hfnum_7_samples, 2013 hsotg->hfnum_7_frrem_accum, 2014 hsotg->hfnum_7_samples > 0 ? 2015 hsotg->hfnum_7_frrem_accum / hsotg->hfnum_7_samples : 0); 2016 dev_dbg(hsotg->dev, "Frame remaining at start_transfer (uframe 0):\n"); 2017 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n", 2018 hsotg->hfnum_0_samples, 2019 hsotg->hfnum_0_frrem_accum, 2020 hsotg->hfnum_0_samples > 0 ? 2021 hsotg->hfnum_0_frrem_accum / hsotg->hfnum_0_samples : 0); 2022 dev_dbg(hsotg->dev, "Frame remaining at start_transfer (uframe 1-6):\n"); 2023 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n", 2024 hsotg->hfnum_other_samples, 2025 hsotg->hfnum_other_frrem_accum, 2026 hsotg->hfnum_other_samples > 0 ? 2027 hsotg->hfnum_other_frrem_accum / hsotg->hfnum_other_samples : 2028 0); 2029 dev_dbg(hsotg->dev, "\n"); 2030 dev_dbg(hsotg->dev, "Frame remaining at sample point A (uframe 7):\n"); 2031 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n", 2032 hsotg->hfnum_7_samples_a, hsotg->hfnum_7_frrem_accum_a, 2033 hsotg->hfnum_7_samples_a > 0 ? 
2034 hsotg->hfnum_7_frrem_accum_a / hsotg->hfnum_7_samples_a : 0); 2035 dev_dbg(hsotg->dev, "Frame remaining at sample point A (uframe 0):\n"); 2036 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n", 2037 hsotg->hfnum_0_samples_a, hsotg->hfnum_0_frrem_accum_a, 2038 hsotg->hfnum_0_samples_a > 0 ? 2039 hsotg->hfnum_0_frrem_accum_a / hsotg->hfnum_0_samples_a : 0); 2040 dev_dbg(hsotg->dev, "Frame remaining at sample point A (uframe 1-6):\n"); 2041 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n", 2042 hsotg->hfnum_other_samples_a, hsotg->hfnum_other_frrem_accum_a, 2043 hsotg->hfnum_other_samples_a > 0 ? 2044 hsotg->hfnum_other_frrem_accum_a / hsotg->hfnum_other_samples_a 2045 : 0); 2046 dev_dbg(hsotg->dev, "\n"); 2047 dev_dbg(hsotg->dev, "Frame remaining at sample point B (uframe 7):\n"); 2048 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n", 2049 hsotg->hfnum_7_samples_b, hsotg->hfnum_7_frrem_accum_b, 2050 hsotg->hfnum_7_samples_b > 0 ? 2051 hsotg->hfnum_7_frrem_accum_b / hsotg->hfnum_7_samples_b : 0); 2052 dev_dbg(hsotg->dev, "Frame remaining at sample point B (uframe 0):\n"); 2053 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n", 2054 hsotg->hfnum_0_samples_b, hsotg->hfnum_0_frrem_accum_b, 2055 (hsotg->hfnum_0_samples_b > 0) ? 2056 hsotg->hfnum_0_frrem_accum_b / hsotg->hfnum_0_samples_b : 0); 2057 dev_dbg(hsotg->dev, "Frame remaining at sample point B (uframe 1-6):\n"); 2058 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n", 2059 hsotg->hfnum_other_samples_b, hsotg->hfnum_other_frrem_accum_b, 2060 (hsotg->hfnum_other_samples_b > 0) ? 2061 hsotg->hfnum_other_frrem_accum_b / hsotg->hfnum_other_samples_b 2062 : 0); 2063 #endif 2064 } 2065 2066 struct wrapper_priv_data { 2067 struct dwc2_hsotg *hsotg; 2068 }; 2069 2070 /* Gets the dwc2_hsotg from a usb_hcd */ 2071 static struct dwc2_hsotg *dwc2_hcd_to_hsotg(struct usb_hcd *hcd) 2072 { 2073 struct wrapper_priv_data *p; 2074 2075 p = (struct wrapper_priv_data *) &hcd->hcd_priv; 2076 return p->hsotg; 2077 } 2078 2079 static int _dwc2_hcd_start(struct usb_hcd *hcd); 2080 2081 void dwc2_host_start(struct dwc2_hsotg *hsotg) 2082 { 2083 struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg); 2084 2085 hcd->self.is_b_host = dwc2_hcd_is_b_host(hsotg); 2086 _dwc2_hcd_start(hcd); 2087 } 2088 2089 void dwc2_host_disconnect(struct dwc2_hsotg *hsotg) 2090 { 2091 struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg); 2092 2093 hcd->self.is_b_host = 0; 2094 } 2095 2096 void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context, int *hub_addr, 2097 int *hub_port) 2098 { 2099 struct urb *urb = context; 2100 2101 if (urb->dev->tt) 2102 *hub_addr = urb->dev->tt->hub->devnum; 2103 else 2104 *hub_addr = 0; 2105 *hub_port = urb->dev->ttport; 2106 } 2107 2108 int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context) 2109 { 2110 struct urb *urb = context; 2111 2112 return urb->dev->speed; 2113 } 2114 2115 static void dwc2_allocate_bus_bandwidth(struct usb_hcd *hcd, u16 bw, 2116 struct urb *urb) 2117 { 2118 struct usb_bus *bus = hcd_to_bus(hcd); 2119 2120 if (urb->interval) 2121 bus->bandwidth_allocated += bw / urb->interval; 2122 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) 2123 bus->bandwidth_isoc_reqs++; 2124 else 2125 bus->bandwidth_int_reqs++; 2126 } 2127 2128 static void dwc2_free_bus_bandwidth(struct usb_hcd *hcd, u16 bw, 2129 struct urb *urb) 2130 { 2131 struct usb_bus *bus = hcd_to_bus(hcd); 2132 2133 if (urb->interval) 2134 bus->bandwidth_allocated -= bw / urb->interval; 2135 if (usb_pipetype(urb->pipe) == 
PIPE_ISOCHRONOUS) 2136 bus->bandwidth_isoc_reqs--; 2137 else 2138 bus->bandwidth_int_reqs--; 2139 } 2140 2141 /* 2142 * Sets the final status of an URB and returns it to the upper layer. Any 2143 * required cleanup of the URB is performed. 2144 * 2145 * Must be called with interrupt disabled and spinlock held 2146 */ 2147 void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd, 2148 int status) 2149 { 2150 struct urb *urb; 2151 int i; 2152 2153 if (!qtd) { 2154 dev_dbg(hsotg->dev, "## %s: qtd is NULL ##\n", __func__); 2155 return; 2156 } 2157 2158 if (!qtd->urb) { 2159 dev_dbg(hsotg->dev, "## %s: qtd->urb is NULL ##\n", __func__); 2160 return; 2161 } 2162 2163 urb = qtd->urb->priv; 2164 if (!urb) { 2165 dev_dbg(hsotg->dev, "## %s: urb->priv is NULL ##\n", __func__); 2166 return; 2167 } 2168 2169 urb->actual_length = dwc2_hcd_urb_get_actual_length(qtd->urb); 2170 2171 if (dbg_urb(urb)) 2172 dev_vdbg(hsotg->dev, 2173 "%s: urb %p device %d ep %d-%s status %d actual %d\n", 2174 __func__, urb, usb_pipedevice(urb->pipe), 2175 usb_pipeendpoint(urb->pipe), 2176 usb_pipein(urb->pipe) ? "IN" : "OUT", status, 2177 urb->actual_length); 2178 2179 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS && dbg_perio()) { 2180 for (i = 0; i < urb->number_of_packets; i++) 2181 dev_vdbg(hsotg->dev, " ISO Desc %d status %d\n", 2182 i, urb->iso_frame_desc[i].status); 2183 } 2184 2185 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { 2186 urb->error_count = dwc2_hcd_urb_get_error_count(qtd->urb); 2187 for (i = 0; i < urb->number_of_packets; ++i) { 2188 urb->iso_frame_desc[i].actual_length = 2189 dwc2_hcd_urb_get_iso_desc_actual_length( 2190 qtd->urb, i); 2191 urb->iso_frame_desc[i].status = 2192 dwc2_hcd_urb_get_iso_desc_status(qtd->urb, i); 2193 } 2194 } 2195 2196 urb->status = status; 2197 if (!status) { 2198 if ((urb->transfer_flags & URB_SHORT_NOT_OK) && 2199 urb->actual_length < urb->transfer_buffer_length) 2200 urb->status = -EREMOTEIO; 2201 } 2202 2203 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS || 2204 usb_pipetype(urb->pipe) == PIPE_INTERRUPT) { 2205 struct usb_host_endpoint *ep = urb->ep; 2206 2207 if (ep) 2208 dwc2_free_bus_bandwidth(dwc2_hsotg_to_hcd(hsotg), 2209 dwc2_hcd_get_ep_bandwidth(hsotg, ep), 2210 urb); 2211 } 2212 2213 usb_hcd_unlink_urb_from_ep(dwc2_hsotg_to_hcd(hsotg), urb); 2214 urb->hcpriv = NULL; 2215 kfree(qtd->urb); 2216 qtd->urb = NULL; 2217 2218 spin_unlock(&hsotg->lock); 2219 usb_hcd_giveback_urb(dwc2_hsotg_to_hcd(hsotg), urb, status); 2220 spin_lock(&hsotg->lock); 2221 } 2222 2223 /* 2224 * Work queue function for starting the HCD when A-Cable is connected 2225 */ 2226 static void dwc2_hcd_start_func(struct work_struct *work) 2227 { 2228 struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg, 2229 start_work.work); 2230 2231 dev_dbg(hsotg->dev, "%s() %p\n", __func__, hsotg); 2232 dwc2_host_start(hsotg); 2233 } 2234 2235 /* 2236 * Reset work queue function 2237 */ 2238 static void dwc2_hcd_reset_func(struct work_struct *work) 2239 { 2240 struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg, 2241 reset_work.work); 2242 u32 hprt0; 2243 2244 dev_dbg(hsotg->dev, "USB RESET function called\n"); 2245 hprt0 = dwc2_read_hprt0(hsotg); 2246 hprt0 &= ~HPRT0_RST; 2247 writel(hprt0, hsotg->regs + HPRT0); 2248 hsotg->flags.b.port_reset_change = 1; 2249 } 2250 2251 /* 2252 * ========================================================================= 2253 * Linux HC Driver Functions 2254 * ========================================================================= 
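 *
 * The _dwc2_hcd_*() wrappers below implement the struct hc_driver interface
 * that is registered through the dwc2_hc_driver table defined later in this
 * file. Each wrapper recovers the dwc2_hsotg instance from the usb_hcd
 * private data (see dwc2_hcd_to_hsotg() above) and forwards the request to
 * the corresponding driver-internal routine.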
2255 */ 2256 2257 /* 2258 * Initializes the DWC_otg controller and its root hub and prepares it for host 2259 * mode operation. Activates the root port. Returns 0 on success and a negative 2260 * error code on failure. 2261 */ 2262 static int _dwc2_hcd_start(struct usb_hcd *hcd) 2263 { 2264 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); 2265 struct usb_bus *bus = hcd_to_bus(hcd); 2266 unsigned long flags; 2267 2268 dev_dbg(hsotg->dev, "DWC OTG HCD START\n"); 2269 2270 spin_lock_irqsave(&hsotg->lock, flags); 2271 2272 hcd->state = HC_STATE_RUNNING; 2273 2274 if (dwc2_is_device_mode(hsotg)) { 2275 spin_unlock_irqrestore(&hsotg->lock, flags); 2276 return 0; /* why 0 ?? */ 2277 } 2278 2279 dwc2_hcd_reinit(hsotg); 2280 2281 /* Initialize and connect root hub if one is not already attached */ 2282 if (bus->root_hub) { 2283 dev_dbg(hsotg->dev, "DWC OTG HCD Has Root Hub\n"); 2284 /* Inform the HUB driver to resume */ 2285 usb_hcd_resume_root_hub(hcd); 2286 } 2287 2288 spin_unlock_irqrestore(&hsotg->lock, flags); 2289 return 0; 2290 } 2291 2292 /* 2293 * Halts the DWC_otg host mode operations in a clean manner. USB transfers are 2294 * stopped. 2295 */ 2296 static void _dwc2_hcd_stop(struct usb_hcd *hcd) 2297 { 2298 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); 2299 unsigned long flags; 2300 2301 spin_lock_irqsave(&hsotg->lock, flags); 2302 dwc2_hcd_stop(hsotg); 2303 spin_unlock_irqrestore(&hsotg->lock, flags); 2304 2305 usleep_range(1000, 3000); 2306 } 2307 2308 /* Returns the current frame number */ 2309 static int _dwc2_hcd_get_frame_number(struct usb_hcd *hcd) 2310 { 2311 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); 2312 2313 return dwc2_hcd_get_frame_number(hsotg); 2314 } 2315 2316 static void dwc2_dump_urb_info(struct usb_hcd *hcd, struct urb *urb, 2317 char *fn_name) 2318 { 2319 #ifdef VERBOSE_DEBUG 2320 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); 2321 char *pipetype; 2322 char *speed; 2323 2324 dev_vdbg(hsotg->dev, "%s, urb %p\n", fn_name, urb); 2325 dev_vdbg(hsotg->dev, " Device address: %d\n", 2326 usb_pipedevice(urb->pipe)); 2327 dev_vdbg(hsotg->dev, " Endpoint: %d, %s\n", 2328 usb_pipeendpoint(urb->pipe), 2329 usb_pipein(urb->pipe) ? "IN" : "OUT"); 2330 2331 switch (usb_pipetype(urb->pipe)) { 2332 case PIPE_CONTROL: 2333 pipetype = "CONTROL"; 2334 break; 2335 case PIPE_BULK: 2336 pipetype = "BULK"; 2337 break; 2338 case PIPE_INTERRUPT: 2339 pipetype = "INTERRUPT"; 2340 break; 2341 case PIPE_ISOCHRONOUS: 2342 pipetype = "ISOCHRONOUS"; 2343 break; 2344 default: 2345 pipetype = "UNKNOWN"; 2346 break; 2347 } 2348 2349 dev_vdbg(hsotg->dev, " Endpoint type: %s %s (%s)\n", pipetype, 2350 usb_urb_dir_in(urb) ? "IN" : "OUT", usb_pipein(urb->pipe) ? 
2351 "IN" : "OUT"); 2352 2353 switch (urb->dev->speed) { 2354 case USB_SPEED_HIGH: 2355 speed = "HIGH"; 2356 break; 2357 case USB_SPEED_FULL: 2358 speed = "FULL"; 2359 break; 2360 case USB_SPEED_LOW: 2361 speed = "LOW"; 2362 break; 2363 default: 2364 speed = "UNKNOWN"; 2365 break; 2366 } 2367 2368 dev_vdbg(hsotg->dev, " Speed: %s\n", speed); 2369 dev_vdbg(hsotg->dev, " Max packet size: %d\n", 2370 usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))); 2371 dev_vdbg(hsotg->dev, " Data buffer length: %d\n", 2372 urb->transfer_buffer_length); 2373 dev_vdbg(hsotg->dev, " Transfer buffer: %p, Transfer DMA: %08lx\n", 2374 urb->transfer_buffer, (unsigned long)urb->transfer_dma); 2375 dev_vdbg(hsotg->dev, " Setup buffer: %p, Setup DMA: %08lx\n", 2376 urb->setup_packet, (unsigned long)urb->setup_dma); 2377 dev_vdbg(hsotg->dev, " Interval: %d\n", urb->interval); 2378 2379 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { 2380 int i; 2381 2382 for (i = 0; i < urb->number_of_packets; i++) { 2383 dev_vdbg(hsotg->dev, " ISO Desc %d:\n", i); 2384 dev_vdbg(hsotg->dev, " offset: %d, length %d\n", 2385 urb->iso_frame_desc[i].offset, 2386 urb->iso_frame_desc[i].length); 2387 } 2388 } 2389 #endif 2390 } 2391 2392 /* 2393 * Starts processing a USB transfer request specified by a USB Request Block 2394 * (URB). mem_flags indicates the type of memory allocation to use while 2395 * processing this URB. 2396 */ 2397 static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, 2398 gfp_t mem_flags) 2399 { 2400 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); 2401 struct usb_host_endpoint *ep = urb->ep; 2402 struct dwc2_hcd_urb *dwc2_urb; 2403 int i; 2404 int retval; 2405 int alloc_bandwidth = 0; 2406 u8 ep_type = 0; 2407 u32 tflags = 0; 2408 void *buf; 2409 unsigned long flags; 2410 2411 if (dbg_urb(urb)) { 2412 dev_vdbg(hsotg->dev, "DWC OTG HCD URB Enqueue\n"); 2413 dwc2_dump_urb_info(hcd, urb, "urb_enqueue"); 2414 } 2415 2416 if (ep == NULL) 2417 return -EINVAL; 2418 2419 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS || 2420 usb_pipetype(urb->pipe) == PIPE_INTERRUPT) { 2421 spin_lock_irqsave(&hsotg->lock, flags); 2422 if (!dwc2_hcd_is_bandwidth_allocated(hsotg, ep)) 2423 alloc_bandwidth = 1; 2424 spin_unlock_irqrestore(&hsotg->lock, flags); 2425 } 2426 2427 switch (usb_pipetype(urb->pipe)) { 2428 case PIPE_CONTROL: 2429 ep_type = USB_ENDPOINT_XFER_CONTROL; 2430 break; 2431 case PIPE_ISOCHRONOUS: 2432 ep_type = USB_ENDPOINT_XFER_ISOC; 2433 break; 2434 case PIPE_BULK: 2435 ep_type = USB_ENDPOINT_XFER_BULK; 2436 break; 2437 case PIPE_INTERRUPT: 2438 ep_type = USB_ENDPOINT_XFER_INT; 2439 break; 2440 default: 2441 dev_warn(hsotg->dev, "Wrong ep type\n"); 2442 } 2443 2444 dwc2_urb = dwc2_hcd_urb_alloc(hsotg, urb->number_of_packets, 2445 mem_flags); 2446 if (!dwc2_urb) 2447 return -ENOMEM; 2448 2449 dwc2_hcd_urb_set_pipeinfo(hsotg, dwc2_urb, usb_pipedevice(urb->pipe), 2450 usb_pipeendpoint(urb->pipe), ep_type, 2451 usb_pipein(urb->pipe), 2452 usb_maxpacket(urb->dev, urb->pipe, 2453 !(usb_pipein(urb->pipe)))); 2454 2455 buf = urb->transfer_buffer; 2456 2457 if (hcd->self.uses_dma) { 2458 if (!buf && (urb->transfer_dma & 3)) { 2459 dev_err(hsotg->dev, 2460 "%s: unaligned transfer with no transfer_buffer", 2461 __func__); 2462 retval = -EINVAL; 2463 goto fail1; 2464 } 2465 } 2466 2467 if (!(urb->transfer_flags & URB_NO_INTERRUPT)) 2468 tflags |= URB_GIVEBACK_ASAP; 2469 if (urb->transfer_flags & URB_ZERO_PACKET) 2470 tflags |= URB_SEND_ZERO_PACKET; 2471 2472 dwc2_urb->priv = urb; 2473 dwc2_urb->buf = 
buf;
	dwc2_urb->dma = urb->transfer_dma;
	dwc2_urb->length = urb->transfer_buffer_length;
	dwc2_urb->setup_packet = urb->setup_packet;
	dwc2_urb->setup_dma = urb->setup_dma;
	dwc2_urb->flags = tflags;
	dwc2_urb->interval = urb->interval;
	dwc2_urb->status = -EINPROGRESS;

	for (i = 0; i < urb->number_of_packets; ++i)
		dwc2_hcd_urb_set_iso_desc_params(dwc2_urb, i,
						 urb->iso_frame_desc[i].offset,
						 urb->iso_frame_desc[i].length);

	urb->hcpriv = dwc2_urb;

	spin_lock_irqsave(&hsotg->lock, flags);
	retval = usb_hcd_link_urb_to_ep(hcd, urb);
	spin_unlock_irqrestore(&hsotg->lock, flags);
	if (retval)
		goto fail1;

	retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, &ep->hcpriv, mem_flags);
	if (retval)
		goto fail2;

	if (alloc_bandwidth) {
		spin_lock_irqsave(&hsotg->lock, flags);
		dwc2_allocate_bus_bandwidth(hcd,
				dwc2_hcd_get_ep_bandwidth(hsotg, ep),
				urb);
		spin_unlock_irqrestore(&hsotg->lock, flags);
	}

	return 0;

fail2:
	spin_lock_irqsave(&hsotg->lock, flags);
	dwc2_urb->priv = NULL;
	usb_hcd_unlink_urb_from_ep(hcd, urb);
	spin_unlock_irqrestore(&hsotg->lock, flags);
fail1:
	urb->hcpriv = NULL;
	kfree(dwc2_urb);

	return retval;
}

/*
 * Aborts/cancels a USB transfer request. Returns 0 on success; otherwise
 * returns the error reported by usb_hcd_check_unlink_urb() or
 * dwc2_hcd_urb_dequeue().
 */
static int _dwc2_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
				 int status)
{
	struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
	int rc;
	unsigned long flags;

	dev_dbg(hsotg->dev, "DWC OTG HCD URB Dequeue\n");
	dwc2_dump_urb_info(hcd, urb, "urb_dequeue");

	spin_lock_irqsave(&hsotg->lock, flags);

	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (rc)
		goto out;

	if (!urb->hcpriv) {
		dev_dbg(hsotg->dev, "## urb->hcpriv is NULL ##\n");
		goto out;
	}

	rc = dwc2_hcd_urb_dequeue(hsotg, urb->hcpriv);

	usb_hcd_unlink_urb_from_ep(hcd, urb);

	kfree(urb->hcpriv);
	urb->hcpriv = NULL;

	/* Higher layer software sets URB status */
	spin_unlock(&hsotg->lock);
	usb_hcd_giveback_urb(hcd, urb, status);
	spin_lock(&hsotg->lock);

	dev_dbg(hsotg->dev, "Called usb_hcd_giveback_urb()\n");
	dev_dbg(hsotg->dev, " urb->status = %d\n", urb->status);
out:
	spin_unlock_irqrestore(&hsotg->lock, flags);

	return rc;
}

/*
 * Frees resources in the DWC_otg controller related to a given endpoint. Also
 * clears state in the HCD related to the endpoint. Any URBs for the endpoint
 * must already be dequeued.
 */
static void _dwc2_hcd_endpoint_disable(struct usb_hcd *hcd,
				       struct usb_host_endpoint *ep)
{
	struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);

	dev_dbg(hsotg->dev,
		"DWC OTG HCD EP DISABLE: bEndpointAddress=0x%02x, ep->hcpriv=%p\n",
		ep->desc.bEndpointAddress, ep->hcpriv);
	dwc2_hcd_endpoint_disable(hsotg, ep, 250);
}

/*
 * Resets endpoint-specific parameter values; in the current version this is
 * used to reset the data toggle as a workaround. This function can be called
 * from the usb_clear_halt() routine.
2585 */ 2586 static void _dwc2_hcd_endpoint_reset(struct usb_hcd *hcd, 2587 struct usb_host_endpoint *ep) 2588 { 2589 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); 2590 unsigned long flags; 2591 2592 dev_dbg(hsotg->dev, 2593 "DWC OTG HCD EP RESET: bEndpointAddress=0x%02x\n", 2594 ep->desc.bEndpointAddress); 2595 2596 spin_lock_irqsave(&hsotg->lock, flags); 2597 dwc2_hcd_endpoint_reset(hsotg, ep); 2598 spin_unlock_irqrestore(&hsotg->lock, flags); 2599 } 2600 2601 /* 2602 * Handles host mode interrupts for the DWC_otg controller. Returns IRQ_NONE if 2603 * there was no interrupt to handle. Returns IRQ_HANDLED if there was a valid 2604 * interrupt. 2605 * 2606 * This function is called by the USB core when an interrupt occurs 2607 */ 2608 static irqreturn_t _dwc2_hcd_irq(struct usb_hcd *hcd) 2609 { 2610 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); 2611 2612 return dwc2_handle_hcd_intr(hsotg); 2613 } 2614 2615 /* 2616 * Creates Status Change bitmap for the root hub and root port. The bitmap is 2617 * returned in buf. Bit 0 is the status change indicator for the root hub. Bit 1 2618 * is the status change indicator for the single root port. Returns 1 if either 2619 * change indicator is 1, otherwise returns 0. 2620 */ 2621 static int _dwc2_hcd_hub_status_data(struct usb_hcd *hcd, char *buf) 2622 { 2623 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); 2624 2625 buf[0] = dwc2_hcd_is_status_changed(hsotg, 1) << 1; 2626 return buf[0] != 0; 2627 } 2628 2629 /* Handles hub class-specific requests */ 2630 static int _dwc2_hcd_hub_control(struct usb_hcd *hcd, u16 typereq, u16 wvalue, 2631 u16 windex, char *buf, u16 wlength) 2632 { 2633 int retval = dwc2_hcd_hub_control(dwc2_hcd_to_hsotg(hcd), typereq, 2634 wvalue, windex, buf, wlength); 2635 return retval; 2636 } 2637 2638 /* Handles hub TT buffer clear completions */ 2639 static void _dwc2_hcd_clear_tt_buffer_complete(struct usb_hcd *hcd, 2640 struct usb_host_endpoint *ep) 2641 { 2642 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); 2643 struct dwc2_qh *qh; 2644 unsigned long flags; 2645 2646 qh = ep->hcpriv; 2647 if (!qh) 2648 return; 2649 2650 spin_lock_irqsave(&hsotg->lock, flags); 2651 qh->tt_buffer_dirty = 0; 2652 2653 if (hsotg->flags.b.port_connect_status) 2654 dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_ALL); 2655 2656 spin_unlock_irqrestore(&hsotg->lock, flags); 2657 } 2658 2659 static struct hc_driver dwc2_hc_driver = { 2660 .description = "dwc2_hsotg", 2661 .product_desc = "DWC OTG Controller", 2662 .hcd_priv_size = sizeof(struct wrapper_priv_data), 2663 2664 .irq = _dwc2_hcd_irq, 2665 .flags = HCD_MEMORY | HCD_USB2, 2666 2667 .start = _dwc2_hcd_start, 2668 .stop = _dwc2_hcd_stop, 2669 .urb_enqueue = _dwc2_hcd_urb_enqueue, 2670 .urb_dequeue = _dwc2_hcd_urb_dequeue, 2671 .endpoint_disable = _dwc2_hcd_endpoint_disable, 2672 .endpoint_reset = _dwc2_hcd_endpoint_reset, 2673 .get_frame_number = _dwc2_hcd_get_frame_number, 2674 2675 .hub_status_data = _dwc2_hcd_hub_status_data, 2676 .hub_control = _dwc2_hcd_hub_control, 2677 .clear_tt_buffer_complete = _dwc2_hcd_clear_tt_buffer_complete, 2678 }; 2679 2680 /* 2681 * Frees secondary storage associated with the dwc2_hsotg structure contained 2682 * in the struct usb_hcd field 2683 */ 2684 static void dwc2_hcd_free(struct dwc2_hsotg *hsotg) 2685 { 2686 u32 ahbcfg; 2687 u32 dctl; 2688 int i; 2689 2690 dev_dbg(hsotg->dev, "DWC OTG HCD FREE\n"); 2691 2692 /* Free memory for QH/QTD lists */ 2693 dwc2_qh_list_free(hsotg, &hsotg->non_periodic_sched_inactive); 2694 
dwc2_qh_list_free(hsotg, &hsotg->non_periodic_sched_active); 2695 dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_inactive); 2696 dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_ready); 2697 dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_assigned); 2698 dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_queued); 2699 2700 /* Free memory for the host channels */ 2701 for (i = 0; i < MAX_EPS_CHANNELS; i++) { 2702 struct dwc2_host_chan *chan = hsotg->hc_ptr_array[i]; 2703 2704 if (chan != NULL) { 2705 dev_dbg(hsotg->dev, "HCD Free channel #%i, chan=%p\n", 2706 i, chan); 2707 hsotg->hc_ptr_array[i] = NULL; 2708 kfree(chan); 2709 } 2710 } 2711 2712 if (hsotg->core_params->dma_enable > 0) { 2713 if (hsotg->status_buf) { 2714 dma_free_coherent(hsotg->dev, DWC2_HCD_STATUS_BUF_SIZE, 2715 hsotg->status_buf, 2716 hsotg->status_buf_dma); 2717 hsotg->status_buf = NULL; 2718 } 2719 } else { 2720 kfree(hsotg->status_buf); 2721 hsotg->status_buf = NULL; 2722 } 2723 2724 ahbcfg = readl(hsotg->regs + GAHBCFG); 2725 2726 /* Disable all interrupts */ 2727 ahbcfg &= ~GAHBCFG_GLBL_INTR_EN; 2728 writel(ahbcfg, hsotg->regs + GAHBCFG); 2729 writel(0, hsotg->regs + GINTMSK); 2730 2731 if (hsotg->hw_params.snpsid >= DWC2_CORE_REV_3_00a) { 2732 dctl = readl(hsotg->regs + DCTL); 2733 dctl |= DCTL_SFTDISCON; 2734 writel(dctl, hsotg->regs + DCTL); 2735 } 2736 2737 if (hsotg->wq_otg) { 2738 if (!cancel_work_sync(&hsotg->wf_otg)) 2739 flush_workqueue(hsotg->wq_otg); 2740 destroy_workqueue(hsotg->wq_otg); 2741 } 2742 2743 kfree(hsotg->core_params); 2744 hsotg->core_params = NULL; 2745 del_timer(&hsotg->wkp_timer); 2746 } 2747 2748 static void dwc2_hcd_release(struct dwc2_hsotg *hsotg) 2749 { 2750 /* Turn off all host-specific interrupts */ 2751 dwc2_disable_host_interrupts(hsotg); 2752 2753 dwc2_hcd_free(hsotg); 2754 } 2755 2756 /* 2757 * Sets all parameters to the given value. 2758 * 2759 * Assumes that the dwc2_core_params struct contains only integers. 2760 */ 2761 void dwc2_set_all_params(struct dwc2_core_params *params, int value) 2762 { 2763 int *p = (int *)params; 2764 size_t size = sizeof(*params) / sizeof(*p); 2765 int i; 2766 2767 for (i = 0; i < size; i++) 2768 p[i] = value; 2769 } 2770 EXPORT_SYMBOL_GPL(dwc2_set_all_params); 2771 2772 /* 2773 * Initializes the HCD. This function allocates memory for and initializes the 2774 * static parts of the usb_hcd and dwc2_hsotg structures. It also registers the 2775 * USB bus with the core and calls the hc_driver->start() function. It returns 2776 * a negative error on failure. 
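 *
 * The caller is expected to have set hsotg->dev and to have mapped the
 * controller's register space into hsotg->regs before calling this function;
 * both are used here before any other initialization is performed.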
2777 */ 2778 int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq, 2779 const struct dwc2_core_params *params) 2780 { 2781 struct usb_hcd *hcd; 2782 struct dwc2_host_chan *channel; 2783 u32 hcfg; 2784 int i, num_channels; 2785 int retval; 2786 2787 if (usb_disabled()) 2788 return -ENODEV; 2789 2790 dev_dbg(hsotg->dev, "DWC OTG HCD INIT\n"); 2791 2792 /* Detect config values from hardware */ 2793 retval = dwc2_get_hwparams(hsotg); 2794 2795 if (retval) 2796 return retval; 2797 2798 retval = -ENOMEM; 2799 2800 hcfg = readl(hsotg->regs + HCFG); 2801 dev_dbg(hsotg->dev, "hcfg=%08x\n", hcfg); 2802 2803 #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS 2804 hsotg->frame_num_array = kzalloc(sizeof(*hsotg->frame_num_array) * 2805 FRAME_NUM_ARRAY_SIZE, GFP_KERNEL); 2806 if (!hsotg->frame_num_array) 2807 goto error1; 2808 hsotg->last_frame_num_array = kzalloc( 2809 sizeof(*hsotg->last_frame_num_array) * 2810 FRAME_NUM_ARRAY_SIZE, GFP_KERNEL); 2811 if (!hsotg->last_frame_num_array) 2812 goto error1; 2813 hsotg->last_frame_num = HFNUM_MAX_FRNUM; 2814 #endif 2815 2816 hsotg->core_params = kzalloc(sizeof(*hsotg->core_params), GFP_KERNEL); 2817 if (!hsotg->core_params) 2818 goto error1; 2819 2820 dwc2_set_all_params(hsotg->core_params, -1); 2821 2822 /* Validate parameter values */ 2823 dwc2_set_parameters(hsotg, params); 2824 2825 /* Check if the bus driver or platform code has setup a dma_mask */ 2826 if (hsotg->core_params->dma_enable > 0 && 2827 hsotg->dev->dma_mask == NULL) { 2828 dev_warn(hsotg->dev, 2829 "dma_mask not set, disabling DMA\n"); 2830 hsotg->core_params->dma_enable = 0; 2831 hsotg->core_params->dma_desc_enable = 0; 2832 } 2833 2834 /* Set device flags indicating whether the HCD supports DMA */ 2835 if (hsotg->core_params->dma_enable > 0) { 2836 if (dma_set_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0) 2837 dev_warn(hsotg->dev, "can't set DMA mask\n"); 2838 if (dma_set_coherent_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0) 2839 dev_warn(hsotg->dev, "can't set coherent DMA mask\n"); 2840 } 2841 2842 hcd = usb_create_hcd(&dwc2_hc_driver, hsotg->dev, dev_name(hsotg->dev)); 2843 if (!hcd) 2844 goto error1; 2845 2846 if (hsotg->core_params->dma_enable <= 0) 2847 hcd->self.uses_dma = 0; 2848 2849 hcd->has_tt = 1; 2850 2851 ((struct wrapper_priv_data *) &hcd->hcd_priv)->hsotg = hsotg; 2852 hsotg->priv = hcd; 2853 2854 /* 2855 * Disable the global interrupt until all the interrupt handlers are 2856 * installed 2857 */ 2858 dwc2_disable_global_interrupts(hsotg); 2859 2860 /* Initialize the DWC_otg core, and select the Phy type */ 2861 retval = dwc2_core_init(hsotg, true, irq); 2862 if (retval) 2863 goto error2; 2864 2865 /* Create new workqueue and init work */ 2866 retval = -ENOMEM; 2867 hsotg->wq_otg = create_singlethread_workqueue("dwc2"); 2868 if (!hsotg->wq_otg) { 2869 dev_err(hsotg->dev, "Failed to create workqueue\n"); 2870 goto error2; 2871 } 2872 INIT_WORK(&hsotg->wf_otg, dwc2_conn_id_status_change); 2873 2874 setup_timer(&hsotg->wkp_timer, dwc2_wakeup_detected, 2875 (unsigned long)hsotg); 2876 2877 /* Initialize the non-periodic schedule */ 2878 INIT_LIST_HEAD(&hsotg->non_periodic_sched_inactive); 2879 INIT_LIST_HEAD(&hsotg->non_periodic_sched_active); 2880 2881 /* Initialize the periodic schedule */ 2882 INIT_LIST_HEAD(&hsotg->periodic_sched_inactive); 2883 INIT_LIST_HEAD(&hsotg->periodic_sched_ready); 2884 INIT_LIST_HEAD(&hsotg->periodic_sched_assigned); 2885 INIT_LIST_HEAD(&hsotg->periodic_sched_queued); 2886 2887 /* 2888 * Create a host channel descriptor for each host channel implemented 2889 * in the 
controller. Initialize the channel descriptor array. 2890 */ 2891 INIT_LIST_HEAD(&hsotg->free_hc_list); 2892 num_channels = hsotg->core_params->host_channels; 2893 memset(&hsotg->hc_ptr_array[0], 0, sizeof(hsotg->hc_ptr_array)); 2894 2895 for (i = 0; i < num_channels; i++) { 2896 channel = kzalloc(sizeof(*channel), GFP_KERNEL); 2897 if (channel == NULL) 2898 goto error3; 2899 channel->hc_num = i; 2900 hsotg->hc_ptr_array[i] = channel; 2901 } 2902 2903 if (hsotg->core_params->uframe_sched > 0) 2904 dwc2_hcd_init_usecs(hsotg); 2905 2906 /* Initialize hsotg start work */ 2907 INIT_DELAYED_WORK(&hsotg->start_work, dwc2_hcd_start_func); 2908 2909 /* Initialize port reset work */ 2910 INIT_DELAYED_WORK(&hsotg->reset_work, dwc2_hcd_reset_func); 2911 2912 /* 2913 * Allocate space for storing data on status transactions. Normally no 2914 * data is sent, but this space acts as a bit bucket. This must be 2915 * done after usb_add_hcd since that function allocates the DMA buffer 2916 * pool. 2917 */ 2918 if (hsotg->core_params->dma_enable > 0) 2919 hsotg->status_buf = dma_alloc_coherent(hsotg->dev, 2920 DWC2_HCD_STATUS_BUF_SIZE, 2921 &hsotg->status_buf_dma, GFP_KERNEL); 2922 else 2923 hsotg->status_buf = kzalloc(DWC2_HCD_STATUS_BUF_SIZE, 2924 GFP_KERNEL); 2925 2926 if (!hsotg->status_buf) 2927 goto error3; 2928 2929 hsotg->otg_port = 1; 2930 hsotg->frame_list = NULL; 2931 hsotg->frame_list_dma = 0; 2932 hsotg->periodic_qh_count = 0; 2933 2934 /* Initiate lx_state to L3 disconnected state */ 2935 hsotg->lx_state = DWC2_L3; 2936 2937 hcd->self.otg_port = hsotg->otg_port; 2938 2939 /* Don't support SG list at this point */ 2940 hcd->self.sg_tablesize = 0; 2941 2942 /* 2943 * Finish generic HCD initialization and start the HCD. This function 2944 * allocates the DMA buffer pool, registers the USB bus, requests the 2945 * IRQ line, and calls hcd_start method. 2946 */ 2947 retval = usb_add_hcd(hcd, irq, IRQF_SHARED); 2948 if (retval < 0) 2949 goto error3; 2950 2951 device_wakeup_enable(hcd->self.controller); 2952 2953 dwc2_hcd_dump_state(hsotg); 2954 2955 dwc2_enable_global_interrupts(hsotg); 2956 2957 return 0; 2958 2959 error3: 2960 dwc2_hcd_release(hsotg); 2961 error2: 2962 usb_put_hcd(hcd); 2963 error1: 2964 kfree(hsotg->core_params); 2965 2966 #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS 2967 kfree(hsotg->last_frame_num_array); 2968 kfree(hsotg->frame_num_array); 2969 #endif 2970 2971 dev_err(hsotg->dev, "%s() FAILED, returning %d\n", __func__, retval); 2972 return retval; 2973 } 2974 EXPORT_SYMBOL_GPL(dwc2_hcd_init); 2975 2976 /* 2977 * Removes the HCD. 2978 * Frees memory and resources associated with the HCD and deregisters the bus. 2979 */ 2980 void dwc2_hcd_remove(struct dwc2_hsotg *hsotg) 2981 { 2982 struct usb_hcd *hcd; 2983 2984 dev_dbg(hsotg->dev, "DWC OTG HCD REMOVE\n"); 2985 2986 hcd = dwc2_hsotg_to_hcd(hsotg); 2987 dev_dbg(hsotg->dev, "hsotg->hcd = %p\n", hcd); 2988 2989 if (!hcd) { 2990 dev_dbg(hsotg->dev, "%s: dwc2_hsotg_to_hcd(hsotg) NULL!\n", 2991 __func__); 2992 return; 2993 } 2994 2995 usb_remove_hcd(hcd); 2996 hsotg->priv = NULL; 2997 dwc2_hcd_release(hsotg); 2998 usb_put_hcd(hcd); 2999 3000 #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS 3001 kfree(hsotg->last_frame_num_array); 3002 kfree(hsotg->frame_num_array); 3003 #endif 3004 } 3005 EXPORT_SYMBOL_GPL(dwc2_hcd_remove); 3006
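
/*
 * Example (illustrative only): a minimal sketch of how a bus-glue driver
 * might hand this controller to the HCD code above. Only
 * dwc2_set_all_params(), dwc2_hcd_init() and dwc2_hcd_remove() come from
 * this file; the probe/remove functions, their names, and the surrounding
 * device setup are assumptions made for illustration, not part of this
 * driver.
 *
 *	static int example_dwc2_probe(struct platform_device *pdev)
 *	{
 *		struct dwc2_hsotg *hsotg;
 *		struct dwc2_core_params params;
 *		struct resource *res;
 *		int irq;
 *
 *		hsotg = devm_kzalloc(&pdev->dev, sizeof(*hsotg), GFP_KERNEL);
 *		if (!hsotg)
 *			return -ENOMEM;
 *		hsotg->dev = &pdev->dev;
 *
 *		// Map the controller registers and fetch its interrupt
 *		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *		hsotg->regs = devm_ioremap_resource(&pdev->dev, res);
 *		if (IS_ERR(hsotg->regs))
 *			return PTR_ERR(hsotg->regs);
 *		irq = platform_get_irq(pdev, 0);
 *		if (irq < 0)
 *			return irq;
 *
 *		// -1 in every field lets the parameter validation fall back
 *		// to defaults appropriate for the detected hardware
 *		dwc2_set_all_params(&params, -1);
 *
 *		platform_set_drvdata(pdev, hsotg);
 *		return dwc2_hcd_init(hsotg, irq, &params);
 *	}
 *
 *	static int example_dwc2_remove(struct platform_device *pdev)
 *	{
 *		struct dwc2_hsotg *hsotg = platform_get_drvdata(pdev);
 *
 *		dwc2_hcd_remove(hsotg);
 *		return 0;
 *	}
 *
 * The sketch deliberately relies on dwc2_hcd_init() to allocate and validate
 * hsotg->core_params itself; the glue only has to provide the device
 * pointer, the mapped register base, and the IRQ number.
 */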