1 /* 2 * hcd.c - DesignWare HS OTG Controller host-mode routines 3 * 4 * Copyright (C) 2004-2013 Synopsys, Inc. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions, and the following disclaimer, 11 * without modification. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. The names of the above-listed copyright holders may not be used 16 * to endorse or promote products derived from this software without 17 * specific prior written permission. 18 * 19 * ALTERNATIVELY, this software may be distributed under the terms of the 20 * GNU General Public License ("GPL") as published by the Free Software 21 * Foundation; either version 2 of the License, or (at your option) any 22 * later version. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 25 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR 28 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 29 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 30 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 31 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 32 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 33 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 34 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 35 */ 36 37 /* 38 * This file contains the core HCD code, and implements the Linux hc_driver 39 * API 40 */ 41 #include <linux/kernel.h> 42 #include <linux/module.h> 43 #include <linux/spinlock.h> 44 #include <linux/interrupt.h> 45 #include <linux/dma-mapping.h> 46 #include <linux/delay.h> 47 #include <linux/io.h> 48 #include <linux/slab.h> 49 #include <linux/usb.h> 50 51 #include <linux/usb/hcd.h> 52 #include <linux/usb/ch11.h> 53 54 #include "core.h" 55 #include "hcd.h" 56 57 /** 58 * dwc2_dump_channel_info() - Prints the state of a host channel 59 * 60 * @hsotg: Programming view of DWC_otg controller 61 * @chan: Pointer to the channel to dump 62 * 63 * Must be called with interrupt disabled and spinlock held 64 * 65 * NOTE: This function will be removed once the peripheral controller code 66 * is integrated and the driver is stable 67 */ 68 static void dwc2_dump_channel_info(struct dwc2_hsotg *hsotg, 69 struct dwc2_host_chan *chan) 70 { 71 #ifdef VERBOSE_DEBUG 72 int num_channels = hsotg->core_params->host_channels; 73 struct dwc2_qh *qh; 74 u32 hcchar; 75 u32 hcsplt; 76 u32 hctsiz; 77 u32 hc_dma; 78 int i; 79 80 if (chan == NULL) 81 return; 82 83 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num)); 84 hcsplt = readl(hsotg->regs + HCSPLT(chan->hc_num)); 85 hctsiz = readl(hsotg->regs + HCTSIZ(chan->hc_num)); 86 hc_dma = readl(hsotg->regs + HCDMA(chan->hc_num)); 87 88 dev_dbg(hsotg->dev, " Assigned to channel %p:\n", chan); 89 dev_dbg(hsotg->dev, " hcchar 0x%08x, hcsplt 0x%08x\n", 90 hcchar, hcsplt); 91 dev_dbg(hsotg->dev, " hctsiz 0x%08x, hc_dma 0x%08x\n", 92 hctsiz, hc_dma); 93 dev_dbg(hsotg->dev, " dev_addr: %d, ep_num: %d, ep_is_in: %d\n", 94 chan->dev_addr, chan->ep_num, chan->ep_is_in); 95 dev_dbg(hsotg->dev, " ep_type: %d\n", chan->ep_type); 96 dev_dbg(hsotg->dev, " max_packet: %d\n", chan->max_packet); 97 dev_dbg(hsotg->dev, " data_pid_start: %d\n", chan->data_pid_start); 98 dev_dbg(hsotg->dev, " xfer_started: %d\n", chan->xfer_started); 99 dev_dbg(hsotg->dev, " halt_status: %d\n", chan->halt_status); 100 dev_dbg(hsotg->dev, " xfer_buf: %p\n", chan->xfer_buf); 101 dev_dbg(hsotg->dev, " xfer_dma: %08lx\n", 102 (unsigned long)chan->xfer_dma); 103 dev_dbg(hsotg->dev, " xfer_len: %d\n", chan->xfer_len); 104 dev_dbg(hsotg->dev, " qh: %p\n", chan->qh); 105 dev_dbg(hsotg->dev, " NP inactive sched:\n"); 106 list_for_each_entry(qh, &hsotg->non_periodic_sched_inactive, 107 qh_list_entry) 108 dev_dbg(hsotg->dev, " %p\n", qh); 109 dev_dbg(hsotg->dev, " NP active sched:\n"); 110 list_for_each_entry(qh, &hsotg->non_periodic_sched_active, 111 qh_list_entry) 112 dev_dbg(hsotg->dev, " %p\n", qh); 113 dev_dbg(hsotg->dev, " Channels:\n"); 114 for (i = 0; i < num_channels; i++) { 115 struct dwc2_host_chan *chan = hsotg->hc_ptr_array[i]; 116 117 dev_dbg(hsotg->dev, " %2d: %p\n", i, chan); 118 } 119 #endif /* VERBOSE_DEBUG */ 120 } 121 122 /* 123 * Processes all the URBs in a single list of QHs. Completes them with 124 * -ETIMEDOUT and frees the QTD. 125 * 126 * Must be called with interrupt disabled and spinlock held 127 */ 128 static void dwc2_kill_urbs_in_qh_list(struct dwc2_hsotg *hsotg, 129 struct list_head *qh_list) 130 { 131 struct dwc2_qh *qh, *qh_tmp; 132 struct dwc2_qtd *qtd, *qtd_tmp; 133 134 list_for_each_entry_safe(qh, qh_tmp, qh_list, qh_list_entry) { 135 list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, 136 qtd_list_entry) { 137 dwc2_host_complete(hsotg, qtd, -ETIMEDOUT); 138 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh); 139 } 140 } 141 } 142 143 static void dwc2_qh_list_free(struct dwc2_hsotg *hsotg, 144 struct list_head *qh_list) 145 { 146 struct dwc2_qtd *qtd, *qtd_tmp; 147 struct dwc2_qh *qh, *qh_tmp; 148 unsigned long flags; 149 150 if (!qh_list->next) 151 /* The list hasn't been initialized yet */ 152 return; 153 154 spin_lock_irqsave(&hsotg->lock, flags); 155 156 /* Ensure there are no QTDs or URBs left */ 157 dwc2_kill_urbs_in_qh_list(hsotg, qh_list); 158 159 list_for_each_entry_safe(qh, qh_tmp, qh_list, qh_list_entry) { 160 dwc2_hcd_qh_unlink(hsotg, qh); 161 162 /* Free each QTD in the QH's QTD list */ 163 list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, 164 qtd_list_entry) 165 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh); 166 167 spin_unlock_irqrestore(&hsotg->lock, flags); 168 dwc2_hcd_qh_free(hsotg, qh); 169 spin_lock_irqsave(&hsotg->lock, flags); 170 } 171 172 spin_unlock_irqrestore(&hsotg->lock, flags); 173 } 174 175 /* 176 * Responds with an error status of -ETIMEDOUT to all URBs in the non-periodic 177 * and periodic schedules. The QTD associated with each URB is removed from 178 * the schedule and freed. This function may be called when a disconnect is 179 * detected or when the HCD is being stopped. 180 * 181 * Must be called with interrupt disabled and spinlock held 182 */ 183 static void dwc2_kill_all_urbs(struct dwc2_hsotg *hsotg) 184 { 185 dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->non_periodic_sched_inactive); 186 dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->non_periodic_sched_active); 187 dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_inactive); 188 dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_ready); 189 dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_assigned); 190 dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_queued); 191 } 192 193 /** 194 * dwc2_hcd_start() - Starts the HCD when switching to Host mode 195 * 196 * @hsotg: Pointer to struct dwc2_hsotg 197 */ 198 void dwc2_hcd_start(struct dwc2_hsotg *hsotg) 199 { 200 u32 hprt0; 201 202 if (hsotg->op_state == OTG_STATE_B_HOST) { 203 /* 204 * Reset the port. During a HNP mode switch the reset 205 * needs to occur within 1ms and have a duration of at 206 * least 50ms. 207 */ 208 hprt0 = dwc2_read_hprt0(hsotg); 209 hprt0 |= HPRT0_RST; 210 writel(hprt0, hsotg->regs + HPRT0); 211 } 212 213 queue_delayed_work(hsotg->wq_otg, &hsotg->start_work, 214 msecs_to_jiffies(50)); 215 } 216 217 /* Must be called with interrupt disabled and spinlock held */ 218 static void dwc2_hcd_cleanup_channels(struct dwc2_hsotg *hsotg) 219 { 220 int num_channels = hsotg->core_params->host_channels; 221 struct dwc2_host_chan *channel; 222 u32 hcchar; 223 int i; 224 225 if (hsotg->core_params->dma_enable <= 0) { 226 /* Flush out any channel requests in slave mode */ 227 for (i = 0; i < num_channels; i++) { 228 channel = hsotg->hc_ptr_array[i]; 229 if (!list_empty(&channel->hc_list_entry)) 230 continue; 231 hcchar = readl(hsotg->regs + HCCHAR(i)); 232 if (hcchar & HCCHAR_CHENA) { 233 hcchar &= ~(HCCHAR_CHENA | HCCHAR_EPDIR); 234 hcchar |= HCCHAR_CHDIS; 235 writel(hcchar, hsotg->regs + HCCHAR(i)); 236 } 237 } 238 } 239 240 for (i = 0; i < num_channels; i++) { 241 channel = hsotg->hc_ptr_array[i]; 242 if (!list_empty(&channel->hc_list_entry)) 243 continue; 244 hcchar = readl(hsotg->regs + HCCHAR(i)); 245 if (hcchar & HCCHAR_CHENA) { 246 /* Halt the channel */ 247 hcchar |= HCCHAR_CHDIS; 248 writel(hcchar, hsotg->regs + HCCHAR(i)); 249 } 250 251 dwc2_hc_cleanup(hsotg, channel); 252 list_add_tail(&channel->hc_list_entry, &hsotg->free_hc_list); 253 /* 254 * Added for Descriptor DMA to prevent channel double cleanup in 255 * release_channel_ddma(), which is called from ep_disable when 256 * device disconnects 257 */ 258 channel->qh = NULL; 259 } 260 } 261 262 /** 263 * dwc2_hcd_disconnect() - Handles disconnect of the HCD 264 * 265 * @hsotg: Pointer to struct dwc2_hsotg 266 * 267 * Must be called with interrupt disabled and spinlock held 268 */ 269 void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg) 270 { 271 u32 intr; 272 273 /* Set status flags for the hub driver */ 274 hsotg->flags.b.port_connect_status_change = 1; 275 hsotg->flags.b.port_connect_status = 0; 276 277 /* 278 * Shutdown any transfers in process by clearing the Tx FIFO Empty 279 * interrupt mask and status bits and disabling subsequent host 280 * channel interrupts. 281 */ 282 intr = readl(hsotg->regs + GINTMSK); 283 intr &= ~(GINTSTS_NPTXFEMP | GINTSTS_PTXFEMP | GINTSTS_HCHINT); 284 writel(intr, hsotg->regs + GINTMSK); 285 intr = GINTSTS_NPTXFEMP | GINTSTS_PTXFEMP | GINTSTS_HCHINT; 286 writel(intr, hsotg->regs + GINTSTS); 287 288 /* 289 * Turn off the vbus power only if the core has transitioned to device 290 * mode. If still in host mode, need to keep power on to detect a 291 * reconnection. 292 */ 293 if (dwc2_is_device_mode(hsotg)) { 294 if (hsotg->op_state != OTG_STATE_A_SUSPEND) { 295 dev_dbg(hsotg->dev, "Disconnect: PortPower off\n"); 296 writel(0, hsotg->regs + HPRT0); 297 } 298 299 dwc2_disable_host_interrupts(hsotg); 300 } 301 302 /* Respond with an error status to all URBs in the schedule */ 303 dwc2_kill_all_urbs(hsotg); 304 305 if (dwc2_is_host_mode(hsotg)) 306 /* Clean up any host channels that were in use */ 307 dwc2_hcd_cleanup_channels(hsotg); 308 309 dwc2_host_disconnect(hsotg); 310 } 311 312 /** 313 * dwc2_hcd_rem_wakeup() - Handles Remote Wakeup 314 * 315 * @hsotg: Pointer to struct dwc2_hsotg 316 */ 317 static void dwc2_hcd_rem_wakeup(struct dwc2_hsotg *hsotg) 318 { 319 if (hsotg->lx_state == DWC2_L2) 320 hsotg->flags.b.port_suspend_change = 1; 321 else 322 hsotg->flags.b.port_l1_change = 1; 323 } 324 325 /** 326 * dwc2_hcd_stop() - Halts the DWC_otg host mode operations in a clean manner 327 * 328 * @hsotg: Pointer to struct dwc2_hsotg 329 * 330 * Must be called with interrupt disabled and spinlock held 331 */ 332 void dwc2_hcd_stop(struct dwc2_hsotg *hsotg) 333 { 334 dev_dbg(hsotg->dev, "DWC OTG HCD STOP\n"); 335 336 /* 337 * The root hub should be disconnected before this function is called. 338 * The disconnect will clear the QTD lists (via ..._hcd_urb_dequeue) 339 * and the QH lists (via ..._hcd_endpoint_disable). 340 */ 341 342 /* Turn off all host-specific interrupts */ 343 dwc2_disable_host_interrupts(hsotg); 344 345 /* Turn off the vbus power */ 346 dev_dbg(hsotg->dev, "PortPower off\n"); 347 writel(0, hsotg->regs + HPRT0); 348 } 349 350 static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg, 351 struct dwc2_hcd_urb *urb, void **ep_handle, 352 gfp_t mem_flags) 353 { 354 struct dwc2_qtd *qtd; 355 unsigned long flags; 356 u32 intr_mask; 357 int retval; 358 int dev_speed; 359 360 if (!hsotg->flags.b.port_connect_status) { 361 /* No longer connected */ 362 dev_err(hsotg->dev, "Not connected\n"); 363 return -ENODEV; 364 } 365 366 dev_speed = dwc2_host_get_speed(hsotg, urb->priv); 367 368 /* Some configurations cannot support LS traffic on a FS root port */ 369 if ((dev_speed == USB_SPEED_LOW) && 370 (hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED) && 371 (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI)) { 372 u32 hprt0 = readl(hsotg->regs + HPRT0); 373 u32 prtspd = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT; 374 375 if (prtspd == HPRT0_SPD_FULL_SPEED) 376 return -ENODEV; 377 } 378 379 qtd = kzalloc(sizeof(*qtd), mem_flags); 380 if (!qtd) 381 return -ENOMEM; 382 383 dwc2_hcd_qtd_init(qtd, urb); 384 retval = dwc2_hcd_qtd_add(hsotg, qtd, (struct dwc2_qh **)ep_handle, 385 mem_flags); 386 if (retval) { 387 dev_err(hsotg->dev, 388 "DWC OTG HCD URB Enqueue failed adding QTD. Error status %d\n", 389 retval); 390 kfree(qtd); 391 return retval; 392 } 393 394 intr_mask = readl(hsotg->regs + GINTMSK); 395 if (!(intr_mask & GINTSTS_SOF)) { 396 enum dwc2_transaction_type tr_type; 397 398 if (qtd->qh->ep_type == USB_ENDPOINT_XFER_BULK && 399 !(qtd->urb->flags & URB_GIVEBACK_ASAP)) 400 /* 401 * Do not schedule SG transactions until qtd has 402 * URB_GIVEBACK_ASAP set 403 */ 404 return 0; 405 406 spin_lock_irqsave(&hsotg->lock, flags); 407 tr_type = dwc2_hcd_select_transactions(hsotg); 408 if (tr_type != DWC2_TRANSACTION_NONE) 409 dwc2_hcd_queue_transactions(hsotg, tr_type); 410 spin_unlock_irqrestore(&hsotg->lock, flags); 411 } 412 413 return 0; 414 } 415 416 /* Must be called with interrupt disabled and spinlock held */ 417 static int dwc2_hcd_urb_dequeue(struct dwc2_hsotg *hsotg, 418 struct dwc2_hcd_urb *urb) 419 { 420 struct dwc2_qh *qh; 421 struct dwc2_qtd *urb_qtd; 422 423 urb_qtd = urb->qtd; 424 if (!urb_qtd) { 425 dev_dbg(hsotg->dev, "## Urb QTD is NULL ##\n"); 426 return -EINVAL; 427 } 428 429 qh = urb_qtd->qh; 430 if (!qh) { 431 dev_dbg(hsotg->dev, "## Urb QTD QH is NULL ##\n"); 432 return -EINVAL; 433 } 434 435 urb->priv = NULL; 436 437 if (urb_qtd->in_process && qh->channel) { 438 dwc2_dump_channel_info(hsotg, qh->channel); 439 440 /* The QTD is in process (it has been assigned to a channel) */ 441 if (hsotg->flags.b.port_connect_status) 442 /* 443 * If still connected (i.e. in host mode), halt the 444 * channel so it can be used for other transfers. If 445 * no longer connected, the host registers can't be 446 * written to halt the channel since the core is in 447 * device mode. 448 */ 449 dwc2_hc_halt(hsotg, qh->channel, 450 DWC2_HC_XFER_URB_DEQUEUE); 451 } 452 453 /* 454 * Free the QTD and clean up the associated QH. Leave the QH in the 455 * schedule if it has any remaining QTDs. 456 */ 457 if (hsotg->core_params->dma_desc_enable <= 0) { 458 u8 in_process = urb_qtd->in_process; 459 460 dwc2_hcd_qtd_unlink_and_free(hsotg, urb_qtd, qh); 461 if (in_process) { 462 dwc2_hcd_qh_deactivate(hsotg, qh, 0); 463 qh->channel = NULL; 464 } else if (list_empty(&qh->qtd_list)) { 465 dwc2_hcd_qh_unlink(hsotg, qh); 466 } 467 } else { 468 dwc2_hcd_qtd_unlink_and_free(hsotg, urb_qtd, qh); 469 } 470 471 return 0; 472 } 473 474 /* Must NOT be called with interrupt disabled or spinlock held */ 475 static int dwc2_hcd_endpoint_disable(struct dwc2_hsotg *hsotg, 476 struct usb_host_endpoint *ep, int retry) 477 { 478 struct dwc2_qtd *qtd, *qtd_tmp; 479 struct dwc2_qh *qh; 480 unsigned long flags; 481 int rc; 482 483 spin_lock_irqsave(&hsotg->lock, flags); 484 485 qh = ep->hcpriv; 486 if (!qh) { 487 rc = -EINVAL; 488 goto err; 489 } 490 491 while (!list_empty(&qh->qtd_list) && retry--) { 492 if (retry == 0) { 493 dev_err(hsotg->dev, 494 "## timeout in dwc2_hcd_endpoint_disable() ##\n"); 495 rc = -EBUSY; 496 goto err; 497 } 498 499 spin_unlock_irqrestore(&hsotg->lock, flags); 500 usleep_range(20000, 40000); 501 spin_lock_irqsave(&hsotg->lock, flags); 502 qh = ep->hcpriv; 503 if (!qh) { 504 rc = -EINVAL; 505 goto err; 506 } 507 } 508 509 dwc2_hcd_qh_unlink(hsotg, qh); 510 511 /* Free each QTD in the QH's QTD list */ 512 list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) 513 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh); 514 515 ep->hcpriv = NULL; 516 spin_unlock_irqrestore(&hsotg->lock, flags); 517 dwc2_hcd_qh_free(hsotg, qh); 518 519 return 0; 520 521 err: 522 ep->hcpriv = NULL; 523 spin_unlock_irqrestore(&hsotg->lock, flags); 524 525 return rc; 526 } 527 528 /* Must be called with interrupt disabled and spinlock held */ 529 static int dwc2_hcd_endpoint_reset(struct dwc2_hsotg *hsotg, 530 struct usb_host_endpoint *ep) 531 { 532 struct dwc2_qh *qh = ep->hcpriv; 533 534 if (!qh) 535 return -EINVAL; 536 537 qh->data_toggle = DWC2_HC_PID_DATA0; 538 539 return 0; 540 } 541 542 /* 543 * Initializes dynamic portions of the DWC_otg HCD state 544 * 545 * Must be called with interrupt disabled and spinlock held 546 */ 547 static void dwc2_hcd_reinit(struct dwc2_hsotg *hsotg) 548 { 549 struct dwc2_host_chan *chan, *chan_tmp; 550 int num_channels; 551 int i; 552 553 hsotg->flags.d32 = 0; 554 hsotg->non_periodic_qh_ptr = &hsotg->non_periodic_sched_active; 555 556 if (hsotg->core_params->uframe_sched > 0) { 557 hsotg->available_host_channels = 558 hsotg->core_params->host_channels; 559 } else { 560 hsotg->non_periodic_channels = 0; 561 hsotg->periodic_channels = 0; 562 } 563 564 /* 565 * Put all channels in the free channel list and clean up channel 566 * states 567 */ 568 list_for_each_entry_safe(chan, chan_tmp, &hsotg->free_hc_list, 569 hc_list_entry) 570 list_del_init(&chan->hc_list_entry); 571 572 num_channels = hsotg->core_params->host_channels; 573 for (i = 0; i < num_channels; i++) { 574 chan = hsotg->hc_ptr_array[i]; 575 list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list); 576 dwc2_hc_cleanup(hsotg, chan); 577 } 578 579 /* Initialize the DWC core for host mode operation */ 580 dwc2_core_host_init(hsotg); 581 } 582 583 static void dwc2_hc_init_split(struct dwc2_hsotg *hsotg, 584 struct dwc2_host_chan *chan, 585 struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb) 586 { 587 int hub_addr, hub_port; 588 589 chan->do_split = 1; 590 chan->xact_pos = qtd->isoc_split_pos; 591 chan->complete_split = qtd->complete_split; 592 dwc2_host_hub_info(hsotg, urb->priv, &hub_addr, &hub_port); 593 chan->hub_addr = (u8)hub_addr; 594 chan->hub_port = (u8)hub_port; 595 } 596 597 static void *dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg, 598 struct dwc2_host_chan *chan, 599 struct dwc2_qtd *qtd, void *bufptr) 600 { 601 struct dwc2_hcd_urb *urb = qtd->urb; 602 struct dwc2_hcd_iso_packet_desc *frame_desc; 603 604 switch (dwc2_hcd_get_pipe_type(&urb->pipe_info)) { 605 case USB_ENDPOINT_XFER_CONTROL: 606 chan->ep_type = USB_ENDPOINT_XFER_CONTROL; 607 608 switch (qtd->control_phase) { 609 case DWC2_CONTROL_SETUP: 610 dev_vdbg(hsotg->dev, " Control setup transaction\n"); 611 chan->do_ping = 0; 612 chan->ep_is_in = 0; 613 chan->data_pid_start = DWC2_HC_PID_SETUP; 614 if (hsotg->core_params->dma_enable > 0) 615 chan->xfer_dma = urb->setup_dma; 616 else 617 chan->xfer_buf = urb->setup_packet; 618 chan->xfer_len = 8; 619 bufptr = NULL; 620 break; 621 622 case DWC2_CONTROL_DATA: 623 dev_vdbg(hsotg->dev, " Control data transaction\n"); 624 chan->data_pid_start = qtd->data_toggle; 625 break; 626 627 case DWC2_CONTROL_STATUS: 628 /* 629 * Direction is opposite of data direction or IN if no 630 * data 631 */ 632 dev_vdbg(hsotg->dev, " Control status transaction\n"); 633 if (urb->length == 0) 634 chan->ep_is_in = 1; 635 else 636 chan->ep_is_in = 637 dwc2_hcd_is_pipe_out(&urb->pipe_info); 638 if (chan->ep_is_in) 639 chan->do_ping = 0; 640 chan->data_pid_start = DWC2_HC_PID_DATA1; 641 chan->xfer_len = 0; 642 if (hsotg->core_params->dma_enable > 0) 643 chan->xfer_dma = hsotg->status_buf_dma; 644 else 645 chan->xfer_buf = hsotg->status_buf; 646 bufptr = NULL; 647 break; 648 } 649 break; 650 651 case USB_ENDPOINT_XFER_BULK: 652 chan->ep_type = USB_ENDPOINT_XFER_BULK; 653 break; 654 655 case USB_ENDPOINT_XFER_INT: 656 chan->ep_type = USB_ENDPOINT_XFER_INT; 657 break; 658 659 case USB_ENDPOINT_XFER_ISOC: 660 chan->ep_type = USB_ENDPOINT_XFER_ISOC; 661 if (hsotg->core_params->dma_desc_enable > 0) 662 break; 663 664 frame_desc = &urb->iso_descs[qtd->isoc_frame_index]; 665 frame_desc->status = 0; 666 667 if (hsotg->core_params->dma_enable > 0) { 668 chan->xfer_dma = urb->dma; 669 chan->xfer_dma += frame_desc->offset + 670 qtd->isoc_split_offset; 671 } else { 672 chan->xfer_buf = urb->buf; 673 chan->xfer_buf += frame_desc->offset + 674 qtd->isoc_split_offset; 675 } 676 677 chan->xfer_len = frame_desc->length - qtd->isoc_split_offset; 678 679 /* For non-dword aligned buffers */ 680 if (hsotg->core_params->dma_enable > 0 && 681 (chan->xfer_dma & 0x3)) 682 bufptr = (u8 *)urb->buf + frame_desc->offset + 683 qtd->isoc_split_offset; 684 else 685 bufptr = NULL; 686 687 if (chan->xact_pos == DWC2_HCSPLT_XACTPOS_ALL) { 688 if (chan->xfer_len <= 188) 689 chan->xact_pos = DWC2_HCSPLT_XACTPOS_ALL; 690 else 691 chan->xact_pos = DWC2_HCSPLT_XACTPOS_BEGIN; 692 } 693 break; 694 } 695 696 return bufptr; 697 } 698 699 static int dwc2_hc_setup_align_buf(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, 700 struct dwc2_host_chan *chan, void *bufptr) 701 { 702 u32 buf_size; 703 704 if (chan->ep_type != USB_ENDPOINT_XFER_ISOC) 705 buf_size = hsotg->core_params->max_transfer_size; 706 else 707 buf_size = 4096; 708 709 if (!qh->dw_align_buf) { 710 qh->dw_align_buf = dma_alloc_coherent(hsotg->dev, buf_size, 711 &qh->dw_align_buf_dma, 712 GFP_ATOMIC); 713 if (!qh->dw_align_buf) 714 return -ENOMEM; 715 } 716 717 if (!chan->ep_is_in && chan->xfer_len) { 718 dma_sync_single_for_cpu(hsotg->dev, chan->xfer_dma, buf_size, 719 DMA_TO_DEVICE); 720 memcpy(qh->dw_align_buf, bufptr, chan->xfer_len); 721 dma_sync_single_for_device(hsotg->dev, chan->xfer_dma, buf_size, 722 DMA_TO_DEVICE); 723 } 724 725 chan->align_buf = qh->dw_align_buf_dma; 726 return 0; 727 } 728 729 /** 730 * dwc2_assign_and_init_hc() - Assigns transactions from a QTD to a free host 731 * channel and initializes the host channel to perform the transactions. The 732 * host channel is removed from the free list. 733 * 734 * @hsotg: The HCD state structure 735 * @qh: Transactions from the first QTD for this QH are selected and assigned 736 * to a free host channel 737 */ 738 static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) 739 { 740 struct dwc2_host_chan *chan; 741 struct dwc2_hcd_urb *urb; 742 struct dwc2_qtd *qtd; 743 void *bufptr = NULL; 744 745 if (dbg_qh(qh)) 746 dev_vdbg(hsotg->dev, "%s(%p,%p)\n", __func__, hsotg, qh); 747 748 if (list_empty(&qh->qtd_list)) { 749 dev_dbg(hsotg->dev, "No QTDs in QH list\n"); 750 return -ENOMEM; 751 } 752 753 if (list_empty(&hsotg->free_hc_list)) { 754 dev_dbg(hsotg->dev, "No free channel to assign\n"); 755 return -ENOMEM; 756 } 757 758 chan = list_first_entry(&hsotg->free_hc_list, struct dwc2_host_chan, 759 hc_list_entry); 760 761 /* Remove host channel from free list */ 762 list_del_init(&chan->hc_list_entry); 763 764 qtd = list_first_entry(&qh->qtd_list, struct dwc2_qtd, qtd_list_entry); 765 urb = qtd->urb; 766 qh->channel = chan; 767 qtd->in_process = 1; 768 769 /* 770 * Use usb_pipedevice to determine device address. This address is 771 * 0 before the SET_ADDRESS command and the correct address afterward. 772 */ 773 chan->dev_addr = dwc2_hcd_get_dev_addr(&urb->pipe_info); 774 chan->ep_num = dwc2_hcd_get_ep_num(&urb->pipe_info); 775 chan->speed = qh->dev_speed; 776 chan->max_packet = dwc2_max_packet(qh->maxp); 777 778 chan->xfer_started = 0; 779 chan->halt_status = DWC2_HC_XFER_NO_HALT_STATUS; 780 chan->error_state = (qtd->error_count > 0); 781 chan->halt_on_queue = 0; 782 chan->halt_pending = 0; 783 chan->requests = 0; 784 785 /* 786 * The following values may be modified in the transfer type section 787 * below. The xfer_len value may be reduced when the transfer is 788 * started to accommodate the max widths of the XferSize and PktCnt 789 * fields in the HCTSIZn register. 790 */ 791 792 chan->ep_is_in = (dwc2_hcd_is_pipe_in(&urb->pipe_info) != 0); 793 if (chan->ep_is_in) 794 chan->do_ping = 0; 795 else 796 chan->do_ping = qh->ping_state; 797 798 chan->data_pid_start = qh->data_toggle; 799 chan->multi_count = 1; 800 801 if (urb->actual_length > urb->length && 802 !dwc2_hcd_is_pipe_in(&urb->pipe_info)) 803 urb->actual_length = urb->length; 804 805 if (hsotg->core_params->dma_enable > 0) { 806 chan->xfer_dma = urb->dma + urb->actual_length; 807 808 /* For non-dword aligned case */ 809 if (hsotg->core_params->dma_desc_enable <= 0 && 810 (chan->xfer_dma & 0x3)) 811 bufptr = (u8 *)urb->buf + urb->actual_length; 812 } else { 813 chan->xfer_buf = (u8 *)urb->buf + urb->actual_length; 814 } 815 816 chan->xfer_len = urb->length - urb->actual_length; 817 chan->xfer_count = 0; 818 819 /* Set the split attributes if required */ 820 if (qh->do_split) 821 dwc2_hc_init_split(hsotg, chan, qtd, urb); 822 else 823 chan->do_split = 0; 824 825 /* Set the transfer attributes */ 826 bufptr = dwc2_hc_init_xfer(hsotg, chan, qtd, bufptr); 827 828 /* Non DWORD-aligned buffer case */ 829 if (bufptr) { 830 dev_vdbg(hsotg->dev, "Non-aligned buffer\n"); 831 if (dwc2_hc_setup_align_buf(hsotg, qh, chan, bufptr)) { 832 dev_err(hsotg->dev, 833 "%s: Failed to allocate memory to handle non-dword aligned buffer\n", 834 __func__); 835 /* Add channel back to free list */ 836 chan->align_buf = 0; 837 chan->multi_count = 0; 838 list_add_tail(&chan->hc_list_entry, 839 &hsotg->free_hc_list); 840 qtd->in_process = 0; 841 qh->channel = NULL; 842 return -ENOMEM; 843 } 844 } else { 845 chan->align_buf = 0; 846 } 847 848 if (chan->ep_type == USB_ENDPOINT_XFER_INT || 849 chan->ep_type == USB_ENDPOINT_XFER_ISOC) 850 /* 851 * This value may be modified when the transfer is started 852 * to reflect the actual transfer length 853 */ 854 chan->multi_count = dwc2_hb_mult(qh->maxp); 855 856 if (hsotg->core_params->dma_desc_enable > 0) 857 chan->desc_list_addr = qh->desc_list_dma; 858 859 dwc2_hc_init(hsotg, chan); 860 chan->qh = qh; 861 862 return 0; 863 } 864 865 /** 866 * dwc2_hcd_select_transactions() - Selects transactions from the HCD transfer 867 * schedule and assigns them to available host channels. Called from the HCD 868 * interrupt handler functions. 869 * 870 * @hsotg: The HCD state structure 871 * 872 * Return: The types of new transactions that were assigned to host channels 873 */ 874 enum dwc2_transaction_type dwc2_hcd_select_transactions( 875 struct dwc2_hsotg *hsotg) 876 { 877 enum dwc2_transaction_type ret_val = DWC2_TRANSACTION_NONE; 878 struct list_head *qh_ptr; 879 struct dwc2_qh *qh; 880 int num_channels; 881 882 #ifdef DWC2_DEBUG_SOF 883 dev_vdbg(hsotg->dev, " Select Transactions\n"); 884 #endif 885 886 /* Process entries in the periodic ready list */ 887 qh_ptr = hsotg->periodic_sched_ready.next; 888 while (qh_ptr != &hsotg->periodic_sched_ready) { 889 if (list_empty(&hsotg->free_hc_list)) 890 break; 891 if (hsotg->core_params->uframe_sched > 0) { 892 if (hsotg->available_host_channels <= 1) 893 break; 894 hsotg->available_host_channels--; 895 } 896 qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry); 897 if (dwc2_assign_and_init_hc(hsotg, qh)) 898 break; 899 900 /* 901 * Move the QH from the periodic ready schedule to the 902 * periodic assigned schedule 903 */ 904 qh_ptr = qh_ptr->next; 905 list_move(&qh->qh_list_entry, &hsotg->periodic_sched_assigned); 906 ret_val = DWC2_TRANSACTION_PERIODIC; 907 } 908 909 /* 910 * Process entries in the inactive portion of the non-periodic 911 * schedule. Some free host channels may not be used if they are 912 * reserved for periodic transfers. 913 */ 914 num_channels = hsotg->core_params->host_channels; 915 qh_ptr = hsotg->non_periodic_sched_inactive.next; 916 while (qh_ptr != &hsotg->non_periodic_sched_inactive) { 917 if (hsotg->core_params->uframe_sched <= 0 && 918 hsotg->non_periodic_channels >= num_channels - 919 hsotg->periodic_channels) 920 break; 921 if (list_empty(&hsotg->free_hc_list)) 922 break; 923 qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry); 924 if (hsotg->core_params->uframe_sched > 0) { 925 if (hsotg->available_host_channels < 1) 926 break; 927 hsotg->available_host_channels--; 928 } 929 930 if (dwc2_assign_and_init_hc(hsotg, qh)) 931 break; 932 933 /* 934 * Move the QH from the non-periodic inactive schedule to the 935 * non-periodic active schedule 936 */ 937 qh_ptr = qh_ptr->next; 938 list_move(&qh->qh_list_entry, 939 &hsotg->non_periodic_sched_active); 940 941 if (ret_val == DWC2_TRANSACTION_NONE) 942 ret_val = DWC2_TRANSACTION_NON_PERIODIC; 943 else 944 ret_val = DWC2_TRANSACTION_ALL; 945 946 if (hsotg->core_params->uframe_sched <= 0) 947 hsotg->non_periodic_channels++; 948 } 949 950 return ret_val; 951 } 952 953 /** 954 * dwc2_queue_transaction() - Attempts to queue a single transaction request for 955 * a host channel associated with either a periodic or non-periodic transfer 956 * 957 * @hsotg: The HCD state structure 958 * @chan: Host channel descriptor associated with either a periodic or 959 * non-periodic transfer 960 * @fifo_dwords_avail: Number of DWORDs available in the periodic Tx FIFO 961 * for periodic transfers or the non-periodic Tx FIFO 962 * for non-periodic transfers 963 * 964 * Return: 1 if a request is queued and more requests may be needed to 965 * complete the transfer, 0 if no more requests are required for this 966 * transfer, -1 if there is insufficient space in the Tx FIFO 967 * 968 * This function assumes that there is space available in the appropriate 969 * request queue. For an OUT transfer or SETUP transaction in Slave mode, 970 * it checks whether space is available in the appropriate Tx FIFO. 971 * 972 * Must be called with interrupt disabled and spinlock held 973 */ 974 static int dwc2_queue_transaction(struct dwc2_hsotg *hsotg, 975 struct dwc2_host_chan *chan, 976 u16 fifo_dwords_avail) 977 { 978 int retval = 0; 979 980 if (hsotg->core_params->dma_enable > 0) { 981 if (hsotg->core_params->dma_desc_enable > 0) { 982 if (!chan->xfer_started || 983 chan->ep_type == USB_ENDPOINT_XFER_ISOC) { 984 dwc2_hcd_start_xfer_ddma(hsotg, chan->qh); 985 chan->qh->ping_state = 0; 986 } 987 } else if (!chan->xfer_started) { 988 dwc2_hc_start_transfer(hsotg, chan); 989 chan->qh->ping_state = 0; 990 } 991 } else if (chan->halt_pending) { 992 /* Don't queue a request if the channel has been halted */ 993 } else if (chan->halt_on_queue) { 994 dwc2_hc_halt(hsotg, chan, chan->halt_status); 995 } else if (chan->do_ping) { 996 if (!chan->xfer_started) 997 dwc2_hc_start_transfer(hsotg, chan); 998 } else if (!chan->ep_is_in || 999 chan->data_pid_start == DWC2_HC_PID_SETUP) { 1000 if ((fifo_dwords_avail * 4) >= chan->max_packet) { 1001 if (!chan->xfer_started) { 1002 dwc2_hc_start_transfer(hsotg, chan); 1003 retval = 1; 1004 } else { 1005 retval = dwc2_hc_continue_transfer(hsotg, chan); 1006 } 1007 } else { 1008 retval = -1; 1009 } 1010 } else { 1011 if (!chan->xfer_started) { 1012 dwc2_hc_start_transfer(hsotg, chan); 1013 retval = 1; 1014 } else { 1015 retval = dwc2_hc_continue_transfer(hsotg, chan); 1016 } 1017 } 1018 1019 return retval; 1020 } 1021 1022 /* 1023 * Processes periodic channels for the next frame and queues transactions for 1024 * these channels to the DWC_otg controller. After queueing transactions, the 1025 * Periodic Tx FIFO Empty interrupt is enabled if there are more transactions 1026 * to queue as Periodic Tx FIFO or request queue space becomes available. 1027 * Otherwise, the Periodic Tx FIFO Empty interrupt is disabled. 1028 * 1029 * Must be called with interrupt disabled and spinlock held 1030 */ 1031 static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg) 1032 { 1033 struct list_head *qh_ptr; 1034 struct dwc2_qh *qh; 1035 u32 tx_status; 1036 u32 fspcavail; 1037 u32 gintmsk; 1038 int status; 1039 int no_queue_space = 0; 1040 int no_fifo_space = 0; 1041 u32 qspcavail; 1042 1043 if (dbg_perio()) 1044 dev_vdbg(hsotg->dev, "Queue periodic transactions\n"); 1045 1046 tx_status = readl(hsotg->regs + HPTXSTS); 1047 qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >> 1048 TXSTS_QSPCAVAIL_SHIFT; 1049 fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >> 1050 TXSTS_FSPCAVAIL_SHIFT; 1051 1052 if (dbg_perio()) { 1053 dev_vdbg(hsotg->dev, " P Tx Req Queue Space Avail (before queue): %d\n", 1054 qspcavail); 1055 dev_vdbg(hsotg->dev, " P Tx FIFO Space Avail (before queue): %d\n", 1056 fspcavail); 1057 } 1058 1059 qh_ptr = hsotg->periodic_sched_assigned.next; 1060 while (qh_ptr != &hsotg->periodic_sched_assigned) { 1061 tx_status = readl(hsotg->regs + HPTXSTS); 1062 qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >> 1063 TXSTS_QSPCAVAIL_SHIFT; 1064 if (qspcavail == 0) { 1065 no_queue_space = 1; 1066 break; 1067 } 1068 1069 qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry); 1070 if (!qh->channel) { 1071 qh_ptr = qh_ptr->next; 1072 continue; 1073 } 1074 1075 /* Make sure EP's TT buffer is clean before queueing qtds */ 1076 if (qh->tt_buffer_dirty) { 1077 qh_ptr = qh_ptr->next; 1078 continue; 1079 } 1080 1081 /* 1082 * Set a flag if we're queuing high-bandwidth in slave mode. 1083 * The flag prevents any halts to get into the request queue in 1084 * the middle of multiple high-bandwidth packets getting queued. 1085 */ 1086 if (hsotg->core_params->dma_enable <= 0 && 1087 qh->channel->multi_count > 1) 1088 hsotg->queuing_high_bandwidth = 1; 1089 1090 fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >> 1091 TXSTS_FSPCAVAIL_SHIFT; 1092 status = dwc2_queue_transaction(hsotg, qh->channel, fspcavail); 1093 if (status < 0) { 1094 no_fifo_space = 1; 1095 break; 1096 } 1097 1098 /* 1099 * In Slave mode, stay on the current transfer until there is 1100 * nothing more to do or the high-bandwidth request count is 1101 * reached. In DMA mode, only need to queue one request. The 1102 * controller automatically handles multiple packets for 1103 * high-bandwidth transfers. 1104 */ 1105 if (hsotg->core_params->dma_enable > 0 || status == 0 || 1106 qh->channel->requests == qh->channel->multi_count) { 1107 qh_ptr = qh_ptr->next; 1108 /* 1109 * Move the QH from the periodic assigned schedule to 1110 * the periodic queued schedule 1111 */ 1112 list_move(&qh->qh_list_entry, 1113 &hsotg->periodic_sched_queued); 1114 1115 /* done queuing high bandwidth */ 1116 hsotg->queuing_high_bandwidth = 0; 1117 } 1118 } 1119 1120 if (hsotg->core_params->dma_enable <= 0) { 1121 tx_status = readl(hsotg->regs + HPTXSTS); 1122 qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >> 1123 TXSTS_QSPCAVAIL_SHIFT; 1124 fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >> 1125 TXSTS_FSPCAVAIL_SHIFT; 1126 if (dbg_perio()) { 1127 dev_vdbg(hsotg->dev, 1128 " P Tx Req Queue Space Avail (after queue): %d\n", 1129 qspcavail); 1130 dev_vdbg(hsotg->dev, 1131 " P Tx FIFO Space Avail (after queue): %d\n", 1132 fspcavail); 1133 } 1134 1135 if (!list_empty(&hsotg->periodic_sched_assigned) || 1136 no_queue_space || no_fifo_space) { 1137 /* 1138 * May need to queue more transactions as the request 1139 * queue or Tx FIFO empties. Enable the periodic Tx 1140 * FIFO empty interrupt. (Always use the half-empty 1141 * level to ensure that new requests are loaded as 1142 * soon as possible.) 1143 */ 1144 gintmsk = readl(hsotg->regs + GINTMSK); 1145 gintmsk |= GINTSTS_PTXFEMP; 1146 writel(gintmsk, hsotg->regs + GINTMSK); 1147 } else { 1148 /* 1149 * Disable the Tx FIFO empty interrupt since there are 1150 * no more transactions that need to be queued right 1151 * now. This function is called from interrupt 1152 * handlers to queue more transactions as transfer 1153 * states change. 1154 */ 1155 gintmsk = readl(hsotg->regs + GINTMSK); 1156 gintmsk &= ~GINTSTS_PTXFEMP; 1157 writel(gintmsk, hsotg->regs + GINTMSK); 1158 } 1159 } 1160 } 1161 1162 /* 1163 * Processes active non-periodic channels and queues transactions for these 1164 * channels to the DWC_otg controller. After queueing transactions, the NP Tx 1165 * FIFO Empty interrupt is enabled if there are more transactions to queue as 1166 * NP Tx FIFO or request queue space becomes available. Otherwise, the NP Tx 1167 * FIFO Empty interrupt is disabled. 1168 * 1169 * Must be called with interrupt disabled and spinlock held 1170 */ 1171 static void dwc2_process_non_periodic_channels(struct dwc2_hsotg *hsotg) 1172 { 1173 struct list_head *orig_qh_ptr; 1174 struct dwc2_qh *qh; 1175 u32 tx_status; 1176 u32 qspcavail; 1177 u32 fspcavail; 1178 u32 gintmsk; 1179 int status; 1180 int no_queue_space = 0; 1181 int no_fifo_space = 0; 1182 int more_to_do = 0; 1183 1184 dev_vdbg(hsotg->dev, "Queue non-periodic transactions\n"); 1185 1186 tx_status = readl(hsotg->regs + GNPTXSTS); 1187 qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >> 1188 TXSTS_QSPCAVAIL_SHIFT; 1189 fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >> 1190 TXSTS_FSPCAVAIL_SHIFT; 1191 dev_vdbg(hsotg->dev, " NP Tx Req Queue Space Avail (before queue): %d\n", 1192 qspcavail); 1193 dev_vdbg(hsotg->dev, " NP Tx FIFO Space Avail (before queue): %d\n", 1194 fspcavail); 1195 1196 /* 1197 * Keep track of the starting point. Skip over the start-of-list 1198 * entry. 1199 */ 1200 if (hsotg->non_periodic_qh_ptr == &hsotg->non_periodic_sched_active) 1201 hsotg->non_periodic_qh_ptr = hsotg->non_periodic_qh_ptr->next; 1202 orig_qh_ptr = hsotg->non_periodic_qh_ptr; 1203 1204 /* 1205 * Process once through the active list or until no more space is 1206 * available in the request queue or the Tx FIFO 1207 */ 1208 do { 1209 tx_status = readl(hsotg->regs + GNPTXSTS); 1210 qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >> 1211 TXSTS_QSPCAVAIL_SHIFT; 1212 if (hsotg->core_params->dma_enable <= 0 && qspcavail == 0) { 1213 no_queue_space = 1; 1214 break; 1215 } 1216 1217 qh = list_entry(hsotg->non_periodic_qh_ptr, struct dwc2_qh, 1218 qh_list_entry); 1219 if (!qh->channel) 1220 goto next; 1221 1222 /* Make sure EP's TT buffer is clean before queueing qtds */ 1223 if (qh->tt_buffer_dirty) 1224 goto next; 1225 1226 fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >> 1227 TXSTS_FSPCAVAIL_SHIFT; 1228 status = dwc2_queue_transaction(hsotg, qh->channel, fspcavail); 1229 1230 if (status > 0) { 1231 more_to_do = 1; 1232 } else if (status < 0) { 1233 no_fifo_space = 1; 1234 break; 1235 } 1236 next: 1237 /* Advance to next QH, skipping start-of-list entry */ 1238 hsotg->non_periodic_qh_ptr = hsotg->non_periodic_qh_ptr->next; 1239 if (hsotg->non_periodic_qh_ptr == 1240 &hsotg->non_periodic_sched_active) 1241 hsotg->non_periodic_qh_ptr = 1242 hsotg->non_periodic_qh_ptr->next; 1243 } while (hsotg->non_periodic_qh_ptr != orig_qh_ptr); 1244 1245 if (hsotg->core_params->dma_enable <= 0) { 1246 tx_status = readl(hsotg->regs + GNPTXSTS); 1247 qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >> 1248 TXSTS_QSPCAVAIL_SHIFT; 1249 fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >> 1250 TXSTS_FSPCAVAIL_SHIFT; 1251 dev_vdbg(hsotg->dev, 1252 " NP Tx Req Queue Space Avail (after queue): %d\n", 1253 qspcavail); 1254 dev_vdbg(hsotg->dev, 1255 " NP Tx FIFO Space Avail (after queue): %d\n", 1256 fspcavail); 1257 1258 if (more_to_do || no_queue_space || no_fifo_space) { 1259 /* 1260 * May need to queue more transactions as the request 1261 * queue or Tx FIFO empties. Enable the non-periodic 1262 * Tx FIFO empty interrupt. (Always use the half-empty 1263 * level to ensure that new requests are loaded as 1264 * soon as possible.) 1265 */ 1266 gintmsk = readl(hsotg->regs + GINTMSK); 1267 gintmsk |= GINTSTS_NPTXFEMP; 1268 writel(gintmsk, hsotg->regs + GINTMSK); 1269 } else { 1270 /* 1271 * Disable the Tx FIFO empty interrupt since there are 1272 * no more transactions that need to be queued right 1273 * now. This function is called from interrupt 1274 * handlers to queue more transactions as transfer 1275 * states change. 1276 */ 1277 gintmsk = readl(hsotg->regs + GINTMSK); 1278 gintmsk &= ~GINTSTS_NPTXFEMP; 1279 writel(gintmsk, hsotg->regs + GINTMSK); 1280 } 1281 } 1282 } 1283 1284 /** 1285 * dwc2_hcd_queue_transactions() - Processes the currently active host channels 1286 * and queues transactions for these channels to the DWC_otg controller. Called 1287 * from the HCD interrupt handler functions. 1288 * 1289 * @hsotg: The HCD state structure 1290 * @tr_type: The type(s) of transactions to queue (non-periodic, periodic, 1291 * or both) 1292 * 1293 * Must be called with interrupt disabled and spinlock held 1294 */ 1295 void dwc2_hcd_queue_transactions(struct dwc2_hsotg *hsotg, 1296 enum dwc2_transaction_type tr_type) 1297 { 1298 #ifdef DWC2_DEBUG_SOF 1299 dev_vdbg(hsotg->dev, "Queue Transactions\n"); 1300 #endif 1301 /* Process host channels associated with periodic transfers */ 1302 if ((tr_type == DWC2_TRANSACTION_PERIODIC || 1303 tr_type == DWC2_TRANSACTION_ALL) && 1304 !list_empty(&hsotg->periodic_sched_assigned)) 1305 dwc2_process_periodic_channels(hsotg); 1306 1307 /* Process host channels associated with non-periodic transfers */ 1308 if (tr_type == DWC2_TRANSACTION_NON_PERIODIC || 1309 tr_type == DWC2_TRANSACTION_ALL) { 1310 if (!list_empty(&hsotg->non_periodic_sched_active)) { 1311 dwc2_process_non_periodic_channels(hsotg); 1312 } else { 1313 /* 1314 * Ensure NP Tx FIFO empty interrupt is disabled when 1315 * there are no non-periodic transfers to process 1316 */ 1317 u32 gintmsk = readl(hsotg->regs + GINTMSK); 1318 1319 gintmsk &= ~GINTSTS_NPTXFEMP; 1320 writel(gintmsk, hsotg->regs + GINTMSK); 1321 } 1322 } 1323 } 1324 1325 static void dwc2_conn_id_status_change(struct work_struct *work) 1326 { 1327 struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg, 1328 wf_otg); 1329 u32 count = 0; 1330 u32 gotgctl; 1331 1332 dev_dbg(hsotg->dev, "%s()\n", __func__); 1333 1334 gotgctl = readl(hsotg->regs + GOTGCTL); 1335 dev_dbg(hsotg->dev, "gotgctl=%0x\n", gotgctl); 1336 dev_dbg(hsotg->dev, "gotgctl.b.conidsts=%d\n", 1337 !!(gotgctl & GOTGCTL_CONID_B)); 1338 1339 /* B-Device connector (Device Mode) */ 1340 if (gotgctl & GOTGCTL_CONID_B) { 1341 /* Wait for switch to device mode */ 1342 dev_dbg(hsotg->dev, "connId B\n"); 1343 while (!dwc2_is_device_mode(hsotg)) { 1344 dev_info(hsotg->dev, 1345 "Waiting for Peripheral Mode, Mode=%s\n", 1346 dwc2_is_host_mode(hsotg) ? "Host" : 1347 "Peripheral"); 1348 usleep_range(20000, 40000); 1349 if (++count > 250) 1350 break; 1351 } 1352 if (count > 250) 1353 dev_err(hsotg->dev, 1354 "Connection id status change timed out\n"); 1355 hsotg->op_state = OTG_STATE_B_PERIPHERAL; 1356 dwc2_core_init(hsotg, false, -1); 1357 dwc2_enable_global_interrupts(hsotg); 1358 } else { 1359 /* A-Device connector (Host Mode) */ 1360 dev_dbg(hsotg->dev, "connId A\n"); 1361 while (!dwc2_is_host_mode(hsotg)) { 1362 dev_info(hsotg->dev, "Waiting for Host Mode, Mode=%s\n", 1363 dwc2_is_host_mode(hsotg) ? 1364 "Host" : "Peripheral"); 1365 usleep_range(20000, 40000); 1366 if (++count > 250) 1367 break; 1368 } 1369 if (count > 250) 1370 dev_err(hsotg->dev, 1371 "Connection id status change timed out\n"); 1372 hsotg->op_state = OTG_STATE_A_HOST; 1373 1374 /* Initialize the Core for Host mode */ 1375 dwc2_core_init(hsotg, false, -1); 1376 dwc2_enable_global_interrupts(hsotg); 1377 dwc2_hcd_start(hsotg); 1378 } 1379 } 1380 1381 static void dwc2_wakeup_detected(unsigned long data) 1382 { 1383 struct dwc2_hsotg *hsotg = (struct dwc2_hsotg *)data; 1384 u32 hprt0; 1385 1386 dev_dbg(hsotg->dev, "%s()\n", __func__); 1387 1388 /* 1389 * Clear the Resume after 70ms. (Need 20 ms minimum. Use 70 ms 1390 * so that OPT tests pass with all PHYs.) 1391 */ 1392 hprt0 = dwc2_read_hprt0(hsotg); 1393 dev_dbg(hsotg->dev, "Resume: HPRT0=%0x\n", hprt0); 1394 hprt0 &= ~HPRT0_RES; 1395 writel(hprt0, hsotg->regs + HPRT0); 1396 dev_dbg(hsotg->dev, "Clear Resume: HPRT0=%0x\n", 1397 readl(hsotg->regs + HPRT0)); 1398 1399 dwc2_hcd_rem_wakeup(hsotg); 1400 1401 /* Change to L0 state */ 1402 hsotg->lx_state = DWC2_L0; 1403 } 1404 1405 static int dwc2_host_is_b_hnp_enabled(struct dwc2_hsotg *hsotg) 1406 { 1407 struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg); 1408 1409 return hcd->self.b_hnp_enable; 1410 } 1411 1412 /* Must NOT be called with interrupt disabled or spinlock held */ 1413 static void dwc2_port_suspend(struct dwc2_hsotg *hsotg, u16 windex) 1414 { 1415 unsigned long flags; 1416 u32 hprt0; 1417 u32 pcgctl; 1418 u32 gotgctl; 1419 1420 dev_dbg(hsotg->dev, "%s()\n", __func__); 1421 1422 spin_lock_irqsave(&hsotg->lock, flags); 1423 1424 if (windex == hsotg->otg_port && dwc2_host_is_b_hnp_enabled(hsotg)) { 1425 gotgctl = readl(hsotg->regs + GOTGCTL); 1426 gotgctl |= GOTGCTL_HSTSETHNPEN; 1427 writel(gotgctl, hsotg->regs + GOTGCTL); 1428 hsotg->op_state = OTG_STATE_A_SUSPEND; 1429 } 1430 1431 hprt0 = dwc2_read_hprt0(hsotg); 1432 hprt0 |= HPRT0_SUSP; 1433 writel(hprt0, hsotg->regs + HPRT0); 1434 1435 /* Update lx_state */ 1436 hsotg->lx_state = DWC2_L2; 1437 1438 /* Suspend the Phy Clock */ 1439 pcgctl = readl(hsotg->regs + PCGCTL); 1440 pcgctl |= PCGCTL_STOPPCLK; 1441 writel(pcgctl, hsotg->regs + PCGCTL); 1442 udelay(10); 1443 1444 /* For HNP the bus must be suspended for at least 200ms */ 1445 if (dwc2_host_is_b_hnp_enabled(hsotg)) { 1446 pcgctl = readl(hsotg->regs + PCGCTL); 1447 pcgctl &= ~PCGCTL_STOPPCLK; 1448 writel(pcgctl, hsotg->regs + PCGCTL); 1449 1450 spin_unlock_irqrestore(&hsotg->lock, flags); 1451 1452 usleep_range(200000, 250000); 1453 } else { 1454 spin_unlock_irqrestore(&hsotg->lock, flags); 1455 } 1456 } 1457 1458 /* Handles hub class-specific requests */ 1459 static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq, 1460 u16 wvalue, u16 windex, char *buf, u16 wlength) 1461 { 1462 struct usb_hub_descriptor *hub_desc; 1463 int retval = 0; 1464 u32 hprt0; 1465 u32 port_status; 1466 u32 speed; 1467 u32 pcgctl; 1468 1469 switch (typereq) { 1470 case ClearHubFeature: 1471 dev_dbg(hsotg->dev, "ClearHubFeature %1xh\n", wvalue); 1472 1473 switch (wvalue) { 1474 case C_HUB_LOCAL_POWER: 1475 case C_HUB_OVER_CURRENT: 1476 /* Nothing required here */ 1477 break; 1478 1479 default: 1480 retval = -EINVAL; 1481 dev_err(hsotg->dev, 1482 "ClearHubFeature request %1xh unknown\n", 1483 wvalue); 1484 } 1485 break; 1486 1487 case ClearPortFeature: 1488 if (wvalue != USB_PORT_FEAT_L1) 1489 if (!windex || windex > 1) 1490 goto error; 1491 switch (wvalue) { 1492 case USB_PORT_FEAT_ENABLE: 1493 dev_dbg(hsotg->dev, 1494 "ClearPortFeature USB_PORT_FEAT_ENABLE\n"); 1495 hprt0 = dwc2_read_hprt0(hsotg); 1496 hprt0 |= HPRT0_ENA; 1497 writel(hprt0, hsotg->regs + HPRT0); 1498 break; 1499 1500 case USB_PORT_FEAT_SUSPEND: 1501 dev_dbg(hsotg->dev, 1502 "ClearPortFeature USB_PORT_FEAT_SUSPEND\n"); 1503 writel(0, hsotg->regs + PCGCTL); 1504 usleep_range(20000, 40000); 1505 1506 hprt0 = dwc2_read_hprt0(hsotg); 1507 hprt0 |= HPRT0_RES; 1508 writel(hprt0, hsotg->regs + HPRT0); 1509 hprt0 &= ~HPRT0_SUSP; 1510 usleep_range(100000, 150000); 1511 1512 hprt0 &= ~HPRT0_RES; 1513 writel(hprt0, hsotg->regs + HPRT0); 1514 break; 1515 1516 case USB_PORT_FEAT_POWER: 1517 dev_dbg(hsotg->dev, 1518 "ClearPortFeature USB_PORT_FEAT_POWER\n"); 1519 hprt0 = dwc2_read_hprt0(hsotg); 1520 hprt0 &= ~HPRT0_PWR; 1521 writel(hprt0, hsotg->regs + HPRT0); 1522 break; 1523 1524 case USB_PORT_FEAT_INDICATOR: 1525 dev_dbg(hsotg->dev, 1526 "ClearPortFeature USB_PORT_FEAT_INDICATOR\n"); 1527 /* Port indicator not supported */ 1528 break; 1529 1530 case USB_PORT_FEAT_C_CONNECTION: 1531 /* 1532 * Clears driver's internal Connect Status Change flag 1533 */ 1534 dev_dbg(hsotg->dev, 1535 "ClearPortFeature USB_PORT_FEAT_C_CONNECTION\n"); 1536 hsotg->flags.b.port_connect_status_change = 0; 1537 break; 1538 1539 case USB_PORT_FEAT_C_RESET: 1540 /* Clears driver's internal Port Reset Change flag */ 1541 dev_dbg(hsotg->dev, 1542 "ClearPortFeature USB_PORT_FEAT_C_RESET\n"); 1543 hsotg->flags.b.port_reset_change = 0; 1544 break; 1545 1546 case USB_PORT_FEAT_C_ENABLE: 1547 /* 1548 * Clears the driver's internal Port Enable/Disable 1549 * Change flag 1550 */ 1551 dev_dbg(hsotg->dev, 1552 "ClearPortFeature USB_PORT_FEAT_C_ENABLE\n"); 1553 hsotg->flags.b.port_enable_change = 0; 1554 break; 1555 1556 case USB_PORT_FEAT_C_SUSPEND: 1557 /* 1558 * Clears the driver's internal Port Suspend Change 1559 * flag, which is set when resume signaling on the host 1560 * port is complete 1561 */ 1562 dev_dbg(hsotg->dev, 1563 "ClearPortFeature USB_PORT_FEAT_C_SUSPEND\n"); 1564 hsotg->flags.b.port_suspend_change = 0; 1565 break; 1566 1567 case USB_PORT_FEAT_C_PORT_L1: 1568 dev_dbg(hsotg->dev, 1569 "ClearPortFeature USB_PORT_FEAT_C_PORT_L1\n"); 1570 hsotg->flags.b.port_l1_change = 0; 1571 break; 1572 1573 case USB_PORT_FEAT_C_OVER_CURRENT: 1574 dev_dbg(hsotg->dev, 1575 "ClearPortFeature USB_PORT_FEAT_C_OVER_CURRENT\n"); 1576 hsotg->flags.b.port_over_current_change = 0; 1577 break; 1578 1579 default: 1580 retval = -EINVAL; 1581 dev_err(hsotg->dev, 1582 "ClearPortFeature request %1xh unknown or unsupported\n", 1583 wvalue); 1584 } 1585 break; 1586 1587 case GetHubDescriptor: 1588 dev_dbg(hsotg->dev, "GetHubDescriptor\n"); 1589 hub_desc = (struct usb_hub_descriptor *)buf; 1590 hub_desc->bDescLength = 9; 1591 hub_desc->bDescriptorType = 0x29; 1592 hub_desc->bNbrPorts = 1; 1593 hub_desc->wHubCharacteristics = cpu_to_le16(0x08); 1594 hub_desc->bPwrOn2PwrGood = 1; 1595 hub_desc->bHubContrCurrent = 0; 1596 hub_desc->u.hs.DeviceRemovable[0] = 0; 1597 hub_desc->u.hs.DeviceRemovable[1] = 0xff; 1598 break; 1599 1600 case GetHubStatus: 1601 dev_dbg(hsotg->dev, "GetHubStatus\n"); 1602 memset(buf, 0, 4); 1603 break; 1604 1605 case GetPortStatus: 1606 dev_vdbg(hsotg->dev, 1607 "GetPortStatus wIndex=0x%04x flags=0x%08x\n", windex, 1608 hsotg->flags.d32); 1609 if (!windex || windex > 1) 1610 goto error; 1611 1612 port_status = 0; 1613 if (hsotg->flags.b.port_connect_status_change) 1614 port_status |= USB_PORT_STAT_C_CONNECTION << 16; 1615 if (hsotg->flags.b.port_enable_change) 1616 port_status |= USB_PORT_STAT_C_ENABLE << 16; 1617 if (hsotg->flags.b.port_suspend_change) 1618 port_status |= USB_PORT_STAT_C_SUSPEND << 16; 1619 if (hsotg->flags.b.port_l1_change) 1620 port_status |= USB_PORT_STAT_C_L1 << 16; 1621 if (hsotg->flags.b.port_reset_change) 1622 port_status |= USB_PORT_STAT_C_RESET << 16; 1623 if (hsotg->flags.b.port_over_current_change) { 1624 dev_warn(hsotg->dev, "Overcurrent change detected\n"); 1625 port_status |= USB_PORT_STAT_C_OVERCURRENT << 16; 1626 } 1627 1628 if (!hsotg->flags.b.port_connect_status) { 1629 /* 1630 * The port is disconnected, which means the core is 1631 * either in device mode or it soon will be. Just 1632 * return 0's for the remainder of the port status 1633 * since the port register can't be read if the core 1634 * is in device mode. 1635 */ 1636 *(__le32 *)buf = cpu_to_le32(port_status); 1637 break; 1638 } 1639 1640 hprt0 = readl(hsotg->regs + HPRT0); 1641 dev_vdbg(hsotg->dev, " HPRT0: 0x%08x\n", hprt0); 1642 1643 if (hprt0 & HPRT0_CONNSTS) 1644 port_status |= USB_PORT_STAT_CONNECTION; 1645 if (hprt0 & HPRT0_ENA) 1646 port_status |= USB_PORT_STAT_ENABLE; 1647 if (hprt0 & HPRT0_SUSP) 1648 port_status |= USB_PORT_STAT_SUSPEND; 1649 if (hprt0 & HPRT0_OVRCURRACT) 1650 port_status |= USB_PORT_STAT_OVERCURRENT; 1651 if (hprt0 & HPRT0_RST) 1652 port_status |= USB_PORT_STAT_RESET; 1653 if (hprt0 & HPRT0_PWR) 1654 port_status |= USB_PORT_STAT_POWER; 1655 1656 speed = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT; 1657 if (speed == HPRT0_SPD_HIGH_SPEED) 1658 port_status |= USB_PORT_STAT_HIGH_SPEED; 1659 else if (speed == HPRT0_SPD_LOW_SPEED) 1660 port_status |= USB_PORT_STAT_LOW_SPEED; 1661 1662 if (hprt0 & HPRT0_TSTCTL_MASK) 1663 port_status |= USB_PORT_STAT_TEST; 1664 /* USB_PORT_FEAT_INDICATOR unsupported always 0 */ 1665 1666 dev_vdbg(hsotg->dev, "port_status=%08x\n", port_status); 1667 *(__le32 *)buf = cpu_to_le32(port_status); 1668 break; 1669 1670 case SetHubFeature: 1671 dev_dbg(hsotg->dev, "SetHubFeature\n"); 1672 /* No HUB features supported */ 1673 break; 1674 1675 case SetPortFeature: 1676 dev_dbg(hsotg->dev, "SetPortFeature\n"); 1677 if (wvalue != USB_PORT_FEAT_TEST && (!windex || windex > 1)) 1678 goto error; 1679 1680 if (!hsotg->flags.b.port_connect_status) { 1681 /* 1682 * The port is disconnected, which means the core is 1683 * either in device mode or it soon will be. Just 1684 * return without doing anything since the port 1685 * register can't be written if the core is in device 1686 * mode. 1687 */ 1688 break; 1689 } 1690 1691 switch (wvalue) { 1692 case USB_PORT_FEAT_SUSPEND: 1693 dev_dbg(hsotg->dev, 1694 "SetPortFeature - USB_PORT_FEAT_SUSPEND\n"); 1695 if (windex != hsotg->otg_port) 1696 goto error; 1697 dwc2_port_suspend(hsotg, windex); 1698 break; 1699 1700 case USB_PORT_FEAT_POWER: 1701 dev_dbg(hsotg->dev, 1702 "SetPortFeature - USB_PORT_FEAT_POWER\n"); 1703 hprt0 = dwc2_read_hprt0(hsotg); 1704 hprt0 |= HPRT0_PWR; 1705 writel(hprt0, hsotg->regs + HPRT0); 1706 break; 1707 1708 case USB_PORT_FEAT_RESET: 1709 hprt0 = dwc2_read_hprt0(hsotg); 1710 dev_dbg(hsotg->dev, 1711 "SetPortFeature - USB_PORT_FEAT_RESET\n"); 1712 pcgctl = readl(hsotg->regs + PCGCTL); 1713 pcgctl &= ~(PCGCTL_ENBL_SLEEP_GATING | PCGCTL_STOPPCLK); 1714 writel(pcgctl, hsotg->regs + PCGCTL); 1715 /* ??? Original driver does this */ 1716 writel(0, hsotg->regs + PCGCTL); 1717 1718 hprt0 = dwc2_read_hprt0(hsotg); 1719 /* Clear suspend bit if resetting from suspend state */ 1720 hprt0 &= ~HPRT0_SUSP; 1721 1722 /* 1723 * When B-Host the Port reset bit is set in the Start 1724 * HCD Callback function, so that the reset is started 1725 * within 1ms of the HNP success interrupt 1726 */ 1727 if (!dwc2_hcd_is_b_host(hsotg)) { 1728 hprt0 |= HPRT0_PWR | HPRT0_RST; 1729 dev_dbg(hsotg->dev, 1730 "In host mode, hprt0=%08x\n", hprt0); 1731 writel(hprt0, hsotg->regs + HPRT0); 1732 } 1733 1734 /* Clear reset bit in 10ms (FS/LS) or 50ms (HS) */ 1735 usleep_range(50000, 70000); 1736 hprt0 &= ~HPRT0_RST; 1737 writel(hprt0, hsotg->regs + HPRT0); 1738 hsotg->lx_state = DWC2_L0; /* Now back to On state */ 1739 break; 1740 1741 case USB_PORT_FEAT_INDICATOR: 1742 dev_dbg(hsotg->dev, 1743 "SetPortFeature - USB_PORT_FEAT_INDICATOR\n"); 1744 /* Not supported */ 1745 break; 1746 1747 default: 1748 retval = -EINVAL; 1749 dev_err(hsotg->dev, 1750 "SetPortFeature %1xh unknown or unsupported\n", 1751 wvalue); 1752 break; 1753 } 1754 break; 1755 1756 default: 1757 error: 1758 retval = -EINVAL; 1759 dev_dbg(hsotg->dev, 1760 "Unknown hub control request: %1xh wIndex: %1xh wValue: %1xh\n", 1761 typereq, windex, wvalue); 1762 break; 1763 } 1764 1765 return retval; 1766 } 1767 1768 static int dwc2_hcd_is_status_changed(struct dwc2_hsotg *hsotg, int port) 1769 { 1770 int retval; 1771 1772 if (port != 1) 1773 return -EINVAL; 1774 1775 retval = (hsotg->flags.b.port_connect_status_change || 1776 hsotg->flags.b.port_reset_change || 1777 hsotg->flags.b.port_enable_change || 1778 hsotg->flags.b.port_suspend_change || 1779 hsotg->flags.b.port_over_current_change); 1780 1781 if (retval) { 1782 dev_dbg(hsotg->dev, 1783 "DWC OTG HCD HUB STATUS DATA: Root port status changed\n"); 1784 dev_dbg(hsotg->dev, " port_connect_status_change: %d\n", 1785 hsotg->flags.b.port_connect_status_change); 1786 dev_dbg(hsotg->dev, " port_reset_change: %d\n", 1787 hsotg->flags.b.port_reset_change); 1788 dev_dbg(hsotg->dev, " port_enable_change: %d\n", 1789 hsotg->flags.b.port_enable_change); 1790 dev_dbg(hsotg->dev, " port_suspend_change: %d\n", 1791 hsotg->flags.b.port_suspend_change); 1792 dev_dbg(hsotg->dev, " port_over_current_change: %d\n", 1793 hsotg->flags.b.port_over_current_change); 1794 } 1795 1796 return retval; 1797 } 1798 1799 int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg) 1800 { 1801 u32 hfnum = readl(hsotg->regs + HFNUM); 1802 1803 #ifdef DWC2_DEBUG_SOF 1804 dev_vdbg(hsotg->dev, "DWC OTG HCD GET FRAME NUMBER %d\n", 1805 (hfnum & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT); 1806 #endif 1807 return (hfnum & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT; 1808 } 1809 1810 int dwc2_hcd_is_b_host(struct dwc2_hsotg *hsotg) 1811 { 1812 return hsotg->op_state == OTG_STATE_B_HOST; 1813 } 1814 1815 static struct dwc2_hcd_urb *dwc2_hcd_urb_alloc(struct dwc2_hsotg *hsotg, 1816 int iso_desc_count, 1817 gfp_t mem_flags) 1818 { 1819 struct dwc2_hcd_urb *urb; 1820 u32 size = sizeof(*urb) + iso_desc_count * 1821 sizeof(struct dwc2_hcd_iso_packet_desc); 1822 1823 urb = kzalloc(size, mem_flags); 1824 if (urb) 1825 urb->packet_count = iso_desc_count; 1826 return urb; 1827 } 1828 1829 static void dwc2_hcd_urb_set_pipeinfo(struct dwc2_hsotg *hsotg, 1830 struct dwc2_hcd_urb *urb, u8 dev_addr, 1831 u8 ep_num, u8 ep_type, u8 ep_dir, u16 mps) 1832 { 1833 if (dbg_perio() || 1834 ep_type == USB_ENDPOINT_XFER_BULK || 1835 ep_type == USB_ENDPOINT_XFER_CONTROL) 1836 dev_vdbg(hsotg->dev, 1837 "addr=%d, ep_num=%d, ep_dir=%1x, ep_type=%1x, mps=%d\n", 1838 dev_addr, ep_num, ep_dir, ep_type, mps); 1839 urb->pipe_info.dev_addr = dev_addr; 1840 urb->pipe_info.ep_num = ep_num; 1841 urb->pipe_info.pipe_type = ep_type; 1842 urb->pipe_info.pipe_dir = ep_dir; 1843 urb->pipe_info.mps = mps; 1844 } 1845 1846 /* 1847 * NOTE: This function will be removed once the peripheral controller code 1848 * is integrated and the driver is stable 1849 */ 1850 void dwc2_hcd_dump_state(struct dwc2_hsotg *hsotg) 1851 { 1852 #ifdef DEBUG 1853 struct dwc2_host_chan *chan; 1854 struct dwc2_hcd_urb *urb; 1855 struct dwc2_qtd *qtd; 1856 int num_channels; 1857 u32 np_tx_status; 1858 u32 p_tx_status; 1859 int i; 1860 1861 num_channels = hsotg->core_params->host_channels; 1862 dev_dbg(hsotg->dev, "\n"); 1863 dev_dbg(hsotg->dev, 1864 "************************************************************\n"); 1865 dev_dbg(hsotg->dev, "HCD State:\n"); 1866 dev_dbg(hsotg->dev, " Num channels: %d\n", num_channels); 1867 1868 for (i = 0; i < num_channels; i++) { 1869 chan = hsotg->hc_ptr_array[i]; 1870 dev_dbg(hsotg->dev, " Channel %d:\n", i); 1871 dev_dbg(hsotg->dev, 1872 " dev_addr: %d, ep_num: %d, ep_is_in: %d\n", 1873 chan->dev_addr, chan->ep_num, chan->ep_is_in); 1874 dev_dbg(hsotg->dev, " speed: %d\n", chan->speed); 1875 dev_dbg(hsotg->dev, " ep_type: %d\n", chan->ep_type); 1876 dev_dbg(hsotg->dev, " max_packet: %d\n", chan->max_packet); 1877 dev_dbg(hsotg->dev, " data_pid_start: %d\n", 1878 chan->data_pid_start); 1879 dev_dbg(hsotg->dev, " multi_count: %d\n", chan->multi_count); 1880 dev_dbg(hsotg->dev, " xfer_started: %d\n", 1881 chan->xfer_started); 1882 dev_dbg(hsotg->dev, " xfer_buf: %p\n", chan->xfer_buf); 1883 dev_dbg(hsotg->dev, " xfer_dma: %08lx\n", 1884 (unsigned long)chan->xfer_dma); 1885 dev_dbg(hsotg->dev, " xfer_len: %d\n", chan->xfer_len); 1886 dev_dbg(hsotg->dev, " xfer_count: %d\n", chan->xfer_count); 1887 dev_dbg(hsotg->dev, " halt_on_queue: %d\n", 1888 chan->halt_on_queue); 1889 dev_dbg(hsotg->dev, " halt_pending: %d\n", 1890 chan->halt_pending); 1891 dev_dbg(hsotg->dev, " halt_status: %d\n", chan->halt_status); 1892 dev_dbg(hsotg->dev, " do_split: %d\n", chan->do_split); 1893 dev_dbg(hsotg->dev, " complete_split: %d\n", 1894 chan->complete_split); 1895 dev_dbg(hsotg->dev, " hub_addr: %d\n", chan->hub_addr); 1896 dev_dbg(hsotg->dev, " hub_port: %d\n", chan->hub_port); 1897 dev_dbg(hsotg->dev, " xact_pos: %d\n", chan->xact_pos); 1898 dev_dbg(hsotg->dev, " requests: %d\n", chan->requests); 1899 dev_dbg(hsotg->dev, " qh: %p\n", chan->qh); 1900 1901 if (chan->xfer_started) { 1902 u32 hfnum, hcchar, hctsiz, hcint, hcintmsk; 1903 1904 hfnum = readl(hsotg->regs + HFNUM); 1905 hcchar = readl(hsotg->regs + HCCHAR(i)); 1906 hctsiz = readl(hsotg->regs + HCTSIZ(i)); 1907 hcint = readl(hsotg->regs + HCINT(i)); 1908 hcintmsk = readl(hsotg->regs + HCINTMSK(i)); 1909 dev_dbg(hsotg->dev, " hfnum: 0x%08x\n", hfnum); 1910 dev_dbg(hsotg->dev, " hcchar: 0x%08x\n", hcchar); 1911 dev_dbg(hsotg->dev, " hctsiz: 0x%08x\n", hctsiz); 1912 dev_dbg(hsotg->dev, " hcint: 0x%08x\n", hcint); 1913 dev_dbg(hsotg->dev, " hcintmsk: 0x%08x\n", hcintmsk); 1914 } 1915 1916 if (!(chan->xfer_started && chan->qh)) 1917 continue; 1918 1919 list_for_each_entry(qtd, &chan->qh->qtd_list, qtd_list_entry) { 1920 if (!qtd->in_process) 1921 break; 1922 urb = qtd->urb; 1923 dev_dbg(hsotg->dev, " URB Info:\n"); 1924 dev_dbg(hsotg->dev, " qtd: %p, urb: %p\n", 1925 qtd, urb); 1926 if (urb) { 1927 dev_dbg(hsotg->dev, 1928 " Dev: %d, EP: %d %s\n", 1929 dwc2_hcd_get_dev_addr(&urb->pipe_info), 1930 dwc2_hcd_get_ep_num(&urb->pipe_info), 1931 dwc2_hcd_is_pipe_in(&urb->pipe_info) ? 1932 "IN" : "OUT"); 1933 dev_dbg(hsotg->dev, 1934 " Max packet size: %d\n", 1935 dwc2_hcd_get_mps(&urb->pipe_info)); 1936 dev_dbg(hsotg->dev, 1937 " transfer_buffer: %p\n", 1938 urb->buf); 1939 dev_dbg(hsotg->dev, 1940 " transfer_dma: %08lx\n", 1941 (unsigned long)urb->dma); 1942 dev_dbg(hsotg->dev, 1943 " transfer_buffer_length: %d\n", 1944 urb->length); 1945 dev_dbg(hsotg->dev, " actual_length: %d\n", 1946 urb->actual_length); 1947 } 1948 } 1949 } 1950 1951 dev_dbg(hsotg->dev, " non_periodic_channels: %d\n", 1952 hsotg->non_periodic_channels); 1953 dev_dbg(hsotg->dev, " periodic_channels: %d\n", 1954 hsotg->periodic_channels); 1955 dev_dbg(hsotg->dev, " periodic_usecs: %d\n", hsotg->periodic_usecs); 1956 np_tx_status = readl(hsotg->regs + GNPTXSTS); 1957 dev_dbg(hsotg->dev, " NP Tx Req Queue Space Avail: %d\n", 1958 (np_tx_status & TXSTS_QSPCAVAIL_MASK) >> TXSTS_QSPCAVAIL_SHIFT); 1959 dev_dbg(hsotg->dev, " NP Tx FIFO Space Avail: %d\n", 1960 (np_tx_status & TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT); 1961 p_tx_status = readl(hsotg->regs + HPTXSTS); 1962 dev_dbg(hsotg->dev, " P Tx Req Queue Space Avail: %d\n", 1963 (p_tx_status & TXSTS_QSPCAVAIL_MASK) >> TXSTS_QSPCAVAIL_SHIFT); 1964 dev_dbg(hsotg->dev, " P Tx FIFO Space Avail: %d\n", 1965 (p_tx_status & TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT); 1966 dwc2_hcd_dump_frrem(hsotg); 1967 dwc2_dump_global_registers(hsotg); 1968 dwc2_dump_host_registers(hsotg); 1969 dev_dbg(hsotg->dev, 1970 "************************************************************\n"); 1971 dev_dbg(hsotg->dev, "\n"); 1972 #endif 1973 } 1974 1975 /* 1976 * NOTE: This function will be removed once the peripheral controller code 1977 * is integrated and the driver is stable 1978 */ 1979 void dwc2_hcd_dump_frrem(struct dwc2_hsotg *hsotg) 1980 { 1981 #ifdef DWC2_DUMP_FRREM 1982 dev_dbg(hsotg->dev, "Frame remaining at SOF:\n"); 1983 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n", 1984 hsotg->frrem_samples, hsotg->frrem_accum, 1985 hsotg->frrem_samples > 0 ? 1986 hsotg->frrem_accum / hsotg->frrem_samples : 0); 1987 dev_dbg(hsotg->dev, "\n"); 1988 dev_dbg(hsotg->dev, "Frame remaining at start_transfer (uframe 7):\n"); 1989 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n", 1990 hsotg->hfnum_7_samples, 1991 hsotg->hfnum_7_frrem_accum, 1992 hsotg->hfnum_7_samples > 0 ? 1993 hsotg->hfnum_7_frrem_accum / hsotg->hfnum_7_samples : 0); 1994 dev_dbg(hsotg->dev, "Frame remaining at start_transfer (uframe 0):\n"); 1995 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n", 1996 hsotg->hfnum_0_samples, 1997 hsotg->hfnum_0_frrem_accum, 1998 hsotg->hfnum_0_samples > 0 ? 1999 hsotg->hfnum_0_frrem_accum / hsotg->hfnum_0_samples : 0); 2000 dev_dbg(hsotg->dev, "Frame remaining at start_transfer (uframe 1-6):\n"); 2001 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n", 2002 hsotg->hfnum_other_samples, 2003 hsotg->hfnum_other_frrem_accum, 2004 hsotg->hfnum_other_samples > 0 ? 2005 hsotg->hfnum_other_frrem_accum / hsotg->hfnum_other_samples : 2006 0); 2007 dev_dbg(hsotg->dev, "\n"); 2008 dev_dbg(hsotg->dev, "Frame remaining at sample point A (uframe 7):\n"); 2009 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n", 2010 hsotg->hfnum_7_samples_a, hsotg->hfnum_7_frrem_accum_a, 2011 hsotg->hfnum_7_samples_a > 0 ? 2012 hsotg->hfnum_7_frrem_accum_a / hsotg->hfnum_7_samples_a : 0); 2013 dev_dbg(hsotg->dev, "Frame remaining at sample point A (uframe 0):\n"); 2014 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n", 2015 hsotg->hfnum_0_samples_a, hsotg->hfnum_0_frrem_accum_a, 2016 hsotg->hfnum_0_samples_a > 0 ? 2017 hsotg->hfnum_0_frrem_accum_a / hsotg->hfnum_0_samples_a : 0); 2018 dev_dbg(hsotg->dev, "Frame remaining at sample point A (uframe 1-6):\n"); 2019 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n", 2020 hsotg->hfnum_other_samples_a, hsotg->hfnum_other_frrem_accum_a, 2021 hsotg->hfnum_other_samples_a > 0 ? 2022 hsotg->hfnum_other_frrem_accum_a / hsotg->hfnum_other_samples_a 2023 : 0); 2024 dev_dbg(hsotg->dev, "\n"); 2025 dev_dbg(hsotg->dev, "Frame remaining at sample point B (uframe 7):\n"); 2026 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n", 2027 hsotg->hfnum_7_samples_b, hsotg->hfnum_7_frrem_accum_b, 2028 hsotg->hfnum_7_samples_b > 0 ? 2029 hsotg->hfnum_7_frrem_accum_b / hsotg->hfnum_7_samples_b : 0); 2030 dev_dbg(hsotg->dev, "Frame remaining at sample point B (uframe 0):\n"); 2031 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n", 2032 hsotg->hfnum_0_samples_b, hsotg->hfnum_0_frrem_accum_b, 2033 (hsotg->hfnum_0_samples_b > 0) ? 2034 hsotg->hfnum_0_frrem_accum_b / hsotg->hfnum_0_samples_b : 0); 2035 dev_dbg(hsotg->dev, "Frame remaining at sample point B (uframe 1-6):\n"); 2036 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n", 2037 hsotg->hfnum_other_samples_b, hsotg->hfnum_other_frrem_accum_b, 2038 (hsotg->hfnum_other_samples_b > 0) ? 2039 hsotg->hfnum_other_frrem_accum_b / hsotg->hfnum_other_samples_b 2040 : 0); 2041 #endif 2042 } 2043 2044 struct wrapper_priv_data { 2045 struct dwc2_hsotg *hsotg; 2046 }; 2047 2048 /* Gets the dwc2_hsotg from a usb_hcd */ 2049 static struct dwc2_hsotg *dwc2_hcd_to_hsotg(struct usb_hcd *hcd) 2050 { 2051 struct wrapper_priv_data *p; 2052 2053 p = (struct wrapper_priv_data *) &hcd->hcd_priv; 2054 return p->hsotg; 2055 } 2056 2057 static int _dwc2_hcd_start(struct usb_hcd *hcd); 2058 2059 void dwc2_host_start(struct dwc2_hsotg *hsotg) 2060 { 2061 struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg); 2062 2063 hcd->self.is_b_host = dwc2_hcd_is_b_host(hsotg); 2064 _dwc2_hcd_start(hcd); 2065 } 2066 2067 void dwc2_host_disconnect(struct dwc2_hsotg *hsotg) 2068 { 2069 struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg); 2070 2071 hcd->self.is_b_host = 0; 2072 } 2073 2074 void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context, int *hub_addr, 2075 int *hub_port) 2076 { 2077 struct urb *urb = context; 2078 2079 if (urb->dev->tt) 2080 *hub_addr = urb->dev->tt->hub->devnum; 2081 else 2082 *hub_addr = 0; 2083 *hub_port = urb->dev->ttport; 2084 } 2085 2086 int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context) 2087 { 2088 struct urb *urb = context; 2089 2090 return urb->dev->speed; 2091 } 2092 2093 static void dwc2_allocate_bus_bandwidth(struct usb_hcd *hcd, u16 bw, 2094 struct urb *urb) 2095 { 2096 struct usb_bus *bus = hcd_to_bus(hcd); 2097 2098 if (urb->interval) 2099 bus->bandwidth_allocated += bw / urb->interval; 2100 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) 2101 bus->bandwidth_isoc_reqs++; 2102 else 2103 bus->bandwidth_int_reqs++; 2104 } 2105 2106 static void dwc2_free_bus_bandwidth(struct usb_hcd *hcd, u16 bw, 2107 struct urb *urb) 2108 { 2109 struct usb_bus *bus = hcd_to_bus(hcd); 2110 2111 if (urb->interval) 2112 bus->bandwidth_allocated -= bw / urb->interval; 2113 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) 2114 bus->bandwidth_isoc_reqs--; 2115 else 2116 bus->bandwidth_int_reqs--; 2117 } 2118 2119 /* 2120 * Sets the final status of an URB and returns it to the upper layer. Any 2121 * required cleanup of the URB is performed. 2122 * 2123 * Must be called with interrupt disabled and spinlock held 2124 */ 2125 void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd, 2126 int status) 2127 { 2128 struct urb *urb; 2129 int i; 2130 2131 if (!qtd) { 2132 dev_dbg(hsotg->dev, "## %s: qtd is NULL ##\n", __func__); 2133 return; 2134 } 2135 2136 if (!qtd->urb) { 2137 dev_dbg(hsotg->dev, "## %s: qtd->urb is NULL ##\n", __func__); 2138 return; 2139 } 2140 2141 urb = qtd->urb->priv; 2142 if (!urb) { 2143 dev_dbg(hsotg->dev, "## %s: urb->priv is NULL ##\n", __func__); 2144 return; 2145 } 2146 2147 urb->actual_length = dwc2_hcd_urb_get_actual_length(qtd->urb); 2148 2149 if (dbg_urb(urb)) 2150 dev_vdbg(hsotg->dev, 2151 "%s: urb %p device %d ep %d-%s status %d actual %d\n", 2152 __func__, urb, usb_pipedevice(urb->pipe), 2153 usb_pipeendpoint(urb->pipe), 2154 usb_pipein(urb->pipe) ? "IN" : "OUT", status, 2155 urb->actual_length); 2156 2157 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS && dbg_perio()) { 2158 for (i = 0; i < urb->number_of_packets; i++) 2159 dev_vdbg(hsotg->dev, " ISO Desc %d status %d\n", 2160 i, urb->iso_frame_desc[i].status); 2161 } 2162 2163 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { 2164 urb->error_count = dwc2_hcd_urb_get_error_count(qtd->urb); 2165 for (i = 0; i < urb->number_of_packets; ++i) { 2166 urb->iso_frame_desc[i].actual_length = 2167 dwc2_hcd_urb_get_iso_desc_actual_length( 2168 qtd->urb, i); 2169 urb->iso_frame_desc[i].status = 2170 dwc2_hcd_urb_get_iso_desc_status(qtd->urb, i); 2171 } 2172 } 2173 2174 urb->status = status; 2175 if (!status) { 2176 if ((urb->transfer_flags & URB_SHORT_NOT_OK) && 2177 urb->actual_length < urb->transfer_buffer_length) 2178 urb->status = -EREMOTEIO; 2179 } 2180 2181 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS || 2182 usb_pipetype(urb->pipe) == PIPE_INTERRUPT) { 2183 struct usb_host_endpoint *ep = urb->ep; 2184 2185 if (ep) 2186 dwc2_free_bus_bandwidth(dwc2_hsotg_to_hcd(hsotg), 2187 dwc2_hcd_get_ep_bandwidth(hsotg, ep), 2188 urb); 2189 } 2190 2191 usb_hcd_unlink_urb_from_ep(dwc2_hsotg_to_hcd(hsotg), urb); 2192 urb->hcpriv = NULL; 2193 kfree(qtd->urb); 2194 qtd->urb = NULL; 2195 2196 spin_unlock(&hsotg->lock); 2197 usb_hcd_giveback_urb(dwc2_hsotg_to_hcd(hsotg), urb, status); 2198 spin_lock(&hsotg->lock); 2199 } 2200 2201 /* 2202 * Work queue function for starting the HCD when A-Cable is connected 2203 */ 2204 static void dwc2_hcd_start_func(struct work_struct *work) 2205 { 2206 struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg, 2207 start_work.work); 2208 2209 dev_dbg(hsotg->dev, "%s() %p\n", __func__, hsotg); 2210 dwc2_host_start(hsotg); 2211 } 2212 2213 /* 2214 * Reset work queue function 2215 */ 2216 static void dwc2_hcd_reset_func(struct work_struct *work) 2217 { 2218 struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg, 2219 reset_work.work); 2220 u32 hprt0; 2221 2222 dev_dbg(hsotg->dev, "USB RESET function called\n"); 2223 hprt0 = dwc2_read_hprt0(hsotg); 2224 hprt0 &= ~HPRT0_RST; 2225 writel(hprt0, hsotg->regs + HPRT0); 2226 hsotg->flags.b.port_reset_change = 1; 2227 } 2228 2229 /* 2230 * ========================================================================= 2231 * Linux HC Driver Functions 2232 * ========================================================================= 2233 */ 2234 2235 /* 2236 * Initializes the DWC_otg controller and its root hub and prepares it for host 2237 * mode operation. Activates the root port. Returns 0 on success and a negative 2238 * error code on failure. 2239 */ 2240 static int _dwc2_hcd_start(struct usb_hcd *hcd) 2241 { 2242 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); 2243 struct usb_bus *bus = hcd_to_bus(hcd); 2244 unsigned long flags; 2245 2246 dev_dbg(hsotg->dev, "DWC OTG HCD START\n"); 2247 2248 spin_lock_irqsave(&hsotg->lock, flags); 2249 2250 hcd->state = HC_STATE_RUNNING; 2251 2252 if (dwc2_is_device_mode(hsotg)) { 2253 spin_unlock_irqrestore(&hsotg->lock, flags); 2254 return 0; /* why 0 ?? */ 2255 } 2256 2257 dwc2_hcd_reinit(hsotg); 2258 2259 /* Initialize and connect root hub if one is not already attached */ 2260 if (bus->root_hub) { 2261 dev_dbg(hsotg->dev, "DWC OTG HCD Has Root Hub\n"); 2262 /* Inform the HUB driver to resume */ 2263 usb_hcd_resume_root_hub(hcd); 2264 } 2265 2266 spin_unlock_irqrestore(&hsotg->lock, flags); 2267 return 0; 2268 } 2269 2270 /* 2271 * Halts the DWC_otg host mode operations in a clean manner. USB transfers are 2272 * stopped. 2273 */ 2274 static void _dwc2_hcd_stop(struct usb_hcd *hcd) 2275 { 2276 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); 2277 unsigned long flags; 2278 2279 spin_lock_irqsave(&hsotg->lock, flags); 2280 dwc2_hcd_stop(hsotg); 2281 spin_unlock_irqrestore(&hsotg->lock, flags); 2282 2283 usleep_range(1000, 3000); 2284 } 2285 2286 /* Returns the current frame number */ 2287 static int _dwc2_hcd_get_frame_number(struct usb_hcd *hcd) 2288 { 2289 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); 2290 2291 return dwc2_hcd_get_frame_number(hsotg); 2292 } 2293 2294 static void dwc2_dump_urb_info(struct usb_hcd *hcd, struct urb *urb, 2295 char *fn_name) 2296 { 2297 #ifdef VERBOSE_DEBUG 2298 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); 2299 char *pipetype; 2300 char *speed; 2301 2302 dev_vdbg(hsotg->dev, "%s, urb %p\n", fn_name, urb); 2303 dev_vdbg(hsotg->dev, " Device address: %d\n", 2304 usb_pipedevice(urb->pipe)); 2305 dev_vdbg(hsotg->dev, " Endpoint: %d, %s\n", 2306 usb_pipeendpoint(urb->pipe), 2307 usb_pipein(urb->pipe) ? "IN" : "OUT"); 2308 2309 switch (usb_pipetype(urb->pipe)) { 2310 case PIPE_CONTROL: 2311 pipetype = "CONTROL"; 2312 break; 2313 case PIPE_BULK: 2314 pipetype = "BULK"; 2315 break; 2316 case PIPE_INTERRUPT: 2317 pipetype = "INTERRUPT"; 2318 break; 2319 case PIPE_ISOCHRONOUS: 2320 pipetype = "ISOCHRONOUS"; 2321 break; 2322 default: 2323 pipetype = "UNKNOWN"; 2324 break; 2325 } 2326 2327 dev_vdbg(hsotg->dev, " Endpoint type: %s %s (%s)\n", pipetype, 2328 usb_urb_dir_in(urb) ? "IN" : "OUT", usb_pipein(urb->pipe) ? 2329 "IN" : "OUT"); 2330 2331 switch (urb->dev->speed) { 2332 case USB_SPEED_HIGH: 2333 speed = "HIGH"; 2334 break; 2335 case USB_SPEED_FULL: 2336 speed = "FULL"; 2337 break; 2338 case USB_SPEED_LOW: 2339 speed = "LOW"; 2340 break; 2341 default: 2342 speed = "UNKNOWN"; 2343 break; 2344 } 2345 2346 dev_vdbg(hsotg->dev, " Speed: %s\n", speed); 2347 dev_vdbg(hsotg->dev, " Max packet size: %d\n", 2348 usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))); 2349 dev_vdbg(hsotg->dev, " Data buffer length: %d\n", 2350 urb->transfer_buffer_length); 2351 dev_vdbg(hsotg->dev, " Transfer buffer: %p, Transfer DMA: %08lx\n", 2352 urb->transfer_buffer, (unsigned long)urb->transfer_dma); 2353 dev_vdbg(hsotg->dev, " Setup buffer: %p, Setup DMA: %08lx\n", 2354 urb->setup_packet, (unsigned long)urb->setup_dma); 2355 dev_vdbg(hsotg->dev, " Interval: %d\n", urb->interval); 2356 2357 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { 2358 int i; 2359 2360 for (i = 0; i < urb->number_of_packets; i++) { 2361 dev_vdbg(hsotg->dev, " ISO Desc %d:\n", i); 2362 dev_vdbg(hsotg->dev, " offset: %d, length %d\n", 2363 urb->iso_frame_desc[i].offset, 2364 urb->iso_frame_desc[i].length); 2365 } 2366 } 2367 #endif 2368 } 2369 2370 /* 2371 * Starts processing a USB transfer request specified by a USB Request Block 2372 * (URB). mem_flags indicates the type of memory allocation to use while 2373 * processing this URB. 2374 */ 2375 static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, 2376 gfp_t mem_flags) 2377 { 2378 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); 2379 struct usb_host_endpoint *ep = urb->ep; 2380 struct dwc2_hcd_urb *dwc2_urb; 2381 int i; 2382 int retval; 2383 int alloc_bandwidth = 0; 2384 u8 ep_type = 0; 2385 u32 tflags = 0; 2386 void *buf; 2387 unsigned long flags; 2388 2389 if (dbg_urb(urb)) { 2390 dev_vdbg(hsotg->dev, "DWC OTG HCD URB Enqueue\n"); 2391 dwc2_dump_urb_info(hcd, urb, "urb_enqueue"); 2392 } 2393 2394 if (ep == NULL) 2395 return -EINVAL; 2396 2397 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS || 2398 usb_pipetype(urb->pipe) == PIPE_INTERRUPT) { 2399 spin_lock_irqsave(&hsotg->lock, flags); 2400 if (!dwc2_hcd_is_bandwidth_allocated(hsotg, ep)) 2401 alloc_bandwidth = 1; 2402 spin_unlock_irqrestore(&hsotg->lock, flags); 2403 } 2404 2405 switch (usb_pipetype(urb->pipe)) { 2406 case PIPE_CONTROL: 2407 ep_type = USB_ENDPOINT_XFER_CONTROL; 2408 break; 2409 case PIPE_ISOCHRONOUS: 2410 ep_type = USB_ENDPOINT_XFER_ISOC; 2411 break; 2412 case PIPE_BULK: 2413 ep_type = USB_ENDPOINT_XFER_BULK; 2414 break; 2415 case PIPE_INTERRUPT: 2416 ep_type = USB_ENDPOINT_XFER_INT; 2417 break; 2418 default: 2419 dev_warn(hsotg->dev, "Wrong ep type\n"); 2420 } 2421 2422 dwc2_urb = dwc2_hcd_urb_alloc(hsotg, urb->number_of_packets, 2423 mem_flags); 2424 if (!dwc2_urb) 2425 return -ENOMEM; 2426 2427 dwc2_hcd_urb_set_pipeinfo(hsotg, dwc2_urb, usb_pipedevice(urb->pipe), 2428 usb_pipeendpoint(urb->pipe), ep_type, 2429 usb_pipein(urb->pipe), 2430 usb_maxpacket(urb->dev, urb->pipe, 2431 !(usb_pipein(urb->pipe)))); 2432 2433 buf = urb->transfer_buffer; 2434 2435 if (hcd->self.uses_dma) { 2436 if (!buf && (urb->transfer_dma & 3)) { 2437 dev_err(hsotg->dev, 2438 "%s: unaligned transfer with no transfer_buffer", 2439 __func__); 2440 retval = -EINVAL; 2441 goto fail1; 2442 } 2443 } 2444 2445 if (!(urb->transfer_flags & URB_NO_INTERRUPT)) 2446 tflags |= URB_GIVEBACK_ASAP; 2447 if (urb->transfer_flags & URB_ZERO_PACKET) 2448 tflags |= URB_SEND_ZERO_PACKET; 2449 2450 dwc2_urb->priv = urb; 2451 dwc2_urb->buf = buf; 2452 dwc2_urb->dma = urb->transfer_dma; 2453 dwc2_urb->length = urb->transfer_buffer_length; 2454 dwc2_urb->setup_packet = urb->setup_packet; 2455 dwc2_urb->setup_dma = urb->setup_dma; 2456 dwc2_urb->flags = tflags; 2457 dwc2_urb->interval = urb->interval; 2458 dwc2_urb->status = -EINPROGRESS; 2459 2460 for (i = 0; i < urb->number_of_packets; ++i) 2461 dwc2_hcd_urb_set_iso_desc_params(dwc2_urb, i, 2462 urb->iso_frame_desc[i].offset, 2463 urb->iso_frame_desc[i].length); 2464 2465 urb->hcpriv = dwc2_urb; 2466 2467 spin_lock_irqsave(&hsotg->lock, flags); 2468 retval = usb_hcd_link_urb_to_ep(hcd, urb); 2469 spin_unlock_irqrestore(&hsotg->lock, flags); 2470 if (retval) 2471 goto fail1; 2472 2473 retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, &ep->hcpriv, mem_flags); 2474 if (retval) 2475 goto fail2; 2476 2477 if (alloc_bandwidth) { 2478 spin_lock_irqsave(&hsotg->lock, flags); 2479 dwc2_allocate_bus_bandwidth(hcd, 2480 dwc2_hcd_get_ep_bandwidth(hsotg, ep), 2481 urb); 2482 spin_unlock_irqrestore(&hsotg->lock, flags); 2483 } 2484 2485 return 0; 2486 2487 fail2: 2488 spin_lock_irqsave(&hsotg->lock, flags); 2489 dwc2_urb->priv = NULL; 2490 usb_hcd_unlink_urb_from_ep(hcd, urb); 2491 spin_unlock_irqrestore(&hsotg->lock, flags); 2492 fail1: 2493 urb->hcpriv = NULL; 2494 kfree(dwc2_urb); 2495 2496 return retval; 2497 } 2498 2499 /* 2500 * Aborts/cancels a USB transfer request. Always returns 0 to indicate success. 2501 */ 2502 static int _dwc2_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, 2503 int status) 2504 { 2505 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); 2506 int rc; 2507 unsigned long flags; 2508 2509 dev_dbg(hsotg->dev, "DWC OTG HCD URB Dequeue\n"); 2510 dwc2_dump_urb_info(hcd, urb, "urb_dequeue"); 2511 2512 spin_lock_irqsave(&hsotg->lock, flags); 2513 2514 rc = usb_hcd_check_unlink_urb(hcd, urb, status); 2515 if (rc) 2516 goto out; 2517 2518 if (!urb->hcpriv) { 2519 dev_dbg(hsotg->dev, "## urb->hcpriv is NULL ##\n"); 2520 goto out; 2521 } 2522 2523 rc = dwc2_hcd_urb_dequeue(hsotg, urb->hcpriv); 2524 2525 usb_hcd_unlink_urb_from_ep(hcd, urb); 2526 2527 kfree(urb->hcpriv); 2528 urb->hcpriv = NULL; 2529 2530 /* Higher layer software sets URB status */ 2531 spin_unlock(&hsotg->lock); 2532 usb_hcd_giveback_urb(hcd, urb, status); 2533 spin_lock(&hsotg->lock); 2534 2535 dev_dbg(hsotg->dev, "Called usb_hcd_giveback_urb()\n"); 2536 dev_dbg(hsotg->dev, " urb->status = %d\n", urb->status); 2537 out: 2538 spin_unlock_irqrestore(&hsotg->lock, flags); 2539 2540 return rc; 2541 } 2542 2543 /* 2544 * Frees resources in the DWC_otg controller related to a given endpoint. Also 2545 * clears state in the HCD related to the endpoint. Any URBs for the endpoint 2546 * must already be dequeued. 2547 */ 2548 static void _dwc2_hcd_endpoint_disable(struct usb_hcd *hcd, 2549 struct usb_host_endpoint *ep) 2550 { 2551 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); 2552 2553 dev_dbg(hsotg->dev, 2554 "DWC OTG HCD EP DISABLE: bEndpointAddress=0x%02x, ep->hcpriv=%p\n", 2555 ep->desc.bEndpointAddress, ep->hcpriv); 2556 dwc2_hcd_endpoint_disable(hsotg, ep, 250); 2557 } 2558 2559 /* 2560 * Resets endpoint specific parameter values, in current version used to reset 2561 * the data toggle (as a WA). This function can be called from usb_clear_halt 2562 * routine. 2563 */ 2564 static void _dwc2_hcd_endpoint_reset(struct usb_hcd *hcd, 2565 struct usb_host_endpoint *ep) 2566 { 2567 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); 2568 unsigned long flags; 2569 2570 dev_dbg(hsotg->dev, 2571 "DWC OTG HCD EP RESET: bEndpointAddress=0x%02x\n", 2572 ep->desc.bEndpointAddress); 2573 2574 spin_lock_irqsave(&hsotg->lock, flags); 2575 dwc2_hcd_endpoint_reset(hsotg, ep); 2576 spin_unlock_irqrestore(&hsotg->lock, flags); 2577 } 2578 2579 /* 2580 * Handles host mode interrupts for the DWC_otg controller. Returns IRQ_NONE if 2581 * there was no interrupt to handle. Returns IRQ_HANDLED if there was a valid 2582 * interrupt. 2583 * 2584 * This function is called by the USB core when an interrupt occurs 2585 */ 2586 static irqreturn_t _dwc2_hcd_irq(struct usb_hcd *hcd) 2587 { 2588 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); 2589 2590 return dwc2_handle_hcd_intr(hsotg); 2591 } 2592 2593 /* 2594 * Creates Status Change bitmap for the root hub and root port. The bitmap is 2595 * returned in buf. Bit 0 is the status change indicator for the root hub. Bit 1 2596 * is the status change indicator for the single root port. Returns 1 if either 2597 * change indicator is 1, otherwise returns 0. 2598 */ 2599 static int _dwc2_hcd_hub_status_data(struct usb_hcd *hcd, char *buf) 2600 { 2601 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); 2602 2603 buf[0] = dwc2_hcd_is_status_changed(hsotg, 1) << 1; 2604 return buf[0] != 0; 2605 } 2606 2607 /* Handles hub class-specific requests */ 2608 static int _dwc2_hcd_hub_control(struct usb_hcd *hcd, u16 typereq, u16 wvalue, 2609 u16 windex, char *buf, u16 wlength) 2610 { 2611 int retval = dwc2_hcd_hub_control(dwc2_hcd_to_hsotg(hcd), typereq, 2612 wvalue, windex, buf, wlength); 2613 return retval; 2614 } 2615 2616 /* Handles hub TT buffer clear completions */ 2617 static void _dwc2_hcd_clear_tt_buffer_complete(struct usb_hcd *hcd, 2618 struct usb_host_endpoint *ep) 2619 { 2620 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); 2621 struct dwc2_qh *qh; 2622 unsigned long flags; 2623 2624 qh = ep->hcpriv; 2625 if (!qh) 2626 return; 2627 2628 spin_lock_irqsave(&hsotg->lock, flags); 2629 qh->tt_buffer_dirty = 0; 2630 2631 if (hsotg->flags.b.port_connect_status) 2632 dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_ALL); 2633 2634 spin_unlock_irqrestore(&hsotg->lock, flags); 2635 } 2636 2637 static struct hc_driver dwc2_hc_driver = { 2638 .description = "dwc2_hsotg", 2639 .product_desc = "DWC OTG Controller", 2640 .hcd_priv_size = sizeof(struct wrapper_priv_data), 2641 2642 .irq = _dwc2_hcd_irq, 2643 .flags = HCD_MEMORY | HCD_USB2, 2644 2645 .start = _dwc2_hcd_start, 2646 .stop = _dwc2_hcd_stop, 2647 .urb_enqueue = _dwc2_hcd_urb_enqueue, 2648 .urb_dequeue = _dwc2_hcd_urb_dequeue, 2649 .endpoint_disable = _dwc2_hcd_endpoint_disable, 2650 .endpoint_reset = _dwc2_hcd_endpoint_reset, 2651 .get_frame_number = _dwc2_hcd_get_frame_number, 2652 2653 .hub_status_data = _dwc2_hcd_hub_status_data, 2654 .hub_control = _dwc2_hcd_hub_control, 2655 .clear_tt_buffer_complete = _dwc2_hcd_clear_tt_buffer_complete, 2656 }; 2657 2658 /* 2659 * Frees secondary storage associated with the dwc2_hsotg structure contained 2660 * in the struct usb_hcd field 2661 */ 2662 static void dwc2_hcd_free(struct dwc2_hsotg *hsotg) 2663 { 2664 u32 ahbcfg; 2665 u32 dctl; 2666 int i; 2667 2668 dev_dbg(hsotg->dev, "DWC OTG HCD FREE\n"); 2669 2670 /* Free memory for QH/QTD lists */ 2671 dwc2_qh_list_free(hsotg, &hsotg->non_periodic_sched_inactive); 2672 dwc2_qh_list_free(hsotg, &hsotg->non_periodic_sched_active); 2673 dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_inactive); 2674 dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_ready); 2675 dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_assigned); 2676 dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_queued); 2677 2678 /* Free memory for the host channels */ 2679 for (i = 0; i < MAX_EPS_CHANNELS; i++) { 2680 struct dwc2_host_chan *chan = hsotg->hc_ptr_array[i]; 2681 2682 if (chan != NULL) { 2683 dev_dbg(hsotg->dev, "HCD Free channel #%i, chan=%p\n", 2684 i, chan); 2685 hsotg->hc_ptr_array[i] = NULL; 2686 kfree(chan); 2687 } 2688 } 2689 2690 if (hsotg->core_params->dma_enable > 0) { 2691 if (hsotg->status_buf) { 2692 dma_free_coherent(hsotg->dev, DWC2_HCD_STATUS_BUF_SIZE, 2693 hsotg->status_buf, 2694 hsotg->status_buf_dma); 2695 hsotg->status_buf = NULL; 2696 } 2697 } else { 2698 kfree(hsotg->status_buf); 2699 hsotg->status_buf = NULL; 2700 } 2701 2702 ahbcfg = readl(hsotg->regs + GAHBCFG); 2703 2704 /* Disable all interrupts */ 2705 ahbcfg &= ~GAHBCFG_GLBL_INTR_EN; 2706 writel(ahbcfg, hsotg->regs + GAHBCFG); 2707 writel(0, hsotg->regs + GINTMSK); 2708 2709 if (hsotg->hw_params.snpsid >= DWC2_CORE_REV_3_00a) { 2710 dctl = readl(hsotg->regs + DCTL); 2711 dctl |= DCTL_SFTDISCON; 2712 writel(dctl, hsotg->regs + DCTL); 2713 } 2714 2715 if (hsotg->wq_otg) { 2716 if (!cancel_work_sync(&hsotg->wf_otg)) 2717 flush_workqueue(hsotg->wq_otg); 2718 destroy_workqueue(hsotg->wq_otg); 2719 } 2720 2721 kfree(hsotg->core_params); 2722 hsotg->core_params = NULL; 2723 del_timer(&hsotg->wkp_timer); 2724 } 2725 2726 static void dwc2_hcd_release(struct dwc2_hsotg *hsotg) 2727 { 2728 /* Turn off all host-specific interrupts */ 2729 dwc2_disable_host_interrupts(hsotg); 2730 2731 dwc2_hcd_free(hsotg); 2732 } 2733 2734 /* 2735 * Sets all parameters to the given value. 2736 * 2737 * Assumes that the dwc2_core_params struct contains only integers. 2738 */ 2739 void dwc2_set_all_params(struct dwc2_core_params *params, int value) 2740 { 2741 int *p = (int *)params; 2742 size_t size = sizeof(*params) / sizeof(*p); 2743 int i; 2744 2745 for (i = 0; i < size; i++) 2746 p[i] = value; 2747 } 2748 EXPORT_SYMBOL_GPL(dwc2_set_all_params); 2749 2750 /* 2751 * Initializes the HCD. This function allocates memory for and initializes the 2752 * static parts of the usb_hcd and dwc2_hsotg structures. It also registers the 2753 * USB bus with the core and calls the hc_driver->start() function. It returns 2754 * a negative error on failure. 2755 */ 2756 int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq, 2757 const struct dwc2_core_params *params) 2758 { 2759 struct usb_hcd *hcd; 2760 struct dwc2_host_chan *channel; 2761 u32 hcfg; 2762 int i, num_channels; 2763 int retval; 2764 2765 dev_dbg(hsotg->dev, "DWC OTG HCD INIT\n"); 2766 2767 /* Detect config values from hardware */ 2768 retval = dwc2_get_hwparams(hsotg); 2769 2770 if (retval) 2771 return retval; 2772 2773 retval = -ENOMEM; 2774 2775 hcfg = readl(hsotg->regs + HCFG); 2776 dev_dbg(hsotg->dev, "hcfg=%08x\n", hcfg); 2777 2778 #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS 2779 hsotg->frame_num_array = kzalloc(sizeof(*hsotg->frame_num_array) * 2780 FRAME_NUM_ARRAY_SIZE, GFP_KERNEL); 2781 if (!hsotg->frame_num_array) 2782 goto error1; 2783 hsotg->last_frame_num_array = kzalloc( 2784 sizeof(*hsotg->last_frame_num_array) * 2785 FRAME_NUM_ARRAY_SIZE, GFP_KERNEL); 2786 if (!hsotg->last_frame_num_array) 2787 goto error1; 2788 hsotg->last_frame_num = HFNUM_MAX_FRNUM; 2789 #endif 2790 2791 hsotg->core_params = kzalloc(sizeof(*hsotg->core_params), GFP_KERNEL); 2792 if (!hsotg->core_params) 2793 goto error1; 2794 2795 dwc2_set_all_params(hsotg->core_params, -1); 2796 2797 /* Validate parameter values */ 2798 dwc2_set_parameters(hsotg, params); 2799 2800 /* Check if the bus driver or platform code has setup a dma_mask */ 2801 if (hsotg->core_params->dma_enable > 0 && 2802 hsotg->dev->dma_mask == NULL) { 2803 dev_warn(hsotg->dev, 2804 "dma_mask not set, disabling DMA\n"); 2805 hsotg->core_params->dma_enable = 0; 2806 hsotg->core_params->dma_desc_enable = 0; 2807 } 2808 2809 /* Set device flags indicating whether the HCD supports DMA */ 2810 if (hsotg->core_params->dma_enable > 0) { 2811 if (dma_set_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0) 2812 dev_warn(hsotg->dev, "can't set DMA mask\n"); 2813 if (dma_set_coherent_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0) 2814 dev_warn(hsotg->dev, "can't set coherent DMA mask\n"); 2815 } 2816 2817 hcd = usb_create_hcd(&dwc2_hc_driver, hsotg->dev, dev_name(hsotg->dev)); 2818 if (!hcd) 2819 goto error1; 2820 2821 if (hsotg->core_params->dma_enable <= 0) 2822 hcd->self.uses_dma = 0; 2823 2824 hcd->has_tt = 1; 2825 2826 spin_lock_init(&hsotg->lock); 2827 ((struct wrapper_priv_data *) &hcd->hcd_priv)->hsotg = hsotg; 2828 hsotg->priv = hcd; 2829 2830 /* 2831 * Disable the global interrupt until all the interrupt handlers are 2832 * installed 2833 */ 2834 dwc2_disable_global_interrupts(hsotg); 2835 2836 /* Initialize the DWC_otg core, and select the Phy type */ 2837 retval = dwc2_core_init(hsotg, true, irq); 2838 if (retval) 2839 goto error2; 2840 2841 /* Create new workqueue and init work */ 2842 retval = -ENOMEM; 2843 hsotg->wq_otg = create_singlethread_workqueue("dwc2"); 2844 if (!hsotg->wq_otg) { 2845 dev_err(hsotg->dev, "Failed to create workqueue\n"); 2846 goto error2; 2847 } 2848 INIT_WORK(&hsotg->wf_otg, dwc2_conn_id_status_change); 2849 2850 setup_timer(&hsotg->wkp_timer, dwc2_wakeup_detected, 2851 (unsigned long)hsotg); 2852 2853 /* Initialize the non-periodic schedule */ 2854 INIT_LIST_HEAD(&hsotg->non_periodic_sched_inactive); 2855 INIT_LIST_HEAD(&hsotg->non_periodic_sched_active); 2856 2857 /* Initialize the periodic schedule */ 2858 INIT_LIST_HEAD(&hsotg->periodic_sched_inactive); 2859 INIT_LIST_HEAD(&hsotg->periodic_sched_ready); 2860 INIT_LIST_HEAD(&hsotg->periodic_sched_assigned); 2861 INIT_LIST_HEAD(&hsotg->periodic_sched_queued); 2862 2863 /* 2864 * Create a host channel descriptor for each host channel implemented 2865 * in the controller. Initialize the channel descriptor array. 2866 */ 2867 INIT_LIST_HEAD(&hsotg->free_hc_list); 2868 num_channels = hsotg->core_params->host_channels; 2869 memset(&hsotg->hc_ptr_array[0], 0, sizeof(hsotg->hc_ptr_array)); 2870 2871 for (i = 0; i < num_channels; i++) { 2872 channel = kzalloc(sizeof(*channel), GFP_KERNEL); 2873 if (channel == NULL) 2874 goto error3; 2875 channel->hc_num = i; 2876 hsotg->hc_ptr_array[i] = channel; 2877 } 2878 2879 if (hsotg->core_params->uframe_sched > 0) 2880 dwc2_hcd_init_usecs(hsotg); 2881 2882 /* Initialize hsotg start work */ 2883 INIT_DELAYED_WORK(&hsotg->start_work, dwc2_hcd_start_func); 2884 2885 /* Initialize port reset work */ 2886 INIT_DELAYED_WORK(&hsotg->reset_work, dwc2_hcd_reset_func); 2887 2888 /* 2889 * Allocate space for storing data on status transactions. Normally no 2890 * data is sent, but this space acts as a bit bucket. This must be 2891 * done after usb_add_hcd since that function allocates the DMA buffer 2892 * pool. 2893 */ 2894 if (hsotg->core_params->dma_enable > 0) 2895 hsotg->status_buf = dma_alloc_coherent(hsotg->dev, 2896 DWC2_HCD_STATUS_BUF_SIZE, 2897 &hsotg->status_buf_dma, GFP_KERNEL); 2898 else 2899 hsotg->status_buf = kzalloc(DWC2_HCD_STATUS_BUF_SIZE, 2900 GFP_KERNEL); 2901 2902 if (!hsotg->status_buf) 2903 goto error3; 2904 2905 hsotg->otg_port = 1; 2906 hsotg->frame_list = NULL; 2907 hsotg->frame_list_dma = 0; 2908 hsotg->periodic_qh_count = 0; 2909 2910 /* Initiate lx_state to L3 disconnected state */ 2911 hsotg->lx_state = DWC2_L3; 2912 2913 hcd->self.otg_port = hsotg->otg_port; 2914 2915 /* Don't support SG list at this point */ 2916 hcd->self.sg_tablesize = 0; 2917 2918 /* 2919 * Finish generic HCD initialization and start the HCD. This function 2920 * allocates the DMA buffer pool, registers the USB bus, requests the 2921 * IRQ line, and calls hcd_start method. 2922 */ 2923 retval = usb_add_hcd(hcd, irq, IRQF_SHARED); 2924 if (retval < 0) 2925 goto error3; 2926 2927 device_wakeup_enable(hcd->self.controller); 2928 2929 dwc2_hcd_dump_state(hsotg); 2930 2931 dwc2_enable_global_interrupts(hsotg); 2932 2933 return 0; 2934 2935 error3: 2936 dwc2_hcd_release(hsotg); 2937 error2: 2938 usb_put_hcd(hcd); 2939 error1: 2940 kfree(hsotg->core_params); 2941 2942 #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS 2943 kfree(hsotg->last_frame_num_array); 2944 kfree(hsotg->frame_num_array); 2945 #endif 2946 2947 dev_err(hsotg->dev, "%s() FAILED, returning %d\n", __func__, retval); 2948 return retval; 2949 } 2950 EXPORT_SYMBOL_GPL(dwc2_hcd_init); 2951 2952 /* 2953 * Removes the HCD. 2954 * Frees memory and resources associated with the HCD and deregisters the bus. 2955 */ 2956 void dwc2_hcd_remove(struct dwc2_hsotg *hsotg) 2957 { 2958 struct usb_hcd *hcd; 2959 2960 dev_dbg(hsotg->dev, "DWC OTG HCD REMOVE\n"); 2961 2962 hcd = dwc2_hsotg_to_hcd(hsotg); 2963 dev_dbg(hsotg->dev, "hsotg->hcd = %p\n", hcd); 2964 2965 if (!hcd) { 2966 dev_dbg(hsotg->dev, "%s: dwc2_hsotg_to_hcd(hsotg) NULL!\n", 2967 __func__); 2968 return; 2969 } 2970 2971 usb_remove_hcd(hcd); 2972 hsotg->priv = NULL; 2973 dwc2_hcd_release(hsotg); 2974 usb_put_hcd(hcd); 2975 2976 #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS 2977 kfree(hsotg->last_frame_num_array); 2978 kfree(hsotg->frame_num_array); 2979 #endif 2980 } 2981 EXPORT_SYMBOL_GPL(dwc2_hcd_remove); 2982