// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Copyright 2008 Openmoko, Inc.
 * Copyright 2008 Simtec Electronics
 *	Ben Dooks <ben@simtec.co.uk>
 *	http://armlinux.simtec.co.uk/
 *
 * S3C USB2.0 High-speed / OTG driver
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_platform.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/phy.h>

#include "core.h"
#include "hw.h"

/* conversion functions */
static inline struct dwc2_hsotg_req *our_req(struct usb_request *req)
{
	return container_of(req, struct dwc2_hsotg_req, req);
}

static inline struct dwc2_hsotg_ep *our_ep(struct usb_ep *ep)
{
	return container_of(ep, struct dwc2_hsotg_ep, ep);
}

static inline struct dwc2_hsotg *to_hsotg(struct usb_gadget *gadget)
{
	return container_of(gadget, struct dwc2_hsotg, gadget);
}

static inline void dwc2_set_bit(struct dwc2_hsotg *hsotg, u32 offset, u32 val)
{
	dwc2_writel(hsotg, dwc2_readl(hsotg, offset) | val, offset);
}

static inline void dwc2_clear_bit(struct dwc2_hsotg *hsotg, u32 offset, u32 val)
{
	dwc2_writel(hsotg, dwc2_readl(hsotg, offset) & ~val, offset);
}

static inline struct dwc2_hsotg_ep *index_to_ep(struct dwc2_hsotg *hsotg,
						u32 ep_index, u32 dir_in)
{
	if (dir_in)
		return hsotg->eps_in[ep_index];
	else
		return hsotg->eps_out[ep_index];
}

/* forward declaration of functions */
static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg);

/**
 * using_dma - return the DMA status of the driver.
 * @hsotg: The driver state.
 *
 * Return true if we're using DMA.
 *
 * Currently, we have the DMA support code worked into everywhere
 * that needs it, but the AMBA DMA implementation in the hardware can
 * only DMA from 32bit aligned addresses. This means that gadgets such
 * as the CDC Ethernet cannot work as they often pass packets which are
 * not 32bit aligned.
 *
 * Unfortunately the choice to use DMA or not is global to the controller
 * and seems to be only settable when the controller is being put through
 * a core reset. This means we either need to fix the gadgets to take
 * account of DMA alignment, or add bounce buffers (yuerk).
 *
 * g_using_dma is set depending on dts flag.
 */
static inline bool using_dma(struct dwc2_hsotg *hsotg)
{
	return hsotg->params.g_dma;
}

/*
 * using_desc_dma - return the descriptor DMA status of the driver.
 * @hsotg: The driver state.
 *
 * Return true if we're using descriptor DMA.
 */
static inline bool using_desc_dma(struct dwc2_hsotg *hsotg)
{
	return hsotg->params.g_dma_desc;
}
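/*
 * Illustrative sketch, not part of the driver: the 32-bit alignment
 * constraint described above is the same test applied later in
 * dwc2_hsotg_handle_unaligned_buf_start() before falling back to a
 * bounce buffer:
 *
 *	if (using_dma(hsotg) && ((long)req->buf & 3))
 *		; // not 32-bit aligned: a bounce buffer is required
 */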
/**
 * dwc2_gadget_incr_frame_num - Increments the targeted frame number.
 * @hs_ep: The endpoint
 *
 * This function will also check if the frame number overruns DSTS_SOFFN_LIMIT.
 * If an overrun occurs it will wrap the value and set the frame_overrun flag.
 */
static inline void dwc2_gadget_incr_frame_num(struct dwc2_hsotg_ep *hs_ep)
{
	hs_ep->target_frame += hs_ep->interval;
	if (hs_ep->target_frame > DSTS_SOFFN_LIMIT) {
		hs_ep->frame_overrun = true;
		hs_ep->target_frame &= DSTS_SOFFN_LIMIT;
	} else {
		hs_ep->frame_overrun = false;
	}
}

/**
 * dwc2_gadget_dec_frame_num_by_one - Decrements the targeted frame number
 *                                    by one.
 * @hs_ep: The endpoint.
 *
 * This function is used in the service interval based scheduling flow to
 * calculate the descriptor frame number field value. In service interval
 * mode the frame number in the descriptor should point to the last (u)frame
 * in the interval.
 *
 */
static inline void dwc2_gadget_dec_frame_num_by_one(struct dwc2_hsotg_ep *hs_ep)
{
	if (hs_ep->target_frame)
		hs_ep->target_frame -= 1;
	else
		hs_ep->target_frame = DSTS_SOFFN_LIMIT;
}
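/*
 * Worked example (illustrative only, assuming the usual 14-bit frame
 * counter, i.e. DSTS_SOFFN_LIMIT == 0x3fff): with interval == 8 and
 * target_frame == 0x3ffc, dwc2_gadget_incr_frame_num() computes
 * 0x3ffc + 8 == 0x4004, which overruns the limit, so target_frame
 * wraps to 0x0004 and frame_overrun is set.
 */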
/**
 * dwc2_hsotg_en_gsint - enable one or more of the general interrupts
 * @hsotg: The device state
 * @ints: A bitmask of the interrupts to enable
 */
static void dwc2_hsotg_en_gsint(struct dwc2_hsotg *hsotg, u32 ints)
{
	u32 gsintmsk = dwc2_readl(hsotg, GINTMSK);
	u32 new_gsintmsk;

	new_gsintmsk = gsintmsk | ints;

	if (new_gsintmsk != gsintmsk) {
		dev_dbg(hsotg->dev, "gsintmsk now 0x%08x\n", new_gsintmsk);
		dwc2_writel(hsotg, new_gsintmsk, GINTMSK);
	}
}

/**
 * dwc2_hsotg_disable_gsint - disable one or more of the general interrupts
 * @hsotg: The device state
 * @ints: A bitmask of the interrupts to disable
 */
static void dwc2_hsotg_disable_gsint(struct dwc2_hsotg *hsotg, u32 ints)
{
	u32 gsintmsk = dwc2_readl(hsotg, GINTMSK);
	u32 new_gsintmsk;

	new_gsintmsk = gsintmsk & ~ints;

	if (new_gsintmsk != gsintmsk)
		dwc2_writel(hsotg, new_gsintmsk, GINTMSK);
}

/**
 * dwc2_hsotg_ctrl_epint - enable/disable an endpoint irq
 * @hsotg: The device state
 * @ep: The endpoint index
 * @dir_in: True if direction is in.
 * @en: The enable value, true to enable
 *
 * Set or clear the mask for an individual endpoint's interrupt
 * request.
 */
static void dwc2_hsotg_ctrl_epint(struct dwc2_hsotg *hsotg,
				  unsigned int ep, unsigned int dir_in,
				  unsigned int en)
{
	unsigned long flags;
	u32 bit = 1 << ep;
	u32 daint;

	if (!dir_in)
		bit <<= 16;

	local_irq_save(flags);
	daint = dwc2_readl(hsotg, DAINTMSK);
	if (en)
		daint |= bit;
	else
		daint &= ~bit;
	dwc2_writel(hsotg, daint, DAINTMSK);
	local_irq_restore(flags);
}

/**
 * dwc2_hsotg_tx_fifo_count - return count of TX FIFOs in device mode
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
int dwc2_hsotg_tx_fifo_count(struct dwc2_hsotg *hsotg)
{
	if (hsotg->hw_params.en_multiple_tx_fifo)
		/* In dedicated FIFO mode we need count of IN EPs */
		return hsotg->hw_params.num_dev_in_eps;
	else
		/* In shared FIFO mode we need count of Periodic IN EPs */
		return hsotg->hw_params.num_dev_perio_in_ep;
}

/**
 * dwc2_hsotg_tx_fifo_total_depth - return total FIFO depth available for
 * device mode TX FIFOs
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg)
{
	int addr;
	int tx_addr_max;
	u32 np_tx_fifo_size;

	np_tx_fifo_size = min_t(u32, hsotg->hw_params.dev_nperio_tx_fifo_size,
				hsotg->params.g_np_tx_fifo_size);

	/* Get Endpoint Info Control block size in DWORDs. */
	tx_addr_max = hsotg->hw_params.total_fifo_size;

	addr = hsotg->params.g_rx_fifo_size + np_tx_fifo_size;
	if (tx_addr_max <= addr)
		return 0;

	return tx_addr_max - addr;
}

/**
 * dwc2_gadget_wkup_alert_handler - Handler for WKUP_ALERT interrupt
 *
 * @hsotg: Programming view of the DWC_otg controller
 *
 */
static void dwc2_gadget_wkup_alert_handler(struct dwc2_hsotg *hsotg)
{
	u32 gintsts2;
	u32 gintmsk2;

	gintsts2 = dwc2_readl(hsotg, GINTSTS2);
	gintmsk2 = dwc2_readl(hsotg, GINTMSK2);

	if (gintsts2 & GINTSTS2_WKUP_ALERT_INT) {
		dev_dbg(hsotg->dev, "%s: Wkup_Alert_Int\n", __func__);
		dwc2_set_bit(hsotg, GINTSTS2, GINTSTS2_WKUP_ALERT_INT);
		dwc2_set_bit(hsotg, DCTL, DCTL_RMTWKUPSIG);
	}
}

/**
 * dwc2_hsotg_tx_fifo_average_depth - returns average depth of device mode
 * TX FIFOs
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
int dwc2_hsotg_tx_fifo_average_depth(struct dwc2_hsotg *hsotg)
{
	int tx_fifo_count;
	int tx_fifo_depth;

	tx_fifo_depth = dwc2_hsotg_tx_fifo_total_depth(hsotg);

	tx_fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);

	if (!tx_fifo_count)
		return tx_fifo_depth;
	else
		return tx_fifo_depth / tx_fifo_count;
}
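/*
 * Worked example (illustrative numbers only): with a total_fifo_size of
 * 500 words, g_rx_fifo_size of 280 words and an effective
 * np_tx_fifo_size of 32 words, dwc2_hsotg_tx_fifo_total_depth() returns
 * 500 - (280 + 32) = 188 words; with 4 TX FIFOs,
 * dwc2_hsotg_tx_fifo_average_depth() then returns 188 / 4 = 47 words.
 */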
/**
 * dwc2_hsotg_init_fifo - initialise non-periodic FIFOs
 * @hsotg: The device instance.
 */
static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
{
	unsigned int ep;
	unsigned int addr;
	int timeout;

	u32 val;
	u32 *txfsz = hsotg->params.g_tx_fifo_size;

	/* Reset fifo map if not correctly cleared during previous session */
	WARN_ON(hsotg->fifo_map);
	hsotg->fifo_map = 0;

	/* set RX/NPTX FIFO sizes */
	dwc2_writel(hsotg, hsotg->params.g_rx_fifo_size, GRXFSIZ);
	dwc2_writel(hsotg, (hsotg->params.g_rx_fifo_size <<
		    FIFOSIZE_STARTADDR_SHIFT) |
		    (hsotg->params.g_np_tx_fifo_size << FIFOSIZE_DEPTH_SHIFT),
		    GNPTXFSIZ);

	/*
	 * arrange all the rest of the TX FIFOs, as some versions of this
	 * block have overlapping default addresses. This also ensures
	 * that if the settings have been changed, then they are set to
	 * known values.
	 */

	/* start at the end of the GNPTXFSIZ, rounded up */
	addr = hsotg->params.g_rx_fifo_size + hsotg->params.g_np_tx_fifo_size;

	/*
	 * Configure FIFO sizes from the provided configuration and assign
	 * them to endpoints dynamically according to the maxpacket size
	 * value of the given endpoint.
	 */
	for (ep = 1; ep < MAX_EPS_CHANNELS; ep++) {
		if (!txfsz[ep])
			continue;
		val = addr;
		val |= txfsz[ep] << FIFOSIZE_DEPTH_SHIFT;
		WARN_ONCE(addr + txfsz[ep] > hsotg->fifo_mem,
			  "insufficient fifo memory");
		addr += txfsz[ep];

		dwc2_writel(hsotg, val, DPTXFSIZN(ep));
		val = dwc2_readl(hsotg, DPTXFSIZN(ep));
	}

	dwc2_writel(hsotg, hsotg->hw_params.total_fifo_size |
		    addr << GDFIFOCFG_EPINFOBASE_SHIFT,
		    GDFIFOCFG);
	/*
	 * according to p428 of the design guide, we need to ensure that
	 * all fifos are flushed before continuing
	 */

	dwc2_writel(hsotg, GRSTCTL_TXFNUM(0x10) | GRSTCTL_TXFFLSH |
		    GRSTCTL_RXFFLSH, GRSTCTL);

	/* wait until the fifos are both flushed */
	timeout = 100;
	while (1) {
		val = dwc2_readl(hsotg, GRSTCTL);

		if ((val & (GRSTCTL_TXFFLSH | GRSTCTL_RXFFLSH)) == 0)
			break;

		if (--timeout == 0) {
			dev_err(hsotg->dev,
				"%s: timeout flushing fifos (GRSTCTL=%08x)\n",
				__func__, val);
			break;
		}

		udelay(1);
	}

	dev_dbg(hsotg->dev, "FIFOs reset, timeout at %d\n", timeout);
}
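/*
 * Resulting FIFO layout after dwc2_hsotg_init_fifo() (illustrative
 * summary of the code above): the RX FIFO occupies FIFO RAM from
 * address 0, the non-periodic TX FIFO starts at g_rx_fifo_size, each
 * dedicated TX FIFO is stacked immediately after the previous one, and
 * the running "addr" at the end of the loop is programmed into
 * GDFIFOCFG as the EP info block base.
 */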
/**
 * dwc2_hsotg_ep_alloc_request - allocate a USB request structure
 * @ep: USB endpoint to allocate request for.
 * @flags: Allocation flags
 *
 * Allocate a new USB request structure appropriate for the specified endpoint
 */
static struct usb_request *dwc2_hsotg_ep_alloc_request(struct usb_ep *ep,
						       gfp_t flags)
{
	struct dwc2_hsotg_req *req;

	req = kzalloc(sizeof(*req), flags);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}

/**
 * is_ep_periodic - return true if the endpoint is in periodic mode.
 * @hs_ep: The endpoint to query.
 *
 * Returns true if the endpoint is in periodic mode, meaning it is being
 * used for an Interrupt or ISO transfer.
 */
static inline int is_ep_periodic(struct dwc2_hsotg_ep *hs_ep)
{
	return hs_ep->periodic;
}

/**
 * dwc2_hsotg_unmap_dma - unmap the DMA memory being used for the request
 * @hsotg: The device state.
 * @hs_ep: The endpoint for the request
 * @hs_req: The request being processed.
 *
 * This is the reverse of dwc2_hsotg_map_dma(), called for the completion
 * of a request to ensure the buffer is ready for access by the caller.
 */
static void dwc2_hsotg_unmap_dma(struct dwc2_hsotg *hsotg,
				 struct dwc2_hsotg_ep *hs_ep,
				 struct dwc2_hsotg_req *hs_req)
{
	struct usb_request *req = &hs_req->req;

	usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in);
}

/*
 * dwc2_gadget_alloc_ctrl_desc_chains - allocate DMA descriptor chains
 * for Control endpoint
 * @hsotg: The device state.
 *
 * This function will allocate 4 descriptor chains for EP 0: 2 for the
 * Setup stage, and one each for the IN and OUT data/status transactions.
 */
static int dwc2_gadget_alloc_ctrl_desc_chains(struct dwc2_hsotg *hsotg)
{
	hsotg->setup_desc[0] =
		dmam_alloc_coherent(hsotg->dev,
				    sizeof(struct dwc2_dma_desc),
				    &hsotg->setup_desc_dma[0],
				    GFP_KERNEL);
	if (!hsotg->setup_desc[0])
		goto fail;

	hsotg->setup_desc[1] =
		dmam_alloc_coherent(hsotg->dev,
				    sizeof(struct dwc2_dma_desc),
				    &hsotg->setup_desc_dma[1],
				    GFP_KERNEL);
	if (!hsotg->setup_desc[1])
		goto fail;

	hsotg->ctrl_in_desc =
		dmam_alloc_coherent(hsotg->dev,
				    sizeof(struct dwc2_dma_desc),
				    &hsotg->ctrl_in_desc_dma,
				    GFP_KERNEL);
	if (!hsotg->ctrl_in_desc)
		goto fail;

	hsotg->ctrl_out_desc =
		dmam_alloc_coherent(hsotg->dev,
				    sizeof(struct dwc2_dma_desc),
				    &hsotg->ctrl_out_desc_dma,
				    GFP_KERNEL);
	if (!hsotg->ctrl_out_desc)
		goto fail;

	return 0;

fail:
	return -ENOMEM;
}
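/*
 * Illustrative mapping of the EP0 chains allocated above to
 * control-transfer stages, as selected later by
 * dwc2_gadget_set_ep0_desc_chain(): setup_desc[0] backs the SETUP and
 * STATUS_OUT stages there, ctrl_in_desc backs DATA_IN/STATUS_IN, and
 * ctrl_out_desc backs DATA_OUT.
 */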
/**
 * dwc2_hsotg_write_fifo - write packet Data to the TxFIFO
 * @hsotg: The controller state.
 * @hs_ep: The endpoint we're going to write for.
 * @hs_req: The request to write data for.
 *
 * This is called when the TxFIFO has some space in it to hold a new
 * transmission and we have something to give it. The actual setup of
 * the data size is done elsewhere, so all we have to do is to actually
 * write the data.
 *
 * The return value is zero if there is more space (or nothing was done)
 * otherwise -ENOSPC is returned if the FIFO space was used up.
 *
 * This routine is only needed for PIO.
 */
static int dwc2_hsotg_write_fifo(struct dwc2_hsotg *hsotg,
				 struct dwc2_hsotg_ep *hs_ep,
				 struct dwc2_hsotg_req *hs_req)
{
	bool periodic = is_ep_periodic(hs_ep);
	u32 gnptxsts = dwc2_readl(hsotg, GNPTXSTS);
	int buf_pos = hs_req->req.actual;
	int to_write = hs_ep->size_loaded;
	void *data;
	int can_write;
	int pkt_round;
	int max_transfer;

	to_write -= (buf_pos - hs_ep->last_load);

	/* if there's nothing to write, get out early */
	if (to_write == 0)
		return 0;

	if (periodic && !hsotg->dedicated_fifos) {
		u32 epsize = dwc2_readl(hsotg, DIEPTSIZ(hs_ep->index));
		int size_left;
		int size_done;

		/*
		 * work out how much data was loaded so we can calculate
		 * how much data is left in the fifo.
		 */

		size_left = DXEPTSIZ_XFERSIZE_GET(epsize);

		/*
		 * if shared fifo, we cannot write anything until the
		 * previous data has been completely sent.
		 */
		if (hs_ep->fifo_load != 0) {
			dwc2_hsotg_en_gsint(hsotg, GINTSTS_PTXFEMP);
			return -ENOSPC;
		}

		dev_dbg(hsotg->dev, "%s: left=%d, load=%d, fifo=%d, size %d\n",
			__func__, size_left,
			hs_ep->size_loaded, hs_ep->fifo_load, hs_ep->fifo_size);

		/* how much of the data has moved */
		size_done = hs_ep->size_loaded - size_left;

		/* how much data is left in the fifo */
		can_write = hs_ep->fifo_load - size_done;
		dev_dbg(hsotg->dev, "%s: => can_write1=%d\n",
			__func__, can_write);

		can_write = hs_ep->fifo_size - can_write;
		dev_dbg(hsotg->dev, "%s: => can_write2=%d\n",
			__func__, can_write);

		if (can_write <= 0) {
			dwc2_hsotg_en_gsint(hsotg, GINTSTS_PTXFEMP);
			return -ENOSPC;
		}
	} else if (hsotg->dedicated_fifos && hs_ep->index != 0) {
		can_write = dwc2_readl(hsotg,
				       DTXFSTS(hs_ep->fifo_index));

		can_write &= 0xffff;
		can_write *= 4;
	} else {
		if (GNPTXSTS_NP_TXQ_SPC_AVAIL_GET(gnptxsts) == 0) {
			dev_dbg(hsotg->dev,
				"%s: no queue slots available (0x%08x)\n",
				__func__, gnptxsts);

			dwc2_hsotg_en_gsint(hsotg, GINTSTS_NPTXFEMP);
			return -ENOSPC;
		}

		can_write = GNPTXSTS_NP_TXF_SPC_AVAIL_GET(gnptxsts);
		can_write *= 4;	/* fifo size is in 32bit quantities. */
	}

	max_transfer = hs_ep->ep.maxpacket * hs_ep->mc;

	dev_dbg(hsotg->dev, "%s: GNPTXSTS=%08x, can=%d, to=%d, max_transfer %d\n",
		__func__, gnptxsts, can_write, to_write, max_transfer);

	/*
	 * limit to 512 bytes of data, it seems at least on the non-periodic
	 * FIFO, requests of >512 cause the endpoint to get stuck with a
	 * fragment of the end of the transfer in it.
	 */
	if (can_write > 512 && !periodic)
		can_write = 512;

	/*
	 * limit the write to one max-packet size worth of data, but allow
	 * the transfer to return that it did not run out of fifo space
	 * doing it.
	 */
	if (to_write > max_transfer) {
		to_write = max_transfer;

		/* it's needed only when we do not use dedicated fifos */
		if (!hsotg->dedicated_fifos)
			dwc2_hsotg_en_gsint(hsotg,
					    periodic ? GINTSTS_PTXFEMP :
					    GINTSTS_NPTXFEMP);
	}

	/* see if we can write data */

	if (to_write > can_write) {
		to_write = can_write;
		pkt_round = to_write % max_transfer;

		/*
		 * Round the write down to an
		 * exact number of packets.
		 *
		 * Note, we do not currently check to see if we can ever
		 * write a full packet or not to the FIFO.
		 */

		if (pkt_round)
			to_write -= pkt_round;

		/*
		 * enable correct FIFO interrupt to alert us when there
		 * is more room left.
		 */

		/* it's needed only when we do not use dedicated fifos */
		if (!hsotg->dedicated_fifos)
			dwc2_hsotg_en_gsint(hsotg,
					    periodic ? GINTSTS_PTXFEMP :
					    GINTSTS_NPTXFEMP);
	}

	dev_dbg(hsotg->dev, "write %d/%d, can_write %d, done %d\n",
		to_write, hs_req->req.length, can_write, buf_pos);

	if (to_write <= 0)
		return -ENOSPC;

	hs_req->req.actual = buf_pos + to_write;
	hs_ep->total_data += to_write;

	if (periodic)
		hs_ep->fifo_load += to_write;

	to_write = DIV_ROUND_UP(to_write, 4);
	data = hs_req->req.buf + buf_pos;

	dwc2_writel_rep(hsotg, EPFIFO(hs_ep->index), data, to_write);

	return (to_write >= can_write) ? -ENOSPC : 0;
}
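/*
 * Worked example for the shared-FIFO branch above (illustrative numbers
 * only): with size_loaded = 1024, XFERSIZE reporting size_left = 256,
 * fifo_load = 1024 and fifo_size = 2048, size_done = 1024 - 256 = 768,
 * the data still sitting in the FIFO is 1024 - 768 = 256 bytes, and so
 * can_write = 2048 - 256 = 1792 bytes.
 */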
/**
 * get_ep_limit - get the maximum data length for this endpoint
 * @hs_ep: The endpoint
 *
 * Return the maximum data that can be queued in one go on a given endpoint
 * so that transfers that are too long can be split.
 */
static unsigned int get_ep_limit(struct dwc2_hsotg_ep *hs_ep)
{
	int index = hs_ep->index;
	unsigned int maxsize;
	unsigned int maxpkt;

	if (index != 0) {
		maxsize = DXEPTSIZ_XFERSIZE_LIMIT + 1;
		maxpkt = DXEPTSIZ_PKTCNT_LIMIT + 1;
	} else {
		maxsize = 64 + 64;
		if (hs_ep->dir_in)
			maxpkt = DIEPTSIZ0_PKTCNT_LIMIT + 1;
		else
			maxpkt = 2;
	}

	/* we made the constant loading easier above by using +1 */
	maxpkt--;
	maxsize--;

	/*
	 * constrain by packet count if maxpkts*pktsize is greater
	 * than the length register size.
	 */

	if ((maxpkt * hs_ep->ep.maxpacket) < maxsize)
		maxsize = maxpkt * hs_ep->ep.maxpacket;

	return maxsize;
}
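/*
 * Worked example (illustrative, assuming the usual 19-bit XFERSIZE and
 * 10-bit PKTCNT register fields, i.e. limits of 0x7ffff and 0x3ff): for
 * a non-EP0 bulk endpoint with maxpacket 512, maxsize is 524287 and
 * maxpkt is 1023; since 1023 * 512 = 523776 < 524287, get_ep_limit()
 * returns 523776, so the packet count is the limiting factor here.
 */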
/**
 * dwc2_hsotg_read_frameno - read current frame number
 * @hsotg: The device instance
 *
 * Return the current frame number
 */
static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg)
{
	u32 dsts;

	dsts = dwc2_readl(hsotg, DSTS);
	dsts &= DSTS_SOFFN_MASK;
	dsts >>= DSTS_SOFFN_SHIFT;

	return dsts;
}

/**
 * dwc2_gadget_get_chain_limit - get the maximum data payload value of the
 * DMA descriptor chain prepared for specific endpoint
 * @hs_ep: The endpoint
 *
 * Return the maximum data that can be queued in one go on a given endpoint
 * depending on its descriptor chain capacity so that transfers that
 * are too long can be split.
 */
static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
{
	int is_isoc = hs_ep->isochronous;
	unsigned int maxsize;

	if (is_isoc)
		maxsize = hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT :
					  DEV_DMA_ISOC_RX_NBYTES_LIMIT;
	else
		maxsize = DEV_DMA_NBYTES_LIMIT;

	/* Above, the size of one descriptor was chosen; multiply it */
	maxsize *= MAX_DMA_DESC_NUM_GENERIC;

	return maxsize;
}

/*
 * dwc2_gadget_get_desc_params - get DMA descriptor parameters.
 * @hs_ep: The endpoint
 * @mask: RX/TX bytes mask to be defined
 *
 * Returns maximum data payload for one descriptor after analyzing endpoint
 * characteristics.
 * The DMA descriptor transfer bytes limit depends on the EP type:
 * Control out - MPS,
 * Isochronous - descriptor rx/tx bytes bitfield limit,
 * Control In/Bulk/Interrupt - multiple of mps. This avoids concatenating
 * data from several descriptors within one packet.
 *
 * Selects the corresponding mask for RX/TX bytes as well.
 */
static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask)
{
	u32 mps = hs_ep->ep.maxpacket;
	int dir_in = hs_ep->dir_in;
	u32 desc_size = 0;

	if (!hs_ep->index && !dir_in) {
		desc_size = mps;
		*mask = DEV_DMA_NBYTES_MASK;
	} else if (hs_ep->isochronous) {
		if (dir_in) {
			desc_size = DEV_DMA_ISOC_TX_NBYTES_LIMIT;
			*mask = DEV_DMA_ISOC_TX_NBYTES_MASK;
		} else {
			desc_size = DEV_DMA_ISOC_RX_NBYTES_LIMIT;
			*mask = DEV_DMA_ISOC_RX_NBYTES_MASK;
		}
	} else {
		desc_size = DEV_DMA_NBYTES_LIMIT;
		*mask = DEV_DMA_NBYTES_MASK;

		/* Round down desc_size to be mps multiple */
		desc_size -= desc_size % mps;
	}

	return desc_size;
}

static void dwc2_gadget_fill_nonisoc_xfer_ddma_one(struct dwc2_hsotg_ep *hs_ep,
						   struct dwc2_dma_desc **desc,
						   dma_addr_t dma_buff,
						   unsigned int len,
						   bool true_last)
{
	int dir_in = hs_ep->dir_in;
	u32 mps = hs_ep->ep.maxpacket;
	u32 maxsize = 0;
	u32 offset = 0;
	u32 mask = 0;
	int i;

	maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);

	hs_ep->desc_count = (len / maxsize) +
				((len % maxsize) ? 1 : 0);
	if (len == 0)
		hs_ep->desc_count = 1;

	for (i = 0; i < hs_ep->desc_count; ++i) {
		(*desc)->status = 0;
		(*desc)->status |= (DEV_DMA_BUFF_STS_HBUSY
				 << DEV_DMA_BUFF_STS_SHIFT);

		if (len > maxsize) {
			if (!hs_ep->index && !dir_in)
				(*desc)->status |= (DEV_DMA_L | DEV_DMA_IOC);

			(*desc)->status |=
				maxsize << DEV_DMA_NBYTES_SHIFT & mask;
			(*desc)->buf = dma_buff + offset;

			len -= maxsize;
			offset += maxsize;
		} else {
			if (true_last)
				(*desc)->status |= (DEV_DMA_L | DEV_DMA_IOC);

			if (dir_in)
				(*desc)->status |= (len % mps) ? DEV_DMA_SHORT :
					((hs_ep->send_zlp && true_last) ?
					DEV_DMA_SHORT : 0);

			(*desc)->status |=
				len << DEV_DMA_NBYTES_SHIFT & mask;
			(*desc)->buf = dma_buff + offset;
		}

		(*desc)->status &= ~DEV_DMA_BUFF_STS_MASK;
		(*desc)->status |= (DEV_DMA_BUFF_STS_HREADY
				 << DEV_DMA_BUFF_STS_SHIFT);
		(*desc)++;
	}
}

/*
 * dwc2_gadget_config_nonisoc_xfer_ddma - prepare non ISOC DMA desc chain.
 * @hs_ep: The endpoint
 * @ureq: Request to transfer
 * @offset: offset in bytes
 * @len: Length of the transfer
 *
 * This function will iterate over the descriptor chain and fill its entries
 * with the corresponding information based on the transfer data.
 */
static void dwc2_gadget_config_nonisoc_xfer_ddma(struct dwc2_hsotg_ep *hs_ep,
						 struct usb_request *ureq,
						 unsigned int offset,
						 unsigned int len)
{
	struct dwc2_dma_desc *desc = hs_ep->desc_list;
	struct scatterlist *sg;
	int i;
	u8 desc_count = 0;

	/* non-DMA sg buffer */
	if (!ureq->num_sgs) {
		dwc2_gadget_fill_nonisoc_xfer_ddma_one(hs_ep, &desc,
			ureq->dma + offset, len, true);
		return;
	}

	/* DMA sg buffer */
	for_each_sg(ureq->sg, sg, ureq->num_sgs, i) {
		dwc2_gadget_fill_nonisoc_xfer_ddma_one(hs_ep, &desc,
			sg_dma_address(sg) + sg->offset, sg_dma_len(sg),
			sg_is_last(sg));
		desc_count += hs_ep->desc_count;
	}

	hs_ep->desc_count = desc_count;
}
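/*
 * Illustrative example of the chain sizing above (assuming
 * DEV_DMA_NBYTES_LIMIT is the 16-bit value 65535): for a bulk endpoint
 * with maxpacket 512, dwc2_gadget_get_desc_params() rounds the per
 * descriptor maxsize down to 65024; a 200000-byte transfer then takes
 * 200000 / 65024 = 3 full descriptors plus one partial, so
 * desc_count = 4, while len == 0 still consumes one descriptor (ZLP).
 */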
/*
 * dwc2_gadget_fill_isoc_desc - fills next isochronous descriptor in chain.
 * @hs_ep: The isochronous endpoint.
 * @dma_buff: usb requests dma buffer.
 * @len: usb request transfer length.
 *
 * Fills the next free descriptor with the data of the arrived usb request,
 * frame info, sets the Last and IOC bits, and increments next_desc. If the
 * filled descriptor is not the first one, removes the L bit from the
 * previous descriptor status.
 */
static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
				      dma_addr_t dma_buff, unsigned int len)
{
	struct dwc2_dma_desc *desc;
	struct dwc2_hsotg *hsotg = hs_ep->parent;
	u32 index;
	u32 maxsize = 0;
	u32 mask = 0;
	u8 pid = 0;

	maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);

	index = hs_ep->next_desc;
	desc = &hs_ep->desc_list[index];

	/* Check if descriptor chain full */
	if ((desc->status >> DEV_DMA_BUFF_STS_SHIFT) ==
	    DEV_DMA_BUFF_STS_HREADY) {
		dev_dbg(hsotg->dev, "%s: desc chain full\n", __func__);
		return 1;
	}

	/* Clear L bit of previous desc if more than one entries in the chain */
	if (hs_ep->next_desc)
		hs_ep->desc_list[index - 1].status &= ~DEV_DMA_L;

	dev_dbg(hsotg->dev, "%s: Filling ep %d, dir %s isoc desc # %d\n",
		__func__, hs_ep->index, hs_ep->dir_in ? "in" : "out", index);

	desc->status = 0;
	desc->status |= (DEV_DMA_BUFF_STS_HBUSY << DEV_DMA_BUFF_STS_SHIFT);

	desc->buf = dma_buff;
	desc->status |= (DEV_DMA_L | DEV_DMA_IOC |
			 ((len << DEV_DMA_NBYTES_SHIFT) & mask));

	if (hs_ep->dir_in) {
		if (len)
			pid = DIV_ROUND_UP(len, hs_ep->ep.maxpacket);
		else
			pid = 1;
		desc->status |= ((pid << DEV_DMA_ISOC_PID_SHIFT) &
				 DEV_DMA_ISOC_PID_MASK) |
				((len % hs_ep->ep.maxpacket) ?
				 DEV_DMA_SHORT : 0) |
				((hs_ep->target_frame <<
				  DEV_DMA_ISOC_FRNUM_SHIFT) &
				 DEV_DMA_ISOC_FRNUM_MASK);
	}

	desc->status &= ~DEV_DMA_BUFF_STS_MASK;
	desc->status |= (DEV_DMA_BUFF_STS_HREADY << DEV_DMA_BUFF_STS_SHIFT);

	/* Increment frame number by interval for IN */
	if (hs_ep->dir_in)
		dwc2_gadget_incr_frame_num(hs_ep);

	/* Update index of last configured entry in the chain */
	hs_ep->next_desc++;
	if (hs_ep->next_desc >= MAX_DMA_DESC_NUM_GENERIC)
		hs_ep->next_desc = 0;

	return 0;
}
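/*
 * Illustrative example of the PID field programmed above: for an ISOC IN
 * request of 3000 bytes on an endpoint with maxpacket 1024, pid is
 * DIV_ROUND_UP(3000, 1024) = 3 (three packets in the (micro)frame), and
 * DEV_DMA_SHORT is also set because 3000 % 1024 != 0.
 */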
/*
 * dwc2_gadget_start_isoc_ddma - start isochronous transfer in DDMA
 * @hs_ep: The isochronous endpoint.
 *
 * Prepare descriptor chain for isochronous endpoints. Afterwards
 * write DMA address to HW and enable the endpoint.
 */
static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
{
	struct dwc2_hsotg *hsotg = hs_ep->parent;
	struct dwc2_hsotg_req *hs_req, *treq;
	int index = hs_ep->index;
	int ret;
	int i;
	u32 dma_reg;
	u32 depctl;
	u32 ctrl;
	struct dwc2_dma_desc *desc;

	if (list_empty(&hs_ep->queue)) {
		hs_ep->target_frame = TARGET_FRAME_INITIAL;
		dev_dbg(hsotg->dev, "%s: No requests in queue\n", __func__);
		return;
	}

	/* Initialize descriptor chain by Host Busy status */
	for (i = 0; i < MAX_DMA_DESC_NUM_GENERIC; i++) {
		desc = &hs_ep->desc_list[i];
		desc->status = 0;
		desc->status |= (DEV_DMA_BUFF_STS_HBUSY
				 << DEV_DMA_BUFF_STS_SHIFT);
	}

	hs_ep->next_desc = 0;
	list_for_each_entry_safe(hs_req, treq, &hs_ep->queue, queue) {
		dma_addr_t dma_addr = hs_req->req.dma;

		if (hs_req->req.num_sgs) {
			WARN_ON(hs_req->req.num_sgs > 1);
			dma_addr = sg_dma_address(hs_req->req.sg);
		}
		ret = dwc2_gadget_fill_isoc_desc(hs_ep, dma_addr,
						 hs_req->req.length);
		if (ret)
			break;
	}

	hs_ep->compl_desc = 0;
	depctl = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index);
	dma_reg = hs_ep->dir_in ? DIEPDMA(index) : DOEPDMA(index);

	/* write descriptor chain address to control register */
	dwc2_writel(hsotg, hs_ep->desc_list_dma, dma_reg);

	ctrl = dwc2_readl(hsotg, depctl);
	ctrl |= DXEPCTL_EPENA | DXEPCTL_CNAK;
	dwc2_writel(hsotg, ctrl, depctl);
}
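/*
 * Descriptor buffer-status lifecycle, summarized for reference
 * (illustrative, based on the code above and below): SW marks an entry
 * HBUSY while filling it, flips it to HREADY once it is ready for the
 * controller, and the controller reports DMADONE in the same field when
 * the transfer for that entry completes (see
 * dwc2_gadget_complete_isoc_request_ddma()).
 */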
"in" : "out"); 1047 1048 /* If endpoint is stalled, we will restart request later */ 1049 ctrl = dwc2_readl(hsotg, epctrl_reg); 1050 1051 if (index && ctrl & DXEPCTL_STALL) { 1052 dev_warn(hsotg->dev, "%s: ep%d is stalled\n", __func__, index); 1053 return; 1054 } 1055 1056 length = ureq->length - ureq->actual; 1057 dev_dbg(hsotg->dev, "ureq->length:%d ureq->actual:%d\n", 1058 ureq->length, ureq->actual); 1059 1060 if (!using_desc_dma(hsotg)) 1061 maxreq = get_ep_limit(hs_ep); 1062 else 1063 maxreq = dwc2_gadget_get_chain_limit(hs_ep); 1064 1065 if (length > maxreq) { 1066 int round = maxreq % hs_ep->ep.maxpacket; 1067 1068 dev_dbg(hsotg->dev, "%s: length %d, max-req %d, r %d\n", 1069 __func__, length, maxreq, round); 1070 1071 /* round down to multiple of packets */ 1072 if (round) 1073 maxreq -= round; 1074 1075 length = maxreq; 1076 } 1077 1078 if (length) 1079 packets = DIV_ROUND_UP(length, hs_ep->ep.maxpacket); 1080 else 1081 packets = 1; /* send one packet if length is zero. */ 1082 1083 if (hs_ep->isochronous && length > (hs_ep->mc * hs_ep->ep.maxpacket)) { 1084 dev_err(hsotg->dev, "req length > maxpacket*mc\n"); 1085 return; 1086 } 1087 1088 if (dir_in && index != 0) 1089 if (hs_ep->isochronous) 1090 epsize = DXEPTSIZ_MC(packets); 1091 else 1092 epsize = DXEPTSIZ_MC(1); 1093 else 1094 epsize = 0; 1095 1096 /* 1097 * zero length packet should be programmed on its own and should not 1098 * be counted in DIEPTSIZ.PktCnt with other packets. 1099 */ 1100 if (dir_in && ureq->zero && !continuing) { 1101 /* Test if zlp is actually required. */ 1102 if ((ureq->length >= hs_ep->ep.maxpacket) && 1103 !(ureq->length % hs_ep->ep.maxpacket)) 1104 hs_ep->send_zlp = 1; 1105 } 1106 1107 epsize |= DXEPTSIZ_PKTCNT(packets); 1108 epsize |= DXEPTSIZ_XFERSIZE(length); 1109 1110 dev_dbg(hsotg->dev, "%s: %d@%d/%d, 0x%08x => 0x%08x\n", 1111 __func__, packets, length, ureq->length, epsize, epsize_reg); 1112 1113 /* store the request as the current one we're doing */ 1114 hs_ep->req = hs_req; 1115 1116 if (using_desc_dma(hsotg)) { 1117 u32 offset = 0; 1118 u32 mps = hs_ep->ep.maxpacket; 1119 1120 /* Adjust length: EP0 - MPS, other OUT EPs - multiple of MPS */ 1121 if (!dir_in) { 1122 if (!index) 1123 length = mps; 1124 else if (length % mps) 1125 length += (mps - (length % mps)); 1126 } 1127 1128 /* 1129 * If more data to send, adjust DMA for EP0 out data stage. 1130 * ureq->dma stays unchanged, hence increment it by already 1131 * passed passed data count before starting new transaction. 1132 */ 1133 if (!index && hsotg->ep0_state == DWC2_EP0_DATA_OUT && 1134 continuing) 1135 offset = ureq->actual; 1136 1137 /* Fill DDMA chain entries */ 1138 dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, ureq, offset, 1139 length); 1140 1141 /* write descriptor chain address to control register */ 1142 dwc2_writel(hsotg, hs_ep->desc_list_dma, dma_reg); 1143 1144 dev_dbg(hsotg->dev, "%s: %08x pad => 0x%08x\n", 1145 __func__, (u32)hs_ep->desc_list_dma, dma_reg); 1146 } else { 1147 /* write size / packets */ 1148 dwc2_writel(hsotg, epsize, epsize_reg); 1149 1150 if (using_dma(hsotg) && !continuing && (length != 0)) { 1151 /* 1152 * write DMA address to control register, buffer 1153 * already synced by dwc2_hsotg_ep_queue(). 
1154 */ 1155 1156 dwc2_writel(hsotg, ureq->dma, dma_reg); 1157 1158 dev_dbg(hsotg->dev, "%s: %pad => 0x%08x\n", 1159 __func__, &ureq->dma, dma_reg); 1160 } 1161 } 1162 1163 if (hs_ep->isochronous && hs_ep->interval == 1) { 1164 hs_ep->target_frame = dwc2_hsotg_read_frameno(hsotg); 1165 dwc2_gadget_incr_frame_num(hs_ep); 1166 1167 if (hs_ep->target_frame & 0x1) 1168 ctrl |= DXEPCTL_SETODDFR; 1169 else 1170 ctrl |= DXEPCTL_SETEVENFR; 1171 } 1172 1173 ctrl |= DXEPCTL_EPENA; /* ensure ep enabled */ 1174 1175 dev_dbg(hsotg->dev, "ep0 state:%d\n", hsotg->ep0_state); 1176 1177 /* For Setup request do not clear NAK */ 1178 if (!(index == 0 && hsotg->ep0_state == DWC2_EP0_SETUP)) 1179 ctrl |= DXEPCTL_CNAK; /* clear NAK set by core */ 1180 1181 dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl); 1182 dwc2_writel(hsotg, ctrl, epctrl_reg); 1183 1184 /* 1185 * set these, it seems that DMA support increments past the end 1186 * of the packet buffer so we need to calculate the length from 1187 * this information. 1188 */ 1189 hs_ep->size_loaded = length; 1190 hs_ep->last_load = ureq->actual; 1191 1192 if (dir_in && !using_dma(hsotg)) { 1193 /* set these anyway, we may need them for non-periodic in */ 1194 hs_ep->fifo_load = 0; 1195 1196 dwc2_hsotg_write_fifo(hsotg, hs_ep, hs_req); 1197 } 1198 1199 /* 1200 * Note, trying to clear the NAK here causes problems with transmit 1201 * on the S3C6400 ending up with the TXFIFO becoming full. 1202 */ 1203 1204 /* check ep is enabled */ 1205 if (!(dwc2_readl(hsotg, epctrl_reg) & DXEPCTL_EPENA)) 1206 dev_dbg(hsotg->dev, 1207 "ep%d: failed to become enabled (DXEPCTL=0x%08x)?\n", 1208 index, dwc2_readl(hsotg, epctrl_reg)); 1209 1210 dev_dbg(hsotg->dev, "%s: DXEPCTL=0x%08x\n", 1211 __func__, dwc2_readl(hsotg, epctrl_reg)); 1212 1213 /* enable ep interrupts */ 1214 dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 1); 1215 } 1216 1217 /** 1218 * dwc2_hsotg_map_dma - map the DMA memory being used for the request 1219 * @hsotg: The device state. 1220 * @hs_ep: The endpoint the request is on. 1221 * @req: The request being processed. 1222 * 1223 * We've been asked to queue a request, so ensure that the memory buffer 1224 * is correctly setup for DMA. If we've been passed an extant DMA address 1225 * then ensure the buffer has been synced to memory. If our buffer has no 1226 * DMA memory, then we map the memory and mark our request to allow us to 1227 * cleanup on completion. 
/**
 * dwc2_hsotg_map_dma - map the DMA memory being used for the request
 * @hsotg: The device state.
 * @hs_ep: The endpoint the request is on.
 * @req: The request being processed.
 *
 * We've been asked to queue a request, so ensure that the memory buffer
 * is correctly setup for DMA. If we've been passed an extant DMA address
 * then ensure the buffer has been synced to memory. If our buffer has no
 * DMA memory, then we map the memory and mark our request to allow us to
 * cleanup on completion.
 */
static int dwc2_hsotg_map_dma(struct dwc2_hsotg *hsotg,
			      struct dwc2_hsotg_ep *hs_ep,
			      struct usb_request *req)
{
	int ret;

	ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in);
	if (ret)
		goto dma_error;

	return 0;

dma_error:
	dev_err(hsotg->dev, "%s: failed to map buffer %p, %d bytes\n",
		__func__, req->buf, req->length);

	return -EIO;
}

static int dwc2_hsotg_handle_unaligned_buf_start(struct dwc2_hsotg *hsotg,
						 struct dwc2_hsotg_ep *hs_ep,
						 struct dwc2_hsotg_req *hs_req)
{
	void *req_buf = hs_req->req.buf;

	/* If dma is not being used or buffer is aligned */
	if (!using_dma(hsotg) || !((long)req_buf & 3))
		return 0;

	WARN_ON(hs_req->saved_req_buf);

	dev_dbg(hsotg->dev, "%s: %s: buf=%p length=%d\n", __func__,
		hs_ep->ep.name, req_buf, hs_req->req.length);

	hs_req->req.buf = kmalloc(hs_req->req.length, GFP_ATOMIC);
	if (!hs_req->req.buf) {
		hs_req->req.buf = req_buf;
		dev_err(hsotg->dev,
			"%s: unable to allocate memory for bounce buffer\n",
			__func__);
		return -ENOMEM;
	}

	/* Save actual buffer */
	hs_req->saved_req_buf = req_buf;

	if (hs_ep->dir_in)
		memcpy(hs_req->req.buf, req_buf, hs_req->req.length);
	return 0;
}

static void
dwc2_hsotg_handle_unaligned_buf_complete(struct dwc2_hsotg *hsotg,
					 struct dwc2_hsotg_ep *hs_ep,
					 struct dwc2_hsotg_req *hs_req)
{
	/* If dma is not being used or buffer was aligned */
	if (!using_dma(hsotg) || !hs_req->saved_req_buf)
		return;

	dev_dbg(hsotg->dev, "%s: %s: status=%d actual-length=%d\n", __func__,
		hs_ep->ep.name, hs_req->req.status, hs_req->req.actual);

	/* Copy data from bounce buffer on successful out transfer */
	if (!hs_ep->dir_in && !hs_req->req.status)
		memcpy(hs_req->saved_req_buf, hs_req->req.buf,
		       hs_req->req.actual);

	/* Free bounce buffer */
	kfree(hs_req->req.buf);

	hs_req->req.buf = hs_req->saved_req_buf;
	hs_req->saved_req_buf = NULL;
}

/**
 * dwc2_gadget_target_frame_elapsed - Checks target frame
 * @hs_ep: The driver endpoint to check
 *
 * Returns 1 if the targeted frame has elapsed. If it returns 1 then we
 * need to drop the corresponding transfer.
 */
static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep)
{
	struct dwc2_hsotg *hsotg = hs_ep->parent;
	u32 target_frame = hs_ep->target_frame;
	u32 current_frame = hsotg->frame_number;
	bool frame_overrun = hs_ep->frame_overrun;

	if (!frame_overrun && current_frame >= target_frame)
		return true;

	if (frame_overrun && current_frame >= target_frame &&
	    ((current_frame - target_frame) < DSTS_SOFFN_LIMIT / 2))
		return true;

	return false;
}
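/*
 * Worked example (illustrative, again assuming DSTS_SOFFN_LIMIT ==
 * 0x3fff): after a wrap, target_frame == 0x0004 with frame_overrun set.
 * While current_frame is still 0x3ff0, the difference 0x3fec is not
 * below DSTS_SOFFN_LIMIT / 2, so the target is treated as belonging to
 * the next counter cycle and has not elapsed; once current_frame wraps
 * to 0x0006, the difference (2) is below the half-range and the frame
 * is considered elapsed.
 */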
/*
 * dwc2_gadget_set_ep0_desc_chain - Set EP's desc chain pointers
 * @hsotg: The driver state
 * @hs_ep: the ep descriptor chain is for
 *
 * Called to update EP0 structure's pointers depending on the stage of
 * the control transfer.
 */
static int dwc2_gadget_set_ep0_desc_chain(struct dwc2_hsotg *hsotg,
					  struct dwc2_hsotg_ep *hs_ep)
{
	switch (hsotg->ep0_state) {
	case DWC2_EP0_SETUP:
	case DWC2_EP0_STATUS_OUT:
		hs_ep->desc_list = hsotg->setup_desc[0];
		hs_ep->desc_list_dma = hsotg->setup_desc_dma[0];
		break;
	case DWC2_EP0_DATA_IN:
	case DWC2_EP0_STATUS_IN:
		hs_ep->desc_list = hsotg->ctrl_in_desc;
		hs_ep->desc_list_dma = hsotg->ctrl_in_desc_dma;
		break;
	case DWC2_EP0_DATA_OUT:
		hs_ep->desc_list = hsotg->ctrl_out_desc;
		hs_ep->desc_list_dma = hsotg->ctrl_out_desc_dma;
		break;
	default:
		dev_err(hsotg->dev, "invalid EP 0 state in queue %d\n",
			hsotg->ep0_state);
		return -EINVAL;
	}

	return 0;
}

static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
			       gfp_t gfp_flags)
{
	struct dwc2_hsotg_req *hs_req = our_req(req);
	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
	struct dwc2_hsotg *hs = hs_ep->parent;
	bool first;
	int ret;
	u32 maxsize = 0;
	u32 mask = 0;

	dev_dbg(hs->dev, "%s: req %p: %d@%p, noi=%d, zero=%d, snok=%d\n",
		ep->name, req, req->length, req->buf, req->no_interrupt,
		req->zero, req->short_not_ok);

	/* Prevent new request submission when controller is suspended */
	if (hs->lx_state != DWC2_L0) {
		dev_dbg(hs->dev, "%s: submit request only in active state\n",
			__func__);
		return -EAGAIN;
	}

	/* initialise status of the request */
	INIT_LIST_HEAD(&hs_req->queue);
	req->actual = 0;
	req->status = -EINPROGRESS;

	/*
	 * In DDMA mode, for ISOC endpoints, don't queue the request if its
	 * length is greater than the descriptor limits.
	 */
	if (using_desc_dma(hs) && hs_ep->isochronous) {
		maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);
		if (hs_ep->dir_in && req->length > maxsize) {
			dev_err(hs->dev, "wrong length %d (maxsize=%d)\n",
				req->length, maxsize);
			return -EINVAL;
		}

		if (!hs_ep->dir_in && req->length > hs_ep->ep.maxpacket) {
			dev_err(hs->dev, "ISOC OUT: wrong length %d (mps=%d)\n",
				req->length, hs_ep->ep.maxpacket);
			return -EINVAL;
		}
	}

	ret = dwc2_hsotg_handle_unaligned_buf_start(hs, hs_ep, hs_req);
	if (ret)
		return ret;

	/* if we're using DMA, sync the buffers as necessary */
	if (using_dma(hs)) {
		ret = dwc2_hsotg_map_dma(hs, hs_ep, req);
		if (ret)
			return ret;
	}
	/* If using descriptor DMA configure EP0 descriptor chain pointers */
	if (using_desc_dma(hs) && !hs_ep->index) {
		ret = dwc2_gadget_set_ep0_desc_chain(hs, hs_ep);
		if (ret)
			return ret;
	}

	first = list_empty(&hs_ep->queue);
	list_add_tail(&hs_req->queue, &hs_ep->queue);

	/*
	 * Handle DDMA isochronous transfers separately - just add new entry
	 * to the descriptor chain.
	 * Transfer will be started once SW gets either one of NAK or
	 * OutTknEpDis interrupts.
	 */
	if (using_desc_dma(hs) && hs_ep->isochronous) {
		if (hs_ep->target_frame != TARGET_FRAME_INITIAL) {
			dma_addr_t dma_addr = hs_req->req.dma;

			if (hs_req->req.num_sgs) {
				WARN_ON(hs_req->req.num_sgs > 1);
				dma_addr = sg_dma_address(hs_req->req.sg);
			}
			dwc2_gadget_fill_isoc_desc(hs_ep, dma_addr,
						   hs_req->req.length);
		}
		return 0;
	}

	if (first) {
		if (!hs_ep->isochronous) {
			dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
			return 0;
		}

		/* Update current frame number value. */
		hs->frame_number = dwc2_hsotg_read_frameno(hs);
		while (dwc2_gadget_target_frame_elapsed(hs_ep)) {
			dwc2_gadget_incr_frame_num(hs_ep);
			/* Update current frame number value once more as it
			 * changes here.
			 */
			hs->frame_number = dwc2_hsotg_read_frameno(hs);
		}

		if (hs_ep->target_frame != TARGET_FRAME_INITIAL)
			dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
	}
	return 0;
}

static int dwc2_hsotg_ep_queue_lock(struct usb_ep *ep, struct usb_request *req,
				    gfp_t gfp_flags)
{
	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
	struct dwc2_hsotg *hs = hs_ep->parent;
	unsigned long flags = 0;
	int ret = 0;

	spin_lock_irqsave(&hs->lock, flags);
	ret = dwc2_hsotg_ep_queue(ep, req, gfp_flags);
	spin_unlock_irqrestore(&hs->lock, flags);

	return ret;
}

static void dwc2_hsotg_ep_free_request(struct usb_ep *ep,
				       struct usb_request *req)
{
	struct dwc2_hsotg_req *hs_req = our_req(req);

	kfree(hs_req);
}

/**
 * dwc2_hsotg_complete_oursetup - setup completion callback
 * @ep: The endpoint the request was on.
 * @req: The request completed.
 *
 * Called on completion of any requests the driver itself
 * submitted that need cleaning up.
 */
static void dwc2_hsotg_complete_oursetup(struct usb_ep *ep,
					 struct usb_request *req)
{
	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
	struct dwc2_hsotg *hsotg = hs_ep->parent;

	dev_dbg(hsotg->dev, "%s: ep %p, req %p\n", __func__, ep, req);

	dwc2_hsotg_ep_free_request(ep, req);
}

/**
 * ep_from_windex - convert control wIndex value to endpoint
 * @hsotg: The driver state.
 * @windex: The control request wIndex field (in host order).
 *
 * Convert the given wIndex into a pointer to a driver endpoint
 * structure, or return NULL if it is not a valid endpoint.
 */
static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
					    u32 windex)
{
	struct dwc2_hsotg_ep *ep;
	int dir = (windex & USB_DIR_IN) ? 1 : 0;
	int idx = windex & 0x7F;

	if (windex >= 0x100)
		return NULL;

	if (idx > hsotg->num_of_eps)
		return NULL;

	ep = index_to_ep(hsotg, idx, dir);

	if (idx && ep->dir_in != dir)
		return NULL;

	return ep;
}
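/*
 * Illustrative decode of the logic above: wIndex == 0x0081 selects IN
 * endpoint 1 (USB_DIR_IN set, idx == 1), wIndex == 0x0002 selects OUT
 * endpoint 2, and any value >= 0x100 is rejected as not a valid
 * endpoint.
 */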
/**
 * dwc2_hsotg_set_test_mode - Enable usb Test Modes
 * @hsotg: The driver state.
 * @testmode: requested usb test mode
 * Enable usb Test Mode requested by the Host.
 */
int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode)
{
	int dctl = dwc2_readl(hsotg, DCTL);

	dctl &= ~DCTL_TSTCTL_MASK;
	switch (testmode) {
	case TEST_J:
	case TEST_K:
	case TEST_SE0_NAK:
	case TEST_PACKET:
	case TEST_FORCE_EN:
		dctl |= testmode << DCTL_TSTCTL_SHIFT;
		break;
	default:
		return -EINVAL;
	}
	dwc2_writel(hsotg, dctl, DCTL);
	return 0;
}

/**
 * dwc2_hsotg_send_reply - send reply to control request
 * @hsotg: The device state
 * @ep: Endpoint 0
 * @buff: Buffer for request
 * @length: Length of reply.
 *
 * Create a request and queue it on the given endpoint. This is useful as
 * an internal method of sending replies to certain control requests, etc.
 */
static int dwc2_hsotg_send_reply(struct dwc2_hsotg *hsotg,
				 struct dwc2_hsotg_ep *ep,
				 void *buff,
				 int length)
{
	struct usb_request *req;
	int ret;

	dev_dbg(hsotg->dev, "%s: buff %p, len %d\n", __func__, buff, length);

	req = dwc2_hsotg_ep_alloc_request(&ep->ep, GFP_ATOMIC);
	hsotg->ep0_reply = req;
	if (!req) {
		dev_warn(hsotg->dev, "%s: cannot alloc req\n", __func__);
		return -ENOMEM;
	}

	req->buf = hsotg->ep0_buff;
	req->length = length;
	/*
	 * zero flag is for sending zlp in DATA IN stage. It has no impact on
	 * STATUS stage.
	 */
	req->zero = 0;
	req->complete = dwc2_hsotg_complete_oursetup;

	if (length)
		memcpy(req->buf, buff, length);

	ret = dwc2_hsotg_ep_queue(&ep->ep, req, GFP_ATOMIC);
	if (ret) {
		dev_warn(hsotg->dev, "%s: cannot queue req\n", __func__);
		return ret;
	}

	return 0;
}

/**
 * dwc2_hsotg_process_req_status - process request GET_STATUS
 * @hsotg: The device state
 * @ctrl: USB control request
 */
static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
					 struct usb_ctrlrequest *ctrl)
{
	struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
	struct dwc2_hsotg_ep *ep;
	__le16 reply;
	int ret;

	dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__);

	if (!ep0->dir_in) {
		dev_warn(hsotg->dev, "%s: direction out?\n", __func__);
		return -EINVAL;
	}

	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		/*
		 * bit 0 => self powered
		 * bit 1 => remote wakeup
		 */
		reply = cpu_to_le16(0);
		break;

	case USB_RECIP_INTERFACE:
		/* currently, the data result should be zero */
		reply = cpu_to_le16(0);
		break;

	case USB_RECIP_ENDPOINT:
		ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));
		if (!ep)
			return -ENOENT;

		reply = cpu_to_le16(ep->halted ? 1 : 0);
		break;

	default:
		return 0;
	}

	if (le16_to_cpu(ctrl->wLength) != 2)
		return -EINVAL;

	ret = dwc2_hsotg_send_reply(hsotg, ep0, &reply, 2);
	if (ret) {
		dev_err(hsotg->dev, "%s: failed to send reply\n", __func__);
		return ret;
	}

	return 1;
}
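/*
 * Illustrative flow of the code above: a GET_STATUS directed at a
 * halted endpoint ends up as a two-byte little-endian reply queued on
 * EP0, roughly
 *
 *	reply = cpu_to_le16(1);
 *	dwc2_hsotg_send_reply(hsotg, ep0, &reply, 2);
 *
 * with the internally allocated request later freed by
 * dwc2_hsotg_complete_oursetup().
 */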
static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now);

/**
 * get_ep_head - return the first request on the endpoint
 * @hs_ep: The controller endpoint to get
 *
 * Get the first request on the endpoint.
 */
static struct dwc2_hsotg_req *get_ep_head(struct dwc2_hsotg_ep *hs_ep)
{
	return list_first_entry_or_null(&hs_ep->queue, struct dwc2_hsotg_req,
					queue);
}

/**
 * dwc2_gadget_start_next_request - Starts next request from ep queue
 * @hs_ep: Endpoint structure
 *
 * If queue is empty and EP is ISOC-OUT - unmasks OUTTKNEPDIS which is masked
 * in its handler. Hence we need to unmask it here to be able to do
 * resynchronization.
 */
static void dwc2_gadget_start_next_request(struct dwc2_hsotg_ep *hs_ep)
{
	u32 mask;
	struct dwc2_hsotg *hsotg = hs_ep->parent;
	int dir_in = hs_ep->dir_in;
	struct dwc2_hsotg_req *hs_req;
	u32 epmsk_reg = dir_in ? DIEPMSK : DOEPMSK;

	if (!list_empty(&hs_ep->queue)) {
		hs_req = get_ep_head(hs_ep);
		dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, false);
		return;
	}
	if (!hs_ep->isochronous)
		return;

	if (dir_in) {
		dev_dbg(hsotg->dev, "%s: No more ISOC-IN requests\n",
			__func__);
	} else {
		dev_dbg(hsotg->dev, "%s: No more ISOC-OUT requests\n",
			__func__);
		mask = dwc2_readl(hsotg, epmsk_reg);
		mask |= DOEPMSK_OUTTKNEPDISMSK;
		dwc2_writel(hsotg, mask, epmsk_reg);
	}
}

/**
 * dwc2_hsotg_process_req_feature - process request {SET,CLEAR}_FEATURE
 * @hsotg: The device state
 * @ctrl: USB control request
 */
static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
					  struct usb_ctrlrequest *ctrl)
{
	struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
	struct dwc2_hsotg_req *hs_req;
	bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
	struct dwc2_hsotg_ep *ep;
	int ret;
	bool halted;
	u32 recip;
	u32 wValue;
	u32 wIndex;

	dev_dbg(hsotg->dev, "%s: %s_FEATURE\n",
		__func__, set ? "SET" : "CLEAR");

	wValue = le16_to_cpu(ctrl->wValue);
	wIndex = le16_to_cpu(ctrl->wIndex);
	recip = ctrl->bRequestType & USB_RECIP_MASK;

	switch (recip) {
	case USB_RECIP_DEVICE:
		switch (wValue) {
		case USB_DEVICE_REMOTE_WAKEUP:
			hsotg->remote_wakeup_allowed = 1;
			break;

		case USB_DEVICE_TEST_MODE:
			if ((wIndex & 0xff) != 0)
				return -EINVAL;
			if (!set)
				return -EINVAL;

			hsotg->test_mode = wIndex >> 8;
			ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
			if (ret) {
				dev_err(hsotg->dev,
					"%s: failed to send reply\n", __func__);
				return ret;
			}
			break;
		default:
			return -ENOENT;
		}
		break;

	case USB_RECIP_ENDPOINT:
		ep = ep_from_windex(hsotg, wIndex);
		if (!ep) {
			dev_dbg(hsotg->dev, "%s: no endpoint for 0x%04x\n",
				__func__, wIndex);
			return -ENOENT;
		}

		switch (wValue) {
		case USB_ENDPOINT_HALT:
			halted = ep->halted;

			dwc2_hsotg_ep_sethalt(&ep->ep, set, true);

			ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
			if (ret) {
				dev_err(hsotg->dev,
					"%s: failed to send reply\n", __func__);
				return ret;
			}

			/*
			 * we have to complete all requests for ep if it was
			 * halted, and the halt was cleared by CLEAR_FEATURE
			 */

			if (!set && halted) {
				/*
				 * If we have request in progress,
				 * then complete it
				 */
				if (ep->req) {
					hs_req = ep->req;
					ep->req = NULL;
					list_del_init(&hs_req->queue);
					if (hs_req->req.complete) {
						spin_unlock(&hsotg->lock);
						usb_gadget_giveback_request(
							&ep->ep, &hs_req->req);
						spin_lock(&hsotg->lock);
					}
				}

				/* If we have pending request, then start it */
				if (!ep->req)
					dwc2_gadget_start_next_request(ep);
			}

			break;

		default:
			return -ENOENT;
		}
		break;
	default:
		return -ENOENT;
	}
	return 1;
}
static void dwc2_hsotg_enqueue_setup(struct dwc2_hsotg *hsotg);

/**
 * dwc2_hsotg_stall_ep0 - stall ep0
 * @hsotg: The device state
 *
 * Set stall for ep0 as response for setup request.
 */
static void dwc2_hsotg_stall_ep0(struct dwc2_hsotg *hsotg)
{
	struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
	u32 reg;
	u32 ctrl;

	dev_dbg(hsotg->dev, "ep0 stall (dir=%d)\n", ep0->dir_in);
	reg = (ep0->dir_in) ? DIEPCTL0 : DOEPCTL0;

	/*
	 * DxEPCTL_Stall will be cleared by EP once it has
	 * taken effect, so no need to clear later.
	 */

	ctrl = dwc2_readl(hsotg, reg);
	ctrl |= DXEPCTL_STALL;
	ctrl |= DXEPCTL_CNAK;
	dwc2_writel(hsotg, ctrl, reg);

	dev_dbg(hsotg->dev,
		"written DXEPCTL=0x%08x to %08x (DXEPCTL=0x%08x)\n",
		ctrl, reg, dwc2_readl(hsotg, reg));

	/*
	 * complete won't be called, so we enqueue
	 * setup request here
	 */
	dwc2_hsotg_enqueue_setup(hsotg);
}

/**
 * dwc2_hsotg_process_control - process a control request
 * @hsotg: The device state
 * @ctrl: The control request received
 *
 * The controller has received the SETUP phase of a control request, and
 * needs to work out what to do next (and whether to pass it on to the
 * gadget driver).
 */
static void dwc2_hsotg_process_control(struct dwc2_hsotg *hsotg,
				       struct usb_ctrlrequest *ctrl)
{
	struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
	int ret = 0;
	u32 dcfg;

	dev_dbg(hsotg->dev,
		"ctrl Type=%02x, Req=%02x, V=%04x, I=%04x, L=%04x\n",
		ctrl->bRequestType, ctrl->bRequest, ctrl->wValue,
		ctrl->wIndex, ctrl->wLength);

	if (ctrl->wLength == 0) {
		ep0->dir_in = 1;
		hsotg->ep0_state = DWC2_EP0_STATUS_IN;
	} else if (ctrl->bRequestType & USB_DIR_IN) {
		ep0->dir_in = 1;
		hsotg->ep0_state = DWC2_EP0_DATA_IN;
	} else {
		ep0->dir_in = 0;
		hsotg->ep0_state = DWC2_EP0_DATA_OUT;
	}

	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (ctrl->bRequest) {
		case USB_REQ_SET_ADDRESS:
			hsotg->connected = 1;
			dcfg = dwc2_readl(hsotg, DCFG);
			dcfg &= ~DCFG_DEVADDR_MASK;
			dcfg |= (le16_to_cpu(ctrl->wValue) <<
				 DCFG_DEVADDR_SHIFT) & DCFG_DEVADDR_MASK;
			dwc2_writel(hsotg, dcfg, DCFG);

			dev_info(hsotg->dev, "new address %d\n", ctrl->wValue);

			ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
			return;

		case USB_REQ_GET_STATUS:
			ret = dwc2_hsotg_process_req_status(hsotg, ctrl);
			break;

		case USB_REQ_CLEAR_FEATURE:
		case USB_REQ_SET_FEATURE:
			ret = dwc2_hsotg_process_req_feature(hsotg, ctrl);
			break;
		}
	}

	/* as a fallback, try delivering it to the driver to deal with */

	if (ret == 0 && hsotg->driver) {
		spin_unlock(&hsotg->lock);
		ret = hsotg->driver->setup(&hsotg->gadget, ctrl);
		spin_lock(&hsotg->lock);
		if (ret < 0)
			dev_dbg(hsotg->dev, "driver->setup() ret %d\n", ret);
	}

	/*
	 * the request is either unhandleable, or is not formatted correctly
	 * so respond with a STALL for the status stage to indicate failure.
	 */

	if (ret < 0)
		dwc2_hsotg_stall_ep0(hsotg);
}

/**
 * dwc2_hsotg_complete_setup - completion of a setup transfer
 * @ep: The endpoint the request was on.
 * @req: The request completed.
 *
 * Called on completion of any requests the driver itself submitted for
 * EP0 setup packets
 */
static void dwc2_hsotg_complete_setup(struct usb_ep *ep,
				      struct usb_request *req)
{
	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
	struct dwc2_hsotg *hsotg = hs_ep->parent;

	if (req->status < 0) {
		dev_dbg(hsotg->dev, "%s: failed %d\n", __func__, req->status);
		return;
	}

	spin_lock(&hsotg->lock);
	if (req->actual == 0)
		dwc2_hsotg_enqueue_setup(hsotg);
	else
		dwc2_hsotg_process_control(hsotg, req->buf);
	spin_unlock(&hsotg->lock);
}
1983 */ 1984 static void dwc2_hsotg_enqueue_setup(struct dwc2_hsotg *hsotg) 1985 { 1986 struct usb_request *req = hsotg->ctrl_req; 1987 struct dwc2_hsotg_req *hs_req = our_req(req); 1988 int ret; 1989 1990 dev_dbg(hsotg->dev, "%s: queueing setup request\n", __func__); 1991 1992 req->zero = 0; 1993 req->length = 8; 1994 req->buf = hsotg->ctrl_buff; 1995 req->complete = dwc2_hsotg_complete_setup; 1996 1997 if (!list_empty(&hs_req->queue)) { 1998 dev_dbg(hsotg->dev, "%s already queued???\n", __func__); 1999 return; 2000 } 2001 2002 hsotg->eps_out[0]->dir_in = 0; 2003 hsotg->eps_out[0]->send_zlp = 0; 2004 hsotg->ep0_state = DWC2_EP0_SETUP; 2005 2006 ret = dwc2_hsotg_ep_queue(&hsotg->eps_out[0]->ep, req, GFP_ATOMIC); 2007 if (ret < 0) { 2008 dev_err(hsotg->dev, "%s: failed queue (%d)\n", __func__, ret); 2009 /* 2010 * Don't think there's much we can do other than watch the 2011 * driver fail. 2012 */ 2013 } 2014 } 2015 2016 static void dwc2_hsotg_program_zlp(struct dwc2_hsotg *hsotg, 2017 struct dwc2_hsotg_ep *hs_ep) 2018 { 2019 u32 ctrl; 2020 u8 index = hs_ep->index; 2021 u32 epctl_reg = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index); 2022 u32 epsiz_reg = hs_ep->dir_in ? DIEPTSIZ(index) : DOEPTSIZ(index); 2023 2024 if (hs_ep->dir_in) 2025 dev_dbg(hsotg->dev, "Sending zero-length packet on ep%d\n", 2026 index); 2027 else 2028 dev_dbg(hsotg->dev, "Receiving zero-length packet on ep%d\n", 2029 index); 2030 if (using_desc_dma(hsotg)) { 2031 if (!index) 2032 dwc2_gadget_set_ep0_desc_chain(hsotg, hs_ep); 2033 2034 /* Not specific buffer needed for ep0 ZLP */ 2035 dwc2_gadget_fill_nonisoc_xfer_ddma_one(hs_ep, &hs_ep->desc_list, 2036 hs_ep->desc_list_dma, 0, true); 2037 } else { 2038 dwc2_writel(hsotg, DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) | 2039 DXEPTSIZ_XFERSIZE(0), 2040 epsiz_reg); 2041 } 2042 2043 ctrl = dwc2_readl(hsotg, epctl_reg); 2044 ctrl |= DXEPCTL_CNAK; /* clear NAK set by core */ 2045 ctrl |= DXEPCTL_EPENA; /* ensure ep enabled */ 2046 ctrl |= DXEPCTL_USBACTEP; 2047 dwc2_writel(hsotg, ctrl, epctl_reg); 2048 } 2049 2050 /** 2051 * dwc2_hsotg_complete_request - complete a request given to us 2052 * @hsotg: The device state. 2053 * @hs_ep: The endpoint the request was on. 2054 * @hs_req: The request to complete. 2055 * @result: The result code (0 => Ok, otherwise errno) 2056 * 2057 * The given request has finished, so call the necessary completion 2058 * if it has one and then look to see if we can start a new request 2059 * on the endpoint. 2060 * 2061 * Note, expects the ep to already be locked as appropriate. 2062 */ 2063 static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg, 2064 struct dwc2_hsotg_ep *hs_ep, 2065 struct dwc2_hsotg_req *hs_req, 2066 int result) 2067 { 2068 if (!hs_req) { 2069 dev_dbg(hsotg->dev, "%s: nothing to complete?\n", __func__); 2070 return; 2071 } 2072 2073 dev_dbg(hsotg->dev, "complete: ep %p %s, req %p, %d => %p\n", 2074 hs_ep, hs_ep->ep.name, hs_req, result, hs_req->req.complete); 2075 2076 /* 2077 * only replace the status if we've not already set an error 2078 * from a previous transaction 2079 */ 2080 2081 if (hs_req->req.status == -EINPROGRESS) 2082 hs_req->req.status = result; 2083 2084 if (using_dma(hsotg)) 2085 dwc2_hsotg_unmap_dma(hsotg, hs_ep, hs_req); 2086 2087 dwc2_hsotg_handle_unaligned_buf_complete(hsotg, hs_ep, hs_req); 2088 2089 hs_ep->req = NULL; 2090 list_del_init(&hs_req->queue); 2091 2092 /* 2093 * call the complete request with the locks off, just in case the 2094 * request tries to queue more work for this endpoint. 
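	 *
	 * A gadget's completion handler may, for example, immediately call
	 * usb_ep_queue() on the same endpoint; invoking it with hsotg->lock
	 * held would then deadlock on re-entry into this driver.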
2095 */ 2096 2097 if (hs_req->req.complete) { 2098 spin_unlock(&hsotg->lock); 2099 usb_gadget_giveback_request(&hs_ep->ep, &hs_req->req); 2100 spin_lock(&hsotg->lock); 2101 } 2102 2103 /* In DDMA don't need to proceed to starting of next ISOC request */ 2104 if (using_desc_dma(hsotg) && hs_ep->isochronous) 2105 return; 2106 2107 /* 2108 * Look to see if there is anything else to do. Note, the completion 2109 * of the previous request may have caused a new request to be started 2110 * so be careful when doing this. 2111 */ 2112 2113 if (!hs_ep->req && result >= 0) 2114 dwc2_gadget_start_next_request(hs_ep); 2115 } 2116 2117 /* 2118 * dwc2_gadget_complete_isoc_request_ddma - complete an isoc request in DDMA 2119 * @hs_ep: The endpoint the request was on. 2120 * 2121 * Get first request from the ep queue, determine descriptor on which complete 2122 * happened. SW discovers which descriptor currently in use by HW, adjusts 2123 * dma_address and calculates index of completed descriptor based on the value 2124 * of DEPDMA register. Update actual length of request, giveback to gadget. 2125 */ 2126 static void dwc2_gadget_complete_isoc_request_ddma(struct dwc2_hsotg_ep *hs_ep) 2127 { 2128 struct dwc2_hsotg *hsotg = hs_ep->parent; 2129 struct dwc2_hsotg_req *hs_req; 2130 struct usb_request *ureq; 2131 u32 desc_sts; 2132 u32 mask; 2133 2134 desc_sts = hs_ep->desc_list[hs_ep->compl_desc].status; 2135 2136 /* Process only descriptors with buffer status set to DMA done */ 2137 while ((desc_sts & DEV_DMA_BUFF_STS_MASK) >> 2138 DEV_DMA_BUFF_STS_SHIFT == DEV_DMA_BUFF_STS_DMADONE) { 2139 2140 hs_req = get_ep_head(hs_ep); 2141 if (!hs_req) { 2142 dev_warn(hsotg->dev, "%s: ISOC EP queue empty\n", __func__); 2143 return; 2144 } 2145 ureq = &hs_req->req; 2146 2147 /* Check completion status */ 2148 if ((desc_sts & DEV_DMA_STS_MASK) >> DEV_DMA_STS_SHIFT == 2149 DEV_DMA_STS_SUCC) { 2150 mask = hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_MASK : 2151 DEV_DMA_ISOC_RX_NBYTES_MASK; 2152 ureq->actual = ureq->length - ((desc_sts & mask) >> 2153 DEV_DMA_ISOC_NBYTES_SHIFT); 2154 2155 /* Adjust actual len for ISOC Out if len is 2156 * not align of 4 2157 */ 2158 if (!hs_ep->dir_in && ureq->length & 0x3) 2159 ureq->actual += 4 - (ureq->length & 0x3); 2160 } 2161 2162 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0); 2163 2164 hs_ep->compl_desc++; 2165 if (hs_ep->compl_desc > (MAX_DMA_DESC_NUM_GENERIC - 1)) 2166 hs_ep->compl_desc = 0; 2167 desc_sts = hs_ep->desc_list[hs_ep->compl_desc].status; 2168 } 2169 } 2170 2171 /* 2172 * dwc2_gadget_handle_isoc_bna - handle BNA interrupt for ISOC. 2173 * @hs_ep: The isochronous endpoint. 2174 * 2175 * If EP ISOC OUT then need to flush RX FIFO to remove source of BNA 2176 * interrupt. Reset target frame and next_desc to allow to start 2177 * ISOC's on NAK interrupt for IN direction or on OUTTKNEPDIS 2178 * interrupt for OUT direction. 2179 */ 2180 static void dwc2_gadget_handle_isoc_bna(struct dwc2_hsotg_ep *hs_ep) 2181 { 2182 struct dwc2_hsotg *hsotg = hs_ep->parent; 2183 2184 if (!hs_ep->dir_in) 2185 dwc2_flush_rx_fifo(hsotg); 2186 dwc2_hsotg_complete_request(hsotg, hs_ep, get_ep_head(hs_ep), 0); 2187 2188 hs_ep->target_frame = TARGET_FRAME_INITIAL; 2189 hs_ep->next_desc = 0; 2190 hs_ep->compl_desc = 0; 2191 } 2192 2193 /** 2194 * dwc2_hsotg_rx_data - receive data from the FIFO for an endpoint 2195 * @hsotg: The device state. 
 * @ep_idx: The endpoint index for the data
 * @size: The size of data in the fifo, in bytes
 *
 * The FIFO status shows there is data to read from the FIFO for a given
 * endpoint, so sort out whether we need to read the data into a request
 * that has been made for that endpoint.
 */
static void dwc2_hsotg_rx_data(struct dwc2_hsotg *hsotg, int ep_idx, int size)
{
	struct dwc2_hsotg_ep *hs_ep = hsotg->eps_out[ep_idx];
	struct dwc2_hsotg_req *hs_req = hs_ep->req;
	int to_read;
	int max_req;
	int read_ptr;

	if (!hs_req) {
		u32 epctl = dwc2_readl(hsotg, DOEPCTL(ep_idx));
		int ptr;

		dev_dbg(hsotg->dev,
			"%s: FIFO %d bytes on ep%d but no req (DXEPCTl=0x%08x)\n",
			__func__, size, ep_idx, epctl);

		/* dump the data from the FIFO, we've nothing we can do */
		for (ptr = 0; ptr < size; ptr += 4)
			(void)dwc2_readl(hsotg, EPFIFO(ep_idx));

		return;
	}

	to_read = size;
	read_ptr = hs_req->req.actual;
	max_req = hs_req->req.length - read_ptr;

	dev_dbg(hsotg->dev, "%s: read %d/%d, done %d/%d\n",
		__func__, to_read, max_req, read_ptr, hs_req->req.length);

	if (to_read > max_req) {
		/*
		 * more data appeared than we were willing
		 * to deal with in this request.
		 */

		/* currently we don't handle this */
		WARN_ON_ONCE(1);
	}

	hs_ep->total_data += to_read;
	hs_req->req.actual += to_read;
	to_read = DIV_ROUND_UP(to_read, 4);

	/*
	 * note, we might over-write the buffer end by 3 bytes depending on
	 * alignment of the data.
	 */
	dwc2_readl_rep(hsotg, EPFIFO(ep_idx),
		       hs_req->req.buf + read_ptr, to_read);
}

/**
 * dwc2_hsotg_ep0_zlp - send/receive zero-length packet on control endpoint
 * @hsotg: The device instance
 * @dir_in: If IN zlp
 *
 * Generate a zero-length IN packet request for terminating a SETUP
 * transaction.
 *
 * Note, since we don't write any data to the TxFIFO, then it is
 * currently believed that we do not need to wait for any space in
 * the TxFIFO.
 */
static void dwc2_hsotg_ep0_zlp(struct dwc2_hsotg *hsotg, bool dir_in)
{
	/* eps_out[0] is used in both directions */
	hsotg->eps_out[0]->dir_in = dir_in;
	hsotg->ep0_state = dir_in ? DWC2_EP0_STATUS_IN : DWC2_EP0_STATUS_OUT;

	dwc2_hsotg_program_zlp(hsotg, hsotg->eps_out[0]);
}

static void dwc2_hsotg_change_ep_iso_parity(struct dwc2_hsotg *hsotg,
					    u32 epctl_reg)
{
	u32 ctrl;

	ctrl = dwc2_readl(hsotg, epctl_reg);
	if (ctrl & DXEPCTL_EOFRNUM)
		ctrl |= DXEPCTL_SETEVENFR;
	else
		ctrl |= DXEPCTL_SETODDFR;
	dwc2_writel(hsotg, ctrl, epctl_reg);
}

/*
 * dwc2_gadget_get_xfersize_ddma - get transferred bytes amount from desc
 * @hs_ep: The endpoint on which the transfer went
 *
 * Iterate over the endpoint's descriptor chain and sum the bytes still
 * remaining in the DMA descriptors after the transfer has completed.
 * Used for non-isochronous EPs.
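 *
 * For example (illustrative numbers): if a 1024-byte OUT transfer was
 * programmed and the chain reports 512 bytes left in DEV_DMA_NBYTES,
 * this returns 512 and the caller computes
 * actual = size_loaded - 512 + last_load.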
 */
static int dwc2_gadget_get_xfersize_ddma(struct dwc2_hsotg_ep *hs_ep)
{
	struct dwc2_hsotg *hsotg = hs_ep->parent;
	unsigned int bytes_rem = 0;
	struct dwc2_dma_desc *desc = hs_ep->desc_list;
	int i;
	u32 status;

	if (!desc)
		return -EINVAL;

	for (i = 0; i < hs_ep->desc_count; ++i) {
		status = desc->status;
		bytes_rem += status & DEV_DMA_NBYTES_MASK;

		if (status & DEV_DMA_STS_MASK)
			dev_err(hsotg->dev, "descriptor %d closed with %x\n",
				i, status & DEV_DMA_STS_MASK);

		/* advance to the next descriptor in the chain */
		desc++;
	}

	return bytes_rem;
}

/**
 * dwc2_hsotg_handle_outdone - handle receiving OutDone/SetupDone from RXFIFO
 * @hsotg: The device instance
 * @epnum: The endpoint received from
 *
 * The RXFIFO has delivered an OutDone event, which means that the data
 * transfer for an OUT endpoint has been completed, either by a short
 * packet or by the finish of a transfer.
 */
static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum)
{
	u32 epsize = dwc2_readl(hsotg, DOEPTSIZ(epnum));
	struct dwc2_hsotg_ep *hs_ep = hsotg->eps_out[epnum];
	struct dwc2_hsotg_req *hs_req = hs_ep->req;
	struct usb_request *req = &hs_req->req;
	unsigned int size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
	int result = 0;

	if (!hs_req) {
		dev_dbg(hsotg->dev, "%s: no request active\n", __func__);
		return;
	}

	if (epnum == 0 && hsotg->ep0_state == DWC2_EP0_STATUS_OUT) {
		dev_dbg(hsotg->dev, "zlp packet received\n");
		dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
		dwc2_hsotg_enqueue_setup(hsotg);
		return;
	}

	if (using_desc_dma(hsotg))
		size_left = dwc2_gadget_get_xfersize_ddma(hs_ep);

	if (using_dma(hsotg)) {
		unsigned int size_done;

		/*
		 * Calculate the size of the transfer by checking how much
		 * is left in the endpoint size register and then working it
		 * out from the amount we loaded for the transfer.
		 *
		 * We need to do this as DMA pointers are always 32bit aligned
		 * so may overshoot/undershoot the transfer.
		 */

		size_done = hs_ep->size_loaded - size_left;
		size_done += hs_ep->last_load;

		req->actual = size_done;
	}

	/* if there is more to transfer, schedule a new transfer */
	if (req->actual < req->length && size_left == 0) {
		dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, true);
		return;
	}

	if (req->actual < req->length && req->short_not_ok) {
		dev_dbg(hsotg->dev, "%s: got %d/%d (short not ok) => error\n",
			__func__, req->actual, req->length);

		/*
		 * todo - what should we return here? there's no one else
		 * even bothering to check the status.
		 */
	}

	/* DDMA IN status phase will start from StsPhseRcvd interrupt */
	if (!using_desc_dma(hsotg) && epnum == 0 &&
	    hsotg->ep0_state == DWC2_EP0_DATA_OUT) {
		/* Move to STATUS IN */
		dwc2_hsotg_ep0_zlp(hsotg, true);
		return;
	}

	/*
	 * Slave mode OUT transfers do not go through XferComplete so
	 * adjust the ISOC parity here.
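	 *
	 * The core matches ISOC transfers against even/odd (micro)frames
	 * using the SetEvenFr/SetOddFr bits; for interval == 1 endpoints
	 * the parity must flip on every packet, which
	 * dwc2_hsotg_change_ep_iso_parity() does based on DXEPCTL_EOFRNUM.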
	 */
	if (!using_dma(hsotg)) {
		if (hs_ep->isochronous && hs_ep->interval == 1)
			dwc2_hsotg_change_ep_iso_parity(hsotg, DOEPCTL(epnum));
		else if (hs_ep->isochronous && hs_ep->interval > 1)
			dwc2_gadget_incr_frame_num(hs_ep);
	}

	dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
}

/**
 * dwc2_hsotg_handle_rx - RX FIFO has data
 * @hsotg: The device instance
 *
 * The IRQ handler has detected that the RX FIFO has some data in it
 * that requires processing, so find out what is in there and do the
 * appropriate read.
 *
 * The RXFIFO is a true FIFO, the packets coming out are still in packet
 * chunks, so if you have x packets received on an endpoint you'll get x
 * FIFO events delivered, each with a packet's worth of data in it.
 *
 * When using DMA, we should not be processing events from the RXFIFO
 * as the actual data should be sent to the memory directly and we turn
 * on the completion interrupts to get notifications of transfer completion.
 */
static void dwc2_hsotg_handle_rx(struct dwc2_hsotg *hsotg)
{
	u32 grxstsr = dwc2_readl(hsotg, GRXSTSP);
	u32 epnum, status, size;

	WARN_ON(using_dma(hsotg));

	epnum = grxstsr & GRXSTS_EPNUM_MASK;
	status = grxstsr & GRXSTS_PKTSTS_MASK;

	size = grxstsr & GRXSTS_BYTECNT_MASK;
	size >>= GRXSTS_BYTECNT_SHIFT;

	dev_dbg(hsotg->dev, "%s: GRXSTSP=0x%08x (%d@%d)\n",
		__func__, grxstsr, size, epnum);

	switch ((status & GRXSTS_PKTSTS_MASK) >> GRXSTS_PKTSTS_SHIFT) {
	case GRXSTS_PKTSTS_GLOBALOUTNAK:
		dev_dbg(hsotg->dev, "GLOBALOUTNAK\n");
		break;

	case GRXSTS_PKTSTS_OUTDONE:
		dev_dbg(hsotg->dev, "OutDone (Frame=0x%08x)\n",
			dwc2_hsotg_read_frameno(hsotg));

		if (!using_dma(hsotg))
			dwc2_hsotg_handle_outdone(hsotg, epnum);
		break;

	case GRXSTS_PKTSTS_SETUPDONE:
		dev_dbg(hsotg->dev,
			"SetupDone (Frame=0x%08x, DOEPCTL=0x%08x)\n",
			dwc2_hsotg_read_frameno(hsotg),
			dwc2_readl(hsotg, DOEPCTL(0)));
		/*
		 * Call dwc2_hsotg_handle_outdone here if it was not called from
		 * GRXSTS_PKTSTS_OUTDONE. That is, if the core didn't
		 * generate GRXSTS_PKTSTS_OUTDONE for setup packet.
		 */
		if (hsotg->ep0_state == DWC2_EP0_SETUP)
			dwc2_hsotg_handle_outdone(hsotg, epnum);
		break;

	case GRXSTS_PKTSTS_OUTRX:
		dwc2_hsotg_rx_data(hsotg, epnum, size);
		break;

	case GRXSTS_PKTSTS_SETUPRX:
		dev_dbg(hsotg->dev,
			"SetupRX (Frame=0x%08x, DOEPCTL=0x%08x)\n",
			dwc2_hsotg_read_frameno(hsotg),
			dwc2_readl(hsotg, DOEPCTL(0)));

		WARN_ON(hsotg->ep0_state != DWC2_EP0_SETUP);

		dwc2_hsotg_rx_data(hsotg, epnum, size);
		break;

	default:
		dev_warn(hsotg->dev, "%s: unknown status %08x\n",
			 __func__, grxstsr);

		dwc2_hsotg_dump(hsotg);
		break;
	}
}

/**
 * dwc2_hsotg_ep0_mps - turn max packet size into register setting
 * @mps: The maximum packet size in bytes.
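 *
 * For example, dwc2_hsotg_ep0_mps(64) yields D0EPCTL_MPS_64, suitable
 * for the MPS field of DIEPCTL0/DOEPCTL0; an unsupported size triggers
 * a WARN and returns (u32)-1 so callers can reject it.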
 */
static u32 dwc2_hsotg_ep0_mps(unsigned int mps)
{
	switch (mps) {
	case 64:
		return D0EPCTL_MPS_64;
	case 32:
		return D0EPCTL_MPS_32;
	case 16:
		return D0EPCTL_MPS_16;
	case 8:
		return D0EPCTL_MPS_8;
	}

	/* bad max packet size, warn and return invalid result */
	WARN_ON(1);
	return (u32)-1;
}

/**
 * dwc2_hsotg_set_ep_maxpacket - set endpoint's max-packet field
 * @hsotg: The driver state.
 * @ep: The index number of the endpoint
 * @mps: The maximum packet size in bytes
 * @mc: The multicount value
 * @dir_in: True if direction is in.
 *
 * Configure the maximum packet size for the given endpoint, updating
 * the hardware control registers to reflect this.
 */
static void dwc2_hsotg_set_ep_maxpacket(struct dwc2_hsotg *hsotg,
					unsigned int ep, unsigned int mps,
					unsigned int mc, unsigned int dir_in)
{
	struct dwc2_hsotg_ep *hs_ep;
	u32 reg;

	hs_ep = index_to_ep(hsotg, ep, dir_in);
	if (!hs_ep)
		return;

	if (ep == 0) {
		u32 mps_bytes = mps;

		/* EP0 is a special case */
		mps = dwc2_hsotg_ep0_mps(mps_bytes);
		if (mps > 3)
			goto bad_mps;
		hs_ep->ep.maxpacket = mps_bytes;
		hs_ep->mc = 1;
	} else {
		if (mps > 1024)
			goto bad_mps;
		if (mc > 3)
			goto bad_mps;
		hs_ep->mc = mc;
		hs_ep->ep.maxpacket = mps;
	}

	if (dir_in) {
		reg = dwc2_readl(hsotg, DIEPCTL(ep));
		reg &= ~DXEPCTL_MPS_MASK;
		reg |= mps;
		dwc2_writel(hsotg, reg, DIEPCTL(ep));
	} else {
		reg = dwc2_readl(hsotg, DOEPCTL(ep));
		reg &= ~DXEPCTL_MPS_MASK;
		reg |= mps;
		dwc2_writel(hsotg, reg, DOEPCTL(ep));
	}

	return;

bad_mps:
	dev_err(hsotg->dev, "ep%d: bad mps of %d\n", ep, mps);
}

/**
 * dwc2_hsotg_txfifo_flush - flush Tx FIFO
 * @hsotg: The driver state
 * @idx: The index for the endpoint (0..15)
 */
static void dwc2_hsotg_txfifo_flush(struct dwc2_hsotg *hsotg, unsigned int idx)
{
	dwc2_writel(hsotg, GRSTCTL_TXFNUM(idx) | GRSTCTL_TXFFLSH,
		    GRSTCTL);

	/* wait until the fifo is flushed */
	if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_TXFFLSH, 100))
		dev_warn(hsotg->dev, "%s: timeout flushing fifo GRSTCTL_TXFFLSH\n",
			 __func__);
}

/**
 * dwc2_hsotg_trytx - check to see if anything needs transmitting
 * @hsotg: The driver state
 * @hs_ep: The driver endpoint to check.
 *
 * Check to see if there is a request that has data to send, and if so
 * make an attempt to write data into the FIFO.
 */
static int dwc2_hsotg_trytx(struct dwc2_hsotg *hsotg,
			    struct dwc2_hsotg_ep *hs_ep)
{
	struct dwc2_hsotg_req *hs_req = hs_ep->req;

	if (!hs_ep->dir_in || !hs_req) {
		/*
		 * if no request is enqueued, we disable interrupts
		 * for endpoints, except for ep0
		 */
		if (hs_ep->index != 0)
			dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index,
					      hs_ep->dir_in, 0);
		return 0;
	}

	if (hs_req->req.actual < hs_req->req.length) {
		dev_dbg(hsotg->dev, "trying to write more for ep%d\n",
			hs_ep->index);
		return dwc2_hsotg_write_fifo(hsotg, hs_ep, hs_req);
	}

	return 0;
}

/**
 * dwc2_hsotg_complete_in - complete IN transfer
 * @hsotg: The device state.
 * @hs_ep: The endpoint that has just completed.
 *
 * An IN transfer has been completed, update the transfer's state and then
 * call the relevant completion routines.
 */
static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg,
				   struct dwc2_hsotg_ep *hs_ep)
{
	struct dwc2_hsotg_req *hs_req = hs_ep->req;
	u32 epsize = dwc2_readl(hsotg, DIEPTSIZ(hs_ep->index));
	int size_left, size_done;

	if (!hs_req) {
		dev_dbg(hsotg->dev, "XferCompl but no req\n");
		return;
	}

	/* Finish ZLP handling for IN EP0 transactions */
	if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_STATUS_IN) {
		dev_dbg(hsotg->dev, "zlp packet sent\n");

		/*
		 * While sending the zlp for DWC2_EP0_STATUS_IN the EP
		 * direction was changed to IN. Change it back so the OUT
		 * transfer request can be completed.
		 */
		hs_ep->dir_in = 0;

		dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
		if (hsotg->test_mode) {
			int ret;

			ret = dwc2_hsotg_set_test_mode(hsotg, hsotg->test_mode);
			if (ret < 0) {
				dev_dbg(hsotg->dev, "Invalid Test #%d\n",
					hsotg->test_mode);
				dwc2_hsotg_stall_ep0(hsotg);
				return;
			}
		}
		dwc2_hsotg_enqueue_setup(hsotg);
		return;
	}

	/*
	 * Calculate the size of the transfer by checking how much is left
	 * in the endpoint size register and then working it out from
	 * the amount we loaded for the transfer.
	 *
	 * We do this even for DMA, as the transfer may have incremented
	 * past the end of the buffer (DMA transfers are always 32bit
	 * aligned).
	 */
	if (using_desc_dma(hsotg)) {
		size_left = dwc2_gadget_get_xfersize_ddma(hs_ep);
		if (size_left < 0)
			dev_err(hsotg->dev, "error parsing DDMA results %d\n",
				size_left);
	} else {
		size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
	}

	size_done = hs_ep->size_loaded - size_left;
	size_done += hs_ep->last_load;

	if (hs_req->req.actual != size_done)
		dev_dbg(hsotg->dev, "%s: adjusting size done %d => %d\n",
			__func__, hs_req->req.actual, size_done);

	hs_req->req.actual = size_done;
	dev_dbg(hsotg->dev, "req->length:%d req->actual:%d req->zero:%d\n",
		hs_req->req.length, hs_req->req.actual, hs_req->req.zero);

	if (!size_left && hs_req->req.actual < hs_req->req.length) {
		dev_dbg(hsotg->dev, "%s trying more for req...\n", __func__);
		dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, true);
		return;
	}

	/* Zlp for all endpoints, for ep0 only in DATA IN stage */
	if (hs_ep->send_zlp) {
		dwc2_hsotg_program_zlp(hsotg, hs_ep);
		hs_ep->send_zlp = 0;
		/* transfer will be completed on next complete interrupt */
		return;
	}

	if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_DATA_IN) {
		/* Move to STATUS OUT */
		dwc2_hsotg_ep0_zlp(hsotg, false);
		return;
	}

	dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
}

/**
 * dwc2_gadget_read_ep_interrupts - reads interrupts for given ep
 * @hsotg: The device state.
 * @idx: Index of ep.
 * @dir_in: Endpoint direction 1-in 0-out.
 *
 * Reads the interrupt status for the endpoint with the given index and
 * direction, masking epint_reg with the corresponding mask.
 */
static u32 dwc2_gadget_read_ep_interrupts(struct dwc2_hsotg *hsotg,
					  unsigned int idx, int dir_in)
{
	u32 epmsk_reg = dir_in ? DIEPMSK : DOEPMSK;
	u32 epint_reg = dir_in ?
DIEPINT(idx) : DOEPINT(idx); 2732 u32 ints; 2733 u32 mask; 2734 u32 diepempmsk; 2735 2736 mask = dwc2_readl(hsotg, epmsk_reg); 2737 diepempmsk = dwc2_readl(hsotg, DIEPEMPMSK); 2738 mask |= ((diepempmsk >> idx) & 0x1) ? DIEPMSK_TXFIFOEMPTY : 0; 2739 mask |= DXEPINT_SETUP_RCVD; 2740 2741 ints = dwc2_readl(hsotg, epint_reg); 2742 ints &= mask; 2743 return ints; 2744 } 2745 2746 /** 2747 * dwc2_gadget_handle_ep_disabled - handle DXEPINT_EPDISBLD 2748 * @hs_ep: The endpoint on which interrupt is asserted. 2749 * 2750 * This interrupt indicates that the endpoint has been disabled per the 2751 * application's request. 2752 * 2753 * For IN endpoints flushes txfifo, in case of BULK clears DCTL_CGNPINNAK, 2754 * in case of ISOC completes current request. 2755 * 2756 * For ISOC-OUT endpoints completes expired requests. If there is remaining 2757 * request starts it. 2758 */ 2759 static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep) 2760 { 2761 struct dwc2_hsotg *hsotg = hs_ep->parent; 2762 struct dwc2_hsotg_req *hs_req; 2763 unsigned char idx = hs_ep->index; 2764 int dir_in = hs_ep->dir_in; 2765 u32 epctl_reg = dir_in ? DIEPCTL(idx) : DOEPCTL(idx); 2766 int dctl = dwc2_readl(hsotg, DCTL); 2767 2768 dev_dbg(hsotg->dev, "%s: EPDisbld\n", __func__); 2769 2770 if (dir_in) { 2771 int epctl = dwc2_readl(hsotg, epctl_reg); 2772 2773 dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index); 2774 2775 if (hs_ep->isochronous) { 2776 dwc2_hsotg_complete_in(hsotg, hs_ep); 2777 return; 2778 } 2779 2780 if ((epctl & DXEPCTL_STALL) && (epctl & DXEPCTL_EPTYPE_BULK)) { 2781 int dctl = dwc2_readl(hsotg, DCTL); 2782 2783 dctl |= DCTL_CGNPINNAK; 2784 dwc2_writel(hsotg, dctl, DCTL); 2785 } 2786 return; 2787 } 2788 2789 if (dctl & DCTL_GOUTNAKSTS) { 2790 dctl |= DCTL_CGOUTNAK; 2791 dwc2_writel(hsotg, dctl, DCTL); 2792 } 2793 2794 if (!hs_ep->isochronous) 2795 return; 2796 2797 if (list_empty(&hs_ep->queue)) { 2798 dev_dbg(hsotg->dev, "%s: complete_ep 0x%p, ep->queue empty!\n", 2799 __func__, hs_ep); 2800 return; 2801 } 2802 2803 do { 2804 hs_req = get_ep_head(hs_ep); 2805 if (hs_req) 2806 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 2807 -ENODATA); 2808 dwc2_gadget_incr_frame_num(hs_ep); 2809 /* Update current frame number value. */ 2810 hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg); 2811 } while (dwc2_gadget_target_frame_elapsed(hs_ep)); 2812 2813 dwc2_gadget_start_next_request(hs_ep); 2814 } 2815 2816 /** 2817 * dwc2_gadget_handle_out_token_ep_disabled - handle DXEPINT_OUTTKNEPDIS 2818 * @ep: The endpoint on which interrupt is asserted. 2819 * 2820 * This is starting point for ISOC-OUT transfer, synchronization done with 2821 * first out token received from host while corresponding EP is disabled. 2822 * 2823 * Device does not know initial frame in which out token will come. For this 2824 * HW generates OUTTKNEPDIS - out token is received while EP is disabled. Upon 2825 * getting this interrupt SW starts calculation for next transfer frame. 
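 *
 * For example (illustrative): with interval = 4 and the first OUT token
 * seen in (micro)frame 102, target_frame is initialised to 102 and
 * subsequent transfers are lined up for 106, 110, ..., wrapping at
 * DSTS_SOFFN_LIMIT via dwc2_gadget_incr_frame_num().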
 */
static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
{
	struct dwc2_hsotg *hsotg = ep->parent;
	int dir_in = ep->dir_in;
	u32 doepmsk;

	if (dir_in || !ep->isochronous)
		return;

	if (using_desc_dma(hsotg)) {
		if (ep->target_frame == TARGET_FRAME_INITIAL) {
			/* Start first ISO Out */
			ep->target_frame = hsotg->frame_number;
			dwc2_gadget_start_isoc_ddma(ep);
		}
		return;
	}

	if (ep->interval > 1 &&
	    ep->target_frame == TARGET_FRAME_INITIAL) {
		u32 ctrl;

		ep->target_frame = hsotg->frame_number;
		dwc2_gadget_incr_frame_num(ep);

		ctrl = dwc2_readl(hsotg, DOEPCTL(ep->index));
		if (ep->target_frame & 0x1)
			ctrl |= DXEPCTL_SETODDFR;
		else
			ctrl |= DXEPCTL_SETEVENFR;

		dwc2_writel(hsotg, ctrl, DOEPCTL(ep->index));
	}

	dwc2_gadget_start_next_request(ep);
	doepmsk = dwc2_readl(hsotg, DOEPMSK);
	doepmsk &= ~DOEPMSK_OUTTKNEPDISMSK;
	dwc2_writel(hsotg, doepmsk, DOEPMSK);
}

/**
 * dwc2_gadget_handle_nak - handle NAK interrupt
 * @hs_ep: The endpoint on which the interrupt is asserted.
 *
 * This is the starting point for an ISOC-IN transfer; synchronization is
 * done with the first IN token received from the host while the
 * corresponding EP is disabled.
 *
 * The device does not know when the first IN token will arrive from the
 * host. On its arrival the hardware generates two interrupts: 'in token
 * received while FIFO empty' and 'NAK'. For ISOC-IN the NAK interrupt
 * means a token has arrived and a ZLP was sent in response because there
 * was no data in the FIFO. SW uses this interrupt to learn the frame in
 * which the token arrived and then, based on the interval, calculates
 * the frame for the next transfer.
 */
static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
{
	struct dwc2_hsotg *hsotg = hs_ep->parent;
	int dir_in = hs_ep->dir_in;

	if (!dir_in || !hs_ep->isochronous)
		return;

	if (hs_ep->target_frame == TARGET_FRAME_INITIAL) {

		if (using_desc_dma(hsotg)) {
			hs_ep->target_frame = hsotg->frame_number;
			dwc2_gadget_incr_frame_num(hs_ep);

			/* In service interval mode target_frame must
			 * be set to the last (u)frame of the service interval.
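			 *
			 * e.g. (illustrative) with an 8-(u)frame service
			 * interval and a token seen in (u)frame 0x123, the
			 * steps below yield first = 0x120 and last = 0x127.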
			 */
			if (hsotg->params.service_interval) {
				/* Set target_frame to the first (u)frame of
				 * the service interval.
				 *
				 * This relies on the two's-complement
				 * identity -interval == ~(interval - 1):
				 * the AND rounds target_frame down to a
				 * multiple of the (power-of-two) interval.
				 */
				hs_ep->target_frame &= ~hs_ep->interval + 1;

				/* Set target_frame to the last (u)frame of
				 * the service interval
				 */
				dwc2_gadget_incr_frame_num(hs_ep);
				dwc2_gadget_dec_frame_num_by_one(hs_ep);
			}

			dwc2_gadget_start_isoc_ddma(hs_ep);
			return;
		}

		hs_ep->target_frame = hsotg->frame_number;
		if (hs_ep->interval > 1) {
			u32 ctrl = dwc2_readl(hsotg,
					      DIEPCTL(hs_ep->index));
			if (hs_ep->target_frame & 0x1)
				ctrl |= DXEPCTL_SETODDFR;
			else
				ctrl |= DXEPCTL_SETEVENFR;

			dwc2_writel(hsotg, ctrl, DIEPCTL(hs_ep->index));
		}

		dwc2_hsotg_complete_request(hsotg, hs_ep,
					    get_ep_head(hs_ep), 0);
	}

	if (!using_desc_dma(hsotg))
		dwc2_gadget_incr_frame_num(hs_ep);
}

/**
 * dwc2_hsotg_epint - handle an in/out endpoint interrupt
 * @hsotg: The driver state
 * @idx: The index for the endpoint (0..15)
 * @dir_in: Set if this is an IN endpoint
 *
 * Process and clear any interrupt pending for an individual endpoint
 */
static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
			     int dir_in)
{
	struct dwc2_hsotg_ep *hs_ep = index_to_ep(hsotg, idx, dir_in);
	u32 epint_reg = dir_in ? DIEPINT(idx) : DOEPINT(idx);
	u32 epctl_reg = dir_in ? DIEPCTL(idx) : DOEPCTL(idx);
	u32 epsiz_reg = dir_in ? DIEPTSIZ(idx) : DOEPTSIZ(idx);
	u32 ints;
	u32 ctrl;

	ints = dwc2_gadget_read_ep_interrupts(hsotg, idx, dir_in);
	ctrl = dwc2_readl(hsotg, epctl_reg);

	/* Clear endpoint interrupts */
	dwc2_writel(hsotg, ints, epint_reg);

	if (!hs_ep) {
		dev_err(hsotg->dev, "%s:Interrupt for unconfigured ep%d(%s)\n",
			__func__, idx, dir_in ? "in" : "out");
		return;
	}

	dev_dbg(hsotg->dev, "%s: ep%d(%s) DxEPINT=0x%08x\n",
		__func__, idx, dir_in ? "in" : "out", ints);

	/* Don't process XferCompl interrupt if it is a setup packet */
	if (idx == 0 && (ints & (DXEPINT_SETUP | DXEPINT_SETUP_RCVD)))
		ints &= ~DXEPINT_XFERCOMPL;

	/*
	 * Don't process XferCompl interrupt in DDMA if EP0 is still in SETUP
	 * stage and xfercomplete was generated without SETUP phase done
	 * interrupt. SW should parse the received setup packet only after
	 * the host's exit from the setup phase of the control transfer.
2977 */ 2978 if (using_desc_dma(hsotg) && idx == 0 && !hs_ep->dir_in && 2979 hsotg->ep0_state == DWC2_EP0_SETUP && !(ints & DXEPINT_SETUP)) 2980 ints &= ~DXEPINT_XFERCOMPL; 2981 2982 if (ints & DXEPINT_XFERCOMPL) { 2983 dev_dbg(hsotg->dev, 2984 "%s: XferCompl: DxEPCTL=0x%08x, DXEPTSIZ=%08x\n", 2985 __func__, dwc2_readl(hsotg, epctl_reg), 2986 dwc2_readl(hsotg, epsiz_reg)); 2987 2988 /* In DDMA handle isochronous requests separately */ 2989 if (using_desc_dma(hsotg) && hs_ep->isochronous) { 2990 /* XferCompl set along with BNA */ 2991 if (!(ints & DXEPINT_BNAINTR)) 2992 dwc2_gadget_complete_isoc_request_ddma(hs_ep); 2993 } else if (dir_in) { 2994 /* 2995 * We get OutDone from the FIFO, so we only 2996 * need to look at completing IN requests here 2997 * if operating slave mode 2998 */ 2999 if (hs_ep->isochronous && hs_ep->interval > 1) 3000 dwc2_gadget_incr_frame_num(hs_ep); 3001 3002 dwc2_hsotg_complete_in(hsotg, hs_ep); 3003 if (ints & DXEPINT_NAKINTRPT) 3004 ints &= ~DXEPINT_NAKINTRPT; 3005 3006 if (idx == 0 && !hs_ep->req) 3007 dwc2_hsotg_enqueue_setup(hsotg); 3008 } else if (using_dma(hsotg)) { 3009 /* 3010 * We're using DMA, we need to fire an OutDone here 3011 * as we ignore the RXFIFO. 3012 */ 3013 if (hs_ep->isochronous && hs_ep->interval > 1) 3014 dwc2_gadget_incr_frame_num(hs_ep); 3015 3016 dwc2_hsotg_handle_outdone(hsotg, idx); 3017 } 3018 } 3019 3020 if (ints & DXEPINT_EPDISBLD) 3021 dwc2_gadget_handle_ep_disabled(hs_ep); 3022 3023 if (ints & DXEPINT_OUTTKNEPDIS) 3024 dwc2_gadget_handle_out_token_ep_disabled(hs_ep); 3025 3026 if (ints & DXEPINT_NAKINTRPT) 3027 dwc2_gadget_handle_nak(hs_ep); 3028 3029 if (ints & DXEPINT_AHBERR) 3030 dev_dbg(hsotg->dev, "%s: AHBErr\n", __func__); 3031 3032 if (ints & DXEPINT_SETUP) { /* Setup or Timeout */ 3033 dev_dbg(hsotg->dev, "%s: Setup/Timeout\n", __func__); 3034 3035 if (using_dma(hsotg) && idx == 0) { 3036 /* 3037 * this is the notification we've received a 3038 * setup packet. In non-DMA mode we'd get this 3039 * from the RXFIFO, instead we need to process 3040 * the setup here. 
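			 * (The 8-byte SETUP payload should already have
			 * been written by DMA into hsotg->ctrl_buff via
			 * the request queued in dwc2_hsotg_enqueue_setup().)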
			 */

			if (dir_in)
				WARN_ON_ONCE(1);
			else
				dwc2_hsotg_handle_outdone(hsotg, 0);
		}
	}

	if (ints & DXEPINT_STSPHSERCVD) {
		dev_dbg(hsotg->dev, "%s: StsPhseRcvd\n", __func__);

		/* Safety check EP0 state when STSPHSERCVD asserted */
		if (hsotg->ep0_state == DWC2_EP0_DATA_OUT) {
			/* Move to STATUS IN for DDMA */
			if (using_desc_dma(hsotg))
				dwc2_hsotg_ep0_zlp(hsotg, true);
		}

	}

	if (ints & DXEPINT_BACK2BACKSETUP)
		dev_dbg(hsotg->dev, "%s: B2BSetup/INEPNakEff\n", __func__);

	if (ints & DXEPINT_BNAINTR) {
		dev_dbg(hsotg->dev, "%s: BNA interrupt\n", __func__);
		if (hs_ep->isochronous)
			dwc2_gadget_handle_isoc_bna(hs_ep);
	}

	if (dir_in && !hs_ep->isochronous) {
		/* not sure if this is important, but we'll clear it anyway */
		if (ints & DXEPINT_INTKNTXFEMP) {
			dev_dbg(hsotg->dev, "%s: ep%d: INTknTXFEmpMsk\n",
				__func__, idx);
		}

		/* this probably means something bad is happening */
		if (ints & DXEPINT_INTKNEPMIS) {
			dev_warn(hsotg->dev, "%s: ep%d: INTknEP\n",
				 __func__, idx);
		}

		/* FIFO has space or is empty (see GAHBCFG) */
		if (hsotg->dedicated_fifos &&
		    ints & DXEPINT_TXFEMP) {
			dev_dbg(hsotg->dev, "%s: ep%d: TxFIFOEmpty\n",
				__func__, idx);
			if (!using_dma(hsotg))
				dwc2_hsotg_trytx(hsotg, hs_ep);
		}
	}
}

/**
 * dwc2_hsotg_irq_enumdone - Handle EnumDone interrupt (enumeration done)
 * @hsotg: The device state.
 *
 * Handle updating the device settings after the enumeration phase has
 * been completed.
 */
static void dwc2_hsotg_irq_enumdone(struct dwc2_hsotg *hsotg)
{
	u32 dsts = dwc2_readl(hsotg, DSTS);
	int ep0_mps = 0, ep_mps = 8;

	/*
	 * This should signal the finish of the enumeration phase
	 * of the USB handshaking, so we should now know what rate
	 * we connected at.
	 */

	dev_dbg(hsotg->dev, "EnumDone (DSTS=0x%08x)\n", dsts);

	/*
	 * note, since we're limited by the size of transfer on EP0, and
	 * it seems IN transfers must be an even number of packets, we do
	 * not advertise a 64-byte MPS on EP0.
	 */

	/* catch both EnumSpd_FS and EnumSpd_FS48 */
	switch ((dsts & DSTS_ENUMSPD_MASK) >> DSTS_ENUMSPD_SHIFT) {
	case DSTS_ENUMSPD_FS:
	case DSTS_ENUMSPD_FS48:
		hsotg->gadget.speed = USB_SPEED_FULL;
		ep0_mps = EP0_MPS_LIMIT;
		ep_mps = 1023;
		break;

	case DSTS_ENUMSPD_HS:
		hsotg->gadget.speed = USB_SPEED_HIGH;
		ep0_mps = EP0_MPS_LIMIT;
		ep_mps = 1024;
		break;

	case DSTS_ENUMSPD_LS:
		hsotg->gadget.speed = USB_SPEED_LOW;
		ep0_mps = 8;
		ep_mps = 8;
		/*
		 * note, we don't actually support LS in this driver at the
		 * moment, and the documentation seems to imply that it isn't
		 * supported by the PHYs on some of the devices.
		 */
		break;
	}
	dev_info(hsotg->dev, "new device is %s\n",
		 usb_speed_string(hsotg->gadget.speed));

	/*
	 * we should now know the maximum packet size for an
	 * endpoint, so set the endpoints to a default value.
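	 *
	 * e.g. after a high-speed handshake this programs 64 bytes on EP0
	 * and 1024 bytes on every other endpoint (see ep_mps above).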
3153 */ 3154 3155 if (ep0_mps) { 3156 int i; 3157 /* Initialize ep0 for both in and out directions */ 3158 dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0, 1); 3159 dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0, 0); 3160 for (i = 1; i < hsotg->num_of_eps; i++) { 3161 if (hsotg->eps_in[i]) 3162 dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps, 3163 0, 1); 3164 if (hsotg->eps_out[i]) 3165 dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps, 3166 0, 0); 3167 } 3168 } 3169 3170 /* ensure after enumeration our EP0 is active */ 3171 3172 dwc2_hsotg_enqueue_setup(hsotg); 3173 3174 dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n", 3175 dwc2_readl(hsotg, DIEPCTL0), 3176 dwc2_readl(hsotg, DOEPCTL0)); 3177 } 3178 3179 /** 3180 * kill_all_requests - remove all requests from the endpoint's queue 3181 * @hsotg: The device state. 3182 * @ep: The endpoint the requests may be on. 3183 * @result: The result code to use. 3184 * 3185 * Go through the requests on the given endpoint and mark them 3186 * completed with the given result code. 3187 */ 3188 static void kill_all_requests(struct dwc2_hsotg *hsotg, 3189 struct dwc2_hsotg_ep *ep, 3190 int result) 3191 { 3192 struct dwc2_hsotg_req *req, *treq; 3193 unsigned int size; 3194 3195 ep->req = NULL; 3196 3197 list_for_each_entry_safe(req, treq, &ep->queue, queue) 3198 dwc2_hsotg_complete_request(hsotg, ep, req, 3199 result); 3200 3201 if (!hsotg->dedicated_fifos) 3202 return; 3203 size = (dwc2_readl(hsotg, DTXFSTS(ep->fifo_index)) & 0xffff) * 4; 3204 if (size < ep->fifo_size) 3205 dwc2_hsotg_txfifo_flush(hsotg, ep->fifo_index); 3206 } 3207 3208 /** 3209 * dwc2_hsotg_disconnect - disconnect service 3210 * @hsotg: The device state. 3211 * 3212 * The device has been disconnected. Remove all current 3213 * transactions and signal the gadget driver that this 3214 * has happened. 
3215 */ 3216 void dwc2_hsotg_disconnect(struct dwc2_hsotg *hsotg) 3217 { 3218 unsigned int ep; 3219 3220 if (!hsotg->connected) 3221 return; 3222 3223 hsotg->connected = 0; 3224 hsotg->test_mode = 0; 3225 3226 /* all endpoints should be shutdown */ 3227 for (ep = 0; ep < hsotg->num_of_eps; ep++) { 3228 if (hsotg->eps_in[ep]) 3229 kill_all_requests(hsotg, hsotg->eps_in[ep], 3230 -ESHUTDOWN); 3231 if (hsotg->eps_out[ep]) 3232 kill_all_requests(hsotg, hsotg->eps_out[ep], 3233 -ESHUTDOWN); 3234 } 3235 3236 call_gadget(hsotg, disconnect); 3237 hsotg->lx_state = DWC2_L3; 3238 3239 usb_gadget_set_state(&hsotg->gadget, USB_STATE_NOTATTACHED); 3240 } 3241 3242 /** 3243 * dwc2_hsotg_irq_fifoempty - TX FIFO empty interrupt handler 3244 * @hsotg: The device state: 3245 * @periodic: True if this is a periodic FIFO interrupt 3246 */ 3247 static void dwc2_hsotg_irq_fifoempty(struct dwc2_hsotg *hsotg, bool periodic) 3248 { 3249 struct dwc2_hsotg_ep *ep; 3250 int epno, ret; 3251 3252 /* look through for any more data to transmit */ 3253 for (epno = 0; epno < hsotg->num_of_eps; epno++) { 3254 ep = index_to_ep(hsotg, epno, 1); 3255 3256 if (!ep) 3257 continue; 3258 3259 if (!ep->dir_in) 3260 continue; 3261 3262 if ((periodic && !ep->periodic) || 3263 (!periodic && ep->periodic)) 3264 continue; 3265 3266 ret = dwc2_hsotg_trytx(hsotg, ep); 3267 if (ret < 0) 3268 break; 3269 } 3270 } 3271 3272 /* IRQ flags which will trigger a retry around the IRQ loop */ 3273 #define IRQ_RETRY_MASK (GINTSTS_NPTXFEMP | \ 3274 GINTSTS_PTXFEMP | \ 3275 GINTSTS_RXFLVL) 3276 3277 static int dwc2_hsotg_ep_disable(struct usb_ep *ep); 3278 /** 3279 * dwc2_hsotg_core_init - issue softreset to the core 3280 * @hsotg: The device state 3281 * @is_usb_reset: Usb resetting flag 3282 * 3283 * Issue a soft reset to the core, and await the core finishing it. 3284 */ 3285 void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg, 3286 bool is_usb_reset) 3287 { 3288 u32 intmsk; 3289 u32 val; 3290 u32 usbcfg; 3291 u32 dcfg = 0; 3292 int ep; 3293 3294 /* Kill any ep0 requests as controller will be reinitialized */ 3295 kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET); 3296 3297 if (!is_usb_reset) { 3298 if (dwc2_core_reset(hsotg, true)) 3299 return; 3300 } else { 3301 /* all endpoints should be shutdown */ 3302 for (ep = 1; ep < hsotg->num_of_eps; ep++) { 3303 if (hsotg->eps_in[ep]) 3304 dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep); 3305 if (hsotg->eps_out[ep]) 3306 dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep); 3307 } 3308 } 3309 3310 /* 3311 * we must now enable ep0 ready for host detection and then 3312 * set configuration. 3313 */ 3314 3315 /* keep other bits untouched (so e.g. forced modes are not lost) */ 3316 usbcfg = dwc2_readl(hsotg, GUSBCFG); 3317 usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP | 3318 GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK); 3319 3320 if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS && 3321 (hsotg->params.speed == DWC2_SPEED_PARAM_FULL || 3322 hsotg->params.speed == DWC2_SPEED_PARAM_LOW)) { 3323 /* FS/LS Dedicated Transceiver Interface */ 3324 usbcfg |= GUSBCFG_PHYSEL; 3325 } else { 3326 /* set the PLL on, remove the HNP/SRP and set the PHY */ 3327 val = (hsotg->phyif == GUSBCFG_PHYIF8) ? 
9 : 5; 3328 usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) | 3329 (val << GUSBCFG_USBTRDTIM_SHIFT); 3330 } 3331 dwc2_writel(hsotg, usbcfg, GUSBCFG); 3332 3333 dwc2_hsotg_init_fifo(hsotg); 3334 3335 if (!is_usb_reset) 3336 dwc2_set_bit(hsotg, DCTL, DCTL_SFTDISCON); 3337 3338 dcfg |= DCFG_EPMISCNT(1); 3339 3340 switch (hsotg->params.speed) { 3341 case DWC2_SPEED_PARAM_LOW: 3342 dcfg |= DCFG_DEVSPD_LS; 3343 break; 3344 case DWC2_SPEED_PARAM_FULL: 3345 if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS) 3346 dcfg |= DCFG_DEVSPD_FS48; 3347 else 3348 dcfg |= DCFG_DEVSPD_FS; 3349 break; 3350 default: 3351 dcfg |= DCFG_DEVSPD_HS; 3352 } 3353 3354 if (hsotg->params.ipg_isoc_en) 3355 dcfg |= DCFG_IPG_ISOC_SUPPORDED; 3356 3357 dwc2_writel(hsotg, dcfg, DCFG); 3358 3359 /* Clear any pending OTG interrupts */ 3360 dwc2_writel(hsotg, 0xffffffff, GOTGINT); 3361 3362 /* Clear any pending interrupts */ 3363 dwc2_writel(hsotg, 0xffffffff, GINTSTS); 3364 intmsk = GINTSTS_ERLYSUSP | GINTSTS_SESSREQINT | 3365 GINTSTS_GOUTNAKEFF | GINTSTS_GINNAKEFF | 3366 GINTSTS_USBRST | GINTSTS_RESETDET | 3367 GINTSTS_ENUMDONE | GINTSTS_OTGINT | 3368 GINTSTS_USBSUSP | GINTSTS_WKUPINT | 3369 GINTSTS_LPMTRANRCVD; 3370 3371 if (!using_desc_dma(hsotg)) 3372 intmsk |= GINTSTS_INCOMPL_SOIN | GINTSTS_INCOMPL_SOOUT; 3373 3374 if (!hsotg->params.external_id_pin_ctl) 3375 intmsk |= GINTSTS_CONIDSTSCHNG; 3376 3377 dwc2_writel(hsotg, intmsk, GINTMSK); 3378 3379 if (using_dma(hsotg)) { 3380 dwc2_writel(hsotg, GAHBCFG_GLBL_INTR_EN | GAHBCFG_DMA_EN | 3381 hsotg->params.ahbcfg, 3382 GAHBCFG); 3383 3384 /* Set DDMA mode support in the core if needed */ 3385 if (using_desc_dma(hsotg)) 3386 dwc2_set_bit(hsotg, DCFG, DCFG_DESCDMA_EN); 3387 3388 } else { 3389 dwc2_writel(hsotg, ((hsotg->dedicated_fifos) ? 3390 (GAHBCFG_NP_TXF_EMP_LVL | 3391 GAHBCFG_P_TXF_EMP_LVL) : 0) | 3392 GAHBCFG_GLBL_INTR_EN, GAHBCFG); 3393 } 3394 3395 /* 3396 * If INTknTXFEmpMsk is enabled, it's important to disable ep interrupts 3397 * when we have no data to transfer. Otherwise we get being flooded by 3398 * interrupts. 3399 */ 3400 3401 dwc2_writel(hsotg, ((hsotg->dedicated_fifos && !using_dma(hsotg)) ? 3402 DIEPMSK_TXFIFOEMPTY | DIEPMSK_INTKNTXFEMPMSK : 0) | 3403 DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK | 3404 DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK, 3405 DIEPMSK); 3406 3407 /* 3408 * don't need XferCompl, we get that from RXFIFO in slave mode. In 3409 * DMA mode we may need this and StsPhseRcvd. 3410 */ 3411 dwc2_writel(hsotg, (using_dma(hsotg) ? (DIEPMSK_XFERCOMPLMSK | 3412 DOEPMSK_STSPHSERCVDMSK) : 0) | 3413 DOEPMSK_EPDISBLDMSK | DOEPMSK_AHBERRMSK | 3414 DOEPMSK_SETUPMSK, 3415 DOEPMSK); 3416 3417 /* Enable BNA interrupt for DDMA */ 3418 if (using_desc_dma(hsotg)) { 3419 dwc2_set_bit(hsotg, DOEPMSK, DOEPMSK_BNAMSK); 3420 dwc2_set_bit(hsotg, DIEPMSK, DIEPMSK_BNAININTRMSK); 3421 } 3422 3423 /* Enable Service Interval mode if supported */ 3424 if (using_desc_dma(hsotg) && hsotg->params.service_interval) 3425 dwc2_set_bit(hsotg, DCTL, DCTL_SERVICE_INTERVAL_SUPPORTED); 3426 3427 dwc2_writel(hsotg, 0, DAINTMSK); 3428 3429 dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n", 3430 dwc2_readl(hsotg, DIEPCTL0), 3431 dwc2_readl(hsotg, DOEPCTL0)); 3432 3433 /* enable in and out endpoint interrupts */ 3434 dwc2_hsotg_en_gsint(hsotg, GINTSTS_OEPINT | GINTSTS_IEPINT); 3435 3436 /* 3437 * Enable the RXFIFO when in slave mode, as this is how we collect 3438 * the data. In DMA mode, we get events from the FIFO but also 3439 * things we cannot process, so do not use it. 
3440 */ 3441 if (!using_dma(hsotg)) 3442 dwc2_hsotg_en_gsint(hsotg, GINTSTS_RXFLVL); 3443 3444 /* Enable interrupts for EP0 in and out */ 3445 dwc2_hsotg_ctrl_epint(hsotg, 0, 0, 1); 3446 dwc2_hsotg_ctrl_epint(hsotg, 0, 1, 1); 3447 3448 if (!is_usb_reset) { 3449 dwc2_set_bit(hsotg, DCTL, DCTL_PWRONPRGDONE); 3450 udelay(10); /* see openiboot */ 3451 dwc2_clear_bit(hsotg, DCTL, DCTL_PWRONPRGDONE); 3452 } 3453 3454 dev_dbg(hsotg->dev, "DCTL=0x%08x\n", dwc2_readl(hsotg, DCTL)); 3455 3456 /* 3457 * DxEPCTL_USBActEp says RO in manual, but seems to be set by 3458 * writing to the EPCTL register.. 3459 */ 3460 3461 /* set to read 1 8byte packet */ 3462 dwc2_writel(hsotg, DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) | 3463 DXEPTSIZ_XFERSIZE(8), DOEPTSIZ0); 3464 3465 dwc2_writel(hsotg, dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) | 3466 DXEPCTL_CNAK | DXEPCTL_EPENA | 3467 DXEPCTL_USBACTEP, 3468 DOEPCTL0); 3469 3470 /* enable, but don't activate EP0in */ 3471 dwc2_writel(hsotg, dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) | 3472 DXEPCTL_USBACTEP, DIEPCTL0); 3473 3474 /* clear global NAKs */ 3475 val = DCTL_CGOUTNAK | DCTL_CGNPINNAK; 3476 if (!is_usb_reset) 3477 val |= DCTL_SFTDISCON; 3478 dwc2_set_bit(hsotg, DCTL, val); 3479 3480 /* configure the core to support LPM */ 3481 dwc2_gadget_init_lpm(hsotg); 3482 3483 /* program GREFCLK register if needed */ 3484 if (using_desc_dma(hsotg) && hsotg->params.service_interval) 3485 dwc2_gadget_program_ref_clk(hsotg); 3486 3487 /* must be at-least 3ms to allow bus to see disconnect */ 3488 mdelay(3); 3489 3490 hsotg->lx_state = DWC2_L0; 3491 3492 dwc2_hsotg_enqueue_setup(hsotg); 3493 3494 dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n", 3495 dwc2_readl(hsotg, DIEPCTL0), 3496 dwc2_readl(hsotg, DOEPCTL0)); 3497 } 3498 3499 static void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg) 3500 { 3501 /* set the soft-disconnect bit */ 3502 dwc2_set_bit(hsotg, DCTL, DCTL_SFTDISCON); 3503 } 3504 3505 void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg) 3506 { 3507 /* remove the soft-disconnect and let's go */ 3508 dwc2_clear_bit(hsotg, DCTL, DCTL_SFTDISCON); 3509 } 3510 3511 /** 3512 * dwc2_gadget_handle_incomplete_isoc_in - handle incomplete ISO IN Interrupt. 3513 * @hsotg: The device state: 3514 * 3515 * This interrupt indicates one of the following conditions occurred while 3516 * transmitting an ISOC transaction. 3517 * - Corrupted IN Token for ISOC EP. 3518 * - Packet not complete in FIFO. 
 *
 * The following actions will be taken:
 * - Determine the EP
 * - Disable EP; when 'Endpoint Disabled' interrupt is received Flush FIFO
 */
static void dwc2_gadget_handle_incomplete_isoc_in(struct dwc2_hsotg *hsotg)
{
	struct dwc2_hsotg_ep *hs_ep;
	u32 epctrl;
	u32 daintmsk;
	u32 idx;

	dev_dbg(hsotg->dev, "Incomplete isoc in interrupt received:\n");

	daintmsk = dwc2_readl(hsotg, DAINTMSK);

	for (idx = 1; idx < hsotg->num_of_eps; idx++) {
		hs_ep = hsotg->eps_in[idx];
		/* Process only unmasked ISOC EPs */
		if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
			continue;

		epctrl = dwc2_readl(hsotg, DIEPCTL(idx));
		if ((epctrl & DXEPCTL_EPENA) &&
		    dwc2_gadget_target_frame_elapsed(hs_ep)) {
			epctrl |= DXEPCTL_SNAK;
			epctrl |= DXEPCTL_EPDIS;
			dwc2_writel(hsotg, epctrl, DIEPCTL(idx));
		}
	}

	/* Clear interrupt */
	dwc2_writel(hsotg, GINTSTS_INCOMPL_SOIN, GINTSTS);
}

/**
 * dwc2_gadget_handle_incomplete_isoc_out - handle incomplete ISO OUT Interrupt
 * @hsotg: The device state
 *
 * This interrupt indicates one of the following conditions occurred while
 * transmitting an ISOC transaction.
 * - Corrupted OUT Token for ISOC EP.
 * - Packet not complete in FIFO.
 *
 * The following actions will be taken:
 * - Determine the EP
 * - Set DCTL_SGOUTNAK and unmask GOUTNAKEFF if target frame elapsed.
 */
static void dwc2_gadget_handle_incomplete_isoc_out(struct dwc2_hsotg *hsotg)
{
	u32 gintsts;
	u32 gintmsk;
	u32 daintmsk;
	u32 epctrl;
	struct dwc2_hsotg_ep *hs_ep;
	int idx;

	dev_dbg(hsotg->dev, "%s: GINTSTS_INCOMPL_SOOUT\n", __func__);

	daintmsk = dwc2_readl(hsotg, DAINTMSK);
	daintmsk >>= DAINT_OUTEP_SHIFT;

	for (idx = 1; idx < hsotg->num_of_eps; idx++) {
		hs_ep = hsotg->eps_out[idx];
		/* Process only unmasked ISOC EPs */
		if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
			continue;

		epctrl = dwc2_readl(hsotg, DOEPCTL(idx));
		if ((epctrl & DXEPCTL_EPENA) &&
		    dwc2_gadget_target_frame_elapsed(hs_ep)) {
			/* Unmask GOUTNAKEFF interrupt */
			gintmsk = dwc2_readl(hsotg, GINTMSK);
			gintmsk |= GINTSTS_GOUTNAKEFF;
			dwc2_writel(hsotg, gintmsk, GINTMSK);

			gintsts = dwc2_readl(hsotg, GINTSTS);
			if (!(gintsts & GINTSTS_GOUTNAKEFF)) {
				dwc2_set_bit(hsotg, DCTL, DCTL_SGOUTNAK);
				break;
			}
		}
	}

	/* Clear interrupt */
	dwc2_writel(hsotg, GINTSTS_INCOMPL_SOOUT, GINTSTS);
}

/**
 * dwc2_hsotg_irq - handle device interrupt
 * @irq: The IRQ number triggered
 * @pw: The private data pointer passed when the handler was registered.
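 *
 * Returns IRQ_HANDLED once the device-mode interrupt sources have been
 * serviced, or IRQ_NONE when the controller is currently in host mode.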
3611 */ 3612 static irqreturn_t dwc2_hsotg_irq(int irq, void *pw) 3613 { 3614 struct dwc2_hsotg *hsotg = pw; 3615 int retry_count = 8; 3616 u32 gintsts; 3617 u32 gintmsk; 3618 3619 if (!dwc2_is_device_mode(hsotg)) 3620 return IRQ_NONE; 3621 3622 spin_lock(&hsotg->lock); 3623 irq_retry: 3624 gintsts = dwc2_readl(hsotg, GINTSTS); 3625 gintmsk = dwc2_readl(hsotg, GINTMSK); 3626 3627 dev_dbg(hsotg->dev, "%s: %08x %08x (%08x) retry %d\n", 3628 __func__, gintsts, gintsts & gintmsk, gintmsk, retry_count); 3629 3630 gintsts &= gintmsk; 3631 3632 if (gintsts & GINTSTS_RESETDET) { 3633 dev_dbg(hsotg->dev, "%s: USBRstDet\n", __func__); 3634 3635 dwc2_writel(hsotg, GINTSTS_RESETDET, GINTSTS); 3636 3637 /* This event must be used only if controller is suspended */ 3638 if (hsotg->lx_state == DWC2_L2) { 3639 dwc2_exit_partial_power_down(hsotg, true); 3640 hsotg->lx_state = DWC2_L0; 3641 } 3642 } 3643 3644 if (gintsts & (GINTSTS_USBRST | GINTSTS_RESETDET)) { 3645 u32 usb_status = dwc2_readl(hsotg, GOTGCTL); 3646 u32 connected = hsotg->connected; 3647 3648 dev_dbg(hsotg->dev, "%s: USBRst\n", __func__); 3649 dev_dbg(hsotg->dev, "GNPTXSTS=%08x\n", 3650 dwc2_readl(hsotg, GNPTXSTS)); 3651 3652 dwc2_writel(hsotg, GINTSTS_USBRST, GINTSTS); 3653 3654 /* Report disconnection if it is not already done. */ 3655 dwc2_hsotg_disconnect(hsotg); 3656 3657 /* Reset device address to zero */ 3658 dwc2_clear_bit(hsotg, DCFG, DCFG_DEVADDR_MASK); 3659 3660 if (usb_status & GOTGCTL_BSESVLD && connected) 3661 dwc2_hsotg_core_init_disconnected(hsotg, true); 3662 } 3663 3664 if (gintsts & GINTSTS_ENUMDONE) { 3665 dwc2_writel(hsotg, GINTSTS_ENUMDONE, GINTSTS); 3666 3667 dwc2_hsotg_irq_enumdone(hsotg); 3668 } 3669 3670 if (gintsts & (GINTSTS_OEPINT | GINTSTS_IEPINT)) { 3671 u32 daint = dwc2_readl(hsotg, DAINT); 3672 u32 daintmsk = dwc2_readl(hsotg, DAINTMSK); 3673 u32 daint_out, daint_in; 3674 int ep; 3675 3676 daint &= daintmsk; 3677 daint_out = daint >> DAINT_OUTEP_SHIFT; 3678 daint_in = daint & ~(daint_out << DAINT_OUTEP_SHIFT); 3679 3680 dev_dbg(hsotg->dev, "%s: daint=%08x\n", __func__, daint); 3681 3682 for (ep = 0; ep < hsotg->num_of_eps && daint_out; 3683 ep++, daint_out >>= 1) { 3684 if (daint_out & 1) 3685 dwc2_hsotg_epint(hsotg, ep, 0); 3686 } 3687 3688 for (ep = 0; ep < hsotg->num_of_eps && daint_in; 3689 ep++, daint_in >>= 1) { 3690 if (daint_in & 1) 3691 dwc2_hsotg_epint(hsotg, ep, 1); 3692 } 3693 } 3694 3695 /* check both FIFOs */ 3696 3697 if (gintsts & GINTSTS_NPTXFEMP) { 3698 dev_dbg(hsotg->dev, "NPTxFEmp\n"); 3699 3700 /* 3701 * Disable the interrupt to stop it happening again 3702 * unless one of these endpoint routines decides that 3703 * it needs re-enabling 3704 */ 3705 3706 dwc2_hsotg_disable_gsint(hsotg, GINTSTS_NPTXFEMP); 3707 dwc2_hsotg_irq_fifoempty(hsotg, false); 3708 } 3709 3710 if (gintsts & GINTSTS_PTXFEMP) { 3711 dev_dbg(hsotg->dev, "PTxFEmp\n"); 3712 3713 /* See note in GINTSTS_NPTxFEmp */ 3714 3715 dwc2_hsotg_disable_gsint(hsotg, GINTSTS_PTXFEMP); 3716 dwc2_hsotg_irq_fifoempty(hsotg, true); 3717 } 3718 3719 if (gintsts & GINTSTS_RXFLVL) { 3720 /* 3721 * note, since GINTSTS_RxFLvl doubles as FIFO-not-empty, 3722 * we need to retry dwc2_hsotg_handle_rx if this is still 3723 * set. 
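		 * (this is why GINTSTS_RXFLVL is part of the
		 * IRQ_RETRY_MASK defined above)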
3724 */ 3725 3726 dwc2_hsotg_handle_rx(hsotg); 3727 } 3728 3729 if (gintsts & GINTSTS_ERLYSUSP) { 3730 dev_dbg(hsotg->dev, "GINTSTS_ErlySusp\n"); 3731 dwc2_writel(hsotg, GINTSTS_ERLYSUSP, GINTSTS); 3732 } 3733 3734 /* 3735 * these next two seem to crop-up occasionally causing the core 3736 * to shutdown the USB transfer, so try clearing them and logging 3737 * the occurrence. 3738 */ 3739 3740 if (gintsts & GINTSTS_GOUTNAKEFF) { 3741 u8 idx; 3742 u32 epctrl; 3743 u32 gintmsk; 3744 u32 daintmsk; 3745 struct dwc2_hsotg_ep *hs_ep; 3746 3747 daintmsk = dwc2_readl(hsotg, DAINTMSK); 3748 daintmsk >>= DAINT_OUTEP_SHIFT; 3749 /* Mask this interrupt */ 3750 gintmsk = dwc2_readl(hsotg, GINTMSK); 3751 gintmsk &= ~GINTSTS_GOUTNAKEFF; 3752 dwc2_writel(hsotg, gintmsk, GINTMSK); 3753 3754 dev_dbg(hsotg->dev, "GOUTNakEff triggered\n"); 3755 for (idx = 1; idx < hsotg->num_of_eps; idx++) { 3756 hs_ep = hsotg->eps_out[idx]; 3757 /* Proceed only unmasked ISOC EPs */ 3758 if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous) 3759 continue; 3760 3761 epctrl = dwc2_readl(hsotg, DOEPCTL(idx)); 3762 3763 if (epctrl & DXEPCTL_EPENA) { 3764 epctrl |= DXEPCTL_SNAK; 3765 epctrl |= DXEPCTL_EPDIS; 3766 dwc2_writel(hsotg, epctrl, DOEPCTL(idx)); 3767 } 3768 } 3769 3770 /* This interrupt bit is cleared in DXEPINT_EPDISBLD handler */ 3771 } 3772 3773 if (gintsts & GINTSTS_GINNAKEFF) { 3774 dev_info(hsotg->dev, "GINNakEff triggered\n"); 3775 3776 dwc2_set_bit(hsotg, DCTL, DCTL_CGNPINNAK); 3777 3778 dwc2_hsotg_dump(hsotg); 3779 } 3780 3781 if (gintsts & GINTSTS_INCOMPL_SOIN) 3782 dwc2_gadget_handle_incomplete_isoc_in(hsotg); 3783 3784 if (gintsts & GINTSTS_INCOMPL_SOOUT) 3785 dwc2_gadget_handle_incomplete_isoc_out(hsotg); 3786 3787 /* 3788 * if we've had fifo events, we should try and go around the 3789 * loop again to see if there's any point in returning yet. 3790 */ 3791 3792 if (gintsts & IRQ_RETRY_MASK && --retry_count > 0) 3793 goto irq_retry; 3794 3795 /* Check WKUP_ALERT interrupt*/ 3796 if (hsotg->params.service_interval) 3797 dwc2_gadget_wkup_alert_handler(hsotg); 3798 3799 spin_unlock(&hsotg->lock); 3800 3801 return IRQ_HANDLED; 3802 } 3803 3804 static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg, 3805 struct dwc2_hsotg_ep *hs_ep) 3806 { 3807 u32 epctrl_reg; 3808 u32 epint_reg; 3809 3810 epctrl_reg = hs_ep->dir_in ? DIEPCTL(hs_ep->index) : 3811 DOEPCTL(hs_ep->index); 3812 epint_reg = hs_ep->dir_in ? 
DIEPINT(hs_ep->index) : 3813 DOEPINT(hs_ep->index); 3814 3815 dev_dbg(hsotg->dev, "%s: stopping transfer on %s\n", __func__, 3816 hs_ep->name); 3817 3818 if (hs_ep->dir_in) { 3819 if (hsotg->dedicated_fifos || hs_ep->periodic) { 3820 dwc2_set_bit(hsotg, epctrl_reg, DXEPCTL_SNAK); 3821 /* Wait for Nak effect */ 3822 if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg, 3823 DXEPINT_INEPNAKEFF, 100)) 3824 dev_warn(hsotg->dev, 3825 "%s: timeout DIEPINT.NAKEFF\n", 3826 __func__); 3827 } else { 3828 dwc2_set_bit(hsotg, DCTL, DCTL_SGNPINNAK); 3829 /* Wait for Nak effect */ 3830 if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS, 3831 GINTSTS_GINNAKEFF, 100)) 3832 dev_warn(hsotg->dev, 3833 "%s: timeout GINTSTS.GINNAKEFF\n", 3834 __func__); 3835 } 3836 } else { 3837 if (!(dwc2_readl(hsotg, GINTSTS) & GINTSTS_GOUTNAKEFF)) 3838 dwc2_set_bit(hsotg, DCTL, DCTL_SGOUTNAK); 3839 3840 /* Wait for global nak to take effect */ 3841 if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS, 3842 GINTSTS_GOUTNAKEFF, 100)) 3843 dev_warn(hsotg->dev, "%s: timeout GINTSTS.GOUTNAKEFF\n", 3844 __func__); 3845 } 3846 3847 /* Disable ep */ 3848 dwc2_set_bit(hsotg, epctrl_reg, DXEPCTL_EPDIS | DXEPCTL_SNAK); 3849 3850 /* Wait for ep to be disabled */ 3851 if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg, DXEPINT_EPDISBLD, 100)) 3852 dev_warn(hsotg->dev, 3853 "%s: timeout DOEPCTL.EPDisable\n", __func__); 3854 3855 /* Clear EPDISBLD interrupt */ 3856 dwc2_set_bit(hsotg, epint_reg, DXEPINT_EPDISBLD); 3857 3858 if (hs_ep->dir_in) { 3859 unsigned short fifo_index; 3860 3861 if (hsotg->dedicated_fifos || hs_ep->periodic) 3862 fifo_index = hs_ep->fifo_index; 3863 else 3864 fifo_index = 0; 3865 3866 /* Flush TX FIFO */ 3867 dwc2_flush_tx_fifo(hsotg, fifo_index); 3868 3869 /* Clear Global In NP NAK in Shared FIFO for non periodic ep */ 3870 if (!hsotg->dedicated_fifos && !hs_ep->periodic) 3871 dwc2_set_bit(hsotg, DCTL, DCTL_CGNPINNAK); 3872 3873 } else { 3874 /* Remove global NAKs */ 3875 dwc2_set_bit(hsotg, DCTL, DCTL_CGOUTNAK); 3876 } 3877 } 3878 3879 /** 3880 * dwc2_hsotg_ep_enable - enable the given endpoint 3881 * @ep: The USB endpint to configure 3882 * @desc: The USB endpoint descriptor to configure with. 3883 * 3884 * This is called from the USB gadget code's usb_ep_enable(). 3885 */ 3886 static int dwc2_hsotg_ep_enable(struct usb_ep *ep, 3887 const struct usb_endpoint_descriptor *desc) 3888 { 3889 struct dwc2_hsotg_ep *hs_ep = our_ep(ep); 3890 struct dwc2_hsotg *hsotg = hs_ep->parent; 3891 unsigned long flags; 3892 unsigned int index = hs_ep->index; 3893 u32 epctrl_reg; 3894 u32 epctrl; 3895 u32 mps; 3896 u32 mc; 3897 u32 mask; 3898 unsigned int dir_in; 3899 unsigned int i, val, size; 3900 int ret = 0; 3901 unsigned char ep_type; 3902 3903 dev_dbg(hsotg->dev, 3904 "%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n", 3905 __func__, ep->name, desc->bEndpointAddress, desc->bmAttributes, 3906 desc->wMaxPacketSize, desc->bInterval); 3907 3908 /* not to be called for EP0 */ 3909 if (index == 0) { 3910 dev_err(hsotg->dev, "%s: called for EP 0\n", __func__); 3911 return -EINVAL; 3912 } 3913 3914 dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 
1 : 0; 3915 if (dir_in != hs_ep->dir_in) { 3916 dev_err(hsotg->dev, "%s: direction mismatch!\n", __func__); 3917 return -EINVAL; 3918 } 3919 3920 ep_type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; 3921 mps = usb_endpoint_maxp(desc); 3922 mc = usb_endpoint_maxp_mult(desc); 3923 3924 /* For ISOC IN in DDMA mode, only bInterval values up to 10 are supported */ 3925 if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC && 3926 dir_in && desc->bInterval > 10) { 3927 dev_err(hsotg->dev, 3928 "%s: ISOC IN, DDMA: bInterval>10 not supported!\n", __func__); 3929 return -EINVAL; 3930 } 3931 3932 /* High-bandwidth ISOC OUT is not supported in DDMA mode */ 3933 if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC && 3934 !dir_in && mc > 1) { 3935 dev_err(hsotg->dev, 3936 "%s: ISOC OUT, DDMA: HB not supported!\n", __func__); 3937 return -EINVAL; 3938 } 3939 3940 /* note, we handle this here instead of dwc2_hsotg_set_ep_maxpacket */ 3941 3942 epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index); 3943 epctrl = dwc2_readl(hsotg, epctrl_reg); 3944 3945 dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n", 3946 __func__, epctrl, epctrl_reg); 3947 3948 /* Allocate DMA descriptor chain for non-ctrl endpoints */ 3949 if (using_desc_dma(hsotg) && !hs_ep->desc_list) { 3950 hs_ep->desc_list = dmam_alloc_coherent(hsotg->dev, 3951 MAX_DMA_DESC_NUM_GENERIC * 3952 sizeof(struct dwc2_dma_desc), 3953 &hs_ep->desc_list_dma, GFP_ATOMIC); 3954 if (!hs_ep->desc_list) { 3955 ret = -ENOMEM; 3956 goto error2; 3957 } 3958 } 3959 3960 spin_lock_irqsave(&hsotg->lock, flags); 3961 3962 epctrl &= ~(DXEPCTL_EPTYPE_MASK | DXEPCTL_MPS_MASK); 3963 epctrl |= DXEPCTL_MPS(mps); 3964 3965 /* 3966 * mark the endpoint as active, otherwise the core may ignore 3967 * transactions entirely for this endpoint 3968 */ 3969 epctrl |= DXEPCTL_USBACTEP; 3970 3971 /* update the endpoint state */ 3972 dwc2_hsotg_set_ep_maxpacket(hsotg, hs_ep->index, mps, mc, dir_in); 3973 3974 /* default, set to non-periodic */ 3975 hs_ep->isochronous = 0; 3976 hs_ep->periodic = 0; 3977 hs_ep->halted = 0; 3978 hs_ep->interval = desc->bInterval; 3979 3980 switch (ep_type) { 3981 case USB_ENDPOINT_XFER_ISOC: 3982 epctrl |= DXEPCTL_EPTYPE_ISO; 3983 epctrl |= DXEPCTL_SETEVENFR; 3984 hs_ep->isochronous = 1; 3985 hs_ep->interval = 1 << (desc->bInterval - 1); 3986 hs_ep->target_frame = TARGET_FRAME_INITIAL; 3987 hs_ep->next_desc = 0; 3988 hs_ep->compl_desc = 0; 3989 if (dir_in) { 3990 hs_ep->periodic = 1; 3991 mask = dwc2_readl(hsotg, DIEPMSK); 3992 mask |= DIEPMSK_NAKMSK; 3993 dwc2_writel(hsotg, mask, DIEPMSK); 3994 } else { 3995 mask = dwc2_readl(hsotg, DOEPMSK); 3996 mask |= DOEPMSK_OUTTKNEPDISMSK; 3997 dwc2_writel(hsotg, mask, DOEPMSK); 3998 } 3999 break; 4000 4001 case USB_ENDPOINT_XFER_BULK: 4002 epctrl |= DXEPCTL_EPTYPE_BULK; 4003 break; 4004 4005 case USB_ENDPOINT_XFER_INT: 4006 if (dir_in) 4007 hs_ep->periodic = 1; 4008 4009 if (hsotg->gadget.speed == USB_SPEED_HIGH) 4010 hs_ep->interval = 1 << (desc->bInterval - 1); 4011 4012 epctrl |= DXEPCTL_EPTYPE_INTERRUPT; 4013 break; 4014 4015 case USB_ENDPOINT_XFER_CONTROL: 4016 epctrl |= DXEPCTL_EPTYPE_CONTROL; 4017 break; 4018 } 4019 4020 /* 4021 * if the hardware has dedicated fifos, we must give each IN EP 4022 * a unique tx-fifo even if it is non-periodic.
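 * (The loop below scans DPTXFSIZn for each FIFO index not yet claimed in hsotg->fifo_map and picks the smallest FIFO that still fits maxpacket * mc bytes; the winning index is programmed into DXEPCTL.TxFNum and remembered in hs_ep->fifo_index.)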
4023 */ 4024 if (dir_in && hsotg->dedicated_fifos) { 4025 u32 fifo_index = 0; 4026 u32 fifo_size = UINT_MAX; 4027 4028 size = hs_ep->ep.maxpacket * hs_ep->mc; 4029 for (i = 1; i < hsotg->num_of_eps; ++i) { 4030 if (hsotg->fifo_map & (1 << i)) 4031 continue; 4032 val = dwc2_readl(hsotg, DPTXFSIZN(i)); 4033 val = (val >> FIFOSIZE_DEPTH_SHIFT) * 4; 4034 if (val < size) 4035 continue; 4036 /* Search for smallest acceptable fifo */ 4037 if (val < fifo_size) { 4038 fifo_size = val; 4039 fifo_index = i; 4040 } 4041 } 4042 if (!fifo_index) { 4043 dev_err(hsotg->dev, 4044 "%s: No suitable fifo found\n", __func__); 4045 ret = -ENOMEM; 4046 goto error1; 4047 } 4048 epctrl &= ~(DXEPCTL_TXFNUM_LIMIT << DXEPCTL_TXFNUM_SHIFT); 4049 hsotg->fifo_map |= 1 << fifo_index; 4050 epctrl |= DXEPCTL_TXFNUM(fifo_index); 4051 hs_ep->fifo_index = fifo_index; 4052 hs_ep->fifo_size = fifo_size; 4053 } 4054 4055 /* for non-control endpoints, set PID to D0 */ 4056 if (index && !hs_ep->isochronous) 4057 epctrl |= DXEPCTL_SETD0PID; 4058 4059 /* WA for full-speed ISOC IN in DDMA mode. 4060 * By clearing the NAK status of the EP, the core will send a ZLP 4061 * in response to the IN token and assert the NAK interrupt relying 4062 * on TxFIFO status only. 4063 */ 4064 4065 if (hsotg->gadget.speed == USB_SPEED_FULL && 4066 hs_ep->isochronous && dir_in) { 4067 /* The WA applies only to core versions from 2.72a 4068 * to 4.00a (inclusive), and also to FS_IOT_1.00a 4069 * and HS_IOT_1.00a. 4070 */ 4071 u32 gsnpsid = dwc2_readl(hsotg, GSNPSID); 4072 4073 if ((gsnpsid >= DWC2_CORE_REV_2_72a && 4074 gsnpsid <= DWC2_CORE_REV_4_00a) || 4075 gsnpsid == DWC2_FS_IOT_REV_1_00a || 4076 gsnpsid == DWC2_HS_IOT_REV_1_00a) 4077 epctrl |= DXEPCTL_CNAK; 4078 } 4079 4080 dev_dbg(hsotg->dev, "%s: write DxEPCTL=0x%08x\n", 4081 __func__, epctrl); 4082 4083 dwc2_writel(hsotg, epctrl, epctrl_reg); 4084 dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x\n", 4085 __func__, dwc2_readl(hsotg, epctrl_reg)); 4086 4087 /* enable the endpoint interrupt */ 4088 dwc2_hsotg_ctrl_epint(hsotg, index, dir_in, 1); 4089 4090 error1: 4091 spin_unlock_irqrestore(&hsotg->lock, flags); 4092 4093 error2: 4094 if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) { 4095 dmam_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC * 4096 sizeof(struct dwc2_dma_desc), 4097 hs_ep->desc_list, hs_ep->desc_list_dma); 4098 hs_ep->desc_list = NULL; 4099 } 4100 4101 return ret; 4102 } 4103 4104 /** 4105 * dwc2_hsotg_ep_disable - disable the given endpoint 4106 * @ep: The endpoint to disable. 4107 */ 4108 static int dwc2_hsotg_ep_disable(struct usb_ep *ep) 4109 { 4110 struct dwc2_hsotg_ep *hs_ep = our_ep(ep); 4111 struct dwc2_hsotg *hsotg = hs_ep->parent; 4112 int dir_in = hs_ep->dir_in; 4113 int index = hs_ep->index; 4114 u32 epctrl_reg; 4115 u32 ctrl; 4116 4117 dev_dbg(hsotg->dev, "%s(ep %p)\n", __func__, ep); 4118 4119 if (ep == &hsotg->eps_out[0]->ep) { 4120 dev_err(hsotg->dev, "%s: called for ep0\n", __func__); 4121 return -EINVAL; 4122 } 4123 4124 if (hsotg->op_state != OTG_STATE_B_PERIPHERAL) { 4125 dev_err(hsotg->dev, "%s: called in host mode?\n", __func__); 4126 return -EINVAL; 4127 } 4128 4129 epctrl_reg = dir_in ?
DIEPCTL(index) : DOEPCTL(index); 4130 4131 ctrl = dwc2_readl(hsotg, epctrl_reg); 4132 4133 if (ctrl & DXEPCTL_EPENA) 4134 dwc2_hsotg_ep_stop_xfr(hsotg, hs_ep); 4135 4136 ctrl &= ~DXEPCTL_EPENA; 4137 ctrl &= ~DXEPCTL_USBACTEP; 4138 ctrl |= DXEPCTL_SNAK; 4139 4140 dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl); 4141 dwc2_writel(hsotg, ctrl, epctrl_reg); 4142 4143 /* disable endpoint interrupts */ 4144 dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 0); 4145 4146 /* terminate all requests with shutdown */ 4147 kill_all_requests(hsotg, hs_ep, -ESHUTDOWN); 4148 4149 hsotg->fifo_map &= ~(1 << hs_ep->fifo_index); 4150 hs_ep->fifo_index = 0; 4151 hs_ep->fifo_size = 0; 4152 4153 return 0; 4154 } 4155 4156 static int dwc2_hsotg_ep_disable_lock(struct usb_ep *ep) 4157 { 4158 struct dwc2_hsotg_ep *hs_ep = our_ep(ep); 4159 struct dwc2_hsotg *hsotg = hs_ep->parent; 4160 unsigned long flags; 4161 int ret; 4162 4163 spin_lock_irqsave(&hsotg->lock, flags); 4164 ret = dwc2_hsotg_ep_disable(ep); 4165 spin_unlock_irqrestore(&hsotg->lock, flags); 4166 return ret; 4167 } 4168 4169 /** 4170 * on_list - check whether a request is on the given endpoint's queue 4171 * @ep: The endpoint to check. 4172 * @test: The request to look for on the endpoint's queue. 4173 */ 4174 static bool on_list(struct dwc2_hsotg_ep *ep, struct dwc2_hsotg_req *test) 4175 { 4176 struct dwc2_hsotg_req *req, *treq; 4177 4178 list_for_each_entry_safe(req, treq, &ep->queue, queue) { 4179 if (req == test) 4180 return true; 4181 } 4182 4183 return false; 4184 } 4185 4186 /** 4187 * dwc2_hsotg_ep_dequeue - remove a request from an endpoint 4188 * @ep: The endpoint to dequeue from. 4189 * @req: The request to be removed from the queue. 4190 */ 4191 static int dwc2_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req) 4192 { 4193 struct dwc2_hsotg_req *hs_req = our_req(req); 4194 struct dwc2_hsotg_ep *hs_ep = our_ep(ep); 4195 struct dwc2_hsotg *hs = hs_ep->parent; 4196 unsigned long flags; 4197 4198 dev_dbg(hs->dev, "ep_dequeue(%p,%p)\n", ep, req); 4199 4200 spin_lock_irqsave(&hs->lock, flags); 4201 4202 if (!on_list(hs_ep, hs_req)) { 4203 spin_unlock_irqrestore(&hs->lock, flags); 4204 return -EINVAL; 4205 } 4206 4207 /* Stop the transfer if the request has already been started */ 4208 if (req == &hs_ep->req->req) 4209 dwc2_hsotg_ep_stop_xfr(hs, hs_ep); 4210 4211 dwc2_hsotg_complete_request(hs, hs_ep, hs_req, -ECONNRESET); 4212 spin_unlock_irqrestore(&hs->lock, flags); 4213 4214 return 0; 4215 } 4216 4217 /** 4218 * dwc2_hsotg_ep_sethalt - set halt on a given endpoint 4219 * @ep: The endpoint to set halt. 4220 * @value: Set or unset the halt. 4221 * @now: If true, stall the endpoint now. Otherwise return -EAGAIN if 4222 * the endpoint is busy processing requests. 4223 * 4224 * We need to stall the endpoint immediately if the request comes from the 4225 * set_feature protocol command handler.
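 * * A function driver normally reaches this through usb_ep_set_halt() or usb_ep_clear_halt(), which arrive here via the ops table with @now = false, e.g. (an illustrative sketch, not code from this driver): * * ret = usb_ep_set_halt(ep); (may return -EAGAIN while requests are queued)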
4226 */ 4227 static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now) 4228 { 4229 struct dwc2_hsotg_ep *hs_ep = our_ep(ep); 4230 struct dwc2_hsotg *hs = hs_ep->parent; 4231 int index = hs_ep->index; 4232 u32 epreg; 4233 u32 epctl; 4234 u32 xfertype; 4235 4236 dev_info(hs->dev, "%s(ep %p %s, %d)\n", __func__, ep, ep->name, value); 4237 4238 if (index == 0) { 4239 if (value) 4240 dwc2_hsotg_stall_ep0(hs); 4241 else 4242 dev_warn(hs->dev, 4243 "%s: can't clear halt on ep0\n", __func__); 4244 return 0; 4245 } 4246 4247 if (hs_ep->isochronous) { 4248 dev_err(hs->dev, "%s is Isochronous Endpoint\n", ep->name); 4249 return -EINVAL; 4250 } 4251 4252 if (!now && value && !list_empty(&hs_ep->queue)) { 4253 dev_dbg(hs->dev, "%s request is pending, cannot halt\n", 4254 ep->name); 4255 return -EAGAIN; 4256 } 4257 4258 if (hs_ep->dir_in) { 4259 epreg = DIEPCTL(index); 4260 epctl = dwc2_readl(hs, epreg); 4261 4262 if (value) { 4263 epctl |= DXEPCTL_STALL | DXEPCTL_SNAK; 4264 if (epctl & DXEPCTL_EPENA) 4265 epctl |= DXEPCTL_EPDIS; 4266 } else { 4267 epctl &= ~DXEPCTL_STALL; 4268 xfertype = epctl & DXEPCTL_EPTYPE_MASK; 4269 if (xfertype == DXEPCTL_EPTYPE_BULK || 4270 xfertype == DXEPCTL_EPTYPE_INTERRUPT) 4271 epctl |= DXEPCTL_SETD0PID; 4272 } 4273 dwc2_writel(hs, epctl, epreg); 4274 } else { 4275 epreg = DOEPCTL(index); 4276 epctl = dwc2_readl(hs, epreg); 4277 4278 if (value) { 4279 epctl |= DXEPCTL_STALL; 4280 } else { 4281 epctl &= ~DXEPCTL_STALL; 4282 xfertype = epctl & DXEPCTL_EPTYPE_MASK; 4283 if (xfertype == DXEPCTL_EPTYPE_BULK || 4284 xfertype == DXEPCTL_EPTYPE_INTERRUPT) 4285 epctl |= DXEPCTL_SETD0PID; 4286 } 4287 dwc2_writel(hs, epctl, epreg); 4288 } 4289 4290 hs_ep->halted = value; 4291 4292 return 0; 4293 } 4294 4295 /** 4296 * dwc2_hsotg_ep_sethalt_lock - set halt on a given endpoint with lock held 4297 * @ep: The endpoint to set halt. 4298 * @value: Set or unset the halt. 
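 * * This is the .set_halt operation exposed through dwc2_hsotg_ep_ops: it takes hsotg->lock and calls dwc2_hsotg_ep_sethalt() with @now = false, so it may return -EAGAIN while requests are still queued on @ep.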
4299 */ 4300 static int dwc2_hsotg_ep_sethalt_lock(struct usb_ep *ep, int value) 4301 { 4302 struct dwc2_hsotg_ep *hs_ep = our_ep(ep); 4303 struct dwc2_hsotg *hs = hs_ep->parent; 4304 unsigned long flags = 0; 4305 int ret = 0; 4306 4307 spin_lock_irqsave(&hs->lock, flags); 4308 ret = dwc2_hsotg_ep_sethalt(ep, value, false); 4309 spin_unlock_irqrestore(&hs->lock, flags); 4310 4311 return ret; 4312 } 4313 4314 static const struct usb_ep_ops dwc2_hsotg_ep_ops = { 4315 .enable = dwc2_hsotg_ep_enable, 4316 .disable = dwc2_hsotg_ep_disable_lock, 4317 .alloc_request = dwc2_hsotg_ep_alloc_request, 4318 .free_request = dwc2_hsotg_ep_free_request, 4319 .queue = dwc2_hsotg_ep_queue_lock, 4320 .dequeue = dwc2_hsotg_ep_dequeue, 4321 .set_halt = dwc2_hsotg_ep_sethalt_lock, 4322 /* note, don't believe we have any call for the fifo routines */ 4323 }; 4324 4325 /** 4326 * dwc2_hsotg_init - initialize the usb core 4327 * @hsotg: The driver state 4328 */ 4329 static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg) 4330 { 4331 u32 trdtim; 4332 u32 usbcfg; 4333 /* unmask subset of endpoint interrupts */ 4334 4335 dwc2_writel(hsotg, DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK | 4336 DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK, 4337 DIEPMSK); 4338 4339 dwc2_writel(hsotg, DOEPMSK_SETUPMSK | DOEPMSK_AHBERRMSK | 4340 DOEPMSK_EPDISBLDMSK | DOEPMSK_XFERCOMPLMSK, 4341 DOEPMSK); 4342 4343 dwc2_writel(hsotg, 0, DAINTMSK); 4344 4345 /* Be in disconnected state until gadget is registered */ 4346 dwc2_set_bit(hsotg, DCTL, DCTL_SFTDISCON); 4347 4348 /* setup fifos */ 4349 4350 dev_dbg(hsotg->dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n", 4351 dwc2_readl(hsotg, GRXFSIZ), 4352 dwc2_readl(hsotg, GNPTXFSIZ)); 4353 4354 dwc2_hsotg_init_fifo(hsotg); 4355 4356 /* keep other bits untouched (so e.g. forced modes are not lost) */ 4357 usbcfg = dwc2_readl(hsotg, GUSBCFG); 4358 usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP | 4359 GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK); 4360 4361 /* set the PLL on, remove the HNP/SRP and set the PHY */ 4362 trdtim = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5; 4363 usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) | 4364 (trdtim << GUSBCFG_USBTRDTIM_SHIFT); 4365 dwc2_writel(hsotg, usbcfg, GUSBCFG); 4366 4367 if (using_dma(hsotg)) 4368 dwc2_set_bit(hsotg, GAHBCFG, GAHBCFG_DMA_EN); 4369 } 4370 4371 /** 4372 * dwc2_hsotg_udc_start - prepare the udc for work 4373 * @gadget: The usb gadget state 4374 * @driver: The usb gadget driver 4375 * 4376 * Perform initialization to prepare udc device and driver 4377 * to work. 
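 * * This is the .udc_start operation, invoked by the UDC core when a gadget (function) driver binds to this controller, e.g. (illustrative only; my_gadget_driver stands in for the function driver's struct usb_gadget_driver): * * ret = usb_gadget_probe_driver(&my_gadget_driver);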
4378 */ 4379 static int dwc2_hsotg_udc_start(struct usb_gadget *gadget, 4380 struct usb_gadget_driver *driver) 4381 { 4382 struct dwc2_hsotg *hsotg = to_hsotg(gadget); 4383 unsigned long flags; 4384 int ret; 4385 4386 if (!hsotg) { 4387 pr_err("%s: called with no device\n", __func__); 4388 return -ENODEV; 4389 } 4390 4391 if (!driver) { 4392 dev_err(hsotg->dev, "%s: no driver\n", __func__); 4393 return -EINVAL; 4394 } 4395 4396 if (driver->max_speed < USB_SPEED_FULL) 4397 dev_err(hsotg->dev, "%s: bad speed\n", __func__); 4398 4399 if (!driver->setup) { 4400 dev_err(hsotg->dev, "%s: missing entry points\n", __func__); 4401 return -EINVAL; 4402 } 4403 4404 WARN_ON(hsotg->driver); 4405 4406 driver->driver.bus = NULL; 4407 hsotg->driver = driver; 4408 hsotg->gadget.dev.of_node = hsotg->dev->of_node; 4409 hsotg->gadget.speed = USB_SPEED_UNKNOWN; 4410 4411 if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) { 4412 ret = dwc2_lowlevel_hw_enable(hsotg); 4413 if (ret) 4414 goto err; 4415 } 4416 4417 if (!IS_ERR_OR_NULL(hsotg->uphy)) 4418 otg_set_peripheral(hsotg->uphy->otg, &hsotg->gadget); 4419 4420 spin_lock_irqsave(&hsotg->lock, flags); 4421 if (dwc2_hw_is_device(hsotg)) { 4422 dwc2_hsotg_init(hsotg); 4423 dwc2_hsotg_core_init_disconnected(hsotg, false); 4424 } 4425 4426 hsotg->enabled = 0; 4427 spin_unlock_irqrestore(&hsotg->lock, flags); 4428 4429 gadget->sg_supported = using_desc_dma(hsotg); 4430 dev_info(hsotg->dev, "bound driver %s\n", driver->driver.name); 4431 4432 return 0; 4433 4434 err: 4435 hsotg->driver = NULL; 4436 return ret; 4437 } 4438 4439 /** 4440 * dwc2_hsotg_udc_stop - stop the udc 4441 * @gadget: The usb gadget state 4442 * 4443 * Stop the udc hw block and stay tuned for future transmissions 4444 */ 4445 static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget) 4446 { 4447 struct dwc2_hsotg *hsotg = to_hsotg(gadget); 4448 unsigned long flags = 0; 4449 int ep; 4450 4451 if (!hsotg) 4452 return -ENODEV; 4453 4454 /* all endpoints should be shut down */ 4455 for (ep = 1; ep < hsotg->num_of_eps; ep++) { 4456 if (hsotg->eps_in[ep]) 4457 dwc2_hsotg_ep_disable_lock(&hsotg->eps_in[ep]->ep); 4458 if (hsotg->eps_out[ep]) 4459 dwc2_hsotg_ep_disable_lock(&hsotg->eps_out[ep]->ep); 4460 } 4461 4462 spin_lock_irqsave(&hsotg->lock, flags); 4463 4464 hsotg->driver = NULL; 4465 hsotg->gadget.speed = USB_SPEED_UNKNOWN; 4466 hsotg->enabled = 0; 4467 4468 spin_unlock_irqrestore(&hsotg->lock, flags); 4469 4470 if (!IS_ERR_OR_NULL(hsotg->uphy)) 4471 otg_set_peripheral(hsotg->uphy->otg, NULL); 4472 4473 if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) 4474 dwc2_lowlevel_hw_disable(hsotg); 4475 4476 return 0; 4477 } 4478 4479 /** 4480 * dwc2_hsotg_gadget_getframe - read the frame number 4481 * @gadget: The usb gadget state 4482 * 4483 * Read the (micro)frame number 4484 */ 4485 static int dwc2_hsotg_gadget_getframe(struct usb_gadget *gadget) 4486 { 4487 return dwc2_hsotg_read_frameno(to_hsotg(gadget)); 4488 } 4489 4490 /** 4491 * dwc2_hsotg_pullup - connect/disconnect the USB PHY pullup 4492 * @gadget: The usb gadget state 4493 * @is_on: The desired state of the pullup 4494 * 4495 * Connect/Disconnect the USB PHY pullup 4496 */ 4497 static int dwc2_hsotg_pullup(struct usb_gadget *gadget, int is_on) 4498 { 4499 struct dwc2_hsotg *hsotg = to_hsotg(gadget); 4500 unsigned long flags = 0; 4501 4502 dev_dbg(hsotg->dev, "%s: is_on: %d op_state: %d\n", __func__, is_on, 4503 hsotg->op_state); 4504 4505 /* Don't modify pullup state while in host mode */ 4506 if (hsotg->op_state != OTG_STATE_B_PERIPHERAL) { 4507 hsotg->enabled =
is_on; 4508 return 0; 4509 } 4510 4511 spin_lock_irqsave(&hsotg->lock, flags); 4512 if (is_on) { 4513 hsotg->enabled = 1; 4514 dwc2_hsotg_core_init_disconnected(hsotg, false); 4515 /* Enable ACG feature in device mode, if supported */ 4516 dwc2_enable_acg(hsotg); 4517 dwc2_hsotg_core_connect(hsotg); 4518 } else { 4519 dwc2_hsotg_core_disconnect(hsotg); 4520 dwc2_hsotg_disconnect(hsotg); 4521 hsotg->enabled = 0; 4522 } 4523 4524 hsotg->gadget.speed = USB_SPEED_UNKNOWN; 4525 spin_unlock_irqrestore(&hsotg->lock, flags); 4526 4527 return 0; 4528 } 4529 4530 static int dwc2_hsotg_vbus_session(struct usb_gadget *gadget, int is_active) 4531 { 4532 struct dwc2_hsotg *hsotg = to_hsotg(gadget); 4533 unsigned long flags; 4534 4535 dev_dbg(hsotg->dev, "%s: is_active: %d\n", __func__, is_active); 4536 spin_lock_irqsave(&hsotg->lock, flags); 4537 4538 /* 4539 * If the controller is hibernated, it must exit from power down 4540 * before being initialized or de-initialized 4541 */ 4542 if (hsotg->lx_state == DWC2_L2) 4543 dwc2_exit_partial_power_down(hsotg, false); 4544 4545 if (is_active) { 4546 hsotg->op_state = OTG_STATE_B_PERIPHERAL; 4547 4548 dwc2_hsotg_core_init_disconnected(hsotg, false); 4549 if (hsotg->enabled) { 4550 /* Enable ACG feature in device mode, if supported */ 4551 dwc2_enable_acg(hsotg); 4552 dwc2_hsotg_core_connect(hsotg); 4553 } 4554 } else { 4555 dwc2_hsotg_core_disconnect(hsotg); 4556 dwc2_hsotg_disconnect(hsotg); 4557 } 4558 4559 spin_unlock_irqrestore(&hsotg->lock, flags); 4560 return 0; 4561 } 4562 4563 /** 4564 * dwc2_hsotg_vbus_draw - report bMaxPower field 4565 * @gadget: The usb gadget state 4566 * @mA: Amount of current 4567 * 4568 * Report to the PHY how much power the device may consume. 4569 */ 4570 static int dwc2_hsotg_vbus_draw(struct usb_gadget *gadget, unsigned int mA) 4571 { 4572 struct dwc2_hsotg *hsotg = to_hsotg(gadget); 4573 4574 if (IS_ERR_OR_NULL(hsotg->uphy)) 4575 return -ENOTSUPP; 4576 return usb_phy_set_power(hsotg->uphy, mA); 4577 } 4578 4579 static const struct usb_gadget_ops dwc2_hsotg_gadget_ops = { 4580 .get_frame = dwc2_hsotg_gadget_getframe, 4581 .udc_start = dwc2_hsotg_udc_start, 4582 .udc_stop = dwc2_hsotg_udc_stop, 4583 .pullup = dwc2_hsotg_pullup, 4584 .vbus_session = dwc2_hsotg_vbus_session, 4585 .vbus_draw = dwc2_hsotg_vbus_draw, 4586 }; 4587 4588 /** 4589 * dwc2_hsotg_initep - initialise a single endpoint 4590 * @hsotg: The device state. 4591 * @hs_ep: The endpoint to be initialised. 4592 * @epnum: The endpoint number 4593 * @dir_in: True if direction is in. 4594 * 4595 * Initialise the given endpoint (as part of the probe and device state 4596 * creation) to give to the gadget driver. Set up the endpoint name, any 4597 * direction information and other state that may be required.
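 * * For example, @epnum = 1 with @dir_in = true yields an endpoint named "ep1in" whose maxpacket is capped at 1024 (8 when hsotg->params.speed is DWC2_SPEED_PARAM_LOW), linked onto hsotg->gadget.ep_list for the gadget core to claim.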
4598 */ 4599 static void dwc2_hsotg_initep(struct dwc2_hsotg *hsotg, 4600 struct dwc2_hsotg_ep *hs_ep, 4601 int epnum, 4602 bool dir_in) 4603 { 4604 char *dir; 4605 4606 if (epnum == 0) 4607 dir = ""; 4608 else if (dir_in) 4609 dir = "in"; 4610 else 4611 dir = "out"; 4612 4613 hs_ep->dir_in = dir_in; 4614 hs_ep->index = epnum; 4615 4616 snprintf(hs_ep->name, sizeof(hs_ep->name), "ep%d%s", epnum, dir); 4617 4618 INIT_LIST_HEAD(&hs_ep->queue); 4619 INIT_LIST_HEAD(&hs_ep->ep.ep_list); 4620 4621 /* add to the list of endpoints known by the gadget driver */ 4622 if (epnum) 4623 list_add_tail(&hs_ep->ep.ep_list, &hsotg->gadget.ep_list); 4624 4625 hs_ep->parent = hsotg; 4626 hs_ep->ep.name = hs_ep->name; 4627 4628 if (hsotg->params.speed == DWC2_SPEED_PARAM_LOW) 4629 usb_ep_set_maxpacket_limit(&hs_ep->ep, 8); 4630 else 4631 usb_ep_set_maxpacket_limit(&hs_ep->ep, 4632 epnum ? 1024 : EP0_MPS_LIMIT); 4633 hs_ep->ep.ops = &dwc2_hsotg_ep_ops; 4634 4635 if (epnum == 0) { 4636 hs_ep->ep.caps.type_control = true; 4637 } else { 4638 if (hsotg->params.speed != DWC2_SPEED_PARAM_LOW) { 4639 hs_ep->ep.caps.type_iso = true; 4640 hs_ep->ep.caps.type_bulk = true; 4641 } 4642 hs_ep->ep.caps.type_int = true; 4643 } 4644 4645 if (dir_in) 4646 hs_ep->ep.caps.dir_in = true; 4647 else 4648 hs_ep->ep.caps.dir_out = true; 4649 4650 /* 4651 * if we're using dma, we need to set the next-endpoint pointer 4652 * to be something valid. 4653 */ 4654 4655 if (using_dma(hsotg)) { 4656 u32 next = DXEPCTL_NEXTEP((epnum + 1) % 15); 4657 4658 if (dir_in) 4659 dwc2_writel(hsotg, next, DIEPCTL(epnum)); 4660 else 4661 dwc2_writel(hsotg, next, DOEPCTL(epnum)); 4662 } 4663 } 4664 4665 /** 4666 * dwc2_hsotg_hw_cfg - read HW configuration registers 4667 * @hsotg: Programming view of the DWC_otg controller 4668 * 4669 * Read the USB core HW configuration registers 4670 */ 4671 static int dwc2_hsotg_hw_cfg(struct dwc2_hsotg *hsotg) 4672 { 4673 u32 cfg; 4674 u32 ep_type; 4675 u32 i; 4676 4677 /* check hardware configuration */ 4678 4679 hsotg->num_of_eps = hsotg->hw_params.num_dev_ep; 4680 4681 /* Add ep0 */ 4682 hsotg->num_of_eps++; 4683 4684 hsotg->eps_in[0] = devm_kzalloc(hsotg->dev, 4685 sizeof(struct dwc2_hsotg_ep), 4686 GFP_KERNEL); 4687 if (!hsotg->eps_in[0]) 4688 return -ENOMEM; 4689 /* Same dwc2_hsotg_ep is used in both directions for ep0 */ 4690 hsotg->eps_out[0] = hsotg->eps_in[0]; 4691 4692 cfg = hsotg->hw_params.dev_ep_dirs; 4693 for (i = 1, cfg >>= 2; i < hsotg->num_of_eps; i++, cfg >>= 2) { 4694 ep_type = cfg & 3; 4695 /* Direction in or both */ 4696 if (!(ep_type & 2)) { 4697 hsotg->eps_in[i] = devm_kzalloc(hsotg->dev, 4698 sizeof(struct dwc2_hsotg_ep), GFP_KERNEL); 4699 if (!hsotg->eps_in[i]) 4700 return -ENOMEM; 4701 } 4702 /* Direction out or both */ 4703 if (!(ep_type & 1)) { 4704 hsotg->eps_out[i] = devm_kzalloc(hsotg->dev, 4705 sizeof(struct dwc2_hsotg_ep), GFP_KERNEL); 4706 if (!hsotg->eps_out[i]) 4707 return -ENOMEM; 4708 } 4709 } 4710 4711 hsotg->fifo_mem = hsotg->hw_params.total_fifo_size; 4712 hsotg->dedicated_fifos = hsotg->hw_params.en_multiple_tx_fifo; 4713 4714 dev_info(hsotg->dev, "EPs: %d, %s fifos, %d entries in SPRAM\n", 4715 hsotg->num_of_eps, 4716 hsotg->dedicated_fifos ? 
"dedicated" : "shared", 4717 hsotg->fifo_mem); 4718 return 0; 4719 } 4720 4721 /** 4722 * dwc2_hsotg_dump - dump state of the udc 4723 * @hsotg: Programming view of the DWC_otg controller 4724 * 4725 */ 4726 static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg) 4727 { 4728 #ifdef DEBUG 4729 struct device *dev = hsotg->dev; 4730 u32 val; 4731 int idx; 4732 4733 dev_info(dev, "DCFG=0x%08x, DCTL=0x%08x, DIEPMSK=%08x\n", 4734 dwc2_readl(hsotg, DCFG), dwc2_readl(hsotg, DCTL), 4735 dwc2_readl(hsotg, DIEPMSK)); 4736 4737 dev_info(dev, "GAHBCFG=0x%08x, GHWCFG1=0x%08x\n", 4738 dwc2_readl(hsotg, GAHBCFG), dwc2_readl(hsotg, GHWCFG1)); 4739 4740 dev_info(dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n", 4741 dwc2_readl(hsotg, GRXFSIZ), dwc2_readl(hsotg, GNPTXFSIZ)); 4742 4743 /* show periodic fifo settings */ 4744 4745 for (idx = 1; idx < hsotg->num_of_eps; idx++) { 4746 val = dwc2_readl(hsotg, DPTXFSIZN(idx)); 4747 dev_info(dev, "DPTx[%d] FSize=%d, StAddr=0x%08x\n", idx, 4748 val >> FIFOSIZE_DEPTH_SHIFT, 4749 val & FIFOSIZE_STARTADDR_MASK); 4750 } 4751 4752 for (idx = 0; idx < hsotg->num_of_eps; idx++) { 4753 dev_info(dev, 4754 "ep%d-in: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", idx, 4755 dwc2_readl(hsotg, DIEPCTL(idx)), 4756 dwc2_readl(hsotg, DIEPTSIZ(idx)), 4757 dwc2_readl(hsotg, DIEPDMA(idx))); 4758 4759 val = dwc2_readl(hsotg, DOEPCTL(idx)); 4760 dev_info(dev, 4761 "ep%d-out: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", 4762 idx, dwc2_readl(hsotg, DOEPCTL(idx)), 4763 dwc2_readl(hsotg, DOEPTSIZ(idx)), 4764 dwc2_readl(hsotg, DOEPDMA(idx))); 4765 } 4766 4767 dev_info(dev, "DVBUSDIS=0x%08x, DVBUSPULSE=%08x\n", 4768 dwc2_readl(hsotg, DVBUSDIS), dwc2_readl(hsotg, DVBUSPULSE)); 4769 #endif 4770 } 4771 4772 /** 4773 * dwc2_gadget_init - init function for gadget 4774 * @hsotg: Programming view of the DWC_otg controller 4775 * 4776 */ 4777 int dwc2_gadget_init(struct dwc2_hsotg *hsotg) 4778 { 4779 struct device *dev = hsotg->dev; 4780 int epnum; 4781 int ret; 4782 4783 /* Dump fifo information */ 4784 dev_dbg(dev, "NonPeriodic TXFIFO size: %d\n", 4785 hsotg->params.g_np_tx_fifo_size); 4786 dev_dbg(dev, "RXFIFO size: %d\n", hsotg->params.g_rx_fifo_size); 4787 4788 hsotg->gadget.max_speed = USB_SPEED_HIGH; 4789 hsotg->gadget.ops = &dwc2_hsotg_gadget_ops; 4790 hsotg->gadget.name = dev_name(dev); 4791 hsotg->remote_wakeup_allowed = 0; 4792 4793 if (hsotg->params.lpm) 4794 hsotg->gadget.lpm_capable = true; 4795 4796 if (hsotg->dr_mode == USB_DR_MODE_OTG) 4797 hsotg->gadget.is_otg = 1; 4798 else if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) 4799 hsotg->op_state = OTG_STATE_B_PERIPHERAL; 4800 4801 ret = dwc2_hsotg_hw_cfg(hsotg); 4802 if (ret) { 4803 dev_err(hsotg->dev, "Hardware configuration failed: %d\n", ret); 4804 return ret; 4805 } 4806 4807 hsotg->ctrl_buff = devm_kzalloc(hsotg->dev, 4808 DWC2_CTRL_BUFF_SIZE, GFP_KERNEL); 4809 if (!hsotg->ctrl_buff) 4810 return -ENOMEM; 4811 4812 hsotg->ep0_buff = devm_kzalloc(hsotg->dev, 4813 DWC2_CTRL_BUFF_SIZE, GFP_KERNEL); 4814 if (!hsotg->ep0_buff) 4815 return -ENOMEM; 4816 4817 if (using_desc_dma(hsotg)) { 4818 ret = dwc2_gadget_alloc_ctrl_desc_chains(hsotg); 4819 if (ret < 0) 4820 return ret; 4821 } 4822 4823 ret = devm_request_irq(hsotg->dev, hsotg->irq, dwc2_hsotg_irq, 4824 IRQF_SHARED, dev_name(hsotg->dev), hsotg); 4825 if (ret < 0) { 4826 dev_err(dev, "cannot claim IRQ for gadget\n"); 4827 return ret; 4828 } 4829 4830 /* hsotg->num_of_eps holds number of EPs other than ep0 */ 4831 4832 if (hsotg->num_of_eps == 0) { 4833 dev_err(dev, "wrong number of EPs (zero)\n"); 4834 
return -EINVAL; 4835 } 4836 4837 /* setup endpoint information */ 4838 4839 INIT_LIST_HEAD(&hsotg->gadget.ep_list); 4840 hsotg->gadget.ep0 = &hsotg->eps_out[0]->ep; 4841 4842 /* allocate EP0 request */ 4843 4844 hsotg->ctrl_req = dwc2_hsotg_ep_alloc_request(&hsotg->eps_out[0]->ep, 4845 GFP_KERNEL); 4846 if (!hsotg->ctrl_req) { 4847 dev_err(dev, "failed to allocate ctrl req\n"); 4848 return -ENOMEM; 4849 } 4850 4851 /* initialise the endpoints now the core has been initialised */ 4852 for (epnum = 0; epnum < hsotg->num_of_eps; epnum++) { 4853 if (hsotg->eps_in[epnum]) 4854 dwc2_hsotg_initep(hsotg, hsotg->eps_in[epnum], 4855 epnum, 1); 4856 if (hsotg->eps_out[epnum]) 4857 dwc2_hsotg_initep(hsotg, hsotg->eps_out[epnum], 4858 epnum, 0); 4859 } 4860 4861 ret = usb_add_gadget_udc(dev, &hsotg->gadget); 4862 if (ret) { 4863 dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep, 4864 hsotg->ctrl_req); 4865 return ret; 4866 } 4867 dwc2_hsotg_dump(hsotg); 4868 4869 return 0; 4870 } 4871 4872 /** 4873 * dwc2_hsotg_remove - remove function for hsotg driver 4874 * @hsotg: Programming view of the DWC_otg controller 4875 * 4876 */ 4877 int dwc2_hsotg_remove(struct dwc2_hsotg *hsotg) 4878 { 4879 usb_del_gadget_udc(&hsotg->gadget); 4880 dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep, hsotg->ctrl_req); 4881 4882 return 0; 4883 } 4884 4885 int dwc2_hsotg_suspend(struct dwc2_hsotg *hsotg) 4886 { 4887 unsigned long flags; 4888 4889 if (hsotg->lx_state != DWC2_L0) 4890 return 0; 4891 4892 if (hsotg->driver) { 4893 int ep; 4894 4895 dev_info(hsotg->dev, "suspending usb gadget %s\n", 4896 hsotg->driver->driver.name); 4897 4898 spin_lock_irqsave(&hsotg->lock, flags); 4899 if (hsotg->enabled) 4900 dwc2_hsotg_core_disconnect(hsotg); 4901 dwc2_hsotg_disconnect(hsotg); 4902 hsotg->gadget.speed = USB_SPEED_UNKNOWN; 4903 spin_unlock_irqrestore(&hsotg->lock, flags); 4904 4905 for (ep = 0; ep < hsotg->num_of_eps; ep++) { 4906 if (hsotg->eps_in[ep]) 4907 dwc2_hsotg_ep_disable_lock(&hsotg->eps_in[ep]->ep); 4908 if (hsotg->eps_out[ep]) 4909 dwc2_hsotg_ep_disable_lock(&hsotg->eps_out[ep]->ep); 4910 } 4911 } 4912 4913 return 0; 4914 } 4915 4916 int dwc2_hsotg_resume(struct dwc2_hsotg *hsotg) 4917 { 4918 unsigned long flags; 4919 4920 if (hsotg->lx_state == DWC2_L2) 4921 return 0; 4922 4923 if (hsotg->driver) { 4924 dev_info(hsotg->dev, "resuming usb gadget %s\n", 4925 hsotg->driver->driver.name); 4926 4927 spin_lock_irqsave(&hsotg->lock, flags); 4928 dwc2_hsotg_core_init_disconnected(hsotg, false); 4929 if (hsotg->enabled) { 4930 /* Enable ACG feature in device mode, if supported */ 4931 dwc2_enable_acg(hsotg); 4932 dwc2_hsotg_core_connect(hsotg); 4933 } 4934 spin_unlock_irqrestore(&hsotg->lock, flags); 4935 } 4936 4937 return 0; 4938 } 4939 4940 /** 4941 * dwc2_backup_device_registers() - Backup controller device registers. 4942 * When suspending the usb bus, registers need to be backed up 4943 * if controller power is disabled once suspended.
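 * The snapshot is kept in hsotg->dr_backup: DCFG, DCTL, DAINTMSK, DIEPMSK and DOEPMSK, plus the per-endpoint DxEPCTL/DxEPTSIZ/DxEPDMA and DPTXFSIZn values, and is flagged valid for a later dwc2_restore_device_registers() call.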
4944 * 4945 * @hsotg: Programming view of the DWC_otg controller 4946 */ 4947 int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg) 4948 { 4949 struct dwc2_dregs_backup *dr; 4950 int i; 4951 4952 dev_dbg(hsotg->dev, "%s\n", __func__); 4953 4954 /* Backup dev regs */ 4955 dr = &hsotg->dr_backup; 4956 4957 dr->dcfg = dwc2_readl(hsotg, DCFG); 4958 dr->dctl = dwc2_readl(hsotg, DCTL); 4959 dr->daintmsk = dwc2_readl(hsotg, DAINTMSK); 4960 dr->diepmsk = dwc2_readl(hsotg, DIEPMSK); 4961 dr->doepmsk = dwc2_readl(hsotg, DOEPMSK); 4962 4963 for (i = 0; i < hsotg->num_of_eps; i++) { 4964 /* Backup IN EPs */ 4965 dr->diepctl[i] = dwc2_readl(hsotg, DIEPCTL(i)); 4966 4967 /* Ensure DATA PID is correctly configured */ 4968 if (dr->diepctl[i] & DXEPCTL_DPID) 4969 dr->diepctl[i] |= DXEPCTL_SETD1PID; 4970 else 4971 dr->diepctl[i] |= DXEPCTL_SETD0PID; 4972 4973 dr->dieptsiz[i] = dwc2_readl(hsotg, DIEPTSIZ(i)); 4974 dr->diepdma[i] = dwc2_readl(hsotg, DIEPDMA(i)); 4975 4976 /* Backup OUT EPs */ 4977 dr->doepctl[i] = dwc2_readl(hsotg, DOEPCTL(i)); 4978 4979 /* Ensure DATA PID is correctly configured */ 4980 if (dr->doepctl[i] & DXEPCTL_DPID) 4981 dr->doepctl[i] |= DXEPCTL_SETD1PID; 4982 else 4983 dr->doepctl[i] |= DXEPCTL_SETD0PID; 4984 4985 dr->doeptsiz[i] = dwc2_readl(hsotg, DOEPTSIZ(i)); 4986 dr->doepdma[i] = dwc2_readl(hsotg, DOEPDMA(i)); 4987 dr->dtxfsiz[i] = dwc2_readl(hsotg, DPTXFSIZN(i)); 4988 } 4989 dr->valid = true; 4990 return 0; 4991 } 4992 4993 /** 4994 * dwc2_restore_device_registers() - Restore controller device registers. 4995 * When resuming the usb bus, device registers need to be restored 4996 * if controller power was disabled. 4997 * 4998 * @hsotg: Programming view of the DWC_otg controller 4999 * @remote_wakeup: Indicates whether resume is initiated by Device or Host. 5000 * 5001 * Return: 0 if successful, negative error code otherwise 5002 */ 5003 int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, int remote_wakeup) 5004 { 5005 struct dwc2_dregs_backup *dr; 5006 int i; 5007 5008 dev_dbg(hsotg->dev, "%s\n", __func__); 5009 5010 /* Restore dev regs */ 5011 dr = &hsotg->dr_backup; 5012 if (!dr->valid) { 5013 dev_err(hsotg->dev, "%s: no device registers to restore\n", 5014 __func__); 5015 return -EINVAL; 5016 } 5017 dr->valid = false; 5018 5019 if (!remote_wakeup) 5020 dwc2_writel(hsotg, dr->dctl, DCTL); 5021 5022 dwc2_writel(hsotg, dr->daintmsk, DAINTMSK); 5023 dwc2_writel(hsotg, dr->diepmsk, DIEPMSK); 5024 dwc2_writel(hsotg, dr->doepmsk, DOEPMSK); 5025 5026 for (i = 0; i < hsotg->num_of_eps; i++) { 5027 /* Restore IN EPs */ 5028 dwc2_writel(hsotg, dr->dieptsiz[i], DIEPTSIZ(i)); 5029 dwc2_writel(hsotg, dr->diepdma[i], DIEPDMA(i)); 5030 dwc2_writel(hsotg, dr->doeptsiz[i], DOEPTSIZ(i)); 5031 /* WA for enabled IN EPs in DDMA mode: on entering 5032 * hibernation a wrong value is read and saved from DIEPDMAx; 5033 * restoring it from the saved area on hibernation exit 5034 * would assert a BNA interrupt, so substitute the descriptor-list address. 5035 */ 5036 if (hsotg->params.g_dma_desc && 5037 (dr->diepctl[i] & DXEPCTL_EPENA)) 5038 dr->diepdma[i] = hsotg->eps_in[i]->desc_list_dma; 5039 dwc2_writel(hsotg, dr->dtxfsiz[i], DPTXFSIZN(i)); 5040 dwc2_writel(hsotg, dr->diepctl[i], DIEPCTL(i)); 5041 /* Restore OUT EPs */ 5042 dwc2_writel(hsotg, dr->doeptsiz[i], DOEPTSIZ(i)); 5043 /* WA for enabled OUT EPs in DDMA mode: on entering 5044 * hibernation a wrong value is read and saved from DOEPDMAx; 5045 * restoring it from the saved area on hibernation exit 5046 * would assert a BNA interrupt.
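 * Instead, point the saved value at the endpoint's descriptor-list address, mirroring the IN-endpoint fix above.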
5047 */ 5048 if (hsotg->params.g_dma_desc && 5049 (dr->doepctl[i] & DXEPCTL_EPENA)) 5050 dr->doepdma[i] = hsotg->eps_out[i]->desc_list_dma; 5051 dwc2_writel(hsotg, dr->doepdma[i], DOEPDMA(i)); 5052 dwc2_writel(hsotg, dr->doepctl[i], DOEPCTL(i)); 5053 } 5054 5055 return 0; 5056 } 5057 5058 /** 5059 * dwc2_gadget_init_lpm - Configure the core to support LPM in device mode 5060 * 5061 * @hsotg: Programming view of DWC_otg controller 5062 * 5063 */ 5064 void dwc2_gadget_init_lpm(struct dwc2_hsotg *hsotg) 5065 { 5066 u32 val; 5067 5068 if (!hsotg->params.lpm) 5069 return; 5070 5071 val = GLPMCFG_LPMCAP | GLPMCFG_APPL1RES; 5072 val |= hsotg->params.hird_threshold_en ? GLPMCFG_HIRD_THRES_EN : 0; 5073 val |= hsotg->params.lpm_clock_gating ? GLPMCFG_ENBLSLPM : 0; 5074 val |= hsotg->params.hird_threshold << GLPMCFG_HIRD_THRES_SHIFT; 5075 val |= hsotg->params.besl ? GLPMCFG_ENBESL : 0; 5076 val |= GLPMCFG_LPM_ACCEPT_CTRL_ISOC; 5077 dwc2_writel(hsotg, val, GLPMCFG); 5078 dev_dbg(hsotg->dev, "GLPMCFG=0x%08x\n", dwc2_readl(hsotg, GLPMCFG)); 5079 5080 /* Unmask WKUP_ALERT Interrupt */ 5081 if (hsotg->params.service_interval) 5082 dwc2_set_bit(hsotg, GINTMSK2, GINTMSK2_WKUP_ALERT_INT_MSK); 5083 } 5084 5085 /** 5086 * dwc2_gadget_program_ref_clk - Program GREFCLK register in device mode 5087 * 5088 * @hsotg: Programming view of DWC_otg controller 5089 * 5090 */ 5091 void dwc2_gadget_program_ref_clk(struct dwc2_hsotg *hsotg) 5092 { 5093 u32 val = 0; 5094 5095 val |= GREFCLK_REF_CLK_MODE; 5096 val |= hsotg->params.ref_clk_per << GREFCLK_REFCLKPER_SHIFT; 5097 val |= hsotg->params.sof_cnt_wkup_alert << 5098 GREFCLK_SOF_CNT_WKUP_ALERT_SHIFT; 5099 5100 dwc2_writel(hsotg, val, GREFCLK); 5101 dev_dbg(hsotg->dev, "GREFCLK=0x%08x\n", dwc2_readl(hsotg, GREFCLK)); 5102 } 5103 5104 /** 5105 * dwc2_gadget_enter_hibernation() - Put controller in Hibernation. 5106 * 5107 * @hsotg: Programming view of the DWC_otg controller 5108 * 5109 * Return non-zero if we failed to enter hibernation.
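 * * In outline: the global and device register state is backed up, then GPWRDN is stepped through with a ~10us delay after each write: keep the PMU active, enable its interrupts, unmask the device mode wakeup interrupts, assert the power-down clamp and finally switch off VDD.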
5110 */ 5111 int dwc2_gadget_enter_hibernation(struct dwc2_hsotg *hsotg) 5112 { 5113 u32 gpwrdn; 5114 int ret = 0; 5115 5116 /* Change to L2 (suspend) state */ 5117 hsotg->lx_state = DWC2_L2; 5118 dev_dbg(hsotg->dev, "Start of hibernation completed\n"); 5119 ret = dwc2_backup_global_registers(hsotg); 5120 if (ret) { 5121 dev_err(hsotg->dev, "%s: failed to backup global registers\n", 5122 __func__); 5123 return ret; 5124 } 5125 ret = dwc2_backup_device_registers(hsotg); 5126 if (ret) { 5127 dev_err(hsotg->dev, "%s: failed to backup device registers\n", 5128 __func__); 5129 return ret; 5130 } 5131 5132 gpwrdn = GPWRDN_PWRDNRSTN; 5133 gpwrdn |= GPWRDN_PMUACTV; 5134 dwc2_writel(hsotg, gpwrdn, GPWRDN); 5135 udelay(10); 5136 5137 /* Set flag to indicate that we are in hibernation */ 5138 hsotg->hibernated = 1; 5139 5140 /* Enable interrupts from wake up logic */ 5141 gpwrdn = dwc2_readl(hsotg, GPWRDN); 5142 gpwrdn |= GPWRDN_PMUINTSEL; 5143 dwc2_writel(hsotg, gpwrdn, GPWRDN); 5144 udelay(10); 5145 5146 /* Unmask device mode interrupts in GPWRDN */ 5147 gpwrdn = dwc2_readl(hsotg, GPWRDN); 5148 gpwrdn |= GPWRDN_RST_DET_MSK; 5149 gpwrdn |= GPWRDN_LNSTSCHG_MSK; 5150 gpwrdn |= GPWRDN_STS_CHGINT_MSK; 5151 dwc2_writel(hsotg, gpwrdn, GPWRDN); 5152 udelay(10); 5153 5154 /* Enable Power Down Clamp */ 5155 gpwrdn = dwc2_readl(hsotg, GPWRDN); 5156 gpwrdn |= GPWRDN_PWRDNCLMP; 5157 dwc2_writel(hsotg, gpwrdn, GPWRDN); 5158 udelay(10); 5159 5160 /* Switch off VDD */ 5161 gpwrdn = dwc2_readl(hsotg, GPWRDN); 5162 gpwrdn |= GPWRDN_PWRDNSWTCH; 5163 dwc2_writel(hsotg, gpwrdn, GPWRDN); 5164 udelay(10); 5165 5166 /* Save the GPWRDN register for later use by the STSCHNG interrupt handler */ 5167 hsotg->gr_backup.gpwrdn = dwc2_readl(hsotg, GPWRDN); 5168 dev_dbg(hsotg->dev, "Hibernation completed\n"); 5169 5170 return ret; 5171 } 5172 5173 /** 5174 * dwc2_gadget_exit_hibernation() - Exit from device mode hibernation. 5175 * This function handles exiting from device mode hibernation on a 5176 * host-initiated resume/reset or a device-initiated remote wakeup. 5177 * 5178 * @hsotg: Programming view of the DWC_otg controller 5179 * @rem_wakeup: indicates whether resume is initiated by Device or Host. 5180 * @reset: indicates whether resume is initiated by Reset. 5181 * 5182 * Return non-zero if we failed to exit hibernation.
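 * * In outline: restore the common state via dwc2_hib_restore_common(), de-assert the restore and wakeup-logic bits in GPWRDN, write back GUSBCFG/DCFG/DCTL, then either signal remote wakeup (DCTL.RmtWkUpSig, cleared again after ~10ms) or set DCTL.PwrOnPrgDone, before restoring the remaining global and device registers.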
5183 */ 5184 int dwc2_gadget_exit_hibernation(struct dwc2_hsotg *hsotg, 5185 int rem_wakeup, int reset) 5186 { 5187 u32 pcgcctl; 5188 u32 gpwrdn; 5189 u32 dctl; 5190 int ret = 0; 5191 struct dwc2_gregs_backup *gr; 5192 struct dwc2_dregs_backup *dr; 5193 5194 gr = &hsotg->gr_backup; 5195 dr = &hsotg->dr_backup; 5196 5197 if (!hsotg->hibernated) { 5198 dev_dbg(hsotg->dev, "Already exited from Hibernation\n"); 5199 return 1; 5200 } 5201 dev_dbg(hsotg->dev, 5202 "%s: called with rem_wakeup = %d reset = %d\n", 5203 __func__, rem_wakeup, reset); 5204 5205 dwc2_hib_restore_common(hsotg, rem_wakeup, 0); 5206 5207 if (!reset) { 5208 /* Clear all pending interrupts */ 5209 dwc2_writel(hsotg, 0xffffffff, GINTSTS); 5210 } 5211 5212 /* De-assert Restore */ 5213 gpwrdn = dwc2_readl(hsotg, GPWRDN); 5214 gpwrdn &= ~GPWRDN_RESTORE; 5215 dwc2_writel(hsotg, gpwrdn, GPWRDN); 5216 udelay(10); 5217 5218 if (!rem_wakeup) { 5219 pcgcctl = dwc2_readl(hsotg, PCGCTL); 5220 pcgcctl &= ~PCGCTL_RSTPDWNMODULE; 5221 dwc2_writel(hsotg, pcgcctl, PCGCTL); 5222 } 5223 5224 /* Restore GUSBCFG, DCFG and DCTL */ 5225 dwc2_writel(hsotg, gr->gusbcfg, GUSBCFG); 5226 dwc2_writel(hsotg, dr->dcfg, DCFG); 5227 dwc2_writel(hsotg, dr->dctl, DCTL); 5228 5229 /* De-assert Wakeup Logic */ 5230 gpwrdn = dwc2_readl(hsotg, GPWRDN); 5231 gpwrdn &= ~GPWRDN_PMUACTV; 5232 dwc2_writel(hsotg, gpwrdn, GPWRDN); 5233 5234 if (rem_wakeup) { 5235 udelay(10); 5236 /* Start Remote Wakeup Signaling */ 5237 dwc2_writel(hsotg, dr->dctl | DCTL_RMTWKUPSIG, DCTL); 5238 } else { 5239 udelay(50); 5240 /* Set Device programming done bit */ 5241 dctl = dwc2_readl(hsotg, DCTL); 5242 dctl |= DCTL_PWRONPRGDONE; 5243 dwc2_writel(hsotg, dctl, DCTL); 5244 } 5245 /* Wait for interrupts which must be cleared */ 5246 mdelay(2); 5247 /* Clear all pending interrupts */ 5248 dwc2_writel(hsotg, 0xffffffff, GINTSTS); 5249 5250 /* Restore global registers */ 5251 ret = dwc2_restore_global_registers(hsotg); 5252 if (ret) { 5253 dev_err(hsotg->dev, "%s: failed to restore registers\n", 5254 __func__); 5255 return ret; 5256 } 5257 5258 /* Restore device registers */ 5259 ret = dwc2_restore_device_registers(hsotg, rem_wakeup); 5260 if (ret) { 5261 dev_err(hsotg->dev, "%s: failed to restore device registers\n", 5262 __func__); 5263 return ret; 5264 } 5265 5266 if (rem_wakeup) { 5267 mdelay(10); 5268 dctl = dwc2_readl(hsotg, DCTL); 5269 dctl &= ~DCTL_RMTWKUPSIG; 5270 dwc2_writel(hsotg, dctl, DCTL); 5271 } 5272 5273 hsotg->hibernated = 0; 5274 hsotg->lx_state = DWC2_L0; 5275 dev_dbg(hsotg->dev, "Hibernation recovery completes here\n"); 5276 5277 return ret; 5278 } 5279