// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Copyright 2008 Openmoko, Inc.
 * Copyright 2008 Simtec Electronics
 *	Ben Dooks <ben@simtec.co.uk>
 *	http://armlinux.simtec.co.uk/
 *
 * S3C USB2.0 High-speed / OtG driver
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_platform.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/phy.h>

#include "core.h"
#include "hw.h"

/* conversion functions */
static inline struct dwc2_hsotg_req *our_req(struct usb_request *req)
{
	return container_of(req, struct dwc2_hsotg_req, req);
}

static inline struct dwc2_hsotg_ep *our_ep(struct usb_ep *ep)
{
	return container_of(ep, struct dwc2_hsotg_ep, ep);
}

static inline struct dwc2_hsotg *to_hsotg(struct usb_gadget *gadget)
{
	return container_of(gadget, struct dwc2_hsotg, gadget);
}

static inline void dwc2_set_bit(struct dwc2_hsotg *hsotg, u32 offset, u32 val)
{
	dwc2_writel(hsotg, dwc2_readl(hsotg, offset) | val, offset);
}

static inline void dwc2_clear_bit(struct dwc2_hsotg *hsotg, u32 offset, u32 val)
{
	dwc2_writel(hsotg, dwc2_readl(hsotg, offset) & ~val, offset);
}

static inline struct dwc2_hsotg_ep *index_to_ep(struct dwc2_hsotg *hsotg,
						u32 ep_index, u32 dir_in)
{
	if (dir_in)
		return hsotg->eps_in[ep_index];
	else
		return hsotg->eps_out[ep_index];
}

/* forward declaration of functions */
static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg);

/**
 * using_dma - return the DMA status of the driver.
 * @hsotg: The driver state.
 *
 * Return true if we're using DMA.
 *
 * Currently, we have the DMA support code worked into everywhere
 * that needs it, but the AMBA DMA implementation in the hardware can
 * only DMA from 32bit aligned addresses. This means that gadgets such
 * as the CDC Ethernet cannot work as they often pass packets which are
 * not 32bit aligned.
 *
 * Unfortunately the choice to use DMA or not is global to the controller
 * and seems to be only settable when the controller is being put through
 * a core reset. This means we either need to fix the gadgets to take
 * account of DMA alignment, or add bounce buffers (yuerk).
 *
 * g_using_dma is set depending on dts flag.
 */
static inline bool using_dma(struct dwc2_hsotg *hsotg)
{
	return hsotg->params.g_dma;
}

/*
 * using_desc_dma - return the descriptor DMA status of the driver.
 * @hsotg: The driver state.
 *
 * Return true if we're using descriptor DMA.
 */
static inline bool using_desc_dma(struct dwc2_hsotg *hsotg)
{
	return hsotg->params.g_dma_desc;
}
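/*
 * Illustrative sketch, not part of the driver: on devicetree platforms the
 * gadget-side params referenced above typically come from optional "g-"
 * properties of the controller node, along the lines of the hypothetical
 * fragment below (exact property names and availability depend on the
 * dwc2 binding in use):
 *
 *	usb@12480000 {
 *		compatible = "snps,dwc2";
 *		g-rx-fifo-size = <512>;
 *		g-np-tx-fifo-size = <32>;
 *		g-tx-fifo-size = <256 256 256>;
 *	};
 *
 * Whether buffer DMA and descriptor DMA are actually enabled also depends
 * on what the core's hardware configuration registers report; see params.c
 * for the exact policy.
 */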
/**
 * dwc2_gadget_incr_frame_num - Increments the targeted frame number.
 * @hs_ep: The endpoint
 *
 * This function will also check if the frame number overruns DSTS_SOFFN_LIMIT.
 * If an overrun occurs it will wrap the value and set the frame_overrun flag.
 */
static inline void dwc2_gadget_incr_frame_num(struct dwc2_hsotg_ep *hs_ep)
{
	hs_ep->target_frame += hs_ep->interval;
	if (hs_ep->target_frame > DSTS_SOFFN_LIMIT) {
		hs_ep->frame_overrun = true;
		hs_ep->target_frame &= DSTS_SOFFN_LIMIT;
	} else {
		hs_ep->frame_overrun = false;
	}
}

/**
 * dwc2_hsotg_en_gsint - enable one or more of the general interrupts
 * @hsotg: The device state
 * @ints: A bitmask of the interrupts to enable
 */
static void dwc2_hsotg_en_gsint(struct dwc2_hsotg *hsotg, u32 ints)
{
	u32 gsintmsk = dwc2_readl(hsotg, GINTMSK);
	u32 new_gsintmsk;

	new_gsintmsk = gsintmsk | ints;

	if (new_gsintmsk != gsintmsk) {
		dev_dbg(hsotg->dev, "gsintmsk now 0x%08x\n", new_gsintmsk);
		dwc2_writel(hsotg, new_gsintmsk, GINTMSK);
	}
}

/**
 * dwc2_hsotg_disable_gsint - disable one or more of the general interrupts
 * @hsotg: The device state
 * @ints: A bitmask of the interrupts to disable
 */
static void dwc2_hsotg_disable_gsint(struct dwc2_hsotg *hsotg, u32 ints)
{
	u32 gsintmsk = dwc2_readl(hsotg, GINTMSK);
	u32 new_gsintmsk;

	new_gsintmsk = gsintmsk & ~ints;

	if (new_gsintmsk != gsintmsk)
		dwc2_writel(hsotg, new_gsintmsk, GINTMSK);
}

/**
 * dwc2_hsotg_ctrl_epint - enable/disable an endpoint irq
 * @hsotg: The device state
 * @ep: The endpoint index
 * @dir_in: True if direction is in.
 * @en: The enable value, true to enable
 *
 * Set or clear the mask for an individual endpoint's interrupt
 * request.
 */
static void dwc2_hsotg_ctrl_epint(struct dwc2_hsotg *hsotg,
				  unsigned int ep, unsigned int dir_in,
				  unsigned int en)
{
	unsigned long flags;
	u32 bit = 1 << ep;
	u32 daint;

	/* OUT endpoint interrupt bits live in the upper half of DAINTMSK */
	if (!dir_in)
		bit <<= 16;

	local_irq_save(flags);
	daint = dwc2_readl(hsotg, DAINTMSK);
	if (en)
		daint |= bit;
	else
		daint &= ~bit;
	dwc2_writel(hsotg, daint, DAINTMSK);
	local_irq_restore(flags);
}

/**
 * dwc2_hsotg_tx_fifo_count - return count of TX FIFOs in device mode
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
int dwc2_hsotg_tx_fifo_count(struct dwc2_hsotg *hsotg)
{
	if (hsotg->hw_params.en_multiple_tx_fifo)
		/* In dedicated FIFO mode we need count of IN EPs */
		return hsotg->hw_params.num_dev_in_eps;
	else
		/* In shared FIFO mode we need count of Periodic IN EPs */
		return hsotg->hw_params.num_dev_perio_in_ep;
}
/**
 * dwc2_hsotg_tx_fifo_total_depth - return total FIFO depth available for
 * device mode TX FIFOs
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg)
{
	int addr;
	int tx_addr_max;
	u32 np_tx_fifo_size;

	np_tx_fifo_size = min_t(u32, hsotg->hw_params.dev_nperio_tx_fifo_size,
				hsotg->params.g_np_tx_fifo_size);

	/* Get Endpoint Info Control block size in DWORDs. */
	tx_addr_max = hsotg->hw_params.total_fifo_size;

	addr = hsotg->params.g_rx_fifo_size + np_tx_fifo_size;
	if (tx_addr_max <= addr)
		return 0;

	return tx_addr_max - addr;
}

/**
 * dwc2_hsotg_tx_fifo_average_depth - returns average depth of device mode
 * TX FIFOs
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
int dwc2_hsotg_tx_fifo_average_depth(struct dwc2_hsotg *hsotg)
{
	int tx_fifo_count;
	int tx_fifo_depth;

	tx_fifo_depth = dwc2_hsotg_tx_fifo_total_depth(hsotg);

	tx_fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);

	if (!tx_fifo_count)
		return tx_fifo_depth;
	else
		return tx_fifo_depth / tx_fifo_count;
}

/**
 * dwc2_hsotg_init_fifo - initialise non-periodic FIFOs
 * @hsotg: The device instance.
 */
static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
{
	unsigned int ep;
	unsigned int addr;
	int timeout;

	u32 val;
	u32 *txfsz = hsotg->params.g_tx_fifo_size;

	/* Reset fifo map if not correctly cleared during previous session */
	WARN_ON(hsotg->fifo_map);
	hsotg->fifo_map = 0;

	/* set RX/NPTX FIFO sizes */
	dwc2_writel(hsotg, hsotg->params.g_rx_fifo_size, GRXFSIZ);
	dwc2_writel(hsotg, (hsotg->params.g_rx_fifo_size <<
		    FIFOSIZE_STARTADDR_SHIFT) |
		    (hsotg->params.g_np_tx_fifo_size << FIFOSIZE_DEPTH_SHIFT),
		    GNPTXFSIZ);

	/*
	 * arrange all the rest of the TX FIFOs, as some versions of this
	 * block have overlapping default addresses. This also ensures
	 * that if the settings have been changed, then they are set to
	 * known values.
	 */

	/* start at the end of the GNPTXFSIZ, rounded up */
	addr = hsotg->params.g_rx_fifo_size + hsotg->params.g_np_tx_fifo_size;

	/*
	 * Configure fifos sizes from provided configuration and assign
	 * them to endpoints dynamically according to maxpacket size value of
	 * given endpoint.
	 */
	for (ep = 1; ep < MAX_EPS_CHANNELS; ep++) {
		if (!txfsz[ep])
			continue;
		val = addr;
		val |= txfsz[ep] << FIFOSIZE_DEPTH_SHIFT;
		WARN_ONCE(addr + txfsz[ep] > hsotg->fifo_mem,
			  "insufficient fifo memory");
		addr += txfsz[ep];

		dwc2_writel(hsotg, val, DPTXFSIZN(ep));
		/* read back to ensure the write has taken effect */
		val = dwc2_readl(hsotg, DPTXFSIZN(ep));
	}

	dwc2_writel(hsotg, hsotg->hw_params.total_fifo_size |
		    addr << GDFIFOCFG_EPINFOBASE_SHIFT,
		    GDFIFOCFG);
	/*
	 * according to p428 of the design guide, we need to ensure that
	 * all fifos are flushed before continuing
	 */

	dwc2_writel(hsotg, GRSTCTL_TXFNUM(0x10) | GRSTCTL_TXFFLSH |
		    GRSTCTL_RXFFLSH, GRSTCTL);

	/* wait until the fifos are both flushed */
	timeout = 100;
	while (1) {
		val = dwc2_readl(hsotg, GRSTCTL);

		if ((val & (GRSTCTL_TXFFLSH | GRSTCTL_RXFFLSH)) == 0)
			break;

		if (--timeout == 0) {
			dev_err(hsotg->dev,
				"%s: timeout flushing fifos (GRSTCTL=%08x)\n",
				__func__, val);
			break;
		}

		udelay(1);
	}

	dev_dbg(hsotg->dev, "FIFOs reset, timeout at %d\n", timeout);
}
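/*
 * Illustrative FIFO layout sketch (the numbers are made up, not a
 * recommended configuration): with g_rx_fifo_size = 512 and
 * g_np_tx_fifo_size = 128 (both in 32-bit words), the RX FIFO occupies
 * words 0..511, GNPTXFSIZ is programmed with start address 512 and depth
 * 128, and the first dedicated TX FIFO (g_tx_fifo_size[1]) starts at word
 * 640. Each subsequent DPTXFSIZN(n) entry packs its start address in the
 * low half and its depth via FIFOSIZE_DEPTH_SHIFT, exactly as the loop in
 * dwc2_hsotg_init_fifo() above does.
 */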
/**
 * dwc2_hsotg_ep_alloc_request - allocate USB request structure
 * @ep: USB endpoint to allocate request for.
 * @flags: Allocation flags
 *
 * Allocate a new USB request structure appropriate for the specified endpoint
 */
static struct usb_request *dwc2_hsotg_ep_alloc_request(struct usb_ep *ep,
						       gfp_t flags)
{
	struct dwc2_hsotg_req *req;

	req = kzalloc(sizeof(*req), flags);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}

/**
 * is_ep_periodic - return true if the endpoint is in periodic mode.
 * @hs_ep: The endpoint to query.
 *
 * Returns true if the endpoint is in periodic mode, meaning it is being
 * used for an Interrupt or ISO transfer.
 */
static inline int is_ep_periodic(struct dwc2_hsotg_ep *hs_ep)
{
	return hs_ep->periodic;
}

/**
 * dwc2_hsotg_unmap_dma - unmap the DMA memory being used for the request
 * @hsotg: The device state.
 * @hs_ep: The endpoint for the request
 * @hs_req: The request being processed.
 *
 * This is the reverse of dwc2_hsotg_map_dma(), called for the completion
 * of a request to ensure the buffer is ready for access by the caller.
 */
static void dwc2_hsotg_unmap_dma(struct dwc2_hsotg *hsotg,
				 struct dwc2_hsotg_ep *hs_ep,
				 struct dwc2_hsotg_req *hs_req)
{
	struct usb_request *req = &hs_req->req;

	usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in);
}

/*
 * dwc2_gadget_alloc_ctrl_desc_chains - allocate DMA descriptor chains
 * for Control endpoint
 * @hsotg: The device state.
 *
 * This function will allocate 4 descriptor chains for EP 0: 2 for the
 * Setup stage, and one each for the IN and OUT data/status transactions.
 */
static int dwc2_gadget_alloc_ctrl_desc_chains(struct dwc2_hsotg *hsotg)
{
	hsotg->setup_desc[0] =
		dmam_alloc_coherent(hsotg->dev,
				    sizeof(struct dwc2_dma_desc),
				    &hsotg->setup_desc_dma[0],
				    GFP_KERNEL);
	if (!hsotg->setup_desc[0])
		goto fail;

	hsotg->setup_desc[1] =
		dmam_alloc_coherent(hsotg->dev,
				    sizeof(struct dwc2_dma_desc),
				    &hsotg->setup_desc_dma[1],
				    GFP_KERNEL);
	if (!hsotg->setup_desc[1])
		goto fail;

	hsotg->ctrl_in_desc =
		dmam_alloc_coherent(hsotg->dev,
				    sizeof(struct dwc2_dma_desc),
				    &hsotg->ctrl_in_desc_dma,
				    GFP_KERNEL);
	if (!hsotg->ctrl_in_desc)
		goto fail;

	hsotg->ctrl_out_desc =
		dmam_alloc_coherent(hsotg->dev,
				    sizeof(struct dwc2_dma_desc),
				    &hsotg->ctrl_out_desc_dma,
				    GFP_KERNEL);
	if (!hsotg->ctrl_out_desc)
		goto fail;

	return 0;

fail:
	return -ENOMEM;
}
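/*
 * Sketch of how the chains allocated above are used by
 * dwc2_gadget_set_ep0_desc_chain() further down:
 *
 *	setup_desc[]  - SETUP and STATUS_OUT stages
 *	ctrl_in_desc  - DATA_IN and STATUS_IN stages
 *	ctrl_out_desc - DATA_OUT stage
 *
 * Each chain is a single dwc2_dma_desc, since an EP0 transaction never
 * needs more than one descriptor at a time.
 */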
447 * 448 * This routine is only needed for PIO 449 */ 450 static int dwc2_hsotg_write_fifo(struct dwc2_hsotg *hsotg, 451 struct dwc2_hsotg_ep *hs_ep, 452 struct dwc2_hsotg_req *hs_req) 453 { 454 bool periodic = is_ep_periodic(hs_ep); 455 u32 gnptxsts = dwc2_readl(hsotg, GNPTXSTS); 456 int buf_pos = hs_req->req.actual; 457 int to_write = hs_ep->size_loaded; 458 void *data; 459 int can_write; 460 int pkt_round; 461 int max_transfer; 462 463 to_write -= (buf_pos - hs_ep->last_load); 464 465 /* if there's nothing to write, get out early */ 466 if (to_write == 0) 467 return 0; 468 469 if (periodic && !hsotg->dedicated_fifos) { 470 u32 epsize = dwc2_readl(hsotg, DIEPTSIZ(hs_ep->index)); 471 int size_left; 472 int size_done; 473 474 /* 475 * work out how much data was loaded so we can calculate 476 * how much data is left in the fifo. 477 */ 478 479 size_left = DXEPTSIZ_XFERSIZE_GET(epsize); 480 481 /* 482 * if shared fifo, we cannot write anything until the 483 * previous data has been completely sent. 484 */ 485 if (hs_ep->fifo_load != 0) { 486 dwc2_hsotg_en_gsint(hsotg, GINTSTS_PTXFEMP); 487 return -ENOSPC; 488 } 489 490 dev_dbg(hsotg->dev, "%s: left=%d, load=%d, fifo=%d, size %d\n", 491 __func__, size_left, 492 hs_ep->size_loaded, hs_ep->fifo_load, hs_ep->fifo_size); 493 494 /* how much of the data has moved */ 495 size_done = hs_ep->size_loaded - size_left; 496 497 /* how much data is left in the fifo */ 498 can_write = hs_ep->fifo_load - size_done; 499 dev_dbg(hsotg->dev, "%s: => can_write1=%d\n", 500 __func__, can_write); 501 502 can_write = hs_ep->fifo_size - can_write; 503 dev_dbg(hsotg->dev, "%s: => can_write2=%d\n", 504 __func__, can_write); 505 506 if (can_write <= 0) { 507 dwc2_hsotg_en_gsint(hsotg, GINTSTS_PTXFEMP); 508 return -ENOSPC; 509 } 510 } else if (hsotg->dedicated_fifos && hs_ep->index != 0) { 511 can_write = dwc2_readl(hsotg, 512 DTXFSTS(hs_ep->fifo_index)); 513 514 can_write &= 0xffff; 515 can_write *= 4; 516 } else { 517 if (GNPTXSTS_NP_TXQ_SPC_AVAIL_GET(gnptxsts) == 0) { 518 dev_dbg(hsotg->dev, 519 "%s: no queue slots available (0x%08x)\n", 520 __func__, gnptxsts); 521 522 dwc2_hsotg_en_gsint(hsotg, GINTSTS_NPTXFEMP); 523 return -ENOSPC; 524 } 525 526 can_write = GNPTXSTS_NP_TXF_SPC_AVAIL_GET(gnptxsts); 527 can_write *= 4; /* fifo size is in 32bit quantities. */ 528 } 529 530 max_transfer = hs_ep->ep.maxpacket * hs_ep->mc; 531 532 dev_dbg(hsotg->dev, "%s: GNPTXSTS=%08x, can=%d, to=%d, max_transfer %d\n", 533 __func__, gnptxsts, can_write, to_write, max_transfer); 534 535 /* 536 * limit to 512 bytes of data, it seems at least on the non-periodic 537 * FIFO, requests of >512 cause the endpoint to get stuck with a 538 * fragment of the end of the transfer in it. 539 */ 540 if (can_write > 512 && !periodic) 541 can_write = 512; 542 543 /* 544 * limit the write to one max-packet size worth of data, but allow 545 * the transfer to return that it did not run out of fifo space 546 * doing it. 547 */ 548 if (to_write > max_transfer) { 549 to_write = max_transfer; 550 551 /* it's needed only when we do not use dedicated fifos */ 552 if (!hsotg->dedicated_fifos) 553 dwc2_hsotg_en_gsint(hsotg, 554 periodic ? GINTSTS_PTXFEMP : 555 GINTSTS_NPTXFEMP); 556 } 557 558 /* see if we can write data */ 559 560 if (to_write > can_write) { 561 to_write = can_write; 562 pkt_round = to_write % max_transfer; 563 564 /* 565 * Round the write down to an 566 * exact number of packets. 567 * 568 * Note, we do not currently check to see if we can ever 569 * write a full packet or not to the FIFO. 
570 */ 571 572 if (pkt_round) 573 to_write -= pkt_round; 574 575 /* 576 * enable correct FIFO interrupt to alert us when there 577 * is more room left. 578 */ 579 580 /* it's needed only when we do not use dedicated fifos */ 581 if (!hsotg->dedicated_fifos) 582 dwc2_hsotg_en_gsint(hsotg, 583 periodic ? GINTSTS_PTXFEMP : 584 GINTSTS_NPTXFEMP); 585 } 586 587 dev_dbg(hsotg->dev, "write %d/%d, can_write %d, done %d\n", 588 to_write, hs_req->req.length, can_write, buf_pos); 589 590 if (to_write <= 0) 591 return -ENOSPC; 592 593 hs_req->req.actual = buf_pos + to_write; 594 hs_ep->total_data += to_write; 595 596 if (periodic) 597 hs_ep->fifo_load += to_write; 598 599 to_write = DIV_ROUND_UP(to_write, 4); 600 data = hs_req->req.buf + buf_pos; 601 602 dwc2_writel_rep(hsotg, EPFIFO(hs_ep->index), data, to_write); 603 604 return (to_write >= can_write) ? -ENOSPC : 0; 605 } 606 607 /** 608 * get_ep_limit - get the maximum data legnth for this endpoint 609 * @hs_ep: The endpoint 610 * 611 * Return the maximum data that can be queued in one go on a given endpoint 612 * so that transfers that are too long can be split. 613 */ 614 static unsigned int get_ep_limit(struct dwc2_hsotg_ep *hs_ep) 615 { 616 int index = hs_ep->index; 617 unsigned int maxsize; 618 unsigned int maxpkt; 619 620 if (index != 0) { 621 maxsize = DXEPTSIZ_XFERSIZE_LIMIT + 1; 622 maxpkt = DXEPTSIZ_PKTCNT_LIMIT + 1; 623 } else { 624 maxsize = 64 + 64; 625 if (hs_ep->dir_in) 626 maxpkt = DIEPTSIZ0_PKTCNT_LIMIT + 1; 627 else 628 maxpkt = 2; 629 } 630 631 /* we made the constant loading easier above by using +1 */ 632 maxpkt--; 633 maxsize--; 634 635 /* 636 * constrain by packet count if maxpkts*pktsize is greater 637 * than the length register size. 638 */ 639 640 if ((maxpkt * hs_ep->ep.maxpacket) < maxsize) 641 maxsize = maxpkt * hs_ep->ep.maxpacket; 642 643 return maxsize; 644 } 645 646 /** 647 * dwc2_hsotg_read_frameno - read current frame number 648 * @hsotg: The device instance 649 * 650 * Return the current frame number 651 */ 652 static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg) 653 { 654 u32 dsts; 655 656 dsts = dwc2_readl(hsotg, DSTS); 657 dsts &= DSTS_SOFFN_MASK; 658 dsts >>= DSTS_SOFFN_SHIFT; 659 660 return dsts; 661 } 662 663 /** 664 * dwc2_gadget_get_chain_limit - get the maximum data payload value of the 665 * DMA descriptor chain prepared for specific endpoint 666 * @hs_ep: The endpoint 667 * 668 * Return the maximum data that can be queued in one go on a given endpoint 669 * depending on its descriptor chain capacity so that transfers that 670 * are too long can be split. 671 */ 672 static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep) 673 { 674 int is_isoc = hs_ep->isochronous; 675 unsigned int maxsize; 676 677 if (is_isoc) 678 maxsize = hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT : 679 DEV_DMA_ISOC_RX_NBYTES_LIMIT; 680 else 681 maxsize = DEV_DMA_NBYTES_LIMIT; 682 683 /* Above size of one descriptor was chosen, multiple it */ 684 maxsize *= MAX_DMA_DESC_NUM_GENERIC; 685 686 return maxsize; 687 } 688 689 /* 690 * dwc2_gadget_get_desc_params - get DMA descriptor parameters. 691 * @hs_ep: The endpoint 692 * @mask: RX/TX bytes mask to be defined 693 * 694 * Returns maximum data payload for one descriptor after analyzing endpoint 695 * characteristics. 696 * DMA descriptor transfer bytes limit depends on EP type: 697 * Control out - MPS, 698 * Isochronous - descriptor rx/tx bytes bitfield limit, 699 * Control In/Bulk/Interrupt - multiple of mps. 
/*
 * dwc2_gadget_get_desc_params - get DMA descriptor parameters.
 * @hs_ep: The endpoint
 * @mask: RX/TX bytes mask to be defined
 *
 * Returns maximum data payload for one descriptor after analyzing endpoint
 * characteristics.
 * DMA descriptor transfer bytes limit depends on EP type:
 * Control out - MPS,
 * Isochronous - descriptor rx/tx bytes bitfield limit,
 * Control In/Bulk/Interrupt - multiple of mps. This allows us to avoid
 * concatenating data from different descriptors within one packet.
 *
 * Selects corresponding mask for RX/TX bytes as well.
 */
static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask)
{
	u32 mps = hs_ep->ep.maxpacket;
	int dir_in = hs_ep->dir_in;
	u32 desc_size = 0;

	if (!hs_ep->index && !dir_in) {
		desc_size = mps;
		*mask = DEV_DMA_NBYTES_MASK;
	} else if (hs_ep->isochronous) {
		if (dir_in) {
			desc_size = DEV_DMA_ISOC_TX_NBYTES_LIMIT;
			*mask = DEV_DMA_ISOC_TX_NBYTES_MASK;
		} else {
			desc_size = DEV_DMA_ISOC_RX_NBYTES_LIMIT;
			*mask = DEV_DMA_ISOC_RX_NBYTES_MASK;
		}
	} else {
		desc_size = DEV_DMA_NBYTES_LIMIT;
		*mask = DEV_DMA_NBYTES_MASK;

		/* Round down desc_size to be mps multiple */
		desc_size -= desc_size % mps;
	}

	return desc_size;
}

/*
 * dwc2_gadget_config_nonisoc_xfer_ddma - prepare non ISOC DMA desc chain.
 * @hs_ep: The endpoint
 * @dma_buff: DMA address to use
 * @len: Length of the transfer
 *
 * This function will iterate over descriptor chain and fill its entries
 * with corresponding information based on transfer data.
 */
static void dwc2_gadget_config_nonisoc_xfer_ddma(struct dwc2_hsotg_ep *hs_ep,
						 dma_addr_t dma_buff,
						 unsigned int len)
{
	struct dwc2_hsotg *hsotg = hs_ep->parent;
	int dir_in = hs_ep->dir_in;
	struct dwc2_dma_desc *desc = hs_ep->desc_list;
	u32 mps = hs_ep->ep.maxpacket;
	u32 maxsize = 0;
	u32 offset = 0;
	u32 mask = 0;
	int i;

	maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);

	hs_ep->desc_count = (len / maxsize) +
				((len % maxsize) ? 1 : 0);
	if (len == 0)
		hs_ep->desc_count = 1;

	for (i = 0; i < hs_ep->desc_count; ++i) {
		desc->status = 0;
		desc->status |= (DEV_DMA_BUFF_STS_HBUSY
					<< DEV_DMA_BUFF_STS_SHIFT);

		if (len > maxsize) {
			if (!hs_ep->index && !dir_in)
				desc->status |= (DEV_DMA_L | DEV_DMA_IOC);

			desc->status |= (maxsize <<
						DEV_DMA_NBYTES_SHIFT & mask);
			desc->buf = dma_buff + offset;

			len -= maxsize;
			offset += maxsize;
		} else {
			desc->status |= (DEV_DMA_L | DEV_DMA_IOC);

			if (dir_in)
				desc->status |= (len % mps) ? DEV_DMA_SHORT :
					((hs_ep->send_zlp) ? DEV_DMA_SHORT : 0);
			if (len > maxsize)
				dev_err(hsotg->dev, "wrong len %d\n", len);

			desc->status |=
				len << DEV_DMA_NBYTES_SHIFT & mask;
			desc->buf = dma_buff + offset;
		}

		desc->status &= ~DEV_DMA_BUFF_STS_MASK;
		desc->status |= (DEV_DMA_BUFF_STS_HREADY
					<< DEV_DMA_BUFF_STS_SHIFT);
		desc++;
	}
}
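/*
 * Worked example for the chain fill above (illustrative numbers): if
 * maxsize for the endpoint is 1024 bytes and len is 2500, desc_count =
 * 2500 / 1024 + 1 = 3; the first two descriptors carry 1024 bytes each
 * and the last carries the remaining 452 bytes with the L (last) and IOC
 * bits set. A zero-length transfer still consumes one descriptor.
 */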
/*
 * dwc2_gadget_fill_isoc_desc - fills next isochronous descriptor in chain.
 * @hs_ep: The isochronous endpoint.
 * @dma_buff: usb requests dma buffer.
 * @len: usb request transfer length.
 *
 * Fills next free descriptor with the data of the arrived usb request,
 * frame info, sets Last and IOC bits, and increments next_desc. If the
 * filled descriptor is not the first one, removes the L bit from the
 * previous descriptor status.
 */
static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
				      dma_addr_t dma_buff, unsigned int len)
{
	struct dwc2_dma_desc *desc;
	struct dwc2_hsotg *hsotg = hs_ep->parent;
	u32 index;
	u32 maxsize = 0;
	u32 mask = 0;
	u8 pid = 0;

	maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);

	index = hs_ep->next_desc;
	desc = &hs_ep->desc_list[index];

	/* Check if descriptor chain is full */
	if ((desc->status >> DEV_DMA_BUFF_STS_SHIFT) ==
	    DEV_DMA_BUFF_STS_HREADY) {
		dev_dbg(hsotg->dev, "%s: desc chain full\n", __func__);
		return 1;
	}

	/* Clear L bit of previous desc if more than one entry in the chain */
	if (hs_ep->next_desc)
		hs_ep->desc_list[index - 1].status &= ~DEV_DMA_L;

	dev_dbg(hsotg->dev, "%s: Filling ep %d, dir %s isoc desc # %d\n",
		__func__, hs_ep->index, hs_ep->dir_in ? "in" : "out", index);

	desc->status = 0;
	desc->status |= (DEV_DMA_BUFF_STS_HBUSY << DEV_DMA_BUFF_STS_SHIFT);

	desc->buf = dma_buff;
	desc->status |= (DEV_DMA_L | DEV_DMA_IOC |
			 ((len << DEV_DMA_NBYTES_SHIFT) & mask));

	if (hs_ep->dir_in) {
		/* PID field holds the number of packets for this frame */
		if (len)
			pid = DIV_ROUND_UP(len, hs_ep->ep.maxpacket);
		else
			pid = 1;
		desc->status |= ((pid << DEV_DMA_ISOC_PID_SHIFT) &
				 DEV_DMA_ISOC_PID_MASK) |
				((len % hs_ep->ep.maxpacket) ?
				 DEV_DMA_SHORT : 0) |
				((hs_ep->target_frame <<
				  DEV_DMA_ISOC_FRNUM_SHIFT) &
				 DEV_DMA_ISOC_FRNUM_MASK);
	}

	desc->status &= ~DEV_DMA_BUFF_STS_MASK;
	desc->status |= (DEV_DMA_BUFF_STS_HREADY << DEV_DMA_BUFF_STS_SHIFT);

	/* Increment frame number by interval for IN */
	if (hs_ep->dir_in)
		dwc2_gadget_incr_frame_num(hs_ep);

	/* Update index of last configured entry in the chain */
	hs_ep->next_desc++;
	if (hs_ep->next_desc >= MAX_DMA_DESC_NUM_GENERIC)
		hs_ep->next_desc = 0;

	return 0;
}
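/*
 * Worked example for the PID computation above (illustrative numbers): a
 * high-bandwidth isochronous IN endpoint with maxpacket 1024 queuing
 * 3000 bytes for one (micro)frame gets pid = DIV_ROUND_UP(3000, 1024) = 3,
 * i.e. three packets in that frame; a zero-length request still uses
 * pid = 1 so that one empty packet goes out.
 */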
/*
 * dwc2_gadget_start_isoc_ddma - start isochronous transfer in DDMA
 * @hs_ep: The isochronous endpoint.
 *
 * Prepare descriptor chain for isochronous endpoints. Afterwards
 * write DMA address to HW and enable the endpoint.
 */
static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
{
	struct dwc2_hsotg *hsotg = hs_ep->parent;
	struct dwc2_hsotg_req *hs_req, *treq;
	int index = hs_ep->index;
	int ret;
	int i;
	u32 dma_reg;
	u32 depctl;
	u32 ctrl;
	struct dwc2_dma_desc *desc;

	if (list_empty(&hs_ep->queue)) {
		hs_ep->target_frame = TARGET_FRAME_INITIAL;
		dev_dbg(hsotg->dev, "%s: No requests in queue\n", __func__);
		return;
	}

	/* Initialize descriptor chain by Host Busy status */
	for (i = 0; i < MAX_DMA_DESC_NUM_GENERIC; i++) {
		desc = &hs_ep->desc_list[i];
		desc->status = 0;
		desc->status |= (DEV_DMA_BUFF_STS_HBUSY
				    << DEV_DMA_BUFF_STS_SHIFT);
	}

	hs_ep->next_desc = 0;
	list_for_each_entry_safe(hs_req, treq, &hs_ep->queue, queue) {
		ret = dwc2_gadget_fill_isoc_desc(hs_ep, hs_req->req.dma,
						 hs_req->req.length);
		if (ret)
			break;
	}

	hs_ep->compl_desc = 0;
	depctl = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index);
	dma_reg = hs_ep->dir_in ? DIEPDMA(index) : DOEPDMA(index);

	/* write descriptor chain address to control register */
	dwc2_writel(hsotg, hs_ep->desc_list_dma, dma_reg);

	ctrl = dwc2_readl(hsotg, depctl);
	ctrl |= DXEPCTL_EPENA | DXEPCTL_CNAK;
	dwc2_writel(hsotg, ctrl, depctl);
}

/**
 * dwc2_hsotg_start_req - start a USB request from an endpoint's queue
 * @hsotg: The controller state.
 * @hs_ep: The endpoint to process a request for
 * @hs_req: The request to start.
 * @continuing: True if we are doing more for the current request.
 *
 * Start the given request running by setting the endpoint registers
 * appropriately, and writing any data to the FIFOs.
 */
static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
				 struct dwc2_hsotg_ep *hs_ep,
				 struct dwc2_hsotg_req *hs_req,
				 bool continuing)
{
	struct usb_request *ureq = &hs_req->req;
	int index = hs_ep->index;
	int dir_in = hs_ep->dir_in;
	u32 epctrl_reg;
	u32 epsize_reg;
	u32 epsize;
	u32 ctrl;
	unsigned int length;
	unsigned int packets;
	unsigned int maxreq;
	unsigned int dma_reg;

	if (index != 0) {
		if (hs_ep->req && !continuing) {
			dev_err(hsotg->dev, "%s: active request\n", __func__);
			WARN_ON(1);
			return;
		} else if (hs_ep->req != hs_req && continuing) {
			dev_err(hsotg->dev,
				"%s: continue different req\n", __func__);
			WARN_ON(1);
			return;
		}
	}

	dma_reg = dir_in ? DIEPDMA(index) : DOEPDMA(index);
	epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
	epsize_reg = dir_in ? DIEPTSIZ(index) : DOEPTSIZ(index);

	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x, ep %d, dir %s\n",
		__func__, dwc2_readl(hsotg, epctrl_reg), index,
		hs_ep->dir_in ? "in" : "out");

	/* If endpoint is stalled, we will restart request later */
	ctrl = dwc2_readl(hsotg, epctrl_reg);

	if (index && ctrl & DXEPCTL_STALL) {
		dev_warn(hsotg->dev, "%s: ep%d is stalled\n", __func__, index);
		return;
	}

	length = ureq->length - ureq->actual;
	dev_dbg(hsotg->dev, "ureq->length:%d ureq->actual:%d\n",
		ureq->length, ureq->actual);

	if (!using_desc_dma(hsotg))
		maxreq = get_ep_limit(hs_ep);
	else
		maxreq = dwc2_gadget_get_chain_limit(hs_ep);

	if (length > maxreq) {
		int round = maxreq % hs_ep->ep.maxpacket;

		dev_dbg(hsotg->dev, "%s: length %d, max-req %d, r %d\n",
			__func__, length, maxreq, round);

		/* round down to multiple of packets */
		if (round)
			maxreq -= round;

		length = maxreq;
	}

	if (length)
		packets = DIV_ROUND_UP(length, hs_ep->ep.maxpacket);
	else
		packets = 1;	/* send one packet if length is zero. */

	if (hs_ep->isochronous && length > (hs_ep->mc * hs_ep->ep.maxpacket)) {
		dev_err(hsotg->dev, "req length > maxpacket*mc\n");
		return;
	}

	if (dir_in && index != 0)
		if (hs_ep->isochronous)
			epsize = DXEPTSIZ_MC(packets);
		else
			epsize = DXEPTSIZ_MC(1);
	else
		epsize = 0;

	/*
	 * zero length packet should be programmed on its own and should not
	 * be counted in DIEPTSIZ.PktCnt with other packets.
	 */
	if (dir_in && ureq->zero && !continuing) {
		/* Test if zlp is actually required. */
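		/*
		 * Illustrative: with maxpacket 512, a 1024-byte IN request
		 * ends exactly on a packet boundary, so a trailing ZLP is
		 * needed to terminate the transfer; a 1000-byte request
		 * already ends with a short packet and needs no ZLP.
		 */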
		if ((ureq->length >= hs_ep->ep.maxpacket) &&
		    !(ureq->length % hs_ep->ep.maxpacket))
			hs_ep->send_zlp = 1;
	}

	epsize |= DXEPTSIZ_PKTCNT(packets);
	epsize |= DXEPTSIZ_XFERSIZE(length);

	dev_dbg(hsotg->dev, "%s: %d@%d/%d, 0x%08x => 0x%08x\n",
		__func__, packets, length, ureq->length, epsize, epsize_reg);

	/* store the request as the current one we're doing */
	hs_ep->req = hs_req;

	if (using_desc_dma(hsotg)) {
		u32 offset = 0;
		u32 mps = hs_ep->ep.maxpacket;

		/* Adjust length: EP0 - MPS, other OUT EPs - multiple of MPS */
		if (!dir_in) {
			if (!index)
				length = mps;
			else if (length % mps)
				length += (mps - (length % mps));
		}

		/*
		 * If more data to send, adjust DMA for EP0 out data stage.
		 * ureq->dma stays unchanged, hence increment it by already
		 * passed data count before starting new transaction.
		 */
		if (!index && hsotg->ep0_state == DWC2_EP0_DATA_OUT &&
		    continuing)
			offset = ureq->actual;

		/* Fill DDMA chain entries */
		dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, ureq->dma + offset,
						     length);

		/* write descriptor chain address to control register */
		dwc2_writel(hsotg, hs_ep->desc_list_dma, dma_reg);

		dev_dbg(hsotg->dev, "%s: %08x pad => 0x%08x\n",
			__func__, (u32)hs_ep->desc_list_dma, dma_reg);
	} else {
		/* write size / packets */
		dwc2_writel(hsotg, epsize, epsize_reg);

		if (using_dma(hsotg) && !continuing && (length != 0)) {
			/*
			 * write DMA address to control register, buffer
			 * already synced by dwc2_hsotg_ep_queue().
			 */

			dwc2_writel(hsotg, ureq->dma, dma_reg);

			dev_dbg(hsotg->dev, "%s: %pad => 0x%08x\n",
				__func__, &ureq->dma, dma_reg);
		}
	}

	if (hs_ep->isochronous && hs_ep->interval == 1) {
		hs_ep->target_frame = dwc2_hsotg_read_frameno(hsotg);
		dwc2_gadget_incr_frame_num(hs_ep);

		if (hs_ep->target_frame & 0x1)
			ctrl |= DXEPCTL_SETODDFR;
		else
			ctrl |= DXEPCTL_SETEVENFR;
	}

	ctrl |= DXEPCTL_EPENA;	/* ensure ep enabled */

	dev_dbg(hsotg->dev, "ep0 state:%d\n", hsotg->ep0_state);

	/* For Setup request do not clear NAK */
	if (!(index == 0 && hsotg->ep0_state == DWC2_EP0_SETUP))
		ctrl |= DXEPCTL_CNAK;	/* clear NAK set by core */

	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
	dwc2_writel(hsotg, ctrl, epctrl_reg);

	/*
	 * set these, it seems that DMA support increments past the end
	 * of the packet buffer so we need to calculate the length from
	 * this information.
	 */
	hs_ep->size_loaded = length;
	hs_ep->last_load = ureq->actual;

	if (dir_in && !using_dma(hsotg)) {
		/* set these anyway, we may need them for non-periodic in */
		hs_ep->fifo_load = 0;

		dwc2_hsotg_write_fifo(hsotg, hs_ep, hs_req);
	}

	/*
	 * Note, trying to clear the NAK here causes problems with transmit
	 * on the S3C6400 ending up with the TXFIFO becoming full.
	 */
	/* check ep is enabled */
	if (!(dwc2_readl(hsotg, epctrl_reg) & DXEPCTL_EPENA))
		dev_dbg(hsotg->dev,
			"ep%d: failed to become enabled (DXEPCTL=0x%08x)?\n",
			index, dwc2_readl(hsotg, epctrl_reg));

	dev_dbg(hsotg->dev, "%s: DXEPCTL=0x%08x\n",
		__func__, dwc2_readl(hsotg, epctrl_reg));

	/* enable ep interrupts */
	dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 1);
}

/**
 * dwc2_hsotg_map_dma - map the DMA memory being used for the request
 * @hsotg: The device state.
 * @hs_ep: The endpoint the request is on.
 * @req: The request being processed.
 *
 * We've been asked to queue a request, so ensure that the memory buffer
 * is correctly setup for DMA. If we've been passed an extant DMA address
 * then ensure the buffer has been synced to memory. If our buffer has no
 * DMA memory, then we map the memory and mark our request to allow us to
 * cleanup on completion.
 */
static int dwc2_hsotg_map_dma(struct dwc2_hsotg *hsotg,
			      struct dwc2_hsotg_ep *hs_ep,
			      struct usb_request *req)
{
	int ret;

	ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in);
	if (ret)
		goto dma_error;

	return 0;

dma_error:
	dev_err(hsotg->dev, "%s: failed to map buffer %p, %d bytes\n",
		__func__, req->buf, req->length);

	return -EIO;
}

static int dwc2_hsotg_handle_unaligned_buf_start(struct dwc2_hsotg *hsotg,
						 struct dwc2_hsotg_ep *hs_ep,
						 struct dwc2_hsotg_req *hs_req)
{
	void *req_buf = hs_req->req.buf;

	/* If dma is not being used or buffer is aligned */
	if (!using_dma(hsotg) || !((long)req_buf & 3))
		return 0;

	WARN_ON(hs_req->saved_req_buf);

	dev_dbg(hsotg->dev, "%s: %s: buf=%p length=%d\n", __func__,
		hs_ep->ep.name, req_buf, hs_req->req.length);

	hs_req->req.buf = kmalloc(hs_req->req.length, GFP_ATOMIC);
	if (!hs_req->req.buf) {
		hs_req->req.buf = req_buf;
		dev_err(hsotg->dev,
			"%s: unable to allocate memory for bounce buffer\n",
			__func__);
		return -ENOMEM;
	}

	/* Save actual buffer */
	hs_req->saved_req_buf = req_buf;

	if (hs_ep->dir_in)
		memcpy(hs_req->req.buf, req_buf, hs_req->req.length);
	return 0;
}

static void
dwc2_hsotg_handle_unaligned_buf_complete(struct dwc2_hsotg *hsotg,
					 struct dwc2_hsotg_ep *hs_ep,
					 struct dwc2_hsotg_req *hs_req)
{
	/* If dma is not being used or buffer was aligned */
	if (!using_dma(hsotg) || !hs_req->saved_req_buf)
		return;

	dev_dbg(hsotg->dev, "%s: %s: status=%d actual-length=%d\n", __func__,
		hs_ep->ep.name, hs_req->req.status, hs_req->req.actual);

	/* Copy data from bounce buffer on successful out transfer */
	if (!hs_ep->dir_in && !hs_req->req.status)
		memcpy(hs_req->saved_req_buf, hs_req->req.buf,
		       hs_req->req.actual);

	/* Free bounce buffer */
	kfree(hs_req->req.buf);

	hs_req->req.buf = hs_req->saved_req_buf;
	hs_req->saved_req_buf = NULL;
}
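/*
 * Illustrative flow for the bounce-buffer pair above: a gadget queues an
 * OUT request whose buffer sits at a 2-byte-aligned address, so
 * ((long)buf & 3) is non-zero. dwc2_hsotg_handle_unaligned_buf_start()
 * kmalloc()s a bounce buffer (kmalloc() results satisfy the controller's
 * 32-bit alignment requirement) and swaps it in; once the transfer
 * completes, dwc2_hsotg_handle_unaligned_buf_complete() copies the
 * received bytes back and restores the original buffer pointer.
 */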
/**
 * dwc2_gadget_target_frame_elapsed - Checks target frame
 * @hs_ep: The driver endpoint to check
 *
 * Returns 1 if targeted frame elapsed. If returned 1 then we need to drop
 * corresponding transfer.
 */
static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep)
{
	struct dwc2_hsotg *hsotg = hs_ep->parent;
	u32 target_frame = hs_ep->target_frame;
	u32 current_frame = hsotg->frame_number;
	bool frame_overrun = hs_ep->frame_overrun;

	if (!frame_overrun && current_frame >= target_frame)
		return true;

	if (frame_overrun && current_frame >= target_frame &&
	    ((current_frame - target_frame) < DSTS_SOFFN_LIMIT / 2))
		return true;

	return false;
}

/*
 * dwc2_gadget_set_ep0_desc_chain - Set EP's desc chain pointers
 * @hsotg: The driver state
 * @hs_ep: the ep descriptor chain is for
 *
 * Called to update EP0 structure's pointers depending on the stage of
 * the control transfer.
 */
static int dwc2_gadget_set_ep0_desc_chain(struct dwc2_hsotg *hsotg,
					  struct dwc2_hsotg_ep *hs_ep)
{
	switch (hsotg->ep0_state) {
	case DWC2_EP0_SETUP:
	case DWC2_EP0_STATUS_OUT:
		hs_ep->desc_list = hsotg->setup_desc[0];
		hs_ep->desc_list_dma = hsotg->setup_desc_dma[0];
		break;
	case DWC2_EP0_DATA_IN:
	case DWC2_EP0_STATUS_IN:
		hs_ep->desc_list = hsotg->ctrl_in_desc;
		hs_ep->desc_list_dma = hsotg->ctrl_in_desc_dma;
		break;
	case DWC2_EP0_DATA_OUT:
		hs_ep->desc_list = hsotg->ctrl_out_desc;
		hs_ep->desc_list_dma = hsotg->ctrl_out_desc_dma;
		break;
	default:
		dev_err(hsotg->dev, "invalid EP 0 state in queue %d\n",
			hsotg->ep0_state);
		return -EINVAL;
	}

	return 0;
}

static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
			       gfp_t gfp_flags)
{
	struct dwc2_hsotg_req *hs_req = our_req(req);
	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
	struct dwc2_hsotg *hs = hs_ep->parent;
	bool first;
	int ret;
	u32 maxsize = 0;
	u32 mask = 0;

	dev_dbg(hs->dev, "%s: req %p: %d@%p, noi=%d, zero=%d, snok=%d\n",
		ep->name, req, req->length, req->buf, req->no_interrupt,
		req->zero, req->short_not_ok);

	/* Prevent new request submission when controller is suspended */
	if (hs->lx_state != DWC2_L0) {
		dev_dbg(hs->dev, "%s: submit request only in active state\n",
			__func__);
		return -EAGAIN;
	}

	/* initialise status of the request */
	INIT_LIST_HEAD(&hs_req->queue);
	req->actual = 0;
	req->status = -EINPROGRESS;

	/*
	 * In DDMA mode, for ISOC endpoints, don't queue a request if its
	 * length is greater than the descriptor limits.
	 */
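	/*
	 * Illustrative: for an isochronous OUT endpoint with maxpacket 1024,
	 * the check below rejects any request longer than 1024 bytes, since
	 * each DDMA descriptor receives at most one packet's worth of data.
	 */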
	if (using_desc_dma(hs) && hs_ep->isochronous) {
		maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);
		if (hs_ep->dir_in && req->length > maxsize) {
			dev_err(hs->dev, "wrong length %d (maxsize=%d)\n",
				req->length, maxsize);
			return -EINVAL;
		}

		if (!hs_ep->dir_in && req->length > hs_ep->ep.maxpacket) {
			dev_err(hs->dev, "ISOC OUT: wrong length %d (mps=%d)\n",
				req->length, hs_ep->ep.maxpacket);
			return -EINVAL;
		}
	}

	ret = dwc2_hsotg_handle_unaligned_buf_start(hs, hs_ep, hs_req);
	if (ret)
		return ret;

	/* if we're using DMA, sync the buffers as necessary */
	if (using_dma(hs)) {
		ret = dwc2_hsotg_map_dma(hs, hs_ep, req);
		if (ret)
			return ret;
	}
	/* If using descriptor DMA configure EP0 descriptor chain pointers */
	if (using_desc_dma(hs) && !hs_ep->index) {
		ret = dwc2_gadget_set_ep0_desc_chain(hs, hs_ep);
		if (ret)
			return ret;
	}

	first = list_empty(&hs_ep->queue);
	list_add_tail(&hs_req->queue, &hs_ep->queue);

	/*
	 * Handle DDMA isochronous transfers separately - just add new entry
	 * to the descriptor chain.
	 * Transfer will be started once SW gets either one of NAK or
	 * OutTknEpDis interrupts.
	 */
	if (using_desc_dma(hs) && hs_ep->isochronous) {
		if (hs_ep->target_frame != TARGET_FRAME_INITIAL) {
			dwc2_gadget_fill_isoc_desc(hs_ep, hs_req->req.dma,
						   hs_req->req.length);
		}
		return 0;
	}

	if (first) {
		if (!hs_ep->isochronous) {
			dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
			return 0;
		}

		/* Update current frame number value. */
		hs->frame_number = dwc2_hsotg_read_frameno(hs);
		while (dwc2_gadget_target_frame_elapsed(hs_ep)) {
			dwc2_gadget_incr_frame_num(hs_ep);
			/*
			 * Update current frame number value once more as it
			 * changes here.
			 */
			hs->frame_number = dwc2_hsotg_read_frameno(hs);
		}

		if (hs_ep->target_frame != TARGET_FRAME_INITIAL)
			dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
	}
	return 0;
}

static int dwc2_hsotg_ep_queue_lock(struct usb_ep *ep, struct usb_request *req,
				    gfp_t gfp_flags)
{
	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
	struct dwc2_hsotg *hs = hs_ep->parent;
	unsigned long flags = 0;
	int ret = 0;

	spin_lock_irqsave(&hs->lock, flags);
	ret = dwc2_hsotg_ep_queue(ep, req, gfp_flags);
	spin_unlock_irqrestore(&hs->lock, flags);

	return ret;
}

static void dwc2_hsotg_ep_free_request(struct usb_ep *ep,
				       struct usb_request *req)
{
	struct dwc2_hsotg_req *hs_req = our_req(req);

	kfree(hs_req);
}

/**
 * dwc2_hsotg_complete_oursetup - setup completion callback
 * @ep: The endpoint the request was on.
 * @req: The request completed.
 *
 * Called on completion of any requests the driver itself
 * submitted that need cleaning up.
 */
static void dwc2_hsotg_complete_oursetup(struct usb_ep *ep,
					 struct usb_request *req)
{
	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
	struct dwc2_hsotg *hsotg = hs_ep->parent;

	dev_dbg(hsotg->dev, "%s: ep %p, req %p\n", __func__, ep, req);

	dwc2_hsotg_ep_free_request(ep, req);
}
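/*
 * Illustrative wIndex decoding for ep_from_windex() below: wIndex 0x0081
 * has USB_DIR_IN set and endpoint number 1, so it resolves to eps_in[1];
 * wIndex 0x0002 resolves to eps_out[2]. Values of 0x0100 and above are
 * rejected outright.
 */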
1437 * 1438 * Convert the given wIndex into a pointer to an driver endpoint 1439 * structure, or return NULL if it is not a valid endpoint. 1440 */ 1441 static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg, 1442 u32 windex) 1443 { 1444 struct dwc2_hsotg_ep *ep; 1445 int dir = (windex & USB_DIR_IN) ? 1 : 0; 1446 int idx = windex & 0x7F; 1447 1448 if (windex >= 0x100) 1449 return NULL; 1450 1451 if (idx > hsotg->num_of_eps) 1452 return NULL; 1453 1454 ep = index_to_ep(hsotg, idx, dir); 1455 1456 if (idx && ep->dir_in != dir) 1457 return NULL; 1458 1459 return ep; 1460 } 1461 1462 /** 1463 * dwc2_hsotg_set_test_mode - Enable usb Test Modes 1464 * @hsotg: The driver state. 1465 * @testmode: requested usb test mode 1466 * Enable usb Test Mode requested by the Host. 1467 */ 1468 int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode) 1469 { 1470 int dctl = dwc2_readl(hsotg, DCTL); 1471 1472 dctl &= ~DCTL_TSTCTL_MASK; 1473 switch (testmode) { 1474 case TEST_J: 1475 case TEST_K: 1476 case TEST_SE0_NAK: 1477 case TEST_PACKET: 1478 case TEST_FORCE_EN: 1479 dctl |= testmode << DCTL_TSTCTL_SHIFT; 1480 break; 1481 default: 1482 return -EINVAL; 1483 } 1484 dwc2_writel(hsotg, dctl, DCTL); 1485 return 0; 1486 } 1487 1488 /** 1489 * dwc2_hsotg_send_reply - send reply to control request 1490 * @hsotg: The device state 1491 * @ep: Endpoint 0 1492 * @buff: Buffer for request 1493 * @length: Length of reply. 1494 * 1495 * Create a request and queue it on the given endpoint. This is useful as 1496 * an internal method of sending replies to certain control requests, etc. 1497 */ 1498 static int dwc2_hsotg_send_reply(struct dwc2_hsotg *hsotg, 1499 struct dwc2_hsotg_ep *ep, 1500 void *buff, 1501 int length) 1502 { 1503 struct usb_request *req; 1504 int ret; 1505 1506 dev_dbg(hsotg->dev, "%s: buff %p, len %d\n", __func__, buff, length); 1507 1508 req = dwc2_hsotg_ep_alloc_request(&ep->ep, GFP_ATOMIC); 1509 hsotg->ep0_reply = req; 1510 if (!req) { 1511 dev_warn(hsotg->dev, "%s: cannot alloc req\n", __func__); 1512 return -ENOMEM; 1513 } 1514 1515 req->buf = hsotg->ep0_buff; 1516 req->length = length; 1517 /* 1518 * zero flag is for sending zlp in DATA IN stage. It has no impact on 1519 * STATUS stage. 
	req->zero = 0;
	req->complete = dwc2_hsotg_complete_oursetup;

	if (length)
		memcpy(req->buf, buff, length);

	ret = dwc2_hsotg_ep_queue(&ep->ep, req, GFP_ATOMIC);
	if (ret) {
		dev_warn(hsotg->dev, "%s: cannot queue req\n", __func__);
		return ret;
	}

	return 0;
}

/**
 * dwc2_hsotg_process_req_status - process request GET_STATUS
 * @hsotg: The device state
 * @ctrl: USB control request
 */
static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
					 struct usb_ctrlrequest *ctrl)
{
	struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
	struct dwc2_hsotg_ep *ep;
	__le16 reply;
	int ret;

	dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__);

	if (!ep0->dir_in) {
		dev_warn(hsotg->dev, "%s: direction out?\n", __func__);
		return -EINVAL;
	}

	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		/*
		 * bit 0 => self powered
		 * bit 1 => remote wakeup
		 */
		reply = cpu_to_le16(0);
		break;

	case USB_RECIP_INTERFACE:
		/* currently, the data result should be zero */
		reply = cpu_to_le16(0);
		break;

	case USB_RECIP_ENDPOINT:
		ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));
		if (!ep)
			return -ENOENT;

		reply = cpu_to_le16(ep->halted ? 1 : 0);
		break;

	default:
		return 0;
	}

	if (le16_to_cpu(ctrl->wLength) != 2)
		return -EINVAL;

	ret = dwc2_hsotg_send_reply(hsotg, ep0, &reply, 2);
	if (ret) {
		dev_err(hsotg->dev, "%s: failed to send reply\n", __func__);
		return ret;
	}

	return 1;
}

static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now);

/**
 * get_ep_head - return the first request on the endpoint
 * @hs_ep: The controller endpoint to get
 *
 * Get the first request on the endpoint.
 */
static struct dwc2_hsotg_req *get_ep_head(struct dwc2_hsotg_ep *hs_ep)
{
	return list_first_entry_or_null(&hs_ep->queue, struct dwc2_hsotg_req,
					queue);
}

/**
 * dwc2_gadget_start_next_request - Starts next request from ep queue
 * @hs_ep: Endpoint structure
 *
 * If queue is empty and EP is ISOC-OUT - unmasks OUTTKNEPDIS which is masked
 * in its handler. Hence we need to unmask it here to be able to do
 * resynchronization.
 */
static void dwc2_gadget_start_next_request(struct dwc2_hsotg_ep *hs_ep)
{
	u32 mask;
	struct dwc2_hsotg *hsotg = hs_ep->parent;
	int dir_in = hs_ep->dir_in;
	struct dwc2_hsotg_req *hs_req;
	u32 epmsk_reg = dir_in ? DIEPMSK : DOEPMSK;
	if (!list_empty(&hs_ep->queue)) {
		hs_req = get_ep_head(hs_ep);
		dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, false);
		return;
	}
	if (!hs_ep->isochronous)
		return;

	if (dir_in) {
		dev_dbg(hsotg->dev, "%s: No more ISOC-IN requests\n",
			__func__);
	} else {
		dev_dbg(hsotg->dev, "%s: No more ISOC-OUT requests\n",
			__func__);
		mask = dwc2_readl(hsotg, epmsk_reg);
		mask |= DOEPMSK_OUTTKNEPDISMSK;
		dwc2_writel(hsotg, mask, epmsk_reg);
	}
}

/**
 * dwc2_hsotg_process_req_feature - process request {SET,CLEAR}_FEATURE
 * @hsotg: The device state
 * @ctrl: USB control request
 */
static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
					  struct usb_ctrlrequest *ctrl)
{
	struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
	struct dwc2_hsotg_req *hs_req;
	bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
	struct dwc2_hsotg_ep *ep;
	int ret;
	bool halted;
	u32 recip;
	u32 wValue;
	u32 wIndex;

	dev_dbg(hsotg->dev, "%s: %s_FEATURE\n",
		__func__, set ? "SET" : "CLEAR");

	wValue = le16_to_cpu(ctrl->wValue);
	wIndex = le16_to_cpu(ctrl->wIndex);
	recip = ctrl->bRequestType & USB_RECIP_MASK;

	switch (recip) {
	case USB_RECIP_DEVICE:
		switch (wValue) {
		case USB_DEVICE_REMOTE_WAKEUP:
			hsotg->remote_wakeup_allowed = 1;
			break;

		case USB_DEVICE_TEST_MODE:
			if ((wIndex & 0xff) != 0)
				return -EINVAL;
			if (!set)
				return -EINVAL;

			hsotg->test_mode = wIndex >> 8;
			ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
			if (ret) {
				dev_err(hsotg->dev,
					"%s: failed to send reply\n", __func__);
				return ret;
			}
			break;
		default:
			return -ENOENT;
		}
		break;

	case USB_RECIP_ENDPOINT:
		ep = ep_from_windex(hsotg, wIndex);
		if (!ep) {
			dev_dbg(hsotg->dev, "%s: no endpoint for 0x%04x\n",
				__func__, wIndex);
			return -ENOENT;
		}

		switch (wValue) {
		case USB_ENDPOINT_HALT:
			halted = ep->halted;

			dwc2_hsotg_ep_sethalt(&ep->ep, set, true);

			ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
			if (ret) {
				dev_err(hsotg->dev,
					"%s: failed to send reply\n", __func__);
				return ret;
			}

			/*
			 * we have to complete all requests for ep if it was
			 * halted, and the halt was cleared by CLEAR_FEATURE
			 */

			if (!set && halted) {
				/*
				 * If we have request in progress,
				 * then complete it
				 */
				if (ep->req) {
					hs_req = ep->req;
					ep->req = NULL;
					list_del_init(&hs_req->queue);
					if (hs_req->req.complete) {
						spin_unlock(&hsotg->lock);
						usb_gadget_giveback_request(
							&ep->ep, &hs_req->req);
						spin_lock(&hsotg->lock);
					}
				}

				/* If we have pending request, then start it */
				if (!ep->req)
					dwc2_gadget_start_next_request(ep);
			}

			break;

		default:
			return -ENOENT;
		}
		break;
	default:
		return -ENOENT;
	}
	return 1;
}

static void dwc2_hsotg_enqueue_setup(struct dwc2_hsotg *hsotg);
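/*
 * Illustrative SET_FEATURE(TEST_MODE) decode for the handler above: the
 * host sends wIndex = 0x0400; the low byte must be zero and the test
 * selector lives in the high byte, so test_mode becomes 4 (TEST_PACKET).
 * The mode itself is applied later through dwc2_hsotg_set_test_mode(),
 * since USB 2.0 requires test mode to start only after the status stage
 * of this control transfer has completed.
 */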
/**
 * dwc2_hsotg_stall_ep0 - stall ep0
 * @hsotg: The device state
 *
 * Set stall for ep0 as response for setup request.
 */
static void dwc2_hsotg_stall_ep0(struct dwc2_hsotg *hsotg)
{
	struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
	u32 reg;
	u32 ctrl;

	dev_dbg(hsotg->dev, "ep0 stall (dir=%d)\n", ep0->dir_in);
	reg = (ep0->dir_in) ? DIEPCTL0 : DOEPCTL0;

	/*
	 * DxEPCTL_Stall will be cleared by EP once it has
	 * taken effect, so no need to clear later.
	 */

	ctrl = dwc2_readl(hsotg, reg);
	ctrl |= DXEPCTL_STALL;
	ctrl |= DXEPCTL_CNAK;
	dwc2_writel(hsotg, ctrl, reg);

	dev_dbg(hsotg->dev,
		"written DXEPCTL=0x%08x to %08x (DXEPCTL=0x%08x)\n",
		ctrl, reg, dwc2_readl(hsotg, reg));

	/*
	 * complete won't be called, so we enqueue
	 * setup request here
	 */
	dwc2_hsotg_enqueue_setup(hsotg);
}

/**
 * dwc2_hsotg_process_control - process a control request
 * @hsotg: The device state
 * @ctrl: The control request received
 *
 * The controller has received the SETUP phase of a control request, and
 * needs to work out what to do next (and whether to pass it on to the
 * gadget driver).
 */
static void dwc2_hsotg_process_control(struct dwc2_hsotg *hsotg,
				       struct usb_ctrlrequest *ctrl)
{
	struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
	int ret = 0;
	u32 dcfg;

	dev_dbg(hsotg->dev,
		"ctrl Type=%02x, Req=%02x, V=%04x, I=%04x, L=%04x\n",
		ctrl->bRequestType, ctrl->bRequest, ctrl->wValue,
		ctrl->wIndex, ctrl->wLength);

	if (ctrl->wLength == 0) {
		ep0->dir_in = 1;
		hsotg->ep0_state = DWC2_EP0_STATUS_IN;
	} else if (ctrl->bRequestType & USB_DIR_IN) {
		ep0->dir_in = 1;
		hsotg->ep0_state = DWC2_EP0_DATA_IN;
	} else {
		ep0->dir_in = 0;
		hsotg->ep0_state = DWC2_EP0_DATA_OUT;
	}

	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (ctrl->bRequest) {
		case USB_REQ_SET_ADDRESS:
			hsotg->connected = 1;
			dcfg = dwc2_readl(hsotg, DCFG);
			dcfg &= ~DCFG_DEVADDR_MASK;
			dcfg |= (le16_to_cpu(ctrl->wValue) <<
				 DCFG_DEVADDR_SHIFT) & DCFG_DEVADDR_MASK;
			dwc2_writel(hsotg, dcfg, DCFG);

			dev_info(hsotg->dev, "new address %d\n", ctrl->wValue);

			ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
			return;

		case USB_REQ_GET_STATUS:
			ret = dwc2_hsotg_process_req_status(hsotg, ctrl);
			break;

		case USB_REQ_CLEAR_FEATURE:
		case USB_REQ_SET_FEATURE:
			ret = dwc2_hsotg_process_req_feature(hsotg, ctrl);
			break;
		}
	}

	/* as a fallback, try delivering it to the driver to deal with */

	if (ret == 0 && hsotg->driver) {
		spin_unlock(&hsotg->lock);
		ret = hsotg->driver->setup(&hsotg->gadget, ctrl);
		spin_lock(&hsotg->lock);
		if (ret < 0)
			dev_dbg(hsotg->dev, "driver->setup() ret %d\n", ret);
	}

	/*
	 * the request is either one we cannot handle, or is not formatted
	 * correctly, so respond with a STALL for the status stage to
	 * indicate failure.
	 */

	if (ret < 0)
		dwc2_hsotg_stall_ep0(hsotg);
}
1874 * 1875 * Called on completion of any requests the driver itself submitted for 1876 * EP0 setup packets 1877 */ 1878 static void dwc2_hsotg_complete_setup(struct usb_ep *ep, 1879 struct usb_request *req) 1880 { 1881 struct dwc2_hsotg_ep *hs_ep = our_ep(ep); 1882 struct dwc2_hsotg *hsotg = hs_ep->parent; 1883 1884 if (req->status < 0) { 1885 dev_dbg(hsotg->dev, "%s: failed %d\n", __func__, req->status); 1886 return; 1887 } 1888 1889 spin_lock(&hsotg->lock); 1890 if (req->actual == 0) 1891 dwc2_hsotg_enqueue_setup(hsotg); 1892 else 1893 dwc2_hsotg_process_control(hsotg, req->buf); 1894 spin_unlock(&hsotg->lock); 1895 } 1896 1897 /** 1898 * dwc2_hsotg_enqueue_setup - start a request for EP0 packets 1899 * @hsotg: The device state. 1900 * 1901 * Enqueue a request on EP0 if necessary to received any SETUP packets 1902 * received from the host. 1903 */ 1904 static void dwc2_hsotg_enqueue_setup(struct dwc2_hsotg *hsotg) 1905 { 1906 struct usb_request *req = hsotg->ctrl_req; 1907 struct dwc2_hsotg_req *hs_req = our_req(req); 1908 int ret; 1909 1910 dev_dbg(hsotg->dev, "%s: queueing setup request\n", __func__); 1911 1912 req->zero = 0; 1913 req->length = 8; 1914 req->buf = hsotg->ctrl_buff; 1915 req->complete = dwc2_hsotg_complete_setup; 1916 1917 if (!list_empty(&hs_req->queue)) { 1918 dev_dbg(hsotg->dev, "%s already queued???\n", __func__); 1919 return; 1920 } 1921 1922 hsotg->eps_out[0]->dir_in = 0; 1923 hsotg->eps_out[0]->send_zlp = 0; 1924 hsotg->ep0_state = DWC2_EP0_SETUP; 1925 1926 ret = dwc2_hsotg_ep_queue(&hsotg->eps_out[0]->ep, req, GFP_ATOMIC); 1927 if (ret < 0) { 1928 dev_err(hsotg->dev, "%s: failed queue (%d)\n", __func__, ret); 1929 /* 1930 * Don't think there's much we can do other than watch the 1931 * driver fail. 1932 */ 1933 } 1934 } 1935 1936 static void dwc2_hsotg_program_zlp(struct dwc2_hsotg *hsotg, 1937 struct dwc2_hsotg_ep *hs_ep) 1938 { 1939 u32 ctrl; 1940 u8 index = hs_ep->index; 1941 u32 epctl_reg = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index); 1942 u32 epsiz_reg = hs_ep->dir_in ? DIEPTSIZ(index) : DOEPTSIZ(index); 1943 1944 if (hs_ep->dir_in) 1945 dev_dbg(hsotg->dev, "Sending zero-length packet on ep%d\n", 1946 index); 1947 else 1948 dev_dbg(hsotg->dev, "Receiving zero-length packet on ep%d\n", 1949 index); 1950 if (using_desc_dma(hsotg)) { 1951 /* Not specific buffer needed for ep0 ZLP */ 1952 dma_addr_t dma = hs_ep->desc_list_dma; 1953 1954 if (!index) 1955 dwc2_gadget_set_ep0_desc_chain(hsotg, hs_ep); 1956 1957 dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, dma, 0); 1958 } else { 1959 dwc2_writel(hsotg, DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) | 1960 DXEPTSIZ_XFERSIZE(0), 1961 epsiz_reg); 1962 } 1963 1964 ctrl = dwc2_readl(hsotg, epctl_reg); 1965 ctrl |= DXEPCTL_CNAK; /* clear NAK set by core */ 1966 ctrl |= DXEPCTL_EPENA; /* ensure ep enabled */ 1967 ctrl |= DXEPCTL_USBACTEP; 1968 dwc2_writel(hsotg, ctrl, epctl_reg); 1969 } 1970 1971 /** 1972 * dwc2_hsotg_complete_request - complete a request given to us 1973 * @hsotg: The device state. 1974 * @hs_ep: The endpoint the request was on. 1975 * @hs_req: The request to complete. 1976 * @result: The result code (0 => Ok, otherwise errno) 1977 * 1978 * The given request has finished, so call the necessary completion 1979 * if it has one and then look to see if we can start a new request 1980 * on the endpoint. 1981 * 1982 * Note, expects the ep to already be locked as appropriate. 
1983 */ 1984 static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg, 1985 struct dwc2_hsotg_ep *hs_ep, 1986 struct dwc2_hsotg_req *hs_req, 1987 int result) 1988 { 1989 if (!hs_req) { 1990 dev_dbg(hsotg->dev, "%s: nothing to complete?\n", __func__); 1991 return; 1992 } 1993 1994 dev_dbg(hsotg->dev, "complete: ep %p %s, req %p, %d => %p\n", 1995 hs_ep, hs_ep->ep.name, hs_req, result, hs_req->req.complete); 1996 1997 /* 1998 * only replace the status if we've not already set an error 1999 * from a previous transaction 2000 */ 2001 2002 if (hs_req->req.status == -EINPROGRESS) 2003 hs_req->req.status = result; 2004 2005 if (using_dma(hsotg)) 2006 dwc2_hsotg_unmap_dma(hsotg, hs_ep, hs_req); 2007 2008 dwc2_hsotg_handle_unaligned_buf_complete(hsotg, hs_ep, hs_req); 2009 2010 hs_ep->req = NULL; 2011 list_del_init(&hs_req->queue); 2012 2013 /* 2014 * call the complete request with the locks off, just in case the 2015 * request tries to queue more work for this endpoint. 2016 */ 2017 2018 if (hs_req->req.complete) { 2019 spin_unlock(&hsotg->lock); 2020 usb_gadget_giveback_request(&hs_ep->ep, &hs_req->req); 2021 spin_lock(&hsotg->lock); 2022 } 2023 2024 /* In DDMA don't need to proceed to starting of next ISOC request */ 2025 if (using_desc_dma(hsotg) && hs_ep->isochronous) 2026 return; 2027 2028 /* 2029 * Look to see if there is anything else to do. Note, the completion 2030 * of the previous request may have caused a new request to be started 2031 * so be careful when doing this. 2032 */ 2033 2034 if (!hs_ep->req && result >= 0) 2035 dwc2_gadget_start_next_request(hs_ep); 2036 } 2037 2038 /* 2039 * dwc2_gadget_complete_isoc_request_ddma - complete an isoc request in DDMA 2040 * @hs_ep: The endpoint the request was on. 2041 * 2042 * Get first request from the ep queue, determine descriptor on which complete 2043 * happened. SW discovers which descriptor currently in use by HW, adjusts 2044 * dma_address and calculates index of completed descriptor based on the value 2045 * of DEPDMA register. Update actual length of request, giveback to gadget. 2046 */ 2047 static void dwc2_gadget_complete_isoc_request_ddma(struct dwc2_hsotg_ep *hs_ep) 2048 { 2049 struct dwc2_hsotg *hsotg = hs_ep->parent; 2050 struct dwc2_hsotg_req *hs_req; 2051 struct usb_request *ureq; 2052 u32 desc_sts; 2053 u32 mask; 2054 2055 desc_sts = hs_ep->desc_list[hs_ep->compl_desc].status; 2056 2057 /* Process only descriptors with buffer status set to DMA done */ 2058 while ((desc_sts & DEV_DMA_BUFF_STS_MASK) >> 2059 DEV_DMA_BUFF_STS_SHIFT == DEV_DMA_BUFF_STS_DMADONE) { 2060 2061 hs_req = get_ep_head(hs_ep); 2062 if (!hs_req) { 2063 dev_warn(hsotg->dev, "%s: ISOC EP queue empty\n", __func__); 2064 return; 2065 } 2066 ureq = &hs_req->req; 2067 2068 /* Check completion status */ 2069 if ((desc_sts & DEV_DMA_STS_MASK) >> DEV_DMA_STS_SHIFT == 2070 DEV_DMA_STS_SUCC) { 2071 mask = hs_ep->dir_in ? 
				DEV_DMA_ISOC_TX_NBYTES_MASK :
2072 				DEV_DMA_ISOC_RX_NBYTES_MASK;
2073 			ureq->actual = ureq->length - ((desc_sts & mask) >>
2074 				DEV_DMA_ISOC_NBYTES_SHIFT);
2075 
2076 			/* Adjust the actual length for ISOC OUT if the
2077 			 * length is not aligned to 4
2078 			 */
2079 			if (!hs_ep->dir_in && ureq->length & 0x3)
2080 				ureq->actual += 4 - (ureq->length & 0x3);
2081 		}
2082 
2083 		dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
2084 
2085 		hs_ep->compl_desc++;
2086 		if (hs_ep->compl_desc > (MAX_DMA_DESC_NUM_GENERIC - 1))
2087 			hs_ep->compl_desc = 0;
2088 		desc_sts = hs_ep->desc_list[hs_ep->compl_desc].status;
2089 	}
2090 }
2091 
2092 /*
2093  * dwc2_gadget_handle_isoc_bna - handle BNA interrupt for ISOC.
2094  * @hs_ep: The isochronous endpoint.
2095  *
2096  * If the EP is ISOC OUT, the RX FIFO needs flushing to remove the source
2097  * of the BNA interrupt. Reset the target frame and next_desc to allow
2098  * restarting ISOCs on the NAK interrupt for the IN direction, or on the
2099  * OUTTKNEPDIS interrupt for the OUT direction.
2100  */
2101 static void dwc2_gadget_handle_isoc_bna(struct dwc2_hsotg_ep *hs_ep)
2102 {
2103 	struct dwc2_hsotg *hsotg = hs_ep->parent;
2104 
2105 	if (!hs_ep->dir_in)
2106 		dwc2_flush_rx_fifo(hsotg);
2107 	dwc2_hsotg_complete_request(hsotg, hs_ep, get_ep_head(hs_ep), 0);
2108 
2109 	hs_ep->target_frame = TARGET_FRAME_INITIAL;
2110 	hs_ep->next_desc = 0;
2111 	hs_ep->compl_desc = 0;
2112 }
2113 
2114 /**
2115  * dwc2_hsotg_rx_data - receive data from the FIFO for an endpoint
2116  * @hsotg: The device state.
2117  * @ep_idx: The endpoint index for the data
2118  * @size: The size of data in the fifo, in bytes
2119  *
2120  * The FIFO status shows there is data to read from the FIFO for a given
2121  * endpoint, so sort out whether we need to read the data into a request
2122  * that has been made for that endpoint.
2123  */
2124 static void dwc2_hsotg_rx_data(struct dwc2_hsotg *hsotg, int ep_idx, int size)
2125 {
2126 	struct dwc2_hsotg_ep *hs_ep = hsotg->eps_out[ep_idx];
2127 	struct dwc2_hsotg_req *hs_req = hs_ep->req;
2128 	int to_read;
2129 	int max_req;
2130 	int read_ptr;
2131 
2132 	if (!hs_req) {
2133 		u32 epctl = dwc2_readl(hsotg, DOEPCTL(ep_idx));
2134 		int ptr;
2135 
2136 		dev_dbg(hsotg->dev,
2137 			"%s: FIFO %d bytes on ep%d but no req (DXEPCTL=0x%08x)\n",
2138 			__func__, size, ep_idx, epctl);
2139 
2140 		/* dump the data from the FIFO, as there's nothing we can do */
2141 		for (ptr = 0; ptr < size; ptr += 4)
2142 			(void)dwc2_readl(hsotg, EPFIFO(ep_idx));
2143 
2144 		return;
2145 	}
2146 
2147 	to_read = size;
2148 	read_ptr = hs_req->req.actual;
2149 	max_req = hs_req->req.length - read_ptr;
2150 
2151 	dev_dbg(hsotg->dev, "%s: read %d/%d, done %d/%d\n",
2152 		__func__, to_read, max_req, read_ptr, hs_req->req.length);
2153 
2154 	if (to_read > max_req) {
2155 		/*
2156 		 * more data appeared than we were willing
2157 		 * to deal with in this request.
2158 		 */
2159 
2160 		/* currently we don't handle this case */
2161 		WARN_ON_ONCE(1);
2162 	}
2163 
2164 	hs_ep->total_data += to_read;
2165 	hs_req->req.actual += to_read;
2166 	to_read = DIV_ROUND_UP(to_read, 4);
2167 
2168 	/*
2169 	 * note, we might over-write the buffer end by 3 bytes depending on
2170 	 * alignment of the data.
2171 	 */
2172 	dwc2_readl_rep(hsotg, EPFIFO(ep_idx),
2173 		       hs_req->req.buf + read_ptr, to_read);
2174 }
2175 
2176 /**
2177  * dwc2_hsotg_ep0_zlp - send/receive zero-length packet on control endpoint
2178  * @hsotg: The device instance
2179  * @dir_in: True if this is an IN (transmit) ZLP
2180  *
2181  * Generate a zero-length IN packet request for terminating a SETUP
2182  * transaction.
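 * When @dir_in is false, the same mechanism is instead used to receive
 * the zero-length OUT status packet of a control IN transfer.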
2183 * 2184 * Note, since we don't write any data to the TxFIFO, then it is 2185 * currently believed that we do not need to wait for any space in 2186 * the TxFIFO. 2187 */ 2188 static void dwc2_hsotg_ep0_zlp(struct dwc2_hsotg *hsotg, bool dir_in) 2189 { 2190 /* eps_out[0] is used in both directions */ 2191 hsotg->eps_out[0]->dir_in = dir_in; 2192 hsotg->ep0_state = dir_in ? DWC2_EP0_STATUS_IN : DWC2_EP0_STATUS_OUT; 2193 2194 dwc2_hsotg_program_zlp(hsotg, hsotg->eps_out[0]); 2195 } 2196 2197 static void dwc2_hsotg_change_ep_iso_parity(struct dwc2_hsotg *hsotg, 2198 u32 epctl_reg) 2199 { 2200 u32 ctrl; 2201 2202 ctrl = dwc2_readl(hsotg, epctl_reg); 2203 if (ctrl & DXEPCTL_EOFRNUM) 2204 ctrl |= DXEPCTL_SETEVENFR; 2205 else 2206 ctrl |= DXEPCTL_SETODDFR; 2207 dwc2_writel(hsotg, ctrl, epctl_reg); 2208 } 2209 2210 /* 2211 * dwc2_gadget_get_xfersize_ddma - get transferred bytes amount from desc 2212 * @hs_ep - The endpoint on which transfer went 2213 * 2214 * Iterate over endpoints descriptor chain and get info on bytes remained 2215 * in DMA descriptors after transfer has completed. Used for non isoc EPs. 2216 */ 2217 static unsigned int dwc2_gadget_get_xfersize_ddma(struct dwc2_hsotg_ep *hs_ep) 2218 { 2219 struct dwc2_hsotg *hsotg = hs_ep->parent; 2220 unsigned int bytes_rem = 0; 2221 struct dwc2_dma_desc *desc = hs_ep->desc_list; 2222 int i; 2223 u32 status; 2224 2225 if (!desc) 2226 return -EINVAL; 2227 2228 for (i = 0; i < hs_ep->desc_count; ++i) { 2229 status = desc->status; 2230 bytes_rem += status & DEV_DMA_NBYTES_MASK; 2231 2232 if (status & DEV_DMA_STS_MASK) 2233 dev_err(hsotg->dev, "descriptor %d closed with %x\n", 2234 i, status & DEV_DMA_STS_MASK); 2235 } 2236 2237 return bytes_rem; 2238 } 2239 2240 /** 2241 * dwc2_hsotg_handle_outdone - handle receiving OutDone/SetupDone from RXFIFO 2242 * @hsotg: The device instance 2243 * @epnum: The endpoint received from 2244 * 2245 * The RXFIFO has delivered an OutDone event, which means that the data 2246 * transfer for an OUT endpoint has been completed, either by a short 2247 * packet or by the finish of a transfer. 2248 */ 2249 static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum) 2250 { 2251 u32 epsize = dwc2_readl(hsotg, DOEPTSIZ(epnum)); 2252 struct dwc2_hsotg_ep *hs_ep = hsotg->eps_out[epnum]; 2253 struct dwc2_hsotg_req *hs_req = hs_ep->req; 2254 struct usb_request *req = &hs_req->req; 2255 unsigned int size_left = DXEPTSIZ_XFERSIZE_GET(epsize); 2256 int result = 0; 2257 2258 if (!hs_req) { 2259 dev_dbg(hsotg->dev, "%s: no request active\n", __func__); 2260 return; 2261 } 2262 2263 if (epnum == 0 && hsotg->ep0_state == DWC2_EP0_STATUS_OUT) { 2264 dev_dbg(hsotg->dev, "zlp packet received\n"); 2265 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0); 2266 dwc2_hsotg_enqueue_setup(hsotg); 2267 return; 2268 } 2269 2270 if (using_desc_dma(hsotg)) 2271 size_left = dwc2_gadget_get_xfersize_ddma(hs_ep); 2272 2273 if (using_dma(hsotg)) { 2274 unsigned int size_done; 2275 2276 /* 2277 * Calculate the size of the transfer by checking how much 2278 * is left in the endpoint size register and then working it 2279 * out from the amount we loaded for the transfer. 2280 * 2281 * We need to do this as DMA pointers are always 32bit aligned 2282 * so may overshoot/undershoot the transfer. 
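 *
 * As a worked example (hypothetical numbers): if 512 bytes were loaded
 * for this programming cycle and the size register reads back 112 bytes
 * remaining, then 400 bytes completed; adding last_load gives the total
 * that is stored in req->actual below.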
2283 */ 2284 2285 size_done = hs_ep->size_loaded - size_left; 2286 size_done += hs_ep->last_load; 2287 2288 req->actual = size_done; 2289 } 2290 2291 /* if there is more request to do, schedule new transfer */ 2292 if (req->actual < req->length && size_left == 0) { 2293 dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, true); 2294 return; 2295 } 2296 2297 if (req->actual < req->length && req->short_not_ok) { 2298 dev_dbg(hsotg->dev, "%s: got %d/%d (short not ok) => error\n", 2299 __func__, req->actual, req->length); 2300 2301 /* 2302 * todo - what should we return here? there's no one else 2303 * even bothering to check the status. 2304 */ 2305 } 2306 2307 /* DDMA IN status phase will start from StsPhseRcvd interrupt */ 2308 if (!using_desc_dma(hsotg) && epnum == 0 && 2309 hsotg->ep0_state == DWC2_EP0_DATA_OUT) { 2310 /* Move to STATUS IN */ 2311 dwc2_hsotg_ep0_zlp(hsotg, true); 2312 return; 2313 } 2314 2315 /* 2316 * Slave mode OUT transfers do not go through XferComplete so 2317 * adjust the ISOC parity here. 2318 */ 2319 if (!using_dma(hsotg)) { 2320 if (hs_ep->isochronous && hs_ep->interval == 1) 2321 dwc2_hsotg_change_ep_iso_parity(hsotg, DOEPCTL(epnum)); 2322 else if (hs_ep->isochronous && hs_ep->interval > 1) 2323 dwc2_gadget_incr_frame_num(hs_ep); 2324 } 2325 2326 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, result); 2327 } 2328 2329 /** 2330 * dwc2_hsotg_handle_rx - RX FIFO has data 2331 * @hsotg: The device instance 2332 * 2333 * The IRQ handler has detected that the RX FIFO has some data in it 2334 * that requires processing, so find out what is in there and do the 2335 * appropriate read. 2336 * 2337 * The RXFIFO is a true FIFO, the packets coming out are still in packet 2338 * chunks, so if you have x packets received on an endpoint you'll get x 2339 * FIFO events delivered, each with a packet's worth of data in it. 2340 * 2341 * When using DMA, we should not be processing events from the RXFIFO 2342 * as the actual data should be sent to the memory directly and we turn 2343 * on the completion interrupts to get notifications of transfer completion. 2344 */ 2345 static void dwc2_hsotg_handle_rx(struct dwc2_hsotg *hsotg) 2346 { 2347 u32 grxstsr = dwc2_readl(hsotg, GRXSTSP); 2348 u32 epnum, status, size; 2349 2350 WARN_ON(using_dma(hsotg)); 2351 2352 epnum = grxstsr & GRXSTS_EPNUM_MASK; 2353 status = grxstsr & GRXSTS_PKTSTS_MASK; 2354 2355 size = grxstsr & GRXSTS_BYTECNT_MASK; 2356 size >>= GRXSTS_BYTECNT_SHIFT; 2357 2358 dev_dbg(hsotg->dev, "%s: GRXSTSP=0x%08x (%d@%d)\n", 2359 __func__, grxstsr, size, epnum); 2360 2361 switch ((status & GRXSTS_PKTSTS_MASK) >> GRXSTS_PKTSTS_SHIFT) { 2362 case GRXSTS_PKTSTS_GLOBALOUTNAK: 2363 dev_dbg(hsotg->dev, "GLOBALOUTNAK\n"); 2364 break; 2365 2366 case GRXSTS_PKTSTS_OUTDONE: 2367 dev_dbg(hsotg->dev, "OutDone (Frame=0x%08x)\n", 2368 dwc2_hsotg_read_frameno(hsotg)); 2369 2370 if (!using_dma(hsotg)) 2371 dwc2_hsotg_handle_outdone(hsotg, epnum); 2372 break; 2373 2374 case GRXSTS_PKTSTS_SETUPDONE: 2375 dev_dbg(hsotg->dev, 2376 "SetupDone (Frame=0x%08x, DOPEPCTL=0x%08x)\n", 2377 dwc2_hsotg_read_frameno(hsotg), 2378 dwc2_readl(hsotg, DOEPCTL(0))); 2379 /* 2380 * Call dwc2_hsotg_handle_outdone here if it was not called from 2381 * GRXSTS_PKTSTS_OUTDONE. That is, if the core didn't 2382 * generate GRXSTS_PKTSTS_OUTDONE for setup packet. 
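 * The ep0_state test below distinguishes the two cases: it is still
 * DWC2_EP0_SETUP only if no OutDone event was seen for this packet.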
2383 */ 2384 if (hsotg->ep0_state == DWC2_EP0_SETUP) 2385 dwc2_hsotg_handle_outdone(hsotg, epnum); 2386 break; 2387 2388 case GRXSTS_PKTSTS_OUTRX: 2389 dwc2_hsotg_rx_data(hsotg, epnum, size); 2390 break; 2391 2392 case GRXSTS_PKTSTS_SETUPRX: 2393 dev_dbg(hsotg->dev, 2394 "SetupRX (Frame=0x%08x, DOPEPCTL=0x%08x)\n", 2395 dwc2_hsotg_read_frameno(hsotg), 2396 dwc2_readl(hsotg, DOEPCTL(0))); 2397 2398 WARN_ON(hsotg->ep0_state != DWC2_EP0_SETUP); 2399 2400 dwc2_hsotg_rx_data(hsotg, epnum, size); 2401 break; 2402 2403 default: 2404 dev_warn(hsotg->dev, "%s: unknown status %08x\n", 2405 __func__, grxstsr); 2406 2407 dwc2_hsotg_dump(hsotg); 2408 break; 2409 } 2410 } 2411 2412 /** 2413 * dwc2_hsotg_ep0_mps - turn max packet size into register setting 2414 * @mps: The maximum packet size in bytes. 2415 */ 2416 static u32 dwc2_hsotg_ep0_mps(unsigned int mps) 2417 { 2418 switch (mps) { 2419 case 64: 2420 return D0EPCTL_MPS_64; 2421 case 32: 2422 return D0EPCTL_MPS_32; 2423 case 16: 2424 return D0EPCTL_MPS_16; 2425 case 8: 2426 return D0EPCTL_MPS_8; 2427 } 2428 2429 /* bad max packet size, warn and return invalid result */ 2430 WARN_ON(1); 2431 return (u32)-1; 2432 } 2433 2434 /** 2435 * dwc2_hsotg_set_ep_maxpacket - set endpoint's max-packet field 2436 * @hsotg: The driver state. 2437 * @ep: The index number of the endpoint 2438 * @mps: The maximum packet size in bytes 2439 * @mc: The multicount value 2440 * @dir_in: True if direction is in. 2441 * 2442 * Configure the maximum packet size for the given endpoint, updating 2443 * the hardware control registers to reflect this. 2444 */ 2445 static void dwc2_hsotg_set_ep_maxpacket(struct dwc2_hsotg *hsotg, 2446 unsigned int ep, unsigned int mps, 2447 unsigned int mc, unsigned int dir_in) 2448 { 2449 struct dwc2_hsotg_ep *hs_ep; 2450 u32 reg; 2451 2452 hs_ep = index_to_ep(hsotg, ep, dir_in); 2453 if (!hs_ep) 2454 return; 2455 2456 if (ep == 0) { 2457 u32 mps_bytes = mps; 2458 2459 /* EP0 is a special case */ 2460 mps = dwc2_hsotg_ep0_mps(mps_bytes); 2461 if (mps > 3) 2462 goto bad_mps; 2463 hs_ep->ep.maxpacket = mps_bytes; 2464 hs_ep->mc = 1; 2465 } else { 2466 if (mps > 1024) 2467 goto bad_mps; 2468 hs_ep->mc = mc; 2469 if (mc > 3) 2470 goto bad_mps; 2471 hs_ep->ep.maxpacket = mps; 2472 } 2473 2474 if (dir_in) { 2475 reg = dwc2_readl(hsotg, DIEPCTL(ep)); 2476 reg &= ~DXEPCTL_MPS_MASK; 2477 reg |= mps; 2478 dwc2_writel(hsotg, reg, DIEPCTL(ep)); 2479 } else { 2480 reg = dwc2_readl(hsotg, DOEPCTL(ep)); 2481 reg &= ~DXEPCTL_MPS_MASK; 2482 reg |= mps; 2483 dwc2_writel(hsotg, reg, DOEPCTL(ep)); 2484 } 2485 2486 return; 2487 2488 bad_mps: 2489 dev_err(hsotg->dev, "ep%d: bad mps of %d\n", ep, mps); 2490 } 2491 2492 /** 2493 * dwc2_hsotg_txfifo_flush - flush Tx FIFO 2494 * @hsotg: The driver state 2495 * @idx: The index for the endpoint (0..15) 2496 */ 2497 static void dwc2_hsotg_txfifo_flush(struct dwc2_hsotg *hsotg, unsigned int idx) 2498 { 2499 dwc2_writel(hsotg, GRSTCTL_TXFNUM(idx) | GRSTCTL_TXFFLSH, 2500 GRSTCTL); 2501 2502 /* wait until the fifo is flushed */ 2503 if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_TXFFLSH, 100)) 2504 dev_warn(hsotg->dev, "%s: timeout flushing fifo GRSTCTL_TXFFLSH\n", 2505 __func__); 2506 } 2507 2508 /** 2509 * dwc2_hsotg_trytx - check to see if anything needs transmitting 2510 * @hsotg: The driver state 2511 * @hs_ep: The driver endpoint to check. 2512 * 2513 * Check to see if there is a request that has data to send, and if so 2514 * make an attempt to write data into the FIFO. 
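 *
 * Returns the result of dwc2_hsotg_write_fifo() when a partially
 * written request is pending, otherwise zero.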
2515 */ 2516 static int dwc2_hsotg_trytx(struct dwc2_hsotg *hsotg, 2517 struct dwc2_hsotg_ep *hs_ep) 2518 { 2519 struct dwc2_hsotg_req *hs_req = hs_ep->req; 2520 2521 if (!hs_ep->dir_in || !hs_req) { 2522 /** 2523 * if request is not enqueued, we disable interrupts 2524 * for endpoints, excepting ep0 2525 */ 2526 if (hs_ep->index != 0) 2527 dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index, 2528 hs_ep->dir_in, 0); 2529 return 0; 2530 } 2531 2532 if (hs_req->req.actual < hs_req->req.length) { 2533 dev_dbg(hsotg->dev, "trying to write more for ep%d\n", 2534 hs_ep->index); 2535 return dwc2_hsotg_write_fifo(hsotg, hs_ep, hs_req); 2536 } 2537 2538 return 0; 2539 } 2540 2541 /** 2542 * dwc2_hsotg_complete_in - complete IN transfer 2543 * @hsotg: The device state. 2544 * @hs_ep: The endpoint that has just completed. 2545 * 2546 * An IN transfer has been completed, update the transfer's state and then 2547 * call the relevant completion routines. 2548 */ 2549 static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg, 2550 struct dwc2_hsotg_ep *hs_ep) 2551 { 2552 struct dwc2_hsotg_req *hs_req = hs_ep->req; 2553 u32 epsize = dwc2_readl(hsotg, DIEPTSIZ(hs_ep->index)); 2554 int size_left, size_done; 2555 2556 if (!hs_req) { 2557 dev_dbg(hsotg->dev, "XferCompl but no req\n"); 2558 return; 2559 } 2560 2561 /* Finish ZLP handling for IN EP0 transactions */ 2562 if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_STATUS_IN) { 2563 dev_dbg(hsotg->dev, "zlp packet sent\n"); 2564 2565 /* 2566 * While send zlp for DWC2_EP0_STATUS_IN EP direction was 2567 * changed to IN. Change back to complete OUT transfer request 2568 */ 2569 hs_ep->dir_in = 0; 2570 2571 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0); 2572 if (hsotg->test_mode) { 2573 int ret; 2574 2575 ret = dwc2_hsotg_set_test_mode(hsotg, hsotg->test_mode); 2576 if (ret < 0) { 2577 dev_dbg(hsotg->dev, "Invalid Test #%d\n", 2578 hsotg->test_mode); 2579 dwc2_hsotg_stall_ep0(hsotg); 2580 return; 2581 } 2582 } 2583 dwc2_hsotg_enqueue_setup(hsotg); 2584 return; 2585 } 2586 2587 /* 2588 * Calculate the size of the transfer by checking how much is left 2589 * in the endpoint size register and then working it out from 2590 * the amount we loaded for the transfer. 2591 * 2592 * We do this even for DMA, as the transfer may have incremented 2593 * past the end of the buffer (DMA transfers are always 32bit 2594 * aligned). 
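 * In descriptor DMA mode the remainder is instead read back from the
 * descriptor chain, via dwc2_gadget_get_xfersize_ddma().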
2595  */
2596 	if (using_desc_dma(hsotg)) {
2597 		size_left = dwc2_gadget_get_xfersize_ddma(hs_ep);
2598 		if (size_left < 0)
2599 			dev_err(hsotg->dev, "error parsing DDMA results %d\n",
2600 				size_left);
2601 	} else {
2602 		size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
2603 	}
2604 
2605 	size_done = hs_ep->size_loaded - size_left;
2606 	size_done += hs_ep->last_load;
2607 
2608 	if (hs_req->req.actual != size_done)
2609 		dev_dbg(hsotg->dev, "%s: adjusting size done %d => %d\n",
2610 			__func__, hs_req->req.actual, size_done);
2611 
2612 	hs_req->req.actual = size_done;
2613 	dev_dbg(hsotg->dev, "req->length:%d req->actual:%d req->zero:%d\n",
2614 		hs_req->req.length, hs_req->req.actual, hs_req->req.zero);
2615 
2616 	if (!size_left && hs_req->req.actual < hs_req->req.length) {
2617 		dev_dbg(hsotg->dev, "%s trying more for req...\n", __func__);
2618 		dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, true);
2619 		return;
2620 	}
2621 
2622 	/* ZLP for all endpoints; for ep0, only in the DATA IN stage */
2623 	if (hs_ep->send_zlp) {
2624 		dwc2_hsotg_program_zlp(hsotg, hs_ep);
2625 		hs_ep->send_zlp = 0;
2626 		/* transfer will be completed on next complete interrupt */
2627 		return;
2628 	}
2629 
2630 	if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_DATA_IN) {
2631 		/* Move to STATUS OUT */
2632 		dwc2_hsotg_ep0_zlp(hsotg, false);
2633 		return;
2634 	}
2635 
2636 	dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
2637 }
2638 
2639 /**
2640  * dwc2_gadget_read_ep_interrupts - read interrupts for given ep
2641  * @hsotg: The device state.
2642  * @idx: Index of ep.
2643  * @dir_in: Endpoint direction 1-in 0-out.
2644  *
2645  * Read the interrupts for the endpoint with the given index and
2646  * direction, masking epint_reg with the corresponding mask.
2647  */
2648 static u32 dwc2_gadget_read_ep_interrupts(struct dwc2_hsotg *hsotg,
2649 					  unsigned int idx, int dir_in)
2650 {
2651 	u32 epmsk_reg = dir_in ? DIEPMSK : DOEPMSK;
2652 	u32 epint_reg = dir_in ? DIEPINT(idx) : DOEPINT(idx);
2653 	u32 ints;
2654 	u32 mask;
2655 	u32 diepempmsk;
2656 
2657 	mask = dwc2_readl(hsotg, epmsk_reg);
2658 	diepempmsk = dwc2_readl(hsotg, DIEPEMPMSK);
2659 	mask |= ((diepempmsk >> idx) & 0x1) ? DIEPMSK_TXFIFOEMPTY : 0;
2660 	mask |= DXEPINT_SETUP_RCVD;
2661 
2662 	ints = dwc2_readl(hsotg, epint_reg);
2663 	ints &= mask;
2664 	return ints;
2665 }
2666 
2667 /**
2668  * dwc2_gadget_handle_ep_disabled - handle DXEPINT_EPDISBLD
2669  * @hs_ep: The endpoint on which interrupt is asserted.
2670  *
2671  * This interrupt indicates that the endpoint has been disabled per the
2672  * application's request.
2673  *
2674  * For IN endpoints this flushes the TX FIFO; for BULK it also clears the
2675  * global NP IN NAK (DCTL_CGNPINNAK), and for ISOC it completes the current request.
2676  *
2677  * For ISOC-OUT endpoints this completes expired requests; if a request
2678  * remains queued, it is started.
2679  */
2680 static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
2681 {
2682 	struct dwc2_hsotg *hsotg = hs_ep->parent;
2683 	struct dwc2_hsotg_req *hs_req;
2684 	unsigned char idx = hs_ep->index;
2685 	int dir_in = hs_ep->dir_in;
2686 	u32 epctl_reg = dir_in ?
DIEPCTL(idx) : DOEPCTL(idx); 2687 int dctl = dwc2_readl(hsotg, DCTL); 2688 2689 dev_dbg(hsotg->dev, "%s: EPDisbld\n", __func__); 2690 2691 if (dir_in) { 2692 int epctl = dwc2_readl(hsotg, epctl_reg); 2693 2694 dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index); 2695 2696 if (hs_ep->isochronous) { 2697 dwc2_hsotg_complete_in(hsotg, hs_ep); 2698 return; 2699 } 2700 2701 if ((epctl & DXEPCTL_STALL) && (epctl & DXEPCTL_EPTYPE_BULK)) { 2702 int dctl = dwc2_readl(hsotg, DCTL); 2703 2704 dctl |= DCTL_CGNPINNAK; 2705 dwc2_writel(hsotg, dctl, DCTL); 2706 } 2707 return; 2708 } 2709 2710 if (dctl & DCTL_GOUTNAKSTS) { 2711 dctl |= DCTL_CGOUTNAK; 2712 dwc2_writel(hsotg, dctl, DCTL); 2713 } 2714 2715 if (!hs_ep->isochronous) 2716 return; 2717 2718 if (list_empty(&hs_ep->queue)) { 2719 dev_dbg(hsotg->dev, "%s: complete_ep 0x%p, ep->queue empty!\n", 2720 __func__, hs_ep); 2721 return; 2722 } 2723 2724 do { 2725 hs_req = get_ep_head(hs_ep); 2726 if (hs_req) 2727 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 2728 -ENODATA); 2729 dwc2_gadget_incr_frame_num(hs_ep); 2730 /* Update current frame number value. */ 2731 hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg); 2732 } while (dwc2_gadget_target_frame_elapsed(hs_ep)); 2733 2734 dwc2_gadget_start_next_request(hs_ep); 2735 } 2736 2737 /** 2738 * dwc2_gadget_handle_out_token_ep_disabled - handle DXEPINT_OUTTKNEPDIS 2739 * @ep: The endpoint on which interrupt is asserted. 2740 * 2741 * This is starting point for ISOC-OUT transfer, synchronization done with 2742 * first out token received from host while corresponding EP is disabled. 2743 * 2744 * Device does not know initial frame in which out token will come. For this 2745 * HW generates OUTTKNEPDIS - out token is received while EP is disabled. Upon 2746 * getting this interrupt SW starts calculation for next transfer frame. 2747 */ 2748 static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep) 2749 { 2750 struct dwc2_hsotg *hsotg = ep->parent; 2751 int dir_in = ep->dir_in; 2752 u32 doepmsk; 2753 2754 if (dir_in || !ep->isochronous) 2755 return; 2756 2757 if (using_desc_dma(hsotg)) { 2758 if (ep->target_frame == TARGET_FRAME_INITIAL) { 2759 /* Start first ISO Out */ 2760 ep->target_frame = hsotg->frame_number; 2761 dwc2_gadget_start_isoc_ddma(ep); 2762 } 2763 return; 2764 } 2765 2766 if (ep->interval > 1 && 2767 ep->target_frame == TARGET_FRAME_INITIAL) { 2768 u32 ctrl; 2769 2770 ep->target_frame = hsotg->frame_number; 2771 dwc2_gadget_incr_frame_num(ep); 2772 2773 ctrl = dwc2_readl(hsotg, DOEPCTL(ep->index)); 2774 if (ep->target_frame & 0x1) 2775 ctrl |= DXEPCTL_SETODDFR; 2776 else 2777 ctrl |= DXEPCTL_SETEVENFR; 2778 2779 dwc2_writel(hsotg, ctrl, DOEPCTL(ep->index)); 2780 } 2781 2782 dwc2_gadget_start_next_request(ep); 2783 doepmsk = dwc2_readl(hsotg, DOEPMSK); 2784 doepmsk &= ~DOEPMSK_OUTTKNEPDISMSK; 2785 dwc2_writel(hsotg, doepmsk, DOEPMSK); 2786 } 2787 2788 /** 2789 * dwc2_gadget_handle_nak - handle NAK interrupt 2790 * @hs_ep: The endpoint on which interrupt is asserted. 2791 * 2792 * This is starting point for ISOC-IN transfer, synchronization done with 2793 * first IN token received from host while corresponding EP is disabled. 2794 * 2795 * Device does not know when first one token will arrive from host. On first 2796 * token arrival HW generates 2 interrupts: 'in token received while FIFO empty' 2797 * and 'NAK'. NAK interrupt for ISOC-IN means that token has arrived and ZLP was 2798 * sent in response to that as there was no data in FIFO. 
SW is basing on this 2799 * interrupt to obtain frame in which token has come and then based on the 2800 * interval calculates next frame for transfer. 2801 */ 2802 static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep) 2803 { 2804 struct dwc2_hsotg *hsotg = hs_ep->parent; 2805 int dir_in = hs_ep->dir_in; 2806 2807 if (!dir_in || !hs_ep->isochronous) 2808 return; 2809 2810 if (hs_ep->target_frame == TARGET_FRAME_INITIAL) { 2811 2812 if (using_desc_dma(hsotg)) { 2813 hs_ep->target_frame = hsotg->frame_number; 2814 dwc2_gadget_incr_frame_num(hs_ep); 2815 dwc2_gadget_start_isoc_ddma(hs_ep); 2816 return; 2817 } 2818 2819 hs_ep->target_frame = hsotg->frame_number; 2820 if (hs_ep->interval > 1) { 2821 u32 ctrl = dwc2_readl(hsotg, 2822 DIEPCTL(hs_ep->index)); 2823 if (hs_ep->target_frame & 0x1) 2824 ctrl |= DXEPCTL_SETODDFR; 2825 else 2826 ctrl |= DXEPCTL_SETEVENFR; 2827 2828 dwc2_writel(hsotg, ctrl, DIEPCTL(hs_ep->index)); 2829 } 2830 2831 dwc2_hsotg_complete_request(hsotg, hs_ep, 2832 get_ep_head(hs_ep), 0); 2833 } 2834 2835 if (!using_desc_dma(hsotg)) 2836 dwc2_gadget_incr_frame_num(hs_ep); 2837 } 2838 2839 /** 2840 * dwc2_hsotg_epint - handle an in/out endpoint interrupt 2841 * @hsotg: The driver state 2842 * @idx: The index for the endpoint (0..15) 2843 * @dir_in: Set if this is an IN endpoint 2844 * 2845 * Process and clear any interrupt pending for an individual endpoint 2846 */ 2847 static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx, 2848 int dir_in) 2849 { 2850 struct dwc2_hsotg_ep *hs_ep = index_to_ep(hsotg, idx, dir_in); 2851 u32 epint_reg = dir_in ? DIEPINT(idx) : DOEPINT(idx); 2852 u32 epctl_reg = dir_in ? DIEPCTL(idx) : DOEPCTL(idx); 2853 u32 epsiz_reg = dir_in ? DIEPTSIZ(idx) : DOEPTSIZ(idx); 2854 u32 ints; 2855 u32 ctrl; 2856 2857 ints = dwc2_gadget_read_ep_interrupts(hsotg, idx, dir_in); 2858 ctrl = dwc2_readl(hsotg, epctl_reg); 2859 2860 /* Clear endpoint interrupts */ 2861 dwc2_writel(hsotg, ints, epint_reg); 2862 2863 if (!hs_ep) { 2864 dev_err(hsotg->dev, "%s:Interrupt for unconfigured ep%d(%s)\n", 2865 __func__, idx, dir_in ? "in" : "out"); 2866 return; 2867 } 2868 2869 dev_dbg(hsotg->dev, "%s: ep%d(%s) DxEPINT=0x%08x\n", 2870 __func__, idx, dir_in ? "in" : "out", ints); 2871 2872 /* Don't process XferCompl interrupt if it is a setup packet */ 2873 if (idx == 0 && (ints & (DXEPINT_SETUP | DXEPINT_SETUP_RCVD))) 2874 ints &= ~DXEPINT_XFERCOMPL; 2875 2876 /* 2877 * Don't process XferCompl interrupt in DDMA if EP0 is still in SETUP 2878 * stage and xfercomplete was generated without SETUP phase done 2879 * interrupt. SW should parse received setup packet only after host's 2880 * exit from setup phase of control transfer. 
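 * The XferCompl bit is therefore masked out below until DXEPINT_SETUP
 * has also been seen for this control transfer.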
2881 */ 2882 if (using_desc_dma(hsotg) && idx == 0 && !hs_ep->dir_in && 2883 hsotg->ep0_state == DWC2_EP0_SETUP && !(ints & DXEPINT_SETUP)) 2884 ints &= ~DXEPINT_XFERCOMPL; 2885 2886 if (ints & DXEPINT_XFERCOMPL) { 2887 dev_dbg(hsotg->dev, 2888 "%s: XferCompl: DxEPCTL=0x%08x, DXEPTSIZ=%08x\n", 2889 __func__, dwc2_readl(hsotg, epctl_reg), 2890 dwc2_readl(hsotg, epsiz_reg)); 2891 2892 /* In DDMA handle isochronous requests separately */ 2893 if (using_desc_dma(hsotg) && hs_ep->isochronous) { 2894 /* XferCompl set along with BNA */ 2895 if (!(ints & DXEPINT_BNAINTR)) 2896 dwc2_gadget_complete_isoc_request_ddma(hs_ep); 2897 } else if (dir_in) { 2898 /* 2899 * We get OutDone from the FIFO, so we only 2900 * need to look at completing IN requests here 2901 * if operating slave mode 2902 */ 2903 if (hs_ep->isochronous && hs_ep->interval > 1) 2904 dwc2_gadget_incr_frame_num(hs_ep); 2905 2906 dwc2_hsotg_complete_in(hsotg, hs_ep); 2907 if (ints & DXEPINT_NAKINTRPT) 2908 ints &= ~DXEPINT_NAKINTRPT; 2909 2910 if (idx == 0 && !hs_ep->req) 2911 dwc2_hsotg_enqueue_setup(hsotg); 2912 } else if (using_dma(hsotg)) { 2913 /* 2914 * We're using DMA, we need to fire an OutDone here 2915 * as we ignore the RXFIFO. 2916 */ 2917 if (hs_ep->isochronous && hs_ep->interval > 1) 2918 dwc2_gadget_incr_frame_num(hs_ep); 2919 2920 dwc2_hsotg_handle_outdone(hsotg, idx); 2921 } 2922 } 2923 2924 if (ints & DXEPINT_EPDISBLD) 2925 dwc2_gadget_handle_ep_disabled(hs_ep); 2926 2927 if (ints & DXEPINT_OUTTKNEPDIS) 2928 dwc2_gadget_handle_out_token_ep_disabled(hs_ep); 2929 2930 if (ints & DXEPINT_NAKINTRPT) 2931 dwc2_gadget_handle_nak(hs_ep); 2932 2933 if (ints & DXEPINT_AHBERR) 2934 dev_dbg(hsotg->dev, "%s: AHBErr\n", __func__); 2935 2936 if (ints & DXEPINT_SETUP) { /* Setup or Timeout */ 2937 dev_dbg(hsotg->dev, "%s: Setup/Timeout\n", __func__); 2938 2939 if (using_dma(hsotg) && idx == 0) { 2940 /* 2941 * this is the notification we've received a 2942 * setup packet. In non-DMA mode we'd get this 2943 * from the RXFIFO, instead we need to process 2944 * the setup here. 
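 * By this point the SETUP packet should already have been written into
 * the buffer queued by dwc2_hsotg_enqueue_setup(), so
 * dwc2_hsotg_handle_outdone() can complete that request and hand the
 * packet to dwc2_hsotg_process_control().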
2945 */ 2946 2947 if (dir_in) 2948 WARN_ON_ONCE(1); 2949 else 2950 dwc2_hsotg_handle_outdone(hsotg, 0); 2951 } 2952 } 2953 2954 if (ints & DXEPINT_STSPHSERCVD) { 2955 dev_dbg(hsotg->dev, "%s: StsPhseRcvd\n", __func__); 2956 2957 /* Safety check EP0 state when STSPHSERCVD asserted */ 2958 if (hsotg->ep0_state == DWC2_EP0_DATA_OUT) { 2959 /* Move to STATUS IN for DDMA */ 2960 if (using_desc_dma(hsotg)) 2961 dwc2_hsotg_ep0_zlp(hsotg, true); 2962 } 2963 2964 } 2965 2966 if (ints & DXEPINT_BACK2BACKSETUP) 2967 dev_dbg(hsotg->dev, "%s: B2BSetup/INEPNakEff\n", __func__); 2968 2969 if (ints & DXEPINT_BNAINTR) { 2970 dev_dbg(hsotg->dev, "%s: BNA interrupt\n", __func__); 2971 if (hs_ep->isochronous) 2972 dwc2_gadget_handle_isoc_bna(hs_ep); 2973 } 2974 2975 if (dir_in && !hs_ep->isochronous) { 2976 /* not sure if this is important, but we'll clear it anyway */ 2977 if (ints & DXEPINT_INTKNTXFEMP) { 2978 dev_dbg(hsotg->dev, "%s: ep%d: INTknTXFEmpMsk\n", 2979 __func__, idx); 2980 } 2981 2982 /* this probably means something bad is happening */ 2983 if (ints & DXEPINT_INTKNEPMIS) { 2984 dev_warn(hsotg->dev, "%s: ep%d: INTknEP\n", 2985 __func__, idx); 2986 } 2987 2988 /* FIFO has space or is empty (see GAHBCFG) */ 2989 if (hsotg->dedicated_fifos && 2990 ints & DXEPINT_TXFEMP) { 2991 dev_dbg(hsotg->dev, "%s: ep%d: TxFIFOEmpty\n", 2992 __func__, idx); 2993 if (!using_dma(hsotg)) 2994 dwc2_hsotg_trytx(hsotg, hs_ep); 2995 } 2996 } 2997 } 2998 2999 /** 3000 * dwc2_hsotg_irq_enumdone - Handle EnumDone interrupt (enumeration done) 3001 * @hsotg: The device state. 3002 * 3003 * Handle updating the device settings after the enumeration phase has 3004 * been completed. 3005 */ 3006 static void dwc2_hsotg_irq_enumdone(struct dwc2_hsotg *hsotg) 3007 { 3008 u32 dsts = dwc2_readl(hsotg, DSTS); 3009 int ep0_mps = 0, ep_mps = 8; 3010 3011 /* 3012 * This should signal the finish of the enumeration phase 3013 * of the USB handshaking, so we should now know what rate 3014 * we connected at. 3015 */ 3016 3017 dev_dbg(hsotg->dev, "EnumDone (DSTS=0x%08x)\n", dsts); 3018 3019 /* 3020 * note, since we're limited by the size of transfer on EP0, and 3021 * it seems IN transfers must be a even number of packets we do 3022 * not advertise a 64byte MPS on EP0. 3023 */ 3024 3025 /* catch both EnumSpd_FS and EnumSpd_FS48 */ 3026 switch ((dsts & DSTS_ENUMSPD_MASK) >> DSTS_ENUMSPD_SHIFT) { 3027 case DSTS_ENUMSPD_FS: 3028 case DSTS_ENUMSPD_FS48: 3029 hsotg->gadget.speed = USB_SPEED_FULL; 3030 ep0_mps = EP0_MPS_LIMIT; 3031 ep_mps = 1023; 3032 break; 3033 3034 case DSTS_ENUMSPD_HS: 3035 hsotg->gadget.speed = USB_SPEED_HIGH; 3036 ep0_mps = EP0_MPS_LIMIT; 3037 ep_mps = 1024; 3038 break; 3039 3040 case DSTS_ENUMSPD_LS: 3041 hsotg->gadget.speed = USB_SPEED_LOW; 3042 ep0_mps = 8; 3043 ep_mps = 8; 3044 /* 3045 * note, we don't actually support LS in this driver at the 3046 * moment, and the documentation seems to imply that it isn't 3047 * supported by the PHYs on some of the devices. 3048 */ 3049 break; 3050 } 3051 dev_info(hsotg->dev, "new device is %s\n", 3052 usb_speed_string(hsotg->gadget.speed)); 3053 3054 /* 3055 * we should now know the maximum packet size for an 3056 * endpoint, so set the endpoints to a default value. 
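 * These values are only defaults; each endpoint is given its real
 * wMaxPacketSize later, when the gadget driver enables it through
 * dwc2_hsotg_ep_enable().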
3057 */ 3058 3059 if (ep0_mps) { 3060 int i; 3061 /* Initialize ep0 for both in and out directions */ 3062 dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0, 1); 3063 dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0, 0); 3064 for (i = 1; i < hsotg->num_of_eps; i++) { 3065 if (hsotg->eps_in[i]) 3066 dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps, 3067 0, 1); 3068 if (hsotg->eps_out[i]) 3069 dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps, 3070 0, 0); 3071 } 3072 } 3073 3074 /* ensure after enumeration our EP0 is active */ 3075 3076 dwc2_hsotg_enqueue_setup(hsotg); 3077 3078 dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n", 3079 dwc2_readl(hsotg, DIEPCTL0), 3080 dwc2_readl(hsotg, DOEPCTL0)); 3081 } 3082 3083 /** 3084 * kill_all_requests - remove all requests from the endpoint's queue 3085 * @hsotg: The device state. 3086 * @ep: The endpoint the requests may be on. 3087 * @result: The result code to use. 3088 * 3089 * Go through the requests on the given endpoint and mark them 3090 * completed with the given result code. 3091 */ 3092 static void kill_all_requests(struct dwc2_hsotg *hsotg, 3093 struct dwc2_hsotg_ep *ep, 3094 int result) 3095 { 3096 struct dwc2_hsotg_req *req, *treq; 3097 unsigned int size; 3098 3099 ep->req = NULL; 3100 3101 list_for_each_entry_safe(req, treq, &ep->queue, queue) 3102 dwc2_hsotg_complete_request(hsotg, ep, req, 3103 result); 3104 3105 if (!hsotg->dedicated_fifos) 3106 return; 3107 size = (dwc2_readl(hsotg, DTXFSTS(ep->fifo_index)) & 0xffff) * 4; 3108 if (size < ep->fifo_size) 3109 dwc2_hsotg_txfifo_flush(hsotg, ep->fifo_index); 3110 } 3111 3112 /** 3113 * dwc2_hsotg_disconnect - disconnect service 3114 * @hsotg: The device state. 3115 * 3116 * The device has been disconnected. Remove all current 3117 * transactions and signal the gadget driver that this 3118 * has happened. 
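 * It is safe to call this repeatedly: the function returns early when
 * the device is not currently marked as connected.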
3119  */
3120 void dwc2_hsotg_disconnect(struct dwc2_hsotg *hsotg)
3121 {
3122 	unsigned int ep;
3123 
3124 	if (!hsotg->connected)
3125 		return;
3126 
3127 	hsotg->connected = 0;
3128 	hsotg->test_mode = 0;
3129 
3130 	for (ep = 0; ep < hsotg->num_of_eps; ep++) {
3131 		if (hsotg->eps_in[ep])
3132 			kill_all_requests(hsotg, hsotg->eps_in[ep],
3133 					  -ESHUTDOWN);
3134 		if (hsotg->eps_out[ep])
3135 			kill_all_requests(hsotg, hsotg->eps_out[ep],
3136 					  -ESHUTDOWN);
3137 	}
3138 
3139 	call_gadget(hsotg, disconnect);
3140 	hsotg->lx_state = DWC2_L3;
3141 
3142 	usb_gadget_set_state(&hsotg->gadget, USB_STATE_NOTATTACHED);
3143 }
3144 
3145 /**
3146  * dwc2_hsotg_irq_fifoempty - TX FIFO empty interrupt handler
3147  * @hsotg: The device state
3148  * @periodic: True if this is a periodic FIFO interrupt
3149  */
3150 static void dwc2_hsotg_irq_fifoempty(struct dwc2_hsotg *hsotg, bool periodic)
3151 {
3152 	struct dwc2_hsotg_ep *ep;
3153 	int epno, ret;
3154 
3155 	/* look through for any more data to transmit */
3156 	for (epno = 0; epno < hsotg->num_of_eps; epno++) {
3157 		ep = index_to_ep(hsotg, epno, 1);
3158 
3159 		if (!ep)
3160 			continue;
3161 
3162 		if (!ep->dir_in)
3163 			continue;
3164 
3165 		if ((periodic && !ep->periodic) ||
3166 		    (!periodic && ep->periodic))
3167 			continue;
3168 
3169 		ret = dwc2_hsotg_trytx(hsotg, ep);
3170 		if (ret < 0)
3171 			break;
3172 	}
3173 }
3174 
3175 /* IRQ flags which will trigger a retry around the IRQ loop */
3176 #define IRQ_RETRY_MASK (GINTSTS_NPTXFEMP | \
3177 			GINTSTS_PTXFEMP | \
3178 			GINTSTS_RXFLVL)
3179 
3180 /**
3181  * dwc2_hsotg_core_init_disconnected - issue softreset to the core
3182  * @hsotg: The device state
3183  * @is_usb_reset: True if this is called as part of a USB reset
3184  *
3185  * Issue a soft reset to the core, and await the core finishing it.
3186  */
3187 void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
3188 				       bool is_usb_reset)
3189 {
3190 	u32 intmsk;
3191 	u32 val;
3192 	u32 usbcfg;
3193 	u32 dcfg = 0;
3194 
3195 	/* Kill any ep0 requests as controller will be reinitialized */
3196 	kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET);
3197 
3198 	if (!is_usb_reset)
3199 		if (dwc2_core_reset(hsotg, true))
3200 			return;
3201 
3202 	/*
3203 	 * we must now enable ep0 ready for host detection and then
3204 	 * set configuration.
3205 	 */
3206 
3207 	/* keep other bits untouched (so e.g. forced modes are not lost) */
3208 	usbcfg = dwc2_readl(hsotg, GUSBCFG);
3209 	usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
3210 		    GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK);
3211 
3212 	if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS &&
3213 	    (hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
3214 	     hsotg->params.speed == DWC2_SPEED_PARAM_LOW)) {
3215 		/* FS/LS Dedicated Transceiver Interface */
3216 		usbcfg |= GUSBCFG_PHYSEL;
3217 	} else {
3218 		/* set the PLL on, remove the HNP/SRP and set the PHY */
3219 		val = (hsotg->phyif == GUSBCFG_PHYIF8) ?
9 : 5; 3220 usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) | 3221 (val << GUSBCFG_USBTRDTIM_SHIFT); 3222 } 3223 dwc2_writel(hsotg, usbcfg, GUSBCFG); 3224 3225 dwc2_hsotg_init_fifo(hsotg); 3226 3227 if (!is_usb_reset) 3228 dwc2_set_bit(hsotg, DCTL, DCTL_SFTDISCON); 3229 3230 dcfg |= DCFG_EPMISCNT(1); 3231 3232 switch (hsotg->params.speed) { 3233 case DWC2_SPEED_PARAM_LOW: 3234 dcfg |= DCFG_DEVSPD_LS; 3235 break; 3236 case DWC2_SPEED_PARAM_FULL: 3237 if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS) 3238 dcfg |= DCFG_DEVSPD_FS48; 3239 else 3240 dcfg |= DCFG_DEVSPD_FS; 3241 break; 3242 default: 3243 dcfg |= DCFG_DEVSPD_HS; 3244 } 3245 3246 if (hsotg->params.ipg_isoc_en) 3247 dcfg |= DCFG_IPG_ISOC_SUPPORDED; 3248 3249 dwc2_writel(hsotg, dcfg, DCFG); 3250 3251 /* Clear any pending OTG interrupts */ 3252 dwc2_writel(hsotg, 0xffffffff, GOTGINT); 3253 3254 /* Clear any pending interrupts */ 3255 dwc2_writel(hsotg, 0xffffffff, GINTSTS); 3256 intmsk = GINTSTS_ERLYSUSP | GINTSTS_SESSREQINT | 3257 GINTSTS_GOUTNAKEFF | GINTSTS_GINNAKEFF | 3258 GINTSTS_USBRST | GINTSTS_RESETDET | 3259 GINTSTS_ENUMDONE | GINTSTS_OTGINT | 3260 GINTSTS_USBSUSP | GINTSTS_WKUPINT | 3261 GINTSTS_LPMTRANRCVD; 3262 3263 if (!using_desc_dma(hsotg)) 3264 intmsk |= GINTSTS_INCOMPL_SOIN | GINTSTS_INCOMPL_SOOUT; 3265 3266 if (!hsotg->params.external_id_pin_ctl) 3267 intmsk |= GINTSTS_CONIDSTSCHNG; 3268 3269 dwc2_writel(hsotg, intmsk, GINTMSK); 3270 3271 if (using_dma(hsotg)) { 3272 dwc2_writel(hsotg, GAHBCFG_GLBL_INTR_EN | GAHBCFG_DMA_EN | 3273 hsotg->params.ahbcfg, 3274 GAHBCFG); 3275 3276 /* Set DDMA mode support in the core if needed */ 3277 if (using_desc_dma(hsotg)) 3278 dwc2_set_bit(hsotg, DCFG, DCFG_DESCDMA_EN); 3279 3280 } else { 3281 dwc2_writel(hsotg, ((hsotg->dedicated_fifos) ? 3282 (GAHBCFG_NP_TXF_EMP_LVL | 3283 GAHBCFG_P_TXF_EMP_LVL) : 0) | 3284 GAHBCFG_GLBL_INTR_EN, GAHBCFG); 3285 } 3286 3287 /* 3288 * If INTknTXFEmpMsk is enabled, it's important to disable ep interrupts 3289 * when we have no data to transfer. Otherwise we get being flooded by 3290 * interrupts. 3291 */ 3292 3293 dwc2_writel(hsotg, ((hsotg->dedicated_fifos && !using_dma(hsotg)) ? 3294 DIEPMSK_TXFIFOEMPTY | DIEPMSK_INTKNTXFEMPMSK : 0) | 3295 DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK | 3296 DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK, 3297 DIEPMSK); 3298 3299 /* 3300 * don't need XferCompl, we get that from RXFIFO in slave mode. In 3301 * DMA mode we may need this and StsPhseRcvd. 3302 */ 3303 dwc2_writel(hsotg, (using_dma(hsotg) ? (DIEPMSK_XFERCOMPLMSK | 3304 DOEPMSK_STSPHSERCVDMSK) : 0) | 3305 DOEPMSK_EPDISBLDMSK | DOEPMSK_AHBERRMSK | 3306 DOEPMSK_SETUPMSK, 3307 DOEPMSK); 3308 3309 /* Enable BNA interrupt for DDMA */ 3310 if (using_desc_dma(hsotg)) { 3311 dwc2_set_bit(hsotg, DOEPMSK, DOEPMSK_BNAMSK); 3312 dwc2_set_bit(hsotg, DIEPMSK, DIEPMSK_BNAININTRMSK); 3313 } 3314 3315 dwc2_writel(hsotg, 0, DAINTMSK); 3316 3317 dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n", 3318 dwc2_readl(hsotg, DIEPCTL0), 3319 dwc2_readl(hsotg, DOEPCTL0)); 3320 3321 /* enable in and out endpoint interrupts */ 3322 dwc2_hsotg_en_gsint(hsotg, GINTSTS_OEPINT | GINTSTS_IEPINT); 3323 3324 /* 3325 * Enable the RXFIFO when in slave mode, as this is how we collect 3326 * the data. In DMA mode, we get events from the FIFO but also 3327 * things we cannot process, so do not use it. 
3328 */ 3329 if (!using_dma(hsotg)) 3330 dwc2_hsotg_en_gsint(hsotg, GINTSTS_RXFLVL); 3331 3332 /* Enable interrupts for EP0 in and out */ 3333 dwc2_hsotg_ctrl_epint(hsotg, 0, 0, 1); 3334 dwc2_hsotg_ctrl_epint(hsotg, 0, 1, 1); 3335 3336 if (!is_usb_reset) { 3337 dwc2_set_bit(hsotg, DCTL, DCTL_PWRONPRGDONE); 3338 udelay(10); /* see openiboot */ 3339 dwc2_clear_bit(hsotg, DCTL, DCTL_PWRONPRGDONE); 3340 } 3341 3342 dev_dbg(hsotg->dev, "DCTL=0x%08x\n", dwc2_readl(hsotg, DCTL)); 3343 3344 /* 3345 * DxEPCTL_USBActEp says RO in manual, but seems to be set by 3346 * writing to the EPCTL register.. 3347 */ 3348 3349 /* set to read 1 8byte packet */ 3350 dwc2_writel(hsotg, DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) | 3351 DXEPTSIZ_XFERSIZE(8), DOEPTSIZ0); 3352 3353 dwc2_writel(hsotg, dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) | 3354 DXEPCTL_CNAK | DXEPCTL_EPENA | 3355 DXEPCTL_USBACTEP, 3356 DOEPCTL0); 3357 3358 /* enable, but don't activate EP0in */ 3359 dwc2_writel(hsotg, dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) | 3360 DXEPCTL_USBACTEP, DIEPCTL0); 3361 3362 /* clear global NAKs */ 3363 val = DCTL_CGOUTNAK | DCTL_CGNPINNAK; 3364 if (!is_usb_reset) 3365 val |= DCTL_SFTDISCON; 3366 dwc2_set_bit(hsotg, DCTL, val); 3367 3368 /* configure the core to support LPM */ 3369 dwc2_gadget_init_lpm(hsotg); 3370 3371 /* must be at-least 3ms to allow bus to see disconnect */ 3372 mdelay(3); 3373 3374 hsotg->lx_state = DWC2_L0; 3375 3376 dwc2_hsotg_enqueue_setup(hsotg); 3377 3378 dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n", 3379 dwc2_readl(hsotg, DIEPCTL0), 3380 dwc2_readl(hsotg, DOEPCTL0)); 3381 } 3382 3383 static void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg) 3384 { 3385 /* set the soft-disconnect bit */ 3386 dwc2_set_bit(hsotg, DCTL, DCTL_SFTDISCON); 3387 } 3388 3389 void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg) 3390 { 3391 /* remove the soft-disconnect and let's go */ 3392 dwc2_clear_bit(hsotg, DCTL, DCTL_SFTDISCON); 3393 } 3394 3395 /** 3396 * dwc2_gadget_handle_incomplete_isoc_in - handle incomplete ISO IN Interrupt. 3397 * @hsotg: The device state: 3398 * 3399 * This interrupt indicates one of the following conditions occurred while 3400 * transmitting an ISOC transaction. 3401 * - Corrupted IN Token for ISOC EP. 3402 * - Packet not complete in FIFO. 
3403 * 3404 * The following actions will be taken: 3405 * - Determine the EP 3406 * - Disable EP; when 'Endpoint Disabled' interrupt is received Flush FIFO 3407 */ 3408 static void dwc2_gadget_handle_incomplete_isoc_in(struct dwc2_hsotg *hsotg) 3409 { 3410 struct dwc2_hsotg_ep *hs_ep; 3411 u32 epctrl; 3412 u32 daintmsk; 3413 u32 idx; 3414 3415 dev_dbg(hsotg->dev, "Incomplete isoc in interrupt received:\n"); 3416 3417 daintmsk = dwc2_readl(hsotg, DAINTMSK); 3418 3419 for (idx = 1; idx < hsotg->num_of_eps; idx++) { 3420 hs_ep = hsotg->eps_in[idx]; 3421 /* Proceed only unmasked ISOC EPs */ 3422 if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous) 3423 continue; 3424 3425 epctrl = dwc2_readl(hsotg, DIEPCTL(idx)); 3426 if ((epctrl & DXEPCTL_EPENA) && 3427 dwc2_gadget_target_frame_elapsed(hs_ep)) { 3428 epctrl |= DXEPCTL_SNAK; 3429 epctrl |= DXEPCTL_EPDIS; 3430 dwc2_writel(hsotg, epctrl, DIEPCTL(idx)); 3431 } 3432 } 3433 3434 /* Clear interrupt */ 3435 dwc2_writel(hsotg, GINTSTS_INCOMPL_SOIN, GINTSTS); 3436 } 3437 3438 /** 3439 * dwc2_gadget_handle_incomplete_isoc_out - handle incomplete ISO OUT Interrupt 3440 * @hsotg: The device state: 3441 * 3442 * This interrupt indicates one of the following conditions occurred while 3443 * transmitting an ISOC transaction. 3444 * - Corrupted OUT Token for ISOC EP. 3445 * - Packet not complete in FIFO. 3446 * 3447 * The following actions will be taken: 3448 * - Determine the EP 3449 * - Set DCTL_SGOUTNAK and unmask GOUTNAKEFF if target frame elapsed. 3450 */ 3451 static void dwc2_gadget_handle_incomplete_isoc_out(struct dwc2_hsotg *hsotg) 3452 { 3453 u32 gintsts; 3454 u32 gintmsk; 3455 u32 daintmsk; 3456 u32 epctrl; 3457 struct dwc2_hsotg_ep *hs_ep; 3458 int idx; 3459 3460 dev_dbg(hsotg->dev, "%s: GINTSTS_INCOMPL_SOOUT\n", __func__); 3461 3462 daintmsk = dwc2_readl(hsotg, DAINTMSK); 3463 daintmsk >>= DAINT_OUTEP_SHIFT; 3464 3465 for (idx = 1; idx < hsotg->num_of_eps; idx++) { 3466 hs_ep = hsotg->eps_out[idx]; 3467 /* Proceed only unmasked ISOC EPs */ 3468 if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous) 3469 continue; 3470 3471 epctrl = dwc2_readl(hsotg, DOEPCTL(idx)); 3472 if ((epctrl & DXEPCTL_EPENA) && 3473 dwc2_gadget_target_frame_elapsed(hs_ep)) { 3474 /* Unmask GOUTNAKEFF interrupt */ 3475 gintmsk = dwc2_readl(hsotg, GINTMSK); 3476 gintmsk |= GINTSTS_GOUTNAKEFF; 3477 dwc2_writel(hsotg, gintmsk, GINTMSK); 3478 3479 gintsts = dwc2_readl(hsotg, GINTSTS); 3480 if (!(gintsts & GINTSTS_GOUTNAKEFF)) { 3481 dwc2_set_bit(hsotg, DCTL, DCTL_SGOUTNAK); 3482 break; 3483 } 3484 } 3485 } 3486 3487 /* Clear interrupt */ 3488 dwc2_writel(hsotg, GINTSTS_INCOMPL_SOOUT, GINTSTS); 3489 } 3490 3491 /** 3492 * dwc2_hsotg_irq - handle device interrupt 3493 * @irq: The IRQ number triggered 3494 * @pw: The pw value when registered the handler. 
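 *
 * Returns IRQ_HANDLED when the controller is in device mode, or
 * IRQ_NONE when in host mode so the interrupt can be handled elsewhere.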
3495 */ 3496 static irqreturn_t dwc2_hsotg_irq(int irq, void *pw) 3497 { 3498 struct dwc2_hsotg *hsotg = pw; 3499 int retry_count = 8; 3500 u32 gintsts; 3501 u32 gintmsk; 3502 3503 if (!dwc2_is_device_mode(hsotg)) 3504 return IRQ_NONE; 3505 3506 spin_lock(&hsotg->lock); 3507 irq_retry: 3508 gintsts = dwc2_readl(hsotg, GINTSTS); 3509 gintmsk = dwc2_readl(hsotg, GINTMSK); 3510 3511 dev_dbg(hsotg->dev, "%s: %08x %08x (%08x) retry %d\n", 3512 __func__, gintsts, gintsts & gintmsk, gintmsk, retry_count); 3513 3514 gintsts &= gintmsk; 3515 3516 if (gintsts & GINTSTS_RESETDET) { 3517 dev_dbg(hsotg->dev, "%s: USBRstDet\n", __func__); 3518 3519 dwc2_writel(hsotg, GINTSTS_RESETDET, GINTSTS); 3520 3521 /* This event must be used only if controller is suspended */ 3522 if (hsotg->lx_state == DWC2_L2) { 3523 dwc2_exit_partial_power_down(hsotg, true); 3524 hsotg->lx_state = DWC2_L0; 3525 } 3526 } 3527 3528 if (gintsts & (GINTSTS_USBRST | GINTSTS_RESETDET)) { 3529 u32 usb_status = dwc2_readl(hsotg, GOTGCTL); 3530 u32 connected = hsotg->connected; 3531 3532 dev_dbg(hsotg->dev, "%s: USBRst\n", __func__); 3533 dev_dbg(hsotg->dev, "GNPTXSTS=%08x\n", 3534 dwc2_readl(hsotg, GNPTXSTS)); 3535 3536 dwc2_writel(hsotg, GINTSTS_USBRST, GINTSTS); 3537 3538 /* Report disconnection if it is not already done. */ 3539 dwc2_hsotg_disconnect(hsotg); 3540 3541 /* Reset device address to zero */ 3542 dwc2_clear_bit(hsotg, DCFG, DCFG_DEVADDR_MASK); 3543 3544 if (usb_status & GOTGCTL_BSESVLD && connected) 3545 dwc2_hsotg_core_init_disconnected(hsotg, true); 3546 } 3547 3548 if (gintsts & GINTSTS_ENUMDONE) { 3549 dwc2_writel(hsotg, GINTSTS_ENUMDONE, GINTSTS); 3550 3551 dwc2_hsotg_irq_enumdone(hsotg); 3552 } 3553 3554 if (gintsts & (GINTSTS_OEPINT | GINTSTS_IEPINT)) { 3555 u32 daint = dwc2_readl(hsotg, DAINT); 3556 u32 daintmsk = dwc2_readl(hsotg, DAINTMSK); 3557 u32 daint_out, daint_in; 3558 int ep; 3559 3560 daint &= daintmsk; 3561 daint_out = daint >> DAINT_OUTEP_SHIFT; 3562 daint_in = daint & ~(daint_out << DAINT_OUTEP_SHIFT); 3563 3564 dev_dbg(hsotg->dev, "%s: daint=%08x\n", __func__, daint); 3565 3566 for (ep = 0; ep < hsotg->num_of_eps && daint_out; 3567 ep++, daint_out >>= 1) { 3568 if (daint_out & 1) 3569 dwc2_hsotg_epint(hsotg, ep, 0); 3570 } 3571 3572 for (ep = 0; ep < hsotg->num_of_eps && daint_in; 3573 ep++, daint_in >>= 1) { 3574 if (daint_in & 1) 3575 dwc2_hsotg_epint(hsotg, ep, 1); 3576 } 3577 } 3578 3579 /* check both FIFOs */ 3580 3581 if (gintsts & GINTSTS_NPTXFEMP) { 3582 dev_dbg(hsotg->dev, "NPTxFEmp\n"); 3583 3584 /* 3585 * Disable the interrupt to stop it happening again 3586 * unless one of these endpoint routines decides that 3587 * it needs re-enabling 3588 */ 3589 3590 dwc2_hsotg_disable_gsint(hsotg, GINTSTS_NPTXFEMP); 3591 dwc2_hsotg_irq_fifoempty(hsotg, false); 3592 } 3593 3594 if (gintsts & GINTSTS_PTXFEMP) { 3595 dev_dbg(hsotg->dev, "PTxFEmp\n"); 3596 3597 /* See note in GINTSTS_NPTxFEmp */ 3598 3599 dwc2_hsotg_disable_gsint(hsotg, GINTSTS_PTXFEMP); 3600 dwc2_hsotg_irq_fifoempty(hsotg, true); 3601 } 3602 3603 if (gintsts & GINTSTS_RXFLVL) { 3604 /* 3605 * note, since GINTSTS_RxFLvl doubles as FIFO-not-empty, 3606 * we need to retry dwc2_hsotg_handle_rx if this is still 3607 * set. 
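 * That retry is implemented by IRQ_RETRY_MASK and the irq_retry loop
 * at the bottom of this handler.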
3608 */ 3609 3610 dwc2_hsotg_handle_rx(hsotg); 3611 } 3612 3613 if (gintsts & GINTSTS_ERLYSUSP) { 3614 dev_dbg(hsotg->dev, "GINTSTS_ErlySusp\n"); 3615 dwc2_writel(hsotg, GINTSTS_ERLYSUSP, GINTSTS); 3616 } 3617 3618 /* 3619 * these next two seem to crop-up occasionally causing the core 3620 * to shutdown the USB transfer, so try clearing them and logging 3621 * the occurrence. 3622 */ 3623 3624 if (gintsts & GINTSTS_GOUTNAKEFF) { 3625 u8 idx; 3626 u32 epctrl; 3627 u32 gintmsk; 3628 u32 daintmsk; 3629 struct dwc2_hsotg_ep *hs_ep; 3630 3631 daintmsk = dwc2_readl(hsotg, DAINTMSK); 3632 daintmsk >>= DAINT_OUTEP_SHIFT; 3633 /* Mask this interrupt */ 3634 gintmsk = dwc2_readl(hsotg, GINTMSK); 3635 gintmsk &= ~GINTSTS_GOUTNAKEFF; 3636 dwc2_writel(hsotg, gintmsk, GINTMSK); 3637 3638 dev_dbg(hsotg->dev, "GOUTNakEff triggered\n"); 3639 for (idx = 1; idx < hsotg->num_of_eps; idx++) { 3640 hs_ep = hsotg->eps_out[idx]; 3641 /* Proceed only unmasked ISOC EPs */ 3642 if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous) 3643 continue; 3644 3645 epctrl = dwc2_readl(hsotg, DOEPCTL(idx)); 3646 3647 if (epctrl & DXEPCTL_EPENA) { 3648 epctrl |= DXEPCTL_SNAK; 3649 epctrl |= DXEPCTL_EPDIS; 3650 dwc2_writel(hsotg, epctrl, DOEPCTL(idx)); 3651 } 3652 } 3653 3654 /* This interrupt bit is cleared in DXEPINT_EPDISBLD handler */ 3655 } 3656 3657 if (gintsts & GINTSTS_GINNAKEFF) { 3658 dev_info(hsotg->dev, "GINNakEff triggered\n"); 3659 3660 dwc2_set_bit(hsotg, DCTL, DCTL_CGNPINNAK); 3661 3662 dwc2_hsotg_dump(hsotg); 3663 } 3664 3665 if (gintsts & GINTSTS_INCOMPL_SOIN) 3666 dwc2_gadget_handle_incomplete_isoc_in(hsotg); 3667 3668 if (gintsts & GINTSTS_INCOMPL_SOOUT) 3669 dwc2_gadget_handle_incomplete_isoc_out(hsotg); 3670 3671 /* 3672 * if we've had fifo events, we should try and go around the 3673 * loop again to see if there's any point in returning yet. 3674 */ 3675 3676 if (gintsts & IRQ_RETRY_MASK && --retry_count > 0) 3677 goto irq_retry; 3678 3679 spin_unlock(&hsotg->lock); 3680 3681 return IRQ_HANDLED; 3682 } 3683 3684 static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg, 3685 struct dwc2_hsotg_ep *hs_ep) 3686 { 3687 u32 epctrl_reg; 3688 u32 epint_reg; 3689 3690 epctrl_reg = hs_ep->dir_in ? DIEPCTL(hs_ep->index) : 3691 DOEPCTL(hs_ep->index); 3692 epint_reg = hs_ep->dir_in ? 
			DIEPINT(hs_ep->index) :
3693 			DOEPINT(hs_ep->index);
3694 
3695 	dev_dbg(hsotg->dev, "%s: stopping transfer on %s\n", __func__,
3696 		hs_ep->name);
3697 
3698 	if (hs_ep->dir_in) {
3699 		if (hsotg->dedicated_fifos || hs_ep->periodic) {
3700 			dwc2_set_bit(hsotg, epctrl_reg, DXEPCTL_SNAK);
3701 			/* Wait for Nak effect */
3702 			if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg,
3703 						    DXEPINT_INEPNAKEFF, 100))
3704 				dev_warn(hsotg->dev,
3705 					 "%s: timeout DIEPINT.NAKEFF\n",
3706 					 __func__);
3707 		} else {
3708 			dwc2_set_bit(hsotg, DCTL, DCTL_SGNPINNAK);
3709 			/* Wait for Nak effect */
3710 			if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
3711 						    GINTSTS_GINNAKEFF, 100))
3712 				dev_warn(hsotg->dev,
3713 					 "%s: timeout GINTSTS.GINNAKEFF\n",
3714 					 __func__);
3715 		}
3716 	} else {
3717 		if (!(dwc2_readl(hsotg, GINTSTS) & GINTSTS_GOUTNAKEFF))
3718 			dwc2_set_bit(hsotg, DCTL, DCTL_SGOUTNAK);
3719 
3720 		/* Wait for global nak to take effect */
3721 		if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
3722 					    GINTSTS_GOUTNAKEFF, 100))
3723 			dev_warn(hsotg->dev, "%s: timeout GINTSTS.GOUTNAKEFF\n",
3724 				 __func__);
3725 	}
3726 
3727 	/* Disable ep */
3728 	dwc2_set_bit(hsotg, epctrl_reg, DXEPCTL_EPDIS | DXEPCTL_SNAK);
3729 
3730 	/* Wait for ep to be disabled */
3731 	if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg, DXEPINT_EPDISBLD, 100))
3732 		dev_warn(hsotg->dev,
3733 			 "%s: timeout DOEPCTL.EPDisable\n", __func__);
3734 
3735 	/* Clear EPDISBLD interrupt */
3736 	dwc2_set_bit(hsotg, epint_reg, DXEPINT_EPDISBLD);
3737 
3738 	if (hs_ep->dir_in) {
3739 		unsigned short fifo_index;
3740 
3741 		if (hsotg->dedicated_fifos || hs_ep->periodic)
3742 			fifo_index = hs_ep->fifo_index;
3743 		else
3744 			fifo_index = 0;
3745 
3746 		/* Flush TX FIFO */
3747 		dwc2_flush_tx_fifo(hsotg, fifo_index);
3748 
3749 		/* Clear Global IN NP NAK in Shared FIFO for non-periodic ep */
3750 		if (!hsotg->dedicated_fifos && !hs_ep->periodic)
3751 			dwc2_set_bit(hsotg, DCTL, DCTL_CGNPINNAK);
3752 
3753 	} else {
3754 		/* Remove global NAKs */
3755 		dwc2_set_bit(hsotg, DCTL, DCTL_CGOUTNAK);
3756 	}
3757 }
3758 
3759 /**
3760  * dwc2_hsotg_ep_enable - enable the given endpoint
3761  * @ep: The USB endpoint to configure
3762  * @desc: The USB endpoint descriptor to configure with.
3763  *
3764  * This is called from the USB gadget code's usb_ep_enable().
3765  */
3766 static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
3767 				const struct usb_endpoint_descriptor *desc)
3768 {
3769 	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
3770 	struct dwc2_hsotg *hsotg = hs_ep->parent;
3771 	unsigned long flags;
3772 	unsigned int index = hs_ep->index;
3773 	u32 epctrl_reg;
3774 	u32 epctrl;
3775 	u32 mps;
3776 	u32 mc;
3777 	u32 mask;
3778 	unsigned int dir_in;
3779 	unsigned int i, val, size;
3780 	int ret = 0;
3781 	unsigned char ep_type;
3782 
3783 	dev_dbg(hsotg->dev,
3784 		"%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n",
3785 		__func__, ep->name, desc->bEndpointAddress, desc->bmAttributes,
3786 		desc->wMaxPacketSize, desc->bInterval);
3787 
3788 	/* not to be called for EP0 */
3789 	if (index == 0) {
3790 		dev_err(hsotg->dev, "%s: called for EP 0\n", __func__);
3791 		return -EINVAL;
3792 	}
3793 
3794 	dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ?
1 : 0; 3795 if (dir_in != hs_ep->dir_in) { 3796 dev_err(hsotg->dev, "%s: direction mismatch!\n", __func__); 3797 return -EINVAL; 3798 } 3799 3800 ep_type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; 3801 mps = usb_endpoint_maxp(desc); 3802 mc = usb_endpoint_maxp_mult(desc); 3803 3804 /* ISOC IN in DDMA supported bInterval up to 10 */ 3805 if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC && 3806 dir_in && desc->bInterval > 10) { 3807 dev_err(hsotg->dev, 3808 "%s: ISOC IN, DDMA: bInterval>10 not supported!\n", __func__); 3809 return -EINVAL; 3810 } 3811 3812 /* High bandwidth ISOC OUT in DDMA not supported */ 3813 if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC && 3814 !dir_in && mc > 1) { 3815 dev_err(hsotg->dev, 3816 "%s: ISOC OUT, DDMA: HB not supported!\n", __func__); 3817 return -EINVAL; 3818 } 3819 3820 /* note, we handle this here instead of dwc2_hsotg_set_ep_maxpacket */ 3821 3822 epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index); 3823 epctrl = dwc2_readl(hsotg, epctrl_reg); 3824 3825 dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n", 3826 __func__, epctrl, epctrl_reg); 3827 3828 /* Allocate DMA descriptor chain for non-ctrl endpoints */ 3829 if (using_desc_dma(hsotg) && !hs_ep->desc_list) { 3830 hs_ep->desc_list = dmam_alloc_coherent(hsotg->dev, 3831 MAX_DMA_DESC_NUM_GENERIC * 3832 sizeof(struct dwc2_dma_desc), 3833 &hs_ep->desc_list_dma, GFP_ATOMIC); 3834 if (!hs_ep->desc_list) { 3835 ret = -ENOMEM; 3836 goto error2; 3837 } 3838 } 3839 3840 spin_lock_irqsave(&hsotg->lock, flags); 3841 3842 epctrl &= ~(DXEPCTL_EPTYPE_MASK | DXEPCTL_MPS_MASK); 3843 epctrl |= DXEPCTL_MPS(mps); 3844 3845 /* 3846 * mark the endpoint as active, otherwise the core may ignore 3847 * transactions entirely for this endpoint 3848 */ 3849 epctrl |= DXEPCTL_USBACTEP; 3850 3851 /* update the endpoint state */ 3852 dwc2_hsotg_set_ep_maxpacket(hsotg, hs_ep->index, mps, mc, dir_in); 3853 3854 /* default, set to non-periodic */ 3855 hs_ep->isochronous = 0; 3856 hs_ep->periodic = 0; 3857 hs_ep->halted = 0; 3858 hs_ep->interval = desc->bInterval; 3859 3860 switch (ep_type) { 3861 case USB_ENDPOINT_XFER_ISOC: 3862 epctrl |= DXEPCTL_EPTYPE_ISO; 3863 epctrl |= DXEPCTL_SETEVENFR; 3864 hs_ep->isochronous = 1; 3865 hs_ep->interval = 1 << (desc->bInterval - 1); 3866 hs_ep->target_frame = TARGET_FRAME_INITIAL; 3867 hs_ep->next_desc = 0; 3868 hs_ep->compl_desc = 0; 3869 if (dir_in) { 3870 hs_ep->periodic = 1; 3871 mask = dwc2_readl(hsotg, DIEPMSK); 3872 mask |= DIEPMSK_NAKMSK; 3873 dwc2_writel(hsotg, mask, DIEPMSK); 3874 } else { 3875 mask = dwc2_readl(hsotg, DOEPMSK); 3876 mask |= DOEPMSK_OUTTKNEPDISMSK; 3877 dwc2_writel(hsotg, mask, DOEPMSK); 3878 } 3879 break; 3880 3881 case USB_ENDPOINT_XFER_BULK: 3882 epctrl |= DXEPCTL_EPTYPE_BULK; 3883 break; 3884 3885 case USB_ENDPOINT_XFER_INT: 3886 if (dir_in) 3887 hs_ep->periodic = 1; 3888 3889 if (hsotg->gadget.speed == USB_SPEED_HIGH) 3890 hs_ep->interval = 1 << (desc->bInterval - 1); 3891 3892 epctrl |= DXEPCTL_EPTYPE_INTERRUPT; 3893 break; 3894 3895 case USB_ENDPOINT_XFER_CONTROL: 3896 epctrl |= DXEPCTL_EPTYPE_CONTROL; 3897 break; 3898 } 3899 3900 /* 3901 * if the hardware has dedicated fifos, we must give each IN EP 3902 * a unique tx-fifo even if it is non-periodic. 
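 * The search below picks the smallest unclaimed FIFO that can hold
 * maxpacket * mc bytes for this endpoint, and records the claim in
 * hsotg->fifo_map.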
	/*
	 * if the hardware has dedicated fifos, we must give each IN EP
	 * a unique tx-fifo even if it is non-periodic.
	 */
	if (dir_in && hsotg->dedicated_fifos) {
		u32 fifo_index = 0;
		u32 fifo_size = UINT_MAX;

		size = hs_ep->ep.maxpacket * hs_ep->mc;
		for (i = 1; i < hsotg->num_of_eps; ++i) {
			if (hsotg->fifo_map & (1 << i))
				continue;
			val = dwc2_readl(hsotg, DPTXFSIZN(i));
			val = (val >> FIFOSIZE_DEPTH_SHIFT) * 4;
			if (val < size)
				continue;
			/* Search for smallest acceptable fifo */
			if (val < fifo_size) {
				fifo_size = val;
				fifo_index = i;
			}
		}
		if (!fifo_index) {
			dev_err(hsotg->dev,
				"%s: No suitable fifo found\n", __func__);
			ret = -ENOMEM;
			goto error1;
		}
		hsotg->fifo_map |= 1 << fifo_index;
		epctrl |= DXEPCTL_TXFNUM(fifo_index);
		hs_ep->fifo_index = fifo_index;
		hs_ep->fifo_size = fifo_size;
	}

	/* for non control endpoints, set PID to D0 */
	if (index && !hs_ep->isochronous)
		epctrl |= DXEPCTL_SETD0PID;

	/* WA for Full speed ISOC IN in DDMA mode.
	 * By clearing the NAK status of the EP, the core will send a ZLP
	 * in response to an IN token and assert the NAK interrupt, relying
	 * on TxFIFO status only.
	 */

	if (hsotg->gadget.speed == USB_SPEED_FULL &&
	    hs_ep->isochronous && dir_in) {
		/* The WA applies only to core versions from 2.72a
		 * to 4.00a (including both). Also for FS_IOT_1.00a
		 * and HS_IOT_1.00a.
		 */
		u32 gsnpsid = dwc2_readl(hsotg, GSNPSID);

		if ((gsnpsid >= DWC2_CORE_REV_2_72a &&
		     gsnpsid <= DWC2_CORE_REV_4_00a) ||
		    gsnpsid == DWC2_FS_IOT_REV_1_00a ||
		    gsnpsid == DWC2_HS_IOT_REV_1_00a)
			epctrl |= DXEPCTL_CNAK;
	}

	dev_dbg(hsotg->dev, "%s: write DxEPCTL=0x%08x\n",
		__func__, epctrl);

	dwc2_writel(hsotg, epctrl, epctrl_reg);
	dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x\n",
		__func__, dwc2_readl(hsotg, epctrl_reg));

	/* enable the endpoint interrupt */
	dwc2_hsotg_ctrl_epint(hsotg, index, dir_in, 1);

error1:
	spin_unlock_irqrestore(&hsotg->lock, flags);

error2:
	if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) {
		dmam_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
			sizeof(struct dwc2_dma_desc),
			hs_ep->desc_list, hs_ep->desc_list_dma);
		hs_ep->desc_list = NULL;
	}

	return ret;
}
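/*
 * Illustrative sketch, not part of the driver: a gadget function driver
 * never calls dwc2_hsotg_ep_enable() directly. It goes through
 * usb_ep_enable(), which dispatches to the .enable op above with the
 * descriptor stored in ep->desc. The endpoint choice and the descriptor
 * values below are made-up example values.
 */
static int __maybe_unused example_enable_bulk_in(struct usb_ep *ep)
{
	/* a hypothetical high-speed bulk IN descriptor */
	static struct usb_endpoint_descriptor bulk_in_desc = {
		.bLength		= USB_DT_ENDPOINT_SIZE,
		.bDescriptorType	= USB_DT_ENDPOINT,
		.bEndpointAddress	= USB_DIR_IN | 1,
		.bmAttributes		= USB_ENDPOINT_XFER_BULK,
		.wMaxPacketSize		= cpu_to_le16(512),
	};

	/* the gadget core hands ep->desc down to the .enable op */
	ep->desc = &bulk_in_desc;
	return usb_ep_enable(ep);
}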
/**
 * dwc2_hsotg_ep_disable - disable given endpoint
 * @ep: The endpoint to disable.
 */
static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
{
	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
	struct dwc2_hsotg *hsotg = hs_ep->parent;
	int dir_in = hs_ep->dir_in;
	int index = hs_ep->index;
	unsigned long flags;
	u32 epctrl_reg;
	u32 ctrl;

	dev_dbg(hsotg->dev, "%s(ep %p)\n", __func__, ep);

	if (ep == &hsotg->eps_out[0]->ep) {
		dev_err(hsotg->dev, "%s: called for ep0\n", __func__);
		return -EINVAL;
	}

	if (hsotg->op_state != OTG_STATE_B_PERIPHERAL) {
		dev_err(hsotg->dev, "%s: called in host mode?\n", __func__);
		return -EINVAL;
	}

	epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);

	spin_lock_irqsave(&hsotg->lock, flags);

	ctrl = dwc2_readl(hsotg, epctrl_reg);

	if (ctrl & DXEPCTL_EPENA)
		dwc2_hsotg_ep_stop_xfr(hsotg, hs_ep);

	ctrl &= ~DXEPCTL_EPENA;
	ctrl &= ~DXEPCTL_USBACTEP;
	ctrl |= DXEPCTL_SNAK;

	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
	dwc2_writel(hsotg, ctrl, epctrl_reg);

	/* disable endpoint interrupts */
	dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 0);

	/* terminate all requests with shutdown */
	kill_all_requests(hsotg, hs_ep, -ESHUTDOWN);

	hsotg->fifo_map &= ~(1 << hs_ep->fifo_index);
	hs_ep->fifo_index = 0;
	hs_ep->fifo_size = 0;

	spin_unlock_irqrestore(&hsotg->lock, flags);
	return 0;
}

/**
 * on_list - check request is on the given endpoint
 * @ep: The endpoint to check.
 * @test: The request to test if it is on the endpoint.
 */
static bool on_list(struct dwc2_hsotg_ep *ep, struct dwc2_hsotg_req *test)
{
	struct dwc2_hsotg_req *req, *treq;

	list_for_each_entry_safe(req, treq, &ep->queue, queue) {
		if (req == test)
			return true;
	}

	return false;
}

/**
 * dwc2_hsotg_ep_dequeue - dequeue a request from an endpoint
 * @ep: The endpoint the request is queued on.
 * @req: The request to be removed from the queue.
 */
static int dwc2_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	struct dwc2_hsotg_req *hs_req = our_req(req);
	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
	struct dwc2_hsotg *hs = hs_ep->parent;
	unsigned long flags;

	dev_dbg(hs->dev, "ep_dequeue(%p,%p)\n", ep, req);

	spin_lock_irqsave(&hs->lock, flags);

	if (!on_list(hs_ep, hs_req)) {
		spin_unlock_irqrestore(&hs->lock, flags);
		return -EINVAL;
	}

	/* Dequeue already started request */
	if (req == &hs_ep->req->req)
		dwc2_hsotg_ep_stop_xfr(hs, hs_ep);

	dwc2_hsotg_complete_request(hs, hs_ep, hs_req, -ECONNRESET);
	spin_unlock_irqrestore(&hs->lock, flags);

	return 0;
}
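/*
 * Illustrative sketch, not part of the driver: queueing a request and then
 * cancelling it through usb_ep_dequeue(), which lands in the .dequeue op
 * above. A cancelled request completes with -ECONNRESET, matching the
 * dwc2_hsotg_complete_request() call in the code above. All names here are
 * hypothetical.
 */
static void __maybe_unused example_cancel_request(struct usb_ep *ep,
						  struct usb_request *req)
{
	if (usb_ep_queue(ep, req, GFP_ATOMIC) == 0) {
		/*
		 * ...later, e.g. on a timeout, remove it again. The
		 * request's ->complete() callback then runs with
		 * req->status set to -ECONNRESET.
		 */
		usb_ep_dequeue(ep, req);
	}
}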
/**
 * dwc2_hsotg_ep_sethalt - set halt on a given endpoint
 * @ep: The endpoint to set halt.
 * @value: Set or unset the halt.
 * @now: If true, stall the endpoint now. Otherwise return -EAGAIN if
 *       the endpoint is busy processing requests.
 *
 * We need to stall the endpoint immediately if request comes from set_feature
 * protocol command handler.
 */
static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now)
{
	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
	struct dwc2_hsotg *hs = hs_ep->parent;
	int index = hs_ep->index;
	u32 epreg;
	u32 epctl;
	u32 xfertype;

	dev_info(hs->dev, "%s(ep %p %s, %d)\n", __func__, ep, ep->name, value);

	if (index == 0) {
		if (value)
			dwc2_hsotg_stall_ep0(hs);
		else
			dev_warn(hs->dev,
				 "%s: can't clear halt on ep0\n", __func__);
		return 0;
	}

	if (hs_ep->isochronous) {
		dev_err(hs->dev, "%s is Isochronous Endpoint\n", ep->name);
		return -EINVAL;
	}

	if (!now && value && !list_empty(&hs_ep->queue)) {
		dev_dbg(hs->dev, "%s request is pending, cannot halt\n",
			ep->name);
		return -EAGAIN;
	}

	if (hs_ep->dir_in) {
		epreg = DIEPCTL(index);
		epctl = dwc2_readl(hs, epreg);

		if (value) {
			epctl |= DXEPCTL_STALL | DXEPCTL_SNAK;
			if (epctl & DXEPCTL_EPENA)
				epctl |= DXEPCTL_EPDIS;
		} else {
			epctl &= ~DXEPCTL_STALL;
			xfertype = epctl & DXEPCTL_EPTYPE_MASK;
			if (xfertype == DXEPCTL_EPTYPE_BULK ||
			    xfertype == DXEPCTL_EPTYPE_INTERRUPT)
				epctl |= DXEPCTL_SETD0PID;
		}
		dwc2_writel(hs, epctl, epreg);
	} else {
		epreg = DOEPCTL(index);
		epctl = dwc2_readl(hs, epreg);

		if (value) {
			epctl |= DXEPCTL_STALL;
		} else {
			epctl &= ~DXEPCTL_STALL;
			xfertype = epctl & DXEPCTL_EPTYPE_MASK;
			if (xfertype == DXEPCTL_EPTYPE_BULK ||
			    xfertype == DXEPCTL_EPTYPE_INTERRUPT)
				epctl |= DXEPCTL_SETD0PID;
		}
		dwc2_writel(hs, epctl, epreg);
	}

	hs_ep->halted = value;

	return 0;
}
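/*
 * Illustrative sketch, not part of the driver: function drivers halt and
 * un-halt endpoints via usb_ep_set_halt()/usb_ep_clear_halt(), which end
 * up in dwc2_hsotg_ep_sethalt_lock() below with now = false. A halt
 * request against an endpoint with pending requests is therefore refused
 * with -EAGAIN rather than stalling mid-transfer.
 */
static void __maybe_unused example_halt_ep(struct usb_ep *ep)
{
	int ret = usb_ep_set_halt(ep);	/* -> .set_halt(ep, 1) */

	if (ret == -EAGAIN)
		pr_debug("%s busy, try again later\n", ep->name);
	else if (ret == 0)
		usb_ep_clear_halt(ep);	/* -> .set_halt(ep, 0) */
}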
/**
 * dwc2_hsotg_ep_sethalt_lock - set halt on a given endpoint with lock held
 * @ep: The endpoint to set halt.
 * @value: Set or unset the halt.
 */
static int dwc2_hsotg_ep_sethalt_lock(struct usb_ep *ep, int value)
{
	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
	struct dwc2_hsotg *hs = hs_ep->parent;
	unsigned long flags = 0;
	int ret = 0;

	spin_lock_irqsave(&hs->lock, flags);
	ret = dwc2_hsotg_ep_sethalt(ep, value, false);
	spin_unlock_irqrestore(&hs->lock, flags);

	return ret;
}

static const struct usb_ep_ops dwc2_hsotg_ep_ops = {
	.enable		= dwc2_hsotg_ep_enable,
	.disable	= dwc2_hsotg_ep_disable,
	.alloc_request	= dwc2_hsotg_ep_alloc_request,
	.free_request	= dwc2_hsotg_ep_free_request,
	.queue		= dwc2_hsotg_ep_queue_lock,
	.dequeue	= dwc2_hsotg_ep_dequeue,
	.set_halt	= dwc2_hsotg_ep_sethalt_lock,
	/* note, don't believe we have any call for the fifo routines */
};
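/*
 * Illustrative sketch, not part of the driver: the usual request life
 * cycle against the ops table above. usb_ep_alloc_request() dispatches to
 * .alloc_request, usb_ep_queue() to .queue, and so on. Buffer size and
 * completion handling are hypothetical example choices.
 */
static void __maybe_unused example_request_complete(struct usb_ep *ep,
						    struct usb_request *req)
{
	kfree(req->buf);
	usb_ep_free_request(ep, req);	/* -> .free_request */
}

static int __maybe_unused example_queue_request(struct usb_ep *ep)
{
	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);

	if (!req)
		return -ENOMEM;

	req->length = 64;
	req->buf = kmalloc(req->length, GFP_KERNEL);
	if (!req->buf) {
		usb_ep_free_request(ep, req);
		return -ENOMEM;
	}
	req->complete = example_request_complete;

	return usb_ep_queue(ep, req, GFP_KERNEL);	/* -> .queue */
}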
/**
 * dwc2_hsotg_init - initialize the usb core
 * @hsotg: The driver state
 */
static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg)
{
	u32 trdtim;
	u32 usbcfg;
	/* unmask subset of endpoint interrupts */

	dwc2_writel(hsotg, DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK |
		    DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK,
		    DIEPMSK);

	dwc2_writel(hsotg, DOEPMSK_SETUPMSK | DOEPMSK_AHBERRMSK |
		    DOEPMSK_EPDISBLDMSK | DOEPMSK_XFERCOMPLMSK,
		    DOEPMSK);

	dwc2_writel(hsotg, 0, DAINTMSK);

	/* Be in disconnected state until gadget is registered */
	dwc2_set_bit(hsotg, DCTL, DCTL_SFTDISCON);

	/* setup fifos */

	dev_dbg(hsotg->dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
		dwc2_readl(hsotg, GRXFSIZ),
		dwc2_readl(hsotg, GNPTXFSIZ));

	dwc2_hsotg_init_fifo(hsotg);

	/* keep other bits untouched (so e.g. forced modes are not lost) */
	usbcfg = dwc2_readl(hsotg, GUSBCFG);
	usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
		    GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK);

	/* set the PLL on, remove the HNP/SRP and set the PHY */
	trdtim = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
	usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) |
		  (trdtim << GUSBCFG_USBTRDTIM_SHIFT);
	dwc2_writel(hsotg, usbcfg, GUSBCFG);

	if (using_dma(hsotg))
		dwc2_set_bit(hsotg, GAHBCFG, GAHBCFG_DMA_EN);
}

/**
 * dwc2_hsotg_udc_start - prepare the udc for work
 * @gadget: The usb gadget state
 * @driver: The usb gadget driver
 *
 * Perform initialization to prepare udc device and driver
 * to work.
 */
static int dwc2_hsotg_udc_start(struct usb_gadget *gadget,
				struct usb_gadget_driver *driver)
{
	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
	unsigned long flags;
	int ret;

	if (!hsotg) {
		pr_err("%s: called with no device\n", __func__);
		return -ENODEV;
	}

	if (!driver) {
		dev_err(hsotg->dev, "%s: no driver\n", __func__);
		return -EINVAL;
	}

	if (driver->max_speed < USB_SPEED_FULL)
		dev_err(hsotg->dev, "%s: bad speed\n", __func__);

	if (!driver->setup) {
		dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
		return -EINVAL;
	}

	WARN_ON(hsotg->driver);

	driver->driver.bus = NULL;
	hsotg->driver = driver;
	hsotg->gadget.dev.of_node = hsotg->dev->of_node;
	hsotg->gadget.speed = USB_SPEED_UNKNOWN;

	if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) {
		ret = dwc2_lowlevel_hw_enable(hsotg);
		if (ret)
			goto err;
	}

	if (!IS_ERR_OR_NULL(hsotg->uphy))
		otg_set_peripheral(hsotg->uphy->otg, &hsotg->gadget);

	spin_lock_irqsave(&hsotg->lock, flags);
	if (dwc2_hw_is_device(hsotg)) {
		dwc2_hsotg_init(hsotg);
		dwc2_hsotg_core_init_disconnected(hsotg, false);
	}

	hsotg->enabled = 0;
	spin_unlock_irqrestore(&hsotg->lock, flags);

	dev_info(hsotg->dev, "bound driver %s\n", driver->driver.name);

	return 0;

err:
	hsotg->driver = NULL;
	return ret;
}

/**
 * dwc2_hsotg_udc_stop - stop the udc
 * @gadget: The usb gadget state
 *
 * Stop udc hw block and stay tuned for future transmissions
 */
static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget)
{
	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
	unsigned long flags = 0;
	int ep;

	if (!hsotg)
		return -ENODEV;

	/* all endpoints should be shutdown */
	for (ep = 1; ep < hsotg->num_of_eps; ep++) {
		if (hsotg->eps_in[ep])
			dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep);
		if (hsotg->eps_out[ep])
			dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep);
	}

	spin_lock_irqsave(&hsotg->lock, flags);

	hsotg->driver = NULL;
	hsotg->gadget.speed = USB_SPEED_UNKNOWN;
	hsotg->enabled = 0;

	spin_unlock_irqrestore(&hsotg->lock, flags);

	if (!IS_ERR_OR_NULL(hsotg->uphy))
		otg_set_peripheral(hsotg->uphy->otg, NULL);

	if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
		dwc2_lowlevel_hw_disable(hsotg);

	return 0;
}
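/*
 * Illustrative sketch, not part of the driver: the minimal shape of a
 * usb_gadget_driver that would satisfy the checks in
 * dwc2_hsotg_udc_start() above (max_speed >= USB_SPEED_FULL and a setup()
 * entry point). Real function drivers normally go through the composite
 * framework instead of open-coding this; usb_gadget_probe_driver() is the
 * registration path assumed here.
 */
static int __maybe_unused example_setup(struct usb_gadget *gadget,
					const struct usb_ctrlrequest *ctrl)
{
	/* decode ctrl->bRequest etc. and queue an ep0 response */
	return -EOPNOTSUPP;
}

static struct usb_gadget_driver __maybe_unused example_gadget_driver = {
	.function	= "example",
	.max_speed	= USB_SPEED_HIGH,
	.setup		= example_setup,
	/* .bind/.unbind/.disconnect omitted for brevity */
};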
/**
 * dwc2_hsotg_gadget_getframe - read the frame number
 * @gadget: The usb gadget state
 *
 * Read the {micro} frame number
 */
static int dwc2_hsotg_gadget_getframe(struct usb_gadget *gadget)
{
	return dwc2_hsotg_read_frameno(to_hsotg(gadget));
}

/**
 * dwc2_hsotg_pullup - connect/disconnect the USB PHY
 * @gadget: The usb gadget state
 * @is_on: Current state of the USB PHY
 *
 * Connect/Disconnect the USB PHY pullup
 */
static int dwc2_hsotg_pullup(struct usb_gadget *gadget, int is_on)
{
	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
	unsigned long flags = 0;

	dev_dbg(hsotg->dev, "%s: is_on: %d op_state: %d\n", __func__, is_on,
		hsotg->op_state);

	/* Don't modify pullup state while in host mode */
	if (hsotg->op_state != OTG_STATE_B_PERIPHERAL) {
		hsotg->enabled = is_on;
		return 0;
	}

	spin_lock_irqsave(&hsotg->lock, flags);
	if (is_on) {
		hsotg->enabled = 1;
		dwc2_hsotg_core_init_disconnected(hsotg, false);
		/* Enable ACG feature in device mode, if supported */
		dwc2_enable_acg(hsotg);
		dwc2_hsotg_core_connect(hsotg);
	} else {
		dwc2_hsotg_core_disconnect(hsotg);
		dwc2_hsotg_disconnect(hsotg);
		hsotg->enabled = 0;
	}

	hsotg->gadget.speed = USB_SPEED_UNKNOWN;
	spin_unlock_irqrestore(&hsotg->lock, flags);

	return 0;
}

static int dwc2_hsotg_vbus_session(struct usb_gadget *gadget, int is_active)
{
	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
	unsigned long flags;

	dev_dbg(hsotg->dev, "%s: is_active: %d\n", __func__, is_active);
	spin_lock_irqsave(&hsotg->lock, flags);

	/*
	 * If controller is hibernated, it must exit from power_down
	 * before being initialized / de-initialized
	 */
	if (hsotg->lx_state == DWC2_L2)
		dwc2_exit_partial_power_down(hsotg, false);

	if (is_active) {
		hsotg->op_state = OTG_STATE_B_PERIPHERAL;

		dwc2_hsotg_core_init_disconnected(hsotg, false);
		if (hsotg->enabled) {
			/* Enable ACG feature in device mode, if supported */
			dwc2_enable_acg(hsotg);
			dwc2_hsotg_core_connect(hsotg);
		}
	} else {
		dwc2_hsotg_core_disconnect(hsotg);
		dwc2_hsotg_disconnect(hsotg);
	}

	spin_unlock_irqrestore(&hsotg->lock, flags);
	return 0;
}

/**
 * dwc2_hsotg_vbus_draw - report bMaxPower field
 * @gadget: The usb gadget state
 * @mA: Amount of current
 *
 * Report how much power the device may consume to the phy.
 */
static int dwc2_hsotg_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
{
	struct dwc2_hsotg *hsotg = to_hsotg(gadget);

	if (IS_ERR_OR_NULL(hsotg->uphy))
		return -ENOTSUPP;
	return usb_phy_set_power(hsotg->uphy, mA);
}

static const struct usb_gadget_ops dwc2_hsotg_gadget_ops = {
	.get_frame	= dwc2_hsotg_gadget_getframe,
	.udc_start	= dwc2_hsotg_udc_start,
	.udc_stop	= dwc2_hsotg_udc_stop,
	.pullup		= dwc2_hsotg_pullup,
	.vbus_session	= dwc2_hsotg_vbus_session,
	.vbus_draw	= dwc2_hsotg_vbus_draw,
};
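/*
 * Illustrative sketch, not part of the driver: how the ops above are
 * reached from outside. usb_gadget_connect()/usb_gadget_disconnect()
 * dispatch to .pullup, usb_gadget_vbus_connect()/usb_gadget_vbus_disconnect()
 * to .vbus_session, and usb_gadget_vbus_draw() to .vbus_draw. The 500 mA
 * figure is just an example value.
 */
static void __maybe_unused example_vbus_event(struct usb_gadget *gadget,
					      bool vbus_present)
{
	if (vbus_present) {
		usb_gadget_vbus_connect(gadget);	/* -> .vbus_session(g, 1) */
		usb_gadget_vbus_draw(gadget, 500);	/* -> .vbus_draw() */
	} else {
		usb_gadget_vbus_disconnect(gadget);	/* -> .vbus_session(g, 0) */
	}
}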
/**
 * dwc2_hsotg_initep - initialise a single endpoint
 * @hsotg: The device state.
 * @hs_ep: The endpoint to be initialised.
 * @epnum: The endpoint number
 * @dir_in: True if direction is in.
 *
 * Initialise the given endpoint (as part of the probe and device state
 * creation) to give to the gadget driver. Setup the endpoint name, any
 * direction information and other state that may be required.
 */
static void dwc2_hsotg_initep(struct dwc2_hsotg *hsotg,
			      struct dwc2_hsotg_ep *hs_ep,
			      int epnum,
			      bool dir_in)
{
	char *dir;

	if (epnum == 0)
		dir = "";
	else if (dir_in)
		dir = "in";
	else
		dir = "out";

	hs_ep->dir_in = dir_in;
	hs_ep->index = epnum;

	snprintf(hs_ep->name, sizeof(hs_ep->name), "ep%d%s", epnum, dir);

	INIT_LIST_HEAD(&hs_ep->queue);
	INIT_LIST_HEAD(&hs_ep->ep.ep_list);

	/* add to the list of endpoints known by the gadget driver */
	if (epnum)
		list_add_tail(&hs_ep->ep.ep_list, &hsotg->gadget.ep_list);

	hs_ep->parent = hsotg;
	hs_ep->ep.name = hs_ep->name;

	if (hsotg->params.speed == DWC2_SPEED_PARAM_LOW)
		usb_ep_set_maxpacket_limit(&hs_ep->ep, 8);
	else
		usb_ep_set_maxpacket_limit(&hs_ep->ep,
					   epnum ? 1024 : EP0_MPS_LIMIT);
	hs_ep->ep.ops = &dwc2_hsotg_ep_ops;

	if (epnum == 0) {
		hs_ep->ep.caps.type_control = true;
	} else {
		if (hsotg->params.speed != DWC2_SPEED_PARAM_LOW) {
			hs_ep->ep.caps.type_iso = true;
			hs_ep->ep.caps.type_bulk = true;
		}
		hs_ep->ep.caps.type_int = true;
	}

	if (dir_in)
		hs_ep->ep.caps.dir_in = true;
	else
		hs_ep->ep.caps.dir_out = true;

	/*
	 * if we're using dma, we need to set the next-endpoint pointer
	 * to be something valid.
	 */

	if (using_dma(hsotg)) {
		u32 next = DXEPCTL_NEXTEP((epnum + 1) % 15);

		if (dir_in)
			dwc2_writel(hsotg, next, DIEPCTL(epnum));
		else
			dwc2_writel(hsotg, next, DOEPCTL(epnum));
	}
}
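/*
 * Illustrative sketch, not part of the driver: once the endpoints have
 * been initialised and added to gadget.ep_list above, a function driver
 * claims one with usb_ep_autoconfig(), which matches the descriptor
 * against each endpoint's caps and maxpacket limit and fills in the
 * endpoint address on success. The descriptor here is a made-up example.
 */
static struct usb_ep *__maybe_unused example_claim_ep(struct usb_gadget *g)
{
	static struct usb_endpoint_descriptor int_in_desc = {
		.bLength		= USB_DT_ENDPOINT_SIZE,
		.bDescriptorType	= USB_DT_ENDPOINT,
		.bEndpointAddress	= USB_DIR_IN,
		.bmAttributes		= USB_ENDPOINT_XFER_INT,
		.wMaxPacketSize		= cpu_to_le16(64),
		.bInterval		= 4,
	};

	return usb_ep_autoconfig(g, &int_in_desc);
}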
"dedicated" : "shared", 4586 hsotg->fifo_mem); 4587 return 0; 4588 } 4589 4590 /** 4591 * dwc2_hsotg_dump - dump state of the udc 4592 * @hsotg: Programming view of the DWC_otg controller 4593 * 4594 */ 4595 static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg) 4596 { 4597 #ifdef DEBUG 4598 struct device *dev = hsotg->dev; 4599 u32 val; 4600 int idx; 4601 4602 dev_info(dev, "DCFG=0x%08x, DCTL=0x%08x, DIEPMSK=%08x\n", 4603 dwc2_readl(hsotg, DCFG), dwc2_readl(hsotg, DCTL), 4604 dwc2_readl(hsotg, DIEPMSK)); 4605 4606 dev_info(dev, "GAHBCFG=0x%08x, GHWCFG1=0x%08x\n", 4607 dwc2_readl(hsotg, GAHBCFG), dwc2_readl(hsotg, GHWCFG1)); 4608 4609 dev_info(dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n", 4610 dwc2_readl(hsotg, GRXFSIZ), dwc2_readl(hsotg, GNPTXFSIZ)); 4611 4612 /* show periodic fifo settings */ 4613 4614 for (idx = 1; idx < hsotg->num_of_eps; idx++) { 4615 val = dwc2_readl(hsotg, DPTXFSIZN(idx)); 4616 dev_info(dev, "DPTx[%d] FSize=%d, StAddr=0x%08x\n", idx, 4617 val >> FIFOSIZE_DEPTH_SHIFT, 4618 val & FIFOSIZE_STARTADDR_MASK); 4619 } 4620 4621 for (idx = 0; idx < hsotg->num_of_eps; idx++) { 4622 dev_info(dev, 4623 "ep%d-in: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", idx, 4624 dwc2_readl(hsotg, DIEPCTL(idx)), 4625 dwc2_readl(hsotg, DIEPTSIZ(idx)), 4626 dwc2_readl(hsotg, DIEPDMA(idx))); 4627 4628 val = dwc2_readl(hsotg, DOEPCTL(idx)); 4629 dev_info(dev, 4630 "ep%d-out: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", 4631 idx, dwc2_readl(hsotg, DOEPCTL(idx)), 4632 dwc2_readl(hsotg, DOEPTSIZ(idx)), 4633 dwc2_readl(hsotg, DOEPDMA(idx))); 4634 } 4635 4636 dev_info(dev, "DVBUSDIS=0x%08x, DVBUSPULSE=%08x\n", 4637 dwc2_readl(hsotg, DVBUSDIS), dwc2_readl(hsotg, DVBUSPULSE)); 4638 #endif 4639 } 4640 4641 /** 4642 * dwc2_gadget_init - init function for gadget 4643 * @hsotg: Programming view of the DWC_otg controller 4644 * 4645 */ 4646 int dwc2_gadget_init(struct dwc2_hsotg *hsotg) 4647 { 4648 struct device *dev = hsotg->dev; 4649 int epnum; 4650 int ret; 4651 4652 /* Dump fifo information */ 4653 dev_dbg(dev, "NonPeriodic TXFIFO size: %d\n", 4654 hsotg->params.g_np_tx_fifo_size); 4655 dev_dbg(dev, "RXFIFO size: %d\n", hsotg->params.g_rx_fifo_size); 4656 4657 hsotg->gadget.max_speed = USB_SPEED_HIGH; 4658 hsotg->gadget.ops = &dwc2_hsotg_gadget_ops; 4659 hsotg->gadget.name = dev_name(dev); 4660 hsotg->remote_wakeup_allowed = 0; 4661 4662 if (hsotg->params.lpm) 4663 hsotg->gadget.lpm_capable = true; 4664 4665 if (hsotg->dr_mode == USB_DR_MODE_OTG) 4666 hsotg->gadget.is_otg = 1; 4667 else if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) 4668 hsotg->op_state = OTG_STATE_B_PERIPHERAL; 4669 4670 ret = dwc2_hsotg_hw_cfg(hsotg); 4671 if (ret) { 4672 dev_err(hsotg->dev, "Hardware configuration failed: %d\n", ret); 4673 return ret; 4674 } 4675 4676 hsotg->ctrl_buff = devm_kzalloc(hsotg->dev, 4677 DWC2_CTRL_BUFF_SIZE, GFP_KERNEL); 4678 if (!hsotg->ctrl_buff) 4679 return -ENOMEM; 4680 4681 hsotg->ep0_buff = devm_kzalloc(hsotg->dev, 4682 DWC2_CTRL_BUFF_SIZE, GFP_KERNEL); 4683 if (!hsotg->ep0_buff) 4684 return -ENOMEM; 4685 4686 if (using_desc_dma(hsotg)) { 4687 ret = dwc2_gadget_alloc_ctrl_desc_chains(hsotg); 4688 if (ret < 0) 4689 return ret; 4690 } 4691 4692 ret = devm_request_irq(hsotg->dev, hsotg->irq, dwc2_hsotg_irq, 4693 IRQF_SHARED, dev_name(hsotg->dev), hsotg); 4694 if (ret < 0) { 4695 dev_err(dev, "cannot claim IRQ for gadget\n"); 4696 return ret; 4697 } 4698 4699 /* hsotg->num_of_eps holds number of EPs other than ep0 */ 4700 4701 if (hsotg->num_of_eps == 0) { 4702 dev_err(dev, "wrong number of EPs (zero)\n"); 4703 
/**
 * dwc2_gadget_init - init function for gadget
 * @hsotg: Programming view of the DWC_otg controller
 *
 */
int dwc2_gadget_init(struct dwc2_hsotg *hsotg)
{
	struct device *dev = hsotg->dev;
	int epnum;
	int ret;

	/* Dump fifo information */
	dev_dbg(dev, "NonPeriodic TXFIFO size: %d\n",
		hsotg->params.g_np_tx_fifo_size);
	dev_dbg(dev, "RXFIFO size: %d\n", hsotg->params.g_rx_fifo_size);

	hsotg->gadget.max_speed = USB_SPEED_HIGH;
	hsotg->gadget.ops = &dwc2_hsotg_gadget_ops;
	hsotg->gadget.name = dev_name(dev);
	hsotg->remote_wakeup_allowed = 0;

	if (hsotg->params.lpm)
		hsotg->gadget.lpm_capable = true;

	if (hsotg->dr_mode == USB_DR_MODE_OTG)
		hsotg->gadget.is_otg = 1;
	else if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
		hsotg->op_state = OTG_STATE_B_PERIPHERAL;

	ret = dwc2_hsotg_hw_cfg(hsotg);
	if (ret) {
		dev_err(hsotg->dev, "Hardware configuration failed: %d\n", ret);
		return ret;
	}

	hsotg->ctrl_buff = devm_kzalloc(hsotg->dev,
					DWC2_CTRL_BUFF_SIZE, GFP_KERNEL);
	if (!hsotg->ctrl_buff)
		return -ENOMEM;

	hsotg->ep0_buff = devm_kzalloc(hsotg->dev,
				       DWC2_CTRL_BUFF_SIZE, GFP_KERNEL);
	if (!hsotg->ep0_buff)
		return -ENOMEM;

	if (using_desc_dma(hsotg)) {
		ret = dwc2_gadget_alloc_ctrl_desc_chains(hsotg);
		if (ret < 0)
			return ret;
	}

	ret = devm_request_irq(hsotg->dev, hsotg->irq, dwc2_hsotg_irq,
			       IRQF_SHARED, dev_name(hsotg->dev), hsotg);
	if (ret < 0) {
		dev_err(dev, "cannot claim IRQ for gadget\n");
		return ret;
	}

	/* hsotg->num_of_eps holds number of EPs other than ep0 */

	if (hsotg->num_of_eps == 0) {
		dev_err(dev, "wrong number of EPs (zero)\n");
		return -EINVAL;
	}

	/* setup endpoint information */

	INIT_LIST_HEAD(&hsotg->gadget.ep_list);
	hsotg->gadget.ep0 = &hsotg->eps_out[0]->ep;

	/* allocate EP0 request */

	hsotg->ctrl_req = dwc2_hsotg_ep_alloc_request(&hsotg->eps_out[0]->ep,
						      GFP_KERNEL);
	if (!hsotg->ctrl_req) {
		dev_err(dev, "failed to allocate ctrl req\n");
		return -ENOMEM;
	}

	/* initialise the endpoints now the core has been initialised */
	for (epnum = 0; epnum < hsotg->num_of_eps; epnum++) {
		if (hsotg->eps_in[epnum])
			dwc2_hsotg_initep(hsotg, hsotg->eps_in[epnum],
					  epnum, 1);
		if (hsotg->eps_out[epnum])
			dwc2_hsotg_initep(hsotg, hsotg->eps_out[epnum],
					  epnum, 0);
	}

	ret = usb_add_gadget_udc(dev, &hsotg->gadget);
	if (ret) {
		dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep,
					   hsotg->ctrl_req);
		return ret;
	}
	dwc2_hsotg_dump(hsotg);

	return 0;
}

/**
 * dwc2_hsotg_remove - remove function for hsotg driver
 * @hsotg: Programming view of the DWC_otg controller
 *
 */
int dwc2_hsotg_remove(struct dwc2_hsotg *hsotg)
{
	usb_del_gadget_udc(&hsotg->gadget);
	dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep, hsotg->ctrl_req);

	return 0;
}

int dwc2_hsotg_suspend(struct dwc2_hsotg *hsotg)
{
	unsigned long flags;

	if (hsotg->lx_state != DWC2_L0)
		return 0;

	if (hsotg->driver) {
		int ep;

		dev_info(hsotg->dev, "suspending usb gadget %s\n",
			 hsotg->driver->driver.name);

		spin_lock_irqsave(&hsotg->lock, flags);
		if (hsotg->enabled)
			dwc2_hsotg_core_disconnect(hsotg);
		dwc2_hsotg_disconnect(hsotg);
		hsotg->gadget.speed = USB_SPEED_UNKNOWN;
		spin_unlock_irqrestore(&hsotg->lock, flags);

		for (ep = 0; ep < hsotg->num_of_eps; ep++) {
			if (hsotg->eps_in[ep])
				dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep);
			if (hsotg->eps_out[ep])
				dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep);
		}
	}

	return 0;
}

int dwc2_hsotg_resume(struct dwc2_hsotg *hsotg)
{
	unsigned long flags;

	if (hsotg->lx_state == DWC2_L2)
		return 0;

	if (hsotg->driver) {
		dev_info(hsotg->dev, "resuming usb gadget %s\n",
			 hsotg->driver->driver.name);

		spin_lock_irqsave(&hsotg->lock, flags);
		dwc2_hsotg_core_init_disconnected(hsotg, false);
		if (hsotg->enabled) {
			/* Enable ACG feature in device mode, if supported */
			dwc2_enable_acg(hsotg);
			dwc2_hsotg_core_connect(hsotg);
		}
		spin_unlock_irqrestore(&hsotg->lock, flags);
	}

	return 0;
}
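/*
 * Illustrative sketch, not part of the driver: dwc2_hsotg_suspend() and
 * dwc2_hsotg_resume() are intended to be called from the platform glue's
 * system PM callbacks. The dev_get_drvdata() layout here is an assumption
 * made for the example.
 */
static int __maybe_unused example_pm_suspend(struct device *dev)
{
	struct dwc2_hsotg *hsotg = dev_get_drvdata(dev);

	return dwc2_hsotg_suspend(hsotg);
}

static int __maybe_unused example_pm_resume(struct device *dev)
{
	struct dwc2_hsotg *hsotg = dev_get_drvdata(dev);

	return dwc2_hsotg_resume(hsotg);
}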
4813 * 4814 * @hsotg: Programming view of the DWC_otg controller 4815 */ 4816 int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg) 4817 { 4818 struct dwc2_dregs_backup *dr; 4819 int i; 4820 4821 dev_dbg(hsotg->dev, "%s\n", __func__); 4822 4823 /* Backup dev regs */ 4824 dr = &hsotg->dr_backup; 4825 4826 dr->dcfg = dwc2_readl(hsotg, DCFG); 4827 dr->dctl = dwc2_readl(hsotg, DCTL); 4828 dr->daintmsk = dwc2_readl(hsotg, DAINTMSK); 4829 dr->diepmsk = dwc2_readl(hsotg, DIEPMSK); 4830 dr->doepmsk = dwc2_readl(hsotg, DOEPMSK); 4831 4832 for (i = 0; i < hsotg->num_of_eps; i++) { 4833 /* Backup IN EPs */ 4834 dr->diepctl[i] = dwc2_readl(hsotg, DIEPCTL(i)); 4835 4836 /* Ensure DATA PID is correctly configured */ 4837 if (dr->diepctl[i] & DXEPCTL_DPID) 4838 dr->diepctl[i] |= DXEPCTL_SETD1PID; 4839 else 4840 dr->diepctl[i] |= DXEPCTL_SETD0PID; 4841 4842 dr->dieptsiz[i] = dwc2_readl(hsotg, DIEPTSIZ(i)); 4843 dr->diepdma[i] = dwc2_readl(hsotg, DIEPDMA(i)); 4844 4845 /* Backup OUT EPs */ 4846 dr->doepctl[i] = dwc2_readl(hsotg, DOEPCTL(i)); 4847 4848 /* Ensure DATA PID is correctly configured */ 4849 if (dr->doepctl[i] & DXEPCTL_DPID) 4850 dr->doepctl[i] |= DXEPCTL_SETD1PID; 4851 else 4852 dr->doepctl[i] |= DXEPCTL_SETD0PID; 4853 4854 dr->doeptsiz[i] = dwc2_readl(hsotg, DOEPTSIZ(i)); 4855 dr->doepdma[i] = dwc2_readl(hsotg, DOEPDMA(i)); 4856 dr->dtxfsiz[i] = dwc2_readl(hsotg, DPTXFSIZN(i)); 4857 } 4858 dr->valid = true; 4859 return 0; 4860 } 4861 4862 /** 4863 * dwc2_restore_device_registers() - Restore controller device registers. 4864 * When resuming usb bus, device registers needs to be restored 4865 * if controller power were disabled. 4866 * 4867 * @hsotg: Programming view of the DWC_otg controller 4868 * @remote_wakeup: Indicates whether resume is initiated by Device or Host. 4869 * 4870 * Return: 0 if successful, negative error code otherwise 4871 */ 4872 int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, int remote_wakeup) 4873 { 4874 struct dwc2_dregs_backup *dr; 4875 int i; 4876 4877 dev_dbg(hsotg->dev, "%s\n", __func__); 4878 4879 /* Restore dev regs */ 4880 dr = &hsotg->dr_backup; 4881 if (!dr->valid) { 4882 dev_err(hsotg->dev, "%s: no device registers to restore\n", 4883 __func__); 4884 return -EINVAL; 4885 } 4886 dr->valid = false; 4887 4888 if (!remote_wakeup) 4889 dwc2_writel(hsotg, dr->dctl, DCTL); 4890 4891 dwc2_writel(hsotg, dr->daintmsk, DAINTMSK); 4892 dwc2_writel(hsotg, dr->diepmsk, DIEPMSK); 4893 dwc2_writel(hsotg, dr->doepmsk, DOEPMSK); 4894 4895 for (i = 0; i < hsotg->num_of_eps; i++) { 4896 /* Restore IN EPs */ 4897 dwc2_writel(hsotg, dr->dieptsiz[i], DIEPTSIZ(i)); 4898 dwc2_writel(hsotg, dr->diepdma[i], DIEPDMA(i)); 4899 dwc2_writel(hsotg, dr->doeptsiz[i], DOEPTSIZ(i)); 4900 /** WA for enabled EPx's IN in DDMA mode. On entering to 4901 * hibernation wrong value read and saved from DIEPDMAx, 4902 * as result BNA interrupt asserted on hibernation exit 4903 * by restoring from saved area. 4904 */ 4905 if (hsotg->params.g_dma_desc && 4906 (dr->diepctl[i] & DXEPCTL_EPENA)) 4907 dr->diepdma[i] = hsotg->eps_in[i]->desc_list_dma; 4908 dwc2_writel(hsotg, dr->dtxfsiz[i], DPTXFSIZN(i)); 4909 dwc2_writel(hsotg, dr->diepctl[i], DIEPCTL(i)); 4910 /* Restore OUT EPs */ 4911 dwc2_writel(hsotg, dr->doeptsiz[i], DOEPTSIZ(i)); 4912 /* WA for enabled EPx's OUT in DDMA mode. On entering to 4913 * hibernation wrong value read and saved from DOEPDMAx, 4914 * as result BNA interrupt asserted on hibernation exit 4915 * by restoring from saved area. 
/**
 * dwc2_restore_device_registers() - Restore controller device registers.
 * When resuming the usb bus, the device registers need to be restored
 * if controller power was disabled.
 *
 * @hsotg: Programming view of the DWC_otg controller
 * @remote_wakeup: Indicates whether resume is initiated by Device or Host.
 *
 * Return: 0 if successful, negative error code otherwise
 */
int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, int remote_wakeup)
{
	struct dwc2_dregs_backup *dr;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Restore dev regs */
	dr = &hsotg->dr_backup;
	if (!dr->valid) {
		dev_err(hsotg->dev, "%s: no device registers to restore\n",
			__func__);
		return -EINVAL;
	}
	dr->valid = false;

	if (!remote_wakeup)
		dwc2_writel(hsotg, dr->dctl, DCTL);

	dwc2_writel(hsotg, dr->daintmsk, DAINTMSK);
	dwc2_writel(hsotg, dr->diepmsk, DIEPMSK);
	dwc2_writel(hsotg, dr->doepmsk, DOEPMSK);

	for (i = 0; i < hsotg->num_of_eps; i++) {
		/* Restore IN EPs */
		dwc2_writel(hsotg, dr->dieptsiz[i], DIEPTSIZ(i));
		dwc2_writel(hsotg, dr->diepdma[i], DIEPDMA(i));
		dwc2_writel(hsotg, dr->doeptsiz[i], DOEPTSIZ(i));
		/* WA for enabled EPx's IN in DDMA mode. On entering
		 * hibernation a wrong value is read and saved from DIEPDMAx;
		 * as a result a BNA interrupt is asserted on hibernation exit
		 * when restoring from the saved area.
		 */
		if (hsotg->params.g_dma_desc &&
		    (dr->diepctl[i] & DXEPCTL_EPENA))
			dr->diepdma[i] = hsotg->eps_in[i]->desc_list_dma;
		dwc2_writel(hsotg, dr->dtxfsiz[i], DPTXFSIZN(i));
		dwc2_writel(hsotg, dr->diepctl[i], DIEPCTL(i));
		/* Restore OUT EPs */
		dwc2_writel(hsotg, dr->doeptsiz[i], DOEPTSIZ(i));
		/* WA for enabled EPx's OUT in DDMA mode. On entering
		 * hibernation a wrong value is read and saved from DOEPDMAx;
		 * as a result a BNA interrupt is asserted on hibernation exit
		 * when restoring from the saved area.
		 */
		if (hsotg->params.g_dma_desc &&
		    (dr->doepctl[i] & DXEPCTL_EPENA))
			dr->doepdma[i] = hsotg->eps_out[i]->desc_list_dma;
		dwc2_writel(hsotg, dr->doepdma[i], DOEPDMA(i));
		dwc2_writel(hsotg, dr->doepctl[i], DOEPCTL(i));
	}

	return 0;
}

/**
 * dwc2_gadget_init_lpm - Configure the core to support LPM in device mode
 *
 * @hsotg: Programming view of DWC_otg controller
 *
 */
void dwc2_gadget_init_lpm(struct dwc2_hsotg *hsotg)
{
	u32 val;

	if (!hsotg->params.lpm)
		return;

	val = GLPMCFG_LPMCAP | GLPMCFG_APPL1RES;
	val |= hsotg->params.hird_threshold_en ? GLPMCFG_HIRD_THRES_EN : 0;
	val |= hsotg->params.lpm_clock_gating ? GLPMCFG_ENBLSLPM : 0;
	val |= hsotg->params.hird_threshold << GLPMCFG_HIRD_THRES_SHIFT;
	val |= hsotg->params.besl ? GLPMCFG_ENBESL : 0;
	dwc2_writel(hsotg, val, GLPMCFG);
	dev_dbg(hsotg->dev, "GLPMCFG=0x%08x\n", dwc2_readl(hsotg, GLPMCFG));
}

/**
 * dwc2_gadget_enter_hibernation() - Put controller in Hibernation.
 *
 * @hsotg: Programming view of the DWC_otg controller
 *
 * Return non-zero if failed to enter hibernation.
 */
int dwc2_gadget_enter_hibernation(struct dwc2_hsotg *hsotg)
{
	u32 gpwrdn;
	int ret = 0;

	/* Change to L2(suspend) state */
	hsotg->lx_state = DWC2_L2;
	dev_dbg(hsotg->dev, "Start of hibernation completed\n");
	ret = dwc2_backup_global_registers(hsotg);
	if (ret) {
		dev_err(hsotg->dev, "%s: failed to backup global registers\n",
			__func__);
		return ret;
	}
	ret = dwc2_backup_device_registers(hsotg);
	if (ret) {
		dev_err(hsotg->dev, "%s: failed to backup device registers\n",
			__func__);
		return ret;
	}

	gpwrdn = GPWRDN_PWRDNRSTN;
	gpwrdn |= GPWRDN_PMUACTV;
	dwc2_writel(hsotg, gpwrdn, GPWRDN);
	udelay(10);

	/* Set flag to indicate that we are in hibernation */
	hsotg->hibernated = 1;

	/* Enable interrupts from wake up logic */
	gpwrdn = dwc2_readl(hsotg, GPWRDN);
	gpwrdn |= GPWRDN_PMUINTSEL;
	dwc2_writel(hsotg, gpwrdn, GPWRDN);
	udelay(10);

	/* Unmask device mode interrupts in GPWRDN */
	gpwrdn = dwc2_readl(hsotg, GPWRDN);
	gpwrdn |= GPWRDN_RST_DET_MSK;
	gpwrdn |= GPWRDN_LNSTSCHG_MSK;
	gpwrdn |= GPWRDN_STS_CHGINT_MSK;
	dwc2_writel(hsotg, gpwrdn, GPWRDN);
	udelay(10);

	/* Enable Power Down Clamp */
	gpwrdn = dwc2_readl(hsotg, GPWRDN);
	gpwrdn |= GPWRDN_PWRDNCLMP;
	dwc2_writel(hsotg, gpwrdn, GPWRDN);
	udelay(10);

	/* Switch off VDD */
	gpwrdn = dwc2_readl(hsotg, GPWRDN);
	gpwrdn |= GPWRDN_PWRDNSWTCH;
	dwc2_writel(hsotg, gpwrdn, GPWRDN);
	udelay(10);

	/* Save gpwrdn register for further use on a stschng interrupt */
	hsotg->gr_backup.gpwrdn = dwc2_readl(hsotg, GPWRDN);
	dev_dbg(hsotg->dev, "Hibernation completed\n");

	return ret;
}
/**
 * dwc2_gadget_exit_hibernation()
 * This function is for exiting from Device mode hibernation by host-initiated
 * resume/reset and device-initiated remote wakeup.
 *
 * @hsotg: Programming view of the DWC_otg controller
 * @rem_wakeup: indicates whether resume is initiated by Device or Host.
 * @reset: indicates whether resume is initiated by Reset.
 *
 * Return non-zero if failed to exit from hibernation.
 */
int dwc2_gadget_exit_hibernation(struct dwc2_hsotg *hsotg,
				 int rem_wakeup, int reset)
{
	u32 pcgcctl;
	u32 gpwrdn;
	u32 dctl;
	int ret = 0;
	struct dwc2_gregs_backup *gr;
	struct dwc2_dregs_backup *dr;

	gr = &hsotg->gr_backup;
	dr = &hsotg->dr_backup;

	if (!hsotg->hibernated) {
		dev_dbg(hsotg->dev, "Already exited from Hibernation\n");
		return 1;
	}
	dev_dbg(hsotg->dev,
		"%s: called with rem_wakeup = %d reset = %d\n",
		__func__, rem_wakeup, reset);

	dwc2_hib_restore_common(hsotg, rem_wakeup, 0);

	if (!reset) {
		/* Clear all pending interrupts */
		dwc2_writel(hsotg, 0xffffffff, GINTSTS);
	}

	/* De-assert Restore */
	gpwrdn = dwc2_readl(hsotg, GPWRDN);
	gpwrdn &= ~GPWRDN_RESTORE;
	dwc2_writel(hsotg, gpwrdn, GPWRDN);
	udelay(10);

	if (!rem_wakeup) {
		pcgcctl = dwc2_readl(hsotg, PCGCTL);
		pcgcctl &= ~PCGCTL_RSTPDWNMODULE;
		dwc2_writel(hsotg, pcgcctl, PCGCTL);
	}

	/* Restore GUSBCFG, DCFG and DCTL */
	dwc2_writel(hsotg, gr->gusbcfg, GUSBCFG);
	dwc2_writel(hsotg, dr->dcfg, DCFG);
	dwc2_writel(hsotg, dr->dctl, DCTL);

	/* De-assert Wakeup Logic */
	gpwrdn = dwc2_readl(hsotg, GPWRDN);
	gpwrdn &= ~GPWRDN_PMUACTV;
	dwc2_writel(hsotg, gpwrdn, GPWRDN);

	if (rem_wakeup) {
		udelay(10);
		/* Start Remote Wakeup Signaling */
		dwc2_writel(hsotg, dr->dctl | DCTL_RMTWKUPSIG, DCTL);
	} else {
		udelay(50);
		/* Set Device programming done bit */
		dctl = dwc2_readl(hsotg, DCTL);
		dctl |= DCTL_PWRONPRGDONE;
		dwc2_writel(hsotg, dctl, DCTL);
	}
	/* Wait for interrupts which must be cleared */
	mdelay(2);
	/* Clear all pending interrupts */
	dwc2_writel(hsotg, 0xffffffff, GINTSTS);

	/* Restore global registers */
	ret = dwc2_restore_global_registers(hsotg);
	if (ret) {
		dev_err(hsotg->dev, "%s: failed to restore registers\n",
			__func__);
		return ret;
	}

	/* Restore device registers */
	ret = dwc2_restore_device_registers(hsotg, rem_wakeup);
	if (ret) {
		dev_err(hsotg->dev, "%s: failed to restore device registers\n",
			__func__);
		return ret;
	}

	if (rem_wakeup) {
		mdelay(10);
		dctl = dwc2_readl(hsotg, DCTL);
		dctl &= ~DCTL_RMTWKUPSIG;
		dwc2_writel(hsotg, dctl, DCTL);
	}

	hsotg->hibernated = 0;
	hsotg->lx_state = DWC2_L0;
	dev_dbg(hsotg->dev, "Hibernation recovery completes here\n");

	return ret;
}
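/*
 * Illustrative sketch, not part of the driver: the expected pairing of the
 * two hibernation entry points above. Entry saves state and powers the
 * core down; a wakeup event (reset, resume or remote wakeup, decoded
 * elsewhere from GPWRDN) then drives the matching exit call. In the real
 * driver the exit path is interrupt-driven; the straight-line flow here is
 * hypothetical.
 */
static int __maybe_unused example_hibernation_cycle(struct dwc2_hsotg *hsotg,
						    int rem_wakeup, int reset)
{
	int ret = dwc2_gadget_enter_hibernation(hsotg);

	if (ret)
		return ret;

	/* ...core stays powered down until the wakeup logic raises an IRQ... */

	return dwc2_gadget_exit_hibernation(hsotg, rem_wakeup, reset);
}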