// SPDX-License-Identifier: GPL-2.0
/*
 * ep0.c - DesignWare USB3 DRD Controller Endpoint 0 Handling
 *
 * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com
 *
 * Authors: Felipe Balbi <balbi@ti.com>,
 *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/composite.h>

#include "core.h"
#include "debug.h"
#include "gadget.h"
#include "io.h"

static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep);
static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
		struct dwc3_ep *dep, struct dwc3_request *req);
static int dwc3_ep0_delegate_req(struct dwc3 *dwc,
		struct usb_ctrlrequest *ctrl);

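/*
 * Fill the next TRB in the shared ep0 TRB pool with the given buffer
 * address, length and control type. Chained TRBs get CHN set and advance
 * the enqueue pointer; the final TRB gets IOC | LST so the core raises an
 * interrupt when the transfer completes.
 */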
static void dwc3_ep0_prepare_one_trb(struct dwc3_ep *dep,
		dma_addr_t buf_dma, u32 len, u32 type, bool chain)
{
	struct dwc3_trb *trb;
	struct dwc3 *dwc;

	dwc = dep->dwc;
	trb = &dwc->ep0_trb[dep->trb_enqueue];

	if (chain)
		dep->trb_enqueue++;

	trb->bpl = lower_32_bits(buf_dma);
	trb->bph = upper_32_bits(buf_dma);
	trb->size = len;
	trb->ctrl = type;

	trb->ctrl |= (DWC3_TRB_CTRL_HWO
			| DWC3_TRB_CTRL_ISP_IMI);

	if (chain)
		trb->ctrl |= DWC3_TRB_CTRL_CHN;
	else
		trb->ctrl |= (DWC3_TRB_CTRL_IOC
				| DWC3_TRB_CTRL_LST);

	trace_dwc3_prepare_trb(dep, trb);
}

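/*
 * Issue DEPCMD_STARTTRANSFER for the TRB(s) prepared in the ep0 TRB pool.
 * Nothing is done if a transfer is already started on this endpoint; on
 * success the next expected ep0 event is set to DWC3_EP0_COMPLETE.
 */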
static int dwc3_ep0_start_trans(struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3 *dwc;
	int ret;

	if (dep->flags & DWC3_EP_TRANSFER_STARTED)
		return 0;

	dwc = dep->dwc;

	memset(&params, 0, sizeof(params));
	params.param0 = upper_32_bits(dwc->ep0_trb_addr);
	params.param1 = lower_32_bits(dwc->ep0_trb_addr);

	ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_STARTTRANSFER, &params);
	if (ret < 0)
		return ret;

	dwc->ep0_next_event = DWC3_EP0_COMPLETE;

	return 0;
}

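/*
 * Core of ep0 request queueing, called with dwc->lock held. The request is
 * added to the pending list and, depending on the current control transfer
 * state, the Data phase is started right away, a delayed Status phase is
 * kicked, or the request is left waiting for the next ep0 event.
 */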
static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep,
		struct dwc3_request *req)
{
	struct dwc3 *dwc = dep->dwc;

	req->request.actual = 0;
	req->request.status = -EINPROGRESS;
	req->epnum = dep->number;

	list_add_tail(&req->list, &dep->pending_list);

	/*
	 * Gadget driver might not be quick enough to queue a request
	 * before we get a Transfer Not Ready event on this endpoint.
	 *
	 * In that case, we will set DWC3_EP_PENDING_REQUEST. When that
	 * flag is set, it's telling us that as soon as Gadget queues the
	 * required request, we should kick the transfer here because the
	 * IRQ we were waiting for is long gone.
	 */
	if (dep->flags & DWC3_EP_PENDING_REQUEST) {
		unsigned int direction;

		direction = !!(dep->flags & DWC3_EP0_DIR_IN);

		if (dwc->ep0state != EP0_DATA_PHASE) {
			dev_WARN(dwc->dev, "Unexpected pending request\n");
			return 0;
		}

		__dwc3_ep0_do_control_data(dwc, dwc->eps[direction], req);

		dep->flags &= ~(DWC3_EP_PENDING_REQUEST |
				DWC3_EP0_DIR_IN);

		return 0;
	}

	/*
	 * In case gadget driver asked us to delay the STATUS phase,
	 * handle it here.
	 */
	if (dwc->delayed_status) {
		unsigned int direction;

		direction = !dwc->ep0_expect_in;
		dwc->delayed_status = false;
		usb_gadget_set_state(dwc->gadget, USB_STATE_CONFIGURED);

		if (dwc->ep0state == EP0_STATUS_PHASE)
			__dwc3_ep0_do_control_status(dwc, dwc->eps[direction]);

		return 0;
	}

	/*
	 * Unfortunately we have uncovered a limitation wrt the Data Phase.
	 *
	 * Section 9.4 says we can wait for the XferNotReady(DATA) event to
	 * come before issuing Start Transfer command, but if we do, we will
	 * miss situations where the host starts another SETUP phase instead of
	 * the DATA phase. Such cases happen at least on TD.7.6 of the Link
	 * Layer Compliance Suite.
	 *
	 * The problem surfaces due to the fact that in case of back-to-back
	 * SETUP packets there will be no XferNotReady(DATA) generated and we
	 * will be stuck waiting for XferNotReady(DATA) forever.
	 *
	 * By looking at tables 9-13 and 9-14 of the Databook, we can see that
	 * it tells us to start Data Phase right away. It also mentions that if
	 * we receive a SETUP phase instead of the DATA phase, core will issue
	 * XferComplete for the DATA phase, before actually initiating it in
	 * the wire, with the TRB's status set to "SETUP_PENDING". Such status
	 * can only be used to print some debugging logs, as the core expects
	 * us to go through to the STATUS phase and start a CONTROL_STATUS TRB,
	 * just so it completes right away, without transferring anything and,
	 * only then, we can go back to the SETUP phase.
	 *
	 * Because of this scenario, SNPS decided to change the programming
	 * model of control transfers and support on-demand transfers only for
	 * the STATUS phase. To fix the issue we have now, we will always wait
	 * for gadget driver to queue the DATA phase's struct usb_request, then
	 * start it right away.
	 *
	 * If we're actually in a 2-stage transfer, we will wait for
	 * XferNotReady(STATUS).
	 */
	if (dwc->three_stage_setup) {
		unsigned int direction;

		direction = dwc->ep0_expect_in;
		dwc->ep0state = EP0_DATA_PHASE;

		__dwc3_ep0_do_control_data(dwc, dwc->eps[direction], req);

		dep->flags &= ~DWC3_EP0_DIR_IN;
	}

	return 0;
}

int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
		gfp_t gfp_flags)
{
	struct dwc3_request *req = to_dwc3_request(request);
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	unsigned long flags;

	int ret;

	spin_lock_irqsave(&dwc->lock, flags);
	if (!dep->endpoint.desc || !dwc->pullups_connected || !dwc->connected) {
		dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
				dep->name);
		ret = -ESHUTDOWN;
		goto out;
	}

	/* we share one TRB for ep0/1 */
	if (!list_empty(&dep->pending_list)) {
		ret = -EBUSY;
		goto out;
	}

	ret = __dwc3_gadget_ep0_queue(dep, req);

out:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

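/*
 * Stall the control endpoint and restart the control transfer state
 * machine: give back any pending ep0 request, reset both physical
 * endpoints 0 and 1, and re-arm reception of the next SETUP packet.
 */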
void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;

	/* reinitialize physical ep1 */
	dep = dwc->eps[1];
	dep->flags &= DWC3_EP_RESOURCE_ALLOCATED;
	dep->flags |= DWC3_EP_ENABLED;

	/* stall is always issued on EP0 */
	dep = dwc->eps[0];
	__dwc3_gadget_ep_set_halt(dep, 1, false);
	dep->flags &= DWC3_EP_RESOURCE_ALLOCATED | DWC3_EP_TRANSFER_STARTED;
	dep->flags |= DWC3_EP_ENABLED;
	dwc->delayed_status = false;

	if (!list_empty(&dep->pending_list)) {
		struct dwc3_request *req;

		req = next_request(&dep->pending_list);
		if (!dwc->connected)
			dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
		else
			dwc3_gadget_giveback(dep, req, -ECONNRESET);
	}

	dwc->eps[0]->trb_enqueue = 0;
	dwc->eps[1]->trb_enqueue = 0;
	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);
}

int __dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	dwc3_ep0_stall_and_restart(dwc);

	return 0;
}

int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep0_set_halt(ep, value);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

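/*
 * Prepare and start an 8-byte CONTROL_SETUP TRB on physical ep0 so the
 * next SETUP packet can be received, and flush any endpoint whose End
 * Transfer was deferred with DWC3_EP_DELAY_STOP.
 */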
void dwc3_ep0_out_start(struct dwc3 *dwc)
{
	struct dwc3_ep *dep;
	int ret;
	int i;

	complete(&dwc->ep0_in_setup);

	dep = dwc->eps[0];
	dwc3_ep0_prepare_one_trb(dep, dwc->ep0_trb_addr, 8,
			DWC3_TRBCTL_CONTROL_SETUP, false);
	ret = dwc3_ep0_start_trans(dep);
	if (ret < 0)
		dev_err(dwc->dev, "ep0 out start transfer failed: %d\n", ret);

	for (i = 2; i < DWC3_ENDPOINTS_NUM; i++) {
		struct dwc3_ep *dwc3_ep;

		dwc3_ep = dwc->eps[i];
		if (!dwc3_ep)
			continue;

		if (!(dwc3_ep->flags & DWC3_EP_DELAY_STOP))
			continue;

		dwc3_ep->flags &= ~DWC3_EP_DELAY_STOP;
		if (dwc->connected)
			dwc3_stop_active_transfer(dwc3_ep, true, true);
		else
			dwc3_remove_requests(dwc, dwc3_ep, -ESHUTDOWN);
	}
}

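/*
 * Translate the wIndex of a control request (endpoint number plus
 * direction bit) into the matching dwc3_ep. Returns NULL if the endpoint
 * does not exist or is not enabled.
 */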
static struct dwc3_ep *dwc3_wIndex_to_dep(struct dwc3 *dwc, __le16 wIndex_le)
{
	struct dwc3_ep *dep;
	u32 windex = le16_to_cpu(wIndex_le);
	u32 epnum;

	epnum = (windex & USB_ENDPOINT_NUMBER_MASK) << 1;
	if ((windex & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
		epnum |= 1;

	dep = dwc->eps[epnum];
	if (dep == NULL)
		return NULL;

	if (dep->flags & DWC3_EP_ENABLED)
		return dep;

	return NULL;
}

static void dwc3_ep0_status_cmpl(struct usb_ep *ep, struct usb_request *req)
{
}
/*
 * ch 9.4.5
 */
static int dwc3_ep0_handle_status(struct dwc3 *dwc,
		struct usb_ctrlrequest *ctrl)
{
	struct dwc3_ep *dep;
	u32 recip;
	u32 value;
	u32 reg;
	u16 usb_status = 0;
	__le16 *response_pkt;

	/* We don't support PTM_STATUS */
	value = le16_to_cpu(ctrl->wValue);
	if (value != 0)
		return -EINVAL;

	recip = ctrl->bRequestType & USB_RECIP_MASK;
	switch (recip) {
	case USB_RECIP_DEVICE:
		/*
		 * LTM will be set once we know how to set this in HW.
		 */
		usb_status |= dwc->gadget->is_selfpowered;

		if ((dwc->speed == DWC3_DSTS_SUPERSPEED) ||
		    (dwc->speed == DWC3_DSTS_SUPERSPEED_PLUS)) {
			reg = dwc3_readl(dwc->regs, DWC3_DCTL);
			if (reg & DWC3_DCTL_INITU1ENA)
				usb_status |= 1 << USB_DEV_STAT_U1_ENABLED;
			if (reg & DWC3_DCTL_INITU2ENA)
				usb_status |= 1 << USB_DEV_STAT_U2_ENABLED;
		} else {
			usb_status |= dwc->gadget->wakeup_armed <<
					USB_DEVICE_REMOTE_WAKEUP;
		}

		break;

	case USB_RECIP_INTERFACE:
		/*
		 * Function Remote Wake Capable	D0
		 * Function Remote Wakeup	D1
		 */
		return dwc3_ep0_delegate_req(dwc, ctrl);

	case USB_RECIP_ENDPOINT:
		dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
		if (!dep)
			return -EINVAL;

		if (dep->flags & DWC3_EP_STALL)
			usb_status = 1 << USB_ENDPOINT_HALT;
		break;
	default:
		return -EINVAL;
	}

	response_pkt = (__le16 *) dwc->setup_buf;
	*response_pkt = cpu_to_le16(usb_status);

	dep = dwc->eps[0];
	dwc->ep0_usb_req.dep = dep;
	dwc->ep0_usb_req.request.length = sizeof(*response_pkt);
	dwc->ep0_usb_req.request.buf = dwc->setup_buf;
	dwc->ep0_usb_req.request.complete = dwc3_ep0_status_cmpl;

	return __dwc3_gadget_ep0_queue(dep, &dwc->ep0_usb_req);
}

static int dwc3_ep0_handle_u1(struct dwc3 *dwc, enum usb_device_state state,
		int set)
{
	u32 reg;

	if (state != USB_STATE_CONFIGURED)
		return -EINVAL;
	if ((dwc->speed != DWC3_DSTS_SUPERSPEED) &&
	    (dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS))
		return -EINVAL;
	if (set && dwc->dis_u1_entry_quirk)
		return -EINVAL;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	if (set)
		reg |= DWC3_DCTL_INITU1ENA;
	else
		reg &= ~DWC3_DCTL_INITU1ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	return 0;
}

static int dwc3_ep0_handle_u2(struct dwc3 *dwc, enum usb_device_state state,
		int set)
{
	u32 reg;

	if (state != USB_STATE_CONFIGURED)
		return -EINVAL;
	if ((dwc->speed != DWC3_DSTS_SUPERSPEED) &&
	    (dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS))
		return -EINVAL;
	if (set && dwc->dis_u2_entry_quirk)
		return -EINVAL;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	if (set)
		reg |= DWC3_DCTL_INITU2ENA;
	else
		reg &= ~DWC3_DCTL_INITU2ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	return 0;
}

static int dwc3_ep0_handle_test(struct dwc3 *dwc, enum usb_device_state state,
		u32 wIndex, int set)
{
	if ((wIndex & 0xff) != 0)
		return -EINVAL;
	if (!set)
		return -EINVAL;

	switch (wIndex >> 8) {
	case USB_TEST_J:
	case USB_TEST_K:
	case USB_TEST_SE0_NAK:
	case USB_TEST_PACKET:
	case USB_TEST_FORCE_ENABLE:
		dwc->test_mode_nr = wIndex >> 8;
		dwc->test_mode = true;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int dwc3_ep0_handle_device(struct dwc3 *dwc,
		struct usb_ctrlrequest *ctrl, int set)
{
	enum usb_device_state state;
	u32 wValue;
	u32 wIndex;
	int ret = 0;

	wValue = le16_to_cpu(ctrl->wValue);
	wIndex = le16_to_cpu(ctrl->wIndex);
	state = dwc->gadget->state;

	switch (wValue) {
	case USB_DEVICE_REMOTE_WAKEUP:
		if (dwc->wakeup_configured)
			dwc->gadget->wakeup_armed = set;
		else
			ret = -EINVAL;
		break;
	/*
	 * 9.4.1 says only for SS, in AddressState only for
	 * default control pipe
	 */
	case USB_DEVICE_U1_ENABLE:
		ret = dwc3_ep0_handle_u1(dwc, state, set);
		break;
	case USB_DEVICE_U2_ENABLE:
		ret = dwc3_ep0_handle_u2(dwc, state, set);
		break;
	case USB_DEVICE_LTM_ENABLE:
		ret = -EINVAL;
		break;
	case USB_DEVICE_TEST_MODE:
		ret = dwc3_ep0_handle_test(dwc, state, wIndex, set);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int dwc3_ep0_handle_intf(struct dwc3 *dwc,
		struct usb_ctrlrequest *ctrl, int set)
{
	u32 wValue;
	int ret = 0;

	wValue = le16_to_cpu(ctrl->wValue);

	switch (wValue) {
	case USB_INTRF_FUNC_SUSPEND:
		ret = dwc3_ep0_delegate_req(dwc, ctrl);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int dwc3_ep0_handle_endpoint(struct dwc3 *dwc,
		struct usb_ctrlrequest *ctrl, int set)
{
	struct dwc3_ep *dep;
	u32 wValue;
	int ret;

	wValue = le16_to_cpu(ctrl->wValue);

	switch (wValue) {
	case USB_ENDPOINT_HALT:
		dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
		if (!dep)
			return -EINVAL;

		if (set == 0 && (dep->flags & DWC3_EP_WEDGE))
			break;

		ret = __dwc3_gadget_ep_set_halt(dep, set, true);
		if (ret)
			return -EINVAL;

		/* ClearFeature(Halt) may need delayed status */
		if (!set && (dep->flags & DWC3_EP_END_TRANSFER_PENDING))
			return USB_GADGET_DELAYED_STATUS;

		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
		struct usb_ctrlrequest *ctrl, int set)
{
	u32 recip;
	int ret;

	recip = ctrl->bRequestType & USB_RECIP_MASK;

	switch (recip) {
	case USB_RECIP_DEVICE:
		ret = dwc3_ep0_handle_device(dwc, ctrl, set);
		break;
	case USB_RECIP_INTERFACE:
		ret = dwc3_ep0_handle_intf(dwc, ctrl, set);
		break;
	case USB_RECIP_ENDPOINT:
		ret = dwc3_ep0_handle_endpoint(dwc, ctrl, set);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	enum usb_device_state state = dwc->gadget->state;
	u32 addr;
	u32 reg;

	addr = le16_to_cpu(ctrl->wValue);
	if (addr > 127) {
		dev_err(dwc->dev, "invalid device address %d\n", addr);
		return -EINVAL;
	}

	if (state == USB_STATE_CONFIGURED) {
		dev_err(dwc->dev, "can't SetAddress() from Configured State\n");
		return -EINVAL;
	}

	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
	reg |= DWC3_DCFG_DEVADDR(addr);
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);

	if (addr)
		usb_gadget_set_state(dwc->gadget, USB_STATE_ADDRESS);
	else
		usb_gadget_set_state(dwc->gadget, USB_STATE_DEFAULT);

	return 0;
}

static int dwc3_ep0_delegate_req(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	int ret = -EINVAL;

	if (dwc->async_callbacks) {
		spin_unlock(&dwc->lock);
		ret = dwc->gadget_driver->setup(dwc->gadget, ctrl);
		spin_lock(&dwc->lock);
	}
	return ret;
}

static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	enum usb_device_state state = dwc->gadget->state;
	u32 cfg;
	int ret;
	u32 reg;

	cfg = le16_to_cpu(ctrl->wValue);

	switch (state) {
	case USB_STATE_DEFAULT:
		return -EINVAL;

	case USB_STATE_ADDRESS:
		dwc3_gadget_start_config(dwc, 2);
		dwc3_gadget_clear_tx_fifos(dwc);

		ret = dwc3_ep0_delegate_req(dwc, ctrl);
		/* if the cfg matches and the cfg is non zero */
		if (cfg && (!ret || (ret == USB_GADGET_DELAYED_STATUS))) {

			/*
			 * only change state if set_config has already
			 * been processed. If gadget driver returns
			 * USB_GADGET_DELAYED_STATUS, we will wait
			 * to change the state on the next usb_ep_queue()
			 */
			if (ret == 0)
				usb_gadget_set_state(dwc->gadget,
						     USB_STATE_CONFIGURED);

			/*
			 * Enable transition to U1/U2 state when
			 * nothing is pending from application.
			 */
			reg = dwc3_readl(dwc->regs, DWC3_DCTL);
			if (!dwc->dis_u1_entry_quirk)
				reg |= DWC3_DCTL_ACCEPTU1ENA;
			if (!dwc->dis_u2_entry_quirk)
				reg |= DWC3_DCTL_ACCEPTU2ENA;
			dwc3_writel(dwc->regs, DWC3_DCTL, reg);
		}
		break;

	case USB_STATE_CONFIGURED:
		ret = dwc3_ep0_delegate_req(dwc, ctrl);
		if (!cfg && !ret)
			usb_gadget_set_state(dwc->gadget,
					     USB_STATE_ADDRESS);
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}

static void dwc3_ep0_set_sel_cmpl(struct usb_ep *ep, struct usb_request *req)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	u32 param = 0;
	u32 reg;

	struct timing {
		u8 u1sel;
		u8 u1pel;
		__le16 u2sel;
		__le16 u2pel;
	} __packed timing;

	int ret;

	memcpy(&timing, req->buf, sizeof(timing));

	dwc->u1sel = timing.u1sel;
	dwc->u1pel = timing.u1pel;
	dwc->u2sel = le16_to_cpu(timing.u2sel);
	dwc->u2pel = le16_to_cpu(timing.u2pel);

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	if (reg & DWC3_DCTL_INITU2ENA)
		param = dwc->u2pel;
	if (reg & DWC3_DCTL_INITU1ENA)
		param = dwc->u1pel;

	/*
	 * According to Synopsys Databook, if parameter is
	 * greater than 125, a value of zero should be
	 * programmed in the register.
	 */
	if (param > 125)
		param = 0;

	/* now that we have the time, issue DGCMD Set Sel */
	ret = dwc3_send_gadget_generic_command(dwc,
			DWC3_DGCMD_SET_PERIODIC_PAR, param);
	WARN_ON(ret < 0);
}

static int dwc3_ep0_set_sel(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	struct dwc3_ep *dep;
	enum usb_device_state state = dwc->gadget->state;
	u16 wLength;

	if (state == USB_STATE_DEFAULT)
		return -EINVAL;

	wLength = le16_to_cpu(ctrl->wLength);

	if (wLength != 6) {
		dev_err(dwc->dev, "Set SEL should be 6 bytes, got %d\n",
				wLength);
		return -EINVAL;
	}

	/*
	 * To handle Set SEL we need to receive 6 bytes from Host. So let's
	 * queue a usb_request for 6 bytes.
	 *
	 * Remember, though, this controller can't handle non-wMaxPacketSize
	 * aligned transfers on the OUT direction, so we queue a request for
	 * wMaxPacketSize instead.
	 */
	dep = dwc->eps[0];
	dwc->ep0_usb_req.dep = dep;
	dwc->ep0_usb_req.request.length = dep->endpoint.maxpacket;
	dwc->ep0_usb_req.request.buf = dwc->setup_buf;
	dwc->ep0_usb_req.request.complete = dwc3_ep0_set_sel_cmpl;

	return __dwc3_gadget_ep0_queue(dep, &dwc->ep0_usb_req);
}

static int dwc3_ep0_set_isoch_delay(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	u16 wLength;
	u16 wValue;
	u16 wIndex;

	wValue = le16_to_cpu(ctrl->wValue);
	wLength = le16_to_cpu(ctrl->wLength);
	wIndex = le16_to_cpu(ctrl->wIndex);

	if (wIndex || wLength)
		return -EINVAL;

	dwc->gadget->isoch_delay = wValue;

	return 0;
}

static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	int ret;

	switch (ctrl->bRequest) {
	case USB_REQ_GET_STATUS:
		ret = dwc3_ep0_handle_status(dwc, ctrl);
		break;
	case USB_REQ_CLEAR_FEATURE:
		ret = dwc3_ep0_handle_feature(dwc, ctrl, 0);
		break;
	case USB_REQ_SET_FEATURE:
		ret = dwc3_ep0_handle_feature(dwc, ctrl, 1);
		break;
	case USB_REQ_SET_ADDRESS:
		ret = dwc3_ep0_set_address(dwc, ctrl);
		break;
	case USB_REQ_SET_CONFIGURATION:
		ret = dwc3_ep0_set_config(dwc, ctrl);
		break;
	case USB_REQ_SET_SEL:
		ret = dwc3_ep0_set_sel(dwc, ctrl);
		break;
	case USB_REQ_SET_ISOCH_DELAY:
		ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
		break;
	default:
		ret = dwc3_ep0_delegate_req(dwc, ctrl);
		break;
	}

	return ret;
}

static void dwc3_ep0_inspect_setup(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct usb_ctrlrequest *ctrl = (void *) dwc->ep0_trb;
	int ret = -EINVAL;
	u32 len;

	if (!dwc->gadget_driver || !dwc->softconnect || !dwc->connected)
		goto out;

	trace_dwc3_ctrl_req(ctrl);

	len = le16_to_cpu(ctrl->wLength);
	if (!len) {
		dwc->three_stage_setup = false;
		dwc->ep0_expect_in = false;
		dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;
	} else {
		dwc->three_stage_setup = true;
		dwc->ep0_expect_in = !!(ctrl->bRequestType & USB_DIR_IN);
		dwc->ep0_next_event = DWC3_EP0_NRDY_DATA;
	}

	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
		ret = dwc3_ep0_std_request(dwc, ctrl);
	else
		ret = dwc3_ep0_delegate_req(dwc, ctrl);

	if (ret == USB_GADGET_DELAYED_STATUS)
		dwc->delayed_status = true;

out:
	if (ret < 0)
		dwc3_ep0_stall_and_restart(dwc);
}

static void dwc3_ep0_complete_data(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_request *r;
	struct usb_request *ur;
	struct dwc3_trb *trb;
	struct dwc3_ep *ep0;
	u32 transferred = 0;
	u32 status;
	u32 length;
	u8 epnum;

	epnum = event->endpoint_number;
	ep0 = dwc->eps[0];

	dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;
	trb = dwc->ep0_trb;
	trace_dwc3_complete_trb(ep0, trb);

	r = next_request(&ep0->pending_list);
	if (!r)
		return;

	status = DWC3_TRB_SIZE_TRBSTS(trb->size);
	if (status == DWC3_TRBSTS_SETUP_PENDING) {
		dwc->setup_packet_pending = true;
		if (r)
			dwc3_gadget_giveback(ep0, r, -ECONNRESET);

		return;
	}

	ur = &r->request;

	length = trb->size & DWC3_TRB_SIZE_MASK;
	transferred = ur->length - length;
	ur->actual += transferred;

	if ((IS_ALIGNED(ur->length, ep0->endpoint.maxpacket) &&
	     ur->length && ur->zero) || dwc->ep0_bounced) {
		trb++;
		trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
		trace_dwc3_complete_trb(ep0, trb);

		if (r->direction)
			dwc->eps[1]->trb_enqueue = 0;
		else
			dwc->eps[0]->trb_enqueue = 0;

		dwc->ep0_bounced = false;
	}

	if ((epnum & 1) && ur->actual < ur->length)
		dwc3_ep0_stall_and_restart(dwc);
	else
		dwc3_gadget_giveback(ep0, r, 0);
}

static void dwc3_ep0_complete_status(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_request *r;
	struct dwc3_ep *dep;
	struct dwc3_trb *trb;
	u32 status;

	dep = dwc->eps[0];
	trb = dwc->ep0_trb;

	trace_dwc3_complete_trb(dep, trb);

	if (!list_empty(&dep->pending_list)) {
		r = next_request(&dep->pending_list);

		dwc3_gadget_giveback(dep, r, 0);
	}

	if (dwc->test_mode) {
		int ret;

		ret = dwc3_gadget_set_test_mode(dwc, dwc->test_mode_nr);
		if (ret < 0) {
			dev_err(dwc->dev, "invalid test #%d\n",
					dwc->test_mode_nr);
			dwc3_ep0_stall_and_restart(dwc);
			return;
		}
	}

	status = DWC3_TRB_SIZE_TRBSTS(trb->size);
	if (status == DWC3_TRBSTS_SETUP_PENDING)
		dwc->setup_packet_pending = true;

	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);
}

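/*
 * XferComplete on ep0: clear the transfer-started state and dispatch to
 * the handler for the current phase (SETUP, DATA or STATUS).
 */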
static void dwc3_ep0_xfer_complete(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep *dep = dwc->eps[event->endpoint_number];

	dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
	dep->resource_index = 0;
	dwc->setup_packet_pending = false;

	switch (dwc->ep0state) {
	case EP0_SETUP_PHASE:
		dwc3_ep0_inspect_setup(dwc, event);
		break;

	case EP0_DATA_PHASE:
		dwc3_ep0_complete_data(dwc, event);
		break;

	case EP0_STATUS_PHASE:
		dwc3_ep0_complete_status(dwc, event);
		break;
	default:
		WARN(true, "UNKNOWN ep0state %d\n", dwc->ep0state);
	}
}

static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
		struct dwc3_ep *dep, struct dwc3_request *req)
{
	unsigned int trb_length = 0;
	int ret;

	req->direction = !!dep->number;

	if (req->request.length == 0) {
		if (!req->direction)
			trb_length = dep->endpoint.maxpacket;

		dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr, trb_length,
				DWC3_TRBCTL_CONTROL_DATA, false);
		ret = dwc3_ep0_start_trans(dep);
	} else if (!IS_ALIGNED(req->request.length, dep->endpoint.maxpacket)
			&& (dep->number == 0)) {
		u32 maxpacket;
		u32 rem;

		ret = usb_gadget_map_request_by_dev(dwc->sysdev,
				&req->request, dep->number);
		if (ret)
			return;

		maxpacket = dep->endpoint.maxpacket;
		rem = req->request.length % maxpacket;
		dwc->ep0_bounced = true;

		/* prepare normal TRB */
		dwc3_ep0_prepare_one_trb(dep, req->request.dma,
					 req->request.length,
					 DWC3_TRBCTL_CONTROL_DATA,
					 true);

		req->trb = &dwc->ep0_trb[dep->trb_enqueue - 1];

		/* Now prepare one extra TRB to align transfer size */
		dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr,
					 maxpacket - rem,
					 DWC3_TRBCTL_CONTROL_DATA,
					 false);
		ret = dwc3_ep0_start_trans(dep);
	} else if (IS_ALIGNED(req->request.length, dep->endpoint.maxpacket) &&
		   req->request.length && req->request.zero) {

		ret = usb_gadget_map_request_by_dev(dwc->sysdev,
				&req->request, dep->number);
		if (ret)
			return;

		/* prepare normal TRB */
		dwc3_ep0_prepare_one_trb(dep, req->request.dma,
					 req->request.length,
					 DWC3_TRBCTL_CONTROL_DATA,
					 true);

		req->trb = &dwc->ep0_trb[dep->trb_enqueue - 1];

		if (!req->direction)
			trb_length = dep->endpoint.maxpacket;

		/* Now prepare one extra TRB to align transfer size */
		dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr,
					 trb_length, DWC3_TRBCTL_CONTROL_DATA,
					 false);
		ret = dwc3_ep0_start_trans(dep);
	} else {
		ret = usb_gadget_map_request_by_dev(dwc->sysdev,
				&req->request, dep->number);
		if (ret)
			return;

		dwc3_ep0_prepare_one_trb(dep, req->request.dma,
				req->request.length, DWC3_TRBCTL_CONTROL_DATA,
				false);

		req->trb = &dwc->ep0_trb[dep->trb_enqueue];

		ret = dwc3_ep0_start_trans(dep);
	}

	if (ret < 0)
		dev_err(dwc->dev,
			"ep0 data phase start transfer failed: %d\n", ret);
}

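/*
 * Prepare a CONTROL_STATUS2 or CONTROL_STATUS3 TRB (two- vs three-stage
 * control transfer) and start the Status phase.
 */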
static int dwc3_ep0_start_control_status(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;
	u32 type;

	type = dwc->three_stage_setup ? DWC3_TRBCTL_CONTROL_STATUS3
				      : DWC3_TRBCTL_CONTROL_STATUS2;

	dwc3_ep0_prepare_one_trb(dep, dwc->ep0_trb_addr, 0, type, false);
	return dwc3_ep0_start_trans(dep);
}

static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	int ret;

	ret = dwc3_ep0_start_control_status(dep);
	if (ret)
		dev_err(dwc->dev,
			"ep0 status phase start transfer failed: %d\n", ret);
}

static void dwc3_ep0_do_control_status(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep *dep = dwc->eps[event->endpoint_number];

	__dwc3_ep0_do_control_status(dwc, dep);
}

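/*
 * Kick the Status phase of a delayed-status control transfer: clear the
 * delayed_status bookkeeping and start the Status phase if ep0 is already
 * waiting in EP0_STATUS_PHASE.
 */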
void dwc3_ep0_send_delayed_status(struct dwc3 *dwc)
{
	unsigned int direction = !dwc->ep0_expect_in;

	dwc->delayed_status = false;
	dwc->clear_stall_protocol = 0;

	if (dwc->ep0state != EP0_STATUS_PHASE)
		return;

	__dwc3_ep0_do_control_status(dwc, dwc->eps[direction]);
}

void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	u32 cmd;
	int ret;

	/*
	 * For status/DATA OUT stage, TRB will be queued on ep0 out
	 * endpoint for which resource index is zero. Hence allow
	 * queuing ENDXFER command for ep0 out endpoint.
	 */
	if (!dep->resource_index && dep->number)
		return;

	cmd = DWC3_DEPCMD_ENDTRANSFER;
	cmd |= DWC3_DEPCMD_CMDIOC;
	cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
	memset(&params, 0, sizeof(params));
	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
	if (ret)
		dev_err_ratelimited(dwc->dev,
			"ep0 data phase end transfer failed: %d\n", ret);

	dep->resource_index = 0;
}

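/*
 * XferNotReady on ep0. A Data phase event is only acted upon when it
 * arrives for the wrong direction, in which case the started Data phase
 * is ended and the endpoint stalled. A Status phase event starts the
 * Status phase on demand, honouring delayed status.
 */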
static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	switch (event->status) {
	case DEPEVT_STATUS_CONTROL_DATA:
		if (!dwc->softconnect || !dwc->connected)
			return;
		/*
		 * We already have a DATA transfer in the controller's cache,
		 * if we receive a XferNotReady(DATA) we will ignore it, unless
		 * it's for the wrong direction.
		 *
		 * In that case, we must issue END_TRANSFER command to the Data
		 * Phase we already have started and issue SetStall on the
		 * control endpoint.
		 */
		if (dwc->ep0_expect_in != event->endpoint_number) {
			struct dwc3_ep *dep = dwc->eps[dwc->ep0_expect_in];

			dev_err(dwc->dev, "unexpected direction for Data Phase\n");
			dwc3_ep0_end_control_data(dwc, dep);
			dwc3_ep0_stall_and_restart(dwc);
			return;
		}

		break;

	case DEPEVT_STATUS_CONTROL_STATUS:
		if (dwc->ep0_next_event != DWC3_EP0_NRDY_STATUS)
			return;

		if (dwc->setup_packet_pending) {
			dwc3_ep0_stall_and_restart(dwc);
			return;
		}

		dwc->ep0state = EP0_STATUS_PHASE;

		if (dwc->delayed_status) {
			struct dwc3_ep *dep = dwc->eps[0];

			WARN_ON_ONCE(event->endpoint_number != 1);
			/*
			 * We should handle the delay STATUS phase here if the
			 * request for handling delay STATUS has been queued
			 * into the list.
			 */
			if (!list_empty(&dep->pending_list)) {
				dwc->delayed_status = false;
				usb_gadget_set_state(dwc->gadget,
						     USB_STATE_CONFIGURED);
				dwc3_ep0_do_control_status(dwc, event);
			}

			return;
		}

		dwc3_ep0_do_control_status(dwc, event);
	}
}

void dwc3_ep0_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep *dep = dwc->eps[event->endpoint_number];
	u8 cmd;

	switch (event->endpoint_event) {
	case DWC3_DEPEVT_XFERCOMPLETE:
		dwc3_ep0_xfer_complete(dwc, event);
		break;

	case DWC3_DEPEVT_XFERNOTREADY:
		dwc3_ep0_xfernotready(dwc, event);
		break;

	case DWC3_DEPEVT_XFERINPROGRESS:
	case DWC3_DEPEVT_RXTXFIFOEVT:
	case DWC3_DEPEVT_STREAMEVT:
		break;
	case DWC3_DEPEVT_EPCMDCMPLT:
		cmd = DEPEVT_PARAMETER_CMD(event->parameters);

		if (cmd == DWC3_DEPCMD_ENDTRANSFER) {
			dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
			dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
		}
		break;
	default:
		dev_err(dwc->dev, "unknown endpoint event %d\n", event->endpoint_event);
		break;
	}
}