// SPDX-License-Identifier: GPL-2.0+
/*
 * bcm63xx_udc.c -- BCM63xx UDC high/full speed USB device controller
 *
 * Copyright (C) 2012 Kevin Cernekee <cernekee@gmail.com>
 * Copyright (C) 2012 Broadcom Corporation
 */

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/workqueue.h>

#include <bcm63xx_cpu.h>
#include <bcm63xx_iudma.h>
#include <bcm63xx_dev_usb_usbd.h>
#include <bcm63xx_io.h>
#include <bcm63xx_regs.h>

#define DRV_MODULE_NAME		"bcm63xx_udc"

static const char bcm63xx_ep0name[] = "ep0";

static const struct {
	const char *name;
	const struct usb_ep_caps caps;
} bcm63xx_ep_info[] = {
#define EP_INFO(_name, _caps) \
	{ \
		.name = _name, \
		.caps = _caps, \
	}

	EP_INFO(bcm63xx_ep0name,
		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
	EP_INFO("ep1in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep2out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep3in-int",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep4out-int",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_OUT)),

#undef EP_INFO
};

static bool use_fullspeed;
module_param(use_fullspeed, bool, S_IRUGO);
MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");

/*
 * RX IRQ coalescing options:
 *
 * false (default) - one IRQ per DATAx packet.  Slow but reliable.  The
 * driver is able to pass the "testusb" suite and recover from conditions like:
 *
 *   1) Device queues up a 2048-byte RX IUDMA transaction on an OUT bulk ep
 *   2) Host sends 512 bytes of data
 *   3) Host decides to reconfigure the device and sends SET_INTERFACE
 *   4) Device shuts down the endpoint and cancels the RX transaction
 *
 * true - one IRQ per transfer, for transfers <= 2048B.  Generates
 * considerably fewer IRQs, but error recovery is less robust.  Does not
 * reliably pass "testusb".
 *
 * TX always uses coalescing, because we can cancel partially complete TX
 * transfers by repeatedly flushing the FIFO.  The hardware doesn't allow
 * this on RX.
 */
static bool irq_coalesce;
module_param(irq_coalesce, bool, S_IRUGO);
MODULE_PARM_DESC(irq_coalesce, "take one IRQ per RX transfer");
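
/*
 * Usage sketch (editorial note, not part of the driver): both knobs above
 * are read-only (S_IRUGO) module parameters, so they must be chosen at
 * load time, e.g.:
 *
 *	modprobe bcm63xx_udc use_fullspeed=1 irq_coalesce=1
 */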

#define BCM63XX_NUM_EP			5
#define BCM63XX_NUM_IUDMA		6
#define BCM63XX_NUM_FIFO_PAIRS		3

#define IUDMA_RESET_TIMEOUT_US		10000

#define IUDMA_EP0_RXCHAN		0
#define IUDMA_EP0_TXCHAN		1

#define IUDMA_MAX_FRAGMENT		2048
#define BCM63XX_MAX_CTRL_PKT		64

#define BCMEP_CTRL			0x00
#define BCMEP_ISOC			0x01
#define BCMEP_BULK			0x02
#define BCMEP_INTR			0x03

#define BCMEP_OUT			0x00
#define BCMEP_IN			0x01

#define BCM63XX_SPD_FULL		1
#define BCM63XX_SPD_HIGH		0

#define IUDMA_DMAC_OFFSET		0x200
#define IUDMA_DMAS_OFFSET		0x400

enum bcm63xx_ep0_state {
	EP0_REQUEUE,
	EP0_IDLE,
	EP0_IN_DATA_PHASE_SETUP,
	EP0_IN_DATA_PHASE_COMPLETE,
	EP0_OUT_DATA_PHASE_SETUP,
	EP0_OUT_DATA_PHASE_COMPLETE,
	EP0_OUT_STATUS_PHASE,
	EP0_IN_FAKE_STATUS_PHASE,
	EP0_SHUTDOWN,
};

static const char __maybe_unused bcm63xx_ep0_state_names[][32] = {
	"REQUEUE",
	"IDLE",
	"IN_DATA_PHASE_SETUP",
	"IN_DATA_PHASE_COMPLETE",
	"OUT_DATA_PHASE_SETUP",
	"OUT_DATA_PHASE_COMPLETE",
	"OUT_STATUS_PHASE",
	"IN_FAKE_STATUS_PHASE",
	"SHUTDOWN",
};
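
/*
 * Illustrative helper (editorial sketch, not in the original driver):
 * a bounds-checked lookup of the state names above, of the sort that
 * debugfs or tracing code might use.
 */
static inline const char *bcm63xx_ep0_state_name(int state)
{
	if (state < 0 || state >= (int)ARRAY_SIZE(bcm63xx_ep0_state_names))
		return "(unknown)";
	return bcm63xx_ep0_state_names[state];
}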

/**
 * struct iudma_ch_cfg - Static configuration for an IUDMA channel.
 * @ep_num: USB endpoint number.
 * @n_bds: Number of buffer descriptors in the ring.
 * @ep_type: Endpoint type (control, bulk, interrupt).
 * @dir: Direction (in, out).
 * @n_fifo_slots: Number of FIFO entries to allocate for this channel.
 * @max_pkt_hs: Maximum packet size in high speed mode.
 * @max_pkt_fs: Maximum packet size in full speed mode.
 */
struct iudma_ch_cfg {
	int				ep_num;
	int				n_bds;
	int				ep_type;
	int				dir;
	int				n_fifo_slots;
	int				max_pkt_hs;
	int				max_pkt_fs;
};

static const struct iudma_ch_cfg iudma_defaults[] = {

	/* This controller was designed to support a CDC/RNDIS application.
	   It may be possible to reconfigure some of the endpoints, but
	   the hardware limitations (FIFO sizing and number of DMA channels)
	   may significantly impact flexibility and/or stability.  Change
	   these values at your own risk.

	      ep_num       ep_type           n_fifo_slots    max_pkt_fs
	idx      |  n_bds     |         dir       |  max_pkt_hs  |
	 |       |    |       |          |        |      |       |       */
	[0] = { -1,   4, BCMEP_CTRL, BCMEP_OUT,  32,    64,     64 },
	[1] = {  0,   4, BCMEP_CTRL, BCMEP_OUT,  32,    64,     64 },
	[2] = {  2,  16, BCMEP_BULK, BCMEP_OUT, 128,   512,     64 },
	[3] = {  1,  16, BCMEP_BULK, BCMEP_IN,  128,   512,     64 },
	[4] = {  4,   4, BCMEP_INTR, BCMEP_OUT,  32,    64,     64 },
	[5] = {  3,   4, BCMEP_INTR, BCMEP_IN,   32,    64,     64 },
};
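
/*
 * Editorial sketch: a compile-time guard that the table above stays in
 * sync with BCM63XX_NUM_IUDMA.  static_assert() is assumed to be
 * available via <linux/build_bug.h> (pulled in by <linux/kernel.h>).
 */
static_assert(ARRAY_SIZE(iudma_defaults) == BCM63XX_NUM_IUDMA,
	      "iudma_defaults[] must describe every IUDMA channel");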

struct bcm63xx_udc;

/**
 * struct iudma_ch - Represents the current state of a single IUDMA channel.
 * @ch_idx: IUDMA channel index (0 to BCM63XX_NUM_IUDMA-1).
 * @ep_num: USB endpoint number.  -1 for ep0 RX.
 * @enabled: Whether bcm63xx_ep_enable() has been called.
 * @max_pkt: "Chunk size" on the USB interface.  Based on interface speed.
 * @is_tx: true for TX, false for RX.
 * @bep: Pointer to the associated endpoint.  NULL for ep0 RX.
 * @udc: Reference to the device controller.
 * @read_bd: Next buffer descriptor to reap from the hardware.
 * @write_bd: Next BD available for a new packet.
 * @end_bd: Points to the final BD in the ring.
 * @n_bds_used: Number of BD entries currently occupied.
 * @bd_ring: Base pointer to the BD ring.
 * @bd_ring_dma: Physical (DMA) address of bd_ring.
 * @n_bds: Total number of BDs in the ring.
 *
 * ep0 has two IUDMA channels (IUDMA_EP0_RXCHAN and IUDMA_EP0_TXCHAN), as it is
 * bidirectional.  The "struct usb_ep" associated with ep0 is for TX (IN)
 * only.
 *
 * Each bulk/intr endpoint has a single IUDMA channel and a single
 * struct usb_ep.
 */
struct iudma_ch {
	unsigned int			ch_idx;
	int				ep_num;
	bool				enabled;
	int				max_pkt;
	bool				is_tx;
	struct bcm63xx_ep		*bep;
	struct bcm63xx_udc		*udc;

	struct bcm_enet_desc		*read_bd;
	struct bcm_enet_desc		*write_bd;
	struct bcm_enet_desc		*end_bd;
	int				n_bds_used;

	struct bcm_enet_desc		*bd_ring;
	dma_addr_t			bd_ring_dma;
	unsigned int			n_bds;
};

/**
 * struct bcm63xx_ep - Internal (driver) state of a single endpoint.
 * @ep_num: USB endpoint number.
 * @iudma: Pointer to IUDMA channel state.
 * @ep: USB gadget layer representation of the EP.
 * @udc: Reference to the device controller.
 * @queue: Linked list of outstanding requests for this EP.
 * @halted: 1 if the EP is stalled; 0 otherwise.
 */
struct bcm63xx_ep {
	unsigned int			ep_num;
	struct iudma_ch			*iudma;
	struct usb_ep			ep;
	struct bcm63xx_udc		*udc;
	struct list_head		queue;
	unsigned			halted:1;
};

/**
 * struct bcm63xx_req - Internal (driver) state of a single request.
 * @queue: Links back to the EP's request list.
 * @req: USB gadget layer representation of the request.
 * @offset: Current byte offset into the data buffer (next byte to queue).
 * @bd_bytes: Number of data bytes in outstanding BD entries.
 * @iudma: IUDMA channel used for the request.
 */
struct bcm63xx_req {
	struct list_head		queue;		/* ep's requests */
	struct usb_request		req;
	unsigned int			offset;
	unsigned int			bd_bytes;
	struct iudma_ch			*iudma;
};

/**
 * struct bcm63xx_udc - Driver/hardware private context.
 * @lock: Spinlock to mediate access to this struct, and (most) HW regs.
 * @dev: Generic Linux device structure.
 * @pd: Platform data (board/port info).
 * @usbd_clk: Clock descriptor for the USB device block.
 * @usbh_clk: Clock descriptor for the USB host block.
 * @gadget: USB slave device.
 * @driver: Driver for USB slave devices.
 * @usbd_regs: Base address of the USBD/USB20D block.
 * @iudma_regs: Base address of the USBD's associated IUDMA block.
 * @bep: Array of endpoints, including ep0.
 * @iudma: Array of all IUDMA channels used by this controller.
 * @cfg: USB configuration number, from SET_CONFIGURATION wValue.
 * @iface: USB interface number, from SET_INTERFACE wIndex.
 * @alt_iface: USB alt interface number, from SET_INTERFACE wValue.
 * @ep0_ctrl_req: Request object for bcm63xx_udc-initiated ep0 transactions.
 * @ep0_ctrl_buf: Data buffer for ep0_ctrl_req.
 * @ep0state: Current state of the ep0 state machine.
 * @ep0_wq: Workqueue struct used to wake up the ep0 state machine.
 * @wedgemap: Bitmap of wedged endpoints.
 * @ep0_req_reset: USB reset is pending.
 * @ep0_req_set_cfg: Need to spoof a SET_CONFIGURATION packet.
 * @ep0_req_set_iface: Need to spoof a SET_INTERFACE packet.
 * @ep0_req_shutdown: Driver is shutting down; requesting ep0 to halt activity.
 * @ep0_req_completed: ep0 request has completed; worker has not seen it yet.
 * @ep0_reply: Pending reply from gadget driver.
 * @ep0_request: Outstanding ep0 request.
 * @debugfs_root: debugfs directory: /sys/kernel/debug/<DRV_MODULE_NAME>.
 */
struct bcm63xx_udc {
	spinlock_t			lock;

	struct device			*dev;
	struct bcm63xx_usbd_platform_data *pd;
	struct clk			*usbd_clk;
	struct clk			*usbh_clk;

	struct usb_gadget		gadget;
	struct usb_gadget_driver	*driver;

	void __iomem			*usbd_regs;
	void __iomem			*iudma_regs;

	struct bcm63xx_ep		bep[BCM63XX_NUM_EP];
	struct iudma_ch			iudma[BCM63XX_NUM_IUDMA];

	int				cfg;
	int				iface;
	int				alt_iface;

	struct bcm63xx_req		ep0_ctrl_req;
	u8				*ep0_ctrl_buf;

	int				ep0state;
	struct work_struct		ep0_wq;

	unsigned long			wedgemap;

	unsigned			ep0_req_reset:1;
	unsigned			ep0_req_set_cfg:1;
	unsigned			ep0_req_set_iface:1;
	unsigned			ep0_req_shutdown:1;

	unsigned			ep0_req_completed:1;
	struct usb_request		*ep0_reply;
	struct usb_request		*ep0_request;

	struct dentry			*debugfs_root;
};

static const struct usb_ep_ops bcm63xx_udc_ep_ops;

/***********************************************************************
 * Convenience functions
 ***********************************************************************/

static inline struct bcm63xx_udc *gadget_to_udc(struct usb_gadget *g)
{
	return container_of(g, struct bcm63xx_udc, gadget);
}

static inline struct bcm63xx_ep *our_ep(struct usb_ep *ep)
{
	return container_of(ep, struct bcm63xx_ep, ep);
}

static inline struct bcm63xx_req *our_req(struct usb_request *req)
{
	return container_of(req, struct bcm63xx_req, req);
}

static inline u32 usbd_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->usbd_regs + off);
}

static inline void usbd_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->usbd_regs + off);
}

static inline u32 usb_dma_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->iudma_regs + off);
}

static inline void usb_dma_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->iudma_regs + off);
}

static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off, int chan)
{
	return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}

static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
					int chan)
{
	bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}

static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off, int chan)
{
	return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}

static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
					int chan)
{
	bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}
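
/*
 * Example (editorial note): usb_dmac_writel(udc, val, ENETDMAC_CHANCFG_REG, 2)
 * writes to iudma_regs + IUDMA_DMAC_OFFSET + ENETDMAC_CHANCFG_REG +
 * 2 * ENETDMA_CHAN_WIDTH, i.e. the per-channel register window for IUDMA
 * channel 2.  The usb_dmas_*() accessors address the state RAM block at
 * IUDMA_DMAS_OFFSET the same way.
 */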

static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled)
{
	if (is_enabled) {
		clk_enable(udc->usbh_clk);
		clk_enable(udc->usbd_clk);
		udelay(10);
	} else {
		clk_disable(udc->usbd_clk);
		clk_disable(udc->usbh_clk);
	}
}

/***********************************************************************
 * Low-level IUDMA / FIFO operations
 ***********************************************************************/

/**
 * bcm63xx_ep_dma_select - Helper function to set up the init_sel signal.
 * @udc: Reference to the device controller.
 * @idx: Desired init_sel value.
 *
 * The "init_sel" signal is used as a selection index for both endpoints
 * and IUDMA channels.  Since these do not map 1:1, the use of this signal
 * depends on the context.
 */
static void bcm63xx_ep_dma_select(struct bcm63xx_udc *udc, int idx)
{
	u32 val = usbd_readl(udc, USBD_CONTROL_REG);

	val &= ~USBD_CONTROL_INIT_SEL_MASK;
	val |= idx << USBD_CONTROL_INIT_SEL_SHIFT;
	usbd_writel(udc, val, USBD_CONTROL_REG);
}

/**
 * bcm63xx_set_stall - Enable/disable stall on one endpoint.
 * @udc: Reference to the device controller.
 * @bep: Endpoint on which to operate.
 * @is_stalled: true to enable stall, false to disable.
 *
 * See notes in bcm63xx_update_wedge() regarding automatic clearing of
 * halt/stall conditions.
 */
static void bcm63xx_set_stall(struct bcm63xx_udc *udc, struct bcm63xx_ep *bep,
	bool is_stalled)
{
	u32 val;

	val = USBD_STALL_UPDATE_MASK |
		(is_stalled ? USBD_STALL_ENABLE_MASK : 0) |
		(bep->ep_num << USBD_STALL_EPNUM_SHIFT);
	usbd_writel(udc, val, USBD_STALL_REG);
}

/**
 * bcm63xx_fifo_setup - (Re)initialize FIFO boundaries and settings.
 * @udc: Reference to the device controller.
 *
 * These parameters depend on the USB link speed.  Settings are
 * per-IUDMA-channel-pair.
 */
static void bcm63xx_fifo_setup(struct bcm63xx_udc *udc)
{
	int is_hs = udc->gadget.speed == USB_SPEED_HIGH;
	u32 i, val, rx_fifo_slot, tx_fifo_slot;

	/* set up FIFO boundaries and packet sizes; this is done in pairs */
	rx_fifo_slot = tx_fifo_slot = 0;
	for (i = 0; i < BCM63XX_NUM_IUDMA; i += 2) {
		const struct iudma_ch_cfg *rx_cfg = &iudma_defaults[i];
		const struct iudma_ch_cfg *tx_cfg = &iudma_defaults[i + 1];

		bcm63xx_ep_dma_select(udc, i >> 1);

		val = (rx_fifo_slot << USBD_RXFIFO_CONFIG_START_SHIFT) |
			((rx_fifo_slot + rx_cfg->n_fifo_slots - 1) <<
			 USBD_RXFIFO_CONFIG_END_SHIFT);
		rx_fifo_slot += rx_cfg->n_fifo_slots;
		usbd_writel(udc, val, USBD_RXFIFO_CONFIG_REG);
		usbd_writel(udc,
			    is_hs ? rx_cfg->max_pkt_hs : rx_cfg->max_pkt_fs,
			    USBD_RXFIFO_EPSIZE_REG);

		val = (tx_fifo_slot << USBD_TXFIFO_CONFIG_START_SHIFT) |
			((tx_fifo_slot + tx_cfg->n_fifo_slots - 1) <<
			 USBD_TXFIFO_CONFIG_END_SHIFT);
		tx_fifo_slot += tx_cfg->n_fifo_slots;
		usbd_writel(udc, val, USBD_TXFIFO_CONFIG_REG);
		usbd_writel(udc,
			    is_hs ? tx_cfg->max_pkt_hs : tx_cfg->max_pkt_fs,
			    USBD_TXFIFO_EPSIZE_REG);

		usbd_readl(udc, USBD_TXFIFO_EPSIZE_REG);
	}
}
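
/*
 * Worked example (editorial note): with the iudma_defaults[] table above,
 * this loop carves the RX FIFO into slots 0-31 (ep0), 32-159 (ep2 bulk OUT)
 * and 160-191 (ep4 int OUT), and lays out the TX FIFO the same way for the
 * odd-numbered (IN) channels.
 */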

/**
 * bcm63xx_fifo_reset_ep - Flush a single endpoint's FIFO.
 * @udc: Reference to the device controller.
 * @ep_num: Endpoint number.
 */
static void bcm63xx_fifo_reset_ep(struct bcm63xx_udc *udc, int ep_num)
{
	u32 val;

	bcm63xx_ep_dma_select(udc, ep_num);

	val = usbd_readl(udc, USBD_CONTROL_REG);
	val |= USBD_CONTROL_FIFO_RESET_MASK;
	usbd_writel(udc, val, USBD_CONTROL_REG);
	usbd_readl(udc, USBD_CONTROL_REG);
}

/**
 * bcm63xx_fifo_reset - Flush all hardware FIFOs.
 * @udc: Reference to the device controller.
 */
static void bcm63xx_fifo_reset(struct bcm63xx_udc *udc)
{
	int i;

	for (i = 0; i < BCM63XX_NUM_FIFO_PAIRS; i++)
		bcm63xx_fifo_reset_ep(udc, i);
}

/**
 * bcm63xx_ep_init - Initial (one-time) endpoint initialization.
 * @udc: Reference to the device controller.
 */
static void bcm63xx_ep_init(struct bcm63xx_udc *udc)
{
	u32 i, val;

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];

		if (cfg->ep_num < 0)
			continue;

		bcm63xx_ep_dma_select(udc, cfg->ep_num);
		val = (cfg->ep_type << USBD_EPNUM_TYPEMAP_TYPE_SHIFT) |
			((i >> 1) << USBD_EPNUM_TYPEMAP_DMA_CH_SHIFT);
		usbd_writel(udc, val, USBD_EPNUM_TYPEMAP_REG);
	}
}

/**
 * bcm63xx_ep_setup - Configure per-endpoint settings.
 * @udc: Reference to the device controller.
 *
 * This needs to be rerun if the speed/cfg/intf/altintf changes.
 */
static void bcm63xx_ep_setup(struct bcm63xx_udc *udc)
{
	u32 val, i;

	usbd_writel(udc, USBD_CSR_SETUPADDR_DEF, USBD_CSR_SETUPADDR_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
		int max_pkt = udc->gadget.speed == USB_SPEED_HIGH ?
			      cfg->max_pkt_hs : cfg->max_pkt_fs;
		int idx = cfg->ep_num;

		udc->iudma[i].max_pkt = max_pkt;

		if (idx < 0)
			continue;
		usb_ep_set_maxpacket_limit(&udc->bep[idx].ep, max_pkt);

		val = (idx << USBD_CSR_EP_LOG_SHIFT) |
		      (cfg->dir << USBD_CSR_EP_DIR_SHIFT) |
		      (cfg->ep_type << USBD_CSR_EP_TYPE_SHIFT) |
		      (udc->cfg << USBD_CSR_EP_CFG_SHIFT) |
		      (udc->iface << USBD_CSR_EP_IFACE_SHIFT) |
		      (udc->alt_iface << USBD_CSR_EP_ALTIFACE_SHIFT) |
		      (max_pkt << USBD_CSR_EP_MAXPKT_SHIFT);
		usbd_writel(udc, val, USBD_CSR_EP_REG(idx));
	}
}

/**
 * iudma_write - Queue a single IUDMA transaction.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to use.
 * @breq: Request containing the transaction data.
 *
 * For RX IUDMA, this will queue a single buffer descriptor, as RX IUDMA
 * does not honor SOP/EOP so the handling of multiple buffers is ambiguous.
 * So iudma_write() may be called several times to fulfill a single
 * usb_request.
 *
 * For TX IUDMA, this can queue multiple buffer descriptors if needed.
 */
static void iudma_write(struct bcm63xx_udc *udc, struct iudma_ch *iudma,
	struct bcm63xx_req *breq)
{
	int first_bd = 1, last_bd = 0, extra_zero_pkt = 0;
	unsigned int bytes_left = breq->req.length - breq->offset;
	const int max_bd_bytes = !irq_coalesce && !iudma->is_tx ?
		iudma->max_pkt : IUDMA_MAX_FRAGMENT;

	iudma->n_bds_used = 0;
	breq->bd_bytes = 0;
	breq->iudma = iudma;

	if ((bytes_left % iudma->max_pkt == 0) && bytes_left && breq->req.zero)
		extra_zero_pkt = 1;

	do {
		struct bcm_enet_desc *d = iudma->write_bd;
		u32 dmaflags = 0;
		unsigned int n_bytes;

		if (d == iudma->end_bd) {
			dmaflags |= DMADESC_WRAP_MASK;
			iudma->write_bd = iudma->bd_ring;
		} else {
			iudma->write_bd++;
		}
		iudma->n_bds_used++;

		n_bytes = min_t(int, bytes_left, max_bd_bytes);
		if (n_bytes)
			dmaflags |= n_bytes << DMADESC_LENGTH_SHIFT;
		else
			dmaflags |= (1 << DMADESC_LENGTH_SHIFT) |
				    DMADESC_USB_ZERO_MASK;

		dmaflags |= DMADESC_OWNER_MASK;
		if (first_bd) {
			dmaflags |= DMADESC_SOP_MASK;
			first_bd = 0;
		}

		/*
		 * extra_zero_pkt forces one more iteration through the loop
		 * after all data is queued up, to send the zero packet
		 */
		if (extra_zero_pkt && !bytes_left)
			extra_zero_pkt = 0;

		if (!iudma->is_tx || iudma->n_bds_used == iudma->n_bds ||
		    (n_bytes == bytes_left && !extra_zero_pkt)) {
			last_bd = 1;
			dmaflags |= DMADESC_EOP_MASK;
		}

		d->address = breq->req.dma + breq->offset;
		mb();
		d->len_stat = dmaflags;

		breq->offset += n_bytes;
		breq->bd_bytes += n_bytes;
		bytes_left -= n_bytes;
	} while (!last_bd);

	usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK,
			ENETDMAC_CHANCFG_REG, iudma->ch_idx);
}
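
/*
 * Worked example (editorial note): with irq_coalesce disabled, a 1200-byte
 * OUT transfer on a 512-byte bulk endpoint is posted one BD at a time, so
 * iudma_write() runs three times in all (512 + 512 + 176 bytes) as each BD
 * completes.  A 3000-byte IN transfer (with req->zero clear) is posted in
 * a single call as two BDs: a 2048-byte SOP fragment, then a 952-byte EOP
 * fragment.
 */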

/**
 * iudma_read - Check for IUDMA buffer completion.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to use.
 *
 * This checks to see if ALL of the outstanding BDs on the DMA channel
 * have been filled.  If so, it returns the actual transfer length;
 * otherwise it returns -EBUSY.
 */
static int iudma_read(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
	int i, actual_len = 0;
	struct bcm_enet_desc *d = iudma->read_bd;

	if (!iudma->n_bds_used)
		return -EINVAL;

	for (i = 0; i < iudma->n_bds_used; i++) {
		u32 dmaflags;

		dmaflags = d->len_stat;

		if (dmaflags & DMADESC_OWNER_MASK)
			return -EBUSY;

		actual_len += (dmaflags & DMADESC_LENGTH_MASK) >>
			      DMADESC_LENGTH_SHIFT;
		if (d == iudma->end_bd)
			d = iudma->bd_ring;
		else
			d++;
	}

	iudma->read_bd = d;
	iudma->n_bds_used = 0;
	return actual_len;
}

/**
 * iudma_reset_channel - Stop DMA on a single channel.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to reset.
 */
static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
	int timeout = IUDMA_RESET_TIMEOUT_US;
	struct bcm_enet_desc *d;
	int ch_idx = iudma->ch_idx;

	if (!iudma->is_tx)
		bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num));

	/* stop DMA, then wait for the hardware to wrap up */
	usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG, ch_idx);

	while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx) &
				   ENETDMAC_CHANCFG_EN_MASK) {
		udelay(1);

		/* repeatedly flush the FIFO data until the BD completes */
		if (iudma->is_tx && iudma->ep_num >= 0)
			bcm63xx_fifo_reset_ep(udc, iudma->ep_num);

		if (!timeout--) {
			dev_err(udc->dev, "can't reset IUDMA channel %d\n",
				ch_idx);
			break;
		}
		if (timeout == IUDMA_RESET_TIMEOUT_US / 2) {
			dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n",
				 ch_idx);
			usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK,
					ENETDMAC_CHANCFG_REG, ch_idx);
		}
	}
	usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG, ch_idx);

	/* don't leave "live" HW-owned entries for the next guy to step on */
	for (d = iudma->bd_ring; d <= iudma->end_bd; d++)
		d->len_stat = 0;
	mb();

	iudma->read_bd = iudma->write_bd = iudma->bd_ring;
	iudma->n_bds_used = 0;

	/* set up IRQs, UBUS burst size, and BD base for this channel */
	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
			ENETDMAC_IRMASK_REG, ch_idx);
	usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG, ch_idx);

	usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG, ch_idx);
	usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG, ch_idx);
}

/**
 * iudma_init_channel - One-time IUDMA channel initialization.
 * @udc: Reference to the device controller.
 * @ch_idx: Channel to initialize.
 */
static int iudma_init_channel(struct bcm63xx_udc *udc, unsigned int ch_idx)
{
	struct iudma_ch *iudma = &udc->iudma[ch_idx];
	const struct iudma_ch_cfg *cfg = &iudma_defaults[ch_idx];
	unsigned int n_bds = cfg->n_bds;
	struct bcm63xx_ep *bep = NULL;

	iudma->ep_num = cfg->ep_num;
	iudma->ch_idx = ch_idx;
	iudma->is_tx = !!(ch_idx & 0x01);
	if (iudma->ep_num >= 0) {
		bep = &udc->bep[iudma->ep_num];
		bep->iudma = iudma;
		INIT_LIST_HEAD(&bep->queue);
	}

	iudma->bep = bep;
	iudma->udc = udc;

	/* ep0 is always active; others are controlled by the gadget driver */
	if (iudma->ep_num <= 0)
		iudma->enabled = true;

	iudma->n_bds = n_bds;
	iudma->bd_ring = dmam_alloc_coherent(udc->dev,
		n_bds * sizeof(struct bcm_enet_desc),
		&iudma->bd_ring_dma, GFP_KERNEL);
	if (!iudma->bd_ring)
		return -ENOMEM;
	iudma->end_bd = &iudma->bd_ring[n_bds - 1];

	return 0;
}

/**
 * iudma_init - One-time initialization of all IUDMA channels.
 * @udc: Reference to the device controller.
 *
 * Enable DMA, flush channels, and enable global IUDMA IRQs.
 */
static int iudma_init(struct bcm63xx_udc *udc)
{
	int i, rc;

	usb_dma_writel(udc, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		rc = iudma_init_channel(udc, i);
		if (rc)
			return rc;
		iudma_reset_channel(udc, &udc->iudma[i]);
	}

	usb_dma_writel(udc, BIT(BCM63XX_NUM_IUDMA)-1, ENETDMA_GLB_IRQMASK_REG);
	return 0;
}

/**
 * iudma_uninit - Uninitialize IUDMA channels.
 * @udc: Reference to the device controller.
 *
 * Kill global IUDMA IRQs, flush channels, and kill DMA.
 */
static void iudma_uninit(struct bcm63xx_udc *udc)
{
	int i;

	usb_dma_writel(udc, 0, ENETDMA_GLB_IRQMASK_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++)
		iudma_reset_channel(udc, &udc->iudma[i]);

	usb_dma_writel(udc, 0, ENETDMA_CFG_REG);
}

/***********************************************************************
 * Other low-level USBD operations
 ***********************************************************************/

/**
 * bcm63xx_set_ctrl_irqs - Mask/unmask control path interrupts.
 * @udc: Reference to the device controller.
 * @enable_irqs: true to enable, false to disable.
 */
static void bcm63xx_set_ctrl_irqs(struct bcm63xx_udc *udc, bool enable_irqs)
{
	u32 val;

	usbd_writel(udc, 0, USBD_STATUS_REG);

	val = BIT(USBD_EVENT_IRQ_USB_RESET) |
	      BIT(USBD_EVENT_IRQ_SETUP) |
	      BIT(USBD_EVENT_IRQ_SETCFG) |
	      BIT(USBD_EVENT_IRQ_SETINTF) |
	      BIT(USBD_EVENT_IRQ_USB_LINK);
	usbd_writel(udc, enable_irqs ? val : 0, USBD_EVENT_IRQ_MASK_REG);
	usbd_writel(udc, val, USBD_EVENT_IRQ_STATUS_REG);
}

/**
 * bcm63xx_select_phy_mode - Select between USB device and host mode.
 * @udc: Reference to the device controller.
 * @is_device: true for device, false for host.
 *
 * This should probably be reworked to use the drivers/usb/otg
 * infrastructure.
 *
 * By default, the AFE/pullups are disabled in device mode, until
 * bcm63xx_select_pullup() is called.
 */
static void bcm63xx_select_phy_mode(struct bcm63xx_udc *udc, bool is_device)
{
	u32 val, portmask = BIT(udc->pd->port_no);

	if (BCMCPU_IS_6328()) {
		/* configure pinmux to sense VBUS signal */
		val = bcm_gpio_readl(GPIO_PINMUX_OTHR_REG);
		val &= ~GPIO_PINMUX_OTHR_6328_USB_MASK;
		val |= is_device ? GPIO_PINMUX_OTHR_6328_USB_DEV :
			       GPIO_PINMUX_OTHR_6328_USB_HOST;
		bcm_gpio_writel(val, GPIO_PINMUX_OTHR_REG);
	}

	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
	if (is_device) {
		val |= (portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	} else {
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	}
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);

	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_SWAP_6368_REG);
	if (is_device)
		val |= USBH_PRIV_SWAP_USBD_MASK;
	else
		val &= ~USBH_PRIV_SWAP_USBD_MASK;
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_SWAP_6368_REG);
}

/**
 * bcm63xx_select_pullup - Enable/disable the pullup on D+
 * @udc: Reference to the device controller.
 * @is_on: true to enable the pullup, false to disable.
 *
 * If the pullup is active, the host will sense a FS/HS device connected to
 * the port.  If the pullup is inactive, the host will think the USB
 * device has been disconnected.
 */
static void bcm63xx_select_pullup(struct bcm63xx_udc *udc, bool is_on)
{
	u32 val, portmask = BIT(udc->pd->port_no);

	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
	if (is_on)
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	else
		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
}

/**
 * bcm63xx_uninit_udc_hw - Shut down the hardware prior to driver removal.
 * @udc: Reference to the device controller.
 *
 * This just masks the IUDMA IRQs and releases the clocks.  It is assumed
 * that bcm63xx_udc_stop() has already run, and the clocks are stopped.
 */
static void bcm63xx_uninit_udc_hw(struct bcm63xx_udc *udc)
{
	set_clocks(udc, true);
	iudma_uninit(udc);
	set_clocks(udc, false);

	clk_put(udc->usbd_clk);
	clk_put(udc->usbh_clk);
}

/**
 * bcm63xx_init_udc_hw - Initialize the controller hardware and data structures.
 * @udc: Reference to the device controller.
 */
static int bcm63xx_init_udc_hw(struct bcm63xx_udc *udc)
{
	int i, rc = 0;
	u32 val;

	udc->ep0_ctrl_buf = devm_kzalloc(udc->dev, BCM63XX_MAX_CTRL_PKT,
					 GFP_KERNEL);
	if (!udc->ep0_ctrl_buf)
		return -ENOMEM;

	INIT_LIST_HEAD(&udc->gadget.ep_list);
	for (i = 0; i < BCM63XX_NUM_EP; i++) {
		struct bcm63xx_ep *bep = &udc->bep[i];

		bep->ep.name = bcm63xx_ep_info[i].name;
		bep->ep.caps = bcm63xx_ep_info[i].caps;
		bep->ep_num = i;
		bep->ep.ops = &bcm63xx_udc_ep_ops;
		list_add_tail(&bep->ep.ep_list, &udc->gadget.ep_list);
		bep->halted = 0;
		usb_ep_set_maxpacket_limit(&bep->ep, BCM63XX_MAX_CTRL_PKT);
		bep->udc = udc;
		bep->ep.desc = NULL;
		INIT_LIST_HEAD(&bep->queue);
	}

	udc->gadget.ep0 = &udc->bep[0].ep;
	list_del(&udc->bep[0].ep.ep_list);

	udc->gadget.speed = USB_SPEED_UNKNOWN;
	udc->ep0state = EP0_SHUTDOWN;

	udc->usbh_clk = clk_get(udc->dev, "usbh");
	if (IS_ERR(udc->usbh_clk))
		return -EIO;

	udc->usbd_clk = clk_get(udc->dev, "usbd");
	if (IS_ERR(udc->usbd_clk)) {
		clk_put(udc->usbh_clk);
		return -EIO;
	}

	set_clocks(udc, true);

	val = USBD_CONTROL_AUTO_CSRS_MASK |
	      USBD_CONTROL_DONE_CSRS_MASK |
	      (irq_coalesce ? USBD_CONTROL_RXZSCFG_MASK : 0);
	usbd_writel(udc, val, USBD_CONTROL_REG);

	val = USBD_STRAPS_APP_SELF_PWR_MASK |
	      USBD_STRAPS_APP_RAM_IF_MASK |
	      USBD_STRAPS_APP_CSRPRGSUP_MASK |
	      USBD_STRAPS_APP_8BITPHY_MASK |
	      USBD_STRAPS_APP_RMTWKUP_MASK;

	if (udc->gadget.max_speed == USB_SPEED_HIGH)
		val |= (BCM63XX_SPD_HIGH << USBD_STRAPS_SPEED_SHIFT);
	else
		val |= (BCM63XX_SPD_FULL << USBD_STRAPS_SPEED_SHIFT);
	usbd_writel(udc, val, USBD_STRAPS_REG);

	bcm63xx_set_ctrl_irqs(udc, false);

	usbd_writel(udc, 0, USBD_EVENT_IRQ_CFG_LO_REG);

	val = USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_ENUM_ON) |
	      USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_SET_CSRS);
	usbd_writel(udc, val, USBD_EVENT_IRQ_CFG_HI_REG);

	rc = iudma_init(udc);
	set_clocks(udc, false);
	if (rc)
		bcm63xx_uninit_udc_hw(udc);

	return rc;
}

/***********************************************************************
 * Standard EP gadget operations
 ***********************************************************************/

/**
 * bcm63xx_ep_enable - Enable one endpoint.
 * @ep: Endpoint to enable.
 * @desc: Contains max packet, direction, etc.
 *
 * Most of the endpoint parameters are fixed in this controller, so there
 * isn't much for this function to do.
 */
static int bcm63xx_ep_enable(struct usb_ep *ep,
	const struct usb_endpoint_descriptor *desc)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct iudma_ch *iudma = bep->iudma;
	unsigned long flags;

	if (!ep || !desc || ep->name == bcm63xx_ep0name)
		return -EINVAL;

	if (!udc->driver)
		return -ESHUTDOWN;

	spin_lock_irqsave(&udc->lock, flags);
	if (iudma->enabled) {
		spin_unlock_irqrestore(&udc->lock, flags);
		return -EINVAL;
	}

	iudma->enabled = true;
	BUG_ON(!list_empty(&bep->queue));

	iudma_reset_channel(udc, iudma);

	bep->halted = 0;
	bcm63xx_set_stall(udc, bep, false);
	clear_bit(bep->ep_num, &udc->wedgemap);

	ep->desc = desc;
	ep->maxpacket = usb_endpoint_maxp(desc);

	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}

/**
 * bcm63xx_ep_disable - Disable one endpoint.
 * @ep: Endpoint to disable.
 */
static int bcm63xx_ep_disable(struct usb_ep *ep)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct iudma_ch *iudma = bep->iudma;
	struct bcm63xx_req *breq, *n;
	unsigned long flags;

	if (!ep || !ep->desc)
		return -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);
	if (!iudma->enabled) {
		spin_unlock_irqrestore(&udc->lock, flags);
		return -EINVAL;
	}
	iudma->enabled = false;

	iudma_reset_channel(udc, iudma);

	if (!list_empty(&bep->queue)) {
		list_for_each_entry_safe(breq, n, &bep->queue, queue) {
			usb_gadget_unmap_request(&udc->gadget, &breq->req,
						 iudma->is_tx);
			list_del(&breq->queue);
			breq->req.status = -ESHUTDOWN;

			spin_unlock_irqrestore(&udc->lock, flags);
			usb_gadget_giveback_request(&iudma->bep->ep, &breq->req);
			spin_lock_irqsave(&udc->lock, flags);
		}
	}
	ep->desc = NULL;

	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}

/**
 * bcm63xx_udc_alloc_request - Allocate a new request.
 * @ep: Endpoint associated with the request.
 * @mem_flags: Flags to pass to kzalloc().
 */
static struct usb_request *bcm63xx_udc_alloc_request(struct usb_ep *ep,
	gfp_t mem_flags)
{
	struct bcm63xx_req *breq;

	breq = kzalloc(sizeof(*breq), mem_flags);
	if (!breq)
		return NULL;
	return &breq->req;
}

/**
 * bcm63xx_udc_free_request - Free a request.
 * @ep: Endpoint associated with the request.
 * @req: Request to free.
 */
static void bcm63xx_udc_free_request(struct usb_ep *ep,
	struct usb_request *req)
{
	struct bcm63xx_req *breq = our_req(req);
	kfree(breq);
}

/**
 * bcm63xx_udc_queue - Queue up a new request.
 * @ep: Endpoint associated with the request.
 * @req: Request to add.
 * @mem_flags: Unused.
 *
 * If the queue is empty, start this request immediately.  Otherwise, add
 * it to the list.
 *
 * ep0 replies are sent through this function from the gadget driver, but
 * they are treated differently because they need to be handled by the ep0
 * state machine.  (Sometimes they are replies to control requests that
 * were spoofed by this driver, and so they shouldn't be transmitted at all.)
 */
static int bcm63xx_udc_queue(struct usb_ep *ep, struct usb_request *req,
	gfp_t mem_flags)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct bcm63xx_req *breq = our_req(req);
	unsigned long flags;
	int rc = 0;

	if (unlikely(!req || !req->complete || !req->buf || !ep))
		return -EINVAL;

	req->actual = 0;
	req->status = 0;
	breq->offset = 0;

	if (bep == &udc->bep[0]) {
		/* only one reply per request, please */
		if (udc->ep0_reply)
			return -EINVAL;

		udc->ep0_reply = req;
		schedule_work(&udc->ep0_wq);
		return 0;
	}

	spin_lock_irqsave(&udc->lock, flags);
	if (!bep->iudma->enabled) {
		rc = -ESHUTDOWN;
		goto out;
	}

	rc = usb_gadget_map_request(&udc->gadget, req, bep->iudma->is_tx);
	if (rc == 0) {
		list_add_tail(&breq->queue, &bep->queue);
		if (list_is_singular(&bep->queue))
			iudma_write(udc, bep->iudma, breq);
	}

out:
	spin_unlock_irqrestore(&udc->lock, flags);
	return rc;
}

/**
 * bcm63xx_udc_dequeue - Remove a pending request from the queue.
 * @ep: Endpoint associated with the request.
 * @req: Request to remove.
 *
 * If the request is not at the head of the queue, this is easy - just nuke
 * it.  If the request is at the head of the queue, we'll need to stop the
 * DMA transaction and then queue up the successor.
 */
static int bcm63xx_udc_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct bcm63xx_req *breq = our_req(req), *cur;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&udc->lock, flags);
	if (list_empty(&bep->queue)) {
		rc = -EINVAL;
		goto out;
	}

	cur = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
	usb_gadget_unmap_request(&udc->gadget, &breq->req, bep->iudma->is_tx);

	if (breq == cur) {
		iudma_reset_channel(udc, bep->iudma);
		list_del(&breq->queue);

		if (!list_empty(&bep->queue)) {
			struct bcm63xx_req *next;

			next = list_first_entry(&bep->queue,
				struct bcm63xx_req, queue);
			iudma_write(udc, bep->iudma, next);
		}
	} else {
		list_del(&breq->queue);
	}

out:
	spin_unlock_irqrestore(&udc->lock, flags);

	req->status = -ESHUTDOWN;
	req->complete(ep, req);

	return rc;
}

/**
 * bcm63xx_udc_set_halt - Enable/disable STALL flag in the hardware.
 * @ep: Endpoint to halt.
 * @value: Zero to clear halt; nonzero to set halt.
 *
 * See comments in bcm63xx_update_wedge().
 */
static int bcm63xx_udc_set_halt(struct usb_ep *ep, int value)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	bcm63xx_set_stall(udc, bep, !!value);
	bep->halted = value;
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

/**
 * bcm63xx_udc_set_wedge - Stall the endpoint until the next reset.
 * @ep: Endpoint to wedge.
 *
 * See comments in bcm63xx_update_wedge().
 */
static int bcm63xx_udc_set_wedge(struct usb_ep *ep)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	set_bit(bep->ep_num, &udc->wedgemap);
	bcm63xx_set_stall(udc, bep, true);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

static const struct usb_ep_ops bcm63xx_udc_ep_ops = {
	.enable		= bcm63xx_ep_enable,
	.disable	= bcm63xx_ep_disable,

	.alloc_request	= bcm63xx_udc_alloc_request,
	.free_request	= bcm63xx_udc_free_request,

	.queue		= bcm63xx_udc_queue,
	.dequeue	= bcm63xx_udc_dequeue,

	.set_halt	= bcm63xx_udc_set_halt,
	.set_wedge	= bcm63xx_udc_set_wedge,
};
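
/*
 * Call-flow sketch (editorial note; error handling omitted, and "buf",
 * "len" and my_complete() are placeholders): a gadget driver reaches
 * these ops through the usb_ep_*() wrappers, e.g.:
 *
 *	usb_ep_enable(ep);			--> bcm63xx_ep_enable()
 *	req = usb_ep_alloc_request(ep, GFP_KERNEL);
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;
 *	usb_ep_queue(ep, req, GFP_ATOMIC);	--> bcm63xx_udc_queue()
 */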

/***********************************************************************
 * EP0 handling
 ***********************************************************************/

/**
 * bcm63xx_ep0_setup_callback - Drop spinlock to invoke ->setup callback.
 * @udc: Reference to the device controller.
 * @ctrl: 8-byte SETUP request.
 */
static int bcm63xx_ep0_setup_callback(struct bcm63xx_udc *udc,
	struct usb_ctrlrequest *ctrl)
{
	int rc;

	spin_unlock_irq(&udc->lock);
	rc = udc->driver->setup(&udc->gadget, ctrl);
	spin_lock_irq(&udc->lock);
	return rc;
}

/**
 * bcm63xx_ep0_spoof_set_cfg - Synthesize a SET_CONFIGURATION request.
 * @udc: Reference to the device controller.
 *
 * Many standard requests are handled automatically in the hardware, but
 * we still need to pass them to the gadget driver so that it can
 * reconfigure the interfaces/endpoints if necessary.
 *
 * Unfortunately we are not able to send a STALL response if the host
 * requests an invalid configuration.  If this happens, we'll have to be
 * content with printing a warning.
 */
static int bcm63xx_ep0_spoof_set_cfg(struct bcm63xx_udc *udc)
{
	struct usb_ctrlrequest ctrl;
	int rc;

	ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_DEVICE;
	ctrl.bRequest = USB_REQ_SET_CONFIGURATION;
	ctrl.wValue = cpu_to_le16(udc->cfg);
	ctrl.wIndex = 0;
	ctrl.wLength = 0;

	rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
	if (rc < 0) {
		dev_warn_ratelimited(udc->dev,
			"hardware auto-acked bad SET_CONFIGURATION(%d) request\n",
			udc->cfg);
	}
	return rc;
}

/**
 * bcm63xx_ep0_spoof_set_iface - Synthesize a SET_INTERFACE request.
 * @udc: Reference to the device controller.
 */
static int bcm63xx_ep0_spoof_set_iface(struct bcm63xx_udc *udc)
{
	struct usb_ctrlrequest ctrl;
	int rc;

	ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_INTERFACE;
	ctrl.bRequest = USB_REQ_SET_INTERFACE;
	ctrl.wValue = cpu_to_le16(udc->alt_iface);
	ctrl.wIndex = cpu_to_le16(udc->iface);
	ctrl.wLength = 0;

	rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
	if (rc < 0) {
		dev_warn_ratelimited(udc->dev,
			"hardware auto-acked bad SET_INTERFACE(%d,%d) request\n",
			udc->iface, udc->alt_iface);
	}
	return rc;
}

/**
 * bcm63xx_ep0_map_write - dma_map and iudma_write a single request.
 * @udc: Reference to the device controller.
 * @ch_idx: IUDMA channel number.
 * @req: USB gadget layer representation of the request.
 */
static void bcm63xx_ep0_map_write(struct bcm63xx_udc *udc, int ch_idx,
	struct usb_request *req)
{
	struct bcm63xx_req *breq = our_req(req);
	struct iudma_ch *iudma = &udc->iudma[ch_idx];

	BUG_ON(udc->ep0_request);
	udc->ep0_request = req;

	req->actual = 0;
	breq->offset = 0;
	usb_gadget_map_request(&udc->gadget, req, iudma->is_tx);
	iudma_write(udc, iudma, breq);
}

/**
 * bcm63xx_ep0_complete - Set completion status and "stage" the callback.
 * @udc: Reference to the device controller.
 * @req: USB gadget layer representation of the request.
 * @status: Status to return to the gadget driver.
 */
static void bcm63xx_ep0_complete(struct bcm63xx_udc *udc,
	struct usb_request *req, int status)
{
	req->status = status;
	if (status)
		req->actual = 0;
	if (req->complete) {
		spin_unlock_irq(&udc->lock);
		req->complete(&udc->bep[0].ep, req);
		spin_lock_irq(&udc->lock);
	}
}

/**
 * bcm63xx_ep0_nuke_reply - Abort request from the gadget driver due to
 *   reset/shutdown.
 * @udc: Reference to the device controller.
 * @is_tx: Nonzero for TX (IN), zero for RX (OUT).
 */
static void bcm63xx_ep0_nuke_reply(struct bcm63xx_udc *udc, int is_tx)
{
	struct usb_request *req = udc->ep0_reply;

	udc->ep0_reply = NULL;
	usb_gadget_unmap_request(&udc->gadget, req, is_tx);
	if (udc->ep0_request == req) {
		udc->ep0_req_completed = 0;
		udc->ep0_request = NULL;
	}
	bcm63xx_ep0_complete(udc, req, -ESHUTDOWN);
}

/**
 * bcm63xx_ep0_read_complete - Close out the pending ep0 request; return
 *   transfer len.
 * @udc: Reference to the device controller.
 */
static int bcm63xx_ep0_read_complete(struct bcm63xx_udc *udc)
{
	struct usb_request *req = udc->ep0_request;

	udc->ep0_req_completed = 0;
	udc->ep0_request = NULL;

	return req->actual;
}

/**
 * bcm63xx_ep0_internal_request - Helper function to submit an ep0 request.
 * @udc: Reference to the device controller.
 * @ch_idx: IUDMA channel number.
 * @length: Number of bytes to TX/RX.
 *
 * Used for simple transfers performed by the ep0 worker.  This will always
 * use ep0_ctrl_req / ep0_ctrl_buf.
 */
static void bcm63xx_ep0_internal_request(struct bcm63xx_udc *udc, int ch_idx,
	int length)
{
	struct usb_request *req = &udc->ep0_ctrl_req.req;

	req->buf = udc->ep0_ctrl_buf;
	req->length = length;
	req->complete = NULL;

	bcm63xx_ep0_map_write(udc, ch_idx, req);
}

/**
 * bcm63xx_ep0_do_setup - Parse new SETUP packet and decide how to handle it.
 * @udc: Reference to the device controller.
 *
 * EP0_IDLE probably shouldn't ever happen.  EP0_REQUEUE means we're ready
 * for the next packet.  Anything else means the transaction requires multiple
 * stages of handling.
 */
static enum bcm63xx_ep0_state bcm63xx_ep0_do_setup(struct bcm63xx_udc *udc)
{
	int rc;
	struct usb_ctrlrequest *ctrl = (void *)udc->ep0_ctrl_buf;

	rc = bcm63xx_ep0_read_complete(udc);

	if (rc < 0) {
		dev_err(udc->dev, "missing SETUP packet\n");
		return EP0_IDLE;
	}

	/*
	 * Handle 0-byte IN STATUS acknowledgement.  The hardware doesn't
	 * always deliver these, so if we happen to see one, just throw
	 * it away.
	 */
	if (rc == 0)
		return EP0_REQUEUE;

	/* Drop malformed SETUP packets */
	if (rc != sizeof(*ctrl)) {
		dev_warn_ratelimited(udc->dev,
			"malformed SETUP packet (%d bytes)\n", rc);
		return EP0_REQUEUE;
	}

	/* Process new SETUP packet arriving on ep0 */
	rc = bcm63xx_ep0_setup_callback(udc, ctrl);
	if (rc < 0) {
		bcm63xx_set_stall(udc, &udc->bep[0], true);
		return EP0_REQUEUE;
	}

	if (!ctrl->wLength)
		return EP0_REQUEUE;
	else if (ctrl->bRequestType & USB_DIR_IN)
		return EP0_IN_DATA_PHASE_SETUP;
	else
		return EP0_OUT_DATA_PHASE_SETUP;
}

/**
 * bcm63xx_ep0_do_idle - Check for outstanding requests if ep0 is idle.
 * @udc: Reference to the device controller.
 *
 * In state EP0_IDLE, the RX descriptor is either pending, or has been
 * filled with a SETUP packet from the host.  This function handles new
 * SETUP packets, control IRQ events (which can generate fake SETUP packets),
 * and reset/shutdown events.
 *
 * Returns 0 if work was done; -EAGAIN if nothing to do.
 */
static int bcm63xx_ep0_do_idle(struct bcm63xx_udc *udc)
{
	if (udc->ep0_req_reset) {
		udc->ep0_req_reset = 0;
	} else if (udc->ep0_req_set_cfg) {
		udc->ep0_req_set_cfg = 0;
		if (bcm63xx_ep0_spoof_set_cfg(udc) >= 0)
			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
	} else if (udc->ep0_req_set_iface) {
		udc->ep0_req_set_iface = 0;
		if (bcm63xx_ep0_spoof_set_iface(udc) >= 0)
			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
	} else if (udc->ep0_req_completed) {
		udc->ep0state = bcm63xx_ep0_do_setup(udc);
		return udc->ep0state == EP0_IDLE ? -EAGAIN : 0;
	} else if (udc->ep0_req_shutdown) {
		udc->ep0_req_shutdown = 0;
		udc->ep0_req_completed = 0;
		udc->ep0_request = NULL;
		iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
		usb_gadget_unmap_request(&udc->gadget,
			&udc->ep0_ctrl_req.req, 0);

		/* bcm63xx_udc_pullup() is waiting for this */
		mb();
		udc->ep0state = EP0_SHUTDOWN;
	} else if (udc->ep0_reply) {
		/*
		 * This could happen if a USB RESET shows up during an ep0
		 * transaction (especially if a laggy driver like gadgetfs
		 * is in use).
		 */
		dev_warn(udc->dev, "nuking unexpected reply\n");
		bcm63xx_ep0_nuke_reply(udc, 0);
	} else {
		return -EAGAIN;
	}

	return 0;
}

/**
 * bcm63xx_ep0_one_round - Handle the current ep0 state.
 * @udc: Reference to the device controller.
 *
 * Returns 0 if work was done; -EAGAIN if nothing to do.
 */
static int bcm63xx_ep0_one_round(struct bcm63xx_udc *udc)
{
	enum bcm63xx_ep0_state ep0state = udc->ep0state;
	bool shutdown = udc->ep0_req_reset || udc->ep0_req_shutdown;

	switch (udc->ep0state) {
	case EP0_REQUEUE:
		/* set up descriptor to receive SETUP packet */
		bcm63xx_ep0_internal_request(udc, IUDMA_EP0_RXCHAN,
					     BCM63XX_MAX_CTRL_PKT);
		ep0state = EP0_IDLE;
		break;
	case EP0_IDLE:
		return bcm63xx_ep0_do_idle(udc);
	case EP0_IN_DATA_PHASE_SETUP:
		/*
		 * Normal case: TX request is in ep0_reply (queued by the
		 * callback), or will be queued shortly.  When it's here,
		 * send it to the HW and go to EP0_IN_DATA_PHASE_COMPLETE.
		 *
		 * Shutdown case: Stop waiting for the reply.  Just
		 * REQUEUE->IDLE.  The gadget driver is NOT expected to
		 * queue anything else now.
		 */
		if (udc->ep0_reply) {
			bcm63xx_ep0_map_write(udc, IUDMA_EP0_TXCHAN,
					      udc->ep0_reply);
			ep0state = EP0_IN_DATA_PHASE_COMPLETE;
		} else if (shutdown) {
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_IN_DATA_PHASE_COMPLETE: {
		/*
		 * Normal case: TX packet (ep0_reply) is in flight; wait for
		 * it to finish, then go back to REQUEUE->IDLE.
		 *
		 * Shutdown case: Reset the TX channel, send -ESHUTDOWN
		 * completion to the gadget driver, then REQUEUE->IDLE.
		 */
		if (udc->ep0_req_completed) {
			udc->ep0_reply = NULL;
			bcm63xx_ep0_read_complete(udc);
			/*
			 * the "ack" sometimes gets eaten (see
			 * bcm63xx_ep0_do_idle)
			 */
			ep0state = EP0_REQUEUE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
			bcm63xx_ep0_nuke_reply(udc, 1);
			ep0state = EP0_REQUEUE;
		}
		break;
	}
	case EP0_OUT_DATA_PHASE_SETUP:
		/* Similar behavior to EP0_IN_DATA_PHASE_SETUP */
		if (udc->ep0_reply) {
			bcm63xx_ep0_map_write(udc, IUDMA_EP0_RXCHAN,
					      udc->ep0_reply);
			ep0state = EP0_OUT_DATA_PHASE_COMPLETE;
		} else if (shutdown) {
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_OUT_DATA_PHASE_COMPLETE: {
		/* Similar behavior to EP0_IN_DATA_PHASE_COMPLETE */
		if (udc->ep0_req_completed) {
			udc->ep0_reply = NULL;
			bcm63xx_ep0_read_complete(udc);

			/* send 0-byte ack to host */
			bcm63xx_ep0_internal_request(udc, IUDMA_EP0_TXCHAN, 0);
			ep0state = EP0_OUT_STATUS_PHASE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
			bcm63xx_ep0_nuke_reply(udc, 0);
			ep0state = EP0_REQUEUE;
		}
		break;
	}
	case EP0_OUT_STATUS_PHASE:
		/*
		 * Normal case: 0-byte OUT ack packet is in flight; wait
		 * for it to finish, then go back to REQUEUE->IDLE.
		 *
		 * Shutdown case: just cancel the transmission.  Don't bother
		 * calling the completion, because it originated from this
		 * function anyway.  Then go back to REQUEUE->IDLE.
		 */
		if (udc->ep0_req_completed) {
			bcm63xx_ep0_read_complete(udc);
			ep0state = EP0_REQUEUE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
			udc->ep0_request = NULL;
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_IN_FAKE_STATUS_PHASE: {
		/*
		 * Normal case: we spoofed a SETUP packet and are now
		 * waiting for the gadget driver to send a 0-byte reply.
		 * This doesn't actually get sent to the HW because the
		 * HW has already sent its own reply.  Once we get the
		 * response, return to IDLE.
		 *
		 * Shutdown case: return to IDLE immediately.
		 *
		 * Note that the ep0 RX descriptor has remained queued
		 * (and possibly unfilled) during this entire transaction.
		 * The HW datapath (IUDMA) never even sees SET_CONFIGURATION
		 * or SET_INTERFACE transactions.
		 */
		struct usb_request *r = udc->ep0_reply;

		if (!r) {
			if (shutdown)
				ep0state = EP0_IDLE;
			break;
		}

		bcm63xx_ep0_complete(udc, r, 0);
		udc->ep0_reply = NULL;
		ep0state = EP0_IDLE;
		break;
	}
	case EP0_SHUTDOWN:
		break;
	}

	if (udc->ep0state == ep0state)
		return -EAGAIN;

	udc->ep0state = ep0state;
	return 0;
}

/**
 * bcm63xx_ep0_process - ep0 worker thread / state machine.
 * @w: Workqueue struct.
 *
 * bcm63xx_ep0_process is triggered any time an event occurs on ep0.  It
 * is used to synchronize ep0 events and ensure that both HW and SW events
 * occur in a well-defined order.  When the ep0 IUDMA queues are idle, it may
 * synthesize SET_CONFIGURATION / SET_INTERFACE requests that were consumed
 * by the USBD hardware.
 *
 * The worker function will continue iterating around the state machine
 * until there is nothing left to do.  Usually "nothing left to do" means
 * that we're waiting for a new event from the hardware.
 */
static void bcm63xx_ep0_process(struct work_struct *w)
{
	struct bcm63xx_udc *udc = container_of(w, struct bcm63xx_udc, ep0_wq);
	spin_lock_irq(&udc->lock);
	while (bcm63xx_ep0_one_round(udc) == 0)
		;
	spin_unlock_irq(&udc->lock);
}
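
/*
 * Example walk (editorial note): a typical IN control transfer with a
 * data stage moves the state machine through:
 *
 *	EP0_REQUEUE -> EP0_IDLE		(RX BD armed; SETUP arrives)
 *	-> EP0_IN_DATA_PHASE_SETUP	(gadget ->setup() queues a reply)
 *	-> EP0_IN_DATA_PHASE_COMPLETE	(reply handed to the TX channel)
 *	-> EP0_REQUEUE -> EP0_IDLE	(transfer done; wait for next SETUP)
 */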

/***********************************************************************
 * Standard UDC gadget operations
 ***********************************************************************/

/**
 * bcm63xx_udc_get_frame - Read current SOF frame number from the HW.
 * @gadget: USB slave device.
 */
static int bcm63xx_udc_get_frame(struct usb_gadget *gadget)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);

	return (usbd_readl(udc, USBD_STATUS_REG) &
		USBD_STATUS_SOF_MASK) >> USBD_STATUS_SOF_SHIFT;
}
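
/*
 * A gadget driver normally reaches this op through the framework
 * wrapper rather than calling it directly, e.g. (illustrative):
 *
 *	int frame = usb_gadget_frame_number(&udc->gadget);
 *
 * which resolves to gadget->ops->get_frame().
 */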

/**
 * bcm63xx_udc_pullup - Enable/disable pullup on D+ line.
 * @gadget: USB slave device.
 * @is_on: 0 to disable pullup, 1 to enable.
 *
 * See notes in bcm63xx_select_pullup().
 */
static int bcm63xx_udc_pullup(struct usb_gadget *gadget, int is_on)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	unsigned long flags;
	int i, rc = -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);
	if (is_on && udc->ep0state == EP0_SHUTDOWN) {
		udc->gadget.speed = USB_SPEED_UNKNOWN;
		udc->ep0state = EP0_REQUEUE;
		bcm63xx_fifo_setup(udc);
		bcm63xx_fifo_reset(udc);
		bcm63xx_ep_setup(udc);

		bitmap_zero(&udc->wedgemap, BCM63XX_NUM_EP);
		for (i = 0; i < BCM63XX_NUM_EP; i++)
			bcm63xx_set_stall(udc, &udc->bep[i], false);

		bcm63xx_set_ctrl_irqs(udc, true);
		bcm63xx_select_pullup(gadget_to_udc(gadget), true);
		rc = 0;
	} else if (!is_on && udc->ep0state != EP0_SHUTDOWN) {
		bcm63xx_select_pullup(gadget_to_udc(gadget), false);

		udc->ep0_req_shutdown = 1;
		spin_unlock_irqrestore(&udc->lock, flags);

		while (1) {
			schedule_work(&udc->ep0_wq);
			if (udc->ep0state == EP0_SHUTDOWN)
				break;
			msleep(50);
		}
		bcm63xx_set_ctrl_irqs(udc, false);
		cancel_work_sync(&udc->ep0_wq);
		return 0;
	}

	spin_unlock_irqrestore(&udc->lock, flags);
	return rc;
}
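
/*
 * The UDC core drives this op via usb_gadget_connect() and
 * usb_gadget_disconnect().  A gadget driver toggling the D+ pullup
 * would do (illustrative; each call lands in bcm63xx_udc_pullup()):
 *
 *	usb_gadget_disconnect(&udc->gadget);	// ->pullup(gadget, 0)
 *	usb_gadget_connect(&udc->gadget);	// ->pullup(gadget, 1)
 */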

/**
 * bcm63xx_udc_start - Start the controller.
 * @gadget: USB slave device.
 * @driver: Driver for USB slave devices.
 */
static int bcm63xx_udc_start(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	unsigned long flags;

	if (!driver || driver->max_speed < USB_SPEED_HIGH ||
	    !driver->setup)
		return -EINVAL;
	if (!udc)
		return -ENODEV;
	if (udc->driver)
		return -EBUSY;

	spin_lock_irqsave(&udc->lock, flags);

	set_clocks(udc, true);
	bcm63xx_fifo_setup(udc);
	bcm63xx_ep_init(udc);
	bcm63xx_ep_setup(udc);
	bcm63xx_fifo_reset(udc);
	bcm63xx_select_phy_mode(udc, true);

	udc->driver = driver;
	driver->driver.bus = NULL;
	udc->gadget.dev.of_node = udc->dev->of_node;

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
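
/*
 * Sketch of the assumed call flow (gadget core code, not this file):
 * binding a function driver such as g_ether ends up in
 * usb_gadget_probe_driver(), which matches this UDC, invokes
 * ->udc_start(), and then asserts the pullup via ->pullup(1) unless
 * the gadget driver deferred that.
 */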

/**
 * bcm63xx_udc_stop - Shut down the controller.
 * @gadget: USB slave device.
 */
static int bcm63xx_udc_stop(struct usb_gadget *gadget)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	udc->driver = NULL;
	spin_unlock_irqrestore(&udc->lock, flags);

	/*
	 * If we switch the PHY too abruptly after dropping D+, the host
	 * will often complain:
	 *
	 *     hub 1-0:1.0: port 1 disabled by hub (EMI?), re-enabling...
	 *
	 * Sleep with the lock dropped: msleep() must not be called in
	 * atomic context.
	 */
	msleep(100);

	spin_lock_irqsave(&udc->lock, flags);
	bcm63xx_select_phy_mode(udc, false);
	set_clocks(udc, false);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

static const struct usb_gadget_ops bcm63xx_udc_ops = {
	.get_frame	= bcm63xx_udc_get_frame,
	.pullup		= bcm63xx_udc_pullup,
	.udc_start	= bcm63xx_udc_start,
	.udc_stop	= bcm63xx_udc_stop,
};

/***********************************************************************
 * IRQ handling
 ***********************************************************************/

/**
 * bcm63xx_update_cfg_iface - Read current configuration/interface settings.
 * @udc: Reference to the device controller.
 *
 * This controller intercepts SET_CONFIGURATION and SET_INTERFACE messages.
 * The driver never sees the raw control packets coming in on the ep0
 * IUDMA channel, but at least we get an interrupt event to tell us that
 * new values are waiting in the USBD_STATUS register.
 */
static void bcm63xx_update_cfg_iface(struct bcm63xx_udc *udc)
{
	u32 reg = usbd_readl(udc, USBD_STATUS_REG);

	udc->cfg = (reg & USBD_STATUS_CFG_MASK) >> USBD_STATUS_CFG_SHIFT;
	udc->iface = (reg & USBD_STATUS_INTF_MASK) >> USBD_STATUS_INTF_SHIFT;
	udc->alt_iface = (reg & USBD_STATUS_ALTINTF_MASK) >>
			 USBD_STATUS_ALTINTF_SHIFT;
	bcm63xx_ep_setup(udc);
}
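
/*
 * The open-coded mask/shift pairs above could equivalently use the
 * bitfield helpers from <linux/bitfield.h>, e.g. (sketch; same
 * behavior, assuming the masks are contiguous compile-time constants):
 *
 *	udc->cfg = FIELD_GET(USBD_STATUS_CFG_MASK, reg);
 */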

/**
 * bcm63xx_update_link_speed - Check to see if the link speed has changed.
 * @udc: Reference to the device controller.
 *
 * The link speed update coincides with a SETUP IRQ.  Returns 1 if the
 * speed has changed, so that the caller can update the endpoint settings.
 */
static int bcm63xx_update_link_speed(struct bcm63xx_udc *udc)
{
	u32 reg = usbd_readl(udc, USBD_STATUS_REG);
	enum usb_device_speed oldspeed = udc->gadget.speed;

	switch ((reg & USBD_STATUS_SPD_MASK) >> USBD_STATUS_SPD_SHIFT) {
	case BCM63XX_SPD_HIGH:
		udc->gadget.speed = USB_SPEED_HIGH;
		break;
	case BCM63XX_SPD_FULL:
		udc->gadget.speed = USB_SPEED_FULL;
		break;
	default:
		/* this should never happen */
		udc->gadget.speed = USB_SPEED_UNKNOWN;
		dev_err(udc->dev,
			"received SETUP packet with invalid link speed\n");
		return 0;
	}

	if (udc->gadget.speed != oldspeed) {
		dev_info(udc->dev, "link up, %s-speed mode\n",
			 udc->gadget.speed == USB_SPEED_HIGH ? "high" : "full");
		return 1;
	} else {
		return 0;
	}
}

/**
 * bcm63xx_update_wedge - Iterate through wedged endpoints.
 * @udc: Reference to the device controller.
 * @new_status: true to "refresh" wedge status; false to clear it.
 *
 * On a SETUP interrupt, we need to manually "refresh" the wedge status
 * because the controller hardware is designed to automatically clear
 * stalls in response to a CLEAR_FEATURE request from the host.
 *
 * On a RESET interrupt, we do want to restore all wedged endpoints.
 */
static void bcm63xx_update_wedge(struct bcm63xx_udc *udc, bool new_status)
{
	int i;

	for_each_set_bit(i, &udc->wedgemap, BCM63XX_NUM_EP) {
		bcm63xx_set_stall(udc, &udc->bep[i], new_status);
		if (!new_status)
			clear_bit(i, &udc->wedgemap);
	}
}
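
/*
 * Wedging originates from the gadget side; a function driver that
 * wants a halt the host cannot clear would call (illustrative):
 *
 *	usb_ep_set_wedge(ep);
 *
 * This is why CLEAR_FEATURE(HALT) must not silently unstall a wedged
 * endpoint, and why the SETUP path below re-asserts the stall.
 */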

/**
 * bcm63xx_udc_ctrl_isr - ISR for control path events (USBD).
 * @irq: IRQ number (unused).
 * @dev_id: Reference to the device controller.
 *
 * This is where we handle link (VBUS) down, USB reset, speed changes,
 * SET_CONFIGURATION, and SET_INTERFACE events.
 */
static irqreturn_t bcm63xx_udc_ctrl_isr(int irq, void *dev_id)
{
	struct bcm63xx_udc *udc = dev_id;
	u32 stat;
	bool disconnected = false, bus_reset = false;

	stat = usbd_readl(udc, USBD_EVENT_IRQ_STATUS_REG) &
	       usbd_readl(udc, USBD_EVENT_IRQ_MASK_REG);

	usbd_writel(udc, stat, USBD_EVENT_IRQ_STATUS_REG);

	spin_lock(&udc->lock);
	if (stat & BIT(USBD_EVENT_IRQ_USB_LINK)) {
		/* VBUS toggled */

		if (!(usbd_readl(udc, USBD_EVENTS_REG) &
		      USBD_EVENTS_USB_LINK_MASK) &&
		      udc->gadget.speed != USB_SPEED_UNKNOWN)
			dev_info(udc->dev, "link down\n");

		udc->gadget.speed = USB_SPEED_UNKNOWN;
		disconnected = true;
	}
	if (stat & BIT(USBD_EVENT_IRQ_USB_RESET)) {
		bcm63xx_fifo_setup(udc);
		bcm63xx_fifo_reset(udc);
		bcm63xx_ep_setup(udc);

		bcm63xx_update_wedge(udc, false);

		udc->ep0_req_reset = 1;
		schedule_work(&udc->ep0_wq);
		bus_reset = true;
	}
	if (stat & BIT(USBD_EVENT_IRQ_SETUP)) {
		if (bcm63xx_update_link_speed(udc)) {
			bcm63xx_fifo_setup(udc);
			bcm63xx_ep_setup(udc);
		}
		bcm63xx_update_wedge(udc, true);
	}
	if (stat & BIT(USBD_EVENT_IRQ_SETCFG)) {
		bcm63xx_update_cfg_iface(udc);
		udc->ep0_req_set_cfg = 1;
		schedule_work(&udc->ep0_wq);
	}
	if (stat & BIT(USBD_EVENT_IRQ_SETINTF)) {
		bcm63xx_update_cfg_iface(udc);
		udc->ep0_req_set_iface = 1;
		schedule_work(&udc->ep0_wq);
	}
	spin_unlock(&udc->lock);

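	/*
	 * The disconnect/reset callbacks are deliberately invoked after
	 * dropping udc->lock: the gadget driver may call back into UDC
	 * ops (e.g. usb_ep_disable()) from these handlers.
	 */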
	if (disconnected && udc->driver)
		udc->driver->disconnect(&udc->gadget);
	else if (bus_reset && udc->driver)
		usb_gadget_udc_reset(&udc->gadget, udc->driver);

	return IRQ_HANDLED;
}

/**
 * bcm63xx_udc_data_isr - ISR for data path events (IUDMA).
 * @irq: IRQ number (unused).
 * @dev_id: Reference to the IUDMA channel that generated the interrupt.
 *
 * For the two ep0 channels, we have special handling that triggers the
 * ep0 worker thread.  For normal bulk/intr channels, either queue up
 * the next buffer descriptor for the transaction (incomplete transaction),
 * or invoke the completion callback (complete transactions).
 */
static irqreturn_t bcm63xx_udc_data_isr(int irq, void *dev_id)
{
	struct iudma_ch *iudma = dev_id;
	struct bcm63xx_udc *udc = iudma->udc;
	struct bcm63xx_ep *bep;
	struct usb_request *req = NULL;
	struct bcm63xx_req *breq = NULL;
	int rc;
	bool is_done = false;

	spin_lock(&udc->lock);

	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
			ENETDMAC_IR_REG, iudma->ch_idx);
	bep = iudma->bep;
	rc = iudma_read(udc, iudma);

	/* special handling for EP0 RX (0) and TX (1) */
	if (iudma->ch_idx == IUDMA_EP0_RXCHAN ||
	    iudma->ch_idx == IUDMA_EP0_TXCHAN) {
		req = udc->ep0_request;
		breq = our_req(req);

		/* a single request could require multiple submissions */
		if (rc >= 0) {
			req->actual += rc;

			if (req->actual >= req->length || breq->bd_bytes > rc) {
				udc->ep0_req_completed = 1;
				is_done = true;
				schedule_work(&udc->ep0_wq);

				/* "actual" on a ZLP is 1 byte */
				req->actual = min(req->actual, req->length);
			} else {
				/* queue up the next BD (same request) */
				iudma_write(udc, iudma, breq);
			}
		}
	} else if (!list_empty(&bep->queue)) {
		breq = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
		req = &breq->req;

		if (rc >= 0) {
			req->actual += rc;

			if (req->actual >= req->length || breq->bd_bytes > rc) {
				is_done = true;
				list_del(&breq->queue);

				req->actual = min(req->actual, req->length);

				if (!list_empty(&bep->queue)) {
					struct bcm63xx_req *next;

					next = list_first_entry(&bep->queue,
						struct bcm63xx_req, queue);
					iudma_write(udc, iudma, next);
				}
			} else {
				iudma_write(udc, iudma, breq);
			}
		}
	}
	spin_unlock(&udc->lock);

	if (is_done) {
		usb_gadget_unmap_request(&udc->gadget, req, iudma->is_tx);
		if (req->complete)
			req->complete(&bep->ep, req);
	}

	return IRQ_HANDLED;
}
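
/*
 * Worked example of the completion test above (sizes illustrative):
 * if a submission posts bd_bytes == 2048 and the hardware reports
 * rc == 2048, the request is not finished yet and the next chunk is
 * queued on the same request; once the host sends a short packet so
 * that rc < bd_bytes, or req->actual reaches req->length, the
 * transfer is complete and the gadget's completion callback runs.
 */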

/***********************************************************************
 * Debug filesystem
 ***********************************************************************/

/*
 * bcm63xx_usbd_dbg_show - Show USBD controller state.
 * @s: seq_file to which the information will be written.
 * @p: Unused.
 *
 * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/usbd
 */
static int bcm63xx_usbd_dbg_show(struct seq_file *s, void *p)
{
	struct bcm63xx_udc *udc = s->private;

	if (!udc->driver)
		return -ENODEV;

	seq_printf(s, "ep0 state: %s\n",
		   bcm63xx_ep0_state_names[udc->ep0state]);
	seq_printf(s, "  pending requests: %s%s%s%s%s%s%s\n",
		   udc->ep0_req_reset ? "reset " : "",
		   udc->ep0_req_set_cfg ? "set_cfg " : "",
		   udc->ep0_req_set_iface ? "set_iface " : "",
		   udc->ep0_req_shutdown ? "shutdown " : "",
		   udc->ep0_request ? "pending " : "",
		   udc->ep0_req_completed ? "completed " : "",
		   udc->ep0_reply ? "reply " : "");
	seq_printf(s, "cfg: %d; iface: %d; alt_iface: %d\n",
		   udc->cfg, udc->iface, udc->alt_iface);
	seq_printf(s, "regs:\n");
	seq_printf(s, "  control: %08x; straps: %08x; status: %08x\n",
		   usbd_readl(udc, USBD_CONTROL_REG),
		   usbd_readl(udc, USBD_STRAPS_REG),
		   usbd_readl(udc, USBD_STATUS_REG));
	seq_printf(s, "  events:  %08x; stall:  %08x\n",
		   usbd_readl(udc, USBD_EVENTS_REG),
		   usbd_readl(udc, USBD_STALL_REG));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(bcm63xx_usbd_dbg);
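
/*
 * Userspace example (path as noted above; assumes debugfs is mounted
 * and CONFIG_USB_GADGET_DEBUG_FS is enabled):
 *
 *	# mount -t debugfs none /sys/kernel/debug
 *	# cat /sys/kernel/debug/bcm63xx_udc/usbd
 */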

/*
 * bcm63xx_iudma_dbg_show - Show IUDMA status and descriptors.
 * @s: seq_file to which the information will be written.
 * @p: Unused.
 *
 * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/iudma
 */
static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
{
	struct bcm63xx_udc *udc = s->private;
	int ch_idx, i;
	u32 sram2, sram3;

	if (!udc->driver)
		return -ENODEV;

	for (ch_idx = 0; ch_idx < BCM63XX_NUM_IUDMA; ch_idx++) {
		struct iudma_ch *iudma = &udc->iudma[ch_idx];
		struct list_head *pos;

		seq_printf(s, "IUDMA channel %d -- ", ch_idx);
		switch (iudma_defaults[ch_idx].ep_type) {
		case BCMEP_CTRL:
			seq_printf(s, "control");
			break;
		case BCMEP_BULK:
			seq_printf(s, "bulk");
			break;
		case BCMEP_INTR:
			seq_printf(s, "interrupt");
			break;
		}
		seq_printf(s, ch_idx & 0x01 ? " tx" : " rx");
		seq_printf(s, " [ep%d]:\n",
			   max_t(int, iudma_defaults[ch_idx].ep_num, 0));
		seq_printf(s, "  cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n",
			   usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx),
			   usb_dmac_readl(udc, ENETDMAC_IR_REG, ch_idx),
			   usb_dmac_readl(udc, ENETDMAC_IRMASK_REG, ch_idx),
			   usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG, ch_idx));

		sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG, ch_idx);
		sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG, ch_idx);
		seq_printf(s, "  base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n",
			   usb_dmas_readl(udc, ENETDMAS_RSTART_REG, ch_idx),
			   sram2 >> 16, sram2 & 0xffff,
			   sram3 >> 16, sram3 & 0xffff,
			   usb_dmas_readl(udc, ENETDMAS_SRAM4_REG, ch_idx));
		seq_printf(s, "  desc: %d/%d used", iudma->n_bds_used,
			   iudma->n_bds);

		if (iudma->bep) {
			i = 0;
			list_for_each(pos, &iudma->bep->queue)
				i++;
			seq_printf(s, "; %d queued\n", i);
		} else {
			seq_printf(s, "\n");
		}

		for (i = 0; i < iudma->n_bds; i++) {
			struct bcm_enet_desc *d = &iudma->bd_ring[i];

			seq_printf(s, "  %03x (%02x): len_stat: %04x_%04x; pa %08x",
				   i * sizeof(*d), i,
				   d->len_stat >> 16, d->len_stat & 0xffff,
				   d->address);
			if (d == iudma->read_bd)
				seq_printf(s, "   <<RD");
			if (d == iudma->write_bd)
				seq_printf(s, "   <<WR");
			seq_printf(s, "\n");
		}

		seq_printf(s, "\n");
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(bcm63xx_iudma_dbg);

/**
 * bcm63xx_udc_init_debugfs - Create debugfs entries.
 * @udc: Reference to the device controller.
 */
static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
{
	struct dentry *root;

	if (!IS_ENABLED(CONFIG_USB_GADGET_DEBUG_FS))
		return;

	root = debugfs_create_dir(udc->gadget.name, usb_debug_root);
	udc->debugfs_root = root;

	debugfs_create_file("usbd", 0400, root, udc, &bcm63xx_usbd_dbg_fops);
	debugfs_create_file("iudma", 0400, root, udc, &bcm63xx_iudma_dbg_fops);
}

/**
 * bcm63xx_udc_cleanup_debugfs - Remove debugfs entries.
 * @udc: Reference to the device controller.
 *
 * debugfs_remove_recursive() is safe to call with a NULL argument.
 */
static void bcm63xx_udc_cleanup_debugfs(struct bcm63xx_udc *udc)
{
	debugfs_remove_recursive(udc->debugfs_root);
}

/***********************************************************************
 * Driver init/exit
 ***********************************************************************/

/**
 * bcm63xx_udc_probe - Initialize a new instance of the UDC.
 * @pdev: Platform device struct from the bcm63xx BSP code.
 *
 * Note that platform data is required, because pd.port_no varies from chip
 * to chip and is used to switch the correct USB port to device mode.
 */
static int bcm63xx_udc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct bcm63xx_usbd_platform_data *pd = dev_get_platdata(dev);
	struct bcm63xx_udc *udc;
	int rc = -ENOMEM, i, irq;

	udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
	if (!udc)
		return -ENOMEM;

	platform_set_drvdata(pdev, udc);
	udc->dev = dev;
	udc->pd = pd;

	if (!pd) {
		dev_err(dev, "missing platform data\n");
		return -EINVAL;
	}

	udc->usbd_regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(udc->usbd_regs))
		return PTR_ERR(udc->usbd_regs);

	udc->iudma_regs = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(udc->iudma_regs))
		return PTR_ERR(udc->iudma_regs);

	spin_lock_init(&udc->lock);
	INIT_WORK(&udc->ep0_wq, bcm63xx_ep0_process);

	udc->gadget.ops = &bcm63xx_udc_ops;
	udc->gadget.name = dev_name(dev);

	if (!pd->use_fullspeed && !use_fullspeed)
		udc->gadget.max_speed = USB_SPEED_HIGH;
	else
		udc->gadget.max_speed = USB_SPEED_FULL;

	/* request clocks, allocate buffers, and clear any pending IRQs */
	rc = bcm63xx_init_udc_hw(udc);
	if (rc)
		return rc;

	rc = -ENXIO;

	/* IRQ resource #0: control interrupt (VBUS, speed, etc.) */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		goto out_uninit;
	if (devm_request_irq(dev, irq, &bcm63xx_udc_ctrl_isr, 0,
			     dev_name(dev), udc) < 0)
		goto report_request_failure;

	/* IRQ resources #1-6: data interrupts for IUDMA channels 0-5 */
	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		irq = platform_get_irq(pdev, i + 1);
		if (irq < 0)
			goto out_uninit;
		if (devm_request_irq(dev, irq, &bcm63xx_udc_data_isr, 0,
				     dev_name(dev), &udc->iudma[i]) < 0)
			goto report_request_failure;
	}

	bcm63xx_udc_init_debugfs(udc);
	rc = usb_add_gadget_udc(dev, &udc->gadget);
	if (!rc)
		return 0;

	bcm63xx_udc_cleanup_debugfs(udc);
out_uninit:
	bcm63xx_uninit_udc_hw(udc);
	return rc;

report_request_failure:
	dev_err(dev, "error requesting IRQ #%d\n", irq);
	goto out_uninit;
}
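
/*
 * Sketch of the board-side wiring this probe depends on (lives in the
 * bcm63xx BSP, not in this file; registration helper name assumed
 * from the platform_data header):
 *
 *	static struct bcm63xx_usbd_platform_data usbd_pd = {
 *		.use_fullspeed	= 0,
 *		.port_no	= 0,
 *	};
 *
 *	bcm63xx_usbd_register(&usbd_pd);
 */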

/**
 * bcm63xx_udc_remove - Remove the device from the system.
 * @pdev: Platform device struct from the bcm63xx BSP code.
 */
static int bcm63xx_udc_remove(struct platform_device *pdev)
{
	struct bcm63xx_udc *udc = platform_get_drvdata(pdev);

	bcm63xx_udc_cleanup_debugfs(udc);
	usb_del_gadget_udc(&udc->gadget);
	BUG_ON(udc->driver);

	bcm63xx_uninit_udc_hw(udc);

	return 0;
}

static struct platform_driver bcm63xx_udc_driver = {
	.probe		= bcm63xx_udc_probe,
	.remove		= bcm63xx_udc_remove,
	.driver		= {
		.name	= DRV_MODULE_NAME,
	},
};
module_platform_driver(bcm63xx_udc_driver);
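
/*
 * module_platform_driver() expands to the usual registration
 * boilerplate, roughly equivalent to (sketch):
 *
 *	static int __init bcm63xx_udc_driver_init(void)
 *	{
 *		return platform_driver_register(&bcm63xx_udc_driver);
 *	}
 *	module_init(bcm63xx_udc_driver_init);
 *
 * with a matching platform_driver_unregister() in module_exit.
 */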

MODULE_DESCRIPTION("BCM63xx USB Peripheral Controller");
MODULE_AUTHOR("Kevin Cernekee <cernekee@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_MODULE_NAME);