xref: /linux/drivers/usb/gadget/udc/bcm63xx_udc.c (revision 4949009eb8d40a441dcddcd96e101e77d31cf1b2)
1 /*
2  * bcm63xx_udc.c -- BCM63xx UDC high/full speed USB device controller
3  *
4  * Copyright (C) 2012 Kevin Cernekee <cernekee@gmail.com>
5  * Copyright (C) 2012 Broadcom Corporation
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  */
12 
13 #include <linux/bitops.h>
14 #include <linux/bug.h>
15 #include <linux/clk.h>
16 #include <linux/compiler.h>
17 #include <linux/debugfs.h>
18 #include <linux/delay.h>
19 #include <linux/device.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/errno.h>
22 #include <linux/interrupt.h>
23 #include <linux/ioport.h>
24 #include <linux/kconfig.h>
25 #include <linux/kernel.h>
26 #include <linux/list.h>
27 #include <linux/module.h>
28 #include <linux/moduleparam.h>
29 #include <linux/platform_device.h>
30 #include <linux/sched.h>
31 #include <linux/seq_file.h>
32 #include <linux/slab.h>
33 #include <linux/timer.h>
34 #include <linux/usb/ch9.h>
35 #include <linux/usb/gadget.h>
36 #include <linux/workqueue.h>
37 
38 #include <bcm63xx_cpu.h>
39 #include <bcm63xx_iudma.h>
40 #include <bcm63xx_dev_usb_usbd.h>
41 #include <bcm63xx_io.h>
42 #include <bcm63xx_regs.h>
43 
/* driver name; also used as the debugfs directory name (see debugfs_root) */
#define DRV_MODULE_NAME		"bcm63xx_udc"
45 
static const char bcm63xx_ep0name[] = "ep0";

/* endpoint names, indexed by USB endpoint number (fixed by the hardware) */
static const char *const bcm63xx_ep_name[] = {
	bcm63xx_ep0name,
	"ep1in-bulk", "ep2out-bulk", "ep3in-int", "ep4out-int",
};
51 
/* if true, strap the controller to full speed only (see bcm63xx_init_udc_hw) */
static bool use_fullspeed;
module_param(use_fullspeed, bool, S_IRUGO);
MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
55 
/*
 * RX IRQ coalescing options:
 *
 * false (default) - one IRQ per DATAx packet.  Slow but reliable.  The
 * driver is able to pass the "testusb" suite and recover from conditions like:
 *
 *   1) Device queues up a 2048-byte RX IUDMA transaction on an OUT bulk ep
 *   2) Host sends 512 bytes of data
 *   3) Host decides to reconfigure the device and sends SET_INTERFACE
 *   4) Device shuts down the endpoint and cancels the RX transaction
 *
 * true - one IRQ per transfer, for transfers <= 2048B.  Generates
 * considerably fewer IRQs, but error recovery is less robust.  Does not
 * reliably pass "testusb".
 *
 * TX always uses coalescing (see the max_bd_bytes selection in
 * iudma_write()), because we can cancel partially complete TX
 * transfers by repeatedly flushing the FIFO.  The hardware doesn't allow
 * this on RX.
 */
static bool irq_coalesce;
module_param(irq_coalesce, bool, S_IRUGO);
MODULE_PARM_DESC(irq_coalesce, "take one IRQ per RX transfer");
78 
#define BCM63XX_NUM_EP			5
#define BCM63XX_NUM_IUDMA		6
#define BCM63XX_NUM_FIFO_PAIRS		3

/* usec to wait for an IUDMA channel to drain during reset */
#define IUDMA_RESET_TIMEOUT_US		10000

/* ep0 is bidirectional, so it owns two IUDMA channels (RX and TX) */
#define IUDMA_EP0_RXCHAN		0
#define IUDMA_EP0_TXCHAN		1

/* largest payload a single buffer descriptor carries (see iudma_write) */
#define IUDMA_MAX_FRAGMENT		2048
#define BCM63XX_MAX_CTRL_PKT		64

/* endpoint type codes programmed into the USBD typemap/CSR registers */
#define BCMEP_CTRL			0x00
#define BCMEP_ISOC			0x01
#define BCMEP_BULK			0x02
#define BCMEP_INTR			0x03

/* endpoint direction codes */
#define BCMEP_OUT			0x00
#define BCMEP_IN			0x01

/* USBD_STRAPS_REG speed strap values */
#define BCM63XX_SPD_FULL		1
#define BCM63XX_SPD_HIGH		0

/* offsets of the per-channel DMAC/DMAS register banks within iudma_regs */
#define IUDMA_DMAC_OFFSET		0x200
#define IUDMA_DMAS_OFFSET		0x400
104 
/*
 * States of the ep0 control-transfer state machine; printable names are
 * kept in bcm63xx_ep0_state_names[] (order must stay in sync).
 */
enum bcm63xx_ep0_state {
	EP0_REQUEUE,
	EP0_IDLE,
	EP0_IN_DATA_PHASE_SETUP,
	EP0_IN_DATA_PHASE_COMPLETE,
	EP0_OUT_DATA_PHASE_SETUP,
	EP0_OUT_DATA_PHASE_COMPLETE,
	EP0_OUT_STATUS_PHASE,
	EP0_IN_FAKE_STATUS_PHASE,
	EP0_SHUTDOWN,
};
116 
/* printable names for enum bcm63xx_ep0_state; indexed by state value */
static const char __maybe_unused bcm63xx_ep0_state_names[][32] = {
	"REQUEUE",
	"IDLE",
	"IN_DATA_PHASE_SETUP",
	"IN_DATA_PHASE_COMPLETE",
	"OUT_DATA_PHASE_SETUP",
	"OUT_DATA_PHASE_COMPLETE",
	"OUT_STATUS_PHASE",
	"IN_FAKE_STATUS_PHASE",
	"SHUTDOWN",
};
128 
/**
 * struct iudma_ch_cfg - Static configuration for an IUDMA channel.
 * @ep_num: USB endpoint number (-1 if the channel has no logical endpoint,
 *          i.e. the dedicated ep0 RX channel).
 * @n_bds: Number of buffer descriptors in the ring.
 * @ep_type: Endpoint type (control, bulk, interrupt) - a BCMEP_* value.
 * @dir: Direction (in, out) - BCMEP_IN or BCMEP_OUT.
 * @n_fifo_slots: Number of FIFO entries to allocate for this channel.
 * @max_pkt_hs: Maximum packet size in high speed mode.
 * @max_pkt_fs: Maximum packet size in full speed mode.
 */
struct iudma_ch_cfg {
	int				ep_num;
	int				n_bds;
	int				ep_type;
	int				dir;
	int				n_fifo_slots;
	int				max_pkt_hs;
	int				max_pkt_fs;
};
148 
static const struct iudma_ch_cfg iudma_defaults[] = {

	/* This controller was designed to support a CDC/RNDIS application.
	   It may be possible to reconfigure some of the endpoints, but
	   the hardware limitations (FIFO sizing and number of DMA channels)
	   may significantly impact flexibility and/or stability.  Change
	   these values at your own risk.

	   Channels are paired: even index = RX, odd index = TX (see
	   iudma_init_channel()).  Indices 0/1 are ep0.

	      ep_num       ep_type           n_fifo_slots    max_pkt_fs
	idx      |  n_bds     |         dir       |  max_pkt_hs  |
	 |       |    |       |          |        |      |       |       */
	[0] = { -1,   4, BCMEP_CTRL, BCMEP_OUT,  32,    64,     64 },
	[1] = {  0,   4, BCMEP_CTRL, BCMEP_OUT,  32,    64,     64 },
	[2] = {  2,  16, BCMEP_BULK, BCMEP_OUT, 128,   512,     64 },
	[3] = {  1,  16, BCMEP_BULK, BCMEP_IN,  128,   512,     64 },
	[4] = {  4,   4, BCMEP_INTR, BCMEP_OUT,  32,    64,     64 },
	[5] = {  3,   4, BCMEP_INTR, BCMEP_IN,   32,    64,     64 },
};
167 
168 struct bcm63xx_udc;
169 
/**
 * struct iudma_ch - Represents the current state of a single IUDMA channel.
 * @ch_idx: IUDMA channel index (0 to BCM63XX_NUM_IUDMA-1).
 * @ep_num: USB endpoint number.  -1 for ep0 RX.
 * @enabled: Whether bcm63xx_ep_enable() has been called.  The two ep0
 *           channels are always enabled (see iudma_init_channel()).
 * @max_pkt: "Chunk size" on the USB interface.  Based on interface speed.
 * @is_tx: true for TX, false for RX.  Odd channel indices are TX.
 * @bep: Pointer to the associated endpoint.  NULL for ep0 RX.
 * @udc: Reference to the device controller.
 * @read_bd: Next buffer descriptor to reap from the hardware.
 * @write_bd: Next BD available for a new packet.
 * @end_bd: Points to the final BD in the ring.
 * @n_bds_used: Number of BD entries currently occupied.
 * @bd_ring: Base pointer to the BD ring.
 * @bd_ring_dma: Physical (DMA) address of bd_ring.
 * @n_bds: Total number of BDs in the ring.
 *
 * ep0 has two IUDMA channels (IUDMA_EP0_RXCHAN and IUDMA_EP0_TXCHAN), as it is
 * bidirectional.  The "struct usb_ep" associated with ep0 is for TX (IN)
 * only.
 *
 * Each bulk/intr endpoint has a single IUDMA channel and a single
 * struct usb_ep.
 */
struct iudma_ch {
	unsigned int			ch_idx;
	int				ep_num;
	bool				enabled;
	int				max_pkt;
	bool				is_tx;
	struct bcm63xx_ep		*bep;
	struct bcm63xx_udc		*udc;

	struct bcm_enet_desc		*read_bd;
	struct bcm_enet_desc		*write_bd;
	struct bcm_enet_desc		*end_bd;
	int				n_bds_used;

	struct bcm_enet_desc		*bd_ring;
	dma_addr_t			bd_ring_dma;
	unsigned int			n_bds;
};
212 
/**
 * struct bcm63xx_ep - Internal (driver) state of a single endpoint.
 * @ep_num: USB endpoint number.
 * @iudma: Pointer to IUDMA channel state.
 * @ep: USB gadget layer representation of the EP.
 * @udc: Reference to the device controller.
 * @queue: Linked list of outstanding requests (struct bcm63xx_req) for
 *         this EP.
 * @halted: 1 if the EP is stalled; 0 otherwise.
 */
struct bcm63xx_ep {
	unsigned int			ep_num;
	struct iudma_ch			*iudma;
	struct usb_ep			ep;
	struct bcm63xx_udc		*udc;
	struct list_head		queue;
	unsigned			halted:1;
};
230 
/**
 * struct bcm63xx_req - Internal (driver) state of a single request.
 * @queue: Links back to the EP's request list.
 * @req: USB gadget layer representation of the request.
 * @offset: Current byte offset into the data buffer (next byte to queue).
 * @bd_bytes: Number of data bytes in outstanding BD entries.
 * @iudma: IUDMA channel used for the request (set by iudma_write()).
 */
struct bcm63xx_req {
	struct list_head		queue;		/* ep's requests */
	struct usb_request		req;
	unsigned int			offset;
	unsigned int			bd_bytes;
	struct iudma_ch			*iudma;
};
246 
/**
 * struct bcm63xx_udc - Driver/hardware private context.
 * @lock: Spinlock to mediate access to this struct, and (most) HW regs.
 * @dev: Generic Linux device structure.
 * @pd: Platform data (board/port info).
 * @usbd_clk: Clock descriptor for the USB device block.
 * @usbh_clk: Clock descriptor for the USB host block.
 * @gadget: USB slave device.
 * @driver: Driver for USB slave devices.
 * @usbd_regs: Base address of the USBD/USB20D block.
 * @iudma_regs: Base address of the USBD's associated IUDMA block.
 * @bep: Array of endpoints, including ep0.
 * @iudma: Array of all IUDMA channels used by this controller.
 * @cfg: USB configuration number, from SET_CONFIGURATION wValue.
 * @iface: USB interface number, from SET_INTERFACE wIndex.
 * @alt_iface: USB alt interface number, from SET_INTERFACE wValue.
 * @ep0_ctrl_req: Request object for bcm63xx_udc-initiated ep0 transactions.
 * @ep0_ctrl_buf: Data buffer for ep0_ctrl_req.
 * @ep0state: Current state of the ep0 state machine.
 * @ep0_wq: Workqueue struct used to wake up the ep0 state machine.
 * @wedgemap: Bitmap of wedged endpoints.
 * @ep0_req_reset: USB reset is pending.
 * @ep0_req_set_cfg: Need to spoof a SET_CONFIGURATION packet.
 * @ep0_req_set_iface: Need to spoof a SET_INTERFACE packet.
 * @ep0_req_shutdown: Driver is shutting down; requesting ep0 to halt activity.
 * @ep0_req_completed: ep0 request has completed; worker has not seen it yet.
 * @ep0_reply: Pending reply from gadget driver.
 * @ep0_request: Outstanding ep0 request.
 * @debugfs_root: debugfs directory: /sys/kernel/debug/<DRV_MODULE_NAME>.
 * @debugfs_usbd: debugfs file "usbd" for controller state.
 * @debugfs_iudma: debugfs file "iudma" for IUDMA state.
 */
struct bcm63xx_udc {
	spinlock_t			lock;

	struct device			*dev;
	struct bcm63xx_usbd_platform_data *pd;
	struct clk			*usbd_clk;
	struct clk			*usbh_clk;

	struct usb_gadget		gadget;
	struct usb_gadget_driver	*driver;

	void __iomem			*usbd_regs;
	void __iomem			*iudma_regs;

	struct bcm63xx_ep		bep[BCM63XX_NUM_EP];
	struct iudma_ch			iudma[BCM63XX_NUM_IUDMA];

	int				cfg;
	int				iface;
	int				alt_iface;

	struct bcm63xx_req		ep0_ctrl_req;
	u8				*ep0_ctrl_buf;

	int				ep0state;
	struct work_struct		ep0_wq;

	unsigned long			wedgemap;

	unsigned			ep0_req_reset:1;
	unsigned			ep0_req_set_cfg:1;
	unsigned			ep0_req_set_iface:1;
	unsigned			ep0_req_shutdown:1;

	unsigned			ep0_req_completed:1;
	struct usb_request		*ep0_reply;
	struct usb_request		*ep0_request;

	struct dentry			*debugfs_root;
	struct dentry			*debugfs_usbd;
	struct dentry			*debugfs_iudma;
};
321 
322 static const struct usb_ep_ops bcm63xx_udc_ep_ops;
323 
324 /***********************************************************************
325  * Convenience functions
326  ***********************************************************************/
327 
/* map a gadget-layer object back to its containing bcm63xx_udc */
static inline struct bcm63xx_udc *gadget_to_udc(struct usb_gadget *g)
{
	return container_of(g, struct bcm63xx_udc, gadget);
}
332 
/* map a usb_ep back to its containing bcm63xx_ep */
static inline struct bcm63xx_ep *our_ep(struct usb_ep *ep)
{
	return container_of(ep, struct bcm63xx_ep, ep);
}
337 
/* map a usb_request back to its containing bcm63xx_req */
static inline struct bcm63xx_req *our_req(struct usb_request *req)
{
	return container_of(req, struct bcm63xx_req, req);
}
342 
/* read a 32-bit register in the USBD block */
static inline u32 usbd_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->usbd_regs + off);
}
347 
/* write a 32-bit register in the USBD block */
static inline void usbd_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->usbd_regs + off);
}
352 
/* read a 32-bit global register in the IUDMA block */
static inline u32 usb_dma_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->iudma_regs + off);
}
357 
/* write a 32-bit global register in the IUDMA block */
static inline void usb_dma_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->iudma_regs + off);
}
362 
/* read a per-channel IUDMA channel-config (DMAC) register */
static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off, int chan)
{
	return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}
368 
/* write a per-channel IUDMA channel-config (DMAC) register */
static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
					int chan)
{
	bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}
375 
/* read a per-channel IUDMA state-RAM (DMAS) register */
static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off, int chan)
{
	return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}
381 
/* write a per-channel IUDMA state-RAM (DMAS) register */
static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
					int chan)
{
	bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}
388 
389 static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled)
390 {
391 	if (is_enabled) {
392 		clk_enable(udc->usbh_clk);
393 		clk_enable(udc->usbd_clk);
394 		udelay(10);
395 	} else {
396 		clk_disable(udc->usbd_clk);
397 		clk_disable(udc->usbh_clk);
398 	}
399 }
400 
401 /***********************************************************************
402  * Low-level IUDMA / FIFO operations
403  ***********************************************************************/
404 
405 /**
406  * bcm63xx_ep_dma_select - Helper function to set up the init_sel signal.
407  * @udc: Reference to the device controller.
408  * @idx: Desired init_sel value.
409  *
410  * The "init_sel" signal is used as a selection index for both endpoints
411  * and IUDMA channels.  Since these do not map 1:1, the use of this signal
412  * depends on the context.
413  */
414 static void bcm63xx_ep_dma_select(struct bcm63xx_udc *udc, int idx)
415 {
416 	u32 val = usbd_readl(udc, USBD_CONTROL_REG);
417 
418 	val &= ~USBD_CONTROL_INIT_SEL_MASK;
419 	val |= idx << USBD_CONTROL_INIT_SEL_SHIFT;
420 	usbd_writel(udc, val, USBD_CONTROL_REG);
421 }
422 
423 /**
424  * bcm63xx_set_stall - Enable/disable stall on one endpoint.
425  * @udc: Reference to the device controller.
426  * @bep: Endpoint on which to operate.
427  * @is_stalled: true to enable stall, false to disable.
428  *
429  * See notes in bcm63xx_update_wedge() regarding automatic clearing of
430  * halt/stall conditions.
431  */
432 static void bcm63xx_set_stall(struct bcm63xx_udc *udc, struct bcm63xx_ep *bep,
433 	bool is_stalled)
434 {
435 	u32 val;
436 
437 	val = USBD_STALL_UPDATE_MASK |
438 		(is_stalled ? USBD_STALL_ENABLE_MASK : 0) |
439 		(bep->ep_num << USBD_STALL_EPNUM_SHIFT);
440 	usbd_writel(udc, val, USBD_STALL_REG);
441 }
442 
/**
 * bcm63xx_fifo_setup - (Re)initialize FIFO boundaries and settings.
 * @udc: Reference to the device controller.
 *
 * These parameters depend on the USB link speed.  Settings are
 * per-IUDMA-channel-pair.
 */
static void bcm63xx_fifo_setup(struct bcm63xx_udc *udc)
{
	int is_hs = udc->gadget.speed == USB_SPEED_HIGH;
	u32 i, val, rx_fifo_slot, tx_fifo_slot;

	/* set up FIFO boundaries and packet sizes; this is done in pairs */
	rx_fifo_slot = tx_fifo_slot = 0;
	for (i = 0; i < BCM63XX_NUM_IUDMA; i += 2) {
		/* even channel of the pair is RX, odd is TX */
		const struct iudma_ch_cfg *rx_cfg = &iudma_defaults[i];
		const struct iudma_ch_cfg *tx_cfg = &iudma_defaults[i + 1];

		bcm63xx_ep_dma_select(udc, i >> 1);

		/* slots are assigned contiguously: [start, start + n - 1] */
		val = (rx_fifo_slot << USBD_RXFIFO_CONFIG_START_SHIFT) |
			((rx_fifo_slot + rx_cfg->n_fifo_slots - 1) <<
			 USBD_RXFIFO_CONFIG_END_SHIFT);
		rx_fifo_slot += rx_cfg->n_fifo_slots;
		usbd_writel(udc, val, USBD_RXFIFO_CONFIG_REG);
		usbd_writel(udc,
			    is_hs ? rx_cfg->max_pkt_hs : rx_cfg->max_pkt_fs,
			    USBD_RXFIFO_EPSIZE_REG);

		val = (tx_fifo_slot << USBD_TXFIFO_CONFIG_START_SHIFT) |
			((tx_fifo_slot + tx_cfg->n_fifo_slots - 1) <<
			 USBD_TXFIFO_CONFIG_END_SHIFT);
		tx_fifo_slot += tx_cfg->n_fifo_slots;
		usbd_writel(udc, val, USBD_TXFIFO_CONFIG_REG);
		usbd_writel(udc,
			    is_hs ? tx_cfg->max_pkt_hs : tx_cfg->max_pkt_fs,
			    USBD_TXFIFO_EPSIZE_REG);

		/* readback - presumably to flush the posted writes; confirm */
		usbd_readl(udc, USBD_TXFIFO_EPSIZE_REG);
	}
}
484 
485 /**
486  * bcm63xx_fifo_reset_ep - Flush a single endpoint's FIFO.
487  * @udc: Reference to the device controller.
488  * @ep_num: Endpoint number.
489  */
490 static void bcm63xx_fifo_reset_ep(struct bcm63xx_udc *udc, int ep_num)
491 {
492 	u32 val;
493 
494 	bcm63xx_ep_dma_select(udc, ep_num);
495 
496 	val = usbd_readl(udc, USBD_CONTROL_REG);
497 	val |= USBD_CONTROL_FIFO_RESET_MASK;
498 	usbd_writel(udc, val, USBD_CONTROL_REG);
499 	usbd_readl(udc, USBD_CONTROL_REG);
500 }
501 
502 /**
503  * bcm63xx_fifo_reset - Flush all hardware FIFOs.
504  * @udc: Reference to the device controller.
505  */
506 static void bcm63xx_fifo_reset(struct bcm63xx_udc *udc)
507 {
508 	int i;
509 
510 	for (i = 0; i < BCM63XX_NUM_FIFO_PAIRS; i++)
511 		bcm63xx_fifo_reset_ep(udc, i);
512 }
513 
/**
 * bcm63xx_ep_init - Initial (one-time) endpoint initialization.
 * @udc: Reference to the device controller.
 *
 * Programs the endpoint-number-to-type/DMA-channel mapping for every
 * channel that has a logical endpoint number.
 */
static void bcm63xx_ep_init(struct bcm63xx_udc *udc)
{
	u32 i, val;

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];

		/* channel 0 (ep0 RX) has no logical endpoint number */
		if (cfg->ep_num < 0)
			continue;

		bcm63xx_ep_dma_select(udc, cfg->ep_num);
		/* map this endpoint to its type and DMA channel pair (i >> 1) */
		val = (cfg->ep_type << USBD_EPNUM_TYPEMAP_TYPE_SHIFT) |
			((i >> 1) << USBD_EPNUM_TYPEMAP_DMA_CH_SHIFT);
		usbd_writel(udc, val, USBD_EPNUM_TYPEMAP_REG);
	}
}
534 
/**
 * bcm63xx_ep_setup - Configure per-endpoint settings.
 * @udc: Reference to the device controller.
 *
 * This needs to be rerun if the speed/cfg/intf/altintf changes.
 */
static void bcm63xx_ep_setup(struct bcm63xx_udc *udc)
{
	u32 val, i;

	usbd_writel(udc, USBD_CSR_SETUPADDR_DEF, USBD_CSR_SETUPADDR_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
		/* pick the packet size for the current negotiated speed */
		int max_pkt = udc->gadget.speed == USB_SPEED_HIGH ?
			      cfg->max_pkt_hs : cfg->max_pkt_fs;
		int idx = cfg->ep_num;

		udc->iudma[i].max_pkt = max_pkt;

		/* ep0 RX (ep_num == -1) only needs max_pkt recorded above */
		if (idx < 0)
			continue;
		usb_ep_set_maxpacket_limit(&udc->bep[idx].ep, max_pkt);

		/* program the per-endpoint CSR with current cfg/iface/alt */
		val = (idx << USBD_CSR_EP_LOG_SHIFT) |
		      (cfg->dir << USBD_CSR_EP_DIR_SHIFT) |
		      (cfg->ep_type << USBD_CSR_EP_TYPE_SHIFT) |
		      (udc->cfg << USBD_CSR_EP_CFG_SHIFT) |
		      (udc->iface << USBD_CSR_EP_IFACE_SHIFT) |
		      (udc->alt_iface << USBD_CSR_EP_ALTIFACE_SHIFT) |
		      (max_pkt << USBD_CSR_EP_MAXPKT_SHIFT);
		usbd_writel(udc, val, USBD_CSR_EP_REG(idx));
	}
}
569 
/**
 * iudma_write - Queue a single IUDMA transaction.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to use.
 * @breq: Request containing the transaction data.
 *
 * For RX IUDMA, this will queue a single buffer descriptor, as RX IUDMA
 * does not honor SOP/EOP so the handling of multiple buffers is ambiguous.
 * So iudma_write() may be called several times to fulfill a single
 * usb_request.
 *
 * For TX IUDMA, this can queue multiple buffer descriptors if needed.
 */
static void iudma_write(struct bcm63xx_udc *udc, struct iudma_ch *iudma,
	struct bcm63xx_req *breq)
{
	int first_bd = 1, last_bd = 0, extra_zero_pkt = 0;
	unsigned int bytes_left = breq->req.length - breq->offset;
	/* non-coalesced RX: one packet per BD; otherwise full fragments */
	const int max_bd_bytes = !irq_coalesce && !iudma->is_tx ?
		iudma->max_pkt : IUDMA_MAX_FRAGMENT;

	iudma->n_bds_used = 0;
	breq->bd_bytes = 0;
	breq->iudma = iudma;

	/* gadget asked for a ZLP and the data is an exact multiple of max_pkt */
	if ((bytes_left % iudma->max_pkt == 0) && bytes_left && breq->req.zero)
		extra_zero_pkt = 1;

	do {
		struct bcm_enet_desc *d = iudma->write_bd;
		u32 dmaflags = 0;
		unsigned int n_bytes;

		/* advance write_bd, wrapping at the end of the ring */
		if (d == iudma->end_bd) {
			dmaflags |= DMADESC_WRAP_MASK;
			iudma->write_bd = iudma->bd_ring;
		} else {
			iudma->write_bd++;
		}
		iudma->n_bds_used++;

		n_bytes = min_t(int, bytes_left, max_bd_bytes);
		if (n_bytes)
			dmaflags |= n_bytes << DMADESC_LENGTH_SHIFT;
		else
			dmaflags |= (1 << DMADESC_LENGTH_SHIFT) |
				    DMADESC_USB_ZERO_MASK;

		dmaflags |= DMADESC_OWNER_MASK;
		if (first_bd) {
			dmaflags |= DMADESC_SOP_MASK;
			first_bd = 0;
		}

		/*
		 * extra_zero_pkt forces one more iteration through the loop
		 * after all data is queued up, to send the zero packet
		 */
		if (extra_zero_pkt && !bytes_left)
			extra_zero_pkt = 0;

		/* RX always stops after one BD; TX stops at ring exhaustion
		   or once all data (plus any ZLP) has been queued */
		if (!iudma->is_tx || iudma->n_bds_used == iudma->n_bds ||
		    (n_bytes == bytes_left && !extra_zero_pkt)) {
			last_bd = 1;
			dmaflags |= DMADESC_EOP_MASK;
		}

		/* publish the buffer address before handing the BD to the HW
		   via the OWNER bit in len_stat */
		d->address = breq->req.dma + breq->offset;
		mb();
		d->len_stat = dmaflags;

		breq->offset += n_bytes;
		breq->bd_bytes += n_bytes;
		bytes_left -= n_bytes;
	} while (!last_bd);

	/* kick the channel */
	usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK,
			ENETDMAC_CHANCFG_REG, iudma->ch_idx);
}
649 
/**
 * iudma_read - Check for IUDMA buffer completion.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to use.
 *
 * This checks to see if ALL of the outstanding BDs on the DMA channel
 * have been filled.  If so, it returns the actual transfer length;
 * otherwise it returns -EBUSY.  Returns -EINVAL if no BDs are currently
 * outstanding on the channel.
 */
static int iudma_read(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
	int i, actual_len = 0;
	struct bcm_enet_desc *d = iudma->read_bd;

	/* nothing was queued, so there is nothing to reap */
	if (!iudma->n_bds_used)
		return -EINVAL;

	for (i = 0; i < iudma->n_bds_used; i++) {
		u32 dmaflags;

		dmaflags = d->len_stat;

		/* HW still owns this BD, so the transfer isn't finished yet */
		if (dmaflags & DMADESC_OWNER_MASK)
			return -EBUSY;

		actual_len += (dmaflags & DMADESC_LENGTH_MASK) >>
			      DMADESC_LENGTH_SHIFT;
		/* advance, wrapping at the end of the ring */
		if (d == iudma->end_bd)
			d = iudma->bd_ring;
		else
			d++;
	}

	/* all BDs consumed; reset bookkeeping for the next transaction */
	iudma->read_bd = d;
	iudma->n_bds_used = 0;
	return actual_len;
}
687 
/**
 * iudma_reset_channel - Stop DMA on a single channel.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to reset.
 *
 * Halts the channel (flushing FIFOs as needed), clears all BDs, and
 * reprograms the channel's IRQ mask, burst size, and ring base address.
 */
static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
	int timeout = IUDMA_RESET_TIMEOUT_US;
	struct bcm_enet_desc *d;
	int ch_idx = iudma->ch_idx;

	/* ep0 RX has ep_num == -1, so clamp to FIFO index 0 */
	if (!iudma->is_tx)
		bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num));

	/* stop DMA, then wait for the hardware to wrap up */
	usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG, ch_idx);

	while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx) &
				   ENETDMAC_CHANCFG_EN_MASK) {
		udelay(1);

		/* repeatedly flush the FIFO data until the BD completes */
		if (iudma->is_tx && iudma->ep_num >= 0)
			bcm63xx_fifo_reset_ep(udc, iudma->ep_num);

		if (!timeout--) {
			dev_err(udc->dev, "can't reset IUDMA channel %d\n",
				ch_idx);
			break;
		}
		/* halfway through the timeout, escalate to a forced halt */
		if (timeout == IUDMA_RESET_TIMEOUT_US / 2) {
			dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n",
				 ch_idx);
			usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK,
					ENETDMAC_CHANCFG_REG, ch_idx);
		}
	}
	/* ack any latched channel interrupts */
	usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG, ch_idx);

	/* don't leave "live" HW-owned entries for the next guy to step on */
	for (d = iudma->bd_ring; d <= iudma->end_bd; d++)
		d->len_stat = 0;
	mb();

	iudma->read_bd = iudma->write_bd = iudma->bd_ring;
	iudma->n_bds_used = 0;

	/* set up IRQs, UBUS burst size, and BD base for this channel */
	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
			ENETDMAC_IRMASK_REG, ch_idx);
	usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG, ch_idx);

	usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG, ch_idx);
	usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG, ch_idx);
}
743 
/**
 * iudma_init_channel - One-time IUDMA channel initialization.
 * @udc: Reference to the device controller.
 * @ch_idx: Channel to initialize.
 *
 * Populates the iudma_ch struct from iudma_defaults[], links it to its
 * endpoint (if any), and allocates the BD ring.  Returns 0 on success or
 * -ENOMEM if the ring allocation fails.
 */
static int iudma_init_channel(struct bcm63xx_udc *udc, unsigned int ch_idx)
{
	struct iudma_ch *iudma = &udc->iudma[ch_idx];
	const struct iudma_ch_cfg *cfg = &iudma_defaults[ch_idx];
	unsigned int n_bds = cfg->n_bds;
	struct bcm63xx_ep *bep = NULL;

	iudma->ep_num = cfg->ep_num;
	iudma->ch_idx = ch_idx;
	/* odd channel indices are TX, even are RX */
	iudma->is_tx = !!(ch_idx & 0x01);
	if (iudma->ep_num >= 0) {
		bep = &udc->bep[iudma->ep_num];
		bep->iudma = iudma;
		INIT_LIST_HEAD(&bep->queue);
	}

	iudma->bep = bep;
	iudma->udc = udc;

	/* ep0 is always active; others are controlled by the gadget driver.
	   ep_num <= 0 catches both ep0 channels: RX (-1) and TX (0). */
	if (iudma->ep_num <= 0)
		iudma->enabled = true;

	iudma->n_bds = n_bds;
	/* device-managed allocation: freed automatically on driver detach */
	iudma->bd_ring = dmam_alloc_coherent(udc->dev,
		n_bds * sizeof(struct bcm_enet_desc),
		&iudma->bd_ring_dma, GFP_KERNEL);
	if (!iudma->bd_ring)
		return -ENOMEM;
	iudma->end_bd = &iudma->bd_ring[n_bds - 1];

	return 0;
}
782 
783 /**
784  * iudma_init - One-time initialization of all IUDMA channels.
785  * @udc: Reference to the device controller.
786  *
787  * Enable DMA, flush channels, and enable global IUDMA IRQs.
788  */
789 static int iudma_init(struct bcm63xx_udc *udc)
790 {
791 	int i, rc;
792 
793 	usb_dma_writel(udc, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
794 
795 	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
796 		rc = iudma_init_channel(udc, i);
797 		if (rc)
798 			return rc;
799 		iudma_reset_channel(udc, &udc->iudma[i]);
800 	}
801 
802 	usb_dma_writel(udc, BIT(BCM63XX_NUM_IUDMA)-1, ENETDMA_GLB_IRQMASK_REG);
803 	return 0;
804 }
805 
806 /**
807  * iudma_uninit - Uninitialize IUDMA channels.
808  * @udc: Reference to the device controller.
809  *
810  * Kill global IUDMA IRQs, flush channels, and kill DMA.
811  */
812 static void iudma_uninit(struct bcm63xx_udc *udc)
813 {
814 	int i;
815 
816 	usb_dma_writel(udc, 0, ENETDMA_GLB_IRQMASK_REG);
817 
818 	for (i = 0; i < BCM63XX_NUM_IUDMA; i++)
819 		iudma_reset_channel(udc, &udc->iudma[i]);
820 
821 	usb_dma_writel(udc, 0, ENETDMA_CFG_REG);
822 }
823 
824 /***********************************************************************
825  * Other low-level USBD operations
826  ***********************************************************************/
827 
828 /**
829  * bcm63xx_set_ctrl_irqs - Mask/unmask control path interrupts.
830  * @udc: Reference to the device controller.
831  * @enable_irqs: true to enable, false to disable.
832  */
833 static void bcm63xx_set_ctrl_irqs(struct bcm63xx_udc *udc, bool enable_irqs)
834 {
835 	u32 val;
836 
837 	usbd_writel(udc, 0, USBD_STATUS_REG);
838 
839 	val = BIT(USBD_EVENT_IRQ_USB_RESET) |
840 	      BIT(USBD_EVENT_IRQ_SETUP) |
841 	      BIT(USBD_EVENT_IRQ_SETCFG) |
842 	      BIT(USBD_EVENT_IRQ_SETINTF) |
843 	      BIT(USBD_EVENT_IRQ_USB_LINK);
844 	usbd_writel(udc, enable_irqs ? val : 0, USBD_EVENT_IRQ_MASK_REG);
845 	usbd_writel(udc, val, USBD_EVENT_IRQ_STATUS_REG);
846 }
847 
/**
 * bcm63xx_select_phy_mode - Select between USB device and host mode.
 * @udc: Reference to the device controller.
 * @is_device: true for device, false for host.
 *
 * This should probably be reworked to use the drivers/usb/otg
 * infrastructure.
 *
 * By default, the AFE/pullups are disabled in device mode, until
 * bcm63xx_select_pullup() is called.
 */
static void bcm63xx_select_phy_mode(struct bcm63xx_udc *udc, bool is_device)
{
	u32 val, portmask = BIT(udc->pd->port_no);

	if (BCMCPU_IS_6328()) {
		/* configure pinmux to sense VBUS signal */
		val = bcm_gpio_readl(GPIO_PINMUX_OTHR_REG);
		val &= ~GPIO_PINMUX_OTHR_6328_USB_MASK;
		val |= is_device ? GPIO_PINMUX_OTHR_6328_USB_DEV :
			       GPIO_PINMUX_OTHR_6328_USB_HOST;
		bcm_gpio_writel(val, GPIO_PINMUX_OTHR_REG);
	}

	/* device mode: set HOSTB and NODRIV for our port; host mode clears */
	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
	if (is_device) {
		val |= (portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	} else {
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	}
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);

	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_SWAP_6368_REG);
	if (is_device)
		val |= USBH_PRIV_SWAP_USBD_MASK;
	else
		val &= ~USBH_PRIV_SWAP_USBD_MASK;
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_SWAP_6368_REG);
}
889 
890 /**
891  * bcm63xx_select_pullup - Enable/disable the pullup on D+
892  * @udc: Reference to the device controller.
893  * @is_on: true to enable the pullup, false to disable.
894  *
895  * If the pullup is active, the host will sense a FS/HS device connected to
896  * the port.  If the pullup is inactive, the host will think the USB
897  * device has been disconnected.
898  */
899 static void bcm63xx_select_pullup(struct bcm63xx_udc *udc, bool is_on)
900 {
901 	u32 val, portmask = BIT(udc->pd->port_no);
902 
903 	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
904 	if (is_on)
905 		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
906 	else
907 		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
908 	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
909 }
910 
/**
 * bcm63xx_uninit_udc_hw - Shut down the hardware prior to driver removal.
 * @udc: Reference to the device controller.
 *
 * This just masks the IUDMA IRQs and releases the clocks.  It is assumed
 * that bcm63xx_udc_stop() has already run, and the clocks are stopped.
 */
static void bcm63xx_uninit_udc_hw(struct bcm63xx_udc *udc)
{
	/* the clocks must be re-enabled briefly to quiesce the IUDMA block */
	set_clocks(udc, true);
	iudma_uninit(udc);
	set_clocks(udc, false);

	clk_put(udc->usbd_clk);
	clk_put(udc->usbh_clk);
}
927 
928 /**
929  * bcm63xx_init_udc_hw - Initialize the controller hardware and data structures.
930  * @udc: Reference to the device controller.
931  */
932 static int bcm63xx_init_udc_hw(struct bcm63xx_udc *udc)
933 {
934 	int i, rc = 0;
935 	u32 val;
936 
937 	udc->ep0_ctrl_buf = devm_kzalloc(udc->dev, BCM63XX_MAX_CTRL_PKT,
938 					 GFP_KERNEL);
939 	if (!udc->ep0_ctrl_buf)
940 		return -ENOMEM;
941 
942 	INIT_LIST_HEAD(&udc->gadget.ep_list);
943 	for (i = 0; i < BCM63XX_NUM_EP; i++) {
944 		struct bcm63xx_ep *bep = &udc->bep[i];
945 
946 		bep->ep.name = bcm63xx_ep_name[i];
947 		bep->ep_num = i;
948 		bep->ep.ops = &bcm63xx_udc_ep_ops;
949 		list_add_tail(&bep->ep.ep_list, &udc->gadget.ep_list);
950 		bep->halted = 0;
951 		usb_ep_set_maxpacket_limit(&bep->ep, BCM63XX_MAX_CTRL_PKT);
952 		bep->udc = udc;
953 		bep->ep.desc = NULL;
954 		INIT_LIST_HEAD(&bep->queue);
955 	}
956 
957 	udc->gadget.ep0 = &udc->bep[0].ep;
958 	list_del(&udc->bep[0].ep.ep_list);
959 
960 	udc->gadget.speed = USB_SPEED_UNKNOWN;
961 	udc->ep0state = EP0_SHUTDOWN;
962 
963 	udc->usbh_clk = clk_get(udc->dev, "usbh");
964 	if (IS_ERR(udc->usbh_clk))
965 		return -EIO;
966 
967 	udc->usbd_clk = clk_get(udc->dev, "usbd");
968 	if (IS_ERR(udc->usbd_clk)) {
969 		clk_put(udc->usbh_clk);
970 		return -EIO;
971 	}
972 
973 	set_clocks(udc, true);
974 
975 	val = USBD_CONTROL_AUTO_CSRS_MASK |
976 	      USBD_CONTROL_DONE_CSRS_MASK |
977 	      (irq_coalesce ? USBD_CONTROL_RXZSCFG_MASK : 0);
978 	usbd_writel(udc, val, USBD_CONTROL_REG);
979 
980 	val = USBD_STRAPS_APP_SELF_PWR_MASK |
981 	      USBD_STRAPS_APP_RAM_IF_MASK |
982 	      USBD_STRAPS_APP_CSRPRGSUP_MASK |
983 	      USBD_STRAPS_APP_8BITPHY_MASK |
984 	      USBD_STRAPS_APP_RMTWKUP_MASK;
985 
986 	if (udc->gadget.max_speed == USB_SPEED_HIGH)
987 		val |= (BCM63XX_SPD_HIGH << USBD_STRAPS_SPEED_SHIFT);
988 	else
989 		val |= (BCM63XX_SPD_FULL << USBD_STRAPS_SPEED_SHIFT);
990 	usbd_writel(udc, val, USBD_STRAPS_REG);
991 
992 	bcm63xx_set_ctrl_irqs(udc, false);
993 
994 	usbd_writel(udc, 0, USBD_EVENT_IRQ_CFG_LO_REG);
995 
996 	val = USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_ENUM_ON) |
997 	      USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_SET_CSRS);
998 	usbd_writel(udc, val, USBD_EVENT_IRQ_CFG_HI_REG);
999 
1000 	rc = iudma_init(udc);
1001 	set_clocks(udc, false);
1002 	if (rc)
1003 		bcm63xx_uninit_udc_hw(udc);
1004 
1005 	return 0;
1006 }
1007 
1008 /***********************************************************************
1009  * Standard EP gadget operations
1010  ***********************************************************************/
1011 
1012 /**
1013  * bcm63xx_ep_enable - Enable one endpoint.
1014  * @ep: Endpoint to enable.
1015  * @desc: Contains max packet, direction, etc.
1016  *
1017  * Most of the endpoint parameters are fixed in this controller, so there
1018  * isn't much for this function to do.
1019  */
1020 static int bcm63xx_ep_enable(struct usb_ep *ep,
1021 	const struct usb_endpoint_descriptor *desc)
1022 {
1023 	struct bcm63xx_ep *bep = our_ep(ep);
1024 	struct bcm63xx_udc *udc = bep->udc;
1025 	struct iudma_ch *iudma = bep->iudma;
1026 	unsigned long flags;
1027 
1028 	if (!ep || !desc || ep->name == bcm63xx_ep0name)
1029 		return -EINVAL;
1030 
1031 	if (!udc->driver)
1032 		return -ESHUTDOWN;
1033 
1034 	spin_lock_irqsave(&udc->lock, flags);
1035 	if (iudma->enabled) {
1036 		spin_unlock_irqrestore(&udc->lock, flags);
1037 		return -EINVAL;
1038 	}
1039 
1040 	iudma->enabled = true;
1041 	BUG_ON(!list_empty(&bep->queue));
1042 
1043 	iudma_reset_channel(udc, iudma);
1044 
1045 	bep->halted = 0;
1046 	bcm63xx_set_stall(udc, bep, false);
1047 	clear_bit(bep->ep_num, &udc->wedgemap);
1048 
1049 	ep->desc = desc;
1050 	ep->maxpacket = usb_endpoint_maxp(desc);
1051 
1052 	spin_unlock_irqrestore(&udc->lock, flags);
1053 	return 0;
1054 }
1055 
1056 /**
1057  * bcm63xx_ep_disable - Disable one endpoint.
1058  * @ep: Endpoint to disable.
1059  */
1060 static int bcm63xx_ep_disable(struct usb_ep *ep)
1061 {
1062 	struct bcm63xx_ep *bep = our_ep(ep);
1063 	struct bcm63xx_udc *udc = bep->udc;
1064 	struct iudma_ch *iudma = bep->iudma;
1065 	struct list_head *pos, *n;
1066 	unsigned long flags;
1067 
1068 	if (!ep || !ep->desc)
1069 		return -EINVAL;
1070 
1071 	spin_lock_irqsave(&udc->lock, flags);
1072 	if (!iudma->enabled) {
1073 		spin_unlock_irqrestore(&udc->lock, flags);
1074 		return -EINVAL;
1075 	}
1076 	iudma->enabled = false;
1077 
1078 	iudma_reset_channel(udc, iudma);
1079 
1080 	if (!list_empty(&bep->queue)) {
1081 		list_for_each_safe(pos, n, &bep->queue) {
1082 			struct bcm63xx_req *breq =
1083 				list_entry(pos, struct bcm63xx_req, queue);
1084 
1085 			usb_gadget_unmap_request(&udc->gadget, &breq->req,
1086 						 iudma->is_tx);
1087 			list_del(&breq->queue);
1088 			breq->req.status = -ESHUTDOWN;
1089 
1090 			spin_unlock_irqrestore(&udc->lock, flags);
1091 			usb_gadget_giveback_request(&iudma->bep->ep, &breq->req);
1092 			spin_lock_irqsave(&udc->lock, flags);
1093 		}
1094 	}
1095 	ep->desc = NULL;
1096 
1097 	spin_unlock_irqrestore(&udc->lock, flags);
1098 	return 0;
1099 }
1100 
1101 /**
1102  * bcm63xx_udc_alloc_request - Allocate a new request.
1103  * @ep: Endpoint associated with the request.
1104  * @mem_flags: Flags to pass to kzalloc().
1105  */
1106 static struct usb_request *bcm63xx_udc_alloc_request(struct usb_ep *ep,
1107 	gfp_t mem_flags)
1108 {
1109 	struct bcm63xx_req *breq;
1110 
1111 	breq = kzalloc(sizeof(*breq), mem_flags);
1112 	if (!breq)
1113 		return NULL;
1114 	return &breq->req;
1115 }
1116 
1117 /**
1118  * bcm63xx_udc_free_request - Free a request.
1119  * @ep: Endpoint associated with the request.
1120  * @req: Request to free.
1121  */
1122 static void bcm63xx_udc_free_request(struct usb_ep *ep,
1123 	struct usb_request *req)
1124 {
1125 	struct bcm63xx_req *breq = our_req(req);
1126 	kfree(breq);
1127 }
1128 
1129 /**
1130  * bcm63xx_udc_queue - Queue up a new request.
1131  * @ep: Endpoint associated with the request.
1132  * @req: Request to add.
1133  * @mem_flags: Unused.
1134  *
1135  * If the queue is empty, start this request immediately.  Otherwise, add
1136  * it to the list.
1137  *
1138  * ep0 replies are sent through this function from the gadget driver, but
1139  * they are treated differently because they need to be handled by the ep0
1140  * state machine.  (Sometimes they are replies to control requests that
1141  * were spoofed by this driver, and so they shouldn't be transmitted at all.)
1142  */
1143 static int bcm63xx_udc_queue(struct usb_ep *ep, struct usb_request *req,
1144 	gfp_t mem_flags)
1145 {
1146 	struct bcm63xx_ep *bep = our_ep(ep);
1147 	struct bcm63xx_udc *udc = bep->udc;
1148 	struct bcm63xx_req *breq = our_req(req);
1149 	unsigned long flags;
1150 	int rc = 0;
1151 
1152 	if (unlikely(!req || !req->complete || !req->buf || !ep))
1153 		return -EINVAL;
1154 
1155 	req->actual = 0;
1156 	req->status = 0;
1157 	breq->offset = 0;
1158 
1159 	if (bep == &udc->bep[0]) {
1160 		/* only one reply per request, please */
1161 		if (udc->ep0_reply)
1162 			return -EINVAL;
1163 
1164 		udc->ep0_reply = req;
1165 		schedule_work(&udc->ep0_wq);
1166 		return 0;
1167 	}
1168 
1169 	spin_lock_irqsave(&udc->lock, flags);
1170 	if (!bep->iudma->enabled) {
1171 		rc = -ESHUTDOWN;
1172 		goto out;
1173 	}
1174 
1175 	rc = usb_gadget_map_request(&udc->gadget, req, bep->iudma->is_tx);
1176 	if (rc == 0) {
1177 		list_add_tail(&breq->queue, &bep->queue);
1178 		if (list_is_singular(&bep->queue))
1179 			iudma_write(udc, bep->iudma, breq);
1180 	}
1181 
1182 out:
1183 	spin_unlock_irqrestore(&udc->lock, flags);
1184 	return rc;
1185 }
1186 
1187 /**
1188  * bcm63xx_udc_dequeue - Remove a pending request from the queue.
1189  * @ep: Endpoint associated with the request.
1190  * @req: Request to remove.
1191  *
1192  * If the request is not at the head of the queue, this is easy - just nuke
1193  * it.  If the request is at the head of the queue, we'll need to stop the
1194  * DMA transaction and then queue up the successor.
1195  */
1196 static int bcm63xx_udc_dequeue(struct usb_ep *ep, struct usb_request *req)
1197 {
1198 	struct bcm63xx_ep *bep = our_ep(ep);
1199 	struct bcm63xx_udc *udc = bep->udc;
1200 	struct bcm63xx_req *breq = our_req(req), *cur;
1201 	unsigned long flags;
1202 	int rc = 0;
1203 
1204 	spin_lock_irqsave(&udc->lock, flags);
1205 	if (list_empty(&bep->queue)) {
1206 		rc = -EINVAL;
1207 		goto out;
1208 	}
1209 
1210 	cur = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
1211 	usb_gadget_unmap_request(&udc->gadget, &breq->req, bep->iudma->is_tx);
1212 
1213 	if (breq == cur) {
1214 		iudma_reset_channel(udc, bep->iudma);
1215 		list_del(&breq->queue);
1216 
1217 		if (!list_empty(&bep->queue)) {
1218 			struct bcm63xx_req *next;
1219 
1220 			next = list_first_entry(&bep->queue,
1221 				struct bcm63xx_req, queue);
1222 			iudma_write(udc, bep->iudma, next);
1223 		}
1224 	} else {
1225 		list_del(&breq->queue);
1226 	}
1227 
1228 out:
1229 	spin_unlock_irqrestore(&udc->lock, flags);
1230 
1231 	req->status = -ESHUTDOWN;
1232 	req->complete(ep, req);
1233 
1234 	return rc;
1235 }
1236 
1237 /**
1238  * bcm63xx_udc_set_halt - Enable/disable STALL flag in the hardware.
1239  * @ep: Endpoint to halt.
1240  * @value: Zero to clear halt; nonzero to set halt.
1241  *
1242  * See comments in bcm63xx_update_wedge().
1243  */
1244 static int bcm63xx_udc_set_halt(struct usb_ep *ep, int value)
1245 {
1246 	struct bcm63xx_ep *bep = our_ep(ep);
1247 	struct bcm63xx_udc *udc = bep->udc;
1248 	unsigned long flags;
1249 
1250 	spin_lock_irqsave(&udc->lock, flags);
1251 	bcm63xx_set_stall(udc, bep, !!value);
1252 	bep->halted = value;
1253 	spin_unlock_irqrestore(&udc->lock, flags);
1254 
1255 	return 0;
1256 }
1257 
1258 /**
1259  * bcm63xx_udc_set_wedge - Stall the endpoint until the next reset.
1260  * @ep: Endpoint to wedge.
1261  *
1262  * See comments in bcm63xx_update_wedge().
1263  */
1264 static int bcm63xx_udc_set_wedge(struct usb_ep *ep)
1265 {
1266 	struct bcm63xx_ep *bep = our_ep(ep);
1267 	struct bcm63xx_udc *udc = bep->udc;
1268 	unsigned long flags;
1269 
1270 	spin_lock_irqsave(&udc->lock, flags);
1271 	set_bit(bep->ep_num, &udc->wedgemap);
1272 	bcm63xx_set_stall(udc, bep, true);
1273 	spin_unlock_irqrestore(&udc->lock, flags);
1274 
1275 	return 0;
1276 }
1277 
/* Per-endpoint operations exported to the gadget core */
static const struct usb_ep_ops bcm63xx_udc_ep_ops = {
	.enable		= bcm63xx_ep_enable,
	.disable	= bcm63xx_ep_disable,

	.alloc_request	= bcm63xx_udc_alloc_request,
	.free_request	= bcm63xx_udc_free_request,

	.queue		= bcm63xx_udc_queue,
	.dequeue	= bcm63xx_udc_dequeue,

	.set_halt	= bcm63xx_udc_set_halt,
	.set_wedge	= bcm63xx_udc_set_wedge,
};
1291 
1292 /***********************************************************************
1293  * EP0 handling
1294  ***********************************************************************/
1295 
1296 /**
1297  * bcm63xx_ep0_setup_callback - Drop spinlock to invoke ->setup callback.
1298  * @udc: Reference to the device controller.
1299  * @ctrl: 8-byte SETUP request.
1300  */
1301 static int bcm63xx_ep0_setup_callback(struct bcm63xx_udc *udc,
1302 	struct usb_ctrlrequest *ctrl)
1303 {
1304 	int rc;
1305 
1306 	spin_unlock_irq(&udc->lock);
1307 	rc = udc->driver->setup(&udc->gadget, ctrl);
1308 	spin_lock_irq(&udc->lock);
1309 	return rc;
1310 }
1311 
1312 /**
1313  * bcm63xx_ep0_spoof_set_cfg - Synthesize a SET_CONFIGURATION request.
1314  * @udc: Reference to the device controller.
1315  *
1316  * Many standard requests are handled automatically in the hardware, but
1317  * we still need to pass them to the gadget driver so that it can
1318  * reconfigure the interfaces/endpoints if necessary.
1319  *
1320  * Unfortunately we are not able to send a STALL response if the host
1321  * requests an invalid configuration.  If this happens, we'll have to be
1322  * content with printing a warning.
1323  */
1324 static int bcm63xx_ep0_spoof_set_cfg(struct bcm63xx_udc *udc)
1325 {
1326 	struct usb_ctrlrequest ctrl;
1327 	int rc;
1328 
1329 	ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_DEVICE;
1330 	ctrl.bRequest = USB_REQ_SET_CONFIGURATION;
1331 	ctrl.wValue = cpu_to_le16(udc->cfg);
1332 	ctrl.wIndex = 0;
1333 	ctrl.wLength = 0;
1334 
1335 	rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
1336 	if (rc < 0) {
1337 		dev_warn_ratelimited(udc->dev,
1338 			"hardware auto-acked bad SET_CONFIGURATION(%d) request\n",
1339 			udc->cfg);
1340 	}
1341 	return rc;
1342 }
1343 
1344 /**
1345  * bcm63xx_ep0_spoof_set_iface - Synthesize a SET_INTERFACE request.
1346  * @udc: Reference to the device controller.
1347  */
1348 static int bcm63xx_ep0_spoof_set_iface(struct bcm63xx_udc *udc)
1349 {
1350 	struct usb_ctrlrequest ctrl;
1351 	int rc;
1352 
1353 	ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_INTERFACE;
1354 	ctrl.bRequest = USB_REQ_SET_INTERFACE;
1355 	ctrl.wValue = cpu_to_le16(udc->alt_iface);
1356 	ctrl.wIndex = cpu_to_le16(udc->iface);
1357 	ctrl.wLength = 0;
1358 
1359 	rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
1360 	if (rc < 0) {
1361 		dev_warn_ratelimited(udc->dev,
1362 			"hardware auto-acked bad SET_INTERFACE(%d,%d) request\n",
1363 			udc->iface, udc->alt_iface);
1364 	}
1365 	return rc;
1366 }
1367 
1368 /**
1369  * bcm63xx_ep0_map_write - dma_map and iudma_write a single request.
1370  * @udc: Reference to the device controller.
1371  * @ch_idx: IUDMA channel number.
1372  * @req: USB gadget layer representation of the request.
1373  */
1374 static void bcm63xx_ep0_map_write(struct bcm63xx_udc *udc, int ch_idx,
1375 	struct usb_request *req)
1376 {
1377 	struct bcm63xx_req *breq = our_req(req);
1378 	struct iudma_ch *iudma = &udc->iudma[ch_idx];
1379 
1380 	BUG_ON(udc->ep0_request);
1381 	udc->ep0_request = req;
1382 
1383 	req->actual = 0;
1384 	breq->offset = 0;
1385 	usb_gadget_map_request(&udc->gadget, req, iudma->is_tx);
1386 	iudma_write(udc, iudma, breq);
1387 }
1388 
1389 /**
1390  * bcm63xx_ep0_complete - Set completion status and "stage" the callback.
1391  * @udc: Reference to the device controller.
1392  * @req: USB gadget layer representation of the request.
1393  * @status: Status to return to the gadget driver.
1394  */
1395 static void bcm63xx_ep0_complete(struct bcm63xx_udc *udc,
1396 	struct usb_request *req, int status)
1397 {
1398 	req->status = status;
1399 	if (status)
1400 		req->actual = 0;
1401 	if (req->complete) {
1402 		spin_unlock_irq(&udc->lock);
1403 		req->complete(&udc->bep[0].ep, req);
1404 		spin_lock_irq(&udc->lock);
1405 	}
1406 }
1407 
1408 /**
1409  * bcm63xx_ep0_nuke_reply - Abort request from the gadget driver due to
1410  *   reset/shutdown.
1411  * @udc: Reference to the device controller.
1412  * @is_tx: Nonzero for TX (IN), zero for RX (OUT).
1413  */
1414 static void bcm63xx_ep0_nuke_reply(struct bcm63xx_udc *udc, int is_tx)
1415 {
1416 	struct usb_request *req = udc->ep0_reply;
1417 
1418 	udc->ep0_reply = NULL;
1419 	usb_gadget_unmap_request(&udc->gadget, req, is_tx);
1420 	if (udc->ep0_request == req) {
1421 		udc->ep0_req_completed = 0;
1422 		udc->ep0_request = NULL;
1423 	}
1424 	bcm63xx_ep0_complete(udc, req, -ESHUTDOWN);
1425 }
1426 
1427 /**
1428  * bcm63xx_ep0_read_complete - Close out the pending ep0 request; return
1429  *   transfer len.
1430  * @udc: Reference to the device controller.
1431  */
1432 static int bcm63xx_ep0_read_complete(struct bcm63xx_udc *udc)
1433 {
1434 	struct usb_request *req = udc->ep0_request;
1435 
1436 	udc->ep0_req_completed = 0;
1437 	udc->ep0_request = NULL;
1438 
1439 	return req->actual;
1440 }
1441 
1442 /**
1443  * bcm63xx_ep0_internal_request - Helper function to submit an ep0 request.
1444  * @udc: Reference to the device controller.
1445  * @ch_idx: IUDMA channel number.
1446  * @length: Number of bytes to TX/RX.
1447  *
1448  * Used for simple transfers performed by the ep0 worker.  This will always
1449  * use ep0_ctrl_req / ep0_ctrl_buf.
1450  */
1451 static void bcm63xx_ep0_internal_request(struct bcm63xx_udc *udc, int ch_idx,
1452 	int length)
1453 {
1454 	struct usb_request *req = &udc->ep0_ctrl_req.req;
1455 
1456 	req->buf = udc->ep0_ctrl_buf;
1457 	req->length = length;
1458 	req->complete = NULL;
1459 
1460 	bcm63xx_ep0_map_write(udc, ch_idx, req);
1461 }
1462 
1463 /**
1464  * bcm63xx_ep0_do_setup - Parse new SETUP packet and decide how to handle it.
1465  * @udc: Reference to the device controller.
1466  *
1467  * EP0_IDLE probably shouldn't ever happen.  EP0_REQUEUE means we're ready
1468  * for the next packet.  Anything else means the transaction requires multiple
1469  * stages of handling.
1470  */
1471 static enum bcm63xx_ep0_state bcm63xx_ep0_do_setup(struct bcm63xx_udc *udc)
1472 {
1473 	int rc;
1474 	struct usb_ctrlrequest *ctrl = (void *)udc->ep0_ctrl_buf;
1475 
1476 	rc = bcm63xx_ep0_read_complete(udc);
1477 
1478 	if (rc < 0) {
1479 		dev_err(udc->dev, "missing SETUP packet\n");
1480 		return EP0_IDLE;
1481 	}
1482 
1483 	/*
1484 	 * Handle 0-byte IN STATUS acknowledgement.  The hardware doesn't
1485 	 * ALWAYS deliver these 100% of the time, so if we happen to see one,
1486 	 * just throw it away.
1487 	 */
1488 	if (rc == 0)
1489 		return EP0_REQUEUE;
1490 
1491 	/* Drop malformed SETUP packets */
1492 	if (rc != sizeof(*ctrl)) {
1493 		dev_warn_ratelimited(udc->dev,
1494 			"malformed SETUP packet (%d bytes)\n", rc);
1495 		return EP0_REQUEUE;
1496 	}
1497 
1498 	/* Process new SETUP packet arriving on ep0 */
1499 	rc = bcm63xx_ep0_setup_callback(udc, ctrl);
1500 	if (rc < 0) {
1501 		bcm63xx_set_stall(udc, &udc->bep[0], true);
1502 		return EP0_REQUEUE;
1503 	}
1504 
1505 	if (!ctrl->wLength)
1506 		return EP0_REQUEUE;
1507 	else if (ctrl->bRequestType & USB_DIR_IN)
1508 		return EP0_IN_DATA_PHASE_SETUP;
1509 	else
1510 		return EP0_OUT_DATA_PHASE_SETUP;
1511 }
1512 
1513 /**
1514  * bcm63xx_ep0_do_idle - Check for outstanding requests if ep0 is idle.
1515  * @udc: Reference to the device controller.
1516  *
1517  * In state EP0_IDLE, the RX descriptor is either pending, or has been
1518  * filled with a SETUP packet from the host.  This function handles new
1519  * SETUP packets, control IRQ events (which can generate fake SETUP packets),
1520  * and reset/shutdown events.
1521  *
1522  * Returns 0 if work was done; -EAGAIN if nothing to do.
1523  */
1524 static int bcm63xx_ep0_do_idle(struct bcm63xx_udc *udc)
1525 {
1526 	if (udc->ep0_req_reset) {
1527 		udc->ep0_req_reset = 0;
1528 	} else if (udc->ep0_req_set_cfg) {
1529 		udc->ep0_req_set_cfg = 0;
1530 		if (bcm63xx_ep0_spoof_set_cfg(udc) >= 0)
1531 			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
1532 	} else if (udc->ep0_req_set_iface) {
1533 		udc->ep0_req_set_iface = 0;
1534 		if (bcm63xx_ep0_spoof_set_iface(udc) >= 0)
1535 			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
1536 	} else if (udc->ep0_req_completed) {
1537 		udc->ep0state = bcm63xx_ep0_do_setup(udc);
1538 		return udc->ep0state == EP0_IDLE ? -EAGAIN : 0;
1539 	} else if (udc->ep0_req_shutdown) {
1540 		udc->ep0_req_shutdown = 0;
1541 		udc->ep0_req_completed = 0;
1542 		udc->ep0_request = NULL;
1543 		iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
1544 		usb_gadget_unmap_request(&udc->gadget,
1545 			&udc->ep0_ctrl_req.req, 0);
1546 
1547 		/* bcm63xx_udc_pullup() is waiting for this */
1548 		mb();
1549 		udc->ep0state = EP0_SHUTDOWN;
1550 	} else if (udc->ep0_reply) {
1551 		/*
1552 		 * This could happen if a USB RESET shows up during an ep0
1553 		 * transaction (especially if a laggy driver like gadgetfs
1554 		 * is in use).
1555 		 */
1556 		dev_warn(udc->dev, "nuking unexpected reply\n");
1557 		bcm63xx_ep0_nuke_reply(udc, 0);
1558 	} else {
1559 		return -EAGAIN;
1560 	}
1561 
1562 	return 0;
1563 }
1564 
1565 /**
1566  * bcm63xx_ep0_one_round - Handle the current ep0 state.
1567  * @udc: Reference to the device controller.
1568  *
1569  * Returns 0 if work was done; -EAGAIN if nothing to do.
1570  */
1571 static int bcm63xx_ep0_one_round(struct bcm63xx_udc *udc)
1572 {
1573 	enum bcm63xx_ep0_state ep0state = udc->ep0state;
1574 	bool shutdown = udc->ep0_req_reset || udc->ep0_req_shutdown;
1575 
1576 	switch (udc->ep0state) {
1577 	case EP0_REQUEUE:
1578 		/* set up descriptor to receive SETUP packet */
1579 		bcm63xx_ep0_internal_request(udc, IUDMA_EP0_RXCHAN,
1580 					     BCM63XX_MAX_CTRL_PKT);
1581 		ep0state = EP0_IDLE;
1582 		break;
1583 	case EP0_IDLE:
1584 		return bcm63xx_ep0_do_idle(udc);
1585 	case EP0_IN_DATA_PHASE_SETUP:
1586 		/*
1587 		 * Normal case: TX request is in ep0_reply (queued by the
1588 		 * callback), or will be queued shortly.  When it's here,
1589 		 * send it to the HW and go to EP0_IN_DATA_PHASE_COMPLETE.
1590 		 *
1591 		 * Shutdown case: Stop waiting for the reply.  Just
1592 		 * REQUEUE->IDLE.  The gadget driver is NOT expected to
1593 		 * queue anything else now.
1594 		 */
1595 		if (udc->ep0_reply) {
1596 			bcm63xx_ep0_map_write(udc, IUDMA_EP0_TXCHAN,
1597 					      udc->ep0_reply);
1598 			ep0state = EP0_IN_DATA_PHASE_COMPLETE;
1599 		} else if (shutdown) {
1600 			ep0state = EP0_REQUEUE;
1601 		}
1602 		break;
1603 	case EP0_IN_DATA_PHASE_COMPLETE: {
1604 		/*
1605 		 * Normal case: TX packet (ep0_reply) is in flight; wait for
1606 		 * it to finish, then go back to REQUEUE->IDLE.
1607 		 *
1608 		 * Shutdown case: Reset the TX channel, send -ESHUTDOWN
1609 		 * completion to the gadget driver, then REQUEUE->IDLE.
1610 		 */
1611 		if (udc->ep0_req_completed) {
1612 			udc->ep0_reply = NULL;
1613 			bcm63xx_ep0_read_complete(udc);
1614 			/*
1615 			 * the "ack" sometimes gets eaten (see
1616 			 * bcm63xx_ep0_do_idle)
1617 			 */
1618 			ep0state = EP0_REQUEUE;
1619 		} else if (shutdown) {
1620 			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
1621 			bcm63xx_ep0_nuke_reply(udc, 1);
1622 			ep0state = EP0_REQUEUE;
1623 		}
1624 		break;
1625 	}
1626 	case EP0_OUT_DATA_PHASE_SETUP:
1627 		/* Similar behavior to EP0_IN_DATA_PHASE_SETUP */
1628 		if (udc->ep0_reply) {
1629 			bcm63xx_ep0_map_write(udc, IUDMA_EP0_RXCHAN,
1630 					      udc->ep0_reply);
1631 			ep0state = EP0_OUT_DATA_PHASE_COMPLETE;
1632 		} else if (shutdown) {
1633 			ep0state = EP0_REQUEUE;
1634 		}
1635 		break;
1636 	case EP0_OUT_DATA_PHASE_COMPLETE: {
1637 		/* Similar behavior to EP0_IN_DATA_PHASE_COMPLETE */
1638 		if (udc->ep0_req_completed) {
1639 			udc->ep0_reply = NULL;
1640 			bcm63xx_ep0_read_complete(udc);
1641 
1642 			/* send 0-byte ack to host */
1643 			bcm63xx_ep0_internal_request(udc, IUDMA_EP0_TXCHAN, 0);
1644 			ep0state = EP0_OUT_STATUS_PHASE;
1645 		} else if (shutdown) {
1646 			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
1647 			bcm63xx_ep0_nuke_reply(udc, 0);
1648 			ep0state = EP0_REQUEUE;
1649 		}
1650 		break;
1651 	}
1652 	case EP0_OUT_STATUS_PHASE:
1653 		/*
1654 		 * Normal case: 0-byte OUT ack packet is in flight; wait
1655 		 * for it to finish, then go back to REQUEUE->IDLE.
1656 		 *
1657 		 * Shutdown case: just cancel the transmission.  Don't bother
1658 		 * calling the completion, because it originated from this
1659 		 * function anyway.  Then go back to REQUEUE->IDLE.
1660 		 */
1661 		if (udc->ep0_req_completed) {
1662 			bcm63xx_ep0_read_complete(udc);
1663 			ep0state = EP0_REQUEUE;
1664 		} else if (shutdown) {
1665 			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
1666 			udc->ep0_request = NULL;
1667 			ep0state = EP0_REQUEUE;
1668 		}
1669 		break;
1670 	case EP0_IN_FAKE_STATUS_PHASE: {
1671 		/*
1672 		 * Normal case: we spoofed a SETUP packet and are now
1673 		 * waiting for the gadget driver to send a 0-byte reply.
1674 		 * This doesn't actually get sent to the HW because the
1675 		 * HW has already sent its own reply.  Once we get the
1676 		 * response, return to IDLE.
1677 		 *
1678 		 * Shutdown case: return to IDLE immediately.
1679 		 *
1680 		 * Note that the ep0 RX descriptor has remained queued
1681 		 * (and possibly unfilled) during this entire transaction.
1682 		 * The HW datapath (IUDMA) never even sees SET_CONFIGURATION
1683 		 * or SET_INTERFACE transactions.
1684 		 */
1685 		struct usb_request *r = udc->ep0_reply;
1686 
1687 		if (!r) {
1688 			if (shutdown)
1689 				ep0state = EP0_IDLE;
1690 			break;
1691 		}
1692 
1693 		bcm63xx_ep0_complete(udc, r, 0);
1694 		udc->ep0_reply = NULL;
1695 		ep0state = EP0_IDLE;
1696 		break;
1697 	}
1698 	case EP0_SHUTDOWN:
1699 		break;
1700 	}
1701 
1702 	if (udc->ep0state == ep0state)
1703 		return -EAGAIN;
1704 
1705 	udc->ep0state = ep0state;
1706 	return 0;
1707 }
1708 
1709 /**
1710  * bcm63xx_ep0_process - ep0 worker thread / state machine.
1711  * @w: Workqueue struct.
1712  *
1713  * bcm63xx_ep0_process is triggered any time an event occurs on ep0.  It
1714  * is used to synchronize ep0 events and ensure that both HW and SW events
1715  * occur in a well-defined order.  When the ep0 IUDMA queues are idle, it may
1716  * synthesize SET_CONFIGURATION / SET_INTERFACE requests that were consumed
1717  * by the USBD hardware.
1718  *
1719  * The worker function will continue iterating around the state machine
1720  * until there is nothing left to do.  Usually "nothing left to do" means
1721  * that we're waiting for a new event from the hardware.
1722  */
1723 static void bcm63xx_ep0_process(struct work_struct *w)
1724 {
1725 	struct bcm63xx_udc *udc = container_of(w, struct bcm63xx_udc, ep0_wq);
1726 	spin_lock_irq(&udc->lock);
1727 	while (bcm63xx_ep0_one_round(udc) == 0)
1728 		;
1729 	spin_unlock_irq(&udc->lock);
1730 }
1731 
1732 /***********************************************************************
1733  * Standard UDC gadget operations
1734  ***********************************************************************/
1735 
1736 /**
1737  * bcm63xx_udc_get_frame - Read current SOF frame number from the HW.
1738  * @gadget: USB slave device.
1739  */
1740 static int bcm63xx_udc_get_frame(struct usb_gadget *gadget)
1741 {
1742 	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1743 
1744 	return (usbd_readl(udc, USBD_STATUS_REG) &
1745 		USBD_STATUS_SOF_MASK) >> USBD_STATUS_SOF_SHIFT;
1746 }
1747 
1748 /**
1749  * bcm63xx_udc_pullup - Enable/disable pullup on D+ line.
1750  * @gadget: USB slave device.
1751  * @is_on: 0 to disable pullup, 1 to enable.
1752  *
1753  * See notes in bcm63xx_select_pullup().
1754  */
1755 static int bcm63xx_udc_pullup(struct usb_gadget *gadget, int is_on)
1756 {
1757 	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1758 	unsigned long flags;
1759 	int i, rc = -EINVAL;
1760 
1761 	spin_lock_irqsave(&udc->lock, flags);
1762 	if (is_on && udc->ep0state == EP0_SHUTDOWN) {
1763 		udc->gadget.speed = USB_SPEED_UNKNOWN;
1764 		udc->ep0state = EP0_REQUEUE;
1765 		bcm63xx_fifo_setup(udc);
1766 		bcm63xx_fifo_reset(udc);
1767 		bcm63xx_ep_setup(udc);
1768 
1769 		bitmap_zero(&udc->wedgemap, BCM63XX_NUM_EP);
1770 		for (i = 0; i < BCM63XX_NUM_EP; i++)
1771 			bcm63xx_set_stall(udc, &udc->bep[i], false);
1772 
1773 		bcm63xx_set_ctrl_irqs(udc, true);
1774 		bcm63xx_select_pullup(gadget_to_udc(gadget), true);
1775 		rc = 0;
1776 	} else if (!is_on && udc->ep0state != EP0_SHUTDOWN) {
1777 		bcm63xx_select_pullup(gadget_to_udc(gadget), false);
1778 
1779 		udc->ep0_req_shutdown = 1;
1780 		spin_unlock_irqrestore(&udc->lock, flags);
1781 
1782 		while (1) {
1783 			schedule_work(&udc->ep0_wq);
1784 			if (udc->ep0state == EP0_SHUTDOWN)
1785 				break;
1786 			msleep(50);
1787 		}
1788 		bcm63xx_set_ctrl_irqs(udc, false);
1789 		cancel_work_sync(&udc->ep0_wq);
1790 		return 0;
1791 	}
1792 
1793 	spin_unlock_irqrestore(&udc->lock, flags);
1794 	return rc;
1795 }
1796 
1797 /**
1798  * bcm63xx_udc_start - Start the controller.
1799  * @gadget: USB slave device.
1800  * @driver: Driver for USB slave devices.
1801  */
1802 static int bcm63xx_udc_start(struct usb_gadget *gadget,
1803 		struct usb_gadget_driver *driver)
1804 {
1805 	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1806 	unsigned long flags;
1807 
1808 	if (!driver || driver->max_speed < USB_SPEED_HIGH ||
1809 	    !driver->setup)
1810 		return -EINVAL;
1811 	if (!udc)
1812 		return -ENODEV;
1813 	if (udc->driver)
1814 		return -EBUSY;
1815 
1816 	spin_lock_irqsave(&udc->lock, flags);
1817 
1818 	set_clocks(udc, true);
1819 	bcm63xx_fifo_setup(udc);
1820 	bcm63xx_ep_init(udc);
1821 	bcm63xx_ep_setup(udc);
1822 	bcm63xx_fifo_reset(udc);
1823 	bcm63xx_select_phy_mode(udc, true);
1824 
1825 	udc->driver = driver;
1826 	driver->driver.bus = NULL;
1827 	udc->gadget.dev.of_node = udc->dev->of_node;
1828 
1829 	spin_unlock_irqrestore(&udc->lock, flags);
1830 
1831 	return 0;
1832 }
1833 
1834 /**
1835  * bcm63xx_udc_stop - Shut down the controller.
1836  * @gadget: USB slave device.
1837  * @driver: Driver for USB slave devices.
1838  */
1839 static int bcm63xx_udc_stop(struct usb_gadget *gadget)
1840 {
1841 	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1842 	unsigned long flags;
1843 
1844 	spin_lock_irqsave(&udc->lock, flags);
1845 
1846 	udc->driver = NULL;
1847 
1848 	/*
1849 	 * If we switch the PHY too abruptly after dropping D+, the host
1850 	 * will often complain:
1851 	 *
1852 	 *     hub 1-0:1.0: port 1 disabled by hub (EMI?), re-enabling...
1853 	 */
1854 	msleep(100);
1855 
1856 	bcm63xx_select_phy_mode(udc, false);
1857 	set_clocks(udc, false);
1858 
1859 	spin_unlock_irqrestore(&udc->lock, flags);
1860 
1861 	return 0;
1862 }
1863 
/* Gadget-level operations exported to the UDC core */
static const struct usb_gadget_ops bcm63xx_udc_ops = {
	.get_frame	= bcm63xx_udc_get_frame,
	.pullup		= bcm63xx_udc_pullup,
	.udc_start	= bcm63xx_udc_start,
	.udc_stop	= bcm63xx_udc_stop,
};
1870 
1871 /***********************************************************************
1872  * IRQ handling
1873  ***********************************************************************/
1874 
1875 /**
1876  * bcm63xx_update_cfg_iface - Read current configuration/interface settings.
1877  * @udc: Reference to the device controller.
1878  *
1879  * This controller intercepts SET_CONFIGURATION and SET_INTERFACE messages.
1880  * The driver never sees the raw control packets coming in on the ep0
1881  * IUDMA channel, but at least we get an interrupt event to tell us that
1882  * new values are waiting in the USBD_STATUS register.
1883  */
1884 static void bcm63xx_update_cfg_iface(struct bcm63xx_udc *udc)
1885 {
1886 	u32 reg = usbd_readl(udc, USBD_STATUS_REG);
1887 
1888 	udc->cfg = (reg & USBD_STATUS_CFG_MASK) >> USBD_STATUS_CFG_SHIFT;
1889 	udc->iface = (reg & USBD_STATUS_INTF_MASK) >> USBD_STATUS_INTF_SHIFT;
1890 	udc->alt_iface = (reg & USBD_STATUS_ALTINTF_MASK) >>
1891 			 USBD_STATUS_ALTINTF_SHIFT;
1892 	bcm63xx_ep_setup(udc);
1893 }
1894 
1895 /**
1896  * bcm63xx_update_link_speed - Check to see if the link speed has changed.
1897  * @udc: Reference to the device controller.
1898  *
1899  * The link speed update coincides with a SETUP IRQ.  Returns 1 if the
1900  * speed has changed, so that the caller can update the endpoint settings.
1901  */
1902 static int bcm63xx_update_link_speed(struct bcm63xx_udc *udc)
1903 {
1904 	u32 reg = usbd_readl(udc, USBD_STATUS_REG);
1905 	enum usb_device_speed oldspeed = udc->gadget.speed;
1906 
1907 	switch ((reg & USBD_STATUS_SPD_MASK) >> USBD_STATUS_SPD_SHIFT) {
1908 	case BCM63XX_SPD_HIGH:
1909 		udc->gadget.speed = USB_SPEED_HIGH;
1910 		break;
1911 	case BCM63XX_SPD_FULL:
1912 		udc->gadget.speed = USB_SPEED_FULL;
1913 		break;
1914 	default:
1915 		/* this should never happen */
1916 		udc->gadget.speed = USB_SPEED_UNKNOWN;
1917 		dev_err(udc->dev,
1918 			"received SETUP packet with invalid link speed\n");
1919 		return 0;
1920 	}
1921 
1922 	if (udc->gadget.speed != oldspeed) {
1923 		dev_info(udc->dev, "link up, %s-speed mode\n",
1924 			 udc->gadget.speed == USB_SPEED_HIGH ? "high" : "full");
1925 		return 1;
1926 	} else {
1927 		return 0;
1928 	}
1929 }
1930 
1931 /**
1932  * bcm63xx_update_wedge - Iterate through wedged endpoints.
1933  * @udc: Reference to the device controller.
1934  * @new_status: true to "refresh" wedge status; false to clear it.
1935  *
1936  * On a SETUP interrupt, we need to manually "refresh" the wedge status
1937  * because the controller hardware is designed to automatically clear
1938  * stalls in response to a CLEAR_FEATURE request from the host.
1939  *
1940  * On a RESET interrupt, we do want to restore all wedged endpoints.
1941  */
1942 static void bcm63xx_update_wedge(struct bcm63xx_udc *udc, bool new_status)
1943 {
1944 	int i;
1945 
1946 	for_each_set_bit(i, &udc->wedgemap, BCM63XX_NUM_EP) {
1947 		bcm63xx_set_stall(udc, &udc->bep[i], new_status);
1948 		if (!new_status)
1949 			clear_bit(i, &udc->wedgemap);
1950 	}
1951 }
1952 
1953 /**
1954  * bcm63xx_udc_ctrl_isr - ISR for control path events (USBD).
1955  * @irq: IRQ number (unused).
1956  * @dev_id: Reference to the device controller.
1957  *
1958  * This is where we handle link (VBUS) down, USB reset, speed changes,
1959  * SET_CONFIGURATION, and SET_INTERFACE events.
1960  */
static irqreturn_t bcm63xx_udc_ctrl_isr(int irq, void *dev_id)
{
	struct bcm63xx_udc *udc = dev_id;
	u32 stat;
	bool disconnected = false, bus_reset = false;

	/* Consider only events that are both pending and unmasked */
	stat = usbd_readl(udc, USBD_EVENT_IRQ_STATUS_REG) &
	       usbd_readl(udc, USBD_EVENT_IRQ_MASK_REG);

	/* Acknowledge the events we are about to handle before processing */
	usbd_writel(udc, stat, USBD_EVENT_IRQ_STATUS_REG);

	spin_lock(&udc->lock);
	if (stat & BIT(USBD_EVENT_IRQ_USB_LINK)) {
		/* VBUS toggled */

		/* Only announce "link down" if we were previously up */
		if (!(usbd_readl(udc, USBD_EVENTS_REG) &
		      USBD_EVENTS_USB_LINK_MASK) &&
		      udc->gadget.speed != USB_SPEED_UNKNOWN)
			dev_info(udc->dev, "link down\n");

		udc->gadget.speed = USB_SPEED_UNKNOWN;
		disconnected = true;
	}
	if (stat & BIT(USBD_EVENT_IRQ_USB_RESET)) {
		/* Reinitialize FIFOs and endpoints for the new bus state */
		bcm63xx_fifo_setup(udc);
		bcm63xx_fifo_reset(udc);
		bcm63xx_ep_setup(udc);

		/* A bus reset clears all wedged endpoints */
		bcm63xx_update_wedge(udc, false);

		/* Defer the gadget-visible reset work to the ep0 worker */
		udc->ep0_req_reset = 1;
		schedule_work(&udc->ep0_wq);
		bus_reset = true;
	}
	if (stat & BIT(USBD_EVENT_IRQ_SETUP)) {
		/* Reconfigure FIFOs/endpoints only if the link speed changed */
		if (bcm63xx_update_link_speed(udc)) {
			bcm63xx_fifo_setup(udc);
			bcm63xx_ep_setup(udc);
		}
		/* Re-stall wedged endpoints the hardware may have cleared */
		bcm63xx_update_wedge(udc, true);
	}
	if (stat & BIT(USBD_EVENT_IRQ_SETCFG)) {
		bcm63xx_update_cfg_iface(udc);
		udc->ep0_req_set_cfg = 1;
		schedule_work(&udc->ep0_wq);
	}
	if (stat & BIT(USBD_EVENT_IRQ_SETINTF)) {
		bcm63xx_update_cfg_iface(udc);
		udc->ep0_req_set_iface = 1;
		schedule_work(&udc->ep0_wq);
	}
	spin_unlock(&udc->lock);

	/* Gadget driver callbacks are invoked without holding udc->lock */
	if (disconnected && udc->driver)
		udc->driver->disconnect(&udc->gadget);
	else if (bus_reset && udc->driver)
		usb_gadget_udc_reset(&udc->gadget, udc->driver);

	return IRQ_HANDLED;
}
2021 
2022 /**
2023  * bcm63xx_udc_data_isr - ISR for data path events (IUDMA).
2024  * @irq: IRQ number (unused).
2025  * @dev_id: Reference to the IUDMA channel that generated the interrupt.
2026  *
2027  * For the two ep0 channels, we have special handling that triggers the
2028  * ep0 worker thread.  For normal bulk/intr channels, either queue up
2029  * the next buffer descriptor for the transaction (incomplete transaction),
2030  * or invoke the completion callback (complete transactions).
2031  */
static irqreturn_t bcm63xx_udc_data_isr(int irq, void *dev_id)
{
	struct iudma_ch *iudma = dev_id;
	struct bcm63xx_udc *udc = iudma->udc;
	struct bcm63xx_ep *bep;
	struct usb_request *req = NULL;
	struct bcm63xx_req *breq = NULL;
	int rc;
	bool is_done = false;

	spin_lock(&udc->lock);

	/* Acknowledge the "buffer done" interrupt for this channel */
	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
			ENETDMAC_IR_REG, iudma->ch_idx);
	bep = iudma->bep;
	/* Non-negative rc is the byte count from the completed BD(s);
	 * negative results are ignored below.
	 */
	rc = iudma_read(udc, iudma);

	/* special handling for EP0 RX (0) and TX (1) */
	if (iudma->ch_idx == IUDMA_EP0_RXCHAN ||
	    iudma->ch_idx == IUDMA_EP0_TXCHAN) {
		req = udc->ep0_request;
		breq = our_req(req);

		/* a single request could require multiple submissions */
		if (rc >= 0) {
			req->actual += rc;

			/* Done when the full length arrived, or on a short
			 * transfer (fewer bytes than the BD could hold).
			 */
			if (req->actual >= req->length || breq->bd_bytes > rc) {
				udc->ep0_req_completed = 1;
				is_done = true;
				/* completion is finished by the ep0 worker */
				schedule_work(&udc->ep0_wq);

				/* "actual" on a ZLP is 1 byte */
				req->actual = min(req->actual, req->length);
			} else {
				/* queue up the next BD (same request) */
				iudma_write(udc, iudma, breq);
			}
		}
	} else if (!list_empty(&bep->queue)) {
		/* normal bulk/interrupt channel: head of the ep queue */
		breq = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
		req = &breq->req;

		if (rc >= 0) {
			req->actual += rc;

			/* same completion test as the ep0 case above */
			if (req->actual >= req->length || breq->bd_bytes > rc) {
				is_done = true;
				list_del(&breq->queue);

				req->actual = min(req->actual, req->length);

				/* start the next queued request, if any */
				if (!list_empty(&bep->queue)) {
					struct bcm63xx_req *next;

					next = list_first_entry(&bep->queue,
						struct bcm63xx_req, queue);
					iudma_write(udc, iudma, next);
				}
			} else {
				/* same request, next BD */
				iudma_write(udc, iudma, breq);
			}
		}
	}
	spin_unlock(&udc->lock);

	/* Unmap and complete outside the lock; the completion callback
	 * may re-queue requests.
	 */
	if (is_done) {
		usb_gadget_unmap_request(&udc->gadget, req, iudma->is_tx);
		if (req->complete)
			req->complete(&bep->ep, req);
	}

	return IRQ_HANDLED;
}
2106 
2107 /***********************************************************************
2108  * Debug filesystem
2109  ***********************************************************************/
2110 
2111 /*
2112  * bcm63xx_usbd_dbg_show - Show USBD controller state.
2113  * @s: seq_file to which the information will be written.
2114  * @p: Unused.
2115  *
2116  * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/usbd
2117  */
2118 static int bcm63xx_usbd_dbg_show(struct seq_file *s, void *p)
2119 {
2120 	struct bcm63xx_udc *udc = s->private;
2121 
2122 	if (!udc->driver)
2123 		return -ENODEV;
2124 
2125 	seq_printf(s, "ep0 state: %s\n",
2126 		   bcm63xx_ep0_state_names[udc->ep0state]);
2127 	seq_printf(s, "  pending requests: %s%s%s%s%s%s%s\n",
2128 		   udc->ep0_req_reset ? "reset " : "",
2129 		   udc->ep0_req_set_cfg ? "set_cfg " : "",
2130 		   udc->ep0_req_set_iface ? "set_iface " : "",
2131 		   udc->ep0_req_shutdown ? "shutdown " : "",
2132 		   udc->ep0_request ? "pending " : "",
2133 		   udc->ep0_req_completed ? "completed " : "",
2134 		   udc->ep0_reply ? "reply " : "");
2135 	seq_printf(s, "cfg: %d; iface: %d; alt_iface: %d\n",
2136 		   udc->cfg, udc->iface, udc->alt_iface);
2137 	seq_printf(s, "regs:\n");
2138 	seq_printf(s, "  control: %08x; straps: %08x; status: %08x\n",
2139 		   usbd_readl(udc, USBD_CONTROL_REG),
2140 		   usbd_readl(udc, USBD_STRAPS_REG),
2141 		   usbd_readl(udc, USBD_STATUS_REG));
2142 	seq_printf(s, "  events:  %08x; stall:  %08x\n",
2143 		   usbd_readl(udc, USBD_EVENTS_REG),
2144 		   usbd_readl(udc, USBD_STALL_REG));
2145 
2146 	return 0;
2147 }
2148 
2149 /*
2150  * bcm63xx_iudma_dbg_show - Show IUDMA status and descriptors.
2151  * @s: seq_file to which the information will be written.
2152  * @p: Unused.
2153  *
2154  * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/iudma
2155  */
2156 static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
2157 {
2158 	struct bcm63xx_udc *udc = s->private;
2159 	int ch_idx, i;
2160 	u32 sram2, sram3;
2161 
2162 	if (!udc->driver)
2163 		return -ENODEV;
2164 
2165 	for (ch_idx = 0; ch_idx < BCM63XX_NUM_IUDMA; ch_idx++) {
2166 		struct iudma_ch *iudma = &udc->iudma[ch_idx];
2167 		struct list_head *pos;
2168 
2169 		seq_printf(s, "IUDMA channel %d -- ", ch_idx);
2170 		switch (iudma_defaults[ch_idx].ep_type) {
2171 		case BCMEP_CTRL:
2172 			seq_printf(s, "control");
2173 			break;
2174 		case BCMEP_BULK:
2175 			seq_printf(s, "bulk");
2176 			break;
2177 		case BCMEP_INTR:
2178 			seq_printf(s, "interrupt");
2179 			break;
2180 		}
2181 		seq_printf(s, ch_idx & 0x01 ? " tx" : " rx");
2182 		seq_printf(s, " [ep%d]:\n",
2183 			   max_t(int, iudma_defaults[ch_idx].ep_num, 0));
2184 		seq_printf(s, "  cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n",
2185 			   usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx),
2186 			   usb_dmac_readl(udc, ENETDMAC_IR_REG, ch_idx),
2187 			   usb_dmac_readl(udc, ENETDMAC_IRMASK_REG, ch_idx),
2188 			   usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG, ch_idx));
2189 
2190 		sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG, ch_idx);
2191 		sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG, ch_idx);
2192 		seq_printf(s, "  base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n",
2193 			   usb_dmas_readl(udc, ENETDMAS_RSTART_REG, ch_idx),
2194 			   sram2 >> 16, sram2 & 0xffff,
2195 			   sram3 >> 16, sram3 & 0xffff,
2196 			   usb_dmas_readl(udc, ENETDMAS_SRAM4_REG, ch_idx));
2197 		seq_printf(s, "  desc: %d/%d used", iudma->n_bds_used,
2198 			   iudma->n_bds);
2199 
2200 		if (iudma->bep) {
2201 			i = 0;
2202 			list_for_each(pos, &iudma->bep->queue)
2203 				i++;
2204 			seq_printf(s, "; %d queued\n", i);
2205 		} else {
2206 			seq_printf(s, "\n");
2207 		}
2208 
2209 		for (i = 0; i < iudma->n_bds; i++) {
2210 			struct bcm_enet_desc *d = &iudma->bd_ring[i];
2211 
2212 			seq_printf(s, "  %03x (%02x): len_stat: %04x_%04x; pa %08x",
2213 				   i * sizeof(*d), i,
2214 				   d->len_stat >> 16, d->len_stat & 0xffff,
2215 				   d->address);
2216 			if (d == iudma->read_bd)
2217 				seq_printf(s, "   <<RD");
2218 			if (d == iudma->write_bd)
2219 				seq_printf(s, "   <<WR");
2220 			seq_printf(s, "\n");
2221 		}
2222 
2223 		seq_printf(s, "\n");
2224 	}
2225 
2226 	return 0;
2227 }
2228 
2229 static int bcm63xx_usbd_dbg_open(struct inode *inode, struct file *file)
2230 {
2231 	return single_open(file, bcm63xx_usbd_dbg_show, inode->i_private);
2232 }
2233 
2234 static int bcm63xx_iudma_dbg_open(struct inode *inode, struct file *file)
2235 {
2236 	return single_open(file, bcm63xx_iudma_dbg_show, inode->i_private);
2237 }
2238 
2239 static const struct file_operations usbd_dbg_fops = {
2240 	.owner		= THIS_MODULE,
2241 	.open		= bcm63xx_usbd_dbg_open,
2242 	.llseek		= seq_lseek,
2243 	.read		= seq_read,
2244 	.release	= single_release,
2245 };
2246 
2247 static const struct file_operations iudma_dbg_fops = {
2248 	.owner		= THIS_MODULE,
2249 	.open		= bcm63xx_iudma_dbg_open,
2250 	.llseek		= seq_lseek,
2251 	.read		= seq_read,
2252 	.release	= single_release,
2253 };
2254 
2255 
2256 /**
2257  * bcm63xx_udc_init_debugfs - Create debugfs entries.
2258  * @udc: Reference to the device controller.
2259  */
2260 static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
2261 {
2262 	struct dentry *root, *usbd, *iudma;
2263 
2264 	if (!IS_ENABLED(CONFIG_USB_GADGET_DEBUG_FS))
2265 		return;
2266 
2267 	root = debugfs_create_dir(udc->gadget.name, NULL);
2268 	if (IS_ERR(root) || !root)
2269 		goto err_root;
2270 
2271 	usbd = debugfs_create_file("usbd", 0400, root, udc,
2272 			&usbd_dbg_fops);
2273 	if (!usbd)
2274 		goto err_usbd;
2275 	iudma = debugfs_create_file("iudma", 0400, root, udc,
2276 			&iudma_dbg_fops);
2277 	if (!iudma)
2278 		goto err_iudma;
2279 
2280 	udc->debugfs_root = root;
2281 	udc->debugfs_usbd = usbd;
2282 	udc->debugfs_iudma = iudma;
2283 	return;
2284 err_iudma:
2285 	debugfs_remove(usbd);
2286 err_usbd:
2287 	debugfs_remove(root);
2288 err_root:
2289 	dev_err(udc->dev, "debugfs is not available\n");
2290 }
2291 
2292 /**
2293  * bcm63xx_udc_cleanup_debugfs - Remove debugfs entries.
2294  * @udc: Reference to the device controller.
2295  *
2296  * debugfs_remove() is safe to call with a NULL argument.
2297  */
2298 static void bcm63xx_udc_cleanup_debugfs(struct bcm63xx_udc *udc)
2299 {
2300 	debugfs_remove(udc->debugfs_iudma);
2301 	debugfs_remove(udc->debugfs_usbd);
2302 	debugfs_remove(udc->debugfs_root);
2303 	udc->debugfs_iudma = NULL;
2304 	udc->debugfs_usbd = NULL;
2305 	udc->debugfs_root = NULL;
2306 }
2307 
2308 /***********************************************************************
2309  * Driver init/exit
2310  ***********************************************************************/
2311 
2312 /**
2313  * bcm63xx_udc_probe - Initialize a new instance of the UDC.
2314  * @pdev: Platform device struct from the bcm63xx BSP code.
2315  *
2316  * Note that platform data is required, because pd.port_no varies from chip
2317  * to chip and is used to switch the correct USB port to device mode.
2318  */
2319 static int bcm63xx_udc_probe(struct platform_device *pdev)
2320 {
2321 	struct device *dev = &pdev->dev;
2322 	struct bcm63xx_usbd_platform_data *pd = dev_get_platdata(dev);
2323 	struct bcm63xx_udc *udc;
2324 	struct resource *res;
2325 	int rc = -ENOMEM, i, irq;
2326 
2327 	udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
2328 	if (!udc)
2329 		return -ENOMEM;
2330 
2331 	platform_set_drvdata(pdev, udc);
2332 	udc->dev = dev;
2333 	udc->pd = pd;
2334 
2335 	if (!pd) {
2336 		dev_err(dev, "missing platform data\n");
2337 		return -EINVAL;
2338 	}
2339 
2340 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2341 	udc->usbd_regs = devm_ioremap_resource(dev, res);
2342 	if (IS_ERR(udc->usbd_regs))
2343 		return PTR_ERR(udc->usbd_regs);
2344 
2345 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2346 	udc->iudma_regs = devm_ioremap_resource(dev, res);
2347 	if (IS_ERR(udc->iudma_regs))
2348 		return PTR_ERR(udc->iudma_regs);
2349 
2350 	spin_lock_init(&udc->lock);
2351 	INIT_WORK(&udc->ep0_wq, bcm63xx_ep0_process);
2352 
2353 	udc->gadget.ops = &bcm63xx_udc_ops;
2354 	udc->gadget.name = dev_name(dev);
2355 
2356 	if (!pd->use_fullspeed && !use_fullspeed)
2357 		udc->gadget.max_speed = USB_SPEED_HIGH;
2358 	else
2359 		udc->gadget.max_speed = USB_SPEED_FULL;
2360 
2361 	/* request clocks, allocate buffers, and clear any pending IRQs */
2362 	rc = bcm63xx_init_udc_hw(udc);
2363 	if (rc)
2364 		return rc;
2365 
2366 	rc = -ENXIO;
2367 
2368 	/* IRQ resource #0: control interrupt (VBUS, speed, etc.) */
2369 	irq = platform_get_irq(pdev, 0);
2370 	if (irq < 0) {
2371 		dev_err(dev, "missing IRQ resource #0\n");
2372 		goto out_uninit;
2373 	}
2374 	if (devm_request_irq(dev, irq, &bcm63xx_udc_ctrl_isr, 0,
2375 			     dev_name(dev), udc) < 0) {
2376 		dev_err(dev, "error requesting IRQ #%d\n", irq);
2377 		goto out_uninit;
2378 	}
2379 
2380 	/* IRQ resources #1-6: data interrupts for IUDMA channels 0-5 */
2381 	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
2382 		irq = platform_get_irq(pdev, i + 1);
2383 		if (irq < 0) {
2384 			dev_err(dev, "missing IRQ resource #%d\n", i + 1);
2385 			goto out_uninit;
2386 		}
2387 		if (devm_request_irq(dev, irq, &bcm63xx_udc_data_isr, 0,
2388 				     dev_name(dev), &udc->iudma[i]) < 0) {
2389 			dev_err(dev, "error requesting IRQ #%d\n", irq);
2390 			goto out_uninit;
2391 		}
2392 	}
2393 
2394 	bcm63xx_udc_init_debugfs(udc);
2395 	rc = usb_add_gadget_udc(dev, &udc->gadget);
2396 	if (!rc)
2397 		return 0;
2398 
2399 	bcm63xx_udc_cleanup_debugfs(udc);
2400 out_uninit:
2401 	bcm63xx_uninit_udc_hw(udc);
2402 	return rc;
2403 }
2404 
2405 /**
2406  * bcm63xx_udc_remove - Remove the device from the system.
2407  * @pdev: Platform device struct from the bcm63xx BSP code.
2408  */
static int bcm63xx_udc_remove(struct platform_device *pdev)
{
	struct bcm63xx_udc *udc = platform_get_drvdata(pdev);

	/* tear down debugfs before unregistering the gadget */
	bcm63xx_udc_cleanup_debugfs(udc);
	usb_del_gadget_udc(&udc->gadget);
	/* a bound gadget driver at this point would be a framework bug */
	BUG_ON(udc->driver);

	/* release clocks, buffers, and other hardware resources */
	bcm63xx_uninit_udc_hw(udc);

	return 0;
}
2421 
/* platform driver glue; the device is registered by the bcm63xx BSP code */
static struct platform_driver bcm63xx_udc_driver = {
	.probe		= bcm63xx_udc_probe,
	.remove		= bcm63xx_udc_remove,
	.driver		= {
		.name	= DRV_MODULE_NAME,
	},
};
module_platform_driver(bcm63xx_udc_driver);

MODULE_DESCRIPTION("BCM63xx USB Peripheral Controller");
MODULE_AUTHOR("Kevin Cernekee <cernekee@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_MODULE_NAME);
2435