/*
 * bcm63xx_udc.c -- BCM63xx UDC high/full speed USB device controller
 *
 * Copyright (C) 2012 Kevin Cernekee <cernekee@gmail.com>
 * Copyright (C) 2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/workqueue.h>

#include <bcm63xx_cpu.h>
#include <bcm63xx_iudma.h>
#include <bcm63xx_dev_usb_usbd.h>
#include <bcm63xx_io.h>
#include <bcm63xx_regs.h>

#define DRV_MODULE_NAME		"bcm63xx_udc"

static const char bcm63xx_ep0name[] = "ep0";

static const struct {
	const char *name;
	const struct usb_ep_caps caps;
} bcm63xx_ep_info[] = {
#define EP_INFO(_name, _caps) \
	{ \
		.name = _name, \
		.caps = _caps, \
	}

	EP_INFO(bcm63xx_ep0name,
		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
	EP_INFO("ep1in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep2out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep3in-int",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep4out-int",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_OUT)),

#undef EP_INFO
};

static bool use_fullspeed;
module_param(use_fullspeed, bool, S_IRUGO);
MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");

/*
 * RX IRQ coalescing options:
 *
 * false (default) - one IRQ per DATAx packet.  Slow but reliable.  The
 * driver is able to pass the "testusb" suite and recover from conditions like:
 *
 *   1) Device queues up a 2048-byte RX IUDMA transaction on an OUT bulk ep
 *   2) Host sends 512 bytes of data
 *   3) Host decides to reconfigure the device and sends SET_INTERFACE
 *   4) Device shuts down the endpoint and cancels the RX transaction
 *
 * true - one IRQ per transfer, for transfers <= 2048B.  Generates
 * considerably fewer IRQs, but error recovery is less robust.  Does not
 * reliably pass "testusb".
 *
 * TX always uses coalescing, because we can cancel partially complete TX
 * transfers by repeatedly flushing the FIFO.  The hardware doesn't allow
 * this on RX.
 */
static bool irq_coalesce;
module_param(irq_coalesce, bool, S_IRUGO);
MODULE_PARM_DESC(irq_coalesce, "take one IRQ per RX transfer");
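
/*
 * Example: to enable RX IRQ coalescing when the driver is built as a
 * module:
 *
 *   modprobe bcm63xx_udc irq_coalesce=1
 *
 * or, for a built-in driver, on the kernel command line:
 *
 *   bcm63xx_udc.irq_coalesce=1
 */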

#define BCM63XX_NUM_EP			5
#define BCM63XX_NUM_IUDMA		6
#define BCM63XX_NUM_FIFO_PAIRS		3

#define IUDMA_RESET_TIMEOUT_US		10000

#define IUDMA_EP0_RXCHAN		0
#define IUDMA_EP0_TXCHAN		1

#define IUDMA_MAX_FRAGMENT		2048
#define BCM63XX_MAX_CTRL_PKT		64

#define BCMEP_CTRL			0x00
#define BCMEP_ISOC			0x01
#define BCMEP_BULK			0x02
#define BCMEP_INTR			0x03

#define BCMEP_OUT			0x00
#define BCMEP_IN			0x01

#define BCM63XX_SPD_FULL		1
#define BCM63XX_SPD_HIGH		0

#define IUDMA_DMAC_OFFSET		0x200
#define IUDMA_DMAS_OFFSET		0x400

enum bcm63xx_ep0_state {
	EP0_REQUEUE,
	EP0_IDLE,
	EP0_IN_DATA_PHASE_SETUP,
	EP0_IN_DATA_PHASE_COMPLETE,
	EP0_OUT_DATA_PHASE_SETUP,
	EP0_OUT_DATA_PHASE_COMPLETE,
	EP0_OUT_STATUS_PHASE,
	EP0_IN_FAKE_STATUS_PHASE,
	EP0_SHUTDOWN,
};

static const char __maybe_unused bcm63xx_ep0_state_names[][32] = {
	"REQUEUE",
	"IDLE",
	"IN_DATA_PHASE_SETUP",
	"IN_DATA_PHASE_COMPLETE",
	"OUT_DATA_PHASE_SETUP",
	"OUT_DATA_PHASE_COMPLETE",
	"OUT_STATUS_PHASE",
	"IN_FAKE_STATUS_PHASE",
	"SHUTDOWN",
};

/**
 * struct iudma_ch_cfg - Static configuration for an IUDMA channel.
 * @ep_num: USB endpoint number.
 * @n_bds: Number of buffer descriptors in the ring.
 * @ep_type: Endpoint type (control, bulk, interrupt).
 * @dir: Direction (in, out).
 * @n_fifo_slots: Number of FIFO entries to allocate for this channel.
 * @max_pkt_hs: Maximum packet size in high speed mode.
 * @max_pkt_fs: Maximum packet size in full speed mode.
 */
struct iudma_ch_cfg {
	int				ep_num;
	int				n_bds;
	int				ep_type;
	int				dir;
	int				n_fifo_slots;
	int				max_pkt_hs;
	int				max_pkt_fs;
};

static const struct iudma_ch_cfg iudma_defaults[] = {

	/* This controller was designed to support a CDC/RNDIS application.
	   It may be possible to reconfigure some of the endpoints, but
	   the hardware limitations (FIFO sizing and number of DMA channels)
	   may significantly impact flexibility and/or stability.  Change
	   these values at your own risk.

	      ep_num       ep_type           n_fifo_slots    max_pkt_fs
	idx      |  n_bds     |         dir       |  max_pkt_hs  |
	 |       |    |       |          |        |      |       |       */
	[0] = { -1,   4, BCMEP_CTRL, BCMEP_OUT,  32,    64,     64 },
	[1] = {  0,   4, BCMEP_CTRL, BCMEP_OUT,  32,    64,     64 },
	[2] = {  2,  16, BCMEP_BULK, BCMEP_OUT, 128,   512,     64 },
	[3] = {  1,  16, BCMEP_BULK, BCMEP_IN,  128,   512,     64 },
	[4] = {  4,   4, BCMEP_INTR, BCMEP_OUT,  32,    64,     64 },
	[5] = {  3,   4, BCMEP_INTR, BCMEP_IN,   32,    64,     64 },
};

struct bcm63xx_udc;

/**
 * struct iudma_ch - Represents the current state of a single IUDMA channel.
 * @ch_idx: IUDMA channel index (0 to BCM63XX_NUM_IUDMA-1).
 * @ep_num: USB endpoint number.  -1 for ep0 RX.
 * @enabled: Whether bcm63xx_ep_enable() has been called.
 * @max_pkt: "Chunk size" on the USB interface.  Based on interface speed.
 * @is_tx: true for TX, false for RX.
 * @bep: Pointer to the associated endpoint.  NULL for ep0 RX.
 * @udc: Reference to the device controller.
 * @read_bd: Next buffer descriptor to reap from the hardware.
 * @write_bd: Next BD available for a new packet.
 * @end_bd: Points to the final BD in the ring.
 * @n_bds_used: Number of BD entries currently occupied.
 * @bd_ring: Base pointer to the BD ring.
 * @bd_ring_dma: Physical (DMA) address of bd_ring.
 * @n_bds: Total number of BDs in the ring.
 *
 * ep0 has two IUDMA channels (IUDMA_EP0_RXCHAN and IUDMA_EP0_TXCHAN), as it is
 * bidirectional.  The "struct usb_ep" associated with ep0 is for TX (IN)
 * only.
 *
 * Each bulk/intr endpoint has a single IUDMA channel and a single
 * struct usb_ep.
 */
struct iudma_ch {
	unsigned int			ch_idx;
	int				ep_num;
	bool				enabled;
	int				max_pkt;
	bool				is_tx;
	struct bcm63xx_ep		*bep;
	struct bcm63xx_udc		*udc;

	struct bcm_enet_desc		*read_bd;
	struct bcm_enet_desc		*write_bd;
	struct bcm_enet_desc		*end_bd;
	int				n_bds_used;

	struct bcm_enet_desc		*bd_ring;
	dma_addr_t			bd_ring_dma;
	unsigned int			n_bds;
};

/**
 * struct bcm63xx_ep - Internal (driver) state of a single endpoint.
 * @ep_num: USB endpoint number.
 * @iudma: Pointer to IUDMA channel state.
 * @ep: USB gadget layer representation of the EP.
 * @udc: Reference to the device controller.
 * @queue: Linked list of outstanding requests for this EP.
 * @halted: 1 if the EP is stalled; 0 otherwise.
 */
struct bcm63xx_ep {
	unsigned int			ep_num;
	struct iudma_ch			*iudma;
	struct usb_ep			ep;
	struct bcm63xx_udc		*udc;
	struct list_head		queue;
	unsigned			halted:1;
};

/**
 * struct bcm63xx_req - Internal (driver) state of a single request.
 * @queue: Links back to the EP's request list.
 * @req: USB gadget layer representation of the request.
 * @offset: Current byte offset into the data buffer (next byte to queue).
 * @bd_bytes: Number of data bytes in outstanding BD entries.
 * @iudma: IUDMA channel used for the request.
 */
struct bcm63xx_req {
	struct list_head		queue;		/* ep's requests */
	struct usb_request		req;
	unsigned int			offset;
	unsigned int			bd_bytes;
	struct iudma_ch			*iudma;
};

/**
 * struct bcm63xx_udc - Driver/hardware private context.
 * @lock: Spinlock to mediate access to this struct, and (most) HW regs.
 * @dev: Generic Linux device structure.
 * @pd: Platform data (board/port info).
 * @usbd_clk: Clock descriptor for the USB device block.
 * @usbh_clk: Clock descriptor for the USB host block.
 * @gadget: USB slave device.
 * @driver: Driver for USB slave devices.
 * @usbd_regs: Base address of the USBD/USB20D block.
 * @iudma_regs: Base address of the USBD's associated IUDMA block.
 * @bep: Array of endpoints, including ep0.
 * @iudma: Array of all IUDMA channels used by this controller.
 * @cfg: USB configuration number, from SET_CONFIGURATION wValue.
 * @iface: USB interface number, from SET_INTERFACE wIndex.
 * @alt_iface: USB alt interface number, from SET_INTERFACE wValue.
 * @ep0_ctrl_req: Request object for bcm63xx_udc-initiated ep0 transactions.
 * @ep0_ctrl_buf: Data buffer for ep0_ctrl_req.
 * @ep0state: Current state of the ep0 state machine.
 * @ep0_wq: Workqueue struct used to wake up the ep0 state machine.
 * @wedgemap: Bitmap of wedged endpoints.
 * @ep0_req_reset: USB reset is pending.
 * @ep0_req_set_cfg: Need to spoof a SET_CONFIGURATION packet.
 * @ep0_req_set_iface: Need to spoof a SET_INTERFACE packet.
 * @ep0_req_shutdown: Driver is shutting down; requesting ep0 to halt activity.
 * @ep0_req_completed: ep0 request has completed; worker has not seen it yet.
 * @ep0_reply: Pending reply from gadget driver.
 * @ep0_request: Outstanding ep0 request.
 * @debugfs_root: debugfs directory: /sys/kernel/debug/<DRV_MODULE_NAME>.
 * @debugfs_usbd: debugfs file "usbd" for controller state.
 * @debugfs_iudma: debugfs file "iudma" for IUDMA state.
 */
struct bcm63xx_udc {
	spinlock_t			lock;

	struct device			*dev;
	struct bcm63xx_usbd_platform_data *pd;
	struct clk			*usbd_clk;
	struct clk			*usbh_clk;

	struct usb_gadget		gadget;
	struct usb_gadget_driver	*driver;

	void __iomem			*usbd_regs;
	void __iomem			*iudma_regs;

	struct bcm63xx_ep		bep[BCM63XX_NUM_EP];
	struct iudma_ch			iudma[BCM63XX_NUM_IUDMA];

	int				cfg;
	int				iface;
	int				alt_iface;

	struct bcm63xx_req		ep0_ctrl_req;
	u8				*ep0_ctrl_buf;

	int				ep0state;
	struct work_struct		ep0_wq;

	unsigned long			wedgemap;

	unsigned			ep0_req_reset:1;
	unsigned			ep0_req_set_cfg:1;
	unsigned			ep0_req_set_iface:1;
	unsigned			ep0_req_shutdown:1;

	unsigned			ep0_req_completed:1;
	struct usb_request		*ep0_reply;
	struct usb_request		*ep0_request;

	struct dentry			*debugfs_root;
	struct dentry			*debugfs_usbd;
	struct dentry			*debugfs_iudma;
};

static const struct usb_ep_ops bcm63xx_udc_ep_ops;

/***********************************************************************
 * Convenience functions
 ***********************************************************************/

static inline struct bcm63xx_udc *gadget_to_udc(struct usb_gadget *g)
{
	return container_of(g, struct bcm63xx_udc, gadget);
}

static inline struct bcm63xx_ep *our_ep(struct usb_ep *ep)
{
	return container_of(ep, struct bcm63xx_ep, ep);
}

static inline struct bcm63xx_req *our_req(struct usb_request *req)
{
	return container_of(req, struct bcm63xx_req, req);
}

static inline u32 usbd_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->usbd_regs + off);
}

static inline void usbd_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->usbd_regs + off);
}

static inline u32 usb_dma_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->iudma_regs + off);
}

static inline void usb_dma_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->iudma_regs + off);
}

static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off, int chan)
{
	return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}

static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
					int chan)
{
	bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}

static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off, int chan)
{
	return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}

static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
					int chan)
{
	bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}

static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled)
{
	if (is_enabled) {
		clk_enable(udc->usbh_clk);
		clk_enable(udc->usbd_clk);
		udelay(10);
	} else {
		clk_disable(udc->usbd_clk);
		clk_disable(udc->usbh_clk);
	}
}

/***********************************************************************
 * Low-level IUDMA / FIFO operations
 ***********************************************************************/

/**
 * bcm63xx_ep_dma_select - Helper function to set up the init_sel signal.
 * @udc: Reference to the device controller.
 * @idx: Desired init_sel value.
 *
 * The "init_sel" signal is used as a selection index for both endpoints
 * and IUDMA channels.  Since these do not map 1:1, the use of this signal
 * depends on the context.
 */
static void bcm63xx_ep_dma_select(struct bcm63xx_udc *udc, int idx)
{
	u32 val = usbd_readl(udc, USBD_CONTROL_REG);

	val &= ~USBD_CONTROL_INIT_SEL_MASK;
	val |= idx << USBD_CONTROL_INIT_SEL_SHIFT;
	usbd_writel(udc, val, USBD_CONTROL_REG);
}

/**
 * bcm63xx_set_stall - Enable/disable stall on one endpoint.
 * @udc: Reference to the device controller.
 * @bep: Endpoint on which to operate.
 * @is_stalled: true to enable stall, false to disable.
 *
 * See notes in bcm63xx_update_wedge() regarding automatic clearing of
 * halt/stall conditions.
 */
static void bcm63xx_set_stall(struct bcm63xx_udc *udc, struct bcm63xx_ep *bep,
	bool is_stalled)
{
	u32 val;

	val = USBD_STALL_UPDATE_MASK |
		(is_stalled ? USBD_STALL_ENABLE_MASK : 0) |
		(bep->ep_num << USBD_STALL_EPNUM_SHIFT);
	usbd_writel(udc, val, USBD_STALL_REG);
}

/**
 * bcm63xx_fifo_setup - (Re)initialize FIFO boundaries and settings.
 * @udc: Reference to the device controller.
 *
 * These parameters depend on the USB link speed.  Settings are
 * per-IUDMA-channel-pair.
 */
static void bcm63xx_fifo_setup(struct bcm63xx_udc *udc)
{
	int is_hs = udc->gadget.speed == USB_SPEED_HIGH;
	u32 i, val, rx_fifo_slot, tx_fifo_slot;

	/* set up FIFO boundaries and packet sizes; this is done in pairs */
	rx_fifo_slot = tx_fifo_slot = 0;
	for (i = 0; i < BCM63XX_NUM_IUDMA; i += 2) {
		const struct iudma_ch_cfg *rx_cfg = &iudma_defaults[i];
		const struct iudma_ch_cfg *tx_cfg = &iudma_defaults[i + 1];

		bcm63xx_ep_dma_select(udc, i >> 1);

		val = (rx_fifo_slot << USBD_RXFIFO_CONFIG_START_SHIFT) |
			((rx_fifo_slot + rx_cfg->n_fifo_slots - 1) <<
			 USBD_RXFIFO_CONFIG_END_SHIFT);
		rx_fifo_slot += rx_cfg->n_fifo_slots;
		usbd_writel(udc, val, USBD_RXFIFO_CONFIG_REG);
		usbd_writel(udc,
			    is_hs ? rx_cfg->max_pkt_hs : rx_cfg->max_pkt_fs,
			    USBD_RXFIFO_EPSIZE_REG);

		val = (tx_fifo_slot << USBD_TXFIFO_CONFIG_START_SHIFT) |
			((tx_fifo_slot + tx_cfg->n_fifo_slots - 1) <<
			 USBD_TXFIFO_CONFIG_END_SHIFT);
		tx_fifo_slot += tx_cfg->n_fifo_slots;
		usbd_writel(udc, val, USBD_TXFIFO_CONFIG_REG);
		usbd_writel(udc,
			    is_hs ? tx_cfg->max_pkt_hs : tx_cfg->max_pkt_fs,
			    USBD_TXFIFO_EPSIZE_REG);

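		/* dummy read; presumably this flushes the posted writes */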
		usbd_readl(udc, USBD_TXFIFO_EPSIZE_REG);
	}
}

/**
 * bcm63xx_fifo_reset_ep - Flush a single endpoint's FIFO.
 * @udc: Reference to the device controller.
 * @ep_num: Endpoint number.
 */
static void bcm63xx_fifo_reset_ep(struct bcm63xx_udc *udc, int ep_num)
{
	u32 val;

	bcm63xx_ep_dma_select(udc, ep_num);

	val = usbd_readl(udc, USBD_CONTROL_REG);
	val |= USBD_CONTROL_FIFO_RESET_MASK;
	usbd_writel(udc, val, USBD_CONTROL_REG);
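	/* dummy read; presumably this flushes the posted FIFO reset */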
	usbd_readl(udc, USBD_CONTROL_REG);
}

/**
 * bcm63xx_fifo_reset - Flush all hardware FIFOs.
 * @udc: Reference to the device controller.
 */
static void bcm63xx_fifo_reset(struct bcm63xx_udc *udc)
{
	int i;

	for (i = 0; i < BCM63XX_NUM_FIFO_PAIRS; i++)
		bcm63xx_fifo_reset_ep(udc, i);
}

/**
 * bcm63xx_ep_init - Initial (one-time) endpoint initialization.
 * @udc: Reference to the device controller.
 */
static void bcm63xx_ep_init(struct bcm63xx_udc *udc)
{
	u32 i, val;

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];

		if (cfg->ep_num < 0)
			continue;

		bcm63xx_ep_dma_select(udc, cfg->ep_num);
		val = (cfg->ep_type << USBD_EPNUM_TYPEMAP_TYPE_SHIFT) |
			((i >> 1) << USBD_EPNUM_TYPEMAP_DMA_CH_SHIFT);
		usbd_writel(udc, val, USBD_EPNUM_TYPEMAP_REG);
	}
}

/**
 * bcm63xx_ep_setup - Configure per-endpoint settings.
 * @udc: Reference to the device controller.
 *
 * This needs to be rerun if the speed/cfg/intf/altintf changes.
 */
static void bcm63xx_ep_setup(struct bcm63xx_udc *udc)
{
	u32 val, i;

	usbd_writel(udc, USBD_CSR_SETUPADDR_DEF, USBD_CSR_SETUPADDR_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
		int max_pkt = udc->gadget.speed == USB_SPEED_HIGH ?
			      cfg->max_pkt_hs : cfg->max_pkt_fs;
		int idx = cfg->ep_num;

		udc->iudma[i].max_pkt = max_pkt;

		if (idx < 0)
			continue;
		usb_ep_set_maxpacket_limit(&udc->bep[idx].ep, max_pkt);

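		/* program this endpoint's CSR with the active
		 * speed/cfg/iface/alt_iface settings */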
		val = (idx << USBD_CSR_EP_LOG_SHIFT) |
		      (cfg->dir << USBD_CSR_EP_DIR_SHIFT) |
		      (cfg->ep_type << USBD_CSR_EP_TYPE_SHIFT) |
		      (udc->cfg << USBD_CSR_EP_CFG_SHIFT) |
		      (udc->iface << USBD_CSR_EP_IFACE_SHIFT) |
		      (udc->alt_iface << USBD_CSR_EP_ALTIFACE_SHIFT) |
		      (max_pkt << USBD_CSR_EP_MAXPKT_SHIFT);
		usbd_writel(udc, val, USBD_CSR_EP_REG(idx));
	}
}

/**
 * iudma_write - Queue a single IUDMA transaction.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to use.
 * @breq: Request containing the transaction data.
 *
 * For RX IUDMA, this will queue a single buffer descriptor, as RX IUDMA
 * does not honor SOP/EOP so the handling of multiple buffers is ambiguous.
 * So iudma_write() may be called several times to fulfill a single
 * usb_request.
 *
 * For TX IUDMA, this can queue multiple buffer descriptors if needed.
 */
static void iudma_write(struct bcm63xx_udc *udc, struct iudma_ch *iudma,
	struct bcm63xx_req *breq)
{
	int first_bd = 1, last_bd = 0, extra_zero_pkt = 0;
	unsigned int bytes_left = breq->req.length - breq->offset;
	const int max_bd_bytes = !irq_coalesce && !iudma->is_tx ?
		iudma->max_pkt : IUDMA_MAX_FRAGMENT;

	iudma->n_bds_used = 0;
	breq->bd_bytes = 0;
	breq->iudma = iudma;

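	/*
	 * A transfer that is a non-zero multiple of max_pkt needs an
	 * explicit zero-length packet terminator if the gadget driver
	 * asked for one (req.zero).
	 */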
	if ((bytes_left % iudma->max_pkt == 0) && bytes_left && breq->req.zero)
		extra_zero_pkt = 1;

	do {
		struct bcm_enet_desc *d = iudma->write_bd;
		u32 dmaflags = 0;
		unsigned int n_bytes;

		if (d == iudma->end_bd) {
			dmaflags |= DMADESC_WRAP_MASK;
			iudma->write_bd = iudma->bd_ring;
		} else {
			iudma->write_bd++;
		}
		iudma->n_bds_used++;

		n_bytes = min_t(int, bytes_left, max_bd_bytes);
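		/*
		 * The length field can't express a 0-byte BD, so a
		 * zero-length packet is queued as a nominal 1-byte
		 * descriptor with the USB_ZERO flag set.
		 */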
		if (n_bytes)
			dmaflags |= n_bytes << DMADESC_LENGTH_SHIFT;
		else
			dmaflags |= (1 << DMADESC_LENGTH_SHIFT) |
				    DMADESC_USB_ZERO_MASK;

		dmaflags |= DMADESC_OWNER_MASK;
		if (first_bd) {
			dmaflags |= DMADESC_SOP_MASK;
			first_bd = 0;
		}

		/*
		 * extra_zero_pkt forces one more iteration through the loop
		 * after all data is queued up, to send the zero packet
		 */
		if (extra_zero_pkt && !bytes_left)
			extra_zero_pkt = 0;

		if (!iudma->is_tx || iudma->n_bds_used == iudma->n_bds ||
		    (n_bytes == bytes_left && !extra_zero_pkt)) {
			last_bd = 1;
			dmaflags |= DMADESC_EOP_MASK;
		}

		d->address = breq->req.dma + breq->offset;
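		/* publish the buffer address before len_stat hands the
		 * descriptor over to the hardware */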
		mb();
		d->len_stat = dmaflags;

		breq->offset += n_bytes;
		breq->bd_bytes += n_bytes;
		bytes_left -= n_bytes;
	} while (!last_bd);

	usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK,
			ENETDMAC_CHANCFG_REG, iudma->ch_idx);
}

/**
 * iudma_read - Check for IUDMA buffer completion.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to use.
 *
 * This checks to see if ALL of the outstanding BDs on the DMA channel
 * have been filled.  If so, it returns the actual transfer length;
 * otherwise it returns -EBUSY (or -EINVAL if no BDs are outstanding).
 */
static int iudma_read(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
	int i, actual_len = 0;
	struct bcm_enet_desc *d = iudma->read_bd;

	if (!iudma->n_bds_used)
		return -EINVAL;

	for (i = 0; i < iudma->n_bds_used; i++) {
		u32 dmaflags;

		dmaflags = d->len_stat;

		if (dmaflags & DMADESC_OWNER_MASK)
			return -EBUSY;

		actual_len += (dmaflags & DMADESC_LENGTH_MASK) >>
			      DMADESC_LENGTH_SHIFT;
		if (d == iudma->end_bd)
			d = iudma->bd_ring;
		else
			d++;
	}

	iudma->read_bd = d;
	iudma->n_bds_used = 0;
	return actual_len;
}

/**
 * iudma_reset_channel - Stop DMA on a single channel.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to reset.
 */
static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
	int timeout = IUDMA_RESET_TIMEOUT_US;
	struct bcm_enet_desc *d;
	int ch_idx = iudma->ch_idx;

	if (!iudma->is_tx)
		bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num));

	/* stop DMA, then wait for the hardware to wrap up */
	usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG, ch_idx);

	while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx) &
				   ENETDMAC_CHANCFG_EN_MASK) {
		udelay(1);

		/* repeatedly flush the FIFO data until the BD completes */
		if (iudma->is_tx && iudma->ep_num >= 0)
			bcm63xx_fifo_reset_ep(udc, iudma->ep_num);

		if (!timeout--) {
			dev_err(udc->dev, "can't reset IUDMA channel %d\n",
				ch_idx);
			break;
		}
		if (timeout == IUDMA_RESET_TIMEOUT_US / 2) {
			dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n",
				 ch_idx);
			usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK,
					ENETDMAC_CHANCFG_REG, ch_idx);
		}
	}
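	/* ack any channel interrupts latched during the reset */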
	usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG, ch_idx);

	/* don't leave "live" HW-owned entries for the next guy to step on */
	for (d = iudma->bd_ring; d <= iudma->end_bd; d++)
		d->len_stat = 0;
	mb();

	iudma->read_bd = iudma->write_bd = iudma->bd_ring;
	iudma->n_bds_used = 0;

	/* set up IRQs, UBUS burst size, and BD base for this channel */
	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
			ENETDMAC_IRMASK_REG, ch_idx);
	usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG, ch_idx);

	usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG, ch_idx);
	usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG, ch_idx);
}

/**
 * iudma_init_channel - One-time IUDMA channel initialization.
 * @udc: Reference to the device controller.
 * @ch_idx: Channel to initialize.
 */
static int iudma_init_channel(struct bcm63xx_udc *udc, unsigned int ch_idx)
{
	struct iudma_ch *iudma = &udc->iudma[ch_idx];
	const struct iudma_ch_cfg *cfg = &iudma_defaults[ch_idx];
	unsigned int n_bds = cfg->n_bds;
	struct bcm63xx_ep *bep = NULL;

	iudma->ep_num = cfg->ep_num;
	iudma->ch_idx = ch_idx;
	iudma->is_tx = !!(ch_idx & 0x01);
	if (iudma->ep_num >= 0) {
		bep = &udc->bep[iudma->ep_num];
		bep->iudma = iudma;
		INIT_LIST_HEAD(&bep->queue);
	}

	iudma->bep = bep;
	iudma->udc = udc;

	/* ep0 is always active; others are controlled by the gadget driver */
	if (iudma->ep_num <= 0)
		iudma->enabled = true;

	iudma->n_bds = n_bds;
	iudma->bd_ring = dmam_alloc_coherent(udc->dev,
		n_bds * sizeof(struct bcm_enet_desc),
		&iudma->bd_ring_dma, GFP_KERNEL);
	if (!iudma->bd_ring)
		return -ENOMEM;
	iudma->end_bd = &iudma->bd_ring[n_bds - 1];

	return 0;
}

/**
 * iudma_init - One-time initialization of all IUDMA channels.
 * @udc: Reference to the device controller.
 *
 * Enable DMA, flush channels, and enable global IUDMA IRQs.
 */
static int iudma_init(struct bcm63xx_udc *udc)
{
	int i, rc;

	usb_dma_writel(udc, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		rc = iudma_init_channel(udc, i);
		if (rc)
			return rc;
		iudma_reset_channel(udc, &udc->iudma[i]);
	}

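	/* unmask the global IRQ bit for each channel initialized above */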
	usb_dma_writel(udc, BIT(BCM63XX_NUM_IUDMA)-1, ENETDMA_GLB_IRQMASK_REG);
	return 0;
}

/**
 * iudma_uninit - Uninitialize IUDMA channels.
 * @udc: Reference to the device controller.
 *
 * Kill global IUDMA IRQs, flush channels, and kill DMA.
 */
static void iudma_uninit(struct bcm63xx_udc *udc)
{
	int i;

	usb_dma_writel(udc, 0, ENETDMA_GLB_IRQMASK_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++)
		iudma_reset_channel(udc, &udc->iudma[i]);

	usb_dma_writel(udc, 0, ENETDMA_CFG_REG);
}

/***********************************************************************
 * Other low-level USBD operations
 ***********************************************************************/

/**
 * bcm63xx_set_ctrl_irqs - Mask/unmask control path interrupts.
 * @udc: Reference to the device controller.
 * @enable_irqs: true to enable, false to disable.
 */
static void bcm63xx_set_ctrl_irqs(struct bcm63xx_udc *udc, bool enable_irqs)
{
	u32 val;

	usbd_writel(udc, 0, USBD_STATUS_REG);

	val = BIT(USBD_EVENT_IRQ_USB_RESET) |
	      BIT(USBD_EVENT_IRQ_SETUP) |
	      BIT(USBD_EVENT_IRQ_SETCFG) |
	      BIT(USBD_EVENT_IRQ_SETINTF) |
	      BIT(USBD_EVENT_IRQ_USB_LINK);
	usbd_writel(udc, enable_irqs ? val : 0, USBD_EVENT_IRQ_MASK_REG);
	usbd_writel(udc, val, USBD_EVENT_IRQ_STATUS_REG);
}

/**
 * bcm63xx_select_phy_mode - Select between USB device and host mode.
 * @udc: Reference to the device controller.
 * @is_device: true for device, false for host.
 *
 * This should probably be reworked to use the drivers/usb/otg
 * infrastructure.
 *
 * By default, the AFE/pullups are disabled in device mode, until
 * bcm63xx_select_pullup() is called.
 */
static void bcm63xx_select_phy_mode(struct bcm63xx_udc *udc, bool is_device)
{
	u32 val, portmask = BIT(udc->pd->port_no);

	if (BCMCPU_IS_6328()) {
		/* configure pinmux to sense VBUS signal */
		val = bcm_gpio_readl(GPIO_PINMUX_OTHR_REG);
		val &= ~GPIO_PINMUX_OTHR_6328_USB_MASK;
		val |= is_device ? GPIO_PINMUX_OTHR_6328_USB_DEV :
			       GPIO_PINMUX_OTHR_6328_USB_HOST;
		bcm_gpio_writel(val, GPIO_PINMUX_OTHR_REG);
	}

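	/*
	 * Select device mode for this port (HOSTB), but keep its output
	 * drivers disabled (NODRIV) until bcm63xx_select_pullup() turns
	 * the pullup on.
	 */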
	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
	if (is_device) {
		val |= (portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	} else {
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	}
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);

	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_SWAP_6368_REG);
	if (is_device)
		val |= USBH_PRIV_SWAP_USBD_MASK;
	else
		val &= ~USBH_PRIV_SWAP_USBD_MASK;
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_SWAP_6368_REG);
}

/**
 * bcm63xx_select_pullup - Enable/disable the pullup on D+
 * @udc: Reference to the device controller.
 * @is_on: true to enable the pullup, false to disable.
 *
 * If the pullup is active, the host will sense a FS/HS device connected to
 * the port.  If the pullup is inactive, the host will think the USB
 * device has been disconnected.
 */
static void bcm63xx_select_pullup(struct bcm63xx_udc *udc, bool is_on)
{
	u32 val, portmask = BIT(udc->pd->port_no);

	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
	if (is_on)
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	else
		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
}

/**
 * bcm63xx_uninit_udc_hw - Shut down the hardware prior to driver removal.
 * @udc: Reference to the device controller.
 *
 * This just masks the IUDMA IRQs and releases the clocks.  It is assumed
 * that bcm63xx_udc_stop() has already run, and the clocks are stopped.
 */
static void bcm63xx_uninit_udc_hw(struct bcm63xx_udc *udc)
{
	set_clocks(udc, true);
	iudma_uninit(udc);
	set_clocks(udc, false);

	clk_put(udc->usbd_clk);
	clk_put(udc->usbh_clk);
}

/**
 * bcm63xx_init_udc_hw - Initialize the controller hardware and data structures.
 * @udc: Reference to the device controller.
 */
static int bcm63xx_init_udc_hw(struct bcm63xx_udc *udc)
{
	int i, rc = 0;
	u32 val;

	udc->ep0_ctrl_buf = devm_kzalloc(udc->dev, BCM63XX_MAX_CTRL_PKT,
					 GFP_KERNEL);
	if (!udc->ep0_ctrl_buf)
		return -ENOMEM;

	INIT_LIST_HEAD(&udc->gadget.ep_list);
	for (i = 0; i < BCM63XX_NUM_EP; i++) {
		struct bcm63xx_ep *bep = &udc->bep[i];

		bep->ep.name = bcm63xx_ep_info[i].name;
		bep->ep.caps = bcm63xx_ep_info[i].caps;
		bep->ep_num = i;
		bep->ep.ops = &bcm63xx_udc_ep_ops;
		list_add_tail(&bep->ep.ep_list, &udc->gadget.ep_list);
		bep->halted = 0;
		usb_ep_set_maxpacket_limit(&bep->ep, BCM63XX_MAX_CTRL_PKT);
		bep->udc = udc;
		bep->ep.desc = NULL;
		INIT_LIST_HEAD(&bep->queue);
	}

	udc->gadget.ep0 = &udc->bep[0].ep;
	list_del(&udc->bep[0].ep.ep_list);

	udc->gadget.speed = USB_SPEED_UNKNOWN;
	udc->ep0state = EP0_SHUTDOWN;

	udc->usbh_clk = clk_get(udc->dev, "usbh");
	if (IS_ERR(udc->usbh_clk))
		return -EIO;

	udc->usbd_clk = clk_get(udc->dev, "usbd");
	if (IS_ERR(udc->usbd_clk)) {
		clk_put(udc->usbh_clk);
		return -EIO;
	}

	set_clocks(udc, true);

	val = USBD_CONTROL_AUTO_CSRS_MASK |
	      USBD_CONTROL_DONE_CSRS_MASK |
	      (irq_coalesce ? USBD_CONTROL_RXZSCFG_MASK : 0);
	usbd_writel(udc, val, USBD_CONTROL_REG);

	val = USBD_STRAPS_APP_SELF_PWR_MASK |
	      USBD_STRAPS_APP_RAM_IF_MASK |
	      USBD_STRAPS_APP_CSRPRGSUP_MASK |
	      USBD_STRAPS_APP_8BITPHY_MASK |
	      USBD_STRAPS_APP_RMTWKUP_MASK;

	if (udc->gadget.max_speed == USB_SPEED_HIGH)
		val |= (BCM63XX_SPD_HIGH << USBD_STRAPS_SPEED_SHIFT);
	else
		val |= (BCM63XX_SPD_FULL << USBD_STRAPS_SPEED_SHIFT);
	usbd_writel(udc, val, USBD_STRAPS_REG);

	bcm63xx_set_ctrl_irqs(udc, false);

	usbd_writel(udc, 0, USBD_EVENT_IRQ_CFG_LO_REG);

	val = USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_ENUM_ON) |
	      USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_SET_CSRS);
	usbd_writel(udc, val, USBD_EVENT_IRQ_CFG_HI_REG);

	rc = iudma_init(udc);
	set_clocks(udc, false);
	if (rc)
		bcm63xx_uninit_udc_hw(udc);

	return rc;
}

/***********************************************************************
 * Standard EP gadget operations
 ***********************************************************************/

/**
 * bcm63xx_ep_enable - Enable one endpoint.
 * @ep: Endpoint to enable.
 * @desc: Contains max packet, direction, etc.
 *
 * Most of the endpoint parameters are fixed in this controller, so there
 * isn't much for this function to do.
 */
static int bcm63xx_ep_enable(struct usb_ep *ep,
	const struct usb_endpoint_descriptor *desc)
{
	struct bcm63xx_ep *bep;
	struct bcm63xx_udc *udc;
	struct iudma_ch *iudma;
	unsigned long flags;

	/* validate the arguments before dereferencing ep */
	if (!ep || !desc || ep->name == bcm63xx_ep0name)
		return -EINVAL;

	bep = our_ep(ep);
	udc = bep->udc;
	iudma = bep->iudma;

	if (!udc->driver)
		return -ESHUTDOWN;

	spin_lock_irqsave(&udc->lock, flags);
	if (iudma->enabled) {
		spin_unlock_irqrestore(&udc->lock, flags);
		return -EINVAL;
	}

	iudma->enabled = true;
	BUG_ON(!list_empty(&bep->queue));

	iudma_reset_channel(udc, iudma);

	bep->halted = 0;
	bcm63xx_set_stall(udc, bep, false);
	clear_bit(bep->ep_num, &udc->wedgemap);

	ep->desc = desc;
	ep->maxpacket = usb_endpoint_maxp(desc);

	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}

/**
 * bcm63xx_ep_disable - Disable one endpoint.
 * @ep: Endpoint to disable.
 */
static int bcm63xx_ep_disable(struct usb_ep *ep)
{
	struct bcm63xx_ep *bep;
	struct bcm63xx_udc *udc;
	struct iudma_ch *iudma;
	struct bcm63xx_req *breq, *n;
	unsigned long flags;

	/* validate the arguments before dereferencing ep */
	if (!ep || !ep->desc)
		return -EINVAL;

	bep = our_ep(ep);
	udc = bep->udc;
	iudma = bep->iudma;

	spin_lock_irqsave(&udc->lock, flags);
	if (!iudma->enabled) {
		spin_unlock_irqrestore(&udc->lock, flags);
		return -EINVAL;
	}
	iudma->enabled = false;

	iudma_reset_channel(udc, iudma);

	if (!list_empty(&bep->queue)) {
		list_for_each_entry_safe(breq, n, &bep->queue, queue) {
			usb_gadget_unmap_request(&udc->gadget, &breq->req,
						 iudma->is_tx);
			list_del(&breq->queue);
			breq->req.status = -ESHUTDOWN;

			spin_unlock_irqrestore(&udc->lock, flags);
			usb_gadget_giveback_request(&iudma->bep->ep, &breq->req);
			spin_lock_irqsave(&udc->lock, flags);
		}
	}
	ep->desc = NULL;

	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}

/**
 * bcm63xx_udc_alloc_request - Allocate a new request.
 * @ep: Endpoint associated with the request.
 * @mem_flags: Flags to pass to kzalloc().
 */
static struct usb_request *bcm63xx_udc_alloc_request(struct usb_ep *ep,
	gfp_t mem_flags)
{
	struct bcm63xx_req *breq;

	breq = kzalloc(sizeof(*breq), mem_flags);
	if (!breq)
		return NULL;
	return &breq->req;
}

/**
 * bcm63xx_udc_free_request - Free a request.
 * @ep: Endpoint associated with the request.
 * @req: Request to free.
 */
static void bcm63xx_udc_free_request(struct usb_ep *ep,
	struct usb_request *req)
{
	struct bcm63xx_req *breq = our_req(req);
	kfree(breq);
}

/**
 * bcm63xx_udc_queue - Queue up a new request.
 * @ep: Endpoint associated with the request.
 * @req: Request to add.
 * @mem_flags: Unused.
 *
 * If the queue is empty, start this request immediately.  Otherwise, add
 * it to the list.
 *
 * ep0 replies are sent through this function from the gadget driver, but
 * they are treated differently because they need to be handled by the ep0
 * state machine.  (Sometimes they are replies to control requests that
 * were spoofed by this driver, and so they shouldn't be transmitted at all.)
 */
static int bcm63xx_udc_queue(struct usb_ep *ep, struct usb_request *req,
	gfp_t mem_flags)
{
	struct bcm63xx_ep *bep;
	struct bcm63xx_udc *udc;
	struct bcm63xx_req *breq;
	unsigned long flags;
	int rc = 0;

	/* validate the arguments before dereferencing ep */
	if (unlikely(!ep || !req || !req->complete || !req->buf))
		return -EINVAL;

	bep = our_ep(ep);
	udc = bep->udc;
	breq = our_req(req);

	req->actual = 0;
	req->status = 0;
	breq->offset = 0;

	if (bep == &udc->bep[0]) {
		/* only one reply per request, please */
		if (udc->ep0_reply)
			return -EINVAL;

		udc->ep0_reply = req;
		schedule_work(&udc->ep0_wq);
		return 0;
	}

	spin_lock_irqsave(&udc->lock, flags);
	if (!bep->iudma->enabled) {
		rc = -ESHUTDOWN;
		goto out;
	}

	rc = usb_gadget_map_request(&udc->gadget, req, bep->iudma->is_tx);
	if (rc == 0) {
		list_add_tail(&breq->queue, &bep->queue);
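		/* start DMA now only if this is the sole request; otherwise
		 * the IRQ completion path will start the next transfer */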
		if (list_is_singular(&bep->queue))
			iudma_write(udc, bep->iudma, breq);
	}

out:
	spin_unlock_irqrestore(&udc->lock, flags);
	return rc;
}

/**
 * bcm63xx_udc_dequeue - Remove a pending request from the queue.
 * @ep: Endpoint associated with the request.
 * @req: Request to remove.
 *
 * If the request is not at the head of the queue, this is easy - just nuke
 * it.  If the request is at the head of the queue, we'll need to stop the
 * DMA transaction and then queue up the successor.
 */
static int bcm63xx_udc_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct bcm63xx_req *breq = our_req(req), *cur;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&udc->lock, flags);
	if (list_empty(&bep->queue)) {
		rc = -EINVAL;
		goto out;
	}

	cur = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
	usb_gadget_unmap_request(&udc->gadget, &breq->req, bep->iudma->is_tx);

	if (breq == cur) {
		iudma_reset_channel(udc, bep->iudma);
		list_del(&breq->queue);

		if (!list_empty(&bep->queue)) {
			struct bcm63xx_req *next;

			next = list_first_entry(&bep->queue,
				struct bcm63xx_req, queue);
			iudma_write(udc, bep->iudma, next);
		}
	} else {
		list_del(&breq->queue);
	}

out:
	spin_unlock_irqrestore(&udc->lock, flags);

	req->status = -ESHUTDOWN;
	req->complete(ep, req);

	return rc;
}

/**
 * bcm63xx_udc_set_halt - Enable/disable STALL flag in the hardware.
 * @ep: Endpoint to halt.
 * @value: Zero to clear halt; nonzero to set halt.
 *
 * See comments in bcm63xx_update_wedge().
 */
static int bcm63xx_udc_set_halt(struct usb_ep *ep, int value)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	bcm63xx_set_stall(udc, bep, !!value);
	bep->halted = value;
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

/**
 * bcm63xx_udc_set_wedge - Stall the endpoint until the next reset.
 * @ep: Endpoint to wedge.
 *
 * See comments in bcm63xx_update_wedge().
 */
static int bcm63xx_udc_set_wedge(struct usb_ep *ep)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	set_bit(bep->ep_num, &udc->wedgemap);
	bcm63xx_set_stall(udc, bep, true);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

static const struct usb_ep_ops bcm63xx_udc_ep_ops = {
	.enable		= bcm63xx_ep_enable,
	.disable	= bcm63xx_ep_disable,

	.alloc_request	= bcm63xx_udc_alloc_request,
	.free_request	= bcm63xx_udc_free_request,

	.queue		= bcm63xx_udc_queue,
	.dequeue	= bcm63xx_udc_dequeue,

	.set_halt	= bcm63xx_udc_set_halt,
	.set_wedge	= bcm63xx_udc_set_wedge,
};

/***********************************************************************
 * EP0 handling
 ***********************************************************************/

/**
 * bcm63xx_ep0_setup_callback - Drop spinlock to invoke ->setup callback.
 * @udc: Reference to the device controller.
 * @ctrl: 8-byte SETUP request.
 */
static int bcm63xx_ep0_setup_callback(struct bcm63xx_udc *udc,
	struct usb_ctrlrequest *ctrl)
{
	int rc;

	spin_unlock_irq(&udc->lock);
	rc = udc->driver->setup(&udc->gadget, ctrl);
	spin_lock_irq(&udc->lock);
	return rc;
}

/**
 * bcm63xx_ep0_spoof_set_cfg - Synthesize a SET_CONFIGURATION request.
 * @udc: Reference to the device controller.
 *
 * Many standard requests are handled automatically in the hardware, but
 * we still need to pass them to the gadget driver so that it can
 * reconfigure the interfaces/endpoints if necessary.
 *
 * Unfortunately we are not able to send a STALL response if the host
 * requests an invalid configuration.  If this happens, we'll have to be
 * content with printing a warning.
 */
static int bcm63xx_ep0_spoof_set_cfg(struct bcm63xx_udc *udc)
{
	struct usb_ctrlrequest ctrl;
	int rc;

	ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_DEVICE;
	ctrl.bRequest = USB_REQ_SET_CONFIGURATION;
	ctrl.wValue = cpu_to_le16(udc->cfg);
	ctrl.wIndex = 0;
	ctrl.wLength = 0;

	rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
	if (rc < 0) {
		dev_warn_ratelimited(udc->dev,
			"hardware auto-acked bad SET_CONFIGURATION(%d) request\n",
			udc->cfg);
	}
	return rc;
}

/**
 * bcm63xx_ep0_spoof_set_iface - Synthesize a SET_INTERFACE request.
 * @udc: Reference to the device controller.
 */
static int bcm63xx_ep0_spoof_set_iface(struct bcm63xx_udc *udc)
{
	struct usb_ctrlrequest ctrl;
	int rc;

	ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_INTERFACE;
	ctrl.bRequest = USB_REQ_SET_INTERFACE;
	ctrl.wValue = cpu_to_le16(udc->alt_iface);
	ctrl.wIndex = cpu_to_le16(udc->iface);
	ctrl.wLength = 0;

	rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
	if (rc < 0) {
		dev_warn_ratelimited(udc->dev,
			"hardware auto-acked bad SET_INTERFACE(%d,%d) request\n",
			udc->iface, udc->alt_iface);
	}
	return rc;
}

/**
 * bcm63xx_ep0_map_write - dma_map and iudma_write a single request.
 * @udc: Reference to the device controller.
 * @ch_idx: IUDMA channel number.
 * @req: USB gadget layer representation of the request.
 */
static void bcm63xx_ep0_map_write(struct bcm63xx_udc *udc, int ch_idx,
	struct usb_request *req)
{
	struct bcm63xx_req *breq = our_req(req);
	struct iudma_ch *iudma = &udc->iudma[ch_idx];

	BUG_ON(udc->ep0_request);
	udc->ep0_request = req;

	req->actual = 0;
	breq->offset = 0;
	usb_gadget_map_request(&udc->gadget, req, iudma->is_tx);
	iudma_write(udc, iudma, breq);
}

/**
 * bcm63xx_ep0_complete - Set completion status and "stage" the callback.
 * @udc: Reference to the device controller.
 * @req: USB gadget layer representation of the request.
 * @status: Status to return to the gadget driver.
 */
static void bcm63xx_ep0_complete(struct bcm63xx_udc *udc,
	struct usb_request *req, int status)
{
	req->status = status;
	if (status)
		req->actual = 0;
	if (req->complete) {
		spin_unlock_irq(&udc->lock);
		req->complete(&udc->bep[0].ep, req);
		spin_lock_irq(&udc->lock);
	}
}

/**
 * bcm63xx_ep0_nuke_reply - Abort request from the gadget driver due to
 *   reset/shutdown.
 * @udc: Reference to the device controller.
 * @is_tx: Nonzero for TX (IN), zero for RX (OUT).
 */
static void bcm63xx_ep0_nuke_reply(struct bcm63xx_udc *udc, int is_tx)
{
	struct usb_request *req = udc->ep0_reply;

	udc->ep0_reply = NULL;
	usb_gadget_unmap_request(&udc->gadget, req, is_tx);
	if (udc->ep0_request == req) {
		udc->ep0_req_completed = 0;
		udc->ep0_request = NULL;
	}
	bcm63xx_ep0_complete(udc, req, -ESHUTDOWN);
}

/**
 * bcm63xx_ep0_read_complete - Close out the pending ep0 request; return
 *   transfer len.
 * @udc: Reference to the device controller.
 */
static int bcm63xx_ep0_read_complete(struct bcm63xx_udc *udc)
{
	struct usb_request *req = udc->ep0_request;

	udc->ep0_req_completed = 0;
	udc->ep0_request = NULL;

	return req->actual;
}

/**
 * bcm63xx_ep0_internal_request - Helper function to submit an ep0 request.
 * @udc: Reference to the device controller.
 * @ch_idx: IUDMA channel number.
 * @length: Number of bytes to TX/RX.
 *
 * Used for simple transfers performed by the ep0 worker.  This will always
 * use ep0_ctrl_req / ep0_ctrl_buf.
 */
static void bcm63xx_ep0_internal_request(struct bcm63xx_udc *udc, int ch_idx,
	int length)
{
	struct usb_request *req = &udc->ep0_ctrl_req.req;

	req->buf = udc->ep0_ctrl_buf;
	req->length = length;
	req->complete = NULL;

	bcm63xx_ep0_map_write(udc, ch_idx, req);
}

/**
 * bcm63xx_ep0_do_setup - Parse new SETUP packet and decide how to handle it.
 * @udc: Reference to the device controller.
 *
 * EP0_IDLE probably shouldn't ever happen.  EP0_REQUEUE means we're ready
 * for the next packet.  Anything else means the transaction requires multiple
 * stages of handling.
 */
static enum bcm63xx_ep0_state bcm63xx_ep0_do_setup(struct bcm63xx_udc *udc)
{
	int rc;
	struct usb_ctrlrequest *ctrl = (void *)udc->ep0_ctrl_buf;

	rc = bcm63xx_ep0_read_complete(udc);

	if (rc < 0) {
		dev_err(udc->dev, "missing SETUP packet\n");
		return EP0_IDLE;
	}

	/*
	 * Handle 0-byte IN STATUS acknowledgement.  The hardware doesn't
	 * always deliver these reliably, so if we happen to see one, just
	 * throw it away.
	 */
1506 	if (rc == 0)
1507 		return EP0_REQUEUE;
1508 
1509 	/* Drop malformed SETUP packets */
1510 	if (rc != sizeof(*ctrl)) {
1511 		dev_warn_ratelimited(udc->dev,
1512 			"malformed SETUP packet (%d bytes)\n", rc);
1513 		return EP0_REQUEUE;
1514 	}
1515 
1516 	/* Process new SETUP packet arriving on ep0 */
1517 	rc = bcm63xx_ep0_setup_callback(udc, ctrl);
1518 	if (rc < 0) {
1519 		bcm63xx_set_stall(udc, &udc->bep[0], true);
1520 		return EP0_REQUEUE;
1521 	}
1522 
1523 	if (!ctrl->wLength)
1524 		return EP0_REQUEUE;
1525 	else if (ctrl->bRequestType & USB_DIR_IN)
1526 		return EP0_IN_DATA_PHASE_SETUP;
1527 	else
1528 		return EP0_OUT_DATA_PHASE_SETUP;
1529 }
1530 
1531 /**
1532  * bcm63xx_ep0_do_idle - Check for outstanding requests if ep0 is idle.
1533  * @udc: Reference to the device controller.
1534  *
1535  * In state EP0_IDLE, the RX descriptor is either pending, or has been
1536  * filled with a SETUP packet from the host.  This function handles new
1537  * SETUP packets, control IRQ events (which can generate fake SETUP packets),
1538  * and reset/shutdown events.
1539  *
1540  * Returns 0 if work was done; -EAGAIN if nothing to do.
1541  */
1542 static int bcm63xx_ep0_do_idle(struct bcm63xx_udc *udc)
1543 {
1544 	if (udc->ep0_req_reset) {
1545 		udc->ep0_req_reset = 0;
1546 	} else if (udc->ep0_req_set_cfg) {
1547 		udc->ep0_req_set_cfg = 0;
1548 		if (bcm63xx_ep0_spoof_set_cfg(udc) >= 0)
1549 			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
1550 	} else if (udc->ep0_req_set_iface) {
1551 		udc->ep0_req_set_iface = 0;
1552 		if (bcm63xx_ep0_spoof_set_iface(udc) >= 0)
1553 			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
1554 	} else if (udc->ep0_req_completed) {
1555 		udc->ep0state = bcm63xx_ep0_do_setup(udc);
1556 		return udc->ep0state == EP0_IDLE ? -EAGAIN : 0;
1557 	} else if (udc->ep0_req_shutdown) {
1558 		udc->ep0_req_shutdown = 0;
1559 		udc->ep0_req_completed = 0;
1560 		udc->ep0_request = NULL;
1561 		iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
1562 		usb_gadget_unmap_request(&udc->gadget,
1563 			&udc->ep0_ctrl_req.req, 0);
1564 
1565 		/* bcm63xx_udc_pullup() is waiting for this */
1566 		mb();
1567 		udc->ep0state = EP0_SHUTDOWN;
1568 	} else if (udc->ep0_reply) {
1569 		/*
1570 		 * This could happen if a USB RESET shows up during an ep0
1571 		 * transaction (especially if a laggy driver like gadgetfs
1572 		 * is in use).
1573 		 */
1574 		dev_warn(udc->dev, "nuking unexpected reply\n");
1575 		bcm63xx_ep0_nuke_reply(udc, 0);
1576 	} else {
1577 		return -EAGAIN;
1578 	}
1579 
1580 	return 0;
1581 }
1582 
1583 /**
1584  * bcm63xx_ep0_one_round - Handle the current ep0 state.
1585  * @udc: Reference to the device controller.
1586  *
1587  * Returns 0 if work was done; -EAGAIN if nothing to do.
1588  */
1589 static int bcm63xx_ep0_one_round(struct bcm63xx_udc *udc)
1590 {
1591 	enum bcm63xx_ep0_state ep0state = udc->ep0state;
1592 	bool shutdown = udc->ep0_req_reset || udc->ep0_req_shutdown;
1593 
1594 	switch (udc->ep0state) {
1595 	case EP0_REQUEUE:
1596 		/* set up descriptor to receive SETUP packet */
1597 		bcm63xx_ep0_internal_request(udc, IUDMA_EP0_RXCHAN,
1598 					     BCM63XX_MAX_CTRL_PKT);
1599 		ep0state = EP0_IDLE;
1600 		break;
1601 	case EP0_IDLE:
1602 		return bcm63xx_ep0_do_idle(udc);
1603 	case EP0_IN_DATA_PHASE_SETUP:
1604 		/*
1605 		 * Normal case: TX request is in ep0_reply (queued by the
1606 		 * callback), or will be queued shortly.  When it's here,
1607 		 * send it to the HW and go to EP0_IN_DATA_PHASE_COMPLETE.
1608 		 *
1609 		 * Shutdown case: Stop waiting for the reply.  Just
1610 		 * REQUEUE->IDLE.  The gadget driver is NOT expected to
1611 		 * queue anything else now.
1612 		 */
1613 		if (udc->ep0_reply) {
1614 			bcm63xx_ep0_map_write(udc, IUDMA_EP0_TXCHAN,
1615 					      udc->ep0_reply);
1616 			ep0state = EP0_IN_DATA_PHASE_COMPLETE;
1617 		} else if (shutdown) {
1618 			ep0state = EP0_REQUEUE;
1619 		}
1620 		break;
1621 	case EP0_IN_DATA_PHASE_COMPLETE: {
1622 		/*
1623 		 * Normal case: TX packet (ep0_reply) is in flight; wait for
1624 		 * it to finish, then go back to REQUEUE->IDLE.
1625 		 *
1626 		 * Shutdown case: Reset the TX channel, send -ESHUTDOWN
1627 		 * completion to the gadget driver, then REQUEUE->IDLE.
1628 		 */
1629 		if (udc->ep0_req_completed) {
1630 			udc->ep0_reply = NULL;
1631 			bcm63xx_ep0_read_complete(udc);
1632 			/*
1633 			 * the "ack" sometimes gets eaten (see
1634 			 * bcm63xx_ep0_do_idle)
1635 			 */
1636 			ep0state = EP0_REQUEUE;
1637 		} else if (shutdown) {
1638 			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
1639 			bcm63xx_ep0_nuke_reply(udc, 1);
1640 			ep0state = EP0_REQUEUE;
1641 		}
1642 		break;
1643 	}
1644 	case EP0_OUT_DATA_PHASE_SETUP:
1645 		/* Similar behavior to EP0_IN_DATA_PHASE_SETUP */
1646 		if (udc->ep0_reply) {
1647 			bcm63xx_ep0_map_write(udc, IUDMA_EP0_RXCHAN,
1648 					      udc->ep0_reply);
1649 			ep0state = EP0_OUT_DATA_PHASE_COMPLETE;
1650 		} else if (shutdown) {
1651 			ep0state = EP0_REQUEUE;
1652 		}
1653 		break;
1654 	case EP0_OUT_DATA_PHASE_COMPLETE: {
1655 		/* Similar behavior to EP0_IN_DATA_PHASE_COMPLETE */
1656 		if (udc->ep0_req_completed) {
1657 			udc->ep0_reply = NULL;
1658 			bcm63xx_ep0_read_complete(udc);
1659 
1660 			/* send 0-byte ack to host */
1661 			bcm63xx_ep0_internal_request(udc, IUDMA_EP0_TXCHAN, 0);
1662 			ep0state = EP0_OUT_STATUS_PHASE;
1663 		} else if (shutdown) {
1664 			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
1665 			bcm63xx_ep0_nuke_reply(udc, 0);
1666 			ep0state = EP0_REQUEUE;
1667 		}
1668 		break;
1669 	}
1670 	case EP0_OUT_STATUS_PHASE:
1671 		/*
1672 		 * Normal case: 0-byte OUT ack packet is in flight; wait
1673 		 * for it to finish, then go back to REQUEUE->IDLE.
1674 		 *
1675 		 * Shutdown case: just cancel the transmission.  Don't bother
1676 		 * calling the completion, because it originated from this
1677 		 * function anyway.  Then go back to REQUEUE->IDLE.
1678 		 */
1679 		if (udc->ep0_req_completed) {
1680 			bcm63xx_ep0_read_complete(udc);
1681 			ep0state = EP0_REQUEUE;
1682 		} else if (shutdown) {
1683 			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
1684 			udc->ep0_request = NULL;
1685 			ep0state = EP0_REQUEUE;
1686 		}
1687 		break;
1688 	case EP0_IN_FAKE_STATUS_PHASE: {
1689 		/*
1690 		 * Normal case: we spoofed a SETUP packet and are now
1691 		 * waiting for the gadget driver to send a 0-byte reply.
1692 		 * This doesn't actually get sent to the HW because the
1693 		 * HW has already sent its own reply.  Once we get the
1694 		 * response, return to IDLE.
1695 		 *
1696 		 * Shutdown case: return to IDLE immediately.
1697 		 *
1698 		 * Note that the ep0 RX descriptor has remained queued
1699 		 * (and possibly unfilled) during this entire transaction.
1700 		 * The HW datapath (IUDMA) never even sees SET_CONFIGURATION
1701 		 * or SET_INTERFACE transactions.
1702 		 */
1703 		struct usb_request *r = udc->ep0_reply;
1704 
1705 		if (!r) {
1706 			if (shutdown)
1707 				ep0state = EP0_IDLE;
1708 			break;
1709 		}
1710 
1711 		bcm63xx_ep0_complete(udc, r, 0);
1712 		udc->ep0_reply = NULL;
1713 		ep0state = EP0_IDLE;
1714 		break;
1715 	}
1716 	case EP0_SHUTDOWN:
1717 		break;
1718 	}
1719 
1720 	if (udc->ep0state == ep0state)
1721 		return -EAGAIN;
1722 
1723 	udc->ep0state = ep0state;
1724 	return 0;
1725 }
1726 
1727 /**
1728  * bcm63xx_ep0_process - ep0 worker thread / state machine.
1729  * @w: Workqueue struct.
1730  *
1731  * bcm63xx_ep0_process is triggered any time an event occurs on ep0.  It
1732  * is used to synchronize ep0 events and ensure that both HW and SW events
1733  * occur in a well-defined order.  When the ep0 IUDMA queues are idle, it may
1734  * synthesize SET_CONFIGURATION / SET_INTERFACE requests that were consumed
1735  * by the USBD hardware.
1736  *
1737  * The worker function will continue iterating around the state machine
1738  * until there is nothing left to do.  Usually "nothing left to do" means
1739  * that we're waiting for a new event from the hardware.
1740  */
1741 static void bcm63xx_ep0_process(struct work_struct *w)
1742 {
	struct bcm63xx_udc *udc = container_of(w, struct bcm63xx_udc, ep0_wq);

	spin_lock_irq(&udc->lock);
1745 	while (bcm63xx_ep0_one_round(udc) == 0)
1746 		;
1747 	spin_unlock_irq(&udc->lock);
1748 }
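
/*
 * Typical flow, for illustration: an ISR sets a flag such as
 * udc->ep0_req_set_cfg and calls schedule_work(&udc->ep0_wq); this worker
 * then runs bcm63xx_ep0_one_round() until it returns -EAGAIN, i.e. until
 * the state machine is once again blocked waiting on the hardware.
 */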
1749 
1750 /***********************************************************************
1751  * Standard UDC gadget operations
1752  ***********************************************************************/
1753 
1754 /**
1755  * bcm63xx_udc_get_frame - Read current SOF frame number from the HW.
1756  * @gadget: USB slave device.
1757  */
1758 static int bcm63xx_udc_get_frame(struct usb_gadget *gadget)
1759 {
1760 	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1761 
1762 	return (usbd_readl(udc, USBD_STATUS_REG) &
1763 		USBD_STATUS_SOF_MASK) >> USBD_STATUS_SOF_SHIFT;
1764 }
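
/*
 * Gadget drivers normally reach this through the framework wrapper
 * usb_gadget_frame_number() declared in <linux/usb/gadget.h> (a sketch of
 * the usual call path, not something this driver invokes itself).
 */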
1765 
1766 /**
1767  * bcm63xx_udc_pullup - Enable/disable pullup on D+ line.
1768  * @gadget: USB slave device.
1769  * @is_on: 0 to disable pullup, 1 to enable.
1770  *
1771  * See notes in bcm63xx_select_pullup().
1772  */
1773 static int bcm63xx_udc_pullup(struct usb_gadget *gadget, int is_on)
1774 {
1775 	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1776 	unsigned long flags;
1777 	int i, rc = -EINVAL;
1778 
1779 	spin_lock_irqsave(&udc->lock, flags);
1780 	if (is_on && udc->ep0state == EP0_SHUTDOWN) {
1781 		udc->gadget.speed = USB_SPEED_UNKNOWN;
1782 		udc->ep0state = EP0_REQUEUE;
1783 		bcm63xx_fifo_setup(udc);
1784 		bcm63xx_fifo_reset(udc);
1785 		bcm63xx_ep_setup(udc);
1786 
1787 		bitmap_zero(&udc->wedgemap, BCM63XX_NUM_EP);
1788 		for (i = 0; i < BCM63XX_NUM_EP; i++)
1789 			bcm63xx_set_stall(udc, &udc->bep[i], false);
1790 
1791 		bcm63xx_set_ctrl_irqs(udc, true);
1792 		bcm63xx_select_pullup(gadget_to_udc(gadget), true);
1793 		rc = 0;
1794 	} else if (!is_on && udc->ep0state != EP0_SHUTDOWN) {
1795 		bcm63xx_select_pullup(gadget_to_udc(gadget), false);
1796 
1797 		udc->ep0_req_shutdown = 1;
1798 		spin_unlock_irqrestore(&udc->lock, flags);
1799 
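		/*
		 * Poll (with the lock dropped) until the ep0 worker drains
		 * the state machine into EP0_SHUTDOWN.
		 */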
1800 		while (1) {
1801 			schedule_work(&udc->ep0_wq);
1802 			if (udc->ep0state == EP0_SHUTDOWN)
1803 				break;
1804 			msleep(50);
1805 		}
1806 		bcm63xx_set_ctrl_irqs(udc, false);
1807 		cancel_work_sync(&udc->ep0_wq);
1808 		return 0;
1809 	}
1810 
1811 	spin_unlock_irqrestore(&udc->lock, flags);
1812 	return rc;
1813 }
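
/*
 * The UDC core normally drives this via usb_gadget_connect() /
 * usb_gadget_disconnect(); depending on kernel version it may also be
 * reachable from userspace through a udc-core "soft_connect" attribute.
 */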
1814 
1815 /**
1816  * bcm63xx_udc_start - Start the controller.
1817  * @gadget: USB slave device.
1818  * @driver: Driver for USB slave devices.
1819  */
1820 static int bcm63xx_udc_start(struct usb_gadget *gadget,
1821 		struct usb_gadget_driver *driver)
1822 {
1823 	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1824 	unsigned long flags;
1825 
1826 	if (!driver || driver->max_speed < USB_SPEED_HIGH ||
1827 	    !driver->setup)
1828 		return -EINVAL;
1829 	if (!udc)
1830 		return -ENODEV;
1831 	if (udc->driver)
1832 		return -EBUSY;
1833 
1834 	spin_lock_irqsave(&udc->lock, flags);
1835 
1836 	set_clocks(udc, true);
1837 	bcm63xx_fifo_setup(udc);
1838 	bcm63xx_ep_init(udc);
1839 	bcm63xx_ep_setup(udc);
1840 	bcm63xx_fifo_reset(udc);
1841 	bcm63xx_select_phy_mode(udc, true);
1842 
1843 	udc->driver = driver;
1844 	driver->driver.bus = NULL;
1845 	udc->gadget.dev.of_node = udc->dev->of_node;
1846 
1847 	spin_unlock_irqrestore(&udc->lock, flags);
1848 
1849 	return 0;
1850 }
1851 
1852 /**
1853  * bcm63xx_udc_stop - Shut down the controller.
1854  * @gadget: USB slave device.
1856  */
1857 static int bcm63xx_udc_stop(struct usb_gadget *gadget)
1858 {
1859 	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1860 	unsigned long flags;
1861 
1862 	spin_lock_irqsave(&udc->lock, flags);
1863 
1864 	udc->driver = NULL;
1865 
1866 	/*
1867 	 * If we switch the PHY too abruptly after dropping D+, the host
1868 	 * will often complain:
1869 	 *
1870 	 *     hub 1-0:1.0: port 1 disabled by hub (EMI?), re-enabling...
1871 	 */
1872 	msleep(100);
1873 
1874 	bcm63xx_select_phy_mode(udc, false);
1875 	set_clocks(udc, false);
1876 
1877 	spin_unlock_irqrestore(&udc->lock, flags);
1878 
1879 	return 0;
1880 }
1881 
1882 static const struct usb_gadget_ops bcm63xx_udc_ops = {
1883 	.get_frame	= bcm63xx_udc_get_frame,
1884 	.pullup		= bcm63xx_udc_pullup,
1885 	.udc_start	= bcm63xx_udc_start,
1886 	.udc_stop	= bcm63xx_udc_stop,
1887 };
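
/*
 * These ops are invoked by the UDC core: .udc_start/.udc_stop bracket
 * gadget driver binding, and .pullup toggles the D+ termination (see
 * bcm63xx_udc_pullup() above).
 */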
1888 
1889 /***********************************************************************
1890  * IRQ handling
1891  ***********************************************************************/
1892 
1893 /**
1894  * bcm63xx_update_cfg_iface - Read current configuration/interface settings.
1895  * @udc: Reference to the device controller.
1896  *
1897  * This controller intercepts SET_CONFIGURATION and SET_INTERFACE messages.
1898  * The driver never sees the raw control packets coming in on the ep0
1899  * IUDMA channel, but at least we get an interrupt event to tell us that
1900  * new values are waiting in the USBD_STATUS register.
1901  */
1902 static void bcm63xx_update_cfg_iface(struct bcm63xx_udc *udc)
1903 {
1904 	u32 reg = usbd_readl(udc, USBD_STATUS_REG);
1905 
1906 	udc->cfg = (reg & USBD_STATUS_CFG_MASK) >> USBD_STATUS_CFG_SHIFT;
1907 	udc->iface = (reg & USBD_STATUS_INTF_MASK) >> USBD_STATUS_INTF_SHIFT;
1908 	udc->alt_iface = (reg & USBD_STATUS_ALTINTF_MASK) >>
1909 			 USBD_STATUS_ALTINTF_SHIFT;
1910 	bcm63xx_ep_setup(udc);
1911 }
1912 
1913 /**
1914  * bcm63xx_update_link_speed - Check to see if the link speed has changed.
1915  * @udc: Reference to the device controller.
1916  *
1917  * The link speed update coincides with a SETUP IRQ.  Returns 1 if the
1918  * speed has changed, so that the caller can update the endpoint settings.
1919  */
1920 static int bcm63xx_update_link_speed(struct bcm63xx_udc *udc)
1921 {
1922 	u32 reg = usbd_readl(udc, USBD_STATUS_REG);
1923 	enum usb_device_speed oldspeed = udc->gadget.speed;
1924 
1925 	switch ((reg & USBD_STATUS_SPD_MASK) >> USBD_STATUS_SPD_SHIFT) {
1926 	case BCM63XX_SPD_HIGH:
1927 		udc->gadget.speed = USB_SPEED_HIGH;
1928 		break;
1929 	case BCM63XX_SPD_FULL:
1930 		udc->gadget.speed = USB_SPEED_FULL;
1931 		break;
1932 	default:
1933 		/* this should never happen */
1934 		udc->gadget.speed = USB_SPEED_UNKNOWN;
1935 		dev_err(udc->dev,
1936 			"received SETUP packet with invalid link speed\n");
1937 		return 0;
1938 	}
1939 
1940 	if (udc->gadget.speed != oldspeed) {
1941 		dev_info(udc->dev, "link up, %s-speed mode\n",
1942 			 udc->gadget.speed == USB_SPEED_HIGH ? "high" : "full");
1943 		return 1;
1944 	} else {
1945 		return 0;
1946 	}
1947 }
1948 
1949 /**
1950  * bcm63xx_update_wedge - Iterate through wedged endpoints.
1951  * @udc: Reference to the device controller.
1952  * @new_status: true to "refresh" wedge status; false to clear it.
1953  *
1954  * On a SETUP interrupt, we need to manually "refresh" the wedge status
1955  * because the controller hardware is designed to automatically clear
1956  * stalls in response to a CLEAR_FEATURE request from the host.
1957  *
 * On a RESET interrupt, we clear the stall on every wedged endpoint and
 * drop it from the wedge map, returning it to normal operation.
1959  */
1960 static void bcm63xx_update_wedge(struct bcm63xx_udc *udc, bool new_status)
1961 {
1962 	int i;
1963 
1964 	for_each_set_bit(i, &udc->wedgemap, BCM63XX_NUM_EP) {
1965 		bcm63xx_set_stall(udc, &udc->bep[i], new_status);
1966 		if (!new_status)
1967 			clear_bit(i, &udc->wedgemap);
1968 	}
1969 }
1970 
1971 /**
1972  * bcm63xx_udc_ctrl_isr - ISR for control path events (USBD).
1973  * @irq: IRQ number (unused).
1974  * @dev_id: Reference to the device controller.
1975  *
1976  * This is where we handle link (VBUS) down, USB reset, speed changes,
1977  * SET_CONFIGURATION, and SET_INTERFACE events.
1978  */
1979 static irqreturn_t bcm63xx_udc_ctrl_isr(int irq, void *dev_id)
1980 {
1981 	struct bcm63xx_udc *udc = dev_id;
1982 	u32 stat;
1983 	bool disconnected = false, bus_reset = false;
1984 
1985 	stat = usbd_readl(udc, USBD_EVENT_IRQ_STATUS_REG) &
1986 	       usbd_readl(udc, USBD_EVENT_IRQ_MASK_REG);
1987 
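	/* ack the latched events before servicing them */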
1988 	usbd_writel(udc, stat, USBD_EVENT_IRQ_STATUS_REG);
1989 
1990 	spin_lock(&udc->lock);
1991 	if (stat & BIT(USBD_EVENT_IRQ_USB_LINK)) {
1992 		/* VBUS toggled */
1993 
1994 		if (!(usbd_readl(udc, USBD_EVENTS_REG) &
1995 		      USBD_EVENTS_USB_LINK_MASK) &&
1996 		      udc->gadget.speed != USB_SPEED_UNKNOWN)
1997 			dev_info(udc->dev, "link down\n");
1998 
1999 		udc->gadget.speed = USB_SPEED_UNKNOWN;
2000 		disconnected = true;
2001 	}
2002 	if (stat & BIT(USBD_EVENT_IRQ_USB_RESET)) {
2003 		bcm63xx_fifo_setup(udc);
2004 		bcm63xx_fifo_reset(udc);
2005 		bcm63xx_ep_setup(udc);
2006 
2007 		bcm63xx_update_wedge(udc, false);
2008 
2009 		udc->ep0_req_reset = 1;
2010 		schedule_work(&udc->ep0_wq);
2011 		bus_reset = true;
2012 	}
2013 	if (stat & BIT(USBD_EVENT_IRQ_SETUP)) {
2014 		if (bcm63xx_update_link_speed(udc)) {
2015 			bcm63xx_fifo_setup(udc);
2016 			bcm63xx_ep_setup(udc);
2017 		}
2018 		bcm63xx_update_wedge(udc, true);
2019 	}
2020 	if (stat & BIT(USBD_EVENT_IRQ_SETCFG)) {
2021 		bcm63xx_update_cfg_iface(udc);
2022 		udc->ep0_req_set_cfg = 1;
2023 		schedule_work(&udc->ep0_wq);
2024 	}
2025 	if (stat & BIT(USBD_EVENT_IRQ_SETINTF)) {
2026 		bcm63xx_update_cfg_iface(udc);
2027 		udc->ep0_req_set_iface = 1;
2028 		schedule_work(&udc->ep0_wq);
2029 	}
2030 	spin_unlock(&udc->lock);
2031 
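	/* notify the gadget driver with udc->lock dropped */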
2032 	if (disconnected && udc->driver)
2033 		udc->driver->disconnect(&udc->gadget);
2034 	else if (bus_reset && udc->driver)
2035 		usb_gadget_udc_reset(&udc->gadget, udc->driver);
2036 
2037 	return IRQ_HANDLED;
2038 }
2039 
2040 /**
2041  * bcm63xx_udc_data_isr - ISR for data path events (IUDMA).
2042  * @irq: IRQ number (unused).
2043  * @dev_id: Reference to the IUDMA channel that generated the interrupt.
2044  *
 * For the two ep0 channels, we have special handling that triggers the
 * ep0 worker thread.  For normal bulk/interrupt channels, we either queue
 * up the next buffer descriptor (if the transaction is incomplete) or
 * invoke the completion callback (if the transaction has finished).
2049  */
2050 static irqreturn_t bcm63xx_udc_data_isr(int irq, void *dev_id)
2051 {
2052 	struct iudma_ch *iudma = dev_id;
2053 	struct bcm63xx_udc *udc = iudma->udc;
2054 	struct bcm63xx_ep *bep;
2055 	struct usb_request *req = NULL;
2056 	struct bcm63xx_req *breq = NULL;
2057 	int rc;
2058 	bool is_done = false;
2059 
2060 	spin_lock(&udc->lock);
2061 
2062 	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
2063 			ENETDMAC_IR_REG, iudma->ch_idx);
2064 	bep = iudma->bep;
2065 	rc = iudma_read(udc, iudma);
2066 
2067 	/* special handling for EP0 RX (0) and TX (1) */
2068 	if (iudma->ch_idx == IUDMA_EP0_RXCHAN ||
2069 	    iudma->ch_idx == IUDMA_EP0_TXCHAN) {
2070 		req = udc->ep0_request;
2071 		breq = our_req(req);
2072 
2073 		/* a single request could require multiple submissions */
2074 		if (rc >= 0) {
2075 			req->actual += rc;
2076 
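			/*
			 * The request is done when it is fully satisfied, or
			 * when the HW delivered a short BD (bd_bytes > rc).
			 */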
2077 			if (req->actual >= req->length || breq->bd_bytes > rc) {
2078 				udc->ep0_req_completed = 1;
2079 				is_done = true;
2080 				schedule_work(&udc->ep0_wq);
2081 
				/* the HW reports 1 byte of "actual" data for a ZLP; clamp it */
2083 				req->actual = min(req->actual, req->length);
2084 			} else {
2085 				/* queue up the next BD (same request) */
2086 				iudma_write(udc, iudma, breq);
2087 			}
2088 		}
2089 	} else if (!list_empty(&bep->queue)) {
2090 		breq = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
2091 		req = &breq->req;
2092 
2093 		if (rc >= 0) {
2094 			req->actual += rc;
2095 
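			/* same completion test as the ep0 path above */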
2096 			if (req->actual >= req->length || breq->bd_bytes > rc) {
2097 				is_done = true;
2098 				list_del(&breq->queue);
2099 
2100 				req->actual = min(req->actual, req->length);
2101 
2102 				if (!list_empty(&bep->queue)) {
2103 					struct bcm63xx_req *next;
2104 
2105 					next = list_first_entry(&bep->queue,
2106 						struct bcm63xx_req, queue);
2107 					iudma_write(udc, iudma, next);
2108 				}
2109 			} else {
2110 				iudma_write(udc, iudma, breq);
2111 			}
2112 		}
2113 	}
2114 	spin_unlock(&udc->lock);
2115 
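	/*
	 * Call the completion with udc->lock dropped; gadget drivers may
	 * resubmit requests from inside their callback.
	 */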
2116 	if (is_done) {
2117 		usb_gadget_unmap_request(&udc->gadget, req, iudma->is_tx);
2118 		if (req->complete)
2119 			req->complete(&bep->ep, req);
2120 	}
2121 
2122 	return IRQ_HANDLED;
2123 }
2124 
2125 /***********************************************************************
2126  * Debug filesystem
2127  ***********************************************************************/
2128 
2129 /*
2130  * bcm63xx_usbd_dbg_show - Show USBD controller state.
2131  * @s: seq_file to which the information will be written.
2132  * @p: Unused.
2133  *
2134  * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/usbd
2135  */
2136 static int bcm63xx_usbd_dbg_show(struct seq_file *s, void *p)
2137 {
2138 	struct bcm63xx_udc *udc = s->private;
2139 
2140 	if (!udc->driver)
2141 		return -ENODEV;
2142 
2143 	seq_printf(s, "ep0 state: %s\n",
2144 		   bcm63xx_ep0_state_names[udc->ep0state]);
2145 	seq_printf(s, "  pending requests: %s%s%s%s%s%s%s\n",
2146 		   udc->ep0_req_reset ? "reset " : "",
2147 		   udc->ep0_req_set_cfg ? "set_cfg " : "",
2148 		   udc->ep0_req_set_iface ? "set_iface " : "",
2149 		   udc->ep0_req_shutdown ? "shutdown " : "",
2150 		   udc->ep0_request ? "pending " : "",
2151 		   udc->ep0_req_completed ? "completed " : "",
2152 		   udc->ep0_reply ? "reply " : "");
2153 	seq_printf(s, "cfg: %d; iface: %d; alt_iface: %d\n",
2154 		   udc->cfg, udc->iface, udc->alt_iface);
	seq_puts(s, "regs:\n");
2156 	seq_printf(s, "  control: %08x; straps: %08x; status: %08x\n",
2157 		   usbd_readl(udc, USBD_CONTROL_REG),
2158 		   usbd_readl(udc, USBD_STRAPS_REG),
2159 		   usbd_readl(udc, USBD_STATUS_REG));
2160 	seq_printf(s, "  events:  %08x; stall:  %08x\n",
2161 		   usbd_readl(udc, USBD_EVENTS_REG),
2162 		   usbd_readl(udc, USBD_STALL_REG));
2163 
2164 	return 0;
2165 }
2166 
2167 /*
2168  * bcm63xx_iudma_dbg_show - Show IUDMA status and descriptors.
2169  * @s: seq_file to which the information will be written.
2170  * @p: Unused.
2171  *
2172  * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/iudma
2173  */
2174 static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
2175 {
2176 	struct bcm63xx_udc *udc = s->private;
2177 	int ch_idx, i;
2178 	u32 sram2, sram3;
2179 
2180 	if (!udc->driver)
2181 		return -ENODEV;
2182 
2183 	for (ch_idx = 0; ch_idx < BCM63XX_NUM_IUDMA; ch_idx++) {
2184 		struct iudma_ch *iudma = &udc->iudma[ch_idx];
2185 		struct list_head *pos;
2186 
2187 		seq_printf(s, "IUDMA channel %d -- ", ch_idx);
2188 		switch (iudma_defaults[ch_idx].ep_type) {
2189 		case BCMEP_CTRL:
2190 			seq_printf(s, "control");
2191 			break;
2192 		case BCMEP_BULK:
2193 			seq_printf(s, "bulk");
2194 			break;
2195 		case BCMEP_INTR:
2196 			seq_printf(s, "interrupt");
2197 			break;
2198 		}
2199 		seq_printf(s, ch_idx & 0x01 ? " tx" : " rx");
2200 		seq_printf(s, " [ep%d]:\n",
2201 			   max_t(int, iudma_defaults[ch_idx].ep_num, 0));
2202 		seq_printf(s, "  cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n",
2203 			   usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx),
2204 			   usb_dmac_readl(udc, ENETDMAC_IR_REG, ch_idx),
2205 			   usb_dmac_readl(udc, ENETDMAC_IRMASK_REG, ch_idx),
2206 			   usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG, ch_idx));
2207 
2208 		sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG, ch_idx);
2209 		sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG, ch_idx);
2210 		seq_printf(s, "  base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n",
2211 			   usb_dmas_readl(udc, ENETDMAS_RSTART_REG, ch_idx),
2212 			   sram2 >> 16, sram2 & 0xffff,
2213 			   sram3 >> 16, sram3 & 0xffff,
2214 			   usb_dmas_readl(udc, ENETDMAS_SRAM4_REG, ch_idx));
2215 		seq_printf(s, "  desc: %d/%d used", iudma->n_bds_used,
2216 			   iudma->n_bds);
2217 
2218 		if (iudma->bep) {
2219 			i = 0;
2220 			list_for_each(pos, &iudma->bep->queue)
2221 				i++;
2222 			seq_printf(s, "; %d queued\n", i);
2223 		} else {
			seq_puts(s, "\n");
2225 		}
2226 
2227 		for (i = 0; i < iudma->n_bds; i++) {
2228 			struct bcm_enet_desc *d = &iudma->bd_ring[i];
2229 
			seq_printf(s, "  %03x (%02x): len_stat: %04x_%04x; pa %08x",
				   (unsigned int)(i * sizeof(*d)), i,
				   d->len_stat >> 16, d->len_stat & 0xffff,
				   d->address);
			if (d == iudma->read_bd)
				seq_puts(s, "   <<RD");
			if (d == iudma->write_bd)
				seq_puts(s, "   <<WR");
			seq_puts(s, "\n");
2239 		}
2240 
		seq_puts(s, "\n");
2242 	}
2243 
2244 	return 0;
2245 }
2246 
2247 static int bcm63xx_usbd_dbg_open(struct inode *inode, struct file *file)
2248 {
2249 	return single_open(file, bcm63xx_usbd_dbg_show, inode->i_private);
2250 }
2251 
2252 static int bcm63xx_iudma_dbg_open(struct inode *inode, struct file *file)
2253 {
2254 	return single_open(file, bcm63xx_iudma_dbg_show, inode->i_private);
2255 }
2256 
2257 static const struct file_operations usbd_dbg_fops = {
2258 	.owner		= THIS_MODULE,
2259 	.open		= bcm63xx_usbd_dbg_open,
2260 	.llseek		= seq_lseek,
2261 	.read		= seq_read,
2262 	.release	= single_release,
2263 };
2264 
2265 static const struct file_operations iudma_dbg_fops = {
2266 	.owner		= THIS_MODULE,
2267 	.open		= bcm63xx_iudma_dbg_open,
2268 	.llseek		= seq_lseek,
2269 	.read		= seq_read,
2270 	.release	= single_release,
2271 };
2272 
2274 /**
2275  * bcm63xx_udc_init_debugfs - Create debugfs entries.
2276  * @udc: Reference to the device controller.
2277  */
2278 static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
2279 {
2280 	struct dentry *root, *usbd, *iudma;
2281 
2282 	if (!IS_ENABLED(CONFIG_USB_GADGET_DEBUG_FS))
2283 		return;
2284 
2285 	root = debugfs_create_dir(udc->gadget.name, NULL);
	if (IS_ERR_OR_NULL(root))
2287 		goto err_root;
2288 
2289 	usbd = debugfs_create_file("usbd", 0400, root, udc,
2290 			&usbd_dbg_fops);
2291 	if (!usbd)
2292 		goto err_usbd;
2293 	iudma = debugfs_create_file("iudma", 0400, root, udc,
2294 			&iudma_dbg_fops);
2295 	if (!iudma)
2296 		goto err_iudma;
2297 
2298 	udc->debugfs_root = root;
2299 	udc->debugfs_usbd = usbd;
2300 	udc->debugfs_iudma = iudma;
2301 	return;
2302 err_iudma:
2303 	debugfs_remove(usbd);
2304 err_usbd:
2305 	debugfs_remove(root);
2306 err_root:
2307 	dev_err(udc->dev, "debugfs is not available\n");
2308 }
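
/*
 * With debugfs mounted (e.g. "mount -t debugfs none /sys/kernel/debug"),
 * the dumps can be read with:
 *
 *   cat /sys/kernel/debug/<gadget.name>/usbd
 *   cat /sys/kernel/debug/<gadget.name>/iudma
 *
 * where <gadget.name> is the directory created above from udc->gadget.name.
 */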
2309 
2310 /**
2311  * bcm63xx_udc_cleanup_debugfs - Remove debugfs entries.
2312  * @udc: Reference to the device controller.
2313  *
2314  * debugfs_remove() is safe to call with a NULL argument.
2315  */
2316 static void bcm63xx_udc_cleanup_debugfs(struct bcm63xx_udc *udc)
2317 {
2318 	debugfs_remove(udc->debugfs_iudma);
2319 	debugfs_remove(udc->debugfs_usbd);
2320 	debugfs_remove(udc->debugfs_root);
2321 	udc->debugfs_iudma = NULL;
2322 	udc->debugfs_usbd = NULL;
2323 	udc->debugfs_root = NULL;
2324 }
2325 
2326 /***********************************************************************
2327  * Driver init/exit
2328  ***********************************************************************/
2329 
2330 /**
2331  * bcm63xx_udc_probe - Initialize a new instance of the UDC.
2332  * @pdev: Platform device struct from the bcm63xx BSP code.
2333  *
2334  * Note that platform data is required, because pd.port_no varies from chip
2335  * to chip and is used to switch the correct USB port to device mode.
2336  */
2337 static int bcm63xx_udc_probe(struct platform_device *pdev)
2338 {
2339 	struct device *dev = &pdev->dev;
2340 	struct bcm63xx_usbd_platform_data *pd = dev_get_platdata(dev);
2341 	struct bcm63xx_udc *udc;
2342 	struct resource *res;
2343 	int rc = -ENOMEM, i, irq;
2344 
2345 	udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
2346 	if (!udc)
2347 		return -ENOMEM;
2348 
2349 	platform_set_drvdata(pdev, udc);
2350 	udc->dev = dev;
2351 	udc->pd = pd;
2352 
2353 	if (!pd) {
2354 		dev_err(dev, "missing platform data\n");
2355 		return -EINVAL;
2356 	}
2357 
2358 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2359 	udc->usbd_regs = devm_ioremap_resource(dev, res);
2360 	if (IS_ERR(udc->usbd_regs))
2361 		return PTR_ERR(udc->usbd_regs);
2362 
2363 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2364 	udc->iudma_regs = devm_ioremap_resource(dev, res);
2365 	if (IS_ERR(udc->iudma_regs))
2366 		return PTR_ERR(udc->iudma_regs);
2367 
2368 	spin_lock_init(&udc->lock);
2369 	INIT_WORK(&udc->ep0_wq, bcm63xx_ep0_process);
2370 
2371 	udc->gadget.ops = &bcm63xx_udc_ops;
2372 	udc->gadget.name = dev_name(dev);
2373 
2374 	if (!pd->use_fullspeed && !use_fullspeed)
2375 		udc->gadget.max_speed = USB_SPEED_HIGH;
2376 	else
2377 		udc->gadget.max_speed = USB_SPEED_FULL;
2378 
2379 	/* request clocks, allocate buffers, and clear any pending IRQs */
2380 	rc = bcm63xx_init_udc_hw(udc);
2381 	if (rc)
2382 		return rc;
2383 
2384 	rc = -ENXIO;
2385 
2386 	/* IRQ resource #0: control interrupt (VBUS, speed, etc.) */
2387 	irq = platform_get_irq(pdev, 0);
2388 	if (irq < 0) {
2389 		dev_err(dev, "missing IRQ resource #0\n");
2390 		goto out_uninit;
2391 	}
2392 	if (devm_request_irq(dev, irq, &bcm63xx_udc_ctrl_isr, 0,
2393 			     dev_name(dev), udc) < 0) {
2394 		dev_err(dev, "error requesting IRQ #%d\n", irq);
2395 		goto out_uninit;
2396 	}
2397 
2398 	/* IRQ resources #1-6: data interrupts for IUDMA channels 0-5 */
2399 	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
2400 		irq = platform_get_irq(pdev, i + 1);
2401 		if (irq < 0) {
2402 			dev_err(dev, "missing IRQ resource #%d\n", i + 1);
2403 			goto out_uninit;
2404 		}
2405 		if (devm_request_irq(dev, irq, &bcm63xx_udc_data_isr, 0,
2406 				     dev_name(dev), &udc->iudma[i]) < 0) {
2407 			dev_err(dev, "error requesting IRQ #%d\n", irq);
2408 			goto out_uninit;
2409 		}
2410 	}
2411 
2412 	bcm63xx_udc_init_debugfs(udc);
2413 	rc = usb_add_gadget_udc(dev, &udc->gadget);
2414 	if (!rc)
2415 		return 0;
2416 
2417 	bcm63xx_udc_cleanup_debugfs(udc);
2418 out_uninit:
2419 	bcm63xx_uninit_udc_hw(udc);
2420 	return rc;
2421 }
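
/*
 * Rough sketch of the expected board wiring (details vary by chip/BSP):
 * the bcm63xx board code registers a "bcm63xx_udc" platform device with
 * bcm63xx_usbd_platform_data attached, two MEM resources (USBD then IUDMA
 * register blocks) and seven IRQ resources (one control IRQ plus one per
 * IUDMA channel), matching what bcm63xx_udc_probe() consumes above.
 */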
2422 
2423 /**
2424  * bcm63xx_udc_remove - Remove the device from the system.
2425  * @pdev: Platform device struct from the bcm63xx BSP code.
2426  */
2427 static int bcm63xx_udc_remove(struct platform_device *pdev)
2428 {
2429 	struct bcm63xx_udc *udc = platform_get_drvdata(pdev);
2430 
2431 	bcm63xx_udc_cleanup_debugfs(udc);
2432 	usb_del_gadget_udc(&udc->gadget);
2433 	BUG_ON(udc->driver);
2434 
2435 	bcm63xx_uninit_udc_hw(udc);
2436 
2437 	return 0;
2438 }
2439 
2440 static struct platform_driver bcm63xx_udc_driver = {
2441 	.probe		= bcm63xx_udc_probe,
2442 	.remove		= bcm63xx_udc_remove,
2443 	.driver		= {
2444 		.name	= DRV_MODULE_NAME,
2445 	},
2446 };
2447 module_platform_driver(bcm63xx_udc_driver);
2448 
2449 MODULE_DESCRIPTION("BCM63xx USB Peripheral Controller");
2450 MODULE_AUTHOR("Kevin Cernekee <cernekee@gmail.com>");
2451 MODULE_LICENSE("GPL");
2452 MODULE_ALIAS("platform:" DRV_MODULE_NAME);
2453