xref: /linux/drivers/usb/musb/musb_host.c (revision 367b8112fe2ea5c39a7bb4d263dcdd9b612fae18)
1 /*
2  * MUSB OTG driver host support
3  *
4  * Copyright 2005 Mentor Graphics Corporation
5  * Copyright (C) 2005-2006 by Texas Instruments
6  * Copyright (C) 2006-2007 Nokia Corporation
7  *
8  * This program is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU General Public License
10  * version 2 as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20  * 02110-1301 USA
21  *
22  * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
23  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
25  * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
28  * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
29  * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  *
33  */
34 
35 #include <linux/module.h>
36 #include <linux/kernel.h>
37 #include <linux/delay.h>
38 #include <linux/sched.h>
39 #include <linux/slab.h>
40 #include <linux/errno.h>
41 #include <linux/init.h>
42 #include <linux/list.h>
43 
44 #include "musb_core.h"
45 #include "musb_host.h"
46 
47 
48 /* MUSB HOST status 22-mar-2006
49  *
50  * - There's still lots of partial code duplication for fault paths, so
51  *   they aren't handled as consistently as they need to be.
52  *
53  * - PIO mostly behaved when last tested.
54  *     + including ep0, with all usbtest cases 9, 10
55  *     + usbtest 14 (ep0out) doesn't seem to run at all
56  *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
57  *       configurations, but otherwise double buffering passes basic tests.
58  *     + for 2.6.N, for N > ~10, needs API changes for hcd framework.
59  *
60  * - DMA (CPPI) ... partially behaves, not currently recommended
61  *     + about 1/15 the speed of typical EHCI implementations (PCI)
62  *     + RX, all too often reqpkt seems to misbehave after tx
63  *     + TX, no known issues (other than evident silicon issue)
64  *
65  * - DMA (Mentor/OMAP) ...has at least toggle update problems
66  *
67  * - Still no traffic scheduling code to keep NAKing bulk or control
68  *   transfers from starving other requests, or to make efficient use
69  *   of the hardware for periodic transfers.  (Note that network drivers
70  *   commonly post bulk reads that stay pending for a long time; these
71  *   would make very visible trouble.)
72  *
73  * - Not tested with HNP, but some SRP paths seem to behave.
74  *
75  * NOTE 24-August-2006:
76  *
77  * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
78  *   extra endpoint for periodic use enabling hub + keybd + mouse.  That
79  *   mostly works, except that with "usbnet" it's easy to trigger cases
80  *   with "ping" where RX loses.  (a) ping to davinci, even "ping -f",
81  *   fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
82  *   although ARP RX wins.  (That test was done with a full speed link.)
83  */
84 
85 
86 /*
87  * NOTE on endpoint usage:
88  *
89  * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
90  * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
91  *
92  * (Yes, bulk _could_ use more of the endpoints than that, and would even
93  * benefit from it ... one remote device may easily be NAKing while others
94  * need to perform transfers in that same direction.  The same thing could
95  * be done in software though, assuming dma cooperates.)
96  *
97  * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
98  * So far that scheduling is both dumb and optimistic:  the endpoint will be
99  * "claimed" until its software queue is no longer refilled.  No multiplexing
100  * of transfers between endpoints, or anything clever.
101  */
102 
103 
104 static void musb_ep_program(struct musb *musb, u8 epnum,
105 			struct urb *urb, unsigned int nOut,
106 			u8 *buf, u32 len);
107 
108 /*
109  * Clear TX fifo. Needed to avoid BABBLE errors.
110  */
111 static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
112 {
113 	void __iomem	*epio = ep->regs;
114 	u16		csr;
115 	int		retries = 1000;
116 
117 	csr = musb_readw(epio, MUSB_TXCSR);
118 	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
119 		DBG(5, "Host TX FIFONOTEMPTY csr: %04x\n", csr);
120 		csr |= MUSB_TXCSR_FLUSHFIFO;
121 		musb_writew(epio, MUSB_TXCSR, csr);
122 		csr = musb_readw(epio, MUSB_TXCSR);
123 		if (retries-- < 1) {
124 			ERR("Could not flush host TX fifo: csr: %04x\n", csr);
125 			return;
126 		}
127 		mdelay(1);
128 	}
129 }
130 
131 /*
132  * Start transmit. Caller is responsible for locking shared resources.
133  * musb must be locked.
134  */
135 static inline void musb_h_tx_start(struct musb_hw_ep *ep)
136 {
137 	u16	txcsr;
138 
139 	/* NOTE: no locks here; caller should lock and select EP */
140 	if (ep->epnum) {
141 		txcsr = musb_readw(ep->regs, MUSB_TXCSR);
142 		txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
143 		musb_writew(ep->regs, MUSB_TXCSR, txcsr);
144 	} else {
145 		txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
146 		musb_writew(ep->regs, MUSB_CSR0, txcsr);
147 	}
148 
149 }
150 
151 static inline void cppi_host_txdma_start(struct musb_hw_ep *ep)
152 {
153 	u16	txcsr;
154 
155 	/* NOTE: no locks here; caller should lock and select EP */
156 	txcsr = musb_readw(ep->regs, MUSB_TXCSR);
157 	txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
158 	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
159 }
160 
161 /*
162  * Start the URB at the front of an endpoint's queue
163  * end must be claimed by the caller.
164  *
165  * Context: controller locked, irqs blocked
166  */
167 static void
168 musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
169 {
170 	u16			frame;
171 	u32			len;
172 	void			*buf;
173 	void __iomem		*mbase =  musb->mregs;
174 	struct urb		*urb = next_urb(qh);
175 	struct musb_hw_ep	*hw_ep = qh->hw_ep;
176 	unsigned		pipe = urb->pipe;
177 	u8			address = usb_pipedevice(pipe);
178 	int			epnum = hw_ep->epnum;
179 
180 	/* initialize software qh state */
181 	qh->offset = 0;
182 	qh->segsize = 0;
183 
184 	/* gather right source of data */
185 	switch (qh->type) {
186 	case USB_ENDPOINT_XFER_CONTROL:
187 		/* control transfers always start with SETUP */
188 		is_in = 0;
189 		hw_ep->out_qh = qh;
190 		musb->ep0_stage = MUSB_EP0_START;
191 		buf = urb->setup_packet;
192 		len = 8;
193 		break;
194 	case USB_ENDPOINT_XFER_ISOC:
195 		qh->iso_idx = 0;
196 		qh->frame = 0;
197 		buf = urb->transfer_buffer + urb->iso_frame_desc[0].offset;
198 		len = urb->iso_frame_desc[0].length;
199 		break;
200 	default:		/* bulk, interrupt */
201 		buf = urb->transfer_buffer;
202 		len = urb->transfer_buffer_length;
203 	}
204 
205 	DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
206 			qh, urb, address, qh->epnum,
207 			is_in ? "in" : "out",
208 			({char *s; switch (qh->type) {
209 			case USB_ENDPOINT_XFER_CONTROL:	s = ""; break;
210 			case USB_ENDPOINT_XFER_BULK:	s = "-bulk"; break;
211 			case USB_ENDPOINT_XFER_ISOC:	s = "-iso"; break;
212 			default:			s = "-intr"; break;
213 			}; s; }),
214 			epnum, buf, len);
215 
216 	/* Configure endpoint */
217 	if (is_in || hw_ep->is_shared_fifo)
218 		hw_ep->in_qh = qh;
219 	else
220 		hw_ep->out_qh = qh;
221 	musb_ep_program(musb, epnum, urb, !is_in, buf, len);
222 
223 	/* transmit may have more work: start it when it is time */
224 	if (is_in)
225 		return;
226 
227 	/* determine if the time is right for a periodic transfer */
228 	switch (qh->type) {
229 	case USB_ENDPOINT_XFER_ISOC:
230 	case USB_ENDPOINT_XFER_INT:
231 		DBG(3, "check whether there's still time for periodic Tx\n");
232 		qh->iso_idx = 0;
233 		frame = musb_readw(mbase, MUSB_FRAME);
234 		/* FIXME this doesn't implement that scheduling policy ...
235 		 * or handle framecounter wrapping
236 		 */
237 		if ((urb->transfer_flags & URB_ISO_ASAP)
238 				|| (frame >= urb->start_frame)) {
239 			/* REVISIT the SOF irq handler shouldn't duplicate
240 			 * this code; and we don't init urb->start_frame...
241 			 */
242 			qh->frame = 0;
243 			goto start;
244 		} else {
245 			qh->frame = urb->start_frame;
246 			/* enable SOF interrupt so we can count down */
247 			DBG(1, "SOF for %d\n", epnum);
248 #if 1 /* ifndef	CONFIG_ARCH_DAVINCI */
249 			musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
250 #endif
251 		}
252 		break;
253 	default:
254 start:
255 		DBG(4, "Start TX%d %s\n", epnum,
256 			hw_ep->tx_channel ? "dma" : "pio");
257 
258 		if (!hw_ep->tx_channel)
259 			musb_h_tx_start(hw_ep);
260 		else if (is_cppi_enabled() || tusb_dma_omap())
261 			cppi_host_txdma_start(hw_ep);
262 	}
263 }
264 
265 /* caller owns controller lock, irqs are blocked */
266 static void
267 __musb_giveback(struct musb *musb, struct urb *urb, int status)
268 __releases(musb->lock)
269 __acquires(musb->lock)
270 {
271 	DBG(({ int level; switch (urb->status) {
272 				case 0:
273 					level = 4;
274 					break;
275 				/* common/boring faults */
276 				case -EREMOTEIO:
277 				case -ESHUTDOWN:
278 				case -ECONNRESET:
279 				case -EPIPE:
280 					level = 3;
281 					break;
282 				default:
283 					level = 2;
284 					break;
285 				}; level; }),
286 			"complete %p (%d), dev%d ep%d%s, %d/%d\n",
287 			urb, urb->status,
288 			usb_pipedevice(urb->pipe),
289 			usb_pipeendpoint(urb->pipe),
290 			usb_pipein(urb->pipe) ? "in" : "out",
291 			urb->actual_length, urb->transfer_buffer_length
292 			);
293 
294 	usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
295 	spin_unlock(&musb->lock);
296 	usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
297 	spin_lock(&musb->lock);
298 }
299 
300 /* for bulk/interrupt endpoints only */
301 static inline void
302 musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb *urb)
303 {
304 	struct usb_device	*udev = urb->dev;
305 	u16			csr;
306 	void __iomem		*epio = ep->regs;
307 	struct musb_qh		*qh;
308 
309 	/* FIXME:  the current Mentor DMA code seems to have
310 	 * problems getting toggle correct.
311 	 */
312 
313 	if (is_in || ep->is_shared_fifo)
314 		qh = ep->in_qh;
315 	else
316 		qh = ep->out_qh;
317 
318 	if (!is_in) {
319 		csr = musb_readw(epio, MUSB_TXCSR);
320 		usb_settoggle(udev, qh->epnum, 1,
321 			(csr & MUSB_TXCSR_H_DATATOGGLE)
322 				? 1 : 0);
323 	} else {
324 		csr = musb_readw(epio, MUSB_RXCSR);
325 		usb_settoggle(udev, qh->epnum, 0,
326 			(csr & MUSB_RXCSR_H_DATATOGGLE)
327 				? 1 : 0);
328 	}
329 }
330 
331 /* caller owns controller lock, irqs are blocked */
332 static struct musb_qh *
333 musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
334 {
335 	int			is_in;
336 	struct musb_hw_ep	*ep = qh->hw_ep;
337 	struct musb		*musb = ep->musb;
338 	int			ready = qh->is_ready;
339 
340 	if (ep->is_shared_fifo)
341 		is_in = 1;
342 	else
343 		is_in = usb_pipein(urb->pipe);
344 
345 	/* save toggle eagerly, for paranoia */
346 	switch (qh->type) {
347 	case USB_ENDPOINT_XFER_BULK:
348 	case USB_ENDPOINT_XFER_INT:
349 		musb_save_toggle(ep, is_in, urb);
350 		break;
351 	case USB_ENDPOINT_XFER_ISOC:
352 		if (status == 0 && urb->error_count)
353 			status = -EXDEV;
354 		break;
355 	}
356 
357 	qh->is_ready = 0;
358 	__musb_giveback(musb, urb, status);
359 	qh->is_ready = ready;
360 
361 	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
362 	 * invalidate qh as soon as list_empty(&hep->urb_list)
363 	 */
364 	if (list_empty(&qh->hep->urb_list)) {
365 		struct list_head	*head;
366 
367 		if (is_in)
368 			ep->rx_reinit = 1;
369 		else
370 			ep->tx_reinit = 1;
371 
372 		/* clobber old pointers to this qh */
373 		if (is_in || ep->is_shared_fifo)
374 			ep->in_qh = NULL;
375 		else
376 			ep->out_qh = NULL;
377 		qh->hep->hcpriv = NULL;
378 
379 		switch (qh->type) {
380 
381 		case USB_ENDPOINT_XFER_ISOC:
382 		case USB_ENDPOINT_XFER_INT:
383 			/* this is where periodic bandwidth should be
384 			 * de-allocated if it's tracked and allocated;
385 			 * and where we'd update the schedule tree...
386 			 */
387 			musb->periodic[ep->epnum] = NULL;
388 			kfree(qh);
389 			qh = NULL;
390 			break;
391 
392 		case USB_ENDPOINT_XFER_CONTROL:
393 		case USB_ENDPOINT_XFER_BULK:
394 			/* fifo policy for these lists, except that NAKing
395 			 * should rotate a qh to the end (for fairness).
396 			 */
397 			head = qh->ring.prev;
398 			list_del(&qh->ring);
399 			kfree(qh);
400 			qh = first_qh(head);
401 			break;
402 		}
403 	}
404 	return qh;
405 }
406 
407 /*
408  * Advance this hardware endpoint's queue, completing the specified urb and
409  * advancing to either the next urb queued to that qh, or else invalidating
410  * that qh and advancing to the next qh scheduled after the current one.
411  *
412  * Context: caller owns controller lock, irqs are blocked
413  */
414 static void
415 musb_advance_schedule(struct musb *musb, struct urb *urb,
416 		struct musb_hw_ep *hw_ep, int is_in)
417 {
418 	struct musb_qh	*qh;
419 
420 	if (is_in || hw_ep->is_shared_fifo)
421 		qh = hw_ep->in_qh;
422 	else
423 		qh = hw_ep->out_qh;
424 
425 	if (urb->status == -EINPROGRESS)
426 		qh = musb_giveback(qh, urb, 0);
427 	else
428 		qh = musb_giveback(qh, urb, urb->status);
429 
430 	if (qh && qh->is_ready && !list_empty(&qh->hep->urb_list)) {
431 		DBG(4, "... next ep%d %cX urb %p\n",
432 				hw_ep->epnum, is_in ? 'R' : 'T',
433 				next_urb(qh));
434 		musb_start_urb(musb, is_in, qh);
435 	}
436 }
437 
438 static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
439 {
440 	/* we don't want fifo to fill itself again;
441 	 * ignore dma (various models),
442 	 * leave toggle alone (may not have been saved yet)
443 	 */
444 	csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
445 	csr &= ~(MUSB_RXCSR_H_REQPKT
446 		| MUSB_RXCSR_H_AUTOREQ
447 		| MUSB_RXCSR_AUTOCLEAR);
448 
449 	/* write 2x to allow double buffering */
450 	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
451 	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
452 
453 	/* flush writebuffer */
454 	return musb_readw(hw_ep->regs, MUSB_RXCSR);
455 }
456 
457 /*
458  * PIO RX for a packet (or part of it).
459  */
460 static bool
461 musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
462 {
463 	u16			rx_count;
464 	u8			*buf;
465 	u16			csr;
466 	bool			done = false;
467 	u32			length;
468 	int			do_flush = 0;
469 	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
470 	void __iomem		*epio = hw_ep->regs;
471 	struct musb_qh		*qh = hw_ep->in_qh;
472 	int			pipe = urb->pipe;
473 	void			*buffer = urb->transfer_buffer;
474 
475 	/* musb_ep_select(mbase, epnum); */
476 	rx_count = musb_readw(epio, MUSB_RXCOUNT);
477 	DBG(3, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
478 			urb->transfer_buffer, qh->offset,
479 			urb->transfer_buffer_length);
480 
481 	/* unload FIFO */
482 	if (usb_pipeisoc(pipe)) {
483 		int					status = 0;
484 		struct usb_iso_packet_descriptor	*d;
485 
486 		if (iso_err) {
487 			status = -EILSEQ;
488 			urb->error_count++;
489 		}
490 
491 		d = urb->iso_frame_desc + qh->iso_idx;
492 		buf = buffer + d->offset;
493 		length = d->length;
494 		if (rx_count > length) {
495 			if (status == 0) {
496 				status = -EOVERFLOW;
497 				urb->error_count++;
498 			}
499 			DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
500 			do_flush = 1;
501 		} else
502 			length = rx_count;
503 		urb->actual_length += length;
504 		d->actual_length = length;
505 
506 		d->status = status;
507 
508 		/* see if we are done */
509 		done = (++qh->iso_idx >= urb->number_of_packets);
510 	} else {
511 		/* non-isoch */
512 		buf = buffer + qh->offset;
513 		length = urb->transfer_buffer_length - qh->offset;
514 		if (rx_count > length) {
515 			if (urb->status == -EINPROGRESS)
516 				urb->status = -EOVERFLOW;
517 			DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
518 			do_flush = 1;
519 		} else
520 			length = rx_count;
521 		urb->actual_length += length;
522 		qh->offset += length;
523 
524 		/* see if we are done */
525 		done = (urb->actual_length == urb->transfer_buffer_length)
526 			|| (rx_count < qh->maxpacket)
527 			|| (urb->status != -EINPROGRESS);
528 		if (done
529 				&& (urb->status == -EINPROGRESS)
530 				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
531 				&& (urb->actual_length
532 					< urb->transfer_buffer_length))
533 			urb->status = -EREMOTEIO;
534 	}
535 
536 	musb_read_fifo(hw_ep, length, buf);
537 
538 	csr = musb_readw(epio, MUSB_RXCSR);
539 	csr |= MUSB_RXCSR_H_WZC_BITS;
540 	if (unlikely(do_flush))
541 		musb_h_flush_rxfifo(hw_ep, csr);
542 	else {
543 		/* REVISIT this assumes AUTOCLEAR is never set */
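		/* ack the packet just unloaded from the fifo, and request
		 * another IN packet only if this transfer isn't done yet
		 */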
544 		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
545 		if (!done)
546 			csr |= MUSB_RXCSR_H_REQPKT;
547 		musb_writew(epio, MUSB_RXCSR, csr);
548 	}
549 
550 	return done;
551 }
552 
553 /* we don't always need to reinit a given side of an endpoint...
554  * when we do, use tx/rx reinit routine and then construct a new CSR
555  * to address data toggle, NYET, and DMA or PIO.
556  *
557  * it's possible that driver bugs (especially for DMA) or aborting a
558  * transfer might have left the endpoint busier than it should be.
559  * the busy/not-empty tests are basically paranoia.
560  */
561 static void
562 musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
563 {
564 	u16	csr;
565 
566 	/* NOTE:  we know the "rx" fifo reinit never triggers for ep0.
567 	 * That always uses tx_reinit since ep0 repurposes TX register
568 	 * offsets; the initial SETUP packet is also a kind of OUT.
569 	 */
570 
571 	/* if programmed for Tx, put it in RX mode */
572 	if (ep->is_shared_fifo) {
573 		csr = musb_readw(ep->regs, MUSB_TXCSR);
574 		if (csr & MUSB_TXCSR_MODE) {
575 			musb_h_tx_flush_fifo(ep);
576 			musb_writew(ep->regs, MUSB_TXCSR,
577 					MUSB_TXCSR_FRCDATATOG);
578 		}
579 		/* clear mode (and everything else) to enable Rx */
580 		musb_writew(ep->regs, MUSB_TXCSR, 0);
581 
582 	/* scrub all previous state, clearing toggle */
583 	} else {
584 		csr = musb_readw(ep->regs, MUSB_RXCSR);
585 		if (csr & MUSB_RXCSR_RXPKTRDY)
586 			WARNING("rx%d, packet/%d ready?\n", ep->epnum,
587 				musb_readw(ep->regs, MUSB_RXCOUNT));
588 
589 		musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
590 	}
591 
592 	/* target addr and (for multipoint) hub addr/port */
593 	if (musb->is_multipoint) {
594 		musb_writeb(ep->target_regs, MUSB_RXFUNCADDR,
595 			qh->addr_reg);
596 		musb_writeb(ep->target_regs, MUSB_RXHUBADDR,
597 			qh->h_addr_reg);
598 		musb_writeb(ep->target_regs, MUSB_RXHUBPORT,
599 			qh->h_port_reg);
600 	} else
601 		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);
602 
603 	/* protocol/endpoint, interval/NAKlimit, i/o size */
604 	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
605 	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
606 	/* NOTE: bulk combining rewrites high bits of maxpacket */
607 	musb_writew(ep->regs, MUSB_RXMAXP, qh->maxpacket);
608 
609 	ep->rx_reinit = 0;
610 }
611 
612 
613 /*
614  * Program an HDRC endpoint as per the given URB
615  * Context: irqs blocked, controller lock held
616  */
617 static void musb_ep_program(struct musb *musb, u8 epnum,
618 			struct urb *urb, unsigned int is_out,
619 			u8 *buf, u32 len)
620 {
621 	struct dma_controller	*dma_controller;
622 	struct dma_channel	*dma_channel;
623 	u8			dma_ok;
624 	void __iomem		*mbase = musb->mregs;
625 	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
626 	void __iomem		*epio = hw_ep->regs;
627 	struct musb_qh		*qh;
628 	u16			packet_sz;
629 
630 	if (!is_out || hw_ep->is_shared_fifo)
631 		qh = hw_ep->in_qh;
632 	else
633 		qh = hw_ep->out_qh;
634 
635 	packet_sz = qh->maxpacket;
636 
637 	DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s "
638 				"h_addr%02x h_port%02x bytes %d\n",
639 			is_out ? "-->" : "<--",
640 			epnum, urb, urb->dev->speed,
641 			qh->addr_reg, qh->epnum, is_out ? "out" : "in",
642 			qh->h_addr_reg, qh->h_port_reg,
643 			len);
644 
645 	musb_ep_select(mbase, epnum);
646 
647 	/* candidate for DMA? */
648 	dma_controller = musb->dma_controller;
649 	if (is_dma_capable() && epnum && dma_controller) {
650 		dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
651 		if (!dma_channel) {
652 			dma_channel = dma_controller->channel_alloc(
653 					dma_controller, hw_ep, is_out);
654 			if (is_out)
655 				hw_ep->tx_channel = dma_channel;
656 			else
657 				hw_ep->rx_channel = dma_channel;
658 		}
659 	} else
660 		dma_channel = NULL;
661 
662 	/* make sure we clear DMAEnab, autoSet bits from previous run */
663 
664 	/* OUT/transmit/EP0 or IN/receive? */
665 	if (is_out) {
666 		u16	csr;
667 		u16	int_txe;
668 		u16	load_count;
669 
670 		csr = musb_readw(epio, MUSB_TXCSR);
671 
672 		/* disable interrupt in case we flush */
673 		int_txe = musb_readw(mbase, MUSB_INTRTXE);
674 		musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
675 
676 		/* general endpoint setup */
677 		if (epnum) {
678 			/* ASSERT:  TXCSR_DMAENAB was already cleared */
679 
680 			/* flush all old state, set default */
681 			musb_h_tx_flush_fifo(hw_ep);
682 			csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
683 					| MUSB_TXCSR_DMAMODE
684 					| MUSB_TXCSR_FRCDATATOG
685 					| MUSB_TXCSR_H_RXSTALL
686 					| MUSB_TXCSR_H_ERROR
687 					| MUSB_TXCSR_TXPKTRDY
688 					);
689 			csr |= MUSB_TXCSR_MODE;
690 
691 			if (usb_gettoggle(urb->dev,
692 					qh->epnum, 1))
693 				csr |= MUSB_TXCSR_H_WR_DATATOGGLE
694 					| MUSB_TXCSR_H_DATATOGGLE;
695 			else
696 				csr |= MUSB_TXCSR_CLRDATATOG;
697 
698 			/* twice in case of double packet buffering */
699 			musb_writew(epio, MUSB_TXCSR, csr);
700 			/* REVISIT may need to clear FLUSHFIFO ... */
701 			musb_writew(epio, MUSB_TXCSR, csr);
702 			csr = musb_readw(epio, MUSB_TXCSR);
703 		} else {
704 			/* endpoint 0: just flush */
705 			musb_writew(epio, MUSB_CSR0,
706 				csr | MUSB_CSR0_FLUSHFIFO);
707 			musb_writew(epio, MUSB_CSR0,
708 				csr | MUSB_CSR0_FLUSHFIFO);
709 		}
710 
711 		/* target addr and (for multipoint) hub addr/port */
712 		if (musb->is_multipoint) {
713 			musb_writeb(mbase,
714 				MUSB_BUSCTL_OFFSET(epnum, MUSB_TXFUNCADDR),
715 				qh->addr_reg);
716 			musb_writeb(mbase,
717 				MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBADDR),
718 				qh->h_addr_reg);
719 			musb_writeb(mbase,
720 				MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBPORT),
721 				qh->h_port_reg);
722 /* FIXME if !epnum, do the same for RX ... */
723 		} else
724 			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);
725 
726 		/* protocol/endpoint/interval/NAKlimit */
727 		if (epnum) {
728 			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
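			/* with bulk split, the high bits of TXMAXP carry the
			 * per-split packet count minus one; the low bits keep
			 * the usual max packet size
			 */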
729 			if (can_bulk_split(musb, qh->type))
730 				musb_writew(epio, MUSB_TXMAXP,
731 					packet_sz
732 					| ((hw_ep->max_packet_sz_tx /
733 						packet_sz) - 1) << 11);
734 			else
735 				musb_writew(epio, MUSB_TXMAXP,
736 					packet_sz);
737 			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
738 		} else {
739 			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
740 			if (musb->is_multipoint)
741 				musb_writeb(epio, MUSB_TYPE0,
742 						qh->type_reg);
743 		}
744 
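		/* how much we may preload into the fifo by PIO, used only
		 * if DMA doesn't take over below
		 */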
745 		if (can_bulk_split(musb, qh->type))
746 			load_count = min((u32) hw_ep->max_packet_sz_tx,
747 						len);
748 		else
749 			load_count = min((u32) packet_sz, len);
750 
751 #ifdef CONFIG_USB_INVENTRA_DMA
752 		if (dma_channel) {
753 
754 			/* clear previous state */
755 			csr = musb_readw(epio, MUSB_TXCSR);
756 			csr &= ~(MUSB_TXCSR_AUTOSET
757 				| MUSB_TXCSR_DMAMODE
758 				| MUSB_TXCSR_DMAENAB);
759 			csr |= MUSB_TXCSR_MODE;
760 			musb_writew(epio, MUSB_TXCSR,
761 				csr | MUSB_TXCSR_MODE);
762 
763 			qh->segsize = min(len, dma_channel->max_len);
764 
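			/* DMA mode 0 moves one packet per request; mode 1
			 * streams the whole (multi-packet) transfer
			 */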
765 			if (qh->segsize <= packet_sz)
766 				dma_channel->desired_mode = 0;
767 			else
768 				dma_channel->desired_mode = 1;
769 
770 
771 			if (dma_channel->desired_mode == 0) {
772 				csr &= ~(MUSB_TXCSR_AUTOSET
773 					| MUSB_TXCSR_DMAMODE);
774 				csr |= (MUSB_TXCSR_DMAENAB);
775 					/* against programming guide */
776 			} else
777 				csr |= (MUSB_TXCSR_AUTOSET
778 					| MUSB_TXCSR_DMAENAB
779 					| MUSB_TXCSR_DMAMODE);
780 
781 			musb_writew(epio, MUSB_TXCSR, csr);
782 
783 			dma_ok = dma_controller->channel_program(
784 					dma_channel, packet_sz,
785 					dma_channel->desired_mode,
786 					urb->transfer_dma,
787 					qh->segsize);
788 			if (dma_ok) {
789 				load_count = 0;
790 			} else {
791 				dma_controller->channel_release(dma_channel);
792 				if (is_out)
793 					hw_ep->tx_channel = NULL;
794 				else
795 					hw_ep->rx_channel = NULL;
796 				dma_channel = NULL;
797 			}
798 		}
799 #endif
800 
801 		/* candidate for DMA */
802 		if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
803 
804 			/* program endpoint CSRs first, then setup DMA.
805 			 * assume CPPI setup succeeds.
806 			 * defer enabling dma.
807 			 */
808 			csr = musb_readw(epio, MUSB_TXCSR);
809 			csr &= ~(MUSB_TXCSR_AUTOSET
810 					| MUSB_TXCSR_DMAMODE
811 					| MUSB_TXCSR_DMAENAB);
812 			csr |= MUSB_TXCSR_MODE;
813 			musb_writew(epio, MUSB_TXCSR,
814 				csr | MUSB_TXCSR_MODE);
815 
816 			dma_channel->actual_len = 0L;
817 			qh->segsize = len;
818 
819 			/* TX uses "rndis" mode automatically, but needs help
820 			 * to identify the zero-length-final-packet case.
821 			 */
822 			dma_ok = dma_controller->channel_program(
823 					dma_channel, packet_sz,
824 					(urb->transfer_flags
825 							& URB_ZERO_PACKET)
826 						== URB_ZERO_PACKET,
827 					urb->transfer_dma,
828 					qh->segsize);
829 			if (dma_ok) {
830 				load_count = 0;
831 			} else {
832 				dma_controller->channel_release(dma_channel);
833 				hw_ep->tx_channel = NULL;
834 				dma_channel = NULL;
835 
836 				/* REVISIT there's an error path here that
837 				 * needs handling:  can't do dma, but
838 				 * there's no pio buffer address...
839 				 */
840 			}
841 		}
842 
843 		if (load_count) {
844 			/* ASSERT:  TXCSR_DMAENAB was already cleared */
845 
846 			/* PIO to load FIFO */
847 			qh->segsize = load_count;
848 			musb_write_fifo(hw_ep, load_count, buf);
849 			csr = musb_readw(epio, MUSB_TXCSR);
850 			csr &= ~(MUSB_TXCSR_DMAENAB
851 				| MUSB_TXCSR_DMAMODE
852 				| MUSB_TXCSR_AUTOSET);
853 			/* write CSR */
854 			csr |= MUSB_TXCSR_MODE;
855 
856 			if (epnum)
857 				musb_writew(epio, MUSB_TXCSR, csr);
858 		}
859 
860 		/* re-enable interrupt */
861 		musb_writew(mbase, MUSB_INTRTXE, int_txe);
862 
863 	/* IN/receive */
864 	} else {
865 		u16	csr;
866 
867 		if (hw_ep->rx_reinit) {
868 			musb_rx_reinit(musb, qh, hw_ep);
869 
870 			/* init new state: toggle and NYET, maybe DMA later */
871 			if (usb_gettoggle(urb->dev, qh->epnum, 0))
872 				csr = MUSB_RXCSR_H_WR_DATATOGGLE
873 					| MUSB_RXCSR_H_DATATOGGLE;
874 			else
875 				csr = 0;
876 			if (qh->type == USB_ENDPOINT_XFER_INT)
877 				csr |= MUSB_RXCSR_DISNYET;
878 
879 		} else {
880 			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
881 
882 			if (csr & (MUSB_RXCSR_RXPKTRDY
883 					| MUSB_RXCSR_DMAENAB
884 					| MUSB_RXCSR_H_REQPKT))
885 				ERR("broken !rx_reinit, ep%d csr %04x\n",
886 						hw_ep->epnum, csr);
887 
888 			/* scrub any stale state, leaving toggle alone */
889 			csr &= MUSB_RXCSR_DISNYET;
890 		}
891 
892 		/* kick things off */
893 
894 		if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
895 			/* candidate for DMA */
896 			if (dma_channel) {
897 				dma_channel->actual_len = 0L;
898 				qh->segsize = len;
899 
900 				/* AUTOREQ is in a DMA register */
901 				musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
902 				csr = musb_readw(hw_ep->regs,
903 						MUSB_RXCSR);
904 
905 				/* unless caller treats short rx transfers as
906 				 * errors, we dare not queue multiple transfers.
907 				 */
908 				dma_ok = dma_controller->channel_program(
909 						dma_channel, packet_sz,
910 						!(urb->transfer_flags
911 							& URB_SHORT_NOT_OK),
912 						urb->transfer_dma,
913 						qh->segsize);
914 				if (!dma_ok) {
915 					dma_controller->channel_release(
916 							dma_channel);
917 					hw_ep->rx_channel = NULL;
918 					dma_channel = NULL;
919 				} else
920 					csr |= MUSB_RXCSR_DMAENAB;
921 			}
922 		}
923 
924 		csr |= MUSB_RXCSR_H_REQPKT;
925 		DBG(7, "RXCSR%d := %04x\n", epnum, csr);
926 		musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
927 		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
928 	}
929 }
930 
931 
932 /*
933  * Service the default endpoint (ep0) as host.
934  * Return true until it's time to start the status stage.
935  */
936 static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
937 {
938 	bool			 more = false;
939 	u8			*fifo_dest = NULL;
940 	u16			fifo_count = 0;
941 	struct musb_hw_ep	*hw_ep = musb->control_ep;
942 	struct musb_qh		*qh = hw_ep->in_qh;
943 	struct usb_ctrlrequest	*request;
944 
945 	switch (musb->ep0_stage) {
946 	case MUSB_EP0_IN:
947 		fifo_dest = urb->transfer_buffer + urb->actual_length;
948 		fifo_count = min(len, ((u16) (urb->transfer_buffer_length
949 					- urb->actual_length)));
950 		if (fifo_count < len)
951 			urb->status = -EOVERFLOW;
952 
953 		musb_read_fifo(hw_ep, fifo_count, fifo_dest);
954 
955 		urb->actual_length += fifo_count;
956 		if (len < qh->maxpacket) {
957 			/* always terminate on short read; it's
958 			 * rarely reported as an error.
959 			 */
960 		} else if (urb->actual_length <
961 				urb->transfer_buffer_length)
962 			more = true;
963 		break;
964 	case MUSB_EP0_START:
965 		request = (struct usb_ctrlrequest *) urb->setup_packet;
966 
967 		if (!request->wLength) {
968 			DBG(4, "start no-DATA\n");
969 			break;
970 		} else if (request->bRequestType & USB_DIR_IN) {
971 			DBG(4, "start IN-DATA\n");
972 			musb->ep0_stage = MUSB_EP0_IN;
973 			more = true;
974 			break;
975 		} else {
976 			DBG(4, "start OUT-DATA\n");
977 			musb->ep0_stage = MUSB_EP0_OUT;
978 			more = true;
979 		}
980 		/* FALLTHROUGH */
981 	case MUSB_EP0_OUT:
982 		fifo_count = min(qh->maxpacket, ((u16)
983 				(urb->transfer_buffer_length
984 				- urb->actual_length)));
985 
986 		if (fifo_count) {
987 			fifo_dest = (u8 *) (urb->transfer_buffer
988 					+ urb->actual_length);
989 			DBG(3, "Sending %d bytes to %p\n",
990 					fifo_count, fifo_dest);
991 			musb_write_fifo(hw_ep, fifo_count, fifo_dest);
992 
993 			urb->actual_length += fifo_count;
994 			more = true;
995 		}
996 		break;
997 	default:
998 		ERR("bogus ep0 stage %d\n", musb->ep0_stage);
999 		break;
1000 	}
1001 
1002 	return more;
1003 }
1004 
1005 /*
1006  * Handle default endpoint interrupt as host. Only called in IRQ time
1007  * from musb_interrupt().
1008  *
1009  * called with controller irqlocked
1010  */
1011 irqreturn_t musb_h_ep0_irq(struct musb *musb)
1012 {
1013 	struct urb		*urb;
1014 	u16			csr, len;
1015 	int			status = 0;
1016 	void __iomem		*mbase = musb->mregs;
1017 	struct musb_hw_ep	*hw_ep = musb->control_ep;
1018 	void __iomem		*epio = hw_ep->regs;
1019 	struct musb_qh		*qh = hw_ep->in_qh;
1020 	bool			complete = false;
1021 	irqreturn_t		retval = IRQ_NONE;
1022 
1023 	/* ep0 only has one queue, "in" */
1024 	urb = next_urb(qh);
1025 
1026 	musb_ep_select(mbase, 0);
1027 	csr = musb_readw(epio, MUSB_CSR0);
1028 	len = (csr & MUSB_CSR0_RXPKTRDY)
1029 			? musb_readb(epio, MUSB_COUNT0)
1030 			: 0;
1031 
1032 	DBG(4, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
1033 		csr, qh, len, urb, musb->ep0_stage);
1034 
1035 	/* if we just did status stage, we are done */
1036 	if (MUSB_EP0_STATUS == musb->ep0_stage) {
1037 		retval = IRQ_HANDLED;
1038 		complete = true;
1039 	}
1040 
1041 	/* prepare status */
1042 	if (csr & MUSB_CSR0_H_RXSTALL) {
1043 		DBG(6, "STALLING ENDPOINT\n");
1044 		status = -EPIPE;
1045 
1046 	} else if (csr & MUSB_CSR0_H_ERROR) {
1047 		DBG(2, "no response, csr0 %04x\n", csr);
1048 		status = -EPROTO;
1049 
1050 	} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
1051 		DBG(2, "control NAK timeout\n");
1052 
1053 		/* NOTE:  this code path would be a good place to PAUSE a
1054 		 * control transfer, if another one is queued, so that
1055 		 * ep0 is more likely to stay busy.
1056 		 *
1057 		 * if (qh->ring.next != &musb->control), then
1058 		 * we have a candidate... NAKing is *NOT* an error
1059 		 */
1060 		musb_writew(epio, MUSB_CSR0, 0);
1061 		retval = IRQ_HANDLED;
1062 	}
1063 
1064 	if (status) {
1065 		DBG(6, "aborting\n");
1066 		retval = IRQ_HANDLED;
1067 		if (urb)
1068 			urb->status = status;
1069 		complete = true;
1070 
1071 		/* use the proper sequence to abort the transfer */
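		/* if we were waiting on an IN packet, stop requesting it;
		 * otherwise flush whatever sits in the fifo.  either way,
		 * clear the NAK timeout.
		 */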
1072 		if (csr & MUSB_CSR0_H_REQPKT) {
1073 			csr &= ~MUSB_CSR0_H_REQPKT;
1074 			musb_writew(epio, MUSB_CSR0, csr);
1075 			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
1076 			musb_writew(epio, MUSB_CSR0, csr);
1077 		} else {
1078 			csr |= MUSB_CSR0_FLUSHFIFO;
1079 			musb_writew(epio, MUSB_CSR0, csr);
1080 			musb_writew(epio, MUSB_CSR0, csr);
1081 			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
1082 			musb_writew(epio, MUSB_CSR0, csr);
1083 		}
1084 
1085 		musb_writeb(epio, MUSB_NAKLIMIT0, 0);
1086 
1087 		/* clear it */
1088 		musb_writew(epio, MUSB_CSR0, 0);
1089 	}
1090 
1091 	if (unlikely(!urb)) {
1092 		/* stop endpoint since we have no place for its data, this
1093 		 * SHOULD NEVER HAPPEN! */
1094 		ERR("no URB for end 0\n");
1095 
1096 		musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO);
1097 		musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO);
1098 		musb_writew(epio, MUSB_CSR0, 0);
1099 
1100 		goto done;
1101 	}
1102 
1103 	if (!complete) {
1104 		/* call common logic and prepare response */
1105 		if (musb_h_ep0_continue(musb, len, urb)) {
1106 			/* more packets required */
1107 			csr = (MUSB_EP0_IN == musb->ep0_stage)
1108 				?  MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
1109 		} else {
1110 			/* data transfer complete; perform status phase */
1111 			if (usb_pipeout(urb->pipe)
1112 					|| !urb->transfer_buffer_length)
1113 				csr = MUSB_CSR0_H_STATUSPKT
1114 					| MUSB_CSR0_H_REQPKT;
1115 			else
1116 				csr = MUSB_CSR0_H_STATUSPKT
1117 					| MUSB_CSR0_TXPKTRDY;
1118 
1119 			/* flag status stage */
1120 			musb->ep0_stage = MUSB_EP0_STATUS;
1121 
1122 			DBG(5, "ep0 STATUS, csr %04x\n", csr);
1123 
1124 		}
1125 		musb_writew(epio, MUSB_CSR0, csr);
1126 		retval = IRQ_HANDLED;
1127 	} else
1128 		musb->ep0_stage = MUSB_EP0_IDLE;
1129 
1130 	/* call completion handler if done */
1131 	if (complete)
1132 		musb_advance_schedule(musb, urb, hw_ep, 1);
1133 done:
1134 	return retval;
1135 }
1136 
1137 
1138 #ifdef CONFIG_USB_INVENTRA_DMA
1139 
1140 /* Host side TX (OUT) using Mentor DMA works as follows:
1141 	submit_urb ->
1142 		- if queue was empty, Program Endpoint
1143 		- ... which starts DMA to fifo in mode 1 or 0
1144 
1145 	DMA Isr (transfer complete) -> TxAvail()
1146 		- Stop DMA (~DmaEnab)	(<--- Alert ... currently happens
1147 					only in musb_cleanup_urb)
1148 		- TxPktRdy has to be set in mode 0 or for
1149 			short packets in mode 1.
1150 */
1151 
1152 #endif
1153 
1154 /* Service a Tx-Available or dma completion irq for the endpoint */
1155 void musb_host_tx(struct musb *musb, u8 epnum)
1156 {
1157 	int			pipe;
1158 	bool			done = false;
1159 	u16			tx_csr;
1160 	size_t			wLength = 0;
1161 	u8			*buf = NULL;
1162 	struct urb		*urb;
1163 	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
1164 	void __iomem		*epio = hw_ep->regs;
1165 	struct musb_qh		*qh = hw_ep->out_qh;
1166 	u32			status = 0;
1167 	void __iomem		*mbase = musb->mregs;
1168 	struct dma_channel	*dma;
1169 
1170 	urb = next_urb(qh);
1171 
1172 	musb_ep_select(mbase, epnum);
1173 	tx_csr = musb_readw(epio, MUSB_TXCSR);
1174 
1175 	/* with CPPI, DMA sometimes triggers "extra" irqs */
1176 	if (!urb) {
1177 		DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
1178 		goto finish;
1179 	}
1180 
1181 	pipe = urb->pipe;
1182 	dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
1183 	DBG(4, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
1184 			dma ? ", dma" : "");
1185 
1186 	/* check for errors */
1187 	if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
1188 		/* dma was disabled, fifo flushed */
1189 		DBG(3, "TX end %d stall\n", epnum);
1190 
1191 		/* stall; record URB status */
1192 		status = -EPIPE;
1193 
1194 	} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
1195 		/* (NON-ISO) dma was disabled, fifo flushed */
1196 		DBG(3, "TX 3strikes on ep=%d\n", epnum);
1197 
1198 		status = -ETIMEDOUT;
1199 
1200 	} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
1201 		DBG(6, "TX end=%d device not responding\n", epnum);
1202 
1203 		/* NOTE:  this code path would be a good place to PAUSE a
1204 		 * transfer, if there's some other (nonperiodic) tx urb
1205 		 * that could use this fifo.  (dma complicates it...)
1206 		 *
1207 		 * if (bulk && qh->ring.next != &musb->out_bulk), then
1208 		 * we have a candidate... NAKing is *NOT* an error
1209 		 */
1210 		musb_ep_select(mbase, epnum);
1211 		musb_writew(epio, MUSB_TXCSR,
1212 				MUSB_TXCSR_H_WZC_BITS
1213 				| MUSB_TXCSR_TXPKTRDY);
1214 		goto finish;
1215 	}
1216 
1217 	if (status) {
1218 		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1219 			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1220 			(void) musb->dma_controller->channel_abort(dma);
1221 		}
1222 
1223 		/* do the proper sequence to abort the transfer in the
1224 		 * usb core; the dma engine should already be stopped.
1225 		 */
1226 		musb_h_tx_flush_fifo(hw_ep);
1227 		tx_csr &= ~(MUSB_TXCSR_AUTOSET
1228 				| MUSB_TXCSR_DMAENAB
1229 				| MUSB_TXCSR_H_ERROR
1230 				| MUSB_TXCSR_H_RXSTALL
1231 				| MUSB_TXCSR_H_NAKTIMEOUT
1232 				);
1233 
1234 		musb_ep_select(mbase, epnum);
1235 		musb_writew(epio, MUSB_TXCSR, tx_csr);
1236 		/* REVISIT may need to clear FLUSHFIFO ... */
1237 		musb_writew(epio, MUSB_TXCSR, tx_csr);
1238 		musb_writeb(epio, MUSB_TXINTERVAL, 0);
1239 
1240 		done = true;
1241 	}
1242 
1243 	/* second cppi case */
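	/* the DMA channel hasn't completed yet; the real completion irq
	 * will follow, so there's nothing more to do now
	 */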
1244 	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1245 		DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
1246 		goto finish;
1247 
1248 	}
1249 
1250 	/* REVISIT this looks wrong... */
1251 	if (!status || dma || usb_pipeisoc(pipe)) {
1252 		if (dma)
1253 			wLength = dma->actual_len;
1254 		else
1255 			wLength = qh->segsize;
1256 		qh->offset += wLength;
1257 
1258 		if (usb_pipeisoc(pipe)) {
1259 			struct usb_iso_packet_descriptor	*d;
1260 
1261 			d = urb->iso_frame_desc + qh->iso_idx;
1262 			d->actual_length = qh->segsize;
1263 			if (++qh->iso_idx >= urb->number_of_packets) {
1264 				done = true;
1265 			} else {
1266 				d++;
1267 				buf = urb->transfer_buffer + d->offset;
1268 				wLength = d->length;
1269 			}
1270 		} else if (dma) {
1271 			done = true;
1272 		} else {
1273 			/* see if we need to send more data, or ZLP */
1274 			if (qh->segsize < qh->maxpacket)
1275 				done = true;
1276 			else if (qh->offset == urb->transfer_buffer_length
1277 					&& !(urb->transfer_flags
1278 						& URB_ZERO_PACKET))
1279 				done = true;
1280 			if (!done) {
1281 				buf = urb->transfer_buffer
1282 						+ qh->offset;
1283 				wLength = urb->transfer_buffer_length
1284 						- qh->offset;
1285 			}
1286 		}
1287 	}
1288 
1289 	/* urb->status != -EINPROGRESS means request has been faulted,
1290 	 * so we must abort this transfer after cleanup
1291 	 */
1292 	if (urb->status != -EINPROGRESS) {
1293 		done = true;
1294 		if (status == 0)
1295 			status = urb->status;
1296 	}
1297 
1298 	if (done) {
1299 		/* set status */
1300 		urb->status = status;
1301 		urb->actual_length = qh->offset;
1302 		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
1303 
1304 	} else if (!(tx_csr & MUSB_TXCSR_DMAENAB)) {
1305 		/* WARN_ON(!buf); */
1306 
1307 		/* REVISIT:  some docs say that when hw_ep->tx_double_buffered,
1308 		 * (and presumably, fifo is not half-full) we should write TWO
1309 		 * packets before updating TXCSR ... other docs disagree ...
1310 		 */
1311 		/* PIO:  start next packet in this URB */
1312 		wLength = min(qh->maxpacket, (u16) wLength);
1313 		musb_write_fifo(hw_ep, wLength, buf);
1314 		qh->segsize = wLength;
1315 
1316 		musb_ep_select(mbase, epnum);
1317 		musb_writew(epio, MUSB_TXCSR,
1318 				MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
1319 	} else
1320 		DBG(1, "not complete, but dma enabled?\n");
1321 
1322 finish:
1323 	return;
1324 }
1325 
1326 
1327 #ifdef CONFIG_USB_INVENTRA_DMA
1328 
1329 /* Host side RX (IN) using Mentor DMA works as follows:
1330 	submit_urb ->
1331 		- if queue was empty, ProgramEndpoint
1332 		- first IN token is sent out (by setting ReqPkt)
1333 	LinuxIsr -> RxReady()
1334 	/\	=> first packet is received
1335 	|	- Set in mode 0 (DmaEnab, ~ReqPkt)
1336 	|		-> DMA Isr (transfer complete) -> RxReady()
1337 	|		    - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
1338 	|		    - if urb not complete, send next IN token (ReqPkt)
1339 	|			   |		else complete urb.
1340 	|			   |
1341 	---------------------------
1342  *
1343  * Nuances of mode 1:
1344  *	For short packets, no ack (+RxPktRdy) is sent automatically
1345  *	(even if AutoClear is ON)
1346  *	For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
1347  *	automatically => major problem, as collecting the next packet becomes
1348  *	difficult. Hence mode 1 is not used.
1349  *
1350  * REVISIT
1351  *	All we care about at this driver level is that
1352  *       (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
1353  *       (b) termination conditions are: short RX, or buffer full;
1354  *       (c) fault modes include
1355  *           - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
1356  *             (and that endpoint's dma queue stops immediately)
1357  *           - overflow (full, PLUS more bytes in the terminal packet)
1358  *
1359  *	So for example, usb-storage sets URB_SHORT_NOT_OK, and would
1360  *	thus be a great candidate for using mode 1 ... for all but the
1361  *	last packet of one URB's transfer.
1362  */
1363 
1364 #endif
1365 
1366 /*
1367  * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
1368  * and high-bandwidth IN transfer cases.
1369  */
1370 void musb_host_rx(struct musb *musb, u8 epnum)
1371 {
1372 	struct urb		*urb;
1373 	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
1374 	void __iomem		*epio = hw_ep->regs;
1375 	struct musb_qh		*qh = hw_ep->in_qh;
1376 	size_t			xfer_len;
1377 	void __iomem		*mbase = musb->mregs;
1378 	int			pipe;
1379 	u16			rx_csr, val;
1380 	bool			iso_err = false;
1381 	bool			done = false;
1382 	u32			status;
1383 	struct dma_channel	*dma;
1384 
1385 	musb_ep_select(mbase, epnum);
1386 
1387 	urb = next_urb(qh);
1388 	dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
1389 	status = 0;
1390 	xfer_len = 0;
1391 
1392 	rx_csr = musb_readw(epio, MUSB_RXCSR);
1393 	val = rx_csr;
1394 
1395 	if (unlikely(!urb)) {
1396 		/* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
1397 		 * usbtest #11 (unlinks) triggers it regularly, sometimes
1398 		 * with fifo full.  (Only with DMA??)
1399 		 */
1400 		DBG(3, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
1401 			musb_readw(epio, MUSB_RXCOUNT));
1402 		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
1403 		return;
1404 	}
1405 
1406 	pipe = urb->pipe;
1407 
1408 	DBG(5, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
1409 		epnum, rx_csr, urb->actual_length,
1410 		dma ? dma->actual_len : 0);
1411 
1412 	/* check for errors, concurrent stall & unlink is not really
1413 	 * handled yet! */
1414 	if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
1415 		DBG(3, "RX end %d STALL\n", epnum);
1416 
1417 		/* stall; record URB status */
1418 		status = -EPIPE;
1419 
1420 	} else if (rx_csr & MUSB_RXCSR_H_ERROR) {
1421 		DBG(3, "end %d RX proto error\n", epnum);
1422 
1423 		status = -EPROTO;
1424 		musb_writeb(epio, MUSB_RXINTERVAL, 0);
1425 
1426 	} else if (rx_csr & MUSB_RXCSR_DATAERROR) {
1427 
1428 		if (USB_ENDPOINT_XFER_ISOC != qh->type) {
1429 			/* NOTE this code path would be a good place to PAUSE a
1430 			 * transfer, if there's some other (nonperiodic) rx urb
1431 			 * that could use this fifo.  (dma complicates it...)
1432 			 *
1433 			 * if (bulk && qh->ring.next != &musb->in_bulk), then
1434 			 * we have a candidate... NAKing is *NOT* an error
1435 			 */
1436 			DBG(6, "RX end %d NAK timeout\n", epnum);
1437 			musb_ep_select(mbase, epnum);
1438 			musb_writew(epio, MUSB_RXCSR,
1439 					MUSB_RXCSR_H_WZC_BITS
1440 					| MUSB_RXCSR_H_REQPKT);
1441 
1442 			goto finish;
1443 		} else {
1444 			DBG(4, "RX end %d ISO data error\n", epnum);
1445 			/* packet error reported later */
1446 			iso_err = true;
1447 		}
1448 	}
1449 
1450 	/* faults abort the transfer */
1451 	if (status) {
1452 		/* clean up dma and collect transfer count */
1453 		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1454 			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1455 			(void) musb->dma_controller->channel_abort(dma);
1456 			xfer_len = dma->actual_len;
1457 		}
1458 		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
1459 		musb_writeb(epio, MUSB_RXINTERVAL, 0);
1460 		done = true;
1461 		goto finish;
1462 	}
1463 
1464 	if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
1465 		/* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
1466 		ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
1467 		goto finish;
1468 	}
1469 
1470 	/* thorough shutdown for now ... given more precise fault handling
1471 	 * and better queueing support, we might keep a DMA pipeline going
1472 	 * while processing this irq for earlier completions.
1473 	 */
1474 
1475 	/* FIXME this is _way_ too much in-line logic for Mentor DMA */
1476 
1477 #ifndef CONFIG_USB_INVENTRA_DMA
1478 	if (rx_csr & MUSB_RXCSR_H_REQPKT)  {
1479 		/* REVISIT this happened for a while on some short reads...
1480 		 * the cleanup still needs investigation... looks bad...
1481 		 * and also duplicates dma cleanup code above ... plus,
1482 		 * shouldn't this be the "half full" double buffer case?
1483 		 */
1484 		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1485 			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1486 			(void) musb->dma_controller->channel_abort(dma);
1487 			xfer_len = dma->actual_len;
1488 			done = true;
1489 		}
1490 
1491 		DBG(2, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
1492 				xfer_len, dma ? ", dma" : "");
1493 		rx_csr &= ~MUSB_RXCSR_H_REQPKT;
1494 
1495 		musb_ep_select(mbase, epnum);
1496 		musb_writew(epio, MUSB_RXCSR,
1497 				MUSB_RXCSR_H_WZC_BITS | rx_csr);
1498 	}
1499 #endif
1500 	if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
1501 		xfer_len = dma->actual_len;
1502 
1503 		val &= ~(MUSB_RXCSR_DMAENAB
1504 			| MUSB_RXCSR_H_AUTOREQ
1505 			| MUSB_RXCSR_AUTOCLEAR
1506 			| MUSB_RXCSR_RXPKTRDY);
1507 		musb_writew(hw_ep->regs, MUSB_RXCSR, val);
1508 
1509 #ifdef CONFIG_USB_INVENTRA_DMA
1510 		/* done if urb buffer is full or short packet is recd */
1511 		done = (urb->actual_length + xfer_len >=
1512 				urb->transfer_buffer_length
1513 			|| dma->actual_len < qh->maxpacket);
1514 
1515 		/* send IN token for next packet, without AUTOREQ */
1516 		if (!done) {
1517 			val |= MUSB_RXCSR_H_REQPKT;
1518 			musb_writew(epio, MUSB_RXCSR,
1519 				MUSB_RXCSR_H_WZC_BITS | val);
1520 		}
1521 
1522 		DBG(4, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
1523 			done ? "off" : "reset",
1524 			musb_readw(epio, MUSB_RXCSR),
1525 			musb_readw(epio, MUSB_RXCOUNT));
1526 #else
1527 		done = true;
1528 #endif
1529 	} else if (urb->status == -EINPROGRESS) {
1530 		/* if no errors, be sure a packet is ready for unloading */
1531 		if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
1532 			status = -EPROTO;
1533 			ERR("Rx interrupt with no errors or packet!\n");
1534 
1535 			/* FIXME this is another "SHOULD NEVER HAPPEN" */
1536 
1537 /* SCRUB (RX) */
1538 			/* do the proper sequence to abort the transfer */
1539 			musb_ep_select(mbase, epnum);
1540 			val &= ~MUSB_RXCSR_H_REQPKT;
1541 			musb_writew(epio, MUSB_RXCSR, val);
1542 			goto finish;
1543 		}
1544 
1545 		/* we are expecting IN packets */
1546 #ifdef CONFIG_USB_INVENTRA_DMA
1547 		if (dma) {
1548 			struct dma_controller	*c;
1549 			u16			rx_count;
1550 			int			ret;
1551 
1552 			rx_count = musb_readw(epio, MUSB_RXCOUNT);
1553 
1554 			DBG(2, "RX%d count %d, buffer 0x%x len %d/%d\n",
1555 					epnum, rx_count,
1556 					urb->transfer_dma
1557 						+ urb->actual_length,
1558 					qh->offset,
1559 					urb->transfer_buffer_length);
1560 
1561 			c = musb->dma_controller;
1562 
1563 			dma->desired_mode = 0;
1564 #ifdef USE_MODE1
1565 			/* because of the issue below, mode 1 will
1566 			 * only rarely behave with correct semantics.
1567 			 */
1568 			if ((urb->transfer_flags &
1569 						URB_SHORT_NOT_OK)
1570 				&& (urb->transfer_buffer_length -
1571 						urb->actual_length)
1572 					> qh->maxpacket)
1573 				dma->desired_mode = 1;
1574 #endif
1575 
1576 /* Disadvantage of using mode 1:
1577  *	It's basically usable only for mass storage class; essentially all
1578  *	other protocols also terminate transfers on short packets.
1579  *
1580  * Details:
1581  *	An extra IN token is sent at the end of the transfer (due to AUTOREQ)
1582  *	If you try to use mode 1 for (transfer_buffer_length - 512), and try
1583  *	to use the extra IN token to grab the last packet using mode 0, then
1584  *	the problem is that you cannot be sure when the device will send the
1585  *	last packet and RxPktRdy set. Sometimes the packet is recd too soon
1586  *	such that it gets lost when RxCSR is re-set at the end of the mode 1
1587  *	transfer, while sometimes it is recd just a little late so that if you
1588  *	try to configure for mode 0 soon after the mode 1 transfer is
1589  *	completed, you will find rxcount 0. Okay, so you might think why not
1590  *	wait for an interrupt when the pkt is recd. Well, you won't get any!
1591  */
1592 
1593 			val = musb_readw(epio, MUSB_RXCSR);
1594 			val &= ~MUSB_RXCSR_H_REQPKT;
1595 
1596 			if (dma->desired_mode == 0)
1597 				val &= ~MUSB_RXCSR_H_AUTOREQ;
1598 			else
1599 				val |= MUSB_RXCSR_H_AUTOREQ;
1600 			val |= MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAENAB;
1601 
1602 			musb_writew(epio, MUSB_RXCSR,
1603 				MUSB_RXCSR_H_WZC_BITS | val);
1604 
1605 			/* REVISIT if/when actual_length != 0,
1606 			 * transfer_buffer_length needs to be
1607 			 * adjusted first...
1608 			 */
1609 			ret = c->channel_program(
1610 				dma, qh->maxpacket,
1611 				dma->desired_mode,
1612 				urb->transfer_dma
1613 					+ urb->actual_length,
1614 				(dma->desired_mode == 0)
1615 					? rx_count
1616 					: urb->transfer_buffer_length);
1617 
1618 			if (!ret) {
1619 				c->channel_release(dma);
1620 				hw_ep->rx_channel = NULL;
1621 				dma = NULL;
1622 				/* REVISIT reset CSR */
1623 			}
1624 		}
1625 #endif	/* Mentor DMA */
1626 
1627 		if (!dma) {
1628 			done = musb_host_packet_rx(musb, urb,
1629 					epnum, iso_err);
1630 			DBG(6, "read %spacket\n", done ? "last " : "");
1631 		}
1632 	}
1633 
1634 	if (dma && usb_pipeisoc(pipe)) {
1635 		struct usb_iso_packet_descriptor	*d;
1636 		int					iso_stat = status;
1637 
1638 		d = urb->iso_frame_desc + qh->iso_idx;
1639 		d->actual_length += xfer_len;
1640 		if (iso_err) {
1641 			iso_stat = -EILSEQ;
1642 			urb->error_count++;
1643 		}
1644 		d->status = iso_stat;
1645 	}
1646 
1647 finish:
1648 	urb->actual_length += xfer_len;
1649 	qh->offset += xfer_len;
1650 	if (done) {
1651 		if (urb->status == -EINPROGRESS)
1652 			urb->status = status;
1653 		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
1654 	}
1655 }
1656 
1657 /* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
1658  * the software schedule associates multiple such nodes with a given
1659  * host side hardware endpoint + direction; scheduling may activate
1660  * that hardware endpoint.
1661  */
1662 static int musb_schedule(
1663 	struct musb		*musb,
1664 	struct musb_qh		*qh,
1665 	int			is_in)
1666 {
1667 	int			idle;
1668 	int			best_diff;
1669 	int			best_end, epnum;
1670 	struct musb_hw_ep	*hw_ep = NULL;
1671 	struct list_head	*head = NULL;
1672 
1673 	/* use fixed hardware for control and bulk */
1674 	switch (qh->type) {
1675 	case USB_ENDPOINT_XFER_CONTROL:
1676 		head = &musb->control;
1677 		hw_ep = musb->control_ep;
1678 		break;
1679 	case USB_ENDPOINT_XFER_BULK:
1680 		hw_ep = musb->bulk_ep;
1681 		if (is_in)
1682 			head = &musb->in_bulk;
1683 		else
1684 			head = &musb->out_bulk;
1685 		break;
1686 	}
1687 	if (head) {
1688 		idle = list_empty(head);
1689 		list_add_tail(&qh->ring, head);
1690 		goto success;
1691 	}
1692 
1693 	/* else, periodic transfers get muxed to other endpoints */
1694 
1695 	/* FIXME this doesn't consider direction, so it can only
1696 	 * work for one half of the endpoint hardware, and assumes
1697 	 * the previous cases handled all non-shared endpoints...
1698 	 */
1699 
1700 	/* we know this qh hasn't been scheduled, so all we need to do
1701 	 * is choose which hardware endpoint to put it on ...
1702 	 *
1703 	 * REVISIT what we really want here is a regular schedule tree
1704 	 * like e.g. OHCI uses, but for now musb->periodic is just an
1705 	 * array of the _single_ logical endpoint associated with a
1706 	 * given physical one (identity mapping logical->physical).
1707 	 *
1708 	 * that simplistic approach makes TT scheduling a lot simpler;
1709 	 * there is none, and thus none of its complexity...
1710 	 */
1711 	best_diff = 4096;
1712 	best_end = -1;
1713 
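	/* best fit: choose the unclaimed endpoint whose fifo exceeds this
	 * transfer's maxpacket by the smallest margin
	 */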
1714 	for (epnum = 1; epnum < musb->nr_endpoints; epnum++) {
1715 		int	diff;
1716 
1717 		if (musb->periodic[epnum])
1718 			continue;
1719 		hw_ep = &musb->endpoints[epnum];
1720 		if (hw_ep == musb->bulk_ep)
1721 			continue;
1722 
1723 		if (is_in)
1724 			diff = hw_ep->max_packet_sz_rx - qh->maxpacket;
1725 		else
1726 			diff = hw_ep->max_packet_sz_tx - qh->maxpacket;
1727 
1728 		if (diff >= 0 && best_diff > diff) {
1729 			best_diff = diff;
1730 			best_end = epnum;
1731 		}
1732 	}
1733 	if (best_end < 0)
1734 		return -ENOSPC;
1735 
1736 	idle = 1;
1737 	hw_ep = musb->endpoints + best_end;
1738 	musb->periodic[best_end] = qh;
1739 	DBG(4, "qh %p periodic slot %d\n", qh, best_end);
1740 success:
1741 	qh->hw_ep = hw_ep;
1742 	qh->hep->hcpriv = qh;
1743 	if (idle)
1744 		musb_start_urb(musb, is_in, qh);
1745 	return 0;
1746 }
1747 
1748 static int musb_urb_enqueue(
1749 	struct usb_hcd			*hcd,
1750 	struct urb			*urb,
1751 	gfp_t				mem_flags)
1752 {
1753 	unsigned long			flags;
1754 	struct musb			*musb = hcd_to_musb(hcd);
1755 	struct usb_host_endpoint	*hep = urb->ep;
1756 	struct musb_qh			*qh = hep->hcpriv;
1757 	struct usb_endpoint_descriptor	*epd = &hep->desc;
1758 	int				ret;
1759 	unsigned			type_reg;
1760 	unsigned			interval;
1761 
1762 	/* host role must be active */
1763 	if (!is_host_active(musb) || !musb->is_active)
1764 		return -ENODEV;
1765 
1766 	spin_lock_irqsave(&musb->lock, flags);
1767 	ret = usb_hcd_link_urb_to_ep(hcd, urb);
1768 	spin_unlock_irqrestore(&musb->lock, flags);
1769 	if (ret)
1770 		return ret;
1771 
1772 	/* DMA mapping was already done, if needed, and this urb is on
1773 	 * hep->urb_list ... so there's little to do unless hep wasn't
1774 	 * yet scheduled onto a live qh.
1775 	 *
1776 	 * REVISIT best to keep hep->hcpriv valid until the endpoint gets
1777 	 * disabled, testing for empty qh->ring and avoiding qh setup costs
1778 	 * except for the first urb queued after a config change.
1779 	 */
1780 	if (qh) {
1781 		urb->hcpriv = qh;
1782 		return 0;
1783 	}
1784 
1785 	/* Allocate and initialize qh, minimizing the work done each time
1786 	 * hw_ep gets reprogrammed, or with irqs blocked.  Then schedule it.
1787 	 *
1788 	 * REVISIT consider a dedicated qh kmem_cache, so it's harder
1789 	 * for bugs in other kernel code to break this driver...
1790 	 */
1791 	qh = kzalloc(sizeof *qh, mem_flags);
1792 	if (!qh) {
1793 		spin_lock_irqsave(&musb->lock, flags);
1794 		usb_hcd_unlink_urb_from_ep(hcd, urb);
1795 		spin_unlock_irqrestore(&musb->lock, flags);
1796 		return -ENOMEM;
1797 	}
1798 
1799 	qh->hep = hep;
1800 	qh->dev = urb->dev;
1801 	INIT_LIST_HEAD(&qh->ring);
1802 	qh->is_ready = 1;
1803 
1804 	qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize);
1805 
1806 	/* no high bandwidth support yet */
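	/* bits 10:0 of wMaxPacketSize hold the packet size; bits 12:11
	 * would request extra (high bandwidth) transactions per microframe
	 */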
1807 	if (qh->maxpacket & ~0x7ff) {
1808 		ret = -EMSGSIZE;
1809 		goto done;
1810 	}
1811 
1812 	qh->epnum = epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
1813 	qh->type = epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
1814 
1815 	/* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
1816 	qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
1817 
1818 	/* precompute rxtype/txtype/type0 register */
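	/* as programmed below:  bits 3:0 are the target endpoint number,
	 * bits 5:4 the transfer type from bmAttributes, and bits 7:6 the
	 * target speed (01 high, 10 full, 11 low)
	 */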
1819 	type_reg = (qh->type << 4) | qh->epnum;
1820 	switch (urb->dev->speed) {
1821 	case USB_SPEED_LOW:
1822 		type_reg |= 0xc0;
1823 		break;
1824 	case USB_SPEED_FULL:
1825 		type_reg |= 0x80;
1826 		break;
1827 	default:
1828 		type_reg |= 0x40;
1829 	}
1830 	qh->type_reg = type_reg;
1831 
1832 	/* precompute rxinterval/txinterval register */
1833 	interval = min((u8)16, epd->bInterval);	/* log encoding */
1834 	switch (qh->type) {
1835 	case USB_ENDPOINT_XFER_INT:
1836 		/* fullspeed uses linear encoding */
1837 		if (USB_SPEED_FULL == urb->dev->speed) {
1838 			interval = epd->bInterval;
1839 			if (!interval)
1840 				interval = 1;
1841 		}
1842 		/* FALLTHROUGH */
1843 	case USB_ENDPOINT_XFER_ISOC:
1844 		/* iso always uses log encoding */
1845 		break;
1846 	default:
1847 		/* REVISIT we actually want to use NAK limits, hinting to the
1848 		 * transfer scheduling logic to try some other qh, e.g. try
1849 		 * for 2 msec first:
1850 		 *
1851 		 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
1852 		 *
1853 		 * The downside of disabling this is that transfer scheduling
1854 		 * gets VERY unfair for nonperiodic transfers; a misbehaving
1855 		 * peripheral could make that hurt.  Or, for reads, even one
1856 		 * that's perfectly normal:  network and other drivers keep
1857 		 * reads posted at all times, so having one pending for a week
1858 		 * should be perfectly safe.
1859 		 *
1860 		 * The upside of disabling it is that we avoid needing any
1861 		 * transfer scheduling code for a while.
1862 		 */
1863 		interval = 0;
1864 	}
1865 	qh->intv_reg = interval;
1866 
1867 	/* precompute addressing for external hub/tt ports */
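	/* For full/low speed devices behind a high speed hub, the hub
	 * address and port identify the transaction translator; setting
	 * the high bit of the hub address (below) marks a multi-TT hub.
	 */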
1868 	if (musb->is_multipoint) {
1869 		struct usb_device	*parent = urb->dev->parent;
1870 
1871 		if (parent != hcd->self.root_hub) {
1872 			qh->h_addr_reg = (u8) parent->devnum;
1873 
1874 			/* set up tt info if needed */
1875 			if (urb->dev->tt) {
1876 				qh->h_port_reg = (u8) urb->dev->ttport;
1877 				if (urb->dev->tt->hub)
1878 					qh->h_addr_reg =
1879 						(u8) urb->dev->tt->hub->devnum;
1880 				if (urb->dev->tt->multi)
1881 					qh->h_addr_reg |= 0x80;
1882 			}
1883 		}
1884 	}
1885 
1886 	/* invariant: hep->hcpriv is null OR the qh that's already scheduled.
1887 	 * until we get real dma queues (with an entry for each urb/buffer),
1888 	 * we only have work to do in the former case.
1889 	 */
1890 	spin_lock_irqsave(&musb->lock, flags);
1891 	if (hep->hcpriv) {
1892 		/* some concurrent activity submitted another urb to hep...
1893 		 * odd, rare, error prone, but legal.
1894 		 */
1895 		kfree(qh);
1896 		ret = 0;
1897 	} else
1898 		ret = musb_schedule(musb, qh,
1899 				epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);
1900 
1901 	if (ret == 0) {
1902 		urb->hcpriv = qh;
1903 		/* FIXME set urb->start_frame for iso/intr, it's tested in
1904 		 * musb_start_urb(), but otherwise only konicawc cares ...
1905 		 */
1906 	}
1907 	spin_unlock_irqrestore(&musb->lock, flags);
1908 
1909 done:
1910 	if (ret != 0) {
1911 		spin_lock_irqsave(&musb->lock, flags);
1912 		usb_hcd_unlink_urb_from_ep(hcd, urb);
1913 		spin_unlock_irqrestore(&musb->lock, flags);
1914 		kfree(qh);
1915 	}
1916 	return ret;
1917 }
1918 
1919 
1920 /*
1921  * abort a transfer that's at the head of a hardware queue.
1922  * called with the controller locked, irqs blocked
1923  * that hardware queue advances to the next transfer, unless the abort fails
1924  */
1925 static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in)
1926 {
1927 	struct musb_hw_ep	*ep = qh->hw_ep;
1928 	void __iomem		*epio = ep->regs;
1929 	unsigned		hw_end = ep->epnum;
1930 	void __iomem		*regs = ep->musb->mregs;
1931 	u16			csr;
1932 	int			status = 0;
1933 
1934 	musb_ep_select(regs, hw_end);
1935 
1936 	if (is_dma_capable()) {
1937 		struct dma_channel	*dma;
1938 
1939 		dma = is_in ? ep->rx_channel : ep->tx_channel;
1940 		if (dma) {
1941 			status = ep->musb->dma_controller->channel_abort(dma);
1942 			DBG(status ? 1 : 3,
1943 				"abort %cX%d DMA for urb %p --> %d\n",
1944 				is_in ? 'R' : 'T', ep->epnum,
1945 				urb, status);
1946 			urb->actual_length += dma->actual_len;
1947 		}
1948 	}
1949 
1950 	/* turn off DMA requests, discard state, stop polling ... */
1951 	if (is_in) {
1952 		/* giveback saves bulk toggle */
1953 		csr = musb_h_flush_rxfifo(ep, 0);
1954 
1955 		/* REVISIT we still get an irq; should likely clear the
1956 		 * endpoint's irq status here to avoid bogus irqs.
1957 		 * clearing that status is platform-specific...
1958 		 */
1959 	} else {
1960 		musb_h_tx_flush_fifo(ep);
1961 		csr = musb_readw(epio, MUSB_TXCSR);
1962 		csr &= ~(MUSB_TXCSR_AUTOSET
1963 			| MUSB_TXCSR_DMAENAB
1964 			| MUSB_TXCSR_H_RXSTALL
1965 			| MUSB_TXCSR_H_NAKTIMEOUT
1966 			| MUSB_TXCSR_H_ERROR
1967 			| MUSB_TXCSR_TXPKTRDY);
1968 		musb_writew(epio, MUSB_TXCSR, csr);
1969 		/* REVISIT may need to clear FLUSHFIFO ... */
1970 		musb_writew(epio, MUSB_TXCSR, csr);
1971 		/* flush cpu writebuffer */
1972 		csr = musb_readw(epio, MUSB_TXCSR);
1973 	}
1974 	if (status == 0)
1975 		musb_advance_schedule(ep->musb, urb, ep, is_in);
1976 	return status;
1977 }
1978 
1979 static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1980 {
1981 	struct musb		*musb = hcd_to_musb(hcd);
1982 	struct musb_qh		*qh;
1983 	struct list_head	*sched;
1984 	unsigned long		flags;
1985 	int			ret;
1986 
1987 	DBG(4, "urb=%p, dev%d ep%d%s\n", urb,
1988 			usb_pipedevice(urb->pipe),
1989 			usb_pipeendpoint(urb->pipe),
1990 			usb_pipein(urb->pipe) ? "in" : "out");
1991 
1992 	spin_lock_irqsave(&musb->lock, flags);
1993 	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
1994 	if (ret)
1995 		goto done;
1996 
1997 	qh = urb->hcpriv;
1998 	if (!qh)
1999 		goto done;
2000 
2001 	/* Any URB not actively programmed into endpoint hardware can be
2002 	 * immediately given back.  Such an URB must be at the head of its
2003 	 * endpoint queue, unless someday we get real DMA queues.  And even
2004 	 * then, it might not be known to the hardware...
2005 	 *
2006 	 * Otherwise abort current transfer, pending dma, etc.; urb->status
2007 	 * has already been updated.  This is a synchronous abort; it'd be
2008 	 * OK to hold off until after some IRQ, though.
2009 	 */
2010 	if (!qh->is_ready || urb->urb_list.prev != &qh->hep->urb_list)
2011 		ret = -EINPROGRESS;
2012 	else {
2013 		switch (qh->type) {
2014 		case USB_ENDPOINT_XFER_CONTROL:
2015 			sched = &musb->control;
2016 			break;
2017 		case USB_ENDPOINT_XFER_BULK:
2018 			if (usb_pipein(urb->pipe))
2019 				sched = &musb->in_bulk;
2020 			else
2021 				sched = &musb->out_bulk;
2022 			break;
2023 		default:
2024 			/* REVISIT when we get a schedule tree, periodic
2025 			 * transfers won't always be at the head of a
2026 			 * singleton queue...
2027 			 */
2028 			sched = NULL;
2029 			break;
2030 		}
2031 	}
2032 
2033 	/* NOTE:  qh is invalid unless !list_empty(&hep->urb_list) */
2034 	if (ret < 0 || (sched && qh != first_qh(sched))) {
2035 		int	ready = qh->is_ready;
2036 
2037 		ret = 0;
2038 		qh->is_ready = 0;
2039 		__musb_giveback(musb, urb, 0);
2040 		qh->is_ready = ready;
2041 	} else
2042 		ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
2043 done:
2044 	spin_unlock_irqrestore(&musb->lock, flags);
2045 	return ret;
2046 }
2047 
2048 /* disable an endpoint */
2049 static void
2050 musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
2051 {
2052 	u8			epnum = hep->desc.bEndpointAddress;
2053 	unsigned long		flags;
2054 	struct musb		*musb = hcd_to_musb(hcd);
2055 	u8			is_in = epnum & USB_DIR_IN;
2056 	struct musb_qh		*qh = hep->hcpriv;
2057 	struct urb		*urb, *tmp;
2058 	struct list_head	*sched;
2059 
2060 	if (!qh)
2061 		return;
2062 
2063 	spin_lock_irqsave(&musb->lock, flags);
2064 
2065 	switch (qh->type) {
2066 	case USB_ENDPOINT_XFER_CONTROL:
2067 		sched = &musb->control;
2068 		break;
2069 	case USB_ENDPOINT_XFER_BULK:
2070 		if (is_in)
2071 			sched = &musb->in_bulk;
2072 		else
2073 			sched = &musb->out_bulk;
2074 		break;
2075 	default:
2076 		/* REVISIT when we get a schedule tree, periodic transfers
2077 		 * won't always be at the head of a singleton queue...
2078 		 */
2079 		sched = NULL;
2080 		break;
2081 	}
2082 
2083 	/* NOTE:  qh is invalid unless !list_empty(&hep->urb_list) */
2084 
2085 	/* kick first urb off the hardware, if needed */
2086 	qh->is_ready = 0;
2087 	if (!sched || qh == first_qh(sched)) {
2088 		urb = next_urb(qh);
2089 
2090 		/* make software (then hardware) stop ASAP */
2091 		if (!urb->unlinked)
2092 			urb->status = -ESHUTDOWN;
2093 
2094 		/* cleanup */
2095 		musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
2096 	} else
2097 		urb = list_first_entry(&hep->urb_list, struct urb, urb_list);
2098 
2099 	/* then just nuke everything still queued on this endpoint */
2100 	list_for_each_entry_safe_from(urb, tmp, &hep->urb_list, urb_list)
2101 		musb_giveback(qh, urb, -ESHUTDOWN);
2102 
2103 	spin_unlock_irqrestore(&musb->lock, flags);
2104 }
2105 
2106 static int musb_h_get_frame_number(struct usb_hcd *hcd)
2107 {
2108 	struct musb	*musb = hcd_to_musb(hcd);
2109 
2110 	return musb_readw(musb->mregs, MUSB_FRAME);
2111 }
2112 
2113 static int musb_h_start(struct usb_hcd *hcd)
2114 {
2115 	struct musb	*musb = hcd_to_musb(hcd);
2116 
2117 	/* NOTE: musb_start() is called when the hub driver turns
2118 	 * on port power, or when (OTG) peripheral starts.
2119 	 */
2120 	hcd->state = HC_STATE_RUNNING;
2121 	musb->port1_status = 0;
2122 	return 0;
2123 }
2124 
2125 static void musb_h_stop(struct usb_hcd *hcd)
2126 {
2127 	musb_stop(hcd_to_musb(hcd));
2128 	hcd->state = HC_STATE_HALT;
2129 }
2130 
2131 static int musb_bus_suspend(struct usb_hcd *hcd)
2132 {
2133 	struct musb	*musb = hcd_to_musb(hcd);
2134 
2135 	if (musb->xceiv.state == OTG_STATE_A_SUSPEND)
2136 		return 0;
2137 
2138 	if (is_host_active(musb) && musb->is_active) {
2139 		WARNING("trying to suspend as %s is_active=%i\n",
2140 			otg_state_string(musb), musb->is_active);
2141 		return -EBUSY;
2142 	} else
2143 		return 0;
2144 }
2145 
2146 static int musb_bus_resume(struct usb_hcd *hcd)
2147 {
2148 	/* resuming child port does the work */
2149 	return 0;
2150 }
2151 
2152 const struct hc_driver musb_hc_driver = {
2153 	.description		= "musb-hcd",
2154 	.product_desc		= "MUSB HDRC host driver",
2155 	.hcd_priv_size		= sizeof(struct musb),
2156 	.flags			= HCD_USB2 | HCD_MEMORY,
2157 
2158 	/* not using irq handler or reset hooks from usbcore, since
2159 	 * those must be shared with peripheral code for OTG configs
2160 	 */
2161 
2162 	.start			= musb_h_start,
2163 	.stop			= musb_h_stop,
2164 
2165 	.get_frame_number	= musb_h_get_frame_number,
2166 
2167 	.urb_enqueue		= musb_urb_enqueue,
2168 	.urb_dequeue		= musb_urb_dequeue,
2169 	.endpoint_disable	= musb_h_disable,
2170 
2171 	.hub_status_data	= musb_hub_status_data,
2172 	.hub_control		= musb_hub_control,
2173 	.bus_suspend		= musb_bus_suspend,
2174 	.bus_resume		= musb_bus_resume,
2175 	/* .start_port_reset	= NULL, */
2176 	/* .hub_irq_enable	= NULL, */
2177 };
2178