--- musb_host.c	(81ec4e4a5116c2bccec2dd1d350ceb4372846ba8)
+++ musb_host.c	(c9cd06b3d6ea825c62e277def929cc4315802b48)
 /*
  * MUSB OTG driver host support
  *
  * Copyright 2005 Mentor Graphics Corporation
  * Copyright (C) 2005-2006 by Texas Instruments
  * Copyright (C) 2006-2007 Nokia Corporation
  * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
  *
--- 281 unchanged lines hidden ---
 
                 if (!hw_ep->tx_channel)
                         musb_h_tx_start(hw_ep);
                 else if (is_cppi_enabled() || tusb_dma_omap())
                         musb_h_tx_dma_start(hw_ep);
         }
 }
 
-/* caller owns controller lock, irqs are blocked */
-static void
-__musb_giveback(struct musb *musb, struct urb *urb, int status)
+/* Context: caller owns controller lock, IRQs are blocked */
+static void musb_giveback(struct musb *musb, struct urb *urb, int status)
 __releases(musb->lock)
 __acquires(musb->lock)
 {
         DBG(({ int level; switch (status) {
                                 case 0:
                                         level = 4;
                                         break;
                                 /* common/boring faults */
--- 36 unchanged lines hidden ---
         if (is_in)
                 csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
         else
                 csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;
 
         usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
 }
 
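The __releases(musb->lock)/__acquires(musb->lock) annotations on musb_giveback() above record the convention named in the new comment: the function is entered with the controller lock held and IRQs blocked, drops the lock only around the URB completion callback, and re-takes it before returning to its caller. A minimal sketch of that contract, for orientation only; giveback_sketch() and demo_complete() are stand-ins rather than symbols from this patch (the real code hands the URB to the USB core's giveback routine):

static void demo_complete(struct urb *urb, int status) { }   /* placeholder */

/* Illustrative only: demo_complete() stands in for the completion call that
 * the real musb_giveback() makes while the lock is dropped. */
static void giveback_sketch(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
        /* The caller already blocked IRQs, so a plain unlock/lock pair is
         * enough; the completion must not run under musb->lock. */
        spin_unlock(&musb->lock);
        demo_complete(urb, status);
        spin_lock(&musb->lock);         /* caller expects the lock held on return */
}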
-/* caller owns controller lock, irqs are blocked */
-static struct musb_qh *
-musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
+/*
+ * Advance this hardware endpoint's queue, completing the specified URB and
+ * advancing to either the next URB queued to that qh, or else invalidating
+ * that qh and advancing to the next qh scheduled after the current one.
+ *
+ * Context: caller owns controller lock, IRQs are blocked
+ */
+static void musb_advance_schedule(struct musb *musb, struct urb *urb,
+                struct musb_hw_ep *hw_ep, int is_in)
 {
+        struct musb_qh *qh = musb_ep_get_qh(hw_ep, is_in);
         struct musb_hw_ep *ep = qh->hw_ep;
-        struct musb *musb = ep->musb;
-        int is_in = usb_pipein(urb->pipe);
         int ready = qh->is_ready;
+        int status;
 
+        status = (urb->status == -EINPROGRESS) ? 0 : urb->status;
+
         /* save toggle eagerly, for paranoia */
         switch (qh->type) {
         case USB_ENDPOINT_XFER_BULK:
         case USB_ENDPOINT_XFER_INT:
                 musb_save_toggle(qh, is_in, urb);
                 break;
         case USB_ENDPOINT_XFER_ISOC:
-                if (status == 0 && urb->error_count)
+                if (urb->error_count)
                         status = -EXDEV;
                 break;
         }
 
         qh->is_ready = 0;
-        __musb_giveback(musb, urb, status);
+        musb_giveback(musb, urb, status);
         qh->is_ready = ready;
 
         /* reclaim resources (and bandwidth) ASAP; deschedule it, and
          * invalidate qh as soon as list_empty(&hep->urb_list)
          */
         if (list_empty(&qh->hep->urb_list)) {
                 struct list_head *head;
 
--- 27 unchanged lines hidden ---
                          * de-allocated if it's tracked and allocated;
                          * and where we'd update the schedule tree...
                          */
                         kfree(qh);
                         qh = NULL;
                         break;
                 }
         }
-        return qh;
-}
 
-/*
- * Advance this hardware endpoint's queue, completing the specified urb and
- * advancing to either the next urb queued to that qh, or else invalidating
- * that qh and advancing to the next qh scheduled after the current one.
- *
- * Context: caller owns controller lock, irqs are blocked
- */
-static void
-musb_advance_schedule(struct musb *musb, struct urb *urb,
-                struct musb_hw_ep *hw_ep, int is_in)
-{
-        struct musb_qh *qh = musb_ep_get_qh(hw_ep, is_in);
-
-        if (urb->status == -EINPROGRESS)
-                qh = musb_giveback(qh, urb, 0);
-        else
-                qh = musb_giveback(qh, urb, urb->status);
-
         if (qh != NULL && qh->is_ready) {
                 DBG(4, "... next ep%d %cX urb %p\n",
-                                hw_ep->epnum, is_in ? 'R' : 'T',
-                                next_urb(qh));
+                        hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
                 musb_start_urb(musb, is_in, qh);
         }
 }
 
 static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
 {
         /* we don't want fifo to fill itself again;
          * ignore dma (various models),
--- 1669 unchanged lines hidden ---
          * NOTE: qh is invalid unless !list_empty(&hep->urb_list)
          */
         if (!qh->is_ready
                         || urb->urb_list.prev != &qh->hep->urb_list
                         || musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
                 int ready = qh->is_ready;
 
                 qh->is_ready = 0;
-                __musb_giveback(musb, urb, 0);
+                musb_giveback(musb, urb, 0);
                 qh->is_ready = ready;
 
                 /* If nothing else (usually musb_giveback) is using it
                  * and its URB list has emptied, recycle this qh.
                  */
                 if (ready && list_empty(&qh->hep->urb_list)) {
                         qh->hep->hcpriv = NULL;
                         list_del(&qh->ring);
--- 45 unchanged lines hidden ---
                         musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
                 }
         } else {
                 /* Just empty the queue; the hardware is busy with
                  * other transfers, and since !qh->is_ready nothing
                  * will activate any of these as it advances.
                  */
                 while (!list_empty(&hep->urb_list))
-                        __musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
+                        musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
 
                 hep->hcpriv = NULL;
                 list_del(&qh->ring);
                 kfree(qh);
         }
 exit:
         spin_unlock_irqrestore(&musb->lock, flags);
 }
--- 73 unchanged lines hidden ---
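The interrupt-side callers of musb_advance_schedule() sit inside the hidden regions above, so the following is only a rough sketch of how a completion path would hand back a finished URB after this refactoring; the handler name and its simplifications are assumptions, not code from the patch. Only the musb_advance_schedule() call, and the comment describing what it now does, reflect the hunks shown here.

/* Hypothetical, simplified TX-completion path (not from the patch). */
static void handle_tx_done_sketch(struct musb *musb, struct musb_hw_ep *hw_ep,
                struct urb *urb, int result)
{
        /* Assumed caller context: musb->lock held, IRQs blocked. */
        urb->status = result;   /* 0 or a fault code; -EINPROGRESS maps to 0 */

        /* The refactored helper derives the giveback status itself, saves
         * the data toggle for bulk/interrupt, completes the URB through
         * musb_giveback() (briefly dropping musb->lock), and then starts
         * the next URB or qh if one is ready. */
        musb_advance_schedule(musb, urb, hw_ep, 0);     /* is_in == 0: TX */
}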