Lines Matching defs:musb
33 struct musb *musb, struct musb_ep *musb_ep)
36 struct dma_controller *dma = musb->dma_controller;
59 musb->controller,
65 ret = dma_mapping_error(musb->controller, dma_addr);
72 dma_sync_single_for_device(musb->controller,
84 struct musb *musb)
92 dev_vdbg(musb->controller,
97 dma_unmap_single(musb->controller,
105 dma_sync_single_for_cpu(musb->controller,
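
The matches at lines 33-105 sit in the driver's map_dma_buffer()/unmap_dma_buffer() helpers: a request buffer is either mapped here with dma_map_single(), or, when the gadget layer pre-mapped it, only handed back and forth with dma_sync_single_for_device()/dma_sync_single_for_cpu(). A minimal sketch of that map-or-sync decision, with the per-request bookkeeping reduced to a hypothetical driver_mapped flag and a hypothetical SKETCH_DMA_UNSET sentinel:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/types.h>

#define SKETCH_DMA_UNSET	(~(dma_addr_t)0)	/* hypothetical sentinel */

/* Map-or-sync sketch: if the gadget layer handed us an unmapped buffer
 * we map it ourselves; if it was pre-mapped we only sync ownership to
 * the device. The real driver falls back to PIO when mapping fails
 * (cf. the dma_mapping_error() check at line 65). */
static int sketch_map_for_tx(struct device *dev, void *buf, size_t len,
			     dma_addr_t *dma, bool *driver_mapped)
{
	if (*dma == SKETCH_DMA_UNSET) {
		*dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *dma))
			return -ENOMEM;		/* caller falls back to PIO */
		*driver_mapped = true;
	} else {
		dma_sync_single_for_device(dev, *dma, len, DMA_TO_DEVICE);
		*driver_mapped = false;
	}
	return 0;
}
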
126 __releases(ep->musb->lock)
127 __acquires(ep->musb->lock)
130 struct musb *musb;
138 musb = req->musb;
141 spin_unlock(&musb->lock);
143 if (!dma_mapping_error(&musb->g.dev, request->dma))
144 unmap_dma_buffer(req, musb);
148 spin_lock(&musb->lock);
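
Lines 126-148 match inside the request-completion path (note the __releases/__acquires annotations at 126-127): the controller lock is dropped across the unmap and the function driver's completion callback, which may re-queue from within. A reduced sketch of that locking shape; usb_gadget_giveback_request() is the gadget-core helper that runs ->complete(), the rest here is illustrative:

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/usb/gadget.h>

/* Completion sketch: never hold the controller lock while the function
 * driver's ->complete() runs, since it may re-queue a request. */
static void sketch_giveback(spinlock_t *lock, struct usb_ep *ep,
			    struct usb_request *request, int status)
	__releases(lock)
	__acquires(lock)
{
	if (request->status == -EINPROGRESS)
		request->status = status;
	spin_unlock(lock);			/* callback may re-queue */
	usb_gadget_giveback_request(ep, request);
	spin_lock(lock);
}
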
160 struct musb *musb = ep->musb;
162 void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;
167 struct dma_controller *c = ep->musb->dma_controller;
188 musb_dbg(musb, "%s: abort DMA --> %d", ep->name, value);
208 static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
210 if (can_bulk_split(musb, ep->type))
223 static void txstate(struct musb *musb, struct musb_request *req)
227 void __iomem *epio = musb->endpoints[epnum].regs;
236 musb_dbg(musb, "ep:%s disabled - ignore request",
243 musb_dbg(musb, "dma pending...");
251 fifo_count = min(max_ep_writesize(musb, musb_ep),
255 musb_dbg(musb, "%s old packet still ready, txcsr %03x",
261 musb_dbg(musb, "%s stalling, txcsr %03x",
266 musb_dbg(musb, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x",
272 struct dma_controller *c = musb->dma_controller;
283 if (musb_dma_inventra(musb) || musb_dma_ux500(musb)) {
323 can_bulk_split(musb,
333 if (is_cppi_enabled(musb)) {
369 } else if (tusb_dma_omap(musb))
383 unmap_dma_buffer(req, musb);
394 musb_dbg(musb, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d",
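
When no DMA channel accepts the transfer, txstate() (lines 223-394) unmaps the buffer (line 383) and loads the FIFO by PIO before setting TXPKTRDY. The real FIFO accessor is the chip-specific musb_write_fifo(); a generic sketch of such a word-wide FIFO fill, assuming a fixed 32-bit FIFO window:

#include <linux/io.h>
#include <linux/types.h>

/* PIO fallback sketch: push 'len' bytes into a word-wide FIFO window,
 * whole words first, then the trailing bytes one at a time. */
static void sketch_fifo_write(void __iomem *fifo, const u8 *buf, size_t len)
{
	iowrite32_rep(fifo, buf, len >> 2);	/* whole 32-bit words */
	buf += len & ~3;
	len &= 3;
	while (len--)
		iowrite8(*buf++, fifo);		/* trailing bytes */
}
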
406 void musb_g_tx(struct musb *musb, u8 epnum)
411 u8 __iomem *mbase = musb->mregs;
412 struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_in;
413 void __iomem *epio = musb->endpoints[epnum].regs;
421 musb_dbg(musb, "<== %s, txcsr %04x", musb_ep->end_point.name, csr);
441 dev_vdbg(musb->controller, "underrun on ep%d, req %p\n",
450 musb_dbg(musb, "%s dma still busy?", musb_ep->end_point.name);
466 musb_dbg(musb, "TXCSR%d %04x, DMA off, len %zu, req %p",
503 musb_dbg(musb, "%s idle now",
509 txstate(musb, req);
518 static void rxstate(struct musb *musb, struct musb_request *req)
523 void __iomem *epio = musb->endpoints[epnum].regs;
527 struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
539 musb_dbg(musb, "ep:%s disabled - ignore request",
546 musb_dbg(musb, "DMA pending...");
551 musb_dbg(musb, "%s stalling, RXCSR %04x",
556 if (is_cppi_enabled(musb) && is_buffer_mapped(req)) {
557 struct dma_controller *c = musb->dma_controller;
601 if (musb_dma_inventra(musb)) {
607 c = musb->dma_controller;
676 if ((musb_dma_ux500(musb)) &&
683 c = musb->dma_controller;
726 musb_dbg(musb, "%s OUT/RX pio fifo %d/%d, maxpacket %d",
733 if (tusb_dma_omap(musb)) {
734 struct dma_controller *c = musb->dma_controller;
753 unmap_dma_buffer(req, musb);
790 void musb_g_rx(struct musb *musb, u8 epnum)
795 void __iomem *mbase = musb->mregs;
797 void __iomem *epio = musb->endpoints[epnum].regs;
799 struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
818 musb_dbg(musb, "<== %s, rxcsr %04x%s %p", musb_ep->end_point.name,
833 musb_dbg(musb, "%s iso overrun on %p", musb_ep->name, request);
839 musb_dbg(musb, "%s, incomprx", musb_ep->end_point.name);
844 musb_dbg(musb, "%s busy, csr %04x",
903 rxstate(musb, req);
915 struct musb *musb;
928 musb = musb_ep->musb;
929 mbase = musb->mregs;
932 spin_lock_irqsave(&musb->lock, flags);
950 ok = musb->hb_iso_tx;
952 ok = musb->hb_iso_rx;
955 musb_dbg(musb, "no support for high bandwidth ISO");
978 musb_dbg(musb, "packet size beyond hardware FIFO size");
982 musb->intrtxe |= (1 << epnum);
983 musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe);
991 if (can_bulk_split(musb, musb_ep->type))
1017 musb_dbg(musb, "packet size beyond hardware FIFO size");
1021 musb->intrrxe |= (1 << epnum);
1022 musb_writew(mbase, MUSB_INTRRXE, musb->intrrxe);
1054 if (is_dma_capable() && musb->dma_controller) {
1055 struct dma_controller *c = musb->dma_controller;
1074 schedule_delayed_work(&musb->irq_work, 0);
1077 spin_unlock_irqrestore(&musb->lock, flags);
1087 struct musb *musb;
1093 musb = musb_ep->musb;
1095 epio = musb->endpoints[epnum].regs;
1097 spin_lock_irqsave(&musb->lock, flags);
1098 musb_ep_select(musb->mregs, epnum);
1102 musb->intrtxe &= ~(1 << epnum);
1103 musb_writew(musb->mregs, MUSB_INTRTXE, musb->intrtxe);
1106 musb->intrrxe &= ~(1 << epnum);
1107 musb_writew(musb->mregs, MUSB_INTRRXE, musb->intrrxe);
1117 schedule_delayed_work(&musb->irq_work, 0);
1119 spin_unlock_irqrestore(&musb->lock, flags);
1121 musb_dbg(musb, "%s", musb_ep->end_point.name);
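
Both musb_gadget_enable() (lines 915-1077) and musb_gadget_disable() (lines 1087-1121) follow a shadow-mask pattern for the endpoint interrupt enables: the bits live in cached fields (intrtxe/intrrxe, lines 982-983 and 1102-1107) and the whole cached mask is written back through musb_writew(), never read-modify-written from the register. Reduced to its shape, with plain writew() standing in for the musb accessor:

#include <linux/io.h>
#include <linux/types.h>

/* Shadow-mask sketch: 'mask' caches the enable bits so the hardware
 * register never needs to be read back. Reduced/hypothetical types. */
struct sketch_irq_shadow {
	u16 mask;		/* cached enable bits */
	void __iomem *reg;	/* hardware enable register */
};

static void sketch_ep_irq_enable(struct sketch_irq_shadow *s, u8 epnum)
{
	s->mask |= 1 << epnum;
	writew(s->mask, s->reg);	/* always write the full mask */
}

static void sketch_ep_irq_disable(struct sketch_irq_shadow *s, u8 epnum)
{
	s->mask &= ~(1 << epnum);
	writew(s->mask, s->reg);
}
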
1162 void musb_ep_restart(struct musb *musb, struct musb_request *req)
1165 musb_ep_select(musb->mregs, req->epnum);
1167 txstate(musb, req);
1169 rxstate(musb, req);
1172 static int musb_ep_restart_resume_work(struct musb *musb, void *data)
1176 musb_ep_restart(musb, req);
1186 struct musb *musb;
1196 musb = musb_ep->musb;
1199 request->musb = musb;
1204 status = pm_runtime_get(musb->controller);
1206 dev_err(musb->controller,
1209 pm_runtime_put_noidle(musb->controller);
1223 map_dma_buffer(request, musb, musb_ep);
1225 spin_lock_irqsave(&musb->lock, lockflags);
1229 musb_dbg(musb, "req %p queued to %s while ep %s",
1232 unmap_dma_buffer(request, musb);
1241 status = musb_queue_resume_work(musb,
1245 dev_err(musb->controller, "%s resume work: %i\n",
1252 spin_unlock_irqrestore(&musb->lock, lockflags);
1253 pm_runtime_mark_last_busy(musb->controller);
1254 pm_runtime_put_autosuspend(musb->controller);
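
musb_gadget_queue() (lines 1186-1254) brackets its work with runtime PM: an asynchronous pm_runtime_get() up front, pm_runtime_put_noidle() on the error path to keep the usage count balanced (lines 1204-1209), and mark-last-busy plus an autosuspend put on the way out (lines 1253-1254). The skeleton of that pattern:

#include <linux/errno.h>
#include <linux/pm_runtime.h>

static int sketch_queue(struct device *controller)
{
	int status;

	status = pm_runtime_get(controller);	/* async resume, takes a ref */
	if (status < 0 && status != -EINPROGRESS) {
		pm_runtime_put_noidle(controller); /* drop ref, skip idling */
		return status;
	}

	/* ... queue the request under the controller lock ... */

	pm_runtime_mark_last_busy(controller);	/* reset autosuspend timer */
	pm_runtime_put_autosuspend(controller);	/* drop ref, maybe suspend */
	return 0;
}
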
1266 struct musb *musb = musb_ep->musb;
1273 spin_lock_irqsave(&musb->lock, flags);
1280 dev_err(musb->controller, "request %p not queued to %s\n",
1292 struct dma_controller *c = musb->dma_controller;
1294 musb_ep_select(musb->mregs, musb_ep->current_epnum);
1309 spin_unlock_irqrestore(&musb->lock, flags);
1323 struct musb *musb = musb_ep->musb;
1324 void __iomem *epio = musb->endpoints[epnum].regs;
1333 mbase = musb->mregs;
1335 spin_lock_irqsave(&musb->lock, flags);
1347 musb_dbg(musb, "request in progress, cannot halt %s",
1356 musb_dbg(musb, "FIFO busy, cannot halt %s",
1366 musb_dbg(musb, "%s: %s stall", ep->name, value ? "set" : "clear");
1393 musb_dbg(musb, "restarting the request");
1394 musb_ep_restart(musb, request);
1398 spin_unlock_irqrestore(&musb->lock, flags);
1424 struct musb *musb = musb_ep->musb;
1426 void __iomem *mbase = musb->mregs;
1429 spin_lock_irqsave(&musb->lock, flags);
1435 spin_unlock_irqrestore(&musb->lock, flags);
1443 struct musb *musb = musb_ep->musb;
1445 void __iomem *epio = musb->endpoints[epnum].regs;
1450 mbase = musb->mregs;
1452 spin_lock_irqsave(&musb->lock, flags);
1456 musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe & ~(1 << epnum));
1480 musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe);
1481 spin_unlock_irqrestore(&musb->lock, flags);
1501 struct musb *musb = gadget_to_musb(gadget);
1503 return (int)musb_readw(musb->mregs, MUSB_FRAME);
1508 struct musb *musb = gadget_to_musb(gadget);
1509 void __iomem *mregs = musb->mregs;
1515 spin_lock_irqsave(&musb->lock, flags);
1517 switch (musb_get_state(musb)) {
1523 if (musb->may_wakeup && musb->is_suspended)
1529 musb_dbg(musb, "Sending SRP: devctl: %02x", devctl);
1546 if (musb->xceiv) {
1547 spin_unlock_irqrestore(&musb->lock, flags);
1548 otg_start_srp(musb->xceiv->otg);
1549 spin_lock_irqsave(&musb->lock, flags);
1553 musb_platform_try_idle(musb,
1559 musb_dbg(musb, "Unhandled wake: %s",
1560 musb_otg_state_string(musb));
1569 musb_dbg(musb, "issue wakeup");
1578 spin_unlock_irqrestore(&musb->lock, flags);
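
In musb_gadget_wakeup() (lines 1508-1578), starting SRP through the transceiver may sleep, so the lock is dropped around otg_start_srp() and retaken afterwards (lines 1547-1549). A self-contained sketch of that shape:

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/usb/otg.h>
#include <linux/usb/phy.h>

static int sketch_wakeup(spinlock_t *lock, struct usb_phy *xceiv)
{
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(lock, flags);
	if (xceiv) {
		spin_unlock_irqrestore(lock, flags);
		otg_start_srp(xceiv->otg);	/* may sleep */
		spin_lock_irqsave(lock, flags);
		ret = 0;
	}
	spin_unlock_irqrestore(lock, flags);
	return ret;
}
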
1589 static void musb_pullup(struct musb *musb, int is_on)
1593 power = musb_readb(musb->mregs, MUSB_POWER);
1601 musb_dbg(musb, "gadget D+ pullup %s",
1603 musb_writeb(musb->mregs, MUSB_POWER, power);
1609 musb_dbg(musb, "<= %s =>\n", __func__);
1622 struct musb *musb = gadget_to_musb(gadget);
1624 return usb_phy_set_power(musb->xceiv, mA);
1629 struct musb *musb;
1632 musb = container_of(work, struct musb, gadget_work.work);
1633 pm_runtime_get_sync(musb->controller);
1634 spin_lock_irqsave(&musb->lock, flags);
1635 musb_pullup(musb, musb->softconnect);
1636 spin_unlock_irqrestore(&musb->lock, flags);
1637 pm_runtime_mark_last_busy(musb->controller);
1638 pm_runtime_put_autosuspend(musb->controller);
1643 struct musb *musb = gadget_to_musb(gadget);
1651 spin_lock_irqsave(&musb->lock, flags);
1652 if (is_on != musb->softconnect) {
1653 musb->softconnect = is_on;
1654 schedule_delayed_work(&musb->gadget_work, 0);
1656 spin_unlock_irqrestore(&musb->lock, flags);
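
musb_gadget_pullup() (lines 1643-1656) only flips the cached softconnect flag under the lock and defers the actual D+ pullup write to delayed work, where pm_runtime_get_sync() is allowed to sleep (lines 1629-1638). A stripped-down sketch with hypothetical reduced types:

#include <linux/pm_runtime.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct sketch_udc {			/* hypothetical, reduced */
	spinlock_t lock;
	bool softconnect;
	struct delayed_work gadget_work;
	struct device *controller;
};

static int sketch_pullup(struct sketch_udc *udc, int is_on)
{
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	if (!!is_on != udc->softconnect) {
		udc->softconnect = !!is_on;
		/* defer the register write: we may be in atomic context */
		schedule_delayed_work(&udc->gadget_work, 0);
	}
	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}

static void sketch_gadget_work(struct work_struct *work)
{
	struct sketch_udc *udc = container_of(work, struct sketch_udc,
					      gadget_work.work);
	unsigned long flags;

	pm_runtime_get_sync(udc->controller);	/* may sleep: ok here */
	spin_lock_irqsave(&udc->lock, flags);
	/* ... write the soft-connect bit per udc->softconnect ... */
	spin_unlock_irqrestore(&udc->lock, flags);
	pm_runtime_mark_last_busy(udc->controller);
	pm_runtime_put_autosuspend(udc->controller);
}
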
1686 init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
1688 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
1693 ep->musb = musb;
1708 musb->g.ep0 = &ep->end_point;
1718 list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
1734 static inline void musb_g_init_endpoints(struct musb *musb)
1740 INIT_LIST_HEAD(&musb->g.ep_list);
1742 for (epnum = 0, hw_ep = musb->endpoints;
1743 epnum < musb->nr_endpoints;
1746 init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
1749 init_peripheral_ep(musb, &hw_ep->ep_in,
1753 init_peripheral_ep(musb, &hw_ep->ep_out,
1763 int musb_gadget_setup(struct musb *musb)
1768 * musb peripherals at the same time, only the bus lock
1772 musb->g.ops = &musb_gadget_operations;
1773 musb->g.max_speed = USB_SPEED_HIGH;
1774 musb->g.speed = USB_SPEED_UNKNOWN;
1776 MUSB_DEV_MODE(musb);
1777 musb_set_state(musb, OTG_STATE_B_IDLE);
1780 musb->g.name = musb_driver_name;
1782 musb->g.is_otg = 0;
1783 INIT_DELAYED_WORK(&musb->gadget_work, musb_gadget_work);
1784 musb_g_init_endpoints(musb);
1786 musb->is_active = 0;
1787 musb_platform_try_idle(musb, 0);
1789 status = usb_add_gadget_udc(musb->controller, &musb->g);
1795 musb->g.dev.parent = NULL;
1796 device_unregister(&musb->g.dev);
1800 void musb_gadget_cleanup(struct musb *musb)
1802 if (musb->port_mode == MUSB_HOST)
1805 cancel_delayed_work_sync(&musb->gadget_work);
1806 usb_del_gadget_udc(&musb->g);
1823 struct musb *musb = gadget_to_musb(g);
1832 pm_runtime_get_sync(musb->controller);
1834 musb->softconnect = 0;
1835 musb->gadget_driver = driver;
1837 spin_lock_irqsave(&musb->lock, flags);
1838 musb->is_active = 1;
1840 if (musb->xceiv)
1841 otg_set_peripheral(musb->xceiv->otg, &musb->g);
1843 phy_set_mode(musb->phy, PHY_MODE_USB_DEVICE);
1845 musb_set_state(musb, OTG_STATE_B_IDLE);
1846 spin_unlock_irqrestore(&musb->lock, flags);
1848 musb_start(musb);
1854 if (musb->xceiv && musb->xceiv->last_event == USB_EVENT_ID)
1855 musb_platform_set_vbus(musb, 1);
1857 pm_runtime_mark_last_busy(musb->controller);
1858 pm_runtime_put_autosuspend(musb->controller);
1874 struct musb *musb = gadget_to_musb(g);
1877 pm_runtime_get_sync(musb->controller);
1884 spin_lock_irqsave(&musb->lock, flags);
1886 musb_hnp_stop(musb);
1888 (void) musb_gadget_vbus_draw(&musb->g, 0);
1890 musb_set_state(musb, OTG_STATE_UNDEFINED);
1891 musb_stop(musb);
1893 if (musb->xceiv)
1894 otg_set_peripheral(musb->xceiv->otg, NULL);
1896 phy_set_mode(musb->phy, PHY_MODE_INVALID);
1898 musb->is_active = 0;
1899 musb->gadget_driver = NULL;
1900 musb_platform_try_idle(musb, 0);
1901 spin_unlock_irqrestore(&musb->lock, flags);
1910 pm_runtime_mark_last_busy(musb->controller);
1911 pm_runtime_put_autosuspend(musb->controller);
1920 void musb_g_resume(struct musb *musb)
1922 musb->is_suspended = 0;
1923 switch (musb_get_state(musb)) {
1928 musb->is_active = 1;
1929 if (musb->gadget_driver && musb->gadget_driver->resume) {
1930 spin_unlock(&musb->lock);
1931 musb->gadget_driver->resume(&musb->g);
1932 spin_lock(&musb->lock);
1937 musb_otg_state_string(musb));
1942 void musb_g_suspend(struct musb *musb)
1946 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
1947 musb_dbg(musb, "musb_g_suspend: devctl %02x", devctl);
1949 switch (musb_get_state(musb)) {
1952 musb_set_state(musb, OTG_STATE_B_PERIPHERAL);
1955 musb->is_suspended = 1;
1956 if (musb->gadget_driver && musb->gadget_driver->suspend) {
1957 spin_unlock(&musb->lock);
1958 musb->gadget_driver->suspend(&musb->g);
1959 spin_lock(&musb->lock);
1967 musb_otg_state_string(musb));
1972 void musb_g_wakeup(struct musb *musb)
1974 musb_gadget_wakeup(&musb->g);
1978 void musb_g_disconnect(struct musb *musb)
1980 void __iomem *mregs = musb->mregs;
1983 musb_dbg(musb, "musb_g_disconnect: devctl %02x", devctl);
1989 (void) musb_gadget_vbus_draw(&musb->g, 0);
1991 musb->g.speed = USB_SPEED_UNKNOWN;
1992 if (musb->gadget_driver && musb->gadget_driver->disconnect) {
1993 spin_unlock(&musb->lock);
1994 musb->gadget_driver->disconnect(&musb->g);
1995 spin_lock(&musb->lock);
1998 switch (musb_get_state(musb)) {
2000 musb_dbg(musb, "Unhandled disconnect %s, setting a_idle",
2001 musb_otg_state_string(musb));
2002 musb_set_state(musb, OTG_STATE_A_IDLE);
2003 MUSB_HST_MODE(musb);
2006 musb_set_state(musb, OTG_STATE_A_WAIT_BCON);
2007 MUSB_HST_MODE(musb);
2013 musb_set_state(musb, OTG_STATE_B_IDLE);
2019 musb->is_active = 0;
2022 void musb_g_reset(struct musb *musb)
2023 __releases(musb->lock)
2024 __acquires(musb->lock)
2026 void __iomem *mbase = musb->mregs;
2030 musb_dbg(musb, "<== %s driver '%s'",
2033 musb->gadget_driver
2034 ? musb->gadget_driver->driver.name
2039 if (musb->gadget_driver && musb->g.speed != USB_SPEED_UNKNOWN) {
2040 spin_unlock(&musb->lock);
2041 usb_gadget_udc_reset(&musb->g, musb->gadget_driver);
2042 spin_lock(&musb->lock);
2052 musb->g.speed = (power & MUSB_POWER_HSMODE)
2056 musb->is_active = 1;
2057 musb->is_suspended = 0;
2058 MUSB_DEV_MODE(musb);
2059 musb->address = 0;
2060 musb->ep0_state = MUSB_EP0_STAGE_SETUP;
2062 musb->may_wakeup = 0;
2063 musb->g.b_hnp_enable = 0;
2064 musb->g.a_alt_hnp_support = 0;
2065 musb->g.a_hnp_support = 0;
2066 musb->g.quirk_zlp_not_supp = 1;
2071 if (!musb->g.is_otg) {
2077 musb_set_state(musb, OTG_STATE_B_PERIPHERAL);
2078 musb->g.is_a_peripheral = 0;
2080 musb_set_state(musb, OTG_STATE_B_PERIPHERAL);
2081 musb->g.is_a_peripheral = 0;
2083 musb_set_state(musb, OTG_STATE_A_PERIPHERAL);
2084 musb->g.is_a_peripheral = 1;
2088 (void) musb_gadget_vbus_draw(&musb->g, 8);
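
Finally, musb_g_reset() (lines 2022-2088) latches the negotiated speed from the POWER register's high-speed bit (line 2052) before re-arming device state. That selection reduces to a one-liner; the bit is passed in here only to keep the sketch free of musb headers:

#include <linux/types.h>
#include <linux/usb/ch9.h>

/* 'power' is the value just read from the POWER register; 'hsmode' is
 * the controller's high-speed status bit (MUSB_POWER_HSMODE). */
static enum usb_device_speed sketch_speed_after_reset(u8 power, u8 hsmode)
{
	return (power & hsmode) ? USB_SPEED_HIGH : USB_SPEED_FULL;
}
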