/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2009 Andrew Thompson (thompsa@FreeBSD.org)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/sx.h>
#include <sys/unistd.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/usb/usb_process.h>
#include <dev/usb/net/usb_ethernet.h>

static SYSCTL_NODE(_net, OID_AUTO, ue, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "USB Ethernet parameters");

#define	UE_LOCK(_ue)		mtx_lock((_ue)->ue_mtx)
#define	UE_UNLOCK(_ue)		mtx_unlock((_ue)->ue_mtx)
#define	UE_LOCK_ASSERT(_ue, t)	mtx_assert((_ue)->ue_mtx, t)

MODULE_DEPEND(uether, usb, 1, 1, 1);
MODULE_DEPEND(uether, miibus, 1, 1, 1);

static struct unrhdr *ueunit;

static usb_proc_callback_t ue_attach_post_task;
static usb_proc_callback_t ue_promisc_task;
static usb_proc_callback_t ue_setmulti_task;
static usb_proc_callback_t ue_ifmedia_task;
static usb_proc_callback_t ue_tick_task;
static usb_proc_callback_t ue_start_task;
static usb_proc_callback_t ue_stop_task;

static void	ue_init(void *);
static void	ue_start(if_t);
static int	ue_ifmedia_upd(if_t);
static void	ue_watchdog(void *);

/*
 * Return values:
 *    0: success
 * Else: device has been detached
 */
uint8_t
uether_pause(struct usb_ether *ue, unsigned _ticks)
{
	if (usb_proc_is_gone(&ue->ue_tq)) {
		/* nothing to do */
		return (1);
	}
	usb_pause_mtx(ue->ue_mtx, _ticks);
	return (0);
}

static void
ue_queue_command(struct usb_ether *ue,
    usb_proc_callback_t *fn,
    struct usb_proc_msg *t0, struct usb_proc_msg *t1)
{
	struct usb_ether_cfg_task *task;

	UE_LOCK_ASSERT(ue, MA_OWNED);

	if (usb_proc_is_gone(&ue->ue_tq)) {
		return;		/* nothing to do */
	}
	/*
	 * NOTE: The task cannot get executed before we drop the
	 * "sc_mtx" mutex. It is safe to update fields in the message
	 * structure after the message has been queued.
	 */
	task = (struct usb_ether_cfg_task *)
	    usb_proc_msignal(&ue->ue_tq, t0, t1);

	/* Setup callback and self pointers */
	task->hdr.pm_callback = fn;
	task->ue = ue;

	/*
	 * Start and stop must be synchronous!
	 */
	if ((fn == ue_start_task) ||
	    (fn == ue_stop_task))
		usb_proc_mwait(&ue->ue_tq, t0, t1);
}

if_t
uether_getifp(struct usb_ether *ue)
{
	return (ue->ue_ifp);
}

struct mii_data *
uether_getmii(struct usb_ether *ue)
{
	return (device_get_softc(ue->ue_miibus));
}

void *
uether_getsc(struct usb_ether *ue)
{
	return (ue->ue_sc);
}

static int
ue_sysctl_parent(SYSCTL_HANDLER_ARGS)
{
	struct usb_ether *ue = arg1;
	const char *name;

	name = device_get_nameunit(ue->ue_dev);
	return SYSCTL_OUT_STR(req, name);
}

int
uether_ifattach(struct usb_ether *ue)
{
	int error;

	/* check some critical parameters */
	if ((ue->ue_dev == NULL) ||
	    (ue->ue_udev == NULL) ||
	    (ue->ue_mtx == NULL) ||
	    (ue->ue_methods == NULL))
		return (EINVAL);

	error = usb_proc_create(&ue->ue_tq, ue->ue_mtx,
	    device_get_nameunit(ue->ue_dev), USB_PRI_MED);
	if (error) {
		device_printf(ue->ue_dev, "could not setup taskqueue\n");
		goto error;
	}

	/* fork rest of the attach code */
	UE_LOCK(ue);
	ue_queue_command(ue, ue_attach_post_task,
	    &ue->ue_sync_task[0].hdr,
	    &ue->ue_sync_task[1].hdr);
	UE_UNLOCK(ue);

error:
	return (error);
}

void
uether_ifattach_wait(struct usb_ether *ue)
{

	UE_LOCK(ue);
	usb_proc_mwait(&ue->ue_tq,
	    &ue->ue_sync_task[0].hdr,
	    &ue->ue_sync_task[1].hdr);
	UE_UNLOCK(ue);
}

static void
ue_attach_post_task(struct usb_proc_msg *_task)
{
	struct usb_ether_cfg_task *task =
	    (struct usb_ether_cfg_task *)_task;
	struct usb_ether *ue = task->ue;
	if_t ifp;
	int error;
	char num[14];			/* sufficient for 32 bits */

	/* first call driver's post attach routine */
	ue->ue_methods->ue_attach_post(ue);

	UE_UNLOCK(ue);

	ue->ue_unit = alloc_unr(ueunit);
	usb_callout_init_mtx(&ue->ue_watchdog, ue->ue_mtx, 0);
	sysctl_ctx_init(&ue->ue_sysctl_ctx);
	mbufq_init(&ue->ue_rxq, 0 /* unlimited length */);

	error = 0;
	CURVNET_SET_QUIET(vnet0);
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(ue->ue_dev, "could not allocate ifnet\n");
		goto fail;
	}

	if_setsoftc(ifp, ue);
	if_initname(ifp, "ue", ue->ue_unit);
	if (ue->ue_methods->ue_attach_post_sub != NULL) {
		ue->ue_ifp = ifp;
		error = ue->ue_methods->ue_attach_post_sub(ue);
	} else {
		if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
		if (ue->ue_methods->ue_ioctl != NULL)
			if_setioctlfn(ifp, ue->ue_methods->ue_ioctl);
		else
			if_setioctlfn(ifp, uether_ioctl);
		if_setstartfn(ifp, ue_start);
		if_setinitfn(ifp, ue_init);
		if_setsendqlen(ifp, ifqmaxlen);
		if_setsendqready(ifp);
		ue->ue_ifp = ifp;

		if (ue->ue_methods->ue_mii_upd != NULL &&
		    ue->ue_methods->ue_mii_sts != NULL) {
			bus_topo_lock();
			error = mii_attach(ue->ue_dev, &ue->ue_miibus, ifp,
			    ue_ifmedia_upd, ue->ue_methods->ue_mii_sts,
			    BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
			bus_topo_unlock();
		}
	}

	if (error) {
		device_printf(ue->ue_dev, "attaching PHYs failed\n");
		goto fail;
	}

	if_printf(ifp, "<USB Ethernet> on %s\n", device_get_nameunit(ue->ue_dev));
	ether_ifattach(ifp, ue->ue_eaddr);

	/*
	 * Tell upper layer we support VLAN oversized frames.
	 */
	if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU)
		if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

	CURVNET_RESTORE();

	snprintf(num, sizeof(num), "%u", ue->ue_unit);
	ue->ue_sysctl_oid = SYSCTL_ADD_NODE(&ue->ue_sysctl_ctx,
	    &SYSCTL_NODE_CHILDREN(_net, ue),
	    OID_AUTO, num, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
	SYSCTL_ADD_PROC(&ue->ue_sysctl_ctx,
	    SYSCTL_CHILDREN(ue->ue_sysctl_oid), OID_AUTO,
	    "%parent", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    ue, 0, ue_sysctl_parent, "A", "parent device");

	UE_LOCK(ue);
	return;

fail:
	CURVNET_RESTORE();

	/* drain mbuf queue */
	mbufq_drain(&ue->ue_rxq);

	/* free unit */
	free_unr(ueunit, ue->ue_unit);

	if (ue->ue_ifp != NULL) {
		if_free(ue->ue_ifp);
		ue->ue_ifp = NULL;
	}
	UE_LOCK(ue);
	return;
}

void
uether_ifdetach(struct usb_ether *ue)
{
	if_t ifp;

	/* wait for any post attach or other command to complete */
	usb_proc_drain(&ue->ue_tq);

	/* read "ifnet" pointer after taskqueue drain */
	ifp = ue->ue_ifp;

	if (ifp != NULL) {
		/* we are not running any more */
		UE_LOCK(ue);
		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
		UE_UNLOCK(ue);

		/* drain any callouts */
		usb_callout_drain(&ue->ue_watchdog);

		/*
		 * Detach ethernet first to stop miibus calls from
		 * user-space:
		 */
		ether_ifdetach(ifp);

		/* detach miibus */
		if (ue->ue_miibus != NULL) {
			bus_topo_lock();
			device_delete_child(ue->ue_dev, ue->ue_miibus);
			bus_topo_unlock();
		}

		/* free interface instance */
		if_free(ifp);

		/* free sysctl */
		sysctl_ctx_free(&ue->ue_sysctl_ctx);

		/* drain mbuf queue */
		mbufq_drain(&ue->ue_rxq);

		/* free unit */
		free_unr(ueunit, ue->ue_unit);
	}

	/* free taskqueue, if any */
	usb_proc_free(&ue->ue_tq);
}

uint8_t
uether_is_gone(struct usb_ether *ue)
{
	return (usb_proc_is_gone(&ue->ue_tq));
}

void
uether_init(void *arg)
{
	ue_init(arg);
}

static void
ue_init(void *arg)
{
	struct usb_ether *ue = arg;

	UE_LOCK(ue);
	ue_queue_command(ue, ue_start_task,
	    &ue->ue_sync_task[0].hdr,
	    &ue->ue_sync_task[1].hdr);
	UE_UNLOCK(ue);
}

static void
ue_start_task(struct usb_proc_msg *_task)
{
	struct usb_ether_cfg_task *task =
	    (struct usb_ether_cfg_task *)_task;
	struct usb_ether *ue = task->ue;
	if_t ifp = ue->ue_ifp;

	UE_LOCK_ASSERT(ue, MA_OWNED);

	ue->ue_methods->ue_init(ue);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	if (ue->ue_methods->ue_tick != NULL)
		usb_callout_reset(&ue->ue_watchdog, hz, ue_watchdog, ue);
}

static void
ue_stop_task(struct usb_proc_msg *_task)
{
	struct usb_ether_cfg_task *task =
	    (struct usb_ether_cfg_task *)_task;
	struct usb_ether *ue = task->ue;

	UE_LOCK_ASSERT(ue, MA_OWNED);

	usb_callout_stop(&ue->ue_watchdog);

	ue->ue_methods->ue_stop(ue);
}

void
uether_start(if_t ifp)
{
	ue_start(ifp);
}

static void
ue_start(if_t ifp)
{
	struct usb_ether *ue = if_getsoftc(ifp);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	UE_LOCK(ue);
	ue->ue_methods->ue_start(ue);
	UE_UNLOCK(ue);
}

static void
ue_promisc_task(struct usb_proc_msg *_task)
{
	struct usb_ether_cfg_task *task =
	    (struct usb_ether_cfg_task *)_task;
	struct usb_ether *ue = task->ue;

	ue->ue_methods->ue_setpromisc(ue);
}

static void
ue_setmulti_task(struct usb_proc_msg *_task)
{
	struct usb_ether_cfg_task *task =
	    (struct usb_ether_cfg_task *)_task;
	struct usb_ether *ue = task->ue;

	ue->ue_methods->ue_setmulti(ue);
}

int
uether_ifmedia_upd(if_t ifp)
{
	return (ue_ifmedia_upd(ifp));
}

static int
ue_ifmedia_upd(if_t ifp)
{
	struct usb_ether *ue = if_getsoftc(ifp);

	/* Defer to process context */
	UE_LOCK(ue);
	ue_queue_command(ue, ue_ifmedia_task,
	    &ue->ue_media_task[0].hdr,
	    &ue->ue_media_task[1].hdr);
	UE_UNLOCK(ue);

	return (0);
}
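
/*
 * Deferred counterpart of ue_ifmedia_upd(): runs from the per-device
 * config thread and forwards the queued media change to the driver's
 * MII update method.
 */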
static void
ue_ifmedia_task(struct usb_proc_msg *_task)
{
	struct usb_ether_cfg_task *task =
	    (struct usb_ether_cfg_task *)_task;
	struct usb_ether *ue = task->ue;
	if_t ifp = ue->ue_ifp;

	ue->ue_methods->ue_mii_upd(ifp);
}

static void
ue_watchdog(void *arg)
{
	struct usb_ether *ue = arg;
	if_t ifp = ue->ue_ifp;

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	ue_queue_command(ue, ue_tick_task,
	    &ue->ue_tick_task[0].hdr,
	    &ue->ue_tick_task[1].hdr);

	usb_callout_reset(&ue->ue_watchdog, hz, ue_watchdog, ue);
}

static void
ue_tick_task(struct usb_proc_msg *_task)
{
	struct usb_ether_cfg_task *task =
	    (struct usb_ether_cfg_task *)_task;
	struct usb_ether *ue = task->ue;
	if_t ifp = ue->ue_ifp;

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	ue->ue_methods->ue_tick(ue);
}

int
uether_ioctl(if_t ifp, u_long command, caddr_t data)
{
	struct usb_ether *ue = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		UE_LOCK(ue);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				ue_queue_command(ue, ue_promisc_task,
				    &ue->ue_promisc_task[0].hdr,
				    &ue->ue_promisc_task[1].hdr);
			else
				ue_queue_command(ue, ue_start_task,
				    &ue->ue_sync_task[0].hdr,
				    &ue->ue_sync_task[1].hdr);
		} else {
			ue_queue_command(ue, ue_stop_task,
			    &ue->ue_sync_task[0].hdr,
			    &ue->ue_sync_task[1].hdr);
		}
		UE_UNLOCK(ue);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		UE_LOCK(ue);
		ue_queue_command(ue, ue_setmulti_task,
		    &ue->ue_multi_task[0].hdr,
		    &ue->ue_multi_task[1].hdr);
		UE_UNLOCK(ue);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		if (ue->ue_miibus != NULL) {
			mii = device_get_softc(ue->ue_miibus);
			error = ifmedia_ioctl(ifp, ifr,
			    &mii->mii_media, command);
		} else
			error = ether_ioctl(ifp, command, data);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return (error);
}

static int
uether_modevent(module_t mod, int type, void *data)
{
	switch (type) {
	case MOD_LOAD:
		ueunit = new_unrhdr(0, INT_MAX, NULL);
		break;
	case MOD_UNLOAD:
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (0);
}

static moduledata_t uether_mod = {
	"uether",
	uether_modevent,
	0
};

struct mbuf *
uether_newbuf(void)
{
	struct mbuf *m_new;

	m_new = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m_new == NULL)
		return (NULL);
	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

	m_adj(m_new, ETHER_ALIGN);
	return (m_new);
}

int
uether_rxmbuf(struct usb_ether *ue, struct mbuf *m,
    unsigned len)
{
	if_t ifp = ue->ue_ifp;

	UE_LOCK_ASSERT(ue, MA_OWNED);

	/* finalize mbuf */
	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = m->m_len = len;

	/* enqueue for later when the lock can be released */
	(void)mbufq_enqueue(&ue->ue_rxq, m);
	return (0);
}

int
uether_rxbuf(struct usb_ether *ue, struct usb_page_cache *pc,
    unsigned offset, unsigned len)
{
	if_t ifp = ue->ue_ifp;
	struct mbuf *m;

	UE_LOCK_ASSERT(ue, MA_OWNED);

	if (len < ETHER_HDR_LEN || len > MCLBYTES - ETHER_ALIGN)
		return (1);

	m = uether_newbuf();
	if (m == NULL) {
		if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
		return (ENOMEM);
	}

	usbd_copy_out(pc, offset, mtod(m, uint8_t *), len);

	/* finalize mbuf */
	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = m->m_len = len;

	/* enqueue for later when the lock can be released */
	(void)mbufq_enqueue(&ue->ue_rxq, m);
	return (0);
}

void
uether_rxflush(struct usb_ether *ue)
{
	if_t ifp = ue->ue_ifp;
	struct epoch_tracker et;
	struct mbuf *m, *n;

	UE_LOCK_ASSERT(ue, MA_OWNED);

	n = mbufq_flush(&ue->ue_rxq);
	UE_UNLOCK(ue);
	NET_EPOCH_ENTER(et);
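	/*
	 * Hand the previously queued mbufs to the network stack while
	 * the ue mutex is dropped; the input path runs inside a
	 * network epoch section.
	 */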
	while ((m = n) != NULL) {
		n = STAILQ_NEXT(m, m_stailqpkt);
		m->m_nextpkt = NULL;
		if_input(ifp, m);
	}
	NET_EPOCH_EXIT(et);
	UE_LOCK(ue);
}

/*
 * USB net drivers are run by DRIVER_MODULE() thus SI_SUB_DRIVERS,
 * SI_ORDER_MIDDLE.  Run uether after that.
 */
DECLARE_MODULE(uether, uether_mod, SI_SUB_DRIVERS, SI_ORDER_ANY);
MODULE_VERSION(uether, 1);
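
/*
 * The sketch below (compiled out) illustrates how a USB Ethernet
 * driver typically hooks into this framework.  All "foo_" names, the
 * softc layout and the attach/detach glue are hypothetical
 * placeholders, not part of uether; only the uether_*() calls and the
 * usb_ether/usb_ether_methods fields used above are taken from this
 * file.
 */
#if 0
struct foo_softc {
	struct usb_ether	sc_ue;	/* embed the uether state in the softc */
	struct mtx		sc_mtx;
};

static void
foo_attach_post(struct usb_ether *ue)
{
	/* read the MAC address from the device into ue->ue_eaddr */
}

static void
foo_init(struct usb_ether *ue)
{
	/* program RX filters, start USB transfers, set IFF_DRV_RUNNING */
}

static void
foo_stop(struct usb_ether *ue)
{
	/* stop USB transfers, clear IFF_DRV_RUNNING */
}

static void
foo_start(struct usb_ether *ue)
{
	/* kick the bulk-out transfer to drain the interface send queue */
}

static void
foo_setmulti(struct usb_ether *ue)
{
	/* reprogram the multicast filter */
}

static void
foo_setpromisc(struct usb_ether *ue)
{
	/* toggle promiscuous mode in the RX filter */
}

static const struct usb_ether_methods foo_ue_methods = {
	.ue_attach_post = foo_attach_post,
	.ue_start = foo_start,
	.ue_init = foo_init,
	.ue_stop = foo_stop,
	.ue_setmulti = foo_setmulti,
	.ue_setpromisc = foo_setpromisc,
};

static int
foo_attach(device_t dev)
{
	struct usb_attach_arg *uaa = device_get_ivars(dev);
	struct foo_softc *sc = device_get_softc(dev);
	struct usb_ether *ue = &sc->sc_ue;

	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);

	ue->ue_sc = sc;
	ue->ue_dev = dev;
	ue->ue_udev = uaa->device;
	ue->ue_mtx = &sc->sc_mtx;
	ue->ue_methods = &foo_ue_methods;

	/* queue the post-attach task; ue_attach_post runs from the taskqueue */
	return (uether_ifattach(ue));
}

static int
foo_detach(device_t dev)
{
	struct foo_softc *sc = device_get_softc(dev);

	uether_ifdetach(&sc->sc_ue);
	mtx_destroy(&sc->sc_mtx);
	return (0);
}
#endif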