/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2008 The FreeBSD Foundation
 * Copyright (c) 2009-2010 Bjoern A. Zeeb <bz@FreeBSD.org>
 * All rights reserved.
 *
 * This software was developed by CK Software GmbH under sponsorship
 * from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * A pair of virtual back-to-back connected Ethernet-like interfaces
 * (``two interfaces with a virtual cross-over cable'').
 *
 * This is mostly intended to be used to provide connectivity between
 * different virtual network stack instances.
 */
/*
 * Things to re-think once we have more experience:
 * - ifp->if_reassign function once we can test with vimage. Depending on
 *   how if_vmove() is going to be improved.
 * - Real random etheraddrs that are checked to be uniquish; we would need
 *   to re-do them in case we move the interface between network stacks
 *   in a private if_reassign function.
 *   In case we bridge to a real interface/network or between independent
 *   epairs on multiple stacks/machines, we may need this.
 *   For now let the user handle that case.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/hash.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/queue.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/types.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_clone.h>
#include <net/if_media.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/netisr.h>
#include <net/vnet.h>

SYSCTL_DECL(_net_link);
static SYSCTL_NODE(_net_link, OID_AUTO, epair, CTLFLAG_RW, 0, "epair sysctl");

#ifdef EPAIR_DEBUG
static int epair_debug = 0;
SYSCTL_INT(_net_link_epair, OID_AUTO, epair_debug, CTLFLAG_RW,
    &epair_debug, 0, "if_epair(4) debugging.");
#define	DPRINTF(fmt, arg...)						\
	if (epair_debug)						\
		printf("[%s:%d] " fmt, __func__, __LINE__, ##arg)
#else
#define	DPRINTF(fmt, arg...)
#endif

static void epair_nh_sintr(struct mbuf *);
static struct mbuf *epair_nh_m2cpuid(struct mbuf *, uintptr_t, u_int *);
static void epair_nh_drainedcpu(u_int);

static void epair_start_locked(struct ifnet *);
static int epair_media_change(struct ifnet *);
static void epair_media_status(struct ifnet *, struct ifmediareq *);

static int epair_clone_match(struct if_clone *, const char *);
static int epair_clone_create(struct if_clone *, char *, size_t, caddr_t);
static int epair_clone_destroy(struct if_clone *, struct ifnet *);

static const char epairname[] = "epair";
static unsigned int next_index = 0;

/* Netisr related definitions and sysctl. */
static struct netisr_handler epair_nh = {
	.nh_name	= epairname,
	.nh_proto	= NETISR_EPAIR,
	.nh_policy	= NETISR_POLICY_CPU,
	.nh_handler	= epair_nh_sintr,
	.nh_m2cpuid	= epair_nh_m2cpuid,
	.nh_drainedcpu	= epair_nh_drainedcpu,
};

static int
sysctl_epair_netisr_maxqlen(SYSCTL_HANDLER_ARGS)
{
	int error, qlimit;

	netisr_getqlimit(&epair_nh, &qlimit);
	error = sysctl_handle_int(oidp, &qlimit, 0, req);
	if (error || !req->newptr)
		return (error);
	if (qlimit < 1)
		return (EINVAL);
	return (netisr_setqlimit(&epair_nh, qlimit));
}
SYSCTL_PROC(_net_link_epair, OID_AUTO, netisr_maxqlen, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_epair_netisr_maxqlen, "I",
    "Maximum if_epair(4) netisr \"hw\" queue length");

struct epair_softc {
	struct ifnet	*ifp;		/* This ifp. */
	struct ifnet	*oifp;		/* other ifp of pair. */
	struct ifmedia	media;		/* Media config (fake). */
	u_int		refcount;	/* # of mbufs in flight. */
	u_int		cpuid;		/* CPU ID assigned upon creation. */
	void		(*if_qflush)(struct ifnet *);
					/* Original if_qflush routine. */
};

/*
 * Per-CPU list of ifps with data in the ifq that needs to be flushed
 * to the netisr ``hw'' queue before we allow any further direct queuing
 * to the ``hw'' queue.
 */
struct epair_ifp_drain {
	STAILQ_ENTRY(epair_ifp_drain)	ifp_next;
	struct ifnet			*ifp;
};
STAILQ_HEAD(eid_list, epair_ifp_drain);

#define	EPAIR_LOCK_INIT(dpcpu)		mtx_init(&(dpcpu)->if_epair_mtx, \
					    "if_epair", NULL, MTX_DEF)
#define	EPAIR_LOCK_DESTROY(dpcpu)	mtx_destroy(&(dpcpu)->if_epair_mtx)
#define	EPAIR_LOCK_ASSERT(dpcpu)	mtx_assert(&(dpcpu)->if_epair_mtx, \
					    MA_OWNED)
#define	EPAIR_LOCK(dpcpu)		mtx_lock(&(dpcpu)->if_epair_mtx)
#define	EPAIR_UNLOCK(dpcpu)		mtx_unlock(&(dpcpu)->if_epair_mtx)

#ifdef INVARIANTS
#define	EPAIR_REFCOUNT_INIT(r, v)	refcount_init((r), (v))
#define	EPAIR_REFCOUNT_AQUIRE(r)	refcount_acquire((r))
#define	EPAIR_REFCOUNT_RELEASE(r)	refcount_release((r))
#define	EPAIR_REFCOUNT_ASSERT(a, p)	KASSERT(a, p)
#else
#define	EPAIR_REFCOUNT_INIT(r, v)
#define	EPAIR_REFCOUNT_AQUIRE(r)
#define	EPAIR_REFCOUNT_RELEASE(r)
#define	EPAIR_REFCOUNT_ASSERT(a, p)
#endif

static MALLOC_DEFINE(M_EPAIR, epairname,
    "Pair of virtual cross-over connected Ethernet-like interfaces");

VNET_DEFINE_STATIC(struct if_clone *, epair_cloner);
#define	V_epair_cloner	VNET(epair_cloner)

/*
 * DPCPU area and functions.
 */
struct epair_dpcpu {
	struct mtx	if_epair_mtx;		/* Per-CPU locking. */
	int		epair_drv_flags;	/* Per-CPU ``hw'' drv flags. */
	struct eid_list	epair_ifp_drain_list;	/* Per-CPU list of ifps with
						 * data in the ifq. */
};
DPCPU_DEFINE(struct epair_dpcpu, epair_dpcpu);

static void
epair_dpcpu_init(void)
{
	struct epair_dpcpu *epair_dpcpu;
	struct eid_list *s;
	u_int cpuid;

	CPU_FOREACH(cpuid) {
		epair_dpcpu = DPCPU_ID_PTR(cpuid, epair_dpcpu);

		/* Initialize per-cpu lock. */
		EPAIR_LOCK_INIT(epair_dpcpu);

		/* Driver flags are per-cpu as are our netisr "hw" queues. */
		epair_dpcpu->epair_drv_flags = 0;

		/*
		 * Initialize per-cpu drain list.
		 * Manually do what STAILQ_HEAD_INITIALIZER would do.
		 */
		s = &epair_dpcpu->epair_ifp_drain_list;
		s->stqh_first = NULL;
		s->stqh_last = &s->stqh_first;
	}
}

static void
epair_dpcpu_detach(void)
{
	struct epair_dpcpu *epair_dpcpu;
	u_int cpuid;

	CPU_FOREACH(cpuid) {
		epair_dpcpu = DPCPU_ID_PTR(cpuid, epair_dpcpu);

		/* Destroy per-cpu lock. */
		EPAIR_LOCK_DESTROY(epair_dpcpu);
	}
}

/*
 * Helper functions.
 */
static u_int
cpuid_from_ifp(struct ifnet *ifp)
{
	struct epair_softc *sc;

	if (ifp == NULL)
		return (0);
	sc = ifp->if_softc;

	return (sc->cpuid);
}

/*
 * Netisr handler functions.
 */
static void
epair_nh_sintr(struct mbuf *m)
{
	struct ifnet *ifp;
	struct epair_softc *sc __unused;

	ifp = m->m_pkthdr.rcvif;
	(*ifp->if_input)(ifp, m);
	sc = ifp->if_softc;
	EPAIR_REFCOUNT_RELEASE(&sc->refcount);
	EPAIR_REFCOUNT_ASSERT((int)sc->refcount >= 1,
	    ("%s: ifp=%p sc->refcount not >= 1: %d",
	    __func__, ifp, sc->refcount));
	DPRINTF("ifp=%p refcount=%u\n", ifp, sc->refcount);
}

static struct mbuf *
epair_nh_m2cpuid(struct mbuf *m, uintptr_t source, u_int *cpuid)
{

	*cpuid = cpuid_from_ifp(m->m_pkthdr.rcvif);

	return (m);
}

static void
epair_nh_drainedcpu(u_int cpuid)
{
	struct epair_dpcpu *epair_dpcpu;
	struct epair_ifp_drain *elm, *tvar;
	struct ifnet *ifp;

	epair_dpcpu = DPCPU_ID_PTR(cpuid, epair_dpcpu);
	EPAIR_LOCK(epair_dpcpu);
	/*
	 * Assume our "hw" queue and possibly ifq will be emptied
	 * again. In case we will overflow the "hw" queue while
	 * draining, epair_start_locked will set IFF_DRV_OACTIVE
	 * again and we will stop and return.
	 */
	STAILQ_FOREACH_SAFE(elm, &epair_dpcpu->epair_ifp_drain_list,
	    ifp_next, tvar) {
		ifp = elm->ifp;
		epair_dpcpu->epair_drv_flags &= ~IFF_DRV_OACTIVE;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		epair_start_locked(ifp);

		IFQ_LOCK(&ifp->if_snd);
		if (IFQ_IS_EMPTY(&ifp->if_snd)) {
			struct epair_softc *sc __unused;

			STAILQ_REMOVE(&epair_dpcpu->epair_ifp_drain_list,
			    elm, epair_ifp_drain, ifp_next);
			/* The cached ifp goes off the list. */
			sc = ifp->if_softc;
			EPAIR_REFCOUNT_RELEASE(&sc->refcount);
			EPAIR_REFCOUNT_ASSERT((int)sc->refcount >= 1,
			    ("%s: ifp=%p sc->refcount not >= 1: %d",
			    __func__, ifp, sc->refcount));
			free(elm, M_EPAIR);
		}
		IFQ_UNLOCK(&ifp->if_snd);

		if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) != 0) {
			/* Our "hw"q overflowed again. */
			epair_dpcpu->epair_drv_flags |= IFF_DRV_OACTIVE;
			DPRINTF("hw queue length overflow at %u\n",
			    epair_nh.nh_qlimit);
			break;
		}
	}
	EPAIR_UNLOCK(epair_dpcpu);
}

/*
 * Network interface (`if') related functions.
 */
static void
epair_remove_ifp_from_draining(struct ifnet *ifp)
{
	struct epair_dpcpu *epair_dpcpu;
	struct epair_ifp_drain *elm, *tvar;
	u_int cpuid;

	CPU_FOREACH(cpuid) {
		epair_dpcpu = DPCPU_ID_PTR(cpuid, epair_dpcpu);
		EPAIR_LOCK(epair_dpcpu);
		STAILQ_FOREACH_SAFE(elm, &epair_dpcpu->epair_ifp_drain_list,
		    ifp_next, tvar) {
			if (ifp == elm->ifp) {
				struct epair_softc *sc __unused;

				STAILQ_REMOVE(
				    &epair_dpcpu->epair_ifp_drain_list, elm,
				    epair_ifp_drain, ifp_next);
				/* The cached ifp goes off the list. */
				sc = ifp->if_softc;
				EPAIR_REFCOUNT_RELEASE(&sc->refcount);
				EPAIR_REFCOUNT_ASSERT((int)sc->refcount >= 1,
				    ("%s: ifp=%p sc->refcount not >= 1: %d",
				    __func__, ifp, sc->refcount));
				free(elm, M_EPAIR);
			}
		}
		EPAIR_UNLOCK(epair_dpcpu);
	}
}

static int
epair_add_ifp_for_draining(struct ifnet *ifp)
{
	struct epair_dpcpu *epair_dpcpu;
	struct epair_softc *sc;
	struct epair_ifp_drain *elm = NULL;

	sc = ifp->if_softc;
	epair_dpcpu = DPCPU_ID_PTR(sc->cpuid, epair_dpcpu);
	EPAIR_LOCK_ASSERT(epair_dpcpu);
	STAILQ_FOREACH(elm, &epair_dpcpu->epair_ifp_drain_list, ifp_next)
		if (elm->ifp == ifp)
			break;
	/* If the ifp is there already, return success. */
	if (elm != NULL)
		return (0);

	elm = malloc(sizeof(struct epair_ifp_drain), M_EPAIR, M_NOWAIT|M_ZERO);
	if (elm == NULL)
		return (ENOMEM);

	elm->ifp = ifp;
	/* Add a reference for the ifp pointer on the list. */
	EPAIR_REFCOUNT_AQUIRE(&sc->refcount);
	STAILQ_INSERT_TAIL(&epair_dpcpu->epair_ifp_drain_list, elm, ifp_next);

	return (0);
}

static void
epair_start_locked(struct ifnet *ifp)
{
	struct epair_dpcpu *epair_dpcpu;
	struct mbuf *m;
	struct epair_softc *sc;
	struct ifnet *oifp;
	int error;

	DPRINTF("ifp=%p\n", ifp);
	sc = ifp->if_softc;
	epair_dpcpu = DPCPU_ID_PTR(sc->cpuid, epair_dpcpu);
	EPAIR_LOCK_ASSERT(epair_dpcpu);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;
	if ((ifp->if_flags & IFF_UP) == 0)
		return;

	/*
	 * We get packets here from ether_output via if_handoff()
	 * and need to put them into the input queue of the oifp
	 * and call oifp->if_input() via netisr/epair_nh_sintr().
	 */
	oifp = sc->oifp;
	sc = oifp->if_softc;
	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		BPF_MTAP(ifp, m);

		/*
		 * In case the outgoing interface is not usable,
		 * drop the packet.
		 */
		if ((oifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
		    (oifp->if_flags & IFF_UP) == 0) {
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			m_freem(m);
			continue;
		}
		DPRINTF("packet %s -> %s\n", ifp->if_xname, oifp->if_xname);

		/*
		 * Add a reference so the interface cannot go away while the
		 * packet is in transit as we rely on rcvif to stay valid.
		 */
		EPAIR_REFCOUNT_AQUIRE(&sc->refcount);
		m->m_pkthdr.rcvif = oifp;
		CURVNET_SET_QUIET(oifp->if_vnet);
		error = netisr_queue(NETISR_EPAIR, m);
		CURVNET_RESTORE();
		if (!error) {
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
			/* Someone else received the packet. */
			if_inc_counter(oifp, IFCOUNTER_IPACKETS, 1);
		} else {
			/* The packet was freed already. */
			epair_dpcpu->epair_drv_flags |= IFF_DRV_OACTIVE;
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			(void) epair_add_ifp_for_draining(ifp);
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			EPAIR_REFCOUNT_RELEASE(&sc->refcount);
			EPAIR_REFCOUNT_ASSERT((int)sc->refcount >= 1,
			    ("%s: ifp=%p sc->refcount not >= 1: %d",
			    __func__, oifp, sc->refcount));
		}
	}
}

static void
epair_start(struct ifnet *ifp)
{
	struct epair_dpcpu *epair_dpcpu;

	epair_dpcpu = DPCPU_ID_PTR(cpuid_from_ifp(ifp), epair_dpcpu);
	EPAIR_LOCK(epair_dpcpu);
	epair_start_locked(ifp);
	EPAIR_UNLOCK(epair_dpcpu);
}

static int
epair_transmit_locked(struct ifnet *ifp, struct mbuf *m)
{
	struct epair_dpcpu *epair_dpcpu;
	struct epair_softc *sc;
	struct ifnet *oifp;
	int error, len;
	short mflags;

	DPRINTF("ifp=%p m=%p\n", ifp, m);
	sc = ifp->if_softc;
	epair_dpcpu = DPCPU_ID_PTR(sc->cpuid, epair_dpcpu);
	EPAIR_LOCK_ASSERT(epair_dpcpu);

	if (m == NULL)
		return (0);

	/*
	 * We are not going to use the interface en/dequeue mechanism
	 * on the TX side. We are called from ether_output_frame()
	 * and will put the packet into the incoming queue of the
	 * other interface of our pair via the netisr.
	 */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		m_freem(m);
		return (ENXIO);
	}
	if ((ifp->if_flags & IFF_UP) == 0) {
		m_freem(m);
		return (ENETDOWN);
	}

	BPF_MTAP(ifp, m);

	/*
	 * In case the outgoing interface is not usable,
	 * drop the packet.
	 */
	oifp = sc->oifp;
	if ((oifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    (oifp->if_flags & IFF_UP) == 0) {
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		m_freem(m);
		return (0);
	}
	len = m->m_pkthdr.len;
	mflags = m->m_flags;
	DPRINTF("packet %s -> %s\n", ifp->if_xname, oifp->if_xname);

#ifdef ALTQ
	/* Support ALTQ via the classic if_start() path. */
	IF_LOCK(&ifp->if_snd);
	if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
		ALTQ_ENQUEUE(&ifp->if_snd, m, NULL, error);
		if (error)
			if_inc_counter(ifp, IFCOUNTER_OQDROPS, 1);
		IF_UNLOCK(&ifp->if_snd);
		if (!error) {
			if_inc_counter(ifp, IFCOUNTER_OBYTES, len);
			if (mflags & (M_BCAST|M_MCAST))
				if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);

			if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0)
				epair_start_locked(ifp);
			else
				(void)epair_add_ifp_for_draining(ifp);
		}
		return (error);
	}
	IF_UNLOCK(&ifp->if_snd);
#endif

	if ((epair_dpcpu->epair_drv_flags & IFF_DRV_OACTIVE) != 0) {
		/*
		 * Our hardware queue is full, try to fall back
		 * queuing to the ifq but do not call ifp->if_start.
		 * Either we are lucky or the packet is gone.
		 */
		IFQ_ENQUEUE(&ifp->if_snd, m, error);
		if (!error)
			(void)epair_add_ifp_for_draining(ifp);
		return (error);
	}
	sc = oifp->if_softc;
	/*
	 * Add a reference so the interface cannot go away while the
	 * packet is in transit as we rely on rcvif to stay valid.
	 */
	EPAIR_REFCOUNT_AQUIRE(&sc->refcount);
	m->m_pkthdr.rcvif = oifp;
	CURVNET_SET_QUIET(oifp->if_vnet);
	error = netisr_queue(NETISR_EPAIR, m);
	CURVNET_RESTORE();
	if (!error) {
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		/*
		 * IFQ_HANDOFF_ADJ/ip_handoff() update statistics,
		 * but as we bypass all this we have to duplicate
		 * the logic another time.
		 */
		if_inc_counter(ifp, IFCOUNTER_OBYTES, len);
		if (mflags & (M_BCAST|M_MCAST))
			if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
		/* Someone else received the packet. */
		if_inc_counter(oifp, IFCOUNTER_IPACKETS, 1);
	} else {
		/* The packet was freed already. */
		epair_dpcpu->epair_drv_flags |= IFF_DRV_OACTIVE;
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		EPAIR_REFCOUNT_RELEASE(&sc->refcount);
		EPAIR_REFCOUNT_ASSERT((int)sc->refcount >= 1,
		    ("%s: ifp=%p sc->refcount not >= 1: %d",
		    __func__, oifp, sc->refcount));
	}

	return (error);
}

static int
epair_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct epair_dpcpu *epair_dpcpu;
	int error;

	epair_dpcpu = DPCPU_ID_PTR(cpuid_from_ifp(ifp), epair_dpcpu);
	EPAIR_LOCK(epair_dpcpu);
	error = epair_transmit_locked(ifp, m);
	EPAIR_UNLOCK(epair_dpcpu);
	return (error);
}

static void
epair_qflush(struct ifnet *ifp)
{
	struct epair_softc *sc;

	sc = ifp->if_softc;
	KASSERT(sc != NULL, ("%s: ifp=%p, epair_softc gone? sc=%p\n",
	    __func__, ifp, sc));
	/*
	 * Remove this ifp from all backpointer lists. The interface will not
	 * be usable for flushing anyway nor should it have anything to flush
	 * after if_qflush().
	 */
	epair_remove_ifp_from_draining(ifp);

	if (sc->if_qflush)
		sc->if_qflush(ifp);
}

static int
epair_media_change(struct ifnet *ifp __unused)
{

	/* Do nothing. */
	return (0);
}

static void
epair_media_status(struct ifnet *ifp __unused, struct ifmediareq *imr)
{

	imr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	imr->ifm_active = IFM_ETHER | IFM_10G_T | IFM_FDX;
}

static int
epair_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct epair_softc *sc;
	struct ifreq *ifr;
	int error;

	ifr = (struct ifreq *)data;
	switch (cmd) {
	case SIOCSIFFLAGS:
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = 0;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		sc = ifp->if_softc;
		error = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
		break;

	case SIOCSIFMTU:
		/* We basically allow all kinds of MTUs. */
		ifp->if_mtu = ifr->ifr_mtu;
		error = 0;
		break;

	default:
		/* Let the common ethernet handler process this. */
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

static void
epair_init(void *dummy __unused)
{
}


/*
 * Interface cloning functions.
 * We use our private ones so that we can create/destroy our secondary
 * device along with the primary one.
 */
static int
epair_clone_match(struct if_clone *ifc, const char *name)
{
	const char *cp;

	DPRINTF("name='%s'\n", name);

	/*
	 * Our base name is epair.
	 * Our interfaces will be named epair<n>[ab].
	 * So accept anything of the following list:
	 * - epair
	 * - epair<n>
	 * but not the epair<n>[ab] versions.
	 */
	if (strncmp(epairname, name, sizeof(epairname)-1) != 0)
		return (0);

	for (cp = name + sizeof(epairname) - 1; *cp != '\0'; cp++) {
		if (*cp < '0' || *cp > '9')
			return (0);
	}

	return (1);
}

static void
epair_clone_add(struct if_clone *ifc, struct epair_softc *scb)
{
	struct ifnet *ifp;
	uint8_t eaddr[ETHER_ADDR_LEN];	/* 00:00:00:00:00:00 */

	ifp = scb->ifp;
	/* Copy epairNa etheraddr and change the last byte. */
	memcpy(eaddr, scb->oifp->if_hw_addr, ETHER_ADDR_LEN);
	eaddr[5] = 0x0b;
	ether_ifattach(ifp, eaddr);

	if_clone_addif(ifc, ifp);
}

static int
epair_clone_create(struct if_clone *ifc, char *name, size_t len, caddr_t params)
{
	struct epair_softc *sca, *scb;
	struct ifnet *ifp;
	char *dp;
	int error, unit, wildcard;
	uint64_t hostid;
	uint32_t key[3];
	uint32_t hash;
	uint8_t eaddr[ETHER_ADDR_LEN];	/* 00:00:00:00:00:00 */

	/* Try to see if a special unit was requested. */
	error = ifc_name2unit(name, &unit);
	if (error != 0)
		return (error);
	wildcard = (unit < 0);

	error = ifc_alloc_unit(ifc, &unit);
	if (error != 0)
		return (error);

	/*
	 * If no unit had been given, we need to adjust the ifName.
	 * Also make sure there is space for our extra [ab] suffix.
	 */
	for (dp = name; *dp != '\0'; dp++);
	if (wildcard) {
		error = snprintf(dp, len - (dp - name), "%d", unit);
		if (error > len - (dp - name) - 1) {
			/* ifName too long. */
			ifc_free_unit(ifc, unit);
			return (ENOSPC);
		}
		dp += error;
	}
	if (len - (dp - name) - 1 < 1) {
		/* No space left for our [ab] suffix. */
		ifc_free_unit(ifc, unit);
		return (ENOSPC);
	}
	*dp = 'b';
	/* Must not change dp so we can replace 'a' by 'b' later. */
	*(dp+1) = '\0';

	/* Check if 'a' and 'b' interfaces already exist. */
	if (ifunit(name) != NULL)
		return (EEXIST);
	*dp = 'a';
	if (ifunit(name) != NULL)
		return (EEXIST);

	/* Allocate memory for both [ab] interfaces */
	sca = malloc(sizeof(struct epair_softc), M_EPAIR, M_WAITOK | M_ZERO);
	EPAIR_REFCOUNT_INIT(&sca->refcount, 1);
	sca->ifp = if_alloc(IFT_ETHER);
	if (sca->ifp == NULL) {
		free(sca, M_EPAIR);
		ifc_free_unit(ifc, unit);
		return (ENOSPC);
	}

	scb = malloc(sizeof(struct epair_softc), M_EPAIR, M_WAITOK | M_ZERO);
	EPAIR_REFCOUNT_INIT(&scb->refcount, 1);
	scb->ifp = if_alloc(IFT_ETHER);
	if (scb->ifp == NULL) {
		free(scb, M_EPAIR);
		if_free(sca->ifp);
		free(sca, M_EPAIR);
		ifc_free_unit(ifc, unit);
		return (ENOSPC);
	}

	/*
	 * Cross-reference the interfaces so we will be able to free both.
	 */
	sca->oifp = scb->ifp;
	scb->oifp = sca->ifp;

	/*
	 * Calculate the cpuid for netisr queueing based on the
	 * ifIndex of the interfaces. As long as we cannot configure
	 * this or use cpuset information easily we cannot guarantee
	 * cache locality but we can at least allow parallelism.
	 */
	sca->cpuid =
	    netisr_get_cpuid(sca->ifp->if_index);
	scb->cpuid =
	    netisr_get_cpuid(scb->ifp->if_index);

	/* Initialise pseudo media types. */
	ifmedia_init(&sca->media, 0, epair_media_change, epair_media_status);
	ifmedia_add(&sca->media, IFM_ETHER | IFM_10G_T, 0, NULL);
	ifmedia_set(&sca->media, IFM_ETHER | IFM_10G_T);
	ifmedia_init(&scb->media, 0, epair_media_change, epair_media_status);
	ifmedia_add(&scb->media, IFM_ETHER | IFM_10G_T, 0, NULL);
	ifmedia_set(&scb->media, IFM_ETHER | IFM_10G_T);

	/* Finish initialization of interface <n>a. */
	ifp = sca->ifp;
	ifp->if_softc = sca;
	strlcpy(ifp->if_xname, name, IFNAMSIZ);
	ifp->if_dname = epairname;
	ifp->if_dunit = unit;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	ifp->if_capenable = IFCAP_VLAN_MTU;
	ifp->if_start = epair_start;
	ifp->if_ioctl = epair_ioctl;
	ifp->if_init = epair_init;
	if_setsendqlen(ifp, ifqmaxlen);
	if_setsendqready(ifp);

	/*
	 * Calculate the etheraddr by hashing the hostid and the
	 * interface index. The result should hopefully be unique.
	 * Note that the "a" component of an epair instance may get moved
	 * to a different VNET after creation. In that case its index
	 * will be freed and the index can get reused by a new epair instance.
	 * Make sure we do not create the same etheraddr again.
	 */
	getcredhostid(curthread->td_ucred, (unsigned long *)&hostid);
	if (hostid == 0)
		arc4rand(&hostid, sizeof(hostid), 0);

	if (ifp->if_index > next_index)
		next_index = ifp->if_index;
	else
		next_index++;

	key[0] = (uint32_t)next_index;
	key[1] = (uint32_t)(hostid & 0xffffffff);
	key[2] = (uint32_t)((hostid >> 32) & 0xffffffff);
	hash = jenkins_hash32(key, 3, 0);

	eaddr[0] = 0x02;
	memcpy(&eaddr[1], &hash, 4);
	eaddr[5] = 0x0a;
	ether_ifattach(ifp, eaddr);
	sca->if_qflush = ifp->if_qflush;
	ifp->if_qflush = epair_qflush;
	ifp->if_transmit = epair_transmit;
	ifp->if_baudrate = IF_Gbps(10);	/* arbitrary maximum */

	/* Swap the name and finish initialization of interface <n>b. */
	*dp = 'b';

	ifp = scb->ifp;
	ifp->if_softc = scb;
	strlcpy(ifp->if_xname, name, IFNAMSIZ);
	ifp->if_dname = epairname;
	ifp->if_dunit = unit;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	ifp->if_capenable = IFCAP_VLAN_MTU;
	ifp->if_start = epair_start;
	ifp->if_ioctl = epair_ioctl;
	ifp->if_init = epair_init;
	if_setsendqlen(ifp, ifqmaxlen);
	if_setsendqready(ifp);
	/* We need to play some tricks here for the second interface. */
	strlcpy(name, epairname, len);

	/* Correctly set the name for the cloner list. */
	strlcpy(name, scb->ifp->if_xname, len);
	epair_clone_add(ifc, scb);

	scb->if_qflush = ifp->if_qflush;
	ifp->if_qflush = epair_qflush;
	ifp->if_transmit = epair_transmit;
	ifp->if_baudrate = IF_Gbps(10);	/* arbitrary maximum */

	/*
	 * Restore name to <n>a as the ifp for this will go into the
	 * cloner list for the initial call.
	 */
	strlcpy(name, sca->ifp->if_xname, len);
	DPRINTF("name='%s/%db' created sca=%p scb=%p\n", name, unit, sca, scb);

	/* Tell the world that we are ready to rock. */
	sca->ifp->if_drv_flags |= IFF_DRV_RUNNING;
	scb->ifp->if_drv_flags |= IFF_DRV_RUNNING;
	if_link_state_change(sca->ifp, LINK_STATE_UP);
	if_link_state_change(scb->ifp, LINK_STATE_UP);

	return (0);
}

static int
epair_clone_destroy(struct if_clone *ifc, struct ifnet *ifp)
{
	struct ifnet *oifp;
	struct epair_softc *sca, *scb;
	int unit, error;

	DPRINTF("ifp=%p\n", ifp);

	/*
	 * In case we called into if_clone_destroyif() ourselves
	 * again to remove the second interface, the softc will be
	 * NULL. In that case do not do anything but return success.
	 */
	if (ifp->if_softc == NULL)
		return (0);

	unit = ifp->if_dunit;
	sca = ifp->if_softc;
	oifp = sca->oifp;
	scb = oifp->if_softc;

	DPRINTF("ifp=%p oifp=%p\n", ifp, oifp);
	if_link_state_change(ifp, LINK_STATE_DOWN);
	if_link_state_change(oifp, LINK_STATE_DOWN);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	oifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	/*
	 * Get rid of our second half. As the other of the two
	 * interfaces may reside in a different vnet, we need to
	 * switch before freeing them.
	 */
	CURVNET_SET_QUIET(oifp->if_vnet);
	ether_ifdetach(oifp);
	/*
	 * Wait for all packets to be dispatched to if_input.
	 * The numbers can only go down as the interface is
	 * detached so there is no need to use atomics.
	 */
	DPRINTF("scb refcnt=%u\n", scb->refcount);
	EPAIR_REFCOUNT_ASSERT(scb->refcount == 1,
	    ("%s: ifp=%p scb->refcount!=1: %d", __func__, oifp, scb->refcount));
	oifp->if_softc = NULL;
	error = if_clone_destroyif(ifc, oifp);
	if (error)
		panic("%s: if_clone_destroyif() for our 2nd iface failed: %d",
		    __func__, error);
	if_free(oifp);
	ifmedia_removeall(&scb->media);
	free(scb, M_EPAIR);
	CURVNET_RESTORE();

	ether_ifdetach(ifp);
	/*
	 * Wait for all packets to be dispatched to if_input.
	 */
	DPRINTF("sca refcnt=%u\n", sca->refcount);
	EPAIR_REFCOUNT_ASSERT(sca->refcount == 1,
	    ("%s: ifp=%p sca->refcount!=1: %d", __func__, ifp, sca->refcount));
	if_free(ifp);
	ifmedia_removeall(&sca->media);
	free(sca, M_EPAIR);
	ifc_free_unit(ifc, unit);

	return (0);
}

static void
vnet_epair_init(const void *unused __unused)
{

	V_epair_cloner = if_clone_advanced(epairname, 0,
	    epair_clone_match, epair_clone_create, epair_clone_destroy);
#ifdef VIMAGE
	netisr_register_vnet(&epair_nh);
#endif
}
VNET_SYSINIT(vnet_epair_init, SI_SUB_PSEUDO, SI_ORDER_ANY,
    vnet_epair_init, NULL);

static void
vnet_epair_uninit(const void *unused __unused)
{

#ifdef VIMAGE
	netisr_unregister_vnet(&epair_nh);
#endif
	if_clone_detach(V_epair_cloner);
}
VNET_SYSUNINIT(vnet_epair_uninit, SI_SUB_INIT_IF, SI_ORDER_ANY,
    vnet_epair_uninit, NULL);

static void
epair_uninit(const void *unused __unused)
{
	netisr_unregister(&epair_nh);
	epair_dpcpu_detach();
	if (bootverbose)
		printf("%s unloaded.\n", epairname);
}
SYSUNINIT(epair_uninit, SI_SUB_INIT_IF, SI_ORDER_MIDDLE,
    epair_uninit, NULL);

static int
epair_modevent(module_t mod, int type, void *data)
{
	int qlimit;

	switch (type) {
	case MOD_LOAD:
		/* For now limit us to one global mutex and one inq. */
		epair_dpcpu_init();
		epair_nh.nh_qlimit = 42 * ifqmaxlen; /* 42 shall be the number. */
		if (TUNABLE_INT_FETCH("net.link.epair.netisr_maxqlen", &qlimit))
			epair_nh.nh_qlimit = qlimit;
		netisr_register(&epair_nh);
		if (bootverbose)
			printf("%s initialized.\n", epairname);
		break;
	case MOD_UNLOAD:
		/* Handled in epair_uninit() */
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (0);
}

static moduledata_t epair_mod = {
	"if_epair",
	epair_modevent,
	0
};

DECLARE_MODULE(if_epair, epair_mod, SI_SUB_PSEUDO, SI_ORDER_MIDDLE);
MODULE_VERSION(if_epair, 1);
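
/*
 * For reference, a minimal userland usage sketch (not part of the driver
 * logic; see the epair(4) and ifconfig(8) manual pages for the
 * authoritative details):
 *
 *	ifconfig epair create		# creates epair<N>a and epair<N>b
 *	ifconfig epair<N>b vnet <jail>	# hand one end to a vnet jail
 *
 * This is the cross-stack connectivity case described in the comment at
 * the top of this file; bridging one end to a physical interface works
 * like bridging any other Ethernet interface.
 */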