/*-
 * Copyright (c) 2016, Vincenzo Maffione
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/* Driver for ptnet paravirtualized network device. */
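/*
 * A ptnet device gives a guest direct access to the netmap rings of a
 * host port. Guest and host share per-queue ring state through a
 * Communication Status Block (CSB, allocated in ptnet_attach() below),
 * and notify each other through per-queue "kick" doorbell registers
 * and per-queue MSI-X interrupts.
 */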

#include <sys/cdefs.h>

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/taskqueue.h>
#include <sys/smp.h>
#include <sys/time.h>
#include <machine/smp.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>
#include <net/bpf.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>
#include <netinet/sctp.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/selinfo.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <net/netmap_virt.h>
#include <dev/netmap/netmap_mem2.h>
#include <dev/virtio/network/virtio_net.h>

#ifndef INET
#error "INET not defined, cannot support offloadings"
#endif

#if __FreeBSD_version >= 1100000
static uint64_t	ptnet_get_counter(if_t, ift_counter);
#else
typedef struct ifnet *if_t;
#define if_getsoftc(_ifp)   (_ifp)->if_softc
#endif

//#define PTNETMAP_STATS
//#define DEBUG
#ifdef DEBUG
#define DBG(x) x
#else   /* !DEBUG */
#define DBG(x)
#endif  /* !DEBUG */

extern int ptnet_vnet_hdr; /* Tunable parameter */

struct ptnet_softc;

struct ptnet_queue_stats {
	uint64_t	packets; /* if_[io]packets */
	uint64_t	bytes;	 /* if_[io]bytes */
	uint64_t	errors;	 /* if_[io]errors */
	uint64_t	iqdrops; /* if_iqdrops */
	uint64_t	mcasts;  /* if_[io]mcasts */
#ifdef PTNETMAP_STATS
	uint64_t	intrs;
	uint64_t	kicks;
#endif /* PTNETMAP_STATS */
};

struct ptnet_queue {
	struct ptnet_softc		*sc;
	struct resource			*irq;
	void				*cookie;
	int				kring_id;
	struct ptnet_csb_gh		*ptgh;
	struct ptnet_csb_hg		*pthg;
	unsigned int			kick;
	struct mtx			lock;
	struct buf_ring			*bufring; /* for TX queues */
	struct ptnet_queue_stats	stats;
#ifdef PTNETMAP_STATS
	struct ptnet_queue_stats	last_stats;
#endif /* PTNETMAP_STATS */
	struct taskqueue		*taskq;
	struct task			task;
	char				lock_name[16];
};

#define PTNET_Q_LOCK(_pq)	mtx_lock(&(_pq)->lock)
#define PTNET_Q_TRYLOCK(_pq)	mtx_trylock(&(_pq)->lock)
#define PTNET_Q_UNLOCK(_pq)	mtx_unlock(&(_pq)->lock)
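/*
 * Each queue has two CSB entries: a guest-to-host one (ptgh), which the
 * guest writes (head, cur, guest_need_kick) and the host reads, and a
 * host-to-guest one (pthg), which the host writes (hwcur, hwtail,
 * host_need_kick) and the guest reads. The two arrays are allocated in
 * ptnet_attach(), one page each.
 */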

struct ptnet_softc {
	device_t		dev;
	if_t			ifp;
	struct ifmedia		media;
	struct mtx		lock;
	char			lock_name[16];
	char			hwaddr[ETHER_ADDR_LEN];

	/* Mirror of PTFEAT register. */
	uint32_t		ptfeatures;
	unsigned int		vnet_hdr_len;

	/* PCI BARs support. */
	struct resource		*iomem;
	struct resource		*msix_mem;

	unsigned int		num_rings;
	unsigned int		num_tx_rings;
	struct ptnet_queue	*queues;
	struct ptnet_queue	*rxqueues;
	struct ptnet_csb_gh	*csb_gh;
	struct ptnet_csb_hg	*csb_hg;

	unsigned int		min_tx_space;

	struct netmap_pt_guest_adapter *ptna;

	struct callout		tick;
#ifdef PTNETMAP_STATS
	struct timeval		last_ts;
#endif /* PTNETMAP_STATS */
};

#define PTNET_CORE_LOCK(_sc)	mtx_lock(&(_sc)->lock)
#define PTNET_CORE_UNLOCK(_sc)	mtx_unlock(&(_sc)->lock)

static int	ptnet_probe(device_t);
static int	ptnet_attach(device_t);
static int	ptnet_detach(device_t);
static int	ptnet_suspend(device_t);
static int	ptnet_resume(device_t);
static int	ptnet_shutdown(device_t);

static void	ptnet_init(void *opaque);
static int	ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data);
static int	ptnet_init_locked(struct ptnet_softc *sc);
static int	ptnet_stop(struct ptnet_softc *sc);
static int	ptnet_transmit(if_t ifp, struct mbuf *m);
static int	ptnet_drain_transmit_queue(struct ptnet_queue *pq,
					   unsigned int budget,
					   bool may_resched);
static void	ptnet_qflush(if_t ifp);
static void	ptnet_tx_task(void *context, int pending);

static int	ptnet_media_change(if_t ifp);
static void	ptnet_media_status(if_t ifp, struct ifmediareq *ifmr);
#ifdef PTNETMAP_STATS
static void	ptnet_tick(void *opaque);
#endif

static int	ptnet_irqs_init(struct ptnet_softc *sc);
static void	ptnet_irqs_fini(struct ptnet_softc *sc);

static uint32_t ptnet_nm_ptctl(if_t ifp, uint32_t cmd);
static int	ptnet_nm_config(struct netmap_adapter *na,
				struct nm_config_info *info);
static void	ptnet_update_vnet_hdr(struct ptnet_softc *sc);
static int	ptnet_nm_register(struct netmap_adapter *na, int onoff);
static int	ptnet_nm_txsync(struct netmap_kring *kring, int flags);
static int	ptnet_nm_rxsync(struct netmap_kring *kring, int flags);
static void	ptnet_nm_intr(struct netmap_adapter *na, int onoff);
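/*
 * The ptnet_nm_* functions above implement the netmap_adapter callbacks
 * installed by ptnet_attach(). The remaining callbacks it installs
 * (ptnet_nm_krings_create(), ptnet_nm_krings_delete(), ptnet_nm_dtor())
 * are the generic passthrough-guest versions, presumably declared by
 * the netmap headers included above, since this file provides no
 * prototypes for them.
 */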

static void	ptnet_tx_intr(void *opaque);
static void	ptnet_rx_intr(void *opaque);

static unsigned	ptnet_rx_discard(struct netmap_kring *kring,
				 unsigned int head);
static int	ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget,
			     bool may_resched);
static void	ptnet_rx_task(void *context, int pending);

#ifdef DEVICE_POLLING
static poll_handler_t ptnet_poll;
#endif

static device_method_t ptnet_methods[] = {
	DEVMETHOD(device_probe,			ptnet_probe),
	DEVMETHOD(device_attach,		ptnet_attach),
	DEVMETHOD(device_detach,		ptnet_detach),
	DEVMETHOD(device_suspend,		ptnet_suspend),
	DEVMETHOD(device_resume,		ptnet_resume),
	DEVMETHOD(device_shutdown,		ptnet_shutdown),
	DEVMETHOD_END
};

static driver_t ptnet_driver = {
	"ptnet",
	ptnet_methods,
	sizeof(struct ptnet_softc)
};

/* We use (SI_ORDER_MIDDLE+2) here, see DEV_MODULE_ORDERED() invocation. */
static devclass_t ptnet_devclass;
DRIVER_MODULE_ORDERED(ptnet, pci, ptnet_driver, ptnet_devclass,
		      NULL, NULL, SI_ORDER_MIDDLE + 2);

static int
ptnet_probe(device_t dev)
{
	if (pci_get_vendor(dev) != PTNETMAP_PCI_VENDOR_ID ||
		pci_get_device(dev) != PTNETMAP_PCI_NETIF_ID) {
		return (ENXIO);
	}

	device_set_desc(dev, "ptnet network adapter");

	return (BUS_PROBE_DEFAULT);
}

static inline void ptnet_kick(struct ptnet_queue *pq)
{
#ifdef PTNETMAP_STATS
	pq->stats.kicks++;
#endif /* PTNETMAP_STATS */
	bus_write_4(pq->sc->iomem, pq->kick, 0);
}

#define PTNET_BUF_RING_SIZE	4096
#define PTNET_RX_BUDGET		512
#define PTNET_RX_BATCH		1
#define PTNET_TX_BUDGET		512
#define PTNET_TX_BATCH		64
#define PTNET_HDR_SIZE		sizeof(struct virtio_net_hdr_mrg_rxbuf)
#define PTNET_MAX_PKT_SIZE	65536

#define PTNET_CSUM_OFFLOAD	(CSUM_TCP | CSUM_UDP | CSUM_SCTP)
#define PTNET_CSUM_OFFLOAD_IPV6	(CSUM_TCP_IPV6 | CSUM_UDP_IPV6 |\
				 CSUM_SCTP_IPV6)
#define PTNET_ALL_OFFLOAD	(CSUM_TSO | PTNET_CSUM_OFFLOAD |\
				 PTNET_CSUM_OFFLOAD_IPV6)
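/*
 * Attach sequence: map the I/O register BAR, negotiate features with
 * the hypervisor, allocate the CSB and the per-queue structures, set up
 * MSI-X interrupts, create and attach the ifnet, and finally attach the
 * netmap passthrough adapter.
 */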
static int
ptnet_attach(device_t dev)
{
	uint32_t ptfeatures = 0;
	unsigned int num_rx_rings, num_tx_rings;
	struct netmap_adapter na_arg;
	unsigned int nifp_offset;
	struct ptnet_softc *sc;
	if_t ifp;
	uint32_t macreg;
	int err, rid;
	int i;

	sc = device_get_softc(dev);
	sc->dev = dev;

	/* Setup PCI resources. */
	pci_enable_busmaster(dev);

	rid = PCIR_BAR(PTNETMAP_IO_PCI_BAR);
	sc->iomem = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
					   RF_ACTIVE);
	if (sc->iomem == NULL) {
		device_printf(dev, "Failed to map I/O BAR\n");
		return (ENXIO);
	}

	/* Negotiate features with the hypervisor. */
	if (ptnet_vnet_hdr) {
		ptfeatures |= PTNETMAP_F_VNET_HDR;
	}
	bus_write_4(sc->iomem, PTNET_IO_PTFEAT, ptfeatures); /* wanted */
	ptfeatures = bus_read_4(sc->iomem, PTNET_IO_PTFEAT); /* acked */
	sc->ptfeatures = ptfeatures;

	num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS);
	num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS);
	sc->num_rings = num_tx_rings + num_rx_rings;
	sc->num_tx_rings = num_tx_rings;

	if (sc->num_rings * sizeof(struct ptnet_csb_gh) > PAGE_SIZE) {
		device_printf(dev, "CSB cannot handle that many rings (%u)\n",
				sc->num_rings);
		err = ENOMEM;
		goto err_path;
	}

	/* Allocate CSB and carry out CSB allocation protocol. */
	sc->csb_gh = contigmalloc(2*PAGE_SIZE, M_DEVBUF, M_NOWAIT | M_ZERO,
				  (size_t)0, -1UL, PAGE_SIZE, 0);
	if (sc->csb_gh == NULL) {
		device_printf(dev, "Failed to allocate CSB\n");
		err = ENOMEM;
		goto err_path;
	}
	sc->csb_hg = (struct ptnet_csb_hg *)(((char *)sc->csb_gh) + PAGE_SIZE);
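	/*
	 * The guest-to-host entries fill the first of the two pages
	 * allocated above and the host-to-guest entries the second one,
	 * so that each side writes only to its own page.
	 */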
*/ 35646023447SVincenzo Maffione bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH, 357a9e644cdSLuigi Rizzo (paddr >> 32) & 0xffffffff); 35846023447SVincenzo Maffione bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL, 35946023447SVincenzo Maffione paddr & 0xffffffff); 36046023447SVincenzo Maffione paddr = vtophys(sc->csb_hg); 36146023447SVincenzo Maffione bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH, 36246023447SVincenzo Maffione (paddr >> 32) & 0xffffffff); 36346023447SVincenzo Maffione bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL, 36446023447SVincenzo Maffione paddr & 0xffffffff); 365a9e644cdSLuigi Rizzo } 366a9e644cdSLuigi Rizzo 367a9e644cdSLuigi Rizzo /* Allocate and initialize per-queue data structures. */ 368ac2fffa4SPedro F. Giffuni sc->queues = malloc(sizeof(struct ptnet_queue) * sc->num_rings, 369a9e644cdSLuigi Rizzo M_DEVBUF, M_NOWAIT | M_ZERO); 370a9e644cdSLuigi Rizzo if (sc->queues == NULL) { 371a9e644cdSLuigi Rizzo err = ENOMEM; 372a9e644cdSLuigi Rizzo goto err_path; 373a9e644cdSLuigi Rizzo } 374a9e644cdSLuigi Rizzo sc->rxqueues = sc->queues + num_tx_rings; 375a9e644cdSLuigi Rizzo 376a9e644cdSLuigi Rizzo for (i = 0; i < sc->num_rings; i++) { 377a9e644cdSLuigi Rizzo struct ptnet_queue *pq = sc->queues + i; 378a9e644cdSLuigi Rizzo 379a9e644cdSLuigi Rizzo pq->sc = sc; 380a9e644cdSLuigi Rizzo pq->kring_id = i; 381a9e644cdSLuigi Rizzo pq->kick = PTNET_IO_KICK_BASE + 4 * i; 38246023447SVincenzo Maffione pq->ptgh = sc->csb_gh + i; 38346023447SVincenzo Maffione pq->pthg = sc->csb_hg + i; 384a9e644cdSLuigi Rizzo snprintf(pq->lock_name, sizeof(pq->lock_name), "%s-%d", 385a9e644cdSLuigi Rizzo device_get_nameunit(dev), i); 386a9e644cdSLuigi Rizzo mtx_init(&pq->lock, pq->lock_name, NULL, MTX_DEF); 387a9e644cdSLuigi Rizzo if (i >= num_tx_rings) { 388a9e644cdSLuigi Rizzo /* RX queue: fix kring_id. */ 389a9e644cdSLuigi Rizzo pq->kring_id -= num_tx_rings; 390a9e644cdSLuigi Rizzo } else { 391a9e644cdSLuigi Rizzo /* TX queue: allocate buf_ring. */ 392a9e644cdSLuigi Rizzo pq->bufring = buf_ring_alloc(PTNET_BUF_RING_SIZE, 393a9e644cdSLuigi Rizzo M_DEVBUF, M_NOWAIT, &pq->lock); 394a9e644cdSLuigi Rizzo if (pq->bufring == NULL) { 395a9e644cdSLuigi Rizzo err = ENOMEM; 396a9e644cdSLuigi Rizzo goto err_path; 397a9e644cdSLuigi Rizzo } 398a9e644cdSLuigi Rizzo } 399a9e644cdSLuigi Rizzo } 400a9e644cdSLuigi Rizzo 401a9e644cdSLuigi Rizzo sc->min_tx_space = 64; /* Safe initial value. */ 402a9e644cdSLuigi Rizzo 403a9e644cdSLuigi Rizzo err = ptnet_irqs_init(sc); 404a9e644cdSLuigi Rizzo if (err) { 405a9e644cdSLuigi Rizzo goto err_path; 406a9e644cdSLuigi Rizzo } 407a9e644cdSLuigi Rizzo 408a9e644cdSLuigi Rizzo /* Setup Ethernet interface. 
	/* Setup Ethernet interface. */
	sc->ifp = ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Failed to allocate ifnet\n");
		err = ENOMEM;
		goto err_path;
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
	ifp->if_init = ptnet_init;
	ifp->if_ioctl = ptnet_ioctl;
#if __FreeBSD_version >= 1100000
	ifp->if_get_counter = ptnet_get_counter;
#endif
	ifp->if_transmit = ptnet_transmit;
	ifp->if_qflush = ptnet_qflush;

	ifmedia_init(&sc->media, IFM_IMASK, ptnet_media_change,
		     ptnet_media_status);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX);

	macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_HI);
	sc->hwaddr[0] = (macreg >> 8) & 0xff;
	sc->hwaddr[1] = macreg & 0xff;
	macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_LO);
	sc->hwaddr[2] = (macreg >> 24) & 0xff;
	sc->hwaddr[3] = (macreg >> 16) & 0xff;
	sc->hwaddr[4] = (macreg >> 8) & 0xff;
	sc->hwaddr[5] = macreg & 0xff;

	ether_ifattach(ifp, sc->hwaddr);

	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;

	if (sc->ptfeatures & PTNETMAP_F_VNET_HDR) {
		/* Similarly to what the vtnet driver does, we can emulate
		 * VLAN offloadings by inserting and removing the 802.1Q
		 * header during transmit and receive. We are then able
		 * to do checksum offloading of VLAN frames. */
		ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6
					| IFCAP_VLAN_HWCSUM
					| IFCAP_TSO | IFCAP_LRO
					| IFCAP_VLAN_HWTSO
					| IFCAP_VLAN_HWTAGGING;
	}

	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	/* Don't enable polling by default. */
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
	snprintf(sc->lock_name, sizeof(sc->lock_name),
		 "%s", device_get_nameunit(dev));
	mtx_init(&sc->lock, sc->lock_name, "ptnet core lock", MTX_DEF);
	callout_init_mtx(&sc->tick, &sc->lock, 0);
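	/*
	 * What follows mirrors the attach-time netmap setup of a native
	 * driver, except that the rings and buffers live in the netmap
	 * memory of the host, identified by the PTNET_IO_HOSTMEMID
	 * register and mapped into the guest by the passthrough
	 * machinery.
	 */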
	/* Prepare a netmap_adapter struct instance to do netmap_attach(). */
	nifp_offset = bus_read_4(sc->iomem, PTNET_IO_NIFP_OFS);
	memset(&na_arg, 0, sizeof(na_arg));
	na_arg.ifp = ifp;
	na_arg.num_tx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS);
	na_arg.num_rx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS);
	na_arg.num_tx_rings = num_tx_rings;
	na_arg.num_rx_rings = num_rx_rings;
	na_arg.nm_config = ptnet_nm_config;
	na_arg.nm_krings_create = ptnet_nm_krings_create;
	na_arg.nm_krings_delete = ptnet_nm_krings_delete;
	na_arg.nm_dtor = ptnet_nm_dtor;
	na_arg.nm_intr = ptnet_nm_intr;
	na_arg.nm_register = ptnet_nm_register;
	na_arg.nm_txsync = ptnet_nm_txsync;
	na_arg.nm_rxsync = ptnet_nm_rxsync;

	netmap_pt_guest_attach(&na_arg, nifp_offset,
				bus_read_4(sc->iomem, PTNET_IO_HOSTMEMID));

	/* Now a netmap adapter for this ifp has been allocated, and it
	 * can be accessed through NA(ifp). We also have to initialize the CSB
	 * pointer. */
	sc->ptna = (struct netmap_pt_guest_adapter *)NA(ifp);

	/* If virtio-net header was negotiated, set the virt_hdr_len field in
	 * the netmap adapter, to inform users that this netmap adapter requires
	 * the application to deal with the headers. */
	ptnet_update_vnet_hdr(sc);

	device_printf(dev, "%s() completed\n", __func__);

	return (0);

err_path:
	ptnet_detach(dev);
	return err;
}
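/*
 * All error paths in ptnet_attach() funnel into a single call to
 * ptnet_detach(), so the detach routine below must cope with a
 * partially initialized softc; hence the NULL checks on each resource
 * before it is released.
 */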
static int
ptnet_detach(device_t dev)
{
	struct ptnet_softc *sc = device_get_softc(dev);
	int i;

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING) {
		ether_poll_deregister(sc->ifp);
	}
#endif
	callout_drain(&sc->tick);

	if (sc->queues) {
		/* Drain taskqueues before calling if_detach. */
		for (i = 0; i < sc->num_rings; i++) {
			struct ptnet_queue *pq = sc->queues + i;

			if (pq->taskq) {
				taskqueue_drain(pq->taskq, &pq->task);
			}
		}
	}

	if (sc->ifp) {
		ether_ifdetach(sc->ifp);

		/* Uninitialize netmap adapters for this device. */
		netmap_detach(sc->ifp);

		ifmedia_removeall(&sc->media);
		if_free(sc->ifp);
		sc->ifp = NULL;
	}

	ptnet_irqs_fini(sc);

	if (sc->csb_gh) {
		bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH, 0);
		bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL, 0);
		bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH, 0);
		bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL, 0);
		contigfree(sc->csb_gh, 2*PAGE_SIZE, M_DEVBUF);
		sc->csb_gh = NULL;
		sc->csb_hg = NULL;
	}

	if (sc->queues) {
		for (i = 0; i < sc->num_rings; i++) {
			struct ptnet_queue *pq = sc->queues + i;

			if (mtx_initialized(&pq->lock)) {
				mtx_destroy(&pq->lock);
			}
			if (pq->bufring != NULL) {
				buf_ring_free(pq->bufring, M_DEVBUF);
			}
		}
		free(sc->queues, M_DEVBUF);
		sc->queues = NULL;
	}

	if (sc->iomem) {
		bus_release_resource(dev, SYS_RES_IOPORT,
				     PCIR_BAR(PTNETMAP_IO_PCI_BAR), sc->iomem);
		sc->iomem = NULL;
	}

	mtx_destroy(&sc->lock);

	device_printf(dev, "%s() completed\n", __func__);

	return (0);
}

static int
ptnet_suspend(device_t dev)
{
	struct ptnet_softc *sc;

	sc = device_get_softc(dev);
	(void)sc;

	return (0);
}

static int
ptnet_resume(device_t dev)
{
	struct ptnet_softc *sc;

	sc = device_get_softc(dev);
	(void)sc;

	return (0);
}

static int
ptnet_shutdown(device_t dev)
{
	/*
	 * Suspend already does all of what we need to
	 * do here; we just never expect to be resumed.
	 */
	return (ptnet_suspend(dev));
}
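/*
 * Interrupt layout: one MSI-X vector per ring (TX vectors first, then
 * RX), plus one taskqueue per queue, used to resume TX/RX processing
 * when a handler exhausts its budget and asks to be rescheduled (see
 * the may_resched parameters of ptnet_drain_transmit_queue() and
 * ptnet_rx_eof()).
 */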
static int
ptnet_irqs_init(struct ptnet_softc *sc)
{
	int rid = PCIR_BAR(PTNETMAP_MSIX_PCI_BAR);
	int nvecs = sc->num_rings;
	device_t dev = sc->dev;
	int err = ENOSPC;
	int cpu_cur;
	int i;

	if (pci_find_cap(dev, PCIY_MSIX, NULL) != 0) {
		device_printf(dev, "Could not find MSI-X capability\n");
		return (ENXIO);
	}

	sc->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
					      &rid, RF_ACTIVE);
	if (sc->msix_mem == NULL) {
		device_printf(dev, "Failed to allocate MSIX PCI BAR\n");
		return (ENXIO);
	}

	if (pci_msix_count(dev) < nvecs) {
		device_printf(dev, "Not enough MSI-X vectors\n");
		goto err_path;
	}

	err = pci_alloc_msix(dev, &nvecs);
	if (err) {
		device_printf(dev, "Failed to allocate MSI-X vectors\n");
		goto err_path;
	}

	for (i = 0; i < nvecs; i++) {
		struct ptnet_queue *pq = sc->queues + i;

		rid = i + 1;
		pq->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
						 RF_ACTIVE);
		if (pq->irq == NULL) {
			device_printf(dev, "Failed to allocate interrupt "
					   "for queue #%d\n", i);
			err = ENOSPC;
			goto err_path;
		}
	}

	cpu_cur = CPU_FIRST();
	for (i = 0; i < nvecs; i++) {
		struct ptnet_queue *pq = sc->queues + i;
		void (*handler)(void *) = ptnet_tx_intr;

		if (i >= sc->num_tx_rings) {
			handler = ptnet_rx_intr;
		}
		err = bus_setup_intr(dev, pq->irq, INTR_TYPE_NET | INTR_MPSAFE,
				     NULL /* intr_filter */, handler,
				     pq, &pq->cookie);
		if (err) {
			device_printf(dev, "Failed to register intr handler "
					   "for queue #%d\n", i);
			goto err_path;
		}

		bus_describe_intr(dev, pq->irq, pq->cookie, "q%d", i);
#if 0
		bus_bind_intr(sc->dev, pq->irq, cpu_cur);
#endif
		cpu_cur = CPU_NEXT(cpu_cur);
	}

	device_printf(dev, "Allocated %d MSI-X vectors\n", nvecs);

	cpu_cur = CPU_FIRST();
	for (i = 0; i < nvecs; i++) {
		struct ptnet_queue *pq = sc->queues + i;
		void (*handler)(void *context, int pending);

		handler = (i < sc->num_tx_rings) ? ptnet_tx_task : ptnet_rx_task;

		TASK_INIT(&pq->task, 0, handler, pq);
		pq->taskq = taskqueue_create_fast("ptnet_queue", M_NOWAIT,
					taskqueue_thread_enqueue, &pq->taskq);
		taskqueue_start_threads(&pq->taskq, 1, PI_NET, "%s-pq-%d",
					device_get_nameunit(sc->dev), cpu_cur);
		cpu_cur = CPU_NEXT(cpu_cur);
	}

	return 0;
err_path:
	ptnet_irqs_fini(sc);
	return err;
}

static void
ptnet_irqs_fini(struct ptnet_softc *sc)
{
	device_t dev = sc->dev;
	int i;

	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;

		if (pq->taskq) {
			taskqueue_free(pq->taskq);
			pq->taskq = NULL;
		}

		if (pq->cookie) {
			bus_teardown_intr(dev, pq->irq, pq->cookie);
			pq->cookie = NULL;
		}

		if (pq->irq) {
			bus_release_resource(dev, SYS_RES_IRQ, i + 1, pq->irq);
			pq->irq = NULL;
		}
	}

	if (sc->msix_mem) {
		pci_release_msi(dev);

		bus_release_resource(dev, SYS_RES_MEMORY,
				     PCIR_BAR(PTNETMAP_MSIX_PCI_BAR),
				     sc->msix_mem);
		sc->msix_mem = NULL;
	}
}

static void
ptnet_init(void *opaque)
{
	struct ptnet_softc *sc = opaque;

	PTNET_CORE_LOCK(sc);
	ptnet_init_locked(sc);
	PTNET_CORE_UNLOCK(sc);
}
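/*
 * The ioctl handler below only covers what ether_ioctl() does not:
 * interface up/down transitions, capability toggling (including the
 * DEVICE_POLLING handshake), MTU changes and media requests.
 */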
static int
ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	device_t dev = sc->dev;
	struct ifreq *ifr = (struct ifreq *)data;
	int mask __unused, err = 0; /* mask is used only with DEVICE_POLLING */

	switch (cmd) {
	case SIOCSIFFLAGS:
		device_printf(dev, "SIOCSIFFLAGS %x\n", ifp->if_flags);
		PTNET_CORE_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			/* Network stack wants the iff to be up. */
			err = ptnet_init_locked(sc);
		} else {
			/* Network stack wants the iff to be down. */
			err = ptnet_stop(sc);
		}
		/* We don't need to do anything to support IFF_PROMISC,
		 * since that is managed by the backend port. */
		PTNET_CORE_UNLOCK(sc);
		break;

	case SIOCSIFCAP:
		device_printf(dev, "SIOCSIFCAP %x %x\n",
			      ifr->ifr_reqcap, ifp->if_capenable);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			struct ptnet_queue *pq;
			int i;

			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				err = ether_poll_register(ptnet_poll, ifp);
				if (err) {
					break;
				}
				/* Stop queues and sync with taskqueues. */
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				for (i = 0; i < sc->num_rings; i++) {
					pq = sc->queues + i;
					/* Make sure the worker sees the
					 * IFF_DRV_RUNNING down. */
					PTNET_Q_LOCK(pq);
					pq->ptgh->guest_need_kick = 0;
					PTNET_Q_UNLOCK(pq);
					/* Wait for rescheduling to finish. */
					if (pq->taskq) {
						taskqueue_drain(pq->taskq,
								&pq->task);
					}
				}
				ifp->if_drv_flags |= IFF_DRV_RUNNING;
			} else {
				err = ether_poll_deregister(ifp);
				for (i = 0; i < sc->num_rings; i++) {
					pq = sc->queues + i;
					PTNET_Q_LOCK(pq);
					pq->ptgh->guest_need_kick = 1;
					PTNET_Q_UNLOCK(pq);
				}
			}
		}
#endif  /* DEVICE_POLLING */
		ifp->if_capenable = ifr->ifr_reqcap;
		break;
	case SIOCSIFMTU:
		/* We support any reasonable MTU. */
		if (ifr->ifr_mtu < ETHERMIN ||
				ifr->ifr_mtu > PTNET_MAX_PKT_SIZE) {
			err = EINVAL;
		} else {
			PTNET_CORE_LOCK(sc);
			ifp->if_mtu = ifr->ifr_mtu;
			PTNET_CORE_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		err = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
		break;

	default:
		err = ether_ioctl(ifp, cmd, data);
		break;
	}

	return err;
}

static int
ptnet_init_locked(struct ptnet_softc *sc)
{
	if_t ifp = sc->ifp;
	struct netmap_adapter *na_dr = &sc->ptna->dr.up;
	struct netmap_adapter *na_nm = &sc->ptna->hwup.up;
	unsigned int nm_buf_size;
	int ret;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		return 0; /* nothing to do */
	}

	device_printf(sc->dev, "%s\n", __func__);

	/* Translate offload capabilities according to if_capenable. */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= PTNET_CSUM_OFFLOAD;
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		ifp->if_hwassist |= PTNET_CSUM_OFFLOAD_IPV6;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_IP_TSO;
	if (ifp->if_capenable & IFCAP_TSO6)
		ifp->if_hwassist |= CSUM_IP6_TSO;
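	/*
	 * The conventional TX/RX datapath of this driver also runs on
	 * netmap rings. The steps below (netmap memory finalize, kring
	 * and ring creation, register) are therefore the ones netmap
	 * normally performs on behalf of an application, carried out
	 * here on the driver's own "dr" adapter.
	 */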
	/*
	 * Prepare the interface for netmap mode access.
	 */
	netmap_update_config(na_dr);

	ret = netmap_mem_finalize(na_dr->nm_mem, na_dr);
	if (ret) {
		device_printf(sc->dev, "netmap_mem_finalize() failed\n");
		return ret;
	}

	if (sc->ptna->backend_regifs == 0) {
		ret = ptnet_nm_krings_create(na_nm);
		if (ret) {
			device_printf(sc->dev, "ptnet_nm_krings_create() "
					       "failed\n");
			goto err_mem_finalize;
		}

		ret = netmap_mem_rings_create(na_dr);
		if (ret) {
			device_printf(sc->dev, "netmap_mem_rings_create() "
					       "failed\n");
			goto err_rings_create;
		}

		ret = netmap_mem_get_lut(na_dr->nm_mem, &na_dr->na_lut);
		if (ret) {
			device_printf(sc->dev, "netmap_mem_get_lut() "
					       "failed\n");
			goto err_get_lut;
		}
	}

	ret = ptnet_nm_register(na_dr, 1 /* on */);
	if (ret) {
		goto err_register;
	}

	nm_buf_size = NETMAP_BUF_SIZE(na_dr);

	KASSERT(nm_buf_size > 0, ("Invalid netmap buffer size"));
	sc->min_tx_space = PTNET_MAX_PKT_SIZE / nm_buf_size + 2;
	device_printf(sc->dev, "%s: min_tx_space = %u\n", __func__,
		      sc->min_tx_space);
#ifdef PTNETMAP_STATS
	callout_reset(&sc->tick, hz, ptnet_tick, sc);
#endif

	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	return 0;

err_register:
	memset(&na_dr->na_lut, 0, sizeof(na_dr->na_lut));
err_get_lut:
	netmap_mem_rings_delete(na_dr);
err_rings_create:
	ptnet_nm_krings_delete(na_nm);
err_mem_finalize:
	netmap_mem_deref(na_dr->nm_mem, na_dr);

	return ret;
}
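/*
 * min_tx_space, computed in ptnet_init_locked() above, is the
 * worst-case number of TX slots a single packet may need:
 * PTNET_MAX_PKT_SIZE bytes split across nm_buf_size-sized netmap
 * buffers, plus two slots of slack (presumably covering the virtio-net
 * header and rounding). The transmit path can use it as a low-water
 * mark before queuing more work.
 */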
/* To be called under core lock. */
static int
ptnet_stop(struct ptnet_softc *sc)
{
	if_t ifp = sc->ifp;
	struct netmap_adapter *na_dr = &sc->ptna->dr.up;
	struct netmap_adapter *na_nm = &sc->ptna->hwup.up;
	int i;

	device_printf(sc->dev, "%s\n", __func__);

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		return 0; /* nothing to do */
	}

	/* Clear the driver-ready flag, and synchronize with all the queues,
	 * so that after this loop we are sure nobody is working anymore with
	 * the device. This scheme is taken from the vtnet driver. */
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	callout_stop(&sc->tick);
	for (i = 0; i < sc->num_rings; i++) {
		PTNET_Q_LOCK(sc->queues + i);
		PTNET_Q_UNLOCK(sc->queues + i);
	}

	ptnet_nm_register(na_dr, 0 /* off */);

	if (sc->ptna->backend_regifs == 0) {
		netmap_mem_rings_delete(na_dr);
		ptnet_nm_krings_delete(na_nm);
	}
	netmap_mem_deref(na_dr->nm_mem, na_dr);

	return 0;
}
static void
ptnet_qflush(if_t ifp)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	int i;

	/* Flush all the bufrings and do the interface flush. */
	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;
		struct mbuf *m;

		PTNET_Q_LOCK(pq);
		if (pq->bufring) {
			while ((m = buf_ring_dequeue_sc(pq->bufring))) {
				m_freem(m);
			}
		}
		PTNET_Q_UNLOCK(pq);
	}

	if_qflush(ifp);
}

static int
ptnet_media_change(if_t ifp)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	struct ifmedia *ifm = &sc->media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
		return EINVAL;
	}

	return 0;
}

#if __FreeBSD_version >= 1100000
static uint64_t
ptnet_get_counter(if_t ifp, ift_counter cnt)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	struct ptnet_queue_stats stats[2];
	int i;

	/* Accumulate statistics over the queues: index 0 collects the
	 * TX rings, index 1 the RX rings. */
	memset(stats, 0, sizeof(stats));
	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;
		int idx = (i < sc->num_tx_rings) ? 0 : 1;

		stats[idx].packets	+= pq->stats.packets;
		stats[idx].bytes	+= pq->stats.bytes;
		stats[idx].errors	+= pq->stats.errors;
		stats[idx].iqdrops	+= pq->stats.iqdrops;
		stats[idx].mcasts	+= pq->stats.mcasts;
	}

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (stats[1].packets);
	case IFCOUNTER_IQDROPS:
		return (stats[1].iqdrops);
	case IFCOUNTER_IERRORS:
		return (stats[1].errors);
	case IFCOUNTER_OPACKETS:
		return (stats[0].packets);
	case IFCOUNTER_OBYTES:
		return (stats[0].bytes);
	case IFCOUNTER_OMCASTS:
		return (stats[0].mcasts);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}
#endif

#ifdef PTNETMAP_STATS
/* Called under core lock. */
static void
ptnet_tick(void *opaque)
{
	struct ptnet_softc *sc = opaque;
	int i;

	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;
		struct ptnet_queue_stats cur = pq->stats;
		struct timeval now;
		unsigned int delta;

		microtime(&now);
		delta = now.tv_usec - sc->last_ts.tv_usec +
			(now.tv_sec - sc->last_ts.tv_sec) * 1000000;
		delta /= 1000; /* in milliseconds */

		if (delta == 0)
			continue;

		device_printf(sc->dev, "#%d[%u ms]:pkts %lu, kicks %lu, "
			      "intr %lu\n", i, delta,
			      (cur.packets - pq->last_stats.packets),
			      (cur.kicks - pq->last_stats.kicks),
			      (cur.intrs - pq->last_stats.intrs));
		pq->last_stats = cur;
	}
	microtime(&sc->last_ts);
	callout_schedule(&sc->tick, hz);
}
#endif /* PTNETMAP_STATS */

static void
ptnet_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	/* We are always active, as the backend netmap port is
	 * always open in netmap mode. */
	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	ifmr->ifm_active = IFM_ETHER | IFM_10G_T | IFM_FDX;
}
static uint32_t
ptnet_nm_ptctl(if_t ifp, uint32_t cmd)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	/*
	 * Write a command and read back error status,
	 * with zero meaning success.
	 */
	bus_write_4(sc->iomem, PTNET_IO_PTCTL, cmd);
	return bus_read_4(sc->iomem, PTNET_IO_PTCTL);
}

static int
ptnet_nm_config(struct netmap_adapter *na, struct nm_config_info *info)
{
	struct ptnet_softc *sc = if_getsoftc(na->ifp);

	info->num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS);
	info->num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS);
	info->num_tx_descs = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS);
	info->num_rx_descs = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS);
	info->rx_buf_maxsize = NETMAP_BUF_SIZE(na);

	device_printf(sc->dev, "txr %u, rxr %u, txd %u, rxd %u, rxbufsz %u\n",
			info->num_tx_rings, info->num_rx_rings,
			info->num_tx_descs, info->num_rx_descs,
			info->rx_buf_maxsize);

	return 0;
}

static void
ptnet_sync_from_csb(struct ptnet_softc *sc, struct netmap_adapter *na)
{
	int i;

	/* Sync krings from the host, reading from
	 * CSB. */
	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_csb_gh *ptgh = sc->queues[i].ptgh;
		struct ptnet_csb_hg *pthg = sc->queues[i].pthg;
		struct netmap_kring *kring;

		if (i < na->num_tx_rings) {
			kring = na->tx_rings[i];
		} else {
			kring = na->rx_rings[i - na->num_tx_rings];
		}
		kring->rhead = kring->ring->head = ptgh->head;
		kring->rcur = kring->ring->cur = ptgh->cur;
		kring->nr_hwcur = pthg->hwcur;
		kring->nr_hwtail = kring->rtail =
			kring->ring->tail = pthg->hwtail;

		ND("%d,%d: csb {hc %u h %u c %u ht %u}", t, i,
		   pthg->hwcur, ptgh->head, ptgh->cur,
		   pthg->hwtail);
		ND("%d,%d: kring {hc %u rh %u rc %u h %u c %u ht %u rt %u t %u}",
		   t, i, kring->nr_hwcur, kring->rhead, kring->rcur,
		   kring->ring->head, kring->ring->cur, kring->nr_hwtail,
		   kring->rtail, kring->ring->tail);
	}
}
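/*
 * Virtio-net header negotiation: the guest proposes a header length
 * (PTNET_HDR_SIZE or 0) and reads back what the hypervisor accepted.
 * The accepted value is mirrored into the netmap adapter so that
 * netmap applications know they must produce and consume the header
 * themselves.
 */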
static void
ptnet_update_vnet_hdr(struct ptnet_softc *sc)
{
	unsigned int wanted_hdr_len = ptnet_vnet_hdr ? PTNET_HDR_SIZE : 0;

	bus_write_4(sc->iomem, PTNET_IO_VNET_HDR_LEN, wanted_hdr_len);
	sc->vnet_hdr_len = bus_read_4(sc->iomem, PTNET_IO_VNET_HDR_LEN);
	sc->ptna->hwup.up.virt_hdr_len = sc->vnet_hdr_len;
}

static int
ptnet_nm_register(struct netmap_adapter *na, int onoff)
{
	/* device-specific */
	if_t ifp = na->ifp;
	struct ptnet_softc *sc = if_getsoftc(ifp);
	int native = (na == &sc->ptna->hwup.up);
	struct ptnet_queue *pq;
	enum txrx t;
	int ret = 0;
	int i;

	if (!onoff) {
		sc->ptna->backend_regifs--;
	}

	/* If this is the last netmap client, guest interrupt enable flags may
	 * be in arbitrary state. Since these flags are going to be used also
	 * by the netdevice driver, we have to make sure to start with
	 * notifications enabled. Also, schedule NAPI to flush pending packets
	 * in the RX rings, since we will not receive further interrupts
	 * until those are processed. */
	if (native && !onoff && na->active_fds == 0) {
		D("Exit netmap mode, re-enable interrupts");
		for (i = 0; i < sc->num_rings; i++) {
			pq = sc->queues + i;
			pq->ptgh->guest_need_kick = 1;
		}
	}

	if (onoff) {
		if (sc->ptna->backend_regifs == 0) {
			/* Initialize notification enable fields in the CSB. */
			for (i = 0; i < sc->num_rings; i++) {
				pq = sc->queues + i;
				pq->pthg->host_need_kick = 1;
				pq->ptgh->guest_need_kick =
					(!(ifp->if_capenable & IFCAP_POLLING)
					&& i >= sc->num_tx_rings);
			}

			/* Set the virtio-net header length. */
			ptnet_update_vnet_hdr(sc);

			/* Make sure the host adapter passed through is ready
			 * for txsync/rxsync. */
			ret = ptnet_nm_ptctl(ifp, PTNETMAP_PTCTL_CREATE);
			if (ret) {
				return ret;
			}
		}

		/* Sync from CSB must be done after REGIF PTCTL. Skip this
		 * step only if this is a netmap client and it is not the
		 * first one. */
		/* Sync from CSB must be done after REGIF PTCTL. Skip this
		 * step only if this is a netmap client and it is not the
		 * first one. */
		if ((!native && sc->ptna->backend_regifs == 0) ||
				(native && na->active_fds == 0)) {
			ptnet_sync_from_csb(sc, na);
		}

		/* If not native, don't call nm_set_native_flags, since we
		 * don't want to replace the if_transmit method, nor to set
		 * NAF_NETMAP_ON. */
		if (native) {
			for_rx_tx(t) {
				for (i = 0; i <= nma_get_nrings(na, t); i++) {
					struct netmap_kring *kring = NMR(na, t)[i];

					if (nm_kring_pending_on(kring)) {
						kring->nr_mode = NKR_NETMAP_ON;
					}
				}
			}
			nm_set_native_flags(na);
		}

	} else {
		if (native) {
			nm_clear_native_flags(na);
			for_rx_tx(t) {
				for (i = 0; i <= nma_get_nrings(na, t); i++) {
					struct netmap_kring *kring = NMR(na, t)[i];

					if (nm_kring_pending_off(kring)) {
						kring->nr_mode = NKR_NETMAP_OFF;
					}
				}
			}
		}

		/* Sync from CSB must be done before UNREGIF PTCTL, on the last
		 * netmap client. */
		if (native && na->active_fds == 0) {
			ptnet_sync_from_csb(sc, na);
		}

		if (sc->ptna->backend_regifs == 0) {
			ret = ptnet_nm_ptctl(ifp, PTNETMAP_PTCTL_DELETE);
		}
	}

	if (onoff) {
		sc->ptna->backend_regifs++;
	}

	return ret;
}

static int
ptnet_nm_txsync(struct netmap_kring *kring, int flags)
{
	struct ptnet_softc *sc = if_getsoftc(kring->na->ifp);
	struct ptnet_queue *pq = sc->queues + kring->ring_id;
	bool notify;

	notify = netmap_pt_guest_txsync(pq->ptgh, pq->pthg, kring, flags);
	if (notify) {
		ptnet_kick(pq);
	}

	return 0;
}

static int
ptnet_nm_rxsync(struct netmap_kring *kring, int flags)
{
	struct ptnet_softc *sc = if_getsoftc(kring->na->ifp);
	struct ptnet_queue *pq = sc->rxqueues + kring->ring_id;
	bool notify;

	notify = netmap_pt_guest_rxsync(pq->ptgh, pq->pthg, kring, flags);
	if (notify) {
		ptnet_kick(pq);
	}

	return 0;
}
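/*
 * Enable or disable host-to-guest interrupts on all rings. Setting
 * the CSB guest_need_kick flag to 1 asks the host to interrupt the
 * guest when it updates a ring, while 0 lets the guest run in pure
 * polling mode.
 */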
static void
ptnet_nm_intr(struct netmap_adapter *na, int onoff)
{
	struct ptnet_softc *sc = if_getsoftc(na->ifp);
	int i;

	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;
		pq->ptgh->guest_need_kick = onoff;
	}
}

static void
ptnet_tx_intr(void *opaque)
{
	struct ptnet_queue *pq = opaque;
	struct ptnet_softc *sc = pq->sc;

	DBG(device_printf(sc->dev, "Tx interrupt #%d\n", pq->kring_id));
#ifdef PTNETMAP_STATS
	pq->stats.intrs ++;
#endif /* PTNETMAP_STATS */

	if (netmap_tx_irq(sc->ifp, pq->kring_id) != NM_IRQ_PASS) {
		return;
	}

	/* Schedule the taskqueue to flush pending transmission requests.
	 * However, vtnet, if_em and if_igb just call ptnet_transmit() here,
	 * at least when using MSI-X interrupts. The if_em driver, instead,
	 * schedules the taskqueue when using legacy interrupts. */
	taskqueue_enqueue(pq->taskq, &pq->task);
}

static void
ptnet_rx_intr(void *opaque)
{
	struct ptnet_queue *pq = opaque;
	struct ptnet_softc *sc = pq->sc;
	unsigned int unused;

	DBG(device_printf(sc->dev, "Rx interrupt #%d\n", pq->kring_id));
#ifdef PTNETMAP_STATS
	pq->stats.intrs ++;
#endif /* PTNETMAP_STATS */

	if (netmap_rx_irq(sc->ifp, pq->kring_id, &unused) != NM_IRQ_PASS) {
		return;
	}

	/* Like the vtnet, if_igb and if_em drivers when using MSI-X
	 * interrupts, receive-side processing is executed directly in the
	 * interrupt service routine. Alternatively, we may schedule the
	 * taskqueue. */
	ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);
}
/* The following offloading-related functions are taken from the vtnet
 * driver, but the same functionality is required for the ptnet driver.
 * As a temporary solution, I copied this code from vtnet and started
 * to generalize it (taking away driver-specific statistic accounting),
 * making as few modifications as possible.
 * In the future we need to share these functions between vtnet and ptnet.
 */
static int
ptnet_tx_offload_ctx(struct mbuf *m, int *etype, int *proto, int *start)
{
	struct ether_vlan_header *evh;
	int offset;

	evh = mtod(m, struct ether_vlan_header *);
	if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		/* BMV: We should handle nested VLAN tags too. */
		*etype = ntohs(evh->evl_proto);
		offset = sizeof(struct ether_vlan_header);
	} else {
		*etype = ntohs(evh->evl_encap_proto);
		offset = sizeof(struct ether_header);
	}

	switch (*etype) {
#if defined(INET)
	case ETHERTYPE_IP: {
		struct ip *ip, iphdr;
		if (__predict_false(m->m_len < offset + sizeof(struct ip))) {
			m_copydata(m, offset, sizeof(struct ip),
			    (caddr_t) &iphdr);
			ip = &iphdr;
		} else
			ip = (struct ip *)(m->m_data + offset);
		*proto = ip->ip_p;
		*start = offset + (ip->ip_hl << 2);
		break;
	}
#endif
#if defined(INET6)
	case ETHERTYPE_IPV6:
		*proto = -1;
		*start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto);
		/* Assert the network stack sent us a valid packet. */
		KASSERT(*start > offset,
		    ("%s: mbuf %p start %d offset %d proto %d", __func__, m,
		     *start, offset, *proto));
		break;
#endif
	default:
		/* Here we should increment the tx_csum_bad_ethtype counter. */
		return (EINVAL);
	}

	return (0);
}
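/*
 * Fill in the TSO-related fields of the virtio-net header. Per the
 * virtio-net specification, hdr_len is the total length of the
 * Ethernet, IP and TCP headers, gso_size is the maximum segment size
 * (taken from the mbuf's tso_segsz), and gso_type selects TCPv4 or
 * TCPv6 segmentation, optionally with the ECN flag or'ed in.
 */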
static int
ptnet_tx_offload_tso(if_t ifp, struct mbuf *m, int eth_type,
		     int offset, bool allow_ecn, struct virtio_net_hdr *hdr)
{
	static struct timeval lastecn;
	static int curecn;
	struct tcphdr *tcp, tcphdr;

	if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) {
		m_copydata(m, offset, sizeof(struct tcphdr), (caddr_t) &tcphdr);
		tcp = &tcphdr;
	} else
		tcp = (struct tcphdr *)(m->m_data + offset);

	hdr->hdr_len = offset + (tcp->th_off << 2);
	hdr->gso_size = m->m_pkthdr.tso_segsz;
	hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 :
						   VIRTIO_NET_HDR_GSO_TCPV6;

	if (tcp->th_flags & TH_CWR) {
		/*
		 * Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In FreeBSD,
		 * ECN support is not on a per-interface basis, but globally via
		 * the net.inet.tcp.ecn.enable sysctl knob. The default is off.
		 */
		if (!allow_ecn) {
			if (ppsratecheck(&lastecn, &curecn, 1))
				if_printf(ifp,
				    "TSO with ECN not negotiated with host\n");
			return (ENOTSUP);
		}
		hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	}

	/* Here we should increment the tx_tso counter. */

	return (0);
}

static struct mbuf *
ptnet_tx_offload(if_t ifp, struct mbuf *m, bool allow_ecn,
		 struct virtio_net_hdr *hdr)
{
	int flags, etype, csum_start, proto, error;

	flags = m->m_pkthdr.csum_flags;

	error = ptnet_tx_offload_ctx(m, &etype, &proto, &csum_start);
	if (error)
		goto drop;

	if ((etype == ETHERTYPE_IP && flags & PTNET_CSUM_OFFLOAD) ||
	    (etype == ETHERTYPE_IPV6 && flags & PTNET_CSUM_OFFLOAD_IPV6)) {
		/*
		 * We could compare the IP protocol vs the CSUM_ flag too,
		 * but that really should not be necessary.
		 */
		hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->csum_start = csum_start;
		hdr->csum_offset = m->m_pkthdr.csum_data;
		/* Here we should increment the tx_csum counter. */
	}
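	/*
	 * For example, for a TCP segment over IPv4 with no VLAN tag and a
	 * 20-byte IP header, csum_start is 14 + 20 = 34 (the Ethernet and
	 * IP headers to skip) and csum_offset is offsetof(struct tcphdr,
	 * th_sum) = 16, so the host stores the checksum at byte 34 + 16
	 * of the frame.
	 */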
	if (flags & CSUM_TSO) {
		if (__predict_false(proto != IPPROTO_TCP)) {
			/* Likely failed to correctly parse the mbuf.
			 * Here we should increment the tx_tso_not_tcp
			 * counter. */
			goto drop;
		}

		KASSERT(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM,
		    ("%s: mbuf %p TSO without checksum offload %#x",
		     __func__, m, flags));

		error = ptnet_tx_offload_tso(ifp, m, etype, csum_start,
					     allow_ecn, hdr);
		if (error)
			goto drop;
	}

	return (m);

drop:
	m_freem(m);
	return (NULL);
}

static void
ptnet_vlan_tag_remove(struct mbuf *m)
{
	struct ether_vlan_header *evh;

	evh = mtod(m, struct ether_vlan_header *);
	m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag);
	m->m_flags |= M_VLANTAG;

	/* Strip the 802.1Q header. */
	bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN,
	    ETHER_HDR_LEN - ETHER_TYPE_LEN);
	m_adj(m, ETHER_VLAN_ENCAP_LEN);
}

/*
 * Use the checksum offset in the VirtIO header to set the
 * correct CSUM_* flags.
 */
static int
ptnet_rx_csum_by_offset(struct mbuf *m, uint16_t eth_type, int ip_start,
			struct virtio_net_hdr *hdr)
{
#if defined(INET) || defined(INET6)
	int offset = hdr->csum_start + hdr->csum_offset;
#endif

	/* Only do a basic sanity check on the offset. */
	switch (eth_type) {
#if defined(INET)
	case ETHERTYPE_IP:
		if (__predict_false(offset < ip_start + sizeof(struct ip)))
			return (1);
		break;
#endif
#if defined(INET6)
	case ETHERTYPE_IPV6:
		if (__predict_false(offset < ip_start + sizeof(struct ip6_hdr)))
			return (1);
		break;
#endif
	default:
		/* Here we should increment the rx_csum_bad_ethtype counter. */
		return (1);
	}

	/*
	 * Use the offset to determine the appropriate CSUM_* flags. This is
	 * a bit dirty, but we can get by with it since the checksum offsets
	 * happen to be different. We assume the host does not do IPv4
	 * header checksum offloading.
	 */
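	/*
	 * The offsets that disambiguate the three protocols are, in
	 * particular: offsetof(struct udphdr, uh_sum) = 6,
	 * offsetof(struct tcphdr, th_sum) = 16 and
	 * offsetof(struct sctphdr, checksum) = 8.
	 */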
	switch (hdr->csum_offset) {
	case offsetof(struct udphdr, uh_sum):
	case offsetof(struct tcphdr, th_sum):
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
		break;
	case offsetof(struct sctphdr, checksum):
		m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
		break;
	default:
		/* Here we should increment the rx_csum_bad_offset counter. */
		return (1);
	}

	return (0);
}

static int
ptnet_rx_csum_by_parse(struct mbuf *m, uint16_t eth_type, int ip_start,
		       struct virtio_net_hdr *hdr)
{
	int offset, proto;

	switch (eth_type) {
#if defined(INET)
	case ETHERTYPE_IP: {
		struct ip *ip;
		if (__predict_false(m->m_len < ip_start + sizeof(struct ip)))
			return (1);
		ip = (struct ip *)(m->m_data + ip_start);
		proto = ip->ip_p;
		offset = ip_start + (ip->ip_hl << 2);
		break;
	}
#endif
#if defined(INET6)
	case ETHERTYPE_IPV6:
		if (__predict_false(m->m_len < ip_start +
		    sizeof(struct ip6_hdr)))
			return (1);
		offset = ip6_lasthdr(m, ip_start, IPPROTO_IPV6, &proto);
		if (__predict_false(offset < 0))
			return (1);
		break;
#endif
	default:
		/* Here we should increment the rx_csum_bad_ethtype counter. */
		return (1);
	}

	switch (proto) {
	case IPPROTO_TCP:
		if (__predict_false(m->m_len < offset + sizeof(struct tcphdr)))
			return (1);
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
		break;
	case IPPROTO_UDP:
		if (__predict_false(m->m_len < offset + sizeof(struct udphdr)))
			return (1);
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
		break;
	case IPPROTO_SCTP:
		if (__predict_false(m->m_len < offset + sizeof(struct sctphdr)))
			return (1);
		m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
		break;
	default:
		/*
		 * For the remaining protocols, FreeBSD does not support
		 * checksum offloading, so the checksum will be recomputed.
		 */
#if 0
		if_printf(ifp, "%s: cksum offload of unsupported "
		    "protocol eth_type=%#x proto=%d csum_start=%d "
		    "csum_offset=%d\n", __func__, eth_type, proto,
		    hdr->csum_start, hdr->csum_offset);
#endif
		break;
	}

	return (0);
}

/*
 * Set the appropriate CSUM_* flags. Unfortunately, the information
 * provided is not directly useful to us. The VirtIO header gives the
 * offset of the checksum, which is all Linux needs, but this is not
 * how FreeBSD does things. We are forced to peek inside the packet
 * a bit.
 *
 * It would be nice if VirtIO gave us the L4 protocol or if FreeBSD
 * could accept the offsets and let the stack figure it out.
 */
static int
ptnet_rx_csum(struct mbuf *m, struct virtio_net_hdr *hdr)
{
	struct ether_header *eh;
	struct ether_vlan_header *evh;
	uint16_t eth_type;
	int offset, error;

	eh = mtod(m, struct ether_header *);
	eth_type = ntohs(eh->ether_type);
	if (eth_type == ETHERTYPE_VLAN) {
		/* BMV: We should handle nested VLAN tags too. */
		evh = mtod(m, struct ether_vlan_header *);
		eth_type = ntohs(evh->evl_proto);
		offset = sizeof(struct ether_vlan_header);
	} else
		offset = sizeof(struct ether_header);

	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
		error = ptnet_rx_csum_by_offset(m, eth_type, offset, hdr);
	else
		error = ptnet_rx_csum_by_parse(m, eth_type, offset, hdr);

	return (error);
}
/* End of offloading-related functions to be shared with vtnet. */

static inline void
ptnet_sync_tail(struct ptnet_csb_hg *pthg, struct netmap_kring *kring)
{
	struct netmap_ring *ring = kring->ring;

	/* Update hwcur and hwtail as known by the host. */
	ptnetmap_guest_read_kring_csb(pthg, kring);

	/* nm_sync_finalize */
	ring->tail = kring->rtail = kring->nr_hwtail;
}

static void
ptnet_ring_update(struct ptnet_queue *pq, struct netmap_kring *kring,
		  unsigned int head, unsigned int sync_flags)
{
	struct netmap_ring *ring = kring->ring;
	struct ptnet_csb_gh *ptgh = pq->ptgh;
	struct ptnet_csb_hg *pthg = pq->pthg;

	/* Some packets have been pushed to the netmap ring. We have
	 * to tell the host to process the new packets, updating cur
	 * and head in the CSB. */
	ring->head = ring->cur = head;

	/* Mimic nm_txsync_prologue/nm_rxsync_prologue. */
	kring->rcur = kring->rhead = head;

	ptnetmap_guest_write_kring_csb(ptgh, kring->rcur, kring->rhead);

	/* Kick the host if needed. */
	if (NM_ACCESS_ONCE(pthg->host_need_kick)) {
		ptgh->sync_flags = sync_flags;
		ptnet_kick(pq);
	}
}

#define PTNET_TX_NOSPACE(_h, _k, _min)	\
	((((_h) < (_k)->rtail) ? 0 : (_k)->nkr_num_slots) + \
		(_k)->rtail - (_h)) < (_min)
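/*
 * Example: with nkr_num_slots = 256, _h = 250 and rtail = 10, the
 * macro computes 256 + 10 - 250 = 16 free slots, so
 * PTNET_TX_NOSPACE(250, kring, 20) is true and the transmit path must
 * wait for the host to free more slots.
 */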
/* This function may be called by the network stack, or by the
 * taskqueue thread. */
static int
ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget,
			   bool may_resched)
{
	struct ptnet_softc *sc = pq->sc;
	bool have_vnet_hdr = sc->vnet_hdr_len;
	struct netmap_adapter *na = &sc->ptna->dr.up;
	if_t ifp = sc->ifp;
	unsigned int batch_count = 0;
	struct ptnet_csb_gh *ptgh;
	struct ptnet_csb_hg *pthg;
	struct netmap_kring *kring;
	struct netmap_ring *ring;
	struct netmap_slot *slot;
	unsigned int count = 0;
	unsigned int minspace;
	unsigned int head;
	unsigned int lim;
	struct mbuf *mhead;
	struct mbuf *mf;
	int nmbuf_bytes;
	uint8_t *nmbuf;

	if (!PTNET_Q_TRYLOCK(pq)) {
		/* We failed to acquire the lock, schedule the taskqueue. */
		RD(1, "Deferring TX work");
		if (may_resched) {
			taskqueue_enqueue(pq->taskq, &pq->task);
		}

		return 0;
	}

	if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
		PTNET_Q_UNLOCK(pq);
		RD(1, "Interface is down");
		return ENETDOWN;
	}

	ptgh = pq->ptgh;
	pthg = pq->pthg;
	kring = na->tx_rings[pq->kring_id];
	ring = kring->ring;
	lim = kring->nkr_num_slots - 1;
	head = ring->head;
	minspace = sc->min_tx_space;

	while (count < budget) {
		if (PTNET_TX_NOSPACE(head, kring, minspace)) {
			/* We ran out of slots, let's see if the host has
			 * freed up some, by reading hwcur and hwtail from
			 * the CSB. */
			ptnet_sync_tail(pthg, kring);

			if (PTNET_TX_NOSPACE(head, kring, minspace)) {
				/* Still no slots available. Reactivate the
				 * interrupts so that we can be notified
				 * when some free slots are made available by
				 * the host. */
				ptgh->guest_need_kick = 1;

				/* Double-check. */
				ptnet_sync_tail(pthg, kring);
				if (likely(PTNET_TX_NOSPACE(head, kring,
							    minspace))) {
					break;
				}

				RD(1, "Found more slots by doublecheck");
				/* More slots were freed before reactivating
				 * the interrupts. */
				ptgh->guest_need_kick = 0;
			}
		}
		mhead = drbr_peek(ifp, pq->bufring);
		if (!mhead) {
			break;
		}

		/* Initialize transmission state variables. */
		slot = ring->slot + head;
		nmbuf = NMB(na, slot);
		nmbuf_bytes = 0;

		/* If needed, prepare the virtio-net header at the beginning
		 * of the first slot. */
		if (have_vnet_hdr) {
			struct virtio_net_hdr *vh =
				(struct virtio_net_hdr *)nmbuf;

			/* For performance, we could replace this memset() with
			 * two 8-byte-wide writes. */
			memset(nmbuf, 0, PTNET_HDR_SIZE);
			if (mhead->m_pkthdr.csum_flags & PTNET_ALL_OFFLOAD) {
				mhead = ptnet_tx_offload(ifp, mhead, false,
							 vh);
				if (unlikely(!mhead)) {
					/* Packet dropped because errors
					 * occurred while preparing the vnet
					 * header. Let's go ahead with the next
					 * packet. */
					pq->stats.errors ++;
					drbr_advance(ifp, pq->bufring);
					continue;
				}
			}
			ND(1, "%s: [csum_flags %lX] vnet hdr: flags %x "
			      "csum_start %u csum_ofs %u hdr_len = %u "
			      "gso_size %u gso_type %x", __func__,
			      mhead->m_pkthdr.csum_flags, vh->flags,
			      vh->csum_start, vh->csum_offset, vh->hdr_len,
			      vh->gso_size, vh->gso_type);

			nmbuf += PTNET_HDR_SIZE;
			nmbuf_bytes += PTNET_HDR_SIZE;
		}
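		/* Copy the mbuf chain into consecutive netmap slots. A
		 * packet larger than a single netmap buffer is split
		 * across slots, with NS_MOREFRAG set on every slot but
		 * the last one, mirroring how the host reassembles it. */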
		for (mf = mhead; mf; mf = mf->m_next) {
			uint8_t *mdata = mf->m_data;
			int mlen = mf->m_len;

			for (;;) {
				int copy = NETMAP_BUF_SIZE(na) - nmbuf_bytes;

				if (mlen < copy) {
					copy = mlen;
				}
				memcpy(nmbuf, mdata, copy);

				mdata += copy;
				mlen -= copy;
				nmbuf += copy;
				nmbuf_bytes += copy;

				if (!mlen) {
					break;
				}

				slot->len = nmbuf_bytes;
				slot->flags = NS_MOREFRAG;

				head = nm_next(head, lim);
				KASSERT(head != ring->tail,
					("Unexpectedly run out of TX space"));
				slot = ring->slot + head;
				nmbuf = NMB(na, slot);
				nmbuf_bytes = 0;
			}
		}

		/* Complete last slot and update head. */
		slot->len = nmbuf_bytes;
		slot->flags = 0;
		head = nm_next(head, lim);

		/* Consume the packet just processed. */
		drbr_advance(ifp, pq->bufring);

		/* Copy the packet to listeners. */
		ETHER_BPF_MTAP(ifp, mhead);

		pq->stats.packets ++;
		pq->stats.bytes += mhead->m_pkthdr.len;
		if (mhead->m_flags & M_MCAST) {
			pq->stats.mcasts ++;
		}

		m_freem(mhead);

		count ++;
		if (++batch_count == PTNET_TX_BATCH) {
			ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM);
			batch_count = 0;
		}
	}

	if (batch_count) {
		ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM);
	}

	if (count >= budget && may_resched) {
		DBG(RD(1, "out of budget: resched, %d mbufs pending\n",
		       drbr_inuse(ifp, pq->bufring)));
		taskqueue_enqueue(pq->taskq, &pq->task);
	}

	PTNET_Q_UNLOCK(pq);

	return count;
}

static int
ptnet_transmit(if_t ifp, struct mbuf *m)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	struct ptnet_queue *pq;
	unsigned int queue_idx;
	int err;

	DBG(device_printf(sc->dev, "transmit %p\n", m));

	/* Insert 802.1Q header if needed. */
	if (m->m_flags & M_VLANTAG) {
		m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
		if (m == NULL) {
			return ENOBUFS;
		}
		m->m_flags &= ~M_VLANTAG;
	}
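	/* Example of the queue selection below: with num_tx_rings = 4, a
	 * flow with flowid 0x2f maps to TX queue 0x2f % 4 = 3, while an
	 * unhashed packet sent from CPU 5 uses queue 5 % 4 = 1. */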
	/* Get the flow-id if available. */
	queue_idx = (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) ?
		    m->m_pkthdr.flowid : curcpu;

	if (unlikely(queue_idx >= sc->num_tx_rings)) {
		queue_idx %= sc->num_tx_rings;
	}

	pq = sc->queues + queue_idx;

	err = drbr_enqueue(ifp, pq->bufring, m);
	if (err) {
		/* ENOBUFS when the bufring is full */
		RD(1, "%s: drbr_enqueue() failed %d\n",
		   __func__, err);
		pq->stats.errors ++;
		return err;
	}

	if (ifp->if_capenable & IFCAP_POLLING) {
		/* If polling is on, the transmit queues will be
		 * drained by the poller. */
		return 0;
	}

	err = ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true);

	return (err < 0) ? err : 0;
}

static unsigned int
ptnet_rx_discard(struct netmap_kring *kring, unsigned int head)
{
	struct netmap_ring *ring = kring->ring;
	struct netmap_slot *slot = ring->slot + head;

	for (;;) {
		head = nm_next(head, kring->nkr_num_slots - 1);
		if (!(slot->flags & NS_MOREFRAG) || head == ring->tail) {
			break;
		}
		slot = ring->slot + head;
	}

	return head;
}

static inline struct mbuf *
ptnet_rx_slot(struct mbuf *mtail, uint8_t *nmbuf, unsigned int nmbuf_len)
{
	uint8_t *mdata = mtod(mtail, uint8_t *) + mtail->m_len;

	do {
		unsigned int copy;

		if (mtail->m_len == MCLBYTES) {
			struct mbuf *mf;

			mf = m_getcl(M_NOWAIT, MT_DATA, 0);
			if (unlikely(!mf)) {
				return NULL;
			}

			mtail->m_next = mf;
			mtail = mf;
			mdata = mtod(mtail, uint8_t *);
			mtail->m_len = 0;
		}

		copy = MCLBYTES - mtail->m_len;
		if (nmbuf_len < copy) {
			copy = nmbuf_len;
		}

		memcpy(mdata, nmbuf, copy);

		nmbuf += copy;
		nmbuf_len -= copy;
		mdata += copy;
		mtail->m_len += copy;
	} while (nmbuf_len);

	return mtail;
}
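/*
 * Receive-side processing: drain up to budget packets from the netmap
 * RX ring, turning each one into an mbuf chain and feeding it to the
 * network stack. ptnet_rx_slot() above appends the payload of one
 * netmap slot to the tail of the chain, allocating additional
 * MCLBYTES-sized clusters as the current one fills up.
 */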
static int
ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched)
{
	struct ptnet_softc *sc = pq->sc;
	bool have_vnet_hdr = sc->vnet_hdr_len;
	struct ptnet_csb_gh *ptgh = pq->ptgh;
	struct ptnet_csb_hg *pthg = pq->pthg;
	struct netmap_adapter *na = &sc->ptna->dr.up;
	struct netmap_kring *kring = na->rx_rings[pq->kring_id];
	struct netmap_ring *ring = kring->ring;
	unsigned int const lim = kring->nkr_num_slots - 1;
	unsigned int batch_count = 0;
	if_t ifp = sc->ifp;
	unsigned int count = 0;
	uint32_t head;

	PTNET_Q_LOCK(pq);

	if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
		goto unlock;
	}

	kring->nr_kflags &= ~NKR_PENDINTR;

	head = ring->head;
	while (count < budget) {
		uint32_t prev_head = head;
		struct mbuf *mhead, *mtail;
		struct virtio_net_hdr *vh;
		struct netmap_slot *slot;
		unsigned int nmbuf_len;
		uint8_t *nmbuf;
		int deliver = 1; /* Deliver the mbuf to the network stack. */
host_sync:
		if (head == ring->tail) {
			/* We ran out of slots, let's see if the host has
			 * added some, by reading hwcur and hwtail from
			 * the CSB. */
			ptnet_sync_tail(pthg, kring);

			if (head == ring->tail) {
				/* Still no slots available. Reactivate
				 * interrupts as they were disabled by the
				 * host thread right before issuing the
				 * last interrupt. */
				ptgh->guest_need_kick = 1;

				/* Double-check. */
				ptnet_sync_tail(pthg, kring);
				if (likely(head == ring->tail)) {
					break;
				}
				ptgh->guest_need_kick = 0;
			}
		}
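		/* The enable-then-double-check dance above (and the same
		 * pattern in the TX path) is meant to close a race window:
		 * slots that arrive after the first ptnet_sync_tail() but
		 * before interrupts are re-enabled would generate no
		 * notification, so we must look one more time before
		 * giving up and waiting for the interrupt. */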
		/* Initialize ring state variables, possibly grabbing the
		 * virtio-net header. */
		slot = ring->slot + head;
		nmbuf = NMB(na, slot);
		nmbuf_len = slot->len;

		vh = (struct virtio_net_hdr *)nmbuf;
		if (have_vnet_hdr) {
			if (unlikely(nmbuf_len < PTNET_HDR_SIZE)) {
				/* There is no good reason why the host should
				 * put the header in multiple netmap slots.
				 * If this is the case, discard. */
				RD(1, "Fragmented vnet-hdr: dropping");
				head = ptnet_rx_discard(kring, head);
				pq->stats.iqdrops ++;
				deliver = 0;
				goto skip;
			}
			ND(1, "%s: vnet hdr: flags %x csum_start %u "
			      "csum_ofs %u hdr_len = %u gso_size %u "
			      "gso_type %x", __func__, vh->flags,
			      vh->csum_start, vh->csum_offset, vh->hdr_len,
			      vh->gso_size, vh->gso_type);
			nmbuf += PTNET_HDR_SIZE;
			nmbuf_len -= PTNET_HDR_SIZE;
		}

		/* Allocate the head of a new mbuf chain.
		 * We use m_getcl() to allocate an mbuf with standard cluster
		 * size (MCLBYTES). In the future we could use m_getjcl()
		 * to choose different sizes. */
		mhead = mtail = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (unlikely(mhead == NULL)) {
			device_printf(sc->dev, "%s: failed to allocate mbuf "
				      "head\n", __func__);
			pq->stats.errors ++;
			break;
		}

		/* Initialize the mbuf state variables. */
		mhead->m_pkthdr.len = nmbuf_len;
		mtail->m_len = 0;

		/* Scan all the netmap slots containing the current packet. */
		for (;;) {
			DBG(device_printf(sc->dev, "%s: h %u t %u rcv frag "
					  "len %u, flags %u\n", __func__,
					  head, ring->tail, slot->len,
					  slot->flags));

			mtail = ptnet_rx_slot(mtail, nmbuf, nmbuf_len);
			if (unlikely(!mtail)) {
				/* Ouch. We ran out of memory while processing
				 * a packet. We have to restore the previous
				 * head position, free the mbuf chain, and
				 * schedule the taskqueue to give the packet
				 * another chance. */
				device_printf(sc->dev, "%s: failed to allocate"
					      " mbuf frag, reset head %u --> %u\n",
					      __func__, head, prev_head);
				head = prev_head;
				m_freem(mhead);
				pq->stats.errors ++;
				if (may_resched) {
					taskqueue_enqueue(pq->taskq,
							  &pq->task);
				}
				goto escape;
			}

			/* We have to increment head irrespective of the
			 * NS_MOREFRAG being set or not. */
			head = nm_next(head, lim);

			if (!(slot->flags & NS_MOREFRAG)) {
				break;
			}

			if (unlikely(head == ring->tail)) {
				/* The very last slot prepared by the host has
				 * the NS_MOREFRAG set. Drop it and continue
				 * the outer cycle (to do the double-check). */
				RD(1, "Incomplete packet: dropping");
				m_freem(mhead);
				pq->stats.iqdrops ++;
				goto host_sync;
			}

			slot = ring->slot + head;
			nmbuf = NMB(na, slot);
			nmbuf_len = slot->len;
			mhead->m_pkthdr.len += nmbuf_len;
		}

		mhead->m_pkthdr.rcvif = ifp;
		mhead->m_pkthdr.csum_flags = 0;

		/* Store the queue idx in the packet header. */
		mhead->m_pkthdr.flowid = pq->kring_id;
		M_HASHTYPE_SET(mhead, M_HASHTYPE_OPAQUE);

		if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
			struct ether_header *eh;

			eh = mtod(mhead, struct ether_header *);
			if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
				ptnet_vlan_tag_remove(mhead);
				/*
				 * With the 802.1Q header removed, update the
				 * checksum starting location accordingly.
				 */
				if (vh->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
					vh->csum_start -= ETHER_VLAN_ENCAP_LEN;
			}
		}

		if (have_vnet_hdr && (vh->flags & (VIRTIO_NET_HDR_F_NEEDS_CSUM
					| VIRTIO_NET_HDR_F_DATA_VALID))) {
			if (unlikely(ptnet_rx_csum(mhead, vh))) {
				m_freem(mhead);
				RD(1, "Csum offload error: dropping");
				pq->stats.iqdrops ++;
				deliver = 0;
			}
		}

skip:
		count ++;
		if (++batch_count >= PTNET_RX_BATCH) {
			/* Some packets have been (or will be) pushed to the
			 * network stack. We need to update the CSB to tell
			 * the host about the new ring->cur and ring->head
			 * (RX buffer refill). */
			ptnet_ring_update(pq, kring, head, NAF_FORCE_READ);
			batch_count = 0;
		}

		if (likely(deliver)) {
			pq->stats.packets ++;
			pq->stats.bytes += mhead->m_pkthdr.len;

			PTNET_Q_UNLOCK(pq);
			(*ifp->if_input)(ifp, mhead);
			PTNET_Q_LOCK(pq);
			/* The ring->head index (and related indices) are
			 * updated under pq lock by ptnet_ring_update().
			 * Since we dropped the lock to call if_input(), we
			 * must reload ring->head and restart processing the
			 * ring from there. */
			head = ring->head;

			if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
				/* The interface has gone down while we didn't
				 * have the lock. Stop any processing and
				 * exit. */
				goto unlock;
			}
		}
	}
escape:
	if (batch_count) {
		ptnet_ring_update(pq, kring, head, NAF_FORCE_READ);
	}

	if (count >= budget && may_resched) {
		/* If we ran out of budget or the double-check found new
		 * slots to process, schedule the taskqueue. */
		DBG(RD(1, "out of budget: resched h %u t %u\n",
		       head, ring->tail));
		taskqueue_enqueue(pq->taskq, &pq->task);
	}
unlock:
	PTNET_Q_UNLOCK(pq);

	return count;
}

static void
ptnet_rx_task(void *context, int pending)
{
	struct ptnet_queue *pq = context;

	DBG(RD(1, "%s: pq #%u\n", __func__, pq->kring_id));
	ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);
}

static void
ptnet_tx_task(void *context, int pending)
{
	struct ptnet_queue *pq = context;

	DBG(RD(1, "%s: pq #%u\n", __func__, pq->kring_id));
	ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true);
}
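/*
 * Sketch of the budget accounting used by the poller below: the
 * budget is split evenly across all rings, with at least one packet
 * each; on later rounds each queue's share is further capped by the
 * remaining budget, and a scan that consumes nothing stops polling
 * early. For example, with budget = 33 and 4 rings, each queue may
 * initially process up to 33 / 4 = 8 packets per round.
 */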
#ifdef DEVICE_POLLING
/* We don't need to handle POLL_AND_CHECK_STATUS and POLL_ONLY
 * differently, since we don't have an Interrupt Status Register. */
static int
ptnet_poll(if_t ifp, enum poll_cmd cmd, int budget)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	unsigned int queue_budget;
	unsigned int count = 0;
	bool borrow = false;
	int i;

	KASSERT(sc->num_rings > 0, ("Found no queues while polling ptnet"));
	queue_budget = MAX(budget / sc->num_rings, 1);
	RD(1, "Per-queue budget is %d", queue_budget);

	while (budget) {
		unsigned int rcnt = 0;

		for (i = 0; i < sc->num_rings; i++) {
			struct ptnet_queue *pq = sc->queues + i;

			if (borrow) {
				queue_budget = MIN(queue_budget, budget);
				if (queue_budget == 0) {
					break;
				}
			}

			if (i < sc->num_tx_rings) {
				rcnt += ptnet_drain_transmit_queue(pq,
						queue_budget, false);
			} else {
				rcnt += ptnet_rx_eof(pq, queue_budget,
						     false);
			}
		}

		if (!rcnt) {
			/* A scan of the queues gave no result, we can
			 * stop here. */
			break;
		}

		if (rcnt > budget) {
			/* This may happen when initial budget < sc->num_rings,
			 * since one packet budget is given to each queue
			 * anyway. Just pretend we didn't eat "so much". */
			rcnt = budget;
		}
		count += rcnt;
		budget -= rcnt;
		borrow = true;
	}

	return count;
}
#endif /* DEVICE_POLLING */