/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _IF_VTNETVAR_H
#define _IF_VTNETVAR_H

#ifdef ALTQ
#define VTNET_LEGACY_TX
#endif

struct vtnet_softc;

struct vtnet_statistics {
	uint64_t	mbuf_alloc_failed;

	uint64_t	rx_frame_too_large;
	uint64_t	rx_enq_replacement_failed;
	uint64_t	rx_mergeable_failed;
	uint64_t	rx_csum_bad_ethtype;
	uint64_t	rx_csum_bad_ipproto;
	uint64_t	rx_csum_bad_offset;
	uint64_t	rx_csum_bad_proto;
	uint64_t	tx_csum_unknown_ethtype;
	uint64_t	tx_csum_proto_mismatch;
	uint64_t	tx_tso_not_tcp;
	uint64_t	tx_tso_without_csum;
	uint64_t	tx_defragged;
	uint64_t	tx_defrag_failed;

	/*
	 * These are accumulated from each Rx/Tx queue.
	 */
	uint64_t	rx_csum_failed;
	uint64_t	rx_csum_offloaded;
	uint64_t	rx_task_rescheduled;
	uint64_t	tx_csum_offloaded;
	uint64_t	tx_tso_offloaded;
	uint64_t	tx_task_rescheduled;
};
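/*
 * The "accumulated" counters above are sums of the per-queue statistics
 * defined below. A rough sketch of that aggregation (illustrative only;
 * the loop bounds and field pairings mirror how the driver reports stats
 * via sysctl, not its literal code):
 *
 *	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
 *		st->rx_csum_failed += sc->vtnet_rxqs[i].vtnrx_stats.vrxs_csum_failed;
 *		st->rx_task_rescheduled += sc->vtnet_rxqs[i].vtnrx_stats.vrxs_rescheduled;
 *		st->tx_tso_offloaded += sc->vtnet_txqs[i].vtntx_stats.vtxs_tso;
 *	}
 */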
struct vtnet_rxq_stats {
	uint64_t	vrxs_ipackets;	/* if_ipackets */
	uint64_t	vrxs_ibytes;	/* if_ibytes */
	uint64_t	vrxs_iqdrops;	/* if_iqdrops */
	uint64_t	vrxs_ierrors;	/* if_ierrors */
	uint64_t	vrxs_csum;
	uint64_t	vrxs_csum_failed;
	uint64_t	vrxs_host_lro;
	uint64_t	vrxs_rescheduled;
};

struct vtnet_rxq {
	struct mtx		 vtnrx_mtx;
	struct vtnet_softc	*vtnrx_sc;
	struct virtqueue	*vtnrx_vq;
	struct sglist		*vtnrx_sg;
	int			 vtnrx_id;
	struct vtnet_rxq_stats	 vtnrx_stats;
	struct taskqueue	*vtnrx_tq;
	struct task		 vtnrx_intrtask;
	struct lro_ctrl		 vtnrx_lro;
#ifdef DEV_NETMAP
	uint32_t		 vtnrx_nm_refill;
	struct virtio_net_hdr_mrg_rxbuf vtnrx_shrhdr;
#endif /* DEV_NETMAP */
	char			 vtnrx_name[16];
} __aligned(CACHE_LINE_SIZE);

#define VTNET_RXQ_LOCK(_rxq)	mtx_lock(&(_rxq)->vtnrx_mtx)
#define VTNET_RXQ_UNLOCK(_rxq)	mtx_unlock(&(_rxq)->vtnrx_mtx)
#define VTNET_RXQ_LOCK_ASSERT(_rxq)		\
    mtx_assert(&(_rxq)->vtnrx_mtx, MA_OWNED)
#define VTNET_RXQ_LOCK_ASSERT_NOTOWNED(_rxq)	\
    mtx_assert(&(_rxq)->vtnrx_mtx, MA_NOTOWNED)

struct vtnet_txq_stats {
	uint64_t	vtxs_opackets;	/* if_opackets */
	uint64_t	vtxs_obytes;	/* if_obytes */
	uint64_t	vtxs_omcasts;	/* if_omcasts */
	uint64_t	vtxs_csum;
	uint64_t	vtxs_tso;
	uint64_t	vtxs_rescheduled;
};

struct vtnet_txq {
	struct mtx		 vtntx_mtx;
	struct vtnet_softc	*vtntx_sc;
	struct virtqueue	*vtntx_vq;
	struct sglist		*vtntx_sg;
#ifndef VTNET_LEGACY_TX
	struct buf_ring		*vtntx_br;
#endif
	int			 vtntx_id;
	int			 vtntx_watchdog;
	int			 vtntx_intr_threshold;
	struct vtnet_txq_stats	 vtntx_stats;
	struct taskqueue	*vtntx_tq;
	struct task		 vtntx_intrtask;
#ifndef VTNET_LEGACY_TX
	struct task		 vtntx_defrtask;
#endif
#ifdef DEV_NETMAP
	struct virtio_net_hdr_mrg_rxbuf vtntx_shrhdr;
#endif /* DEV_NETMAP */
	char			 vtntx_name[16];
} __aligned(CACHE_LINE_SIZE);

#define VTNET_TXQ_LOCK(_txq)	mtx_lock(&(_txq)->vtntx_mtx)
#define VTNET_TXQ_TRYLOCK(_txq)	mtx_trylock(&(_txq)->vtntx_mtx)
#define VTNET_TXQ_UNLOCK(_txq)	mtx_unlock(&(_txq)->vtntx_mtx)
#define VTNET_TXQ_LOCK_ASSERT(_txq)		\
    mtx_assert(&(_txq)->vtntx_mtx, MA_OWNED)
#define VTNET_TXQ_LOCK_ASSERT_NOTOWNED(_txq)	\
    mtx_assert(&(_txq)->vtntx_mtx, MA_NOTOWNED)
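/*
 * Sketch of how the multiqueue transmit path typically combines the
 * try-lock with the per-queue buf_ring (illustrative only, not the
 * literal driver code):
 *
 *	if (VTNET_TXQ_TRYLOCK(txq)) {
 *		... drain txq->vtntx_br onto txq->vtntx_vq ...
 *		VTNET_TXQ_UNLOCK(txq);
 *	} else
 *		taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_defrtask);
 */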
struct vtnet_softc {
	device_t		 vtnet_dev;
	if_t			 vtnet_ifp;
	struct vtnet_rxq	*vtnet_rxqs;
	struct vtnet_txq	*vtnet_txqs;
	pfil_head_t		 vtnet_pfil;
	uint64_t		 vtnet_features;

	uint32_t		 vtnet_flags;
#define VTNET_FLAG_MODERN	 0x0001
#define VTNET_FLAG_MAC		 0x0002
#define VTNET_FLAG_CTRL_VQ	 0x0004
#define VTNET_FLAG_CTRL_RX	 0x0008
#define VTNET_FLAG_CTRL_MAC	 0x0010
#define VTNET_FLAG_VLAN_FILTER	 0x0020
#define VTNET_FLAG_TSO_ECN	 0x0040
#define VTNET_FLAG_MRG_RXBUFS	 0x0080
#define VTNET_FLAG_LRO_NOMRG	 0x0100
#define VTNET_FLAG_MQ		 0x0200
#define VTNET_FLAG_INDIRECT	 0x0400
#define VTNET_FLAG_EVENT_IDX	 0x0800
#define VTNET_FLAG_SUSPENDED	 0x1000
#define VTNET_FLAG_FIXUP_NEEDS_CSUM 0x2000
#define VTNET_FLAG_SW_LRO	 0x4000

	u_int			 vtnet_hdr_size;
	int			 vtnet_rx_nmbufs;
	int			 vtnet_rx_clustersz;
	int			 vtnet_rx_nsegs;
	int			 vtnet_rx_process_limit;
	int			 vtnet_link_active;
	int			 vtnet_act_vq_pairs;
	int			 vtnet_req_vq_pairs;
	int			 vtnet_max_vq_pairs;
	int			 vtnet_tx_nsegs;
	int			 vtnet_if_flags;
	u_int			 vtnet_max_mtu;
	int			 vtnet_lro_entry_count;
	int			 vtnet_lro_mbufq_depth;

	struct virtqueue	*vtnet_ctrl_vq;
	struct vtnet_mac_filter	*vtnet_mac_filter;
	uint32_t		*vtnet_vlan_filter;

	uint64_t		 vtnet_negotiated_features;
	struct vtnet_statistics	 vtnet_stats;
	struct callout		 vtnet_tick_ch;
	struct ifmedia		 vtnet_media;
	eventhandler_tag	 vtnet_vlan_attach;
	eventhandler_tag	 vtnet_vlan_detach;

	struct mtx		 vtnet_mtx;
	char			 vtnet_mtx_name[16];
	uint8_t			 vtnet_hwaddr[ETHER_ADDR_LEN];
};

static bool
vtnet_modern(struct vtnet_softc *sc)
{
	return ((sc->vtnet_flags & VTNET_FLAG_MODERN) != 0);
}

static bool
vtnet_software_lro(struct vtnet_softc *sc)
{
	return ((sc->vtnet_flags & VTNET_FLAG_SW_LRO) != 0);
}
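/*
 * Example of how these predicates and flags are typically consumed when
 * sizing the per-packet VirtIO header (a sketch of the feature-setup
 * logic, not necessarily the exact driver code):
 *
 *	if (vtnet_modern(sc) || (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS))
 *		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
 *	else
 *		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
 */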
/*
 * Maximum number of queue pairs we will autoconfigure to.
 */
#define VTNET_MAX_QUEUE_PAIRS	32

/*
 * Additional completed entries can appear in a virtqueue before we can
 * reenable interrupts. Number of times to retry before scheduling the
 * taskqueue to process the completed entries.
 */
#define VTNET_INTR_DISABLE_RETRIES	4

/*
 * Similarly, additional completed entries can appear in a virtqueue
 * between when it was last checked and when the host is notified. Number
 * of times to retry before scheduling the taskqueue to process the
 * queue.
 */
#define VTNET_NOTIFY_RETRIES		4

/*
 * Number of words to allocate for the VLAN shadow table. There is one
 * bit for each VLAN.
 */
#define VTNET_VLAN_FILTER_NWORDS	(4096 / 32)
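/*
 * Each VLAN ID maps to a single bit in the shadow table. A sketch of the
 * word/bit indexing used when adding a tag (illustrative; the variable
 * names are not taken from the driver):
 *
 *	idx = (tag >> 5) & 0x7F;	word index, i.e. tag / 32
 *	bit = tag & 0x1F;		bit within that word
 *	sc->vtnet_vlan_filter[idx] |= (1 << bit);
 */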
/* We depend on these being the same size (and same layout). */
CTASSERT(sizeof(struct virtio_net_hdr_mrg_rxbuf) ==
    sizeof(struct virtio_net_hdr_v1));

/*
 * In legacy VirtIO when mergeable buffers are not negotiated, this structure
 * is placed at the beginning of the mbuf data. Use 4 bytes of pad to keep
 * the VirtIO header and the data non-contiguous and the frame's payload
 * 4-byte aligned. Note this padding would not be necessary if the
 * VIRTIO_F_ANY_LAYOUT feature was negotiated (but we don't support that yet).
 *
 * In modern VirtIO or when mergeable buffers are negotiated, the host puts
 * the VirtIO header at the beginning of the first mbuf's data.
 */
#define VTNET_RX_HEADER_PAD	4
struct vtnet_rx_header {
	struct virtio_net_hdr	vrh_hdr;
	char			vrh_pad[VTNET_RX_HEADER_PAD];
} __packed;

/*
 * For each outgoing frame, the vtnet_tx_header below is allocated from
 * the vtnet_tx_header_zone.
 */
struct vtnet_tx_header {
	union {
		struct virtio_net_hdr		hdr;
		struct virtio_net_hdr_mrg_rxbuf	mhdr;
		struct virtio_net_hdr_v1	v1hdr;
	} vth_uhdr;

	struct mbuf *vth_mbuf;
};

/*
 * The VirtIO specification does not place a limit on the number of MAC
 * addresses the guest driver may request to be filtered. In practice,
 * the host is constrained by available resources. To simplify this driver,
 * impose a reasonably high limit of MAC addresses we will filter before
 * falling back to promiscuous or all-multicast modes.
 */
#define VTNET_MAX_MAC_ENTRIES	128

/*
 * The driver version of struct virtio_net_ctrl_mac, but with our predefined
 * number of MAC addresses allocated. This structure is shared with the host,
 * so the nentries field is in the correct VirtIO endianness.
 */
struct vtnet_mac_table {
	uint32_t	nentries;
	uint8_t		macs[VTNET_MAX_MAC_ENTRIES][ETHER_ADDR_LEN];
} __packed;

struct vtnet_mac_filter {
	struct vtnet_mac_table	vmf_unicast;
	uint32_t		vmf_pad; /* Make tables non-contiguous. */
	struct vtnet_mac_table	vmf_multicast;
};

/*
 * The MAC filter table is malloc(9)'d when needed. Ensure it will
 * always fit in one segment.
 */
CTASSERT(sizeof(struct vtnet_mac_filter) <= PAGE_SIZE);
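/*
 * Worked sizing behind the assertion above (assuming 6-byte Ethernet
 * addresses and a 4 KB minimum page size): each __packed vtnet_mac_table
 * is 4 + (128 * 6) = 772 bytes, so the whole filter occupies
 * 772 + 4 + 772 = 1548 bytes, comfortably below PAGE_SIZE.
 */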
#define VTNET_TX_TIMEOUT	5
#define VTNET_CSUM_OFFLOAD	(CSUM_TCP | CSUM_UDP)
#define VTNET_CSUM_OFFLOAD_IPV6	(CSUM_TCP_IPV6 | CSUM_UDP_IPV6)

#define VTNET_CSUM_ALL_OFFLOAD	\
    (VTNET_CSUM_OFFLOAD | VTNET_CSUM_OFFLOAD_IPV6 | CSUM_TSO)

#define VTNET_COMMON_FEATURES \
    (VIRTIO_NET_F_MAC			| \
     VIRTIO_NET_F_STATUS		| \
     VIRTIO_NET_F_CTRL_GUEST_OFFLOADS	| \
     VIRTIO_NET_F_MTU			| \
     VIRTIO_NET_F_CTRL_VQ		| \
     VIRTIO_NET_F_CTRL_RX		| \
     VIRTIO_NET_F_CTRL_MAC_ADDR		| \
     VIRTIO_NET_F_CTRL_VLAN		| \
     VIRTIO_NET_F_CSUM			| \
     VIRTIO_NET_F_HOST_TSO4		| \
     VIRTIO_NET_F_HOST_TSO6		| \
     VIRTIO_NET_F_HOST_ECN		| \
     VIRTIO_NET_F_GUEST_CSUM		| \
     VIRTIO_NET_F_GUEST_TSO4		| \
     VIRTIO_NET_F_GUEST_TSO6		| \
     VIRTIO_NET_F_GUEST_ECN		| \
     VIRTIO_NET_F_MRG_RXBUF		| \
     VIRTIO_NET_F_MQ			| \
     VIRTIO_NET_F_SPEED_DUPLEX		| \
     VIRTIO_RING_F_EVENT_IDX		| \
     VIRTIO_RING_F_INDIRECT_DESC)

#define VTNET_MODERN_FEATURES (VTNET_COMMON_FEATURES)
#define VTNET_LEGACY_FEATURES (VTNET_COMMON_FEATURES | VIRTIO_NET_F_GSO)

/*
 * The VIRTIO_NET_F_HOST_TSO[46] features permit us to send frames
 * larger than 1514 bytes to the host.
 */
#define VTNET_TSO_FEATURES (VIRTIO_NET_F_GSO | VIRTIO_NET_F_HOST_TSO4 | \
    VIRTIO_NET_F_HOST_TSO6 | VIRTIO_NET_F_HOST_ECN)

/*
 * The VIRTIO_NET_F_GUEST_TSO[46] features permit the host to send us
 * frames larger than 1514 bytes.
 */
#define VTNET_LRO_FEATURES (VIRTIO_NET_F_GUEST_TSO4 | \
    VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN)

#define VTNET_MIN_MTU		68
#define VTNET_MAX_MTU		65536
#define VTNET_MAX_RX_SIZE	65550

/*
 * Used to preallocate the VQ indirect descriptors. Modern and mergeable
 * buffers do not require one segment for the VirtIO header since it is
 * placed inline at the beginning of the receive buffer.
 */
#define VTNET_RX_SEGS_HDR_INLINE	1
#define VTNET_RX_SEGS_HDR_SEPARATE	2
#define VTNET_RX_SEGS_LRO_NOMRG		34
#define VTNET_TX_SEGS_MIN		32
#define VTNET_TX_SEGS_MAX		64

CTASSERT(((VTNET_RX_SEGS_LRO_NOMRG - 1) * MCLBYTES) >= VTNET_MAX_RX_SIZE);
CTASSERT(((VTNET_TX_SEGS_MAX - 1) * MCLBYTES) >= VTNET_MAX_MTU);
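/*
 * The assertions above work out as follows with the usual 2 KB mbuf
 * cluster size (MCLBYTES = 2048): (34 - 1) * 2048 = 67584 >= 65550 for
 * receive, and (64 - 1) * 2048 = 129024 >= 65536 for transmit, so a
 * maximum-sized frame always fits in the remaining data segments.
 */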
/*
 * Number of slots in the Tx bufrings. This value matches most other
 * multiqueue drivers.
 */
#define VTNET_DEFAULT_BUFRING_SIZE	4096

#define VTNET_CORE_MTX(_sc)		&(_sc)->vtnet_mtx
#define VTNET_CORE_LOCK(_sc)		mtx_lock(VTNET_CORE_MTX((_sc)))
#define VTNET_CORE_UNLOCK(_sc)		mtx_unlock(VTNET_CORE_MTX((_sc)))
#define VTNET_CORE_LOCK_DESTROY(_sc)	mtx_destroy(VTNET_CORE_MTX((_sc)))
#define VTNET_CORE_LOCK_ASSERT(_sc)		\
    mtx_assert(VTNET_CORE_MTX((_sc)), MA_OWNED)
#define VTNET_CORE_LOCK_ASSERT_NOTOWNED(_sc)	\
    mtx_assert(VTNET_CORE_MTX((_sc)), MA_NOTOWNED)

#define VTNET_CORE_LOCK_INIT(_sc) do {					\
    snprintf((_sc)->vtnet_mtx_name, sizeof((_sc)->vtnet_mtx_name),	\
        "%s", device_get_nameunit((_sc)->vtnet_dev));			\
    mtx_init(VTNET_CORE_MTX((_sc)), (_sc)->vtnet_mtx_name,		\
        "VTNET Core Lock", MTX_DEF);					\
} while (0)

/*
 * Values for the init_mode argument of vtnet_init_locked().
 */
#define VTNET_INIT_NETMAP_ENTER	1
#define VTNET_INIT_NETMAP_EXIT	2

#endif /* _IF_VTNETVAR_H */