/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _IF_VTNETVAR_H
#define _IF_VTNETVAR_H

struct vtnet_softc;

struct vtnet_statistics {
	uint64_t	mbuf_alloc_failed;

	uint64_t	rx_frame_too_large;
	uint64_t	rx_enq_replacement_failed;
	uint64_t	rx_mergeable_failed;
	uint64_t	rx_csum_bad_ethtype;
	uint64_t	rx_csum_bad_ipproto;
	uint64_t	rx_csum_bad_offset;
	uint64_t	rx_csum_bad_proto;
	uint64_t	tx_csum_bad_ethtype;
	uint64_t	tx_tso_bad_ethtype;
	uint64_t	tx_tso_not_tcp;
	uint64_t	tx_defragged;
	uint64_t	tx_defrag_failed;

	/*
	 * These are accumulated from each Rx/Tx queue.
	 */
	uint64_t	rx_csum_failed;
	uint64_t	rx_csum_offloaded;
	uint64_t	rx_task_rescheduled;
	uint64_t	tx_csum_offloaded;
	uint64_t	tx_tso_offloaded;
	uint64_t	tx_task_rescheduled;
};

struct vtnet_rxq_stats {
	uint64_t	vrxs_ipackets;	/* if_ipackets */
	uint64_t	vrxs_ibytes;	/* if_ibytes */
	uint64_t	vrxs_iqdrops;	/* if_iqdrops */
	uint64_t	vrxs_ierrors;	/* if_ierrors */
	uint64_t	vrxs_csum;
	uint64_t	vrxs_csum_failed;
	uint64_t	vrxs_rescheduled;
};

struct vtnet_rxq {
	struct mtx		 vtnrx_mtx;
	struct vtnet_softc	*vtnrx_sc;
	struct virtqueue	*vtnrx_vq;
	struct sglist		*vtnrx_sg;
	int			 vtnrx_id;
	int			 vtnrx_process_limit;
	struct vtnet_rxq_stats	 vtnrx_stats;
	struct taskqueue	*vtnrx_tq;
	struct task		 vtnrx_intrtask;
	char			 vtnrx_name[16];
} __aligned(CACHE_LINE_SIZE);

#define VTNET_RXQ_LOCK(_rxq)	mtx_lock(&(_rxq)->vtnrx_mtx)
#define VTNET_RXQ_UNLOCK(_rxq)	mtx_unlock(&(_rxq)->vtnrx_mtx)
#define VTNET_RXQ_LOCK_ASSERT(_rxq)		\
    mtx_assert(&(_rxq)->vtnrx_mtx, MA_OWNED)
#define VTNET_RXQ_LOCK_ASSERT_NOTOWNED(_rxq)	\
    mtx_assert(&(_rxq)->vtnrx_mtx, MA_NOTOWNED)

struct vtnet_txq_stats {
	uint64_t	vtxs_opackets;	/* if_opackets */
	uint64_t	vtxs_obytes;	/* if_obytes */
	uint64_t	vtxs_omcasts;	/* if_omcasts */
	uint64_t	vtxs_csum;
	uint64_t	vtxs_tso;
	uint64_t	vtxs_rescheduled;
};

struct vtnet_txq {
	struct mtx		 vtntx_mtx;
	struct vtnet_softc	*vtntx_sc;
	struct virtqueue	*vtntx_vq;
	struct sglist		*vtntx_sg;
#ifndef VTNET_LEGACY_TX
	struct buf_ring		*vtntx_br;
#endif
	int			 vtntx_id;
	int			 vtntx_watchdog;
	struct vtnet_txq_stats	 vtntx_stats;
	struct taskqueue	*vtntx_tq;
	struct task		 vtntx_intrtask;
#ifndef VTNET_LEGACY_TX
	struct task		 vtntx_defrtask;
#endif
	char			 vtntx_name[16];
} __aligned(CACHE_LINE_SIZE);

#define VTNET_TXQ_LOCK(_txq)	mtx_lock(&(_txq)->vtntx_mtx)
#define VTNET_TXQ_TRYLOCK(_txq)	mtx_trylock(&(_txq)->vtntx_mtx)
#define VTNET_TXQ_UNLOCK(_txq)	mtx_unlock(&(_txq)->vtntx_mtx)
#define VTNET_TXQ_LOCK_ASSERT(_txq)		\
    mtx_assert(&(_txq)->vtntx_mtx, MA_OWNED)
#define VTNET_TXQ_LOCK_ASSERT_NOTOWNED(_txq)	\
    mtx_assert(&(_txq)->vtntx_mtx, MA_NOTOWNED)

struct vtnet_softc {
	device_t		 vtnet_dev;
	struct ifnet		*vtnet_ifp;
	struct vtnet_rxq	*vtnet_rxqs;
	struct vtnet_txq	*vtnet_txqs;

	uint32_t		 vtnet_flags;
#define VTNET_FLAG_SUSPENDED	 0x0001
#define VTNET_FLAG_MAC		 0x0002
#define VTNET_FLAG_CTRL_VQ	 0x0004
#define VTNET_FLAG_CTRL_RX	 0x0008
#define VTNET_FLAG_CTRL_MAC	 0x0010
#define VTNET_FLAG_VLAN_FILTER	 0x0020
#define VTNET_FLAG_TSO_ECN	 0x0040
#define VTNET_FLAG_MRG_RXBUFS	 0x0080
#define VTNET_FLAG_LRO_NOMRG	 0x0100
#define VTNET_FLAG_MULTIQ	 0x0200
#define VTNET_FLAG_EVENT_IDX	 0x0400

	int			 vtnet_link_active;
	int			 vtnet_hdr_size;
	int			 vtnet_rx_process_limit;
	int			 vtnet_rx_nsegs;
	int			 vtnet_rx_nmbufs;
	int			 vtnet_rx_clsize;
	int			 vtnet_rx_new_clsize;
	int			 vtnet_tx_nsegs;
	int			 vtnet_if_flags;
	int			 vtnet_act_vq_pairs;
	int			 vtnet_max_vq_pairs;

	struct virtqueue	*vtnet_ctrl_vq;
	struct vtnet_mac_filter	*vtnet_mac_filter;
	uint32_t		*vtnet_vlan_filter;

	uint64_t		 vtnet_features;
	struct vtnet_statistics	 vtnet_stats;
	struct callout		 vtnet_tick_ch;
	struct ifmedia		 vtnet_media;
	eventhandler_tag	 vtnet_vlan_attach;
	eventhandler_tag	 vtnet_vlan_detach;

	struct mtx		 vtnet_mtx;
	char			 vtnet_mtx_name[16];
	char			 vtnet_hwaddr[ETHER_ADDR_LEN];
};

/*
 * Maximum number of queue pairs we will autoconfigure to.
 */
#define VTNET_MAX_QUEUE_PAIRS	8

/*
 * Additional completed entries can appear in a virtqueue before we can
 * reenable interrupts. Number of times to retry before scheduling the
 * taskqueue to process the completed entries.
 */
#define VTNET_INTR_DISABLE_RETRIES	4

/*
 * Fake the media type. The host does not provide us with any real media
 * information.
 */
#define VTNET_MEDIATYPE		(IFM_ETHER | IFM_10G_T | IFM_FDX)

/*
 * Number of words to allocate for the VLAN shadow table. There is one
 * bit for each VLAN.
 */
#define VTNET_VLAN_FILTER_NWORDS	(4096 / 32)
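
/*
 * For illustration only: one bit per VLAN ID means 4096 / 32 = 128
 * 32-bit words, so a given VLAN ID would be tracked at word "vid >> 5",
 * bit "vid & 0x1f" (e.g. VLAN 100 -> word 3, bit 4).  The actual filter
 * maintenance is done in if_vtnet.c; the define above only sizes the table.
 */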

/*
 * When mergeable buffers are not negotiated, the vtnet_rx_header structure
 * below is placed at the beginning of the mbuf data. Use 4 bytes of pad to
 * both keep the VirtIO header and the data non-contiguous and to keep the
 * frame's payload 4 byte aligned.
 *
 * When mergeable buffers are negotiated, the host puts the VirtIO header in
 * the beginning of the first mbuf's data.
 */
#define VTNET_RX_HEADER_PAD	4
struct vtnet_rx_header {
	struct virtio_net_hdr	vrh_hdr;
	char			vrh_pad[VTNET_RX_HEADER_PAD];
} __packed;
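
/*
 * Illustrative arithmetic, assuming the legacy 10-byte virtio_net_hdr:
 * sizeof(struct vtnet_rx_header) is 10 + VTNET_RX_HEADER_PAD = 14 bytes,
 * so the 14-byte Ethernet header that follows it ends at offset 28 into
 * the mbuf data and the frame's IP payload begins 4 byte aligned.
 */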

/*
 * For each outgoing frame, the vtnet_tx_header below is allocated from
 * the vtnet_tx_header_zone.
 */
struct vtnet_tx_header {
	union {
		struct virtio_net_hdr		hdr;
		struct virtio_net_hdr_mrg_rxbuf	mhdr;
	} vth_uhdr;

	struct mbuf *vth_mbuf;
};

/*
 * The VirtIO specification does not place a limit on the number of MAC
 * addresses the guest driver may request to be filtered. In practice,
 * the host is constrained by available resources. To simplify this driver,
 * impose a reasonably high limit of MAC addresses we will filter before
 * falling back to promiscuous or all-multicast modes.
 */
#define VTNET_MAX_MAC_ENTRIES	128

struct vtnet_mac_table {
	uint32_t	nentries;
	uint8_t		macs[VTNET_MAX_MAC_ENTRIES][ETHER_ADDR_LEN];
} __packed;

struct vtnet_mac_filter {
	struct vtnet_mac_table	vmf_unicast;
	uint32_t		vmf_pad;	/* Make tables non-contiguous. */
	struct vtnet_mac_table	vmf_multicast;
};

/*
 * The MAC filter table is malloc(9)'d when needed. Ensure it will
 * always fit in one segment.
 */
CTASSERT(sizeof(struct vtnet_mac_filter) <= PAGE_SIZE);

#define VTNET_TX_TIMEOUT	5
#define VTNET_CSUM_OFFLOAD	(CSUM_TCP | CSUM_UDP | CSUM_SCTP)
#define VTNET_CSUM_OFFLOAD_IPV6	(CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_SCTP_IPV6)

#define VTNET_CSUM_ALL_OFFLOAD	\
    (VTNET_CSUM_OFFLOAD | VTNET_CSUM_OFFLOAD_IPV6 | CSUM_TSO)

/* Features desired/implemented by this driver. */
#define VTNET_FEATURES \
    (VIRTIO_NET_F_MAC			| \
     VIRTIO_NET_F_STATUS		| \
     VIRTIO_NET_F_CTRL_VQ		| \
     VIRTIO_NET_F_CTRL_RX		| \
     VIRTIO_NET_F_CTRL_MAC_ADDR		| \
     VIRTIO_NET_F_CTRL_VLAN		| \
     VIRTIO_NET_F_CSUM			| \
     VIRTIO_NET_F_GSO			| \
     VIRTIO_NET_F_HOST_TSO4		| \
     VIRTIO_NET_F_HOST_TSO6		| \
     VIRTIO_NET_F_HOST_ECN		| \
     VIRTIO_NET_F_GUEST_CSUM		| \
     VIRTIO_NET_F_GUEST_TSO4		| \
     VIRTIO_NET_F_GUEST_TSO6		| \
     VIRTIO_NET_F_GUEST_ECN		| \
     VIRTIO_NET_F_MRG_RXBUF		| \
     VIRTIO_NET_F_MQ			| \
     VIRTIO_RING_F_EVENT_IDX		| \
     VIRTIO_RING_F_INDIRECT_DESC)

/*
 * The VIRTIO_NET_F_HOST_TSO[46] features permit us to send the host
 * frames larger than 1514 bytes.
 */
#define VTNET_TSO_FEATURES (VIRTIO_NET_F_GSO | VIRTIO_NET_F_HOST_TSO4 | \
    VIRTIO_NET_F_HOST_TSO6 | VIRTIO_NET_F_HOST_ECN)

/*
 * The VIRTIO_NET_F_GUEST_TSO[46] features permit the host to send us
 * frames larger than 1514 bytes. We do not yet support software LRO
 * via tcp_lro_rx().
 */
#define VTNET_LRO_FEATURES (VIRTIO_NET_F_GUEST_TSO4 | \
    VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN)

#define VTNET_MAX_MTU		65536
#define VTNET_MAX_RX_SIZE	65550

/*
 * Used to preallocate the Vq indirect descriptors. The first segment
 * is reserved for the header, except for mergeable buffers since the
 * header is placed inline with the data.
 */
#define VTNET_MRG_RX_SEGS	1
#define VTNET_MIN_RX_SEGS	2
#define VTNET_MAX_RX_SEGS	34
#define VTNET_MIN_TX_SEGS	4
#define VTNET_MAX_TX_SEGS	64

/*
 * Assert we can receive and transmit the maximum with regular
 * size clusters.
 */
CTASSERT(((VTNET_MAX_RX_SEGS - 1) * MCLBYTES) >= VTNET_MAX_RX_SIZE);
CTASSERT(((VTNET_MAX_TX_SEGS - 1) * MCLBYTES) >= VTNET_MAX_MTU);

/*
 * Number of slots in the Tx bufrings. This value matches most other
 * multiqueue drivers.
 */
#define VTNET_DEFAULT_BUFRING_SIZE	4096

/*
 * Determine how many mbufs are in each receive buffer. For LRO without
 * mergeable buffers, we must allocate an mbuf chain large enough to
 * hold both the vtnet_rx_header and the maximum receivable data.
 */
#define VTNET_NEEDED_RX_MBUFS(_sc, _clsize)				\
	((_sc)->vtnet_flags & VTNET_FLAG_LRO_NOMRG) == 0 ? 1 :		\
	    howmany(sizeof(struct vtnet_rx_header) + VTNET_MAX_RX_SIZE,	\
	        (_clsize))
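
/*
 * Worked example, a sketch assuming 2K clusters (MCLBYTES == 2048) and the
 * 14-byte vtnet_rx_header above: without VTNET_FLAG_LRO_NOMRG a single mbuf
 * suffices; with it, each receive buffer needs
 * howmany(14 + 65550, 2048) = 33 clusters, which agrees with the
 * VTNET_MAX_RX_SEGS assertion above (one segment for the header plus 33
 * cluster segments for data).
 */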

#define VTNET_CORE_MTX(_sc)		&(_sc)->vtnet_mtx
#define VTNET_CORE_LOCK(_sc)		mtx_lock(VTNET_CORE_MTX((_sc)))
#define VTNET_CORE_UNLOCK(_sc)		mtx_unlock(VTNET_CORE_MTX((_sc)))
#define VTNET_CORE_LOCK_DESTROY(_sc)	mtx_destroy(VTNET_CORE_MTX((_sc)))
#define VTNET_CORE_LOCK_ASSERT(_sc)		\
    mtx_assert(VTNET_CORE_MTX((_sc)), MA_OWNED)
#define VTNET_CORE_LOCK_ASSERT_NOTOWNED(_sc)	\
    mtx_assert(VTNET_CORE_MTX((_sc)), MA_NOTOWNED)

#define VTNET_CORE_LOCK_INIT(_sc) do {					\
    snprintf((_sc)->vtnet_mtx_name, sizeof((_sc)->vtnet_mtx_name),	\
        "%s", device_get_nameunit((_sc)->vtnet_dev));			\
    mtx_init(VTNET_CORE_MTX((_sc)), (_sc)->vtnet_mtx_name,		\
        "VTNET Core Lock", MTX_DEF);					\
} while (0)

#endif /* _IF_VTNETVAR_H */