110b59a9bSPeter Grehan /*-
24d846d26SWarner Losh * SPDX-License-Identifier: BSD-2-Clause
3718cf2ccSPedro F. Giffuni *
4abd6790cSBryan Venteicher * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
510b59a9bSPeter Grehan * All rights reserved.
610b59a9bSPeter Grehan *
710b59a9bSPeter Grehan * Redistribution and use in source and binary forms, with or without
810b59a9bSPeter Grehan * modification, are permitted provided that the following conditions
910b59a9bSPeter Grehan * are met:
1010b59a9bSPeter Grehan * 1. Redistributions of source code must retain the above copyright
1110b59a9bSPeter Grehan * notice unmodified, this list of conditions, and the following
1210b59a9bSPeter Grehan * disclaimer.
1310b59a9bSPeter Grehan * 2. Redistributions in binary form must reproduce the above copyright
1410b59a9bSPeter Grehan * notice, this list of conditions and the following disclaimer in the
1510b59a9bSPeter Grehan * documentation and/or other materials provided with the distribution.
1610b59a9bSPeter Grehan *
1710b59a9bSPeter Grehan * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
1810b59a9bSPeter Grehan * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
1910b59a9bSPeter Grehan * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
2010b59a9bSPeter Grehan * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
2110b59a9bSPeter Grehan * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
2210b59a9bSPeter Grehan * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
2310b59a9bSPeter Grehan * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
2410b59a9bSPeter Grehan * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
2510b59a9bSPeter Grehan * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
2610b59a9bSPeter Grehan * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2710b59a9bSPeter Grehan */
2810b59a9bSPeter Grehan
2910b59a9bSPeter Grehan /* Driver for VirtIO network devices. */
3010b59a9bSPeter Grehan
3110b59a9bSPeter Grehan #include <sys/param.h>
32c3322cb9SGleb Smirnoff #include <sys/eventhandler.h>
3310b59a9bSPeter Grehan #include <sys/systm.h>
3410b59a9bSPeter Grehan #include <sys/kernel.h>
3510b59a9bSPeter Grehan #include <sys/sockio.h>
3610b59a9bSPeter Grehan #include <sys/malloc.h>
37710c0556SMark Johnston #include <sys/mbuf.h>
3810b59a9bSPeter Grehan #include <sys/module.h>
39710c0556SMark Johnston #include <sys/msan.h>
4010b59a9bSPeter Grehan #include <sys/socket.h>
4110b59a9bSPeter Grehan #include <sys/sysctl.h>
4210b59a9bSPeter Grehan #include <sys/random.h>
4310b59a9bSPeter Grehan #include <sys/sglist.h>
4410b59a9bSPeter Grehan #include <sys/lock.h>
4510b59a9bSPeter Grehan #include <sys/mutex.h>
468f3600b1SBryan Venteicher #include <sys/taskqueue.h>
478f3600b1SBryan Venteicher #include <sys/smp.h>
488f3600b1SBryan Venteicher #include <machine/smp.h>
4910b59a9bSPeter Grehan
5010b59a9bSPeter Grehan #include <vm/uma.h>
5110b59a9bSPeter Grehan
527790c8c1SConrad Meyer #include <net/debugnet.h>
5310b59a9bSPeter Grehan #include <net/ethernet.h>
54ed6cbf48SGleb Smirnoff #include <net/pfil.h>
5510b59a9bSPeter Grehan #include <net/if.h>
5676039bc8SGleb Smirnoff #include <net/if_var.h>
5710b59a9bSPeter Grehan #include <net/if_arp.h>
5810b59a9bSPeter Grehan #include <net/if_dl.h>
5910b59a9bSPeter Grehan #include <net/if_types.h>
6010b59a9bSPeter Grehan #include <net/if_media.h>
6110b59a9bSPeter Grehan #include <net/if_vlan_var.h>
6210b59a9bSPeter Grehan
6310b59a9bSPeter Grehan #include <net/bpf.h>
6410b59a9bSPeter Grehan
6510b59a9bSPeter Grehan #include <netinet/in_systm.h>
6610b59a9bSPeter Grehan #include <netinet/in.h>
6710b59a9bSPeter Grehan #include <netinet/ip.h>
6810b59a9bSPeter Grehan #include <netinet/ip6.h>
698f3600b1SBryan Venteicher #include <netinet6/ip6_var.h>
7010b59a9bSPeter Grehan #include <netinet/udp.h>
7110b59a9bSPeter Grehan #include <netinet/tcp.h>
7242343a63SBryan Venteicher #include <netinet/tcp_lro.h>
7310b59a9bSPeter Grehan
7410b59a9bSPeter Grehan #include <machine/bus.h>
7510b59a9bSPeter Grehan #include <machine/resource.h>
7610b59a9bSPeter Grehan #include <sys/bus.h>
7710b59a9bSPeter Grehan #include <sys/rman.h>
7810b59a9bSPeter Grehan
7910b59a9bSPeter Grehan #include <dev/virtio/virtio.h>
8010b59a9bSPeter Grehan #include <dev/virtio/virtqueue.h>
8110b59a9bSPeter Grehan #include <dev/virtio/network/virtio_net.h>
8210b59a9bSPeter Grehan #include <dev/virtio/network/if_vtnetvar.h>
8310b59a9bSPeter Grehan #include "virtio_if.h"
8410b59a9bSPeter Grehan
858f3600b1SBryan Venteicher #include "opt_inet.h"
868f3600b1SBryan Venteicher #include "opt_inet6.h"
878f3600b1SBryan Venteicher
88fa7ca1e3SBryan Venteicher #if defined(INET) || defined(INET6)
89fa7ca1e3SBryan Venteicher #include <machine/in_cksum.h>
90fa7ca1e3SBryan Venteicher #endif
91fa7ca1e3SBryan Venteicher
/*
 * Receive-buffer alignment padding.  On platforms that tolerate unaligned
 * loads no padding is needed; on strict-alignment platforms pad by
 * ETHER_ALIGN (2 bytes) so the IP header following the 14-byte Ethernet
 * header lands on a 4-byte boundary.
 */
#ifdef __NO_STRICT_ALIGNMENT
#define VTNET_ETHER_ALIGN 0
#else /* Strict alignment */
#define VTNET_ETHER_ALIGN ETHER_ALIGN
#endif
970ea4b408SWarner Losh
9810b59a9bSPeter Grehan static int vtnet_modevent(module_t, int, void *);
9910b59a9bSPeter Grehan
10010b59a9bSPeter Grehan static int vtnet_probe(device_t);
10110b59a9bSPeter Grehan static int vtnet_attach(device_t);
10210b59a9bSPeter Grehan static int vtnet_detach(device_t);
10310b59a9bSPeter Grehan static int vtnet_suspend(device_t);
10410b59a9bSPeter Grehan static int vtnet_resume(device_t);
10510b59a9bSPeter Grehan static int vtnet_shutdown(device_t);
1068f3600b1SBryan Venteicher static int vtnet_attach_completed(device_t);
10710b59a9bSPeter Grehan static int vtnet_config_change(device_t);
10810b59a9bSPeter Grehan
109e6cc42f1SBryan Venteicher static int vtnet_negotiate_features(struct vtnet_softc *);
110e6cc42f1SBryan Venteicher static int vtnet_setup_features(struct vtnet_softc *);
1118f3600b1SBryan Venteicher static int vtnet_init_rxq(struct vtnet_softc *, int);
1128f3600b1SBryan Venteicher static int vtnet_init_txq(struct vtnet_softc *, int);
1138f3600b1SBryan Venteicher static int vtnet_alloc_rxtx_queues(struct vtnet_softc *);
1148f3600b1SBryan Venteicher static void vtnet_free_rxtx_queues(struct vtnet_softc *);
1158f3600b1SBryan Venteicher static int vtnet_alloc_rx_filters(struct vtnet_softc *);
1168f3600b1SBryan Venteicher static void vtnet_free_rx_filters(struct vtnet_softc *);
11710b59a9bSPeter Grehan static int vtnet_alloc_virtqueues(struct vtnet_softc *);
118aa386085SZhenlei Huang static void vtnet_alloc_interface(struct vtnet_softc *);
1198f3600b1SBryan Venteicher static int vtnet_setup_interface(struct vtnet_softc *);
120c1b554c8SAlex Richardson static int vtnet_ioctl_mtu(struct vtnet_softc *, u_int);
121dc9029d8SBryan Venteicher static int vtnet_ioctl_ifflags(struct vtnet_softc *);
122dc9029d8SBryan Venteicher static int vtnet_ioctl_multi(struct vtnet_softc *);
123dc9029d8SBryan Venteicher static int vtnet_ioctl_ifcap(struct vtnet_softc *, struct ifreq *);
1244ee96792SJustin Hibbits static int vtnet_ioctl(if_t, u_long, caddr_t);
1254ee96792SJustin Hibbits static uint64_t vtnet_get_counter(if_t, ift_counter);
12610b59a9bSPeter Grehan
1278f3600b1SBryan Venteicher static int vtnet_rxq_populate(struct vtnet_rxq *);
1288f3600b1SBryan Venteicher static void vtnet_rxq_free_mbufs(struct vtnet_rxq *);
1298f3600b1SBryan Venteicher static struct mbuf *
1308f3600b1SBryan Venteicher vtnet_rx_alloc_buf(struct vtnet_softc *, int , struct mbuf **);
131fa7ca1e3SBryan Venteicher static int vtnet_rxq_replace_lro_nomrg_buf(struct vtnet_rxq *,
1328f3600b1SBryan Venteicher struct mbuf *, int);
1338f3600b1SBryan Venteicher static int vtnet_rxq_replace_buf(struct vtnet_rxq *, struct mbuf *, int);
1348f3600b1SBryan Venteicher static int vtnet_rxq_enqueue_buf(struct vtnet_rxq *, struct mbuf *);
1358f3600b1SBryan Venteicher static int vtnet_rxq_new_buf(struct vtnet_rxq *);
136fa7ca1e3SBryan Venteicher static int vtnet_rxq_csum_needs_csum(struct vtnet_rxq *, struct mbuf *,
137fa7ca1e3SBryan Venteicher uint16_t, int, struct virtio_net_hdr *);
138fa7ca1e3SBryan Venteicher static int vtnet_rxq_csum_data_valid(struct vtnet_rxq *, struct mbuf *,
139fa7ca1e3SBryan Venteicher uint16_t, int, struct virtio_net_hdr *);
1408f3600b1SBryan Venteicher static int vtnet_rxq_csum(struct vtnet_rxq *, struct mbuf *,
1418f3600b1SBryan Venteicher struct virtio_net_hdr *);
1428f3600b1SBryan Venteicher static void vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *, int);
1438f3600b1SBryan Venteicher static void vtnet_rxq_discard_buf(struct vtnet_rxq *, struct mbuf *);
1448f3600b1SBryan Venteicher static int vtnet_rxq_merged_eof(struct vtnet_rxq *, struct mbuf *, int);
1458f3600b1SBryan Venteicher static void vtnet_rxq_input(struct vtnet_rxq *, struct mbuf *,
1468f3600b1SBryan Venteicher struct virtio_net_hdr *);
1478f3600b1SBryan Venteicher static int vtnet_rxq_eof(struct vtnet_rxq *);
148ef6fdb33SVincenzo Maffione static void vtnet_rx_vq_process(struct vtnet_rxq *rxq, int tries);
1498f3600b1SBryan Venteicher static void vtnet_rx_vq_intr(void *);
1508f3600b1SBryan Venteicher static void vtnet_rxq_tq_intr(void *, int);
15110b59a9bSPeter Grehan
152baa5234fSBryan Venteicher static int vtnet_txq_intr_threshold(struct vtnet_txq *);
15332487a89SBryan Venteicher static int vtnet_txq_below_threshold(struct vtnet_txq *);
15432487a89SBryan Venteicher static int vtnet_txq_notify(struct vtnet_txq *);
1558f3600b1SBryan Venteicher static void vtnet_txq_free_mbufs(struct vtnet_txq *);
1568f3600b1SBryan Venteicher static int vtnet_txq_offload_ctx(struct vtnet_txq *, struct mbuf *,
1578f3600b1SBryan Venteicher int *, int *, int *);
1588f3600b1SBryan Venteicher static int vtnet_txq_offload_tso(struct vtnet_txq *, struct mbuf *, int,
1598f3600b1SBryan Venteicher int, struct virtio_net_hdr *);
1608f3600b1SBryan Venteicher static struct mbuf *
1618f3600b1SBryan Venteicher vtnet_txq_offload(struct vtnet_txq *, struct mbuf *,
1628f3600b1SBryan Venteicher struct virtio_net_hdr *);
1638f3600b1SBryan Venteicher static int vtnet_txq_enqueue_buf(struct vtnet_txq *, struct mbuf **,
1648f3600b1SBryan Venteicher struct vtnet_tx_header *);
165c857c7d5SMark Johnston static int vtnet_txq_encap(struct vtnet_txq *, struct mbuf **, int);
166*3a9ebff2SBjoern Jakobsen
167*3a9ebff2SBjoern Jakobsen /* Required for ALTQ */
1684ee96792SJustin Hibbits static void vtnet_start_locked(struct vtnet_txq *, if_t);
1694ee96792SJustin Hibbits static void vtnet_start(if_t);
170*3a9ebff2SBjoern Jakobsen
171*3a9ebff2SBjoern Jakobsen /* Required for MQ */
1728f3600b1SBryan Venteicher static int vtnet_txq_mq_start_locked(struct vtnet_txq *, struct mbuf *);
1734ee96792SJustin Hibbits static int vtnet_txq_mq_start(if_t, struct mbuf *);
1748f3600b1SBryan Venteicher static void vtnet_txq_tq_deferred(void *, int);
175*3a9ebff2SBjoern Jakobsen static void vtnet_qflush(if_t);
176*3a9ebff2SBjoern Jakobsen
177*3a9ebff2SBjoern Jakobsen
178bddddcd5SBryan Venteicher static void vtnet_txq_start(struct vtnet_txq *);
1798f3600b1SBryan Venteicher static void vtnet_txq_tq_intr(void *, int);
18032487a89SBryan Venteicher static int vtnet_txq_eof(struct vtnet_txq *);
1818f3600b1SBryan Venteicher static void vtnet_tx_vq_intr(void *);
1828f3600b1SBryan Venteicher static void vtnet_tx_start_all(struct vtnet_softc *);
1838f3600b1SBryan Venteicher
1848f3600b1SBryan Venteicher static int vtnet_watchdog(struct vtnet_txq *);
18584047b19SGleb Smirnoff static void vtnet_accum_stats(struct vtnet_softc *,
18684047b19SGleb Smirnoff struct vtnet_rxq_stats *, struct vtnet_txq_stats *);
18710b59a9bSPeter Grehan static void vtnet_tick(void *);
18810b59a9bSPeter Grehan
1898f3600b1SBryan Venteicher static void vtnet_start_taskqueues(struct vtnet_softc *);
1908f3600b1SBryan Venteicher static void vtnet_free_taskqueues(struct vtnet_softc *);
1918f3600b1SBryan Venteicher static void vtnet_drain_taskqueues(struct vtnet_softc *);
1928f3600b1SBryan Venteicher
1938f3600b1SBryan Venteicher static void vtnet_drain_rxtx_queues(struct vtnet_softc *);
1948f3600b1SBryan Venteicher static void vtnet_stop_rendezvous(struct vtnet_softc *);
19510b59a9bSPeter Grehan static void vtnet_stop(struct vtnet_softc *);
1968f3600b1SBryan Venteicher static int vtnet_virtio_reinit(struct vtnet_softc *);
1978f3600b1SBryan Venteicher static void vtnet_init_rx_filters(struct vtnet_softc *);
1988f3600b1SBryan Venteicher static int vtnet_init_rx_queues(struct vtnet_softc *);
1998f3600b1SBryan Venteicher static int vtnet_init_tx_queues(struct vtnet_softc *);
2008f3600b1SBryan Venteicher static int vtnet_init_rxtx_queues(struct vtnet_softc *);
2018f3600b1SBryan Venteicher static void vtnet_set_active_vq_pairs(struct vtnet_softc *);
202e36a6b1bSBryan Venteicher static void vtnet_update_rx_offloads(struct vtnet_softc *);
20310b59a9bSPeter Grehan static int vtnet_reinit(struct vtnet_softc *);
20416f224b5SVincenzo Maffione static void vtnet_init_locked(struct vtnet_softc *, int);
20510b59a9bSPeter Grehan static void vtnet_init(void *);
20610b59a9bSPeter Grehan
2078f3600b1SBryan Venteicher static void vtnet_free_ctrl_vq(struct vtnet_softc *);
20810b59a9bSPeter Grehan static void vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *,
20910b59a9bSPeter Grehan struct sglist *, int, int);
2108f3600b1SBryan Venteicher static int vtnet_ctrl_mac_cmd(struct vtnet_softc *, uint8_t *);
211e36a6b1bSBryan Venteicher static int vtnet_ctrl_guest_offloads(struct vtnet_softc *, uint64_t);
2128f3600b1SBryan Venteicher static int vtnet_ctrl_mq_cmd(struct vtnet_softc *, uint16_t);
213c1b554c8SAlex Richardson static int vtnet_ctrl_rx_cmd(struct vtnet_softc *, uint8_t, bool);
214c1b554c8SAlex Richardson static int vtnet_set_promisc(struct vtnet_softc *, bool);
215c1b554c8SAlex Richardson static int vtnet_set_allmulti(struct vtnet_softc *, bool);
2168f3600b1SBryan Venteicher static void vtnet_rx_filter(struct vtnet_softc *);
21710b59a9bSPeter Grehan static void vtnet_rx_filter_mac(struct vtnet_softc *);
21810b59a9bSPeter Grehan static int vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t);
21910b59a9bSPeter Grehan static void vtnet_rx_filter_vlan(struct vtnet_softc *);
2208f3600b1SBryan Venteicher static void vtnet_update_vlan_filter(struct vtnet_softc *, int, uint16_t);
2214ee96792SJustin Hibbits static void vtnet_register_vlan(void *, if_t, uint16_t);
2224ee96792SJustin Hibbits static void vtnet_unregister_vlan(void *, if_t, uint16_t);
22310b59a9bSPeter Grehan
2246a733393SBryan Venteicher static void vtnet_update_speed_duplex(struct vtnet_softc *);
2258f3600b1SBryan Venteicher static int vtnet_is_link_up(struct vtnet_softc *);
2268f3600b1SBryan Venteicher static void vtnet_update_link_status(struct vtnet_softc *);
2274ee96792SJustin Hibbits static int vtnet_ifmedia_upd(if_t);
2284ee96792SJustin Hibbits static void vtnet_ifmedia_sts(if_t, struct ifmediareq *);
22905041794SBryan Venteicher static void vtnet_get_macaddr(struct vtnet_softc *);
23005041794SBryan Venteicher static void vtnet_set_macaddr(struct vtnet_softc *);
23105041794SBryan Venteicher static void vtnet_attached_set_macaddr(struct vtnet_softc *);
2328f3600b1SBryan Venteicher static void vtnet_vlan_tag_remove(struct mbuf *);
23332487a89SBryan Venteicher static void vtnet_set_rx_process_limit(struct vtnet_softc *);
23410b59a9bSPeter Grehan
2358f3600b1SBryan Venteicher static void vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *,
2368f3600b1SBryan Venteicher struct sysctl_oid_list *, struct vtnet_rxq *);
2378f3600b1SBryan Venteicher static void vtnet_setup_txq_sysctl(struct sysctl_ctx_list *,
2388f3600b1SBryan Venteicher struct sysctl_oid_list *, struct vtnet_txq *);
2398f3600b1SBryan Venteicher static void vtnet_setup_queue_sysctl(struct vtnet_softc *);
24042343a63SBryan Venteicher static void vtnet_load_tunables(struct vtnet_softc *);
2418f3600b1SBryan Venteicher static void vtnet_setup_sysctl(struct vtnet_softc *);
24210b59a9bSPeter Grehan
2438f3600b1SBryan Venteicher static int vtnet_rxq_enable_intr(struct vtnet_rxq *);
2448f3600b1SBryan Venteicher static void vtnet_rxq_disable_intr(struct vtnet_rxq *);
2458f3600b1SBryan Venteicher static int vtnet_txq_enable_intr(struct vtnet_txq *);
2468f3600b1SBryan Venteicher static void vtnet_txq_disable_intr(struct vtnet_txq *);
2478f3600b1SBryan Venteicher static void vtnet_enable_rx_interrupts(struct vtnet_softc *);
2488f3600b1SBryan Venteicher static void vtnet_enable_tx_interrupts(struct vtnet_softc *);
2498f3600b1SBryan Venteicher static void vtnet_enable_interrupts(struct vtnet_softc *);
2508f3600b1SBryan Venteicher static void vtnet_disable_rx_interrupts(struct vtnet_softc *);
2518f3600b1SBryan Venteicher static void vtnet_disable_tx_interrupts(struct vtnet_softc *);
2528f3600b1SBryan Venteicher static void vtnet_disable_interrupts(struct vtnet_softc *);
2538f3600b1SBryan Venteicher
2548f3600b1SBryan Venteicher static int vtnet_tunable_int(struct vtnet_softc *, const char *, int);
25510b59a9bSPeter Grehan
DEBUGNET_DEFINE(vtnet);

/*
 * Endianness conversion helpers between the guest and the device.
 * vtnet_modern(_sc) selects the conversion at run time: modern (V1)
 * devices use a fixed byte order while legacy devices use the guest's
 * native order — see the virtio_htog*/gtoh* implementations for details.
 * "htog" = host to guest view, "gtoh" = guest view to host.
 */
#define vtnet_htog16(_sc, _val)	virtio_htog16(vtnet_modern(_sc), _val)
#define vtnet_htog32(_sc, _val)	virtio_htog32(vtnet_modern(_sc), _val)
#define vtnet_htog64(_sc, _val)	virtio_htog64(vtnet_modern(_sc), _val)
#define vtnet_gtoh16(_sc, _val)	virtio_gtoh16(vtnet_modern(_sc), _val)
#define vtnet_gtoh32(_sc, _val)	virtio_gtoh32(vtnet_modern(_sc), _val)
#define vtnet_gtoh64(_sc, _val)	virtio_gtoh64(vtnet_modern(_sc), _val)
2645e220811SBryan Venteicher
/* Tunables.  All are boot-time loader tunables (CTLFLAG_RDTUN). */
static SYSCTL_NODE(_hw, OID_AUTO, vtnet, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "VirtIO Net driver parameters");

static int vtnet_csum_disable = 0;
SYSCTL_INT(_hw_vtnet, OID_AUTO, csum_disable, CTLFLAG_RDTUN,
    &vtnet_csum_disable, 0, "Disables receive and send checksum offload");

static int vtnet_fixup_needs_csum = 0;
SYSCTL_INT(_hw_vtnet, OID_AUTO, fixup_needs_csum, CTLFLAG_RDTUN,
    &vtnet_fixup_needs_csum, 0,
    "Calculate valid checksum for NEEDS_CSUM packets");

static int vtnet_tso_disable = 0;
SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_disable, CTLFLAG_RDTUN,
    &vtnet_tso_disable, 0, "Disables TSO");

static int vtnet_lro_disable = 0;
SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_disable, CTLFLAG_RDTUN,
    &vtnet_lro_disable, 0, "Disables hardware LRO");

static int vtnet_mq_disable = 0;
SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_disable, CTLFLAG_RDTUN,
    &vtnet_mq_disable, 0, "Disables multiqueue support");

static int vtnet_mq_max_pairs = VTNET_MAX_QUEUE_PAIRS;
SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_max_pairs, CTLFLAG_RDTUN,
    &vtnet_mq_max_pairs, 0, "Maximum number of multiqueue pairs");

static int vtnet_tso_maxlen = IP_MAXPACKET;
SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_maxlen, CTLFLAG_RDTUN,
    &vtnet_tso_maxlen, 0, "TSO burst limit");

static int vtnet_rx_process_limit = 1024;
SYSCTL_INT(_hw_vtnet, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    &vtnet_rx_process_limit, 0,
    "Number of RX segments processed in one pass");

static int vtnet_lro_entry_count = 128;
SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_entry_count, CTLFLAG_RDTUN,
    &vtnet_lro_entry_count, 0, "Software LRO entry count");

/* Enable sorted LRO, and the depth of the mbuf queue. */
/*
 * NOTE(review): the variable is declared int but registered with
 * SYSCTL_UINT — confirm the signed/unsigned mismatch is intentional.
 */
static int vtnet_lro_mbufq_depth = 0;
SYSCTL_UINT(_hw_vtnet, OID_AUTO, lro_mbufq_depth, CTLFLAG_RDTUN,
    &vtnet_lro_mbufq_depth, 0, "Depth of software LRO mbuf queue");

/* Deactivate ALTQ Support */
static int vtnet_altq_disable = 0;
SYSCTL_INT(_hw_vtnet, OID_AUTO, altq_disable, CTLFLAG_RDTUN,
    &vtnet_altq_disable, 0, "Disables ALTQ Support");

/*
 * For the driver to be considered as having altq enabled,
 * it must be compiled with an ALTQ capable kernel,
 * and the tunable hw.vtnet.altq_disable must be zero
 */
#define VTNET_ALTQ_ENABLED (VTNET_ALTQ_CAPABLE && (!vtnet_altq_disable))
324108e5d07SBjoern Jakobsen
32510b59a9bSPeter Grehan static uma_zone_t vtnet_tx_header_zone;
32610b59a9bSPeter Grehan
/*
 * Human-readable names for the VirtIO feature bits this driver knows
 * about; passed to virtio_set_feature_desc() in vtnet_attach() so the
 * negotiated features can be reported by the VirtIO bus code.
 */
static struct virtio_feature_desc vtnet_feature_desc[] = {
	{ VIRTIO_NET_F_CSUM,			"TxChecksum"	},
	{ VIRTIO_NET_F_GUEST_CSUM,		"RxChecksum"	},
	{ VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,	"CtrlRxOffloads" },
	{ VIRTIO_NET_F_MAC,			"MAC"		},
	{ VIRTIO_NET_F_GSO,			"TxGSO"		},
	{ VIRTIO_NET_F_GUEST_TSO4,		"RxLROv4"	},
	{ VIRTIO_NET_F_GUEST_TSO6,		"RxLROv6"	},
	{ VIRTIO_NET_F_GUEST_ECN,		"RxLROECN"	},
	{ VIRTIO_NET_F_GUEST_UFO,		"RxUFO"		},
	{ VIRTIO_NET_F_HOST_TSO4,		"TxTSOv4"	},
	{ VIRTIO_NET_F_HOST_TSO6,		"TxTSOv6"	},
	{ VIRTIO_NET_F_HOST_ECN,		"TxTSOECN"	},
	{ VIRTIO_NET_F_HOST_UFO,		"TxUFO"		},
	{ VIRTIO_NET_F_MRG_RXBUF,		"MrgRxBuf"	},
	{ VIRTIO_NET_F_STATUS,			"Status"	},
	{ VIRTIO_NET_F_CTRL_VQ,			"CtrlVq"	},
	{ VIRTIO_NET_F_CTRL_RX,			"CtrlRxMode"	},
	{ VIRTIO_NET_F_CTRL_VLAN,		"CtrlVLANFilter" },
	{ VIRTIO_NET_F_CTRL_RX_EXTRA,		"CtrlRxModeExtra" },
	{ VIRTIO_NET_F_GUEST_ANNOUNCE,		"GuestAnnounce"	},
	{ VIRTIO_NET_F_MQ,			"Multiqueue"	},
	{ VIRTIO_NET_F_CTRL_MAC_ADDR,		"CtrlMacAddr"	},
	{ VIRTIO_NET_F_SPEED_DUPLEX,		"SpeedDuplex"	},

	{ 0, NULL }	/* Sentinel terminating the table. */
};
35410b59a9bSPeter Grehan
/* Newbus/VirtIO method table wiring the generic entry points to vtnet. */
static device_method_t vtnet_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,			vtnet_probe),
	DEVMETHOD(device_attach,		vtnet_attach),
	DEVMETHOD(device_detach,		vtnet_detach),
	DEVMETHOD(device_suspend,		vtnet_suspend),
	DEVMETHOD(device_resume,		vtnet_resume),
	DEVMETHOD(device_shutdown,		vtnet_shutdown),

	/* VirtIO methods. */
	DEVMETHOD(virtio_attach_completed,	vtnet_attach_completed),
	DEVMETHOD(virtio_config_change,		vtnet_config_change),

	DEVMETHOD_END
};

#ifdef DEV_NETMAP
#include <dev/netmap/if_vtnet_netmap.h>
#endif

/* Driver declaration and module registration with the VirtIO bus. */
static driver_t vtnet_driver = {
	.name = "vtnet",
	.methods = vtnet_methods,
	.size = sizeof(struct vtnet_softc)
};
VIRTIO_DRIVER_MODULE(vtnet, vtnet_driver, vtnet_modevent, NULL);
MODULE_VERSION(vtnet, 1);
MODULE_DEPEND(vtnet, virtio, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(vtnet, netmap, 1, 1, 1);
#endif

/* Plug-and-play match data: any VirtIO device with the network device ID. */
VIRTIO_SIMPLE_PNPINFO(vtnet, VIRTIO_ID_NETWORK, "VirtIO Networking Adapter");
3880f6040f0SConrad Meyer
38910b59a9bSPeter Grehan static int
vtnet_modevent(module_t mod __unused,int type,void * unused __unused)390c1b554c8SAlex Richardson vtnet_modevent(module_t mod __unused, int type, void *unused __unused)
39110b59a9bSPeter Grehan {
3923fcb1aaeSKristof Provost int error = 0;
3933fcb1aaeSKristof Provost static int loaded = 0;
39410b59a9bSPeter Grehan
39510b59a9bSPeter Grehan switch (type) {
39610b59a9bSPeter Grehan case MOD_LOAD:
39729bfe210SKristof Provost if (loaded++ == 0) {
39810b59a9bSPeter Grehan vtnet_tx_header_zone = uma_zcreate("vtnet_tx_hdr",
39910b59a9bSPeter Grehan sizeof(struct vtnet_tx_header),
40010b59a9bSPeter Grehan NULL, NULL, NULL, NULL, 0, 0);
40129bfe210SKristof Provost #ifdef DEBUGNET
40229bfe210SKristof Provost /*
40329bfe210SKristof Provost * We need to allocate from this zone in the transmit path, so ensure
40429bfe210SKristof Provost * that we have at least one item per header available.
40529bfe210SKristof Provost * XXX add a separate zone like we do for mbufs? otherwise we may alloc
40629bfe210SKristof Provost * buckets
40729bfe210SKristof Provost */
40829bfe210SKristof Provost uma_zone_reserve(vtnet_tx_header_zone, DEBUGNET_MAX_IN_FLIGHT * 2);
40929bfe210SKristof Provost uma_prealloc(vtnet_tx_header_zone, DEBUGNET_MAX_IN_FLIGHT * 2);
41029bfe210SKristof Provost #endif
41129bfe210SKristof Provost }
41210b59a9bSPeter Grehan break;
41310b59a9bSPeter Grehan case MOD_QUIESCE:
41410b59a9bSPeter Grehan if (uma_zone_get_cur(vtnet_tx_header_zone) > 0)
41510b59a9bSPeter Grehan error = EBUSY;
4163fcb1aaeSKristof Provost break;
4173fcb1aaeSKristof Provost case MOD_UNLOAD:
4183fcb1aaeSKristof Provost if (--loaded == 0) {
41910b59a9bSPeter Grehan uma_zdestroy(vtnet_tx_header_zone);
42010b59a9bSPeter Grehan vtnet_tx_header_zone = NULL;
42110b59a9bSPeter Grehan }
42210b59a9bSPeter Grehan break;
42310b59a9bSPeter Grehan case MOD_SHUTDOWN:
42410b59a9bSPeter Grehan break;
42510b59a9bSPeter Grehan default:
42610b59a9bSPeter Grehan error = EOPNOTSUPP;
42710b59a9bSPeter Grehan break;
42810b59a9bSPeter Grehan }
42910b59a9bSPeter Grehan
43010b59a9bSPeter Grehan return (error);
43110b59a9bSPeter Grehan }
43210b59a9bSPeter Grehan
43310b59a9bSPeter Grehan static int
vtnet_probe(device_t dev)43410b59a9bSPeter Grehan vtnet_probe(device_t dev)
43510b59a9bSPeter Grehan {
4360f6040f0SConrad Meyer return (VIRTIO_SIMPLE_PROBE(dev, vtnet));
43710b59a9bSPeter Grehan }
43810b59a9bSPeter Grehan
/*
 * Device attach: negotiate features, allocate Rx filters, queue state and
 * virtqueues, create the ifnet, and hook up interrupts.  On any failure
 * control jumps to "fail", where vtnet_detach() unwinds whatever was set
 * up so far — each teardown step there checks that its resource exists.
 */
static int
vtnet_attach(device_t dev)
{
	struct vtnet_softc *sc;
	int error;

	sc = device_get_softc(dev);
	sc->vtnet_dev = dev;
	virtio_set_feature_desc(dev, vtnet_feature_desc);

	VTNET_CORE_LOCK_INIT(sc);
	/* The tick callout runs under the core mutex. */
	callout_init_mtx(&sc->vtnet_tick_ch, VTNET_CORE_MTX(sc), 0);
	vtnet_load_tunables(sc);

	vtnet_alloc_interface(sc);
	vtnet_setup_sysctl(sc);

	error = vtnet_setup_features(sc);
	if (error) {
		device_printf(dev, "cannot setup features\n");
		goto fail;
	}

	error = vtnet_alloc_rx_filters(sc);
	if (error) {
		device_printf(dev, "cannot allocate Rx filters\n");
		goto fail;
	}

	error = vtnet_alloc_rxtx_queues(sc);
	if (error) {
		device_printf(dev, "cannot allocate queues\n");
		goto fail;
	}

	error = vtnet_alloc_virtqueues(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueues\n");
		goto fail;
	}

	error = vtnet_setup_interface(sc);
	if (error) {
		device_printf(dev, "cannot setup interface\n");
		goto fail;
	}

	error = virtio_setup_intr(dev, INTR_TYPE_NET);
	if (error) {
		device_printf(dev, "cannot setup interrupts\n");
		/* The ifnet was attached above; undo that before detaching. */
		ether_ifdetach(sc->vtnet_ifp);
		goto fail;
	}

#ifdef DEV_NETMAP
	vtnet_netmap_attach(sc);
#endif
	vtnet_start_taskqueues(sc);

fail:
	/* Success also falls through here; only unwind on error. */
	if (error)
		vtnet_detach(dev);

	return (error);
}
5048f3600b1SBryan Venteicher
/*
 * Device detach.  Also serves as the error-unwind path from
 * vtnet_attach(), so every teardown step below checks that its resource
 * was actually created before releasing it.  The ordering matters: the
 * device is stopped and the callout/taskqueues drained before any state
 * they reference is freed.
 */
static int
vtnet_detach(device_t dev)
{
	struct vtnet_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);
	ifp = sc->vtnet_ifp;

	if (device_is_attached(dev)) {
		VTNET_CORE_LOCK(sc);
		vtnet_stop(sc);
		VTNET_CORE_UNLOCK(sc);

		/* Quiesce deferred work before tearing anything down. */
		callout_drain(&sc->vtnet_tick_ch);
		vtnet_drain_taskqueues(sc);

		ether_ifdetach(ifp);
	}

#ifdef DEV_NETMAP
	netmap_detach(ifp);
#endif

	if (sc->vtnet_pfil != NULL) {
		pfil_head_unregister(sc->vtnet_pfil);
		sc->vtnet_pfil = NULL;
	}

	vtnet_free_taskqueues(sc);

	/* Unhook the VLAN config/unconfig event handlers, if registered. */
	if (sc->vtnet_vlan_attach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vtnet_vlan_attach);
		sc->vtnet_vlan_attach = NULL;
	}
	if (sc->vtnet_vlan_detach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vtnet_vlan_detach);
		sc->vtnet_vlan_detach = NULL;
	}

	ifmedia_removeall(&sc->vtnet_media);

	if (ifp != NULL) {
		if_free(ifp);
		sc->vtnet_ifp = NULL;
	}

	vtnet_free_rxtx_queues(sc);
	vtnet_free_rx_filters(sc);

	if (sc->vtnet_ctrl_vq != NULL)
		vtnet_free_ctrl_vq(sc);

	VTNET_CORE_LOCK_DESTROY(sc);

	return (0);
}
5628f3600b1SBryan Venteicher
static int
vtnet_suspend(device_t dev)
{
	struct vtnet_softc *sc;

	sc = device_get_softc(dev);

	/*
	 * Stop the device and remember that we did so, so that
	 * vtnet_resume() can bring it back up.
	 */
	VTNET_CORE_LOCK(sc);
	vtnet_stop(sc);
	sc->vtnet_flags |= VTNET_FLAG_SUSPENDED;
	VTNET_CORE_UNLOCK(sc);

	return (0);
}
5778f3600b1SBryan Venteicher
static int
vtnet_resume(device_t dev)
{
	struct vtnet_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);
	ifp = sc->vtnet_ifp;

	/*
	 * Reinitialize the interface if it was up when we suspended,
	 * then clear the suspended state set by vtnet_suspend().
	 */
	VTNET_CORE_LOCK(sc);
	if (if_getflags(ifp) & IFF_UP)
		vtnet_init_locked(sc, 0);
	sc->vtnet_flags &= ~VTNET_FLAG_SUSPENDED;
	VTNET_CORE_UNLOCK(sc);

	return (0);
}
5958f3600b1SBryan Venteicher
/* Device shutdown method: quiesce the device for system shutdown. */
static int
vtnet_shutdown(device_t dev)
{
	/*
	 * Suspend already does all of what we need to
	 * do here; we just never expect to be resumed.
	 */
	return (vtnet_suspend(dev));
}
6058f3600b1SBryan Venteicher
/*
 * VirtIO bus callback invoked once the device attach has fully
 * completed; used here to set the MAC address on the host.
 */
static int
vtnet_attach_completed(device_t dev)
{
	struct vtnet_softc *sc;

	sc = device_get_softc(dev);

	VTNET_CORE_LOCK(sc);
	vtnet_attached_set_macaddr(sc);
	VTNET_CORE_UNLOCK(sc);

	return (0);
}
6198f3600b1SBryan Venteicher
/*
 * VirtIO bus callback for a device configuration change interrupt.
 * Re-reads the link state and, if the link came up, kicks the
 * transmit queues to flush anything that queued while it was down.
 */
static int
vtnet_config_change(device_t dev)
{
	struct vtnet_softc *sc;

	sc = device_get_softc(dev);

	VTNET_CORE_LOCK(sc);
	vtnet_update_link_status(sc);
	if (sc->vtnet_link_active != 0)
		vtnet_tx_start_all(sc);
	VTNET_CORE_UNLOCK(sc);

	return (0);
}
6358f3600b1SBryan Venteicher
/*
 * Negotiate the driver/device feature set and cache the result in the
 * softc.  Loader tunables may mask features off before negotiation;
 * features whose device configuration turns out to be invalid (MTU,
 * multiqueue pair count) trigger a renegotiation with the offending
 * feature removed.  Returns the result of virtio_finalize_features().
 */
static int
vtnet_negotiate_features(struct vtnet_softc *sc)
{
	device_t dev;
	uint64_t features, negotiated_features;
	int no_csum;

	dev = sc->vtnet_dev;
	/* Start from the feature mask matching the device flavor. */
	features = virtio_bus_is_modern(dev) ? VTNET_MODERN_FEATURES :
	    VTNET_LEGACY_FEATURES;

	/*
	 * TSO and LRO are only available when their corresponding checksum
	 * offload feature is also negotiated.
	 */
	no_csum = vtnet_tunable_int(sc, "csum_disable", vtnet_csum_disable);
	if (no_csum)
		features &= ~(VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM);
	if (no_csum || vtnet_tunable_int(sc, "tso_disable", vtnet_tso_disable))
		features &= ~VTNET_TSO_FEATURES;
	if (no_csum || vtnet_tunable_int(sc, "lro_disable", vtnet_lro_disable))
		features &= ~VTNET_LRO_FEATURES;

	/* Deactivate MQ Feature flag, if driver has ALTQ enabled, or MQ is explicitly disabled */
	if (VTNET_ALTQ_ENABLED || vtnet_tunable_int(sc, "mq_disable", vtnet_mq_disable))
		features &= ~VIRTIO_NET_F_MQ;

	negotiated_features = virtio_negotiate_features(dev, features);

	/* Sanity check the device-advertised MTU before trusting it. */
	if (virtio_with_feature(dev, VIRTIO_NET_F_MTU)) {
		uint16_t mtu;

		mtu = virtio_read_dev_config_2(dev,
		    offsetof(struct virtio_net_config, mtu));
		if (mtu < VTNET_MIN_MTU) {
			device_printf(dev, "Invalid MTU value: %d. "
			    "MTU feature disabled.\n", mtu);
			features &= ~VIRTIO_NET_F_MTU;
			negotiated_features =
			    virtio_negotiate_features(dev, features);
		}
	}

	/* Likewise for the advertised virtqueue pair count. */
	if (virtio_with_feature(dev, VIRTIO_NET_F_MQ)) {
		uint16_t npairs;

		npairs = virtio_read_dev_config_2(dev,
		    offsetof(struct virtio_net_config, max_virtqueue_pairs));
		if (npairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
		    npairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX) {
			device_printf(dev, "Invalid max_virtqueue_pairs value: "
			    "%d. Multiqueue feature disabled.\n", npairs);
			features &= ~VIRTIO_NET_F_MQ;
			negotiated_features =
			    virtio_negotiate_features(dev, features);
		}
	}

	if (virtio_with_feature(dev, VTNET_LRO_FEATURES) &&
	    virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0) {
		/*
		 * LRO without mergeable buffers requires special care. This
		 * is not ideal because every receive buffer must be large
		 * enough to hold the maximum TCP packet, the Ethernet header,
		 * and the header. This requires up to 34 descriptors with
		 * MCLBYTES clusters. If we do not have indirect descriptors,
		 * LRO is disabled since the virtqueue will not contain very
		 * many receive buffers.
		 */
		if (!virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) {
			device_printf(dev,
			    "Host LRO disabled since both mergeable buffers "
			    "and indirect descriptors were not negotiated\n");
			features &= ~VTNET_LRO_FEATURES;
			negotiated_features =
			    virtio_negotiate_features(dev, features);
		} else
			sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG;
	}

	sc->vtnet_features = negotiated_features;
	sc->vtnet_negotiated_features = negotiated_features;

	return (virtio_finalize_features(dev));
}
7218f3600b1SBryan Venteicher
/*
 * Negotiate features and then translate the negotiated feature set into
 * the softc state derived from it: flags, header size, maximum MTU,
 * RX/TX segment counts, and the number of virtqueue pairs to use.
 * Returns 0 on success or an errno from feature negotiation.
 */
static int
vtnet_setup_features(struct vtnet_softc *sc)
{
	device_t dev;
	int error;

	dev = sc->vtnet_dev;

	error = vtnet_negotiate_features(sc);
	if (error)
		return (error);

	if (virtio_with_feature(dev, VIRTIO_F_VERSION_1))
		sc->vtnet_flags |= VTNET_FLAG_MODERN;
	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
		sc->vtnet_flags |= VTNET_FLAG_INDIRECT;
	if (virtio_with_feature(dev, VIRTIO_RING_F_EVENT_IDX))
		sc->vtnet_flags |= VTNET_FLAG_EVENT_IDX;

	if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) {
		/* This feature should always be negotiated. */
		sc->vtnet_flags |= VTNET_FLAG_MAC;
	}

	/* Use the device-advertised MTU limit when present. */
	if (virtio_with_feature(dev, VIRTIO_NET_F_MTU)) {
		sc->vtnet_max_mtu = virtio_read_dev_config_2(dev,
		    offsetof(struct virtio_net_config, mtu));
	} else
		sc->vtnet_max_mtu = VTNET_MAX_MTU;

	/* Select the header layout the host expects on each buffer. */
	if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) {
		sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS;
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	} else if (vtnet_modern(sc)) {
		/* This is identical to the mergeable header. */
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_v1);
	} else
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr);

	/*
	 * Segment counts depend on the header layout chosen above: an
	 * inline header needs fewer descriptors than a separate one, and
	 * LRO without mergeable buffers needs the most.
	 */
	if (vtnet_modern(sc) || sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS)
		sc->vtnet_rx_nsegs = VTNET_RX_SEGS_HDR_INLINE;
	else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
		sc->vtnet_rx_nsegs = VTNET_RX_SEGS_LRO_NOMRG;
	else
		sc->vtnet_rx_nsegs = VTNET_RX_SEGS_HDR_SEPARATE;

	/*
	 * Favor "hardware" LRO if negotiated, but support software LRO as
	 * a fallback; there is usually little benefit (or worse) with both.
	 */
	if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) == 0 &&
	    virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6) == 0)
		sc->vtnet_flags |= VTNET_FLAG_SW_LRO;

	if (virtio_with_feature(dev, VIRTIO_NET_F_GSO) ||
	    virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4) ||
	    virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
		sc->vtnet_tx_nsegs = VTNET_TX_SEGS_MAX;
	else
		sc->vtnet_tx_nsegs = VTNET_TX_SEGS_MIN;

	/* Default to a single queue pair unless MQ is negotiated below. */
	sc->vtnet_req_vq_pairs = 1;
	sc->vtnet_max_vq_pairs = 1;

	if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) {
		sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ;

		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX))
			sc->vtnet_flags |= VTNET_FLAG_CTRL_RX;
		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VLAN))
			sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER;
		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR))
			sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC;

		if (virtio_with_feature(dev, VIRTIO_NET_F_MQ)) {
			sc->vtnet_max_vq_pairs = virtio_read_dev_config_2(dev,
			    offsetof(struct virtio_net_config,
			    max_virtqueue_pairs));
		}
	}

	if (sc->vtnet_max_vq_pairs > 1) {
		int req;

		/*
		 * Limit the maximum number of requested queue pairs to the
		 * number of CPUs and the configured maximum.
		 */
		req = vtnet_tunable_int(sc, "mq_max_pairs", vtnet_mq_max_pairs);
		if (req < 0)
			req = 1;
		if (req == 0)
			req = mp_ncpus;
		if (req > sc->vtnet_max_vq_pairs)
			req = sc->vtnet_max_vq_pairs;
		if (req > mp_ncpus)
			req = mp_ncpus;
		if (req > 1) {
			sc->vtnet_req_vq_pairs = req;
			sc->vtnet_flags |= VTNET_FLAG_MQ;
		}
	}

	return (0);
}
8278f3600b1SBryan Venteicher
/*
 * Initialize receive queue 'id': its mutex, scatter/gather list,
 * optional software LRO state, and interrupt taskqueue.  Returns
 * ENOMEM on allocation failure; on error the caller is expected to
 * release any partially initialized state via vtnet_destroy_rxq().
 */
static int
vtnet_init_rxq(struct vtnet_softc *sc, int id)
{
	struct vtnet_rxq *rxq;

	rxq = &sc->vtnet_rxqs[id];

	snprintf(rxq->vtnrx_name, sizeof(rxq->vtnrx_name), "%s-rx%d",
	    device_get_nameunit(sc->vtnet_dev), id);
	mtx_init(&rxq->vtnrx_mtx, rxq->vtnrx_name, NULL, MTX_DEF);

	rxq->vtnrx_sc = sc;
	rxq->vtnrx_id = id;

	/* Sized by the segment count chosen in vtnet_setup_features(). */
	rxq->vtnrx_sg = sglist_alloc(sc->vtnet_rx_nsegs, M_NOWAIT);
	if (rxq->vtnrx_sg == NULL)
		return (ENOMEM);

#if defined(INET) || defined(INET6)
	if (vtnet_software_lro(sc)) {
		if (tcp_lro_init_args(&rxq->vtnrx_lro, sc->vtnet_ifp,
		    sc->vtnet_lro_entry_count, sc->vtnet_lro_mbufq_depth) != 0)
			return (ENOMEM);
	}
#endif

	NET_TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq);
	rxq->vtnrx_tq = taskqueue_create(rxq->vtnrx_name, M_NOWAIT,
	    taskqueue_thread_enqueue, &rxq->vtnrx_tq);

	return (rxq->vtnrx_tq == NULL ? ENOMEM : 0);
}
8608f3600b1SBryan Venteicher
/*
 * Initialize transmit queue 'id': its mutex, scatter/gather list,
 * multiqueue buf_ring (when ALTQ is not enabled), and taskqueue.
 * Returns ENOMEM on allocation failure; the caller cleans up partial
 * state via vtnet_destroy_txq().
 */
static int
vtnet_init_txq(struct vtnet_softc *sc, int id)
{
	struct vtnet_txq *txq;

	txq = &sc->vtnet_txqs[id];

	snprintf(txq->vtntx_name, sizeof(txq->vtntx_name), "%s-tx%d",
	    device_get_nameunit(sc->vtnet_dev), id);
	mtx_init(&txq->vtntx_mtx, txq->vtntx_name, NULL, MTX_DEF);

	txq->vtntx_sc = sc;
	txq->vtntx_id = id;

	/* Sized by the segment count chosen in vtnet_setup_features(). */
	txq->vtntx_sg = sglist_alloc(sc->vtnet_tx_nsegs, M_NOWAIT);
	if (txq->vtntx_sg == NULL)
		return (ENOMEM);

	/* The buf_ring and deferred-start task back if_transmit. */
	if (!VTNET_ALTQ_ENABLED) {
		txq->vtntx_br = buf_ring_alloc(VTNET_DEFAULT_BUFRING_SIZE, M_DEVBUF,
		    M_NOWAIT, &txq->vtntx_mtx);
		if (txq->vtntx_br == NULL)
			return (ENOMEM);

		TASK_INIT(&txq->vtntx_defrtask, 0, vtnet_txq_tq_deferred, txq);
	}
	TASK_INIT(&txq->vtntx_intrtask, 0, vtnet_txq_tq_intr, txq);
	txq->vtntx_tq = taskqueue_create(txq->vtntx_name, M_NOWAIT,
	    taskqueue_thread_enqueue, &txq->vtntx_tq);
	if (txq->vtntx_tq == NULL)
		return (ENOMEM);

	return (0);
}
8958f3600b1SBryan Venteicher
8968f3600b1SBryan Venteicher static int
vtnet_alloc_rxtx_queues(struct vtnet_softc * sc)8978f3600b1SBryan Venteicher vtnet_alloc_rxtx_queues(struct vtnet_softc *sc)
8988f3600b1SBryan Venteicher {
8998f3600b1SBryan Venteicher int i, npairs, error;
9008f3600b1SBryan Venteicher
9018f3600b1SBryan Venteicher npairs = sc->vtnet_max_vq_pairs;
9028f3600b1SBryan Venteicher
903ac2fffa4SPedro F. Giffuni sc->vtnet_rxqs = malloc(sizeof(struct vtnet_rxq) * npairs, M_DEVBUF,
9048f3600b1SBryan Venteicher M_NOWAIT | M_ZERO);
905ac2fffa4SPedro F. Giffuni sc->vtnet_txqs = malloc(sizeof(struct vtnet_txq) * npairs, M_DEVBUF,
9068f3600b1SBryan Venteicher M_NOWAIT | M_ZERO);
9078f3600b1SBryan Venteicher if (sc->vtnet_rxqs == NULL || sc->vtnet_txqs == NULL)
9088f3600b1SBryan Venteicher return (ENOMEM);
9098f3600b1SBryan Venteicher
9108f3600b1SBryan Venteicher for (i = 0; i < npairs; i++) {
9118f3600b1SBryan Venteicher error = vtnet_init_rxq(sc, i);
9128f3600b1SBryan Venteicher if (error)
9138f3600b1SBryan Venteicher return (error);
9148f3600b1SBryan Venteicher error = vtnet_init_txq(sc, i);
9158f3600b1SBryan Venteicher if (error)
9168f3600b1SBryan Venteicher return (error);
9178f3600b1SBryan Venteicher }
9188f3600b1SBryan Venteicher
91932e0493cSBryan Venteicher vtnet_set_rx_process_limit(sc);
9208f3600b1SBryan Venteicher vtnet_setup_queue_sysctl(sc);
9218f3600b1SBryan Venteicher
9228f3600b1SBryan Venteicher return (0);
9238f3600b1SBryan Venteicher }
9248f3600b1SBryan Venteicher
9258f3600b1SBryan Venteicher static void
vtnet_destroy_rxq(struct vtnet_rxq * rxq)9268f3600b1SBryan Venteicher vtnet_destroy_rxq(struct vtnet_rxq *rxq)
9278f3600b1SBryan Venteicher {
9288f3600b1SBryan Venteicher
9298f3600b1SBryan Venteicher rxq->vtnrx_sc = NULL;
9308f3600b1SBryan Venteicher rxq->vtnrx_id = -1;
9318f3600b1SBryan Venteicher
93242343a63SBryan Venteicher #if defined(INET) || defined(INET6)
93342343a63SBryan Venteicher tcp_lro_free(&rxq->vtnrx_lro);
93442343a63SBryan Venteicher #endif
93542343a63SBryan Venteicher
936443c3d0bSBryan Venteicher if (rxq->vtnrx_sg != NULL) {
937443c3d0bSBryan Venteicher sglist_free(rxq->vtnrx_sg);
938443c3d0bSBryan Venteicher rxq->vtnrx_sg = NULL;
939443c3d0bSBryan Venteicher }
940443c3d0bSBryan Venteicher
9418f3600b1SBryan Venteicher if (mtx_initialized(&rxq->vtnrx_mtx) != 0)
9428f3600b1SBryan Venteicher mtx_destroy(&rxq->vtnrx_mtx);
9438f3600b1SBryan Venteicher }
9448f3600b1SBryan Venteicher
9458f3600b1SBryan Venteicher static void
vtnet_destroy_txq(struct vtnet_txq * txq)9468f3600b1SBryan Venteicher vtnet_destroy_txq(struct vtnet_txq *txq)
9478f3600b1SBryan Venteicher {
9488f3600b1SBryan Venteicher
9498f3600b1SBryan Venteicher txq->vtntx_sc = NULL;
9508f3600b1SBryan Venteicher txq->vtntx_id = -1;
9518f3600b1SBryan Venteicher
952443c3d0bSBryan Venteicher if (txq->vtntx_sg != NULL) {
953443c3d0bSBryan Venteicher sglist_free(txq->vtntx_sg);
954443c3d0bSBryan Venteicher txq->vtntx_sg = NULL;
955443c3d0bSBryan Venteicher }
956443c3d0bSBryan Venteicher
957*3a9ebff2SBjoern Jakobsen if (!VTNET_ALTQ_ENABLED) {
9588f3600b1SBryan Venteicher if (txq->vtntx_br != NULL) {
9598f3600b1SBryan Venteicher buf_ring_free(txq->vtntx_br, M_DEVBUF);
9608f3600b1SBryan Venteicher txq->vtntx_br = NULL;
9618f3600b1SBryan Venteicher }
962*3a9ebff2SBjoern Jakobsen }
9638f3600b1SBryan Venteicher
9648f3600b1SBryan Venteicher if (mtx_initialized(&txq->vtntx_mtx) != 0)
9658f3600b1SBryan Venteicher mtx_destroy(&txq->vtntx_mtx);
9668f3600b1SBryan Venteicher }
9678f3600b1SBryan Venteicher
9688f3600b1SBryan Venteicher static void
vtnet_free_rxtx_queues(struct vtnet_softc * sc)9698f3600b1SBryan Venteicher vtnet_free_rxtx_queues(struct vtnet_softc *sc)
9708f3600b1SBryan Venteicher {
9718f3600b1SBryan Venteicher int i;
9728f3600b1SBryan Venteicher
9738f3600b1SBryan Venteicher if (sc->vtnet_rxqs != NULL) {
9748f3600b1SBryan Venteicher for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
9758f3600b1SBryan Venteicher vtnet_destroy_rxq(&sc->vtnet_rxqs[i]);
9768f3600b1SBryan Venteicher free(sc->vtnet_rxqs, M_DEVBUF);
9778f3600b1SBryan Venteicher sc->vtnet_rxqs = NULL;
9788f3600b1SBryan Venteicher }
9798f3600b1SBryan Venteicher
9808f3600b1SBryan Venteicher if (sc->vtnet_txqs != NULL) {
9818f3600b1SBryan Venteicher for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
9828f3600b1SBryan Venteicher vtnet_destroy_txq(&sc->vtnet_txqs[i]);
9838f3600b1SBryan Venteicher free(sc->vtnet_txqs, M_DEVBUF);
9848f3600b1SBryan Venteicher sc->vtnet_txqs = NULL;
9858f3600b1SBryan Venteicher }
9868f3600b1SBryan Venteicher }
9878f3600b1SBryan Venteicher
9888f3600b1SBryan Venteicher static int
vtnet_alloc_rx_filters(struct vtnet_softc * sc)9898f3600b1SBryan Venteicher vtnet_alloc_rx_filters(struct vtnet_softc *sc)
9908f3600b1SBryan Venteicher {
9918f3600b1SBryan Venteicher
9928f3600b1SBryan Venteicher if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
9938f3600b1SBryan Venteicher sc->vtnet_mac_filter = malloc(sizeof(struct vtnet_mac_filter),
9948f3600b1SBryan Venteicher M_DEVBUF, M_NOWAIT | M_ZERO);
9958f3600b1SBryan Venteicher if (sc->vtnet_mac_filter == NULL)
9968f3600b1SBryan Venteicher return (ENOMEM);
9978f3600b1SBryan Venteicher }
9988f3600b1SBryan Venteicher
9998f3600b1SBryan Venteicher if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
10008f3600b1SBryan Venteicher sc->vtnet_vlan_filter = malloc(sizeof(uint32_t) *
10018f3600b1SBryan Venteicher VTNET_VLAN_FILTER_NWORDS, M_DEVBUF, M_NOWAIT | M_ZERO);
10028f3600b1SBryan Venteicher if (sc->vtnet_vlan_filter == NULL)
10038f3600b1SBryan Venteicher return (ENOMEM);
10048f3600b1SBryan Venteicher }
10058f3600b1SBryan Venteicher
10068f3600b1SBryan Venteicher return (0);
10078f3600b1SBryan Venteicher }
10088f3600b1SBryan Venteicher
10098f3600b1SBryan Venteicher static void
vtnet_free_rx_filters(struct vtnet_softc * sc)10108f3600b1SBryan Venteicher vtnet_free_rx_filters(struct vtnet_softc *sc)
10118f3600b1SBryan Venteicher {
10128f3600b1SBryan Venteicher
10138f3600b1SBryan Venteicher if (sc->vtnet_mac_filter != NULL) {
10148f3600b1SBryan Venteicher free(sc->vtnet_mac_filter, M_DEVBUF);
10158f3600b1SBryan Venteicher sc->vtnet_mac_filter = NULL;
10168f3600b1SBryan Venteicher }
10178f3600b1SBryan Venteicher
10188f3600b1SBryan Venteicher if (sc->vtnet_vlan_filter != NULL) {
10198f3600b1SBryan Venteicher free(sc->vtnet_vlan_filter, M_DEVBUF);
10208f3600b1SBryan Venteicher sc->vtnet_vlan_filter = NULL;
10218f3600b1SBryan Venteicher }
10228f3600b1SBryan Venteicher }
10238f3600b1SBryan Venteicher
/*
 * Allocate the virtqueues: one RX and one TX queue per supported pair
 * (interleaved rx0, tx0, rx1, tx1, ...) plus an optional trailing
 * control virtqueue.  Only the first vtnet_req_vq_pairs pairs get real
 * segment counts and interrupt handlers; the remaining pairs exist
 * only because the device exposes them.
 */
static int
vtnet_alloc_virtqueues(struct vtnet_softc *sc)
{
	device_t dev;
	struct vq_alloc_info *info;
	struct vtnet_rxq *rxq;
	struct vtnet_txq *txq;
	int i, idx, nvqs, error;

	dev = sc->vtnet_dev;

	nvqs = sc->vtnet_max_vq_pairs * 2;
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ)
		nvqs++;

	info = malloc(sizeof(struct vq_alloc_info) * nvqs, M_TEMP, M_NOWAIT);
	if (info == NULL)
		return (ENOMEM);

	/* 'idx' advances by two per pair: the RX slot, then the TX slot. */
	for (i = 0, idx = 0; i < sc->vtnet_req_vq_pairs; i++, idx += 2) {
		rxq = &sc->vtnet_rxqs[i];
		VQ_ALLOC_INFO_INIT(&info[idx], sc->vtnet_rx_nsegs,
		    vtnet_rx_vq_intr, rxq, &rxq->vtnrx_vq,
		    "%s-rx%d", device_get_nameunit(dev), rxq->vtnrx_id);

		txq = &sc->vtnet_txqs[i];
		VQ_ALLOC_INFO_INIT(&info[idx + 1], sc->vtnet_tx_nsegs,
		    vtnet_tx_vq_intr, txq, &txq->vtntx_vq,
		    "%s-tx%d", device_get_nameunit(dev), txq->vtntx_id);
	}

	/* These queues will not be used so allocate the minimum resources. */
	for (; i < sc->vtnet_max_vq_pairs; i++, idx += 2) {
		rxq = &sc->vtnet_rxqs[i];
		VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, rxq, &rxq->vtnrx_vq,
		    "%s-rx%d", device_get_nameunit(dev), rxq->vtnrx_id);

		txq = &sc->vtnet_txqs[i];
		VQ_ALLOC_INFO_INIT(&info[idx + 1], 0, NULL, txq, &txq->vtntx_vq,
		    "%s-tx%d", device_get_nameunit(dev), txq->vtntx_id);
	}

	/* The control virtqueue occupies the final slot, if negotiated. */
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
		VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, NULL,
		    &sc->vtnet_ctrl_vq, "%s ctrl", device_get_nameunit(dev));
	}

	error = virtio_alloc_virtqueues(dev, nvqs, info);
	free(info, M_TEMP);

	return (error);
}
10768f3600b1SBryan Venteicher
1077aa386085SZhenlei Huang static void
vtnet_alloc_interface(struct vtnet_softc * sc)107842343a63SBryan Venteicher vtnet_alloc_interface(struct vtnet_softc *sc)
107942343a63SBryan Venteicher {
108042343a63SBryan Venteicher device_t dev;
10814ee96792SJustin Hibbits if_t ifp;
108242343a63SBryan Venteicher
108342343a63SBryan Venteicher dev = sc->vtnet_dev;
108442343a63SBryan Venteicher
108542343a63SBryan Venteicher ifp = if_alloc(IFT_ETHER);
108642343a63SBryan Venteicher sc->vtnet_ifp = ifp;
10874ee96792SJustin Hibbits if_setsoftc(ifp, sc);
108842343a63SBryan Venteicher if_initname(ifp, device_get_name(dev), device_get_unit(dev));
108942343a63SBryan Venteicher }
109042343a63SBryan Venteicher
/*
 * Configure the ifnet for this device and attach it to the network
 * stack: set the interface methods, advertise the offload capabilities
 * implied by the negotiated VirtIO features, and register the VLAN
 * event handlers and pfil head.  Returns 0 (always succeeds).
 */
static int
vtnet_setup_interface(struct vtnet_softc *sc)
{
	device_t dev;
	struct pfil_head_args pa;
	if_t ifp;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setbaudrate(ifp, IF_Gbps(10));
	if_setinitfn(ifp, vtnet_init);
	if_setioctlfn(ifp, vtnet_ioctl);
	if_setgetcounterfn(ifp, vtnet_get_counter);

	/*
	 * Without ALTQ, use the multiqueue transmit methods; otherwise
	 * fall back to the legacy if_start path sized from the first
	 * transmit virtqueue.
	 */
	if (!VTNET_ALTQ_ENABLED) {
		if_settransmitfn(ifp, vtnet_txq_mq_start);
		if_setqflushfn(ifp, vtnet_qflush);
	} else {
		struct virtqueue *vq = sc->vtnet_txqs[0].vtntx_vq;
		if_setstartfn(ifp, vtnet_start);
		if_setsendqlen(ifp, virtqueue_size(vq) - 1);
		if_setsendqready(ifp);
	}

	vtnet_get_macaddr(sc);

	/* Link state changes are only reported with the STATUS feature. */
	if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS))
		if_setcapabilitiesbit(ifp, IFCAP_LINKSTATE, 0);

	/* VirtIO has no real media; advertise a single autoselect entry. */
	ifmedia_init(&sc->vtnet_media, 0, vtnet_ifmedia_upd, vtnet_ifmedia_sts);
	ifmedia_add(&sc->vtnet_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->vtnet_media, IFM_ETHER | IFM_AUTO);

	if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) {
		int gso;

		if_setcapabilitiesbit(ifp, IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6, 0);

		/* The legacy GSO feature implies all of the host TSO bits. */
		gso = virtio_with_feature(dev, VIRTIO_NET_F_GSO);
		if (gso || virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4))
			if_setcapabilitiesbit(ifp, IFCAP_TSO4, 0);
		if (gso || virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
			if_setcapabilitiesbit(ifp, IFCAP_TSO6, 0);
		if (gso || virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN))
			sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;

		if (if_getcapabilities(ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
			int tso_maxlen;

			if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTSO, 0);

			/* Tunable cap on the size of a TSO burst. */
			tso_maxlen = vtnet_tunable_int(sc, "tso_maxlen",
			    vtnet_tso_maxlen);
			if_sethwtsomax(ifp, tso_maxlen -
			    (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN));
			if_sethwtsomaxsegcount(ifp, sc->vtnet_tx_nsegs - 1);
			if_sethwtsomaxsegsize(ifp, PAGE_SIZE);
		}
	}

	if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) {
		if_setcapabilitiesbit(ifp, IFCAP_RXCSUM, 0);
#ifdef notyet
		/* BMV: Rx checksums not distinguished between IPv4 and IPv6. */
		if_setcapabilitiesbit(ifp, IFCAP_RXCSUM_IPV6, 0);
#endif

		if (vtnet_tunable_int(sc, "fixup_needs_csum",
		    vtnet_fixup_needs_csum) != 0)
			sc->vtnet_flags |= VTNET_FLAG_FIXUP_NEEDS_CSUM;

		/* Support either "hardware" or software LRO. */
		if_setcapabilitiesbit(ifp, IFCAP_LRO, 0);
	}

	if (if_getcapabilities(ifp) & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6)) {
		/*
		 * VirtIO does not support VLAN tagging, but we can fake
		 * it by inserting and removing the 802.1Q header during
		 * transmit and receive. We are then able to do checksum
		 * offloading of VLAN frames.
		 */
		if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM, 0);
	}

	if (sc->vtnet_max_mtu >= ETHERMTU_JUMBO)
		if_setcapabilitiesbit(ifp, IFCAP_JUMBO_MTU, 0);
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);

	/*
	 * Capabilities after here are not enabled by default.
	 */
	if_setcapenable(ifp, if_getcapabilities(ifp));

	if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
		if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWFILTER, 0);

		sc->vtnet_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
		    vtnet_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
		sc->vtnet_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
		    vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
	}

	ether_ifattach(ifp, sc->vtnet_hwaddr);

	/* Tell the upper layer(s) we support long frames. */
	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

	DEBUGNET_SET(ifp, vtnet);

	/* Register a pfil head so inbound packets can be filtered early. */
	pa.pa_version = PFIL_VERSION;
	pa.pa_flags = PFIL_IN;
	pa.pa_type = PFIL_TYPE_ETHERNET;
	pa.pa_headname = if_name(ifp);
	sc->vtnet_pfil = pfil_head_register(&pa);

	return (0);
}
121110b59a9bSPeter Grehan
12128f3600b1SBryan Venteicher static int
vtnet_rx_cluster_size(struct vtnet_softc * sc,int mtu)1213fa7ca1e3SBryan Venteicher vtnet_rx_cluster_size(struct vtnet_softc *sc, int mtu)
12148f3600b1SBryan Venteicher {
1215fa7ca1e3SBryan Venteicher int framesz;
12168f3600b1SBryan Venteicher
1217fa7ca1e3SBryan Venteicher if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS)
1218fa7ca1e3SBryan Venteicher return (MJUMPAGESIZE);
1219fa7ca1e3SBryan Venteicher else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
1220fa7ca1e3SBryan Venteicher return (MCLBYTES);
12218f3600b1SBryan Venteicher
122210b59a9bSPeter Grehan /*
1223aabdf5b6SBryan Venteicher * Try to scale the receive mbuf cluster size from the MTU. We
1224aabdf5b6SBryan Venteicher * could also use the VQ size to influence the selected size,
1225aabdf5b6SBryan Venteicher * but that would only matter for very small queues.
122610b59a9bSPeter Grehan */
1227fa7ca1e3SBryan Venteicher if (vtnet_modern(sc)) {
1228fa7ca1e3SBryan Venteicher MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr_v1));
1229fa7ca1e3SBryan Venteicher framesz = sizeof(struct virtio_net_hdr_v1);
1230fa7ca1e3SBryan Venteicher } else
1231fa7ca1e3SBryan Venteicher framesz = sizeof(struct vtnet_rx_header);
1232fa7ca1e3SBryan Venteicher framesz += sizeof(struct ether_vlan_header) + mtu;
1233d9e0e426SWarner Losh /*
1234d9e0e426SWarner Losh * Account for the offsetting we'll do elsewhere so we allocate the
1235d9e0e426SWarner Losh * right size for the mtu.
1236d9e0e426SWarner Losh */
12370ea4b408SWarner Losh if (VTNET_ETHER_ALIGN != 0 && sc->vtnet_hdr_size % 4 == 0) {
12380ea4b408SWarner Losh framesz += VTNET_ETHER_ALIGN;
1239d9e0e426SWarner Losh }
1240fa7ca1e3SBryan Venteicher
1241fa7ca1e3SBryan Venteicher if (framesz <= MCLBYTES)
1242fa7ca1e3SBryan Venteicher return (MCLBYTES);
1243fa7ca1e3SBryan Venteicher else if (framesz <= MJUMPAGESIZE)
1244fa7ca1e3SBryan Venteicher return (MJUMPAGESIZE);
1245fa7ca1e3SBryan Venteicher else if (framesz <= MJUM9BYTES)
1246fa7ca1e3SBryan Venteicher return (MJUM9BYTES);
1247fa7ca1e3SBryan Venteicher
1248fa7ca1e3SBryan Venteicher /* Sane default; avoid 16KB clusters. */
1249fa7ca1e3SBryan Venteicher return (MCLBYTES);
12505e220811SBryan Venteicher }
125110b59a9bSPeter Grehan
1252fa7ca1e3SBryan Venteicher static int
vtnet_ioctl_mtu(struct vtnet_softc * sc,u_int mtu)1253c1b554c8SAlex Richardson vtnet_ioctl_mtu(struct vtnet_softc *sc, u_int mtu)
1254fa7ca1e3SBryan Venteicher {
12554ee96792SJustin Hibbits if_t ifp;
1256fa7ca1e3SBryan Venteicher int clustersz;
125710b59a9bSPeter Grehan
1258fa7ca1e3SBryan Venteicher ifp = sc->vtnet_ifp;
1259fa7ca1e3SBryan Venteicher VTNET_CORE_LOCK_ASSERT(sc);
1260fa7ca1e3SBryan Venteicher
12614ee96792SJustin Hibbits if (if_getmtu(ifp) == mtu)
1262fa7ca1e3SBryan Venteicher return (0);
1263aabdf5b6SBryan Venteicher else if (mtu < ETHERMIN || mtu > sc->vtnet_max_mtu)
1264fa7ca1e3SBryan Venteicher return (EINVAL);
1265fa7ca1e3SBryan Venteicher
12664ee96792SJustin Hibbits if_setmtu(ifp, mtu);
1267fa7ca1e3SBryan Venteicher clustersz = vtnet_rx_cluster_size(sc, mtu);
1268fa7ca1e3SBryan Venteicher
1269fa7ca1e3SBryan Venteicher if (clustersz != sc->vtnet_rx_clustersz &&
12704ee96792SJustin Hibbits if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
12714ee96792SJustin Hibbits if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
127216f224b5SVincenzo Maffione vtnet_init_locked(sc, 0);
127310b59a9bSPeter Grehan }
127410b59a9bSPeter Grehan
12758f3600b1SBryan Venteicher return (0);
12768f3600b1SBryan Venteicher }
12778f3600b1SBryan Venteicher
127810b59a9bSPeter Grehan static int
vtnet_ioctl_ifflags(struct vtnet_softc * sc)1279dc9029d8SBryan Venteicher vtnet_ioctl_ifflags(struct vtnet_softc *sc)
128010b59a9bSPeter Grehan {
12814ee96792SJustin Hibbits if_t ifp;
1282dc9029d8SBryan Venteicher int drv_running;
128310b59a9bSPeter Grehan
1284dc9029d8SBryan Venteicher ifp = sc->vtnet_ifp;
12854ee96792SJustin Hibbits drv_running = (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0;
128610b59a9bSPeter Grehan
1287dc9029d8SBryan Venteicher VTNET_CORE_LOCK_ASSERT(sc);
128810b59a9bSPeter Grehan
12894ee96792SJustin Hibbits if ((if_getflags(ifp) & IFF_UP) == 0) {
1290dc9029d8SBryan Venteicher if (drv_running)
129110b59a9bSPeter Grehan vtnet_stop(sc);
1292dc9029d8SBryan Venteicher goto out;
1293dc9029d8SBryan Venteicher }
1294dc9029d8SBryan Venteicher
1295dc9029d8SBryan Venteicher if (!drv_running) {
1296dc9029d8SBryan Venteicher vtnet_init_locked(sc, 0);
1297dc9029d8SBryan Venteicher goto out;
1298dc9029d8SBryan Venteicher }
1299dc9029d8SBryan Venteicher
13004ee96792SJustin Hibbits if ((if_getflags(ifp) ^ sc->vtnet_if_flags) &
130110b59a9bSPeter Grehan (IFF_PROMISC | IFF_ALLMULTI)) {
1302fc035df8SAleksandr Fedorov if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)
130310b59a9bSPeter Grehan vtnet_rx_filter(sc);
1304fc035df8SAleksandr Fedorov else {
1305580cadd6SKristof Provost /*
1306580cadd6SKristof Provost * We don't support filtering out multicast, so
1307580cadd6SKristof Provost * ALLMULTI is always set.
1308580cadd6SKristof Provost */
1309580cadd6SKristof Provost if_setflagbits(ifp, IFF_ALLMULTI, 0);
13104ee96792SJustin Hibbits if_setflagbits(ifp, IFF_PROMISC, 0);
1311fc035df8SAleksandr Fedorov }
131210b59a9bSPeter Grehan }
131310b59a9bSPeter Grehan
1314dc9029d8SBryan Venteicher out:
13154ee96792SJustin Hibbits sc->vtnet_if_flags = if_getflags(ifp);
1316dc9029d8SBryan Venteicher return (0);
1317dc9029d8SBryan Venteicher }
131810b59a9bSPeter Grehan
1319dc9029d8SBryan Venteicher static int
vtnet_ioctl_multi(struct vtnet_softc * sc)1320dc9029d8SBryan Venteicher vtnet_ioctl_multi(struct vtnet_softc *sc)
1321dc9029d8SBryan Venteicher {
13224ee96792SJustin Hibbits if_t ifp;
1323dc9029d8SBryan Venteicher
1324dc9029d8SBryan Venteicher ifp = sc->vtnet_ifp;
1325dc9029d8SBryan Venteicher
1326dc9029d8SBryan Venteicher VTNET_CORE_LOCK_ASSERT(sc);
1327dc9029d8SBryan Venteicher
13285e220811SBryan Venteicher if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX &&
13294ee96792SJustin Hibbits if_getdrvflags(ifp) & IFF_DRV_RUNNING)
133010b59a9bSPeter Grehan vtnet_rx_filter_mac(sc);
133110b59a9bSPeter Grehan
1332dc9029d8SBryan Venteicher return (0);
1333dc9029d8SBryan Venteicher }
133410b59a9bSPeter Grehan
1335dc9029d8SBryan Venteicher static int
vtnet_ioctl_ifcap(struct vtnet_softc * sc,struct ifreq * ifr)1336dc9029d8SBryan Venteicher vtnet_ioctl_ifcap(struct vtnet_softc *sc, struct ifreq *ifr)
1337dc9029d8SBryan Venteicher {
13384ee96792SJustin Hibbits if_t ifp;
1339e36a6b1bSBryan Venteicher int mask, reinit, update;
1340dc9029d8SBryan Venteicher
1341dc9029d8SBryan Venteicher ifp = sc->vtnet_ifp;
13424ee96792SJustin Hibbits mask = (ifr->ifr_reqcap & if_getcapabilities(ifp)) ^ if_getcapenable(ifp);
1343e36a6b1bSBryan Venteicher reinit = update = 0;
1344dc9029d8SBryan Venteicher
1345dc9029d8SBryan Venteicher VTNET_CORE_LOCK_ASSERT(sc);
134610b59a9bSPeter Grehan
13478f3600b1SBryan Venteicher if (mask & IFCAP_TXCSUM)
13484ee96792SJustin Hibbits if_togglecapenable(ifp, IFCAP_TXCSUM);
13498f3600b1SBryan Venteicher if (mask & IFCAP_TXCSUM_IPV6)
13504ee96792SJustin Hibbits if_togglecapenable(ifp, IFCAP_TXCSUM_IPV6);
13518f3600b1SBryan Venteicher if (mask & IFCAP_TSO4)
13524ee96792SJustin Hibbits if_togglecapenable(ifp, IFCAP_TSO4);
13538f3600b1SBryan Venteicher if (mask & IFCAP_TSO6)
13544ee96792SJustin Hibbits if_togglecapenable(ifp, IFCAP_TSO6);
135510b59a9bSPeter Grehan
1356e36a6b1bSBryan Venteicher if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO)) {
1357e36a6b1bSBryan Venteicher /*
1358e36a6b1bSBryan Venteicher * These Rx features require the negotiated features to
1359e36a6b1bSBryan Venteicher * be updated. Avoid a full reinit if possible.
1360e36a6b1bSBryan Venteicher */
1361e36a6b1bSBryan Venteicher if (sc->vtnet_features & VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
1362e36a6b1bSBryan Venteicher update = 1;
1363e36a6b1bSBryan Venteicher else
13648f3600b1SBryan Venteicher reinit = 1;
13658f3600b1SBryan Venteicher
136642343a63SBryan Venteicher /* BMV: Avoid needless renegotiation for just software LRO. */
136742343a63SBryan Venteicher if ((mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO)) ==
136842343a63SBryan Venteicher IFCAP_LRO && vtnet_software_lro(sc))
136942343a63SBryan Venteicher reinit = update = 0;
137042343a63SBryan Venteicher
13718f3600b1SBryan Venteicher if (mask & IFCAP_RXCSUM)
13724ee96792SJustin Hibbits if_togglecapenable(ifp, IFCAP_RXCSUM);
13738f3600b1SBryan Venteicher if (mask & IFCAP_RXCSUM_IPV6)
13744ee96792SJustin Hibbits if_togglecapenable(ifp, IFCAP_RXCSUM_IPV6);
13758f3600b1SBryan Venteicher if (mask & IFCAP_LRO)
13764ee96792SJustin Hibbits if_togglecapenable(ifp, IFCAP_LRO);
1377e36a6b1bSBryan Venteicher
1378e36a6b1bSBryan Venteicher /*
1379e36a6b1bSBryan Venteicher * VirtIO does not distinguish between IPv4 and IPv6 checksums
1380e36a6b1bSBryan Venteicher * so treat them as a pair. Guest TSO (LRO) requires receive
1381e36a6b1bSBryan Venteicher * checksums.
1382e36a6b1bSBryan Venteicher */
13834ee96792SJustin Hibbits if (if_getcapenable(ifp) & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
13844ee96792SJustin Hibbits if_setcapenablebit(ifp, IFCAP_RXCSUM, 0);
1385e36a6b1bSBryan Venteicher #ifdef notyet
13864ee96792SJustin Hibbits if_setcapenablebit(ifp, IFCAP_RXCSUM_IPV6, 0);
1387e36a6b1bSBryan Venteicher #endif
1388e36a6b1bSBryan Venteicher } else
13894ee96792SJustin Hibbits if_setcapenablebit(ifp, 0,
13904ee96792SJustin Hibbits (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO));
1391e36a6b1bSBryan Venteicher }
1392e36a6b1bSBryan Venteicher
1393e36a6b1bSBryan Venteicher if (mask & IFCAP_VLAN_HWFILTER) {
1394e36a6b1bSBryan Venteicher /* These Rx features require renegotiation. */
1395e36a6b1bSBryan Venteicher reinit = 1;
1396e36a6b1bSBryan Venteicher
13978f3600b1SBryan Venteicher if (mask & IFCAP_VLAN_HWFILTER)
13984ee96792SJustin Hibbits if_togglecapenable(ifp, IFCAP_VLAN_HWFILTER);
1399e36a6b1bSBryan Venteicher }
140010b59a9bSPeter Grehan
140110b59a9bSPeter Grehan if (mask & IFCAP_VLAN_HWTSO)
14024ee96792SJustin Hibbits if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
140310b59a9bSPeter Grehan if (mask & IFCAP_VLAN_HWTAGGING)
14044ee96792SJustin Hibbits if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
140510b59a9bSPeter Grehan
14064ee96792SJustin Hibbits if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
1407e36a6b1bSBryan Venteicher if (reinit) {
14084ee96792SJustin Hibbits if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
140916f224b5SVincenzo Maffione vtnet_init_locked(sc, 0);
1410e36a6b1bSBryan Venteicher } else if (update)
1411e36a6b1bSBryan Venteicher vtnet_update_rx_offloads(sc);
141210b59a9bSPeter Grehan }
14138f3600b1SBryan Venteicher
1414dc9029d8SBryan Venteicher return (0);
1415dc9029d8SBryan Venteicher }
1416dc9029d8SBryan Venteicher
/*
 * Interface ioctl entry point.  Driver state changes are serialized
 * with the core lock and dispatched to per-command helpers; anything
 * unrecognized is passed to ether_ioctl().
 */
static int
vtnet_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct vtnet_softc *sc;
	struct ifreq *ifr;
	int error;

	sc = if_getsoftc(ifp);
	ifr = (struct ifreq *) data;
	error = 0;

	switch (cmd) {
	case SIOCSIFMTU:
		VTNET_CORE_LOCK(sc);
		error = vtnet_ioctl_mtu(sc, ifr->ifr_mtu);
		VTNET_CORE_UNLOCK(sc);
		break;

	case SIOCSIFFLAGS:
		VTNET_CORE_LOCK(sc);
		error = vtnet_ioctl_ifflags(sc);
		VTNET_CORE_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		VTNET_CORE_LOCK(sc);
		error = vtnet_ioctl_multi(sc);
		VTNET_CORE_UNLOCK(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Media requests are handled by the ifmedia framework. */
		error = ifmedia_ioctl(ifp, ifr, &sc->vtnet_media, cmd);
		break;

	case SIOCSIFCAP:
		VTNET_CORE_LOCK(sc);
		error = vtnet_ioctl_ifcap(sc, ifr);
		VTNET_CORE_UNLOCK(sc);
		/* Propagate capability changes to any vlan(4) children. */
		VLAN_CAPABILITIES(ifp);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	/* The core lock must never be held on return to the caller. */
	VTNET_CORE_LOCK_ASSERT_NOTOWNED(sc);

	return (error);
}
146910b59a9bSPeter Grehan
/*
 * Fill the receive virtqueue with empty buffers and notify the host.
 * Returns 0 when at least the queue could be (partially) populated,
 * otherwise an errno.
 */
static int
vtnet_rxq_populate(struct vtnet_rxq *rxq)
{
	struct virtqueue *vq;
	int nbufs, error;

#ifdef DEV_NETMAP
	/*
	 * When netmap has claimed this ring it populates the virtqueue
	 * itself; a non-negative return means it took over the request.
	 */
	error = vtnet_netmap_rxq_populate(rxq);
	if (error >= 0)
		return (error);
#endif /* DEV_NETMAP */

	vq = rxq->vtnrx_vq;
	error = ENOSPC;

	/* Post new receive buffers until the virtqueue is full. */
	for (nbufs = 0; !virtqueue_full(vq); nbufs++) {
		error = vtnet_rxq_new_buf(rxq);
		if (error)
			break;
	}

	if (nbufs > 0) {
		virtqueue_notify(vq);
		/*
		 * EMSGSIZE signifies the virtqueue did not have enough
		 * entries available to hold the last mbuf. This is not
		 * an error.
		 */
		if (error == EMSGSIZE)
			error = 0;
	}

	return (error);
}
150410b59a9bSPeter Grehan
/*
 * Drain all pending buffers from the receive virtqueue.  When netmap
 * is active on this ring the drained cookies are not driver-owned
 * mbufs, so they must not be passed to m_freem().
 */
static void
vtnet_rxq_free_mbufs(struct vtnet_rxq *rxq)
{
	struct virtqueue *vq;
	struct mbuf *m;
	int last;
#ifdef DEV_NETMAP
	/* Non-NULL when netmap currently owns this receive ring. */
	struct netmap_kring *kring = netmap_kring_on(NA(rxq->vtnrx_sc->vtnet_ifp),
	    rxq->vtnrx_id, NR_RX);
#else /* !DEV_NETMAP */
	void *kring = NULL;
#endif /* !DEV_NETMAP */

	vq = rxq->vtnrx_vq;
	last = 0;

	while ((m = virtqueue_drain(vq, &last)) != NULL) {
		/* Only free driver-owned mbufs, not netmap buffers. */
		if (kring == NULL)
			m_freem(m);
	}

	KASSERT(virtqueue_empty(vq),
	    ("%s: mbufs remaining in rx queue %p", __func__, rxq));
}
152910b59a9bSPeter Grehan
153010b59a9bSPeter Grehan static struct mbuf *
vtnet_rx_alloc_buf(struct vtnet_softc * sc,int nbufs,struct mbuf ** m_tailp)15318f3600b1SBryan Venteicher vtnet_rx_alloc_buf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp)
153210b59a9bSPeter Grehan {
153310b59a9bSPeter Grehan struct mbuf *m_head, *m_tail, *m;
1534fa7ca1e3SBryan Venteicher int i, size;
153510b59a9bSPeter Grehan
1536fa7ca1e3SBryan Venteicher m_head = NULL;
1537fa7ca1e3SBryan Venteicher size = sc->vtnet_rx_clustersz;
15388f3600b1SBryan Venteicher
15398f3600b1SBryan Venteicher KASSERT(nbufs == 1 || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
1540fa7ca1e3SBryan Venteicher ("%s: mbuf %d chain requested without LRO_NOMRG", __func__, nbufs));
154110b59a9bSPeter Grehan
1542fa7ca1e3SBryan Venteicher for (i = 0; i < nbufs; i++) {
1543fa7ca1e3SBryan Venteicher m = m_getjcl(M_NOWAIT, MT_DATA, i == 0 ? M_PKTHDR : 0, size);
1544fa7ca1e3SBryan Venteicher if (m == NULL) {
1545fa7ca1e3SBryan Venteicher sc->vtnet_stats.mbuf_alloc_failed++;
1546fa7ca1e3SBryan Venteicher m_freem(m_head);
1547fa7ca1e3SBryan Venteicher return (NULL);
1548fa7ca1e3SBryan Venteicher }
154923699ff2SWarner Losh
1550fa7ca1e3SBryan Venteicher m->m_len = size;
15513be59adbSWarner Losh /*
15523be59adbSWarner Losh * Need to offset the mbuf if the header we're going to add
15533be59adbSWarner Losh * will misalign.
15543be59adbSWarner Losh */
15550ea4b408SWarner Losh if (VTNET_ETHER_ALIGN != 0 && sc->vtnet_hdr_size % 4 == 0) {
15560ea4b408SWarner Losh m_adj(m, VTNET_ETHER_ALIGN);
15573be59adbSWarner Losh }
1558fa7ca1e3SBryan Venteicher if (m_head != NULL) {
155910b59a9bSPeter Grehan m_tail->m_next = m;
156010b59a9bSPeter Grehan m_tail = m;
1561fa7ca1e3SBryan Venteicher } else
1562fa7ca1e3SBryan Venteicher m_head = m_tail = m;
156310b59a9bSPeter Grehan }
156410b59a9bSPeter Grehan
156510b59a9bSPeter Grehan if (m_tailp != NULL)
156610b59a9bSPeter Grehan *m_tailp = m_tail;
156710b59a9bSPeter Grehan
156810b59a9bSPeter Grehan return (m_head);
156910b59a9bSPeter Grehan }
157010b59a9bSPeter Grehan
15718f3600b1SBryan Venteicher /*
15728f3600b1SBryan Venteicher * Slow path for when LRO without mergeable buffers is negotiated.
15738f3600b1SBryan Venteicher */
/*
 * Replace a received LRO_NOMRG mbuf chain of length len0 with a fresh
 * chain of the same shape and re-enqueue the replacement.  Only the
 * mbufs actually consumed by the frame are replaced; the unused tail
 * of the received chain is recycled onto the replacement.  On failure
 * the received chain m0 is restored so the caller can discard it.
 */
static int
vtnet_rxq_replace_lro_nomrg_buf(struct vtnet_rxq *rxq, struct mbuf *m0,
    int len0)
{
	struct vtnet_softc *sc;
	struct mbuf *m, *m_prev, *m_new, *m_tail;
	int len, clustersz, nreplace, error;

	sc = rxq->vtnrx_sc;
	clustersz = sc->vtnet_rx_clustersz;
	/*
	 * Need to offset the mbuf if the header we're going to add will
	 * misalign, account for that here.
	 */
	if (VTNET_ETHER_ALIGN != 0 && sc->vtnet_hdr_size % 4 == 0)
		clustersz -= VTNET_ETHER_ALIGN;

	m_prev = NULL;
	m_tail = NULL;
	nreplace = 0;

	m = m0;
	len = len0;

	/*
	 * Since these mbuf chains are so large, avoid allocating a complete
	 * replacement when the received frame did not consume the entire
	 * chain. Unused mbufs are moved to the tail of the replacement mbuf.
	 */
	while (len > 0) {
		/* The frame claims more data than the chain holds. */
		if (m == NULL) {
			sc->vtnet_stats.rx_frame_too_large++;
			return (EMSGSIZE);
		}

		/*
		 * Every mbuf should have the expected cluster size since that
		 * is also used to allocate the replacements.
		 */
		KASSERT(m->m_len == clustersz,
		    ("%s: mbuf size %d not expected cluster size %d", __func__,
		    m->m_len, clustersz));

		/* Trim the last consumed mbuf to the remaining length. */
		m->m_len = MIN(m->m_len, len);
		len -= m->m_len;

		m_prev = m;
		m = m->m_next;
		nreplace++;
	}

	KASSERT(nreplace > 0 && nreplace <= sc->vtnet_rx_nmbufs,
	    ("%s: invalid replacement mbuf count %d max %d", __func__,
	    nreplace, sc->vtnet_rx_nmbufs));

	m_new = vtnet_rx_alloc_buf(sc, nreplace, &m_tail);
	if (m_new == NULL) {
		/* Undo the trim above before bailing out. */
		m_prev->m_len = clustersz;
		return (ENOBUFS);
	}

	/*
	 * Move any unused mbufs from the received mbuf chain onto the
	 * end of the replacement chain.
	 */
	if (m_prev->m_next != NULL) {
		m_tail->m_next = m_prev->m_next;
		m_prev->m_next = NULL;
	}

	error = vtnet_rxq_enqueue_buf(rxq, m_new);
	if (error) {
		/*
		 * The replacement is supposed to be a copy of the one
		 * dequeued so this is a very unexpected error.
		 *
		 * Restore the m0 chain to the original state if it was
		 * modified so we can then discard it.
		 */
		if (m_tail->m_next != NULL) {
			m_prev->m_next = m_tail->m_next;
			m_tail->m_next = NULL;
		}
		m_prev->m_len = clustersz;
		sc->vtnet_stats.rx_enq_replacement_failed++;
		m_freem(m_new);
	}

	return (error);
}
166410b59a9bSPeter Grehan
166510b59a9bSPeter Grehan static int
vtnet_rxq_replace_buf(struct vtnet_rxq * rxq,struct mbuf * m,int len)16668f3600b1SBryan Venteicher vtnet_rxq_replace_buf(struct vtnet_rxq *rxq, struct mbuf *m, int len)
166710b59a9bSPeter Grehan {
16688f3600b1SBryan Venteicher struct vtnet_softc *sc;
16698f3600b1SBryan Venteicher struct mbuf *m_new;
16708f3600b1SBryan Venteicher int error;
16718f3600b1SBryan Venteicher
16728f3600b1SBryan Venteicher sc = rxq->vtnrx_sc;
16738f3600b1SBryan Venteicher
1674fa7ca1e3SBryan Venteicher if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
1675fa7ca1e3SBryan Venteicher return (vtnet_rxq_replace_lro_nomrg_buf(rxq, m, len));
16768f3600b1SBryan Venteicher
1677fa7ca1e3SBryan Venteicher MPASS(m->m_next == NULL);
16788f3600b1SBryan Venteicher if (m->m_len < len)
1679fa7ca1e3SBryan Venteicher return (EMSGSIZE);
16808f3600b1SBryan Venteicher
16818f3600b1SBryan Venteicher m_new = vtnet_rx_alloc_buf(sc, 1, NULL);
16828f3600b1SBryan Venteicher if (m_new == NULL)
16838f3600b1SBryan Venteicher return (ENOBUFS);
16848f3600b1SBryan Venteicher
16858f3600b1SBryan Venteicher error = vtnet_rxq_enqueue_buf(rxq, m_new);
16868f3600b1SBryan Venteicher if (error) {
16878f3600b1SBryan Venteicher sc->vtnet_stats.rx_enq_replacement_failed++;
1688fa7ca1e3SBryan Venteicher m_freem(m_new);
16898f3600b1SBryan Venteicher } else
16908f3600b1SBryan Venteicher m->m_len = len;
16918f3600b1SBryan Venteicher
16928f3600b1SBryan Venteicher return (error);
16938f3600b1SBryan Venteicher }
16948f3600b1SBryan Venteicher
/*
 * Add a receive buffer (possibly an LRO_NOMRG chain) to the receive
 * virtqueue.  Modern and mergeable-buffer configurations place the
 * virtio header inline at the start of the buffer; legacy devices use
 * the separate header region of struct vtnet_rx_header.  Called with
 * the receive queue lock held.
 */
static int
vtnet_rxq_enqueue_buf(struct vtnet_rxq *rxq, struct mbuf *m)
{
	struct vtnet_softc *sc;
	struct sglist *sg;
	int header_inlined, error;

	sc = rxq->vtnrx_sc;
	sg = rxq->vtnrx_sg;

	KASSERT(m->m_next == NULL || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
	    ("%s: mbuf chain without LRO_NOMRG", __func__));
	VTNET_RXQ_LOCK_ASSERT(rxq);

	sglist_reset(sg);
	header_inlined = vtnet_modern(sc) ||
	    (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) != 0; /* TODO: ANY_LAYOUT */

	/*
	 * Note: The mbuf has been already adjusted when we allocate it if we
	 * have to do strict alignment.
	 */
	if (header_inlined)
		error = sglist_append_mbuf(sg, m);
	else {
		struct vtnet_rx_header *rxhdr =
		    mtod(m, struct vtnet_rx_header *);
		MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr));

		/* Append the header and remaining mbuf data. */
		error = sglist_append(sg, &rxhdr->vrh_hdr, sc->vtnet_hdr_size);
		if (error)
			return (error);
		/* Data starts after the header region of the first mbuf. */
		error = sglist_append(sg, &rxhdr[1],
		    m->m_len - sizeof(struct vtnet_rx_header));
		if (error)
			return (error);

		if (m->m_next != NULL)
			error = sglist_append_mbuf(sg, m->m_next);
	}

	if (error)
		return (error);

	/* All segments are device-writable (readable segment count 0). */
	return (virtqueue_enqueue(rxq->vtnrx_vq, m, sg, 0, sg->sg_nseg));
}
17428f3600b1SBryan Venteicher
17438f3600b1SBryan Venteicher static int
vtnet_rxq_new_buf(struct vtnet_rxq * rxq)17448f3600b1SBryan Venteicher vtnet_rxq_new_buf(struct vtnet_rxq *rxq)
17458f3600b1SBryan Venteicher {
17468f3600b1SBryan Venteicher struct vtnet_softc *sc;
174710b59a9bSPeter Grehan struct mbuf *m;
174810b59a9bSPeter Grehan int error;
174910b59a9bSPeter Grehan
17508f3600b1SBryan Venteicher sc = rxq->vtnrx_sc;
17518f3600b1SBryan Venteicher
17528f3600b1SBryan Venteicher m = vtnet_rx_alloc_buf(sc, sc->vtnet_rx_nmbufs, NULL);
175310b59a9bSPeter Grehan if (m == NULL)
175410b59a9bSPeter Grehan return (ENOBUFS);
175510b59a9bSPeter Grehan
17568f3600b1SBryan Venteicher error = vtnet_rxq_enqueue_buf(rxq, m);
175710b59a9bSPeter Grehan if (error)
175810b59a9bSPeter Grehan m_freem(m);
175910b59a9bSPeter Grehan
176010b59a9bSPeter Grehan return (error);
176110b59a9bSPeter Grehan }
176210b59a9bSPeter Grehan
/*
 * Handle a received frame whose host-provided header has the
 * VIRTIO_NET_HDR_F_NEEDS_CSUM flag set: the TCP/UDP checksum field only
 * contains the pseudo-header sum and must be completed before the frame
 * can be forwarded.  Returns 0 on success, 1 if the frame should be
 * counted as a checksum failure by the caller.
 */
static int
vtnet_rxq_csum_needs_csum(struct vtnet_rxq *rxq, struct mbuf *m, uint16_t etype,
    int hoff, struct virtio_net_hdr *hdr)
{
	struct vtnet_softc *sc;
	int error;

	sc = rxq->vtnrx_sc;

	/*
	 * NEEDS_CSUM corresponds to Linux's CHECKSUM_PARTIAL, but FreeBSD does
	 * not have an analogous CSUM flag. The checksum has been validated,
	 * but is incomplete (TCP/UDP pseudo header).
	 *
	 * The packet is likely from another VM on the same host that itself
	 * performed checksum offloading so Tx/Rx is basically a memcpy and
	 * the checksum has little value.
	 *
	 * Default to receiving the packet as-is for performance reasons, but
	 * this can cause issues if the packet is to be forwarded because it
	 * does not contain a valid checksum. This patch may be helpful:
	 * https://reviews.freebsd.org/D6611. In the meantime, have the driver
	 * compute the checksum if requested.
	 *
	 * BMV: Need to add an CSUM_PARTIAL flag?
	 */
	if ((sc->vtnet_flags & VTNET_FLAG_FIXUP_NEEDS_CSUM) == 0) {
		/* Fix-up disabled: just mark the data valid and pass it up. */
		error = vtnet_rxq_csum_data_valid(rxq, m, etype, hoff, hdr);
		return (error);
	}

	/*
	 * Compute the checksum in the driver so the packet will contain a
	 * valid checksum. The checksum is at csum_offset from csum_start.
	 */
	switch (etype) {
#if defined(INET) || defined(INET6)
	case ETHERTYPE_IP:
	case ETHERTYPE_IPV6: {
		int csum_off, csum_end;
		uint16_t csum;

		/* Absolute offset of the 16-bit checksum field in the frame. */
		csum_off = hdr->csum_start + hdr->csum_offset;
		csum_end = csum_off + sizeof(uint16_t);

		/* Assume checksum will be in the first mbuf. */
		if (m->m_len < csum_end || m->m_pkthdr.len < csum_end)
			return (1);

		/*
		 * Like in_delayed_cksum()/in6_delayed_cksum(), compute the
		 * checksum and write it at the specified offset. We could
		 * try to verify the packet: csum_start should probably
		 * correspond to the start of the TCP/UDP header.
		 *
		 * BMV: Need to properly handle UDP with zero checksum. Is
		 * the IPv4 header checksum implicitly validated?
		 */
		csum = in_cksum_skip(m, m->m_pkthdr.len, hdr->csum_start);
		*(uint16_t *)(mtodo(m, csum_off)) = csum;
		/* Tell the stack the checksum is already verified. */
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
		break;
	}
#endif
	default:
		sc->vtnet_stats.rx_csum_bad_ethtype++;
		return (1);
	}

	return (0);
}
1835fa7ca1e3SBryan Venteicher
/*
 * Handle a received frame whose host-provided header indicates the
 * checksum was already validated (VIRTIO_NET_HDR_F_DATA_VALID), or for
 * which the NEEDS_CSUM fix-up is disabled.  For TCP/UDP over IPv4/IPv6,
 * mark the mbuf so the stack skips checksum verification; all other
 * protocols are passed up unmarked for the stack to re-verify.
 * Always returns 0.
 */
static int
vtnet_rxq_csum_data_valid(struct vtnet_rxq *rxq, struct mbuf *m,
    uint16_t etype, int hoff, struct virtio_net_hdr *hdr __unused)
{
#if 0
	struct vtnet_softc *sc;
#endif
	int protocol;

#if 0
	sc = rxq->vtnrx_sc;
#endif

	/* Extract the transport protocol from the network header. */
	switch (etype) {
#if defined(INET)
	case ETHERTYPE_IP:
		if (__predict_false(m->m_len < hoff + sizeof(struct ip)))
			protocol = IPPROTO_DONE;
		else {
			struct ip *ip = (struct ip *)(m->m_data + hoff);
			protocol = ip->ip_p;
		}
		break;
#endif
#if defined(INET6)
	case ETHERTYPE_IPV6:
		/* Walk the extension header chain to the last header. */
		if (__predict_false(m->m_len < hoff + sizeof(struct ip6_hdr))
		    || ip6_lasthdr(m, hoff, IPPROTO_IPV6, &protocol) < 0)
			protocol = IPPROTO_DONE;
		break;
#endif
	default:
		protocol = IPPROTO_DONE;
		break;
	}

	switch (protocol) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
		break;
	default:
		/*
		 * FreeBSD does not support checksum offloading of this
		 * protocol. Let the stack re-verify the checksum later
		 * if the protocol is supported.
		 */
#if 0
		if_printf(sc->vtnet_ifp,
		    "%s: checksum offload of unsupported protocol "
		    "etype=%#x protocol=%d csum_start=%d csum_offset=%d\n",
		    __func__, etype, protocol, hdr->csum_start,
		    hdr->csum_offset);
#endif
		break;
	}

	return (0);
}
18968f3600b1SBryan Venteicher
18978f3600b1SBryan Venteicher static int
vtnet_rxq_csum(struct vtnet_rxq * rxq,struct mbuf * m,struct virtio_net_hdr * hdr)18988f3600b1SBryan Venteicher vtnet_rxq_csum(struct vtnet_rxq *rxq, struct mbuf *m,
18998f3600b1SBryan Venteicher struct virtio_net_hdr *hdr)
19008f3600b1SBryan Venteicher {
1901fa7ca1e3SBryan Venteicher const struct ether_header *eh;
1902fa7ca1e3SBryan Venteicher int hoff;
1903fa7ca1e3SBryan Venteicher uint16_t etype;
19048f3600b1SBryan Venteicher
1905fa7ca1e3SBryan Venteicher eh = mtod(m, const struct ether_header *);
1906fa7ca1e3SBryan Venteicher etype = ntohs(eh->ether_type);
1907fa7ca1e3SBryan Venteicher if (etype == ETHERTYPE_VLAN) {
1908fa7ca1e3SBryan Venteicher /* TODO BMV: Handle QinQ. */
1909fa7ca1e3SBryan Venteicher const struct ether_vlan_header *evh =
1910fa7ca1e3SBryan Venteicher mtod(m, const struct ether_vlan_header *);
1911fa7ca1e3SBryan Venteicher etype = ntohs(evh->evl_proto);
1912fa7ca1e3SBryan Venteicher hoff = sizeof(struct ether_vlan_header);
19138f3600b1SBryan Venteicher } else
1914fa7ca1e3SBryan Venteicher hoff = sizeof(struct ether_header);
19158f3600b1SBryan Venteicher
19168f3600b1SBryan Venteicher if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
1917fa7ca1e3SBryan Venteicher return (vtnet_rxq_csum_needs_csum(rxq, m, etype, hoff, hdr));
1918fa7ca1e3SBryan Venteicher else /* VIRTIO_NET_HDR_F_DATA_VALID */
1919fa7ca1e3SBryan Venteicher return (vtnet_rxq_csum_data_valid(rxq, m, etype, hoff, hdr));
19208f3600b1SBryan Venteicher }
19218f3600b1SBryan Venteicher
19228f3600b1SBryan Venteicher static void
vtnet_rxq_discard_merged_bufs(struct vtnet_rxq * rxq,int nbufs)19238f3600b1SBryan Venteicher vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *rxq, int nbufs)
19248f3600b1SBryan Venteicher {
19258f3600b1SBryan Venteicher struct mbuf *m;
19268f3600b1SBryan Venteicher
19278f3600b1SBryan Venteicher while (--nbufs > 0) {
19288f3600b1SBryan Venteicher m = virtqueue_dequeue(rxq->vtnrx_vq, NULL);
19298f3600b1SBryan Venteicher if (m == NULL)
19308f3600b1SBryan Venteicher break;
19318f3600b1SBryan Venteicher vtnet_rxq_discard_buf(rxq, m);
19328f3600b1SBryan Venteicher }
19338f3600b1SBryan Venteicher }
19348f3600b1SBryan Venteicher
/*
 * Return a just-dequeued mbuf to the receive virtqueue untouched, e.g.
 * when the frame is malformed or a replacement buffer could not be
 * allocated.
 */
static void
vtnet_rxq_discard_buf(struct vtnet_rxq *rxq, struct mbuf *m)
{
	int error __diagused;

	/*
	 * Requeue the discarded mbuf. This should always be successful
	 * since it was just dequeued.
	 */
	error = vtnet_rxq_enqueue_buf(rxq, m);
	KASSERT(error == 0,
	    ("%s: cannot requeue discarded mbuf %d", __func__, error));
}
19488f3600b1SBryan Venteicher
/*
 * Dequeue the remaining (nbufs - 1) buffers of a mergeable receive
 * frame and link them onto m_head, posting a replacement buffer for
 * each one consumed.  Returns 0 on success; on failure the entire
 * chain (including m_head) is freed and 1 is returned.
 */
static int
vtnet_rxq_merged_eof(struct vtnet_rxq *rxq, struct mbuf *m_head, int nbufs)
{
	struct vtnet_softc *sc;
	struct virtqueue *vq;
	struct mbuf *m_tail;

	sc = rxq->vtnrx_sc;
	vq = rxq->vtnrx_vq;
	m_tail = m_head;

	while (--nbufs > 0) {
		struct mbuf *m;
		uint32_t len;

		m = virtqueue_dequeue(vq, &len);
		if (m == NULL) {
			/* Host reported more buffers than are available. */
			rxq->vtnrx_stats.vrxs_ierrors++;
			goto fail;
		}

		if (vtnet_rxq_new_buf(rxq) != 0) {
			/*
			 * Could not post a replacement; drop this buffer
			 * and whatever remains of the chain.
			 */
			rxq->vtnrx_stats.vrxs_iqdrops++;
			vtnet_rxq_discard_buf(rxq, m);
			if (nbufs > 1)
				vtnet_rxq_discard_merged_bufs(rxq, nbufs);
			goto fail;
		}

		/* Clamp to the amount the host actually wrote. */
		if (m->m_len < len)
			len = m->m_len;

		m->m_len = len;
		/* Only the head of the chain carries the packet header. */
		m->m_flags &= ~M_PKTHDR;

		m_head->m_pkthdr.len += len;
		m_tail->m_next = m;
		m_tail = m;
	}

	return (0);

fail:
	sc->vtnet_stats.rx_mergeable_failed++;
	m_freem(m_head);

	return (1);
}
199710b59a9bSPeter Grehan
#if defined(INET) || defined(INET6)
/*
 * Hand a received frame to software LRO.  When the LRO context is
 * configured for batched (mbuf queue) operation, the frame is queued
 * and flushed later by the caller; otherwise it is processed
 * immediately.  Returns 0 if LRO consumed the mbuf.
 */
static int
vtnet_lro_rx(struct vtnet_rxq *rxq, struct mbuf *m)
{
	struct lro_ctrl *lro = &rxq->vtnrx_lro;

	if (lro->lro_mbuf_max == 0)
		return (tcp_lro_rx(lro, m, 0));

	tcp_lro_queue_mbuf(lro, m);
	return (0);
}
#endif
201442343a63SBryan Venteicher
/*
 * Final receive-path processing for a completed frame: strip a VLAN
 * tag if hardware tagging is enabled, apply host checksum hints, set
 * flow/LRO metadata, update statistics, and hand the mbuf to software
 * LRO or directly to the interface input routine.  The mbuf is always
 * consumed.
 */
static void
vtnet_rxq_input(struct vtnet_rxq *rxq, struct mbuf *m,
    struct virtio_net_hdr *hdr)
{
	struct vtnet_softc *sc;
	if_t ifp;

	sc = rxq->vtnrx_sc;
	ifp = sc->vtnet_ifp;

	if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) {
		struct ether_header *eh = mtod(m, struct ether_header *);
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
			vtnet_vlan_tag_remove(m);
			/*
			 * With the 802.1Q header removed, update the
			 * checksum starting location accordingly.
			 */
			if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
				hdr->csum_start -= ETHER_VLAN_ENCAP_LEN;
		}
	}

	/* Use the receive queue id as an opaque RSS hash for the stack. */
	m->m_pkthdr.flowid = rxq->vtnrx_id;
	M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);

	if (hdr->flags &
	    (VIRTIO_NET_HDR_F_NEEDS_CSUM | VIRTIO_NET_HDR_F_DATA_VALID)) {
		if (vtnet_rxq_csum(rxq, m, hdr) == 0)
			rxq->vtnrx_stats.vrxs_csum++;
		else
			rxq->vtnrx_stats.vrxs_csum_failed++;
	}

	if (hdr->gso_size != 0) {
		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
		case VIRTIO_NET_HDR_GSO_TCPV6:
			/*
			 * The host coalesced this frame (GSO/LRO); record
			 * the original segment count for ACK pacing etc.
			 */
			m->m_pkthdr.lro_nsegs =
			    howmany(m->m_pkthdr.len, hdr->gso_size);
			rxq->vtnrx_stats.vrxs_host_lro++;
			break;
		}
	}

	rxq->vtnrx_stats.vrxs_ipackets++;
	rxq->vtnrx_stats.vrxs_ibytes += m->m_pkthdr.len;

#if defined(INET) || defined(INET6)
	/* Try software LRO first; it consumes the mbuf on success. */
	if (vtnet_software_lro(sc) && if_getcapenable(ifp) & IFCAP_LRO) {
		if (vtnet_lro_rx(rxq, m) == 0)
			return;
	}
#endif

	if_input(ifp, m);
}
20728f3600b1SBryan Venteicher
/*
 * Drain completed receive buffers from the virtqueue, up to the
 * configured process limit.  For each frame: validate its length,
 * parse the virtio-net header variant in use (mergeable, modern, or
 * legacy), post a replacement buffer, collect any merged-buffer chain,
 * run pfil hooks, and pass the frame to vtnet_rxq_input().
 *
 * Returns 0 when the ring was drained within the limit, or EAGAIN when
 * the limit was reached and more work remains.  Called with the RX
 * queue lock held.
 */
static int
vtnet_rxq_eof(struct vtnet_rxq *rxq)
{
	struct virtio_net_hdr lhdr, *hdr;
	struct vtnet_softc *sc;
	if_t ifp;
	struct virtqueue *vq;
	int deq, count;

	sc = rxq->vtnrx_sc;
	vq = rxq->vtnrx_vq;
	ifp = sc->vtnet_ifp;
	deq = 0;
	count = sc->vtnet_rx_process_limit;

	VTNET_RXQ_LOCK_ASSERT(rxq);

	CURVNET_SET(if_getvnet(ifp));
	while (count-- > 0) {
		struct mbuf *m;
		uint32_t len, nbufs, adjsz;

		m = virtqueue_dequeue(vq, &len);
		if (m == NULL)
			break;
		deq++;

		/* Frame must at least hold the header and an Ethernet header. */
		if (len < sc->vtnet_hdr_size + ETHER_HDR_LEN) {
			rxq->vtnrx_stats.vrxs_ierrors++;
			vtnet_rxq_discard_buf(rxq, m);
			continue;
		}

		/*
		 * Determine the buffer count for this frame and the header
		 * size (adjsz) to strip, which depend on the negotiated
		 * virtio-net header layout.
		 */
		if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) {
			struct virtio_net_hdr_mrg_rxbuf *mhdr =
			    mtod(m, struct virtio_net_hdr_mrg_rxbuf *);
			kmsan_mark(mhdr, sizeof(*mhdr), KMSAN_STATE_INITED);
			nbufs = vtnet_htog16(sc, mhdr->num_buffers);
			adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
		} else if (vtnet_modern(sc)) {
			nbufs = 1; /* num_buffers is always 1 */
			adjsz = sizeof(struct virtio_net_hdr_v1);
		} else {
			nbufs = 1;
			adjsz = sizeof(struct vtnet_rx_header);
			/*
			 * Account for our gap between the header and start of
			 * data to keep the segments separated.
			 */
			len += VTNET_RX_HEADER_PAD;
		}

		if (vtnet_rxq_replace_buf(rxq, m, len) != 0) {
			rxq->vtnrx_stats.vrxs_iqdrops++;
			vtnet_rxq_discard_buf(rxq, m);
			if (nbufs > 1)
				vtnet_rxq_discard_merged_bufs(rxq, nbufs);
			continue;
		}

		m->m_pkthdr.len = len;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.csum_flags = 0;

		if (nbufs > 1) {
			/* Dequeue the rest of chain. */
			if (vtnet_rxq_merged_eof(rxq, m, nbufs) != 0)
				continue;
		}

		kmsan_mark_mbuf(m, KMSAN_STATE_INITED);

		/*
		 * Save an endian swapped version of the header prior to it
		 * being stripped. The header is always at the start of the
		 * mbuf data. num_buffers was already saved (and not needed)
		 * so use the standard header.
		 */
		hdr = mtod(m, struct virtio_net_hdr *);
		lhdr.flags = hdr->flags;
		lhdr.gso_type = hdr->gso_type;
		lhdr.hdr_len = vtnet_htog16(sc, hdr->hdr_len);
		lhdr.gso_size = vtnet_htog16(sc, hdr->gso_size);
		lhdr.csum_start = vtnet_htog16(sc, hdr->csum_start);
		lhdr.csum_offset = vtnet_htog16(sc, hdr->csum_offset);
		m_adj(m, adjsz);

		if (PFIL_HOOKED_IN(sc->vtnet_pfil)) {
			pfil_return_t pfil;

			pfil = pfil_mbuf_in(sc->vtnet_pfil, &m, ifp, NULL);
			switch (pfil) {
			case PFIL_DROPPED:
			case PFIL_CONSUMED:
				/* The filter took ownership of the mbuf. */
				continue;
			default:
				KASSERT(pfil == PFIL_PASS,
				    ("Filter returned %d!", pfil));
			}
		}

		vtnet_rxq_input(rxq, m, &lhdr);
	}

	if (deq > 0) {
#if defined(INET) || defined(INET6)
		/* Flush any frames queued in batched software LRO. */
		if (vtnet_software_lro(sc))
			tcp_lro_flush_all(&rxq->vtnrx_lro);
#endif
		/* Tell the host about the replacement buffers we posted. */
		virtqueue_notify(vq);
	}
	CURVNET_RESTORE();

	return (count > 0 ? 0 : EAGAIN);
}
218810b59a9bSPeter Grehan
/*
 * Common receive-interrupt processing, shared by the interrupt handler
 * and the deferred taskqueue handler.  Drains the RX virtqueue and
 * re-enables the interrupt; if more work keeps arriving, retries up to
 * 'tries' times before falling back to the taskqueue.
 */
static void
vtnet_rx_vq_process(struct vtnet_rxq *rxq, int tries)
{
	struct vtnet_softc *sc;
	if_t ifp;
	u_int more;
#ifdef DEV_NETMAP
	int nmirq;
#endif /* DEV_NETMAP */

	sc = rxq->vtnrx_sc;
	ifp = sc->vtnet_ifp;

	if (__predict_false(rxq->vtnrx_id >= sc->vtnet_act_vq_pairs)) {
		/*
		 * Ignore this interrupt. Either this is a spurious interrupt
		 * or multiqueue without per-VQ MSIX so every queue needs to
		 * be polled (a brain dead configuration we could try harder
		 * to avoid).
		 */
		vtnet_rxq_disable_intr(rxq);
		return;
	}

	VTNET_RXQ_LOCK(rxq);

#ifdef DEV_NETMAP
	/*
	 * We call netmap_rx_irq() under lock to prevent concurrent calls.
	 * This is not necessary to serialize the access to the RX vq, but
	 * rather to avoid races that may happen if this interface is
	 * attached to a VALE switch, which would cause received packets
	 * to stall in the RX queue (nm_kr_tryget() could find the kring
	 * busy when called from netmap_bwrap_intr_notify()).
	 */
	nmirq = netmap_rx_irq(ifp, rxq->vtnrx_id, &more);
	if (nmirq != NM_IRQ_PASS) {
		VTNET_RXQ_UNLOCK(rxq);
		if (nmirq == NM_IRQ_RESCHED) {
			taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
		}
		return;
	}
#endif /* DEV_NETMAP */

again:
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
		VTNET_RXQ_UNLOCK(rxq);
		return;
	}

	more = vtnet_rxq_eof(rxq);
	if (more || vtnet_rxq_enable_intr(rxq) != 0) {
		if (!more)
			vtnet_rxq_disable_intr(rxq);
		/*
		 * This is an occasional condition or race (when !more),
		 * so retry a few times before scheduling the taskqueue.
		 */
		if (tries-- > 0)
			goto again;

		rxq->vtnrx_stats.vrxs_rescheduled++;
		VTNET_RXQ_UNLOCK(rxq);
		taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
	} else
		VTNET_RXQ_UNLOCK(rxq);
}
225710b59a9bSPeter Grehan
225810b59a9bSPeter Grehan static void
vtnet_rx_vq_intr(void * xrxq)2259ef6fdb33SVincenzo Maffione vtnet_rx_vq_intr(void *xrxq)
22608f3600b1SBryan Venteicher {
22618f3600b1SBryan Venteicher struct vtnet_rxq *rxq;
22628f3600b1SBryan Venteicher
22638f3600b1SBryan Venteicher rxq = xrxq;
2264ef6fdb33SVincenzo Maffione vtnet_rx_vq_process(rxq, VTNET_INTR_DISABLE_RETRIES);
22658f3600b1SBryan Venteicher }
22668f3600b1SBryan Venteicher
2267ef6fdb33SVincenzo Maffione static void
vtnet_rxq_tq_intr(void * xrxq,int pending __unused)2268c1b554c8SAlex Richardson vtnet_rxq_tq_intr(void *xrxq, int pending __unused)
2269ef6fdb33SVincenzo Maffione {
2270ef6fdb33SVincenzo Maffione struct vtnet_rxq *rxq;
22718f3600b1SBryan Venteicher
2272ef6fdb33SVincenzo Maffione rxq = xrxq;
2273ef6fdb33SVincenzo Maffione vtnet_rx_vq_process(rxq, 0);
22748f3600b1SBryan Venteicher }
22758f3600b1SBryan Venteicher
227632487a89SBryan Venteicher static int
vtnet_txq_intr_threshold(struct vtnet_txq * txq)2277baa5234fSBryan Venteicher vtnet_txq_intr_threshold(struct vtnet_txq *txq)
227832487a89SBryan Venteicher {
227932487a89SBryan Venteicher struct vtnet_softc *sc;
2280baa5234fSBryan Venteicher int threshold;
228132487a89SBryan Venteicher
228232487a89SBryan Venteicher sc = txq->vtntx_sc;
2283baa5234fSBryan Venteicher
2284baa5234fSBryan Venteicher /*
2285baa5234fSBryan Venteicher * The Tx interrupt is disabled until the queue free count falls
2286baa5234fSBryan Venteicher * below our threshold. Completed frames are drained from the Tx
2287baa5234fSBryan Venteicher * virtqueue before transmitting new frames and in the watchdog
2288baa5234fSBryan Venteicher * callout, so the frequency of Tx interrupts is greatly reduced,
2289baa5234fSBryan Venteicher * at the cost of not freeing mbufs as quickly as they otherwise
2290baa5234fSBryan Venteicher * would be.
2291baa5234fSBryan Venteicher */
2292baa5234fSBryan Venteicher threshold = virtqueue_size(txq->vtntx_vq) / 4;
2293baa5234fSBryan Venteicher
2294baa5234fSBryan Venteicher /*
2295baa5234fSBryan Venteicher * Without indirect descriptors, leave enough room for the most
2296baa5234fSBryan Venteicher * segments we handle.
2297baa5234fSBryan Venteicher */
2298baa5234fSBryan Venteicher if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) == 0 &&
2299baa5234fSBryan Venteicher threshold < sc->vtnet_tx_nsegs)
2300baa5234fSBryan Venteicher threshold = sc->vtnet_tx_nsegs;
2301baa5234fSBryan Venteicher
2302baa5234fSBryan Venteicher return (threshold);
2303baa5234fSBryan Venteicher }
2304baa5234fSBryan Venteicher
2305baa5234fSBryan Venteicher static int
vtnet_txq_below_threshold(struct vtnet_txq * txq)2306baa5234fSBryan Venteicher vtnet_txq_below_threshold(struct vtnet_txq *txq)
2307baa5234fSBryan Venteicher {
2308baa5234fSBryan Venteicher struct virtqueue *vq;
2309baa5234fSBryan Venteicher
231032487a89SBryan Venteicher vq = txq->vtntx_vq;
231132487a89SBryan Venteicher
2312baa5234fSBryan Venteicher return (virtqueue_nfree(vq) <= txq->vtntx_intr_threshold);
231332487a89SBryan Venteicher }
231432487a89SBryan Venteicher
231532487a89SBryan Venteicher static int
vtnet_txq_notify(struct vtnet_txq * txq)231632487a89SBryan Venteicher vtnet_txq_notify(struct vtnet_txq *txq)
231732487a89SBryan Venteicher {
231832487a89SBryan Venteicher struct virtqueue *vq;
231932487a89SBryan Venteicher
232032487a89SBryan Venteicher vq = txq->vtntx_vq;
232132487a89SBryan Venteicher
232232487a89SBryan Venteicher txq->vtntx_watchdog = VTNET_TX_TIMEOUT;
232332487a89SBryan Venteicher virtqueue_notify(vq);
232432487a89SBryan Venteicher
232532487a89SBryan Venteicher if (vtnet_txq_enable_intr(txq) == 0)
232632487a89SBryan Venteicher return (0);
232732487a89SBryan Venteicher
232832487a89SBryan Venteicher /*
232932487a89SBryan Venteicher * Drain frames that were completed since last checked. If this
233032487a89SBryan Venteicher * causes the queue to go above the threshold, the caller should
233132487a89SBryan Venteicher * continue transmitting.
233232487a89SBryan Venteicher */
233332487a89SBryan Venteicher if (vtnet_txq_eof(txq) != 0 && vtnet_txq_below_threshold(txq) == 0) {
233432487a89SBryan Venteicher virtqueue_disable_intr(vq);
233532487a89SBryan Venteicher return (1);
233632487a89SBryan Venteicher }
233732487a89SBryan Venteicher
233832487a89SBryan Venteicher return (0);
233932487a89SBryan Venteicher }
234032487a89SBryan Venteicher
/*
 * Drain and free all pending transmit headers/mbufs from the Tx
 * virtqueue, e.g. on stop or detach.  When the queue is in netmap
 * mode the cookies belong to netmap and must not be freed here.
 */
static void
vtnet_txq_free_mbufs(struct vtnet_txq *txq)
{
	struct virtqueue *vq;
	struct vtnet_tx_header *txhdr;
	int last;
#ifdef DEV_NETMAP
	struct netmap_kring *kring = netmap_kring_on(NA(txq->vtntx_sc->vtnet_ifp),
	    txq->vtntx_id, NR_TX);
#else /* !DEV_NETMAP */
	void *kring = NULL;
#endif /* !DEV_NETMAP */

	vq = txq->vtntx_vq;
	last = 0;

	while ((txhdr = virtqueue_drain(vq, &last)) != NULL) {
		/* In netmap mode (kring != NULL), netmap owns the buffers. */
		if (kring == NULL) {
			m_freem(txhdr->vth_mbuf);
			uma_zfree(vtnet_tx_header_zone, txhdr);
		}
	}

	KASSERT(virtqueue_empty(vq),
	    ("%s: mbufs remaining in tx queue %p", __func__, txq));
}
236710b59a9bSPeter Grehan
236810b59a9bSPeter Grehan /*
23695e220811SBryan Venteicher * BMV: This can go away once we finally have offsets in the mbuf header.
237010b59a9bSPeter Grehan */
/*
 * Parse the Ethernet/IP headers of an outgoing mbuf to locate the L4
 * payload for checksum/TSO offload. On success returns 0 and sets:
 *   *etype - Ethernet type (inner type if a VLAN tag is present)
 *   *proto - IP protocol number (e.g. IPPROTO_TCP)
 *   *start - byte offset of the L4 header within the packet
 * Returns EINVAL (and bumps a stat) for unhandled Ethernet types.
 */
static int
vtnet_txq_offload_ctx(struct vtnet_txq *txq, struct mbuf *m, int *etype,
    int *proto, int *start)
{
	struct vtnet_softc *sc;
	struct ether_vlan_header *evh;
#if defined(INET) || defined(INET6)
	int offset;
#endif

	sc = txq->vtntx_sc;

	evh = mtod(m, struct ether_vlan_header *);
	if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		/* BMV: We should handle nested VLAN tags too. */
		*etype = ntohs(evh->evl_proto);
#if defined(INET) || defined(INET6)
		offset = sizeof(struct ether_vlan_header);
#endif
	} else {
		*etype = ntohs(evh->evl_encap_proto);
#if defined(INET) || defined(INET6)
		offset = sizeof(struct ether_header);
#endif
	}

	switch (*etype) {
#if defined(INET)
	case ETHERTYPE_IP: {
		struct ip *ip, iphdr;
		/* Copy out the IP header if it is not contiguous in m_data. */
		if (__predict_false(m->m_len < offset + sizeof(struct ip))) {
			m_copydata(m, offset, sizeof(struct ip),
			    (caddr_t) &iphdr);
			ip = &iphdr;
		} else
			ip = (struct ip *)(m->m_data + offset);
		*proto = ip->ip_p;
		/* ip_hl is in 32-bit words; convert to bytes. */
		*start = offset + (ip->ip_hl << 2);
		break;
	}
#endif
#if defined(INET6)
	case ETHERTYPE_IPV6:
		*proto = -1;
		/* Walk any IPv6 extension headers to the final L4 header. */
		*start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto);
		/* Assert the network stack sent us a valid packet. */
		KASSERT(*start > offset,
		    ("%s: mbuf %p start %d offset %d proto %d", __func__, m,
		    *start, offset, *proto));
		break;
#endif
	default:
		sc->vtnet_stats.tx_csum_unknown_ethtype++;
		return (EINVAL);
	}

	return (0);
}
242910b59a9bSPeter Grehan
/*
 * Fill in the TSO fields of the virtio-net header for an outgoing TCP
 * segment. 'offset' is the byte offset of the TCP header, 'eth_type'
 * selects the IPv4 vs IPv6 GSO type. Returns 0 on success or ENOTSUP
 * when the segment requires ECN support the host did not negotiate.
 */
static int
vtnet_txq_offload_tso(struct vtnet_txq *txq, struct mbuf *m, int eth_type,
    int offset, struct virtio_net_hdr *hdr)
{
	static struct timeval lastecn;	/* rate-limit state for the ECN warning */
	static int curecn;
	struct vtnet_softc *sc;
	struct tcphdr *tcp, tcphdr;

	sc = txq->vtntx_sc;

	/* Copy out the TCP header if it is not contiguous in m_data. */
	if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) {
		m_copydata(m, offset, sizeof(struct tcphdr), (caddr_t) &tcphdr);
		tcp = &tcphdr;
	} else
		tcp = (struct tcphdr *)(m->m_data + offset);

	/* th_off is in 32-bit words; hdr_len covers L2+L3+L4 headers. */
	hdr->hdr_len = vtnet_gtoh16(sc, offset + (tcp->th_off << 2));
	hdr->gso_size = vtnet_gtoh16(sc, m->m_pkthdr.tso_segsz);
	hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 :
	    VIRTIO_NET_HDR_GSO_TCPV6;

	if (__predict_false(tcp_get_flags(tcp) & TH_CWR)) {
		/*
		 * Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In
		 * FreeBSD, ECN support is not on a per-interface basis,
		 * but globally via the net.inet.tcp.ecn.enable sysctl
		 * knob. The default is off.
		 */
		if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) {
			if (ppsratecheck(&lastecn, &curecn, 1))
				if_printf(sc->vtnet_ifp,
				    "TSO with ECN not negotiated with host\n");
			return (ENOTSUP);
		}
		hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	}

	txq->vtntx_stats.vtxs_tso++;

	return (0);
}
24728f3600b1SBryan Venteicher
/*
 * Populate the virtio-net header checksum/TSO fields for an outgoing
 * mbuf based on its csum_flags. Returns the mbuf on success, or NULL
 * after freeing it when the offload request is inconsistent with the
 * parsed packet headers.
 */
static struct mbuf *
vtnet_txq_offload(struct vtnet_txq *txq, struct mbuf *m,
    struct virtio_net_hdr *hdr)
{
	struct vtnet_softc *sc;
	int flags, etype, csum_start, proto, error;

	sc = txq->vtntx_sc;
	flags = m->m_pkthdr.csum_flags;

	error = vtnet_txq_offload_ctx(txq, m, &etype, &proto, &csum_start);
	if (error)
		goto drop;

	if (flags & (VTNET_CSUM_OFFLOAD | VTNET_CSUM_OFFLOAD_IPV6)) {
		/* Sanity check the parsed mbuf matches the offload flags. */
		if (__predict_false((flags & VTNET_CSUM_OFFLOAD &&
		    etype != ETHERTYPE_IP) || (flags & VTNET_CSUM_OFFLOAD_IPV6
		    && etype != ETHERTYPE_IPV6))) {
			sc->vtnet_stats.tx_csum_proto_mismatch++;
			goto drop;
		}

		hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->csum_start = vtnet_gtoh16(sc, csum_start);
		hdr->csum_offset = vtnet_gtoh16(sc, m->m_pkthdr.csum_data);
		txq->vtntx_stats.vtxs_csum++;
	}

	if (flags & (CSUM_IP_TSO | CSUM_IP6_TSO)) {
		/*
		 * Sanity check the parsed mbuf IP protocol is TCP, and
		 * VirtIO TSO requires the checksum offloading above.
		 */
		if (__predict_false(proto != IPPROTO_TCP)) {
			sc->vtnet_stats.tx_tso_not_tcp++;
			goto drop;
		} else if (__predict_false((hdr->flags &
		    VIRTIO_NET_HDR_F_NEEDS_CSUM) == 0)) {
			sc->vtnet_stats.tx_tso_without_csum++;
			goto drop;
		}

		error = vtnet_txq_offload_tso(txq, m, etype, csum_start, hdr);
		if (error)
			goto drop;
	}

	return (m);

drop:
	m_freem(m);
	return (NULL);
}
252710b59a9bSPeter Grehan
/*
 * Build the scatter/gather list (virtio-net header + mbuf chain) and
 * enqueue it on the transmit virtqueue. If the chain has too many
 * segments, defragment once and retry. On failure the mbuf is freed
 * and *m_head is set to NULL; returns 0 or an errno.
 */
static int
vtnet_txq_enqueue_buf(struct vtnet_txq *txq, struct mbuf **m_head,
    struct vtnet_tx_header *txhdr)
{
	struct vtnet_softc *sc;
	struct virtqueue *vq;
	struct sglist *sg;
	struct mbuf *m;
	int error;

	sc = txq->vtntx_sc;
	vq = txq->vtntx_vq;
	sg = txq->vtntx_sg;
	m = *m_head;

	sglist_reset(sg);
	/* The header must land in exactly one segment. */
	error = sglist_append(sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
	if (error != 0 || sg->sg_nseg != 1) {
		KASSERT(0, ("%s: cannot add header to sglist error %d nseg %d",
		    __func__, error, sg->sg_nseg));
		goto fail;
	}

	error = sglist_append_mbuf(sg, m);
	if (error) {
		/* Too many segments: collapse the chain and retry once. */
		m = m_defrag(m, M_NOWAIT);
		if (m == NULL)
			goto fail;

		*m_head = m;
		sc->vtnet_stats.tx_defragged++;

		error = sglist_append_mbuf(sg, m);
		if (error)
			goto fail;
	}

	txhdr->vth_mbuf = m;
	/* Transmit buffers are read-only to the host: 0 writable segments. */
	error = virtqueue_enqueue(vq, txhdr, sg, sg->sg_nseg, 0);

	return (error);

fail:
	sc->vtnet_stats.tx_defrag_failed++;
	m_freem(*m_head);
	*m_head = NULL;

	return (ENOBUFS);
}
257710b59a9bSPeter Grehan
257810b59a9bSPeter Grehan static int
vtnet_txq_encap(struct vtnet_txq * txq,struct mbuf ** m_head,int flags)2579c857c7d5SMark Johnston vtnet_txq_encap(struct vtnet_txq *txq, struct mbuf **m_head, int flags)
258010b59a9bSPeter Grehan {
258110b59a9bSPeter Grehan struct vtnet_tx_header *txhdr;
258210b59a9bSPeter Grehan struct virtio_net_hdr *hdr;
258310b59a9bSPeter Grehan struct mbuf *m;
258410b59a9bSPeter Grehan int error;
258510b59a9bSPeter Grehan
2586336f459cSPeter Grehan m = *m_head;
25873dd8d840SBryan Venteicher M_ASSERTPKTHDR(m);
2588336f459cSPeter Grehan
2589c857c7d5SMark Johnston txhdr = uma_zalloc(vtnet_tx_header_zone, flags | M_ZERO);
2590336f459cSPeter Grehan if (txhdr == NULL) {
2591336f459cSPeter Grehan m_freem(m);
25928f3600b1SBryan Venteicher *m_head = NULL;
259310b59a9bSPeter Grehan return (ENOMEM);
2594336f459cSPeter Grehan }
259510b59a9bSPeter Grehan
259610b59a9bSPeter Grehan /*
25975e220811SBryan Venteicher * Always use the non-mergeable header, regardless if mergable headers
25985e220811SBryan Venteicher * were negotiated, because for transmit num_buffers is always zero.
25995e220811SBryan Venteicher * The vtnet_hdr_size is used to enqueue the right header size segment.
260010b59a9bSPeter Grehan */
260110b59a9bSPeter Grehan hdr = &txhdr->vth_uhdr.hdr;
260210b59a9bSPeter Grehan
260310b59a9bSPeter Grehan if (m->m_flags & M_VLANTAG) {
260410b59a9bSPeter Grehan m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
2605336f459cSPeter Grehan if ((*m_head = m) == NULL) {
2606336f459cSPeter Grehan error = ENOBUFS;
260710b59a9bSPeter Grehan goto fail;
2608336f459cSPeter Grehan }
260910b59a9bSPeter Grehan m->m_flags &= ~M_VLANTAG;
261010b59a9bSPeter Grehan }
261110b59a9bSPeter Grehan
26128f3600b1SBryan Venteicher if (m->m_pkthdr.csum_flags & VTNET_CSUM_ALL_OFFLOAD) {
26138f3600b1SBryan Venteicher m = vtnet_txq_offload(txq, m, hdr);
2614336f459cSPeter Grehan if ((*m_head = m) == NULL) {
2615336f459cSPeter Grehan error = ENOBUFS;
261610b59a9bSPeter Grehan goto fail;
261710b59a9bSPeter Grehan }
2618336f459cSPeter Grehan }
261910b59a9bSPeter Grehan
26208f3600b1SBryan Venteicher error = vtnet_txq_enqueue_buf(txq, m_head, txhdr);
262110b59a9bSPeter Grehan fail:
26225e220811SBryan Venteicher if (error)
262310b59a9bSPeter Grehan uma_zfree(vtnet_tx_header_zone, txhdr);
262410b59a9bSPeter Grehan
262510b59a9bSPeter Grehan return (error);
262610b59a9bSPeter Grehan }
262710b59a9bSPeter Grehan
262810b59a9bSPeter Grehan
/*
 * ALTQ transmit path: dequeue frames from the interface send queue and
 * encapsulate them until the virtqueue fills. Retries the whole pass a
 * bounded number of times when notify reports more work; otherwise
 * defers to the per-queue taskqueue. Called with the TXQ lock held.
 */
static void
vtnet_start_locked(struct vtnet_txq *txq, if_t ifp)
{
	struct vtnet_softc *sc;
	struct virtqueue *vq;
	struct mbuf *m0;
	int tries, enq;

	sc = txq->vtntx_sc;
	vq = txq->vtntx_vq;
	tries = 0;

	VTNET_TXQ_LOCK_ASSERT(txq);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 ||
	    sc->vtnet_link_active == 0)
		return;

	/* Reclaim completed transmissions before enqueueing more. */
	vtnet_txq_eof(txq);

again:
	enq = 0;

	while (!if_sendq_empty(ifp)) {
		if (virtqueue_full(vq))
			break;

		m0 = if_dequeue(ifp);
		if (m0 == NULL)
			break;

		if (vtnet_txq_encap(txq, &m0, M_NOWAIT) != 0) {
			/* encap may leave a (possibly modified) mbuf behind. */
			if (m0 != NULL)
				if_sendq_prepend(ifp, m0);
			break;
		}

		enq++;
		ETHER_BPF_MTAP(ifp, m0);
	}

	if (enq > 0 && vtnet_txq_notify(txq) != 0) {
		if (tries++ < VTNET_NOTIFY_RETRIES)
			goto again;

		txq->vtntx_stats.vtxs_rescheduled++;
		taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask);
	}
}
267810b59a9bSPeter Grehan
267910b59a9bSPeter Grehan static void
vtnet_start(if_t ifp)26804ee96792SJustin Hibbits vtnet_start(if_t ifp)
26818f3600b1SBryan Venteicher {
26828f3600b1SBryan Venteicher struct vtnet_softc *sc;
26838f3600b1SBryan Venteicher struct vtnet_txq *txq;
26848f3600b1SBryan Venteicher
26854ee96792SJustin Hibbits sc = if_getsoftc(ifp);
26868f3600b1SBryan Venteicher txq = &sc->vtnet_txqs[0];
26878f3600b1SBryan Venteicher
26888f3600b1SBryan Venteicher VTNET_TXQ_LOCK(txq);
26898f3600b1SBryan Venteicher vtnet_start_locked(txq, ifp);
26908f3600b1SBryan Venteicher VTNET_TXQ_UNLOCK(txq);
26918f3600b1SBryan Venteicher }
26928f3600b1SBryan Venteicher
26938f3600b1SBryan Venteicher
/*
 * Multiqueue transmit path: optionally enqueue 'm' on the queue's
 * buf_ring, then drain the ring into the virtqueue. Uses the drbr
 * peek/putback/advance protocol so a frame that fails to encapsulate
 * is either requeued or dropped as appropriate. Retries a bounded
 * number of times when notify reports more completions; otherwise
 * defers to the taskqueue. Called with the TXQ lock held.
 */
static int
vtnet_txq_mq_start_locked(struct vtnet_txq *txq, struct mbuf *m)
{
	struct vtnet_softc *sc;
	struct virtqueue *vq;
	struct buf_ring *br;
	if_t ifp;
	int enq, tries, error;

	sc = txq->vtntx_sc;
	vq = txq->vtntx_vq;
	br = txq->vtntx_br;
	ifp = sc->vtnet_ifp;
	tries = 0;
	error = 0;

	VTNET_TXQ_LOCK_ASSERT(txq);

	/* Not running or no link: just park the frame on the ring. */
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 ||
	    sc->vtnet_link_active == 0) {
		if (m != NULL)
			error = drbr_enqueue(ifp, br, m);
		return (error);
	}

	if (m != NULL) {
		error = drbr_enqueue(ifp, br, m);
		if (error)
			return (error);
	}

	/* Reclaim completed transmissions before enqueueing more. */
	vtnet_txq_eof(txq);

again:
	enq = 0;

	while ((m = drbr_peek(ifp, br)) != NULL) {
		if (virtqueue_full(vq)) {
			drbr_putback(ifp, br, m);
			break;
		}

		if (vtnet_txq_encap(txq, &m, M_NOWAIT) != 0) {
			/* encap may have replaced or consumed the mbuf. */
			if (m != NULL)
				drbr_putback(ifp, br, m);
			else
				drbr_advance(ifp, br);
			break;
		}
		drbr_advance(ifp, br);

		enq++;
		ETHER_BPF_MTAP(ifp, m);
	}

	if (enq > 0 && vtnet_txq_notify(txq) != 0) {
		if (tries++ < VTNET_NOTIFY_RETRIES)
			goto again;

		txq->vtntx_stats.vtxs_rescheduled++;
		taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask);
	}

	return (0);
}
27598f3600b1SBryan Venteicher
27608f3600b1SBryan Venteicher static int
vtnet_txq_mq_start(if_t ifp,struct mbuf * m)27614ee96792SJustin Hibbits vtnet_txq_mq_start(if_t ifp, struct mbuf *m)
27628f3600b1SBryan Venteicher {
27638f3600b1SBryan Venteicher struct vtnet_softc *sc;
27648f3600b1SBryan Venteicher struct vtnet_txq *txq;
27658f3600b1SBryan Venteicher int i, npairs, error;
27668f3600b1SBryan Venteicher
27674ee96792SJustin Hibbits sc = if_getsoftc(ifp);
27688f3600b1SBryan Venteicher npairs = sc->vtnet_act_vq_pairs;
27698f3600b1SBryan Venteicher
2770c2529042SHans Petter Selasky if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
27718f3600b1SBryan Venteicher i = m->m_pkthdr.flowid % npairs;
27728f3600b1SBryan Venteicher else
27738f3600b1SBryan Venteicher i = curcpu % npairs;
27748f3600b1SBryan Venteicher
27758f3600b1SBryan Venteicher txq = &sc->vtnet_txqs[i];
27768f3600b1SBryan Venteicher
27778f3600b1SBryan Venteicher if (VTNET_TXQ_TRYLOCK(txq) != 0) {
27788f3600b1SBryan Venteicher error = vtnet_txq_mq_start_locked(txq, m);
27798f3600b1SBryan Venteicher VTNET_TXQ_UNLOCK(txq);
27808f3600b1SBryan Venteicher } else {
27818f3600b1SBryan Venteicher error = drbr_enqueue(ifp, txq->vtntx_br, m);
27828f3600b1SBryan Venteicher taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_defrtask);
27838f3600b1SBryan Venteicher }
27848f3600b1SBryan Venteicher
27858f3600b1SBryan Venteicher return (error);
27868f3600b1SBryan Venteicher }
27878f3600b1SBryan Venteicher
27888f3600b1SBryan Venteicher static void
vtnet_txq_tq_deferred(void * xtxq,int pending __unused)2789c1b554c8SAlex Richardson vtnet_txq_tq_deferred(void *xtxq, int pending __unused)
27908f3600b1SBryan Venteicher {
27918f3600b1SBryan Venteicher struct vtnet_softc *sc;
27928f3600b1SBryan Venteicher struct vtnet_txq *txq;
27938f3600b1SBryan Venteicher
27948f3600b1SBryan Venteicher txq = xtxq;
27958f3600b1SBryan Venteicher sc = txq->vtntx_sc;
27968f3600b1SBryan Venteicher
27978f3600b1SBryan Venteicher VTNET_TXQ_LOCK(txq);
27988f3600b1SBryan Venteicher if (!drbr_empty(sc->vtnet_ifp, txq->vtntx_br))
27998f3600b1SBryan Venteicher vtnet_txq_mq_start_locked(txq, NULL);
28008f3600b1SBryan Venteicher VTNET_TXQ_UNLOCK(txq);
28018f3600b1SBryan Venteicher }
28028f3600b1SBryan Venteicher
28038f3600b1SBryan Venteicher
28048f3600b1SBryan Venteicher static void
vtnet_txq_start(struct vtnet_txq * txq)2805bddddcd5SBryan Venteicher vtnet_txq_start(struct vtnet_txq *txq)
2806bddddcd5SBryan Venteicher {
2807bddddcd5SBryan Venteicher struct vtnet_softc *sc;
28084ee96792SJustin Hibbits if_t ifp;
2809bddddcd5SBryan Venteicher
2810bddddcd5SBryan Venteicher sc = txq->vtntx_sc;
2811bddddcd5SBryan Venteicher ifp = sc->vtnet_ifp;
2812bddddcd5SBryan Venteicher
2813*3a9ebff2SBjoern Jakobsen if (!VTNET_ALTQ_ENABLED) {
2814bddddcd5SBryan Venteicher if (!drbr_empty(ifp, txq->vtntx_br))
2815bddddcd5SBryan Venteicher vtnet_txq_mq_start_locked(txq, NULL);
2816*3a9ebff2SBjoern Jakobsen } else {
2817*3a9ebff2SBjoern Jakobsen if (!if_sendq_empty(ifp))
2818*3a9ebff2SBjoern Jakobsen vtnet_start_locked(txq, ifp);
2819*3a9ebff2SBjoern Jakobsen
2820*3a9ebff2SBjoern Jakobsen }
2821bddddcd5SBryan Venteicher }
2822bddddcd5SBryan Venteicher
2823bddddcd5SBryan Venteicher static void
vtnet_txq_tq_intr(void * xtxq,int pending __unused)2824c1b554c8SAlex Richardson vtnet_txq_tq_intr(void *xtxq, int pending __unused)
28258f3600b1SBryan Venteicher {
28268f3600b1SBryan Venteicher struct vtnet_softc *sc;
28278f3600b1SBryan Venteicher struct vtnet_txq *txq;
28284ee96792SJustin Hibbits if_t ifp;
28298f3600b1SBryan Venteicher
28308f3600b1SBryan Venteicher txq = xtxq;
28318f3600b1SBryan Venteicher sc = txq->vtntx_sc;
28328f3600b1SBryan Venteicher ifp = sc->vtnet_ifp;
28338f3600b1SBryan Venteicher
28348f3600b1SBryan Venteicher VTNET_TXQ_LOCK(txq);
28358f3600b1SBryan Venteicher
28364ee96792SJustin Hibbits if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
28378f3600b1SBryan Venteicher VTNET_TXQ_UNLOCK(txq);
28388f3600b1SBryan Venteicher return;
28398f3600b1SBryan Venteicher }
28408f3600b1SBryan Venteicher
28418f3600b1SBryan Venteicher vtnet_txq_eof(txq);
2842bddddcd5SBryan Venteicher vtnet_txq_start(txq);
28438f3600b1SBryan Venteicher
28448f3600b1SBryan Venteicher VTNET_TXQ_UNLOCK(txq);
28458f3600b1SBryan Venteicher }
28468f3600b1SBryan Venteicher
284732487a89SBryan Venteicher static int
vtnet_txq_eof(struct vtnet_txq * txq)28488f3600b1SBryan Venteicher vtnet_txq_eof(struct vtnet_txq *txq)
28498f3600b1SBryan Venteicher {
28508f3600b1SBryan Venteicher struct virtqueue *vq;
28518f3600b1SBryan Venteicher struct vtnet_tx_header *txhdr;
28528f3600b1SBryan Venteicher struct mbuf *m;
285332487a89SBryan Venteicher int deq;
28548f3600b1SBryan Venteicher
28558f3600b1SBryan Venteicher vq = txq->vtntx_vq;
285632487a89SBryan Venteicher deq = 0;
28578f3600b1SBryan Venteicher VTNET_TXQ_LOCK_ASSERT(txq);
28588f3600b1SBryan Venteicher
28598f3600b1SBryan Venteicher while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) {
28608f3600b1SBryan Venteicher m = txhdr->vth_mbuf;
286132487a89SBryan Venteicher deq++;
28628f3600b1SBryan Venteicher
28638f3600b1SBryan Venteicher txq->vtntx_stats.vtxs_opackets++;
28648f3600b1SBryan Venteicher txq->vtntx_stats.vtxs_obytes += m->m_pkthdr.len;
28658f3600b1SBryan Venteicher if (m->m_flags & M_MCAST)
28668f3600b1SBryan Venteicher txq->vtntx_stats.vtxs_omcasts++;
28678f3600b1SBryan Venteicher
28688f3600b1SBryan Venteicher m_freem(m);
28698f3600b1SBryan Venteicher uma_zfree(vtnet_tx_header_zone, txhdr);
28708f3600b1SBryan Venteicher }
28718f3600b1SBryan Venteicher
28728f3600b1SBryan Venteicher if (virtqueue_empty(vq))
28738f3600b1SBryan Venteicher txq->vtntx_watchdog = 0;
287432487a89SBryan Venteicher
287532487a89SBryan Venteicher return (deq);
28768f3600b1SBryan Venteicher }
28778f3600b1SBryan Venteicher
28788f3600b1SBryan Venteicher static void
vtnet_tx_vq_intr(void * xtxq)28798f3600b1SBryan Venteicher vtnet_tx_vq_intr(void *xtxq)
28808f3600b1SBryan Venteicher {
28818f3600b1SBryan Venteicher struct vtnet_softc *sc;
28828f3600b1SBryan Venteicher struct vtnet_txq *txq;
28834ee96792SJustin Hibbits if_t ifp;
28848f3600b1SBryan Venteicher
28858f3600b1SBryan Venteicher txq = xtxq;
28868f3600b1SBryan Venteicher sc = txq->vtntx_sc;
28878f3600b1SBryan Venteicher ifp = sc->vtnet_ifp;
28888f3600b1SBryan Venteicher
28898f3600b1SBryan Venteicher if (__predict_false(txq->vtntx_id >= sc->vtnet_act_vq_pairs)) {
28908f3600b1SBryan Venteicher /*
28918f3600b1SBryan Venteicher * Ignore this interrupt. Either this is a spurious interrupt
28928f3600b1SBryan Venteicher * or multiqueue without per-VQ MSIX so every queue needs to
28938f3600b1SBryan Venteicher * be polled (a brain dead configuration we could try harder
28948f3600b1SBryan Venteicher * to avoid).
28958f3600b1SBryan Venteicher */
28968f3600b1SBryan Venteicher vtnet_txq_disable_intr(txq);
28978f3600b1SBryan Venteicher return;
28988f3600b1SBryan Venteicher }
28998f3600b1SBryan Venteicher
29002e42b74aSVincenzo Maffione #ifdef DEV_NETMAP
29012e42b74aSVincenzo Maffione if (netmap_tx_irq(ifp, txq->vtntx_id) != NM_IRQ_PASS)
29022e42b74aSVincenzo Maffione return;
29032e42b74aSVincenzo Maffione #endif /* DEV_NETMAP */
29042e42b74aSVincenzo Maffione
29058f3600b1SBryan Venteicher VTNET_TXQ_LOCK(txq);
29068f3600b1SBryan Venteicher
29074ee96792SJustin Hibbits if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
29088f3600b1SBryan Venteicher VTNET_TXQ_UNLOCK(txq);
29098f3600b1SBryan Venteicher return;
29108f3600b1SBryan Venteicher }
29118f3600b1SBryan Venteicher
29128f3600b1SBryan Venteicher vtnet_txq_eof(txq);
2913bddddcd5SBryan Venteicher vtnet_txq_start(txq);
29148f3600b1SBryan Venteicher
29158f3600b1SBryan Venteicher VTNET_TXQ_UNLOCK(txq);
29168f3600b1SBryan Venteicher }
29178f3600b1SBryan Venteicher
29188f3600b1SBryan Venteicher static void
vtnet_tx_start_all(struct vtnet_softc * sc)29198f3600b1SBryan Venteicher vtnet_tx_start_all(struct vtnet_softc *sc)
29208f3600b1SBryan Venteicher {
29218f3600b1SBryan Venteicher struct vtnet_txq *txq;
29228f3600b1SBryan Venteicher int i;
29238f3600b1SBryan Venteicher
29248f3600b1SBryan Venteicher VTNET_CORE_LOCK_ASSERT(sc);
29258f3600b1SBryan Venteicher
29268f3600b1SBryan Venteicher for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
29278f3600b1SBryan Venteicher txq = &sc->vtnet_txqs[i];
29288f3600b1SBryan Venteicher
29298f3600b1SBryan Venteicher VTNET_TXQ_LOCK(txq);
2930bddddcd5SBryan Venteicher vtnet_txq_start(txq);
29318f3600b1SBryan Venteicher VTNET_TXQ_UNLOCK(txq);
29328f3600b1SBryan Venteicher }
29338f3600b1SBryan Venteicher }
29348f3600b1SBryan Venteicher
29358f3600b1SBryan Venteicher static void
vtnet_qflush(if_t ifp)29364ee96792SJustin Hibbits vtnet_qflush(if_t ifp)
29378f3600b1SBryan Venteicher {
29388f3600b1SBryan Venteicher struct vtnet_softc *sc;
29398f3600b1SBryan Venteicher struct vtnet_txq *txq;
29408f3600b1SBryan Venteicher struct mbuf *m;
29418f3600b1SBryan Venteicher int i;
29428f3600b1SBryan Venteicher
29434ee96792SJustin Hibbits sc = if_getsoftc(ifp);
29448f3600b1SBryan Venteicher
29458f3600b1SBryan Venteicher for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
29468f3600b1SBryan Venteicher txq = &sc->vtnet_txqs[i];
29478f3600b1SBryan Venteicher
29488f3600b1SBryan Venteicher VTNET_TXQ_LOCK(txq);
29498f3600b1SBryan Venteicher while ((m = buf_ring_dequeue_sc(txq->vtntx_br)) != NULL)
29508f3600b1SBryan Venteicher m_freem(m);
29518f3600b1SBryan Venteicher VTNET_TXQ_UNLOCK(txq);
29528f3600b1SBryan Venteicher }
29538f3600b1SBryan Venteicher
29548f3600b1SBryan Venteicher if_qflush(ifp);
29558f3600b1SBryan Venteicher }
29568f3600b1SBryan Venteicher
29578f3600b1SBryan Venteicher static int
vtnet_watchdog(struct vtnet_txq * txq)29588f3600b1SBryan Venteicher vtnet_watchdog(struct vtnet_txq *txq)
29598f3600b1SBryan Venteicher {
29604ee96792SJustin Hibbits if_t ifp;
29618f3600b1SBryan Venteicher
296232487a89SBryan Venteicher ifp = txq->vtntx_sc->vtnet_ifp;
29638f3600b1SBryan Venteicher
29648f3600b1SBryan Venteicher VTNET_TXQ_LOCK(txq);
296532487a89SBryan Venteicher if (txq->vtntx_watchdog == 1) {
296632487a89SBryan Venteicher /*
296732487a89SBryan Venteicher * Only drain completed frames if the watchdog is about to
296832487a89SBryan Venteicher * expire. If any frames were drained, there may be enough
296932487a89SBryan Venteicher * free descriptors now available to transmit queued frames.
297032487a89SBryan Venteicher * In that case, the timer will immediately be decremented
297132487a89SBryan Venteicher * below, but the timeout is generous enough that should not
297232487a89SBryan Venteicher * be a problem.
297332487a89SBryan Venteicher */
297432487a89SBryan Venteicher if (vtnet_txq_eof(txq) != 0)
297532487a89SBryan Venteicher vtnet_txq_start(txq);
297632487a89SBryan Venteicher }
297732487a89SBryan Venteicher
29788f3600b1SBryan Venteicher if (txq->vtntx_watchdog == 0 || --txq->vtntx_watchdog) {
29798f3600b1SBryan Venteicher VTNET_TXQ_UNLOCK(txq);
29808f3600b1SBryan Venteicher return (0);
29818f3600b1SBryan Venteicher }
29828f3600b1SBryan Venteicher VTNET_TXQ_UNLOCK(txq);
29838f3600b1SBryan Venteicher
298432487a89SBryan Venteicher if_printf(ifp, "watchdog timeout on queue %d\n", txq->vtntx_id);
29858f3600b1SBryan Venteicher return (1);
29868f3600b1SBryan Venteicher }
29878f3600b1SBryan Venteicher
29888f3600b1SBryan Venteicher static void
vtnet_accum_stats(struct vtnet_softc * sc,struct vtnet_rxq_stats * rxacc,struct vtnet_txq_stats * txacc)298984047b19SGleb Smirnoff vtnet_accum_stats(struct vtnet_softc *sc, struct vtnet_rxq_stats *rxacc,
299084047b19SGleb Smirnoff struct vtnet_txq_stats *txacc)
29918f3600b1SBryan Venteicher {
29928f3600b1SBryan Venteicher
299384047b19SGleb Smirnoff bzero(rxacc, sizeof(struct vtnet_rxq_stats));
299484047b19SGleb Smirnoff bzero(txacc, sizeof(struct vtnet_txq_stats));
29958f3600b1SBryan Venteicher
299684047b19SGleb Smirnoff for (int i = 0; i < sc->vtnet_max_vq_pairs; i++) {
299784047b19SGleb Smirnoff struct vtnet_rxq_stats *rxst;
299884047b19SGleb Smirnoff struct vtnet_txq_stats *txst;
299984047b19SGleb Smirnoff
300084047b19SGleb Smirnoff rxst = &sc->vtnet_rxqs[i].vtnrx_stats;
300184047b19SGleb Smirnoff rxacc->vrxs_ipackets += rxst->vrxs_ipackets;
300284047b19SGleb Smirnoff rxacc->vrxs_ibytes += rxst->vrxs_ibytes;
300384047b19SGleb Smirnoff rxacc->vrxs_iqdrops += rxst->vrxs_iqdrops;
300484047b19SGleb Smirnoff rxacc->vrxs_csum += rxst->vrxs_csum;
300584047b19SGleb Smirnoff rxacc->vrxs_csum_failed += rxst->vrxs_csum_failed;
300684047b19SGleb Smirnoff rxacc->vrxs_rescheduled += rxst->vrxs_rescheduled;
300784047b19SGleb Smirnoff
300884047b19SGleb Smirnoff txst = &sc->vtnet_txqs[i].vtntx_stats;
300984047b19SGleb Smirnoff txacc->vtxs_opackets += txst->vtxs_opackets;
301084047b19SGleb Smirnoff txacc->vtxs_obytes += txst->vtxs_obytes;
301184047b19SGleb Smirnoff txacc->vtxs_csum += txst->vtxs_csum;
301284047b19SGleb Smirnoff txacc->vtxs_tso += txst->vtxs_tso;
301384047b19SGleb Smirnoff txacc->vtxs_rescheduled += txst->vtxs_rescheduled;
301484047b19SGleb Smirnoff }
30158f3600b1SBryan Venteicher }
30168f3600b1SBryan Venteicher
/*
 * if_get_counter method: report interface counters accumulated across
 * all queue pairs.  Anything not tracked per-queue falls through to the
 * generic ifnet counters.
 */
static uint64_t
vtnet_get_counter(if_t ifp, ift_counter cnt)
{
	struct vtnet_softc *sc;
	struct vtnet_rxq_stats rxaccum;
	struct vtnet_txq_stats txaccum;

	sc = if_getsoftc(ifp);
	vtnet_accum_stats(sc, &rxaccum, &txaccum);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (rxaccum.vrxs_ipackets);
	case IFCOUNTER_IQDROPS:
		return (rxaccum.vrxs_iqdrops);
	case IFCOUNTER_IERRORS:
		return (rxaccum.vrxs_ierrors);
	case IFCOUNTER_OPACKETS:
		return (txaccum.vtxs_opackets);
	case IFCOUNTER_OBYTES:
		/*
		 * With ALTQ enabled the per-queue Tx byte/mcast stats are
		 * not used, so fall through to the default ifnet counters.
		 */
		if (!VTNET_ALTQ_ENABLED)
			return (txaccum.vtxs_obytes);
		/* FALLTHROUGH */
	case IFCOUNTER_OMCASTS:
		if (!VTNET_ALTQ_ENABLED)
			return (txaccum.vtxs_omcasts);
		/* FALLTHROUGH */
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}
30488f3600b1SBryan Venteicher
/*
 * Periodic (once per second) callout: run the watchdog on every active
 * Tx queue.  If any queue timed out, mark the interface down and
 * reinitialize it; otherwise reschedule the tick.  Runs with the core
 * lock held (callout initialized with the core mutex).
 */
static void
vtnet_tick(void *xsc)
{
	struct vtnet_softc *sc;
	if_t ifp;
	int i, timedout;

	sc = xsc;
	ifp = sc->vtnet_ifp;
	timedout = 0;

	VTNET_CORE_LOCK_ASSERT(sc);

	for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
		timedout |= vtnet_watchdog(&sc->vtnet_txqs[i]);

	if (timedout != 0) {
		/* Clear RUNNING so vtnet_init_locked() performs a full restart. */
		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
		vtnet_init_locked(sc, 0);
	} else
		callout_schedule(&sc->vtnet_tick_ch, hz);
}
30718f3600b1SBryan Venteicher
30728f3600b1SBryan Venteicher static void
vtnet_start_taskqueues(struct vtnet_softc * sc)30738f3600b1SBryan Venteicher vtnet_start_taskqueues(struct vtnet_softc *sc)
30748f3600b1SBryan Venteicher {
30758f3600b1SBryan Venteicher device_t dev;
30768f3600b1SBryan Venteicher struct vtnet_rxq *rxq;
30778f3600b1SBryan Venteicher struct vtnet_txq *txq;
30788f3600b1SBryan Venteicher int i, error;
30798f3600b1SBryan Venteicher
30808f3600b1SBryan Venteicher dev = sc->vtnet_dev;
30818f3600b1SBryan Venteicher
30828f3600b1SBryan Venteicher /*
30838f3600b1SBryan Venteicher * Errors here are very difficult to recover from - we cannot
30848f3600b1SBryan Venteicher * easily fail because, if this is during boot, we will hang
30858f3600b1SBryan Venteicher * when freeing any successfully started taskqueues because
30868f3600b1SBryan Venteicher * the scheduler isn't up yet.
30878f3600b1SBryan Venteicher *
30888f3600b1SBryan Venteicher * Most drivers just ignore the return value - it only fails
30898f3600b1SBryan Venteicher * with ENOMEM so an error is not likely.
30908f3600b1SBryan Venteicher */
3091bd8809dfSBryan Venteicher for (i = 0; i < sc->vtnet_req_vq_pairs; i++) {
30928f3600b1SBryan Venteicher rxq = &sc->vtnet_rxqs[i];
30938f3600b1SBryan Venteicher error = taskqueue_start_threads(&rxq->vtnrx_tq, 1, PI_NET,
30948f3600b1SBryan Venteicher "%s rxq %d", device_get_nameunit(dev), rxq->vtnrx_id);
30958f3600b1SBryan Venteicher if (error) {
30968f3600b1SBryan Venteicher device_printf(dev, "failed to start rx taskq %d\n",
30978f3600b1SBryan Venteicher rxq->vtnrx_id);
30988f3600b1SBryan Venteicher }
30998f3600b1SBryan Venteicher
31008f3600b1SBryan Venteicher txq = &sc->vtnet_txqs[i];
31018f3600b1SBryan Venteicher error = taskqueue_start_threads(&txq->vtntx_tq, 1, PI_NET,
31028f3600b1SBryan Venteicher "%s txq %d", device_get_nameunit(dev), txq->vtntx_id);
31038f3600b1SBryan Venteicher if (error) {
31048f3600b1SBryan Venteicher device_printf(dev, "failed to start tx taskq %d\n",
31058f3600b1SBryan Venteicher txq->vtntx_id);
31068f3600b1SBryan Venteicher }
31078f3600b1SBryan Venteicher }
31088f3600b1SBryan Venteicher }
31098f3600b1SBryan Venteicher
31108f3600b1SBryan Venteicher static void
vtnet_free_taskqueues(struct vtnet_softc * sc)31118f3600b1SBryan Venteicher vtnet_free_taskqueues(struct vtnet_softc *sc)
31128f3600b1SBryan Venteicher {
31138f3600b1SBryan Venteicher struct vtnet_rxq *rxq;
31148f3600b1SBryan Venteicher struct vtnet_txq *txq;
31158f3600b1SBryan Venteicher int i;
31168f3600b1SBryan Venteicher
31178f3600b1SBryan Venteicher for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
31188f3600b1SBryan Venteicher rxq = &sc->vtnet_rxqs[i];
31198f3600b1SBryan Venteicher if (rxq->vtnrx_tq != NULL) {
31208f3600b1SBryan Venteicher taskqueue_free(rxq->vtnrx_tq);
312193ef2969SVincenzo Maffione rxq->vtnrx_tq = NULL;
31228f3600b1SBryan Venteicher }
31238f3600b1SBryan Venteicher
31248f3600b1SBryan Venteicher txq = &sc->vtnet_txqs[i];
31258f3600b1SBryan Venteicher if (txq->vtntx_tq != NULL) {
31268f3600b1SBryan Venteicher taskqueue_free(txq->vtntx_tq);
31278f3600b1SBryan Venteicher txq->vtntx_tq = NULL;
31288f3600b1SBryan Venteicher }
31298f3600b1SBryan Venteicher }
31308f3600b1SBryan Venteicher }
31318f3600b1SBryan Venteicher
31328f3600b1SBryan Venteicher static void
vtnet_drain_taskqueues(struct vtnet_softc * sc)31338f3600b1SBryan Venteicher vtnet_drain_taskqueues(struct vtnet_softc *sc)
31348f3600b1SBryan Venteicher {
31358f3600b1SBryan Venteicher struct vtnet_rxq *rxq;
31368f3600b1SBryan Venteicher struct vtnet_txq *txq;
31378f3600b1SBryan Venteicher int i;
31388f3600b1SBryan Venteicher
31398f3600b1SBryan Venteicher for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
31408f3600b1SBryan Venteicher rxq = &sc->vtnet_rxqs[i];
31418f3600b1SBryan Venteicher if (rxq->vtnrx_tq != NULL)
31428f3600b1SBryan Venteicher taskqueue_drain(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
31438f3600b1SBryan Venteicher
31448f3600b1SBryan Venteicher txq = &sc->vtnet_txqs[i];
31458f3600b1SBryan Venteicher if (txq->vtntx_tq != NULL) {
31468f3600b1SBryan Venteicher taskqueue_drain(txq->vtntx_tq, &txq->vtntx_intrtask);
3147*3a9ebff2SBjoern Jakobsen if (!VTNET_ALTQ_ENABLED)
31488f3600b1SBryan Venteicher taskqueue_drain(txq->vtntx_tq, &txq->vtntx_defrtask);
31498f3600b1SBryan Venteicher }
31508f3600b1SBryan Venteicher }
315110b59a9bSPeter Grehan }
315210b59a9bSPeter Grehan
31538f3600b1SBryan Venteicher static void
vtnet_drain_rxtx_queues(struct vtnet_softc * sc)31548f3600b1SBryan Venteicher vtnet_drain_rxtx_queues(struct vtnet_softc *sc)
31558f3600b1SBryan Venteicher {
31568f3600b1SBryan Venteicher struct vtnet_rxq *rxq;
31578f3600b1SBryan Venteicher struct vtnet_txq *txq;
31588f3600b1SBryan Venteicher int i;
315910b59a9bSPeter Grehan
3160bd8809dfSBryan Venteicher for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
31618f3600b1SBryan Venteicher rxq = &sc->vtnet_rxqs[i];
31628f3600b1SBryan Venteicher vtnet_rxq_free_mbufs(rxq);
316310b59a9bSPeter Grehan
31648f3600b1SBryan Venteicher txq = &sc->vtnet_txqs[i];
31658f3600b1SBryan Venteicher vtnet_txq_free_mbufs(txq);
31668f3600b1SBryan Venteicher }
316710b59a9bSPeter Grehan }
316810b59a9bSPeter Grehan
31698f3600b1SBryan Venteicher static void
vtnet_stop_rendezvous(struct vtnet_softc * sc)31708f3600b1SBryan Venteicher vtnet_stop_rendezvous(struct vtnet_softc *sc)
31718f3600b1SBryan Venteicher {
31728f3600b1SBryan Venteicher struct vtnet_rxq *rxq;
31738f3600b1SBryan Venteicher struct vtnet_txq *txq;
31748f3600b1SBryan Venteicher int i;
31758f3600b1SBryan Venteicher
31764f18e23fSBryan Venteicher VTNET_CORE_LOCK_ASSERT(sc);
31774f18e23fSBryan Venteicher
31788f3600b1SBryan Venteicher /*
31798f3600b1SBryan Venteicher * Lock and unlock the per-queue mutex so we known the stop
31808f3600b1SBryan Venteicher * state is visible. Doing only the active queues should be
31818f3600b1SBryan Venteicher * sufficient, but it does not cost much extra to do all the
31824f18e23fSBryan Venteicher * queues.
31838f3600b1SBryan Venteicher */
31848f3600b1SBryan Venteicher for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
31858f3600b1SBryan Venteicher rxq = &sc->vtnet_rxqs[i];
31868f3600b1SBryan Venteicher VTNET_RXQ_LOCK(rxq);
31878f3600b1SBryan Venteicher VTNET_RXQ_UNLOCK(rxq);
31888f3600b1SBryan Venteicher
31898f3600b1SBryan Venteicher txq = &sc->vtnet_txqs[i];
31908f3600b1SBryan Venteicher VTNET_TXQ_LOCK(txq);
31918f3600b1SBryan Venteicher VTNET_TXQ_UNLOCK(txq);
31928f3600b1SBryan Venteicher }
319310b59a9bSPeter Grehan }
319410b59a9bSPeter Grehan
/*
 * Stop the interface: mark it down, halt the tick callout, quiesce the
 * queues, and reset the host adapter.  Called with the core lock held.
 */
static void
vtnet_stop(struct vtnet_softc *sc)
{
	device_t dev;
	if_t ifp;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	VTNET_CORE_LOCK_ASSERT(sc);

	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	sc->vtnet_link_active = 0;
	callout_stop(&sc->vtnet_tick_ch);

	/* Only advisory. */
	vtnet_disable_interrupts(sc);

#ifdef DEV_NETMAP
	/* Stop any pending txsync/rxsync and disable them. */
	netmap_disable_all_rings(ifp);
#endif /* DEV_NETMAP */

	/*
	 * Stop the host adapter. This resets it to the pre-initialized
	 * state. It will not generate any interrupts until after it is
	 * reinitialized.
	 */
	virtio_stop(dev);
	vtnet_stop_rendezvous(sc);

	/* Free queued mbufs and fall back to a single active queue pair. */
	vtnet_drain_rxtx_queues(sc);
	sc->vtnet_act_vq_pairs = 1;
}
322910b59a9bSPeter Grehan
323010b59a9bSPeter Grehan static int
vtnet_virtio_reinit(struct vtnet_softc * sc)32318f3600b1SBryan Venteicher vtnet_virtio_reinit(struct vtnet_softc *sc)
323210b59a9bSPeter Grehan {
32338f3600b1SBryan Venteicher device_t dev;
32344ee96792SJustin Hibbits if_t ifp;
323510b59a9bSPeter Grehan uint64_t features;
323644559b26SBryan Venteicher int error;
323710b59a9bSPeter Grehan
32388f3600b1SBryan Venteicher dev = sc->vtnet_dev;
323910b59a9bSPeter Grehan ifp = sc->vtnet_ifp;
324044559b26SBryan Venteicher features = sc->vtnet_negotiated_features;
32418f3600b1SBryan Venteicher
324210b59a9bSPeter Grehan /*
324310b59a9bSPeter Grehan * Re-negotiate with the host, removing any disabled receive
324410b59a9bSPeter Grehan * features. Transmit features are disabled only on our side
324510b59a9bSPeter Grehan * via if_capenable and if_hwassist.
324610b59a9bSPeter Grehan */
324710b59a9bSPeter Grehan
32484ee96792SJustin Hibbits if ((if_getcapenable(ifp) & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) == 0)
3249e36a6b1bSBryan Venteicher features &= ~(VIRTIO_NET_F_GUEST_CSUM | VTNET_LRO_FEATURES);
325010b59a9bSPeter Grehan
32514ee96792SJustin Hibbits if ((if_getcapenable(ifp) & IFCAP_LRO) == 0)
325210b59a9bSPeter Grehan features &= ~VTNET_LRO_FEATURES;
325310b59a9bSPeter Grehan
32544ee96792SJustin Hibbits if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
325510b59a9bSPeter Grehan features &= ~VIRTIO_NET_F_CTRL_VLAN;
325610b59a9bSPeter Grehan
32578f3600b1SBryan Venteicher error = virtio_reinit(dev, features);
325844559b26SBryan Venteicher if (error) {
32598f3600b1SBryan Venteicher device_printf(dev, "virtio reinit error %d\n", error);
32608f3600b1SBryan Venteicher return (error);
326110b59a9bSPeter Grehan }
326210b59a9bSPeter Grehan
326344559b26SBryan Venteicher sc->vtnet_features = features;
326444559b26SBryan Venteicher virtio_reinit_complete(dev);
326544559b26SBryan Venteicher
326644559b26SBryan Venteicher return (0);
326744559b26SBryan Venteicher }
326844559b26SBryan Venteicher
326910b59a9bSPeter Grehan static void
vtnet_init_rx_filters(struct vtnet_softc * sc)32708f3600b1SBryan Venteicher vtnet_init_rx_filters(struct vtnet_softc *sc)
32718f3600b1SBryan Venteicher {
32724ee96792SJustin Hibbits if_t ifp;
32738f3600b1SBryan Venteicher
32748f3600b1SBryan Venteicher ifp = sc->vtnet_ifp;
32758f3600b1SBryan Venteicher
32768f3600b1SBryan Venteicher if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
32778f3600b1SBryan Venteicher vtnet_rx_filter(sc);
32788f3600b1SBryan Venteicher vtnet_rx_filter_mac(sc);
32798f3600b1SBryan Venteicher }
32808f3600b1SBryan Venteicher
32814ee96792SJustin Hibbits if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
32828f3600b1SBryan Venteicher vtnet_rx_filter_vlan(sc);
32838f3600b1SBryan Venteicher }
32848f3600b1SBryan Venteicher
/*
 * Size the Rx mbuf clusters for the current MTU and fill every active
 * Rx virtqueue with receive buffers.  Returns 0 or the populate error.
 */
static int
vtnet_init_rx_queues(struct vtnet_softc *sc)
{
	device_t dev;
	if_t ifp;
	struct vtnet_rxq *rxq;
	int i, clustersz, error;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	/* Cluster size is derived from the MTU; cache it in the softc. */
	clustersz = vtnet_rx_cluster_size(sc, if_getmtu(ifp));
	sc->vtnet_rx_clustersz = clustersz;

	if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) {
		/*
		 * Without mergeable buffers, a maximum-sized LRO frame must
		 * fit in a chain of clusters; compute how many are needed.
		 */
		sc->vtnet_rx_nmbufs = howmany(sizeof(struct vtnet_rx_header) +
		    VTNET_MAX_RX_SIZE, clustersz);
		KASSERT(sc->vtnet_rx_nmbufs < sc->vtnet_rx_nsegs,
		    ("%s: too many rx mbufs %d for %d segments", __func__,
		    sc->vtnet_rx_nmbufs, sc->vtnet_rx_nsegs));
	} else
		sc->vtnet_rx_nmbufs = 1;

	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
		rxq = &sc->vtnet_rxqs[i];

		/* Hold the lock to satisfy asserts. */
		VTNET_RXQ_LOCK(rxq);
		error = vtnet_rxq_populate(rxq);
		VTNET_RXQ_UNLOCK(rxq);

		if (error) {
			device_printf(dev, "cannot populate Rx queue %d\n", i);
			return (error);
		}
	}

	return (0);
}
33248f3600b1SBryan Venteicher
33258f3600b1SBryan Venteicher static int
vtnet_init_tx_queues(struct vtnet_softc * sc)33268f3600b1SBryan Venteicher vtnet_init_tx_queues(struct vtnet_softc *sc)
33278f3600b1SBryan Venteicher {
33288f3600b1SBryan Venteicher struct vtnet_txq *txq;
33298f3600b1SBryan Venteicher int i;
33308f3600b1SBryan Venteicher
33318f3600b1SBryan Venteicher for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
33328f3600b1SBryan Venteicher txq = &sc->vtnet_txqs[i];
33338f3600b1SBryan Venteicher txq->vtntx_watchdog = 0;
3334baa5234fSBryan Venteicher txq->vtntx_intr_threshold = vtnet_txq_intr_threshold(txq);
333516f224b5SVincenzo Maffione #ifdef DEV_NETMAP
333616f224b5SVincenzo Maffione netmap_reset(NA(sc->vtnet_ifp), NR_TX, i, 0);
333716f224b5SVincenzo Maffione #endif /* DEV_NETMAP */
33388f3600b1SBryan Venteicher }
33398f3600b1SBryan Venteicher
33408f3600b1SBryan Venteicher return (0);
33418f3600b1SBryan Venteicher }
33428f3600b1SBryan Venteicher
/*
 * Initialize the Rx queues then the Tx queues, stopping at the first
 * failure.  Returns 0 on success.
 */
static int
vtnet_init_rxtx_queues(struct vtnet_softc *sc)
{
	int error;

	error = vtnet_init_rx_queues(sc);
	if (error != 0)
		return (error);

	return (vtnet_init_tx_queues(sc));
}
33588f3600b1SBryan Venteicher
33598f3600b1SBryan Venteicher static void
vtnet_set_active_vq_pairs(struct vtnet_softc * sc)33608f3600b1SBryan Venteicher vtnet_set_active_vq_pairs(struct vtnet_softc *sc)
33618f3600b1SBryan Venteicher {
33628f3600b1SBryan Venteicher device_t dev;
33638f3600b1SBryan Venteicher int npairs;
33648f3600b1SBryan Venteicher
33658f3600b1SBryan Venteicher dev = sc->vtnet_dev;
33668f3600b1SBryan Venteicher
33675e220811SBryan Venteicher if ((sc->vtnet_flags & VTNET_FLAG_MQ) == 0) {
33688f3600b1SBryan Venteicher sc->vtnet_act_vq_pairs = 1;
33698f3600b1SBryan Venteicher return;
33708f3600b1SBryan Venteicher }
33718f3600b1SBryan Venteicher
3372b470419eSBryan Venteicher npairs = sc->vtnet_req_vq_pairs;
33738f3600b1SBryan Venteicher
33748f3600b1SBryan Venteicher if (vtnet_ctrl_mq_cmd(sc, npairs) != 0) {
3375bd8809dfSBryan Venteicher device_printf(dev, "cannot set active queue pairs to %d, "
3376bd8809dfSBryan Venteicher "falling back to 1 queue pair\n", npairs);
33778f3600b1SBryan Venteicher npairs = 1;
33788f3600b1SBryan Venteicher }
33798f3600b1SBryan Venteicher
33808f3600b1SBryan Venteicher sc->vtnet_act_vq_pairs = npairs;
33818f3600b1SBryan Venteicher }
33828f3600b1SBryan Venteicher
/*
 * Propagate changes to the Rx checksum/LRO capabilities to the host via
 * the guest-offloads control command.  On failure the interface is
 * fully reinitialized to reach a consistent state.  Called with the
 * core lock held.
 */
static void
vtnet_update_rx_offloads(struct vtnet_softc *sc)
{
	if_t ifp;
	uint64_t features;
	int error;

	ifp = sc->vtnet_ifp;
	features = sc->vtnet_features;

	VTNET_CORE_LOCK_ASSERT(sc);

	if (if_getcapabilities(ifp) & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
		if (if_getcapenable(ifp) & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))
			features |= VIRTIO_NET_F_GUEST_CSUM;
		else
			features &= ~VIRTIO_NET_F_GUEST_CSUM;
	}

	/* Hardware LRO only; software LRO does not involve the host. */
	if (if_getcapabilities(ifp) & IFCAP_LRO && !vtnet_software_lro(sc)) {
		if (if_getcapenable(ifp) & IFCAP_LRO)
			features |= VTNET_LRO_FEATURES;
		else
			features &= ~VTNET_LRO_FEATURES;
	}

	error = vtnet_ctrl_guest_offloads(sc,
	    features & (VIRTIO_NET_F_GUEST_CSUM | VIRTIO_NET_F_GUEST_TSO4 |
	    VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN  |
	    VIRTIO_NET_F_GUEST_UFO));
	if (error) {
		device_printf(sc->vtnet_dev,
		    "%s: cannot update Rx features\n", __func__);
		/* Recover by restarting the interface if it was running. */
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			vtnet_init_locked(sc, 0);
		}
	} else
		sc->vtnet_features = features;
}
3423e36a6b1bSBryan Venteicher
/*
 * Bring the device back up after a stop: renegotiate features, restore
 * the MAC address and receive filters, rebuild if_hwassist from the
 * enabled capabilities, and repopulate the queues.  Returns 0 or an
 * error from one of those steps.
 */
static int
vtnet_reinit(struct vtnet_softc *sc)
{
	if_t ifp;
	int error;

	ifp = sc->vtnet_ifp;

	/* Pick up any MAC address change made while stopped. */
	bcopy(if_getlladdr(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN);

	error = vtnet_virtio_reinit(sc);
	if (error)
		return (error);

	vtnet_set_macaddr(sc);
	vtnet_set_active_vq_pairs(sc);

	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ)
		vtnet_init_rx_filters(sc);

	/* Rebuild the Tx offload assist flags from if_capenable. */
	if_sethwassist(ifp, 0);
	if (if_getcapenable(ifp) & IFCAP_TXCSUM)
		if_sethwassistbits(ifp, VTNET_CSUM_OFFLOAD, 0);
	if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6)
		if_sethwassistbits(ifp, VTNET_CSUM_OFFLOAD_IPV6, 0);
	if (if_getcapenable(ifp) & IFCAP_TSO4)
		if_sethwassistbits(ifp, CSUM_IP_TSO, 0);
	if (if_getcapenable(ifp) & IFCAP_TSO6)
		if_sethwassistbits(ifp, CSUM_IP6_TSO, 0);

	error = vtnet_init_rxtx_queues(sc);
	if (error)
		return (error);

	return (0);
}
34608f3600b1SBryan Venteicher
34618f3600b1SBryan Venteicher static void
vtnet_init_locked(struct vtnet_softc * sc,int init_mode)346216f224b5SVincenzo Maffione vtnet_init_locked(struct vtnet_softc *sc, int init_mode)
34638f3600b1SBryan Venteicher {
34644ee96792SJustin Hibbits if_t ifp;
34658f3600b1SBryan Venteicher
34668f3600b1SBryan Venteicher ifp = sc->vtnet_ifp;
34678f3600b1SBryan Venteicher
34688f3600b1SBryan Venteicher VTNET_CORE_LOCK_ASSERT(sc);
34698f3600b1SBryan Venteicher
34704ee96792SJustin Hibbits if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
34718f3600b1SBryan Venteicher return;
34728f3600b1SBryan Venteicher
34738f3600b1SBryan Venteicher vtnet_stop(sc);
34748f3600b1SBryan Venteicher
347516f224b5SVincenzo Maffione #ifdef DEV_NETMAP
347616f224b5SVincenzo Maffione /* Once stopped we can update the netmap flags, if necessary. */
347716f224b5SVincenzo Maffione switch (init_mode) {
347816f224b5SVincenzo Maffione case VTNET_INIT_NETMAP_ENTER:
347916f224b5SVincenzo Maffione nm_set_native_flags(NA(ifp));
348016f224b5SVincenzo Maffione break;
348116f224b5SVincenzo Maffione case VTNET_INIT_NETMAP_EXIT:
348216f224b5SVincenzo Maffione nm_clear_native_flags(NA(ifp));
348316f224b5SVincenzo Maffione break;
348416f224b5SVincenzo Maffione }
348516f224b5SVincenzo Maffione #endif /* DEV_NETMAP */
348616f224b5SVincenzo Maffione
348744559b26SBryan Venteicher if (vtnet_reinit(sc) != 0) {
348844559b26SBryan Venteicher vtnet_stop(sc);
348944559b26SBryan Venteicher return;
349044559b26SBryan Venteicher }
34918f3600b1SBryan Venteicher
34924ee96792SJustin Hibbits if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
349310b59a9bSPeter Grehan vtnet_update_link_status(sc);
349444559b26SBryan Venteicher vtnet_enable_interrupts(sc);
349510b59a9bSPeter Grehan callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
34968f3600b1SBryan Venteicher
3497bb714db6SVincenzo Maffione #ifdef DEV_NETMAP
3498bb714db6SVincenzo Maffione /* Re-enable txsync/rxsync. */
3499bb714db6SVincenzo Maffione netmap_enable_all_rings(ifp);
3500bb714db6SVincenzo Maffione #endif /* DEV_NETMAP */
350110b59a9bSPeter Grehan }
350210b59a9bSPeter Grehan
/*
 * if_init entry point: acquire the core lock and run the locked
 * initialization path.
 */
static void
vtnet_init(void *xsc)
{
	struct vtnet_softc *sc = xsc;

	VTNET_CORE_LOCK(sc);
	vtnet_init_locked(sc, 0);
	VTNET_CORE_UNLOCK(sc);
}
35148f3600b1SBryan Venteicher
/*
 * Sanity hook run when tearing down the control virtqueue; there is
 * nothing to free, only an invariant to assert.
 */
static void
vtnet_free_ctrl_vq(struct vtnet_softc *sc)
{

	/*
	 * The control virtqueue is only polled and therefore it should
	 * already be empty.
	 */
	KASSERT(virtqueue_empty(sc->vtnet_ctrl_vq),
	    ("%s: ctrl vq %p not empty", __func__, sc->vtnet_ctrl_vq));
}
352610b59a9bSPeter Grehan
/*
 * Enqueue one command on the control virtqueue and poll until the host
 * completes it.  'sg' holds 'readable' device-readable segments followed
 * by 'writable' device-writable ones; 'cookie' identifies the request.
 * If the queue is unexpectedly non-empty the command is silently
 * skipped, leaving the caller's pre-set VIRTIO_NET_ERR ack in place.
 */
static void
vtnet_exec_ctrl_cmd(struct vtnet_softc *sc, void *cookie,
    struct sglist *sg, int readable, int writable)
{
	struct virtqueue *vq;

	vq = sc->vtnet_ctrl_vq;

	MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ);
	VTNET_CORE_LOCK_ASSERT(sc);

	if (!virtqueue_empty(vq))
		return;

	/*
	 * Poll for the response, but the command is likely completed before
	 * returning from the notify.
	 */
	if (virtqueue_enqueue(vq, cookie, sg, readable, writable) == 0) {
		virtqueue_notify(vq);
		virtqueue_poll(vq, NULL);
	}
}
355010b59a9bSPeter Grehan
/*
 * Issue a VIRTIO_NET_CTRL_MAC_ADDR_SET control command to program the
 * device's MAC address.  Returns 0 on success or EIO if the device
 * rejected (or never acknowledged) the command.  The command, address,
 * and ack live in one on-stack struct; padding/alignment keeps each
 * sglist segment independently addressable.
 */
static int
vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr)
{
	struct sglist_seg segs[3];
	struct sglist sg;
	struct {
		struct virtio_net_ctrl_hdr hdr __aligned(2);
		uint8_t pad1;
		uint8_t addr[ETHER_ADDR_LEN] __aligned(8);
		uint8_t pad2;
		uint8_t ack;
	} s;
	int error;

	error = 0;
	MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_MAC);

	s.hdr.class = VIRTIO_NET_CTRL_MAC;
	s.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
	bcopy(hwaddr, &s.addr[0], ETHER_ADDR_LEN);
	/* Preset to error; the device overwrites on success. */
	s.ack = VIRTIO_NET_ERR;

	/* Layout: header + address readable, ack writable (last segment). */
	sglist_init(&sg, nitems(segs), segs);
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, &s.addr[0], ETHER_ADDR_LEN);
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
	MPASS(error == 0 && sg.sg_nseg == nitems(segs));

	if (error == 0)
		vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);

	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
}
35848f3600b1SBryan Venteicher
35858f3600b1SBryan Venteicher static int
vtnet_ctrl_guest_offloads(struct vtnet_softc * sc,uint64_t offloads)3586e36a6b1bSBryan Venteicher vtnet_ctrl_guest_offloads(struct vtnet_softc *sc, uint64_t offloads)
3587e36a6b1bSBryan Venteicher {
3588e36a6b1bSBryan Venteicher struct sglist_seg segs[3];
3589e36a6b1bSBryan Venteicher struct sglist sg;
3590e36a6b1bSBryan Venteicher struct {
3591e36a6b1bSBryan Venteicher struct virtio_net_ctrl_hdr hdr __aligned(2);
3592e36a6b1bSBryan Venteicher uint8_t pad1;
3593e36a6b1bSBryan Venteicher uint64_t offloads __aligned(8);
3594e36a6b1bSBryan Venteicher uint8_t pad2;
3595e36a6b1bSBryan Venteicher uint8_t ack;
3596e36a6b1bSBryan Venteicher } s;
3597e36a6b1bSBryan Venteicher int error;
3598e36a6b1bSBryan Venteicher
3599e36a6b1bSBryan Venteicher error = 0;
3600e36a6b1bSBryan Venteicher MPASS(sc->vtnet_features & VIRTIO_NET_F_CTRL_GUEST_OFFLOADS);
3601e36a6b1bSBryan Venteicher
3602e36a6b1bSBryan Venteicher s.hdr.class = VIRTIO_NET_CTRL_GUEST_OFFLOADS;
3603e36a6b1bSBryan Venteicher s.hdr.cmd = VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET;
3604e36a6b1bSBryan Venteicher s.offloads = vtnet_gtoh64(sc, offloads);
3605e36a6b1bSBryan Venteicher s.ack = VIRTIO_NET_ERR;
3606e36a6b1bSBryan Venteicher
3607e36a6b1bSBryan Venteicher sglist_init(&sg, nitems(segs), segs);
3608e36a6b1bSBryan Venteicher error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3609e36a6b1bSBryan Venteicher error |= sglist_append(&sg, &s.offloads, sizeof(uint64_t));
3610e36a6b1bSBryan Venteicher error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3611e36a6b1bSBryan Venteicher MPASS(error == 0 && sg.sg_nseg == nitems(segs));
3612e36a6b1bSBryan Venteicher
3613e36a6b1bSBryan Venteicher if (error == 0)
3614e36a6b1bSBryan Venteicher vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3615e36a6b1bSBryan Venteicher
3616e36a6b1bSBryan Venteicher return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3617e36a6b1bSBryan Venteicher }
3618e36a6b1bSBryan Venteicher
3619e36a6b1bSBryan Venteicher static int
vtnet_ctrl_mq_cmd(struct vtnet_softc * sc,uint16_t npairs)36208f3600b1SBryan Venteicher vtnet_ctrl_mq_cmd(struct vtnet_softc *sc, uint16_t npairs)
36218f3600b1SBryan Venteicher {
36228f3600b1SBryan Venteicher struct sglist_seg segs[3];
36238f3600b1SBryan Venteicher struct sglist sg;
36248f3600b1SBryan Venteicher struct {
36255e220811SBryan Venteicher struct virtio_net_ctrl_hdr hdr __aligned(2);
36268f3600b1SBryan Venteicher uint8_t pad1;
36275e220811SBryan Venteicher struct virtio_net_ctrl_mq mq __aligned(2);
36288f3600b1SBryan Venteicher uint8_t pad2;
36298f3600b1SBryan Venteicher uint8_t ack;
36305e220811SBryan Venteicher } s;
36318f3600b1SBryan Venteicher int error;
36328f3600b1SBryan Venteicher
36335e220811SBryan Venteicher error = 0;
36345e220811SBryan Venteicher MPASS(sc->vtnet_flags & VTNET_FLAG_MQ);
36355e220811SBryan Venteicher
36368f3600b1SBryan Venteicher s.hdr.class = VIRTIO_NET_CTRL_MQ;
36378f3600b1SBryan Venteicher s.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
36385e220811SBryan Venteicher s.mq.virtqueue_pairs = vtnet_gtoh16(sc, npairs);
36398f3600b1SBryan Venteicher s.ack = VIRTIO_NET_ERR;
36408f3600b1SBryan Venteicher
36415e220811SBryan Venteicher sglist_init(&sg, nitems(segs), segs);
36428f3600b1SBryan Venteicher error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
36438f3600b1SBryan Venteicher error |= sglist_append(&sg, &s.mq, sizeof(struct virtio_net_ctrl_mq));
36448f3600b1SBryan Venteicher error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
36455e220811SBryan Venteicher MPASS(error == 0 && sg.sg_nseg == nitems(segs));
36468f3600b1SBryan Venteicher
36475e220811SBryan Venteicher if (error == 0)
36488f3600b1SBryan Venteicher vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
36498f3600b1SBryan Venteicher
36508f3600b1SBryan Venteicher return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
365110b59a9bSPeter Grehan }
365210b59a9bSPeter Grehan
365310b59a9bSPeter Grehan static int
vtnet_ctrl_rx_cmd(struct vtnet_softc * sc,uint8_t cmd,bool on)3654c1b554c8SAlex Richardson vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, uint8_t cmd, bool on)
365510b59a9bSPeter Grehan {
365610b59a9bSPeter Grehan struct sglist_seg segs[3];
365710b59a9bSPeter Grehan struct sglist sg;
36588f3600b1SBryan Venteicher struct {
36595e220811SBryan Venteicher struct virtio_net_ctrl_hdr hdr __aligned(2);
36608f3600b1SBryan Venteicher uint8_t pad1;
36618f3600b1SBryan Venteicher uint8_t onoff;
36628f3600b1SBryan Venteicher uint8_t pad2;
36638f3600b1SBryan Venteicher uint8_t ack;
36645e220811SBryan Venteicher } s;
366510b59a9bSPeter Grehan int error;
366610b59a9bSPeter Grehan
36675e220811SBryan Venteicher error = 0;
36685e220811SBryan Venteicher MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_RX);
366910b59a9bSPeter Grehan
36708f3600b1SBryan Venteicher s.hdr.class = VIRTIO_NET_CTRL_RX;
36718f3600b1SBryan Venteicher s.hdr.cmd = cmd;
3672c1b554c8SAlex Richardson s.onoff = on;
36738f3600b1SBryan Venteicher s.ack = VIRTIO_NET_ERR;
367410b59a9bSPeter Grehan
36755e220811SBryan Venteicher sglist_init(&sg, nitems(segs), segs);
36768f3600b1SBryan Venteicher error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
36778f3600b1SBryan Venteicher error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t));
36788f3600b1SBryan Venteicher error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
36795e220811SBryan Venteicher MPASS(error == 0 && sg.sg_nseg == nitems(segs));
368010b59a9bSPeter Grehan
36815e220811SBryan Venteicher if (error == 0)
36828f3600b1SBryan Venteicher vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
368310b59a9bSPeter Grehan
36848f3600b1SBryan Venteicher return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
368510b59a9bSPeter Grehan }
368610b59a9bSPeter Grehan
/* Enable or disable host promiscuous mode via the control virtqueue. */
static int
vtnet_set_promisc(struct vtnet_softc *sc, bool on)
{
	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on));
}
369210b59a9bSPeter Grehan
/* Enable or disable host all-multicast mode via the control virtqueue. */
static int
vtnet_set_allmulti(struct vtnet_softc *sc, bool on)
{
	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on));
}
369810b59a9bSPeter Grehan
36998f3600b1SBryan Venteicher static void
vtnet_rx_filter(struct vtnet_softc * sc)37008f3600b1SBryan Venteicher vtnet_rx_filter(struct vtnet_softc *sc)
37018f3600b1SBryan Venteicher {
37028f3600b1SBryan Venteicher device_t dev;
37034ee96792SJustin Hibbits if_t ifp;
37048f3600b1SBryan Venteicher
37058f3600b1SBryan Venteicher dev = sc->vtnet_dev;
37068f3600b1SBryan Venteicher ifp = sc->vtnet_ifp;
37078f3600b1SBryan Venteicher
37088f3600b1SBryan Venteicher VTNET_CORE_LOCK_ASSERT(sc);
37098f3600b1SBryan Venteicher
37104ee96792SJustin Hibbits if (vtnet_set_promisc(sc, if_getflags(ifp) & IFF_PROMISC) != 0) {
37118f3600b1SBryan Venteicher device_printf(dev, "cannot %s promiscuous mode\n",
37124ee96792SJustin Hibbits if_getflags(ifp) & IFF_PROMISC ? "enable" : "disable");
37135e220811SBryan Venteicher }
37148f3600b1SBryan Venteicher
37154ee96792SJustin Hibbits if (vtnet_set_allmulti(sc, if_getflags(ifp) & IFF_ALLMULTI) != 0) {
37168f3600b1SBryan Venteicher device_printf(dev, "cannot %s all-multicast mode\n",
37174ee96792SJustin Hibbits if_getflags(ifp) & IFF_ALLMULTI ? "enable" : "disable");
37188f3600b1SBryan Venteicher }
37195e220811SBryan Venteicher }
37208f3600b1SBryan Venteicher
/*
 * if_foreach_lladdr() callback: copy one unicast link-level address into
 * the MAC filter table.  Returns the per-address count contribution.
 */
static u_int
vtnet_copy_ifaddr(void *arg, struct sockaddr_dl *sdl, u_int ucnt)
{
	struct vtnet_softc *sc = arg;

	/* Skip the interface's own address; it is not a filter entry. */
	if (memcmp(LLADDR(sdl), sc->vtnet_hwaddr, ETHER_ADDR_LEN) == 0)
		return (0);

	/*
	 * Only copy entries that fit in the table, but keep counting past
	 * the limit so the caller can detect overflow and fall back to
	 * promiscuous mode.
	 */
	if (ucnt < VTNET_MAX_MAC_ENTRIES)
		bcopy(LLADDR(sdl),
		    &sc->vtnet_mac_filter->vmf_unicast.macs[ucnt],
		    ETHER_ADDR_LEN);

	return (1);
}
37367dce5659SGleb Smirnoff
/*
 * if_foreach_llmaddr() callback: copy one multicast link-level address
 * into the MAC filter table.  Counting continues past the table limit so
 * the caller can detect overflow and fall back to all-multicast mode.
 */
static u_int
vtnet_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt)
{
	struct vtnet_mac_filter *filter = arg;

	if (mcnt < VTNET_MAX_MAC_ENTRIES)
		bcopy(LLADDR(sdl), &filter->vmf_multicast.macs[mcnt],
		    ETHER_ADDR_LEN);

	return (1);
}
37487dce5659SGleb Smirnoff
/*
 * Program the host's unicast and multicast MAC filter tables from the
 * interface's address lists.  If either list overflows the table, fall
 * back to promiscuous / all-multicast mode instead.  Called with the
 * core lock held and requires VTNET_FLAG_CTRL_RX.
 */
static void
vtnet_rx_filter_mac(struct vtnet_softc *sc)
{
	struct virtio_net_ctrl_hdr hdr __aligned(2);
	struct vtnet_mac_filter *filter;
	struct sglist_seg segs[4];
	struct sglist sg;
	if_t ifp;
	bool promisc, allmulti;
	u_int ucnt, mcnt;
	int error;
	uint8_t ack;

	ifp = sc->vtnet_ifp;
	filter = sc->vtnet_mac_filter;
	error = 0;

	MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_RX);
	VTNET_CORE_LOCK_ASSERT(sc);

	/* Unicast MAC addresses: */
	ucnt = if_foreach_lladdr(ifp, vtnet_copy_ifaddr, sc);
	/* The callback counts past the limit precisely to signal overflow. */
	promisc = (ucnt > VTNET_MAX_MAC_ENTRIES);

	if (promisc) {
		/* Send an empty unicast table; promisc is enabled below. */
		ucnt = 0;
		if_printf(ifp, "more than %d MAC addresses assigned, "
		    "falling back to promiscuous mode\n",
		    VTNET_MAX_MAC_ENTRIES);
	}

	/* Multicast MAC addresses: */
	mcnt = if_foreach_llmaddr(ifp, vtnet_copy_maddr, filter);
	allmulti = (mcnt > VTNET_MAX_MAC_ENTRIES);

	if (allmulti) {
		/* Send an empty multicast table; allmulti is enabled below. */
		mcnt = 0;
		if_printf(ifp, "more than %d multicast MAC addresses "
		    "assigned, falling back to all-multicast mode\n",
		    VTNET_MAX_MAC_ENTRIES);
	}

	/* Both tables overflowed; no point in programming either one. */
	if (promisc && allmulti)
		goto out;

	filter->vmf_unicast.nentries = vtnet_gtoh32(sc, ucnt);
	filter->vmf_multicast.nentries = vtnet_gtoh32(sc, mcnt);

	hdr.class = VIRTIO_NET_CTRL_MAC;
	hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
	ack = VIRTIO_NET_ERR;

	/* Only the populated prefix of each table is handed to the host. */
	sglist_init(&sg, nitems(segs), segs);
	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, &filter->vmf_unicast,
	    sizeof(uint32_t) + ucnt * ETHER_ADDR_LEN);
	error |= sglist_append(&sg, &filter->vmf_multicast,
	    sizeof(uint32_t) + mcnt * ETHER_ADDR_LEN);
	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
	MPASS(error == 0 && sg.sg_nseg == nitems(segs));

	if (error == 0)
		vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
	if (ack != VIRTIO_NET_OK)
		if_printf(ifp, "error setting host MAC filter table\n");

out:
	if (promisc != 0 && vtnet_set_promisc(sc, true) != 0)
		if_printf(ifp, "cannot enable promiscuous mode\n");
	if (allmulti != 0 && vtnet_set_allmulti(sc, true) != 0)
		if_printf(ifp, "cannot enable all-multicast mode\n");
}
382110b59a9bSPeter Grehan
382210b59a9bSPeter Grehan static int
vtnet_exec_vlan_filter(struct vtnet_softc * sc,int add,uint16_t tag)382310b59a9bSPeter Grehan vtnet_exec_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
382410b59a9bSPeter Grehan {
382510b59a9bSPeter Grehan struct sglist_seg segs[3];
382610b59a9bSPeter Grehan struct sglist sg;
38278f3600b1SBryan Venteicher struct {
38285e220811SBryan Venteicher struct virtio_net_ctrl_hdr hdr __aligned(2);
38298f3600b1SBryan Venteicher uint8_t pad1;
38305e220811SBryan Venteicher uint16_t tag __aligned(2);
38318f3600b1SBryan Venteicher uint8_t pad2;
383210b59a9bSPeter Grehan uint8_t ack;
38335e220811SBryan Venteicher } s;
383410b59a9bSPeter Grehan int error;
383510b59a9bSPeter Grehan
38365e220811SBryan Venteicher error = 0;
38375e220811SBryan Venteicher MPASS(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER);
38385e220811SBryan Venteicher
38398f3600b1SBryan Venteicher s.hdr.class = VIRTIO_NET_CTRL_VLAN;
38408f3600b1SBryan Venteicher s.hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
38415e220811SBryan Venteicher s.tag = vtnet_gtoh16(sc, tag);
38428f3600b1SBryan Venteicher s.ack = VIRTIO_NET_ERR;
384310b59a9bSPeter Grehan
38445e220811SBryan Venteicher sglist_init(&sg, nitems(segs), segs);
38458f3600b1SBryan Venteicher error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
38468f3600b1SBryan Venteicher error |= sglist_append(&sg, &s.tag, sizeof(uint16_t));
38478f3600b1SBryan Venteicher error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
38485e220811SBryan Venteicher MPASS(error == 0 && sg.sg_nseg == nitems(segs));
384910b59a9bSPeter Grehan
38505e220811SBryan Venteicher if (error == 0)
38518f3600b1SBryan Venteicher vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
385210b59a9bSPeter Grehan
38538f3600b1SBryan Venteicher return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
385410b59a9bSPeter Grehan }
385510b59a9bSPeter Grehan
/*
 * Reprogram the host with every VLAN currently recorded in the driver's
 * shadow filter bitmap.  Called with the core lock held.
 */
static void
vtnet_rx_filter_vlan(struct vtnet_softc *sc)
{
	int i, bit;
	uint32_t w;
	uint16_t tag;

	MPASS(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER);
	VTNET_CORE_LOCK_ASSERT(sc);

	/* Enable the filter for each configured VLAN. */
	for (i = 0; i < VTNET_VLAN_FILTER_NWORDS; i++) {
		w = sc->vtnet_vlan_filter[i];

		/* Walk the word one set bit at a time. */
		while ((bit = ffs(w) - 1) != -1) {
			w &= ~(1 << bit);
			/* Recover the VLAN tag from word index and bit. */
			tag = sizeof(w) * CHAR_BIT * i + bit;

			if (vtnet_exec_vlan_filter(sc, 1, tag) != 0) {
				device_printf(sc->vtnet_dev,
				    "cannot enable VLAN %d filter\n", tag);
			}
		}
	}
}
388110b59a9bSPeter Grehan
388210b59a9bSPeter Grehan static void
vtnet_update_vlan_filter(struct vtnet_softc * sc,int add,uint16_t tag)38838f3600b1SBryan Venteicher vtnet_update_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
388410b59a9bSPeter Grehan {
38854ee96792SJustin Hibbits if_t ifp;
388610b59a9bSPeter Grehan int idx, bit;
388710b59a9bSPeter Grehan
388810b59a9bSPeter Grehan ifp = sc->vtnet_ifp;
388910b59a9bSPeter Grehan idx = (tag >> 5) & 0x7F;
389010b59a9bSPeter Grehan bit = tag & 0x1F;
389110b59a9bSPeter Grehan
38928f3600b1SBryan Venteicher if (tag == 0 || tag > 4095)
38938f3600b1SBryan Venteicher return;
389410b59a9bSPeter Grehan
38958f3600b1SBryan Venteicher VTNET_CORE_LOCK(sc);
389610b59a9bSPeter Grehan
38978f3600b1SBryan Venteicher if (add)
38988f3600b1SBryan Venteicher sc->vtnet_vlan_filter[idx] |= (1 << bit);
38998f3600b1SBryan Venteicher else
39008f3600b1SBryan Venteicher sc->vtnet_vlan_filter[idx] &= ~(1 << bit);
39018f3600b1SBryan Venteicher
39024ee96792SJustin Hibbits if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER &&
39034ee96792SJustin Hibbits if_getdrvflags(ifp) & IFF_DRV_RUNNING &&
39048f3600b1SBryan Venteicher vtnet_exec_vlan_filter(sc, add, tag) != 0) {
390510b59a9bSPeter Grehan device_printf(sc->vtnet_dev,
390610b59a9bSPeter Grehan "cannot %s VLAN %d %s the host filter table\n",
39078f3600b1SBryan Venteicher add ? "add" : "remove", tag, add ? "to" : "from");
390810b59a9bSPeter Grehan }
390910b59a9bSPeter Grehan
39108f3600b1SBryan Venteicher VTNET_CORE_UNLOCK(sc);
391110b59a9bSPeter Grehan }
391210b59a9bSPeter Grehan
391310b59a9bSPeter Grehan static void
vtnet_register_vlan(void * arg,if_t ifp,uint16_t tag)39144ee96792SJustin Hibbits vtnet_register_vlan(void *arg, if_t ifp, uint16_t tag)
391510b59a9bSPeter Grehan {
391610b59a9bSPeter Grehan
39174ee96792SJustin Hibbits if (if_getsoftc(ifp) != arg)
391810b59a9bSPeter Grehan return;
391910b59a9bSPeter Grehan
39208f3600b1SBryan Venteicher vtnet_update_vlan_filter(arg, 1, tag);
392110b59a9bSPeter Grehan }
392210b59a9bSPeter Grehan
392310b59a9bSPeter Grehan static void
vtnet_unregister_vlan(void * arg,if_t ifp,uint16_t tag)39244ee96792SJustin Hibbits vtnet_unregister_vlan(void *arg, if_t ifp, uint16_t tag)
392510b59a9bSPeter Grehan {
392610b59a9bSPeter Grehan
39274ee96792SJustin Hibbits if (if_getsoftc(ifp) != arg)
392810b59a9bSPeter Grehan return;
392910b59a9bSPeter Grehan
39308f3600b1SBryan Venteicher vtnet_update_vlan_filter(arg, 0, tag);
39318f3600b1SBryan Venteicher }
39328f3600b1SBryan Venteicher
39336a733393SBryan Venteicher static void
vtnet_update_speed_duplex(struct vtnet_softc * sc)39346a733393SBryan Venteicher vtnet_update_speed_duplex(struct vtnet_softc *sc)
39356a733393SBryan Venteicher {
39364ee96792SJustin Hibbits if_t ifp;
39376a733393SBryan Venteicher uint32_t speed;
39386a733393SBryan Venteicher
39396a733393SBryan Venteicher ifp = sc->vtnet_ifp;
39406a733393SBryan Venteicher
39416a733393SBryan Venteicher if ((sc->vtnet_features & VIRTIO_NET_F_SPEED_DUPLEX) == 0)
394232e0493cSBryan Venteicher return;
39436a733393SBryan Venteicher
394432e0493cSBryan Venteicher /* BMV: Ignore duplex. */
394532e0493cSBryan Venteicher speed = virtio_read_dev_config_4(sc->vtnet_dev,
394632e0493cSBryan Venteicher offsetof(struct virtio_net_config, speed));
3947c1b554c8SAlex Richardson if (speed != UINT32_MAX)
39484ee96792SJustin Hibbits if_setbaudrate(ifp, IF_Mbps(speed));
39496a733393SBryan Venteicher }
39506a733393SBryan Venteicher
39518f3600b1SBryan Venteicher static int
vtnet_is_link_up(struct vtnet_softc * sc)39528f3600b1SBryan Venteicher vtnet_is_link_up(struct vtnet_softc *sc)
39538f3600b1SBryan Venteicher {
39548f3600b1SBryan Venteicher uint16_t status;
39558f3600b1SBryan Venteicher
395632e0493cSBryan Venteicher if ((sc->vtnet_features & VIRTIO_NET_F_STATUS) == 0)
395732e0493cSBryan Venteicher return (1);
39588f3600b1SBryan Venteicher
395932e0493cSBryan Venteicher status = virtio_read_dev_config_2(sc->vtnet_dev,
39608f3600b1SBryan Venteicher offsetof(struct virtio_net_config, status));
39618f3600b1SBryan Venteicher
39628f3600b1SBryan Venteicher return ((status & VIRTIO_NET_S_LINK_UP) != 0);
39638f3600b1SBryan Venteicher }
39648f3600b1SBryan Venteicher
/*
 * Compare the device's link state against our cached state and notify
 * the network stack on a transition.  Called with the core lock held.
 */
static void
vtnet_update_link_status(struct vtnet_softc *sc)
{
	if_t ifp;
	int link;

	ifp = sc->vtnet_ifp;
	VTNET_CORE_LOCK_ASSERT(sc);
	link = vtnet_is_link_up(sc);

	/* Notify if the link status has changed. */
	if (link != 0 && sc->vtnet_link_active == 0) {
		/* Link came up: refresh baudrate before announcing it. */
		vtnet_update_speed_duplex(sc);
		sc->vtnet_link_active = 1;
		if_link_state_change(ifp, LINK_STATE_UP);
	} else if (link == 0 && sc->vtnet_link_active != 0) {
		sc->vtnet_link_active = 0;
		if_link_state_change(ifp, LINK_STATE_DOWN);
	}
}
398510b59a9bSPeter Grehan
398610b59a9bSPeter Grehan static int
vtnet_ifmedia_upd(if_t ifp __unused)39874ee96792SJustin Hibbits vtnet_ifmedia_upd(if_t ifp __unused)
398810b59a9bSPeter Grehan {
39896a733393SBryan Venteicher return (EOPNOTSUPP);
399010b59a9bSPeter Grehan }
399110b59a9bSPeter Grehan
399210b59a9bSPeter Grehan static void
vtnet_ifmedia_sts(if_t ifp,struct ifmediareq * ifmr)39934ee96792SJustin Hibbits vtnet_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
399410b59a9bSPeter Grehan {
399510b59a9bSPeter Grehan struct vtnet_softc *sc;
399610b59a9bSPeter Grehan
39974ee96792SJustin Hibbits sc = if_getsoftc(ifp);
399810b59a9bSPeter Grehan
399910b59a9bSPeter Grehan ifmr->ifm_status = IFM_AVALID;
400010b59a9bSPeter Grehan ifmr->ifm_active = IFM_ETHER;
400110b59a9bSPeter Grehan
40028f3600b1SBryan Venteicher VTNET_CORE_LOCK(sc);
400310b59a9bSPeter Grehan if (vtnet_is_link_up(sc) != 0) {
400410b59a9bSPeter Grehan ifmr->ifm_status |= IFM_ACTIVE;
40056a733393SBryan Venteicher ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
400610b59a9bSPeter Grehan } else
400710b59a9bSPeter Grehan ifmr->ifm_active |= IFM_NONE;
40088f3600b1SBryan Venteicher VTNET_CORE_UNLOCK(sc);
400910b59a9bSPeter Grehan }
401010b59a9bSPeter Grehan
401110b59a9bSPeter Grehan static void
vtnet_get_macaddr(struct vtnet_softc * sc)401205041794SBryan Venteicher vtnet_get_macaddr(struct vtnet_softc *sc)
40138f3600b1SBryan Venteicher {
40148f3600b1SBryan Venteicher
40155e220811SBryan Venteicher if (sc->vtnet_flags & VTNET_FLAG_MAC) {
40165e220811SBryan Venteicher virtio_read_device_config_array(sc->vtnet_dev,
40175e220811SBryan Venteicher offsetof(struct virtio_net_config, mac),
40185e220811SBryan Venteicher &sc->vtnet_hwaddr[0], sizeof(uint8_t), ETHER_ADDR_LEN);
40195e220811SBryan Venteicher } else {
40205e220811SBryan Venteicher /* Generate a random locally administered unicast address. */
40218f3600b1SBryan Venteicher sc->vtnet_hwaddr[0] = 0xB2;
40228f3600b1SBryan Venteicher arc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1, 0);
40231dbb21dcSBryan Venteicher }
40248f3600b1SBryan Venteicher }
40258f3600b1SBryan Venteicher
40268f3600b1SBryan Venteicher static void
vtnet_set_macaddr(struct vtnet_softc * sc)402705041794SBryan Venteicher vtnet_set_macaddr(struct vtnet_softc *sc)
402805041794SBryan Venteicher {
402944559b26SBryan Venteicher device_t dev;
403005041794SBryan Venteicher int error;
403105041794SBryan Venteicher
403244559b26SBryan Venteicher dev = sc->vtnet_dev;
403344559b26SBryan Venteicher
403405041794SBryan Venteicher if (sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) {
403505041794SBryan Venteicher error = vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr);
403605041794SBryan Venteicher if (error)
403744559b26SBryan Venteicher device_printf(dev, "unable to set MAC address\n");
403805041794SBryan Venteicher return;
403905041794SBryan Venteicher }
404005041794SBryan Venteicher
404105041794SBryan Venteicher /* MAC in config is read-only in modern VirtIO. */
404205041794SBryan Venteicher if (!vtnet_modern(sc) && sc->vtnet_flags & VTNET_FLAG_MAC) {
404305041794SBryan Venteicher for (int i = 0; i < ETHER_ADDR_LEN; i++) {
404444559b26SBryan Venteicher virtio_write_dev_config_1(dev,
404505041794SBryan Venteicher offsetof(struct virtio_net_config, mac) + i,
404605041794SBryan Venteicher sc->vtnet_hwaddr[i]);
404705041794SBryan Venteicher }
404805041794SBryan Venteicher }
404905041794SBryan Venteicher }
405005041794SBryan Venteicher
/* Post-attach hook: push a generated MAC address to the device. */
static void
vtnet_attached_set_macaddr(struct vtnet_softc *sc)
{

	/* Assign MAC address if it was generated. */
	if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0)
		vtnet_set_macaddr(sc);
}
405905041794SBryan Venteicher
406005041794SBryan Venteicher static void
vtnet_vlan_tag_remove(struct mbuf * m)40618f3600b1SBryan Venteicher vtnet_vlan_tag_remove(struct mbuf *m)
40628f3600b1SBryan Venteicher {
40638f3600b1SBryan Venteicher struct ether_vlan_header *evh;
40648f3600b1SBryan Venteicher
40658f3600b1SBryan Venteicher evh = mtod(m, struct ether_vlan_header *);
40668f3600b1SBryan Venteicher m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag);
40678f3600b1SBryan Venteicher m->m_flags |= M_VLANTAG;
40688f3600b1SBryan Venteicher
40698f3600b1SBryan Venteicher /* Strip the 802.1Q header. */
40708f3600b1SBryan Venteicher bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN,
40718f3600b1SBryan Venteicher ETHER_HDR_LEN - ETHER_TYPE_LEN);
40728f3600b1SBryan Venteicher m_adj(m, ETHER_VLAN_ENCAP_LEN);
40738f3600b1SBryan Venteicher }
40748f3600b1SBryan Venteicher
40758f3600b1SBryan Venteicher static void
vtnet_set_rx_process_limit(struct vtnet_softc * sc)407632487a89SBryan Venteicher vtnet_set_rx_process_limit(struct vtnet_softc *sc)
407732487a89SBryan Venteicher {
407832487a89SBryan Venteicher int limit;
407932487a89SBryan Venteicher
408032487a89SBryan Venteicher limit = vtnet_tunable_int(sc, "rx_process_limit",
408132487a89SBryan Venteicher vtnet_rx_process_limit);
408232487a89SBryan Venteicher if (limit < 0)
408332487a89SBryan Venteicher limit = INT_MAX;
408432487a89SBryan Venteicher sc->vtnet_rx_process_limit = limit;
408532487a89SBryan Venteicher }
408632487a89SBryan Venteicher
408732487a89SBryan Venteicher static void
vtnet_setup_rxq_sysctl(struct sysctl_ctx_list * ctx,struct sysctl_oid_list * child,struct vtnet_rxq * rxq)40888f3600b1SBryan Venteicher vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *ctx,
40898f3600b1SBryan Venteicher struct sysctl_oid_list *child, struct vtnet_rxq *rxq)
40908f3600b1SBryan Venteicher {
40918f3600b1SBryan Venteicher struct sysctl_oid *node;
40928f3600b1SBryan Venteicher struct sysctl_oid_list *list;
40938f3600b1SBryan Venteicher struct vtnet_rxq_stats *stats;
40948f3600b1SBryan Venteicher char namebuf[16];
40958f3600b1SBryan Venteicher
40968f3600b1SBryan Venteicher snprintf(namebuf, sizeof(namebuf), "rxq%d", rxq->vtnrx_id);
40978f3600b1SBryan Venteicher node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
40987029da5cSPawel Biernacki CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Receive Queue");
40998f3600b1SBryan Venteicher list = SYSCTL_CHILDREN(node);
41008f3600b1SBryan Venteicher
41018f3600b1SBryan Venteicher stats = &rxq->vtnrx_stats;
41028f3600b1SBryan Venteicher
41038f3600b1SBryan Venteicher SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ipackets", CTLFLAG_RD,
41048f3600b1SBryan Venteicher &stats->vrxs_ipackets, "Receive packets");
41058f3600b1SBryan Venteicher SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ibytes", CTLFLAG_RD,
41068f3600b1SBryan Venteicher &stats->vrxs_ibytes, "Receive bytes");
41078f3600b1SBryan Venteicher SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "iqdrops", CTLFLAG_RD,
41088f3600b1SBryan Venteicher &stats->vrxs_iqdrops, "Receive drops");
41098f3600b1SBryan Venteicher SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ierrors", CTLFLAG_RD,
41108f3600b1SBryan Venteicher &stats->vrxs_ierrors, "Receive errors");
41118f3600b1SBryan Venteicher SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
41128f3600b1SBryan Venteicher &stats->vrxs_csum, "Receive checksum offloaded");
41138f3600b1SBryan Venteicher SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed", CTLFLAG_RD,
41148f3600b1SBryan Venteicher &stats->vrxs_csum_failed, "Receive checksum offload failed");
41152bfab357SBryan Venteicher SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "host_lro", CTLFLAG_RD,
41162bfab357SBryan Venteicher &stats->vrxs_host_lro, "Receive host segmentation offloaded");
41178f3600b1SBryan Venteicher SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
41188f3600b1SBryan Venteicher &stats->vrxs_rescheduled,
41198f3600b1SBryan Venteicher "Receive interrupt handler rescheduled");
41208f3600b1SBryan Venteicher }
41218f3600b1SBryan Venteicher
/* Create the per-transmit-queue statistics sysctl subtree (txqN). */
static void
vtnet_setup_txq_sysctl(struct sysctl_ctx_list *ctx,
    struct sysctl_oid_list *child, struct vtnet_txq *txq)
{
	struct sysctl_oid *node;
	struct sysctl_oid_list *list;
	struct vtnet_txq_stats *stats;
	char namebuf[16];

	snprintf(namebuf, sizeof(namebuf), "txq%d", txq->vtntx_id);
	node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Transmit Queue");
	list = SYSCTL_CHILDREN(node);

	stats = &txq->vtntx_stats;

	/* The sysctls export the counters directly; no copies are made. */
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "opackets", CTLFLAG_RD,
	    &stats->vtxs_opackets, "Transmit packets");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "obytes", CTLFLAG_RD,
	    &stats->vtxs_obytes, "Transmit bytes");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "omcasts", CTLFLAG_RD,
	    &stats->vtxs_omcasts, "Transmit multicasts");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
	    &stats->vtxs_csum, "Transmit checksum offloaded");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD,
	    &stats->vtxs_tso, "Transmit TCP segmentation offloaded");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
	    &stats->vtxs_rescheduled,
	    "Transmit interrupt handler rescheduled");
}
41528f3600b1SBryan Venteicher
/* Attach per-queue statistics sysctls for every requested VQ pair. */
static void
vtnet_setup_queue_sysctl(struct vtnet_softc *sc)
{
	device_t dev;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;
	int i;

	dev = sc->vtnet_dev;
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	child = SYSCTL_CHILDREN(tree);

	for (i = 0; i < sc->vtnet_req_vq_pairs; i++) {
		vtnet_setup_rxq_sysctl(ctx, child, &sc->vtnet_rxqs[i]);
		vtnet_setup_txq_sysctl(ctx, child, &sc->vtnet_txqs[i]);
	}
}
41728f3600b1SBryan Venteicher
/*
 * Register the driver-wide (aggregate) statistics sysctl nodes and
 * refresh the softc-wide totals from the per-queue counters first, so
 * the exported values start out consistent with the queue stats.
 */
static void
vtnet_setup_stat_sysctl(struct sysctl_ctx_list *ctx,
    struct sysctl_oid_list *child, struct vtnet_softc *sc)
{
	struct vtnet_statistics *stats;
	struct vtnet_rxq_stats rxaccum;
	struct vtnet_txq_stats txaccum;

	/* Sum the per-queue counters into local accumulators. */
	vtnet_accum_stats(sc, &rxaccum, &txaccum);

	/* Seed the softc-wide totals from the accumulated queue stats. */
	stats = &sc->vtnet_stats;
	stats->rx_csum_offloaded = rxaccum.vrxs_csum;
	stats->rx_csum_failed = rxaccum.vrxs_csum_failed;
	stats->rx_task_rescheduled = rxaccum.vrxs_rescheduled;
	stats->tx_csum_offloaded = txaccum.vtxs_csum;
	stats->tx_tso_offloaded = txaccum.vtxs_tso;
	stats->tx_task_rescheduled = txaccum.vtxs_rescheduled;

	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "mbuf_alloc_failed",
	    CTLFLAG_RD, &stats->mbuf_alloc_failed,
	    "Mbuf cluster allocation failures");

	/* Receive-path statistics. */
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_frame_too_large",
	    CTLFLAG_RD, &stats->rx_frame_too_large,
	    "Received frame larger than the mbuf chain");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_enq_replacement_failed",
	    CTLFLAG_RD, &stats->rx_enq_replacement_failed,
	    "Enqueuing the replacement receive mbuf failed");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_mergeable_failed",
	    CTLFLAG_RD, &stats->rx_mergeable_failed,
	    "Mergeable buffers receive failures");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ethtype",
	    CTLFLAG_RD, &stats->rx_csum_bad_ethtype,
	    "Received checksum offloaded buffer with unsupported "
	    "Ethernet type");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ipproto",
	    CTLFLAG_RD, &stats->rx_csum_bad_ipproto,
	    "Received checksum offloaded buffer with incorrect IP protocol");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_offset",
	    CTLFLAG_RD, &stats->rx_csum_bad_offset,
	    "Received checksum offloaded buffer with incorrect offset");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_proto",
	    CTLFLAG_RD, &stats->rx_csum_bad_proto,
	    "Received checksum offloaded buffer with incorrect protocol");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_failed",
	    CTLFLAG_RD, &stats->rx_csum_failed,
	    "Received buffer checksum offload failed");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_offloaded",
	    CTLFLAG_RD, &stats->rx_csum_offloaded,
	    "Received buffer checksum offload succeeded");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_task_rescheduled",
	    CTLFLAG_RD, &stats->rx_task_rescheduled,
	    "Times the receive interrupt task rescheduled itself");

	/* Transmit-path statistics. */
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_unknown_ethtype",
	    CTLFLAG_RD, &stats->tx_csum_unknown_ethtype,
	    "Aborted transmit of checksum offloaded buffer with unknown "
	    "Ethernet type");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_proto_mismatch",
	    CTLFLAG_RD, &stats->tx_csum_proto_mismatch,
	    "Aborted transmit of checksum offloaded buffer because mismatched "
	    "protocols");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_not_tcp",
	    CTLFLAG_RD, &stats->tx_tso_not_tcp,
	    "Aborted transmit of TSO buffer with non TCP protocol");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_without_csum",
	    CTLFLAG_RD, &stats->tx_tso_without_csum,
	    "Aborted transmit of TSO buffer without TCP checksum offload");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged",
	    CTLFLAG_RD, &stats->tx_defragged,
	    "Transmit mbufs defragged");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defrag_failed",
	    CTLFLAG_RD, &stats->tx_defrag_failed,
	    "Aborted transmit of buffer because defrag failed");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_offloaded",
	    CTLFLAG_RD, &stats->tx_csum_offloaded,
	    "Offloaded checksum of transmitted buffer");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_offloaded",
	    CTLFLAG_RD, &stats->tx_tso_offloaded,
	    "Segmentation offload of transmitted buffer");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_task_rescheduled",
	    CTLFLAG_RD, &stats->tx_task_rescheduled,
	    "Times the transmit interrupt task rescheduled itself");
}
42578f3600b1SBryan Venteicher
42588f3600b1SBryan Venteicher static void
vtnet_setup_sysctl(struct vtnet_softc * sc)42598f3600b1SBryan Venteicher vtnet_setup_sysctl(struct vtnet_softc *sc)
42608f3600b1SBryan Venteicher {
42618f3600b1SBryan Venteicher device_t dev;
426210b59a9bSPeter Grehan struct sysctl_ctx_list *ctx;
426310b59a9bSPeter Grehan struct sysctl_oid *tree;
426410b59a9bSPeter Grehan struct sysctl_oid_list *child;
426510b59a9bSPeter Grehan
426610b59a9bSPeter Grehan dev = sc->vtnet_dev;
426710b59a9bSPeter Grehan ctx = device_get_sysctl_ctx(dev);
426810b59a9bSPeter Grehan tree = device_get_sysctl_tree(dev);
426910b59a9bSPeter Grehan child = SYSCTL_CHILDREN(tree);
427010b59a9bSPeter Grehan
42718f3600b1SBryan Venteicher SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_vq_pairs",
42728f3600b1SBryan Venteicher CTLFLAG_RD, &sc->vtnet_max_vq_pairs, 0,
42735e220811SBryan Venteicher "Number of maximum supported virtqueue pairs");
4274b470419eSBryan Venteicher SYSCTL_ADD_INT(ctx, child, OID_AUTO, "req_vq_pairs",
4275b470419eSBryan Venteicher CTLFLAG_RD, &sc->vtnet_req_vq_pairs, 0,
42765e220811SBryan Venteicher "Number of requested virtqueue pairs");
42778f3600b1SBryan Venteicher SYSCTL_ADD_INT(ctx, child, OID_AUTO, "act_vq_pairs",
42788f3600b1SBryan Venteicher CTLFLAG_RD, &sc->vtnet_act_vq_pairs, 0,
42798f3600b1SBryan Venteicher "Number of active virtqueue pairs");
428010b59a9bSPeter Grehan
42818f3600b1SBryan Venteicher vtnet_setup_stat_sysctl(ctx, child, sc);
428210b59a9bSPeter Grehan }
428310b59a9bSPeter Grehan
428442343a63SBryan Venteicher static void
vtnet_load_tunables(struct vtnet_softc * sc)428542343a63SBryan Venteicher vtnet_load_tunables(struct vtnet_softc *sc)
428642343a63SBryan Venteicher {
428742343a63SBryan Venteicher
428842343a63SBryan Venteicher sc->vtnet_lro_entry_count = vtnet_tunable_int(sc,
428942343a63SBryan Venteicher "lro_entry_count", vtnet_lro_entry_count);
429042343a63SBryan Venteicher if (sc->vtnet_lro_entry_count < TCP_LRO_ENTRIES)
429142343a63SBryan Venteicher sc->vtnet_lro_entry_count = TCP_LRO_ENTRIES;
429242343a63SBryan Venteicher
429342343a63SBryan Venteicher sc->vtnet_lro_mbufq_depth = vtnet_tunable_int(sc,
429442343a63SBryan Venteicher "lro_mbufq_depth", vtnet_lro_mbufq_depth);
429542343a63SBryan Venteicher }
429642343a63SBryan Venteicher
429710b59a9bSPeter Grehan static int
vtnet_rxq_enable_intr(struct vtnet_rxq * rxq)42988f3600b1SBryan Venteicher vtnet_rxq_enable_intr(struct vtnet_rxq *rxq)
429910b59a9bSPeter Grehan {
430010b59a9bSPeter Grehan
43018f3600b1SBryan Venteicher return (virtqueue_enable_intr(rxq->vtnrx_vq));
430210b59a9bSPeter Grehan }
430310b59a9bSPeter Grehan
430410b59a9bSPeter Grehan static void
vtnet_rxq_disable_intr(struct vtnet_rxq * rxq)43058f3600b1SBryan Venteicher vtnet_rxq_disable_intr(struct vtnet_rxq *rxq)
430610b59a9bSPeter Grehan {
430710b59a9bSPeter Grehan
43088f3600b1SBryan Venteicher virtqueue_disable_intr(rxq->vtnrx_vq);
430910b59a9bSPeter Grehan }
431010b59a9bSPeter Grehan
431110b59a9bSPeter Grehan static int
vtnet_txq_enable_intr(struct vtnet_txq * txq)43128f3600b1SBryan Venteicher vtnet_txq_enable_intr(struct vtnet_txq *txq)
431310b59a9bSPeter Grehan {
431432487a89SBryan Venteicher struct virtqueue *vq;
431510b59a9bSPeter Grehan
431632487a89SBryan Venteicher vq = txq->vtntx_vq;
431732487a89SBryan Venteicher
431832487a89SBryan Venteicher if (vtnet_txq_below_threshold(txq) != 0)
431932487a89SBryan Venteicher return (virtqueue_postpone_intr(vq, VQ_POSTPONE_LONG));
432032487a89SBryan Venteicher
432132487a89SBryan Venteicher /*
432232487a89SBryan Venteicher * The free count is above our threshold. Keep the Tx interrupt
432332487a89SBryan Venteicher * disabled until the queue is fuller.
432432487a89SBryan Venteicher */
432532487a89SBryan Venteicher return (0);
432610b59a9bSPeter Grehan }
432710b59a9bSPeter Grehan
432810b59a9bSPeter Grehan static void
vtnet_txq_disable_intr(struct vtnet_txq * txq)43298f3600b1SBryan Venteicher vtnet_txq_disable_intr(struct vtnet_txq *txq)
433010b59a9bSPeter Grehan {
433110b59a9bSPeter Grehan
43328f3600b1SBryan Venteicher virtqueue_disable_intr(txq->vtntx_vq);
43338f3600b1SBryan Venteicher }
43348f3600b1SBryan Venteicher
43358f3600b1SBryan Venteicher static void
vtnet_enable_rx_interrupts(struct vtnet_softc * sc)43368f3600b1SBryan Venteicher vtnet_enable_rx_interrupts(struct vtnet_softc *sc)
43378f3600b1SBryan Venteicher {
43384f18e23fSBryan Venteicher struct vtnet_rxq *rxq;
43398f3600b1SBryan Venteicher int i;
43408f3600b1SBryan Venteicher
43414f18e23fSBryan Venteicher for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
43424f18e23fSBryan Venteicher rxq = &sc->vtnet_rxqs[i];
43434f18e23fSBryan Venteicher if (vtnet_rxq_enable_intr(rxq) != 0)
43444f18e23fSBryan Venteicher taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
43454f18e23fSBryan Venteicher }
43468f3600b1SBryan Venteicher }
43478f3600b1SBryan Venteicher
43488f3600b1SBryan Venteicher static void
vtnet_enable_tx_interrupts(struct vtnet_softc * sc)43498f3600b1SBryan Venteicher vtnet_enable_tx_interrupts(struct vtnet_softc *sc)
43508f3600b1SBryan Venteicher {
43518f3600b1SBryan Venteicher int i;
43528f3600b1SBryan Venteicher
43538f3600b1SBryan Venteicher for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
43548f3600b1SBryan Venteicher vtnet_txq_enable_intr(&sc->vtnet_txqs[i]);
43558f3600b1SBryan Venteicher }
43568f3600b1SBryan Venteicher
/* Enable both the receive and transmit interrupts on all active queues. */
static void
vtnet_enable_interrupts(struct vtnet_softc *sc)
{

	vtnet_enable_rx_interrupts(sc);
	vtnet_enable_tx_interrupts(sc);
}
43648f3600b1SBryan Venteicher
43658f3600b1SBryan Venteicher static void
vtnet_disable_rx_interrupts(struct vtnet_softc * sc)43668f3600b1SBryan Venteicher vtnet_disable_rx_interrupts(struct vtnet_softc *sc)
43678f3600b1SBryan Venteicher {
43688f3600b1SBryan Venteicher int i;
43698f3600b1SBryan Venteicher
4370bd8809dfSBryan Venteicher for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
43718f3600b1SBryan Venteicher vtnet_rxq_disable_intr(&sc->vtnet_rxqs[i]);
43728f3600b1SBryan Venteicher }
43738f3600b1SBryan Venteicher
43748f3600b1SBryan Venteicher static void
vtnet_disable_tx_interrupts(struct vtnet_softc * sc)43758f3600b1SBryan Venteicher vtnet_disable_tx_interrupts(struct vtnet_softc *sc)
43768f3600b1SBryan Venteicher {
43778f3600b1SBryan Venteicher int i;
43788f3600b1SBryan Venteicher
4379bd8809dfSBryan Venteicher for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
43808f3600b1SBryan Venteicher vtnet_txq_disable_intr(&sc->vtnet_txqs[i]);
43818f3600b1SBryan Venteicher }
43828f3600b1SBryan Venteicher
/* Mask both the receive and transmit interrupts on all queues. */
static void
vtnet_disable_interrupts(struct vtnet_softc *sc)
{

	vtnet_disable_rx_interrupts(sc);
	vtnet_disable_tx_interrupts(sc);
}
43908f3600b1SBryan Venteicher
43918f3600b1SBryan Venteicher static int
vtnet_tunable_int(struct vtnet_softc * sc,const char * knob,int def)43928f3600b1SBryan Venteicher vtnet_tunable_int(struct vtnet_softc *sc, const char *knob, int def)
43938f3600b1SBryan Venteicher {
43948f3600b1SBryan Venteicher char path[64];
43958f3600b1SBryan Venteicher
43968f3600b1SBryan Venteicher snprintf(path, sizeof(path),
43978f3600b1SBryan Venteicher "hw.vtnet.%d.%s", device_get_unit(sc->vtnet_dev), knob);
43988f3600b1SBryan Venteicher TUNABLE_INT_FETCH(path, &def);
43998f3600b1SBryan Venteicher
44008f3600b1SBryan Venteicher return (def);
440110b59a9bSPeter Grehan }
4402c857c7d5SMark Johnston
44037790c8c1SConrad Meyer #ifdef DEBUGNET
4404c857c7d5SMark Johnston static void
vtnet_debugnet_init(if_t ifp,int * nrxr,int * ncl,int * clsize)44054ee96792SJustin Hibbits vtnet_debugnet_init(if_t ifp, int *nrxr, int *ncl, int *clsize)
4406c857c7d5SMark Johnston {
4407c857c7d5SMark Johnston struct vtnet_softc *sc;
4408c857c7d5SMark Johnston
4409c857c7d5SMark Johnston sc = if_getsoftc(ifp);
4410c857c7d5SMark Johnston
4411c857c7d5SMark Johnston VTNET_CORE_LOCK(sc);
4412bd8809dfSBryan Venteicher *nrxr = sc->vtnet_req_vq_pairs;
44137790c8c1SConrad Meyer *ncl = DEBUGNET_MAX_IN_FLIGHT;
44145e220811SBryan Venteicher *clsize = sc->vtnet_rx_clustersz;
4415c857c7d5SMark Johnston VTNET_CORE_UNLOCK(sc);
4416c857c7d5SMark Johnston }
4417c857c7d5SMark Johnston
4418c857c7d5SMark Johnston static void
vtnet_debugnet_event(if_t ifp __unused,enum debugnet_ev event)44194ee96792SJustin Hibbits vtnet_debugnet_event(if_t ifp __unused, enum debugnet_ev event)
4420c857c7d5SMark Johnston {
442153236f90SMichael Tuexen struct vtnet_softc *sc;
442253236f90SMichael Tuexen static bool sw_lro_enabled = false;
442353236f90SMichael Tuexen
442453236f90SMichael Tuexen /*
442553236f90SMichael Tuexen * Disable software LRO, since it would require entering the network
442653236f90SMichael Tuexen * epoch when calling vtnet_txq_eof() in vtnet_debugnet_poll().
442753236f90SMichael Tuexen */
442853236f90SMichael Tuexen sc = if_getsoftc(ifp);
442953236f90SMichael Tuexen switch (event) {
443053236f90SMichael Tuexen case DEBUGNET_START:
443153236f90SMichael Tuexen sw_lro_enabled = (sc->vtnet_flags & VTNET_FLAG_SW_LRO) != 0;
443253236f90SMichael Tuexen if (sw_lro_enabled)
443353236f90SMichael Tuexen sc->vtnet_flags &= ~VTNET_FLAG_SW_LRO;
443453236f90SMichael Tuexen break;
443553236f90SMichael Tuexen case DEBUGNET_END:
443653236f90SMichael Tuexen if (sw_lro_enabled)
443753236f90SMichael Tuexen sc->vtnet_flags |= VTNET_FLAG_SW_LRO;
443853236f90SMichael Tuexen break;
443953236f90SMichael Tuexen }
4440c857c7d5SMark Johnston }
4441c857c7d5SMark Johnston
4442c857c7d5SMark Johnston static int
vtnet_debugnet_transmit(if_t ifp,struct mbuf * m)44434ee96792SJustin Hibbits vtnet_debugnet_transmit(if_t ifp, struct mbuf *m)
4444c857c7d5SMark Johnston {
4445c857c7d5SMark Johnston struct vtnet_softc *sc;
4446c857c7d5SMark Johnston struct vtnet_txq *txq;
4447c857c7d5SMark Johnston int error;
4448c857c7d5SMark Johnston
4449c857c7d5SMark Johnston sc = if_getsoftc(ifp);
4450c857c7d5SMark Johnston if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
4451c857c7d5SMark Johnston IFF_DRV_RUNNING)
4452c857c7d5SMark Johnston return (EBUSY);
4453c857c7d5SMark Johnston
4454c857c7d5SMark Johnston txq = &sc->vtnet_txqs[0];
4455c857c7d5SMark Johnston error = vtnet_txq_encap(txq, &m, M_NOWAIT | M_USE_RESERVE);
4456c857c7d5SMark Johnston if (error == 0)
4457c857c7d5SMark Johnston (void)vtnet_txq_notify(txq);
4458c857c7d5SMark Johnston return (error);
4459c857c7d5SMark Johnston }
4460c857c7d5SMark Johnston
4461c857c7d5SMark Johnston static int
vtnet_debugnet_poll(if_t ifp,int count)44624ee96792SJustin Hibbits vtnet_debugnet_poll(if_t ifp, int count)
4463c857c7d5SMark Johnston {
4464c857c7d5SMark Johnston struct vtnet_softc *sc;
4465c857c7d5SMark Johnston int i;
4466c857c7d5SMark Johnston
4467c857c7d5SMark Johnston sc = if_getsoftc(ifp);
4468c857c7d5SMark Johnston if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
4469c857c7d5SMark Johnston IFF_DRV_RUNNING)
4470c857c7d5SMark Johnston return (EBUSY);
4471c857c7d5SMark Johnston
4472c857c7d5SMark Johnston (void)vtnet_txq_eof(&sc->vtnet_txqs[0]);
4473bd8809dfSBryan Venteicher for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
4474c857c7d5SMark Johnston (void)vtnet_rxq_eof(&sc->vtnet_rxqs[i]);
4475c857c7d5SMark Johnston return (0);
4476c857c7d5SMark Johnston }
44777790c8c1SConrad Meyer #endif /* DEBUGNET */
4478