154dfc97bSShailend Chand /*-
254dfc97bSShailend Chand * SPDX-License-Identifier: BSD-3-Clause
354dfc97bSShailend Chand *
4d438b4efSShailend Chand * Copyright (c) 2023-2024 Google LLC
554dfc97bSShailend Chand *
654dfc97bSShailend Chand * Redistribution and use in source and binary forms, with or without modification,
754dfc97bSShailend Chand * are permitted provided that the following conditions are met:
854dfc97bSShailend Chand *
954dfc97bSShailend Chand * 1. Redistributions of source code must retain the above copyright notice, this
1054dfc97bSShailend Chand * list of conditions and the following disclaimer.
1154dfc97bSShailend Chand *
1254dfc97bSShailend Chand * 2. Redistributions in binary form must reproduce the above copyright notice,
1354dfc97bSShailend Chand * this list of conditions and the following disclaimer in the documentation
1454dfc97bSShailend Chand * and/or other materials provided with the distribution.
1554dfc97bSShailend Chand *
1654dfc97bSShailend Chand * 3. Neither the name of the copyright holder nor the names of its contributors
1754dfc97bSShailend Chand * may be used to endorse or promote products derived from this software without
1854dfc97bSShailend Chand * specific prior written permission.
1954dfc97bSShailend Chand *
2054dfc97bSShailend Chand * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
2154dfc97bSShailend Chand * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
2254dfc97bSShailend Chand * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
2354dfc97bSShailend Chand * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
2454dfc97bSShailend Chand * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
2554dfc97bSShailend Chand * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
2654dfc97bSShailend Chand * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
2754dfc97bSShailend Chand * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
2854dfc97bSShailend Chand * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
2954dfc97bSShailend Chand * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3054dfc97bSShailend Chand */
3154dfc97bSShailend Chand #ifndef _GVE_FBSD_H
3254dfc97bSShailend Chand #define _GVE_FBSD_H
3354dfc97bSShailend Chand
3454dfc97bSShailend Chand #include "gve_desc.h"
3554dfc97bSShailend Chand #include "gve_plat.h"
3654dfc97bSShailend Chand #include "gve_register.h"
3754dfc97bSShailend Chand
#ifndef PCI_VENDOR_ID_GOOGLE
#define PCI_VENDOR_ID_GOOGLE 0x1ae0
#endif

/* PCI device ID of the gVNIC device exposed by Google Compute Engine. */
#define PCI_DEV_ID_GVNIC 0x0042
/* BAR0 carries the device registers; BAR2 carries the doorbells. */
#define GVE_REGISTER_BAR 0
#define GVE_DOORBELL_BAR 2

/* Driver can alloc up to 2 segments for the header and 2 for the payload. */
#define GVE_TX_MAX_DESCS 4
#define GVE_TX_BUFRING_ENTRIES 4096

/* The admin queue is backed by a single page. */
#define ADMINQ_SIZE PAGE_SIZE

#define GVE_DEFAULT_RX_BUFFER_SIZE 2048
/* Each RX bounce buffer page can fit two packet buffers. */
#define GVE_DEFAULT_RX_BUFFER_OFFSET (PAGE_SIZE / 2)

/* PTYPEs are always 10 bits. */
#define GVE_NUM_PTYPES 1024

/*
 * Number of descriptors per queue page list.
 * Page count AKA QPL size can be derived by dividing the number of elements in
 * a page by the number of descriptors available.
 */
#define GVE_QPL_DIVISOR 16

/*
 * NOTE(review): a `static` MALLOC_DEFINE in a header gives every including
 * translation unit its own malloc-type definition — presumably intentional
 * here, but worth confirming against malloc(9) conventions.
 */
static MALLOC_DEFINE(M_GVE, "gve", "gve allocations");
6754dfc97bSShailend Chand
/*
 * A single busdma-managed DMA region: the device-visible bus address, the
 * kernel virtual address, and the tag/map needed to sync and free it.
 */
struct gve_dma_handle {
	bus_addr_t bus_addr;	/* address the device uses */
	void *cpu_addr;		/* kernel virtual address */
	bus_dma_tag_t tag;
	bus_dmamap_t map;
};
7454dfc97bSShailend Chand
/* One slot of the GQI Tx descriptor ring; interpretation depends on position. */
union gve_tx_desc {
	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
	struct gve_tx_mtd_desc mtd; /* optional metadata descriptor */
	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
};
8054dfc97bSShailend Chand
/* Tracks the memory in the fifo occupied by a segment of a packet */
struct gve_tx_iovec {
	uint32_t iov_offset; /* offset into this segment */
	uint32_t iov_len; /* length */
	uint32_t iov_padding; /* padding associated with this segment */
};
8754dfc97bSShailend Chand
/* Tracks allowed and current queue settings */
struct gve_queue_config {
	uint16_t max_queues;	/* upper bound reported by the device */
	uint16_t num_queues;	/* current */
};
9354dfc97bSShailend Chand
/*
 * One 4-byte big-endian irq doorbell, padded out to a cache line so that
 * adjacent entries in the irq doorbell array do not false-share.
 */
struct gve_irq_db {
	__be32 index;
} __aligned(CACHE_LINE_SIZE);
9754dfc97bSShailend Chand
/*
 * GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
 * when the entire configure_device_resources command is zeroed out and the
 * queue_format is not specified.
 */
enum gve_queue_format {
	GVE_QUEUE_FORMAT_UNSPECIFIED = 0x0,
	GVE_GQI_RDA_FORMAT = 0x1, /* GQI with raw DMA addressing */
	GVE_GQI_QPL_FORMAT = 0x2, /* GQI with queue page lists (bounce buffers) */
	GVE_DQO_RDA_FORMAT = 0x3, /* DQO with raw DMA addressing */
	GVE_DQO_QPL_FORMAT = 0x4, /* DQO with queue page lists */
};
11054dfc97bSShailend Chand
/*
 * Bit positions in the driver's state bitset (priv->state_flags); read and
 * written atomically through the gve_{get,set,clear}_state_flag helpers.
 */
enum gve_state_flags_bit {
	GVE_STATE_FLAG_ADMINQ_OK,
	GVE_STATE_FLAG_RESOURCES_OK,
	GVE_STATE_FLAG_QPLREG_OK,
	GVE_STATE_FLAG_RX_RINGS_OK,
	GVE_STATE_FLAG_TX_RINGS_OK,
	GVE_STATE_FLAG_QUEUES_UP,
	GVE_STATE_FLAG_LINK_UP,
	GVE_STATE_FLAG_DO_RESET,
	GVE_STATE_FLAG_IN_RESET,
	GVE_NUM_STATE_FLAGS /* Not part of the enum space */
};

/* Declares the `struct gve_state_flags` bitset type via bitset(9). */
BITSET_DEFINE(gve_state_flags, GVE_NUM_STATE_FLAGS);
12554dfc97bSShailend Chand
/* Bits in the device status register. */
#define GVE_DEVICE_STATUS_RESET (0x1 << 1)
#define GVE_DEVICE_STATUS_LINK_STATUS (0x1 << 2)

/* Per-ring mutex; taken around descriptor-writing paths. */
#define GVE_RING_LOCK(ring)	mtx_lock(&(ring)->ring_mtx)
#define GVE_RING_TRYLOCK(ring)	mtx_trylock(&(ring)->ring_mtx)
#define GVE_RING_UNLOCK(ring)	mtx_unlock(&(ring)->ring_mtx)
#define GVE_RING_ASSERT(ring)	mtx_assert(&(ring)->ring_mtx, MA_OWNED)

/* Exclusive sx(9) lock guarding interface state (see priv->gve_iface_lock). */
#define GVE_IFACE_LOCK_INIT(lock)	sx_init(&lock, "gve interface lock")
#define GVE_IFACE_LOCK_DESTROY(lock)	sx_destroy(&lock)
#define GVE_IFACE_LOCK_LOCK(lock)	sx_xlock(&lock)
#define GVE_IFACE_LOCK_UNLOCK(lock)	sx_unlock(&lock)
#define GVE_IFACE_LOCK_ASSERT(lock)	sx_assert(&lock, SA_XLOCKED)
13954dfc97bSShailend Chand
/*
 * A queue page list (QPL): the set of pages backing a ring's bounce buffer,
 * registered with the device via the register-page-list adminq command.
 */
struct gve_queue_page_list {
	uint32_t id;
	uint32_t num_dmas;
	uint32_t num_pages;
	vm_offset_t kva;	/* contiguous kernel mapping of the pages */
	vm_page_t *pages;
	struct gve_dma_handle *dmas;
};
14854dfc97bSShailend Chand
/* An allocated interrupt resource and its setup cookie. */
struct gve_irq {
	struct resource *res;
	void *cookie;	/* returned by bus_setup_intr; needed for teardown */
};
15354dfc97bSShailend Chand
/* Per-slot view of one GQI Rx bounce-buffer page half. */
struct gve_rx_slot_page_info {
	void *page_address;
	vm_page_t page;
	uint32_t page_offset;	/* which half of the page this slot uses */
	uint16_t pad;
};
16054dfc97bSShailend Chand
/*
 * A single received packet split across multiple buffers may be
 * reconstructed using the information in this structure.
 */
struct gve_rx_ctx {
	/* head and tail of mbuf chain for the current packet */
	struct mbuf *mbuf_head;
	struct mbuf *mbuf_tail;
	uint32_t total_size;	/* running byte count of the packet so far */
	uint8_t frag_cnt;	/* number of fragments consumed so far */
	bool is_tcp;
	bool drop_pkt;		/* set when any fragment errors; drop the whole chain */
};
17454dfc97bSShailend Chand
/*
 * State common to both Rx and Tx rings: identity, doorbell/counter
 * locations, interrupt assignment, bounce buffer, and cleanup taskqueue.
 */
struct gve_ring_com {
	struct gve_priv *priv;	/* back-pointer to the owning softc */
	uint32_t id;

	/*
	 * BAR2 offset for this ring's doorbell and the
	 * counter-array offset for this ring's counter.
	 * Acquired from the device individually for each
	 * queue in the queue_create adminq command.
	 */
	struct gve_queue_resources *q_resources;
	struct gve_dma_handle q_resources_mem;

	/* Byte offset into BAR2 where this ring's 4-byte irq doorbell lies. */
	uint32_t irq_db_offset;
	/* Byte offset into BAR2 where this ring's 4-byte doorbell lies. */
	uint32_t db_offset;
	/*
	 * Index, not byte-offset, into the counter array where this ring's
	 * 4-byte counter lies.
	 */
	uint32_t counter_idx;

	/*
	 * The index of the MSIX vector that was assigned to
	 * this ring in `gve_alloc_irqs`.
	 *
	 * It is passed to the device in the queue_create adminq
	 * command.
	 *
	 * Additionally, this also serves as the index into
	 * `priv->irq_db_indices` where this ring's irq doorbell's
	 * BAR2 offset, `irq_db_idx`, can be found.
	 */
	int ntfy_id;

	/*
	 * The fixed bounce buffer for this ring.
	 * Once allocated, has to be offered to the device
	 * over the register-page-list adminq command.
	 */
	struct gve_queue_page_list *qpl;

	struct task cleanup_task;
	struct taskqueue *cleanup_tq;
} __aligned(CACHE_LINE_SIZE);
22154dfc97bSShailend Chand
/*
 * Per-Rx-queue statistics, kept as per-CPU counter(9) counters.
 * NUM_RX_STATS below assumes every member is a counter_u64_t.
 */
struct gve_rxq_stats {
	counter_u64_t rbytes;
	counter_u64_t rpackets;
	counter_u64_t rx_dropped_pkt;
	counter_u64_t rx_copybreak_cnt;
	counter_u64_t rx_frag_flip_cnt;
	counter_u64_t rx_frag_copy_cnt;
	counter_u64_t rx_dropped_pkt_desc_err;
	counter_u64_t rx_dropped_pkt_buf_post_fail;
	counter_u64_t rx_dropped_pkt_mbuf_alloc_fail;
	counter_u64_t rx_mbuf_dmamap_err;
	counter_u64_t rx_mbuf_mclget_null;
};

#define NUM_RX_STATS (sizeof(struct gve_rxq_stats) / sizeof(counter_u64_t))
23754dfc97bSShailend Chand
/*
 * A 16-bit buffer ID used in DQO-QPL mode: which buffer in rx->dqo.bufs and
 * which fragment within that buffer's QPL page. Bit-field packing order is
 * compiler-dependent; the _Static_assert below at least pins the total size.
 */
union gve_rx_qpl_buf_id_dqo {
	struct {
		uint16_t buf_id:11; /* Index into rx->dqo.bufs */
		uint8_t frag_num:5; /* Which frag in the QPL page */
	};
	uint16_t all;
} __packed;
_Static_assert(sizeof(union gve_rx_qpl_buf_id_dqo) == 2,
    "gve: bad dqo qpl rx buf id length");
2472348ac89SShailend Chand
/*
 * A posted DQO Rx buffer. The anonymous union reflects the two buffer
 * sourcing modes: RDA posts dmamapped mbufs directly, while QPL posts
 * fragments of pre-registered bounce pages.
 */
struct gve_rx_buf_dqo {
	union {
		/* RDA */
		struct {
			struct mbuf *mbuf;
			bus_dmamap_t dmamap;
			uint64_t addr;
			bool mapped;
		};
		/* QPL */
		struct {
			uint8_t num_nic_frags; /* number of pending completions */
			uint8_t next_idx; /* index of the next frag to post */
			/* for chaining rx->dqo.used_bufs */
			STAILQ_ENTRY(gve_rx_buf_dqo) stailq_entry;
		};
	};
	/* for chaining rx->dqo.free_bufs */
	SLIST_ENTRY(gve_rx_buf_dqo) slist_entry;
};
268d438b4efSShailend Chand
/* power-of-2 sized receive ring */
struct gve_rx_ring {
	struct gve_ring_com com;
	struct gve_dma_handle desc_ring_mem;
	uint32_t cnt; /* free-running total number of completed packets */
	uint32_t fill_cnt; /* free-running total number of descs and buffs posted */

	/* Queue-format-specific state; exactly one arm is live per device. */
	union {
		/* GQI-only fields */
		struct {
			struct gve_dma_handle data_ring_mem;

			/* accessed in the GQ receive hot path */
			struct gve_rx_desc *desc_ring;
			union gve_rx_data_slot *data_ring;
			struct gve_rx_slot_page_info *page_info;
			uint32_t mask; /* masks the cnt and fill_cnt to the size of the ring */
			uint8_t seq_no; /* helps traverse the descriptor ring */
		};

		/* DQO-only fields */
		struct {
			struct gve_dma_handle compl_ring_mem;

			struct gve_rx_compl_desc_dqo *compl_ring;
			struct gve_rx_desc_dqo *desc_ring;
			struct gve_rx_buf_dqo *bufs; /* Parking place for posted buffers */
			bus_dma_tag_t buf_dmatag; /* To dmamap posted mbufs with */

			uint32_t buf_cnt; /* Size of the bufs array */
			uint32_t mask; /* One less than the sizes of the desc and compl rings */
			uint32_t head; /* The index at which to post the next buffer */
			uint32_t tail; /* The index at which to receive the next compl */
			uint8_t cur_gen_bit; /* Gets flipped on every cycle of the compl ring */
			SLIST_HEAD(, gve_rx_buf_dqo) free_bufs;

			/*
			 * Only used in QPL mode. Pages referred to by if_input-ed mbufs
			 * stay parked here till their wire count comes back to 1.
			 * Pages are moved here after there aren't any pending completions.
			 */
			STAILQ_HEAD(, gve_rx_buf_dqo) used_bufs;
		} dqo;
	};

	struct lro_ctrl lro;
	struct gve_rx_ctx ctx;	/* reassembly state for the in-flight packet */
	struct gve_rxq_stats stats;

} __aligned(CACHE_LINE_SIZE);
31954dfc97bSShailend Chand
/*
 * A contiguous representation of the pages composing the Tx bounce buffer.
 * The xmit taskqueue and the completion taskqueue both simultaneously use it.
 * Both operate on `available`: the xmit tq lowers it and the completion tq
 * raises it. `head` is the last location written at and so only the xmit tq
 * uses it.
 */
struct gve_tx_fifo {
	vm_offset_t base; /* address of base of FIFO */
	uint32_t size; /* total size */
	/*
	 * NOTE(review): `volatile` alone does not order cross-CPU accesses;
	 * presumably updates go through atomic(9) ops — confirm at use sites.
	 */
	volatile int available; /* how much space is still available */
	uint32_t head; /* offset to write at */
};
33354dfc97bSShailend Chand
/* Per-slot GQI Tx state: the mbuf awaiting completion and its fifo segments. */
struct gve_tx_buffer_state {
	struct mbuf *mbuf;
	struct gve_tx_iovec iov[GVE_TX_MAX_DESCS];
};
33854dfc97bSShailend Chand
/*
 * Per-Tx-queue statistics, kept as per-CPU counter(9) counters.
 * NUM_TX_STATS below assumes every member is a counter_u64_t.
 */
struct gve_txq_stats {
	counter_u64_t tbytes;
	counter_u64_t tpackets;
	counter_u64_t tso_packet_cnt;
	counter_u64_t tx_dropped_pkt;
	counter_u64_t tx_delayed_pkt_nospace_device;
	counter_u64_t tx_dropped_pkt_nospace_bufring;
	counter_u64_t tx_delayed_pkt_nospace_descring;
	counter_u64_t tx_delayed_pkt_nospace_compring;
	counter_u64_t tx_delayed_pkt_nospace_qpl_bufs;
	counter_u64_t tx_delayed_pkt_tsoerr;
	counter_u64_t tx_dropped_pkt_vlan;
	counter_u64_t tx_mbuf_collapse;
	counter_u64_t tx_mbuf_defrag;
	counter_u64_t tx_mbuf_defrag_err;
	counter_u64_t tx_mbuf_dmamap_enomem_err;
	counter_u64_t tx_mbuf_dmamap_err;
};

#define NUM_TX_STATS (sizeof(struct gve_txq_stats) / sizeof(counter_u64_t))
35954dfc97bSShailend Chand
/*
 * A DQO Tx packet parked while awaiting its completion from the NIC.
 * Completion tags index into the ring's pending_pkts array of these.
 */
struct gve_tx_pending_pkt_dqo {
	struct mbuf *mbuf;
	union {
		/* RDA */
		bus_dmamap_t dmamap;
		/* QPL */
		struct {
			/*
			 * A linked list of entries from qpl_bufs that served
			 * as the bounce buffer for this packet.
			 */
			int32_t qpl_buf_head;
			uint32_t num_qpl_bufs;
		};
	};
	uint8_t state; /* the gve_packet_state enum */
	int next; /* To chain the free_pending_pkts lists */
};
378d438b4efSShailend Chand
/* power-of-2 sized transmit ring */
struct gve_tx_ring {
	struct gve_ring_com com;
	struct gve_dma_handle desc_ring_mem;

	struct task xmit_task;
	struct taskqueue *xmit_tq;
	bool stopped;

	/* Accessed when writing descriptors */
	struct buf_ring *br;
	struct mtx ring_mtx;

	uint32_t req; /* free-running total number of packets written to the nic */
	uint32_t done; /* free-running total number of completed packets */

	/* Queue-format-specific state; exactly one arm is live per device. */
	union {
		/* GQI specific stuff */
		struct {
			union gve_tx_desc *desc_ring;
			struct gve_tx_buffer_state *info;

			struct gve_tx_fifo fifo;

			uint32_t mask; /* masks the req and done to the size of the ring */
		};

		/* DQO specific stuff */
		struct {
			struct gve_dma_handle compl_ring_mem;

			/* Accessed when writing descriptors */
			struct {
				union gve_tx_desc_dqo *desc_ring;
				uint32_t desc_mask; /* masks head and tail to the size of desc_ring */
				uint32_t desc_head; /* last desc read by NIC, cached value of hw_tx_head */
				uint32_t desc_tail; /* last desc written by driver */
				uint32_t last_re_idx; /* desc which last had "report event" set */

				/*
				 * The head index of a singly linked list containing pending packet objects
				 * to park mbufs till the NIC sends completions. Once this list is depleted,
				 * the "_prd" suffixed producer list, grown by the completion taskqueue,
				 * is stolen.
				 */
				int32_t free_pending_pkts_csm;

				/*
				 * The head index of a singly linked list representing QPL page fragments
				 * to copy mbuf payload into for the NIC to see. Once this list is depleted,
				 * the "_prd" suffixed producer list, grown by the completion taskqueue,
				 * is stolen.
				 *
				 * Only used in QPL mode. int32_t because atomic_swap_16 doesn't exist.
				 */
				int32_t free_qpl_bufs_csm;
				uint32_t qpl_bufs_consumed; /* Allows quickly checking for buf availability */
				uint32_t qpl_bufs_produced_cached; /* Cached value of qpl_bufs_produced */

				/* DMA params for mapping Tx mbufs. Only used in RDA mode. */
				bus_dma_tag_t buf_dmatag;
			} __aligned(CACHE_LINE_SIZE);

			/* Accessed when processing completions */
			struct {
				struct gve_tx_compl_desc_dqo *compl_ring;
				uint32_t compl_mask; /* masks head to the size of compl_ring */
				uint32_t compl_head; /* last completion read by driver */
				uint8_t cur_gen_bit; /* NIC flips a bit on every pass */
				uint32_t hw_tx_head; /* last desc read by NIC */

				/*
				 * The completion taskqueue moves pending-packet objects to this
				 * list after freeing the mbuf. The "_prd" denotes that this is
				 * a producer list. The transmit taskqueue steals this list once
				 * its consumer list, with the "_csm" suffix, is depleted.
				 */
				int32_t free_pending_pkts_prd;

				/*
				 * The completion taskqueue moves the QPL pages corresponding to a
				 * completed packet into this list. It is only used in QPL mode.
				 * The "_prd" denotes that this is a producer list. The transmit
				 * taskqueue steals this list once its consumer list, with the "_csm"
				 * suffix, is depleted.
				 *
				 * Only used in QPL mode. int32_t because atomic_swap_16 doesn't exist.
				 */
				int32_t free_qpl_bufs_prd;
				uint32_t qpl_bufs_produced;
			} __aligned(CACHE_LINE_SIZE);

			/* Accessed by both the completion and xmit loops */
			struct {
				/* completion tags index into this array */
				struct gve_tx_pending_pkt_dqo *pending_pkts;
				uint16_t num_pending_pkts;

				/*
				 * Represents QPL page fragments. An index into this array
				 * always represents the same QPL page fragment. The value
				 * is also an index into this array and serves as a means
				 * to chain buffers into linked lists whose heads are
				 * either free_qpl_bufs_prd or free_qpl_bufs_csm or
				 * qpl_bufs_head.
				 */
				int32_t *qpl_bufs;
			} __aligned(CACHE_LINE_SIZE);
		} dqo;
	};
	struct gve_txq_stats stats;
} __aligned(CACHE_LINE_SIZE);
49154dfc97bSShailend Chand
/* Lifecycle of a gve_tx_pending_pkt_dqo, stored in its `state` field. */
enum gve_packet_state {
	/*
	 * Packet does not yet have a dmamap created.
	 * This should always be zero since state is not explicitly initialized.
	 */
	GVE_PACKET_STATE_UNALLOCATED,
	/* Packet has a dmamap and is in free list, available to be allocated. */
	GVE_PACKET_STATE_FREE,
	/* Packet is expecting a regular data completion */
	GVE_PACKET_STATE_PENDING_DATA_COMPL,
};
503d438b4efSShailend Chand
/* Decoded packet type: the L3 and L4 protocols of a received packet. */
struct gve_ptype {
	uint8_t l3_type; /* `gve_l3_type` in gve_adminq.h */
	uint8_t l4_type; /* `gve_l4_type` in gve_adminq.h */
};

/* Device-provided table mapping 10-bit ptype IDs to their protocols. */
struct gve_ptype_lut {
	struct gve_ptype ptypes[GVE_NUM_PTYPES];
};
51254dfc97bSShailend Chand
/* Per-device softc: all state for one gVNIC instance. */
struct gve_priv {
	if_t ifp;
	device_t dev;
	struct ifmedia media;

	uint8_t mac[ETHER_ADDR_LEN];

	struct gve_dma_handle aq_mem;	/* backing memory for the admin queue */

	struct resource *reg_bar; /* BAR0 */
	struct resource *db_bar; /* BAR2 */
	struct resource *msix_table;

	uint32_t mgmt_msix_idx;
	uint32_t rx_copybreak;	/* frames at or under this size are copied */

	/* Limits and sizes negotiated with the device at describe time. */
	uint16_t num_event_counters;
	uint16_t default_num_queues;
	uint16_t tx_desc_cnt;
	uint16_t rx_desc_cnt;
	uint16_t rx_pages_per_qpl;
	uint64_t max_registered_pages;
	uint64_t num_registered_pages;
	uint32_t supported_features;
	uint16_t max_mtu;

	/* Event counter array shared with the device. */
	struct gve_dma_handle counter_array_mem;
	__be32 *counters;
	/* Array of per-vector irq doorbells shared with the device. */
	struct gve_dma_handle irqs_db_mem;
	struct gve_irq_db *irq_db_indices;

	enum gve_queue_format queue_format;
	struct gve_queue_page_list *qpls;
	struct gve_queue_config tx_cfg;
	struct gve_queue_config rx_cfg;
	uint32_t num_queues;

	struct gve_irq *irq_tbl;
	struct gve_tx_ring *tx;
	struct gve_rx_ring *rx;

	struct gve_ptype_lut *ptype_lut_dqo;

	/*
	 * Admin queue - see gve_adminq.h
	 * Since AQ cmds do not run in steady state, 32 bit counters suffice
	 */
	struct gve_adminq_command *adminq;
	vm_paddr_t adminq_bus_addr;
	uint32_t adminq_mask; /* masks prod_cnt to adminq size */
	uint32_t adminq_prod_cnt; /* free-running count of AQ cmds executed */
	uint32_t adminq_cmd_fail; /* free-running count of AQ cmds failed */
	uint32_t adminq_timeouts; /* free-running count of AQ cmds timeouts */
	/* free-running count of each distinct AQ cmd executed */
	uint32_t adminq_describe_device_cnt;
	uint32_t adminq_cfg_device_resources_cnt;
	uint32_t adminq_register_page_list_cnt;
	uint32_t adminq_unregister_page_list_cnt;
	uint32_t adminq_create_tx_queue_cnt;
	uint32_t adminq_create_rx_queue_cnt;
	uint32_t adminq_destroy_tx_queue_cnt;
	uint32_t adminq_destroy_rx_queue_cnt;
	uint32_t adminq_dcfg_device_resources_cnt;
	uint32_t adminq_set_driver_parameter_cnt;
	uint32_t adminq_verify_driver_compatibility_cnt;
	uint32_t adminq_get_ptype_map_cnt;

	uint32_t interface_up_cnt;
	uint32_t interface_down_cnt;
	uint32_t reset_cnt;

	struct task service_task;
	struct taskqueue *service_tq;

	struct gve_state_flags state_flags;
	struct sx gve_iface_lock;
};
59054dfc97bSShailend Chand
59154dfc97bSShailend Chand static inline bool
gve_get_state_flag(struct gve_priv * priv,int pos)59254dfc97bSShailend Chand gve_get_state_flag(struct gve_priv *priv, int pos)
59354dfc97bSShailend Chand {
59454dfc97bSShailend Chand return (BIT_ISSET(GVE_NUM_STATE_FLAGS, pos, &priv->state_flags));
59554dfc97bSShailend Chand }
59654dfc97bSShailend Chand
/*
 * Atomically set one flag in the driver state bitset.
 * pos indexes into priv->state_flags.
 */
static inline void
gve_set_state_flag(struct gve_priv *priv, int pos)
{
	BIT_SET_ATOMIC(GVE_NUM_STATE_FLAGS, pos, &priv->state_flags);
}
60254dfc97bSShailend Chand
/*
 * Atomically clear one flag in the driver state bitset.
 * pos indexes into priv->state_flags.
 */
static inline void
gve_clear_state_flag(struct gve_priv *priv, int pos)
{
	BIT_CLR_ATOMIC(GVE_NUM_STATE_FLAGS, pos, &priv->state_flags);
}
60854dfc97bSShailend Chand
609d438b4efSShailend Chand static inline bool
gve_is_gqi(struct gve_priv * priv)610d438b4efSShailend Chand gve_is_gqi(struct gve_priv *priv)
611d438b4efSShailend Chand {
612d438b4efSShailend Chand return (priv->queue_format == GVE_GQI_QPL_FORMAT);
613d438b4efSShailend Chand }
614d438b4efSShailend Chand
6152348ac89SShailend Chand static inline bool
gve_is_qpl(struct gve_priv * priv)6162348ac89SShailend Chand gve_is_qpl(struct gve_priv *priv)
6172348ac89SShailend Chand {
6182348ac89SShailend Chand return (priv->queue_format == GVE_GQI_QPL_FORMAT ||
6192348ac89SShailend Chand priv->queue_format == GVE_DQO_QPL_FORMAT);
6202348ac89SShailend Chand }
6212348ac89SShailend Chand
62254dfc97bSShailend Chand /* Defined in gve_main.c */
62354dfc97bSShailend Chand void gve_schedule_reset(struct gve_priv *priv);
62454dfc97bSShailend Chand
62554dfc97bSShailend Chand /* Register access functions defined in gve_utils.c */
62654dfc97bSShailend Chand uint32_t gve_reg_bar_read_4(struct gve_priv *priv, bus_size_t offset);
62754dfc97bSShailend Chand void gve_reg_bar_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val);
62854dfc97bSShailend Chand void gve_db_bar_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val);
629d438b4efSShailend Chand void gve_db_bar_dqo_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val);
63054dfc97bSShailend Chand
63154dfc97bSShailend Chand /* QPL (Queue Page List) functions defined in gve_qpl.c */
63254dfc97bSShailend Chand int gve_alloc_qpls(struct gve_priv *priv);
63354dfc97bSShailend Chand void gve_free_qpls(struct gve_priv *priv);
63454dfc97bSShailend Chand int gve_register_qpls(struct gve_priv *priv);
63554dfc97bSShailend Chand int gve_unregister_qpls(struct gve_priv *priv);
6362348ac89SShailend Chand void gve_mextadd_free(struct mbuf *mbuf);
63754dfc97bSShailend Chand
63854dfc97bSShailend Chand /* TX functions defined in gve_tx.c */
63954dfc97bSShailend Chand int gve_alloc_tx_rings(struct gve_priv *priv);
64054dfc97bSShailend Chand void gve_free_tx_rings(struct gve_priv *priv);
64154dfc97bSShailend Chand int gve_create_tx_rings(struct gve_priv *priv);
64254dfc97bSShailend Chand int gve_destroy_tx_rings(struct gve_priv *priv);
64354dfc97bSShailend Chand int gve_tx_intr(void *arg);
64454dfc97bSShailend Chand int gve_xmit_ifp(if_t ifp, struct mbuf *mbuf);
64554dfc97bSShailend Chand void gve_qflush(if_t ifp);
64654dfc97bSShailend Chand void gve_xmit_tq(void *arg, int pending);
64754dfc97bSShailend Chand void gve_tx_cleanup_tq(void *arg, int pending);
64854dfc97bSShailend Chand
649d438b4efSShailend Chand /* TX functions defined in gve_tx_dqo.c */
650d438b4efSShailend Chand int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int i);
651d438b4efSShailend Chand void gve_tx_free_ring_dqo(struct gve_priv *priv, int i);
652d438b4efSShailend Chand void gve_clear_tx_ring_dqo(struct gve_priv *priv, int i);
653d438b4efSShailend Chand int gve_tx_intr_dqo(void *arg);
654d438b4efSShailend Chand int gve_xmit_dqo(struct gve_tx_ring *tx, struct mbuf **mbuf_ptr);
6552348ac89SShailend Chand int gve_xmit_dqo_qpl(struct gve_tx_ring *tx, struct mbuf *mbuf);
656d438b4efSShailend Chand void gve_tx_cleanup_tq_dqo(void *arg, int pending);
657d438b4efSShailend Chand
65854dfc97bSShailend Chand /* RX functions defined in gve_rx.c */
65954dfc97bSShailend Chand int gve_alloc_rx_rings(struct gve_priv *priv);
66054dfc97bSShailend Chand void gve_free_rx_rings(struct gve_priv *priv);
66154dfc97bSShailend Chand int gve_create_rx_rings(struct gve_priv *priv);
66254dfc97bSShailend Chand int gve_destroy_rx_rings(struct gve_priv *priv);
66354dfc97bSShailend Chand int gve_rx_intr(void *arg);
66454dfc97bSShailend Chand void gve_rx_cleanup_tq(void *arg, int pending);
66554dfc97bSShailend Chand
666d438b4efSShailend Chand /* RX functions defined in gve_rx_dqo.c */
667d438b4efSShailend Chand int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int i);
668d438b4efSShailend Chand void gve_rx_free_ring_dqo(struct gve_priv *priv, int i);
669d438b4efSShailend Chand void gve_rx_prefill_buffers_dqo(struct gve_rx_ring *rx);
670d438b4efSShailend Chand void gve_clear_rx_ring_dqo(struct gve_priv *priv, int i);
671d438b4efSShailend Chand int gve_rx_intr_dqo(void *arg);
672d438b4efSShailend Chand void gve_rx_cleanup_tq_dqo(void *arg, int pending);
673d438b4efSShailend Chand
67454dfc97bSShailend Chand /* DMA functions defined in gve_utils.c */
67554dfc97bSShailend Chand int gve_dma_alloc_coherent(struct gve_priv *priv, int size, int align,
67654dfc97bSShailend Chand struct gve_dma_handle *dma);
67754dfc97bSShailend Chand void gve_dma_free_coherent(struct gve_dma_handle *dma);
67854dfc97bSShailend Chand int gve_dmamap_create(struct gve_priv *priv, int size, int align,
67954dfc97bSShailend Chand struct gve_dma_handle *dma);
68054dfc97bSShailend Chand void gve_dmamap_destroy(struct gve_dma_handle *dma);
68154dfc97bSShailend Chand
68254dfc97bSShailend Chand /* IRQ functions defined in gve_utils.c */
68354dfc97bSShailend Chand void gve_free_irqs(struct gve_priv *priv);
68454dfc97bSShailend Chand int gve_alloc_irqs(struct gve_priv *priv);
68554dfc97bSShailend Chand void gve_unmask_all_queue_irqs(struct gve_priv *priv);
68654dfc97bSShailend Chand void gve_mask_all_queue_irqs(struct gve_priv *priv);
68754dfc97bSShailend Chand
/* Sysctl functions defined in gve_sysctl.c */
689d438b4efSShailend Chand extern bool gve_disable_hw_lro;
690d438b4efSShailend Chand extern char gve_queue_format[8];
691d438b4efSShailend Chand extern char gve_version[8];
69254dfc97bSShailend Chand void gve_setup_sysctl(struct gve_priv *priv);
69354dfc97bSShailend Chand void gve_accum_stats(struct gve_priv *priv, uint64_t *rpackets,
69454dfc97bSShailend Chand uint64_t *rbytes, uint64_t *rx_dropped_pkt, uint64_t *tpackets,
69554dfc97bSShailend Chand uint64_t *tbytes, uint64_t *tx_dropped_pkt);
69654dfc97bSShailend Chand
69754dfc97bSShailend Chand /* Stats functions defined in gve_utils.c */
69854dfc97bSShailend Chand void gve_alloc_counters(counter_u64_t *stat, int num_stats);
69954dfc97bSShailend Chand void gve_free_counters(counter_u64_t *stat, int num_stats);
70054dfc97bSShailend Chand
70154dfc97bSShailend Chand #endif /* _GVE_FBSD_H_ */
702