xref: /freebsd/sys/dev/gve/gve.h (revision 5f62584a9adb7887bae33af617cfa4f43017abf8)
154dfc97bSShailend Chand /*-
254dfc97bSShailend Chand  * SPDX-License-Identifier: BSD-3-Clause
354dfc97bSShailend Chand  *
454dfc97bSShailend Chand  * Copyright (c) 2023 Google LLC
554dfc97bSShailend Chand  *
654dfc97bSShailend Chand  * Redistribution and use in source and binary forms, with or without modification,
754dfc97bSShailend Chand  * are permitted provided that the following conditions are met:
854dfc97bSShailend Chand  *
954dfc97bSShailend Chand  * 1. Redistributions of source code must retain the above copyright notice, this
1054dfc97bSShailend Chand  *    list of conditions and the following disclaimer.
1154dfc97bSShailend Chand  *
1254dfc97bSShailend Chand  * 2. Redistributions in binary form must reproduce the above copyright notice,
1354dfc97bSShailend Chand  *    this list of conditions and the following disclaimer in the documentation
1454dfc97bSShailend Chand  *    and/or other materials provided with the distribution.
1554dfc97bSShailend Chand  *
1654dfc97bSShailend Chand  * 3. Neither the name of the copyright holder nor the names of its contributors
1754dfc97bSShailend Chand  *    may be used to endorse or promote products derived from this software without
1854dfc97bSShailend Chand  *    specific prior written permission.
1954dfc97bSShailend Chand  *
2054dfc97bSShailend Chand  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
2154dfc97bSShailend Chand  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
2254dfc97bSShailend Chand  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
2354dfc97bSShailend Chand  * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
2454dfc97bSShailend Chand  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
2554dfc97bSShailend Chand  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
2654dfc97bSShailend Chand  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
2754dfc97bSShailend Chand  * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
2854dfc97bSShailend Chand  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
2954dfc97bSShailend Chand  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3054dfc97bSShailend Chand  */
3154dfc97bSShailend Chand #ifndef _GVE_FBSD_H
3254dfc97bSShailend Chand #define _GVE_FBSD_H
3354dfc97bSShailend Chand 
3454dfc97bSShailend Chand #include "gve_desc.h"
3554dfc97bSShailend Chand #include "gve_plat.h"
3654dfc97bSShailend Chand #include "gve_register.h"
3754dfc97bSShailend Chand 
3854dfc97bSShailend Chand #ifndef PCI_VENDOR_ID_GOOGLE
3954dfc97bSShailend Chand #define PCI_VENDOR_ID_GOOGLE	0x1ae0
4054dfc97bSShailend Chand #endif
4154dfc97bSShailend Chand 
4254dfc97bSShailend Chand #define PCI_DEV_ID_GVNIC	0x0042
4354dfc97bSShailend Chand #define GVE_REGISTER_BAR	0
4454dfc97bSShailend Chand #define GVE_DOORBELL_BAR	2
4554dfc97bSShailend Chand 
4654dfc97bSShailend Chand /* Driver can alloc up to 2 segments for the header and 2 for the payload. */
4754dfc97bSShailend Chand #define GVE_TX_MAX_DESCS	4
4854dfc97bSShailend Chand #define GVE_TX_BUFRING_ENTRIES	4096
4954dfc97bSShailend Chand 
5054dfc97bSShailend Chand #define ADMINQ_SIZE PAGE_SIZE
5154dfc97bSShailend Chand 
5254dfc97bSShailend Chand #define GVE_DEFAULT_RX_BUFFER_SIZE 2048
5354dfc97bSShailend Chand /* Each RX bounce buffer page can fit two packet buffers. */
5454dfc97bSShailend Chand #define GVE_DEFAULT_RX_BUFFER_OFFSET (PAGE_SIZE / 2)
5554dfc97bSShailend Chand 
5654dfc97bSShailend Chand /*
5754dfc97bSShailend Chand  * Number of descriptors per queue page list.
5854dfc97bSShailend Chand  * Page count AKA QPL size can be derived by dividing the number of elements in
5954dfc97bSShailend Chand  * a page by the number of descriptors available.
6054dfc97bSShailend Chand  */
6154dfc97bSShailend Chand #define GVE_QPL_DIVISOR	16
6254dfc97bSShailend Chand 
6354dfc97bSShailend Chand static MALLOC_DEFINE(M_GVE, "gve", "gve allocations");
6454dfc97bSShailend Chand 
/* A single busdma-backed memory region shared between CPU and device. */
struct gve_dma_handle {
	bus_addr_t	bus_addr; /* device-visible (bus) address */
	void		*cpu_addr; /* kernel virtual address for CPU access */
	bus_dma_tag_t	tag;      /* tag this region was allocated/mapped with */
	bus_dmamap_t	map;      /* busdma map backing the region */
};
7154dfc97bSShailend Chand 
/*
 * A Tx descriptor; which member is live depends on the descriptor's
 * position within the packet's descriptor group.
 */
union gve_tx_desc {
	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
	struct gve_tx_mtd_desc mtd; /* optional metadata descriptor */
	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
};
7754dfc97bSShailend Chand 
7854dfc97bSShailend Chand /* Tracks the memory in the fifo occupied by a segment of a packet */
/* Tracks the memory in the fifo occupied by a segment of a packet */
struct gve_tx_iovec {
	uint32_t iov_offset; /* offset into the fifo for this segment */
	uint32_t iov_len; /* length of this segment in bytes */
	uint32_t iov_padding; /* padding associated with this segment */
};
8454dfc97bSShailend Chand 
8554dfc97bSShailend Chand /* Tracks allowed and current queue settings */
/* Tracks allowed and current queue settings */
struct gve_queue_config {
	uint16_t max_queues; /* upper bound allowed by the device */
	uint16_t num_queues; /* current */
};
9054dfc97bSShailend Chand 
/*
 * A per-IRQ doorbell index, big-endian as the device expects it.
 * Aligned so each entry occupies its own cache line.
 */
struct gve_irq_db {
	__be32 index;
} __aligned(CACHE_LINE_SIZE);
9454dfc97bSShailend Chand 
/*
 * The queue layouts the device may operate in.
 *
 * GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
 * when the entire configure_device_resources command is zeroed out and the
 * queue_format is not specified.
 */
enum gve_queue_format {
	GVE_QUEUE_FORMAT_UNSPECIFIED	= 0x0,
	GVE_GQI_RDA_FORMAT		= 0x1,
	GVE_GQI_QPL_FORMAT		= 0x2, /* uses queue page lists (bounce buffers) */
	GVE_DQO_RDA_FORMAT		= 0x3,
};
10654dfc97bSShailend Chand 
/*
 * Bit positions for the driver's state bitset (see gve_priv.state_flags);
 * read and written atomically via the gve_*_state_flag helpers below.
 */
enum gve_state_flags_bit {
	GVE_STATE_FLAG_ADMINQ_OK,	/* admin queue is usable */
	GVE_STATE_FLAG_RESOURCES_OK,	/* device resources configured */
	GVE_STATE_FLAG_QPLREG_OK,	/* queue page lists registered */
	GVE_STATE_FLAG_RX_RINGS_OK,	/* rx rings allocated */
	GVE_STATE_FLAG_TX_RINGS_OK,	/* tx rings allocated */
	GVE_STATE_FLAG_QUEUES_UP,	/* queues created on the device */
	GVE_STATE_FLAG_LINK_UP,		/* link reported up */
	GVE_STATE_FLAG_DO_RESET,	/* a reset has been requested */
	GVE_STATE_FLAG_IN_RESET,	/* a reset is in progress */
	GVE_NUM_STATE_FLAGS /* Not part of the enum space */
};
11954dfc97bSShailend Chand 
12054dfc97bSShailend Chand BITSET_DEFINE(gve_state_flags, GVE_NUM_STATE_FLAGS);
12154dfc97bSShailend Chand 
12254dfc97bSShailend Chand #define GVE_DEVICE_STATUS_RESET (0x1 << 1)
12354dfc97bSShailend Chand #define GVE_DEVICE_STATUS_LINK_STATUS (0x1 << 2)
12454dfc97bSShailend Chand 
12554dfc97bSShailend Chand #define GVE_RING_LOCK(ring)	mtx_lock(&(ring)->ring_mtx)
12654dfc97bSShailend Chand #define GVE_RING_TRYLOCK(ring)	mtx_trylock(&(ring)->ring_mtx)
12754dfc97bSShailend Chand #define GVE_RING_UNLOCK(ring)	mtx_unlock(&(ring)->ring_mtx)
12854dfc97bSShailend Chand #define GVE_RING_ASSERT(ring)	mtx_assert(&(ring)->ring_mtx, MA_OWNED)
12954dfc97bSShailend Chand 
13054dfc97bSShailend Chand #define GVE_IFACE_LOCK_INIT(lock)     sx_init(&lock, "gve interface lock")
13154dfc97bSShailend Chand #define GVE_IFACE_LOCK_DESTROY(lock)  sx_destroy(&lock)
13254dfc97bSShailend Chand #define GVE_IFACE_LOCK_LOCK(lock)     sx_xlock(&lock)
13354dfc97bSShailend Chand #define GVE_IFACE_LOCK_UNLOCK(lock)   sx_unlock(&lock)
13454dfc97bSShailend Chand #define GVE_IFACE_LOCK_ASSERT(lock)   sx_assert(&lock, SA_XLOCKED)
13554dfc97bSShailend Chand 
/*
 * A list of pages registered with the device and used as a fixed
 * bounce buffer for a queue (the "QPL" of GVE_GQI_QPL_FORMAT).
 */
struct gve_queue_page_list {
	uint32_t id; /* identifier passed to the device */
	uint32_t num_dmas; /* number of entries in dmas[] */
	uint32_t num_pages; /* number of entries in pages[] */
	vm_offset_t kva; /* contiguous kernel mapping of the pages */
	vm_page_t *pages; /* the backing physical pages */
	struct gve_dma_handle *dmas; /* per-page DMA mappings */
};
14454dfc97bSShailend Chand 
/* One allocated interrupt: its bus resource and setup-intr cookie. */
struct gve_irq {
	struct resource *res; /* the interrupt resource */
	void *cookie; /* cookie returned by bus_setup_intr, used to teardown */
};
14954dfc97bSShailend Chand 
/* Per-slot bookkeeping for one rx buffer within a QPL page. */
struct gve_rx_slot_page_info {
	void *page_address; /* KVA of the page */
	vm_page_t page; /* the backing vm page */
	uint32_t page_offset; /* byte offset of this slot's buffer in the page */
	uint16_t pad;
};
15654dfc97bSShailend Chand 
/*
 * A single received packet split across multiple buffers may be
 * reconstructed using the information in this structure.
 */
struct gve_rx_ctx {
	/* head and tail of mbuf chain for the current packet */
	struct mbuf *mbuf_head;
	struct mbuf *mbuf_tail;
	uint32_t total_size; /* running byte count of the packet so far */
	uint8_t frag_cnt; /* number of fragments consumed so far */
	bool is_tcp; /* whether the packet was flagged as TCP */
	bool drop_pkt; /* set when the packet must be discarded */
};
17054dfc97bSShailend Chand 
/* State common to both rx and tx rings. */
struct gve_ring_com {
	struct gve_priv *priv; /* back-pointer to the owning softc */
	uint32_t id; /* ring index within its (rx or tx) array */

	/*
	 * BAR2 offset for this ring's doorbell and the
	 * counter-array offset for this ring's counter.
	 * Acquired from the device individually for each
	 * queue in the queue_create adminq command.
	 */
	struct gve_queue_resources *q_resources;
	struct gve_dma_handle q_resources_mem;

	/* Byte offset into BAR2 where this ring's 4-byte irq doorbell lies. */
	uint32_t irq_db_offset;
	/* Byte offset into BAR2 where this ring's 4-byte doorbell lies. */
	uint32_t db_offset;
	/*
	 * Index, not byte-offset, into the counter array where this ring's
	 * 4-byte counter lies.
	 */
	uint32_t counter_idx;

	/*
	 * The index of the MSIX vector that was assigned to
	 * this ring in `gve_alloc_irqs`.
	 *
	 * It is passed to the device in the queue_create adminq
	 * command.
	 *
	 * Additionally, this also serves as the index into
	 * `priv->irq_db_indices` where this ring's irq doorbell's
	 * BAR2 offset, `irq_db_idx`, can be found.
	 */
	int ntfy_id;

	/*
	 * The fixed bounce buffer for this ring.
	 * Once allocated, has to be offered to the device
	 * over the register-page-list adminq command.
	 */
	struct gve_queue_page_list *qpl;

	/* Deferred ring-cleanup work, typically scheduled from the interrupt. */
	struct task cleanup_task;
	struct taskqueue *cleanup_tq;
} __aligned(CACHE_LINE_SIZE);
21754dfc97bSShailend Chand 
/* Per-rx-queue counters; exposed via sysctl and summed in gve_accum_stats. */
struct gve_rxq_stats {
	counter_u64_t rbytes; /* bytes received */
	counter_u64_t rpackets; /* packets received */
	counter_u64_t rx_dropped_pkt; /* total packets dropped */
	counter_u64_t rx_copybreak_cnt; /* small packets copied into a new mbuf */
	counter_u64_t rx_frag_flip_cnt; /* buffers flipped instead of copied */
	counter_u64_t rx_frag_copy_cnt; /* buffers copied instead of flipped */
	counter_u64_t rx_dropped_pkt_desc_err; /* drops due to descriptor errors */
	counter_u64_t rx_dropped_pkt_mbuf_alloc_fail; /* drops due to mbuf alloc failure */
};
22854dfc97bSShailend Chand 
22954dfc97bSShailend Chand #define NUM_RX_STATS (sizeof(struct gve_rxq_stats) / sizeof(counter_u64_t))
23054dfc97bSShailend Chand 
/* power-of-2 sized receive ring */
struct gve_rx_ring {
	struct gve_ring_com com; /* state common to rx and tx rings */
	struct gve_dma_handle desc_ring_mem; /* DMA memory behind desc_ring */
	struct gve_dma_handle data_ring_mem; /* DMA memory behind data_ring */

	/* accessed in the receive hot path */
	struct {
		struct gve_rx_desc *desc_ring; /* completion descriptors from the device */
		union gve_rx_data_slot *data_ring; /* buffer addresses posted to the device */
		struct gve_rx_slot_page_info *page_info; /* per-slot buffer bookkeeping */

		struct gve_rx_ctx ctx; /* reassembly state for the in-progress packet */
		struct lro_ctrl lro; /* software LRO state */
		uint8_t seq_no; /* helps traverse the descriptor ring */
		uint32_t cnt; /* free-running total number of completed packets */
		uint32_t fill_cnt; /* free-running total number of descs and buffs posted */
		uint32_t mask; /* masks the cnt and fill_cnt to the size of the ring */
		struct gve_rxq_stats stats;
	} __aligned(CACHE_LINE_SIZE);

} __aligned(CACHE_LINE_SIZE);
25354dfc97bSShailend Chand 
/*
 * A contiguous representation of the pages composing the Tx bounce buffer.
 * The xmit taskqueue and the completion taskqueue both simultaneously use it.
 * Both operate on `available`: the xmit tq lowers it and the completion tq
 * raises it. `head` is the last location written at and so only the xmit tq
 * uses it.
 */
struct gve_tx_fifo {
	vm_offset_t base; /* address of base of FIFO */
	uint32_t size; /* total size */
	volatile int available; /* how much space is still available */
	uint32_t head; /* offset to write at */
};
26754dfc97bSShailend Chand 
/* Per-descriptor-slot tx state, kept until the device reports completion. */
struct gve_tx_buffer_state {
	struct mbuf *mbuf; /* the pending mbuf, freed on completion */
	struct gve_tx_iovec iov[GVE_TX_MAX_DESCS]; /* fifo regions holding its segments */
};
27254dfc97bSShailend Chand 
/* Per-tx-queue counters; exposed via sysctl and summed in gve_accum_stats. */
struct gve_txq_stats {
	counter_u64_t tbytes; /* bytes transmitted */
	counter_u64_t tpackets; /* packets transmitted */
	counter_u64_t tso_packet_cnt; /* TSO packets transmitted */
	counter_u64_t tx_dropped_pkt; /* total packets dropped */
	counter_u64_t tx_dropped_pkt_nospace_device; /* drops: no device ring space */
	counter_u64_t tx_dropped_pkt_nospace_bufring; /* drops: no buf_ring space */
	counter_u64_t tx_dropped_pkt_vlan; /* drops: VLAN-tagged packets */
};
28254dfc97bSShailend Chand 
28354dfc97bSShailend Chand #define NUM_TX_STATS (sizeof(struct gve_txq_stats) / sizeof(counter_u64_t))
28454dfc97bSShailend Chand 
/* power-of-2 sized transmit ring */
struct gve_tx_ring {
	struct gve_ring_com com; /* state common to rx and tx rings */
	struct gve_dma_handle desc_ring_mem; /* DMA memory behind desc_ring */

	/* Deferred transmit work, drains the buf_ring into the descriptor ring. */
	struct task xmit_task;
	struct taskqueue *xmit_tq;

	/* accessed in the transmit hot path */
	struct {
		union gve_tx_desc *desc_ring; /* descriptors posted to the device */
		struct gve_tx_buffer_state *info; /* per-slot completion state */
		struct buf_ring *br; /* staging ring filled by gve_xmit_ifp */

		struct gve_tx_fifo fifo; /* bounce buffer shared with the completion tq */
		struct mtx ring_mtx; /* protects the ring; see GVE_RING_LOCK */

		uint32_t req; /* free-running total number of packets written to the nic */
		uint32_t done; /* free-running total number of completed packets */
		uint32_t mask; /* masks the req and done to the size of the ring */
		struct gve_txq_stats stats;
	} __aligned(CACHE_LINE_SIZE);

} __aligned(CACHE_LINE_SIZE);
30954dfc97bSShailend Chand 
/* The driver's per-device softc. */
struct gve_priv {
	if_t ifp; /* the network interface we expose */
	device_t dev;
	struct ifmedia media;

	uint8_t mac[ETHER_ADDR_LEN]; /* MAC address reported by the device */

	struct gve_dma_handle aq_mem; /* DMA memory backing the admin queue */

	struct resource *reg_bar; /* BAR0 */
	struct resource *db_bar; /* BAR2 */
	struct resource *msix_table;

	uint32_t mgmt_msix_idx; /* MSIX vector used for the management irq */
	uint32_t rx_copybreak; /* rx packets <= this size are copied, not flipped */

	/* Limits and defaults learned from the describe_device adminq command. */
	uint16_t num_event_counters;
	uint16_t default_num_queues;
	uint16_t tx_desc_cnt;
	uint16_t rx_desc_cnt;
	uint16_t rx_pages_per_qpl;
	uint64_t max_registered_pages;
	uint64_t num_registered_pages; /* pages currently registered with the device */
	uint32_t supported_features;
	uint16_t max_mtu;

	struct gve_dma_handle counter_array_mem;
	__be32 *counters; /* event counter array shared with the device */
	struct gve_dma_handle irqs_db_mem;
	struct gve_irq_db *irq_db_indices; /* per-vector irq doorbell indices */

	enum gve_queue_format queue_format;
	struct gve_queue_page_list *qpls; /* one QPL per queue */
	struct gve_queue_config tx_cfg;
	struct gve_queue_config rx_cfg;
	uint32_t num_queues; /* total across rx and tx */

	struct gve_irq *irq_tbl;
	struct gve_tx_ring *tx;
	struct gve_rx_ring *rx;

	/*
	 * Admin queue - see gve_adminq.h
	 * Since AQ cmds do not run in steady state, 32 bit counters suffice
	 */
	struct gve_adminq_command *adminq;
	vm_paddr_t adminq_bus_addr;
	uint32_t adminq_mask; /* masks prod_cnt to adminq size */
	uint32_t adminq_prod_cnt; /* free-running count of AQ cmds executed */
	uint32_t adminq_cmd_fail; /* free-running count of AQ cmds failed */
	uint32_t adminq_timeouts; /* free-running count of AQ cmds timeouts */
	/* free-running count of each distinct AQ cmd executed */
	uint32_t adminq_describe_device_cnt;
	uint32_t adminq_cfg_device_resources_cnt;
	uint32_t adminq_register_page_list_cnt;
	uint32_t adminq_unregister_page_list_cnt;
	uint32_t adminq_create_tx_queue_cnt;
	uint32_t adminq_create_rx_queue_cnt;
	uint32_t adminq_destroy_tx_queue_cnt;
	uint32_t adminq_destroy_rx_queue_cnt;
	uint32_t adminq_dcfg_device_resources_cnt;
	uint32_t adminq_set_driver_parameter_cnt;
	uint32_t adminq_verify_driver_compatibility_cnt;

	/* Lifecycle counters, exposed for debugging. */
	uint32_t interface_up_cnt;
	uint32_t interface_down_cnt;
	uint32_t reset_cnt;

	/* Periodic/service work (e.g. reset handling, see gve_schedule_reset). */
	struct task service_task;
	struct taskqueue *service_tq;

	struct gve_state_flags state_flags; /* see enum gve_state_flags_bit */
	struct sx gve_iface_lock; /* serializes interface state changes */
};
38454dfc97bSShailend Chand 
38554dfc97bSShailend Chand static inline bool
38654dfc97bSShailend Chand gve_get_state_flag(struct gve_priv *priv, int pos)
38754dfc97bSShailend Chand {
38854dfc97bSShailend Chand 	return (BIT_ISSET(GVE_NUM_STATE_FLAGS, pos, &priv->state_flags));
38954dfc97bSShailend Chand }
39054dfc97bSShailend Chand 
39154dfc97bSShailend Chand static inline void
39254dfc97bSShailend Chand gve_set_state_flag(struct gve_priv *priv, int pos)
39354dfc97bSShailend Chand {
39454dfc97bSShailend Chand 	BIT_SET_ATOMIC(GVE_NUM_STATE_FLAGS, pos, &priv->state_flags);
39554dfc97bSShailend Chand }
39654dfc97bSShailend Chand 
39754dfc97bSShailend Chand static inline void
39854dfc97bSShailend Chand gve_clear_state_flag(struct gve_priv *priv, int pos)
39954dfc97bSShailend Chand {
40054dfc97bSShailend Chand 	BIT_CLR_ATOMIC(GVE_NUM_STATE_FLAGS, pos, &priv->state_flags);
40154dfc97bSShailend Chand }
40254dfc97bSShailend Chand 
40354dfc97bSShailend Chand /* Defined in gve_main.c */
40454dfc97bSShailend Chand void gve_schedule_reset(struct gve_priv *priv);
40554dfc97bSShailend Chand 
40654dfc97bSShailend Chand /* Register access functions defined in gve_utils.c */
40754dfc97bSShailend Chand uint32_t gve_reg_bar_read_4(struct gve_priv *priv, bus_size_t offset);
40854dfc97bSShailend Chand void gve_reg_bar_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val);
40954dfc97bSShailend Chand void gve_db_bar_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val);
41054dfc97bSShailend Chand 
41154dfc97bSShailend Chand /* QPL (Queue Page List) functions defined in gve_qpl.c */
41254dfc97bSShailend Chand int gve_alloc_qpls(struct gve_priv *priv);
41354dfc97bSShailend Chand void gve_free_qpls(struct gve_priv *priv);
41454dfc97bSShailend Chand int gve_register_qpls(struct gve_priv *priv);
41554dfc97bSShailend Chand int gve_unregister_qpls(struct gve_priv *priv);
41654dfc97bSShailend Chand 
41754dfc97bSShailend Chand /* TX functions defined in gve_tx.c */
41854dfc97bSShailend Chand int gve_alloc_tx_rings(struct gve_priv *priv);
41954dfc97bSShailend Chand void gve_free_tx_rings(struct gve_priv *priv);
42054dfc97bSShailend Chand int gve_create_tx_rings(struct gve_priv *priv);
42154dfc97bSShailend Chand int gve_destroy_tx_rings(struct gve_priv *priv);
42254dfc97bSShailend Chand int gve_tx_intr(void *arg);
42354dfc97bSShailend Chand int gve_xmit_ifp(if_t ifp, struct mbuf *mbuf);
42454dfc97bSShailend Chand void gve_qflush(if_t ifp);
42554dfc97bSShailend Chand void gve_xmit_tq(void *arg, int pending);
42654dfc97bSShailend Chand void gve_tx_cleanup_tq(void *arg, int pending);
42754dfc97bSShailend Chand 
42854dfc97bSShailend Chand /* RX functions defined in gve_rx.c */
42954dfc97bSShailend Chand int gve_alloc_rx_rings(struct gve_priv *priv);
43054dfc97bSShailend Chand void gve_free_rx_rings(struct gve_priv *priv);
43154dfc97bSShailend Chand int gve_create_rx_rings(struct gve_priv *priv);
43254dfc97bSShailend Chand int gve_destroy_rx_rings(struct gve_priv *priv);
43354dfc97bSShailend Chand int gve_rx_intr(void *arg);
43454dfc97bSShailend Chand void gve_rx_cleanup_tq(void *arg, int pending);
43554dfc97bSShailend Chand 
43654dfc97bSShailend Chand /* DMA functions defined in gve_utils.c */
43754dfc97bSShailend Chand int gve_dma_alloc_coherent(struct gve_priv *priv, int size, int align,
43854dfc97bSShailend Chand     struct gve_dma_handle *dma);
43954dfc97bSShailend Chand void gve_dma_free_coherent(struct gve_dma_handle *dma);
44054dfc97bSShailend Chand int gve_dmamap_create(struct gve_priv *priv, int size, int align,
44154dfc97bSShailend Chand     struct gve_dma_handle *dma);
44254dfc97bSShailend Chand void gve_dmamap_destroy(struct gve_dma_handle *dma);
44354dfc97bSShailend Chand 
44454dfc97bSShailend Chand /* IRQ functions defined in gve_utils.c */
44554dfc97bSShailend Chand void gve_free_irqs(struct gve_priv *priv);
44654dfc97bSShailend Chand int gve_alloc_irqs(struct gve_priv *priv);
44754dfc97bSShailend Chand void gve_unmask_all_queue_irqs(struct gve_priv *priv);
44854dfc97bSShailend Chand void gve_mask_all_queue_irqs(struct gve_priv *priv);
44954dfc97bSShailend Chand 
/* Sysctl functions defined in gve_sysctl.c */
45154dfc97bSShailend Chand void gve_setup_sysctl(struct gve_priv *priv);
45254dfc97bSShailend Chand void gve_accum_stats(struct gve_priv *priv, uint64_t *rpackets,
45354dfc97bSShailend Chand     uint64_t *rbytes, uint64_t *rx_dropped_pkt, uint64_t *tpackets,
45454dfc97bSShailend Chand     uint64_t *tbytes, uint64_t *tx_dropped_pkt);
45554dfc97bSShailend Chand 
45654dfc97bSShailend Chand /* Stats functions defined in gve_utils.c */
45754dfc97bSShailend Chand void gve_alloc_counters(counter_u64_t *stat, int num_stats);
45854dfc97bSShailend Chand void gve_free_counters(counter_u64_t *stat, int num_stats);
45954dfc97bSShailend Chand 
#endif /* _GVE_FBSD_H */
461