/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2023 Google LLC
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _GVE_FBSD_H
#define _GVE_FBSD_H

#include "gve_desc.h"
#include "gve_plat.h"
#include "gve_register.h"

#ifndef PCI_VENDOR_ID_GOOGLE
#define PCI_VENDOR_ID_GOOGLE	0x1ae0
#endif

#define PCI_DEV_ID_GVNIC	0x0042
#define GVE_REGISTER_BAR	0
#define GVE_DOORBELL_BAR	2

/* Driver can alloc up to 2 segments for the header and 2 for the payload. */
#define GVE_TX_MAX_DESCS	4
#define GVE_TX_BUFRING_ENTRIES	4096

#define ADMINQ_SIZE	PAGE_SIZE

#define GVE_DEFAULT_RX_BUFFER_SIZE	2048
/* Each RX bounce buffer page can fit two packet buffers. */
#define GVE_DEFAULT_RX_BUFFER_OFFSET	(PAGE_SIZE / 2)
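/*
 * Worked example: with PAGE_SIZE = 4096, the two buffers in a bounce page
 * sit at page offsets 0 and 2048, so a default-sized (2048-byte) packet
 * buffer never straddles a page boundary.
 */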

/*
 * Number of descriptors per queue page list (QPL).
 * The page count, i.e. the QPL size, is derived by dividing a ring's
 * descriptor count by this divisor.
 */
#define GVE_QPL_DIVISOR	16
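/*
 * E.g., assuming a ring of 1024 descriptors, its QPL would span
 * 1024 / GVE_QPL_DIVISOR = 64 pages; see gve_alloc_qpls().
 */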

static MALLOC_DEFINE(M_GVE, "gve", "gve allocations");

struct gve_dma_handle {
	bus_addr_t	bus_addr;
	void		*cpu_addr;
	bus_dma_tag_t	tag;
	bus_dmamap_t	map;
};

union gve_tx_desc {
	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
	struct gve_tx_mtd_desc mtd; /* optional metadata descriptor */
	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
};

/* Tracks the memory in the fifo occupied by a segment of a packet */
struct gve_tx_iovec {
	uint32_t iov_offset; /* offset into this segment */
	uint32_t iov_len; /* length */
	uint32_t iov_padding; /* padding associated with this segment */
};

/* Tracks allowed and current queue settings */
struct gve_queue_config {
	uint16_t max_queues;
	uint16_t num_queues; /* current */
};

struct gve_irq_db {
	__be32 index;
} __aligned(CACHE_LINE_SIZE);

/*
 * GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
 * when the entire configure_device_resources command is zeroed out and the
 * queue_format is not specified.
 */
enum gve_queue_format {
	GVE_QUEUE_FORMAT_UNSPECIFIED	= 0x0,
	GVE_GQI_RDA_FORMAT		= 0x1,
	GVE_GQI_QPL_FORMAT		= 0x2,
	GVE_DQO_RDA_FORMAT		= 0x3,
};

enum gve_state_flags_bit {
	GVE_STATE_FLAG_ADMINQ_OK,
	GVE_STATE_FLAG_RESOURCES_OK,
	GVE_STATE_FLAG_QPLREG_OK,
	GVE_STATE_FLAG_RX_RINGS_OK,
	GVE_STATE_FLAG_TX_RINGS_OK,
	GVE_STATE_FLAG_QUEUES_UP,
	GVE_STATE_FLAG_LINK_UP,
	GVE_STATE_FLAG_DO_RESET,
	GVE_STATE_FLAG_IN_RESET,
	GVE_NUM_STATE_FLAGS /* Not part of the enum space */
};

BITSET_DEFINE(gve_state_flags, GVE_NUM_STATE_FLAGS);

#define GVE_DEVICE_STATUS_RESET		(0x1 << 1)
#define GVE_DEVICE_STATUS_LINK_STATUS	(0x1 << 2)

#define GVE_RING_LOCK(ring)	mtx_lock(&(ring)->ring_mtx)
#define GVE_RING_TRYLOCK(ring)	mtx_trylock(&(ring)->ring_mtx)
#define GVE_RING_UNLOCK(ring)	mtx_unlock(&(ring)->ring_mtx)
#define GVE_RING_ASSERT(ring)	mtx_assert(&(ring)->ring_mtx, MA_OWNED)

#define GVE_IFACE_LOCK_INIT(lock)	sx_init(&lock, "gve interface lock")
#define GVE_IFACE_LOCK_DESTROY(lock)	sx_destroy(&lock)
#define GVE_IFACE_LOCK_LOCK(lock)	sx_xlock(&lock)
#define GVE_IFACE_LOCK_UNLOCK(lock)	sx_unlock(&lock)
#define GVE_IFACE_LOCK_ASSERT(lock)	sx_assert(&lock, SA_XLOCKED)
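/*
 * Illustrative pattern for the ring lock (a sketch, not the driver's exact
 * transmit path): an if_transmit handler tries the lock and, if it is
 * contended, defers the drain to the xmit taskqueue.
 *
 *	if (GVE_RING_TRYLOCK(tx)) {
 *		... drain tx->br onto the descriptor ring ...
 *		GVE_RING_UNLOCK(tx);
 *	} else
 *		taskqueue_enqueue(tx->xmit_tq, &tx->xmit_task);
 */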

struct gve_queue_page_list {
	uint32_t id;
	uint32_t num_dmas;
	uint32_t num_pages;
	vm_offset_t kva;
	vm_page_t *pages;
	struct gve_dma_handle *dmas;
};

struct gve_irq {
	struct resource *res;
	void *cookie;
};

struct gve_rx_slot_page_info {
	void *page_address;
	vm_page_t page;
	uint32_t page_offset;
	uint16_t pad;
};

/*
 * A single received packet split across multiple buffers may be
 * reconstructed using the information in this structure.
 */
struct gve_rx_ctx {
	/* head and tail of mbuf chain for the current packet */
	struct mbuf *mbuf_head;
	struct mbuf *mbuf_tail;
	uint32_t total_size;
	uint8_t frag_cnt;
	bool drop_pkt;
};

struct gve_ring_com {
	struct gve_priv *priv;
	uint32_t id;

	/*
	 * BAR2 offset for this ring's doorbell and the
	 * counter-array offset for this ring's counter.
	 * Acquired from the device individually for each
	 * queue in the queue_create adminq command.
	 */
	struct gve_queue_resources *q_resources;
	struct gve_dma_handle q_resources_mem;

	/* Byte offset into BAR2 where this ring's 4-byte irq doorbell lies. */
	uint32_t irq_db_offset;
	/* Byte offset into BAR2 where this ring's 4-byte doorbell lies. */
	uint32_t db_offset;
	/*
	 * Index, not byte-offset, into the counter array where this ring's
	 * 4-byte counter lies.
	 */
	uint32_t counter_idx;

	/*
	 * The index of the MSIX vector that was assigned to
	 * this ring in `gve_alloc_irqs`.
	 *
	 * It is passed to the device in the queue_create adminq
	 * command.
	 *
	 * Additionally, it serves as the index into
	 * `priv->irq_db_indices` where this ring's irq doorbell's
	 * BAR2 offset, `irq_db_idx`, can be found.
	 */
	int ntfy_id;

	/*
	 * The fixed bounce buffer for this ring.
	 * Once allocated, it has to be offered to the device
	 * via the register-page-list adminq command.
	 */
	struct gve_queue_page_list *qpl;

	struct task cleanup_task;
	struct taskqueue *cleanup_tq;
} __aligned(CACHE_LINE_SIZE);
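/*
 * Sketch of how the offsets above are consumed (illustrative only; uses
 * the BAR access helpers declared at the bottom of this file):
 *
 *	-- ring a tx queue's doorbell with its free-running req count --
 *	gve_db_bar_write_4(priv, tx->com.db_offset, tx->req);
 *
 *	-- read the device-written event counter for the same ring --
 *	counter = be32toh(priv->counters[tx->com.counter_idx]);
 */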

struct gve_rxq_stats {
	counter_u64_t rbytes;
	counter_u64_t rpackets;
	counter_u64_t rx_dropped_pkt;
	counter_u64_t rx_copybreak_cnt;
	counter_u64_t rx_frag_flip_cnt;
	counter_u64_t rx_frag_copy_cnt;
	counter_u64_t rx_dropped_pkt_desc_err;
	counter_u64_t rx_dropped_pkt_mbuf_alloc_fail;
};

#define NUM_RX_STATS (sizeof(struct gve_rxq_stats) / sizeof(counter_u64_t))

/* power-of-2 sized receive ring */
struct gve_rx_ring {
	struct gve_ring_com com;
	struct gve_dma_handle desc_ring_mem;
	struct gve_dma_handle data_ring_mem;

	/* accessed in the receive hot path */
	struct {
		struct gve_rx_desc *desc_ring;
		union gve_rx_data_slot *data_ring;
		struct gve_rx_slot_page_info *page_info;

		struct gve_rx_ctx ctx;
		struct lro_ctrl lro;
		uint8_t seq_no; /* helps traverse the descriptor ring */
		uint32_t cnt; /* free-running total number of completed packets */
		uint32_t fill_cnt; /* free-running total number of descs and buffs posted */
		uint32_t mask; /* masks the cnt and fill_cnt to the size of the ring */
		struct gve_rxq_stats stats;
	} __aligned(CACHE_LINE_SIZE);

} __aligned(CACHE_LINE_SIZE);

/*
 * A contiguous representation of the pages composing the Tx bounce buffer.
 * It is used concurrently by the xmit taskqueue and the completion
 * taskqueue. Both operate on `available`: the xmit tq lowers it and the
 * completion tq raises it. `head` is the last location written to, and so
 * only the xmit tq uses it.
 */
struct gve_tx_fifo {
	vm_offset_t base; /* address of base of FIFO */
	uint32_t size; /* total size */
	volatile int available; /* how much space is still available */
	uint32_t head; /* offset to write at */
};
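/*
 * A minimal sketch of the accounting contract described above. This is a
 * hypothetical helper, not part of the driver: it assumes it runs on the
 * xmit tq (the sole writer of `head`), that the completion tq raises
 * `available` with a matching atomic add, and that errno constants and
 * machine/atomic.h are visible via gve_plat.h.
 */
static inline int
gve_tx_fifo_reserve_sketch(struct gve_tx_fifo *fifo, uint32_t bytes)
{
	if (fifo->available < (int)bytes)
		return (ENOBUFS); /* retry once completions free space */
	fifo->head = (fifo->head + bytes) % fifo->size; /* may wrap to base */
	atomic_add_int(&fifo->available, -bytes); /* visible to completion tq */
	return (0);
}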

struct gve_tx_buffer_state {
	struct mbuf *mbuf;
	struct gve_tx_iovec iov[GVE_TX_MAX_DESCS];
};

struct gve_txq_stats {
	counter_u64_t tbytes;
	counter_u64_t tpackets;
	counter_u64_t tso_packet_cnt;
	counter_u64_t tx_dropped_pkt;
	counter_u64_t tx_dropped_pkt_nospace_device;
	counter_u64_t tx_dropped_pkt_nospace_bufring;
	counter_u64_t tx_dropped_pkt_vlan;
};

#define NUM_TX_STATS (sizeof(struct gve_txq_stats) / sizeof(counter_u64_t))

/* power-of-2 sized transmit ring */
struct gve_tx_ring {
	struct gve_ring_com com;
	struct gve_dma_handle desc_ring_mem;

	struct task xmit_task;
	struct taskqueue *xmit_tq;

	/* accessed in the transmit hot path */
	struct {
		union gve_tx_desc *desc_ring;
		struct gve_tx_buffer_state *info;
		struct buf_ring *br;

		struct gve_tx_fifo fifo;
		struct mtx ring_mtx;

		uint32_t req; /* free-running total number of packets written to the nic */
		uint32_t done; /* free-running total number of completed packets */
		uint32_t mask; /* masks the req and done to the size of the ring */
		struct gve_txq_stats stats;
	} __aligned(CACHE_LINE_SIZE);

} __aligned(CACHE_LINE_SIZE);
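/*
 * Illustrative only (hypothetical helpers, not part of the driver):
 * because `req` and `done` are free-running 32-bit counters over a
 * power-of-2 ring, occupancy and ring slots fall out of plain unsigned
 * arithmetic.
 */
static inline uint32_t
gve_tx_ring_occupancy_sketch(const struct gve_tx_ring *tx)
{
	return (tx->req - tx->done); /* correct even across uint32 wrap */
}

static inline uint32_t
gve_tx_ring_slot_sketch(const struct gve_tx_ring *tx, uint32_t seqno)
{
	return (seqno & tx->mask); /* e.g. the slot for req or done */
}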

struct gve_priv {
	if_t ifp;
	device_t dev;
	struct ifmedia media;

	uint8_t mac[ETHER_ADDR_LEN];

	struct gve_dma_handle aq_mem;

	struct resource *reg_bar; /* BAR0 */
	struct resource *db_bar; /* BAR2 */
	struct resource *msix_table;

	uint32_t mgmt_msix_idx;
	uint32_t rx_copybreak;

	uint16_t num_event_counters;
	uint16_t default_num_queues;
	uint16_t tx_desc_cnt;
	uint16_t rx_desc_cnt;
	uint16_t rx_pages_per_qpl;
	uint64_t max_registered_pages;
	uint64_t num_registered_pages;
	uint32_t supported_features;
	uint16_t max_mtu;

	struct gve_dma_handle counter_array_mem;
	__be32 *counters;
	struct gve_dma_handle irqs_db_mem;
	struct gve_irq_db *irq_db_indices;

	enum gve_queue_format queue_format;
	struct gve_queue_page_list *qpls;
	struct gve_queue_config tx_cfg;
	struct gve_queue_config rx_cfg;
	uint32_t num_queues;

	struct gve_irq *irq_tbl;
	struct gve_tx_ring *tx;
	struct gve_rx_ring *rx;

	/*
	 * Admin queue - see gve_adminq.h
	 * Since AQ cmds do not run in steady state, 32 bit counters suffice
	 */
	struct gve_adminq_command *adminq;
	vm_paddr_t adminq_bus_addr;
	uint32_t adminq_mask; /* masks prod_cnt to adminq size */
	uint32_t adminq_prod_cnt; /* free-running count of AQ cmds executed */
	uint32_t adminq_cmd_fail; /* free-running count of AQ cmds failed */
	uint32_t adminq_timeouts; /* free-running count of AQ cmd timeouts */
	/* free-running count of each distinct AQ cmd executed */
	uint32_t adminq_describe_device_cnt;
	uint32_t adminq_cfg_device_resources_cnt;
	uint32_t adminq_register_page_list_cnt;
	uint32_t adminq_unregister_page_list_cnt;
	uint32_t adminq_create_tx_queue_cnt;
	uint32_t adminq_create_rx_queue_cnt;
	uint32_t adminq_destroy_tx_queue_cnt;
	uint32_t adminq_destroy_rx_queue_cnt;
	uint32_t adminq_dcfg_device_resources_cnt;
	uint32_t adminq_set_driver_parameter_cnt;
	uint32_t adminq_verify_driver_compatibility_cnt;

	uint32_t interface_up_cnt;
	uint32_t interface_down_cnt;
	uint32_t reset_cnt;

	struct task service_task;
	struct taskqueue *service_tq;

	struct gve_state_flags state_flags;
	struct sx gve_iface_lock;
};

static inline bool
gve_get_state_flag(struct gve_priv *priv, int pos)
{
	return (BIT_ISSET(GVE_NUM_STATE_FLAGS, pos, &priv->state_flags));
}

static inline void
gve_set_state_flag(struct gve_priv *priv, int pos)
{
	BIT_SET_ATOMIC(GVE_NUM_STATE_FLAGS, pos, &priv->state_flags);
}

static inline void
gve_clear_state_flag(struct gve_priv *priv, int pos)
{
	BIT_CLR_ATOMIC(GVE_NUM_STATE_FLAGS, pos, &priv->state_flags);
}
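/*
 * Illustrative usage of the flag helpers (a sketch of a reset request and
 * its handling, not the driver's exact code):
 *
 *	gve_set_state_flag(priv, GVE_STATE_FLAG_DO_RESET);
 *	taskqueue_enqueue(priv->service_tq, &priv->service_task);
 *
 *	-- later, in the service task --
 *	if (gve_get_state_flag(priv, GVE_STATE_FLAG_DO_RESET)) {
 *		gve_clear_state_flag(priv, GVE_STATE_FLAG_DO_RESET);
 *		... reset under gve_iface_lock with IN_RESET set ...
 *	}
 */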

/* Defined in gve_main.c */
void gve_schedule_reset(struct gve_priv *priv);

/* Register access functions defined in gve_utils.c */
uint32_t gve_reg_bar_read_4(struct gve_priv *priv, bus_size_t offset);
void gve_reg_bar_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val);
void gve_db_bar_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val);

/* QPL (Queue Page List) functions defined in gve_qpl.c */
int gve_alloc_qpls(struct gve_priv *priv);
void gve_free_qpls(struct gve_priv *priv);
int gve_register_qpls(struct gve_priv *priv);
int gve_unregister_qpls(struct gve_priv *priv);

/* TX functions defined in gve_tx.c */
int gve_alloc_tx_rings(struct gve_priv *priv);
void gve_free_tx_rings(struct gve_priv *priv);
int gve_create_tx_rings(struct gve_priv *priv);
int gve_destroy_tx_rings(struct gve_priv *priv);
int gve_tx_intr(void *arg);
int gve_xmit_ifp(if_t ifp, struct mbuf *mbuf);
void gve_qflush(if_t ifp);
void gve_xmit_tq(void *arg, int pending);
void gve_tx_cleanup_tq(void *arg, int pending);

/* RX functions defined in gve_rx.c */
int gve_alloc_rx_rings(struct gve_priv *priv);
void gve_free_rx_rings(struct gve_priv *priv);
int gve_create_rx_rings(struct gve_priv *priv);
int gve_destroy_rx_rings(struct gve_priv *priv);
int gve_rx_intr(void *arg);
void gve_rx_cleanup_tq(void *arg, int pending);

/* DMA functions defined in gve_utils.c */
int gve_dma_alloc_coherent(struct gve_priv *priv, int size, int align,
    struct gve_dma_handle *dma);
void gve_dma_free_coherent(struct gve_dma_handle *dma);
int gve_dmamap_create(struct gve_priv *priv, int size, int align,
    struct gve_dma_handle *dma);
void gve_dmamap_destroy(struct gve_dma_handle *dma);
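/*
 * Typical call pattern for the coherent-DMA helpers (an illustrative
 * sketch; error handling elided):
 *
 *	struct gve_dma_handle dma;
 *
 *	if (gve_dma_alloc_coherent(priv, PAGE_SIZE, PAGE_SIZE, &dma) != 0)
 *		return;
 *	-- hand dma.bus_addr to the device; access it via dma.cpu_addr --
 *	gve_dma_free_coherent(&dma);
 */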
/* IRQ functions defined in gve_utils.c */
void gve_free_irqs(struct gve_priv *priv);
int gve_alloc_irqs(struct gve_priv *priv);
void gve_unmask_all_queue_irqs(struct gve_priv *priv);
void gve_mask_all_queue_irqs(struct gve_priv *priv);

/* Sysctl functions defined in gve_sysctl.c */
void gve_setup_sysctl(struct gve_priv *priv);
void gve_accum_stats(struct gve_priv *priv, uint64_t *rpackets,
    uint64_t *rbytes, uint64_t *rx_dropped_pkt, uint64_t *tpackets,
    uint64_t *tbytes, uint64_t *tx_dropped_pkt);

/* Stats functions defined in gve_utils.c */
void gve_alloc_counters(counter_u64_t *stat, int num_stats);
void gve_free_counters(counter_u64_t *stat, int num_stats);

#endif /* _GVE_FBSD_H */