xref: /freebsd/sys/dev/gve/gve.h (revision c27f7d6b9cf6d4ab01cb3d0972726c14e0aca146)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2023-2024 Google LLC
5  *
6  * Redistribution and use in source and binary forms, with or without modification,
7  * are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice, this
10  *    list of conditions and the following disclaimer.
11  *
12  * 2. Redistributions in binary form must reproduce the above copyright notice,
13  *    this list of conditions and the following disclaimer in the documentation
14  *    and/or other materials provided with the distribution.
15  *
16  * 3. Neither the name of the copyright holder nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software without
18  *    specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
22  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23  * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
24  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
25  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
27  * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  */
31 #ifndef _GVE_FBSD_H
32 #define _GVE_FBSD_H
33 
34 #include "gve_desc.h"
35 #include "gve_plat.h"
36 #include "gve_register.h"
37 
38 #ifndef PCI_VENDOR_ID_GOOGLE
39 #define PCI_VENDOR_ID_GOOGLE	0x1ae0
40 #endif
41 
42 #define PCI_DEV_ID_GVNIC	0x0042
43 #define GVE_REGISTER_BAR	0
44 #define GVE_DOORBELL_BAR	2
45 
46 /* Driver can alloc up to 2 segments for the header and 2 for the payload. */
47 #define GVE_TX_MAX_DESCS	4
48 #define GVE_TX_BUFRING_ENTRIES	4096
49 
50 #define GVE_TX_TIMEOUT_PKT_SEC		 	5
51 #define GVE_TX_TIMEOUT_CHECK_CADENCE_SEC	5
52 /*
53  * If the driver finds timed-out packets on a tx queue, it first kicks the
54  * queue and records the time. If the driver again finds a timeout on the
55  * same queue before the end of the cooldown period, only then will it reset.
56  * Thus, for a reset to be possible at all, the cooldown must be at least as
57  * long as the tx timeout checking cadence multiplied by the number of queues.
58  */
59 #define GVE_TX_TIMEOUT_MAX_TX_QUEUES	 16
60 #define GVE_TX_TIMEOUT_KICK_COOLDOWN_SEC \
61     (2 * GVE_TX_TIMEOUT_CHECK_CADENCE_SEC * GVE_TX_TIMEOUT_MAX_TX_QUEUES)
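/*
 * Worked example with the defaults above (illustrative arithmetic only):
 * 2 * GVE_TX_TIMEOUT_CHECK_CADENCE_SEC * GVE_TX_TIMEOUT_MAX_TX_QUEUES =
 * 2 * 5 * 16 = 160 seconds. A queue therefore becomes eligible for a reset
 * only if it is found timed out twice within roughly that window.
 */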
62 
63 #define GVE_TIMESTAMP_INVALID		-1
64 
65 #define ADMINQ_SIZE PAGE_SIZE
66 
67 #define GVE_DEFAULT_RX_BUFFER_SIZE 2048
68 /* Each RX bounce buffer page can fit two packet buffers. */
69 #define GVE_DEFAULT_RX_BUFFER_OFFSET (PAGE_SIZE / 2)
70 
71 /* PTYPEs are always 10 bits. */
72 #define GVE_NUM_PTYPES	1024
73 
74 /*
75  * Divisor used to size a queue page list: the page count, AKA the QPL size,
76  * is derived by dividing a ring's descriptor count by this value (i.e., each
77  * QPL page serves GVE_QPL_DIVISOR descriptors).
78  */
79 #define GVE_QPL_DIVISOR	16
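/*
 * Illustrative sizing sketch (an assumption about typical use, not a quote of
 * the allocation path): a ring's QPL page count is derived from its descriptor
 * count, e.g. for a hypothetical 1024-descriptor tx ring:
 *
 *	int npages = 1024 / GVE_QPL_DIVISOR;	// 64 pages
 *	struct gve_queue_page_list *qpl =
 *	    gve_alloc_qpl(priv, id, npages, true);	// single_kva
 *
 * gve_alloc_qpl() and gve_free_qpl() are declared later in this header.
 */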
80 
81 /* Ring Size Limits */
82 #define GVE_DEFAULT_MIN_RX_RING_SIZE	512
83 #define GVE_DEFAULT_MIN_TX_RING_SIZE	256
84 
85 static MALLOC_DEFINE(M_GVE, "gve", "gve allocations");
86 
87 struct gve_dma_handle {
88 	bus_addr_t	bus_addr;
89 	void		*cpu_addr;
90 	bus_dma_tag_t	tag;
91 	bus_dmamap_t	map;
92 };
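/*
 * A minimal usage sketch for the DMA handle, using the helpers declared at the
 * bottom of this header (illustrative only; size, alignment, and error
 * handling here are assumptions, not the driver's exact allocation path):
 *
 *	struct gve_dma_handle dma;
 *
 *	if (gve_dma_alloc_coherent(priv, PAGE_SIZE, PAGE_SIZE, &dma) != 0)
 *		return (ENOMEM);
 *	// dma.cpu_addr is the driver-visible mapping; dma.bus_addr is the
 *	// device-visible address handed to the NIC.
 *	gve_dma_free_coherent(&dma);
 */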
93 
94 union gve_tx_desc {
95 	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
96 	struct gve_tx_mtd_desc mtd; /* optional metadata descriptor */
97 	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
98 };
99 
100 /* Tracks the memory in the fifo occupied by a segment of a packet */
101 struct gve_tx_iovec {
102 	uint32_t iov_offset; /* offset into this segment */
103 	uint32_t iov_len; /* length */
104 	uint32_t iov_padding; /* padding associated with this segment */
105 };
106 
107 /* Tracks allowed and current queue settings */
108 struct gve_queue_config {
109 	uint16_t max_queues;
110 	uint16_t num_queues; /* current */
111 };
112 
113 struct gve_irq_db {
114 	__be32 index;
115 } __aligned(CACHE_LINE_SIZE);
116 
117 /*
118  * GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
119  * when the entire configure_device_resources command is zeroed out and the
120  * queue_format is not specified.
121  */
122 enum gve_queue_format {
123 	GVE_QUEUE_FORMAT_UNSPECIFIED	= 0x0,
124 	GVE_GQI_RDA_FORMAT		= 0x1,
125 	GVE_GQI_QPL_FORMAT		= 0x2,
126 	GVE_DQO_RDA_FORMAT		= 0x3,
127 	GVE_DQO_QPL_FORMAT		= 0x4,
128 };
129 
130 enum gve_state_flags_bit {
131 	GVE_STATE_FLAG_ADMINQ_OK,
132 	GVE_STATE_FLAG_RESOURCES_OK,
133 	GVE_STATE_FLAG_QPLREG_OK,
134 	GVE_STATE_FLAG_RX_RINGS_OK,
135 	GVE_STATE_FLAG_TX_RINGS_OK,
136 	GVE_STATE_FLAG_QUEUES_UP,
137 	GVE_STATE_FLAG_LINK_UP,
138 	GVE_STATE_FLAG_DO_RESET,
139 	GVE_STATE_FLAG_IN_RESET,
140 	GVE_NUM_STATE_FLAGS /* Not part of the enum space */
141 };
142 
143 BITSET_DEFINE(gve_state_flags, GVE_NUM_STATE_FLAGS);
144 
145 #define GVE_DEVICE_STATUS_RESET (0x1 << 1)
146 #define GVE_DEVICE_STATUS_LINK_STATUS (0x1 << 2)
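/*
 * Hedged sketch of how these status bits are consumed: the service path reads
 * the device-status register over BAR0 and tests the bits. The register offset
 * macro below is assumed to come from gve_register.h; its name is illustrative.
 *
 *	uint32_t status = gve_reg_bar_read_4(priv, GVE_DEVICE_STATUS);
 *
 *	if ((status & GVE_DEVICE_STATUS_RESET) != 0)
 *		gve_schedule_reset(priv);
 */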
147 
148 #define GVE_RING_LOCK(ring)	mtx_lock(&(ring)->ring_mtx)
149 #define GVE_RING_TRYLOCK(ring)	mtx_trylock(&(ring)->ring_mtx)
150 #define GVE_RING_UNLOCK(ring)	mtx_unlock(&(ring)->ring_mtx)
151 #define GVE_RING_ASSERT(ring)	mtx_assert(&(ring)->ring_mtx, MA_OWNED)
152 
153 #define GVE_IFACE_LOCK_INIT(lock)     sx_init(&lock, "gve interface lock")
154 #define GVE_IFACE_LOCK_DESTROY(lock)  sx_destroy(&lock)
155 #define GVE_IFACE_LOCK_LOCK(lock)     sx_xlock(&lock)
156 #define GVE_IFACE_LOCK_UNLOCK(lock)   sx_unlock(&lock)
157 #define GVE_IFACE_LOCK_ASSERT(lock)   sx_assert(&lock, SA_XLOCKED)
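/*
 * Illustrative locking pattern (a sketch, not a quote of the driver): per-ring
 * work takes that ring's mutex, while interface-wide state transitions hold
 * the sx lock exclusively.
 *
 *	GVE_RING_LOCK(tx);
 *	// enqueue or drain this tx ring
 *	GVE_RING_UNLOCK(tx);
 *
 *	GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
 *	// bring queues up or down
 *	GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
 */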
158 
159 struct gve_queue_page_list {
160 	uint32_t id;
161 	uint32_t num_dmas;
162 	uint32_t num_pages;
163 	vm_offset_t kva;
164 	vm_page_t *pages;
165 	struct gve_dma_handle *dmas;
166 };
167 
168 struct gve_irq {
169 	struct resource *res;
170 	void *cookie;
171 };
172 
173 struct gve_rx_slot_page_info {
174 	void *page_address;
175 	vm_page_t page;
176 	uint32_t page_offset;
177 	uint16_t pad;
178 };
179 
180 /*
181  * A single received packet split across multiple buffers may be
182  * reconstructed using the information in this structure.
183  */
184 struct gve_rx_ctx {
185 	/* head and tail of mbuf chain for the current packet */
186 	struct mbuf *mbuf_head;
187 	struct mbuf *mbuf_tail;
188 	uint32_t total_size;
189 	uint8_t frag_cnt;
190 	bool is_tcp;
191 	bool drop_pkt;
192 };
193 
194 struct gve_ring_com {
195 	struct gve_priv *priv;
196 	uint32_t id;
197 
198 	/*
199 	 * BAR2 offset for this ring's doorbell and the
200 	 * counter-array offset for this ring's counter.
201 	 * Acquired from the device individually for each
202 	 * queue in the queue_create adminq command.
203 	 */
204 	struct gve_queue_resources *q_resources;
205 	struct gve_dma_handle q_resources_mem;
206 
207 	/* Byte offset into BAR2 where this ring's 4-byte irq doorbell lies. */
208 	uint32_t irq_db_offset;
209 	/* Byte offset into BAR2 where this ring's 4-byte doorbell lies. */
210 	uint32_t db_offset;
211 	/*
212 	 * Index, not byte-offset, into the counter array where this ring's
213 	 * 4-byte counter lies.
214 	 */
215 	uint32_t counter_idx;
216 
217 	/*
218 	 * The index of the MSIX vector that was assigned to
219 	 * this ring in `gve_alloc_irqs`.
220 	 *
221 	 * It is passed to the device in the queue_create adminq
222 	 * command.
223 	 *
224  * Additionally, this serves as the index into
225 	 * `priv->irq_db_indices` where this ring's irq doorbell's
226 	 * BAR2 offset, `irq_db_idx`, can be found.
227 	 */
228 	int ntfy_id;
229 
230 	/*
231 	 * The fixed bounce buffer for this ring.
232 	 * Once allocated, has to be offered to the device
233 	 * over the register-page-list adminq command.
234 	 */
235 	struct gve_queue_page_list *qpl;
236 
237 	struct task cleanup_task;
238 	struct taskqueue *cleanup_tq;
239 } __aligned(CACHE_LINE_SIZE);
240 
241 struct gve_rxq_stats {
242 	counter_u64_t rbytes;
243 	counter_u64_t rpackets;
244 	counter_u64_t rx_dropped_pkt;
245 	counter_u64_t rx_copybreak_cnt;
246 	counter_u64_t rx_frag_flip_cnt;
247 	counter_u64_t rx_frag_copy_cnt;
248 	counter_u64_t rx_dropped_pkt_desc_err;
249 	counter_u64_t rx_dropped_pkt_buf_post_fail;
250 	counter_u64_t rx_dropped_pkt_mbuf_alloc_fail;
251 	counter_u64_t rx_mbuf_dmamap_err;
252 	counter_u64_t rx_mbuf_mclget_null;
253 };
254 
255 #define NUM_RX_STATS (sizeof(struct gve_rxq_stats) / sizeof(counter_u64_t))
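/*
 * NUM_RX_STATS relies on struct gve_rxq_stats holding nothing but
 * counter_u64_t members, so the stats can be treated as a flat array.
 * Hedged sketch with the counter helpers declared at the end of this header:
 *
 *	gve_alloc_counters((counter_u64_t *)&rx->stats, NUM_RX_STATS);
 *	...
 *	gve_free_counters((counter_u64_t *)&rx->stats, NUM_RX_STATS);
 */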
256 
257 union gve_rx_qpl_buf_id_dqo {
258 	struct {
259 		uint16_t buf_id:11; /* Index into rx->dqo.bufs */
260 		uint8_t frag_num:5; /* Which frag in the QPL page */
261 	};
262 	uint16_t all;
263 } __packed;
264 _Static_assert(sizeof(union gve_rx_qpl_buf_id_dqo) == 2,
265     "gve: bad dqo qpl rx buf id length");
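/*
 * Illustrative packing example with hypothetical values: buf_id and frag_num
 * share the 16-bit `all` view, so a buffer reference can travel through the
 * DQO descriptors as a single tag.
 *
 *	union gve_rx_qpl_buf_id_dqo tag = { .buf_id = 37, .frag_num = 2 };
 *	uint16_t wire = tag.all;	// and back again via tag.all = wire
 */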
266 
267 struct gve_rx_buf_dqo {
268 	union {
269 		/* RDA */
270 		struct {
271 			struct mbuf *mbuf;
272 			bus_dmamap_t dmamap;
273 			uint64_t addr;
274 			bool mapped;
275 		};
276 		/* QPL */
277 		struct {
278 			uint8_t num_nic_frags; /* number of pending completions */
279 			uint8_t next_idx;  /* index of the next frag to post */
280 			/* for chaining rx->dqo.used_bufs */
281 			STAILQ_ENTRY(gve_rx_buf_dqo) stailq_entry;
282 		};
283 	};
284 	/* for chaining rx->dqo.free_bufs */
285 	SLIST_ENTRY(gve_rx_buf_dqo) slist_entry;
286 };
287 
288 /* power-of-2 sized receive ring */
289 struct gve_rx_ring {
290 	struct gve_ring_com com;
291 	struct gve_dma_handle desc_ring_mem;
292 	uint32_t cnt; /* free-running total number of completed packets */
293 	uint32_t fill_cnt; /* free-running total number of descs and buffs posted */
294 
295 	union {
296 		/* GQI-only fields */
297 		struct {
298 			struct gve_dma_handle data_ring_mem;
299 
300 			/* accessed in the GQ receive hot path */
301 			struct gve_rx_desc *desc_ring;
302 			union gve_rx_data_slot *data_ring;
303 			struct gve_rx_slot_page_info *page_info;
304 			uint32_t mask; /* masks the cnt and fill_cnt to the size of the ring */
305 			uint8_t seq_no; /* helps traverse the descriptor ring */
306 		};
307 
308 		/* DQO-only fields */
309 		struct {
310 			struct gve_dma_handle compl_ring_mem;
311 
312 			struct gve_rx_compl_desc_dqo *compl_ring;
313 			struct gve_rx_desc_dqo *desc_ring;
314 			struct gve_rx_buf_dqo *bufs; /* Parking place for posted buffers */
315 			bus_dma_tag_t buf_dmatag; /* To dmamap posted mbufs with */
316 
317 			uint32_t buf_cnt; /* Size of the bufs array */
318 			uint32_t mask; /* One less than the size of the desc and compl rings */
319 			uint32_t head; /* The index at which to post the next buffer */
320 			uint32_t tail; /* The index at which to receive the next compl */
321 			uint8_t cur_gen_bit; /* Gets flipped on every cycle of the compl ring */
322 			SLIST_HEAD(, gve_rx_buf_dqo) free_bufs;
323 
324 			/*
325 			 * Only used in QPL mode. Pages referred to by if_input-ed mbufs
326 			 * stay parked here until their wire count comes back to 1.
327 			 * Pages are moved here once they have no pending completions.
328 			 */
329 			STAILQ_HEAD(, gve_rx_buf_dqo) used_bufs;
330 		} dqo;
331 	};
332 
333 	struct lro_ctrl lro;
334 	struct gve_rx_ctx ctx;
335 	struct gve_rxq_stats stats;
336 
337 } __aligned(CACHE_LINE_SIZE);
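/*
 * cnt and fill_cnt above are free-running counters; an index into the
 * power-of-2 ring is recovered by masking (illustrative, GQI case):
 *
 *	uint32_t idx = rx->cnt & rx->mask;
 */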
338 
339 /*
340  * A contiguous representation of the pages composing the Tx bounce buffer.
341  * The xmit taskqueue and the completion taskqueue use it concurrently.
342  * Both operate on `available`: the xmit tq lowers it and the completion tq
343  * raises it. `head` is the last location written to, and so only the xmit tq
344  * uses it.
345  */
346 struct gve_tx_fifo {
347 	vm_offset_t base; /* address of base of FIFO */
348 	uint32_t size; /* total size */
349 	volatile int available; /* how much space is still available */
350 	uint32_t head; /* offset to write at */
351 };
352 
353 struct gve_tx_buffer_state {
354 	struct mbuf *mbuf;
355 
356 	/*
357 	 * Time at which the xmit tq places descriptors for mbuf's payload on a
358 	 * tx queue. This timestamp is invalidated when the mbuf is freed and
359 	 * must be checked for validity when read.
360 	 */
361 	int64_t enqueue_time_sec;
362 
363 	struct gve_tx_iovec iov[GVE_TX_MAX_DESCS];
364 };
365 
366 struct gve_txq_stats {
367 	counter_u64_t tbytes;
368 	counter_u64_t tpackets;
369 	counter_u64_t tso_packet_cnt;
370 	counter_u64_t tx_dropped_pkt;
371 	counter_u64_t tx_delayed_pkt_nospace_device;
372 	counter_u64_t tx_dropped_pkt_nospace_bufring;
373 	counter_u64_t tx_delayed_pkt_nospace_descring;
374 	counter_u64_t tx_delayed_pkt_nospace_compring;
375 	counter_u64_t tx_delayed_pkt_nospace_qpl_bufs;
376 	counter_u64_t tx_delayed_pkt_tsoerr;
377 	counter_u64_t tx_dropped_pkt_vlan;
378 	counter_u64_t tx_mbuf_collapse;
379 	counter_u64_t tx_mbuf_defrag;
380 	counter_u64_t tx_mbuf_defrag_err;
381 	counter_u64_t tx_mbuf_dmamap_enomem_err;
382 	counter_u64_t tx_mbuf_dmamap_err;
383 	counter_u64_t tx_timeout;
384 };
385 
386 #define NUM_TX_STATS (sizeof(struct gve_txq_stats) / sizeof(counter_u64_t))
387 
388 struct gve_tx_pending_pkt_dqo {
389 	struct mbuf *mbuf;
390 
391 	/*
392 	 * Time at which the xmit tq places descriptors for mbuf's payload on a
393 	 * tx queue. This timestamp is invalidated when the mbuf is freed and
394 	 * must be checked for validity when read.
395 	 */
396 	int64_t enqueue_time_sec;
397 
398 	union {
399 		/* RDA */
400 		bus_dmamap_t dmamap;
401 		/* QPL */
402 		struct {
403 			/*
404 			 * A linked list of entries from qpl_bufs that served
405 			 * as the bounce buffer for this packet.
406 			 */
407 			int32_t qpl_buf_head;
408 			uint32_t num_qpl_bufs;
409 		};
410 	};
411 	uint8_t state; /* the gve_packet_state enum */
412 	int next; /* To chain the free_pending_pkts lists */
413 };
414 
415 /* power-of-2 sized transmit ring */
416 struct gve_tx_ring {
417 	struct gve_ring_com com;
418 	struct gve_dma_handle desc_ring_mem;
419 
420 	struct task xmit_task;
421 	struct taskqueue *xmit_tq;
422 	bool stopped;
423 
424 	/* Accessed when writing descriptors */
425 	struct buf_ring *br;
426 	struct mtx ring_mtx;
427 
428 	uint32_t req; /* free-running total number of packets written to the nic */
429 	uint32_t done; /* free-running total number of completed packets */
430 
431 	int64_t last_kicked; /* always-valid timestamp in seconds for the last queue kick */
432 
433 	union {
434 		/* GQI specific stuff */
435 		struct {
436 			union gve_tx_desc *desc_ring;
437 			struct gve_tx_buffer_state *info;
438 
439 			struct gve_tx_fifo fifo;
440 
441 			uint32_t mask; /* masks the req and done to the size of the ring */
442 		};
443 
444 		/* DQO specific stuff */
445 		struct {
446 			struct gve_dma_handle compl_ring_mem;
447 
448 			/* Accessed when writing descriptors */
449 			struct {
450 				union gve_tx_desc_dqo *desc_ring;
451 				uint32_t desc_mask; /* masks head and tail to the size of desc_ring */
452 				uint32_t desc_head; /* last desc read by NIC, cached value of hw_tx_head */
453 				uint32_t desc_tail; /* last desc written by driver */
454 				uint32_t last_re_idx; /* desc which last had "report event" set */
455 
456 				/*
457 				 * The head index of a singly linked list containing pending packet objects
458 				 * to park mbufs till the NIC sends completions. Once this list is depleted,
459 				 * the "_prd" suffixed producer list, grown by the completion taskqueue,
460 				 * is stolen.
461 				 */
462 				int32_t free_pending_pkts_csm;
463 
464 				/*
465 				 * The head index of a singly linked list representing QPL page fragments
466 				 * to copy mbuf payload into for the NIC to see. Once this list is depleted,
467 				 * the "_prd" suffixed producer list, grown by the completion taskqueue,
468 				 * is stolen.
469 				 *
470 				 * Only used in QPL mode. int32_t because atomic_swap_16 doesn't exist.
471 				 */
472 				int32_t free_qpl_bufs_csm;
473 				uint32_t qpl_bufs_consumed; /* Allows quickly checking for buf availability */
474 				uint32_t qpl_bufs_produced_cached; /* Cached value of qpl_bufs_produced */
475 
476 				/* DMA params for mapping Tx mbufs. Only used in RDA mode. */
477 				bus_dma_tag_t buf_dmatag;
478 			} __aligned(CACHE_LINE_SIZE);
479 
480 			/* Accessed when processing completions */
481 			struct {
482 				struct gve_tx_compl_desc_dqo *compl_ring;
483 				uint32_t compl_mask; /* masks head to the size of compl_ring */
484 				uint32_t compl_head; /* last completion read by driver */
485 				uint8_t cur_gen_bit; /* NIC flips a bit on every pass */
486 				uint32_t hw_tx_head; /* last desc read by NIC */
487 
488 				/*
489 				 * The completion taskqueue moves pending-packet objects to this
490 				 * list after freeing the mbuf. The "_prd" denotes that this is
491 				 * a producer list. The transmit taskqueue steals this list once
492 				 * its consumer list, with the "_csm" suffix, is depleted.
493 				 */
494 				int32_t free_pending_pkts_prd;
495 
496 				/*
497 				 * The completion taskqueue moves the QPL pages corresponding to a
498 				 * completed packet into this list. It is only used in QPL mode.
499 				 * The "_prd" denotes that this is a producer list. The transmit
500 				 * taskqueue steals this list once its consumer list, with the "_csm"
501 				 * suffix, is depleted.
502 				 *
503 				 * Only used in QPL mode. int32_t because atomic_swap_16 doesn't exist.
504 				 */
505 				int32_t free_qpl_bufs_prd;
506 				uint32_t qpl_bufs_produced;
507 			} __aligned(CACHE_LINE_SIZE);
508 
509 			/* Accessed by both the completion and xmit loops */
510 			struct {
511 				/* completion tags index into this array */
512 				struct gve_tx_pending_pkt_dqo *pending_pkts;
513 				uint16_t num_pending_pkts;
514 
515 				/*
516 				 * Represents QPL page fragments. An index into this array
517 				 * always represents the same QPL page fragment. The value
518 				 * is also an index into this array and serves as a means
519 				 * to chain buffers into linked lists whose heads are
520 				 * either free_qpl_bufs_prd, free_qpl_bufs_csm, or a
521 				 * pending packet's qpl_buf_head.
522 				 */
523 				int32_t *qpl_bufs;
524 			} __aligned(CACHE_LINE_SIZE);
525 		} dqo;
526 	};
527 	struct gve_txq_stats stats;
528 } __aligned(CACHE_LINE_SIZE);
529 
530 enum gve_packet_state {
531 	/*
532 	 * Packet does not yet have a dmamap created.
533 	 * This should always be zero since state is not explicitly initialized.
534 	 */
535 	GVE_PACKET_STATE_UNALLOCATED,
536 	/* Packet has a dmamap and is in free list, available to be allocated. */
537 	GVE_PACKET_STATE_FREE,
538 	/* Packet is expecting a regular data completion */
539 	GVE_PACKET_STATE_PENDING_DATA_COMPL,
540 };
541 
542 struct gve_ptype {
543 	uint8_t l3_type;  /* `gve_l3_type` in gve_adminq.h */
544 	uint8_t l4_type;  /* `gve_l4_type` in gve_adminq.h */
545 };
546 
547 struct gve_ptype_lut {
548 	struct gve_ptype ptypes[GVE_NUM_PTYPES];
549 };
550 
551 struct gve_priv {
552 	if_t ifp;
553 	device_t dev;
554 	struct ifmedia media;
555 
556 	uint8_t mac[ETHER_ADDR_LEN];
557 
558 	struct gve_dma_handle aq_mem;
559 
560 	struct resource *reg_bar; /* BAR0 */
561 	struct resource *db_bar; /* BAR2 */
562 	struct resource *msix_table;
563 
564 	uint32_t mgmt_msix_idx;
565 	uint32_t rx_copybreak;
566 
567 	uint16_t num_event_counters;
568 	uint16_t default_num_queues;
569 	uint16_t tx_desc_cnt;
570 	uint16_t max_tx_desc_cnt;
571 	uint16_t min_tx_desc_cnt;
572 	uint16_t rx_desc_cnt;
573 	uint16_t max_rx_desc_cnt;
574 	uint16_t min_rx_desc_cnt;
575 	uint16_t rx_pages_per_qpl;
576 	uint64_t max_registered_pages;
577 	uint64_t num_registered_pages;
578 	uint32_t supported_features;
579 	uint16_t max_mtu;
580 	bool modify_ringsize_enabled;
581 
582 	struct gve_dma_handle counter_array_mem;
583 	__be32 *counters;
584 	struct gve_dma_handle irqs_db_mem;
585 	struct gve_irq_db *irq_db_indices;
586 
587 	enum gve_queue_format queue_format;
588 	struct gve_queue_config tx_cfg;
589 	struct gve_queue_config rx_cfg;
590 	uint32_t num_queues;
591 
592 	struct gve_irq *irq_tbl;
593 	struct gve_tx_ring *tx;
594 	struct gve_rx_ring *rx;
595 
596 	struct gve_ptype_lut *ptype_lut_dqo;
597 
598 	/*
599 	 * Admin queue - see gve_adminq.h
600 	 * Since AQ cmds do not run in steady state, 32 bit counters suffice
601 	 */
602 	struct gve_adminq_command *adminq;
603 	vm_paddr_t adminq_bus_addr;
604 	uint32_t adminq_mask; /* masks prod_cnt to adminq size */
605 	uint32_t adminq_prod_cnt; /* free-running count of AQ cmds executed */
606 	uint32_t adminq_cmd_fail; /* free-running count of AQ cmds failed */
607 	uint32_t adminq_timeouts; /* free-running count of AQ cmd timeouts */
608 	/* free-running count of each distinct AQ cmd executed */
609 	uint32_t adminq_describe_device_cnt;
610 	uint32_t adminq_cfg_device_resources_cnt;
611 	uint32_t adminq_register_page_list_cnt;
612 	uint32_t adminq_unregister_page_list_cnt;
613 	uint32_t adminq_create_tx_queue_cnt;
614 	uint32_t adminq_create_rx_queue_cnt;
615 	uint32_t adminq_destroy_tx_queue_cnt;
616 	uint32_t adminq_destroy_rx_queue_cnt;
617 	uint32_t adminq_dcfg_device_resources_cnt;
618 	uint32_t adminq_set_driver_parameter_cnt;
619 	uint32_t adminq_verify_driver_compatibility_cnt;
620 	uint32_t adminq_get_ptype_map_cnt;
621 
622 	uint32_t interface_up_cnt;
623 	uint32_t interface_down_cnt;
624 	uint32_t reset_cnt;
625 
626 	struct task service_task;
627 	struct taskqueue *service_tq;
628 
629 	struct gve_state_flags state_flags;
630 	struct sx gve_iface_lock;
631 
632 	struct callout tx_timeout_service;
633 	/* The index of the tx queue that the timer service will check on its next invocation */
634 	uint16_t check_tx_queue_idx;
635 
636 };
637 
638 static inline bool
639 gve_get_state_flag(struct gve_priv *priv, int pos)
640 {
641 	return (BIT_ISSET(GVE_NUM_STATE_FLAGS, pos, &priv->state_flags));
642 }
643 
644 static inline void
645 gve_set_state_flag(struct gve_priv *priv, int pos)
646 {
647 	BIT_SET_ATOMIC(GVE_NUM_STATE_FLAGS, pos, &priv->state_flags);
648 }
649 
650 static inline void
651 gve_clear_state_flag(struct gve_priv *priv, int pos)
652 {
653 	BIT_CLR_ATOMIC(GVE_NUM_STATE_FLAGS, pos, &priv->state_flags);
654 }
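/*
 * Illustrative use of the state-flag helpers above (a sketch, not a quote of
 * the service path):
 *
 *	gve_set_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
 *	if (gve_get_state_flag(priv, GVE_STATE_FLAG_LINK_UP))
 *		if_link_state_change(priv->ifp, LINK_STATE_UP);
 */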
655 
656 static inline bool
657 gve_is_gqi(struct gve_priv *priv)
658 {
659 	return (priv->queue_format == GVE_GQI_QPL_FORMAT);
660 }
661 
662 static inline bool
663 gve_is_qpl(struct gve_priv *priv)
664 {
665 	return (priv->queue_format == GVE_GQI_QPL_FORMAT ||
666 	    priv->queue_format == GVE_DQO_QPL_FORMAT);
667 }
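/*
 * These predicates gate the GQI vs. DQO and RDA vs. QPL code paths.
 * Illustrative dispatch (a sketch; the real call sites live in the .c files):
 *
 *	if (gve_is_gqi(priv))
 *		timed_out = gve_check_tx_timeout_gqi(priv, tx);
 *	else
 *		timed_out = gve_check_tx_timeout_dqo(priv, tx);
 */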
668 
669 /* Defined in gve_main.c */
670 void gve_schedule_reset(struct gve_priv *priv);
671 int gve_adjust_tx_queues(struct gve_priv *priv, uint16_t new_queue_cnt);
672 int gve_adjust_rx_queues(struct gve_priv *priv, uint16_t new_queue_cnt);
673 int gve_adjust_ring_sizes(struct gve_priv *priv, uint16_t new_desc_cnt, bool is_rx);
674 
675 /* Register access functions defined in gve_utils.c */
676 uint32_t gve_reg_bar_read_4(struct gve_priv *priv, bus_size_t offset);
677 void gve_reg_bar_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val);
678 void gve_db_bar_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val);
679 void gve_db_bar_dqo_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val);
680 
681 /* QPL (Queue Page List) functions defined in gve_qpl.c */
682 struct gve_queue_page_list *gve_alloc_qpl(struct gve_priv *priv, uint32_t id,
683     int npages, bool single_kva);
684 void gve_free_qpl(struct gve_priv *priv, struct gve_queue_page_list *qpl);
685 int gve_register_qpls(struct gve_priv *priv);
686 int gve_unregister_qpls(struct gve_priv *priv);
687 void gve_mextadd_free(struct mbuf *mbuf);
688 
689 /* TX functions defined in gve_tx.c */
690 int gve_alloc_tx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx);
691 void gve_free_tx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx);
692 int gve_create_tx_rings(struct gve_priv *priv);
693 int gve_destroy_tx_rings(struct gve_priv *priv);
694 int gve_check_tx_timeout_gqi(struct gve_priv *priv, struct gve_tx_ring *tx);
695 int gve_tx_intr(void *arg);
696 int gve_xmit_ifp(if_t ifp, struct mbuf *mbuf);
697 void gve_qflush(if_t ifp);
698 void gve_xmit_tq(void *arg, int pending);
699 void gve_tx_cleanup_tq(void *arg, int pending);
700 
701 /* TX functions defined in gve_tx_dqo.c */
702 int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int i);
703 void gve_tx_free_ring_dqo(struct gve_priv *priv, int i);
704 void gve_clear_tx_ring_dqo(struct gve_priv *priv, int i);
705 int gve_check_tx_timeout_dqo(struct gve_priv *priv, struct gve_tx_ring *tx);
706 int gve_tx_intr_dqo(void *arg);
707 int gve_xmit_dqo(struct gve_tx_ring *tx, struct mbuf **mbuf_ptr);
708 int gve_xmit_dqo_qpl(struct gve_tx_ring *tx, struct mbuf *mbuf);
709 void gve_tx_cleanup_tq_dqo(void *arg, int pending);
710 
711 /* RX functions defined in gve_rx.c */
712 int gve_alloc_rx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx);
713 void gve_free_rx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx);
714 int gve_create_rx_rings(struct gve_priv *priv);
715 int gve_destroy_rx_rings(struct gve_priv *priv);
716 int gve_rx_intr(void *arg);
717 void gve_rx_cleanup_tq(void *arg, int pending);
718 
719 /* RX functions defined in gve_rx_dqo.c */
720 int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int i);
721 void gve_rx_free_ring_dqo(struct gve_priv *priv, int i);
722 void gve_rx_prefill_buffers_dqo(struct gve_rx_ring *rx);
723 void gve_clear_rx_ring_dqo(struct gve_priv *priv, int i);
724 int gve_rx_intr_dqo(void *arg);
725 void gve_rx_cleanup_tq_dqo(void *arg, int pending);
726 
727 /* DMA functions defined in gve_utils.c */
728 int gve_dma_alloc_coherent(struct gve_priv *priv, int size, int align,
729     struct gve_dma_handle *dma);
730 void gve_dma_free_coherent(struct gve_dma_handle *dma);
731 int gve_dmamap_create(struct gve_priv *priv, int size, int align,
732     struct gve_dma_handle *dma);
733 void gve_dmamap_destroy(struct gve_dma_handle *dma);
734 
735 /* IRQ functions defined in gve_utils.c */
736 void gve_free_irqs(struct gve_priv *priv);
737 int gve_alloc_irqs(struct gve_priv *priv);
738 void gve_unmask_all_queue_irqs(struct gve_priv *priv);
739 void gve_mask_all_queue_irqs(struct gve_priv *priv);
740 
741 /* Miscellaneous functions defined in gve_utils.c */
742 void gve_invalidate_timestamp(int64_t *timestamp_sec);
743 int64_t gve_seconds_since(int64_t *timestamp_sec);
744 void gve_set_timestamp(int64_t *timestamp_sec);
745 bool gve_timestamp_valid(int64_t *timestamp_sec);
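/*
 * Hedged sketch of how the timestamp helpers pair with enqueue_time_sec and
 * GVE_TX_TIMEOUT_PKT_SEC (illustrative; the real check lives in the tx code):
 *
 *	gve_set_timestamp(&info->enqueue_time_sec);	// at enqueue time
 *	...
 *	if (gve_timestamp_valid(&info->enqueue_time_sec) &&
 *	    gve_seconds_since(&info->enqueue_time_sec) > GVE_TX_TIMEOUT_PKT_SEC)
 *		handle_timed_out_packet();	// hypothetical handler
 */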
746 
747 /* Sysctl functions defined in gve_sysctl.c */
748 extern bool gve_disable_hw_lro;
749 extern char gve_queue_format[8];
750 extern char gve_version[8];
751 void gve_setup_sysctl(struct gve_priv *priv);
752 void gve_accum_stats(struct gve_priv *priv, uint64_t *rpackets,
753     uint64_t *rbytes, uint64_t *rx_dropped_pkt, uint64_t *tpackets,
754     uint64_t *tbytes, uint64_t *tx_dropped_pkt);
755 
756 /* Stats functions defined in gve_utils.c */
757 void gve_alloc_counters(counter_u64_t *stat, int num_stats);
758 void gve_free_counters(counter_u64_t *stat, int num_stats);
759 
760 #endif /* _GVE_FBSD_H */
761