/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2023-2024 Google LLC
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _GVE_FBSD_H
#define _GVE_FBSD_H

#include "gve_desc.h"
#include "gve_plat.h"
#include "gve_register.h"

#ifndef PCI_VENDOR_ID_GOOGLE
#define PCI_VENDOR_ID_GOOGLE	0x1ae0
#endif

#define PCI_DEV_ID_GVNIC	0x0042
#define GVE_REGISTER_BAR	0
#define GVE_DOORBELL_BAR	2

/* Driver can alloc up to 2 segments for the header and 2 for the payload. */
#define GVE_TX_MAX_DESCS	4
#define GVE_TX_BUFRING_ENTRIES	4096

#define ADMINQ_SIZE PAGE_SIZE

#define GVE_DEFAULT_RX_BUFFER_SIZE 2048
/* Each RX bounce buffer page can fit two packet buffers. */
#define GVE_DEFAULT_RX_BUFFER_OFFSET (PAGE_SIZE / 2)
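
/*
 * Illustrative sketch (not taken verbatim from the driver): since each bounce
 * buffer page holds two packet buffers, a posted buffer can be switched to
 * the other half of its page by flipping its offset, e.g.:
 *
 *	page_info->page_offset ^= GVE_DEFAULT_RX_BUFFER_OFFSET;
 *	buf = (char *)page_info->page_address + page_info->page_offset;
 *
 * `page_info` refers to the struct gve_rx_slot_page_info defined later in
 * this file.
 */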

/* PTYPEs are always 10 bits. */
#define GVE_NUM_PTYPES	1024

/*
 * Number of descriptors served by each page of a queue page list (QPL).
 * The page count, i.e. the QPL size, is derived by dividing a queue's
 * descriptor count by this divisor.
 */
#define GVE_QPL_DIVISOR	16
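
/*
 * Illustrative sketch (an assumption, not a quote of the allocation code):
 * with this divisor, a queue with tx_desc_cnt descriptors would be backed by
 *
 *	num_pages = priv->tx_desc_cnt / GVE_QPL_DIVISOR;
 *
 * QPL pages, e.g. 1024 descriptors -> 64 pages of bounce buffer.
 */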

static MALLOC_DEFINE(M_GVE, "gve", "gve allocations");

struct gve_dma_handle {
	bus_addr_t	bus_addr;
	void		*cpu_addr;
	bus_dma_tag_t	tag;
	bus_dmamap_t	map;
};

union gve_tx_desc {
	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
	struct gve_tx_mtd_desc mtd; /* optional metadata descriptor */
	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
};

/* Tracks the memory in the fifo occupied by a segment of a packet */
struct gve_tx_iovec {
	uint32_t iov_offset; /* offset into this segment */
	uint32_t iov_len; /* length */
	uint32_t iov_padding; /* padding associated with this segment */
};

/* Tracks allowed and current queue settings */
struct gve_queue_config {
	uint16_t max_queues;
	uint16_t num_queues; /* current */
};

struct gve_irq_db {
	__be32 index;
} __aligned(CACHE_LINE_SIZE);

/*
 * GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
 * when the entire configure_device_resources command is zeroed out and the
 * queue_format is not specified.
 */
enum gve_queue_format {
	GVE_QUEUE_FORMAT_UNSPECIFIED	= 0x0,
	GVE_GQI_RDA_FORMAT		= 0x1,
	GVE_GQI_QPL_FORMAT		= 0x2,
	GVE_DQO_RDA_FORMAT		= 0x3,
	GVE_DQO_QPL_FORMAT		= 0x4,
};

enum gve_state_flags_bit {
	GVE_STATE_FLAG_ADMINQ_OK,
	GVE_STATE_FLAG_RESOURCES_OK,
	GVE_STATE_FLAG_QPLREG_OK,
	GVE_STATE_FLAG_RX_RINGS_OK,
	GVE_STATE_FLAG_TX_RINGS_OK,
	GVE_STATE_FLAG_QUEUES_UP,
	GVE_STATE_FLAG_LINK_UP,
	GVE_STATE_FLAG_DO_RESET,
	GVE_STATE_FLAG_IN_RESET,
	GVE_NUM_STATE_FLAGS /* Not part of the enum space */
};

BITSET_DEFINE(gve_state_flags, GVE_NUM_STATE_FLAGS);

#define GVE_DEVICE_STATUS_RESET (0x1 << 1)
#define GVE_DEVICE_STATUS_LINK_STATUS (0x1 << 2)
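
/*
 * Illustrative sketch (the register offset name below is an assumption; the
 * real offset comes from gve_register.h): the service task can poll the
 * device status word in BAR0 and react to these bits, e.g.:
 *
 *	uint32_t status = gve_reg_bar_read_4(priv, GVE_DEVICE_STATUS);
 *	if ((status & GVE_DEVICE_STATUS_RESET) != 0)
 *		gve_schedule_reset(priv);
 */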

#define GVE_RING_LOCK(ring)	mtx_lock(&(ring)->ring_mtx)
#define GVE_RING_TRYLOCK(ring)	mtx_trylock(&(ring)->ring_mtx)
#define GVE_RING_UNLOCK(ring)	mtx_unlock(&(ring)->ring_mtx)
#define GVE_RING_ASSERT(ring)	mtx_assert(&(ring)->ring_mtx, MA_OWNED)

#define GVE_IFACE_LOCK_INIT(lock)     sx_init(&lock, "gve interface lock")
#define GVE_IFACE_LOCK_DESTROY(lock)  sx_destroy(&lock)
#define GVE_IFACE_LOCK_LOCK(lock)     sx_xlock(&lock)
#define GVE_IFACE_LOCK_UNLOCK(lock)   sx_unlock(&lock)
#define GVE_IFACE_LOCK_ASSERT(lock)   sx_assert(&lock, SA_XLOCKED)
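
/*
 * Illustrative usage sketch (gve_xmit_br below is a hypothetical helper): the
 * xmit path can avoid sleeping on a busy ring by trying the ring mutex and
 * deferring to the ring's taskqueue when it is already held:
 *
 *	if (GVE_RING_TRYLOCK(tx) != 0) {
 *		err = gve_xmit_br(tx);
 *		GVE_RING_UNLOCK(tx);
 *	} else
 *		taskqueue_enqueue(tx->xmit_tq, &tx->xmit_task);
 */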

struct gve_queue_page_list {
	uint32_t id;
	uint32_t num_dmas;
	uint32_t num_pages;
	vm_offset_t kva;
	vm_page_t *pages;
	struct gve_dma_handle *dmas;
};

struct gve_irq {
	struct resource *res;
	void *cookie;
};

struct gve_rx_slot_page_info {
	void *page_address;
	vm_page_t page;
	uint32_t page_offset;
	uint16_t pad;
};

/*
 * A single received packet split across multiple buffers may be
 * reconstructed using the information in this structure.
 */
struct gve_rx_ctx {
	/* head and tail of mbuf chain for the current packet */
	struct mbuf *mbuf_head;
	struct mbuf *mbuf_tail;
	uint32_t total_size;
	uint8_t frag_cnt;
	bool is_tcp;
	bool drop_pkt;
};
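
/*
 * Illustrative sketch (an assumption about how the ctx is meant to be used):
 * appending one more buffer's worth of data to the packet being assembled:
 *
 *	if (ctx->mbuf_head == NULL)
 *		ctx->mbuf_head = mbuf;
 *	else
 *		ctx->mbuf_tail->m_next = mbuf;
 *	ctx->mbuf_tail = mbuf;
 *	ctx->total_size += mbuf->m_len;
 *	ctx->frag_cnt++;
 */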

struct gve_ring_com {
	struct gve_priv *priv;
	uint32_t id;

	/*
	 * BAR2 offset for this ring's doorbell and the
	 * counter-array offset for this ring's counter.
	 * Acquired from the device individually for each
	 * queue in the queue_create adminq command.
	 */
	struct gve_queue_resources *q_resources;
	struct gve_dma_handle q_resources_mem;

	/* Byte offset into BAR2 where this ring's 4-byte irq doorbell lies. */
	uint32_t irq_db_offset;
	/* Byte offset into BAR2 where this ring's 4-byte doorbell lies. */
	uint32_t db_offset;
	/*
	 * Index, not byte-offset, into the counter array where this ring's
	 * 4-byte counter lies.
	 */
	uint32_t counter_idx;

	/*
	 * The index of the MSIX vector that was assigned to
	 * this ring in `gve_alloc_irqs`.
	 *
	 * It is passed to the device in the queue_create adminq
	 * command.
	 *
	 * Additionally, it serves as the index into
	 * `priv->irq_db_indices` where this ring's irq doorbell's
	 * BAR2 offset, `irq_db_idx`, can be found.
	 */
	int ntfy_id;

	/*
	 * The fixed bounce buffer for this ring.
	 * Once allocated, has to be offered to the device
	 * over the register-page-list adminq command.
	 */
	struct gve_queue_page_list *qpl;

	struct task cleanup_task;
	struct taskqueue *cleanup_tq;
} __aligned(CACHE_LINE_SIZE);
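
/*
 * Illustrative sketch of how the offsets above are typically consumed
 * (GVE_IRQ_MASK_VALUE is a hypothetical stand-in, not a name from this
 * driver):
 *
 *	counter value: be32toh(priv->counters[com->counter_idx])
 *	ring doorbell: gve_db_bar_write_4(priv, com->db_offset, value);
 *	irq doorbell:  gve_db_bar_write_4(priv, com->irq_db_offset,
 *	                   GVE_IRQ_MASK_VALUE);
 */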

struct gve_rxq_stats {
	counter_u64_t rbytes;
	counter_u64_t rpackets;
	counter_u64_t rx_dropped_pkt;
	counter_u64_t rx_copybreak_cnt;
	counter_u64_t rx_frag_flip_cnt;
	counter_u64_t rx_frag_copy_cnt;
	counter_u64_t rx_dropped_pkt_desc_err;
	counter_u64_t rx_dropped_pkt_buf_post_fail;
	counter_u64_t rx_dropped_pkt_mbuf_alloc_fail;
	counter_u64_t rx_mbuf_dmamap_err;
	counter_u64_t rx_mbuf_mclget_null;
};

#define NUM_RX_STATS (sizeof(struct gve_rxq_stats) / sizeof(counter_u64_t))

union gve_rx_qpl_buf_id_dqo {
	struct {
		uint16_t buf_id:11; /* Index into rx->dqo.bufs */
		uint8_t frag_num:5; /* Which frag in the QPL page */
	};
	uint16_t all;
} __packed;
_Static_assert(sizeof(union gve_rx_qpl_buf_id_dqo) == 2,
    "gve: bad dqo qpl rx buf id length");

struct gve_rx_buf_dqo {
	union {
		/* RDA */
		struct {
			struct mbuf *mbuf;
			bus_dmamap_t dmamap;
			uint64_t addr;
			bool mapped;
		};
		/* QPL */
		struct {
			uint8_t num_nic_frags; /* number of pending completions */
			uint8_t next_idx;  /* index of the next frag to post */
			/* for chaining rx->dqo.used_bufs */
			STAILQ_ENTRY(gve_rx_buf_dqo) stailq_entry;
		};
	};
	/* for chaining rx->dqo.free_bufs */
	SLIST_ENTRY(gve_rx_buf_dqo) slist_entry;
};

/* power-of-2 sized receive ring */
struct gve_rx_ring {
	struct gve_ring_com com;
	struct gve_dma_handle desc_ring_mem;
	uint32_t cnt; /* free-running total number of completed packets */
	uint32_t fill_cnt; /* free-running total number of descs and buffs posted */

	union {
		/* GQI-only fields */
		struct {
			struct gve_dma_handle data_ring_mem;

			/* accessed in the GQ receive hot path */
			struct gve_rx_desc *desc_ring;
			union gve_rx_data_slot *data_ring;
			struct gve_rx_slot_page_info *page_info;
			uint32_t mask; /* masks the cnt and fill_cnt to the size of the ring */
			uint8_t seq_no; /* helps traverse the descriptor ring */
		};

		/* DQO-only fields */
		struct {
			struct gve_dma_handle compl_ring_mem;

			struct gve_rx_compl_desc_dqo *compl_ring;
			struct gve_rx_desc_dqo *desc_ring;
			struct gve_rx_buf_dqo *bufs; /* Parking place for posted buffers */
			bus_dma_tag_t buf_dmatag; /* To dmamap posted mbufs with */

			uint32_t buf_cnt; /* Size of the bufs array */
			uint32_t mask; /* One less than the sizes of the desc and compl rings */
			uint32_t head; /* The index at which to post the next buffer */
			uint32_t tail; /* The index at which to receive the next compl */
			uint8_t cur_gen_bit; /* Gets flipped on every cycle of the compl ring */
			SLIST_HEAD(, gve_rx_buf_dqo) free_bufs;

			/*
			 * Only used in QPL mode. Pages referred to by if_input-ed mbufs
			 * stay parked here till their wire count comes back to 1.
			 * Pages are moved here after there aren't any pending completions.
			 */
			STAILQ_HEAD(, gve_rx_buf_dqo) used_bufs;
		} dqo;
	};

	struct lro_ctrl lro;
	struct gve_rx_ctx ctx;
	struct gve_rxq_stats stats;

} __aligned(CACHE_LINE_SIZE);
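
/*
 * Illustrative sketch (GQI): cnt and fill_cnt are free-running, so ring
 * indices are derived by masking while the raw fill_cnt is written to the
 * ring doorbell:
 *
 *	idx = rx->fill_cnt & rx->mask;
 *	... post a fresh buffer into rx->data_ring[idx] ...
 *	rx->fill_cnt++;
 *	gve_db_bar_write_4(priv, rx->com.db_offset, rx->fill_cnt);
 */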

/*
 * A contiguous representation of the pages composing the Tx bounce buffer.
 * The xmit taskqueue and the completion taskqueue both use it concurrently.
 * Both operate on `available`: the xmit tq lowers it and the completion tq
 * raises it. `head` is the last location written to, so only the xmit tq
 * uses it.
 */
struct gve_tx_fifo {
	vm_offset_t base; /* address of base of FIFO */
	uint32_t size; /* total size */
	volatile int available; /* how much space is still available */
	uint32_t head; /* offset to write at */
};
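
/*
 * Illustrative sketch (an assumption about the intended discipline, not a
 * copy of gve_tx.c): the xmit tq consumes space and the completion tq gives
 * it back, so `available` is updated atomically while `head` is only ever
 * touched by the xmit tq:
 *
 *	xmit tq:	if (atomic_load_int(&fifo->available) < bytes)
 *				return (ENOBUFS);
 *			offset = fifo->head;
 *			fifo->head = (fifo->head + bytes) % fifo->size;
 *			atomic_subtract_int(&fifo->available, bytes);
 *
 *	completion tq:	atomic_add_int(&fifo->available, bytes);
 */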

struct gve_tx_buffer_state {
	struct mbuf *mbuf;
	struct gve_tx_iovec iov[GVE_TX_MAX_DESCS];
};

struct gve_txq_stats {
	counter_u64_t tbytes;
	counter_u64_t tpackets;
	counter_u64_t tso_packet_cnt;
	counter_u64_t tx_dropped_pkt;
	counter_u64_t tx_delayed_pkt_nospace_device;
	counter_u64_t tx_dropped_pkt_nospace_bufring;
	counter_u64_t tx_delayed_pkt_nospace_descring;
	counter_u64_t tx_delayed_pkt_nospace_compring;
	counter_u64_t tx_delayed_pkt_nospace_qpl_bufs;
	counter_u64_t tx_delayed_pkt_tsoerr;
	counter_u64_t tx_dropped_pkt_vlan;
	counter_u64_t tx_mbuf_collapse;
	counter_u64_t tx_mbuf_defrag;
	counter_u64_t tx_mbuf_defrag_err;
	counter_u64_t tx_mbuf_dmamap_enomem_err;
	counter_u64_t tx_mbuf_dmamap_err;
};

#define NUM_TX_STATS (sizeof(struct gve_txq_stats) / sizeof(counter_u64_t))

struct gve_tx_pending_pkt_dqo {
	struct mbuf *mbuf;
	union {
		/* RDA */
		bus_dmamap_t dmamap;
		/* QPL */
		struct {
			/*
			 * A linked list of entries from qpl_bufs that served
			 * as the bounce buffer for this packet.
			 */
			int32_t qpl_buf_head;
			uint32_t num_qpl_bufs;
		};
	};
	uint8_t state; /* the gve_packet_state enum */
	int next; /* To chain the free_pending_pkts lists */
};
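
/*
 * Illustrative sketch (an assumption; the chain terminator value is not
 * spelled out in this header): in QPL mode the buffers that backed a
 * completed packet are walked via tx->dqo.qpl_bufs, which stores the "next"
 * index for every buffer:
 *
 *	int32_t buf = pending_pkt->qpl_buf_head;
 *	while (buf >= 0) {
 *		int32_t next = tx->dqo.qpl_bufs[buf];
 *		... return buf to the free list ...
 *		buf = next;
 *	}
 */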

/* power-of-2 sized transmit ring */
struct gve_tx_ring {
	struct gve_ring_com com;
	struct gve_dma_handle desc_ring_mem;

	struct task xmit_task;
	struct taskqueue *xmit_tq;
	bool stopped;

	/* Accessed when writing descriptors */
	struct buf_ring *br;
	struct mtx ring_mtx;

	uint32_t req; /* free-running total number of packets written to the nic */
	uint32_t done; /* free-running total number of completed packets */

	union {
		/* GQI specific stuff */
		struct {
			union gve_tx_desc *desc_ring;
			struct gve_tx_buffer_state *info;

			struct gve_tx_fifo fifo;

			uint32_t mask; /* masks the req and done to the size of the ring */
		};

		/* DQO specific stuff */
		struct {
			struct gve_dma_handle compl_ring_mem;

			/* Accessed when writing descriptors */
			struct {
				union gve_tx_desc_dqo *desc_ring;
				uint32_t desc_mask; /* masks head and tail to the size of desc_ring */
				uint32_t desc_head; /* last desc read by NIC, cached value of hw_tx_head */
				uint32_t desc_tail; /* last desc written by driver */
				uint32_t last_re_idx; /* desc which last had "report event" set */

				/*
				 * The head index of a singly linked list containing pending packet objects
				 * to park mbufs till the NIC sends completions. Once this list is depleted,
				 * the "_prd" suffixed producer list, grown by the completion taskqueue,
				 * is stolen (see the illustrative sketch after this struct).
				 */
				int32_t free_pending_pkts_csm;

				/*
				 * The head index of a singly linked list representing QPL page fragments
				 * to copy mbuf payload into for the NIC to see. Once this list is depleted,
				 * the "_prd" suffixed producer list, grown by the completion taskqueue,
				 * is stolen.
				 *
				 * Only used in QPL mode. int32_t because atomic_swap_16 doesn't exist.
				 */
				int32_t free_qpl_bufs_csm;
				uint32_t qpl_bufs_consumed; /* Allows quickly checking for buf availability */
				uint32_t qpl_bufs_produced_cached; /* Cached value of qpl_bufs_produced */

				/* DMA params for mapping Tx mbufs. Only used in RDA mode. */
				bus_dma_tag_t buf_dmatag;
			} __aligned(CACHE_LINE_SIZE);

			/* Accessed when processing completions */
			struct {
				struct gve_tx_compl_desc_dqo *compl_ring;
				uint32_t compl_mask; /* masks head to the size of compl_ring */
				uint32_t compl_head; /* last completion read by driver */
				uint8_t cur_gen_bit; /* NIC flips a bit on every pass */
				uint32_t hw_tx_head; /* last desc read by NIC */

				/*
				 * The completion taskqueue moves pending-packet objects to this
				 * list after freeing the mbuf. The "_prd" denotes that this is
				 * a producer list. The transmit taskqueue steals this list once
				 * its consumer list, with the "_csm" suffix, is depleted.
				 */
				int32_t free_pending_pkts_prd;

				/*
				 * The completion taskqueue moves the QPL pages corresponding to a
				 * completed packet into this list. It is only used in QPL mode.
				 * The "_prd" denotes that this is a producer list. The transmit
				 * taskqueue steals this list once its consumer list, with the "_csm"
				 * suffix, is depleted.
				 *
				 * Only used in QPL mode. int32_t because atomic_swap_16 doesn't exist.
				 */
				int32_t free_qpl_bufs_prd;
				uint32_t qpl_bufs_produced;
			} __aligned(CACHE_LINE_SIZE);

			/* Accessed by both the completion and xmit loops */
			struct {
				/* completion tags index into this array */
				struct gve_tx_pending_pkt_dqo *pending_pkts;
				uint16_t num_pending_pkts;

				/*
				 * Represents QPL page fragments. An index into this array
				 * always represents the same QPL page fragment. The value
				 * is also an index into this array and serves as a means
				 * to chain buffers into linked lists whose heads are
				 * either free_qpl_bufs_prd or free_qpl_bufs_csm or
				 * a pending packet's qpl_buf_head.
				 */
				int32_t *qpl_bufs;
			} __aligned(CACHE_LINE_SIZE);
		} dqo;
	};
	struct gve_txq_stats stats;
} __aligned(CACHE_LINE_SIZE);
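
/*
 * Illustrative sketch (an assumption; the list-empty sentinel is not defined
 * in this header): how the xmit taskqueue might steal the producer list grown
 * by the completion taskqueue once its own consumer list is depleted. An
 * atomic swap detaches the entire producer chain in one step:
 *
 *	if (tx->dqo.free_pending_pkts_csm == -1)
 *		tx->dqo.free_pending_pkts_csm = (int32_t)atomic_swap_32(
 *		    (uint32_t *)&tx->dqo.free_pending_pkts_prd, (uint32_t)-1);
 */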

enum gve_packet_state {
	/*
	 * Packet does not yet have a dmamap created.
	 * This should always be zero since state is not explicitly initialized.
	 */
	GVE_PACKET_STATE_UNALLOCATED,
	/* Packet has a dmamap and is in free list, available to be allocated. */
	GVE_PACKET_STATE_FREE,
	/* Packet is expecting a regular data completion */
	GVE_PACKET_STATE_PENDING_DATA_COMPL,
};

struct gve_ptype {
	uint8_t l3_type;  /* `gve_l3_type` in gve_adminq.h */
	uint8_t l4_type;  /* `gve_l4_type` in gve_adminq.h */
};

struct gve_ptype_lut {
	struct gve_ptype ptypes[GVE_NUM_PTYPES];
};

struct gve_priv {
	if_t ifp;
	device_t dev;
	struct ifmedia media;

	uint8_t mac[ETHER_ADDR_LEN];

	struct gve_dma_handle aq_mem;

	struct resource *reg_bar; /* BAR0 */
	struct resource *db_bar; /* BAR2 */
	struct resource *msix_table;

	uint32_t mgmt_msix_idx;
	uint32_t rx_copybreak;

	uint16_t num_event_counters;
	uint16_t default_num_queues;
	uint16_t tx_desc_cnt;
	uint16_t rx_desc_cnt;
	uint16_t rx_pages_per_qpl;
	uint64_t max_registered_pages;
	uint64_t num_registered_pages;
	uint32_t supported_features;
	uint16_t max_mtu;

	struct gve_dma_handle counter_array_mem;
	__be32 *counters;
	struct gve_dma_handle irqs_db_mem;
	struct gve_irq_db *irq_db_indices;

	enum gve_queue_format queue_format;
	struct gve_queue_page_list *qpls;
	struct gve_queue_config tx_cfg;
	struct gve_queue_config rx_cfg;
	uint32_t num_queues;

	struct gve_irq *irq_tbl;
	struct gve_tx_ring *tx;
	struct gve_rx_ring *rx;

	struct gve_ptype_lut *ptype_lut_dqo;

	/*
	 * Admin queue - see gve_adminq.h
	 * Since AQ cmds do not run in steady state, 32 bit counters suffice
	 */
	struct gve_adminq_command *adminq;
	vm_paddr_t adminq_bus_addr;
	uint32_t adminq_mask; /* masks prod_cnt to adminq size */
	uint32_t adminq_prod_cnt; /* free-running count of AQ cmds executed */
	uint32_t adminq_cmd_fail; /* free-running count of AQ cmds failed */
	uint32_t adminq_timeouts; /* free-running count of AQ cmd timeouts */
	/* free-running count of each distinct AQ cmd executed */
	uint32_t adminq_describe_device_cnt;
	uint32_t adminq_cfg_device_resources_cnt;
	uint32_t adminq_register_page_list_cnt;
	uint32_t adminq_unregister_page_list_cnt;
	uint32_t adminq_create_tx_queue_cnt;
	uint32_t adminq_create_rx_queue_cnt;
	uint32_t adminq_destroy_tx_queue_cnt;
	uint32_t adminq_destroy_rx_queue_cnt;
	uint32_t adminq_dcfg_device_resources_cnt;
	uint32_t adminq_set_driver_parameter_cnt;
	uint32_t adminq_verify_driver_compatibility_cnt;
	uint32_t adminq_get_ptype_map_cnt;

	uint32_t interface_up_cnt;
	uint32_t interface_down_cnt;
	uint32_t reset_cnt;

	struct task service_task;
	struct taskqueue *service_tq;

	struct gve_state_flags state_flags;
	struct sx gve_iface_lock;
};

static inline bool
gve_get_state_flag(struct gve_priv *priv, int pos)
{
	return (BIT_ISSET(GVE_NUM_STATE_FLAGS, pos, &priv->state_flags));
}

static inline void
gve_set_state_flag(struct gve_priv *priv, int pos)
{
	BIT_SET_ATOMIC(GVE_NUM_STATE_FLAGS, pos, &priv->state_flags);
}

static inline void
gve_clear_state_flag(struct gve_priv *priv, int pos)
{
	BIT_CLR_ATOMIC(GVE_NUM_STATE_FLAGS, pos, &priv->state_flags);
}

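/*
 * Illustrative usage sketch: the service task can test for a requested reset
 * and clear the flag before acting on it:
 *
 *	if (gve_get_state_flag(priv, GVE_STATE_FLAG_DO_RESET)) {
 *		gve_clear_state_flag(priv, GVE_STATE_FLAG_DO_RESET);
 *		... perform the reset under the interface lock ...
 *	}
 */
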
static inline bool
gve_is_gqi(struct gve_priv *priv)
{
	return (priv->queue_format == GVE_GQI_QPL_FORMAT);
}

static inline bool
gve_is_qpl(struct gve_priv *priv)
{
	return (priv->queue_format == GVE_GQI_QPL_FORMAT ||
	    priv->queue_format == GVE_DQO_QPL_FORMAT);
}

/* Defined in gve_main.c */
void gve_schedule_reset(struct gve_priv *priv);

/* Register access functions defined in gve_utils.c */
uint32_t gve_reg_bar_read_4(struct gve_priv *priv, bus_size_t offset);
void gve_reg_bar_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val);
void gve_db_bar_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val);
void gve_db_bar_dqo_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val);

/* QPL (Queue Page List) functions defined in gve_qpl.c */
int gve_alloc_qpls(struct gve_priv *priv);
void gve_free_qpls(struct gve_priv *priv);
int gve_register_qpls(struct gve_priv *priv);
int gve_unregister_qpls(struct gve_priv *priv);
void gve_mextadd_free(struct mbuf *mbuf);

/* TX functions defined in gve_tx.c */
int gve_alloc_tx_rings(struct gve_priv *priv);
void gve_free_tx_rings(struct gve_priv *priv);
int gve_create_tx_rings(struct gve_priv *priv);
int gve_destroy_tx_rings(struct gve_priv *priv);
int gve_tx_intr(void *arg);
int gve_xmit_ifp(if_t ifp, struct mbuf *mbuf);
void gve_qflush(if_t ifp);
void gve_xmit_tq(void *arg, int pending);
void gve_tx_cleanup_tq(void *arg, int pending);

/* TX functions defined in gve_tx_dqo.c */
int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int i);
void gve_tx_free_ring_dqo(struct gve_priv *priv, int i);
void gve_clear_tx_ring_dqo(struct gve_priv *priv, int i);
int gve_tx_intr_dqo(void *arg);
int gve_xmit_dqo(struct gve_tx_ring *tx, struct mbuf **mbuf_ptr);
int gve_xmit_dqo_qpl(struct gve_tx_ring *tx, struct mbuf *mbuf);
void gve_tx_cleanup_tq_dqo(void *arg, int pending);

/* RX functions defined in gve_rx.c */
int gve_alloc_rx_rings(struct gve_priv *priv);
void gve_free_rx_rings(struct gve_priv *priv);
int gve_create_rx_rings(struct gve_priv *priv);
int gve_destroy_rx_rings(struct gve_priv *priv);
int gve_rx_intr(void *arg);
void gve_rx_cleanup_tq(void *arg, int pending);

/* RX functions defined in gve_rx_dqo.c */
int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int i);
void gve_rx_free_ring_dqo(struct gve_priv *priv, int i);
void gve_rx_prefill_buffers_dqo(struct gve_rx_ring *rx);
void gve_clear_rx_ring_dqo(struct gve_priv *priv, int i);
int gve_rx_intr_dqo(void *arg);
void gve_rx_cleanup_tq_dqo(void *arg, int pending);

/* DMA functions defined in gve_utils.c */
int gve_dma_alloc_coherent(struct gve_priv *priv, int size, int align,
    struct gve_dma_handle *dma);
void gve_dma_free_coherent(struct gve_dma_handle *dma);
int gve_dmamap_create(struct gve_priv *priv, int size, int align,
    struct gve_dma_handle *dma);
void gve_dmamap_destroy(struct gve_dma_handle *dma);
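
/*
 * Illustrative usage sketch: allocating and later freeing a small coherent
 * DMA region (the PAGE_SIZE alignment here is only an example):
 *
 *	struct gve_dma_handle dma;
 *
 *	err = gve_dma_alloc_coherent(priv, sizeof(struct gve_queue_resources),
 *	    PAGE_SIZE, &dma);
 *	if (err != 0)
 *		return (err);
 *	...
 *	gve_dma_free_coherent(&dma);
 */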

/* IRQ functions defined in gve_utils.c */
void gve_free_irqs(struct gve_priv *priv);
int gve_alloc_irqs(struct gve_priv *priv);
void gve_unmask_all_queue_irqs(struct gve_priv *priv);
void gve_mask_all_queue_irqs(struct gve_priv *priv);

/* Sysctl functions defined in gve_sysctl.c */
extern bool gve_disable_hw_lro;
extern char gve_queue_format[8];
extern char gve_version[8];
void gve_setup_sysctl(struct gve_priv *priv);
void gve_accum_stats(struct gve_priv *priv, uint64_t *rpackets,
    uint64_t *rbytes, uint64_t *rx_dropped_pkt, uint64_t *tpackets,
    uint64_t *tbytes, uint64_t *tx_dropped_pkt);

/* Stats functions defined in gve_utils.c */
void gve_alloc_counters(counter_u64_t *stat, int num_stats);
void gve_free_counters(counter_u64_t *stat, int num_stats);

#endif /* _GVE_FBSD_H */