/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
 * Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2024 Google LLC
 */

#ifndef _GVE_H_
#define _GVE_H_

#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/ethtool_netlink.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/u64_stats_sync.h>
#include <net/page_pool/helpers.h>
#include <net/xdp.h>

#include "gve_desc.h"
#include "gve_desc_dqo.h"

#ifndef PCI_VENDOR_ID_GOOGLE
#define PCI_VENDOR_ID_GOOGLE	0x1ae0
#endif

#define PCI_DEV_ID_GVNIC	0x0042

#define GVE_REGISTER_BAR	0
#define GVE_DOORBELL_BAR	2

/* Driver can alloc up to 2 segments for the header and 2 for the payload. */
#define GVE_TX_MAX_IOVEC	4
/* 1 for management, 1 for rx, 1 for tx */
#define GVE_MIN_MSIX 3

/* Numbers of gve tx/rx stats in stats report. */
#define GVE_TX_STATS_REPORT_NUM	6
#define GVE_RX_STATS_REPORT_NUM	2

/* Interval to schedule a stats report update, 20000ms. */
#define GVE_STATS_REPORT_TIMER_PERIOD	20000

/* Numbers of NIC tx/rx stats in stats report. */
#define NIC_TX_STATS_REPORT_NUM	0
#define NIC_RX_STATS_REPORT_NUM	4

#define GVE_ADMINQ_BUFFER_SIZE 4096

#define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))

/* PTYPEs are always 10 bits. */
#define GVE_NUM_PTYPES	1024

/* Default minimum ring size */
#define GVE_DEFAULT_MIN_TX_RING_SIZE 256
#define GVE_DEFAULT_MIN_RX_RING_SIZE 512

#define GVE_DEFAULT_RX_BUFFER_SIZE 2048

#define GVE_MAX_RX_BUFFER_SIZE 4096

#define GVE_DEFAULT_RX_BUFFER_OFFSET 2048

#define GVE_PAGE_POOL_SIZE_MULTIPLIER 4

#define GVE_FLOW_RULES_CACHE_SIZE \
	(GVE_ADMINQ_BUFFER_SIZE / sizeof(struct gve_adminq_queried_flow_rule))
#define GVE_FLOW_RULE_IDS_CACHE_SIZE \
	(GVE_ADMINQ_BUFFER_SIZE / sizeof(((struct gve_adminq_queried_flow_rule *)0)->location))
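/* Note: the ((type *)0)->member construct above is the classic open-coded
 * form of the kernel's sizeof_field() macro; it sizes the `location` member
 * without needing an object of the struct.
 */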

#define GVE_RSS_KEY_SIZE	40
#define GVE_RSS_INDIR_SIZE	128

#define GVE_XDP_ACTIONS 5

#define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182

#define GVE_DEFAULT_HEADER_BUFFER_SIZE 128

#define DQO_QPL_DEFAULT_TX_PAGES 512

/* Maximum TSO size supported on DQO */
#define GVE_DQO_TX_MAX	0x3FFFF

#define GVE_TX_BUF_SHIFT_DQO 11

/* 2K buffers for DQO-QPL */
#define GVE_TX_BUF_SIZE_DQO BIT(GVE_TX_BUF_SHIFT_DQO)
#define GVE_TX_BUFS_PER_PAGE_DQO (PAGE_SIZE >> GVE_TX_BUF_SHIFT_DQO)
#define GVE_MAX_TX_BUFS_PER_PKT (DIV_ROUND_UP(GVE_DQO_TX_MAX, GVE_TX_BUF_SIZE_DQO))
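/* Worked example (illustration only, assuming 4K pages): GVE_TX_BUF_SIZE_DQO
 * = BIT(11) = 2048, so GVE_TX_BUFS_PER_PAGE_DQO = 4096 >> 11 = 2 bounce
 * buffers per QPL page, and GVE_MAX_TX_BUFS_PER_PKT =
 * DIV_ROUND_UP(0x3FFFF, 2048) = 128 buffers for a maximally-sized TSO packet.
 */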

/* If the number of free/recyclable buffers is less than this threshold, the
 * driver allocs and uses a non-qpl page on the receive path of DQO QPL to
 * free up buffers.
 * The value is big enough to post at least 3 64K LRO packets via 2K buffers
 * to the NIC.
 */
#define GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD 96

/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
	struct gve_rx_desc *desc_ring; /* the descriptor ring */
	dma_addr_t bus; /* the bus for the desc_ring */
	u8 seqno; /* the next expected seqno for this desc */
};
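
/* Illustrative sketch, not part of the driver API: the GQI sequence number
 * is assumed here to be a 3-bit counter that skips zero on wrap, so the next
 * expected seqno would be derived roughly as below (compare the seqno helper
 * in gve_rx.c).
 */
static inline u8 gve_rx_next_seqno_example(u8 seqno)
{
	return (seqno + 1) == 8 ? 1 : seqno + 1;
}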

/* The page info for a single slot in the RX data queue */
struct gve_rx_slot_page_info {
	/* netmem is used for DQO RDA mode
	 * page is used in all other modes
	 */
	union {
		struct page *page;
		netmem_ref netmem;
	};
	void *page_address;
	u32 page_offset; /* offset to write to in page */
	unsigned int buf_size;
	int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
	u16 pad; /* adjustment for rx padding */
	u8 can_flip; /* tracks if the networking stack is using the page */
};
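
/* Illustrative sketch, not part of the driver API: pagecnt_bias lets the
 * driver tell whether the networking stack still holds references to a page.
 * Any refcount above the bias is assumed to belong to the stack (compare
 * gve_buf_ref_cnt(), declared later in this header).
 */
static inline int gve_slot_stack_refs_example(const struct gve_rx_slot_page_info *pi)
{
	return page_count(pi->page) - pi->pagecnt_bias;
}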

/* A list of pages registered with the device during setup and used by a queue
 * as buffers
 */
struct gve_queue_page_list {
	u32 id; /* unique id */
	u32 num_entries;
	struct page **pages; /* list of num_entries pages */
	dma_addr_t *page_buses; /* the dma addrs of the pages */
};

/* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */
struct gve_rx_data_queue {
	union gve_rx_data_slot *data_ring; /* read by NIC */
	dma_addr_t data_bus; /* dma mapping of the slots */
	struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
	struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
	u8 raw_addressing; /* use raw_addressing? */
};

struct gve_priv;

/* RX buffer queue for posting buffers to HW.
 * Each RX (completion) queue has a corresponding buffer queue.
 */
struct gve_rx_buf_queue_dqo {
	struct gve_rx_desc_dqo *desc_ring;
	dma_addr_t bus;
	u32 head; /* Pointer to start cleaning buffers at. */
	u32 tail; /* Last posted buffer index + 1 */
	u32 mask; /* Mask for indices to the size of the ring */
};

/* RX completion queue to receive packets from HW. */
struct gve_rx_compl_queue_dqo {
	struct gve_rx_compl_desc_dqo *desc_ring;
	dma_addr_t bus;

	/* Number of slots which did not have a buffer posted yet. We should not
	 * post more buffers than the queue size to avoid HW overrunning the
	 * queue.
	 */
	int num_free_slots;

	/* HW uses a "generation bit" to notify SW of new descriptors. When a
	 * descriptor's generation bit is different from the current generation,
	 * that descriptor is ready to be consumed by SW.
	 */
	u8 cur_gen_bit;

	/* Pointer into desc_ring where the next completion descriptor will be
	 * received.
	 */
	u32 head;
	u32 mask; /* Mask for indices to the size of the ring */
};
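
/* Illustrative sketch, not part of the driver API: once the descriptor at
 * `head` has been consumed, the read pointer advances; when it wraps past the
 * end of the ring, the generation the HW writes flips, so the expected bit
 * flips too. A minimal sketch of that bookkeeping:
 */
static inline void gve_rx_complq_advance_example(struct gve_rx_compl_queue_dqo *complq)
{
	complq->head = (complq->head + 1) & complq->mask;
	if (complq->head == 0)
		complq->cur_gen_bit ^= 1;
}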

struct gve_header_buf {
	u8 *data;
	dma_addr_t addr;
};

/* Stores state for tracking buffers posted to HW */
struct gve_rx_buf_state_dqo {
	/* The page posted to HW. */
	struct gve_rx_slot_page_info page_info;

	/* The DMA address corresponding to `page_info`. */
	dma_addr_t addr;

	/* Last offset into the page when it only had a single reference, at
	 * which point every other offset is free to be reused.
	 */
	u32 last_single_ref_offset;

	/* Linked list index to next element in the list, or -1 if none */
	s16 next;
};

/* `head` and `tail` are indices into an array, or -1 if empty. */
struct gve_index_list {
	s16 head;
	s16 tail;
};
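
/* Illustrative sketch, not part of the driver API: gve_index_list implements
 * an intrusive FIFO over an array of buffer states, using the s16 `next`
 * links above instead of pointers. Popping the head looks roughly like this
 * (compare gve_dequeue_buf_state(), declared later in this header):
 */
static inline s16 gve_index_list_pop_example(struct gve_index_list *list,
					     struct gve_rx_buf_state_dqo *buf_states)
{
	s16 idx = list->head;

	if (idx == -1)
		return -1;
	list->head = buf_states[idx].next;
	if (list->head == -1)
		list->tail = -1;
	return idx;
}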

/* A single received packet split across multiple buffers may be
 * reconstructed using the information in this structure.
 */
struct gve_rx_ctx {
	/* head and tail of skb chain for the current packet or NULL if none */
	struct sk_buff *skb_head;
	struct sk_buff *skb_tail;
	u32 total_size;
	u8 frag_cnt;
	bool drop_pkt;
};

struct gve_rx_cnts {
	u32 ok_pkt_bytes;
	u16 ok_pkt_cnt;
	u16 total_pkt_cnt;
	u16 cont_pkt_cnt;
	u16 desc_err_pkt_cnt;
};

/* Contains datapath state used to represent an RX queue. */
struct gve_rx_ring {
	struct gve_priv *gve;
	union {
		/* GQI fields */
		struct {
			struct gve_rx_desc_queue desc;
			struct gve_rx_data_queue data;

			/* threshold for posting new buffs and descs */
			u32 db_threshold;
			u16 packet_buffer_size;

			u32 qpl_copy_pool_mask;
			u32 qpl_copy_pool_head;
			struct gve_rx_slot_page_info *qpl_copy_pool;
		};

		/* DQO fields. */
		struct {
			struct gve_rx_buf_queue_dqo bufq;
			struct gve_rx_compl_queue_dqo complq;

			struct gve_rx_buf_state_dqo *buf_states;
			u16 num_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Index into
			 * buf_states, or -1 if empty.
			 */
			s16 free_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indexes into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which are pointing to
			 * valid buffers.
			 *
			 * We use a FIFO here in order to increase the
			 * probability that buffers can be reused by increasing
			 * the time between usages.
			 */
			struct gve_index_list recycled_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indexes into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which have buffers
			 * which cannot be reused yet.
			 */
			struct gve_index_list used_buf_states;

			/* qpl assigned to this queue */
			struct gve_queue_page_list *qpl;

			/* index into queue page list */
			u32 next_qpl_page_idx;

			/* track number of used buffers */
			u16 used_buf_states_cnt;

			/* Address info of the buffers for header-split */
			struct gve_header_buf hdr_bufs;

			struct page_pool *page_pool;
		} dqo;
	};

	u64 rbytes; /* free-running bytes received */
	u64 rx_hsplit_bytes; /* free-running header bytes received */
	u64 rpackets; /* free-running packets received */
	u32 cnt; /* free-running total number of completed packets */
	u32 fill_cnt; /* free-running total number of descs and buffs posted */
	u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
	u64 rx_hsplit_pkt; /* free-running packets with headers split */
	u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
	u64 rx_copied_pkt; /* free-running total number of copied packets */
	u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
	u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
	u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
	/* free-running count of packets left unsplit due to header buffer
	 * overflow or a zero hdr_len
	 */
	u64 rx_hsplit_unsplit_pkt;
	u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
	u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
	u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */
	u64 rx_frag_alloc_cnt; /* free-running count of rx page allocations */
	u64 xdp_tx_errors;
	u64 xdp_redirect_errors;
	u64 xdp_alloc_fails;
	u64 xdp_actions[GVE_XDP_ACTIONS];
	u32 q_num; /* queue index */
	u32 ntfy_id; /* notification block index */
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	dma_addr_t q_resources_bus; /* dma address for the queue resources */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */

	struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. */

	/* XDP stuff */
	struct xdp_rxq_info xdp_rxq;
	struct xdp_rxq_info xsk_rxq;
	struct xsk_buff_pool *xsk_pool;
	struct page_frag_cache page_cache; /* Page cache to allocate XDP frames */
};
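
/* Usage note (illustration only): the free-running stats above are written
 * under `statss` so that 32-bit readers see consistent 64-bit values, e.g.:
 *
 *	u64_stats_update_begin(&rx->statss);
 *	rx->rpackets++;
 *	rx->rbytes += len;
 *	u64_stats_update_end(&rx->statss);
 */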

/* A TX desc ring entry */
union gve_tx_desc {
	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
	struct gve_tx_mtd_desc mtd; /* optional metadata descriptor */
	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
};

/* Tracks the memory in the fifo occupied by a segment of a packet */
struct gve_tx_iovec {
	u32 iov_offset; /* offset into this segment */
	u32 iov_len; /* length */
	u32 iov_padding; /* padding associated with this segment */
};

/* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
 * ring entry but only used for a pkt_desc not a seg_desc
 */
struct gve_tx_buffer_state {
	union {
		struct sk_buff *skb; /* skb for this pkt */
		struct xdp_frame *xdp_frame; /* xdp_frame */
	};
	struct {
		u16 size; /* size of xmitted xdp pkt */
		u8 is_xsk; /* xsk buff */
	} xdp;
	union {
		struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
		struct {
			DEFINE_DMA_UNMAP_ADDR(dma);
			DEFINE_DMA_UNMAP_LEN(len);
		};
	};
};

/* A TX buffer - each queue has one */
struct gve_tx_fifo {
	void *base; /* address of base of FIFO */
	u32 size; /* total size */
	atomic_t available; /* how much space is still available */
	u32 head; /* offset to write at */
	struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */
};

/* TX descriptor for DQO format */
union gve_tx_desc_dqo {
	struct gve_tx_pkt_desc_dqo pkt;
	struct gve_tx_tso_context_desc_dqo tso_ctx;
	struct gve_tx_general_context_desc_dqo general_ctx;
};

enum gve_packet_state {
	/* Packet is in free list, available to be allocated.
	 * This should always be zero since state is not explicitly initialized.
	 */
	GVE_PACKET_STATE_UNALLOCATED,
	/* Packet is expecting a regular data completion or miss completion */
	GVE_PACKET_STATE_PENDING_DATA_COMPL,
	/* Packet has received a miss completion and is expecting a
	 * re-injection completion.
	 */
	GVE_PACKET_STATE_PENDING_REINJECT_COMPL,
	/* No valid completion received within the specified timeout. */
	GVE_PACKET_STATE_TIMED_OUT_COMPL,
};

struct gve_tx_pending_packet_dqo {
	struct sk_buff *skb; /* skb for this packet */

	/* 0th element corresponds to the linear portion of `skb`, should be
	 * unmapped with `dma_unmap_single`.
	 *
	 * All others correspond to `skb`'s frags and should be unmapped with
	 * `dma_unmap_page`.
	 */
	union {
		struct {
			DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
			DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
		};
		s16 tx_qpl_buf_ids[GVE_MAX_TX_BUFS_PER_PKT];
	};

	u16 num_bufs;

	/* Linked list index to next element in the list, or -1 if none */
	s16 next;

	/* Linked list index to prev element in the list, or -1 if none.
	 * Used for tracking either outstanding miss completions or prematurely
	 * freed packets.
	 */
	s16 prev;

	/* Identifies the current state of the packet as defined in
	 * `enum gve_packet_state`.
	 */
	u8 state;

	/* If packet is an outstanding miss completion, then the packet is
	 * freed if the corresponding re-injection completion is not received
	 * before kernel jiffies exceeds timeout_jiffies.
	 */
	unsigned long timeout_jiffies;
};
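
/* Illustrative sketch, not part of the driver API: given the state machine
 * above, deciding whether an outstanding miss completion has expired is a
 * jiffies comparison along these lines:
 */
static inline bool gve_pending_pkt_expired_example(const struct gve_tx_pending_packet_dqo *pkt)
{
	return pkt->state == GVE_PACKET_STATE_PENDING_REINJECT_COMPL &&
	       time_after(jiffies, pkt->timeout_jiffies);
}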

/* Contains datapath state used to represent a TX queue. */
struct gve_tx_ring {
	/* Cacheline 0 -- Accessed & dirtied during transmit */
	union {
		/* GQI fields */
		struct {
			struct gve_tx_fifo tx_fifo;
			u32 req; /* driver tracked head pointer */
			u32 done; /* driver tracked tail pointer */
		};

		/* DQO fields. */
		struct {
			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is a consumer list owned by the TX path. When it
			 * runs out, the producer list is stolen from the
			 * completion handling path
			 * (dqo_compl.free_pending_packets).
			 */
			s16 free_pending_packets;

			/* Cached value of `dqo_compl.hw_tx_head` */
			u32 head;
			u32 tail; /* Last posted buffer index + 1 */

			/* Index of the last descriptor with "report event" bit
			 * set.
			 */
			u32 last_re_idx;

			/* free running number of packet buf descriptors posted */
			u16 posted_packet_desc_cnt;
			/* free running number of packet buf descriptors completed */
			u16 completed_packet_desc_cnt;

			/* QPL fields */
			struct {
				/* Linked list of gve_tx_buf_dqo. Index into
				 * tx_qpl_buf_next, or -1 if empty.
				 *
				 * This is a consumer list owned by the TX path. When it
				 * runs out, the producer list is stolen from the
				 * completion handling path
				 * (dqo_compl.free_tx_qpl_buf_head).
				 */
				s16 free_tx_qpl_buf_head;

				/* Free running count of the number of QPL tx buffers
				 * allocated
				 */
				u32 alloc_tx_qpl_buf_cnt;

				/* Cached value of `dqo_compl.free_tx_qpl_buf_cnt` */
				u32 free_tx_qpl_buf_cnt;
			};
		} dqo_tx;
	};

	/* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */
	union {
		/* GQI fields */
		struct {
			/* Spinlock held while cleanup is in progress */
			spinlock_t clean_lock;
			/* Spinlock for XDP tx traffic */
			spinlock_t xdp_lock;
		};

		/* DQO fields. */
		struct {
			u32 head; /* Last read on compl_desc */

			/* Tracks the current gen bit of compl_q */
			u8 cur_gen_bit;

			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is the producer list, owned by the completion
			 * handling path. When the consumer list
			 * (dqo_tx.free_pending_packets) runs out, this list
			 * will be stolen.
			 */
			atomic_t free_pending_packets;

			/* Last TX ring index fetched by HW */
			atomic_t hw_tx_head;

			/* List to track pending packets which received a miss
			 * completion but not a corresponding reinjection.
			 */
			struct gve_index_list miss_completions;

			/* List to track pending packets that were completed
			 * before receiving a valid completion because they
			 * reached a specified timeout.
			 */
			struct gve_index_list timed_out_completions;

			/* QPL fields */
			struct {
				/* Linked list of gve_tx_buf_dqo. Index into
				 * tx_qpl_buf_next, or -1 if empty.
				 *
				 * This is the producer list, owned by the completion
				 * handling path. When the consumer list
				 * (dqo_tx.free_tx_qpl_buf_head) runs out, this list
				 * will be stolen.
				 */
				atomic_t free_tx_qpl_buf_head;

				/* Free running count of the number of tx buffers
				 * freed
				 */
				atomic_t free_tx_qpl_buf_cnt;
			};
		} dqo_compl;
	} ____cacheline_aligned;
	u64 pkt_done; /* free-running - total packets completed */
	u64 bytes_done; /* free-running - total bytes completed */
	u64 dropped_pkt; /* free-running - total packets dropped */
	u64 dma_mapping_error; /* count of dma mapping errors */

	/* Cacheline 2 -- Read-mostly fields */
	union {
		/* GQI fields */
		struct {
			union gve_tx_desc *desc;

			/* Maps 1:1 to a desc */
			struct gve_tx_buffer_state *info;
		};

		/* DQO fields. */
		struct {
			union gve_tx_desc_dqo *tx_ring;
			struct gve_tx_compl_desc *compl_ring;

			struct gve_tx_pending_packet_dqo *pending_packets;
			s16 num_pending_packets;

			u32 complq_mask; /* complq size is complq_mask + 1 */

			/* QPL fields */
			struct {
				/* qpl assigned to this queue */
				struct gve_queue_page_list *qpl;

				/* Each QPL page is divided into TX bounce buffers
				 * of size GVE_TX_BUF_SIZE_DQO. tx_qpl_buf_next is
				 * an array of singly-linked lists over those
				 * buffers: an entry with value j at index i means
				 * buffer j is next on the list after buffer i.
				 */
				s16 *tx_qpl_buf_next;
				u32 num_tx_qpl_bufs;
			};
		} dqo;
	} ____cacheline_aligned;
	struct netdev_queue *netdev_txq;
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	struct device *dev;
	u32 mask; /* masks req and done down to queue size */
	u8 raw_addressing; /* use raw_addressing? */

	/* Slow-path fields */
	u32 q_num ____cacheline_aligned; /* queue idx */
	u32 stop_queue; /* count of queue stops */
	u32 wake_queue; /* count of queue wakes */
	u32 queue_timeout; /* count of queue timeouts */
	u32 ntfy_id; /* notification block index */
	u32 last_kick_msec; /* Last time the queue was kicked */
	dma_addr_t bus; /* dma address of the descr ring */
	dma_addr_t q_resources_bus; /* dma address of the queue resources */
	dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */
	struct xsk_buff_pool *xsk_pool;
	u32 xdp_xsk_wakeup;
	u32 xdp_xsk_done;
	u64 xdp_xsk_sent;
	u64 xdp_xmit;
	u64 xdp_xmit_errors;
} ____cacheline_aligned;

/* Wraps the info for one irq including the napi struct and the queues
 * associated with that irq.
 */
struct gve_notify_block {
	__be32 *irq_db_index; /* pointer to idx into Bar2 */
	char name[IFNAMSIZ + 16]; /* name registered with the kernel */
	struct napi_struct napi; /* kernel napi struct for this block */
	struct gve_priv *priv;
	struct gve_tx_ring *tx; /* tx rings on this block */
	struct gve_rx_ring *rx; /* rx rings on this block */
	u32 irq;
};

/* Tracks allowed and current queue settings */
struct gve_queue_config {
	u16 max_queues;
	u16 num_queues; /* current */
};

/* Tracks the available and used qpl IDs */
struct gve_qpl_config {
	u32 qpl_map_size; /* map memory size */
	unsigned long *qpl_id_map; /* bitmap of used qpl ids */
};

struct gve_irq_db {
	__be32 index;
} ____cacheline_aligned;

struct gve_ptype {
	u8 l3_type;  /* `gve_l3_type` in gve_adminq.h */
	u8 l4_type;  /* `gve_l4_type` in gve_adminq.h */
};

struct gve_ptype_lut {
	struct gve_ptype ptypes[GVE_NUM_PTYPES];
};

/* Parameters for allocating resources for tx queues */
struct gve_tx_alloc_rings_cfg {
	struct gve_queue_config *qcfg;

	u16 ring_size;
	u16 start_idx;
	u16 num_rings;
	bool raw_addressing;

	/* Allocated resources are returned here */
	struct gve_tx_ring *tx;
};

/* Parameters for allocating resources for rx queues */
struct gve_rx_alloc_rings_cfg {
	/* tx config is also needed to determine QPL ids */
	struct gve_queue_config *qcfg;
	struct gve_queue_config *qcfg_tx;

	u16 ring_size;
	u16 packet_buffer_size;
	bool raw_addressing;
	bool enable_header_split;
	bool reset_rss;

	/* Allocated resources are returned here */
	struct gve_rx_ring *rx;
};

/* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
 * when the entire configure_device_resources command is zeroed out and the
 * queue_format is not specified.
 */
enum gve_queue_format {
	GVE_QUEUE_FORMAT_UNSPECIFIED	= 0x0,
	GVE_GQI_RDA_FORMAT		= 0x1,
	GVE_GQI_QPL_FORMAT		= 0x2,
	GVE_DQO_RDA_FORMAT		= 0x3,
	GVE_DQO_QPL_FORMAT		= 0x4,
};

struct gve_flow_spec {
	__be32 src_ip[4];
	__be32 dst_ip[4];
	union {
		struct {
			__be16 src_port;
			__be16 dst_port;
		};
		__be32 spi;
	};
	union {
		u8 tos;
		u8 tclass;
	};
};

struct gve_flow_rule {
	u32 location;
	u16 flow_type;
	u16 action;
	struct gve_flow_spec key;
	struct gve_flow_spec mask;
};

struct gve_flow_rules_cache {
	bool rules_cache_synced; /* False if the driver's rules_cache is outdated */
	struct gve_adminq_queried_flow_rule *rules_cache;
	__be32 *rule_ids_cache;
	/* The total number of queried rules that are stored in the caches */
	u32 rules_cache_num;
	u32 rule_ids_cache_num;
};

struct gve_rss_config {
	u8 *hash_key;
	u32 *hash_lut;
};

struct gve_priv {
	struct net_device *dev;
	struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
	struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
	struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
	struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */
	dma_addr_t irq_db_indices_bus;
	struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */
	char mgmt_msix_name[IFNAMSIZ + 16];
	u32 mgmt_msix_idx;
	__be32 *counter_array; /* array of num_event_counters */
	dma_addr_t counter_array_bus;

	u16 num_event_counters;
	u16 tx_desc_cnt; /* num desc per ring */
	u16 rx_desc_cnt; /* num desc per ring */
	u16 max_tx_desc_cnt;
	u16 max_rx_desc_cnt;
	u16 min_tx_desc_cnt;
	u16 min_rx_desc_cnt;
	bool modify_ring_size_enabled;
	bool default_min_ring_size;
	u16 tx_pages_per_qpl; /* NIC-suggested number of pages per qpl for TX queues */
	u64 max_registered_pages;
	u64 num_registered_pages; /* num pages registered with NIC */
	struct bpf_prog *xdp_prog; /* XDP BPF program */
	u32 rx_copybreak; /* copy packets smaller than this */
	u16 default_num_queues; /* default num queues to set up */

	u16 num_xdp_queues;
	struct gve_queue_config tx_cfg;
	struct gve_queue_config rx_cfg;
	u32 num_ntfy_blks; /* split between TX and RX so must be even */

	struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
	__be32 __iomem *db_bar2; /* "array" of doorbells */
	u32 msg_enable;	/* level for netif* netdev print macros */
	struct pci_dev *pdev;

	/* metrics */
	u32 tx_timeo_cnt;

	/* Admin queue - see gve_adminq.h */
	union gve_adminq_command *adminq;
	dma_addr_t adminq_bus_addr;
	struct dma_pool *adminq_pool;
	struct mutex adminq_lock; /* Protects adminq command execution */
	u32 adminq_mask; /* masks prod_cnt to adminq size */
	u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
	u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
	u32 adminq_timeouts; /* free-running count of AQ cmd timeouts */
	/* free-running counts of each AQ cmd executed */
	u32 adminq_describe_device_cnt;
	u32 adminq_cfg_device_resources_cnt;
	u32 adminq_register_page_list_cnt;
	u32 adminq_unregister_page_list_cnt;
	u32 adminq_create_tx_queue_cnt;
	u32 adminq_create_rx_queue_cnt;
	u32 adminq_destroy_tx_queue_cnt;
	u32 adminq_destroy_rx_queue_cnt;
	u32 adminq_dcfg_device_resources_cnt;
	u32 adminq_set_driver_parameter_cnt;
	u32 adminq_report_stats_cnt;
	u32 adminq_report_link_speed_cnt;
	u32 adminq_get_ptype_map_cnt;
	u32 adminq_verify_driver_compatibility_cnt;
	u32 adminq_query_flow_rules_cnt;
	u32 adminq_cfg_flow_rule_cnt;
	u32 adminq_cfg_rss_cnt;
	u32 adminq_query_rss_cnt;

	/* Global stats */
	u32 interface_up_cnt; /* count of times interface turned up since last reset */
	u32 interface_down_cnt; /* count of times interface turned down since last reset */
	u32 reset_cnt; /* count of resets */
	u32 page_alloc_fail; /* count of page alloc fails */
	u32 dma_mapping_error; /* count of dma mapping errors */
	u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
	u32 suspend_cnt; /* count of times suspended */
	u32 resume_cnt; /* count of times resumed */
	struct workqueue_struct *gve_wq;
	struct work_struct service_task;
	struct work_struct stats_report_task;
	unsigned long service_task_flags;
	unsigned long state_flags;

	struct gve_stats_report *stats_report;
	u64 stats_report_len;
	dma_addr_t stats_report_bus; /* dma address for the stats report */
	unsigned long ethtool_flags;

	unsigned long stats_report_timer_period;
	struct timer_list stats_report_timer;

	/* Gvnic device link speed from hypervisor. */
	u64 link_speed;
	bool up_before_suspend; /* True if dev was up before suspend */

	struct gve_ptype_lut *ptype_lut_dqo;

	/* Must be a power of two. */
	u16 data_buffer_size_dqo;
	u16 max_rx_buffer_size; /* device limit */

	enum gve_queue_format queue_format;

	/* Interrupt coalescing settings */
	u32 tx_coalesce_usecs;
	u32 rx_coalesce_usecs;

	u16 header_buf_size; /* device configured, header-split supported if non-zero */
	bool header_split_enabled; /* True if the header split is enabled by the user */

	u32 max_flow_rules;
	u32 num_flow_rules;

	struct gve_flow_rules_cache flow_rules_cache;

	u16 rss_key_size;
	u16 rss_lut_size;
	bool cache_rss_config;
	struct gve_rss_config rss_config;
};

enum gve_service_task_flags_bit {
	GVE_PRIV_FLAGS_DO_RESET			= 1,
	GVE_PRIV_FLAGS_RESET_IN_PROGRESS	= 2,
	GVE_PRIV_FLAGS_PROBE_IN_PROGRESS	= 3,
	GVE_PRIV_FLAGS_DO_REPORT_STATS		= 4,
};

enum gve_state_flags_bit {
	GVE_PRIV_FLAGS_ADMIN_QUEUE_OK		= 1,
	GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK	= 2,
	GVE_PRIV_FLAGS_DEVICE_RINGS_OK		= 3,
	GVE_PRIV_FLAGS_NAPI_ENABLED		= 4,
};

enum gve_ethtool_flags_bit {
	GVE_PRIV_FLAGS_REPORT_STATS		= 0,
};

static inline bool gve_get_do_reset(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_set_do_reset(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_clear_do_reset(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline bool gve_get_reset_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_reset_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_reset_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_probe_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_probe_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_do_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS,
			&priv->service_task_flags);
}

static inline void gve_set_do_report_stats(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline void gve_clear_do_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_set_admin_queue_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_clear_admin_queue_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline bool gve_get_device_resources_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_set_device_resources_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_clear_device_resources_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline bool gve_get_device_rings_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_set_device_rings_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_clear_device_rings_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline bool gve_get_napi_enabled(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_set_napi_enabled(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_clear_napi_enabled(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline bool gve_get_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

static inline void gve_clear_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

/* Returns the address of the ntfy_block's irq doorbell */
static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
					       struct gve_notify_block *block)
{
	return &priv->db_bar2[be32_to_cpu(*block->irq_db_index)];
}

/* Returns the index into ntfy_blocks of the given tx ring's block */
static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return queue_idx;
}

/* Returns the index into ntfy_blocks of the given rx ring's block */
static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return (priv->num_ntfy_blks / 2) + queue_idx;
}
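
/* Illustrative sketch, not part of the driver API: resolving the irq doorbell
 * address for a given rx queue by chaining the helpers above. TX queues work
 * the same way via gve_tx_idx_to_ntfy().
 */
static inline __be32 __iomem *gve_rx_irq_doorbell_example(struct gve_priv *priv,
							  u32 rx_qid)
{
	struct gve_notify_block *block =
		&priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, rx_qid)];

	return gve_irq_doorbell(priv, block);
}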

static inline bool gve_is_qpl(struct gve_priv *priv)
{
	return priv->queue_format == GVE_GQI_QPL_FORMAT ||
		priv->queue_format == GVE_DQO_QPL_FORMAT;
}

/* Returns the number of tx queue page lists */
static inline u32 gve_num_tx_qpls(const struct gve_queue_config *tx_cfg,
				  int num_xdp_queues,
				  bool is_qpl)
{
	if (!is_qpl)
		return 0;
	return tx_cfg->num_queues + num_xdp_queues;
}

/* Returns the number of XDP tx queue page lists */
static inline u32 gve_num_xdp_qpls(struct gve_priv *priv)
{
	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
		return 0;

	return priv->num_xdp_queues;
}

/* Returns the number of rx queue page lists */
static inline u32 gve_num_rx_qpls(const struct gve_queue_config *rx_cfg,
				  bool is_qpl)
{
	if (!is_qpl)
		return 0;
	return rx_cfg->num_queues;
}

static inline u32 gve_tx_qpl_id(struct gve_priv *priv, int tx_qid)
{
	return tx_qid;
}

static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
{
	return priv->tx_cfg.max_queues + rx_qid;
}

static inline u32 gve_get_rx_qpl_id(const struct gve_queue_config *tx_cfg, int rx_qid)
{
	return tx_cfg->max_queues + rx_qid;
}

static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
{
	return gve_tx_qpl_id(priv, 0);
}

static inline u32 gve_rx_start_qpl_id(const struct gve_queue_config *tx_cfg)
{
	return gve_get_rx_qpl_id(tx_cfg, 0);
}

static inline u32 gve_get_rx_pages_per_qpl_dqo(u32 rx_desc_cnt)
{
	/* For DQO, the page count should exceed the ring size to absorb
	 * out-of-order completions, so use twice the ring size.
	 */
	return 2 * rx_desc_cnt;
}

/* Returns the correct dma direction for tx and rx qpls */
static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
						      int id)
{
	if (id < gve_rx_start_qpl_id(&priv->tx_cfg))
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}
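
/* Illustrative sketch, not part of the driver API: QPL ids below the rx start
 * id belong to TX queues, so gve_qpl_dma_dir() picks the direction a QPL's
 * pages would be mapped with, roughly as below. In the driver proper the
 * mapping happens when the QPL is allocated and registered.
 */
static inline int gve_map_qpl_page_example(struct gve_priv *priv,
					   struct gve_queue_page_list *qpl,
					   int page_idx)
{
	enum dma_data_direction dir = gve_qpl_dma_dir(priv, qpl->id);

	qpl->page_buses[page_idx] = dma_map_page(&priv->pdev->dev,
						 qpl->pages[page_idx], 0,
						 PAGE_SIZE, dir);
	return dma_mapping_error(&priv->pdev->dev, qpl->page_buses[page_idx]);
}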

static inline bool gve_is_gqi(struct gve_priv *priv)
{
	return priv->queue_format == GVE_GQI_RDA_FORMAT ||
		priv->queue_format == GVE_GQI_QPL_FORMAT;
}

static inline u32 gve_num_tx_queues(struct gve_priv *priv)
{
	return priv->tx_cfg.num_queues + priv->num_xdp_queues;
}

static inline u32 gve_xdp_tx_queue_id(struct gve_priv *priv, u32 queue_id)
{
	return priv->tx_cfg.num_queues + queue_id;
}

static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv)
{
	return gve_xdp_tx_queue_id(priv, 0);
}

static inline bool gve_supports_xdp_xmit(struct gve_priv *priv)
{
	switch (priv->queue_format) {
	case GVE_GQI_QPL_FORMAT:
		return true;
	default:
		return false;
	}
}

/* gqi napi handler defined in gve_main.c */
int gve_napi_poll(struct napi_struct *napi, int budget);

/* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
		   struct page **page, dma_addr_t *dma,
		   enum dma_data_direction, gfp_t gfp_flags);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
		   enum dma_data_direction);
/* qpls */
struct gve_queue_page_list *gve_alloc_queue_page_list(struct gve_priv *priv,
						      u32 id, int pages);
void gve_free_queue_page_list(struct gve_priv *priv,
			      struct gve_queue_page_list *qpl,
			      u32 id);
/* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		 u32 flags);
int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
		     void *data, int len, void *frame_p);
void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
bool gve_xdp_poll(struct gve_notify_block *block, int budget);
int gve_xsk_tx_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
			   struct gve_tx_alloc_rings_cfg *cfg);
void gve_tx_free_rings_gqi(struct gve_priv *priv,
			   struct gve_tx_alloc_rings_cfg *cfg);
void gve_tx_start_ring_gqi(struct gve_priv *priv, int idx);
void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx);
u32 gve_tx_load_event_counter(struct gve_priv *priv,
			      struct gve_tx_ring *tx);
bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
/* rx handling */
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
int gve_rx_poll(struct gve_notify_block *block, int budget);
bool gve_rx_work_pending(struct gve_rx_ring *rx);
int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
			  struct gve_rx_alloc_rings_cfg *cfg,
			  struct gve_rx_ring *rx,
			  int idx);
void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
			  struct gve_rx_alloc_rings_cfg *cfg);
int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
			   struct gve_rx_alloc_rings_cfg *cfg);
void gve_rx_free_rings_gqi(struct gve_priv *priv,
			   struct gve_rx_alloc_rings_cfg *cfg);
void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx);
void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx);
u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hplit);
bool gve_header_split_supported(const struct gve_priv *priv);
int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split);
/* rx buffer handling */
int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs);
void gve_free_page_dqo(struct gve_priv *priv, struct gve_rx_buf_state_dqo *bs,
		       bool free_page);
struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx);
bool gve_buf_state_is_allocated(struct gve_rx_ring *rx,
				struct gve_rx_buf_state_dqo *buf_state);
void gve_free_buf_state(struct gve_rx_ring *rx,
			struct gve_rx_buf_state_dqo *buf_state);
struct gve_rx_buf_state_dqo *gve_dequeue_buf_state(struct gve_rx_ring *rx,
						   struct gve_index_list *list);
void gve_enqueue_buf_state(struct gve_rx_ring *rx, struct gve_index_list *list,
			   struct gve_rx_buf_state_dqo *buf_state);
struct gve_rx_buf_state_dqo *gve_get_recycled_buf_state(struct gve_rx_ring *rx);
void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
			 struct gve_rx_buf_state_dqo *buf_state);
void gve_free_to_page_pool(struct gve_rx_ring *rx,
			   struct gve_rx_buf_state_dqo *buf_state,
			   bool allow_direct);
int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx,
			   struct gve_rx_buf_state_dqo *buf_state);
void gve_free_qpl_page_dqo(struct gve_rx_buf_state_dqo *buf_state);
void gve_reuse_buffer(struct gve_rx_ring *rx,
		      struct gve_rx_buf_state_dqo *buf_state);
void gve_free_buffer(struct gve_rx_ring *rx,
		     struct gve_rx_buf_state_dqo *buf_state);
int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc);
struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
					  struct gve_rx_ring *rx);

/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
			     struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
			     struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
int gve_adjust_config(struct gve_priv *priv,
		      struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
		      struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
int gve_adjust_queues(struct gve_priv *priv,
		      struct gve_queue_config new_rx_config,
		      struct gve_queue_config new_tx_config,
		      bool reset_rss);
/* flow steering rule */
int gve_get_flow_rule_entry(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
int gve_get_flow_rule_ids(struct gve_priv *priv, struct ethtool_rxnfc *cmd, u32 *rule_locs);
int gve_add_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
int gve_del_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
int gve_flow_rules_reset(struct gve_priv *priv);
/* RSS config */
int gve_init_rss_config(struct gve_priv *priv, u16 num_queues);
/* report stats handling */
void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
extern const struct ethtool_ops gve_ethtool_ops;
/* needed by ethtool */
extern char gve_driver_name[];
extern const char gve_version_str[];
#endif /* _GVE_H_ */