xref: /linux/drivers/net/ethernet/google/gve/gve.h (revision 1cc3462159babb69c84c39cb1b4e262aef3ea325)
1 /* SPDX-License-Identifier: (GPL-2.0 OR MIT)
2  * Google virtual Ethernet (gve) driver
3  *
4  * Copyright (C) 2015-2024 Google LLC
5  */
6 
7 #ifndef _GVE_H_
8 #define _GVE_H_
9 
10 #include <linux/dma-mapping.h>
11 #include <linux/dmapool.h>
12 #include <linux/ethtool_netlink.h>
13 #include <linux/netdevice.h>
14 #include <linux/pci.h>
15 #include <linux/u64_stats_sync.h>
16 #include <net/page_pool/helpers.h>
17 #include <net/xdp.h>
18 
19 #include "gve_desc.h"
20 #include "gve_desc_dqo.h"
21 
22 #ifndef PCI_VENDOR_ID_GOOGLE
23 #define PCI_VENDOR_ID_GOOGLE	0x1ae0
24 #endif
25 
26 #define PCI_DEV_ID_GVNIC	0x0042
27 
28 #define GVE_REGISTER_BAR	0
29 #define GVE_DOORBELL_BAR	2
30 
31 /* Driver can alloc up to 2 segments for the header and 2 for the payload. */
32 #define GVE_TX_MAX_IOVEC	4
33 /* 1 for management, 1 for rx, 1 for tx */
34 #define GVE_MIN_MSIX 3
35 
36 /* Number of gve tx/rx stats in the stats report. */
37 #define GVE_TX_STATS_REPORT_NUM	6
38 #define GVE_RX_STATS_REPORT_NUM	2
39 
40 /* Interval to schedule a stats report update, 20000ms. */
41 #define GVE_STATS_REPORT_TIMER_PERIOD	20000
42 
43 /* Number of NIC tx/rx stats in the stats report. */
44 #define NIC_TX_STATS_REPORT_NUM	0
45 #define NIC_RX_STATS_REPORT_NUM	4
46 
47 #define GVE_ADMINQ_BUFFER_SIZE 4096
48 
49 #define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))
50 
51 /* PTYPEs are always 10 bits. */
52 #define GVE_NUM_PTYPES	1024
53 
54 /* Default minimum ring size */
55 #define GVE_DEFAULT_MIN_TX_RING_SIZE 256
56 #define GVE_DEFAULT_MIN_RX_RING_SIZE 512
57 
58 #define GVE_DEFAULT_RX_BUFFER_SIZE 2048
59 
60 #define GVE_MAX_RX_BUFFER_SIZE 4096
61 
62 #define GVE_DEFAULT_RX_BUFFER_OFFSET 2048
63 
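/* Sizing factor for the RX page pool; presumably the pool is sized to this
 * multiple of the RX ring size.
 */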
64 #define GVE_PAGE_POOL_SIZE_MULTIPLIER 4
65 
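/* Number of queried flow rules, and of flow rule IDs, that fit in one
 * 4096-byte adminq buffer. The second macro applies sizeof() to a member
 * through a cast null pointer, which yields the member's size without
 * needing an object.
 */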
66 #define GVE_FLOW_RULES_CACHE_SIZE \
67 	(GVE_ADMINQ_BUFFER_SIZE / sizeof(struct gve_adminq_queried_flow_rule))
68 #define GVE_FLOW_RULE_IDS_CACHE_SIZE \
69 	(GVE_ADMINQ_BUFFER_SIZE / sizeof(((struct gve_adminq_queried_flow_rule *)0)->location))
70 
71 #define GVE_RSS_KEY_SIZE	40
72 #define GVE_RSS_INDIR_SIZE	128
73 
74 #define GVE_XDP_ACTIONS 5
75 
76 #define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182
77 
78 #define GVE_DEFAULT_HEADER_BUFFER_SIZE 128
79 
80 #define DQO_QPL_DEFAULT_TX_PAGES 512
81 
82 /* Maximum TSO size supported on DQO */
83 #define GVE_DQO_TX_MAX	0x3FFFF
84 
85 #define GVE_TX_BUF_SHIFT_DQO 11
86 
87 /* 2K buffers for DQO-QPL */
88 #define GVE_TX_BUF_SIZE_DQO BIT(GVE_TX_BUF_SHIFT_DQO)
89 #define GVE_TX_BUFS_PER_PAGE_DQO (PAGE_SIZE >> GVE_TX_BUF_SHIFT_DQO)
90 #define GVE_MAX_TX_BUFS_PER_PKT (DIV_ROUND_UP(GVE_DQO_TX_MAX, GVE_TX_BUF_SIZE_DQO))
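/* Example: with 4K pages the 2K DQO TX buffers above give two buffers per
 * page, and a maximal 0x3FFFF-byte TSO packet needs
 * DIV_ROUND_UP(0x3FFFF, 2K) = 128 buffers.
 */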
91 
92 /* If the number of free/recyclable buffers is less than this threshold, the
93  * driver allocates and uses a non-qpl page on the DQO QPL receive path to
94  * free up buffers.
95  * The value is big enough to post at least three 64K LRO packets via 2K buffers to the NIC.
96  */
97 #define GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD 96
98 
99 /* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
100 struct gve_rx_desc_queue {
101 	struct gve_rx_desc *desc_ring; /* the descriptor ring */
102 	dma_addr_t bus; /* the bus for the desc_ring */
103 	u8 seqno; /* the next expected seqno for this desc */
104 };
105 
106 /* The page info for a single slot in the RX data queue */
107 struct gve_rx_slot_page_info {
108 	struct page *page;
109 	void *page_address;
110 	u32 page_offset; /* offset to write to in page */
111 	unsigned int buf_size;
112 	int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
113 	u16 pad; /* adjustment for rx padding */
114 	u8 can_flip; /* tracks if the networking stack is using the page */
115 };
116 
117 /* A list of pages registered with the device during setup and used by a queue
118  * as buffers
119  */
120 struct gve_queue_page_list {
121 	u32 id; /* unique id */
122 	u32 num_entries;
123 	struct page **pages; /* list of num_entries pages */
124 	dma_addr_t *page_buses; /* the dma addrs of the pages */
125 };
126 
127 /* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */
128 struct gve_rx_data_queue {
129 	union gve_rx_data_slot *data_ring; /* read by NIC */
130 	dma_addr_t data_bus; /* dma mapping of the slots */
131 	struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
132 	struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
133 	u8 raw_addressing; /* use raw_addressing? */
134 };
135 
136 struct gve_priv;
137 
138 /* RX buffer queue for posting buffers to HW.
139  * Each RX (completion) queue has a corresponding buffer queue.
140  */
141 struct gve_rx_buf_queue_dqo {
142 	struct gve_rx_desc_dqo *desc_ring;
143 	dma_addr_t bus;
144 	u32 head; /* Pointer to start cleaning buffers at. */
145 	u32 tail; /* Last posted buffer index + 1 */
146 	u32 mask; /* Mask for indices to the size of the ring */
147 };
148 
149 /* RX completion queue to receive packets from HW. */
150 struct gve_rx_compl_queue_dqo {
151 	struct gve_rx_compl_desc_dqo *desc_ring;
152 	dma_addr_t bus;
153 
154 	/* Number of slots which did not have a buffer posted yet. We should not
155 	 * post more buffers than the queue size to avoid HW overrunning the
156 	 * queue.
157 	 */
158 	int num_free_slots;
159 
160 	/* HW uses a "generation bit" to notify SW of new descriptors. When a
161 	 * descriptor's generation bit is different from the current generation,
162 	 * that descriptor is ready to be consumed by SW.
163 	 */
164 	u8 cur_gen_bit;
165 
166 	/* Pointer into desc_ring where the next completion descriptor will be
167 	 * received.
168 	 */
169 	u32 head;
170 	u32 mask; /* Mask for indices to the size of the ring */
171 };
172 
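/* Kernel virtual address and DMA address of the buffer area used for
 * header-split header buffers.
 */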
173 struct gve_header_buf {
174 	u8 *data;
175 	dma_addr_t addr;
176 };
177 
178 /* Stores state for tracking buffers posted to HW */
179 struct gve_rx_buf_state_dqo {
180 	/* The page posted to HW. */
181 	struct gve_rx_slot_page_info page_info;
182 
183 	/* The DMA address corresponding to `page_info`. */
184 	dma_addr_t addr;
185 
186 	/* Last offset into the page when it only had a single reference, at
187 	 * which point every other offset is free to be reused.
188 	 */
189 	u32 last_single_ref_offset;
190 
191 	/* Linked list index to next element in the list, or -1 if none */
192 	s16 next;
193 };
194 
195 /* `head` and `tail` are indices into an array, or -1 if empty. */
196 struct gve_index_list {
197 	s16 head;
198 	s16 tail;
199 };
200 
201 /* A single received packet split across multiple buffers may be
202  * reconstructed using the information in this structure.
203  */
204 struct gve_rx_ctx {
205 	/* head and tail of skb chain for the current packet or NULL if none */
206 	struct sk_buff *skb_head;
207 	struct sk_buff *skb_tail;
208 	u32 total_size;
209 	u8 frag_cnt;
210 	bool drop_pkt;
211 };
212 
213 struct gve_rx_cnts {
214 	u32 ok_pkt_bytes;
215 	u16 ok_pkt_cnt;
216 	u16 total_pkt_cnt;
217 	u16 cont_pkt_cnt;
218 	u16 desc_err_pkt_cnt;
219 };
220 
221 /* Contains datapath state used to represent an RX queue. */
222 struct gve_rx_ring {
223 	struct gve_priv *gve;
224 	union {
225 		/* GQI fields */
226 		struct {
227 			struct gve_rx_desc_queue desc;
228 			struct gve_rx_data_queue data;
229 
230 			/* threshold for posting new buffs and descs */
231 			u32 db_threshold;
232 			u16 packet_buffer_size;
233 
234 			u32 qpl_copy_pool_mask;
235 			u32 qpl_copy_pool_head;
236 			struct gve_rx_slot_page_info *qpl_copy_pool;
237 		};
238 
239 		/* DQO fields. */
240 		struct {
241 			struct gve_rx_buf_queue_dqo bufq;
242 			struct gve_rx_compl_queue_dqo complq;
243 
244 			struct gve_rx_buf_state_dqo *buf_states;
245 			u16 num_buf_states;
246 
247 			/* Linked list of gve_rx_buf_state_dqo. Index into
248 			 * buf_states, or -1 if empty.
249 			 */
250 			s16 free_buf_states;
251 
252 			/* Linked list of gve_rx_buf_state_dqo. Indexes into
253 			 * buf_states, or -1 if empty.
254 			 *
255 			 * This list contains buf_states which are pointing to
256 			 * valid buffers.
257 			 *
258 			 * We use a FIFO here in order to increase the
259 			 * probability that buffers can be reused by increasing
260 			 * the time between usages.
261 			 */
262 			struct gve_index_list recycled_buf_states;
263 
264 			/* Linked list of gve_rx_buf_state_dqo. Indexes into
265 			 * buf_states, or -1 if empty.
266 			 *
267 			 * This list contains buf_states which have buffers
268 			 * which cannot be reused yet.
269 			 */
270 			struct gve_index_list used_buf_states;
271 
272 			/* qpl assigned to this queue */
273 			struct gve_queue_page_list *qpl;
274 
275 			/* index into queue page list */
276 			u32 next_qpl_page_idx;
277 
278 			/* track number of used buffers */
279 			u16 used_buf_states_cnt;
280 
281 			/* Address info of the buffers for header-split */
282 			struct gve_header_buf hdr_bufs;
283 
284 			struct page_pool *page_pool;
285 		} dqo;
286 	};
287 
288 	u64 rbytes; /* free-running bytes received */
289 	u64 rx_hsplit_bytes; /* free-running header bytes received */
290 	u64 rpackets; /* free-running packets received */
291 	u32 cnt; /* free-running total number of completed packets */
292 	u32 fill_cnt; /* free-running total number of descs and buffs posted */
293 	u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
294 	u64 rx_hsplit_pkt; /* free-running packets with headers split */
295 	u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
296 	u64 rx_copied_pkt; /* free-running total number of copied packets */
297 	u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
298 	u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
299 	u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
300 	/* free-running count of packets left unsplit due to header buffer overflow or a zero hdr_len */
301 	u64 rx_hsplit_unsplit_pkt;
302 	u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
303 	u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
304 	u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */
305 	u64 rx_frag_alloc_cnt; /* free-running count of rx page allocations */
306 	u64 xdp_tx_errors;
307 	u64 xdp_redirect_errors;
308 	u64 xdp_alloc_fails;
309 	u64 xdp_actions[GVE_XDP_ACTIONS];
310 	u32 q_num; /* queue index */
311 	u32 ntfy_id; /* notification block index */
312 	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
313 	dma_addr_t q_resources_bus; /* dma address for the queue resources */
314 	struct u64_stats_sync statss; /* sync stats for 32bit archs */
315 
316 	struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. */
317 
318 	/* XDP stuff */
319 	struct xdp_rxq_info xdp_rxq;
320 	struct xdp_rxq_info xsk_rxq;
321 	struct xsk_buff_pool *xsk_pool;
322 	struct page_frag_cache page_cache; /* Page cache to allocate XDP frames */
323 };
324 
325 /* A TX desc ring entry */
326 union gve_tx_desc {
327 	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
328 	struct gve_tx_mtd_desc mtd; /* optional metadata descriptor */
329 	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
330 };
331 
332 /* Tracks the memory in the fifo occupied by a segment of a packet */
333 struct gve_tx_iovec {
334 	u32 iov_offset; /* offset into this segment */
335 	u32 iov_len; /* length */
336 	u32 iov_padding; /* padding associated with this segment */
337 };
338 
339 /* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
340  * ring entry, but only used for a pkt_desc, not a seg_desc
341  */
342 struct gve_tx_buffer_state {
343 	union {
344 		struct sk_buff *skb; /* skb for this pkt */
345 		struct xdp_frame *xdp_frame; /* xdp_frame */
346 	};
347 	struct {
348 		u16 size; /* size of xmitted xdp pkt */
349 		u8 is_xsk; /* xsk buff */
350 	} xdp;
351 	union {
352 		struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
353 		struct {
354 			DEFINE_DMA_UNMAP_ADDR(dma);
355 			DEFINE_DMA_UNMAP_LEN(len);
356 		};
357 	};
358 };
359 
360 /* A TX buffer - each queue has one */
361 struct gve_tx_fifo {
362 	void *base; /* address of base of FIFO */
363 	u32 size; /* total size */
364 	atomic_t available; /* how much space is still available */
365 	u32 head; /* offset to write at */
366 	struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */
367 };
368 
369 /* TX descriptor for DQO format */
370 union gve_tx_desc_dqo {
371 	struct gve_tx_pkt_desc_dqo pkt;
372 	struct gve_tx_tso_context_desc_dqo tso_ctx;
373 	struct gve_tx_general_context_desc_dqo general_ctx;
374 };
375 
376 enum gve_packet_state {
377 	/* Packet is in free list, available to be allocated.
378 	 * This should always be zero since state is not explicitly initialized.
379 	 */
380 	GVE_PACKET_STATE_UNALLOCATED,
381 	/* Packet is expecting a regular data completion or miss completion */
382 	GVE_PACKET_STATE_PENDING_DATA_COMPL,
383 	/* Packet has received a miss completion and is expecting a
384 	 * re-injection completion.
385 	 */
386 	GVE_PACKET_STATE_PENDING_REINJECT_COMPL,
387 	/* No valid completion received within the specified timeout. */
388 	GVE_PACKET_STATE_TIMED_OUT_COMPL,
389 };
390 
391 struct gve_tx_pending_packet_dqo {
392 	struct sk_buff *skb; /* skb for this packet */
393 
394 	/* 0th element corresponds to the linear portion of `skb` and should be
395 	 * unmapped with `dma_unmap_single`.
396 	 *
397 	 * All others correspond to `skb`'s frags and should be unmapped with
398 	 * `dma_unmap_page`.
399 	 */
400 	union {
401 		struct {
402 			DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
403 			DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
404 		};
405 		s16 tx_qpl_buf_ids[GVE_MAX_TX_BUFS_PER_PKT];
406 	};
407 
408 	u16 num_bufs;
409 
410 	/* Linked list index to next element in the list, or -1 if none */
411 	s16 next;
412 
413 	/* Linked list index to prev element in the list, or -1 if none.
414 	 * Used for tracking either outstanding miss completions or prematurely
415 	 * freed packets.
416 	 */
417 	s16 prev;
418 
419 	/* Identifies the current state of the packet as defined in
420 	 * `enum gve_packet_state`.
421 	 */
422 	u8 state;
423 
424 	/* If packet is an outstanding miss completion, then the packet is
425 	 * freed if the corresponding re-injection completion is not received
426 	 * before kernel jiffies exceeds timeout_jiffies.
427 	 */
428 	unsigned long timeout_jiffies;
429 };
430 
431 /* Contains datapath state used to represent a TX queue. */
432 struct gve_tx_ring {
433 	/* Cacheline 0 -- Accessed & dirtied during transmit */
434 	union {
435 		/* GQI fields */
436 		struct {
437 			struct gve_tx_fifo tx_fifo;
438 			u32 req; /* driver tracked head pointer */
439 			u32 done; /* driver tracked tail pointer */
440 		};
441 
442 		/* DQO fields. */
443 		struct {
444 			/* Linked list of gve_tx_pending_packet_dqo. Index into
445 			 * pending_packets, or -1 if empty.
446 			 *
447 			 * This is a consumer list owned by the TX path. When it
448 			 * runs out, the producer list is stolen from the
449 			 * completion handling path
450 			 * (dqo_compl.free_pending_packets).
451 			 */
452 			s16 free_pending_packets;
453 
454 			/* Cached value of `dqo_compl.hw_tx_head` */
455 			u32 head;
456 			u32 tail; /* Last posted buffer index + 1 */
457 
458 			/* Index of the last descriptor with "report event" bit
459 			 * set.
460 			 */
461 			u32 last_re_idx;
462 
463 			/* free running number of packet buf descriptors posted */
464 			u16 posted_packet_desc_cnt;
465 			/* free running number of packet buf descriptors completed */
466 			u16 completed_packet_desc_cnt;
467 
468 			/* QPL fields */
469 			struct {
470 			       /* Linked list of gve_tx_buf_dqo. Index into
471 				* tx_qpl_buf_next, or -1 if empty.
472 				*
473 				* This is a consumer list owned by the TX path. When it
474 				* runs out, the producer list is stolen from the
475 				* completion handling path
476 				* (dqo_compl.free_tx_qpl_buf_head).
477 				*/
478 				s16 free_tx_qpl_buf_head;
479 
480 			       /* Free running count of the number of QPL tx buffers
481 				* allocated
482 				*/
483 				u32 alloc_tx_qpl_buf_cnt;
484 
485 				/* Cached value of `dqo_compl.free_tx_qpl_buf_cnt` */
486 				u32 free_tx_qpl_buf_cnt;
487 			};
488 		} dqo_tx;
489 	};
490 
491 	/* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */
492 	union {
493 		/* GQI fields */
494 		struct {
495 			/* Spinlock for when cleanup in progress */
496 			spinlock_t clean_lock;
497 			/* Spinlock for XDP tx traffic */
498 			spinlock_t xdp_lock;
499 		};
500 
501 		/* DQO fields. */
502 		struct {
503 			u32 head; /* Last read on compl_desc */
504 
505 			/* Tracks the current gen bit of compl_q */
506 			u8 cur_gen_bit;
507 
508 			/* Linked list of gve_tx_pending_packet_dqo. Index into
509 			 * pending_packets, or -1 if empty.
510 			 *
511 			 * This is the producer list, owned by the completion
512 			 * handling path. When the consumer list
513 			 * (dqo_tx.free_pending_packets) runs out, this list
514 			 * will be stolen.
515 			 */
516 			atomic_t free_pending_packets;
517 
518 			/* Last TX ring index fetched by HW */
519 			atomic_t hw_tx_head;
520 
521 			/* List to track pending packets which received a miss
522 			 * completion but not a corresponding reinjection.
523 			 */
524 			struct gve_index_list miss_completions;
525 
526 			/* List to track pending packets that were completed
527 			 * before receiving a valid completion because they
528 			 * reached a specified timeout.
529 			 */
530 			struct gve_index_list timed_out_completions;
531 
532 			/* QPL fields */
533 			struct {
534 				/* Linked list of gve_tx_buf_dqo. Index into
535 				 * tx_qpl_buf_next, or -1 if empty.
536 				 *
537 				 * This is the producer list, owned by the completion
538 				 * handling path. When the consumer list
539 				 * (dqo_tx.free_tx_qpl_buf_head) runs out, this list
540 				 * will be stolen.
541 				 */
542 				atomic_t free_tx_qpl_buf_head;
543 
544 				/* Free running count of the number of tx buffers
545 				 * freed
546 				 */
547 				atomic_t free_tx_qpl_buf_cnt;
548 			};
549 		} dqo_compl;
550 	} ____cacheline_aligned;
551 	u64 pkt_done; /* free-running - total packets completed */
552 	u64 bytes_done; /* free-running - total bytes completed */
553 	u64 dropped_pkt; /* free-running - total packets dropped */
554 	u64 dma_mapping_error; /* count of dma mapping errors */
555 
556 	/* Cacheline 2 -- Read-mostly fields */
557 	union {
558 		/* GQI fields */
559 		struct {
560 			union gve_tx_desc *desc;
561 
562 			/* Maps 1:1 to a desc */
563 			struct gve_tx_buffer_state *info;
564 		};
565 
566 		/* DQO fields. */
567 		struct {
568 			union gve_tx_desc_dqo *tx_ring;
569 			struct gve_tx_compl_desc *compl_ring;
570 
571 			struct gve_tx_pending_packet_dqo *pending_packets;
572 			s16 num_pending_packets;
573 
574 			u32 complq_mask; /* complq size is complq_mask + 1 */
575 
576 			/* QPL fields */
577 			struct {
578 				/* qpl assigned to this queue */
579 				struct gve_queue_page_list *qpl;
580 
581 				/* Each QPL page is divided into TX bounce buffers
582 				 * of size GVE_TX_BUF_SIZE_DQO. tx_qpl_buf_next is
583 				 * an array to manage linked lists of TX buffers.
584 				 * An entry j at index i means that the j'th
585 				 * buffer is next on the list after buffer i.
586 				 */
587 				s16 *tx_qpl_buf_next;
588 				u32 num_tx_qpl_bufs;
589 			};
590 		} dqo;
591 	} ____cacheline_aligned;
592 	struct netdev_queue *netdev_txq;
593 	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
594 	struct device *dev;
595 	u32 mask; /* masks req and done down to queue size */
596 	u8 raw_addressing; /* use raw_addressing? */
597 
598 	/* Slow-path fields */
599 	u32 q_num ____cacheline_aligned; /* queue idx */
600 	u32 stop_queue; /* count of queue stops */
601 	u32 wake_queue; /* count of queue wakes */
602 	u32 queue_timeout; /* count of queue timeouts */
603 	u32 ntfy_id; /* notification block index */
604 	u32 last_kick_msec; /* Last time the queue was kicked */
605 	dma_addr_t bus; /* dma address of the descr ring */
606 	dma_addr_t q_resources_bus; /* dma address of the queue resources */
607 	dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
608 	struct u64_stats_sync statss; /* sync stats for 32bit archs */
609 	struct xsk_buff_pool *xsk_pool;
610 	u32 xdp_xsk_wakeup;
611 	u32 xdp_xsk_done;
612 	u64 xdp_xsk_sent;
613 	u64 xdp_xmit;
614 	u64 xdp_xmit_errors;
615 } ____cacheline_aligned;
616 
617 /* Wraps the info for one irq including the napi struct and the queues
618  * associated with that irq.
619  */
620 struct gve_notify_block {
621 	__be32 *irq_db_index; /* pointer to idx into Bar2 */
622 	char name[IFNAMSIZ + 16]; /* name registered with the kernel */
623 	struct napi_struct napi; /* kernel napi struct for this block */
624 	struct gve_priv *priv;
625 	struct gve_tx_ring *tx; /* tx rings on this block */
626 	struct gve_rx_ring *rx; /* rx rings on this block */
627 	u32 irq;
628 };
629 
630 /* Tracks allowed and current queue settings */
631 struct gve_queue_config {
632 	u16 max_queues;
633 	u16 num_queues; /* current */
634 };
635 
636 /* Tracks the available and used qpl IDs */
637 struct gve_qpl_config {
638 	u32 qpl_map_size; /* map memory size */
639 	unsigned long *qpl_id_map; /* bitmap of used qpl ids */
640 };
641 
642 struct gve_irq_db {
643 	__be32 index;
644 } ____cacheline_aligned;
645 
646 struct gve_ptype {
647 	u8 l3_type;  /* `gve_l3_type` in gve_adminq.h */
648 	u8 l4_type;  /* `gve_l4_type` in gve_adminq.h */
649 };
650 
651 struct gve_ptype_lut {
652 	struct gve_ptype ptypes[GVE_NUM_PTYPES];
653 };
654 
655 /* Parameters for allocating resources for tx queues */
656 struct gve_tx_alloc_rings_cfg {
657 	struct gve_queue_config *qcfg;
658 
659 	u16 ring_size;
660 	u16 start_idx;
661 	u16 num_rings;
662 	bool raw_addressing;
663 
664 	/* Allocated resources are returned here */
665 	struct gve_tx_ring *tx;
666 };
667 
668 /* Parameters for allocating resources for rx queues */
669 struct gve_rx_alloc_rings_cfg {
670 	/* tx config is also needed to determine QPL ids */
671 	struct gve_queue_config *qcfg;
672 	struct gve_queue_config *qcfg_tx;
673 
674 	u16 ring_size;
675 	u16 packet_buffer_size;
676 	bool raw_addressing;
677 	bool enable_header_split;
678 	bool reset_rss;
679 
680 	/* Allocated resources are returned here */
681 	struct gve_rx_ring *rx;
682 };
683 
684 /* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
685  * when the entire configure_device_resources command is zeroed out and the
686  * queue_format is not specified.
687  */
688 enum gve_queue_format {
689 	GVE_QUEUE_FORMAT_UNSPECIFIED	= 0x0,
690 	GVE_GQI_RDA_FORMAT		= 0x1,
691 	GVE_GQI_QPL_FORMAT		= 0x2,
692 	GVE_DQO_RDA_FORMAT		= 0x3,
693 	GVE_DQO_QPL_FORMAT		= 0x4,
694 };
695 
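/* Flow-steering match fields: IPv4/IPv6 addresses, L4 ports or IPsec SPI,
 * and TOS/traffic class. Used as both key and mask in gve_flow_rule.
 */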
696 struct gve_flow_spec {
697 	__be32 src_ip[4];
698 	__be32 dst_ip[4];
699 	union {
700 		struct {
701 			__be16 src_port;
702 			__be16 dst_port;
703 		};
704 		__be32 spi;
705 	};
706 	union {
707 		u8 tos;
708 		u8 tclass;
709 	};
710 };
711 
712 struct gve_flow_rule {
713 	u32 location;
714 	u16 flow_type;
715 	u16 action;
716 	struct gve_flow_spec key;
717 	struct gve_flow_spec mask;
718 };
719 
720 struct gve_flow_rules_cache {
721 	bool rules_cache_synced; /* False if the driver's rules_cache is outdated */
722 	struct gve_adminq_queried_flow_rule *rules_cache;
723 	__be32 *rule_ids_cache;
724 	/* The total number of queried rules that are stored in the caches */
725 	u32 rules_cache_num;
726 	u32 rule_ids_cache_num;
727 };
728 
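/* Driver-side cache of the RSS hash key and indirection table; sizes are
 * given by rss_key_size and rss_lut_size in gve_priv.
 */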
729 struct gve_rss_config {
730 	u8 *hash_key;
731 	u32 *hash_lut;
732 };
733 
734 struct gve_priv {
735 	struct net_device *dev;
736 	struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
737 	struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
738 	struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
739 	struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */
740 	dma_addr_t irq_db_indices_bus;
741 	struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */
742 	char mgmt_msix_name[IFNAMSIZ + 16];
743 	u32 mgmt_msix_idx;
744 	__be32 *counter_array; /* array of num_event_counters */
745 	dma_addr_t counter_array_bus;
746 
747 	u16 num_event_counters;
748 	u16 tx_desc_cnt; /* num desc per ring */
749 	u16 rx_desc_cnt; /* num desc per ring */
750 	u16 max_tx_desc_cnt;
751 	u16 max_rx_desc_cnt;
752 	u16 min_tx_desc_cnt;
753 	u16 min_rx_desc_cnt;
754 	bool modify_ring_size_enabled;
755 	bool default_min_ring_size;
756 	u16 tx_pages_per_qpl; /* NIC-suggested number of pages per qpl for TX queues */
757 	u64 max_registered_pages;
758 	u64 num_registered_pages; /* num pages registered with NIC */
759 	struct bpf_prog *xdp_prog; /* XDP BPF program */
760 	u32 rx_copybreak; /* copy packets smaller than this */
761 	u16 default_num_queues; /* default num queues to set up */
762 
763 	u16 num_xdp_queues;
764 	struct gve_queue_config tx_cfg;
765 	struct gve_queue_config rx_cfg;
766 	u32 num_ntfy_blks; /* split between TX and RX so must be even */
767 
768 	struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
769 	__be32 __iomem *db_bar2; /* "array" of doorbells */
770 	u32 msg_enable;	/* level for netif* netdev print macros */
771 	struct pci_dev *pdev;
772 
773 	/* metrics */
774 	u32 tx_timeo_cnt;
775 
776 	/* Admin queue - see gve_adminq.h */
777 	union gve_adminq_command *adminq;
778 	dma_addr_t adminq_bus_addr;
779 	struct dma_pool *adminq_pool;
780 	struct mutex adminq_lock; /* Protects adminq command execution */
781 	u32 adminq_mask; /* masks prod_cnt to adminq size */
782 	u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
783 	u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
784 	u32 adminq_timeouts; /* free-running count of AQ cmds timeouts */
785 	/* free-running counts of AQ cmds executed, per command */
786 	u32 adminq_describe_device_cnt;
787 	u32 adminq_cfg_device_resources_cnt;
788 	u32 adminq_register_page_list_cnt;
789 	u32 adminq_unregister_page_list_cnt;
790 	u32 adminq_create_tx_queue_cnt;
791 	u32 adminq_create_rx_queue_cnt;
792 	u32 adminq_destroy_tx_queue_cnt;
793 	u32 adminq_destroy_rx_queue_cnt;
794 	u32 adminq_dcfg_device_resources_cnt;
795 	u32 adminq_set_driver_parameter_cnt;
796 	u32 adminq_report_stats_cnt;
797 	u32 adminq_report_link_speed_cnt;
798 	u32 adminq_get_ptype_map_cnt;
799 	u32 adminq_verify_driver_compatibility_cnt;
800 	u32 adminq_query_flow_rules_cnt;
801 	u32 adminq_cfg_flow_rule_cnt;
802 	u32 adminq_cfg_rss_cnt;
803 	u32 adminq_query_rss_cnt;
804 
805 	/* Global stats */
806 	u32 interface_up_cnt; /* count of times interface turned up since last reset */
807 	u32 interface_down_cnt; /* count of times interface turned down since last reset */
808 	u32 reset_cnt; /* count of resets */
809 	u32 page_alloc_fail; /* count of page alloc fails */
810 	u32 dma_mapping_error; /* count of dma mapping errors */
811 	u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
812 	u32 suspend_cnt; /* count of times suspended */
813 	u32 resume_cnt; /* count of times resumed */
814 	struct workqueue_struct *gve_wq;
815 	struct work_struct service_task;
816 	struct work_struct stats_report_task;
817 	unsigned long service_task_flags;
818 	unsigned long state_flags;
819 
820 	struct gve_stats_report *stats_report;
821 	u64 stats_report_len;
822 	dma_addr_t stats_report_bus; /* dma address for the stats report */
823 	unsigned long ethtool_flags;
824 
825 	unsigned long stats_report_timer_period;
826 	struct timer_list stats_report_timer;
827 
828 	/* Gvnic device link speed from hypervisor. */
829 	u64 link_speed;
830 	bool up_before_suspend; /* True if dev was up before suspend */
831 
832 	struct gve_ptype_lut *ptype_lut_dqo;
833 
834 	/* Must be a power of two. */
835 	u16 data_buffer_size_dqo;
836 	u16 max_rx_buffer_size; /* device limit */
837 
838 	enum gve_queue_format queue_format;
839 
840 	/* Interrupt coalescing settings */
841 	u32 tx_coalesce_usecs;
842 	u32 rx_coalesce_usecs;
843 
844 	u16 header_buf_size; /* device configured, header-split supported if non-zero */
845 	bool header_split_enabled; /* True if the header split is enabled by the user */
846 
847 	u32 max_flow_rules;
848 	u32 num_flow_rules;
849 
850 	struct gve_flow_rules_cache flow_rules_cache;
851 
852 	u16 rss_key_size;
853 	u16 rss_lut_size;
854 	bool cache_rss_config;
855 	struct gve_rss_config rss_config;
856 };
857 
858 enum gve_service_task_flags_bit {
859 	GVE_PRIV_FLAGS_DO_RESET			= 1,
860 	GVE_PRIV_FLAGS_RESET_IN_PROGRESS	= 2,
861 	GVE_PRIV_FLAGS_PROBE_IN_PROGRESS	= 3,
862 	GVE_PRIV_FLAGS_DO_REPORT_STATS = 4,
863 };
864 
865 enum gve_state_flags_bit {
866 	GVE_PRIV_FLAGS_ADMIN_QUEUE_OK		= 1,
867 	GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK	= 2,
868 	GVE_PRIV_FLAGS_DEVICE_RINGS_OK		= 3,
869 	GVE_PRIV_FLAGS_NAPI_ENABLED		= 4,
870 };
871 
872 enum gve_ethtool_flags_bit {
873 	GVE_PRIV_FLAGS_REPORT_STATS		= 0,
874 };
875 
876 static inline bool gve_get_do_reset(struct gve_priv *priv)
877 {
878 	return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
879 }
880 
881 static inline void gve_set_do_reset(struct gve_priv *priv)
882 {
883 	set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
884 }
885 
886 static inline void gve_clear_do_reset(struct gve_priv *priv)
887 {
888 	clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
889 }
890 
891 static inline bool gve_get_reset_in_progress(struct gve_priv *priv)
892 {
893 	return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS,
894 			&priv->service_task_flags);
895 }
896 
897 static inline void gve_set_reset_in_progress(struct gve_priv *priv)
898 {
899 	set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
900 }
901 
902 static inline void gve_clear_reset_in_progress(struct gve_priv *priv)
903 {
904 	clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
905 }
906 
907 static inline bool gve_get_probe_in_progress(struct gve_priv *priv)
908 {
909 	return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS,
910 			&priv->service_task_flags);
911 }
912 
913 static inline void gve_set_probe_in_progress(struct gve_priv *priv)
914 {
915 	set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
916 }
917 
918 static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
919 {
920 	clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
921 }
922 
923 static inline bool gve_get_do_report_stats(struct gve_priv *priv)
924 {
925 	return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS,
926 			&priv->service_task_flags);
927 }
928 
929 static inline void gve_set_do_report_stats(struct gve_priv *priv)
930 {
931 	set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
932 }
933 
934 static inline void gve_clear_do_report_stats(struct gve_priv *priv)
935 {
936 	clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
937 }
938 
939 static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
940 {
941 	return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
942 }
943 
944 static inline void gve_set_admin_queue_ok(struct gve_priv *priv)
945 {
946 	set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
947 }
948 
949 static inline void gve_clear_admin_queue_ok(struct gve_priv *priv)
950 {
951 	clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
952 }
953 
954 static inline bool gve_get_device_resources_ok(struct gve_priv *priv)
955 {
956 	return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
957 }
958 
959 static inline void gve_set_device_resources_ok(struct gve_priv *priv)
960 {
961 	set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
962 }
963 
964 static inline void gve_clear_device_resources_ok(struct gve_priv *priv)
965 {
966 	clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
967 }
968 
969 static inline bool gve_get_device_rings_ok(struct gve_priv *priv)
970 {
971 	return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
972 }
973 
974 static inline void gve_set_device_rings_ok(struct gve_priv *priv)
975 {
976 	set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
977 }
978 
979 static inline void gve_clear_device_rings_ok(struct gve_priv *priv)
980 {
981 	clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
982 }
983 
984 static inline bool gve_get_napi_enabled(struct gve_priv *priv)
985 {
986 	return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
987 }
988 
989 static inline void gve_set_napi_enabled(struct gve_priv *priv)
990 {
991 	set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
992 }
993 
994 static inline void gve_clear_napi_enabled(struct gve_priv *priv)
995 {
996 	clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
997 }
998 
999 static inline bool gve_get_report_stats(struct gve_priv *priv)
1000 {
1001 	return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
1002 }
1003 
1004 static inline void gve_clear_report_stats(struct gve_priv *priv)
1005 {
1006 	clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
1007 }
1008 
1009 /* Returns the address of the given notify block's irq doorbell
1010  */
1011 static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
1012 					       struct gve_notify_block *block)
1013 {
1014 	return &priv->db_bar2[be32_to_cpu(*block->irq_db_index)];
1015 }
1016 
1017 /* Returns the index into ntfy_blocks of the given tx ring's block
1018  */
1019 static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
1020 {
1021 	return queue_idx;
1022 }
1023 
1024 /* Returns the index into ntfy_blocks of the given rx ring's block
1025  */
1026 static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
1027 {
1028 	return (priv->num_ntfy_blks / 2) + queue_idx;
1029 }
1030 
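/* True if the configured queue format uses queue page lists (pages
 * registered with the device as bounce buffers) rather than raw DMA
 * addressing.
 */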
1031 static inline bool gve_is_qpl(struct gve_priv *priv)
1032 {
1033 	return priv->queue_format == GVE_GQI_QPL_FORMAT ||
1034 		priv->queue_format == GVE_DQO_QPL_FORMAT;
1035 }
1036 
1037 /* Returns the number of tx queue page lists */
1038 static inline u32 gve_num_tx_qpls(const struct gve_queue_config *tx_cfg,
1039 				  int num_xdp_queues,
1040 				  bool is_qpl)
1041 {
1042 	if (!is_qpl)
1043 		return 0;
1044 	return tx_cfg->num_queues + num_xdp_queues;
1045 }
1046 
1047 /* Returns the number of XDP tx queue page lists
1048  */
1049 static inline u32 gve_num_xdp_qpls(struct gve_priv *priv)
1050 {
1051 	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
1052 		return 0;
1053 
1054 	return priv->num_xdp_queues;
1055 }
1056 
1057 /* Returns the number of rx queue page lists */
1058 static inline u32 gve_num_rx_qpls(const struct gve_queue_config *rx_cfg,
1059 				  bool is_qpl)
1060 {
1061 	if (!is_qpl)
1062 		return 0;
1063 	return rx_cfg->num_queues;
1064 }
1065 
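/* QPL IDs are laid out with TX queues first, starting at 0, followed by RX
 * queues starting at tx max_queues (see gve_rx_qpl_id() below).
 */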
1066 static inline u32 gve_tx_qpl_id(struct gve_priv *priv, int tx_qid)
1067 {
1068 	return tx_qid;
1069 }
1070 
1071 static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
1072 {
1073 	return priv->tx_cfg.max_queues + rx_qid;
1074 }
1075 
1076 static inline u32 gve_get_rx_qpl_id(const struct gve_queue_config *tx_cfg, int rx_qid)
1077 {
1078 	return tx_cfg->max_queues + rx_qid;
1079 }
1080 
1081 static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
1082 {
1083 	return gve_tx_qpl_id(priv, 0);
1084 }
1085 
1086 static inline u32 gve_rx_start_qpl_id(const struct gve_queue_config *tx_cfg)
1087 {
1088 	return gve_get_rx_qpl_id(tx_cfg, 0);
1089 }
1090 
1091 static inline u32 gve_get_rx_pages_per_qpl_dqo(u32 rx_desc_cnt)
1092 {
1093 	/* For DQO, the page count should exceed the ring size to allow for
1094 	 * out-of-order completions. Set it to twice the ring size.
1095 	 */
1096 	return 2 * rx_desc_cnt;
1097 }
1098 
1099 /* Returns the correct dma direction for tx and rx qpls */
1100 static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
1101 						      int id)
1102 {
1103 	if (id < gve_rx_start_qpl_id(&priv->tx_cfg))
1104 		return DMA_TO_DEVICE;
1105 	else
1106 		return DMA_FROM_DEVICE;
1107 }
1108 
1109 static inline bool gve_is_gqi(struct gve_priv *priv)
1110 {
1111 	return priv->queue_format == GVE_GQI_RDA_FORMAT ||
1112 		priv->queue_format == GVE_GQI_QPL_FORMAT;
1113 }
1114 
1115 static inline u32 gve_num_tx_queues(struct gve_priv *priv)
1116 {
1117 	return priv->tx_cfg.num_queues + priv->num_xdp_queues;
1118 }
1119 
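/* XDP TX queues are placed after the regular TX queues, so XDP queue i maps
 * to overall TX queue index tx_cfg.num_queues + i.
 */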
1120 static inline u32 gve_xdp_tx_queue_id(struct gve_priv *priv, u32 queue_id)
1121 {
1122 	return priv->tx_cfg.num_queues + queue_id;
1123 }
1124 
1125 static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv)
1126 {
1127 	return gve_xdp_tx_queue_id(priv, 0);
1128 }
1129 
1130 static inline bool gve_supports_xdp_xmit(struct gve_priv *priv)
1131 {
1132 	switch (priv->queue_format) {
1133 	case GVE_GQI_QPL_FORMAT:
1134 		return true;
1135 	default:
1136 		return false;
1137 	}
1138 }
1139 
1140 /* gqi napi handler defined in gve_main.c */
1141 int gve_napi_poll(struct napi_struct *napi, int budget);
1142 
1143 /* buffers */
1144 int gve_alloc_page(struct gve_priv *priv, struct device *dev,
1145 		   struct page **page, dma_addr_t *dma,
1146 		   enum dma_data_direction, gfp_t gfp_flags);
1147 void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
1148 		   enum dma_data_direction);
1149 /* qpls */
1150 struct gve_queue_page_list *gve_alloc_queue_page_list(struct gve_priv *priv,
1151 						      u32 id, int pages);
1152 void gve_free_queue_page_list(struct gve_priv *priv,
1153 			      struct gve_queue_page_list *qpl,
1154 			      u32 id);
1155 /* tx handling */
1156 netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
1157 int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
1158 		 u32 flags);
1159 int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
1160 		     void *data, int len, void *frame_p);
1161 void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
1162 bool gve_tx_poll(struct gve_notify_block *block, int budget);
1163 bool gve_xdp_poll(struct gve_notify_block *block, int budget);
1164 int gve_xsk_tx_poll(struct gve_notify_block *block, int budget);
1165 int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
1166 			   struct gve_tx_alloc_rings_cfg *cfg);
1167 void gve_tx_free_rings_gqi(struct gve_priv *priv,
1168 			   struct gve_tx_alloc_rings_cfg *cfg);
1169 void gve_tx_start_ring_gqi(struct gve_priv *priv, int idx);
1170 void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx);
1171 u32 gve_tx_load_event_counter(struct gve_priv *priv,
1172 			      struct gve_tx_ring *tx);
1173 bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
1174 /* rx handling */
1175 void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
1176 int gve_rx_poll(struct gve_notify_block *block, int budget);
1177 bool gve_rx_work_pending(struct gve_rx_ring *rx);
1178 int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
1179 			  struct gve_rx_alloc_rings_cfg *cfg,
1180 			  struct gve_rx_ring *rx,
1181 			  int idx);
1182 void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
1183 			  struct gve_rx_alloc_rings_cfg *cfg);
1184 int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
1185 			   struct gve_rx_alloc_rings_cfg *cfg);
1186 void gve_rx_free_rings_gqi(struct gve_priv *priv,
1187 			   struct gve_rx_alloc_rings_cfg *cfg);
1188 void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx);
1189 void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx);
1190 u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hplit);
1191 bool gve_header_split_supported(const struct gve_priv *priv);
1192 int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split);
1193 /* rx buffer handling */
1194 int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs);
1195 void gve_free_page_dqo(struct gve_priv *priv, struct gve_rx_buf_state_dqo *bs,
1196 		       bool free_page);
1197 struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx);
1198 bool gve_buf_state_is_allocated(struct gve_rx_ring *rx,
1199 				struct gve_rx_buf_state_dqo *buf_state);
1200 void gve_free_buf_state(struct gve_rx_ring *rx,
1201 			struct gve_rx_buf_state_dqo *buf_state);
1202 struct gve_rx_buf_state_dqo *gve_dequeue_buf_state(struct gve_rx_ring *rx,
1203 						   struct gve_index_list *list);
1204 void gve_enqueue_buf_state(struct gve_rx_ring *rx, struct gve_index_list *list,
1205 			   struct gve_rx_buf_state_dqo *buf_state);
1206 struct gve_rx_buf_state_dqo *gve_get_recycled_buf_state(struct gve_rx_ring *rx);
1207 void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
1208 			 struct gve_rx_buf_state_dqo *buf_state);
1209 void gve_free_to_page_pool(struct gve_rx_ring *rx,
1210 			   struct gve_rx_buf_state_dqo *buf_state,
1211 			   bool allow_direct);
1212 int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx,
1213 			   struct gve_rx_buf_state_dqo *buf_state);
1214 void gve_free_qpl_page_dqo(struct gve_rx_buf_state_dqo *buf_state);
1215 void gve_reuse_buffer(struct gve_rx_ring *rx,
1216 		      struct gve_rx_buf_state_dqo *buf_state);
1217 void gve_free_buffer(struct gve_rx_ring *rx,
1218 		     struct gve_rx_buf_state_dqo *buf_state);
1219 int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc);
1220 struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
1221 					  struct gve_rx_ring *rx);
1222 
1223 /* Reset */
1224 void gve_schedule_reset(struct gve_priv *priv);
1225 int gve_reset(struct gve_priv *priv, bool attempt_teardown);
1226 void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
1227 			     struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
1228 			     struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
1229 int gve_adjust_config(struct gve_priv *priv,
1230 		      struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
1231 		      struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
1232 int gve_adjust_queues(struct gve_priv *priv,
1233 		      struct gve_queue_config new_rx_config,
1234 		      struct gve_queue_config new_tx_config,
1235 		      bool reset_rss);
1236 /* flow steering rule */
1237 int gve_get_flow_rule_entry(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
1238 int gve_get_flow_rule_ids(struct gve_priv *priv, struct ethtool_rxnfc *cmd, u32 *rule_locs);
1239 int gve_add_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
1240 int gve_del_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
1241 int gve_flow_rules_reset(struct gve_priv *priv);
1242 /* RSS config */
1243 int gve_init_rss_config(struct gve_priv *priv, u16 num_queues);
1244 /* report stats handling */
1245 void gve_handle_report_stats(struct gve_priv *priv);
1246 /* exported by ethtool.c */
1247 extern const struct ethtool_ops gve_ethtool_ops;
1248 /* needed by ethtool */
1249 extern char gve_driver_name[];
1250 extern const char gve_version_str[];
1251 #endif /* _GVE_H_ */
1252