xref: /linux/drivers/net/ethernet/google/gve/gve.h (revision a339dd699a7aa01bce4b38c8d81def310cf2bca0)
1 /* SPDX-License-Identifier: (GPL-2.0 OR MIT)
2  * Google virtual Ethernet (gve) driver
3  *
4  * Copyright (C) 2015-2024 Google LLC
5  */
6 
7 #ifndef _GVE_H_
8 #define _GVE_H_
9 
10 #include <linux/dma-mapping.h>
11 #include <linux/dmapool.h>
12 #include <linux/ethtool_netlink.h>
13 #include <linux/netdevice.h>
14 #include <linux/net_tstamp.h>
15 #include <linux/pci.h>
16 #include <linux/ptp_clock_kernel.h>
17 #include <linux/u64_stats_sync.h>
18 #include <net/page_pool/helpers.h>
19 #include <net/xdp.h>
20 
21 #include "gve_desc.h"
22 #include "gve_desc_dqo.h"
23 
24 #ifndef PCI_VENDOR_ID_GOOGLE
25 #define PCI_VENDOR_ID_GOOGLE	0x1ae0
26 #endif
27 
28 #define PCI_DEV_ID_GVNIC	0x0042
29 
30 #define GVE_REGISTER_BAR	0
31 #define GVE_DOORBELL_BAR	2
32 
33 /* Driver can alloc up to 2 segments for the header and 2 for the payload. */
34 #define GVE_TX_MAX_IOVEC	4
35 /* 1 for management, 1 for rx, 1 for tx */
36 #define GVE_MIN_MSIX 3
37 
38 /* Numbers of gve tx/rx stats in stats report. */
39 #define GVE_TX_STATS_REPORT_NUM	6
40 #define GVE_RX_STATS_REPORT_NUM	2
41 
42 /* Interval to schedule a stats report update, 20000ms. */
43 #define GVE_STATS_REPORT_TIMER_PERIOD	20000
44 
45 /* Numbers of NIC tx/rx stats in stats report. */
46 #define NIC_TX_STATS_REPORT_NUM	0
47 #define NIC_RX_STATS_REPORT_NUM	4
48 
49 #define GVE_ADMINQ_BUFFER_SIZE 4096
50 
51 #define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))
52 
53 /* PTYPEs are always 10 bits. */
54 #define GVE_NUM_PTYPES	1024
55 
56 /* Default minimum ring size */
57 #define GVE_DEFAULT_MIN_TX_RING_SIZE 256
58 #define GVE_DEFAULT_MIN_RX_RING_SIZE 512
59 
60 #define GVE_DEFAULT_RX_BUFFER_SIZE 2048
61 
62 #define GVE_MAX_RX_BUFFER_SIZE 4096
63 
64 #define GVE_XDP_RX_BUFFER_SIZE_DQO 4096
65 
66 #define GVE_DEFAULT_RX_BUFFER_OFFSET 2048
67 
68 #define GVE_PAGE_POOL_SIZE_MULTIPLIER 4
69 
70 #define GVE_FLOW_RULES_CACHE_SIZE \
71 	(GVE_ADMINQ_BUFFER_SIZE / sizeof(struct gve_adminq_queried_flow_rule))
72 #define GVE_FLOW_RULE_IDS_CACHE_SIZE \
73 	(GVE_ADMINQ_BUFFER_SIZE / sizeof(((struct gve_adminq_queried_flow_rule *)0)->location))
74 
75 #define GVE_RSS_KEY_SIZE	40
76 #define GVE_RSS_INDIR_SIZE	128
77 
78 #define GVE_XDP_ACTIONS 5
79 
80 #define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182
81 
82 #define GVE_DEFAULT_HEADER_BUFFER_SIZE 128
83 
84 #define DQO_QPL_DEFAULT_TX_PAGES 512
85 
86 /* Maximum TSO size supported on DQO */
87 #define GVE_DQO_TX_MAX	0x3FFFF
88 
89 #define GVE_TX_BUF_SHIFT_DQO 11
90 
91 /* 2K buffers for DQO-QPL */
92 #define GVE_TX_BUF_SIZE_DQO BIT(GVE_TX_BUF_SHIFT_DQO)
93 #define GVE_TX_BUFS_PER_PAGE_DQO (PAGE_SIZE >> GVE_TX_BUF_SHIFT_DQO)
94 #define GVE_MAX_TX_BUFS_PER_PKT (DIV_ROUND_UP(GVE_DQO_TX_MAX, GVE_TX_BUF_SIZE_DQO))
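
/* On a system with 4K pages (used here only as a worked example), these
 * evaluate to GVE_TX_BUF_SIZE_DQO = 2048, GVE_TX_BUFS_PER_PAGE_DQO = 2 and
 * GVE_MAX_TX_BUFS_PER_PKT = DIV_ROUND_UP(0x3FFFF, 2048) = 128.
 */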
95 
96 /* If the number of free/recyclable buffers is less than this threshold, the
97  * driver allocs and uses a non-qpl page on the receive path of DQO QPL to free
98  * up buffers.
99  * The value is set big enough to post at least three 64K LRO packets via 2K buffers to the NIC.
100  */
101 #define GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD 96
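
/* Three 64K LRO packets at 2K per buffer need 3 * (64K / 2K) = 96 buffers,
 * matching the threshold value above.
 */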
102 
103 /* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
104 struct gve_rx_desc_queue {
105 	struct gve_rx_desc *desc_ring; /* the descriptor ring */
106 	dma_addr_t bus; /* the bus for the desc_ring */
107 	u8 seqno; /* the next expected seqno for this desc */
108 };
109 
110 /* The page info for a single slot in the RX data queue */
111 struct gve_rx_slot_page_info {
112 	/* netmem is used for DQO RDA mode
113 	 * page is used in all other modes
114 	 */
115 	union {
116 		struct page *page;
117 		netmem_ref netmem;
118 	};
119 	void *page_address;
120 	u32 page_offset; /* offset to write to in page */
121 	unsigned int buf_size;
122 	int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
123 	u16 pad; /* adjustment for rx padding */
124 	u8 can_flip; /* tracks if the networking stack is using the page */
125 };
126 
127 /* A list of pages registered with the device during setup and used by a queue
128  * as buffers
129  */
130 struct gve_queue_page_list {
131 	u32 id; /* unique id */
132 	u32 num_entries;
133 	struct page **pages; /* list of num_entries pages */
134 	dma_addr_t *page_buses; /* the dma addrs of the pages */
135 };
136 
137 /* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */
138 struct gve_rx_data_queue {
139 	union gve_rx_data_slot *data_ring; /* read by NIC */
140 	dma_addr_t data_bus; /* dma mapping of the slots */
141 	struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
142 	struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
143 	u8 raw_addressing; /* use raw_addressing? */
144 };
145 
146 struct gve_priv;
147 
148 /* RX buffer queue for posting buffers to HW.
149  * Each RX (completion) queue has a corresponding buffer queue.
150  */
151 struct gve_rx_buf_queue_dqo {
152 	struct gve_rx_desc_dqo *desc_ring;
153 	dma_addr_t bus;
154 	u32 head; /* Pointer to start cleaning buffers at. */
155 	u32 tail; /* Last posted buffer index + 1 */
156 	u32 mask; /* Mask for indices to the size of the ring */
157 };
158 
159 /* RX completion queue to receive packets from HW. */
160 struct gve_rx_compl_queue_dqo {
161 	struct gve_rx_compl_desc_dqo *desc_ring;
162 	dma_addr_t bus;
163 
164 	/* Number of slots which did not have a buffer posted yet. We should not
165 	 * post more buffers than the queue size to avoid HW overrunning the
166 	 * queue.
167 	 */
168 	int num_free_slots;
169 
170 	/* HW uses a "generation bit" to notify SW of new descriptors. When a
171 	 * descriptor's generation bit is different from the current generation,
172 	 * that descriptor is ready to be consumed by SW.
173 	 */
174 	u8 cur_gen_bit;
175 
176 	/* Pointer into desc_ring where the next completion descriptor will be
177 	 * received.
178 	 */
179 	u32 head;
180 	u32 mask; /* Mask for indices to the size of the ring */
181 };
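
/* A minimal sketch of how the generation bit described above can be used to
 * detect a new completion descriptor. It assumes gve_rx_compl_desc_dqo
 * exposes a one-bit generation field; the real parsing lives in the DQO RX
 * datapath code. A descriptor whose generation bit still equals cur_gen_bit
 * has not been written by HW yet, and the expected generation flips each
 * time head wraps around the ring:
 *
 *	struct gve_rx_compl_desc_dqo *desc = &complq->desc_ring[complq->head];
 *
 *	if (desc->generation == complq->cur_gen_bit)
 *		return false;
 *	complq->head = (complq->head + 1) & complq->mask;
 *	if (complq->head == 0)
 *		complq->cur_gen_bit ^= 1;
 *	return true;
 */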
182 
183 struct gve_header_buf {
184 	u8 *data;
185 	dma_addr_t addr;
186 };
187 
188 /* Stores state for tracking buffers posted to HW */
189 struct gve_rx_buf_state_dqo {
190 	/* The page posted to HW. */
191 	struct gve_rx_slot_page_info page_info;
192 
193 	/* The DMA address corresponding to `page_info`. */
194 	dma_addr_t addr;
195 
196 	/* Last offset into the page when it only had a single reference, at
197 	 * which point every other offset is free to be reused.
198 	 */
199 	u32 last_single_ref_offset;
200 
201 	/* Linked list index to next element in the list, or -1 if none */
202 	s16 next;
203 };
204 
205 /* `head` and `tail` are indices into an array, or -1 if empty. */
206 struct gve_index_list {
207 	s16 head;
208 	s16 tail;
209 };
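
/* A minimal sketch (not the driver's code) of how these index-based lists
 * can be manipulated, using the buf_states array and the `next` field of
 * gve_rx_buf_state_dqo from the RX ring defined below. The driver's own
 * helpers are gve_enqueue_buf_state() and gve_dequeue_buf_state(), declared
 * near the end of this header.
 *
 * Popping the head entry:
 *
 *	s16 idx = list->head;
 *	struct gve_rx_buf_state_dqo *bs = &rx->dqo.buf_states[idx];
 *
 *	list->head = bs->next;
 *	if (list->head == -1)
 *		list->tail = -1;
 *
 * Appending an entry at the tail:
 *
 *	bs->next = -1;
 *	if (list->tail == -1)
 *		list->head = idx;
 *	else
 *		rx->dqo.buf_states[list->tail].next = idx;
 *	list->tail = idx;
 */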
210 
211 /* A single received packet split across multiple buffers may be
212  * reconstructed using the information in this structure.
213  */
214 struct gve_rx_ctx {
215 	/* head and tail of skb chain for the current packet or NULL if none */
216 	struct sk_buff *skb_head;
217 	struct sk_buff *skb_tail;
218 	u32 total_size;
219 	u8 frag_cnt;
220 	bool drop_pkt;
221 };
222 
223 struct gve_rx_cnts {
224 	u32 ok_pkt_bytes;
225 	u16 ok_pkt_cnt;
226 	u16 total_pkt_cnt;
227 	u16 cont_pkt_cnt;
228 	u16 desc_err_pkt_cnt;
229 };
230 
231 /* Contains datapath state used to represent an RX queue. */
232 struct gve_rx_ring {
233 	struct gve_priv *gve;
234 
235 	u16 packet_buffer_size;		/* Size of buffer posted to NIC */
236 	u16 packet_buffer_truesize;	/* Total size of RX buffer */
237 	u16 rx_headroom;
238 
239 	union {
240 		/* GQI fields */
241 		struct {
242 			struct gve_rx_desc_queue desc;
243 			struct gve_rx_data_queue data;
244 
245 			/* threshold for posting new buffs and descs */
246 			u32 db_threshold;
247 
248 			u32 qpl_copy_pool_mask;
249 			u32 qpl_copy_pool_head;
250 			struct gve_rx_slot_page_info *qpl_copy_pool;
251 		};
252 
253 		/* DQO fields. */
254 		struct {
255 			struct gve_rx_buf_queue_dqo bufq;
256 			struct gve_rx_compl_queue_dqo complq;
257 
258 			struct gve_rx_buf_state_dqo *buf_states;
259 			u16 num_buf_states;
260 
261 			/* Linked list of gve_rx_buf_state_dqo. Index into
262 			 * buf_states, or -1 if empty.
263 			 */
264 			s16 free_buf_states;
265 
266 			/* Linked list of gve_rx_buf_state_dqo. Indexes into
267 			 * buf_states, or -1 if empty.
268 			 *
269 			 * This list contains buf_states which are pointing to
270 			 * valid buffers.
271 			 *
272 			 * We use a FIFO here in order to increase the
273 			 * probability that buffers can be reused by increasing
274 			 * the time between usages.
275 			 */
276 			struct gve_index_list recycled_buf_states;
277 
278 			/* Linked list of gve_rx_buf_state_dqo. Indexes into
279 			 * buf_states, or -1 if empty.
280 			 *
281 			 * This list contains buf_states which have buffers
282 			 * which cannot be reused yet.
283 			 */
284 			struct gve_index_list used_buf_states;
285 
286 			/* qpl assigned to this queue */
287 			struct gve_queue_page_list *qpl;
288 
289 			/* index into queue page list */
290 			u32 next_qpl_page_idx;
291 
292 			/* track number of used buffers */
293 			u16 used_buf_states_cnt;
294 
295 			/* Address info of the buffers for header-split */
296 			struct gve_header_buf hdr_bufs;
297 
298 			struct page_pool *page_pool;
299 		} dqo;
300 	};
301 
302 	u64 rbytes; /* free-running bytes received */
303 	u64 rx_hsplit_bytes; /* free-running header bytes received */
304 	u64 rpackets; /* free-running packets received */
305 	u32 cnt; /* free-running total number of completed packets */
306 	u32 fill_cnt; /* free-running total number of descs and buffs posted */
307 	u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
308 	u64 rx_hsplit_pkt; /* free-running packets with headers split */
309 	u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
310 	u64 rx_copied_pkt; /* free-running total number of copied packets */
311 	u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
312 	u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
313 	u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
314 	/* free-running count of unsplit packets due to header buffer overflow or hdr_len being 0 */
315 	u64 rx_hsplit_unsplit_pkt;
316 	u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
317 	u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
318 	u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */
319 	u64 rx_frag_alloc_cnt; /* free-running count of rx page allocations */
320 	u64 xdp_tx_errors;
321 	u64 xdp_redirect_errors;
322 	u64 xdp_alloc_fails;
323 	u64 xdp_actions[GVE_XDP_ACTIONS];
324 	u32 q_num; /* queue index */
325 	u32 ntfy_id; /* notification block index */
326 	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
327 	dma_addr_t q_resources_bus; /* dma address for the queue resources */
328 	struct u64_stats_sync statss; /* sync stats for 32bit archs */
329 
330 	struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. */
331 
332 	/* XDP stuff */
333 	struct xdp_rxq_info xdp_rxq;
334 	struct xdp_rxq_info xsk_rxq;
335 	struct xsk_buff_pool *xsk_pool;
336 	struct page_frag_cache page_cache; /* Page cache to allocate XDP frames */
337 };
338 
339 /* A TX desc ring entry */
340 union gve_tx_desc {
341 	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
342 	struct gve_tx_mtd_desc mtd; /* optional metadata descriptor */
343 	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
344 };
345 
346 /* Tracks the memory in the fifo occupied by a segment of a packet */
347 struct gve_tx_iovec {
348 	u32 iov_offset; /* offset into this segment */
349 	u32 iov_len; /* length */
350 	u32 iov_padding; /* padding associated with this segment */
351 };
352 
353 /* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
354  * ring entry but only used for a pkt_desc not a seg_desc
355  */
356 struct gve_tx_buffer_state {
357 	union {
358 		struct sk_buff *skb; /* skb for this pkt */
359 		struct xdp_frame *xdp_frame; /* xdp_frame */
360 	};
361 	struct {
362 		u16 size; /* size of xmitted xdp pkt */
363 		u8 is_xsk; /* xsk buff */
364 	} xdp;
365 	union {
366 		struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
367 		struct {
368 			DEFINE_DMA_UNMAP_ADDR(dma);
369 			DEFINE_DMA_UNMAP_LEN(len);
370 		};
371 	};
372 };
373 
374 /* A TX buffer - each queue has one */
375 struct gve_tx_fifo {
376 	void *base; /* address of base of FIFO */
377 	u32 size; /* total size */
378 	atomic_t available; /* how much space is still available */
379 	u32 head; /* offset to write at */
380 	struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */
381 };
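
/* A minimal sketch of the bookkeeping implied by the FIFO above, where
 * `bytes` and `offset` are hypothetical locals and the driver's own
 * implementation lives in gve_tx.c: space is reserved from `available`
 * before copying packet bytes at `head`, and `head` advances modulo `size`.
 * The real code also records per-segment padding in gve_tx_iovec.iov_padding:
 *
 *	if (atomic_read(&fifo->available) < bytes)
 *		return -ENOSPC;
 *	atomic_sub(bytes, &fifo->available);
 *	offset = fifo->head;
 *	fifo->head = (fifo->head + bytes) % fifo->size;
 */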
382 
383 /* TX descriptor for DQO format */
384 union gve_tx_desc_dqo {
385 	struct gve_tx_pkt_desc_dqo pkt;
386 	struct gve_tx_tso_context_desc_dqo tso_ctx;
387 	struct gve_tx_general_context_desc_dqo general_ctx;
388 };
389 
390 enum gve_packet_state {
391 	/* Packet is in free list, available to be allocated.
392 	 * This should always be zero since state is not explicitly initialized.
393 	 */
394 	GVE_PACKET_STATE_UNALLOCATED,
395 	/* Packet is expecting a regular data completion or miss completion */
396 	GVE_PACKET_STATE_PENDING_DATA_COMPL,
397 	/* Packet has received a miss completion and is expecting a
398 	 * re-injection completion.
399 	 */
400 	GVE_PACKET_STATE_PENDING_REINJECT_COMPL,
401 	/* No valid completion received within the specified timeout. */
402 	GVE_PACKET_STATE_TIMED_OUT_COMPL,
403 };
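
/* A rough summary of the lifecycle implied by the comments above, with the
 * GVE_PACKET_STATE_ prefixes dropped (the authoritative transitions live in
 * the DQO TX completion handling code):
 *
 *	UNALLOCATED            -> PENDING_DATA_COMPL      packet posted to HW
 *	PENDING_DATA_COMPL     -> UNALLOCATED             data completion received
 *	PENDING_DATA_COMPL     -> PENDING_REINJECT_COMPL  miss completion received
 *	PENDING_REINJECT_COMPL -> UNALLOCATED             re-injection completion received
 *	PENDING_REINJECT_COMPL -> TIMED_OUT_COMPL         timeout expired first
 */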
404 
405 enum gve_tx_pending_packet_dqo_type {
406 	GVE_TX_PENDING_PACKET_DQO_SKB,
407 	GVE_TX_PENDING_PACKET_DQO_XDP_FRAME
408 };
409 
410 struct gve_tx_pending_packet_dqo {
411 	union {
412 		struct sk_buff *skb;
413 		struct xdp_frame *xdpf;
414 	};
415 
416 	/* The 0th element corresponds to the linear portion of `skb` and should be
417 	 * unmapped with `dma_unmap_single`.
418 	 *
419 	 * All others correspond to `skb`'s frags and should be unmapped with
420 	 * `dma_unmap_page`.
421 	 */
422 	union {
423 		struct {
424 			DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
425 			DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
426 		};
427 		s16 tx_qpl_buf_ids[GVE_MAX_TX_BUFS_PER_PKT];
428 	};
429 
430 	u16 num_bufs;
431 
432 	/* Linked list index to next element in the list, or -1 if none */
433 	s16 next;
434 
435 	/* Linked list index to prev element in the list, or -1 if none.
436 	 * Used for tracking either outstanding miss completions or prematurely
437 	 * freed packets.
438 	 */
439 	s16 prev;
440 
441 	/* Identifies the current state of the packet as defined in
442 	 * `enum gve_packet_state`.
443 	 */
444 	u8 state : 2;
445 
446 	/* gve_tx_pending_packet_dqo_type */
447 	u8 type : 1;
448 
449 	/* If packet is an outstanding miss completion, then the packet is
450 	 * freed if the corresponding re-injection completion is not received
451 	 * before kernel jiffies exceeds timeout_jiffies.
452 	 */
453 	unsigned long timeout_jiffies;
454 };
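
/* A minimal sketch of the timeout check described for timeout_jiffies above,
 * using the standard time_after() helper from <linux/jiffies.h>.
 * handle_timed_out_packet() is a hypothetical placeholder for the real
 * completion-path logic, which frees the packet and moves it onto the ring's
 * timed_out_completions list:
 *
 *	if (pending_packet->state == GVE_PACKET_STATE_PENDING_REINJECT_COMPL &&
 *	    time_after(jiffies, pending_packet->timeout_jiffies))
 *		handle_timed_out_packet(tx, pending_packet);
 */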
455 
456 /* Contains datapath state used to represent a TX queue. */
457 struct gve_tx_ring {
458 	/* Cacheline 0 -- Accessed & dirtied during transmit */
459 	union {
460 		/* GQI fields */
461 		struct {
462 			struct gve_tx_fifo tx_fifo;
463 			u32 req; /* driver tracked head pointer */
464 			u32 done; /* driver tracked tail pointer */
465 		};
466 
467 		/* DQO fields. */
468 		struct {
469 			/* Spinlock for XDP tx traffic */
470 			spinlock_t xdp_lock;
471 
472 			/* Linked list of gve_tx_pending_packet_dqo. Index into
473 			 * pending_packets, or -1 if empty.
474 			 *
475 			 * This is a consumer list owned by the TX path. When it
476 			 * runs out, the producer list is stolen from the
477 			 * completion handling path
478 			 * (dqo_compl.free_pending_packets).
479 			 */
480 			s16 free_pending_packets;
481 
482 			/* Cached value of `dqo_compl.hw_tx_head` */
483 			u32 head;
484 			u32 tail; /* Last posted buffer index + 1 */
485 
486 			/* Index of the last descriptor with "report event" bit
487 			 * set.
488 			 */
489 			u32 last_re_idx;
490 
491 			/* free running number of packet buf descriptors posted */
492 			u16 posted_packet_desc_cnt;
493 			/* free running number of packet buf descriptors completed */
494 			u16 completed_packet_desc_cnt;
495 
496 			/* QPL fields */
497 			struct {
498 			       /* Linked list of gve_tx_buf_dqo. Index into
499 				* tx_qpl_buf_next, or -1 if empty.
500 				*
501 				* This is a consumer list owned by the TX path. When it
502 				* runs out, the producer list is stolen from the
503 				* completion handling path
504 				* (dqo_compl.free_tx_qpl_buf_head).
505 				*/
506 				s16 free_tx_qpl_buf_head;
507 
508 			       /* Free running count of the number of QPL tx buffers
509 				* allocated
510 				*/
511 				u32 alloc_tx_qpl_buf_cnt;
512 
513 				/* Cached value of `dqo_compl.free_tx_qpl_buf_cnt` */
514 				u32 free_tx_qpl_buf_cnt;
515 			};
516 		} dqo_tx;
517 	};
518 
519 	/* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */
520 	union {
521 		/* GQI fields */
522 		struct {
523 			/* Spinlock for when cleanup in progress */
524 			spinlock_t clean_lock;
525 			/* Spinlock for XDP tx traffic */
526 			spinlock_t xdp_lock;
527 		};
528 
529 		/* DQO fields. */
530 		struct {
531 			u32 head; /* Last read on compl_desc */
532 
533 			/* Tracks the current gen bit of compl_q */
534 			u8 cur_gen_bit;
535 
536 			/* Linked list of gve_tx_pending_packet_dqo. Index into
537 			 * pending_packets, or -1 if empty.
538 			 *
539 			 * This is the producer list, owned by the completion
540 			 * handling path. When the consumer list
541 			 * (dqo_tx.free_pending_packets) runs out, this list
542 			 * will be stolen.
543 			 */
544 			atomic_t free_pending_packets;
545 
546 			/* Last TX ring index fetched by HW */
547 			atomic_t hw_tx_head;
548 
549 			/* List to track pending packets which received a miss
550 			 * completion but not a corresponding reinjection.
551 			 */
552 			struct gve_index_list miss_completions;
553 
554 			/* List to track pending packets that were completed
555 			 * before receiving a valid completion because they
556 			 * reached a specified timeout.
557 			 */
558 			struct gve_index_list timed_out_completions;
559 
560 			/* QPL fields */
561 			struct {
562 				/* Linked list of gve_tx_buf_dqo. Index into
563 				 * tx_qpl_buf_next, or -1 if empty.
564 				 *
565 				 * This is the producer list, owned by the completion
566 				 * handling path. When the consumer list
567 				 * (dqo_tx.free_tx_qpl_buf_head) runs out, this list
568 				 * will be stolen.
569 				 */
570 				atomic_t free_tx_qpl_buf_head;
571 
572 				/* Free running count of the number of tx buffers
573 				 * freed
574 				 */
575 				atomic_t free_tx_qpl_buf_cnt;
576 			};
577 		} dqo_compl;
578 	} ____cacheline_aligned;
579 	u64 pkt_done; /* free-running - total packets completed */
580 	u64 bytes_done; /* free-running - total bytes completed */
581 	u64 dropped_pkt; /* free-running - total packets dropped */
582 	u64 dma_mapping_error; /* count of dma mapping errors */
583 
584 	/* Cacheline 2 -- Read-mostly fields */
585 	union {
586 		/* GQI fields */
587 		struct {
588 			union gve_tx_desc *desc;
589 
590 			/* Maps 1:1 to a desc */
591 			struct gve_tx_buffer_state *info;
592 		};
593 
594 		/* DQO fields. */
595 		struct {
596 			union gve_tx_desc_dqo *tx_ring;
597 			struct gve_tx_compl_desc *compl_ring;
598 
599 			struct gve_tx_pending_packet_dqo *pending_packets;
600 			s16 num_pending_packets;
601 
602 			u32 complq_mask; /* complq size is complq_mask + 1 */
603 
604 			/* QPL fields */
605 			struct {
606 				/* qpl assigned to this queue */
607 				struct gve_queue_page_list *qpl;
608 
609 				/* Each QPL page is divided into TX bounce buffers
610 				 * of size GVE_TX_BUF_SIZE_DQO. tx_qpl_buf_next is
611 				 * an array to manage linked lists of TX buffers.
612 				 * An entry j at index i implies that the j'th buffer
613 				 * is next on the list after buffer i.
614 				 */
615 				s16 *tx_qpl_buf_next;
616 				u32 num_tx_qpl_bufs;
617 			};
618 		} dqo;
619 	} ____cacheline_aligned;
620 	struct netdev_queue *netdev_txq;
621 	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
622 	struct device *dev;
623 	u32 mask; /* masks req and done down to queue size */
624 	u8 raw_addressing; /* use raw_addressing? */
625 
626 	/* Slow-path fields */
627 	u32 q_num ____cacheline_aligned; /* queue idx */
628 	u32 stop_queue; /* count of queue stops */
629 	u32 wake_queue; /* count of queue wakes */
630 	u32 queue_timeout; /* count of queue timeouts */
631 	u32 ntfy_id; /* notification block index */
632 	u32 last_kick_msec; /* Last time the queue was kicked */
633 	dma_addr_t bus; /* dma address of the descr ring */
634 	dma_addr_t q_resources_bus; /* dma address of the queue resources */
635 	dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
636 	struct u64_stats_sync statss; /* sync stats for 32bit archs */
637 	struct xsk_buff_pool *xsk_pool;
638 	u64 xdp_xsk_sent;
639 	u64 xdp_xmit;
640 	u64 xdp_xmit_errors;
641 } ____cacheline_aligned;
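
/* A minimal sketch (not the driver's code) of popping a TX bounce buffer off
 * the tx_qpl_buf_next free list described above and locating it inside the
 * queue page list, following the layout implied by GVE_TX_BUFS_PER_PAGE_DQO
 * and GVE_TX_BUF_SHIFT_DQO:
 *
 *	s16 buf_id = tx->dqo_tx.free_tx_qpl_buf_head;
 *	u32 page_idx = buf_id / GVE_TX_BUFS_PER_PAGE_DQO;
 *	u32 offset = (buf_id % GVE_TX_BUFS_PER_PAGE_DQO) << GVE_TX_BUF_SHIFT_DQO;
 *	void *buf = page_address(tx->dqo.qpl->pages[page_idx]) + offset;
 *
 *	tx->dqo_tx.free_tx_qpl_buf_head = tx->dqo.tx_qpl_buf_next[buf_id];
 */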
642 
643 /* Wraps the info for one irq including the napi struct and the queues
644  * associated with that irq.
645  */
646 struct gve_notify_block {
647 	__be32 *irq_db_index; /* pointer to idx into Bar2 */
648 	char name[IFNAMSIZ + 16]; /* name registered with the kernel */
649 	struct napi_struct napi; /* kernel napi struct for this block */
650 	struct gve_priv *priv;
651 	struct gve_tx_ring *tx; /* tx rings on this block */
652 	struct gve_rx_ring *rx; /* rx rings on this block */
653 	u32 irq;
654 };
655 
656 /* Tracks allowed and current rx queue settings */
657 struct gve_rx_queue_config {
658 	u16 max_queues;
659 	u16 num_queues;
660 	u16 packet_buffer_size;
661 };
662 
663 /* Tracks allowed and current tx queue settings */
664 struct gve_tx_queue_config {
665 	u16 max_queues;
666 	u16 num_queues; /* number of TX queues, excluding XDP queues */
667 	u16 num_xdp_queues;
668 };
669 
670 /* Tracks the available and used qpl IDs */
671 struct gve_qpl_config {
672 	u32 qpl_map_size; /* map memory size */
673 	unsigned long *qpl_id_map; /* bitmap of used qpl ids */
674 };
675 
676 struct gve_irq_db {
677 	__be32 index;
678 } ____cacheline_aligned;
679 
680 struct gve_ptype {
681 	u8 l3_type;  /* `gve_l3_type` in gve_adminq.h */
682 	u8 l4_type;  /* `gve_l4_type` in gve_adminq.h */
683 };
684 
685 struct gve_ptype_lut {
686 	struct gve_ptype ptypes[GVE_NUM_PTYPES];
687 };
688 
689 /* Parameters for allocating resources for tx queues */
690 struct gve_tx_alloc_rings_cfg {
691 	struct gve_tx_queue_config *qcfg;
692 
693 	u16 num_xdp_rings;
694 
695 	u16 ring_size;
696 	bool raw_addressing;
697 
698 	/* Allocated resources are returned here */
699 	struct gve_tx_ring *tx;
700 };
701 
702 /* Parameters for allocating resources for rx queues */
703 struct gve_rx_alloc_rings_cfg {
704 	/* tx config is also needed to determine QPL ids */
705 	struct gve_rx_queue_config *qcfg_rx;
706 	struct gve_tx_queue_config *qcfg_tx;
707 
708 	u16 ring_size;
709 	u16 packet_buffer_size;
710 	bool raw_addressing;
711 	bool enable_header_split;
712 	bool reset_rss;
713 	bool xdp;
714 
715 	/* Allocated resources are returned here */
716 	struct gve_rx_ring *rx;
717 };
718 
719 /* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
720  * when the entire configure_device_resources command is zeroed out and the
721  * queue_format is not specified.
722  */
723 enum gve_queue_format {
724 	GVE_QUEUE_FORMAT_UNSPECIFIED	= 0x0,
725 	GVE_GQI_RDA_FORMAT		= 0x1,
726 	GVE_GQI_QPL_FORMAT		= 0x2,
727 	GVE_DQO_RDA_FORMAT		= 0x3,
728 	GVE_DQO_QPL_FORMAT		= 0x4,
729 };
730 
731 struct gve_flow_spec {
732 	__be32 src_ip[4];
733 	__be32 dst_ip[4];
734 	union {
735 		struct {
736 			__be16 src_port;
737 			__be16 dst_port;
738 		};
739 		__be32 spi;
740 	};
741 	union {
742 		u8 tos;
743 		u8 tclass;
744 	};
745 };
746 
747 struct gve_flow_rule {
748 	u32 location;
749 	u16 flow_type;
750 	u16 action;
751 	struct gve_flow_spec key;
752 	struct gve_flow_spec mask;
753 };
754 
755 struct gve_flow_rules_cache {
756 	bool rules_cache_synced; /* False if the driver's rules_cache is outdated */
757 	struct gve_adminq_queried_flow_rule *rules_cache;
758 	__be32 *rule_ids_cache;
759 	/* The total number of queried rules that are stored in the caches */
760 	u32 rules_cache_num;
761 	u32 rule_ids_cache_num;
762 };
763 
764 struct gve_rss_config {
765 	u8 *hash_key;
766 	u32 *hash_lut;
767 };
768 
769 struct gve_ptp {
770 	struct ptp_clock_info info;
771 	struct ptp_clock *clock;
772 	struct gve_priv *priv;
773 };
774 
775 struct gve_priv {
776 	struct net_device *dev;
777 	struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
778 	struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
779 	struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
780 	struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */
781 	dma_addr_t irq_db_indices_bus;
782 	struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */
783 	char mgmt_msix_name[IFNAMSIZ + 16];
784 	u32 mgmt_msix_idx;
785 	__be32 *counter_array; /* array of num_event_counters */
786 	dma_addr_t counter_array_bus;
787 
788 	u16 num_event_counters;
789 	u16 tx_desc_cnt; /* num desc per ring */
790 	u16 rx_desc_cnt; /* num desc per ring */
791 	u16 max_tx_desc_cnt;
792 	u16 max_rx_desc_cnt;
793 	u16 min_tx_desc_cnt;
794 	u16 min_rx_desc_cnt;
795 	bool modify_ring_size_enabled;
796 	bool default_min_ring_size;
797 	u16 tx_pages_per_qpl; /* Suggested number of pages per qpl for TX queues by NIC */
798 	u64 max_registered_pages;
799 	u64 num_registered_pages; /* num pages registered with NIC */
800 	struct bpf_prog *xdp_prog; /* XDP BPF program */
801 	u32 rx_copybreak; /* copy packets smaller than this */
802 	u16 default_num_queues; /* default num queues to set up */
803 
804 	struct gve_tx_queue_config tx_cfg;
805 	struct gve_rx_queue_config rx_cfg;
806 	u32 num_ntfy_blks; /* split between TX and RX so must be even */
807 	int numa_node;
808 
809 	struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
810 	__be32 __iomem *db_bar2; /* "array" of doorbells */
811 	u32 msg_enable;	/* level for netif* netdev print macros	*/
812 	struct pci_dev *pdev;
813 
814 	/* metrics */
815 	u32 tx_timeo_cnt;
816 
817 	/* Admin queue - see gve_adminq.h*/
818 	union gve_adminq_command *adminq;
819 	dma_addr_t adminq_bus_addr;
820 	struct dma_pool *adminq_pool;
821 	struct mutex adminq_lock; /* Protects adminq command execution */
822 	u32 adminq_mask; /* masks prod_cnt to adminq size */
823 	u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
824 	u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
825 	u32 adminq_timeouts; /* free-running count of AQ cmds timeouts */
826 	/* free-running count of per AQ cmd executed */
827 	u32 adminq_describe_device_cnt;
828 	u32 adminq_cfg_device_resources_cnt;
829 	u32 adminq_register_page_list_cnt;
830 	u32 adminq_unregister_page_list_cnt;
831 	u32 adminq_create_tx_queue_cnt;
832 	u32 adminq_create_rx_queue_cnt;
833 	u32 adminq_destroy_tx_queue_cnt;
834 	u32 adminq_destroy_rx_queue_cnt;
835 	u32 adminq_dcfg_device_resources_cnt;
836 	u32 adminq_set_driver_parameter_cnt;
837 	u32 adminq_report_stats_cnt;
838 	u32 adminq_report_link_speed_cnt;
839 	u32 adminq_report_nic_timestamp_cnt;
840 	u32 adminq_get_ptype_map_cnt;
841 	u32 adminq_verify_driver_compatibility_cnt;
842 	u32 adminq_query_flow_rules_cnt;
843 	u32 adminq_cfg_flow_rule_cnt;
844 	u32 adminq_cfg_rss_cnt;
845 	u32 adminq_query_rss_cnt;
846 
847 	/* Global stats */
848 	u32 interface_up_cnt; /* count of times interface turned up since last reset */
849 	u32 interface_down_cnt; /* count of times interface turned down since last reset */
850 	u32 reset_cnt; /* count of reset */
851 	u32 page_alloc_fail; /* count of page alloc fails */
852 	u32 dma_mapping_error; /* count of dma mapping errors */
853 	u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
854 	u32 suspend_cnt; /* count of times suspended */
855 	u32 resume_cnt; /* count of times resumed */
856 	struct workqueue_struct *gve_wq;
857 	struct work_struct service_task;
858 	struct work_struct stats_report_task;
859 	unsigned long service_task_flags;
860 	unsigned long state_flags;
861 
862 	struct gve_stats_report *stats_report;
863 	u64 stats_report_len;
864 	dma_addr_t stats_report_bus; /* dma address for the stats report */
865 	unsigned long ethtool_flags;
866 
867 	unsigned long stats_report_timer_period;
868 	struct timer_list stats_report_timer;
869 
870 	/* Gvnic device link speed from hypervisor. */
871 	u64 link_speed;
872 	bool up_before_suspend; /* True if dev was up before suspend */
873 
874 	struct gve_ptype_lut *ptype_lut_dqo;
875 
876 	/* Must be a power of two. */
877 	u16 max_rx_buffer_size; /* device limit */
878 
879 	enum gve_queue_format queue_format;
880 
881 	/* Interrupt coalescing settings */
882 	u32 tx_coalesce_usecs;
883 	u32 rx_coalesce_usecs;
884 
885 	u16 header_buf_size; /* device configured, header-split supported if non-zero */
886 	bool header_split_enabled; /* True if the header split is enabled by the user */
887 
888 	u32 max_flow_rules;
889 	u32 num_flow_rules;
890 
891 	struct gve_flow_rules_cache flow_rules_cache;
892 
893 	u16 rss_key_size;
894 	u16 rss_lut_size;
895 	bool cache_rss_config;
896 	struct gve_rss_config rss_config;
897 
898 	/* True if the device supports reading the nic clock */
899 	bool nic_timestamp_supported;
900 	struct gve_ptp *ptp;
901 	struct kernel_hwtstamp_config ts_config;
902 	struct gve_nic_ts_report *nic_ts_report;
903 	dma_addr_t nic_ts_report_bus;
904 	u64 last_sync_nic_counter; /* Clock counter from last NIC TS report */
905 };
906 
907 enum gve_service_task_flags_bit {
908 	GVE_PRIV_FLAGS_DO_RESET			= 1,
909 	GVE_PRIV_FLAGS_RESET_IN_PROGRESS	= 2,
910 	GVE_PRIV_FLAGS_PROBE_IN_PROGRESS	= 3,
911 	GVE_PRIV_FLAGS_DO_REPORT_STATS = 4,
912 };
913 
914 enum gve_state_flags_bit {
915 	GVE_PRIV_FLAGS_ADMIN_QUEUE_OK		= 1,
916 	GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK	= 2,
917 	GVE_PRIV_FLAGS_DEVICE_RINGS_OK		= 3,
918 	GVE_PRIV_FLAGS_NAPI_ENABLED		= 4,
919 };
920 
921 enum gve_ethtool_flags_bit {
922 	GVE_PRIV_FLAGS_REPORT_STATS		= 0,
923 };
924 
925 static inline bool gve_get_do_reset(struct gve_priv *priv)
926 {
927 	return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
928 }
929 
930 static inline void gve_set_do_reset(struct gve_priv *priv)
931 {
932 	set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
933 }
934 
935 static inline void gve_clear_do_reset(struct gve_priv *priv)
936 {
937 	clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
938 }
939 
940 static inline bool gve_get_reset_in_progress(struct gve_priv *priv)
941 {
942 	return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS,
943 			&priv->service_task_flags);
944 }
945 
946 static inline void gve_set_reset_in_progress(struct gve_priv *priv)
947 {
948 	set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
949 }
950 
951 static inline void gve_clear_reset_in_progress(struct gve_priv *priv)
952 {
953 	clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
954 }
955 
956 static inline bool gve_get_probe_in_progress(struct gve_priv *priv)
957 {
958 	return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS,
959 			&priv->service_task_flags);
960 }
961 
962 static inline void gve_set_probe_in_progress(struct gve_priv *priv)
963 {
964 	set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
965 }
966 
967 static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
968 {
969 	clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
970 }
971 
972 static inline bool gve_get_do_report_stats(struct gve_priv *priv)
973 {
974 	return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS,
975 			&priv->service_task_flags);
976 }
977 
978 static inline void gve_set_do_report_stats(struct gve_priv *priv)
979 {
980 	set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
981 }
982 
983 static inline void gve_clear_do_report_stats(struct gve_priv *priv)
984 {
985 	clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
986 }
987 
988 static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
989 {
990 	return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
991 }
992 
993 static inline void gve_set_admin_queue_ok(struct gve_priv *priv)
994 {
995 	set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
996 }
997 
998 static inline void gve_clear_admin_queue_ok(struct gve_priv *priv)
999 {
1000 	clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
1001 }
1002 
1003 static inline bool gve_get_device_resources_ok(struct gve_priv *priv)
1004 {
1005 	return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
1006 }
1007 
1008 static inline void gve_set_device_resources_ok(struct gve_priv *priv)
1009 {
1010 	set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
1011 }
1012 
1013 static inline void gve_clear_device_resources_ok(struct gve_priv *priv)
1014 {
1015 	clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
1016 }
1017 
1018 static inline bool gve_get_device_rings_ok(struct gve_priv *priv)
1019 {
1020 	return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
1021 }
1022 
1023 static inline void gve_set_device_rings_ok(struct gve_priv *priv)
1024 {
1025 	set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
1026 }
1027 
1028 static inline void gve_clear_device_rings_ok(struct gve_priv *priv)
1029 {
1030 	clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
1031 }
1032 
1033 static inline bool gve_get_napi_enabled(struct gve_priv *priv)
1034 {
1035 	return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
1036 }
1037 
1038 static inline void gve_set_napi_enabled(struct gve_priv *priv)
1039 {
1040 	set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
1041 }
1042 
1043 static inline void gve_clear_napi_enabled(struct gve_priv *priv)
1044 {
1045 	clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
1046 }
1047 
1048 static inline bool gve_get_report_stats(struct gve_priv *priv)
1049 {
1050 	return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
1051 }
1052 
1053 static inline void gve_clear_report_stats(struct gve_priv *priv)
1054 {
1055 	clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
1056 }
1057 
1058 /* Returns the address of the ntfy_block's irq doorbell
1059  */
1060 static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
1061 					       struct gve_notify_block *block)
1062 {
1063 	return &priv->db_bar2[be32_to_cpu(*block->irq_db_index)];
1064 }
1065 
1066 /* Returns the index into ntfy_blocks of the given tx ring's block
1067  */
1068 static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
1069 {
1070 	return queue_idx;
1071 }
1072 
1073 /* Returns the index into ntfy_blocks of the given rx ring's block
1074  */
1075 static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
1076 {
1077 	return (priv->num_ntfy_blks / 2) + queue_idx;
1078 }
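
/* For example, with num_ntfy_blks == 32, TX queue 3 maps to notify block 3
 * and RX queue 3 maps to notify block 16 + 3 = 19.
 */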
1079 
1080 static inline bool gve_is_qpl(struct gve_priv *priv)
1081 {
1082 	return priv->queue_format == GVE_GQI_QPL_FORMAT ||
1083 		priv->queue_format == GVE_DQO_QPL_FORMAT;
1084 }
1085 
1086 /* Returns the number of tx queue page lists */
1087 static inline u32 gve_num_tx_qpls(const struct gve_tx_queue_config *tx_cfg,
1088 				  bool is_qpl)
1089 {
1090 	if (!is_qpl)
1091 		return 0;
1092 	return tx_cfg->num_queues + tx_cfg->num_xdp_queues;
1093 }
1094 
1095 /* Returns the number of rx queue page lists */
1096 static inline u32 gve_num_rx_qpls(const struct gve_rx_queue_config *rx_cfg,
1097 				  bool is_qpl)
1098 {
1099 	if (!is_qpl)
1100 		return 0;
1101 	return rx_cfg->num_queues;
1102 }
1103 
1104 static inline u32 gve_tx_qpl_id(struct gve_priv *priv, int tx_qid)
1105 {
1106 	return tx_qid;
1107 }
1108 
1109 static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
1110 {
1111 	return priv->tx_cfg.max_queues + rx_qid;
1112 }
1113 
1114 static inline u32 gve_get_rx_qpl_id(const struct gve_tx_queue_config *tx_cfg,
1115 				    int rx_qid)
1116 {
1117 	return tx_cfg->max_queues + rx_qid;
1118 }
1119 
1120 static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
1121 {
1122 	return gve_tx_qpl_id(priv, 0);
1123 }
1124 
1125 static inline u32 gve_rx_start_qpl_id(const struct gve_tx_queue_config *tx_cfg)
1126 {
1127 	return gve_get_rx_qpl_id(tx_cfg, 0);
1128 }
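
/* For example, with tx_cfg.max_queues == 16, TX queue 3 uses QPL id 3 while
 * RX queue 3 uses QPL id 16 + 3 = 19.
 */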
1129 
1130 static inline u32 gve_get_rx_pages_per_qpl_dqo(u32 rx_desc_cnt)
1131 {
1132 	/* For DQO, the page count should be larger than the ring size to allow
1133 	 * for out-of-order completions. Set it to twice the ring size.
1134 	 */
1135 	return 2 * rx_desc_cnt;
1136 }
1137 
1138 /* Returns the correct dma direction for tx and rx qpls */
1139 static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
1140 						      int id)
1141 {
1142 	if (id < gve_rx_start_qpl_id(&priv->tx_cfg))
1143 		return DMA_TO_DEVICE;
1144 	else
1145 		return DMA_FROM_DEVICE;
1146 }
1147 
1148 static inline bool gve_is_gqi(struct gve_priv *priv)
1149 {
1150 	return priv->queue_format == GVE_GQI_RDA_FORMAT ||
1151 		priv->queue_format == GVE_GQI_QPL_FORMAT;
1152 }
1153 
1154 static inline u32 gve_num_tx_queues(struct gve_priv *priv)
1155 {
1156 	return priv->tx_cfg.num_queues + priv->tx_cfg.num_xdp_queues;
1157 }
1158 
1159 static inline u32 gve_xdp_tx_queue_id(struct gve_priv *priv, u32 queue_id)
1160 {
1161 	return priv->tx_cfg.num_queues + queue_id;
1162 }
1163 
1164 static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv)
1165 {
1166 	return gve_xdp_tx_queue_id(priv, 0);
1167 }
1168 
1169 static inline bool gve_supports_xdp_xmit(struct gve_priv *priv)
1170 {
1171 	switch (priv->queue_format) {
1172 	case GVE_GQI_QPL_FORMAT:
1173 	case GVE_DQO_RDA_FORMAT:
1174 		return true;
1175 	default:
1176 		return false;
1177 	}
1178 }
1179 
1180 /* gqi napi handler defined in gve_main.c */
1181 int gve_napi_poll(struct napi_struct *napi, int budget);
1182 
1183 /* buffers */
1184 int gve_alloc_page(struct gve_priv *priv, struct device *dev,
1185 		   struct page **page, dma_addr_t *dma,
1186 		   enum dma_data_direction, gfp_t gfp_flags);
1187 void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
1188 		   enum dma_data_direction);
1189 /* qpls */
1190 struct gve_queue_page_list *gve_alloc_queue_page_list(struct gve_priv *priv,
1191 						      u32 id, int pages);
1192 void gve_free_queue_page_list(struct gve_priv *priv,
1193 			      struct gve_queue_page_list *qpl,
1194 			      u32 id);
1195 /* tx handling */
1196 netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
1197 int gve_xdp_xmit_gqi(struct net_device *dev, int n, struct xdp_frame **frames,
1198 		     u32 flags);
1199 int gve_xdp_xmit_dqo(struct net_device *dev, int n, struct xdp_frame **frames,
1200 		     u32 flags);
1201 int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
1202 		     void *data, int len, void *frame_p);
1203 void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
1204 int gve_xdp_xmit_one_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
1205 			 struct xdp_frame *xdpf);
1206 bool gve_tx_poll(struct gve_notify_block *block, int budget);
1207 bool gve_xdp_poll(struct gve_notify_block *block, int budget);
1208 int gve_xsk_tx_poll(struct gve_notify_block *block, int budget);
1209 int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
1210 			   struct gve_tx_alloc_rings_cfg *cfg);
1211 void gve_tx_free_rings_gqi(struct gve_priv *priv,
1212 			   struct gve_tx_alloc_rings_cfg *cfg);
1213 void gve_tx_start_ring_gqi(struct gve_priv *priv, int idx);
1214 void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx);
1215 u32 gve_tx_load_event_counter(struct gve_priv *priv,
1216 			      struct gve_tx_ring *tx);
1217 bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
1218 /* rx handling */
1219 void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
1220 int gve_rx_poll(struct gve_notify_block *block, int budget);
1221 bool gve_rx_work_pending(struct gve_rx_ring *rx);
1222 int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
1223 			  struct gve_rx_alloc_rings_cfg *cfg,
1224 			  struct gve_rx_ring *rx,
1225 			  int idx);
1226 void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
1227 			  struct gve_rx_alloc_rings_cfg *cfg);
1228 int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
1229 			   struct gve_rx_alloc_rings_cfg *cfg);
1230 void gve_rx_free_rings_gqi(struct gve_priv *priv,
1231 			   struct gve_rx_alloc_rings_cfg *cfg);
1232 void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx);
1233 void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx);
1234 u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hplit);
1235 bool gve_header_split_supported(const struct gve_priv *priv);
1236 int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split);
1237 /* rx buffer handling */
1238 int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs);
1239 void gve_free_page_dqo(struct gve_priv *priv, struct gve_rx_buf_state_dqo *bs,
1240 		       bool free_page);
1241 struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx);
1242 bool gve_buf_state_is_allocated(struct gve_rx_ring *rx,
1243 				struct gve_rx_buf_state_dqo *buf_state);
1244 void gve_free_buf_state(struct gve_rx_ring *rx,
1245 			struct gve_rx_buf_state_dqo *buf_state);
1246 struct gve_rx_buf_state_dqo *gve_dequeue_buf_state(struct gve_rx_ring *rx,
1247 						   struct gve_index_list *list);
1248 void gve_enqueue_buf_state(struct gve_rx_ring *rx, struct gve_index_list *list,
1249 			   struct gve_rx_buf_state_dqo *buf_state);
1250 struct gve_rx_buf_state_dqo *gve_get_recycled_buf_state(struct gve_rx_ring *rx);
1251 void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
1252 			 struct gve_rx_buf_state_dqo *buf_state);
1253 void gve_free_to_page_pool(struct gve_rx_ring *rx,
1254 			   struct gve_rx_buf_state_dqo *buf_state,
1255 			   bool allow_direct);
1256 int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx,
1257 			   struct gve_rx_buf_state_dqo *buf_state);
1258 void gve_free_qpl_page_dqo(struct gve_rx_buf_state_dqo *buf_state);
1259 void gve_reuse_buffer(struct gve_rx_ring *rx,
1260 		      struct gve_rx_buf_state_dqo *buf_state);
1261 void gve_free_buffer(struct gve_rx_ring *rx,
1262 		     struct gve_rx_buf_state_dqo *buf_state);
1263 int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc);
1264 struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
1265 					  struct gve_rx_ring *rx,
1266 					  bool xdp);
1267 
1268 /* Reset */
1269 void gve_schedule_reset(struct gve_priv *priv);
1270 int gve_reset(struct gve_priv *priv, bool attempt_teardown);
1271 void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
1272 			     struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
1273 			     struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
1274 int gve_adjust_config(struct gve_priv *priv,
1275 		      struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
1276 		      struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
1277 int gve_adjust_queues(struct gve_priv *priv,
1278 		      struct gve_rx_queue_config new_rx_config,
1279 		      struct gve_tx_queue_config new_tx_config,
1280 		      bool reset_rss);
1281 /* flow steering rule */
1282 int gve_get_flow_rule_entry(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
1283 int gve_get_flow_rule_ids(struct gve_priv *priv, struct ethtool_rxnfc *cmd, u32 *rule_locs);
1284 int gve_add_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
1285 int gve_del_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
1286 int gve_flow_rules_reset(struct gve_priv *priv);
1287 /* RSS config */
1288 int gve_init_rss_config(struct gve_priv *priv, u16 num_queues);
1289 /* PTP and timestamping */
1290 #if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
1291 int gve_clock_nic_ts_read(struct gve_priv *priv);
1292 int gve_init_clock(struct gve_priv *priv);
1293 void gve_teardown_clock(struct gve_priv *priv);
1294 #else /* CONFIG_PTP_1588_CLOCK */
1295 static inline int gve_clock_nic_ts_read(struct gve_priv *priv)
1296 {
1297 	return -EOPNOTSUPP;
1298 }
1299 
1300 static inline int gve_init_clock(struct gve_priv *priv)
1301 {
1302 	return 0;
1303 }
1304 
1305 static inline void gve_teardown_clock(struct gve_priv *priv) { }
1306 #endif /* CONFIG_PTP_1588_CLOCK */
1307 /* report stats handling */
1308 void gve_handle_report_stats(struct gve_priv *priv);
1309 /* exported by ethtool.c */
1310 extern const struct ethtool_ops gve_ethtool_ops;
1311 /* needed by ethtool */
1312 extern char gve_driver_name[];
1313 extern const char gve_version_str[];
1314 #endif /* _GVE_H_ */
1315