xref: /linux/drivers/net/ethernet/google/gve/gve.h (revision 9410645520e9b820069761f3450ef6661418e279)
1 /* SPDX-License-Identifier: (GPL-2.0 OR MIT)
2  * Google virtual Ethernet (gve) driver
3  *
4  * Copyright (C) 2015-2024 Google LLC
5  */
6 
7 #ifndef _GVE_H_
8 #define _GVE_H_
9 
10 #include <linux/dma-mapping.h>
11 #include <linux/dmapool.h>
12 #include <linux/ethtool_netlink.h>
13 #include <linux/netdevice.h>
14 #include <linux/pci.h>
15 #include <linux/u64_stats_sync.h>
16 #include <net/xdp.h>
17 
18 #include "gve_desc.h"
19 #include "gve_desc_dqo.h"
20 
21 #ifndef PCI_VENDOR_ID_GOOGLE
22 #define PCI_VENDOR_ID_GOOGLE	0x1ae0
23 #endif
24 
25 #define PCI_DEV_ID_GVNIC	0x0042
26 
27 #define GVE_REGISTER_BAR	0
28 #define GVE_DOORBELL_BAR	2
29 
30 /* Driver can alloc up to 2 segments for the header and 2 for the payload. */
31 #define GVE_TX_MAX_IOVEC	4
32 /* 1 for management, 1 for rx, 1 for tx */
33 #define GVE_MIN_MSIX 3
34 
35 /* Numbers of gve tx/rx stats in stats report. */
36 #define GVE_TX_STATS_REPORT_NUM	6
37 #define GVE_RX_STATS_REPORT_NUM	2
38 
39 /* Interval to schedule a stats report update, 20000ms. */
40 #define GVE_STATS_REPORT_TIMER_PERIOD	20000
41 
42 /* Numbers of NIC tx/rx stats in stats report. */
43 #define NIC_TX_STATS_REPORT_NUM	0
44 #define NIC_RX_STATS_REPORT_NUM	4
45 
46 #define GVE_ADMINQ_BUFFER_SIZE 4096
47 
48 #define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))
49 
50 /* PTYPEs are always 10 bits. */
51 #define GVE_NUM_PTYPES	1024
52 
53 /* Default minimum ring size */
54 #define GVE_DEFAULT_MIN_TX_RING_SIZE 256
55 #define GVE_DEFAULT_MIN_RX_RING_SIZE 512
56 
57 #define GVE_DEFAULT_RX_BUFFER_SIZE 2048
58 
59 #define GVE_MAX_RX_BUFFER_SIZE 4096
60 
61 #define GVE_DEFAULT_RX_BUFFER_OFFSET 2048
62 
63 #define GVE_FLOW_RULES_CACHE_SIZE \
64 	(GVE_ADMINQ_BUFFER_SIZE / sizeof(struct gve_adminq_queried_flow_rule))
65 #define GVE_FLOW_RULE_IDS_CACHE_SIZE \
66 	(GVE_ADMINQ_BUFFER_SIZE / sizeof(((struct gve_adminq_queried_flow_rule *)0)->location))
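
/* Illustrative note, not part of the upstream gve.h: the macro above relies on
 * the standard C idiom of taking sizeof() of a struct member through a NULL
 * pointer, which is evaluated at compile time and never dereferenced. A
 * hypothetical struct showing the same pattern:
 *
 *	struct example { u32 location; };
 *	sizeof(((struct example *)0)->location) == sizeof(u32)
 */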
67 
68 #define GVE_XDP_ACTIONS 5
69 
70 #define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182
71 
72 #define GVE_DEFAULT_HEADER_BUFFER_SIZE 128
73 
74 #define DQO_QPL_DEFAULT_TX_PAGES 512
75 
76 /* Maximum TSO size supported on DQO */
77 #define GVE_DQO_TX_MAX	0x3FFFF
78 
79 #define GVE_TX_BUF_SHIFT_DQO 11
80 
81 /* 2K buffers for DQO-QPL */
82 #define GVE_TX_BUF_SIZE_DQO BIT(GVE_TX_BUF_SHIFT_DQO)
83 #define GVE_TX_BUFS_PER_PAGE_DQO (PAGE_SIZE >> GVE_TX_BUF_SHIFT_DQO)
84 #define GVE_MAX_TX_BUFS_PER_PKT (DIV_ROUND_UP(GVE_DQO_TX_MAX, GVE_TX_BUF_SIZE_DQO))
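
/* Illustrative arithmetic, not part of the upstream gve.h, assuming 4 KiB
 * pages: GVE_TX_BUF_SIZE_DQO = 1 << 11 = 2048 bytes, so each QPL page holds
 * GVE_TX_BUFS_PER_PAGE_DQO = 4096 >> 11 = 2 bounce buffers, and a maximal
 * GVE_DQO_TX_MAX (0x3FFFF byte) TSO packet needs at most
 * DIV_ROUND_UP(0x3FFFF, 2048) = 128 buffers.
 */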
85 
86 /* If the number of free/recyclable buffers is less than this threshold, the
87  * driver allocates and uses a non-QPL page on the receive path of DQO QPL to
88  * free up buffers.
89  * The value is large enough to post at least three 64K LRO packets to the NIC via 2K buffers.
90  */
91 #define GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD 96
92 
93 /* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
94 struct gve_rx_desc_queue {
95 	struct gve_rx_desc *desc_ring; /* the descriptor ring */
96 	dma_addr_t bus; /* the bus for the desc_ring */
97 	u8 seqno; /* the next expected seqno for this desc */
98 };
99 
100 /* The page info for a single slot in the RX data queue */
101 struct gve_rx_slot_page_info {
102 	struct page *page;
103 	void *page_address;
104 	u32 page_offset; /* offset to write to in page */
105 	int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
106 	u16 pad; /* adjustment for rx padding */
107 	u8 can_flip; /* tracks if the networking stack is using the page */
108 };
109 
110 /* A list of pages registered with the device during setup and used by a queue
111  * as buffers
112  */
113 struct gve_queue_page_list {
114 	u32 id; /* unique id */
115 	u32 num_entries;
116 	struct page **pages; /* list of num_entries pages */
117 	dma_addr_t *page_buses; /* the dma addrs of the pages */
118 };
119 
120 /* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */
121 struct gve_rx_data_queue {
122 	union gve_rx_data_slot *data_ring; /* read by NIC */
123 	dma_addr_t data_bus; /* dma mapping of the slots */
124 	struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
125 	struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
126 	u8 raw_addressing; /* use raw_addressing? */
127 };
128 
129 struct gve_priv;
130 
131 /* RX buffer queue for posting buffers to HW.
132  * Each RX (completion) queue has a corresponding buffer queue.
133  */
134 struct gve_rx_buf_queue_dqo {
135 	struct gve_rx_desc_dqo *desc_ring;
136 	dma_addr_t bus;
137 	u32 head; /* Pointer to start cleaning buffers at. */
138 	u32 tail; /* Last posted buffer index + 1 */
139 	u32 mask; /* Mask for indices to the size of the ring */
140 };
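
/* Illustrative sketch, not part of the upstream gve.h: advancing the buffer
 * queue tail after posting one buffer. Assumes the ring size is a power of
 * two so that "mask" is ring size - 1, as described above; the helper name is
 * hypothetical.
 */
static inline void gve_example_rx_bufq_post_one(struct gve_rx_buf_queue_dqo *bufq)
{
	bufq->tail = (bufq->tail + 1) & bufq->mask;
}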
141 
142 /* RX completion queue to receive packets from HW. */
143 struct gve_rx_compl_queue_dqo {
144 	struct gve_rx_compl_desc_dqo *desc_ring;
145 	dma_addr_t bus;
146 
147 	/* Number of slots which did not have a buffer posted yet. We should not
148 	 * post more buffers than the queue size to avoid HW overrunning the
149 	 * queue.
150 	 */
151 	int num_free_slots;
152 
153 	/* HW uses a "generation bit" to notify SW of new descriptors. When a
154 	 * descriptor's generation bit is different from the current generation,
155 	 * that descriptor is ready to be consumed by SW.
156 	 */
157 	u8 cur_gen_bit;
158 
159 	/* Pointer into desc_ring where the next completion descriptor will be
160 	 * received.
161 	 */
162 	u32 head;
163 	u32 mask; /* Mask for indices to the size of the ring */
164 };
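
/* Illustrative sketch, not part of the upstream gve.h: consuming one
 * completion descriptor. The head wraps through the mask and the expected
 * generation bit flips each time the ring wraps, matching the generation-bit
 * scheme described in the comments above. The helper name is hypothetical.
 */
static inline void gve_example_rx_complq_advance(struct gve_rx_compl_queue_dqo *complq)
{
	complq->head = (complq->head + 1) & complq->mask;
	if (complq->head == 0)
		complq->cur_gen_bit ^= 1;
}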
165 
166 struct gve_header_buf {
167 	u8 *data;
168 	dma_addr_t addr;
169 };
170 
171 /* Stores state for tracking buffers posted to HW */
172 struct gve_rx_buf_state_dqo {
173 	/* The page posted to HW. */
174 	struct gve_rx_slot_page_info page_info;
175 
176 	/* The DMA address corresponding to `page_info`. */
177 	dma_addr_t addr;
178 
179 	/* Last offset into the page when it only had a single reference, at
180 	 * which point every other offset is free to be reused.
181 	 */
182 	u32 last_single_ref_offset;
183 
184 	/* Linked list index to next element in the list, or -1 if none */
185 	s16 next;
186 };
187 
188 /* `head` and `tail` are indices into an array, or -1 if empty. */
189 struct gve_index_list {
190 	s16 head;
191 	s16 tail;
192 };
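
/* Illustrative sketch, not part of the upstream gve.h: appending a buffer
 * state (identified by its index into an RX ring's buf_states array, defined
 * below) to the tail of a gve_index_list. Uses the -1-means-empty convention
 * documented above; the helper name is hypothetical.
 */
static inline void gve_example_index_list_push_tail(struct gve_index_list *list,
						    struct gve_rx_buf_state_dqo *buf_states,
						    s16 idx)
{
	buf_states[idx].next = -1;
	if (list->head == -1)
		list->head = idx;
	else
		buf_states[list->tail].next = idx;
	list->tail = idx;
}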
193 
194 /* A single received packet split across multiple buffers may be
195  * reconstructed using the information in this structure.
196  */
197 struct gve_rx_ctx {
198 	/* head and tail of skb chain for the current packet or NULL if none */
199 	struct sk_buff *skb_head;
200 	struct sk_buff *skb_tail;
201 	u32 total_size;
202 	u8 frag_cnt;
203 	bool drop_pkt;
204 };
205 
206 struct gve_rx_cnts {
207 	u32 ok_pkt_bytes;
208 	u16 ok_pkt_cnt;
209 	u16 total_pkt_cnt;
210 	u16 cont_pkt_cnt;
211 	u16 desc_err_pkt_cnt;
212 };
213 
214 /* Contains datapath state used to represent an RX queue. */
215 struct gve_rx_ring {
216 	struct gve_priv *gve;
217 	union {
218 		/* GQI fields */
219 		struct {
220 			struct gve_rx_desc_queue desc;
221 			struct gve_rx_data_queue data;
222 
223 			/* threshold for posting new buffs and descs */
224 			u32 db_threshold;
225 			u16 packet_buffer_size;
226 
227 			u32 qpl_copy_pool_mask;
228 			u32 qpl_copy_pool_head;
229 			struct gve_rx_slot_page_info *qpl_copy_pool;
230 		};
231 
232 		/* DQO fields. */
233 		struct {
234 			struct gve_rx_buf_queue_dqo bufq;
235 			struct gve_rx_compl_queue_dqo complq;
236 
237 			struct gve_rx_buf_state_dqo *buf_states;
238 			u16 num_buf_states;
239 
240 			/* Linked list of gve_rx_buf_state_dqo. Index into
241 			 * buf_states, or -1 if empty.
242 			 */
243 			s16 free_buf_states;
244 
245 			/* Linked list of gve_rx_buf_state_dqo. Indexes into
246 			 * buf_states, or -1 if empty.
247 			 *
248 			 * This list contains buf_states which are pointing to
249 			 * valid buffers.
250 			 *
251 			 * We use a FIFO here in order to increase the
252 			 * probability that buffers can be reused by increasing
253 			 * the time between usages.
254 			 */
255 			struct gve_index_list recycled_buf_states;
256 
257 			/* Linked list of gve_rx_buf_state_dqo. Indexes into
258 			 * buf_states, or -1 if empty.
259 			 *
260 			 * This list contains buf_states which have buffers
261 			 * which cannot be reused yet.
262 			 */
263 			struct gve_index_list used_buf_states;
264 
265 			/* qpl assigned to this queue */
266 			struct gve_queue_page_list *qpl;
267 
268 			/* index into queue page list */
269 			u32 next_qpl_page_idx;
270 
271 			/* track number of used buffers */
272 			u16 used_buf_states_cnt;
273 
274 			/* Address info of the buffers for header-split */
275 			struct gve_header_buf hdr_bufs;
276 		} dqo;
277 	};
278 
279 	u64 rbytes; /* free-running bytes received */
280 	u64 rx_hsplit_bytes; /* free-running header bytes received */
281 	u64 rpackets; /* free-running packets received */
282 	u32 cnt; /* free-running total number of completed packets */
283 	u32 fill_cnt; /* free-running total number of descs and buffs posted */
284 	u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
285 	u64 rx_hsplit_pkt; /* free-running packets with headers split */
286 	u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
287 	u64 rx_copied_pkt; /* free-running total number of copied packets */
288 	u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
289 	u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
290 	u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
291 	/* free-running count of packets left unsplit due to header buffer overflow or zero hdr_len */
292 	u64 rx_hsplit_unsplit_pkt;
293 	u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
294 	u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
295 	u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */
296 	u64 rx_frag_alloc_cnt; /* free-running count of rx page allocations */
297 	u64 xdp_tx_errors;
298 	u64 xdp_redirect_errors;
299 	u64 xdp_alloc_fails;
300 	u64 xdp_actions[GVE_XDP_ACTIONS];
301 	u32 q_num; /* queue index */
302 	u32 ntfy_id; /* notification block index */
303 	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
304 	dma_addr_t q_resources_bus; /* dma address for the queue resources */
305 	struct u64_stats_sync statss; /* sync stats for 32bit archs */
306 
307 	struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. */
308 
309 	/* XDP stuff */
310 	struct xdp_rxq_info xdp_rxq;
311 	struct xdp_rxq_info xsk_rxq;
312 	struct xsk_buff_pool *xsk_pool;
313 	struct page_frag_cache page_cache; /* Page cache to allocate XDP frames */
314 };
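
/* Illustrative sketch, not part of the upstream gve.h: bumping the
 * free-running RX counters under the u64_stats_sync that protects readers on
 * 32-bit architectures. The helper name and the "bytes" argument are
 * hypothetical.
 */
static inline void gve_example_rx_count_packet(struct gve_rx_ring *rx, u32 bytes)
{
	u64_stats_update_begin(&rx->statss);
	rx->rpackets++;
	rx->rbytes += bytes;
	u64_stats_update_end(&rx->statss);
}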
315 
316 /* A TX desc ring entry */
317 union gve_tx_desc {
318 	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
319 	struct gve_tx_mtd_desc mtd; /* optional metadata descriptor */
320 	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
321 };
322 
323 /* Tracks the memory in the fifo occupied by a segment of a packet */
324 struct gve_tx_iovec {
325 	u32 iov_offset; /* offset into this segment */
326 	u32 iov_len; /* length */
327 	u32 iov_padding; /* padding associated with this segment */
328 };
329 
330 /* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
331  * ring entry but only used for a pkt_desc not a seg_desc
332  */
333 struct gve_tx_buffer_state {
334 	union {
335 		struct sk_buff *skb; /* skb for this pkt */
336 		struct xdp_frame *xdp_frame; /* xdp_frame */
337 	};
338 	struct {
339 		u16 size; /* size of xmitted xdp pkt */
340 		u8 is_xsk; /* xsk buff */
341 	} xdp;
342 	union {
343 		struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
344 		struct {
345 			DEFINE_DMA_UNMAP_ADDR(dma);
346 			DEFINE_DMA_UNMAP_LEN(len);
347 		};
348 	};
349 };
350 
351 /* A TX buffer - each queue has one */
352 struct gve_tx_fifo {
353 	void *base; /* address of base of FIFO */
354 	u32 size; /* total size */
355 	atomic_t available; /* how much space is still available */
356 	u32 head; /* offset to write at */
357 	struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */
358 };
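
/* Illustrative sketch, not part of the upstream gve.h: checking whether the
 * TX FIFO still has room for "bytes" of packet data using the atomically
 * tracked free-space counter above. The real driver additionally accounts for
 * padding and wrap-around when carving out FIFO space.
 */
static inline bool gve_example_tx_fifo_has_room(struct gve_tx_fifo *fifo, int bytes)
{
	return atomic_read(&fifo->available) >= bytes;
}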
359 
360 /* TX descriptor for DQO format */
361 union gve_tx_desc_dqo {
362 	struct gve_tx_pkt_desc_dqo pkt;
363 	struct gve_tx_tso_context_desc_dqo tso_ctx;
364 	struct gve_tx_general_context_desc_dqo general_ctx;
365 };
366 
367 enum gve_packet_state {
368 	/* Packet is in free list, available to be allocated.
369 	 * This should always be zero since state is not explicitly initialized.
370 	 */
371 	GVE_PACKET_STATE_UNALLOCATED,
372 	/* Packet is expecting a regular data completion or miss completion */
373 	GVE_PACKET_STATE_PENDING_DATA_COMPL,
374 	/* Packet has received a miss completion and is expecting a
375 	 * re-injection completion.
376 	 */
377 	GVE_PACKET_STATE_PENDING_REINJECT_COMPL,
378 	/* No valid completion received within the specified timeout. */
379 	GVE_PACKET_STATE_TIMED_OUT_COMPL,
380 };
381 
382 struct gve_tx_pending_packet_dqo {
383 	struct sk_buff *skb; /* skb for this packet */
384 
385 	/* 0th element corresponds to the linear portion of `skb`, should be
386 	 * unmapped with `dma_unmap_single`.
387 	 *
388 	 * All others correspond to `skb`'s frags and should be unmapped with
389 	 * `dma_unmap_page`.
390 	 */
391 	union {
392 		struct {
393 			DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
394 			DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
395 		};
396 		s16 tx_qpl_buf_ids[GVE_MAX_TX_BUFS_PER_PKT];
397 	};
398 
399 	u16 num_bufs;
400 
401 	/* Linked list index to next element in the list, or -1 if none */
402 	s16 next;
403 
404 	/* Linked list index to prev element in the list, or -1 if none.
405 	 * Used for tracking either outstanding miss completions or prematurely
406 	 * freed packets.
407 	 */
408 	s16 prev;
409 
410 	/* Identifies the current state of the packet as defined in
411 	 * `enum gve_packet_state`.
412 	 */
413 	u8 state;
414 
415 	/* If packet is an outstanding miss completion, then the packet is
416 	 * freed if the corresponding re-injection completion is not received
417 	 * before kernel jiffies exceeds timeout_jiffies.
418 	 */
419 	unsigned long timeout_jiffies;
420 };
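
/* Illustrative sketch, not part of the upstream gve.h: unmapping a completed
 * pending packet's buffers in the raw-addressing case, following the rule in
 * the comment above (slot 0 was mapped with dma_map_single(), the remaining
 * slots with dma_map_page()). "dev" is the DMA device; the helper name is
 * hypothetical.
 */
static inline void gve_example_unmap_pending_packet(struct device *dev,
						    struct gve_tx_pending_packet_dqo *pkt)
{
	int i;

	for (i = 0; i < pkt->num_bufs; i++) {
		if (i == 0)
			dma_unmap_single(dev, dma_unmap_addr(pkt, dma[i]),
					 dma_unmap_len(pkt, len[i]),
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]),
				       dma_unmap_len(pkt, len[i]),
				       DMA_TO_DEVICE);
	}
}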
421 
422 /* Contains datapath state used to represent a TX queue. */
423 struct gve_tx_ring {
424 	/* Cacheline 0 -- Accessed & dirtied during transmit */
425 	union {
426 		/* GQI fields */
427 		struct {
428 			struct gve_tx_fifo tx_fifo;
429 			u32 req; /* driver tracked head pointer */
430 			u32 done; /* driver tracked tail pointer */
431 		};
432 
433 		/* DQO fields. */
434 		struct {
435 			/* Linked list of gve_tx_pending_packet_dqo. Index into
436 			 * pending_packets, or -1 if empty.
437 			 *
438 			 * This is a consumer list owned by the TX path. When it
439 			 * runs out, the producer list is stolen from the
440 			 * completion handling path
441 			 * (dqo_compl.free_pending_packets).
442 			 */
443 			s16 free_pending_packets;
444 
445 			/* Cached value of `dqo_compl.hw_tx_head` */
446 			u32 head;
447 			u32 tail; /* Last posted buffer index + 1 */
448 
449 			/* Index of the last descriptor with "report event" bit
450 			 * set.
451 			 */
452 			u32 last_re_idx;
453 
454 			/* free running number of packet buf descriptors posted */
455 			u16 posted_packet_desc_cnt;
456 			/* free running number of packet buf descriptors completed */
457 			u16 completed_packet_desc_cnt;
458 
459 			/* QPL fields */
460 			struct {
461 			       /* Linked list of gve_tx_buf_dqo. Index into
462 				* tx_qpl_buf_next, or -1 if empty.
463 				*
464 				* This is a consumer list owned by the TX path. When it
465 				* runs out, the producer list is stolen from the
466 				* completion handling path
467 				* (dqo_compl.free_tx_qpl_buf_head).
468 				*/
469 				s16 free_tx_qpl_buf_head;
470 
471 			       /* Free running count of the number of QPL tx buffers
472 				* allocated
473 				*/
474 				u32 alloc_tx_qpl_buf_cnt;
475 
476 				/* Cached value of `dqo_compl.free_tx_qpl_buf_cnt` */
477 				u32 free_tx_qpl_buf_cnt;
478 			};
479 		} dqo_tx;
480 	};
481 
482 	/* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */
483 	union {
484 		/* GQI fields */
485 		struct {
486 			/* Spinlock for when cleanup is in progress */
487 			spinlock_t clean_lock;
488 			/* Spinlock for XDP tx traffic */
489 			spinlock_t xdp_lock;
490 		};
491 
492 		/* DQO fields. */
493 		struct {
494 			u32 head; /* Last read on compl_desc */
495 
496 			/* Tracks the current gen bit of compl_q */
497 			u8 cur_gen_bit;
498 
499 			/* Linked list of gve_tx_pending_packet_dqo. Index into
500 			 * pending_packets, or -1 if empty.
501 			 *
502 			 * This is the producer list, owned by the completion
503 			 * handling path. When the consumer list
504 			 * (dqo_tx.free_pending_packets) runs out, this list
505 			 * will be stolen.
506 			 */
507 			atomic_t free_pending_packets;
508 
509 			/* Last TX ring index fetched by HW */
510 			atomic_t hw_tx_head;
511 
512 			/* List to track pending packets which received a miss
513 			 * completion but not a corresponding reinjection.
514 			 */
515 			struct gve_index_list miss_completions;
516 
517 			/* List to track pending packets that were completed
518 			 * before receiving a valid completion because they
519 			 * reached a specified timeout.
520 			 */
521 			struct gve_index_list timed_out_completions;
522 
523 			/* QPL fields */
524 			struct {
525 				/* Linked list of gve_tx_buf_dqo. Index into
526 				 * tx_qpl_buf_next, or -1 if empty.
527 				 *
528 				 * This is the producer list, owned by the completion
529 				 * handling path. When the consumer list
530 				 * (dqo_tx.free_tx_qpl_buf_head) runs out, this list
531 				 * will be stolen.
532 				 */
533 				atomic_t free_tx_qpl_buf_head;
534 
535 				/* Free running count of the number of tx buffers
536 				 * freed
537 				 */
538 				atomic_t free_tx_qpl_buf_cnt;
539 			};
540 		} dqo_compl;
541 	} ____cacheline_aligned;
542 	u64 pkt_done; /* free-running - total packets completed */
543 	u64 bytes_done; /* free-running - total bytes completed */
544 	u64 dropped_pkt; /* free-running - total packets dropped */
545 	u64 dma_mapping_error; /* count of dma mapping errors */
546 
547 	/* Cacheline 2 -- Read-mostly fields */
548 	union {
549 		/* GQI fields */
550 		struct {
551 			union gve_tx_desc *desc;
552 
553 			/* Maps 1:1 to a desc */
554 			struct gve_tx_buffer_state *info;
555 		};
556 
557 		/* DQO fields. */
558 		struct {
559 			union gve_tx_desc_dqo *tx_ring;
560 			struct gve_tx_compl_desc *compl_ring;
561 
562 			struct gve_tx_pending_packet_dqo *pending_packets;
563 			s16 num_pending_packets;
564 
565 			u32 complq_mask; /* complq size is complq_mask + 1 */
566 
567 			/* QPL fields */
568 			struct {
569 				/* qpl assigned to this queue */
570 				struct gve_queue_page_list *qpl;
571 
572 				/* Each QPL page is divided into TX bounce buffers
573 				 * of size GVE_TX_BUF_SIZE_DQO. tx_qpl_buf_next is
574 				 * an array to manage linked lists of TX buffers.
575 				 * An entry with value j at index i means that the
576 				 * j'th buffer is next on the list after buffer i.
577 				 */
578 				s16 *tx_qpl_buf_next;
579 				u32 num_tx_qpl_bufs;
580 			};
581 		} dqo;
582 	} ____cacheline_aligned;
583 	struct netdev_queue *netdev_txq;
584 	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
585 	struct device *dev;
586 	u32 mask; /* masks req and done down to queue size */
587 	u8 raw_addressing; /* use raw_addressing? */
588 
589 	/* Slow-path fields */
590 	u32 q_num ____cacheline_aligned; /* queue idx */
591 	u32 stop_queue; /* count of queue stops */
592 	u32 wake_queue; /* count of queue wakes */
593 	u32 queue_timeout; /* count of queue timeouts */
594 	u32 ntfy_id; /* notification block index */
595 	u32 last_kick_msec; /* Last time the queue was kicked */
596 	dma_addr_t bus; /* dma address of the descr ring */
597 	dma_addr_t q_resources_bus; /* dma address of the queue resources */
598 	dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
599 	struct u64_stats_sync statss; /* sync stats for 32bit archs */
600 	struct xsk_buff_pool *xsk_pool;
601 	u32 xdp_xsk_wakeup;
602 	u32 xdp_xsk_done;
603 	u64 xdp_xsk_sent;
604 	u64 xdp_xmit;
605 	u64 xdp_xmit_errors;
606 } ____cacheline_aligned;
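
/* Illustrative sketch, not part of the upstream gve.h: walking the index-based
 * singly linked list formed by dqo.tx_qpl_buf_next, starting at a given head
 * and stopping at -1, as described in the comment above. The helper name is
 * hypothetical.
 */
static inline u32 gve_example_count_tx_qpl_bufs(const struct gve_tx_ring *tx, s16 head)
{
	u32 count = 0;

	while (head != -1) {
		count++;
		head = tx->dqo.tx_qpl_buf_next[head];
	}
	return count;
}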
607 
608 /* Wraps the info for one irq including the napi struct and the queues
609  * associated with that irq.
610  */
611 struct gve_notify_block {
612 	__be32 *irq_db_index; /* pointer to idx into Bar2 */
613 	char name[IFNAMSIZ + 16]; /* name registered with the kernel */
614 	struct napi_struct napi; /* kernel napi struct for this block */
615 	struct gve_priv *priv;
616 	struct gve_tx_ring *tx; /* tx rings on this block */
617 	struct gve_rx_ring *rx; /* rx rings on this block */
618 	u32 irq;
619 };
620 
621 /* Tracks allowed and current queue settings */
622 struct gve_queue_config {
623 	u16 max_queues;
624 	u16 num_queues; /* current */
625 };
626 
627 /* Tracks the available and used qpl IDs */
628 struct gve_qpl_config {
629 	u32 qpl_map_size; /* map memory size */
630 	unsigned long *qpl_id_map; /* bitmap of used qpl ids */
631 };
632 
633 struct gve_irq_db {
634 	__be32 index;
635 } ____cacheline_aligned;
636 
637 struct gve_ptype {
638 	u8 l3_type;  /* `gve_l3_type` in gve_adminq.h */
639 	u8 l4_type;  /* `gve_l4_type` in gve_adminq.h */
640 };
641 
642 struct gve_ptype_lut {
643 	struct gve_ptype ptypes[GVE_NUM_PTYPES];
644 };
645 
646 /* Parameters for allocating resources for tx queues */
647 struct gve_tx_alloc_rings_cfg {
648 	struct gve_queue_config *qcfg;
649 
650 	u16 ring_size;
651 	u16 start_idx;
652 	u16 num_rings;
653 	bool raw_addressing;
654 
655 	/* Allocated resources are returned here */
656 	struct gve_tx_ring *tx;
657 };
658 
659 /* Parameters for allocating resources for rx queues */
660 struct gve_rx_alloc_rings_cfg {
661 	/* tx config is also needed to determine QPL ids */
662 	struct gve_queue_config *qcfg;
663 	struct gve_queue_config *qcfg_tx;
664 
665 	u16 ring_size;
666 	u16 packet_buffer_size;
667 	bool raw_addressing;
668 	bool enable_header_split;
669 
670 	/* Allocated resources are returned here */
671 	struct gve_rx_ring *rx;
672 };
673 
674 /* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
675  * when the entire configure_device_resources command is zeroed out and the
676  * queue_format is not specified.
677  */
678 enum gve_queue_format {
679 	GVE_QUEUE_FORMAT_UNSPECIFIED	= 0x0,
680 	GVE_GQI_RDA_FORMAT		= 0x1,
681 	GVE_GQI_QPL_FORMAT		= 0x2,
682 	GVE_DQO_RDA_FORMAT		= 0x3,
683 	GVE_DQO_QPL_FORMAT		= 0x4,
684 };
685 
686 struct gve_flow_spec {
687 	__be32 src_ip[4];
688 	__be32 dst_ip[4];
689 	union {
690 		struct {
691 			__be16 src_port;
692 			__be16 dst_port;
693 		};
694 		__be32 spi;
695 	};
696 	union {
697 		u8 tos;
698 		u8 tclass;
699 	};
700 };
701 
702 struct gve_flow_rule {
703 	u32 location;
704 	u16 flow_type;
705 	u16 action;
706 	struct gve_flow_spec key;
707 	struct gve_flow_spec mask;
708 };
709 
710 struct gve_flow_rules_cache {
711 	bool rules_cache_synced; /* False if the driver's rules_cache is outdated */
712 	struct gve_adminq_queried_flow_rule *rules_cache;
713 	__be32 *rule_ids_cache;
714 	/* The total number of queried rules that are stored in the caches */
715 	u32 rules_cache_num;
716 	u32 rule_ids_cache_num;
717 };
718 
719 struct gve_priv {
720 	struct net_device *dev;
721 	struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
722 	struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
723 	struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
724 	struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */
725 	dma_addr_t irq_db_indices_bus;
726 	struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */
727 	char mgmt_msix_name[IFNAMSIZ + 16];
728 	u32 mgmt_msix_idx;
729 	__be32 *counter_array; /* array of num_event_counters */
730 	dma_addr_t counter_array_bus;
731 
732 	u16 num_event_counters;
733 	u16 tx_desc_cnt; /* num desc per ring */
734 	u16 rx_desc_cnt; /* num desc per ring */
735 	u16 max_tx_desc_cnt;
736 	u16 max_rx_desc_cnt;
737 	u16 min_tx_desc_cnt;
738 	u16 min_rx_desc_cnt;
739 	bool modify_ring_size_enabled;
740 	bool default_min_ring_size;
741 	u16 tx_pages_per_qpl; /* Number of pages per QPL for TX queues, as suggested by the NIC */
742 	u64 max_registered_pages;
743 	u64 num_registered_pages; /* num pages registered with NIC */
744 	struct bpf_prog *xdp_prog; /* XDP BPF program */
745 	u32 rx_copybreak; /* copy packets smaller than this */
746 	u16 default_num_queues; /* default num queues to set up */
747 
748 	u16 num_xdp_queues;
749 	struct gve_queue_config tx_cfg;
750 	struct gve_queue_config rx_cfg;
751 	u32 num_ntfy_blks; /* split between TX and RX so must be even */
752 
753 	struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
754 	__be32 __iomem *db_bar2; /* "array" of doorbells */
755 	u32 msg_enable;	/* level for netif* netdev print macros	*/
756 	struct pci_dev *pdev;
757 
758 	/* metrics */
759 	u32 tx_timeo_cnt;
760 
761 	/* Admin queue - see gve_adminq.h */
762 	union gve_adminq_command *adminq;
763 	dma_addr_t adminq_bus_addr;
764 	struct dma_pool *adminq_pool;
765 	struct mutex adminq_lock; /* Protects adminq command execution */
766 	u32 adminq_mask; /* masks prod_cnt to adminq size */
767 	u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
768 	u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
769 	u32 adminq_timeouts; /* free-running count of AQ cmds timeouts */
770 	/* free-running counts of each AQ cmd executed */
771 	u32 adminq_describe_device_cnt;
772 	u32 adminq_cfg_device_resources_cnt;
773 	u32 adminq_register_page_list_cnt;
774 	u32 adminq_unregister_page_list_cnt;
775 	u32 adminq_create_tx_queue_cnt;
776 	u32 adminq_create_rx_queue_cnt;
777 	u32 adminq_destroy_tx_queue_cnt;
778 	u32 adminq_destroy_rx_queue_cnt;
779 	u32 adminq_dcfg_device_resources_cnt;
780 	u32 adminq_set_driver_parameter_cnt;
781 	u32 adminq_report_stats_cnt;
782 	u32 adminq_report_link_speed_cnt;
783 	u32 adminq_get_ptype_map_cnt;
784 	u32 adminq_verify_driver_compatibility_cnt;
785 	u32 adminq_query_flow_rules_cnt;
786 	u32 adminq_cfg_flow_rule_cnt;
787 	u32 adminq_cfg_rss_cnt;
788 	u32 adminq_query_rss_cnt;
789 
790 	/* Global stats */
791 	u32 interface_up_cnt; /* count of times interface turned up since last reset */
792 	u32 interface_down_cnt; /* count of times interface turned down since last reset */
793 	u32 reset_cnt; /* count of resets */
794 	u32 page_alloc_fail; /* count of page alloc fails */
795 	u32 dma_mapping_error; /* count of dma mapping errors */
796 	u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
797 	u32 suspend_cnt; /* count of times suspended */
798 	u32 resume_cnt; /* count of times resumed */
799 	struct workqueue_struct *gve_wq;
800 	struct work_struct service_task;
801 	struct work_struct stats_report_task;
802 	unsigned long service_task_flags;
803 	unsigned long state_flags;
804 
805 	struct gve_stats_report *stats_report;
806 	u64 stats_report_len;
807 	dma_addr_t stats_report_bus; /* dma address for the stats report */
808 	unsigned long ethtool_flags;
809 
810 	unsigned long stats_report_timer_period;
811 	struct timer_list stats_report_timer;
812 
813 	/* Gvnic device link speed from hypervisor. */
814 	u64 link_speed;
815 	bool up_before_suspend; /* True if dev was up before suspend */
816 
817 	struct gve_ptype_lut *ptype_lut_dqo;
818 
819 	/* Must be a power of two. */
820 	u16 data_buffer_size_dqo;
821 	u16 max_rx_buffer_size; /* device limit */
822 
823 	enum gve_queue_format queue_format;
824 
825 	/* Interrupt coalescing settings */
826 	u32 tx_coalesce_usecs;
827 	u32 rx_coalesce_usecs;
828 
829 	u16 header_buf_size; /* device configured, header-split supported if non-zero */
830 	bool header_split_enabled; /* True if the header split is enabled by the user */
831 
832 	u32 max_flow_rules;
833 	u32 num_flow_rules;
834 
835 	struct gve_flow_rules_cache flow_rules_cache;
836 
837 	u16 rss_key_size;
838 	u16 rss_lut_size;
839 };
840 
841 enum gve_service_task_flags_bit {
842 	GVE_PRIV_FLAGS_DO_RESET			= 1,
843 	GVE_PRIV_FLAGS_RESET_IN_PROGRESS	= 2,
844 	GVE_PRIV_FLAGS_PROBE_IN_PROGRESS	= 3,
845 	GVE_PRIV_FLAGS_DO_REPORT_STATS = 4,
846 };
847 
848 enum gve_state_flags_bit {
849 	GVE_PRIV_FLAGS_ADMIN_QUEUE_OK		= 1,
850 	GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK	= 2,
851 	GVE_PRIV_FLAGS_DEVICE_RINGS_OK		= 3,
852 	GVE_PRIV_FLAGS_NAPI_ENABLED		= 4,
853 };
854 
855 enum gve_ethtool_flags_bit {
856 	GVE_PRIV_FLAGS_REPORT_STATS		= 0,
857 };
858 
859 static inline bool gve_get_do_reset(struct gve_priv *priv)
860 {
861 	return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
862 }
863 
864 static inline void gve_set_do_reset(struct gve_priv *priv)
865 {
866 	set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
867 }
868 
869 static inline void gve_clear_do_reset(struct gve_priv *priv)
870 {
871 	clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
872 }
873 
874 static inline bool gve_get_reset_in_progress(struct gve_priv *priv)
875 {
876 	return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS,
877 			&priv->service_task_flags);
878 }
879 
880 static inline void gve_set_reset_in_progress(struct gve_priv *priv)
881 {
882 	set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
883 }
884 
885 static inline void gve_clear_reset_in_progress(struct gve_priv *priv)
886 {
887 	clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
888 }
889 
890 static inline bool gve_get_probe_in_progress(struct gve_priv *priv)
891 {
892 	return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS,
893 			&priv->service_task_flags);
894 }
895 
896 static inline void gve_set_probe_in_progress(struct gve_priv *priv)
897 {
898 	set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
899 }
900 
901 static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
902 {
903 	clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
904 }
905 
906 static inline bool gve_get_do_report_stats(struct gve_priv *priv)
907 {
908 	return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS,
909 			&priv->service_task_flags);
910 }
911 
912 static inline void gve_set_do_report_stats(struct gve_priv *priv)
913 {
914 	set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
915 }
916 
917 static inline void gve_clear_do_report_stats(struct gve_priv *priv)
918 {
919 	clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
920 }
921 
922 static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
923 {
924 	return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
925 }
926 
927 static inline void gve_set_admin_queue_ok(struct gve_priv *priv)
928 {
929 	set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
930 }
931 
932 static inline void gve_clear_admin_queue_ok(struct gve_priv *priv)
933 {
934 	clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
935 }
936 
937 static inline bool gve_get_device_resources_ok(struct gve_priv *priv)
938 {
939 	return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
940 }
941 
942 static inline void gve_set_device_resources_ok(struct gve_priv *priv)
943 {
944 	set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
945 }
946 
947 static inline void gve_clear_device_resources_ok(struct gve_priv *priv)
948 {
949 	clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
950 }
951 
952 static inline bool gve_get_device_rings_ok(struct gve_priv *priv)
953 {
954 	return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
955 }
956 
957 static inline void gve_set_device_rings_ok(struct gve_priv *priv)
958 {
959 	set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
960 }
961 
962 static inline void gve_clear_device_rings_ok(struct gve_priv *priv)
963 {
964 	clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
965 }
966 
967 static inline bool gve_get_napi_enabled(struct gve_priv *priv)
968 {
969 	return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
970 }
971 
972 static inline void gve_set_napi_enabled(struct gve_priv *priv)
973 {
974 	set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
975 }
976 
977 static inline void gve_clear_napi_enabled(struct gve_priv *priv)
978 {
979 	clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
980 }
981 
982 static inline bool gve_get_report_stats(struct gve_priv *priv)
983 {
984 	return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
985 }
986 
987 static inline void gve_clear_report_stats(struct gve_priv *priv)
988 {
989 	clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
990 }
991 
992 /* Returns the address of the ntfy_blocks irq doorbell
993  */
994 static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
995 					       struct gve_notify_block *block)
996 {
997 	return &priv->db_bar2[be32_to_cpu(*block->irq_db_index)];
998 }
999 
1000 /* Returns the index into ntfy_blocks of the given tx ring's block
1001  */
1002 static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
1003 {
1004 	return queue_idx;
1005 }
1006 
1007 /* Returns the index into ntfy_blocks of the given rx ring's block
1008  */
1009 static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
1010 {
1011 	return (priv->num_ntfy_blks / 2) + queue_idx;
1012 }
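
/* Illustrative example, not part of the upstream gve.h: because the
 * notification blocks are split evenly between TX and RX, with 16 blocks
 * (num_ntfy_blks == 16) TX queue 3 maps to block 3 while RX queue 3 maps to
 * block 16 / 2 + 3 = 11.
 */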
1013 
1014 static inline bool gve_is_qpl(struct gve_priv *priv)
1015 {
1016 	return priv->queue_format == GVE_GQI_QPL_FORMAT ||
1017 		priv->queue_format == GVE_DQO_QPL_FORMAT;
1018 }
1019 
1020 /* Returns the number of tx queue page lists */
1021 static inline u32 gve_num_tx_qpls(const struct gve_queue_config *tx_cfg,
1022 				  int num_xdp_queues,
1023 				  bool is_qpl)
1024 {
1025 	if (!is_qpl)
1026 		return 0;
1027 	return tx_cfg->num_queues + num_xdp_queues;
1028 }
1029 
1030 /* Returns the number of XDP tx queue page lists
1031  */
1032 static inline u32 gve_num_xdp_qpls(struct gve_priv *priv)
1033 {
1034 	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
1035 		return 0;
1036 
1037 	return priv->num_xdp_queues;
1038 }
1039 
1040 /* Returns the number of rx queue page lists */
1041 static inline u32 gve_num_rx_qpls(const struct gve_queue_config *rx_cfg,
1042 				  bool is_qpl)
1043 {
1044 	if (!is_qpl)
1045 		return 0;
1046 	return rx_cfg->num_queues;
1047 }
1048 
1049 static inline u32 gve_tx_qpl_id(struct gve_priv *priv, int tx_qid)
1050 {
1051 	return tx_qid;
1052 }
1053 
1054 static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
1055 {
1056 	return priv->tx_cfg.max_queues + rx_qid;
1057 }
1058 
1059 static inline u32 gve_get_rx_qpl_id(const struct gve_queue_config *tx_cfg, int rx_qid)
1060 {
1061 	return tx_cfg->max_queues + rx_qid;
1062 }
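
/* Illustrative example, not part of the upstream gve.h: QPL ids place all TX
 * QPLs first and RX QPLs after them. With tx_cfg->max_queues == 16, TX queue
 * 2 uses QPL id 2 and RX queue 2 uses QPL id 16 + 2 = 18.
 */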
1063 
1064 static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
1065 {
1066 	return gve_tx_qpl_id(priv, 0);
1067 }
1068 
1069 static inline u32 gve_rx_start_qpl_id(const struct gve_queue_config *tx_cfg)
1070 {
1071 	return gve_get_rx_qpl_id(tx_cfg, 0);
1072 }
1073 
1074 static inline u32 gve_get_rx_pages_per_qpl_dqo(u32 rx_desc_cnt)
1075 {
1076 	/* For DQO, page count should be more than ring size for
1077 	 * out-of-order completions. Set it to two times of ring size.
1078 	 * out-of-order completions. Set it to two times the ring size.
1079 	return 2 * rx_desc_cnt;
1080 }
1081 
1082 /* Returns the correct dma direction for tx and rx qpls */
1083 static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
1084 						      int id)
1085 {
1086 	if (id < gve_rx_start_qpl_id(&priv->tx_cfg))
1087 		return DMA_TO_DEVICE;
1088 	else
1089 		return DMA_FROM_DEVICE;
1090 }
1091 
1092 static inline bool gve_is_gqi(struct gve_priv *priv)
1093 {
1094 	return priv->queue_format == GVE_GQI_RDA_FORMAT ||
1095 		priv->queue_format == GVE_GQI_QPL_FORMAT;
1096 }
1097 
1098 static inline u32 gve_num_tx_queues(struct gve_priv *priv)
1099 {
1100 	return priv->tx_cfg.num_queues + priv->num_xdp_queues;
1101 }
1102 
1103 static inline u32 gve_xdp_tx_queue_id(struct gve_priv *priv, u32 queue_id)
1104 {
1105 	return priv->tx_cfg.num_queues + queue_id;
1106 }
1107 
1108 static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv)
1109 {
1110 	return gve_xdp_tx_queue_id(priv, 0);
1111 }
1112 
1113 /* gqi napi handler defined in gve_main.c */
1114 int gve_napi_poll(struct napi_struct *napi, int budget);
1115 
1116 /* buffers */
1117 int gve_alloc_page(struct gve_priv *priv, struct device *dev,
1118 		   struct page **page, dma_addr_t *dma,
1119 		   enum dma_data_direction, gfp_t gfp_flags);
1120 void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
1121 		   enum dma_data_direction);
1122 /* qpls */
1123 struct gve_queue_page_list *gve_alloc_queue_page_list(struct gve_priv *priv,
1124 						      u32 id, int pages);
1125 void gve_free_queue_page_list(struct gve_priv *priv,
1126 			      struct gve_queue_page_list *qpl,
1127 			      u32 id);
1128 /* tx handling */
1129 netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
1130 int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
1131 		 u32 flags);
1132 int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
1133 		     void *data, int len, void *frame_p);
1134 void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
1135 bool gve_tx_poll(struct gve_notify_block *block, int budget);
1136 bool gve_xdp_poll(struct gve_notify_block *block, int budget);
1137 int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
1138 			   struct gve_tx_alloc_rings_cfg *cfg);
1139 void gve_tx_free_rings_gqi(struct gve_priv *priv,
1140 			   struct gve_tx_alloc_rings_cfg *cfg);
1141 void gve_tx_start_ring_gqi(struct gve_priv *priv, int idx);
1142 void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx);
1143 u32 gve_tx_load_event_counter(struct gve_priv *priv,
1144 			      struct gve_tx_ring *tx);
1145 bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
1146 /* rx handling */
1147 void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
1148 int gve_rx_poll(struct gve_notify_block *block, int budget);
1149 bool gve_rx_work_pending(struct gve_rx_ring *rx);
1150 int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
1151 			  struct gve_rx_alloc_rings_cfg *cfg,
1152 			  struct gve_rx_ring *rx,
1153 			  int idx);
1154 void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
1155 			  struct gve_rx_alloc_rings_cfg *cfg);
1156 int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
1157 			   struct gve_rx_alloc_rings_cfg *cfg);
1158 void gve_rx_free_rings_gqi(struct gve_priv *priv,
1159 			   struct gve_rx_alloc_rings_cfg *cfg);
1160 void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx);
1161 void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx);
1162 u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hplit);
1163 bool gve_header_split_supported(const struct gve_priv *priv);
1164 int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split);
1165 /* Reset */
1166 void gve_schedule_reset(struct gve_priv *priv);
1167 int gve_reset(struct gve_priv *priv, bool attempt_teardown);
1168 void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
1169 			     struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
1170 			     struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
1171 int gve_adjust_config(struct gve_priv *priv,
1172 		      struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
1173 		      struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
1174 int gve_adjust_queues(struct gve_priv *priv,
1175 		      struct gve_queue_config new_rx_config,
1176 		      struct gve_queue_config new_tx_config);
1177 /* flow steering rule */
1178 int gve_get_flow_rule_entry(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
1179 int gve_get_flow_rule_ids(struct gve_priv *priv, struct ethtool_rxnfc *cmd, u32 *rule_locs);
1180 int gve_add_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
1181 int gve_del_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
1182 int gve_flow_rules_reset(struct gve_priv *priv);
1183 /* report stats handling */
1184 void gve_handle_report_stats(struct gve_priv *priv);
1185 /* exported by ethtool.c */
1186 extern const struct ethtool_ops gve_ethtool_ops;
1187 /* needed by ethtool */
1188 extern char gve_driver_name[];
1189 extern const char gve_version_str[];
1190 #endif /* _GVE_H_ */
1191