/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
 * Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2024 Google LLC
 */

#ifndef _GVE_H_
#define _GVE_H_

#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/ethtool_netlink.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/u64_stats_sync.h>
#include <net/page_pool/helpers.h>
#include <net/xdp.h>

#include "gve_desc.h"
#include "gve_desc_dqo.h"

#ifndef PCI_VENDOR_ID_GOOGLE
#define PCI_VENDOR_ID_GOOGLE	0x1ae0
#endif

#define PCI_DEV_ID_GVNIC	0x0042

#define GVE_REGISTER_BAR	0
#define GVE_DOORBELL_BAR	2

/* Driver can alloc up to 2 segments for the header and 2 for the payload. */
#define GVE_TX_MAX_IOVEC	4
/* 1 for management, 1 for rx, 1 for tx */
#define GVE_MIN_MSIX 3

/* Numbers of gve tx/rx stats in stats report. */
#define GVE_TX_STATS_REPORT_NUM	6
#define GVE_RX_STATS_REPORT_NUM	2

/* Interval to schedule a stats report update, 20000ms. */
#define GVE_STATS_REPORT_TIMER_PERIOD	20000

/* Numbers of NIC tx/rx stats in stats report. */
#define NIC_TX_STATS_REPORT_NUM	0
#define NIC_RX_STATS_REPORT_NUM	4

#define GVE_ADMINQ_BUFFER_SIZE 4096

#define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))

/* PTYPEs are always 10 bits. */
#define GVE_NUM_PTYPES	1024

/* Default minimum ring size */
#define GVE_DEFAULT_MIN_TX_RING_SIZE 256
#define GVE_DEFAULT_MIN_RX_RING_SIZE 512

#define GVE_DEFAULT_RX_BUFFER_SIZE 2048

#define GVE_MAX_RX_BUFFER_SIZE 4096

#define GVE_DEFAULT_RX_BUFFER_OFFSET 2048

#define GVE_PAGE_POOL_SIZE_MULTIPLIER 4

#define GVE_FLOW_RULES_CACHE_SIZE \
	(GVE_ADMINQ_BUFFER_SIZE / sizeof(struct gve_adminq_queried_flow_rule))
#define GVE_FLOW_RULE_IDS_CACHE_SIZE \
	(GVE_ADMINQ_BUFFER_SIZE / sizeof(((struct gve_adminq_queried_flow_rule *)0)->location))

#define GVE_XDP_ACTIONS 5

#define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182

#define GVE_DEFAULT_HEADER_BUFFER_SIZE 128

#define DQO_QPL_DEFAULT_TX_PAGES 512

/* Maximum TSO size supported on DQO */
#define GVE_DQO_TX_MAX	0x3FFFF

#define GVE_TX_BUF_SHIFT_DQO 11

/* 2K buffers for DQO-QPL */
#define GVE_TX_BUF_SIZE_DQO BIT(GVE_TX_BUF_SHIFT_DQO)
#define GVE_TX_BUFS_PER_PAGE_DQO (PAGE_SIZE >> GVE_TX_BUF_SHIFT_DQO)
#define GVE_MAX_TX_BUFS_PER_PKT (DIV_ROUND_UP(GVE_DQO_TX_MAX, GVE_TX_BUF_SIZE_DQO))

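/* Worked example of the arithmetic above (illustrative, assuming 4K pages):
 * GVE_TX_BUF_SIZE_DQO = BIT(11) = 2048 bytes, so each 4096-byte QPL page is
 * carved into GVE_TX_BUFS_PER_PAGE_DQO = 4096 >> 11 = 2 bounce buffers, and a
 * maximally sized TSO packet of GVE_DQO_TX_MAX = 0x3FFFF = 262143 bytes needs
 * at most GVE_MAX_TX_BUFS_PER_PKT = DIV_ROUND_UP(262143, 2048) = 128 buffers.
 */
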
/* If the number of free/recyclable buffers is less than this threshold, the
 * driver allocs and uses a non-qpl page on the receive path of DQO QPL to
 * free up buffers.
 * The value is set big enough to post at least three 64K LRO packets via 2K
 * buffers to the NIC (3 * 64K / 2K = 96).
 */
#define GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD 96

/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
	struct gve_rx_desc *desc_ring; /* the descriptor ring */
	dma_addr_t bus; /* the bus for the desc_ring */
	u8 seqno; /* the next expected seqno for this desc */
};

/* The page info for a single slot in the RX data queue */
struct gve_rx_slot_page_info {
	struct page *page;
	void *page_address;
	u32 page_offset; /* offset to write to in page */
	unsigned int buf_size;
	int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
	u16 pad; /* adjustment for rx padding */
	u8 can_flip; /* tracks if the networking stack is using the page */
};

/* A list of pages registered with the device during setup and used by a queue
 * as buffers
 */
struct gve_queue_page_list {
	u32 id; /* unique id */
	u32 num_entries;
	struct page **pages; /* list of num_entries pages */
	dma_addr_t *page_buses; /* the dma addrs of the pages */
};

/* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */
struct gve_rx_data_queue {
	union gve_rx_data_slot *data_ring; /* read by NIC */
	dma_addr_t data_bus; /* dma mapping of the slots */
	struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
	struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
	u8 raw_addressing; /* use raw_addressing? */
};

struct gve_priv;

/* RX buffer queue for posting buffers to HW.
 * Each RX (completion) queue has a corresponding buffer queue.
 */
struct gve_rx_buf_queue_dqo {
	struct gve_rx_desc_dqo *desc_ring;
	dma_addr_t bus;
	u32 head; /* Pointer to start cleaning buffers at. */
	u32 tail; /* Last posted buffer index + 1 */
	u32 mask; /* Mask for indices to the size of the ring */
};
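
/* Illustrative note (not driver code): with a power-of-two ring, `mask` is
 * ring_size - 1, so the ring slot backing an index i can be found with
 * desc_ring[i & mask]; e.g. with a 256-entry ring, mask == 0xff and index
 * 260 lands in slot 4.
 */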

/* RX completion queue to receive packets from HW. */
struct gve_rx_compl_queue_dqo {
	struct gve_rx_compl_desc_dqo *desc_ring;
	dma_addr_t bus;

	/* Number of slots which did not have a buffer posted yet. We should not
	 * post more buffers than the queue size to avoid HW overrunning the
	 * queue.
	 */
	int num_free_slots;

	/* HW uses a "generation bit" to notify SW of new descriptors. When a
	 * descriptor's generation bit is different from the current generation,
	 * that descriptor is ready to be consumed by SW.
	 */
	u8 cur_gen_bit;

	/* Pointer into desc_ring where the next completion descriptor will be
	 * received.
	 */
	u32 head;
	u32 mask; /* Mask for indices to the size of the ring */
};
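
/* Minimal sketch (not driver code) of the generation-bit scheme described
 * above, assuming the completion descriptor exposes its generation bit as a
 * `generation` field (see gve_desc_dqo.h): a descriptor is new once its gen
 * bit differs from cur_gen_bit; cur_gen_bit is flipped each time `head`
 * wraps around the ring.
 */
static inline bool gve_rx_compl_desc_is_new_example(const struct gve_rx_compl_queue_dqo *q)
{
	const struct gve_rx_compl_desc_dqo *desc = &q->desc_ring[q->head & q->mask];

	return desc->generation != q->cur_gen_bit;
}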

struct gve_header_buf {
	u8 *data;
	dma_addr_t addr;
};

/* Stores state for tracking buffers posted to HW */
struct gve_rx_buf_state_dqo {
	/* The page posted to HW. */
	struct gve_rx_slot_page_info page_info;

	/* The DMA address corresponding to `page_info`. */
	dma_addr_t addr;

	/* Last offset into the page when it only had a single reference, at
	 * which point every other offset is free to be reused.
	 */
	u32 last_single_ref_offset;

	/* Linked list index to next element in the list, or -1 if none */
	s16 next;
};

/* `head` and `tail` are indices into an array, or -1 if empty. */
struct gve_index_list {
	s16 head;
	s16 tail;
};
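
/* Minimal sketch (not driver code): these lists chain elements through a
 * per-element s16 `next` index (as in gve_rx_buf_state_dqo above) instead of
 * pointers. A hypothetical dequeue from the head, given the backing array of
 * next indices, could look like:
 */
static inline s16 gve_index_list_pop_example(struct gve_index_list *list,
					     s16 *next)
{
	s16 idx = list->head;

	if (idx == -1) /* list is empty */
		return -1;
	list->head = next[idx];
	if (list->head == -1) /* popped the last element */
		list->tail = -1;
	return idx;
}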

/* A single received packet split across multiple buffers may be
 * reconstructed using the information in this structure.
 */
struct gve_rx_ctx {
	/* head and tail of skb chain for the current packet or NULL if none */
	struct sk_buff *skb_head;
	struct sk_buff *skb_tail;
	u32 total_size;
	u8 frag_cnt;
	bool drop_pkt;
};

struct gve_rx_cnts {
	u32 ok_pkt_bytes;
	u16 ok_pkt_cnt;
	u16 total_pkt_cnt;
	u16 cont_pkt_cnt;
	u16 desc_err_pkt_cnt;
};

/* Contains datapath state used to represent an RX queue. */
struct gve_rx_ring {
	struct gve_priv *gve;
	union {
		/* GQI fields */
		struct {
			struct gve_rx_desc_queue desc;
			struct gve_rx_data_queue data;

			/* threshold for posting new buffs and descs */
			u32 db_threshold;
			u16 packet_buffer_size;

			u32 qpl_copy_pool_mask;
			u32 qpl_copy_pool_head;
			struct gve_rx_slot_page_info *qpl_copy_pool;
		};

		/* DQO fields. */
		struct {
			struct gve_rx_buf_queue_dqo bufq;
			struct gve_rx_compl_queue_dqo complq;

			struct gve_rx_buf_state_dqo *buf_states;
			u16 num_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Index into
			 * buf_states, or -1 if empty.
			 */
			s16 free_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indexes into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which are pointing to
			 * valid buffers.
			 *
			 * We use a FIFO here in order to increase the
			 * probability that buffers can be reused by increasing
			 * the time between usages.
			 */
			struct gve_index_list recycled_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indexes into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which have buffers
			 * which cannot be reused yet.
			 */
			struct gve_index_list used_buf_states;

			/* qpl assigned to this queue */
			struct gve_queue_page_list *qpl;

			/* index into queue page list */
			u32 next_qpl_page_idx;

			/* track number of used buffers */
			u16 used_buf_states_cnt;

			/* Address info of the buffers for header-split */
			struct gve_header_buf hdr_bufs;

			struct page_pool *page_pool;
		} dqo;
	};

	u64 rbytes; /* free-running bytes received */
	u64 rx_hsplit_bytes; /* free-running header bytes received */
	u64 rpackets; /* free-running packets received */
	u32 cnt; /* free-running total number of completed packets */
	u32 fill_cnt; /* free-running total number of descs and buffs posted */
	u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
	u64 rx_hsplit_pkt; /* free-running packets with headers split */
	u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
	u64 rx_copied_pkt; /* free-running total number of copied packets */
	u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
	u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
	u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
	/* free-running count of unsplit packets due to header buffer overflow or hdr_len being 0 */
	u64 rx_hsplit_unsplit_pkt;
	u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
	u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
	u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */
	u64 rx_frag_alloc_cnt; /* free-running count of rx page allocations */
	u64 xdp_tx_errors;
	u64 xdp_redirect_errors;
	u64 xdp_alloc_fails;
	u64 xdp_actions[GVE_XDP_ACTIONS];
	u32 q_num; /* queue index */
	u32 ntfy_id; /* notification block index */
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	dma_addr_t q_resources_bus; /* dma address for the queue resources */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */

	struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. */

	/* XDP stuff */
	struct xdp_rxq_info xdp_rxq;
	struct xdp_rxq_info xsk_rxq;
	struct xsk_buff_pool *xsk_pool;
	struct page_frag_cache page_cache; /* Page cache to allocate XDP frames */
};

/* A TX desc ring entry */
union gve_tx_desc {
	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
	struct gve_tx_mtd_desc mtd; /* optional metadata descriptor */
	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
};

/* Tracks the memory in the fifo occupied by a segment of a packet */
struct gve_tx_iovec {
	u32 iov_offset; /* offset into this segment */
	u32 iov_len; /* length */
	u32 iov_padding; /* padding associated with this segment */
};

/* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
 * ring entry but only used for a pkt_desc not a seg_desc
 */
struct gve_tx_buffer_state {
	union {
		struct sk_buff *skb; /* skb for this pkt */
		struct xdp_frame *xdp_frame; /* xdp_frame */
	};
	struct {
		u16 size; /* size of xmitted xdp pkt */
		u8 is_xsk; /* xsk buff */
	} xdp;
	union {
		struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
		struct {
			DEFINE_DMA_UNMAP_ADDR(dma);
			DEFINE_DMA_UNMAP_LEN(len);
		};
	};
};

/* A TX buffer - each queue has one */
struct gve_tx_fifo {
	void *base; /* address of base of FIFO */
	u32 size; /* total size */
	atomic_t available; /* how much space is still available */
	u32 head; /* offset to write at */
	struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */
};
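
/* Minimal sketch (not driver code) of how such a FIFO hands out space: claim
 * bytes by advancing `head` (with wraparound) and shrinking `available`. The
 * real driver additionally pads allocations that would straddle the end of
 * the FIFO (see iov_padding above); that is omitted here for brevity.
 */
static inline int gve_tx_fifo_alloc_example(struct gve_tx_fifo *fifo, int bytes)
{
	u32 offset;

	if (atomic_read(&fifo->available) < bytes)
		return -1; /* not enough free space */
	offset = fifo->head;
	fifo->head = (fifo->head + bytes) % fifo->size;
	atomic_sub(bytes, &fifo->available);
	return offset; /* byte offset into fifo->base */
}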

/* TX descriptor for DQO format */
union gve_tx_desc_dqo {
	struct gve_tx_pkt_desc_dqo pkt;
	struct gve_tx_tso_context_desc_dqo tso_ctx;
	struct gve_tx_general_context_desc_dqo general_ctx;
};

enum gve_packet_state {
	/* Packet is in free list, available to be allocated.
	 * This should always be zero since state is not explicitly initialized.
	 */
	GVE_PACKET_STATE_UNALLOCATED,
	/* Packet is expecting a regular data completion or miss completion */
	GVE_PACKET_STATE_PENDING_DATA_COMPL,
	/* Packet has received a miss completion and is expecting a
	 * re-injection completion.
	 */
	GVE_PACKET_STATE_PENDING_REINJECT_COMPL,
	/* No valid completion received within the specified timeout. */
	GVE_PACKET_STATE_TIMED_OUT_COMPL,
};
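
/* Putting the states above together (illustrative summary, not driver code):
 *
 *   UNALLOCATED -> PENDING_DATA_COMPL -> (data completion) -> UNALLOCATED
 *   PENDING_DATA_COMPL -> (miss completion) -> PENDING_REINJECT_COMPL
 *   PENDING_REINJECT_COMPL -> (re-injection completion) -> UNALLOCATED
 *   PENDING_REINJECT_COMPL -> (timeout_jiffies elapses) -> TIMED_OUT_COMPL
 */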

struct gve_tx_pending_packet_dqo {
	struct sk_buff *skb; /* skb for this packet */

	/* 0th element corresponds to the linear portion of `skb`, should be
	 * unmapped with `dma_unmap_single`.
	 *
	 * All others correspond to `skb`'s frags and should be unmapped with
	 * `dma_unmap_page`.
	 */
	union {
		struct {
			DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
			DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
		};
		s16 tx_qpl_buf_ids[GVE_MAX_TX_BUFS_PER_PKT];
	};

	u16 num_bufs;

	/* Linked list index to next element in the list, or -1 if none */
	s16 next;

	/* Linked list index to prev element in the list, or -1 if none.
	 * Used for tracking either outstanding miss completions or prematurely
	 * freed packets.
	 */
	s16 prev;

	/* Identifies the current state of the packet as defined in
	 * `enum gve_packet_state`.
	 */
	u8 state;

	/* If packet is an outstanding miss completion, then the packet is
	 * freed if the corresponding re-injection completion is not received
	 * before kernel jiffies exceeds timeout_jiffies.
	 */
	unsigned long timeout_jiffies;
};
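
/* Minimal sketch (not driver code) of the unmap rule described above for the
 * raw-addressing case: buffer 0 is the skb's linear area and uses
 * dma_unmap_single(), all later buffers are frags and use dma_unmap_page().
 * (In QPL mode the union holds tx_qpl_buf_ids instead and nothing is
 * unmapped per packet.)
 */
static inline void gve_unmap_pending_packet_example(struct device *dev,
						    struct gve_tx_pending_packet_dqo *pkt)
{
	int i;

	for (i = 0; i < pkt->num_bufs; i++) {
		if (i == 0)
			dma_unmap_single(dev, dma_unmap_addr(pkt, dma[i]),
					 dma_unmap_len(pkt, len[i]),
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]),
				       dma_unmap_len(pkt, len[i]),
				       DMA_TO_DEVICE);
	}
}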

/* Contains datapath state used to represent a TX queue. */
struct gve_tx_ring {
	/* Cacheline 0 -- Accessed & dirtied during transmit */
	union {
		/* GQI fields */
		struct {
			struct gve_tx_fifo tx_fifo;
			u32 req; /* driver tracked head pointer */
			u32 done; /* driver tracked tail pointer */
		};

		/* DQO fields. */
		struct {
			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is a consumer list owned by the TX path. When it
			 * runs out, the producer list is stolen from the
			 * completion handling path
			 * (dqo_compl.free_pending_packets).
			 */
			s16 free_pending_packets;

			/* Cached value of `dqo_compl.hw_tx_head` */
			u32 head;
			u32 tail; /* Last posted buffer index + 1 */

			/* Index of the last descriptor with "report event" bit
			 * set.
			 */
			u32 last_re_idx;

			/* free running number of packet buf descriptors posted */
			u16 posted_packet_desc_cnt;
			/* free running number of packet buf descriptors completed */
			u16 completed_packet_desc_cnt;

			/* QPL fields */
			struct {
			       /* Linked list of gve_tx_buf_dqo. Index into
				* tx_qpl_buf_next, or -1 if empty.
				*
				* This is a consumer list owned by the TX path. When it
				* runs out, the producer list is stolen from the
				* completion handling path
				* (dqo_compl.free_tx_qpl_buf_head).
				*/
				s16 free_tx_qpl_buf_head;

			       /* Free running count of the number of QPL tx buffers
				* allocated
				*/
				u32 alloc_tx_qpl_buf_cnt;

				/* Cached value of `dqo_compl.free_tx_qpl_buf_cnt` */
				u32 free_tx_qpl_buf_cnt;
			};
		} dqo_tx;
	};

	/* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */
	union {
		/* GQI fields */
		struct {
			/* Spinlock for when cleanup is in progress */
			spinlock_t clean_lock;
			/* Spinlock for XDP tx traffic */
			spinlock_t xdp_lock;
		};

		/* DQO fields. */
		struct {
			u32 head; /* Last read on compl_desc */

			/* Tracks the current gen bit of compl_q */
			u8 cur_gen_bit;

			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is the producer list, owned by the completion
			 * handling path. When the consumer list
			 * (dqo_tx.free_pending_packets) runs out, this list
			 * will be stolen.
			 */
			atomic_t free_pending_packets;

			/* Last TX ring index fetched by HW */
			atomic_t hw_tx_head;

			/* List to track pending packets which received a miss
			 * completion but not a corresponding reinjection.
			 */
			struct gve_index_list miss_completions;

			/* List to track pending packets that were completed
			 * before receiving a valid completion because they
			 * reached a specified timeout.
			 */
			struct gve_index_list timed_out_completions;

			/* QPL fields */
			struct {
				/* Linked list of gve_tx_buf_dqo. Index into
				 * tx_qpl_buf_next, or -1 if empty.
				 *
				 * This is the producer list, owned by the completion
				 * handling path. When the consumer list
				 * (dqo_tx.free_tx_qpl_buf_head) runs out, this list
				 * will be stolen.
				 */
				atomic_t free_tx_qpl_buf_head;

				/* Free running count of the number of tx buffers
				 * freed
				 */
				atomic_t free_tx_qpl_buf_cnt;
			};
		} dqo_compl;
	} ____cacheline_aligned;
	u64 pkt_done; /* free-running - total packets completed */
	u64 bytes_done; /* free-running - total bytes completed */
	u64 dropped_pkt; /* free-running - total packets dropped */
	u64 dma_mapping_error; /* count of dma mapping errors */

	/* Cacheline 2 -- Read-mostly fields */
	union {
		/* GQI fields */
		struct {
			union gve_tx_desc *desc;

			/* Maps 1:1 to a desc */
			struct gve_tx_buffer_state *info;
		};

		/* DQO fields. */
		struct {
			union gve_tx_desc_dqo *tx_ring;
			struct gve_tx_compl_desc *compl_ring;

			struct gve_tx_pending_packet_dqo *pending_packets;
			s16 num_pending_packets;

			u32 complq_mask; /* complq size is complq_mask + 1 */

			/* QPL fields */
			struct {
				/* qpl assigned to this queue */
				struct gve_queue_page_list *qpl;

				/* Each QPL page is divided into TX bounce buffers
				 * of size GVE_TX_BUF_SIZE_DQO. tx_qpl_buf_next is
				 * an array to manage linked lists of TX buffers.
				 * An entry j at index i implies that j'th buffer
				 * is next on the list after i
				 */
				s16 *tx_qpl_buf_next;
				u32 num_tx_qpl_bufs;
			};
		} dqo;
	} ____cacheline_aligned;
	struct netdev_queue *netdev_txq;
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	struct device *dev;
	u32 mask; /* masks req and done down to queue size */
	u8 raw_addressing; /* use raw_addressing? */

	/* Slow-path fields */
	u32 q_num ____cacheline_aligned; /* queue idx */
	u32 stop_queue; /* count of queue stops */
	u32 wake_queue; /* count of queue wakes */
	u32 queue_timeout; /* count of queue timeouts */
	u32 ntfy_id; /* notification block index */
	u32 last_kick_msec; /* Last time the queue was kicked */
	dma_addr_t bus; /* dma address of the descr ring */
	dma_addr_t q_resources_bus; /* dma address of the queue resources */
	dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */
	struct xsk_buff_pool *xsk_pool;
	u32 xdp_xsk_wakeup;
	u32 xdp_xsk_done;
	u64 xdp_xsk_sent;
	u64 xdp_xmit;
	u64 xdp_xmit_errors;
} ____cacheline_aligned;

/* Wraps the info for one irq including the napi struct and the queues
 * associated with that irq.
 */
struct gve_notify_block {
	__be32 *irq_db_index; /* pointer to idx into Bar2 */
	char name[IFNAMSIZ + 16]; /* name registered with the kernel */
	struct napi_struct napi; /* kernel napi struct for this block */
	struct gve_priv *priv;
	struct gve_tx_ring *tx; /* tx rings on this block */
	struct gve_rx_ring *rx; /* rx rings on this block */
	u32 irq;
};

/* Tracks allowed and current queue settings */
struct gve_queue_config {
	u16 max_queues;
	u16 num_queues; /* current */
};

/* Tracks the available and used qpl IDs */
struct gve_qpl_config {
	u32 qpl_map_size; /* map memory size */
	unsigned long *qpl_id_map; /* bitmap of used qpl ids */
};

struct gve_irq_db {
	__be32 index;
} ____cacheline_aligned;

struct gve_ptype {
	u8 l3_type;  /* `gve_l3_type` in gve_adminq.h */
	u8 l4_type;  /* `gve_l4_type` in gve_adminq.h */
};

struct gve_ptype_lut {
	struct gve_ptype ptypes[GVE_NUM_PTYPES];
};

/* Parameters for allocating resources for tx queues */
struct gve_tx_alloc_rings_cfg {
	struct gve_queue_config *qcfg;

	u16 ring_size;
	u16 start_idx;
	u16 num_rings;
	bool raw_addressing;

	/* Allocated resources are returned here */
	struct gve_tx_ring *tx;
};

/* Parameters for allocating resources for rx queues */
struct gve_rx_alloc_rings_cfg {
	/* tx config is also needed to determine QPL ids */
	struct gve_queue_config *qcfg;
	struct gve_queue_config *qcfg_tx;

	u16 ring_size;
	u16 packet_buffer_size;
	bool raw_addressing;
	bool enable_header_split;

	/* Allocated resources are returned here */
	struct gve_rx_ring *rx;
};

/* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
 * when the entire configure_device_resources command is zeroed out and the
 * queue_format is not specified.
 */
enum gve_queue_format {
	GVE_QUEUE_FORMAT_UNSPECIFIED	= 0x0,
	GVE_GQI_RDA_FORMAT		= 0x1,
	GVE_GQI_QPL_FORMAT		= 0x2,
	GVE_DQO_RDA_FORMAT		= 0x3,
	GVE_DQO_QPL_FORMAT		= 0x4,
};

struct gve_flow_spec {
	__be32 src_ip[4];
	__be32 dst_ip[4];
	union {
		struct {
			__be16 src_port;
			__be16 dst_port;
		};
		__be32 spi;
	};
	union {
		u8 tos;
		u8 tclass;
	};
};

struct gve_flow_rule {
	u32 location;
	u16 flow_type;
	u16 action;
	struct gve_flow_spec key;
	struct gve_flow_spec mask;
};

struct gve_flow_rules_cache {
	bool rules_cache_synced; /* False if the driver's rules_cache is outdated */
	struct gve_adminq_queried_flow_rule *rules_cache;
	__be32 *rule_ids_cache;
	/* The total number of queried rules that are stored in the caches */
	u32 rules_cache_num;
	u32 rule_ids_cache_num;
};

struct gve_priv {
	struct net_device *dev;
	struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
	struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
	struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
	struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */
	dma_addr_t irq_db_indices_bus;
	struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */
	char mgmt_msix_name[IFNAMSIZ + 16];
	u32 mgmt_msix_idx;
	__be32 *counter_array; /* array of num_event_counters */
	dma_addr_t counter_array_bus;

	u16 num_event_counters;
	u16 tx_desc_cnt; /* num desc per ring */
	u16 rx_desc_cnt; /* num desc per ring */
	u16 max_tx_desc_cnt;
	u16 max_rx_desc_cnt;
	u16 min_tx_desc_cnt;
	u16 min_rx_desc_cnt;
	bool modify_ring_size_enabled;
	bool default_min_ring_size;
	u16 tx_pages_per_qpl; /* NIC-suggested number of pages per QPL for TX queues */
	u64 max_registered_pages;
	u64 num_registered_pages; /* num pages registered with NIC */
	struct bpf_prog *xdp_prog; /* XDP BPF program */
	u32 rx_copybreak; /* copy packets smaller than this */
	u16 default_num_queues; /* default num queues to set up */

	u16 num_xdp_queues;
	struct gve_queue_config tx_cfg;
	struct gve_queue_config rx_cfg;
	u32 num_ntfy_blks; /* split between TX and RX so must be even */

	struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
	__be32 __iomem *db_bar2; /* "array" of doorbells */
	u32 msg_enable;	/* level for netif* netdev print macros	*/
	struct pci_dev *pdev;

	/* metrics */
	u32 tx_timeo_cnt;

	/* Admin queue - see gve_adminq.h */
	union gve_adminq_command *adminq;
	dma_addr_t adminq_bus_addr;
	struct dma_pool *adminq_pool;
	struct mutex adminq_lock; /* Protects adminq command execution */
	u32 adminq_mask; /* masks prod_cnt to adminq size */
	u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
	u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
	u32 adminq_timeouts; /* free-running count of AQ cmd timeouts */
	/* free-running counts of each AQ cmd type executed */
	u32 adminq_describe_device_cnt;
	u32 adminq_cfg_device_resources_cnt;
	u32 adminq_register_page_list_cnt;
	u32 adminq_unregister_page_list_cnt;
	u32 adminq_create_tx_queue_cnt;
	u32 adminq_create_rx_queue_cnt;
	u32 adminq_destroy_tx_queue_cnt;
	u32 adminq_destroy_rx_queue_cnt;
	u32 adminq_dcfg_device_resources_cnt;
	u32 adminq_set_driver_parameter_cnt;
	u32 adminq_report_stats_cnt;
	u32 adminq_report_link_speed_cnt;
	u32 adminq_get_ptype_map_cnt;
	u32 adminq_verify_driver_compatibility_cnt;
	u32 adminq_query_flow_rules_cnt;
	u32 adminq_cfg_flow_rule_cnt;
	u32 adminq_cfg_rss_cnt;
	u32 adminq_query_rss_cnt;

	/* Global stats */
	u32 interface_up_cnt; /* count of times interface turned up since last reset */
	u32 interface_down_cnt; /* count of times interface turned down since last reset */
	u32 reset_cnt; /* count of reset */
	u32 page_alloc_fail; /* count of page alloc fails */
	u32 dma_mapping_error; /* count of dma mapping errors */
	u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
	u32 suspend_cnt; /* count of times suspended */
	u32 resume_cnt; /* count of times resumed */
	struct workqueue_struct *gve_wq;
	struct work_struct service_task;
	struct work_struct stats_report_task;
	unsigned long service_task_flags;
	unsigned long state_flags;

	struct gve_stats_report *stats_report;
	u64 stats_report_len;
	dma_addr_t stats_report_bus; /* dma address for the stats report */
	unsigned long ethtool_flags;

	unsigned long stats_report_timer_period;
	struct timer_list stats_report_timer;

	/* Gvnic device link speed from hypervisor. */
	u64 link_speed;
	bool up_before_suspend; /* True if dev was up before suspend */

	struct gve_ptype_lut *ptype_lut_dqo;

	/* Must be a power of two. */
	u16 data_buffer_size_dqo;
	u16 max_rx_buffer_size; /* device limit */

	enum gve_queue_format queue_format;

	/* Interrupt coalescing settings */
	u32 tx_coalesce_usecs;
	u32 rx_coalesce_usecs;

	u16 header_buf_size; /* device configured, header-split supported if non-zero */
	bool header_split_enabled; /* True if the header split is enabled by the user */

	u32 max_flow_rules;
	u32 num_flow_rules;

	struct gve_flow_rules_cache flow_rules_cache;

	u16 rss_key_size;
	u16 rss_lut_size;
};

enum gve_service_task_flags_bit {
	GVE_PRIV_FLAGS_DO_RESET			= 1,
	GVE_PRIV_FLAGS_RESET_IN_PROGRESS	= 2,
	GVE_PRIV_FLAGS_PROBE_IN_PROGRESS	= 3,
	GVE_PRIV_FLAGS_DO_REPORT_STATS = 4,
};

enum gve_state_flags_bit {
	GVE_PRIV_FLAGS_ADMIN_QUEUE_OK		= 1,
	GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK	= 2,
	GVE_PRIV_FLAGS_DEVICE_RINGS_OK		= 3,
	GVE_PRIV_FLAGS_NAPI_ENABLED		= 4,
};

enum gve_ethtool_flags_bit {
	GVE_PRIV_FLAGS_REPORT_STATS		= 0,
};

static inline bool gve_get_do_reset(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_set_do_reset(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_clear_do_reset(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline bool gve_get_reset_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_reset_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_reset_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_probe_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_probe_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_do_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS,
			&priv->service_task_flags);
}

static inline void gve_set_do_report_stats(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline void gve_clear_do_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_set_admin_queue_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_clear_admin_queue_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline bool gve_get_device_resources_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_set_device_resources_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_clear_device_resources_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline bool gve_get_device_rings_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_set_device_rings_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_clear_device_rings_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline bool gve_get_napi_enabled(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_set_napi_enabled(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_clear_napi_enabled(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline bool gve_get_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

static inline void gve_clear_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

/* Returns the address of the ntfy_block's irq doorbell */
static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
					       struct gve_notify_block *block)
{
	return &priv->db_bar2[be32_to_cpu(*block->irq_db_index)];
}
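
/* Example usage (illustrative, not driver code): interrupt doorbells are
 * written big-endian, e.g. from an interrupt/napi path:
 *
 *	iowrite32be(value, gve_irq_doorbell(priv, block));
 *
 * where `value` is a mask/ack constant defined elsewhere in the driver.
 */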

/* Returns the index into ntfy_blocks of the given tx ring's block
 */
static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return queue_idx;
}

/* Returns the index into ntfy_blocks of the given rx ring's block
 */
static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return (priv->num_ntfy_blks / 2) + queue_idx;
}
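
/* Together, these two helpers split the notify blocks in half: TX queue i
 * uses block i and RX queue j uses block num_ntfy_blks / 2 + j. For example,
 * with num_ntfy_blks == 8, TX queue 2 maps to block 2 and RX queue 2 maps to
 * block 6.
 */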

static inline bool gve_is_qpl(struct gve_priv *priv)
{
	return priv->queue_format == GVE_GQI_QPL_FORMAT ||
		priv->queue_format == GVE_DQO_QPL_FORMAT;
}

/* Returns the number of tx queue page lists */
static inline u32 gve_num_tx_qpls(const struct gve_queue_config *tx_cfg,
				  int num_xdp_queues,
				  bool is_qpl)
{
	if (!is_qpl)
		return 0;
	return tx_cfg->num_queues + num_xdp_queues;
}

/* Returns the number of XDP tx queue page lists
 */
static inline u32 gve_num_xdp_qpls(struct gve_priv *priv)
{
	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
		return 0;

	return priv->num_xdp_queues;
}

/* Returns the number of rx queue page lists */
static inline u32 gve_num_rx_qpls(const struct gve_queue_config *rx_cfg,
				  bool is_qpl)
{
	if (!is_qpl)
		return 0;
	return rx_cfg->num_queues;
}

static inline u32 gve_tx_qpl_id(struct gve_priv *priv, int tx_qid)
{
	return tx_qid;
}

static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
{
	return priv->tx_cfg.max_queues + rx_qid;
}

static inline u32 gve_get_rx_qpl_id(const struct gve_queue_config *tx_cfg, int rx_qid)
{
	return tx_cfg->max_queues + rx_qid;
}

static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
{
	return gve_tx_qpl_id(priv, 0);
}

static inline u32 gve_rx_start_qpl_id(const struct gve_queue_config *tx_cfg)
{
	return gve_get_rx_qpl_id(tx_cfg, 0);
}
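
/* The helpers above lay out QPL ids with all TX QPLs first: TX queue i owns
 * id i and RX queue j owns id tx_cfg.max_queues + j. For example, with
 * tx_cfg.max_queues == 16, RX queue 0 uses QPL id 16. gve_qpl_dma_dir()
 * below relies on this ordering to pick the DMA direction from the id alone.
 */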

static inline u32 gve_get_rx_pages_per_qpl_dqo(u32 rx_desc_cnt)
{
	/* For DQO, the page count should exceed the ring size to allow for
	 * out-of-order completions. Set it to two times the ring size.
	 */
	return 2 * rx_desc_cnt;
}

/* Returns the correct dma direction for tx and rx qpls */
static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
						      int id)
{
	if (id < gve_rx_start_qpl_id(&priv->tx_cfg))
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

static inline bool gve_is_gqi(struct gve_priv *priv)
{
	return priv->queue_format == GVE_GQI_RDA_FORMAT ||
		priv->queue_format == GVE_GQI_QPL_FORMAT;
}

static inline u32 gve_num_tx_queues(struct gve_priv *priv)
{
	return priv->tx_cfg.num_queues + priv->num_xdp_queues;
}

static inline u32 gve_xdp_tx_queue_id(struct gve_priv *priv, u32 queue_id)
{
	return priv->tx_cfg.num_queues + queue_id;
}

static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv)
{
	return gve_xdp_tx_queue_id(priv, 0);
}
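
/* XDP TX queues are appended after the regular TX queues, so XDP queue k is
 * TX ring tx_cfg.num_queues + k and gve_num_tx_queues() counts both. For
 * example, with 8 regular TX queues and 4 XDP queues, XDP queue 0 is TX
 * ring 8.
 */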

/* gqi napi handler defined in gve_main.c */
int gve_napi_poll(struct napi_struct *napi, int budget);

/* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
		   struct page **page, dma_addr_t *dma,
		   enum dma_data_direction, gfp_t gfp_flags);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
		   enum dma_data_direction);
/* qpls */
struct gve_queue_page_list *gve_alloc_queue_page_list(struct gve_priv *priv,
						      u32 id, int pages);
void gve_free_queue_page_list(struct gve_priv *priv,
			      struct gve_queue_page_list *qpl,
			      u32 id);
/* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		 u32 flags);
int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
		     void *data, int len, void *frame_p);
void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
bool gve_xdp_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
			   struct gve_tx_alloc_rings_cfg *cfg);
void gve_tx_free_rings_gqi(struct gve_priv *priv,
			   struct gve_tx_alloc_rings_cfg *cfg);
void gve_tx_start_ring_gqi(struct gve_priv *priv, int idx);
void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx);
u32 gve_tx_load_event_counter(struct gve_priv *priv,
			      struct gve_tx_ring *tx);
bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
/* rx handling */
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
int gve_rx_poll(struct gve_notify_block *block, int budget);
bool gve_rx_work_pending(struct gve_rx_ring *rx);
int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
			  struct gve_rx_alloc_rings_cfg *cfg,
			  struct gve_rx_ring *rx,
			  int idx);
void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
			  struct gve_rx_alloc_rings_cfg *cfg);
int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
			   struct gve_rx_alloc_rings_cfg *cfg);
void gve_rx_free_rings_gqi(struct gve_priv *priv,
			   struct gve_rx_alloc_rings_cfg *cfg);
void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx);
void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx);
u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hplit);
bool gve_header_split_supported(const struct gve_priv *priv);
int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split);
/* rx buffer handling */
int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs);
void gve_free_page_dqo(struct gve_priv *priv, struct gve_rx_buf_state_dqo *bs,
		       bool free_page);
struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx);
bool gve_buf_state_is_allocated(struct gve_rx_ring *rx,
				struct gve_rx_buf_state_dqo *buf_state);
void gve_free_buf_state(struct gve_rx_ring *rx,
			struct gve_rx_buf_state_dqo *buf_state);
struct gve_rx_buf_state_dqo *gve_dequeue_buf_state(struct gve_rx_ring *rx,
						   struct gve_index_list *list);
void gve_enqueue_buf_state(struct gve_rx_ring *rx, struct gve_index_list *list,
			   struct gve_rx_buf_state_dqo *buf_state);
struct gve_rx_buf_state_dqo *gve_get_recycled_buf_state(struct gve_rx_ring *rx);
void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
			 struct gve_rx_buf_state_dqo *buf_state);
void gve_free_to_page_pool(struct gve_rx_ring *rx,
			   struct gve_rx_buf_state_dqo *buf_state,
			   bool allow_direct);
int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx,
			   struct gve_rx_buf_state_dqo *buf_state);
void gve_free_qpl_page_dqo(struct gve_rx_buf_state_dqo *buf_state);
void gve_reuse_buffer(struct gve_rx_ring *rx,
		      struct gve_rx_buf_state_dqo *buf_state);
void gve_free_buffer(struct gve_rx_ring *rx,
		     struct gve_rx_buf_state_dqo *buf_state);
int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc);
struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
					  struct gve_rx_ring *rx);

/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
			     struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
			     struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
int gve_adjust_config(struct gve_priv *priv,
		      struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
		      struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
int gve_adjust_queues(struct gve_priv *priv,
		      struct gve_queue_config new_rx_config,
		      struct gve_queue_config new_tx_config);
/* flow steering rule */
int gve_get_flow_rule_entry(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
int gve_get_flow_rule_ids(struct gve_priv *priv, struct ethtool_rxnfc *cmd, u32 *rule_locs);
int gve_add_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
int gve_del_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
int gve_flow_rules_reset(struct gve_priv *priv);
/* report stats handling */
void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
extern const struct ethtool_ops gve_ethtool_ops;
/* needed by ethtool */
extern char gve_driver_name[];
extern const char gve_version_str[];
#endif /* _GVE_H_ */