xref: /linux/drivers/net/ethernet/google/gve/gve.h (revision 8f7aa3d3c7323f4ca2768a9e74ebbe359c4f8f88)
1 /* SPDX-License-Identifier: (GPL-2.0 OR MIT)
2  * Google virtual Ethernet (gve) driver
3  *
4  * Copyright (C) 2015-2024 Google LLC
5  */
6 
7 #ifndef _GVE_H_
8 #define _GVE_H_
9 
10 #include <linux/dma-mapping.h>
11 #include <linux/dmapool.h>
12 #include <linux/ethtool_netlink.h>
13 #include <linux/netdevice.h>
14 #include <linux/net_tstamp.h>
15 #include <linux/pci.h>
16 #include <linux/ptp_clock_kernel.h>
17 #include <linux/u64_stats_sync.h>
18 #include <net/page_pool/helpers.h>
19 #include <net/xdp.h>
20 
21 #include "gve_desc.h"
22 #include "gve_desc_dqo.h"
23 
24 #ifndef PCI_VENDOR_ID_GOOGLE
25 #define PCI_VENDOR_ID_GOOGLE	0x1ae0
26 #endif
27 
28 #define PCI_DEV_ID_GVNIC	0x0042
29 
30 #define GVE_REGISTER_BAR	0
31 #define GVE_DOORBELL_BAR	2
32 
33 /* Driver can alloc up to 2 segments for the header and 2 for the payload. */
34 #define GVE_TX_MAX_IOVEC	4
35 /* 1 for management, 1 for rx, 1 for tx */
36 #define GVE_MIN_MSIX 3
37 
38 /* Numbers of gve tx/rx stats in stats report. */
39 #define GVE_TX_STATS_REPORT_NUM	6
40 #define GVE_RX_STATS_REPORT_NUM	2
41 
42 /* Interval to schedule a stats report update, 20000ms. */
43 #define GVE_STATS_REPORT_TIMER_PERIOD	20000
44 
45 /* Numbers of NIC tx/rx stats in stats report. */
46 #define NIC_TX_STATS_REPORT_NUM	0
47 #define NIC_RX_STATS_REPORT_NUM	4
48 
49 #define GVE_ADMINQ_BUFFER_SIZE 4096
50 
51 #define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))
52 
53 /* PTYPEs are always 10 bits. */
54 #define GVE_NUM_PTYPES	1024
55 
56 /* Default minimum ring size */
57 #define GVE_DEFAULT_MIN_TX_RING_SIZE 256
58 #define GVE_DEFAULT_MIN_RX_RING_SIZE 512
59 
60 #define GVE_DEFAULT_RX_BUFFER_SIZE 2048
61 
62 #define GVE_XDP_RX_BUFFER_SIZE_DQO 4096
63 
64 #define GVE_DEFAULT_RX_BUFFER_OFFSET 2048
65 
66 #define GVE_PAGE_POOL_SIZE_MULTIPLIER 4
67 
68 #define GVE_FLOW_RULES_CACHE_SIZE \
69 	(GVE_ADMINQ_BUFFER_SIZE / sizeof(struct gve_adminq_queried_flow_rule))
70 #define GVE_FLOW_RULE_IDS_CACHE_SIZE \
71 	(GVE_ADMINQ_BUFFER_SIZE / sizeof(((struct gve_adminq_queried_flow_rule *)0)->location))
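/* The sizeof(((struct gve_adminq_queried_flow_rule *)0)->location) construct
 * above is the usual idiom for taking the size of a single struct member
 * without an instance; both caches are thereby sized to fit within one
 * GVE_ADMINQ_BUFFER_SIZE admin queue buffer.
 */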
72 
73 #define GVE_RSS_KEY_SIZE	40
74 #define GVE_RSS_INDIR_SIZE	128
75 
76 #define GVE_XDP_ACTIONS 5
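/* The five actions are the standard XDP verdicts XDP_ABORTED, XDP_DROP,
 * XDP_PASS, XDP_TX and XDP_REDIRECT (enum xdp_action values 0..4); the
 * per-ring xdp_actions[] counters below are presumably indexed by that
 * verdict.
 */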
77 
78 #define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182
79 
80 #define GVE_DEFAULT_HEADER_BUFFER_SIZE 128
81 
82 #define DQO_QPL_DEFAULT_TX_PAGES 512
83 
84 /* Maximum TSO size supported on DQO */
85 #define GVE_DQO_TX_MAX	0x3FFFF
86 
87 #define GVE_TX_BUF_SHIFT_DQO 11
88 
89 /* 2K buffers for DQO-QPL */
90 #define GVE_TX_BUF_SIZE_DQO BIT(GVE_TX_BUF_SHIFT_DQO)
91 #define GVE_TX_BUFS_PER_PAGE_DQO (PAGE_SIZE >> GVE_TX_BUF_SHIFT_DQO)
92 #define GVE_MAX_TX_BUFS_PER_PKT (DIV_ROUND_UP(GVE_DQO_TX_MAX, GVE_TX_BUF_SIZE_DQO))
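/* For reference, assuming 4K pages: GVE_TX_BUF_SIZE_DQO = 1 << 11 = 2048
 * bytes, GVE_TX_BUFS_PER_PAGE_DQO = 4096 >> 11 = 2 buffers per page, and
 * GVE_MAX_TX_BUFS_PER_PKT = DIV_ROUND_UP(0x3FFFF, 2048) = 128 buffers for a
 * maximally sized TSO packet.
 */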
93 
94 /* If the number of free/recyclable buffers is less than this threshold, the
95  * driver allocates and uses a non-QPL page on the DQO QPL receive path to
96  * free up buffers.
97  * The value is large enough to post at least three 64K LRO packets to the NIC via 2K buffers.
98  */
99 #define GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD 96
100 
101 #define GVE_DQO_RX_HWTSTAMP_VALID 0x1
102 
103 /* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
104 struct gve_rx_desc_queue {
105 	struct gve_rx_desc *desc_ring; /* the descriptor ring */
106 	dma_addr_t bus; /* the bus for the desc_ring */
107 	u8 seqno; /* the next expected seqno for this desc */
108 };
109 
110 /* The page info for a single slot in the RX data queue */
111 struct gve_rx_slot_page_info {
112 	/* netmem is used for DQO RDA mode
113 	 * page is used in all other modes
114 	 */
115 	union {
116 		struct page *page;
117 		netmem_ref netmem;
118 	};
119 	void *page_address;
120 	u32 page_offset; /* offset to write to in page */
121 	unsigned int buf_size;
122 	int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
123 	u16 pad; /* adjustment for rx padding */
124 	u8 can_flip; /* tracks if the networking stack is using the page */
125 };
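/* A rough sketch of the pagecnt_bias scheme (an assumption; the precise
 * bookkeeping lives in the RX buffer code): the driver elevates the page
 * refcount up front and records the share it expects to own in pagecnt_bias,
 * so a page is back to driver-only ownership once
 *
 *	page_count(page_info->page) == page_info->pagecnt_bias
 *
 * i.e. the networking stack holds no more references to it.
 */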
126 
127 /* A list of pages registered with the device during setup and used by a queue
128  * as buffers
129  */
130 struct gve_queue_page_list {
131 	u32 id; /* unique id */
132 	u32 num_entries;
133 	struct page **pages; /* list of num_entries pages */
134 	dma_addr_t *page_buses; /* the dma addrs of the pages */
135 };
136 
137 /* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */
138 struct gve_rx_data_queue {
139 	union gve_rx_data_slot *data_ring; /* read by NIC */
140 	dma_addr_t data_bus; /* dma mapping of the slots */
141 	struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
142 	struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
143 	u8 raw_addressing; /* use raw_addressing? */
144 };
145 
146 struct gve_priv;
147 
148 /* RX buffer queue for posting buffers to HW.
149  * Each RX (completion) queue has a corresponding buffer queue.
150  */
151 struct gve_rx_buf_queue_dqo {
152 	struct gve_rx_desc_dqo *desc_ring;
153 	dma_addr_t bus;
154 	u32 head; /* Pointer to start cleaning buffers at. */
155 	u32 tail; /* Last posted buffer index + 1 */
156 	u32 mask; /* Mask for indices to the size of the ring */
157 };
158 
159 /* RX completion queue to receive packets from HW. */
160 struct gve_rx_compl_queue_dqo {
161 	struct gve_rx_compl_desc_dqo *desc_ring;
162 	dma_addr_t bus;
163 
164 	/* Number of slots which have not had a buffer posted yet. We should not
165 	 * post more buffers than the queue size to avoid HW overrunning the
166 	 * queue.
167 	 */
168 	int num_free_slots;
169 
170 	/* HW uses a "generation bit" to notify SW of new descriptors. When a
171 	 * descriptor's generation bit is different from the current generation,
172 	 * that descriptor is ready to be consumed by SW (see sketch below).
173 	 */
174 	u8 cur_gen_bit;
175 
176 	/* Pointer into desc_ring where the next completion descriptor will be
177 	 * received.
178 	 */
179 	u32 head;
180 	u32 mask; /* Mask for indices to the size of the ring */
181 };
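/* Illustrative sketch (not the driver's actual code) of how the generation
 * bit drives completion processing; the descriptor field name below is an
 * assumption, the real layout lives in gve_desc_dqo.h:
 *
 *	desc = &complq->desc_ring[complq->head];
 *	if (desc->generation == complq->cur_gen_bit)
 *		return;				// not yet written by HW
 *	process(desc);
 *	complq->head = (complq->head + 1) & complq->mask;
 *	if (complq->head == 0)
 *		complq->cur_gen_bit ^= 1;	// ring wrapped, generation flips
 */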
182 
183 struct gve_header_buf {
184 	u8 *data;
185 	dma_addr_t addr;
186 };
187 
188 /* Stores state for tracking buffers posted to HW */
189 struct gve_rx_buf_state_dqo {
190 	/* The page posted to HW. */
191 	struct gve_rx_slot_page_info page_info;
192 
193 	/* XSK buffer */
194 	struct xdp_buff *xsk_buff;
195 
196 	/* The DMA address corresponding to `page_info`. */
197 	dma_addr_t addr;
198 
199 	/* Last offset into the page when it only had a single reference, at
200 	 * which point every other offset is free to be reused.
201 	 */
202 	u32 last_single_ref_offset;
203 
204 	/* Linked list index to next element in the list, or -1 if none */
205 	s16 next;
206 };
207 
208 /* Wrapper for XDP Rx metadata */
209 struct gve_xdp_buff {
210 	struct xdp_buff xdp;
211 	struct gve_priv *gve;
212 	const struct gve_rx_compl_desc_dqo *compl_desc;
213 };
214 
215 /* `head` and `tail` are indices into an array, or -1 if empty. */
216 struct gve_index_list {
217 	s16 head;
218 	s16 tail;
219 };
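/* A minimal sketch (an assumption, not the driver's implementation) of how
 * such a list threads elements together through their `next` fields, using
 * the RX buffer states as an example:
 *
 *	static void example_enqueue(struct gve_index_list *list,
 *				    struct gve_rx_buf_state_dqo *states,
 *				    s16 idx)
 *	{
 *		states[idx].next = -1;
 *		if (list->head == -1)
 *			list->head = idx;
 *		else
 *			states[list->tail].next = idx;
 *		list->tail = idx;
 *	}
 *
 * Dequeue pops list->head, follows states[head].next, and resets tail when
 * the list empties; gve_enqueue_buf_state()/gve_dequeue_buf_state(), declared
 * near the end of this header, are the real helpers.
 */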
220 
221 /* A single received packet split across multiple buffers may be
222  * reconstructed using the information in this structure.
223  */
224 struct gve_rx_ctx {
225 	/* head and tail of skb chain for the current packet or NULL if none */
226 	struct sk_buff *skb_head;
227 	struct sk_buff *skb_tail;
228 	u32 total_size;
229 	u8 frag_cnt;
230 	bool drop_pkt;
231 };
232 
233 struct gve_rx_cnts {
234 	u32 ok_pkt_bytes;
235 	u16 ok_pkt_cnt;
236 	u16 total_pkt_cnt;
237 	u16 cont_pkt_cnt;
238 	u16 desc_err_pkt_cnt;
239 };
240 
241 /* Contains datapath state used to represent an RX queue. */
242 struct gve_rx_ring {
243 	struct gve_priv *gve;
244 
245 	u16 packet_buffer_size;		/* Size of buffer posted to NIC */
246 	u16 packet_buffer_truesize;	/* Total size of RX buffer */
247 	u16 rx_headroom;
248 
249 	union {
250 		/* GQI fields */
251 		struct {
252 			struct gve_rx_desc_queue desc;
253 			struct gve_rx_data_queue data;
254 
255 			/* threshold for posting new buffs and descs */
256 			u32 db_threshold;
257 
258 			u32 qpl_copy_pool_mask;
259 			u32 qpl_copy_pool_head;
260 			struct gve_rx_slot_page_info *qpl_copy_pool;
261 		};
262 
263 		/* DQO fields. */
264 		struct {
265 			struct gve_rx_buf_queue_dqo bufq;
266 			struct gve_rx_compl_queue_dqo complq;
267 
268 			struct gve_rx_buf_state_dqo *buf_states;
269 			u16 num_buf_states;
270 
271 			/* Linked list of gve_rx_buf_state_dqo. Index into
272 			 * buf_states, or -1 if empty.
273 			 */
274 			s16 free_buf_states;
275 
276 			/* Linked list of gve_rx_buf_state_dqo. Indexes into
277 			 * buf_states, or -1 if empty.
278 			 *
279 			 * This list contains buf_states which are pointing to
280 			 * valid buffers.
281 			 *
282 			 * We use a FIFO here in order to increase the
283 			 * probability that buffers can be reused by increasing
284 			 * the time between usages.
285 			 */
286 			struct gve_index_list recycled_buf_states;
287 
288 			/* Linked list of gve_rx_buf_state_dqo. Indexes into
289 			 * buf_states, or -1 if empty.
290 			 *
291 			 * This list contains buf_states which have buffers
292 			 * which cannot be reused yet.
293 			 */
294 			struct gve_index_list used_buf_states;
295 
296 			/* qpl assigned to this queue */
297 			struct gve_queue_page_list *qpl;
298 
299 			/* index into queue page list */
300 			u32 next_qpl_page_idx;
301 
302 			/* track number of used buffers */
303 			u16 used_buf_states_cnt;
304 
305 			/* Address info of the buffers for header-split */
306 			struct gve_header_buf hdr_bufs;
307 
308 			struct page_pool *page_pool;
309 		} dqo;
310 	};
311 
312 	u64 rbytes; /* free-running bytes received */
313 	u64 rx_hsplit_bytes; /* free-running header bytes received */
314 	u64 rpackets; /* free-running packets received */
315 	u32 cnt; /* free-running total number of completed packets */
316 	u32 fill_cnt; /* free-running total number of descs and buffs posted */
317 	u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
318 	u64 rx_hsplit_pkt; /* free-running packets with headers split */
319 	u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
320 	u64 rx_copied_pkt; /* free-running total number of copied packets */
321 	u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
322 	u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
323 	u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
324 	/* free-running count of unsplit packets due to header buffer overflow or hdr_len being 0 */
325 	u64 rx_hsplit_unsplit_pkt;
326 	u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
327 	u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
328 	u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */
329 	u64 rx_frag_alloc_cnt; /* free-running count of rx page allocations */
330 	u64 xdp_tx_errors;
331 	u64 xdp_redirect_errors;
332 	u64 xdp_alloc_fails;
333 	u64 xdp_actions[GVE_XDP_ACTIONS];
334 	u32 q_num; /* queue index */
335 	u32 ntfy_id; /* notification block index */
336 	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
337 	dma_addr_t q_resources_bus; /* dma address for the queue resources */
338 	struct u64_stats_sync statss; /* sync stats for 32bit archs */
339 
340 	struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. */
341 
342 	/* XDP stuff */
343 	struct xdp_rxq_info xdp_rxq;
344 	struct xsk_buff_pool *xsk_pool;
345 	struct page_frag_cache page_cache; /* Page cache to allocate XDP frames */
346 };
347 
348 /* A TX desc ring entry */
349 union gve_tx_desc {
350 	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
351 	struct gve_tx_mtd_desc mtd; /* optional metadata descriptor */
352 	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
353 };
354 
355 /* Tracks the memory in the fifo occupied by a segment of a packet */
356 struct gve_tx_iovec {
357 	u32 iov_offset; /* offset into this segment */
358 	u32 iov_len; /* length */
359 	u32 iov_padding; /* padding associated with this segment */
360 };
361 
362 /* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
363  * ring entry, but only used for a pkt_desc, not a seg_desc
364  */
365 struct gve_tx_buffer_state {
366 	union {
367 		struct sk_buff *skb; /* skb for this pkt */
368 		struct xdp_frame *xdp_frame; /* xdp_frame */
369 	};
370 	struct {
371 		u16 size; /* size of xmitted xdp pkt */
372 		u8 is_xsk; /* xsk buff */
373 	} xdp;
374 	union {
375 		struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
376 		struct {
377 			DEFINE_DMA_UNMAP_ADDR(dma);
378 			DEFINE_DMA_UNMAP_LEN(len);
379 		};
380 	};
381 };
382 
383 /* A TX buffer - each queue has one */
384 struct gve_tx_fifo {
385 	void *base; /* address of base of FIFO */
386 	u32 size; /* total size */
387 	atomic_t available; /* how much space is still available */
388 	u32 head; /* offset to write at */
389 	struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */
390 };
391 
392 /* TX descriptor for DQO format */
393 union gve_tx_desc_dqo {
394 	struct gve_tx_pkt_desc_dqo pkt;
395 	struct gve_tx_tso_context_desc_dqo tso_ctx;
396 	struct gve_tx_general_context_desc_dqo general_ctx;
397 };
398 
399 enum gve_packet_state {
400 	/* Packet is in free list, available to be allocated.
401 	 * This should always be zero since state is not explicitly initialized.
402 	 */
403 	GVE_PACKET_STATE_UNALLOCATED,
404 	/* Packet is expecting a regular data completion or miss completion */
405 	GVE_PACKET_STATE_PENDING_DATA_COMPL,
406 	/* Packet has received a miss completion and is expecting a
407 	 * re-injection completion.
408 	 */
409 	GVE_PACKET_STATE_PENDING_REINJECT_COMPL,
410 	/* No valid completion received within the specified timeout. */
411 	GVE_PACKET_STATE_TIMED_OUT_COMPL,
412 	/* XSK pending packet has received a packet/reinjection completion, or
413 	 * has timed out. At this point, the pending packet can be counted by
414 	 * xsk_tx_complete and freed.
415 	 */
416 	GVE_PACKET_STATE_XSK_COMPLETE,
417 };
418 
419 enum gve_tx_pending_packet_dqo_type {
420 	GVE_TX_PENDING_PACKET_DQO_SKB,
421 	GVE_TX_PENDING_PACKET_DQO_XDP_FRAME,
422 	GVE_TX_PENDING_PACKET_DQO_XSK,
423 };
424 
425 struct gve_tx_pending_packet_dqo {
426 	union {
427 		struct sk_buff *skb;
428 		struct xdp_frame *xdpf;
429 	};
430 
431 	/* The 0th element corresponds to the linear portion of `skb` and should
432 	 * be unmapped with `dma_unmap_single`.
433 	 *
434 	 * All others correspond to `skb`'s frags and should be unmapped with
435 	 * `dma_unmap_page`.
436 	 */
437 	union {
438 		struct {
439 			DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
440 			DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
441 		};
442 		s16 tx_qpl_buf_ids[GVE_MAX_TX_BUFS_PER_PKT];
443 	};
444 
445 	u16 num_bufs;
446 
447 	/* Linked list index to next element in the list, or -1 if none */
448 	s16 next;
449 
450 	/* Linked list index to prev element in the list, or -1 if none.
451 	 * Used for tracking either outstanding miss completions or prematurely
452 	 * freed packets.
453 	 */
454 	s16 prev;
455 
456 	/* Identifies the current state of the packet as defined in
457 	 * `enum gve_packet_state`.
458 	 */
459 	u8 state : 3;
460 
461 	/* gve_tx_pending_packet_dqo_type */
462 	u8 type : 2;
463 
464 	/* If the packet is an outstanding miss completion, then the packet is
465 	 * freed if the corresponding re-injection completion is not received
466 	 * before kernel jiffies exceeds timeout_jiffies.
467 	 */
468 	unsigned long timeout_jiffies;
469 };
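/* A sketch (an assumption about the completion path, not the actual code) of
 * how timeout_jiffies might be consumed for an outstanding miss completion:
 *
 *	if (pending_packet->state == GVE_PACKET_STATE_PENDING_REINJECT_COMPL &&
 *	    time_after(jiffies, pending_packet->timeout_jiffies))
 *		... complete and free the packet, then move it from
 *		    miss_completions to timed_out_completions ...
 */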
470 
471 /* Contains datapath state used to represent a TX queue. */
472 struct gve_tx_ring {
473 	/* Cacheline 0 -- Accessed & dirtied during transmit */
474 	union {
475 		/* GQI fields */
476 		struct {
477 			struct gve_tx_fifo tx_fifo;
478 			u32 req; /* driver tracked head pointer */
479 			u32 done; /* driver tracked tail pointer */
480 		};
481 
482 		/* DQO fields. */
483 		struct {
484 			/* Spinlock for XDP tx traffic */
485 			spinlock_t xdp_lock;
486 
487 			/* Linked list of gve_tx_pending_packet_dqo. Index into
488 			 * pending_packets, or -1 if empty.
489 			 *
490 			 * This is a consumer list owned by the TX path. When it
491 			 * runs out, the producer list is stolen from the
492 			 * completion handling path
493 			 * (dqo_compl.free_pending_packets).
494 			 */
495 			s16 free_pending_packets;
496 
497 			/* Cached value of `dqo_compl.hw_tx_head` */
498 			u32 head;
499 			u32 tail; /* Last posted buffer index + 1 */
500 
501 			/* Index of the last descriptor with "report event" bit
502 			 * set.
503 			 */
504 			u32 last_re_idx;
505 
506 			/* free running number of packet buf descriptors posted */
507 			u16 posted_packet_desc_cnt;
508 			/* free running number of packet buf descriptors completed */
509 			u16 completed_packet_desc_cnt;
510 
511 			/* QPL fields */
512 			struct {
513 			       /* Linked list of gve_tx_buf_dqo. Index into
514 				* tx_qpl_buf_next, or -1 if empty.
515 				*
516 				* This is a consumer list owned by the TX path. When it
517 				* runs out, the producer list is stolen from the
518 				* completion handling path
519 				* (dqo_compl.free_tx_qpl_buf_head).
520 				*/
521 				s16 free_tx_qpl_buf_head;
522 
523 			       /* Free running count of the number of QPL tx buffers
524 				* allocated
525 				*/
526 				u32 alloc_tx_qpl_buf_cnt;
527 
528 				/* Cached value of `dqo_compl.free_tx_qpl_buf_cnt` */
529 				u32 free_tx_qpl_buf_cnt;
530 			};
531 
532 			atomic_t xsk_reorder_queue_tail;
533 		} dqo_tx;
534 	};
535 
536 	/* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */
537 	union {
538 		/* GQI fields */
539 		struct {
540 			/* Spinlock for when cleanup in progress */
541 			spinlock_t clean_lock;
542 			/* Spinlock for XDP tx traffic */
543 			spinlock_t xdp_lock;
544 		};
545 
546 		/* DQO fields. */
547 		struct {
548 			u32 head; /* Last read on compl_desc */
549 
550 			/* Tracks the current gen bit of compl_q */
551 			u8 cur_gen_bit;
552 
553 			/* Linked list of gve_tx_pending_packet_dqo. Index into
554 			 * pending_packets, or -1 if empty.
555 			 *
556 			 * This is the producer list, owned by the completion
557 			 * handling path. When the consumer list
558 			 * (dqo_tx.free_pending_packets) runs out, this list
559 			 * will be stolen (see the sketch after this struct).
560 			 */
561 			atomic_t free_pending_packets;
562 
563 			/* Last TX ring index fetched by HW */
564 			atomic_t hw_tx_head;
565 
566 			u16 xsk_reorder_queue_head;
567 			u16 xsk_reorder_queue_tail;
568 
569 			/* List to track pending packets which received a miss
570 			 * completion but not a corresponding reinjection.
571 			 */
572 			struct gve_index_list miss_completions;
573 
574 			/* List to track pending packets that the driver
575 			 * completed before receiving a valid completion
576 			 * because they reached a specified timeout.
577 			 */
578 			struct gve_index_list timed_out_completions;
579 
580 			/* QPL fields */
581 			struct {
582 				/* Linked list of gve_tx_buf_dqo. Index into
583 				 * tx_qpl_buf_next, or -1 if empty.
584 				 *
585 				 * This is the producer list, owned by the completion
586 				 * handling path. When the consumer list
587 				 * (dqo_tx.free_tx_qpl_buf_head) runs out, this list
588 				 * will be stolen.
589 				 */
590 				atomic_t free_tx_qpl_buf_head;
591 
592 				/* Free running count of the number of tx buffers
593 				 * freed
594 				 */
595 				atomic_t free_tx_qpl_buf_cnt;
596 			};
597 		} dqo_compl;
598 	} ____cacheline_aligned;
599 	u64 pkt_done; /* free-running - total packets completed */
600 	u64 bytes_done; /* free-running - total bytes completed */
601 	u64 dropped_pkt; /* free-running - total packets dropped */
602 	u64 dma_mapping_error; /* count of dma mapping errors */
603 
604 	/* Cacheline 2 -- Read-mostly fields */
605 	union {
606 		/* GQI fields */
607 		struct {
608 			union gve_tx_desc *desc;
609 
610 			/* Maps 1:1 to a desc */
611 			struct gve_tx_buffer_state *info;
612 		};
613 
614 		/* DQO fields. */
615 		struct {
616 			union gve_tx_desc_dqo *tx_ring;
617 			struct gve_tx_compl_desc *compl_ring;
618 
619 			struct gve_tx_pending_packet_dqo *pending_packets;
620 			s16 num_pending_packets;
621 
622 			u16 *xsk_reorder_queue;
623 
624 			u32 complq_mask; /* complq size is complq_mask + 1 */
625 
626 			/* QPL fields */
627 			struct {
628 				/* qpl assigned to this queue */
629 				struct gve_queue_page_list *qpl;
630 
631 				/* Each QPL page is divided into TX bounce buffers
632 				 * of size GVE_TX_BUF_SIZE_DQO. tx_qpl_buf_next is
633 				 * an array to manage linked lists of TX buffers.
634 				 * An entry j at index i implies that the j'th
635 				 * buffer is next on the list after buffer i.
636 				 */
637 				s16 *tx_qpl_buf_next;
638 				u32 num_tx_qpl_bufs;
639 			};
640 		} dqo;
641 	} ____cacheline_aligned;
642 	struct netdev_queue *netdev_txq;
643 	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
644 	struct device *dev;
645 	u32 mask; /* masks req and done down to queue size */
646 	u8 raw_addressing; /* use raw_addressing? */
647 
648 	/* Slow-path fields */
649 	u32 q_num ____cacheline_aligned; /* queue idx */
650 	u32 stop_queue; /* count of queue stops */
651 	u32 wake_queue; /* count of queue wakes */
652 	u32 queue_timeout; /* count of queue timeouts */
653 	u32 ntfy_id; /* notification block index */
654 	u32 last_kick_msec; /* Last time the queue was kicked */
655 	dma_addr_t bus; /* dma address of the descr ring */
656 	dma_addr_t q_resources_bus; /* dma address of the queue resources */
657 	dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
658 	struct u64_stats_sync statss; /* sync stats for 32bit archs */
659 	struct xsk_buff_pool *xsk_pool;
660 	u64 xdp_xsk_sent;
661 	u64 xdp_xmit;
662 	u64 xdp_xmit_errors;
663 } ____cacheline_aligned;
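/* A minimal sketch (an assumption, not the driver's exact code) of the
 * consumer/producer split described above for the pending-packet free lists:
 * the TX path consumes dqo_tx.free_pending_packets and only steals the
 * completion path's list when it runs dry:
 *
 *	s16 idx = tx->dqo_tx.free_pending_packets;
 *
 *	if (idx == -1) {
 *		// Steal the whole producer list in one atomic swap.
 *		idx = atomic_xchg(&tx->dqo_compl.free_pending_packets, -1);
 *	}
 *	if (idx != -1)
 *		tx->dqo_tx.free_pending_packets =
 *			tx->dqo.pending_packets[idx].next;
 *
 * The same pattern applies to the QPL TX buffer lists
 * (dqo_tx.free_tx_qpl_buf_head and dqo_compl.free_tx_qpl_buf_head).
 */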
664 
665 /* Wraps the info for one irq including the napi struct and the queues
666  * associated with that irq.
667  */
668 struct gve_notify_block {
669 	__be32 *irq_db_index; /* pointer to idx into Bar2 */
670 	char name[IFNAMSIZ + 16]; /* name registered with the kernel */
671 	struct napi_struct napi; /* kernel napi struct for this block */
672 	struct gve_priv *priv;
673 	struct gve_tx_ring *tx; /* tx rings on this block */
674 	struct gve_rx_ring *rx; /* rx rings on this block */
675 	u32 irq;
676 };
677 
678 /* Tracks allowed and current rx queue settings */
679 struct gve_rx_queue_config {
680 	u16 max_queues;
681 	u16 num_queues;
682 	u16 packet_buffer_size;
683 };
684 
685 /* Tracks allowed and current tx queue settings */
686 struct gve_tx_queue_config {
687 	u16 max_queues;
688 	u16 num_queues; /* number of TX queues, excluding XDP queues */
689 	u16 num_xdp_queues;
690 };
691 
692 /* Tracks the available and used qpl IDs */
693 struct gve_qpl_config {
694 	u32 qpl_map_size; /* map memory size */
695 	unsigned long *qpl_id_map; /* bitmap of used qpl ids */
696 };
697 
698 struct gve_irq_db {
699 	__be32 index;
700 } ____cacheline_aligned;
701 
702 struct gve_ptype {
703 	u8 l3_type;  /* `gve_l3_type` in gve_adminq.h */
704 	u8 l4_type;  /* `gve_l4_type` in gve_adminq.h */
705 };
706 
707 struct gve_ptype_lut {
708 	struct gve_ptype ptypes[GVE_NUM_PTYPES];
709 };
710 
711 /* Parameters for allocating resources for tx queues */
712 struct gve_tx_alloc_rings_cfg {
713 	struct gve_tx_queue_config *qcfg;
714 
715 	u16 num_xdp_rings;
716 
717 	u16 ring_size;
718 	bool raw_addressing;
719 
720 	/* Allocated resources are returned here */
721 	struct gve_tx_ring *tx;
722 };
723 
724 /* Parameters for allocating resources for rx queues */
725 struct gve_rx_alloc_rings_cfg {
726 	/* tx config is also needed to determine QPL ids */
727 	struct gve_rx_queue_config *qcfg_rx;
728 	struct gve_tx_queue_config *qcfg_tx;
729 
730 	u16 ring_size;
731 	u16 packet_buffer_size;
732 	bool raw_addressing;
733 	bool enable_header_split;
734 	bool reset_rss;
735 	bool xdp;
736 
737 	/* Allocated resources are returned here */
738 	struct gve_rx_ring *rx;
739 };
740 
741 /* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
742  * when the entire configure_device_resources command is zeroed out and the
743  * queue_format is not specified.
744  */
745 enum gve_queue_format {
746 	GVE_QUEUE_FORMAT_UNSPECIFIED	= 0x0,
747 	GVE_GQI_RDA_FORMAT		= 0x1,
748 	GVE_GQI_QPL_FORMAT		= 0x2,
749 	GVE_DQO_RDA_FORMAT		= 0x3,
750 	GVE_DQO_QPL_FORMAT		= 0x4,
751 };
752 
753 struct gve_flow_spec {
754 	__be32 src_ip[4];
755 	__be32 dst_ip[4];
756 	union {
757 		struct {
758 			__be16 src_port;
759 			__be16 dst_port;
760 		};
761 		__be32 spi;
762 	};
763 	union {
764 		u8 tos;
765 		u8 tclass;
766 	};
767 };
768 
769 struct gve_flow_rule {
770 	u32 location;
771 	u16 flow_type;
772 	u16 action;
773 	struct gve_flow_spec key;
774 	struct gve_flow_spec mask;
775 };
776 
777 struct gve_flow_rules_cache {
778 	bool rules_cache_synced; /* False if the driver's rules_cache is outdated */
779 	struct gve_adminq_queried_flow_rule *rules_cache;
780 	__be32 *rule_ids_cache;
781 	/* The total number of queried rules that are stored in the caches */
782 	u32 rules_cache_num;
783 	u32 rule_ids_cache_num;
784 };
785 
786 struct gve_rss_config {
787 	u8 *hash_key;
788 	u32 *hash_lut;
789 };
790 
791 struct gve_ptp {
792 	struct ptp_clock_info info;
793 	struct ptp_clock *clock;
794 	struct gve_priv *priv;
795 };
796 
797 struct gve_priv {
798 	struct net_device *dev;
799 	struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
800 	struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
801 	struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
802 	struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */
803 	dma_addr_t irq_db_indices_bus;
804 	struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */
805 	char mgmt_msix_name[IFNAMSIZ + 16];
806 	u32 mgmt_msix_idx;
807 	__be32 *counter_array; /* array of num_event_counters */
808 	dma_addr_t counter_array_bus;
809 
810 	u16 num_event_counters;
811 	u16 tx_desc_cnt; /* num desc per ring */
812 	u16 rx_desc_cnt; /* num desc per ring */
813 	u16 max_tx_desc_cnt;
814 	u16 max_rx_desc_cnt;
815 	u16 min_tx_desc_cnt;
816 	u16 min_rx_desc_cnt;
817 	bool modify_ring_size_enabled;
818 	bool default_min_ring_size;
819 	u16 tx_pages_per_qpl; /* Number of pages per TX QPL suggested by the NIC */
820 	u64 max_registered_pages;
821 	u64 num_registered_pages; /* num pages registered with NIC */
822 	struct bpf_prog *xdp_prog; /* XDP BPF program */
823 	u32 rx_copybreak; /* copy packets smaller than this */
824 	u16 default_num_queues; /* default num queues to set up */
825 
826 	struct gve_tx_queue_config tx_cfg;
827 	struct gve_rx_queue_config rx_cfg;
828 	unsigned long *xsk_pools; /* bitmap of RX queues with XSK pools */
829 	u32 num_ntfy_blks; /* split between TX and RX so must be even */
830 	int numa_node;
831 
832 	struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
833 	__be32 __iomem *db_bar2; /* "array" of doorbells */
834 	u32 msg_enable;	/* level for netif* netdev print macros	*/
835 	struct pci_dev *pdev;
836 
837 	/* metrics */
838 	u32 tx_timeo_cnt;
839 
840 	/* Admin queue - see gve_adminq.h */
841 	union gve_adminq_command *adminq;
842 	dma_addr_t adminq_bus_addr;
843 	struct dma_pool *adminq_pool;
844 	struct mutex adminq_lock; /* Protects adminq command execution */
845 	u32 adminq_mask; /* masks prod_cnt to adminq size */
846 	u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
847 	u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
848 	u32 adminq_timeouts; /* free-running count of AQ cmds timeouts */
849 	/* free-running counts of each AQ cmd executed */
850 	u32 adminq_describe_device_cnt;
851 	u32 adminq_cfg_device_resources_cnt;
852 	u32 adminq_register_page_list_cnt;
853 	u32 adminq_unregister_page_list_cnt;
854 	u32 adminq_create_tx_queue_cnt;
855 	u32 adminq_create_rx_queue_cnt;
856 	u32 adminq_destroy_tx_queue_cnt;
857 	u32 adminq_destroy_rx_queue_cnt;
858 	u32 adminq_dcfg_device_resources_cnt;
859 	u32 adminq_set_driver_parameter_cnt;
860 	u32 adminq_report_stats_cnt;
861 	u32 adminq_report_link_speed_cnt;
862 	u32 adminq_report_nic_timestamp_cnt;
863 	u32 adminq_get_ptype_map_cnt;
864 	u32 adminq_verify_driver_compatibility_cnt;
865 	u32 adminq_query_flow_rules_cnt;
866 	u32 adminq_cfg_flow_rule_cnt;
867 	u32 adminq_cfg_rss_cnt;
868 	u32 adminq_query_rss_cnt;
869 
870 	/* Global stats */
871 	u32 interface_up_cnt; /* count of times interface turned up since last reset */
872 	u32 interface_down_cnt; /* count of times interface turned down since last reset */
873 	u32 reset_cnt; /* count of resets */
874 	u32 page_alloc_fail; /* count of page alloc fails */
875 	u32 dma_mapping_error; /* count of dma mapping errors */
876 	u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
877 	u32 suspend_cnt; /* count of times suspended */
878 	u32 resume_cnt; /* count of times resumed */
879 	struct workqueue_struct *gve_wq;
880 	struct work_struct service_task;
881 	struct work_struct stats_report_task;
882 	unsigned long service_task_flags;
883 	unsigned long state_flags;
884 
885 	struct gve_stats_report *stats_report;
886 	u64 stats_report_len;
887 	dma_addr_t stats_report_bus; /* dma address for the stats report */
888 	unsigned long ethtool_flags;
889 
890 	unsigned long stats_report_timer_period;
891 	struct timer_list stats_report_timer;
892 
893 	/* Gvnic device link speed from hypervisor. */
894 	u64 link_speed;
895 	bool up_before_suspend; /* True if dev was up before suspend */
896 
897 	struct gve_ptype_lut *ptype_lut_dqo;
898 
899 	/* Must be a power of two. */
900 	u16 max_rx_buffer_size; /* device limit */
901 
902 	enum gve_queue_format queue_format;
903 
904 	/* Interrupt coalescing settings */
905 	u32 tx_coalesce_usecs;
906 	u32 rx_coalesce_usecs;
907 
908 	u16 header_buf_size; /* device configured, header-split supported if non-zero */
909 	bool header_split_enabled; /* True if the header split is enabled by the user */
910 
911 	u32 max_flow_rules;
912 	u32 num_flow_rules;
913 
914 	struct gve_flow_rules_cache flow_rules_cache;
915 
916 	u16 rss_key_size;
917 	u16 rss_lut_size;
918 	bool cache_rss_config;
919 	struct gve_rss_config rss_config;
920 
921 	/* True if the device supports reading the nic clock */
922 	bool nic_timestamp_supported;
923 	struct gve_ptp *ptp;
924 	struct kernel_hwtstamp_config ts_config;
925 	struct gve_nic_ts_report *nic_ts_report;
926 	dma_addr_t nic_ts_report_bus;
927 	u64 last_sync_nic_counter; /* Clock counter from last NIC TS report */
928 };
929 
930 enum gve_service_task_flags_bit {
931 	GVE_PRIV_FLAGS_DO_RESET			= 1,
932 	GVE_PRIV_FLAGS_RESET_IN_PROGRESS	= 2,
933 	GVE_PRIV_FLAGS_PROBE_IN_PROGRESS	= 3,
934 	GVE_PRIV_FLAGS_DO_REPORT_STATS = 4,
935 };
936 
937 enum gve_state_flags_bit {
938 	GVE_PRIV_FLAGS_ADMIN_QUEUE_OK		= 1,
939 	GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK	= 2,
940 	GVE_PRIV_FLAGS_DEVICE_RINGS_OK		= 3,
941 	GVE_PRIV_FLAGS_NAPI_ENABLED		= 4,
942 };
943 
944 enum gve_ethtool_flags_bit {
945 	GVE_PRIV_FLAGS_REPORT_STATS		= 0,
946 };
947 
948 static inline bool gve_get_do_reset(struct gve_priv *priv)
949 {
950 	return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
951 }
952 
953 static inline void gve_set_do_reset(struct gve_priv *priv)
954 {
955 	set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
956 }
957 
958 static inline void gve_clear_do_reset(struct gve_priv *priv)
959 {
960 	clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
961 }
962 
963 static inline bool gve_get_reset_in_progress(struct gve_priv *priv)
964 {
965 	return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS,
966 			&priv->service_task_flags);
967 }
968 
969 static inline void gve_set_reset_in_progress(struct gve_priv *priv)
970 {
971 	set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
972 }
973 
974 static inline void gve_clear_reset_in_progress(struct gve_priv *priv)
975 {
976 	clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
977 }
978 
979 static inline bool gve_get_probe_in_progress(struct gve_priv *priv)
980 {
981 	return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS,
982 			&priv->service_task_flags);
983 }
984 
985 static inline void gve_set_probe_in_progress(struct gve_priv *priv)
986 {
987 	set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
988 }
989 
990 static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
991 {
992 	clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
993 }
994 
995 static inline bool gve_get_do_report_stats(struct gve_priv *priv)
996 {
997 	return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS,
998 			&priv->service_task_flags);
999 }
1000 
1001 static inline void gve_set_do_report_stats(struct gve_priv *priv)
1002 {
1003 	set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
1004 }
1005 
1006 static inline void gve_clear_do_report_stats(struct gve_priv *priv)
1007 {
1008 	clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
1009 }
1010 
1011 static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
1012 {
1013 	return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
1014 }
1015 
1016 static inline void gve_set_admin_queue_ok(struct gve_priv *priv)
1017 {
1018 	set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
1019 }
1020 
1021 static inline void gve_clear_admin_queue_ok(struct gve_priv *priv)
1022 {
1023 	clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
1024 }
1025 
1026 static inline bool gve_get_device_resources_ok(struct gve_priv *priv)
1027 {
1028 	return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
1029 }
1030 
1031 static inline void gve_set_device_resources_ok(struct gve_priv *priv)
1032 {
1033 	set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
1034 }
1035 
1036 static inline void gve_clear_device_resources_ok(struct gve_priv *priv)
1037 {
1038 	clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
1039 }
1040 
1041 static inline bool gve_get_device_rings_ok(struct gve_priv *priv)
1042 {
1043 	return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
1044 }
1045 
1046 static inline void gve_set_device_rings_ok(struct gve_priv *priv)
1047 {
1048 	set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
1049 }
1050 
1051 static inline void gve_clear_device_rings_ok(struct gve_priv *priv)
1052 {
1053 	clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
1054 }
1055 
1056 static inline bool gve_get_napi_enabled(struct gve_priv *priv)
1057 {
1058 	return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
1059 }
1060 
1061 static inline void gve_set_napi_enabled(struct gve_priv *priv)
1062 {
1063 	set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
1064 }
1065 
1066 static inline void gve_clear_napi_enabled(struct gve_priv *priv)
1067 {
1068 	clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
1069 }
1070 
1071 static inline bool gve_get_report_stats(struct gve_priv *priv)
1072 {
1073 	return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
1074 }
1075 
1076 static inline void gve_clear_report_stats(struct gve_priv *priv)
1077 {
1078 	clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
1079 }
1080 
1081 /* Returns the address of the given notify block's irq doorbell
1082  */
1083 static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
1084 					       struct gve_notify_block *block)
1085 {
1086 	return &priv->db_bar2[be32_to_cpu(*block->irq_db_index)];
1087 }
1088 
1089 /* Returns the index into ntfy_blocks of the given tx ring's block
1090  */
1091 static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
1092 {
1093 	return queue_idx;
1094 }
1095 
1096 /* Returns the index into ntfy_blocks of the given rx ring's block
1097  */
1098 static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
1099 {
1100 	return (priv->num_ntfy_blks / 2) + queue_idx;
1101 }
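/* For example (illustrative numbers): with num_ntfy_blks == 8, TX queue 3 uses
 * notify block 3 while RX queue 3 uses notify block 8 / 2 + 3 = 7.
 */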
1102 
1103 static inline bool gve_is_qpl(struct gve_priv *priv)
1104 {
1105 	return priv->queue_format == GVE_GQI_QPL_FORMAT ||
1106 		priv->queue_format == GVE_DQO_QPL_FORMAT;
1107 }
1108 
1109 /* Returns the number of tx queue page lists */
1110 static inline u32 gve_num_tx_qpls(const struct gve_tx_queue_config *tx_cfg,
1111 				  bool is_qpl)
1112 {
1113 	if (!is_qpl)
1114 		return 0;
1115 	return tx_cfg->num_queues + tx_cfg->num_xdp_queues;
1116 }
1117 
1118 /* Returns the number of rx queue page lists */
1119 static inline u32 gve_num_rx_qpls(const struct gve_rx_queue_config *rx_cfg,
1120 				  bool is_qpl)
1121 {
1122 	if (!is_qpl)
1123 		return 0;
1124 	return rx_cfg->num_queues;
1125 }
1126 
1127 static inline u32 gve_tx_qpl_id(struct gve_priv *priv, int tx_qid)
1128 {
1129 	return tx_qid;
1130 }
1131 
1132 static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
1133 {
1134 	return priv->tx_cfg.max_queues + rx_qid;
1135 }
1136 
1137 static inline u32 gve_get_rx_qpl_id(const struct gve_tx_queue_config *tx_cfg,
1138 				    int rx_qid)
1139 {
1140 	return tx_cfg->max_queues + rx_qid;
1141 }
1142 
1143 static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
1144 {
1145 	return gve_tx_qpl_id(priv, 0);
1146 }
1147 
1148 static inline u32 gve_rx_start_qpl_id(const struct gve_tx_queue_config *tx_cfg)
1149 {
1150 	return gve_get_rx_qpl_id(tx_cfg, 0);
1151 }
1152 
1153 static inline u32 gve_get_rx_pages_per_qpl_dqo(u32 rx_desc_cnt)
1154 {
1155 	/* For DQO, page count should be more than ring size for
1156 	 * out-of-order completions. Set it to twice the ring size.
1157 	 */
1158 	return 2 * rx_desc_cnt;
1159 }
1160 
1161 /* Returns the correct dma direction for tx and rx qpls */
1162 static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
1163 						      int id)
1164 {
1165 	if (id < gve_rx_start_qpl_id(&priv->tx_cfg))
1166 		return DMA_TO_DEVICE;
1167 	else
1168 		return DMA_FROM_DEVICE;
1169 }
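/* For example (illustrative numbers): with tx_cfg.max_queues == 16, TX QPL ids
 * occupy 0..15 and are mapped DMA_TO_DEVICE, while RX QPL ids start at 16, so
 * QPL id 16 belongs to RX queue 0 and is mapped DMA_FROM_DEVICE.
 */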
1170 
1171 static inline bool gve_is_gqi(struct gve_priv *priv)
1172 {
1173 	return priv->queue_format == GVE_GQI_RDA_FORMAT ||
1174 		priv->queue_format == GVE_GQI_QPL_FORMAT;
1175 }
1176 
1177 static inline bool gve_is_dqo(struct gve_priv *priv)
1178 {
1179 	return priv->queue_format == GVE_DQO_RDA_FORMAT ||
1180 	       priv->queue_format == GVE_DQO_QPL_FORMAT;
1181 }
1182 
1183 static inline u32 gve_num_tx_queues(struct gve_priv *priv)
1184 {
1185 	return priv->tx_cfg.num_queues + priv->tx_cfg.num_xdp_queues;
1186 }
1187 
1188 static inline u32 gve_xdp_tx_queue_id(struct gve_priv *priv, u32 queue_id)
1189 {
1190 	return priv->tx_cfg.num_queues + queue_id;
1191 }
1192 
1193 static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv)
1194 {
1195 	return gve_xdp_tx_queue_id(priv, 0);
1196 }
1197 
1198 static inline bool gve_supports_xdp_xmit(struct gve_priv *priv)
1199 {
1200 	switch (priv->queue_format) {
1201 	case GVE_GQI_QPL_FORMAT:
1202 	case GVE_DQO_RDA_FORMAT:
1203 		return true;
1204 	default:
1205 		return false;
1206 	}
1207 }
1208 
1209 /* gqi napi handler defined in gve_main.c */
1210 int gve_napi_poll(struct napi_struct *napi, int budget);
1211 
1212 /* buffers */
1213 int gve_alloc_page(struct gve_priv *priv, struct device *dev,
1214 		   struct page **page, dma_addr_t *dma,
1215 		   enum dma_data_direction, gfp_t gfp_flags);
1216 void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
1217 		   enum dma_data_direction);
1218 /* qpls */
1219 struct gve_queue_page_list *gve_alloc_queue_page_list(struct gve_priv *priv,
1220 						      u32 id, int pages);
1221 void gve_free_queue_page_list(struct gve_priv *priv,
1222 			      struct gve_queue_page_list *qpl,
1223 			      u32 id);
1224 /* tx handling */
1225 netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
1226 int gve_xdp_xmit_gqi(struct net_device *dev, int n, struct xdp_frame **frames,
1227 		     u32 flags);
1228 int gve_xdp_xmit_dqo(struct net_device *dev, int n, struct xdp_frame **frames,
1229 		     u32 flags);
1230 int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
1231 		     void *data, int len, void *frame_p);
1232 void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
1233 int gve_xdp_xmit_one_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
1234 			 struct xdp_frame *xdpf);
1235 bool gve_tx_poll(struct gve_notify_block *block, int budget);
1236 bool gve_xdp_poll(struct gve_notify_block *block, int budget);
1237 int gve_xsk_tx_poll(struct gve_notify_block *block, int budget);
1238 int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
1239 			   struct gve_tx_alloc_rings_cfg *cfg);
1240 void gve_tx_free_rings_gqi(struct gve_priv *priv,
1241 			   struct gve_tx_alloc_rings_cfg *cfg);
1242 void gve_tx_start_ring_gqi(struct gve_priv *priv, int idx);
1243 void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx);
1244 u32 gve_tx_load_event_counter(struct gve_priv *priv,
1245 			      struct gve_tx_ring *tx);
1246 bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
1247 /* rx handling */
1248 void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
1249 int gve_rx_poll(struct gve_notify_block *block, int budget);
1250 bool gve_rx_work_pending(struct gve_rx_ring *rx);
1251 int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
1252 			  struct gve_rx_alloc_rings_cfg *cfg,
1253 			  struct gve_rx_ring *rx,
1254 			  int idx);
1255 void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
1256 			  struct gve_rx_alloc_rings_cfg *cfg);
1257 int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
1258 			   struct gve_rx_alloc_rings_cfg *cfg);
1259 void gve_rx_free_rings_gqi(struct gve_priv *priv,
1260 			   struct gve_rx_alloc_rings_cfg *cfg);
1261 void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx);
1262 void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx);
1263 bool gve_header_split_supported(const struct gve_priv *priv);
1264 int gve_set_rx_buf_len_config(struct gve_priv *priv, u32 rx_buf_len,
1265 			      struct netlink_ext_ack *extack,
1266 			      struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
1267 int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split,
1268 			  struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
1269 /* rx buffer handling */
1270 int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs);
1271 void gve_free_page_dqo(struct gve_priv *priv, struct gve_rx_buf_state_dqo *bs,
1272 		       bool free_page);
1273 struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx);
1274 bool gve_buf_state_is_allocated(struct gve_rx_ring *rx,
1275 				struct gve_rx_buf_state_dqo *buf_state);
1276 void gve_free_buf_state(struct gve_rx_ring *rx,
1277 			struct gve_rx_buf_state_dqo *buf_state);
1278 struct gve_rx_buf_state_dqo *gve_dequeue_buf_state(struct gve_rx_ring *rx,
1279 						   struct gve_index_list *list);
1280 void gve_enqueue_buf_state(struct gve_rx_ring *rx, struct gve_index_list *list,
1281 			   struct gve_rx_buf_state_dqo *buf_state);
1282 struct gve_rx_buf_state_dqo *gve_get_recycled_buf_state(struct gve_rx_ring *rx);
1283 void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
1284 			 struct gve_rx_buf_state_dqo *buf_state);
1285 void gve_free_to_page_pool(struct gve_rx_ring *rx,
1286 			   struct gve_rx_buf_state_dqo *buf_state,
1287 			   bool allow_direct);
1288 int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx,
1289 			   struct gve_rx_buf_state_dqo *buf_state);
1290 void gve_free_qpl_page_dqo(struct gve_rx_buf_state_dqo *buf_state);
1291 void gve_reuse_buffer(struct gve_rx_ring *rx,
1292 		      struct gve_rx_buf_state_dqo *buf_state);
1293 void gve_free_buffer(struct gve_rx_ring *rx,
1294 		     struct gve_rx_buf_state_dqo *buf_state);
1295 int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc);
1296 struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
1297 					  struct gve_rx_ring *rx,
1298 					  bool xdp);
1299 
1300 /* Reset */
1301 void gve_schedule_reset(struct gve_priv *priv);
1302 int gve_reset(struct gve_priv *priv, bool attempt_teardown);
1303 void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
1304 			     struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
1305 			     struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
1306 int gve_adjust_config(struct gve_priv *priv,
1307 		      struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
1308 		      struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
1309 int gve_adjust_queues(struct gve_priv *priv,
1310 		      struct gve_rx_queue_config new_rx_config,
1311 		      struct gve_tx_queue_config new_tx_config,
1312 		      bool reset_rss);
1313 /* flow steering rule */
1314 int gve_get_flow_rule_entry(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
1315 int gve_get_flow_rule_ids(struct gve_priv *priv, struct ethtool_rxnfc *cmd, u32 *rule_locs);
1316 int gve_add_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
1317 int gve_del_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
1318 int gve_flow_rules_reset(struct gve_priv *priv);
1319 /* RSS config */
1320 int gve_init_rss_config(struct gve_priv *priv, u16 num_queues);
1321 /* PTP and timestamping */
1322 #if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
1323 int gve_clock_nic_ts_read(struct gve_priv *priv);
1324 int gve_init_clock(struct gve_priv *priv);
1325 void gve_teardown_clock(struct gve_priv *priv);
1326 #else /* CONFIG_PTP_1588_CLOCK */
1327 static inline int gve_clock_nic_ts_read(struct gve_priv *priv)
1328 {
1329 	return -EOPNOTSUPP;
1330 }
1331 
1332 static inline int gve_init_clock(struct gve_priv *priv)
1333 {
1334 	return 0;
1335 }
1336 
1337 static inline void gve_teardown_clock(struct gve_priv *priv) { }
1338 #endif /* CONFIG_PTP_1588_CLOCK */
1339 /* report stats handling */
1340 void gve_handle_report_stats(struct gve_priv *priv);
1341 /* exported by ethtool.c */
1342 extern const struct ethtool_ops gve_ethtool_ops;
1343 /* needed by ethtool */
1344 extern char gve_driver_name[];
1345 extern const char gve_version_str[];
1346 #endif /* _GVE_H_ */
1347