/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
 * Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2024 Google LLC
 */

#ifndef _GVE_H_
#define _GVE_H_

#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/ethtool_netlink.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/pci.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/u64_stats_sync.h>
#include <net/page_pool/helpers.h>
#include <net/xdp.h>

#include "gve_desc.h"
#include "gve_desc_dqo.h"

#ifndef PCI_VENDOR_ID_GOOGLE
#define PCI_VENDOR_ID_GOOGLE	0x1ae0
#endif

#define PCI_DEV_ID_GVNIC	0x0042

#define GVE_REGISTER_BAR	0
#define GVE_DOORBELL_BAR	2

/* Driver can alloc up to 2 segments for the header and 2 for the payload. */
#define GVE_TX_MAX_IOVEC	4
/* 1 for management, 1 for rx, 1 for tx */
#define GVE_MIN_MSIX 3

/* Numbers of gve tx/rx stats in stats report. */
#define GVE_TX_STATS_REPORT_NUM	6
#define GVE_RX_STATS_REPORT_NUM	2

/* Interval to schedule a stats report update, 20000ms. */
#define GVE_STATS_REPORT_TIMER_PERIOD	20000

/* Numbers of NIC tx/rx stats in stats report. */
#define NIC_TX_STATS_REPORT_NUM	0
#define NIC_RX_STATS_REPORT_NUM	4

#define GVE_ADMINQ_BUFFER_SIZE 4096

#define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))

/* PTYPEs are always 10 bits. */
#define GVE_NUM_PTYPES	1024

/* Default minimum ring size */
#define GVE_DEFAULT_MIN_TX_RING_SIZE 256
#define GVE_DEFAULT_MIN_RX_RING_SIZE 512

#define GVE_DEFAULT_RX_BUFFER_SIZE 2048

#define GVE_MAX_RX_BUFFER_SIZE 4096

#define GVE_XDP_RX_BUFFER_SIZE_DQO 4096

#define GVE_DEFAULT_RX_BUFFER_OFFSET 2048

#define GVE_PAGE_POOL_SIZE_MULTIPLIER 4

#define GVE_FLOW_RULES_CACHE_SIZE \
	(GVE_ADMINQ_BUFFER_SIZE / sizeof(struct gve_adminq_queried_flow_rule))
#define GVE_FLOW_RULE_IDS_CACHE_SIZE \
	(GVE_ADMINQ_BUFFER_SIZE / sizeof(((struct gve_adminq_queried_flow_rule *)0)->location))

#define GVE_RSS_KEY_SIZE	40
#define GVE_RSS_INDIR_SIZE	128

#define GVE_XDP_ACTIONS 5

#define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182

#define GVE_DEFAULT_HEADER_BUFFER_SIZE 128

#define DQO_QPL_DEFAULT_TX_PAGES 512

/* Maximum TSO size supported on DQO */
#define GVE_DQO_TX_MAX	0x3FFFF

#define GVE_TX_BUF_SHIFT_DQO 11

/* 2K buffers for DQO-QPL */
#define GVE_TX_BUF_SIZE_DQO BIT(GVE_TX_BUF_SHIFT_DQO)
#define GVE_TX_BUFS_PER_PAGE_DQO (PAGE_SIZE >> GVE_TX_BUF_SHIFT_DQO)
#define GVE_MAX_TX_BUFS_PER_PKT (DIV_ROUND_UP(GVE_DQO_TX_MAX, GVE_TX_BUF_SIZE_DQO))
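
/* Worked example (illustrative, assuming a 4K PAGE_SIZE): GVE_TX_BUF_SIZE_DQO
 * is BIT(11) == 2048 bytes, GVE_TX_BUFS_PER_PAGE_DQO is 4096 >> 11 == 2 bounce
 * buffers per QPL page, and GVE_MAX_TX_BUFS_PER_PKT is
 * DIV_ROUND_UP(0x3FFFF, 2048) == 128 buffers for a maximally sized TSO packet.
 */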

/* If the number of free/recyclable buffers is less than this threshold, the
 * driver allocates and uses a non-qpl page on the receive path of DQO QPL to
 * free up buffers.
 * Value is set large enough to post at least 3 64K LRO packets via 2K buffers
 * to the NIC.
 */
#define GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD 96
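
/* Derivation of the threshold above (illustrative): with 2K buffers, one 64K
 * LRO packet consumes 65536 / 2048 == 32 buffers, so three such packets need
 * 3 * 32 == 96 buffers.
 */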

#define GVE_DQO_RX_HWTSTAMP_VALID 0x1

/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
	struct gve_rx_desc *desc_ring; /* the descriptor ring */
	dma_addr_t bus; /* the bus for the desc_ring */
	u8 seqno; /* the next expected seqno for this desc */
};

/* The page info for a single slot in the RX data queue */
struct gve_rx_slot_page_info {
	/* netmem is used for DQO RDA mode
	 * page is used in all other modes
	 */
	union {
		struct page *page;
		netmem_ref netmem;
	};
	void *page_address;
	u32 page_offset; /* offset to write to in page */
	unsigned int buf_size;
	int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
	u16 pad; /* adjustment for rx padding */
	u8 can_flip; /* tracks if the networking stack is using the page */
};
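
/* Illustrative sketch (not driver code): pagecnt_bias lets the driver decide
 * whether the stack still holds references to the page without taking a new
 * reference per packet. A hypothetical ownership check might look like:
 *
 *	static bool gve_only_driver_owns(const struct gve_rx_slot_page_info *info)
 *	{
 *		return page_ref_count(info->page) == info->pagecnt_bias;
 *	}
 *
 * If the counts match, every reference handed to the stack has been dropped
 * and the other half of the page can be flipped to (see can_flip above).
 */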

/* A list of pages registered with the device during setup and used by a queue
 * as buffers
 */
struct gve_queue_page_list {
	u32 id; /* unique id */
	u32 num_entries;
	struct page **pages; /* list of num_entries pages */
	dma_addr_t *page_buses; /* the dma addrs of the pages */
};

/* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */
struct gve_rx_data_queue {
	union gve_rx_data_slot *data_ring; /* read by NIC */
	dma_addr_t data_bus; /* dma mapping of the slots */
	struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
	struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
	u8 raw_addressing; /* use raw_addressing? */
};

struct gve_priv;

/* RX buffer queue for posting buffers to HW.
 * Each RX (completion) queue has a corresponding buffer queue.
 */
struct gve_rx_buf_queue_dqo {
	struct gve_rx_desc_dqo *desc_ring;
	dma_addr_t bus;
	u32 head; /* Pointer to start cleaning buffers at. */
	u32 tail; /* Last posted buffer index + 1 */
	u32 mask; /* Mask for indices to the size of the ring */
};

/* RX completion queue to receive packets from HW. */
struct gve_rx_compl_queue_dqo {
	struct gve_rx_compl_desc_dqo *desc_ring;
	dma_addr_t bus;

	/* Number of slots which did not have a buffer posted yet. We should not
	 * post more buffers than the queue size to avoid HW overrunning the
	 * queue.
	 */
	int num_free_slots;

	/* HW uses a "generation bit" to notify SW of new descriptors. When a
	 * descriptor's generation bit is different from the current generation,
	 * that descriptor is ready to be consumed by SW.
	 */
	u8 cur_gen_bit;

	/* Pointer into desc_ring where the next completion descriptor will be
	 * received.
	 */
	u32 head;
	u32 mask; /* Mask for indices to the size of the ring */
};
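
/* Illustrative sketch (not driver code) of how a generation bit is typically
 * consumed; gen_bit() and process_compl() are hypothetical helpers:
 *
 *	while (gen_bit(&complq->desc_ring[complq->head]) != complq->cur_gen_bit) {
 *		process_compl(&complq->desc_ring[complq->head]);
 *		complq->head = (complq->head + 1) & complq->mask;
 *		if (complq->head == 0)
 *			complq->cur_gen_bit ^= 1;
 *	}
 *
 * The bit flips every time the ring wraps, so stale descriptors from the
 * previous pass are never mistaken for new ones.
 */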

struct gve_header_buf {
	u8 *data;
	dma_addr_t addr;
};

/* Stores state for tracking buffers posted to HW */
struct gve_rx_buf_state_dqo {
	/* The page posted to HW. */
	struct gve_rx_slot_page_info page_info;

	/* XSK buffer */
	struct xdp_buff *xsk_buff;

	/* The DMA address corresponding to `page_info`. */
	dma_addr_t addr;

	/* Last offset into the page when it only had a single reference, at
	 * which point every other offset is free to be reused.
	 */
	u32 last_single_ref_offset;

	/* Linked list index to next element in the list, or -1 if none */
	s16 next;
};

/* `head` and `tail` are indices into an array, or -1 if empty. */
struct gve_index_list {
	s16 head;
	s16 tail;
};
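
/* Illustrative sketch (not driver code): gve_index_list threads a linked list
 * through an array using s16 indices instead of pointers. Walking the RX
 * buffer-state array (see struct gve_rx_ring below) might look like:
 *
 *	s16 idx = list->head;
 *
 *	while (idx != -1) {
 *		struct gve_rx_buf_state_dqo *bs = &rx->dqo.buf_states[idx];
 *
 *		idx = bs->next;
 *	}
 *
 * Two-byte indices keep the list links compact for structures that are
 * allocated in bulk.
 */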

/* A single received packet split across multiple buffers may be
 * reconstructed using the information in this structure.
 */
struct gve_rx_ctx {
	/* head and tail of skb chain for the current packet or NULL if none */
	struct sk_buff *skb_head;
	struct sk_buff *skb_tail;
	u32 total_size;
	u8 frag_cnt;
	bool drop_pkt;
};

struct gve_rx_cnts {
	u32 ok_pkt_bytes;
	u16 ok_pkt_cnt;
	u16 total_pkt_cnt;
	u16 cont_pkt_cnt;
	u16 desc_err_pkt_cnt;
};

/* Contains datapath state used to represent an RX queue. */
struct gve_rx_ring {
	struct gve_priv *gve;

	u16 packet_buffer_size;		/* Size of buffer posted to NIC */
	u16 packet_buffer_truesize;	/* Total size of RX buffer */
	u16 rx_headroom;

	union {
		/* GQI fields */
		struct {
			struct gve_rx_desc_queue desc;
			struct gve_rx_data_queue data;

			/* threshold for posting new buffs and descs */
			u32 db_threshold;

			u32 qpl_copy_pool_mask;
			u32 qpl_copy_pool_head;
			struct gve_rx_slot_page_info *qpl_copy_pool;
		};

		/* DQO fields. */
		struct {
			struct gve_rx_buf_queue_dqo bufq;
			struct gve_rx_compl_queue_dqo complq;

			struct gve_rx_buf_state_dqo *buf_states;
			u16 num_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Index into
			 * buf_states, or -1 if empty.
			 */
			s16 free_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indexes into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which are pointing to
			 * valid buffers.
			 *
			 * We use a FIFO here in order to increase the
			 * probability that buffers can be reused by increasing
			 * the time between usages.
			 */
			struct gve_index_list recycled_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indexes into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which have buffers
			 * which cannot be reused yet.
			 */
			struct gve_index_list used_buf_states;

			/* qpl assigned to this queue */
			struct gve_queue_page_list *qpl;

			/* index into queue page list */
			u32 next_qpl_page_idx;

			/* track number of used buffers */
			u16 used_buf_states_cnt;

			/* Address info of the buffers for header-split */
			struct gve_header_buf hdr_bufs;

			struct page_pool *page_pool;
		} dqo;
	};

	u64 rbytes; /* free-running bytes received */
	u64 rx_hsplit_bytes; /* free-running header bytes received */
	u64 rpackets; /* free-running packets received */
	u32 cnt; /* free-running total number of completed packets */
	u32 fill_cnt; /* free-running total number of descs and buffs posted */
	u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
	u64 rx_hsplit_pkt; /* free-running packets with headers split */
	u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
	u64 rx_copied_pkt; /* free-running total number of copied packets */
	u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
	u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
	u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
	/* free-running count of unsplit packets due to header buffer overflow or hdr_len being 0 */
	u64 rx_hsplit_unsplit_pkt;
	u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
	u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
	u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */
	u64 rx_frag_alloc_cnt; /* free-running count of rx page allocations */
	u64 xdp_tx_errors;
	u64 xdp_redirect_errors;
	u64 xdp_alloc_fails;
	u64 xdp_actions[GVE_XDP_ACTIONS];
	u32 q_num; /* queue index */
	u32 ntfy_id; /* notification block index */
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	dma_addr_t q_resources_bus; /* dma address for the queue resources */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */

	struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. */

	/* XDP stuff */
	struct xdp_rxq_info xdp_rxq;
	struct xsk_buff_pool *xsk_pool;
	struct page_frag_cache page_cache; /* Page cache to allocate XDP frames */
};

/* A TX desc ring entry */
union gve_tx_desc {
	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
	struct gve_tx_mtd_desc mtd; /* optional metadata descriptor */
	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
};

/* Tracks the memory in the fifo occupied by a segment of a packet */
struct gve_tx_iovec {
	u32 iov_offset; /* offset into this segment */
	u32 iov_len; /* length */
	u32 iov_padding; /* padding associated with this segment */
};

/* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
 * ring entry but only used for a pkt_desc not a seg_desc
 */
struct gve_tx_buffer_state {
	union {
		struct sk_buff *skb; /* skb for this pkt */
		struct xdp_frame *xdp_frame; /* xdp_frame */
	};
	struct {
		u16 size; /* size of xmitted xdp pkt */
		u8 is_xsk; /* xsk buff */
	} xdp;
	union {
		struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
		struct {
			DEFINE_DMA_UNMAP_ADDR(dma);
			DEFINE_DMA_UNMAP_LEN(len);
		};
	};
};

/* A TX buffer - each queue has one */
struct gve_tx_fifo {
	void *base; /* address of base of FIFO */
	u32 size; /* total size */
	atomic_t available; /* how much space is still available */
	u32 head; /* offset to write at */
	struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */
};

/* TX descriptor for DQO format */
union gve_tx_desc_dqo {
	struct gve_tx_pkt_desc_dqo pkt;
	struct gve_tx_tso_context_desc_dqo tso_ctx;
	struct gve_tx_general_context_desc_dqo general_ctx;
};

enum gve_packet_state {
	/* Packet is in free list, available to be allocated.
	 * This should always be zero since state is not explicitly initialized.
	 */
	GVE_PACKET_STATE_UNALLOCATED,
	/* Packet is expecting a regular data completion or miss completion */
	GVE_PACKET_STATE_PENDING_DATA_COMPL,
	/* Packet has received a miss completion and is expecting a
	 * re-injection completion.
	 */
	GVE_PACKET_STATE_PENDING_REINJECT_COMPL,
	/* No valid completion received within the specified timeout. */
	GVE_PACKET_STATE_TIMED_OUT_COMPL,
	/* XSK pending packet has received a packet/reinjection completion, or
	 * has timed out. At this point, the pending packet can be counted by
	 * xsk_tx_complete and freed.
	 */
	GVE_PACKET_STATE_XSK_COMPLETE,
};

enum gve_tx_pending_packet_dqo_type {
	GVE_TX_PENDING_PACKET_DQO_SKB,
	GVE_TX_PENDING_PACKET_DQO_XDP_FRAME,
	GVE_TX_PENDING_PACKET_DQO_XSK,
};

struct gve_tx_pending_packet_dqo {
	union {
		struct sk_buff *skb;
		struct xdp_frame *xdpf;
	};

	/* 0th element corresponds to the linear portion of `skb`, should be
	 * unmapped with `dma_unmap_single`.
	 *
	 * All others correspond to `skb`'s frags and should be unmapped with
	 * `dma_unmap_page`.
	 */
	union {
		struct {
			DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
			DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
		};
		s16 tx_qpl_buf_ids[GVE_MAX_TX_BUFS_PER_PKT];
	};

	u16 num_bufs;

	/* Linked list index to next element in the list, or -1 if none */
	s16 next;

	/* Linked list index to prev element in the list, or -1 if none.
	 * Used for tracking either outstanding miss completions or prematurely
	 * freed packets.
	 */
	s16 prev;

	/* Identifies the current state of the packet as defined in
	 * `enum gve_packet_state`.
	 */
	u8 state : 3;

	/* gve_tx_pending_packet_dqo_type */
	u8 type : 2;

	/* If packet is an outstanding miss completion, then the packet is
	 * freed if the corresponding re-injection completion is not received
	 * before kernel jiffies exceeds timeout_jiffies.
	 */
	unsigned long timeout_jiffies;
};
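
/* Illustrative sketch (not driver code): timeout_jiffies is meant for the
 * standard jiffies comparison pattern. A hypothetical scan of the
 * miss-completion list could do:
 *
 *	if (pkt->state == GVE_PACKET_STATE_PENDING_REINJECT_COMPL &&
 *	    time_after(jiffies, pkt->timeout_jiffies))
 *		gve_handle_miss_timeout(tx, pkt);	(hypothetical helper)
 *
 * time_after() handles jiffies wraparound correctly, which a plain ">"
 * comparison would not.
 */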

/* Contains datapath state used to represent a TX queue. */
struct gve_tx_ring {
	/* Cacheline 0 -- Accessed & dirtied during transmit */
	union {
		/* GQI fields */
		struct {
			struct gve_tx_fifo tx_fifo;
			u32 req; /* driver tracked head pointer */
			u32 done; /* driver tracked tail pointer */
		};

		/* DQO fields. */
		struct {
			/* Spinlock for XDP tx traffic */
			spinlock_t xdp_lock;

			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is a consumer list owned by the TX path. When it
			 * runs out, the producer list is stolen from the
			 * completion handling path
			 * (dqo_compl.free_pending_packets).
			 */
			s16 free_pending_packets;

			/* Cached value of `dqo_compl.hw_tx_head` */
			u32 head;
			u32 tail; /* Last posted buffer index + 1 */

			/* Index of the last descriptor with "report event" bit
			 * set.
			 */
			u32 last_re_idx;

			/* free running number of packet buf descriptors posted */
			u16 posted_packet_desc_cnt;
			/* free running number of packet buf descriptors completed */
			u16 completed_packet_desc_cnt;

			/* QPL fields */
			struct {
				/* Linked list of gve_tx_buf_dqo. Index into
				 * tx_qpl_buf_next, or -1 if empty.
				 *
				 * This is a consumer list owned by the TX path. When it
				 * runs out, the producer list is stolen from the
				 * completion handling path
				 * (dqo_compl.free_tx_qpl_buf_head).
				 */
				s16 free_tx_qpl_buf_head;

				/* Free running count of the number of QPL tx buffers
				 * allocated
				 */
				u32 alloc_tx_qpl_buf_cnt;

				/* Cached value of `dqo_compl.free_tx_qpl_buf_cnt` */
				u32 free_tx_qpl_buf_cnt;
			};

			atomic_t xsk_reorder_queue_tail;
		} dqo_tx;
	};

	/* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */
	union {
		/* GQI fields */
		struct {
			/* Spinlock for when cleanup in progress */
			spinlock_t clean_lock;
			/* Spinlock for XDP tx traffic */
			spinlock_t xdp_lock;
		};

		/* DQO fields. */
		struct {
			u32 head; /* Last read on compl_desc */

			/* Tracks the current gen bit of compl_q */
			u8 cur_gen_bit;

			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is the producer list, owned by the completion
			 * handling path. When the consumer list
			 * (dqo_tx.free_pending_packets) runs out, this list
			 * will be stolen.
			 */
			atomic_t free_pending_packets;

			/* Last TX ring index fetched by HW */
			atomic_t hw_tx_head;

			u16 xsk_reorder_queue_head;
			u16 xsk_reorder_queue_tail;

			/* List to track pending packets which received a miss
			 * completion but not a corresponding reinjection.
			 */
			struct gve_index_list miss_completions;

			/* List to track pending packets that were completed
			 * before receiving a valid completion because they
			 * reached a specified timeout.
			 */
			struct gve_index_list timed_out_completions;

			/* QPL fields */
			struct {
				/* Linked list of gve_tx_buf_dqo. Index into
				 * tx_qpl_buf_next, or -1 if empty.
				 *
				 * This is the producer list, owned by the completion
				 * handling path. When the consumer list
				 * (dqo_tx.free_tx_qpl_buf_head) runs out, this list
				 * will be stolen.
				 */
				atomic_t free_tx_qpl_buf_head;

				/* Free running count of the number of tx buffers
				 * freed
				 */
				atomic_t free_tx_qpl_buf_cnt;
			};
		} dqo_compl;
	} ____cacheline_aligned;
	u64 pkt_done; /* free-running - total packets completed */
	u64 bytes_done; /* free-running - total bytes completed */
	u64 dropped_pkt; /* free-running - total packets dropped */
	u64 dma_mapping_error; /* count of dma mapping errors */

	/* Cacheline 2 -- Read-mostly fields */
	union {
		/* GQI fields */
		struct {
			union gve_tx_desc *desc;

			/* Maps 1:1 to a desc */
			struct gve_tx_buffer_state *info;
		};

		/* DQO fields. */
		struct {
			union gve_tx_desc_dqo *tx_ring;
			struct gve_tx_compl_desc *compl_ring;

			struct gve_tx_pending_packet_dqo *pending_packets;
			s16 num_pending_packets;

			u16 *xsk_reorder_queue;

			u32 complq_mask; /* complq size is complq_mask + 1 */

			/* QPL fields */
			struct {
				/* qpl assigned to this queue */
				struct gve_queue_page_list *qpl;

				/* Each QPL page is divided into TX bounce buffers
				 * of size GVE_TX_BUF_SIZE_DQO. tx_qpl_buf_next is
				 * an array to manage linked lists of TX buffers.
				 * An entry j at index i implies that j'th buffer
				 * is next on the list after i
				 */
				s16 *tx_qpl_buf_next;
				u32 num_tx_qpl_bufs;
			};
		} dqo;
	} ____cacheline_aligned;
	struct netdev_queue *netdev_txq;
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	struct device *dev;
	u32 mask; /* masks req and done down to queue size */
	u8 raw_addressing; /* use raw_addressing? */

	/* Slow-path fields */
	u32 q_num ____cacheline_aligned; /* queue idx */
	u32 stop_queue; /* count of queue stops */
	u32 wake_queue; /* count of queue wakes */
	u32 queue_timeout; /* count of queue timeouts */
	u32 ntfy_id; /* notification block index */
	u32 last_kick_msec; /* Last time the queue was kicked */
	dma_addr_t bus; /* dma address of the descr ring */
	dma_addr_t q_resources_bus; /* dma address of the queue resources */
	dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */
	struct xsk_buff_pool *xsk_pool;
	u64 xdp_xsk_sent;
	u64 xdp_xmit;
	u64 xdp_xmit_errors;
} ____cacheline_aligned;
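
/* Illustrative sketch (not driver code): the paired free lists above form a
 * lock-light producer/consumer handoff. The TX path pops from its private
 * consumer list and, only when that is empty, steals the whole producer list
 * built up by the completion path, e.g.:
 *
 *	s16 idx = tx->dqo_tx.free_pending_packets;
 *
 *	if (idx == -1)
 *		idx = atomic_xchg(&tx->dqo_compl.free_pending_packets, -1);
 *
 * One atomic exchange, amortized over many allocations, replaces per-packet
 * synchronization between the two paths.
 */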

/* Wraps the info for one irq including the napi struct and the queues
 * associated with that irq.
 */
struct gve_notify_block {
	__be32 *irq_db_index; /* pointer to idx into Bar2 */
	char name[IFNAMSIZ + 16]; /* name registered with the kernel */
	struct napi_struct napi; /* kernel napi struct for this block */
	struct gve_priv *priv;
	struct gve_tx_ring *tx; /* tx rings on this block */
	struct gve_rx_ring *rx; /* rx rings on this block */
	u32 irq;
};

/* Tracks allowed and current rx queue settings */
struct gve_rx_queue_config {
	u16 max_queues;
	u16 num_queues;
	u16 packet_buffer_size;
};

/* Tracks allowed and current tx queue settings */
struct gve_tx_queue_config {
	u16 max_queues;
	u16 num_queues; /* number of TX queues, excluding XDP queues */
	u16 num_xdp_queues;
};

/* Tracks the available and used qpl IDs */
struct gve_qpl_config {
	u32 qpl_map_size; /* map memory size */
	unsigned long *qpl_id_map; /* bitmap of used qpl ids */
};

struct gve_irq_db {
	__be32 index;
} ____cacheline_aligned;

struct gve_ptype {
	u8 l3_type;  /* `gve_l3_type` in gve_adminq.h */
	u8 l4_type;  /* `gve_l4_type` in gve_adminq.h */
};

struct gve_ptype_lut {
	struct gve_ptype ptypes[GVE_NUM_PTYPES];
};

/* Parameters for allocating resources for tx queues */
struct gve_tx_alloc_rings_cfg {
	struct gve_tx_queue_config *qcfg;

	u16 num_xdp_rings;

	u16 ring_size;
	bool raw_addressing;

	/* Allocated resources are returned here */
	struct gve_tx_ring *tx;
};

/* Parameters for allocating resources for rx queues */
struct gve_rx_alloc_rings_cfg {
	/* tx config is also needed to determine QPL ids */
	struct gve_rx_queue_config *qcfg_rx;
	struct gve_tx_queue_config *qcfg_tx;

	u16 ring_size;
	u16 packet_buffer_size;
	bool raw_addressing;
	bool enable_header_split;
	bool reset_rss;
	bool xdp;

	/* Allocated resources are returned here */
	struct gve_rx_ring *rx;
};

/* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
 * when the entire configure_device_resources command is zeroed out and the
 * queue_format is not specified.
 */
enum gve_queue_format {
	GVE_QUEUE_FORMAT_UNSPECIFIED	= 0x0,
	GVE_GQI_RDA_FORMAT		= 0x1,
	GVE_GQI_QPL_FORMAT		= 0x2,
	GVE_DQO_RDA_FORMAT		= 0x3,
	GVE_DQO_QPL_FORMAT		= 0x4,
};

struct gve_flow_spec {
	__be32 src_ip[4];
	__be32 dst_ip[4];
	union {
		struct {
			__be16 src_port;
			__be16 dst_port;
		};
		__be32 spi;
	};
	union {
		u8 tos;
		u8 tclass;
	};
};

struct gve_flow_rule {
	u32 location;
	u16 flow_type;
	u16 action;
	struct gve_flow_spec key;
	struct gve_flow_spec mask;
};

struct gve_flow_rules_cache {
	bool rules_cache_synced; /* False if the driver's rules_cache is outdated */
	struct gve_adminq_queried_flow_rule *rules_cache;
	__be32 *rule_ids_cache;
	/* The total number of queried rules stored in the caches */
	u32 rules_cache_num;
	u32 rule_ids_cache_num;
};

struct gve_rss_config {
	u8 *hash_key;
	u32 *hash_lut;
};

struct gve_ptp {
	struct ptp_clock_info info;
	struct ptp_clock *clock;
	struct gve_priv *priv;
};

struct gve_priv {
	struct net_device *dev;
	struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
	struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
	struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
	struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */
	dma_addr_t irq_db_indices_bus;
	struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */
	char mgmt_msix_name[IFNAMSIZ + 16];
	u32 mgmt_msix_idx;
	__be32 *counter_array; /* array of num_event_counters */
	dma_addr_t counter_array_bus;

	u16 num_event_counters;
	u16 tx_desc_cnt; /* num desc per ring */
	u16 rx_desc_cnt; /* num desc per ring */
	u16 max_tx_desc_cnt;
	u16 max_rx_desc_cnt;
	u16 min_tx_desc_cnt;
	u16 min_rx_desc_cnt;
	bool modify_ring_size_enabled;
	bool default_min_ring_size;
	u16 tx_pages_per_qpl; /* Suggested number of pages per qpl for TX queues by NIC */
	u64 max_registered_pages;
	u64 num_registered_pages; /* num pages registered with NIC */
	struct bpf_prog *xdp_prog; /* XDP BPF program */
	u32 rx_copybreak; /* copy packets smaller than this */
	u16 default_num_queues; /* default num queues to set up */

	struct gve_tx_queue_config tx_cfg;
	struct gve_rx_queue_config rx_cfg;
	unsigned long *xsk_pools; /* bitmap of RX queues with XSK pools */
	u32 num_ntfy_blks; /* split between TX and RX so must be even */
	int numa_node;

	struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
	__be32 __iomem *db_bar2; /* "array" of doorbells */
	u32 msg_enable;	/* level for netif* netdev print macros	*/
	struct pci_dev *pdev;

	/* metrics */
	u32 tx_timeo_cnt;

	/* Admin queue - see gve_adminq.h */
	union gve_adminq_command *adminq;
	dma_addr_t adminq_bus_addr;
	struct dma_pool *adminq_pool;
	struct mutex adminq_lock; /* Protects adminq command execution */
	u32 adminq_mask; /* masks prod_cnt to adminq size */
	u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
	u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
	u32 adminq_timeouts; /* free-running count of AQ cmd timeouts */
	/* free-running counts of each AQ cmd executed */
	u32 adminq_describe_device_cnt;
	u32 adminq_cfg_device_resources_cnt;
	u32 adminq_register_page_list_cnt;
	u32 adminq_unregister_page_list_cnt;
	u32 adminq_create_tx_queue_cnt;
	u32 adminq_create_rx_queue_cnt;
	u32 adminq_destroy_tx_queue_cnt;
	u32 adminq_destroy_rx_queue_cnt;
	u32 adminq_dcfg_device_resources_cnt;
	u32 adminq_set_driver_parameter_cnt;
	u32 adminq_report_stats_cnt;
	u32 adminq_report_link_speed_cnt;
	u32 adminq_report_nic_timestamp_cnt;
	u32 adminq_get_ptype_map_cnt;
	u32 adminq_verify_driver_compatibility_cnt;
	u32 adminq_query_flow_rules_cnt;
	u32 adminq_cfg_flow_rule_cnt;
	u32 adminq_cfg_rss_cnt;
	u32 adminq_query_rss_cnt;

	/* Global stats */
	u32 interface_up_cnt; /* count of times interface turned up since last reset */
	u32 interface_down_cnt; /* count of times interface turned down since last reset */
	u32 reset_cnt; /* count of resets */
	u32 page_alloc_fail; /* count of page alloc fails */
	u32 dma_mapping_error; /* count of dma mapping errors */
	u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
	u32 suspend_cnt; /* count of times suspended */
	u32 resume_cnt; /* count of times resumed */
	struct workqueue_struct *gve_wq;
	struct work_struct service_task;
	struct work_struct stats_report_task;
	unsigned long service_task_flags;
	unsigned long state_flags;

	struct gve_stats_report *stats_report;
	u64 stats_report_len;
	dma_addr_t stats_report_bus; /* dma address for the stats report */
	unsigned long ethtool_flags;

	unsigned long stats_report_timer_period;
	struct timer_list stats_report_timer;

	/* Gvnic device link speed from hypervisor. */
	u64 link_speed;
	bool up_before_suspend; /* True if dev was up before suspend */

	struct gve_ptype_lut *ptype_lut_dqo;

	/* Must be a power of two. */
	u16 max_rx_buffer_size; /* device limit */

	enum gve_queue_format queue_format;

	/* Interrupt coalescing settings */
	u32 tx_coalesce_usecs;
	u32 rx_coalesce_usecs;

	u16 header_buf_size; /* device configured, header-split supported if non-zero */
	bool header_split_enabled; /* True if the header split is enabled by the user */

	u32 max_flow_rules;
	u32 num_flow_rules;

	struct gve_flow_rules_cache flow_rules_cache;

	u16 rss_key_size;
	u16 rss_lut_size;
	bool cache_rss_config;
	struct gve_rss_config rss_config;

	/* True if the device supports reading the nic clock */
	bool nic_timestamp_supported;
	struct gve_ptp *ptp;
	struct kernel_hwtstamp_config ts_config;
	struct gve_nic_ts_report *nic_ts_report;
	dma_addr_t nic_ts_report_bus;
	u64 last_sync_nic_counter; /* Clock counter from last NIC TS report */
};

enum gve_service_task_flags_bit {
	GVE_PRIV_FLAGS_DO_RESET			= 1,
	GVE_PRIV_FLAGS_RESET_IN_PROGRESS	= 2,
	GVE_PRIV_FLAGS_PROBE_IN_PROGRESS	= 3,
	GVE_PRIV_FLAGS_DO_REPORT_STATS		= 4,
};

enum gve_state_flags_bit {
	GVE_PRIV_FLAGS_ADMIN_QUEUE_OK		= 1,
	GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK	= 2,
	GVE_PRIV_FLAGS_DEVICE_RINGS_OK		= 3,
	GVE_PRIV_FLAGS_NAPI_ENABLED		= 4,
};

enum gve_ethtool_flags_bit {
	GVE_PRIV_FLAGS_REPORT_STATS		= 0,
};

static inline bool gve_get_do_reset(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_set_do_reset(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_clear_do_reset(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline bool gve_get_reset_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_reset_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_reset_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_probe_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_probe_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_do_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS,
			&priv->service_task_flags);
}

static inline void gve_set_do_report_stats(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline void gve_clear_do_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_set_admin_queue_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_clear_admin_queue_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline bool gve_get_device_resources_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_set_device_resources_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_clear_device_resources_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline bool gve_get_device_rings_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_set_device_rings_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_clear_device_rings_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline bool gve_get_napi_enabled(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_set_napi_enabled(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_clear_napi_enabled(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline bool gve_get_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

static inline void gve_clear_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

/* Returns the address of the ntfy_blocks irq doorbell
 */
static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
					       struct gve_notify_block *block)
{
	return &priv->db_bar2[be32_to_cpu(*block->irq_db_index)];
}

/* Returns the index into ntfy_blocks of the given tx ring's block
 */
static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return queue_idx;
}

/* Returns the index into ntfy_blocks of the given rx ring's block
 */
static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return (priv->num_ntfy_blks / 2) + queue_idx;
}
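
/* Illustrative example (assuming num_ntfy_blks == 8): the first half of the
 * notification blocks serves TX and the second half serves RX, so
 * gve_tx_idx_to_ntfy() maps tx queue 2 to block 2 while gve_rx_idx_to_ntfy()
 * maps rx queue 2 to block 8 / 2 + 2 == 6.
 */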

static inline bool gve_is_qpl(struct gve_priv *priv)
{
	return priv->queue_format == GVE_GQI_QPL_FORMAT ||
		priv->queue_format == GVE_DQO_QPL_FORMAT;
}

/* Returns the number of tx queue page lists */
static inline u32 gve_num_tx_qpls(const struct gve_tx_queue_config *tx_cfg,
				  bool is_qpl)
{
	if (!is_qpl)
		return 0;
	return tx_cfg->num_queues + tx_cfg->num_xdp_queues;
}

/* Returns the number of rx queue page lists */
static inline u32 gve_num_rx_qpls(const struct gve_rx_queue_config *rx_cfg,
				  bool is_qpl)
{
	if (!is_qpl)
		return 0;
	return rx_cfg->num_queues;
}

static inline u32 gve_tx_qpl_id(struct gve_priv *priv, int tx_qid)
{
	return tx_qid;
}

static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
{
	return priv->tx_cfg.max_queues + rx_qid;
}

static inline u32 gve_get_rx_qpl_id(const struct gve_tx_queue_config *tx_cfg,
				    int rx_qid)
{
	return tx_cfg->max_queues + rx_qid;
}

static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
{
	return gve_tx_qpl_id(priv, 0);
}

static inline u32 gve_rx_start_qpl_id(const struct gve_tx_queue_config *tx_cfg)
{
	return gve_get_rx_qpl_id(tx_cfg, 0);
}

static inline u32 gve_get_rx_pages_per_qpl_dqo(u32 rx_desc_cnt)
{
	/* For DQO, page count should be more than ring size for
	 * out-of-order completions. Set it to two times of ring size.
	 */
	return 2 * rx_desc_cnt;
}

/* Returns the correct dma direction for tx and rx qpls */
static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
						      int id)
{
	if (id < gve_rx_start_qpl_id(&priv->tx_cfg))
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

static inline bool gve_is_gqi(struct gve_priv *priv)
{
	return priv->queue_format == GVE_GQI_RDA_FORMAT ||
		priv->queue_format == GVE_GQI_QPL_FORMAT;
}

static inline u32 gve_num_tx_queues(struct gve_priv *priv)
{
	return priv->tx_cfg.num_queues + priv->tx_cfg.num_xdp_queues;
}

static inline u32 gve_xdp_tx_queue_id(struct gve_priv *priv, u32 queue_id)
{
	return priv->tx_cfg.num_queues + queue_id;
}

static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv)
{
	return gve_xdp_tx_queue_id(priv, 0);
}

static inline bool gve_supports_xdp_xmit(struct gve_priv *priv)
{
	switch (priv->queue_format) {
	case GVE_GQI_QPL_FORMAT:
	case GVE_DQO_RDA_FORMAT:
		return true;
	default:
		return false;
	}
}

/* gqi napi handler defined in gve_main.c */
int gve_napi_poll(struct napi_struct *napi, int budget);

/* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
		   struct page **page, dma_addr_t *dma,
		   enum dma_data_direction, gfp_t gfp_flags);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
		   enum dma_data_direction);
/* qpls */
struct gve_queue_page_list *gve_alloc_queue_page_list(struct gve_priv *priv,
						      u32 id, int pages);
void gve_free_queue_page_list(struct gve_priv *priv,
			      struct gve_queue_page_list *qpl,
			      u32 id);
/* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
int gve_xdp_xmit_gqi(struct net_device *dev, int n, struct xdp_frame **frames,
		     u32 flags);
int gve_xdp_xmit_dqo(struct net_device *dev, int n, struct xdp_frame **frames,
		     u32 flags);
int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
		     void *data, int len, void *frame_p);
void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
int gve_xdp_xmit_one_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
			 struct xdp_frame *xdpf);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
bool gve_xdp_poll(struct gve_notify_block *block, int budget);
int gve_xsk_tx_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
			   struct gve_tx_alloc_rings_cfg *cfg);
void gve_tx_free_rings_gqi(struct gve_priv *priv,
			   struct gve_tx_alloc_rings_cfg *cfg);
void gve_tx_start_ring_gqi(struct gve_priv *priv, int idx);
void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx);
u32 gve_tx_load_event_counter(struct gve_priv *priv,
			      struct gve_tx_ring *tx);
bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
/* rx handling */
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
int gve_rx_poll(struct gve_notify_block *block, int budget);
bool gve_rx_work_pending(struct gve_rx_ring *rx);
int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
			  struct gve_rx_alloc_rings_cfg *cfg,
			  struct gve_rx_ring *rx,
			  int idx);
void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
			  struct gve_rx_alloc_rings_cfg *cfg);
int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
			   struct gve_rx_alloc_rings_cfg *cfg);
void gve_rx_free_rings_gqi(struct gve_priv *priv,
			   struct gve_rx_alloc_rings_cfg *cfg);
void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx);
void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx);
u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hplit);
bool gve_header_split_supported(const struct gve_priv *priv);
int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split);
/* rx buffer handling */
int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs);
void gve_free_page_dqo(struct gve_priv *priv, struct gve_rx_buf_state_dqo *bs,
		       bool free_page);
struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx);
bool gve_buf_state_is_allocated(struct gve_rx_ring *rx,
				struct gve_rx_buf_state_dqo *buf_state);
void gve_free_buf_state(struct gve_rx_ring *rx,
			struct gve_rx_buf_state_dqo *buf_state);
struct gve_rx_buf_state_dqo *gve_dequeue_buf_state(struct gve_rx_ring *rx,
						   struct gve_index_list *list);
void gve_enqueue_buf_state(struct gve_rx_ring *rx, struct gve_index_list *list,
			   struct gve_rx_buf_state_dqo *buf_state);
struct gve_rx_buf_state_dqo *gve_get_recycled_buf_state(struct gve_rx_ring *rx);
void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
			 struct gve_rx_buf_state_dqo *buf_state);
void gve_free_to_page_pool(struct gve_rx_ring *rx,
			   struct gve_rx_buf_state_dqo *buf_state,
			   bool allow_direct);
int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx,
			   struct gve_rx_buf_state_dqo *buf_state);
void gve_free_qpl_page_dqo(struct gve_rx_buf_state_dqo *buf_state);
void gve_reuse_buffer(struct gve_rx_ring *rx,
		      struct gve_rx_buf_state_dqo *buf_state);
void gve_free_buffer(struct gve_rx_ring *rx,
		     struct gve_rx_buf_state_dqo *buf_state);
int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc);
struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
					  struct gve_rx_ring *rx,
					  bool xdp);

/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
			     struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
			     struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
int gve_adjust_config(struct gve_priv *priv,
		      struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
		      struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
int gve_adjust_queues(struct gve_priv *priv,
		      struct gve_rx_queue_config new_rx_config,
		      struct gve_tx_queue_config new_tx_config,
		      bool reset_rss);
/* flow steering rule */
int gve_get_flow_rule_entry(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
int gve_get_flow_rule_ids(struct gve_priv *priv, struct ethtool_rxnfc *cmd, u32 *rule_locs);
int gve_add_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
int gve_del_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
int gve_flow_rules_reset(struct gve_priv *priv);
/* RSS config */
int gve_init_rss_config(struct gve_priv *priv, u16 num_queues);
/* PTP and timestamping */
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
int gve_clock_nic_ts_read(struct gve_priv *priv);
int gve_init_clock(struct gve_priv *priv);
void gve_teardown_clock(struct gve_priv *priv);
#else /* CONFIG_PTP_1588_CLOCK */
static inline int gve_clock_nic_ts_read(struct gve_priv *priv)
{
	return -EOPNOTSUPP;
}

static inline int gve_init_clock(struct gve_priv *priv)
{
	return 0;
}

static inline void gve_teardown_clock(struct gve_priv *priv) { }
#endif /* CONFIG_PTP_1588_CLOCK */
/* report stats handling */
void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
extern const struct ethtool_ops gve_ethtool_ops;
/* needed by ethtool */
extern char gve_driver_name[];
extern const char gve_version_str[];
#endif /* _GVE_H_ */