/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
 * Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#ifndef _GVE_H_
#define _GVE_H_

#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/ethtool_netlink.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/u64_stats_sync.h>
#include <net/xdp.h>

#include "gve_desc.h"
#include "gve_desc_dqo.h"

#ifndef PCI_VENDOR_ID_GOOGLE
#define PCI_VENDOR_ID_GOOGLE	0x1ae0
#endif

#define PCI_DEV_ID_GVNIC	0x0042

#define GVE_REGISTER_BAR	0
#define GVE_DOORBELL_BAR	2

/* Driver can alloc up to 2 segments for the header and 2 for the payload. */
#define GVE_TX_MAX_IOVEC	4
/* 1 for management, 1 for rx, 1 for tx */
#define GVE_MIN_MSIX 3

/* Numbers of gve tx/rx stats in stats report. */
#define GVE_TX_STATS_REPORT_NUM	6
#define GVE_RX_STATS_REPORT_NUM	2

/* Interval to schedule a stats report update, 20000ms. */
#define GVE_STATS_REPORT_TIMER_PERIOD	20000

/* Numbers of NIC tx/rx stats in stats report. */
#define NIC_TX_STATS_REPORT_NUM	0
#define NIC_RX_STATS_REPORT_NUM	4

#define GVE_ADMINQ_BUFFER_SIZE 4096

#define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))

/* PTYPEs are always 10 bits. */
#define GVE_NUM_PTYPES	1024

/* Default minimum ring size */
#define GVE_DEFAULT_MIN_TX_RING_SIZE 256
#define GVE_DEFAULT_MIN_RX_RING_SIZE 512

#define GVE_DEFAULT_RX_BUFFER_SIZE 2048

#define GVE_MAX_RX_BUFFER_SIZE 4096

#define GVE_DEFAULT_RX_BUFFER_OFFSET 2048

#define GVE_XDP_ACTIONS 5

#define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182

#define GVE_DEFAULT_HEADER_BUFFER_SIZE 128

#define DQO_QPL_DEFAULT_TX_PAGES 512

/* Maximum TSO size supported on DQO */
#define GVE_DQO_TX_MAX	0x3FFFF

#define GVE_TX_BUF_SHIFT_DQO 11

/* 2K buffers for DQO-QPL */
#define GVE_TX_BUF_SIZE_DQO BIT(GVE_TX_BUF_SHIFT_DQO)
#define GVE_TX_BUFS_PER_PAGE_DQO (PAGE_SIZE >> GVE_TX_BUF_SHIFT_DQO)
#define GVE_MAX_TX_BUFS_PER_PKT (DIV_ROUND_UP(GVE_DQO_TX_MAX, GVE_TX_BUF_SIZE_DQO))
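
/* Worked example (illustrative, assuming a 4K PAGE_SIZE):
 *   GVE_TX_BUF_SIZE_DQO      = BIT(11)                     = 2048
 *   GVE_TX_BUFS_PER_PAGE_DQO = 4096 >> 11                  = 2
 *   GVE_MAX_TX_BUFS_PER_PKT  = DIV_ROUND_UP(0x3FFFF, 2048) = 128
 * so a maximally-sized TSO packet can consume up to 128 bounce buffers
 * spread across 64 QPL pages.
 */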

/* If the number of free/recyclable buffers is less than this threshold,
 * the driver allocs and uses a non-qpl page on the receive path of DQO QPL
 * to free up buffers.
 * Value is set big enough to post at least 3 64K LRO packets via 2K buffers
 * to the NIC.
 */
#define GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD 96
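
/* Sanity check on the threshold above (illustrative arithmetic): one 64K
 * LRO packet split into 2K buffers needs 64K / 2K = 32 buffers, so three
 * such packets need 3 * 32 = 96 buffers.
 */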

/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
	struct gve_rx_desc *desc_ring; /* the descriptor ring */
	dma_addr_t bus; /* the bus for the desc_ring */
	u8 seqno; /* the next expected seqno for this desc */
};

/* The page info for a single slot in the RX data queue */
struct gve_rx_slot_page_info {
	struct page *page;
	void *page_address;
	u32 page_offset; /* offset to write to in page */
	int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
	u16 pad; /* adjustment for rx padding */
	u8 can_flip; /* tracks if the networking stack is using the page */
};

/* A list of pages registered with the device during setup and used by a queue
 * as buffers
 */
struct gve_queue_page_list {
	u32 id; /* unique id */
	u32 num_entries;
	struct page **pages; /* list of num_entries pages */
	dma_addr_t *page_buses; /* the dma addrs of the pages */
};

/* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */
struct gve_rx_data_queue {
	union gve_rx_data_slot *data_ring; /* read by NIC */
	dma_addr_t data_bus; /* dma mapping of the slots */
	struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
	struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
	u8 raw_addressing; /* use raw_addressing? */
};

struct gve_priv;

/* RX buffer queue for posting buffers to HW.
 * Each RX (completion) queue has a corresponding buffer queue.
 */
struct gve_rx_buf_queue_dqo {
	struct gve_rx_desc_dqo *desc_ring;
	dma_addr_t bus;
	u32 head; /* Pointer to start cleaning buffers at. */
	u32 tail; /* Last posted buffer index + 1 */
	u32 mask; /* Mask for indices to the size of the ring */
};

/* RX completion queue to receive packets from HW. */
struct gve_rx_compl_queue_dqo {
	struct gve_rx_compl_desc_dqo *desc_ring;
	dma_addr_t bus;

	/* Number of slots which did not have a buffer posted yet. We should not
	 * post more buffers than the queue size to avoid HW overrunning the
	 * queue.
	 */
	int num_free_slots;

	/* HW uses a "generation bit" to notify SW of new descriptors. When a
	 * descriptor's generation bit is different from the current generation,
	 * that descriptor is ready to be consumed by SW.
	 */
	u8 cur_gen_bit;

	/* Pointer into desc_ring where the next completion descriptor will be
	 * received.
	 */
	u32 head;
	u32 mask; /* Mask for indices to the size of the ring */
};
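
/* Minimal sketch of how SW consumes completion descriptors using the
 * generation bit (illustrative only; the real receive path lives in
 * gve_rx_dqo.c):
 *
 *	struct gve_rx_compl_desc_dqo *desc = &complq->desc_ring[complq->head];
 *
 *	if (desc->generation == complq->cur_gen_bit)
 *		return;		(HW has not written this slot yet)
 *	... process descriptor ...
 *	complq->head = (complq->head + 1) & complq->mask;
 *	if (complq->head == 0)
 *		complq->cur_gen_bit ^= 1;	(gen flips on ring wrap)
 */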

struct gve_header_buf {
	u8 *data;
	dma_addr_t addr;
};

/* Stores state for tracking buffers posted to HW */
struct gve_rx_buf_state_dqo {
	/* The page posted to HW. */
	struct gve_rx_slot_page_info page_info;

	/* The DMA address corresponding to `page_info`. */
	dma_addr_t addr;

	/* Last offset into the page when it only had a single reference, at
	 * which point every other offset is free to be reused.
	 */
	u32 last_single_ref_offset;

	/* Linked list index to next element in the list, or -1 if none */
	s16 next;
};

/* `head` and `tail` are indices into an array, or -1 if empty. */
struct gve_index_list {
	s16 head;
	s16 tail;
};
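
/* Illustrative sketch of walking a gve_index_list over its backing array
 * (here assuming the indices refer to an array of buf_states):
 *
 *	s16 idx = list.head;
 *
 *	while (idx != -1) {
 *		struct gve_rx_buf_state_dqo *s = &buf_states[idx];
 *		... visit s ...
 *		idx = s->next;
 *	}
 */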

/* A single received packet split across multiple buffers may be
 * reconstructed using the information in this structure.
 */
struct gve_rx_ctx {
	/* head and tail of skb chain for the current packet or NULL if none */
	struct sk_buff *skb_head;
	struct sk_buff *skb_tail;
	u32 total_size;
	u8 frag_cnt;
	bool drop_pkt;
};

struct gve_rx_cnts {
	u32 ok_pkt_bytes;
	u16 ok_pkt_cnt;
	u16 total_pkt_cnt;
	u16 cont_pkt_cnt;
	u16 desc_err_pkt_cnt;
};

/* Contains datapath state used to represent an RX queue. */
struct gve_rx_ring {
	struct gve_priv *gve;
	union {
		/* GQI fields */
		struct {
			struct gve_rx_desc_queue desc;
			struct gve_rx_data_queue data;

			/* threshold for posting new buffs and descs */
			u32 db_threshold;
			u16 packet_buffer_size;

			u32 qpl_copy_pool_mask;
			u32 qpl_copy_pool_head;
			struct gve_rx_slot_page_info *qpl_copy_pool;
		};

		/* DQO fields. */
		struct {
			struct gve_rx_buf_queue_dqo bufq;
			struct gve_rx_compl_queue_dqo complq;

			struct gve_rx_buf_state_dqo *buf_states;
			u16 num_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Index into
			 * buf_states, or -1 if empty.
			 */
			s16 free_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indexes into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which are pointing to
			 * valid buffers.
			 *
			 * We use a FIFO here in order to increase the
			 * probability that buffers can be reused by increasing
			 * the time between usages.
			 */
			struct gve_index_list recycled_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indexes into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which have buffers
			 * which cannot be reused yet.
			 */
			struct gve_index_list used_buf_states;

			/* qpl assigned to this queue */
			struct gve_queue_page_list *qpl;

			/* index into queue page list */
			u32 next_qpl_page_idx;

			/* track number of used buffers */
			u16 used_buf_states_cnt;

			/* Address info of the buffers for header-split */
			struct gve_header_buf hdr_bufs;
		} dqo;
	};

	u64 rbytes; /* free-running bytes received */
	u64 rx_hsplit_bytes; /* free-running header bytes received */
	u64 rpackets; /* free-running packets received */
	u32 cnt; /* free-running total number of completed packets */
	u32 fill_cnt; /* free-running total number of descs and buffs posted */
	u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
	u64 rx_hsplit_pkt; /* free-running packets with headers split */
	u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
	u64 rx_copied_pkt; /* free-running total number of copied packets */
	u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
	u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
	u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
	/* free-running count of unsplit packets due to header buffer overflow or a zero hdr_len */
	u64 rx_hsplit_unsplit_pkt;
	u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
	u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
	u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */
	u64 rx_frag_alloc_cnt; /* free-running count of rx page allocations */
	u64 xdp_tx_errors;
	u64 xdp_redirect_errors;
	u64 xdp_alloc_fails;
	u64 xdp_actions[GVE_XDP_ACTIONS];
	u32 q_num; /* queue index */
	u32 ntfy_id; /* notification block index */
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	dma_addr_t q_resources_bus; /* dma address for the queue resources */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */

	struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. */

	/* XDP stuff */
	struct xdp_rxq_info xdp_rxq;
	struct xdp_rxq_info xsk_rxq;
	struct xsk_buff_pool *xsk_pool;
	struct page_frag_cache page_cache; /* Page cache to allocate XDP frames */
};
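
/* Rough DQO RX buffer lifecycle implied by the fields above (a sketch, not
 * a contract): a state is popped off free_buf_states and its buffer posted
 * to bufq; after its completion is processed, the state moves to
 * recycled_buf_states when the page can be reused right away, or to
 * used_buf_states until the stack releases its page references.
 */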

/* A TX desc ring entry */
union gve_tx_desc {
	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
	struct gve_tx_mtd_desc mtd; /* optional metadata descriptor */
	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
};

/* Tracks the memory in the fifo occupied by a segment of a packet */
struct gve_tx_iovec {
	u32 iov_offset; /* offset into this segment */
	u32 iov_len; /* length */
	u32 iov_padding; /* padding associated with this segment */
};

/* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
 * ring entry but only used for a pkt_desc, not a seg_desc
 */
struct gve_tx_buffer_state {
	union {
		struct sk_buff *skb; /* skb for this pkt */
		struct xdp_frame *xdp_frame; /* xdp_frame */
	};
	struct {
		u16 size; /* size of xmitted xdp pkt */
		u8 is_xsk; /* xsk buff */
	} xdp;
	union {
		struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
		struct {
			DEFINE_DMA_UNMAP_ADDR(dma);
			DEFINE_DMA_UNMAP_LEN(len);
		};
	};
};

/* A TX buffer - each queue has one */
struct gve_tx_fifo {
	void *base; /* address of base of FIFO */
	u32 size; /* total size */
	atomic_t available; /* how much space is still available */
	u32 head; /* offset to write at */
	struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */
};

/* TX descriptor for DQO format */
union gve_tx_desc_dqo {
	struct gve_tx_pkt_desc_dqo pkt;
	struct gve_tx_tso_context_desc_dqo tso_ctx;
	struct gve_tx_general_context_desc_dqo general_ctx;
};

enum gve_packet_state {
	/* Packet is in free list, available to be allocated.
	 * This should always be zero since state is not explicitly initialized.
	 */
	GVE_PACKET_STATE_UNALLOCATED,
	/* Packet is expecting a regular data completion or miss completion */
	GVE_PACKET_STATE_PENDING_DATA_COMPL,
	/* Packet has received a miss completion and is expecting a
	 * re-injection completion.
	 */
	GVE_PACKET_STATE_PENDING_REINJECT_COMPL,
	/* No valid completion received within the specified timeout. */
	GVE_PACKET_STATE_TIMED_OUT_COMPL,
};

struct gve_tx_pending_packet_dqo {
	struct sk_buff *skb; /* skb for this packet */

	/* 0th element corresponds to the linear portion of `skb`, should be
	 * unmapped with `dma_unmap_single`.
	 *
	 * All others correspond to `skb`'s frags and should be unmapped with
	 * `dma_unmap_page`.
	 */
	union {
		struct {
			DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
			DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
		};
		s16 tx_qpl_buf_ids[GVE_MAX_TX_BUFS_PER_PKT];
	};

	u16 num_bufs;

	/* Linked list index to next element in the list, or -1 if none */
	s16 next;

	/* Linked list index to prev element in the list, or -1 if none.
	 * Used for tracking either outstanding miss completions or prematurely
	 * freed packets.
	 */
	s16 prev;

	/* Identifies the current state of the packet as defined in
	 * `enum gve_packet_state`.
	 */
	u8 state;

	/* If packet is an outstanding miss completion, then the packet is
	 * freed if the corresponding re-injection completion is not received
	 * before kernel jiffies exceeds timeout_jiffies.
	 */
	unsigned long timeout_jiffies;
};
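
/* Illustrative flow of `state` for a pending packet (derived from the
 * enum gve_packet_state comments above):
 *
 *	UNALLOCATED -> PENDING_DATA_COMPL -> (data completion) -> freed
 *	PENDING_DATA_COMPL -> (miss completion) -> PENDING_REINJECT_COMPL
 *	PENDING_REINJECT_COMPL -> (re-injection completion) -> freed
 *	PENDING_REINJECT_COMPL -> (timeout_jiffies elapsed) -> TIMED_OUT_COMPL
 */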

/* Contains datapath state used to represent a TX queue. */
struct gve_tx_ring {
	/* Cacheline 0 -- Accessed & dirtied during transmit */
	union {
		/* GQI fields */
		struct {
			struct gve_tx_fifo tx_fifo;
			u32 req; /* driver tracked head pointer */
			u32 done; /* driver tracked tail pointer */
		};

		/* DQO fields. */
		struct {
			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is a consumer list owned by the TX path. When it
			 * runs out, the producer list is stolen from the
			 * completion handling path
			 * (dqo_compl.free_pending_packets).
			 */
			s16 free_pending_packets;

			/* Cached value of `dqo_compl.hw_tx_head` */
			u32 head;
			u32 tail; /* Last posted buffer index + 1 */

			/* Index of the last descriptor with "report event" bit
			 * set.
			 */
			u32 last_re_idx;

			/* free running number of packet buf descriptors posted */
			u16 posted_packet_desc_cnt;
			/* free running number of packet buf descriptors completed */
			u16 completed_packet_desc_cnt;

			/* QPL fields */
			struct {
				/* Linked list of gve_tx_buf_dqo. Index into
				 * tx_qpl_buf_next, or -1 if empty.
				 *
				 * This is a consumer list owned by the TX path. When it
				 * runs out, the producer list is stolen from the
				 * completion handling path
				 * (dqo_compl.free_tx_qpl_buf_head).
				 */
				s16 free_tx_qpl_buf_head;

				/* Free running count of the number of QPL tx buffers
				 * allocated
				 */
				u32 alloc_tx_qpl_buf_cnt;

				/* Cached value of `dqo_compl.free_tx_qpl_buf_cnt` */
				u32 free_tx_qpl_buf_cnt;
			};
		} dqo_tx;
	};

	/* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */
	union {
		/* GQI fields */
		struct {
			/* Spinlock for when cleanup in progress */
			spinlock_t clean_lock;
			/* Spinlock for XDP tx traffic */
			spinlock_t xdp_lock;
		};

		/* DQO fields. */
		struct {
			u32 head; /* Last read on compl_desc */

			/* Tracks the current gen bit of compl_q */
			u8 cur_gen_bit;

			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is the producer list, owned by the completion
			 * handling path. When the consumer list
			 * (dqo_tx.free_pending_packets) runs out, this list
			 * will be stolen.
			 */
			atomic_t free_pending_packets;

			/* Last TX ring index fetched by HW */
			atomic_t hw_tx_head;

			/* List to track pending packets which received a miss
			 * completion but not a corresponding reinjection.
			 */
			struct gve_index_list miss_completions;

			/* List to track pending packets that were completed
			 * before receiving a valid completion because they
			 * reached a specified timeout.
			 */
			struct gve_index_list timed_out_completions;

			/* QPL fields */
			struct {
				/* Linked list of gve_tx_buf_dqo. Index into
				 * tx_qpl_buf_next, or -1 if empty.
				 *
				 * This is the producer list, owned by the completion
				 * handling path. When the consumer list
				 * (dqo_tx.free_tx_qpl_buf_head) runs out, this list
				 * will be stolen.
				 */
				atomic_t free_tx_qpl_buf_head;

				/* Free running count of the number of tx buffers
				 * freed
				 */
				atomic_t free_tx_qpl_buf_cnt;
			};
		} dqo_compl;
	} ____cacheline_aligned;
	u64 pkt_done; /* free-running - total packets completed */
	u64 bytes_done; /* free-running - total bytes completed */
	u64 dropped_pkt; /* free-running - total packets dropped */
	u64 dma_mapping_error; /* count of dma mapping errors */

	/* Cacheline 2 -- Read-mostly fields */
	union {
		/* GQI fields */
		struct {
			union gve_tx_desc *desc;

			/* Maps 1:1 to a desc */
			struct gve_tx_buffer_state *info;
		};

		/* DQO fields. */
		struct {
			union gve_tx_desc_dqo *tx_ring;
			struct gve_tx_compl_desc *compl_ring;

			struct gve_tx_pending_packet_dqo *pending_packets;
			s16 num_pending_packets;

			u32 complq_mask; /* complq size is complq_mask + 1 */

			/* QPL fields */
			struct {
				/* qpl assigned to this queue */
				struct gve_queue_page_list *qpl;

				/* Each QPL page is divided into TX bounce buffers
				 * of size GVE_TX_BUF_SIZE_DQO. tx_qpl_buf_next is
				 * an array to manage linked lists of TX buffers.
				 * An entry j at index i implies that the j'th buffer
				 * is next on the list after the i'th.
				 */
				s16 *tx_qpl_buf_next;
				u32 num_tx_qpl_bufs;
			};
		} dqo;
	} ____cacheline_aligned;
	struct netdev_queue *netdev_txq;
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	struct device *dev;
	u32 mask; /* masks req and done down to queue size */
	u8 raw_addressing; /* use raw_addressing? */

	/* Slow-path fields */
	u32 q_num ____cacheline_aligned; /* queue idx */
	u32 stop_queue; /* count of queue stops */
	u32 wake_queue; /* count of queue wakes */
	u32 queue_timeout; /* count of queue timeouts */
	u32 ntfy_id; /* notification block index */
	u32 last_kick_msec; /* Last time the queue was kicked */
	dma_addr_t bus; /* dma address of the descr ring */
	dma_addr_t q_resources_bus; /* dma address of the queue resources */
	dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */
	struct xsk_buff_pool *xsk_pool;
	u32 xdp_xsk_wakeup;
	u32 xdp_xsk_done;
	u64 xdp_xsk_sent;
	u64 xdp_xmit;
	u64 xdp_xmit_errors;
} ____cacheline_aligned;
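
/* Minimal sketch of popping a TX bounce buffer off the consumer free list
 * above (illustrative; the real helpers live in gve_tx_dqo.c):
 *
 *	s16 buf_id = tx->dqo_tx.free_tx_qpl_buf_head;
 *
 *	if (buf_id != -1) {
 *		tx->dqo_tx.free_tx_qpl_buf_head =
 *			tx->dqo.tx_qpl_buf_next[buf_id];
 *		... buf_id now names a GVE_TX_BUF_SIZE_DQO-sized slice of
 *		    the queue's QPL pages ...
 *	}
 */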

/* Wraps the info for one irq including the napi struct and the queues
 * associated with that irq.
 */
struct gve_notify_block {
	__be32 *irq_db_index; /* pointer to idx into Bar2 */
	char name[IFNAMSIZ + 16]; /* name registered with the kernel */
	struct napi_struct napi; /* kernel napi struct for this block */
	struct gve_priv *priv;
	struct gve_tx_ring *tx; /* tx rings on this block */
	struct gve_rx_ring *rx; /* rx rings on this block */
	u32 irq;
};

/* Tracks allowed and current queue settings */
struct gve_queue_config {
	u16 max_queues;
	u16 num_queues; /* current */
};

/* Tracks the available and used qpl IDs */
struct gve_qpl_config {
	u32 qpl_map_size; /* map memory size */
	unsigned long *qpl_id_map; /* bitmap of used qpl ids */
};

struct gve_irq_db {
	__be32 index;
} ____cacheline_aligned;

struct gve_ptype {
	u8 l3_type;  /* `gve_l3_type` in gve_adminq.h */
	u8 l4_type;  /* `gve_l4_type` in gve_adminq.h */
};

struct gve_ptype_lut {
	struct gve_ptype ptypes[GVE_NUM_PTYPES];
};

/* Parameters for allocating resources for tx queues */
struct gve_tx_alloc_rings_cfg {
	struct gve_queue_config *qcfg;

	u16 ring_size;
	u16 start_idx;
	u16 num_rings;
	bool raw_addressing;

	/* Allocated resources are returned here */
	struct gve_tx_ring *tx;
};

/* Parameters for allocating resources for rx queues */
struct gve_rx_alloc_rings_cfg {
	/* tx config is also needed to determine QPL ids */
	struct gve_queue_config *qcfg;
	struct gve_queue_config *qcfg_tx;

	u16 ring_size;
	u16 packet_buffer_size;
	bool raw_addressing;
	bool enable_header_split;

	/* Allocated resources are returned here */
	struct gve_rx_ring *rx;
};

/* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
 * when the entire configure_device_resources command is zeroed out and the
 * queue_format is not specified.
 */
enum gve_queue_format {
	GVE_QUEUE_FORMAT_UNSPECIFIED	= 0x0,
	GVE_GQI_RDA_FORMAT		= 0x1,
	GVE_GQI_QPL_FORMAT		= 0x2,
	GVE_DQO_RDA_FORMAT		= 0x3,
	GVE_DQO_QPL_FORMAT		= 0x4,
};

struct gve_priv {
	struct net_device *dev;
	struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
	struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
	struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
	struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */
	dma_addr_t irq_db_indices_bus;
	struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */
	char mgmt_msix_name[IFNAMSIZ + 16];
	u32 mgmt_msix_idx;
	__be32 *counter_array; /* array of num_event_counters */
	dma_addr_t counter_array_bus;

	u16 num_event_counters;
	u16 tx_desc_cnt; /* num desc per ring */
	u16 rx_desc_cnt; /* num desc per ring */
	u16 max_tx_desc_cnt;
	u16 max_rx_desc_cnt;
	u16 min_tx_desc_cnt;
	u16 min_rx_desc_cnt;
	bool modify_ring_size_enabled;
	bool default_min_ring_size;
	u16 tx_pages_per_qpl; /* Number of pages per qpl for TX queues, suggested by the NIC */
	u64 max_registered_pages;
	u64 num_registered_pages; /* num pages registered with NIC */
	struct bpf_prog *xdp_prog; /* XDP BPF program */
	u32 rx_copybreak; /* copy packets smaller than this */
	u16 default_num_queues; /* default num queues to set up */

	u16 num_xdp_queues;
	struct gve_queue_config tx_cfg;
	struct gve_queue_config rx_cfg;
	u32 num_ntfy_blks; /* split between TX and RX so must be even */

	struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
	__be32 __iomem *db_bar2; /* "array" of doorbells */
	u32 msg_enable;	/* level for netif* netdev print macros */
	struct pci_dev *pdev;

	/* metrics */
	u32 tx_timeo_cnt;

	/* Admin queue - see gve_adminq.h */
	union gve_adminq_command *adminq;
	dma_addr_t adminq_bus_addr;
	struct dma_pool *adminq_pool;
	u32 adminq_mask; /* masks prod_cnt to adminq size */
	u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
	u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
	u32 adminq_timeouts; /* free-running count of AQ cmd timeouts */
	/* free-running count of each AQ cmd executed */
	u32 adminq_describe_device_cnt;
	u32 adminq_cfg_device_resources_cnt;
	u32 adminq_register_page_list_cnt;
	u32 adminq_unregister_page_list_cnt;
	u32 adminq_create_tx_queue_cnt;
	u32 adminq_create_rx_queue_cnt;
	u32 adminq_destroy_tx_queue_cnt;
	u32 adminq_destroy_rx_queue_cnt;
	u32 adminq_dcfg_device_resources_cnt;
	u32 adminq_set_driver_parameter_cnt;
	u32 adminq_report_stats_cnt;
	u32 adminq_report_link_speed_cnt;
	u32 adminq_get_ptype_map_cnt;
	u32 adminq_verify_driver_compatibility_cnt;

	/* Global stats */
	u32 interface_up_cnt; /* count of times interface turned up since last reset */
	u32 interface_down_cnt; /* count of times interface turned down since last reset */
	u32 reset_cnt; /* count of resets */
	u32 page_alloc_fail; /* count of page alloc fails */
	u32 dma_mapping_error; /* count of dma mapping errors */
	u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
	u32 suspend_cnt; /* count of times suspended */
	u32 resume_cnt; /* count of times resumed */
	struct workqueue_struct *gve_wq;
	struct work_struct service_task;
	struct work_struct stats_report_task;
	unsigned long service_task_flags;
	unsigned long state_flags;

	struct gve_stats_report *stats_report;
	u64 stats_report_len;
	dma_addr_t stats_report_bus; /* dma address for the stats report */
	unsigned long ethtool_flags;

	unsigned long stats_report_timer_period;
	struct timer_list stats_report_timer;

	/* Gvnic device link speed from hypervisor. */
	u64 link_speed;
	bool up_before_suspend; /* True if dev was up before suspend */

	struct gve_ptype_lut *ptype_lut_dqo;

	/* Must be a power of two. */
	u16 data_buffer_size_dqo;
	u16 max_rx_buffer_size; /* device limit */

	enum gve_queue_format queue_format;

	/* Interrupt coalescing settings */
	u32 tx_coalesce_usecs;
	u32 rx_coalesce_usecs;

	u16 header_buf_size; /* device configured, header-split supported if non-zero */
	bool header_split_enabled; /* True if header split is enabled by the user */
};

enum gve_service_task_flags_bit {
	GVE_PRIV_FLAGS_DO_RESET			= 1,
	GVE_PRIV_FLAGS_RESET_IN_PROGRESS	= 2,
	GVE_PRIV_FLAGS_PROBE_IN_PROGRESS	= 3,
	GVE_PRIV_FLAGS_DO_REPORT_STATS		= 4,
};

enum gve_state_flags_bit {
	GVE_PRIV_FLAGS_ADMIN_QUEUE_OK		= 1,
	GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK	= 2,
	GVE_PRIV_FLAGS_DEVICE_RINGS_OK		= 3,
	GVE_PRIV_FLAGS_NAPI_ENABLED		= 4,
};

enum gve_ethtool_flags_bit {
	GVE_PRIV_FLAGS_REPORT_STATS		= 0,
};

static inline bool gve_get_do_reset(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_set_do_reset(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_clear_do_reset(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline bool gve_get_reset_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_reset_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_reset_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_probe_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_probe_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_do_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS,
			&priv->service_task_flags);
}

static inline void gve_set_do_report_stats(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline void gve_clear_do_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_set_admin_queue_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_clear_admin_queue_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline bool gve_get_device_resources_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_set_device_resources_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_clear_device_resources_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline bool gve_get_device_rings_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_set_device_rings_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_clear_device_rings_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline bool gve_get_napi_enabled(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_set_napi_enabled(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_clear_napi_enabled(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline bool gve_get_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

static inline void gve_clear_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

/* Returns the address of the ntfy_block's irq doorbell
 */
static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
					       struct gve_notify_block *block)
{
	return &priv->db_bar2[be32_to_cpu(*block->irq_db_index)];
}

/* Returns the index into ntfy_blocks of the given tx ring's block
 */
static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return queue_idx;
}

/* Returns the index into ntfy_blocks of the given rx ring's block
 */
static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return (priv->num_ntfy_blks / 2) + queue_idx;
}
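
/* Example (illustrative): with num_ntfy_blks == 8, TX queues 0-3 map to
 * notify blocks 0-3 while RX queues 0-3 map to notify blocks 4-7.
 */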

static inline bool gve_is_qpl(struct gve_priv *priv)
{
	return priv->queue_format == GVE_GQI_QPL_FORMAT ||
		priv->queue_format == GVE_DQO_QPL_FORMAT;
}

/* Returns the number of tx queue page lists */
static inline u32 gve_num_tx_qpls(const struct gve_queue_config *tx_cfg,
				  int num_xdp_queues,
				  bool is_qpl)
{
	if (!is_qpl)
		return 0;
	return tx_cfg->num_queues + num_xdp_queues;
}

/* Returns the number of XDP tx queue page lists
 */
static inline u32 gve_num_xdp_qpls(struct gve_priv *priv)
{
	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
		return 0;

	return priv->num_xdp_queues;
}

/* Returns the number of rx queue page lists */
static inline u32 gve_num_rx_qpls(const struct gve_queue_config *rx_cfg,
				  bool is_qpl)
{
	if (!is_qpl)
		return 0;
	return rx_cfg->num_queues;
}

static inline u32 gve_tx_qpl_id(struct gve_priv *priv, int tx_qid)
{
	return tx_qid;
}

static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
{
	return priv->tx_cfg.max_queues + rx_qid;
}

static inline u32 gve_get_rx_qpl_id(const struct gve_queue_config *tx_cfg, int rx_qid)
{
	return tx_cfg->max_queues + rx_qid;
}

static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
{
	return gve_tx_qpl_id(priv, 0);
}

static inline u32 gve_rx_start_qpl_id(const struct gve_queue_config *tx_cfg)
{
	return gve_get_rx_qpl_id(tx_cfg, 0);
}
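
/* Illustrative layout of the QPL id space: with tx_cfg.max_queues == 16,
 * TX queues use QPL ids 0..15 and RX queue i uses QPL id 16 + i. This
 * split also drives gve_qpl_dma_dir() below: ids below the RX start are
 * mapped DMA_TO_DEVICE, the rest DMA_FROM_DEVICE.
 */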

static inline u32 gve_get_rx_pages_per_qpl_dqo(u32 rx_desc_cnt)
{
	/* For DQO, page count should be more than ring size for
	 * out-of-order completions. Set it to twice the ring size.
	 */
	return 2 * rx_desc_cnt;
}

/* Returns the correct dma direction for tx and rx qpls */
static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
						      int id)
{
	if (id < gve_rx_start_qpl_id(&priv->tx_cfg))
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

static inline bool gve_is_gqi(struct gve_priv *priv)
{
	return priv->queue_format == GVE_GQI_RDA_FORMAT ||
		priv->queue_format == GVE_GQI_QPL_FORMAT;
}

static inline u32 gve_num_tx_queues(struct gve_priv *priv)
{
	return priv->tx_cfg.num_queues + priv->num_xdp_queues;
}

static inline u32 gve_xdp_tx_queue_id(struct gve_priv *priv, u32 queue_id)
{
	return priv->tx_cfg.num_queues + queue_id;
}

static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv)
{
	return gve_xdp_tx_queue_id(priv, 0);
}
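
/* Example (illustrative): with tx_cfg.num_queues == 4 and
 * num_xdp_queues == 4, regular TX queues occupy ids 0-3, XDP TX queues
 * occupy ids 4-7, and gve_num_tx_queues() returns 8.
 */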

/* gqi napi handler defined in gve_main.c */
int gve_napi_poll(struct napi_struct *napi, int budget);

/* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
		   struct page **page, dma_addr_t *dma,
		   enum dma_data_direction, gfp_t gfp_flags);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
		   enum dma_data_direction);
/* qpls */
struct gve_queue_page_list *gve_alloc_queue_page_list(struct gve_priv *priv,
						      u32 id, int pages);
void gve_free_queue_page_list(struct gve_priv *priv,
			      struct gve_queue_page_list *qpl,
			      u32 id);
/* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		 u32 flags);
int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
		     void *data, int len, void *frame_p);
void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
bool gve_xdp_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
			   struct gve_tx_alloc_rings_cfg *cfg);
void gve_tx_free_rings_gqi(struct gve_priv *priv,
			   struct gve_tx_alloc_rings_cfg *cfg);
void gve_tx_start_ring_gqi(struct gve_priv *priv, int idx);
void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx);
u32 gve_tx_load_event_counter(struct gve_priv *priv,
			      struct gve_tx_ring *tx);
bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
/* rx handling */
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
int gve_rx_poll(struct gve_notify_block *block, int budget);
bool gve_rx_work_pending(struct gve_rx_ring *rx);
int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
			  struct gve_rx_alloc_rings_cfg *cfg,
			  struct gve_rx_ring *rx,
			  int idx);
void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
			  struct gve_rx_alloc_rings_cfg *cfg);
int gve_rx_alloc_rings(struct gve_priv *priv);
int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
			   struct gve_rx_alloc_rings_cfg *cfg);
void gve_rx_free_rings_gqi(struct gve_priv *priv,
			   struct gve_rx_alloc_rings_cfg *cfg);
void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx);
void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx);
u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hplit);
bool gve_header_split_supported(const struct gve_priv *priv);
int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split);
/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
			     struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
			     struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
int gve_adjust_config(struct gve_priv *priv,
		      struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
		      struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
int gve_adjust_queues(struct gve_priv *priv,
		      struct gve_queue_config new_rx_config,
		      struct gve_queue_config new_tx_config);
/* report stats handling */
void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
extern const struct ethtool_ops gve_ethtool_ops;
/* needed by ethtool */
extern char gve_driver_name[];
extern const char gve_version_str[];
#endif /* _GVE_H_ */