/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
 * Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#ifndef _GVE_H_
#define _GVE_H_

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/u64_stats_sync.h>
#include <net/xdp.h>

#include "gve_desc.h"
#include "gve_desc_dqo.h"

#ifndef PCI_VENDOR_ID_GOOGLE
#define PCI_VENDOR_ID_GOOGLE	0x1ae0
#endif

#define PCI_DEV_ID_GVNIC	0x0042

#define GVE_REGISTER_BAR	0
#define GVE_DOORBELL_BAR	2

/* Driver can alloc up to 2 segments for the header and 2 for the payload. */
#define GVE_TX_MAX_IOVEC	4
/* 1 for management, 1 for rx, 1 for tx */
#define GVE_MIN_MSIX 3

/* Numbers of gve tx/rx stats in stats report. */
#define GVE_TX_STATS_REPORT_NUM	6
#define GVE_RX_STATS_REPORT_NUM	2

/* Interval to schedule a stats report update, 20000ms. */
#define GVE_STATS_REPORT_TIMER_PERIOD	20000

/* Numbers of NIC tx/rx stats in stats report. */
#define NIC_TX_STATS_REPORT_NUM	0
#define NIC_RX_STATS_REPORT_NUM	4

#define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))

/* PTYPEs are always 10 bits. */
#define GVE_NUM_PTYPES	1024

#define GVE_RX_BUFFER_SIZE_DQO 2048

#define GVE_XDP_ACTIONS 5

#define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182

#define DQO_QPL_DEFAULT_TX_PAGES 512
#define DQO_QPL_DEFAULT_RX_PAGES 2048

/* Maximum TSO size supported on DQO */
#define GVE_DQO_TX_MAX	0x3FFFF

#define GVE_TX_BUF_SHIFT_DQO 11

/* 2K buffers for DQO-QPL */
#define GVE_TX_BUF_SIZE_DQO BIT(GVE_TX_BUF_SHIFT_DQO)
#define GVE_TX_BUFS_PER_PAGE_DQO (PAGE_SIZE >> GVE_TX_BUF_SHIFT_DQO)
#define GVE_MAX_TX_BUFS_PER_PKT (DIV_ROUND_UP(GVE_DQO_TX_MAX, GVE_TX_BUF_SIZE_DQO))
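
/* Worked example (illustrative, assuming 4K pages): PAGE_SIZE >> 11 == 2
 * bounce buffers of 2048B per QPL page, and a maximal 0x3FFFF-byte TSO
 * packet needs DIV_ROUND_UP(0x3FFFF, 2048) == 128 buffers, which is what
 * GVE_MAX_TX_BUFS_PER_PKT evaluates to.
 */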

/* If the number of free/recyclable buffers is less than this threshold, the
 * driver allocates and uses a non-qpl page on the receive path of DQO QPL to
 * free up buffers.
 * The value is set large enough to post at least 3 64K LRO packets via 2K
 * buffers to the NIC.
 */
#define GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD 96
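
/* Sanity check on the value above (illustrative): one 64K LRO packet takes
 * 65536 / 2048 == 32 2K buffers, so three such packets need 3 * 32 == 96
 * buffers, matching the threshold.
 */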

/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
	struct gve_rx_desc *desc_ring; /* the descriptor ring */
	dma_addr_t bus; /* the bus for the desc_ring */
	u8 seqno; /* the next expected seqno for this desc */
};

/* The page info for a single slot in the RX data queue */
struct gve_rx_slot_page_info {
	struct page *page;
	void *page_address;
	u32 page_offset; /* offset to write to in page */
	int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
	u16 pad; /* adjustment for rx padding */
	u8 can_flip; /* tracks if the networking stack is using the page */
};

/* A list of pages registered with the device during setup and used by a queue
 * as buffers
 */
struct gve_queue_page_list {
	u32 id; /* unique id */
	u32 num_entries;
	struct page **pages; /* list of num_entries pages */
	dma_addr_t *page_buses; /* the dma addrs of the pages */
};

/* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */
struct gve_rx_data_queue {
	union gve_rx_data_slot *data_ring; /* read by NIC */
	dma_addr_t data_bus; /* dma mapping of the slots */
	struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
	struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
	u8 raw_addressing; /* use raw_addressing? */
};

struct gve_priv;

/* RX buffer queue for posting buffers to HW.
 * Each RX (completion) queue has a corresponding buffer queue.
 */
struct gve_rx_buf_queue_dqo {
	struct gve_rx_desc_dqo *desc_ring;
	dma_addr_t bus;
	u32 head; /* Pointer to start cleaning buffers at. */
	u32 tail; /* Last posted buffer index + 1 */
	u32 mask; /* Mask for indices to the size of the ring */
};
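
/* Illustrative use of `mask` (assumes the ring size is a power of two, so
 * mask == ring_size - 1): indices wrap with a cheap AND instead of a modulo:
 *
 *	bufq->tail = (bufq->tail + 1) & bufq->mask;
 */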

/* RX completion queue to receive packets from HW. */
struct gve_rx_compl_queue_dqo {
	struct gve_rx_compl_desc_dqo *desc_ring;
	dma_addr_t bus;

	/* Number of slots which did not have a buffer posted yet. We should not
	 * post more buffers than the queue size to avoid HW overrunning the
	 * queue.
	 */
	int num_free_slots;

	/* HW uses a "generation bit" to notify SW of new descriptors. When a
	 * descriptor's generation bit is different from the current generation,
	 * that descriptor is ready to be consumed by SW.
	 */
	u8 cur_gen_bit;

	/* Pointer into desc_ring where the next completion descriptor will be
	 * received.
	 */
	u32 head;
	u32 mask; /* Mask for indices to the size of the ring */
};
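
/* Sketch of the generation-bit handshake (illustrative; the accessor for the
 * descriptor's generation bit is hypothetical here, the real field layout is
 * in gve_desc_dqo.h):
 *
 *	desc = &complq->desc_ring[complq->head];
 *	if (gen_bit(desc) == complq->cur_gen_bit)
 *		return;			// HW has not written this slot yet
 *	// ... consume desc ...
 *	complq->head = (complq->head + 1) & complq->mask;
 *	if (complq->head == 0)
 *		complq->cur_gen_bit ^= 1; // generation flips every ring lap
 */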

/* Stores state for tracking buffers posted to HW */
struct gve_rx_buf_state_dqo {
	/* The page posted to HW. */
	struct gve_rx_slot_page_info page_info;

	/* The DMA address corresponding to `page_info`. */
	dma_addr_t addr;

	/* Last offset into the page when it only had a single reference, at
	 * which point every other offset is free to be reused.
	 */
	u32 last_single_ref_offset;

	/* Linked list index to next element in the list, or -1 if none */
	s16 next;
};

/* `head` and `tail` are indices into an array, or -1 if empty. */
struct gve_index_list {
	s16 head;
	s16 tail;
};
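
/* Minimal sketch of popping the head of such an index-based list
 * (illustrative; `next_arr` stands for whichever per-element `next` storage a
 * given list uses, e.g. buf_states[].next on RX or tx_qpl_buf_next[] on TX):
 *
 *	s16 idx = list->head;
 *
 *	if (idx != -1) {
 *		list->head = next_arr[idx];
 *		if (list->head == -1)
 *			list->tail = -1;	// list is now empty
 *	}
 */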

/* A single received packet split across multiple buffers may be
 * reconstructed using the information in this structure.
 */
struct gve_rx_ctx {
	/* head and tail of skb chain for the current packet or NULL if none */
	struct sk_buff *skb_head;
	struct sk_buff *skb_tail;
	u32 total_size;
	u8 frag_cnt;
	bool drop_pkt;
};

struct gve_rx_cnts {
	u32 ok_pkt_bytes;
	u16 ok_pkt_cnt;
	u16 total_pkt_cnt;
	u16 cont_pkt_cnt;
	u16 desc_err_pkt_cnt;
};

/* Contains datapath state used to represent an RX queue. */
struct gve_rx_ring {
	struct gve_priv *gve;
	union {
		/* GQI fields */
		struct {
			struct gve_rx_desc_queue desc;
			struct gve_rx_data_queue data;

			/* threshold for posting new buffs and descs */
			u32 db_threshold;
			u16 packet_buffer_size;

			u32 qpl_copy_pool_mask;
			u32 qpl_copy_pool_head;
			struct gve_rx_slot_page_info *qpl_copy_pool;
		};

		/* DQO fields. */
		struct {
			struct gve_rx_buf_queue_dqo bufq;
			struct gve_rx_compl_queue_dqo complq;

			struct gve_rx_buf_state_dqo *buf_states;
			u16 num_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Index into
			 * buf_states, or -1 if empty.
			 */
			s16 free_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indexes into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which are pointing to
			 * valid buffers.
			 *
			 * We use a FIFO here in order to increase the
			 * probability that buffers can be reused by increasing
			 * the time between usages.
			 */
			struct gve_index_list recycled_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indexes into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which have buffers
			 * which cannot be reused yet.
			 */
			struct gve_index_list used_buf_states;

			/* qpl assigned to this queue */
			struct gve_queue_page_list *qpl;

			/* index into queue page list */
			u32 next_qpl_page_idx;

			/* track number of used buffers */
			u16 used_buf_states_cnt;
		} dqo;
	};

	u64 rbytes; /* free-running bytes received */
	u64 rpackets; /* free-running packets received */
	u32 cnt; /* free-running total number of completed packets */
	u32 fill_cnt; /* free-running total number of descs and buffs posted */
	u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
	u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
	u64 rx_copied_pkt; /* free-running total number of copied packets */
	u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
	u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
	u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
	u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
	u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
	u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */
	u64 rx_frag_alloc_cnt; /* free-running count of rx page allocations */
	u64 xdp_tx_errors;
	u64 xdp_redirect_errors;
	u64 xdp_alloc_fails;
	u64 xdp_actions[GVE_XDP_ACTIONS];
	u32 q_num; /* queue index */
	u32 ntfy_id; /* notification block index */
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	dma_addr_t q_resources_bus; /* dma address for the queue resources */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */

	struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. */

	/* XDP stuff */
	struct xdp_rxq_info xdp_rxq;
	struct xdp_rxq_info xsk_rxq;
	struct xsk_buff_pool *xsk_pool;
	struct page_frag_cache page_cache; /* Page cache to allocate XDP frames */
};

/* A TX desc ring entry */
union gve_tx_desc {
	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
	struct gve_tx_mtd_desc mtd; /* optional metadata descriptor */
	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
};

/* Tracks the memory in the fifo occupied by a segment of a packet */
struct gve_tx_iovec {
	u32 iov_offset; /* offset into this segment */
	u32 iov_len; /* length */
	u32 iov_padding; /* padding associated with this segment */
};

/* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
 * ring entry but only used for a pkt_desc not a seg_desc
 */
struct gve_tx_buffer_state {
	union {
		struct sk_buff *skb; /* skb for this pkt */
		struct xdp_frame *xdp_frame; /* xdp_frame */
	};
	struct {
		u16 size; /* size of xmitted xdp pkt */
		u8 is_xsk; /* xsk buff */
	} xdp;
	union {
		struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
		struct {
			DEFINE_DMA_UNMAP_ADDR(dma);
			DEFINE_DMA_UNMAP_LEN(len);
		};
	};
};

/* A TX buffer - each queue has one */
struct gve_tx_fifo {
	void *base; /* address of base of FIFO */
	u32 size; /* total size */
	atomic_t available; /* how much space is still available */
	u32 head; /* offset to write at */
	struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */
};

/* TX descriptor for DQO format */
union gve_tx_desc_dqo {
	struct gve_tx_pkt_desc_dqo pkt;
	struct gve_tx_tso_context_desc_dqo tso_ctx;
	struct gve_tx_general_context_desc_dqo general_ctx;
};

enum gve_packet_state {
	/* Packet is in free list, available to be allocated.
	 * This should always be zero since state is not explicitly initialized.
	 */
	GVE_PACKET_STATE_UNALLOCATED,
	/* Packet is expecting a regular data completion or miss completion */
	GVE_PACKET_STATE_PENDING_DATA_COMPL,
	/* Packet has received a miss completion and is expecting a
	 * re-injection completion.
	 */
	GVE_PACKET_STATE_PENDING_REINJECT_COMPL,
	/* No valid completion received within the specified timeout. */
	GVE_PACKET_STATE_TIMED_OUT_COMPL,
};
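
/* Illustrative summary of the transitions described above:
 *
 *	UNALLOCATED            -> PENDING_DATA_COMPL     packet posted to HW
 *	PENDING_DATA_COMPL     -> UNALLOCATED            data completion seen
 *	PENDING_DATA_COMPL     -> PENDING_REINJECT_COMPL miss completion seen
 *	PENDING_REINJECT_COMPL -> UNALLOCATED            re-injection completion
 *	PENDING_REINJECT_COMPL -> TIMED_OUT_COMPL        timeout_jiffies elapsed
 */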

struct gve_tx_pending_packet_dqo {
	struct sk_buff *skb; /* skb for this packet */

	/* 0th element corresponds to the linear portion of `skb`, should be
	 * unmapped with `dma_unmap_single`.
	 *
	 * All others correspond to `skb`'s frags and should be unmapped with
	 * `dma_unmap_page`.
	 */
	union {
		struct {
			DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
			DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
		};
		s16 tx_qpl_buf_ids[GVE_MAX_TX_BUFS_PER_PKT];
	};

	u16 num_bufs;

	/* Linked list index to next element in the list, or -1 if none */
	s16 next;

	/* Linked list index to prev element in the list, or -1 if none.
	 * Used for tracking either outstanding miss completions or prematurely
	 * freed packets.
	 */
	s16 prev;

	/* Identifies the current state of the packet as defined in
	 * `enum gve_packet_state`.
	 */
	u8 state;

	/* If packet is an outstanding miss completion, then the packet is
	 * freed if the corresponding re-injection completion is not received
	 * before kernel jiffies exceeds timeout_jiffies.
	 */
	unsigned long timeout_jiffies;
};
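
/* Unmap sketch for the RDA case above (illustrative; in the QPL case the
 * union holds tx_qpl_buf_ids and no per-packet DMA unmapping is needed):
 *
 *	int i;
 *
 *	dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]),
 *			 dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE);
 *	for (i = 1; i < pkt->num_bufs; i++)
 *		dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]),
 *			       dma_unmap_len(pkt, len[i]), DMA_TO_DEVICE);
 */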

/* Contains datapath state used to represent a TX queue. */
struct gve_tx_ring {
	/* Cacheline 0 -- Accessed & dirtied during transmit */
	union {
		/* GQI fields */
		struct {
			struct gve_tx_fifo tx_fifo;
			u32 req; /* driver tracked head pointer */
			u32 done; /* driver tracked tail pointer */
		};

		/* DQO fields. */
		struct {
			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is a consumer list owned by the TX path. When it
			 * runs out, the producer list is stolen from the
			 * completion handling path
			 * (dqo_compl.free_pending_packets).
			 */
			s16 free_pending_packets;

			/* Cached value of `dqo_compl.hw_tx_head` */
			u32 head;
			u32 tail; /* Last posted buffer index + 1 */

			/* Index of the last descriptor with "report event" bit
			 * set.
			 */
			u32 last_re_idx;

			/* free running number of packet buf descriptors posted */
			u16 posted_packet_desc_cnt;
			/* free running number of packet buf descriptors completed */
			u16 completed_packet_desc_cnt;

			/* QPL fields */
			struct {
				/* Linked list of gve_tx_buf_dqo. Index into
				 * tx_qpl_buf_next, or -1 if empty.
				 *
				 * This is a consumer list owned by the TX path. When it
				 * runs out, the producer list is stolen from the
				 * completion handling path
				 * (dqo_compl.free_tx_qpl_buf_head).
				 */
				s16 free_tx_qpl_buf_head;

				/* Free running count of the number of QPL tx buffers
				 * allocated
				 */
				u32 alloc_tx_qpl_buf_cnt;

				/* Cached value of `dqo_compl.free_tx_qpl_buf_cnt` */
				u32 free_tx_qpl_buf_cnt;
			};
		} dqo_tx;
	};

	/* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */
	union {
		/* GQI fields */
		struct {
			/* Spinlock for when cleanup in progress */
			spinlock_t clean_lock;
			/* Spinlock for XDP tx traffic */
			spinlock_t xdp_lock;
		};

		/* DQO fields. */
		struct {
			u32 head; /* Last read on compl_desc */

			/* Tracks the current gen bit of compl_q */
			u8 cur_gen_bit;

			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is the producer list, owned by the completion
			 * handling path. When the consumer list
			 * (dqo_tx.free_pending_packets) runs out, this list
			 * will be stolen.
			 */
			atomic_t free_pending_packets;

			/* Last TX ring index fetched by HW */
			atomic_t hw_tx_head;

			/* List to track pending packets which received a miss
			 * completion but not a corresponding reinjection.
			 */
			struct gve_index_list miss_completions;

			/* List to track pending packets that were completed
			 * before receiving a valid completion because they
			 * reached a specified timeout.
			 */
			struct gve_index_list timed_out_completions;

			/* QPL fields */
			struct {
				/* Linked list of gve_tx_buf_dqo. Index into
				 * tx_qpl_buf_next, or -1 if empty.
				 *
				 * This is the producer list, owned by the completion
				 * handling path. When the consumer list
				 * (dqo_tx.free_tx_qpl_buf_head) runs out, this list
				 * will be stolen.
				 */
				atomic_t free_tx_qpl_buf_head;

				/* Free running count of the number of tx buffers
				 * freed
				 */
				atomic_t free_tx_qpl_buf_cnt;
			};
		} dqo_compl;
	} ____cacheline_aligned;
	u64 pkt_done; /* free-running - total packets completed */
	u64 bytes_done; /* free-running - total bytes completed */
	u64 dropped_pkt; /* free-running - total packets dropped */
	u64 dma_mapping_error; /* count of dma mapping errors */

	/* Cacheline 2 -- Read-mostly fields */
	union {
		/* GQI fields */
		struct {
			union gve_tx_desc *desc;

			/* Maps 1:1 to a desc */
			struct gve_tx_buffer_state *info;
		};

		/* DQO fields. */
		struct {
			union gve_tx_desc_dqo *tx_ring;
			struct gve_tx_compl_desc *compl_ring;

			struct gve_tx_pending_packet_dqo *pending_packets;
			s16 num_pending_packets;

			u32 complq_mask; /* complq size is complq_mask + 1 */

			/* QPL fields */
			struct {
				/* qpl assigned to this queue */
				struct gve_queue_page_list *qpl;

				/* Each QPL page is divided into TX bounce buffers
				 * of size GVE_TX_BUF_SIZE_DQO. tx_qpl_buf_next is
				 * an array to manage linked lists of TX buffers.
				 * An entry j at index i implies that j'th buffer
				 * is next on the list after i
				 */
				s16 *tx_qpl_buf_next;
				u32 num_tx_qpl_bufs;
			};
		} dqo;
	} ____cacheline_aligned;
	struct netdev_queue *netdev_txq;
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	struct device *dev;
	u32 mask; /* masks req and done down to queue size */
	u8 raw_addressing; /* use raw_addressing? */

	/* Slow-path fields */
	u32 q_num ____cacheline_aligned; /* queue idx */
	u32 stop_queue; /* count of queue stops */
	u32 wake_queue; /* count of queue wakes */
	u32 queue_timeout; /* count of queue timeouts */
	u32 ntfy_id; /* notification block index */
	u32 last_kick_msec; /* Last time the queue was kicked */
	dma_addr_t bus; /* dma address of the descr ring */
	dma_addr_t q_resources_bus; /* dma address of the queue resources */
	dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */
	struct xsk_buff_pool *xsk_pool;
	u32 xdp_xsk_wakeup;
	u32 xdp_xsk_done;
	u64 xdp_xsk_sent;
	u64 xdp_xmit;
	u64 xdp_xmit_errors;
} ____cacheline_aligned;

/* Wraps the info for one irq including the napi struct and the queues
 * associated with that irq.
 */
struct gve_notify_block {
	__be32 *irq_db_index; /* pointer to idx into Bar2 */
	char name[IFNAMSIZ + 16]; /* name registered with the kernel */
	struct napi_struct napi; /* kernel napi struct for this block */
	struct gve_priv *priv;
	struct gve_tx_ring *tx; /* tx rings on this block */
	struct gve_rx_ring *rx; /* rx rings on this block */
};

/* Tracks allowed and current queue settings */
struct gve_queue_config {
	u16 max_queues;
	u16 num_queues; /* current */
};

/* Tracks the available and used qpl IDs */
struct gve_qpl_config {
	u32 qpl_map_size; /* map memory size */
	unsigned long *qpl_id_map; /* bitmap of used qpl ids */
};

struct gve_options_dqo_rda {
	u16 tx_comp_ring_entries; /* number of tx_comp descriptors */
	u16 rx_buff_ring_entries; /* number of rx_buff descriptors */
};

struct gve_irq_db {
	__be32 index;
} ____cacheline_aligned;

struct gve_ptype {
	u8 l3_type;  /* `gve_l3_type` in gve_adminq.h */
	u8 l4_type;  /* `gve_l4_type` in gve_adminq.h */
};

struct gve_ptype_lut {
	struct gve_ptype ptypes[GVE_NUM_PTYPES];
};

/* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
 * when the entire configure_device_resources command is zeroed out and the
 * queue_format is not specified.
 */
enum gve_queue_format {
	GVE_QUEUE_FORMAT_UNSPECIFIED	= 0x0,
	GVE_GQI_RDA_FORMAT		= 0x1,
	GVE_GQI_QPL_FORMAT		= 0x2,
	GVE_DQO_RDA_FORMAT		= 0x3,
	GVE_DQO_QPL_FORMAT		= 0x4,
};

struct gve_priv {
	struct net_device *dev;
	struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
	struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
	struct gve_queue_page_list *qpls; /* array of num qpls */
	struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
	struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */
	dma_addr_t irq_db_indices_bus;
	struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */
	char mgmt_msix_name[IFNAMSIZ + 16];
	u32 mgmt_msix_idx;
	__be32 *counter_array; /* array of num_event_counters */
	dma_addr_t counter_array_bus;

	u16 num_event_counters;
	u16 tx_desc_cnt; /* num desc per ring */
	u16 rx_desc_cnt; /* num desc per ring */
	u16 tx_pages_per_qpl; /* Suggested number of pages per qpl for TX queues by NIC */
	u16 rx_pages_per_qpl; /* Suggested number of pages per qpl for RX queues by NIC */
	u16 rx_data_slot_cnt; /* rx buffer length */
	u64 max_registered_pages;
	u64 num_registered_pages; /* num pages registered with NIC */
	struct bpf_prog *xdp_prog; /* XDP BPF program */
	u32 rx_copybreak; /* copy packets smaller than this */
	u16 default_num_queues; /* default num queues to set up */

	u16 num_xdp_queues;
	struct gve_queue_config tx_cfg;
	struct gve_queue_config rx_cfg;
	struct gve_qpl_config qpl_cfg; /* map used QPL ids */
	u32 num_ntfy_blks; /* split between TX and RX so must be even */

	struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
	__be32 __iomem *db_bar2; /* "array" of doorbells */
	u32 msg_enable;	/* level for netif* netdev print macros */
	struct pci_dev *pdev;

	/* metrics */
	u32 tx_timeo_cnt;

	/* Admin queue - see gve_adminq.h */
	union gve_adminq_command *adminq;
	dma_addr_t adminq_bus_addr;
	u32 adminq_mask; /* masks prod_cnt to adminq size */
	u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
	u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
	u32 adminq_timeouts; /* free-running count of AQ cmd timeouts */
	/* free-running count of each AQ cmd executed */
	u32 adminq_describe_device_cnt;
	u32 adminq_cfg_device_resources_cnt;
	u32 adminq_register_page_list_cnt;
	u32 adminq_unregister_page_list_cnt;
	u32 adminq_create_tx_queue_cnt;
	u32 adminq_create_rx_queue_cnt;
	u32 adminq_destroy_tx_queue_cnt;
	u32 adminq_destroy_rx_queue_cnt;
	u32 adminq_dcfg_device_resources_cnt;
	u32 adminq_set_driver_parameter_cnt;
	u32 adminq_report_stats_cnt;
	u32 adminq_report_link_speed_cnt;
	u32 adminq_get_ptype_map_cnt;
	u32 adminq_verify_driver_compatibility_cnt;

	/* Global stats */
	u32 interface_up_cnt; /* count of times interface turned up since last reset */
	u32 interface_down_cnt; /* count of times interface turned down since last reset */
	u32 reset_cnt; /* count of resets */
	u32 page_alloc_fail; /* count of page alloc fails */
	u32 dma_mapping_error; /* count of dma mapping errors */
	u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
	u32 suspend_cnt; /* count of times suspended */
	u32 resume_cnt; /* count of times resumed */
	struct workqueue_struct *gve_wq;
	struct work_struct service_task;
	struct work_struct stats_report_task;
	unsigned long service_task_flags;
	unsigned long state_flags;

	struct gve_stats_report *stats_report;
	u64 stats_report_len;
	dma_addr_t stats_report_bus; /* dma address for the stats report */
	unsigned long ethtool_flags;

	unsigned long stats_report_timer_period;
	struct timer_list stats_report_timer;

	/* Gvnic device link speed from hypervisor. */
	u64 link_speed;
	bool up_before_suspend; /* True if dev was up before suspend */

	struct gve_options_dqo_rda options_dqo_rda;
	struct gve_ptype_lut *ptype_lut_dqo;

	/* Must be a power of two. */
	int data_buffer_size_dqo;

	enum gve_queue_format queue_format;

	/* Interrupt coalescing settings */
	u32 tx_coalesce_usecs;
	u32 rx_coalesce_usecs;
};

enum gve_service_task_flags_bit {
	GVE_PRIV_FLAGS_DO_RESET			= 1,
	GVE_PRIV_FLAGS_RESET_IN_PROGRESS	= 2,
	GVE_PRIV_FLAGS_PROBE_IN_PROGRESS	= 3,
	GVE_PRIV_FLAGS_DO_REPORT_STATS		= 4,
};

enum gve_state_flags_bit {
	GVE_PRIV_FLAGS_ADMIN_QUEUE_OK		= 1,
	GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK	= 2,
	GVE_PRIV_FLAGS_DEVICE_RINGS_OK		= 3,
	GVE_PRIV_FLAGS_NAPI_ENABLED		= 4,
};

enum gve_ethtool_flags_bit {
	GVE_PRIV_FLAGS_REPORT_STATS		= 0,
};

static inline bool gve_get_do_reset(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_set_do_reset(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_clear_do_reset(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline bool gve_get_reset_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_reset_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_reset_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_probe_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_probe_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_do_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS,
			&priv->service_task_flags);
}

static inline void gve_set_do_report_stats(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline void gve_clear_do_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_set_admin_queue_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_clear_admin_queue_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline bool gve_get_device_resources_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_set_device_resources_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_clear_device_resources_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline bool gve_get_device_rings_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_set_device_rings_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_clear_device_rings_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline bool gve_get_napi_enabled(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_set_napi_enabled(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_clear_napi_enabled(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline bool gve_get_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

static inline void gve_clear_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

/* Returns the address of the ntfy_block's irq doorbell
 */
static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
					       struct gve_notify_block *block)
{
	return &priv->db_bar2[be32_to_cpu(*block->irq_db_index)];
}
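
/* Illustrative use: masking/acking the irq is a big-endian write to this
 * address (GVE_IRQ_MASK is an assumption here; the real values live in the
 * driver's irq handling code):
 *
 *	iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
 */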

/* Returns the index into ntfy_blocks of the given tx ring's block
 */
static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return queue_idx;
}

/* Returns the index into ntfy_blocks of the given rx ring's block
 */
static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return (priv->num_ntfy_blks / 2) + queue_idx;
}
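
/* Worked example (illustrative): with num_ntfy_blks == 8, tx queues 0..3 use
 * notify blocks 0..3 and rx queues 0..3 use blocks 4..7, which is why
 * num_ntfy_blks must be even (it is split between TX and RX).
 */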

static inline bool gve_is_qpl(struct gve_priv *priv)
{
	return priv->queue_format == GVE_GQI_QPL_FORMAT ||
		priv->queue_format == GVE_DQO_QPL_FORMAT;
}

/* Returns the number of tx queue page lists
 */
static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
{
	if (!gve_is_qpl(priv))
		return 0;

	return priv->tx_cfg.num_queues + priv->num_xdp_queues;
}

/* Returns the number of XDP tx queue page lists
 */
static inline u32 gve_num_xdp_qpls(struct gve_priv *priv)
{
	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
		return 0;

	return priv->num_xdp_queues;
}

/* Returns the number of rx queue page lists
 */
static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
{
	if (!gve_is_qpl(priv))
		return 0;

	return priv->rx_cfg.num_queues;
}

static inline u32 gve_tx_qpl_id(struct gve_priv *priv, int tx_qid)
{
	return tx_qid;
}

static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
{
	return priv->tx_cfg.max_queues + rx_qid;
}

static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
{
	return gve_tx_qpl_id(priv, 0);
}

static inline u32 gve_rx_start_qpl_id(struct gve_priv *priv)
{
	return gve_rx_qpl_id(priv, 0);
}
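
/* QPL id layout (worked example, illustrative): TX queues own ids
 * 0..tx_cfg.max_queues-1 and RX queues own the ids after that, so with
 * tx_cfg.max_queues == 16, rx queue 2 uses qpl id 16 + 2 == 18. The DMA
 * direction helper gve_qpl_dma_dir() below keys off this same split.
 */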

/* Returns a pointer to the next available tx qpl in the list of qpls
 */
static inline
struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv, int tx_qid)
{
	int id = gve_tx_qpl_id(priv, tx_qid);

	/* QPL already in use */
	if (test_bit(id, priv->qpl_cfg.qpl_id_map))
		return NULL;

	set_bit(id, priv->qpl_cfg.qpl_id_map);
	return &priv->qpls[id];
}

/* Returns a pointer to the next available rx qpl in the list of qpls
 */
static inline
struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv, int rx_qid)
{
	int id = gve_rx_qpl_id(priv, rx_qid);

	/* QPL already in use */
	if (test_bit(id, priv->qpl_cfg.qpl_id_map))
		return NULL;

	set_bit(id, priv->qpl_cfg.qpl_id_map);
	return &priv->qpls[id];
}

/* Unassigns the qpl with the given id
 */
static inline void gve_unassign_qpl(struct gve_priv *priv, int id)
{
	clear_bit(id, priv->qpl_cfg.qpl_id_map);
}
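
/* Caller-side pairing sketch (illustrative; ring setup and error handling
 * elided):
 *
 *	struct gve_queue_page_list *qpl = gve_assign_rx_qpl(priv, rx_qid);
 *
 *	if (!qpl)
 *		return -EBUSY;	// id already set in qpl_id_map
 *	// ... use qpl->pages / qpl->page_buses for the ring ...
 *	gve_unassign_qpl(priv, qpl->id);	// on teardown
 */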

/* Returns the correct dma direction for tx and rx qpls
 */
static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
						      int id)
{
	if (id < gve_rx_start_qpl_id(priv))
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

static inline bool gve_is_gqi(struct gve_priv *priv)
{
	return priv->queue_format == GVE_GQI_RDA_FORMAT ||
		priv->queue_format == GVE_GQI_QPL_FORMAT;
}

static inline u32 gve_num_tx_queues(struct gve_priv *priv)
{
	return priv->tx_cfg.num_queues + priv->num_xdp_queues;
}

static inline u32 gve_xdp_tx_queue_id(struct gve_priv *priv, u32 queue_id)
{
	return priv->tx_cfg.num_queues + queue_id;
}

static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv)
{
	return gve_xdp_tx_queue_id(priv, 0);
}
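
/* TX ring index layout (worked example, illustrative): regular queues occupy
 * indices 0..tx_cfg.num_queues-1 and XDP queues follow, so with
 * tx_cfg.num_queues == 4 and num_xdp_queues == 4, XDP queue 1 is tx ring 5
 * and gve_num_tx_queues() reports 8 rings in total.
 */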

/* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
		   struct page **page, dma_addr_t *dma,
		   enum dma_data_direction, gfp_t gfp_flags);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
		   enum dma_data_direction);
/* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		 u32 flags);
int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
		     void *data, int len, void *frame_p);
void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
bool gve_xdp_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings(struct gve_priv *priv, int start_id, int num_rings);
void gve_tx_free_rings_gqi(struct gve_priv *priv, int start_id, int num_rings);
u32 gve_tx_load_event_counter(struct gve_priv *priv,
			      struct gve_tx_ring *tx);
bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
/* rx handling */
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
int gve_rx_poll(struct gve_notify_block *block, int budget);
bool gve_rx_work_pending(struct gve_rx_ring *rx);
int gve_rx_alloc_rings(struct gve_priv *priv);
void gve_rx_free_rings_gqi(struct gve_priv *priv);
/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
int gve_adjust_queues(struct gve_priv *priv,
		      struct gve_queue_config new_rx_config,
		      struct gve_queue_config new_tx_config);
/* report stats handling */
void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
extern const struct ethtool_ops gve_ethtool_ops;
/* needed by ethtool */
extern char gve_driver_name[];
extern const char gve_version_str[];
#endif /* _GVE_H_ */