xref: /linux/drivers/net/ethernet/google/gve/gve.h (revision a5766cd479fd212e9831ceef8e9ab630c91445ab)
1 /* SPDX-License-Identifier: (GPL-2.0 OR MIT)
2  * Google virtual Ethernet (gve) driver
3  *
4  * Copyright (C) 2015-2021 Google, Inc.
5  */
6 
7 #ifndef _GVE_H_
8 #define _GVE_H_
9 
10 #include <linux/dma-mapping.h>
11 #include <linux/dmapool.h>
12 #include <linux/netdevice.h>
13 #include <linux/pci.h>
14 #include <linux/u64_stats_sync.h>
15 #include <net/xdp.h>
16 
17 #include "gve_desc.h"
18 #include "gve_desc_dqo.h"
19 
20 #ifndef PCI_VENDOR_ID_GOOGLE
21 #define PCI_VENDOR_ID_GOOGLE	0x1ae0
22 #endif
23 
24 #define PCI_DEV_ID_GVNIC	0x0042
25 
26 #define GVE_REGISTER_BAR	0
27 #define GVE_DOORBELL_BAR	2
28 
29 /* Driver can alloc up to 2 segments for the header and 2 for the payload. */
30 #define GVE_TX_MAX_IOVEC	4
31 /* 1 for management, 1 for rx, 1 for tx */
32 #define GVE_MIN_MSIX 3
33 
34 /* Numbers of gve tx/rx stats in stats report. */
35 #define GVE_TX_STATS_REPORT_NUM	6
36 #define GVE_RX_STATS_REPORT_NUM	2
37 
38 /* Interval to schedule a stats report update, 20000ms. */
39 #define GVE_STATS_REPORT_TIMER_PERIOD	20000
40 
41 /* Numbers of NIC tx/rx stats in stats report. */
42 #define NIC_TX_STATS_REPORT_NUM	0
43 #define NIC_RX_STATS_REPORT_NUM	4
44 
45 #define GVE_ADMINQ_BUFFER_SIZE 4096
46 
47 #define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))
48 
49 /* PTYPEs are always 10 bits. */
50 #define GVE_NUM_PTYPES	1024
51 
52 #define GVE_DEFAULT_RX_BUFFER_SIZE 2048
53 
54 #define GVE_DEFAULT_RX_BUFFER_OFFSET 2048
55 
56 #define GVE_XDP_ACTIONS 5
57 
58 #define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182
59 
60 #define DQO_QPL_DEFAULT_TX_PAGES 512
61 #define DQO_QPL_DEFAULT_RX_PAGES 2048
62 
63 /* Maximum TSO size supported on DQO */
64 #define GVE_DQO_TX_MAX	0x3FFFF
65 
66 #define GVE_TX_BUF_SHIFT_DQO 11
67 
68 /* 2K buffers for DQO-QPL */
69 #define GVE_TX_BUF_SIZE_DQO BIT(GVE_TX_BUF_SHIFT_DQO)
70 #define GVE_TX_BUFS_PER_PAGE_DQO (PAGE_SIZE >> GVE_TX_BUF_SHIFT_DQO)
71 #define GVE_MAX_TX_BUFS_PER_PKT (DIV_ROUND_UP(GVE_DQO_TX_MAX, GVE_TX_BUF_SIZE_DQO))
72 
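/* Worked example (illustrative, assuming 4K pages): GVE_TX_BUF_SIZE_DQO is
 * 1 << 11 = 2048 bytes, so GVE_TX_BUFS_PER_PAGE_DQO is 4096 >> 11 = 2 bounce
 * buffers per QPL page, and GVE_MAX_TX_BUFS_PER_PKT is
 * DIV_ROUND_UP(0x3FFFF, 2048) = 128 buffers for a maximally sized TSO packet.
 */
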
73 /* If the number of free/recyclable buffers is less than this threshold, the
74  * driver allocates and uses a non-QPL page on the receive path of DQO QPL to
75  * free up buffers.
76  * The value is set big enough to post at least 3 64K LRO packets to the NIC via 2K buffers.
77  */
78 #define GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD 96
79 
80 /* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
81 struct gve_rx_desc_queue {
82 	struct gve_rx_desc *desc_ring; /* the descriptor ring */
83 	dma_addr_t bus; /* the bus for the desc_ring */
84 	u8 seqno; /* the next expected seqno for this desc */
85 };
86 
87 /* The page info for a single slot in the RX data queue */
88 struct gve_rx_slot_page_info {
89 	struct page *page;
90 	void *page_address;
91 	u32 page_offset; /* offset to write to in page */
92 	int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
93 	u16 pad; /* adjustment for rx padding */
94 	u8 can_flip; /* tracks if the networking stack is using the page */
95 };
96 
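/* Illustrative sketch (not part of the original header) of how pagecnt_bias
 * and can_flip are typically used together: a buffer may only be flipped and
 * re-posted once the page refcount has dropped back to the driver's expected
 * bias, i.e. the networking stack no longer holds a reference.  The helper
 * name is hypothetical.
 */
static inline bool gve_example_only_driver_holds_page(struct gve_rx_slot_page_info *info)
{
	/* Equal counts mean the driver owns the only remaining references. */
	return page_count(info->page) == info->pagecnt_bias;
}
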
97 /* A list of pages registered with the device during setup and used by a queue
98  * as buffers
99  */
100 struct gve_queue_page_list {
101 	u32 id; /* unique id */
102 	u32 num_entries;
103 	struct page **pages; /* list of num_entries pages */
104 	dma_addr_t *page_buses; /* the dma addrs of the pages */
105 };
106 
107 /* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */
108 struct gve_rx_data_queue {
109 	union gve_rx_data_slot *data_ring; /* read by NIC */
110 	dma_addr_t data_bus; /* dma mapping of the slots */
111 	struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
112 	struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
113 	u8 raw_addressing; /* use raw_addressing? */
114 };
115 
116 struct gve_priv;
117 
118 /* RX buffer queue for posting buffers to HW.
119  * Each RX (completion) queue has a corresponding buffer queue.
120  */
121 struct gve_rx_buf_queue_dqo {
122 	struct gve_rx_desc_dqo *desc_ring;
123 	dma_addr_t bus;
124 	u32 head; /* Pointer to start cleaning buffers at. */
125 	u32 tail; /* Last posted buffer index + 1 */
126 	u32 mask; /* Mask for indices to the size of the ring */
127 };
128 
129 /* RX completion queue to receive packets from HW. */
130 struct gve_rx_compl_queue_dqo {
131 	struct gve_rx_compl_desc_dqo *desc_ring;
132 	dma_addr_t bus;
133 
134 	/* Number of slots which did not have a buffer posted yet. We should not
135 	 * post more buffers than the queue size to avoid HW overrunning the
136 	 * queue.
137 	 */
138 	int num_free_slots;
139 
140 	/* HW uses a "generation bit" to notify SW of new descriptors. When a
141 	 * descriptor's generation bit is different from the current generation,
142 	 * that descriptor is ready to be consumed by SW.
143 	 */
144 	u8 cur_gen_bit;
145 
146 	/* Pointer into desc_ring where the next completion descriptor will be
147 	 * received.
148 	 */
149 	u32 head;
150 	u32 mask; /* Mask for indices to the size of the ring */
151 };
152 
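/* Hedged sketch of the generation-bit handshake described above; the helper
 * names are hypothetical.  A descriptor whose generation bit differs from
 * cur_gen_bit was freshly written by HW; each full pass over the ring flips
 * the expected generation.
 */
static inline bool gve_example_compl_desc_ready(const struct gve_rx_compl_queue_dqo *complq,
						u8 desc_gen_bit)
{
	return desc_gen_bit != complq->cur_gen_bit;
}

static inline void gve_example_advance_compl_head(struct gve_rx_compl_queue_dqo *complq)
{
	complq->head = (complq->head + 1) & complq->mask;
	if (complq->head == 0)		/* wrapped: toggle expected generation */
		complq->cur_gen_bit ^= 1;
}
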
153 /* Stores state for tracking buffers posted to HW */
154 struct gve_rx_buf_state_dqo {
155 	/* The page posted to HW. */
156 	struct gve_rx_slot_page_info page_info;
157 
158 	/* The DMA address corresponding to `page_info`. */
159 	dma_addr_t addr;
160 
161 	/* Last offset into the page when it only had a single reference, at
162 	 * which point every other offset is free to be reused.
163 	 */
164 	u32 last_single_ref_offset;
165 
166 	/* Linked list index to next element in the list, or -1 if none */
167 	s16 next;
168 };
169 
170 /* `head` and `tail` are indices into an array, or -1 if empty. */
171 struct gve_index_list {
172 	s16 head;
173 	s16 tail;
174 };
175 
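/* Illustrative only: how the -1 terminated index lists above are typically
 * walked.  buf_states is the backing array that the s16 indices refer to;
 * the helper name is hypothetical.
 */
static inline s16 gve_example_index_list_pop(struct gve_index_list *list,
					     struct gve_rx_buf_state_dqo *buf_states)
{
	s16 idx = list->head;

	if (idx == -1)
		return -1;		/* list is empty */

	list->head = buf_states[idx].next;
	if (list->head == -1)
		list->tail = -1;	/* list just became empty */
	return idx;
}
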
176 /* A single received packet split across multiple buffers may be
177  * reconstructed using the information in this structure.
178  */
179 struct gve_rx_ctx {
180 	/* head and tail of skb chain for the current packet or NULL if none */
181 	struct sk_buff *skb_head;
182 	struct sk_buff *skb_tail;
183 	u32 total_size;
184 	u8 frag_cnt;
185 	bool drop_pkt;
186 };
187 
188 struct gve_rx_cnts {
189 	u32 ok_pkt_bytes;
190 	u16 ok_pkt_cnt;
191 	u16 total_pkt_cnt;
192 	u16 cont_pkt_cnt;
193 	u16 desc_err_pkt_cnt;
194 };
195 
196 /* Contains datapath state used to represent an RX queue. */
197 struct gve_rx_ring {
198 	struct gve_priv *gve;
199 	union {
200 		/* GQI fields */
201 		struct {
202 			struct gve_rx_desc_queue desc;
203 			struct gve_rx_data_queue data;
204 
205 			/* threshold for posting new buffs and descs */
206 			u32 db_threshold;
207 			u16 packet_buffer_size;
208 
209 			u32 qpl_copy_pool_mask;
210 			u32 qpl_copy_pool_head;
211 			struct gve_rx_slot_page_info *qpl_copy_pool;
212 		};
213 
214 		/* DQO fields. */
215 		struct {
216 			struct gve_rx_buf_queue_dqo bufq;
217 			struct gve_rx_compl_queue_dqo complq;
218 
219 			struct gve_rx_buf_state_dqo *buf_states;
220 			u16 num_buf_states;
221 
222 			/* Linked list of gve_rx_buf_state_dqo. Index into
223 			 * buf_states, or -1 if empty.
224 			 */
225 			s16 free_buf_states;
226 
227 			/* Linked list of gve_rx_buf_state_dqo. Indexes into
228 			 * buf_states, or -1 if empty.
229 			 *
230 			 * This list contains buf_states which are pointing to
231 			 * valid buffers.
232 			 *
233 			 * We use a FIFO here in order to increase the
234 			 * probability that buffers can be reused by increasing
235 			 * the time between usages.
236 			 */
237 			struct gve_index_list recycled_buf_states;
238 
239 			/* Linked list of gve_rx_buf_state_dqo. Indexes into
240 			 * buf_states, or -1 if empty.
241 			 *
242 			 * This list contains buf_states which have buffers
243 			 * which cannot be reused yet.
244 			 */
245 			struct gve_index_list used_buf_states;
246 
247 			/* qpl assigned to this queue */
248 			struct gve_queue_page_list *qpl;
249 
250 			/* index into queue page list */
251 			u32 next_qpl_page_idx;
252 
253 			/* track number of used buffers */
254 			u16 used_buf_states_cnt;
255 		} dqo;
256 	};
257 
258 	u64 rbytes; /* free-running bytes received */
259 	u64 rpackets; /* free-running packets received */
260 	u32 cnt; /* free-running total number of completed packets */
261 	u32 fill_cnt; /* free-running total number of descs and buffs posted */
262 	u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
263 	u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
264 	u64 rx_copied_pkt; /* free-running total number of copied packets */
265 	u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
266 	u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
267 	u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
268 	u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
269 	u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
270 	u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */
271 	u64 rx_frag_alloc_cnt; /* free-running count of rx page allocations */
272 	u64 xdp_tx_errors;
273 	u64 xdp_redirect_errors;
274 	u64 xdp_alloc_fails;
275 	u64 xdp_actions[GVE_XDP_ACTIONS];
276 	u32 q_num; /* queue index */
277 	u32 ntfy_id; /* notification block index */
278 	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
279 	dma_addr_t q_resources_bus; /* dma address for the queue resources */
280 	struct u64_stats_sync statss; /* sync stats for 32bit archs */
281 
282 	struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. */
283 
284 	/* XDP stuff */
285 	struct xdp_rxq_info xdp_rxq;
286 	struct xdp_rxq_info xsk_rxq;
287 	struct xsk_buff_pool *xsk_pool;
288 	struct page_frag_cache page_cache; /* Page cache to allocate XDP frames */
289 };
290 
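/* Hedged sketch: the per-ring counters above are updated under statss so that
 * 32-bit readers see consistent 64-bit values.  A reader is expected to use
 * the standard u64_stats_fetch_begin()/u64_stats_fetch_retry() pattern; the
 * helper name is made up for illustration.
 */
static inline u64 gve_example_read_rx_bytes(const struct gve_rx_ring *rx)
{
	unsigned int start;
	u64 bytes;

	do {
		start = u64_stats_fetch_begin(&rx->statss);
		bytes = rx->rbytes;
	} while (u64_stats_fetch_retry(&rx->statss, start));

	return bytes;
}
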
291 /* A TX desc ring entry */
292 union gve_tx_desc {
293 	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
294 	struct gve_tx_mtd_desc mtd; /* optional metadata descriptor */
295 	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
296 };
297 
298 /* Tracks the memory in the fifo occupied by a segment of a packet */
299 struct gve_tx_iovec {
300 	u32 iov_offset; /* offset into this segment */
301 	u32 iov_len; /* length */
302 	u32 iov_padding; /* padding associated with this segment */
303 };
304 
305 /* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
306  * ring entry but only used for a pkt_desc, not a seg_desc.
307  */
308 struct gve_tx_buffer_state {
309 	union {
310 		struct sk_buff *skb; /* skb for this pkt */
311 		struct xdp_frame *xdp_frame; /* xdp_frame */
312 	};
313 	struct {
314 		u16 size; /* size of xmitted xdp pkt */
315 		u8 is_xsk; /* xsk buff */
316 	} xdp;
317 	union {
318 		struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
319 		struct {
320 			DEFINE_DMA_UNMAP_ADDR(dma);
321 			DEFINE_DMA_UNMAP_LEN(len);
322 		};
323 	};
324 };
325 
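/* Illustrative sketch of the anonymous DMA-unmap fields above, using the
 * standard dma_unmap_*() kernel macros; the helper names and the
 * DMA_TO_DEVICE direction are assumptions for the example.
 */
static inline void gve_example_save_unmap_info(struct gve_tx_buffer_state *info,
					       dma_addr_t addr, unsigned int length)
{
	dma_unmap_addr_set(info, dma, addr);
	dma_unmap_len_set(info, len, length);
}

static inline void gve_example_unmap(struct device *dev,
				     struct gve_tx_buffer_state *info)
{
	dma_unmap_single(dev, dma_unmap_addr(info, dma),
			 dma_unmap_len(info, len), DMA_TO_DEVICE);
}
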
326 /* A TX buffer - each queue has one */
327 struct gve_tx_fifo {
328 	void *base; /* address of base of FIFO */
329 	u32 size; /* total size */
330 	atomic_t available; /* how much space is still available */
331 	u32 head; /* offset to write at */
332 	struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */
333 };
334 
335 /* TX descriptor for DQO format */
336 union gve_tx_desc_dqo {
337 	struct gve_tx_pkt_desc_dqo pkt;
338 	struct gve_tx_tso_context_desc_dqo tso_ctx;
339 	struct gve_tx_general_context_desc_dqo general_ctx;
340 };
341 
342 enum gve_packet_state {
343 	/* Packet is in free list, available to be allocated.
344 	 * This should always be zero since state is not explicitly initialized.
345 	 */
346 	GVE_PACKET_STATE_UNALLOCATED,
347 	/* Packet is expecting a regular data completion or miss completion */
348 	GVE_PACKET_STATE_PENDING_DATA_COMPL,
349 	/* Packet has received a miss completion and is expecting a
350 	 * re-injection completion.
351 	 */
352 	GVE_PACKET_STATE_PENDING_REINJECT_COMPL,
353 	/* No valid completion received within the specified timeout. */
354 	GVE_PACKET_STATE_TIMED_OUT_COMPL,
355 };
356 
357 struct gve_tx_pending_packet_dqo {
358 	struct sk_buff *skb; /* skb for this packet */
359 
360 	/* 0th element corresponds to the linear portion of `skb` and should be
361 	 * unmapped with `dma_unmap_single`.
362 	 *
363 	 * All others correspond to `skb`'s frags and should be unmapped with
364 	 * `dma_unmap_page`.
365 	 */
366 	union {
367 		struct {
368 			DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
369 			DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
370 		};
371 		s16 tx_qpl_buf_ids[GVE_MAX_TX_BUFS_PER_PKT];
372 	};
373 
374 	u16 num_bufs;
375 
376 	/* Linked list index to next element in the list, or -1 if none */
377 	s16 next;
378 
379 	/* Linked list index to prev element in the list, or -1 if none.
380 	 * Used for tracking either outstanding miss completions or prematurely
381 	 * freed packets.
382 	 */
383 	s16 prev;
384 
385 	/* Identifies the current state of the packet as defined in
386 	 * `enum gve_packet_state`.
387 	 */
388 	u8 state;
389 
390 	/* If packet is an outstanding miss completion, then the packet is
391 	 * freed if the corresponding re-injection completion is not received
392 	 * before kernel jiffies exceeds timeout_jiffies.
393 	 */
394 	unsigned long timeout_jiffies;
395 };
396 
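/* Hedged example of the miss-completion timeout described above; the helper
 * name is hypothetical.  A packet waiting on a re-injection completion is
 * given up on once jiffies passes timeout_jiffies.
 */
static inline bool gve_example_miss_compl_timed_out(const struct gve_tx_pending_packet_dqo *pkt)
{
	return pkt->state == GVE_PACKET_STATE_PENDING_REINJECT_COMPL &&
	       time_after(jiffies, pkt->timeout_jiffies);
}
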
397 /* Contains datapath state used to represent a TX queue. */
398 struct gve_tx_ring {
399 	/* Cacheline 0 -- Accessed & dirtied during transmit */
400 	union {
401 		/* GQI fields */
402 		struct {
403 			struct gve_tx_fifo tx_fifo;
404 			u32 req; /* driver tracked head pointer */
405 			u32 done; /* driver tracked tail pointer */
406 		};
407 
408 		/* DQO fields. */
409 		struct {
410 			/* Linked list of gve_tx_pending_packet_dqo. Index into
411 			 * pending_packets, or -1 if empty.
412 			 *
413 			 * This is a consumer list owned by the TX path. When it
414 			 * runs out, the producer list is stolen from the
415 			 * completion handling path
416 			 * (dqo_compl.free_pending_packets).
417 			 */
418 			s16 free_pending_packets;
419 
420 			/* Cached value of `dqo_compl.hw_tx_head` */
421 			u32 head;
422 			u32 tail; /* Last posted buffer index + 1 */
423 
424 			/* Index of the last descriptor with "report event" bit
425 			 * set.
426 			 */
427 			u32 last_re_idx;
428 
429 			/* free running number of packet buf descriptors posted */
430 			u16 posted_packet_desc_cnt;
431 			/* free running number of packet buf descriptors completed */
432 			u16 completed_packet_desc_cnt;
433 
434 			/* QPL fields */
435 			struct {
436 			       /* Linked list of gve_tx_buf_dqo. Index into
437 				* tx_qpl_buf_next, or -1 if empty.
438 				*
439 				* This is a consumer list owned by the TX path. When it
440 				* runs out, the producer list is stolen from the
441 				* completion handling path
442 				* (dqo_compl.free_tx_qpl_buf_head).
443 				*/
444 				s16 free_tx_qpl_buf_head;
445 
446 			       /* Free running count of the number of QPL tx buffers
447 				* allocated
448 				*/
449 				u32 alloc_tx_qpl_buf_cnt;
450 
451 				/* Cached value of `dqo_compl.free_tx_qpl_buf_cnt` */
452 				u32 free_tx_qpl_buf_cnt;
453 			};
454 		} dqo_tx;
455 	};
456 
457 	/* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */
458 	union {
459 		/* GQI fields */
460 		struct {
461 			/* Spinlock for when cleanup is in progress */
462 			spinlock_t clean_lock;
463 			/* Spinlock for XDP tx traffic */
464 			spinlock_t xdp_lock;
465 		};
466 
467 		/* DQO fields. */
468 		struct {
469 			u32 head; /* Last read on compl_desc */
470 
471 			/* Tracks the current gen bit of compl_q */
472 			u8 cur_gen_bit;
473 
474 			/* Linked list of gve_tx_pending_packet_dqo. Index into
475 			 * pending_packets, or -1 if empty.
476 			 *
477 			 * This is the producer list, owned by the completion
478 			 * handling path. When the consumer list
479 			 * (dqo_tx.free_pending_packets) runs out, this list
480 			 * will be stolen.
481 			 */
482 			atomic_t free_pending_packets;
483 
484 			/* Last TX ring index fetched by HW */
485 			atomic_t hw_tx_head;
486 
487 			/* List to track pending packets which received a miss
488 			 * completion but not a corresponding reinjection.
489 			 */
490 			struct gve_index_list miss_completions;
491 
492 			/* List to track pending packets that were treated as
493 			 * completed because they reached the specified timeout
494 			 * before receiving a valid completion.
495 			 */
496 			struct gve_index_list timed_out_completions;
497 
498 			/* QPL fields */
499 			struct {
500 				/* Linked list of gve_tx_buf_dqo. Index into
501 				 * tx_qpl_buf_next, or -1 if empty.
502 				 *
503 				 * This is the producer list, owned by the completion
504 				 * handling path. When the consumer list
505 				 * (dqo_tx.free_tx_qpl_buf_head) runs out, this list
506 				 * will be stolen.
507 				 */
508 				atomic_t free_tx_qpl_buf_head;
509 
510 				/* Free running count of the number of tx buffers
511 				 * freed
512 				 */
513 				atomic_t free_tx_qpl_buf_cnt;
514 			};
515 		} dqo_compl;
516 	} ____cacheline_aligned;
517 	u64 pkt_done; /* free-running - total packets completed */
518 	u64 bytes_done; /* free-running - total bytes completed */
519 	u64 dropped_pkt; /* free-running - total packets dropped */
520 	u64 dma_mapping_error; /* count of dma mapping errors */
521 
522 	/* Cacheline 2 -- Read-mostly fields */
523 	union {
524 		/* GQI fields */
525 		struct {
526 			union gve_tx_desc *desc;
527 
528 			/* Maps 1:1 to a desc */
529 			struct gve_tx_buffer_state *info;
530 		};
531 
532 		/* DQO fields. */
533 		struct {
534 			union gve_tx_desc_dqo *tx_ring;
535 			struct gve_tx_compl_desc *compl_ring;
536 
537 			struct gve_tx_pending_packet_dqo *pending_packets;
538 			s16 num_pending_packets;
539 
540 			u32 complq_mask; /* complq size is complq_mask + 1 */
541 
542 			/* QPL fields */
543 			struct {
544 				/* qpl assigned to this queue */
545 				struct gve_queue_page_list *qpl;
546 
547 				/* Each QPL page is divided into TX bounce buffers
548 				 * of size GVE_TX_BUF_SIZE_DQO. tx_qpl_buf_next is
549 				 * an array to manage linked lists of TX buffers.
550 				 * An entry j at index i implies that the j'th
551 				 * buffer is next on the list after the i'th.
552 				 */
553 				s16 *tx_qpl_buf_next;
554 				u32 num_tx_qpl_bufs;
555 			};
556 		} dqo;
557 	} ____cacheline_aligned;
558 	struct netdev_queue *netdev_txq;
559 	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
560 	struct device *dev;
561 	u32 mask; /* masks req and done down to queue size */
562 	u8 raw_addressing; /* use raw_addressing? */
563 
564 	/* Slow-path fields */
565 	u32 q_num ____cacheline_aligned; /* queue idx */
566 	u32 stop_queue; /* count of queue stops */
567 	u32 wake_queue; /* count of queue wakes */
568 	u32 queue_timeout; /* count of queue timeouts */
569 	u32 ntfy_id; /* notification block index */
570 	u32 last_kick_msec; /* Last time the queue was kicked */
571 	dma_addr_t bus; /* dma address of the descr ring */
572 	dma_addr_t q_resources_bus; /* dma address of the queue resources */
573 	dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
574 	struct u64_stats_sync statss; /* sync stats for 32bit archs */
575 	struct xsk_buff_pool *xsk_pool;
576 	u32 xdp_xsk_wakeup;
577 	u32 xdp_xsk_done;
578 	u64 xdp_xsk_sent;
579 	u64 xdp_xmit;
580 	u64 xdp_xmit_errors;
581 } ____cacheline_aligned;
582 
583 /* Wraps the info for one irq including the napi struct and the queues
584  * associated with that irq.
585  */
586 struct gve_notify_block {
587 	__be32 *irq_db_index; /* pointer to idx into Bar2 */
588 	char name[IFNAMSIZ + 16]; /* name registered with the kernel */
589 	struct napi_struct napi; /* kernel napi struct for this block */
590 	struct gve_priv *priv;
591 	struct gve_tx_ring *tx; /* tx rings on this block */
592 	struct gve_rx_ring *rx; /* rx rings on this block */
593 };
594 
595 /* Tracks allowed and current queue settings */
596 struct gve_queue_config {
597 	u16 max_queues;
598 	u16 num_queues; /* current */
599 };
600 
601 /* Tracks the available and used qpl IDs */
602 struct gve_qpl_config {
603 	u32 qpl_map_size; /* map memory size */
604 	unsigned long *qpl_id_map; /* bitmap of used qpl ids */
605 };
606 
607 struct gve_options_dqo_rda {
608 	u16 tx_comp_ring_entries; /* number of tx_comp descriptors */
609 	u16 rx_buff_ring_entries; /* number of rx_buff descriptors */
610 };
611 
612 struct gve_irq_db {
613 	__be32 index;
614 } ____cacheline_aligned;
615 
616 struct gve_ptype {
617 	u8 l3_type;  /* `gve_l3_type` in gve_adminq.h */
618 	u8 l4_type;  /* `gve_l4_type` in gve_adminq.h */
619 };
620 
621 struct gve_ptype_lut {
622 	struct gve_ptype ptypes[GVE_NUM_PTYPES];
623 };
624 
625 /* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
626  * when the entire configure_device_resources command is zeroed out and the
627  * queue_format is not specified.
628  */
629 enum gve_queue_format {
630 	GVE_QUEUE_FORMAT_UNSPECIFIED	= 0x0,
631 	GVE_GQI_RDA_FORMAT		= 0x1,
632 	GVE_GQI_QPL_FORMAT		= 0x2,
633 	GVE_DQO_RDA_FORMAT		= 0x3,
634 	GVE_DQO_QPL_FORMAT		= 0x4,
635 };
636 
637 struct gve_priv {
638 	struct net_device *dev;
639 	struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
640 	struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
641 	struct gve_queue_page_list *qpls; /* array of num qpls */
642 	struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
643 	struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */
644 	dma_addr_t irq_db_indices_bus;
645 	struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */
646 	char mgmt_msix_name[IFNAMSIZ + 16];
647 	u32 mgmt_msix_idx;
648 	__be32 *counter_array; /* array of num_event_counters */
649 	dma_addr_t counter_array_bus;
650 
651 	u16 num_event_counters;
652 	u16 tx_desc_cnt; /* num desc per ring */
653 	u16 rx_desc_cnt; /* num desc per ring */
654 	u16 tx_pages_per_qpl; /* NIC-suggested number of pages per QPL for TX queues */
655 	u16 rx_pages_per_qpl; /* NIC-suggested number of pages per QPL for RX queues */
656 	u16 rx_data_slot_cnt; /* num slots in the rx data ring */
657 	u64 max_registered_pages;
658 	u64 num_registered_pages; /* num pages registered with NIC */
659 	struct bpf_prog *xdp_prog; /* XDP BPF program */
660 	u32 rx_copybreak; /* copy packets smaller than this */
661 	u16 default_num_queues; /* default num queues to set up */
662 
663 	u16 num_xdp_queues;
664 	struct gve_queue_config tx_cfg;
665 	struct gve_queue_config rx_cfg;
666 	struct gve_qpl_config qpl_cfg; /* map used QPL ids */
667 	u32 num_ntfy_blks; /* split between TX and RX so must be even */
668 
669 	struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
670 	__be32 __iomem *db_bar2; /* "array" of doorbells */
671 	u32 msg_enable;	/* level for netif* netdev print macros	*/
672 	struct pci_dev *pdev;
673 
674 	/* metrics */
675 	u32 tx_timeo_cnt;
676 
677 	/* Admin queue - see gve_adminq.h */
678 	union gve_adminq_command *adminq;
679 	dma_addr_t adminq_bus_addr;
680 	struct dma_pool *adminq_pool;
681 	u32 adminq_mask; /* masks prod_cnt to adminq size */
682 	u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
683 	u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
684 	u32 adminq_timeouts; /* free-running count of AQ cmds timeouts */
685 	/* free-running counts of each AQ cmd executed */
686 	u32 adminq_describe_device_cnt;
687 	u32 adminq_cfg_device_resources_cnt;
688 	u32 adminq_register_page_list_cnt;
689 	u32 adminq_unregister_page_list_cnt;
690 	u32 adminq_create_tx_queue_cnt;
691 	u32 adminq_create_rx_queue_cnt;
692 	u32 adminq_destroy_tx_queue_cnt;
693 	u32 adminq_destroy_rx_queue_cnt;
694 	u32 adminq_dcfg_device_resources_cnt;
695 	u32 adminq_set_driver_parameter_cnt;
696 	u32 adminq_report_stats_cnt;
697 	u32 adminq_report_link_speed_cnt;
698 	u32 adminq_get_ptype_map_cnt;
699 	u32 adminq_verify_driver_compatibility_cnt;
700 
701 	/* Global stats */
702 	u32 interface_up_cnt; /* count of times interface turned up since last reset */
703 	u32 interface_down_cnt; /* count of times interface turned down since last reset */
704 	u32 reset_cnt; /* count of reset */
705 	u32 page_alloc_fail; /* count of page alloc fails */
706 	u32 dma_mapping_error; /* count of dma mapping errors */
707 	u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
708 	u32 suspend_cnt; /* count of times suspended */
709 	u32 resume_cnt; /* count of times resumed */
710 	struct workqueue_struct *gve_wq;
711 	struct work_struct service_task;
712 	struct work_struct stats_report_task;
713 	unsigned long service_task_flags;
714 	unsigned long state_flags;
715 
716 	struct gve_stats_report *stats_report;
717 	u64 stats_report_len;
718 	dma_addr_t stats_report_bus; /* dma address for the stats report */
719 	unsigned long ethtool_flags;
720 
721 	unsigned long stats_report_timer_period;
722 	struct timer_list stats_report_timer;
723 
724 	/* Gvnic device link speed from hypervisor. */
725 	u64 link_speed;
726 	bool up_before_suspend; /* True if dev was up before suspend */
727 
728 	struct gve_options_dqo_rda options_dqo_rda;
729 	struct gve_ptype_lut *ptype_lut_dqo;
730 
731 	/* Must be a power of two. */
732 	int data_buffer_size_dqo;
733 
734 	enum gve_queue_format queue_format;
735 
736 	/* Interrupt coalescing settings */
737 	u32 tx_coalesce_usecs;
738 	u32 rx_coalesce_usecs;
739 };
740 
741 enum gve_service_task_flags_bit {
742 	GVE_PRIV_FLAGS_DO_RESET			= 1,
743 	GVE_PRIV_FLAGS_RESET_IN_PROGRESS	= 2,
744 	GVE_PRIV_FLAGS_PROBE_IN_PROGRESS	= 3,
745 	GVE_PRIV_FLAGS_DO_REPORT_STATS = 4,
746 };
747 
748 enum gve_state_flags_bit {
749 	GVE_PRIV_FLAGS_ADMIN_QUEUE_OK		= 1,
750 	GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK	= 2,
751 	GVE_PRIV_FLAGS_DEVICE_RINGS_OK		= 3,
752 	GVE_PRIV_FLAGS_NAPI_ENABLED		= 4,
753 };
754 
755 enum gve_ethtool_flags_bit {
756 	GVE_PRIV_FLAGS_REPORT_STATS		= 0,
757 };
758 
759 static inline bool gve_get_do_reset(struct gve_priv *priv)
760 {
761 	return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
762 }
763 
764 static inline void gve_set_do_reset(struct gve_priv *priv)
765 {
766 	set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
767 }
768 
769 static inline void gve_clear_do_reset(struct gve_priv *priv)
770 {
771 	clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
772 }
773 
774 static inline bool gve_get_reset_in_progress(struct gve_priv *priv)
775 {
776 	return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS,
777 			&priv->service_task_flags);
778 }
779 
780 static inline void gve_set_reset_in_progress(struct gve_priv *priv)
781 {
782 	set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
783 }
784 
785 static inline void gve_clear_reset_in_progress(struct gve_priv *priv)
786 {
787 	clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
788 }
789 
790 static inline bool gve_get_probe_in_progress(struct gve_priv *priv)
791 {
792 	return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS,
793 			&priv->service_task_flags);
794 }
795 
796 static inline void gve_set_probe_in_progress(struct gve_priv *priv)
797 {
798 	set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
799 }
800 
801 static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
802 {
803 	clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
804 }
805 
806 static inline bool gve_get_do_report_stats(struct gve_priv *priv)
807 {
808 	return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS,
809 			&priv->service_task_flags);
810 }
811 
812 static inline void gve_set_do_report_stats(struct gve_priv *priv)
813 {
814 	set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
815 }
816 
817 static inline void gve_clear_do_report_stats(struct gve_priv *priv)
818 {
819 	clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
820 }
821 
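/* Hedged usage sketch: the service-task flag helpers above are typically
 * paired with the service workqueue owned by gve_priv.  This mirrors the
 * scheduling pattern used by the driver, but the helper name is made up.
 */
static inline void gve_example_request_reset(struct gve_priv *priv)
{
	gve_set_do_reset(priv);
	queue_work(priv->gve_wq, &priv->service_task);
}
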
822 static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
823 {
824 	return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
825 }
826 
827 static inline void gve_set_admin_queue_ok(struct gve_priv *priv)
828 {
829 	set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
830 }
831 
832 static inline void gve_clear_admin_queue_ok(struct gve_priv *priv)
833 {
834 	clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
835 }
836 
837 static inline bool gve_get_device_resources_ok(struct gve_priv *priv)
838 {
839 	return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
840 }
841 
842 static inline void gve_set_device_resources_ok(struct gve_priv *priv)
843 {
844 	set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
845 }
846 
847 static inline void gve_clear_device_resources_ok(struct gve_priv *priv)
848 {
849 	clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
850 }
851 
852 static inline bool gve_get_device_rings_ok(struct gve_priv *priv)
853 {
854 	return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
855 }
856 
857 static inline void gve_set_device_rings_ok(struct gve_priv *priv)
858 {
859 	set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
860 }
861 
862 static inline void gve_clear_device_rings_ok(struct gve_priv *priv)
863 {
864 	clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
865 }
866 
867 static inline bool gve_get_napi_enabled(struct gve_priv *priv)
868 {
869 	return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
870 }
871 
872 static inline void gve_set_napi_enabled(struct gve_priv *priv)
873 {
874 	set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
875 }
876 
877 static inline void gve_clear_napi_enabled(struct gve_priv *priv)
878 {
879 	clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
880 }
881 
882 static inline bool gve_get_report_stats(struct gve_priv *priv)
883 {
884 	return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
885 }
886 
887 static inline void gve_clear_report_stats(struct gve_priv *priv)
888 {
889 	clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
890 }
891 
892 /* Returns the address of the ntfy_block's irq doorbell
893  */
894 static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
895 					       struct gve_notify_block *block)
896 {
897 	return &priv->db_bar2[be32_to_cpu(*block->irq_db_index)];
898 }
899 
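/* Usage sketch (hedged): the value written below is purely illustrative; the
 * real driver writes device-defined ack/mask values to this register.
 * iowrite32be() matches the big-endian doorbell layout.
 */
static inline void gve_example_write_irq_doorbell(struct gve_priv *priv,
						  struct gve_notify_block *block)
{
	iowrite32be(0, gve_irq_doorbell(priv, block));
}
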
900 /* Returns the index into ntfy_blocks of the given tx ring's block
901  */
902 static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
903 {
904 	return queue_idx;
905 }
906 
907 /* Returns the index into ntfy_blocks of the given rx ring's block
908  */
909 static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
910 {
911 	return (priv->num_ntfy_blks / 2) + queue_idx;
912 }
913 
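/* Worked example (illustrative): with num_ntfy_blks == 8, TX queue 2 maps to
 * notify block 2 via gve_tx_idx_to_ntfy(), while RX queue 2 maps to block
 * 8 / 2 + 2 == 6 via gve_rx_idx_to_ntfy().
 */
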
914 static inline bool gve_is_qpl(struct gve_priv *priv)
915 {
916 	return priv->queue_format == GVE_GQI_QPL_FORMAT ||
917 		priv->queue_format == GVE_DQO_QPL_FORMAT;
918 }
919 
920 /* Returns the number of tx queue page lists
921  */
922 static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
923 {
924 	if (!gve_is_qpl(priv))
925 		return 0;
926 
927 	return priv->tx_cfg.num_queues + priv->num_xdp_queues;
928 }
929 
930 /* Returns the number of XDP tx queue page lists
931  */
932 static inline u32 gve_num_xdp_qpls(struct gve_priv *priv)
933 {
934 	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
935 		return 0;
936 
937 	return priv->num_xdp_queues;
938 }
939 
940 /* Returns the number of rx queue page lists
941  */
942 static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
943 {
944 	if (!gve_is_qpl(priv))
945 		return 0;
946 
947 	return priv->rx_cfg.num_queues;
948 }
949 
950 static inline u32 gve_tx_qpl_id(struct gve_priv *priv, int tx_qid)
951 {
952 	return tx_qid;
953 }
954 
955 static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
956 {
957 	return priv->tx_cfg.max_queues + rx_qid;
958 }
959 
960 static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
961 {
962 	return gve_tx_qpl_id(priv, 0);
963 }
964 
965 static inline u32 gve_rx_start_qpl_id(struct gve_priv *priv)
966 {
967 	return gve_rx_qpl_id(priv, 0);
968 }
969 
970 /* Returns a pointer to the next available tx qpl in the list of qpls
971  */
972 static inline
973 struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv, int tx_qid)
974 {
975 	int id = gve_tx_qpl_id(priv, tx_qid);
976 
977 	/* QPL already in use */
978 	if (test_bit(id, priv->qpl_cfg.qpl_id_map))
979 		return NULL;
980 
981 	set_bit(id, priv->qpl_cfg.qpl_id_map);
982 	return &priv->qpls[id];
983 }
984 
985 /* Returns a pointer to the next available rx qpl in the list of qpls
986  */
987 static inline
988 struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv, int rx_qid)
989 {
990 	int id = gve_rx_qpl_id(priv, rx_qid);
991 
992 	/* QPL already in use */
993 	if (test_bit(id, priv->qpl_cfg.qpl_id_map))
994 		return NULL;
995 
996 	set_bit(id, priv->qpl_cfg.qpl_id_map);
997 	return &priv->qpls[id];
998 }
999 
1000 /* Unassigns the qpl with the given id
1001  */
1002 static inline void gve_unassign_qpl(struct gve_priv *priv, int id)
1003 {
1004 	clear_bit(id, priv->qpl_cfg.qpl_id_map);
1005 }
1006 
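/* Hedged usage sketch for the QPL assignment helpers above; the wrapper name
 * and the -EBUSY error choice are illustrative.
 */
static inline int gve_example_take_tx_qpl(struct gve_priv *priv, int tx_qid,
					  struct gve_queue_page_list **qpl_out)
{
	struct gve_queue_page_list *qpl = gve_assign_tx_qpl(priv, tx_qid);

	if (!qpl)
		return -EBUSY;	/* QPL id already claimed */

	*qpl_out = qpl;
	return 0;
}
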
1007 /* Returns the correct dma direction for tx and rx qpls
1008  */
1009 static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
1010 						      int id)
1011 {
1012 	if (id < gve_rx_start_qpl_id(priv))
1013 		return DMA_TO_DEVICE;
1014 	else
1015 		return DMA_FROM_DEVICE;
1016 }
1017 
1018 static inline bool gve_is_gqi(struct gve_priv *priv)
1019 {
1020 	return priv->queue_format == GVE_GQI_RDA_FORMAT ||
1021 		priv->queue_format == GVE_GQI_QPL_FORMAT;
1022 }
1023 
1024 static inline u32 gve_num_tx_queues(struct gve_priv *priv)
1025 {
1026 	return priv->tx_cfg.num_queues + priv->num_xdp_queues;
1027 }
1028 
1029 static inline u32 gve_xdp_tx_queue_id(struct gve_priv *priv, u32 queue_id)
1030 {
1031 	return priv->tx_cfg.num_queues + queue_id;
1032 }
1033 
1034 static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv)
1035 {
1036 	return gve_xdp_tx_queue_id(priv, 0);
1037 }
1038 
1039 /* buffers */
1040 int gve_alloc_page(struct gve_priv *priv, struct device *dev,
1041 		   struct page **page, dma_addr_t *dma,
1042 		   enum dma_data_direction, gfp_t gfp_flags);
1043 void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
1044 		   enum dma_data_direction);
1045 /* tx handling */
1046 netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
1047 int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
1048 		 u32 flags);
1049 int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
1050 		     void *data, int len, void *frame_p);
1051 void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
1052 bool gve_tx_poll(struct gve_notify_block *block, int budget);
1053 bool gve_xdp_poll(struct gve_notify_block *block, int budget);
1054 int gve_tx_alloc_rings(struct gve_priv *priv, int start_id, int num_rings);
1055 void gve_tx_free_rings_gqi(struct gve_priv *priv, int start_id, int num_rings);
1056 u32 gve_tx_load_event_counter(struct gve_priv *priv,
1057 			      struct gve_tx_ring *tx);
1058 bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
1059 /* rx handling */
1060 void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
1061 int gve_rx_poll(struct gve_notify_block *block, int budget);
1062 bool gve_rx_work_pending(struct gve_rx_ring *rx);
1063 int gve_rx_alloc_rings(struct gve_priv *priv);
1064 void gve_rx_free_rings_gqi(struct gve_priv *priv);
1065 /* Reset */
1066 void gve_schedule_reset(struct gve_priv *priv);
1067 int gve_reset(struct gve_priv *priv, bool attempt_teardown);
1068 int gve_adjust_queues(struct gve_priv *priv,
1069 		      struct gve_queue_config new_rx_config,
1070 		      struct gve_queue_config new_tx_config);
1071 /* report stats handling */
1072 void gve_handle_report_stats(struct gve_priv *priv);
1073 /* exported by ethtool.c */
1074 extern const struct ethtool_ops gve_ethtool_ops;
1075 /* needed by ethtool */
1076 extern char gve_driver_name[];
1077 extern const char gve_version_str[];
1078 #endif /* _GVE_H_ */
1079