1 /* SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 * Google virtual Ethernet (gve) driver
3 *
4 * Copyright (C) 2015-2024 Google LLC
5 */
6
7 #ifndef _GVE_H_
8 #define _GVE_H_
9
10 #include <linux/dma-mapping.h>
11 #include <linux/dmapool.h>
12 #include <linux/ethtool_netlink.h>
13 #include <linux/netdevice.h>
14 #include <linux/pci.h>
15 #include <linux/u64_stats_sync.h>
16 #include <net/page_pool/helpers.h>
17 #include <net/xdp.h>
18
19 #include "gve_desc.h"
20 #include "gve_desc_dqo.h"
21
22 #ifndef PCI_VENDOR_ID_GOOGLE
23 #define PCI_VENDOR_ID_GOOGLE 0x1ae0
24 #endif
25
26 #define PCI_DEV_ID_GVNIC 0x0042
27
28 #define GVE_REGISTER_BAR 0
29 #define GVE_DOORBELL_BAR 2
30
31 /* The driver can allocate up to 2 segments for the header and 2 for the payload. */
32 #define GVE_TX_MAX_IOVEC 4
33 /* 1 for management, 1 for rx, 1 for tx */
34 #define GVE_MIN_MSIX 3
35
36 /* Numbers of gve tx/rx stats in stats report. */
37 #define GVE_TX_STATS_REPORT_NUM 6
38 #define GVE_RX_STATS_REPORT_NUM 2
39
40 /* Interval to schedule a stats report update, 20000ms. */
41 #define GVE_STATS_REPORT_TIMER_PERIOD 20000
42
43 /* Numbers of NIC tx/rx stats in stats report. */
44 #define NIC_TX_STATS_REPORT_NUM 0
45 #define NIC_RX_STATS_REPORT_NUM 4
46
47 #define GVE_ADMINQ_BUFFER_SIZE 4096
48
49 #define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))
50
51 /* PTYPEs are always 10 bits. */
52 #define GVE_NUM_PTYPES 1024
53
54 /* Default minimum ring size */
55 #define GVE_DEFAULT_MIN_TX_RING_SIZE 256
56 #define GVE_DEFAULT_MIN_RX_RING_SIZE 512
57
58 #define GVE_DEFAULT_RX_BUFFER_SIZE 2048
59
60 #define GVE_MAX_RX_BUFFER_SIZE 4096
61
62 #define GVE_XDP_RX_BUFFER_SIZE_DQO 4096
63
64 #define GVE_DEFAULT_RX_BUFFER_OFFSET 2048
65
66 #define GVE_PAGE_POOL_SIZE_MULTIPLIER 4
67
68 #define GVE_FLOW_RULES_CACHE_SIZE \
69 (GVE_ADMINQ_BUFFER_SIZE / sizeof(struct gve_adminq_queried_flow_rule))
70 #define GVE_FLOW_RULE_IDS_CACHE_SIZE \
71 (GVE_ADMINQ_BUFFER_SIZE / sizeof(((struct gve_adminq_queried_flow_rule *)0)->location))
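/* Both cache sizes above pack as many entries as will fit in a single
 * GVE_ADMINQ_BUFFER_SIZE (4096 byte) admin queue buffer; the exact counts
 * depend on the struct layouts in gve_adminq.h (e.g. if location is a __be32,
 * the rule IDs cache holds 4096 / 4 = 1024 entries).
 */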
72
73 #define GVE_RSS_KEY_SIZE 40
74 #define GVE_RSS_INDIR_SIZE 128
75
76 #define GVE_XDP_ACTIONS 5
77
78 #define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182
79
80 #define GVE_DEFAULT_HEADER_BUFFER_SIZE 128
81
82 #define DQO_QPL_DEFAULT_TX_PAGES 512
83
84 /* Maximum TSO size supported on DQO */
85 #define GVE_DQO_TX_MAX 0x3FFFF
86
87 #define GVE_TX_BUF_SHIFT_DQO 11
88
89 /* 2K buffers for DQO-QPL */
90 #define GVE_TX_BUF_SIZE_DQO BIT(GVE_TX_BUF_SHIFT_DQO)
91 #define GVE_TX_BUFS_PER_PAGE_DQO (PAGE_SIZE >> GVE_TX_BUF_SHIFT_DQO)
92 #define GVE_MAX_TX_BUFS_PER_PKT (DIV_ROUND_UP(GVE_DQO_TX_MAX, GVE_TX_BUF_SIZE_DQO))
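/* Worked example, assuming 4 KiB pages: GVE_TX_BUF_SIZE_DQO = 1 << 11 = 2048
 * bytes, so GVE_TX_BUFS_PER_PAGE_DQO = 4096 >> 11 = 2 bounce buffers per QPL
 * page, and GVE_MAX_TX_BUFS_PER_PKT = DIV_ROUND_UP(0x3FFFF, 2048) = 128
 * buffers for a maximum-size TSO packet.
 */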
93
94 /* If the number of free/recyclable buffers is less than this threshold, the
95 * driver allocates and uses a non-QPL page on the DQO QPL receive path to
96 * free up buffers.
97 * The value is big enough to post at least three 64K LRO packets to the NIC via 2K buffers.
98 */
99 #define GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD 96
100
101 /* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
102 struct gve_rx_desc_queue {
103 struct gve_rx_desc *desc_ring; /* the descriptor ring */
104 dma_addr_t bus; /* the bus for the desc_ring */
105 u8 seqno; /* the next expected seqno for this desc */
106 };
107
108 /* The page info for a single slot in the RX data queue */
109 struct gve_rx_slot_page_info {
110 /* netmem is used for DQO RDA mode
111 * page is used in all other modes
112 */
113 union {
114 struct page *page;
115 netmem_ref netmem;
116 };
117 void *page_address;
118 u32 page_offset; /* offset to write to in page */
119 unsigned int buf_size;
120 int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
121 u16 pad; /* adjustment for rx padding */
122 u8 can_flip; /* tracks if the networking stack is using the page */
123 };
124
125 /* A list of pages registered with the device during setup and used by a queue
126 * as buffers
127 */
128 struct gve_queue_page_list {
129 u32 id; /* unique id */
130 u32 num_entries;
131 struct page **pages; /* list of num_entries pages */
132 dma_addr_t *page_buses; /* the dma addrs of the pages */
133 };
134
135 /* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */
136 struct gve_rx_data_queue {
137 union gve_rx_data_slot *data_ring; /* read by NIC */
138 dma_addr_t data_bus; /* dma mapping of the slots */
139 struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
140 struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
141 u8 raw_addressing; /* use raw_addressing? */
142 };
143
144 struct gve_priv;
145
146 /* RX buffer queue for posting buffers to HW.
147 * Each RX (completion) queue has a corresponding buffer queue.
148 */
149 struct gve_rx_buf_queue_dqo {
150 struct gve_rx_desc_dqo *desc_ring;
151 dma_addr_t bus;
152 u32 head; /* Pointer to start cleaning buffers at. */
153 u32 tail; /* Last posted buffer index + 1 */
154 u32 mask; /* Mask for indices to the size of the ring */
155 };
156
157 /* RX completion queue to receive packets from HW. */
158 struct gve_rx_compl_queue_dqo {
159 struct gve_rx_compl_desc_dqo *desc_ring;
160 dma_addr_t bus;
161
162 /* Number of slots which did not have a buffer posted yet. We should not
163 * post more buffers than the queue size to avoid HW overrunning the
164 * queue.
165 */
166 int num_free_slots;
167
168 /* HW uses a "generation bit" to notify SW of new descriptors. When a
169 * descriptor's generation bit is different from the current generation,
170 * that descriptor is ready to be consumed by SW.
171 */
172 u8 cur_gen_bit;
173
174 /* Pointer into desc_ring where the next completion descriptor will be
175 * received.
176 */
177 u32 head;
178 u32 mask; /* Mask for indices to the size of the ring */
179 };
180
181 struct gve_header_buf {
182 u8 *data;
183 dma_addr_t addr;
184 };
185
186 /* Stores state for tracking buffers posted to HW */
187 struct gve_rx_buf_state_dqo {
188 /* The page posted to HW. */
189 struct gve_rx_slot_page_info page_info;
190
191 /* The DMA address corresponding to `page_info`. */
192 dma_addr_t addr;
193
194 /* Last offset into the page when it only had a single reference, at
195 * which point every other offset is free to be reused.
196 */
197 u32 last_single_ref_offset;
198
199 /* Linked list index to next element in the list, or -1 if none */
200 s16 next;
201 };
202
203 /* `head` and `tail` are indices into an array, or -1 if empty. */
204 struct gve_index_list {
205 s16 head;
206 s16 tail;
207 };
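/* Example: the DQO rings below chain their buffer/packet state through these
 * index lists rather than pointers. A list with head == 5 means element 5 of
 * the backing array (e.g. buf_states or pending_packets) comes first, that
 * element's next field names its successor, and -1 terminates the list.
 */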
208
209 /* A single received packet split across multiple buffers may be
210 * reconstructed using the information in this structure.
211 */
212 struct gve_rx_ctx {
213 /* head and tail of skb chain for the current packet or NULL if none */
214 struct sk_buff *skb_head;
215 struct sk_buff *skb_tail;
216 u32 total_size;
217 u8 frag_cnt;
218 bool drop_pkt;
219 };
220
221 struct gve_rx_cnts {
222 u32 ok_pkt_bytes;
223 u16 ok_pkt_cnt;
224 u16 total_pkt_cnt;
225 u16 cont_pkt_cnt;
226 u16 desc_err_pkt_cnt;
227 };
228
229 /* Contains datapath state used to represent an RX queue. */
230 struct gve_rx_ring {
231 struct gve_priv *gve;
232
233 u16 packet_buffer_size; /* Size of buffer posted to NIC */
234 u16 packet_buffer_truesize; /* Total size of RX buffer */
235 u16 rx_headroom;
236
237 union {
238 /* GQI fields */
239 struct {
240 struct gve_rx_desc_queue desc;
241 struct gve_rx_data_queue data;
242
243 /* threshold for posting new buffs and descs */
244 u32 db_threshold;
245
246 u32 qpl_copy_pool_mask;
247 u32 qpl_copy_pool_head;
248 struct gve_rx_slot_page_info *qpl_copy_pool;
249 };
250
251 /* DQO fields. */
252 struct {
253 struct gve_rx_buf_queue_dqo bufq;
254 struct gve_rx_compl_queue_dqo complq;
255
256 struct gve_rx_buf_state_dqo *buf_states;
257 u16 num_buf_states;
258
259 /* Linked list of gve_rx_buf_state_dqo. Index into
260 * buf_states, or -1 if empty.
261 */
262 s16 free_buf_states;
263
264 /* Linked list of gve_rx_buf_state_dqo. Indexes into
265 * buf_states, or -1 if empty.
266 *
267 * This list contains buf_states which are pointing to
268 * valid buffers.
269 *
270 * We use a FIFO here in order to increase the
271 * probability that buffers can be reused by increasing
272 * the time between usages.
273 */
274 struct gve_index_list recycled_buf_states;
275
276 /* Linked list of gve_rx_buf_state_dqo. Indexes into
277 * buf_states, or -1 if empty.
278 *
279 * This list contains buf_states which have buffers
280 * which cannot be reused yet.
281 */
282 struct gve_index_list used_buf_states;
283
284 /* qpl assigned to this queue */
285 struct gve_queue_page_list *qpl;
286
287 /* index into queue page list */
288 u32 next_qpl_page_idx;
289
290 /* track number of used buffers */
291 u16 used_buf_states_cnt;
292
293 /* Address info of the buffers for header-split */
294 struct gve_header_buf hdr_bufs;
295
296 struct page_pool *page_pool;
297 } dqo;
298 };
299
300 u64 rbytes; /* free-running bytes received */
301 u64 rx_hsplit_bytes; /* free-running header bytes received */
302 u64 rpackets; /* free-running packets received */
303 u32 cnt; /* free-running total number of completed packets */
304 u32 fill_cnt; /* free-running total number of descs and buffs posted */
305 u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
306 u64 rx_hsplit_pkt; /* free-running packets with headers split */
307 u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
308 u64 rx_copied_pkt; /* free-running total number of copied packets */
309 u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
310 u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
311 u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
312 /* free-running count of unsplit packets due to header buffer overflow or zero hdr_len */
313 u64 rx_hsplit_unsplit_pkt;
314 u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
315 u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
316 u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */
317 u64 rx_frag_alloc_cnt; /* free-running count of rx page allocations */
318 u64 xdp_tx_errors;
319 u64 xdp_redirect_errors;
320 u64 xdp_alloc_fails;
321 u64 xdp_actions[GVE_XDP_ACTIONS];
322 u32 q_num; /* queue index */
323 u32 ntfy_id; /* notification block index */
324 struct gve_queue_resources *q_resources; /* head and tail pointer idx */
325 dma_addr_t q_resources_bus; /* dma address for the queue resources */
326 struct u64_stats_sync statss; /* sync stats for 32bit archs */
327
328 struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. */
329
330 /* XDP stuff */
331 struct xdp_rxq_info xdp_rxq;
332 struct xdp_rxq_info xsk_rxq;
333 struct xsk_buff_pool *xsk_pool;
334 struct page_frag_cache page_cache; /* Page cache to allocate XDP frames */
335 };
336
337 /* A TX desc ring entry */
338 union gve_tx_desc {
339 struct gve_tx_pkt_desc pkt; /* first desc for a packet */
340 struct gve_tx_mtd_desc mtd; /* optional metadata descriptor */
341 struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
342 };
343
344 /* Tracks the memory in the fifo occupied by a segment of a packet */
345 struct gve_tx_iovec {
346 u32 iov_offset; /* offset into this segment */
347 u32 iov_len; /* length */
348 u32 iov_padding; /* padding associated with this segment */
349 };
350
351 /* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
352 * ring entry but only used for a pkt_desc not a seg_desc
353 */
354 struct gve_tx_buffer_state {
355 union {
356 struct sk_buff *skb; /* skb for this pkt */
357 struct xdp_frame *xdp_frame; /* xdp_frame */
358 };
359 struct {
360 u16 size; /* size of xmitted xdp pkt */
361 u8 is_xsk; /* xsk buff */
362 } xdp;
363 union {
364 struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
365 struct {
366 DEFINE_DMA_UNMAP_ADDR(dma);
367 DEFINE_DMA_UNMAP_LEN(len);
368 };
369 };
370 };
371
372 /* A TX buffer - each queue has one */
373 struct gve_tx_fifo {
374 void *base; /* address of base of FIFO */
375 u32 size; /* total size */
376 atomic_t available; /* how much space is still available */
377 u32 head; /* offset to write at */
378 struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */
379 };
380
381 /* TX descriptor for DQO format */
382 union gve_tx_desc_dqo {
383 struct gve_tx_pkt_desc_dqo pkt;
384 struct gve_tx_tso_context_desc_dqo tso_ctx;
385 struct gve_tx_general_context_desc_dqo general_ctx;
386 };
387
388 enum gve_packet_state {
389 /* Packet is in free list, available to be allocated.
390 * This should always be zero since state is not explicitly initialized.
391 */
392 GVE_PACKET_STATE_UNALLOCATED,
393 /* Packet is expecting a regular data completion or miss completion */
394 GVE_PACKET_STATE_PENDING_DATA_COMPL,
395 /* Packet has received a miss completion and is expecting a
396 * re-injection completion.
397 */
398 GVE_PACKET_STATE_PENDING_REINJECT_COMPL,
399 /* No valid completion received within the specified timeout. */
400 GVE_PACKET_STATE_TIMED_OUT_COMPL,
401 };
402
403 struct gve_tx_pending_packet_dqo {
404 struct sk_buff *skb; /* skb for this packet */
405
406 /* 0th element corresponds to the linear portion of `skb`, should be
407 * unmapped with `dma_unmap_single`.
408 *
409 * All others correspond to `skb`'s frags and should be unmapped with
410 * `dma_unmap_page`.
411 */
412 union {
413 struct {
414 DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
415 DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
416 };
417 s16 tx_qpl_buf_ids[GVE_MAX_TX_BUFS_PER_PKT];
418 };
419
420 u16 num_bufs;
421
422 /* Linked list index to next element in the list, or -1 if none */
423 s16 next;
424
425 /* Linked list index to prev element in the list, or -1 if none.
426 * Used for tracking either outstanding miss completions or prematurely
427 * freed packets.
428 */
429 s16 prev;
430
431 /* Identifies the current state of the packet as defined in
432 * `enum gve_packet_state`.
433 */
434 u8 state;
435
436 /* If packet is an outstanding miss completion, then the packet is
437 * freed if the corresponding re-injection completion is not received
438 * before kernel jiffies exceeds timeout_jiffies.
439 */
440 unsigned long timeout_jiffies;
441 };
442
443 /* Contains datapath state used to represent a TX queue. */
444 struct gve_tx_ring {
445 /* Cacheline 0 -- Accessed & dirtied during transmit */
446 union {
447 /* GQI fields */
448 struct {
449 struct gve_tx_fifo tx_fifo;
450 u32 req; /* driver tracked head pointer */
451 u32 done; /* driver tracked tail pointer */
452 };
453
454 /* DQO fields. */
455 struct {
456 /* Linked list of gve_tx_pending_packet_dqo. Index into
457 * pending_packets, or -1 if empty.
458 *
459 * This is a consumer list owned by the TX path. When it
460 * runs out, the producer list is stolen from the
461 * completion handling path
462 * (dqo_compl.free_pending_packets).
463 */
464 s16 free_pending_packets;
465
466 /* Cached value of `dqo_compl.hw_tx_head` */
467 u32 head;
468 u32 tail; /* Last posted buffer index + 1 */
469
470 /* Index of the last descriptor with "report event" bit
471 * set.
472 */
473 u32 last_re_idx;
474
475 /* free running number of packet buf descriptors posted */
476 u16 posted_packet_desc_cnt;
477 /* free running number of packet buf descriptors completed */
478 u16 completed_packet_desc_cnt;
479
480 /* QPL fields */
481 struct {
482 /* Linked list of gve_tx_buf_dqo. Index into
483 * tx_qpl_buf_next, or -1 if empty.
484 *
485 * This is a consumer list owned by the TX path. When it
486 * runs out, the producer list is stolen from the
487 * completion handling path
488 * (dqo_compl.free_tx_qpl_buf_head).
489 */
490 s16 free_tx_qpl_buf_head;
491
492 /* Free running count of the number of QPL tx buffers
493 * allocated
494 */
495 u32 alloc_tx_qpl_buf_cnt;
496
497 /* Cached value of `dqo_compl.free_tx_qpl_buf_cnt` */
498 u32 free_tx_qpl_buf_cnt;
499 };
500 } dqo_tx;
501 };
502
503 /* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */
504 union {
505 /* GQI fields */
506 struct {
507 /* Spinlock for when cleanup in progress */
508 spinlock_t clean_lock;
509 /* Spinlock for XDP tx traffic */
510 spinlock_t xdp_lock;
511 };
512
513 /* DQO fields. */
514 struct {
515 u32 head; /* Last read on compl_desc */
516
517 /* Tracks the current gen bit of compl_q */
518 u8 cur_gen_bit;
519
520 /* Linked list of gve_tx_pending_packet_dqo. Index into
521 * pending_packets, or -1 if empty.
522 *
523 * This is the producer list, owned by the completion
524 * handling path. When the consumer list
525 * (dqo_tx.free_pending_packets) runs out, this list
526 * will be stolen.
527 */
528 atomic_t free_pending_packets;
529
530 /* Last TX ring index fetched by HW */
531 atomic_t hw_tx_head;
532
533 /* List to track pending packets which received a miss
534 * completion but not a corresponding reinjection.
535 */
536 struct gve_index_list miss_completions;
537
538 /* List to track pending packets that were completed
539 * before receiving a valid completion because they
540 * reached a specified timeout.
541 */
542 struct gve_index_list timed_out_completions;
543
544 /* QPL fields */
545 struct {
546 /* Linked list of gve_tx_buf_dqo. Index into
547 * tx_qpl_buf_next, or -1 if empty.
548 *
549 * This is the producer list, owned by the completion
550 * handling path. When the consumer list
551 * (dqo_tx.free_tx_qpl_buf_head) runs out, this list
552 * will be stolen.
553 */
554 atomic_t free_tx_qpl_buf_head;
555
556 /* Free running count of the number of tx buffers
557 * freed
558 */
559 atomic_t free_tx_qpl_buf_cnt;
560 };
561 } dqo_compl;
562 } ____cacheline_aligned;
563 u64 pkt_done; /* free-running - total packets completed */
564 u64 bytes_done; /* free-running - total bytes completed */
565 u64 dropped_pkt; /* free-running - total packets dropped */
566 u64 dma_mapping_error; /* count of dma mapping errors */
567
568 /* Cacheline 2 -- Read-mostly fields */
569 union {
570 /* GQI fields */
571 struct {
572 union gve_tx_desc *desc;
573
574 /* Maps 1:1 to a desc */
575 struct gve_tx_buffer_state *info;
576 };
577
578 /* DQO fields. */
579 struct {
580 union gve_tx_desc_dqo *tx_ring;
581 struct gve_tx_compl_desc *compl_ring;
582
583 struct gve_tx_pending_packet_dqo *pending_packets;
584 s16 num_pending_packets;
585
586 u32 complq_mask; /* complq size is complq_mask + 1 */
587
588 /* QPL fields */
589 struct {
590 /* qpl assigned to this queue */
591 struct gve_queue_page_list *qpl;
592
593 /* Each QPL page is divided into TX bounce buffers
594 * of size GVE_TX_BUF_SIZE_DQO. tx_qpl_buf_next is
595 * an array to manage linked lists of TX buffers.
596 * An entry j at index i implies that j'th buffer
597 * is next on the list after i
598 */
599 s16 *tx_qpl_buf_next;
600 u32 num_tx_qpl_bufs;
601 };
602 } dqo;
603 } ____cacheline_aligned;
604 struct netdev_queue *netdev_txq;
605 struct gve_queue_resources *q_resources; /* head and tail pointer idx */
606 struct device *dev;
607 u32 mask; /* masks req and done down to queue size */
608 u8 raw_addressing; /* use raw_addressing? */
609
610 /* Slow-path fields */
611 u32 q_num ____cacheline_aligned; /* queue idx */
612 u32 stop_queue; /* count of queue stops */
613 u32 wake_queue; /* count of queue wakes */
614 u32 queue_timeout; /* count of queue timeouts */
615 u32 ntfy_id; /* notification block index */
616 u32 last_kick_msec; /* Last time the queue was kicked */
617 dma_addr_t bus; /* dma address of the descr ring */
618 dma_addr_t q_resources_bus; /* dma address of the queue resources */
619 dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
620 struct u64_stats_sync statss; /* sync stats for 32bit archs */
621 struct xsk_buff_pool *xsk_pool;
622 u64 xdp_xsk_sent;
623 u64 xdp_xmit;
624 u64 xdp_xmit_errors;
625 } ____cacheline_aligned;
626
627 /* Wraps the info for one irq including the napi struct and the queues
628 * associated with that irq.
629 */
630 struct gve_notify_block {
631 __be32 *irq_db_index; /* pointer to idx into Bar2 */
632 char name[IFNAMSIZ + 16]; /* name registered with the kernel */
633 struct napi_struct napi; /* kernel napi struct for this block */
634 struct gve_priv *priv;
635 struct gve_tx_ring *tx; /* tx rings on this block */
636 struct gve_rx_ring *rx; /* rx rings on this block */
637 u32 irq;
638 };
639
640 /* Tracks allowed and current rx queue settings */
641 struct gve_rx_queue_config {
642 u16 max_queues;
643 u16 num_queues;
644 u16 packet_buffer_size;
645 };
646
647 /* Tracks allowed and current tx queue settings */
648 struct gve_tx_queue_config {
649 u16 max_queues;
650 u16 num_queues; /* number of TX queues, excluding XDP queues */
651 u16 num_xdp_queues;
652 };
653
654 /* Tracks the available and used qpl IDs */
655 struct gve_qpl_config {
656 u32 qpl_map_size; /* map memory size */
657 unsigned long *qpl_id_map; /* bitmap of used qpl ids */
658 };
659
660 struct gve_irq_db {
661 __be32 index;
662 } ____cacheline_aligned;
663
664 struct gve_ptype {
665 u8 l3_type; /* `gve_l3_type` in gve_adminq.h */
666 u8 l4_type; /* `gve_l4_type` in gve_adminq.h */
667 };
668
669 struct gve_ptype_lut {
670 struct gve_ptype ptypes[GVE_NUM_PTYPES];
671 };
672
673 /* Parameters for allocating resources for tx queues */
674 struct gve_tx_alloc_rings_cfg {
675 struct gve_tx_queue_config *qcfg;
676
677 u16 num_xdp_rings;
678
679 u16 ring_size;
680 bool raw_addressing;
681
682 /* Allocated resources are returned here */
683 struct gve_tx_ring *tx;
684 };
685
686 /* Parameters for allocating resources for rx queues */
687 struct gve_rx_alloc_rings_cfg {
688 /* tx config is also needed to determine QPL ids */
689 struct gve_rx_queue_config *qcfg_rx;
690 struct gve_tx_queue_config *qcfg_tx;
691
692 u16 ring_size;
693 u16 packet_buffer_size;
694 bool raw_addressing;
695 bool enable_header_split;
696 bool reset_rss;
697 bool xdp;
698
699 /* Allocated resources are returned here */
700 struct gve_rx_ring *rx;
701 };
702
703 /* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
704 * when the entire configure_device_resources command is zeroed out and the
705 * queue_format is not specified.
706 */
707 enum gve_queue_format {
708 GVE_QUEUE_FORMAT_UNSPECIFIED = 0x0,
709 GVE_GQI_RDA_FORMAT = 0x1,
710 GVE_GQI_QPL_FORMAT = 0x2,
711 GVE_DQO_RDA_FORMAT = 0x3,
712 GVE_DQO_QPL_FORMAT = 0x4,
713 };
714
715 struct gve_flow_spec {
716 __be32 src_ip[4];
717 __be32 dst_ip[4];
718 union {
719 struct {
720 __be16 src_port;
721 __be16 dst_port;
722 };
723 __be32 spi;
724 };
725 union {
726 u8 tos;
727 u8 tclass;
728 };
729 };
730
731 struct gve_flow_rule {
732 u32 location;
733 u16 flow_type;
734 u16 action;
735 struct gve_flow_spec key;
736 struct gve_flow_spec mask;
737 };
738
739 struct gve_flow_rules_cache {
740 bool rules_cache_synced; /* False if the driver's rules_cache is outdated */
741 struct gve_adminq_queried_flow_rule *rules_cache;
742 __be32 *rule_ids_cache;
743 /* The total number of queried rules that are stored in the caches */
744 u32 rules_cache_num;
745 u32 rule_ids_cache_num;
746 };
747
748 struct gve_rss_config {
749 u8 *hash_key;
750 u32 *hash_lut;
751 };
752
753 struct gve_priv {
754 struct net_device *dev;
755 struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
756 struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
757 struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
758 struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */
759 dma_addr_t irq_db_indices_bus;
760 struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */
761 char mgmt_msix_name[IFNAMSIZ + 16];
762 u32 mgmt_msix_idx;
763 __be32 *counter_array; /* array of num_event_counters */
764 dma_addr_t counter_array_bus;
765
766 u16 num_event_counters;
767 u16 tx_desc_cnt; /* num desc per ring */
768 u16 rx_desc_cnt; /* num desc per ring */
769 u16 max_tx_desc_cnt;
770 u16 max_rx_desc_cnt;
771 u16 min_tx_desc_cnt;
772 u16 min_rx_desc_cnt;
773 bool modify_ring_size_enabled;
774 bool default_min_ring_size;
775 u16 tx_pages_per_qpl; /* Suggested number of pages per qpl for TX queues by NIC */
776 u64 max_registered_pages;
777 u64 num_registered_pages; /* num pages registered with NIC */
778 struct bpf_prog *xdp_prog; /* XDP BPF program */
779 u32 rx_copybreak; /* copy packets smaller than this */
780 u16 default_num_queues; /* default num queues to set up */
781
782 struct gve_tx_queue_config tx_cfg;
783 struct gve_rx_queue_config rx_cfg;
784 u32 num_ntfy_blks; /* split between TX and RX so must be even */
785
786 struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
787 __be32 __iomem *db_bar2; /* "array" of doorbells */
788 u32 msg_enable; /* level for netif* netdev print macros */
789 struct pci_dev *pdev;
790
791 /* metrics */
792 u32 tx_timeo_cnt;
793
794 /* Admin queue - see gve_adminq.h */
795 union gve_adminq_command *adminq;
796 dma_addr_t adminq_bus_addr;
797 struct dma_pool *adminq_pool;
798 struct mutex adminq_lock; /* Protects adminq command execution */
799 u32 adminq_mask; /* masks prod_cnt to adminq size */
800 u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
801 u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
802 u32 adminq_timeouts; /* free-running count of AQ cmds timeouts */
803 /* free-running count of per AQ cmd executed */
804 u32 adminq_describe_device_cnt;
805 u32 adminq_cfg_device_resources_cnt;
806 u32 adminq_register_page_list_cnt;
807 u32 adminq_unregister_page_list_cnt;
808 u32 adminq_create_tx_queue_cnt;
809 u32 adminq_create_rx_queue_cnt;
810 u32 adminq_destroy_tx_queue_cnt;
811 u32 adminq_destroy_rx_queue_cnt;
812 u32 adminq_dcfg_device_resources_cnt;
813 u32 adminq_set_driver_parameter_cnt;
814 u32 adminq_report_stats_cnt;
815 u32 adminq_report_link_speed_cnt;
816 u32 adminq_get_ptype_map_cnt;
817 u32 adminq_verify_driver_compatibility_cnt;
818 u32 adminq_query_flow_rules_cnt;
819 u32 adminq_cfg_flow_rule_cnt;
820 u32 adminq_cfg_rss_cnt;
821 u32 adminq_query_rss_cnt;
822
823 /* Global stats */
824 u32 interface_up_cnt; /* count of times interface turned up since last reset */
825 u32 interface_down_cnt; /* count of times interface turned down since last reset */
826 u32 reset_cnt; /* count of reset */
827 u32 page_alloc_fail; /* count of page alloc fails */
828 u32 dma_mapping_error; /* count of dma mapping errors */
829 u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
830 u32 suspend_cnt; /* count of times suspended */
831 u32 resume_cnt; /* count of times resumed */
832 struct workqueue_struct *gve_wq;
833 struct work_struct service_task;
834 struct work_struct stats_report_task;
835 unsigned long service_task_flags;
836 unsigned long state_flags;
837
838 struct gve_stats_report *stats_report;
839 u64 stats_report_len;
840 dma_addr_t stats_report_bus; /* dma address for the stats report */
841 unsigned long ethtool_flags;
842
843 unsigned long stats_report_timer_period;
844 struct timer_list stats_report_timer;
845
846 /* Gvnic device link speed from hypervisor. */
847 u64 link_speed;
848 bool up_before_suspend; /* True if dev was up before suspend */
849
850 struct gve_ptype_lut *ptype_lut_dqo;
851
852 /* Must be a power of two. */
853 u16 max_rx_buffer_size; /* device limit */
854
855 enum gve_queue_format queue_format;
856
857 /* Interrupt coalescing settings */
858 u32 tx_coalesce_usecs;
859 u32 rx_coalesce_usecs;
860
861 u16 header_buf_size; /* device configured, header-split supported if non-zero */
862 bool header_split_enabled; /* True if the header split is enabled by the user */
863
864 u32 max_flow_rules;
865 u32 num_flow_rules;
866
867 struct gve_flow_rules_cache flow_rules_cache;
868
869 u16 rss_key_size;
870 u16 rss_lut_size;
871 bool cache_rss_config;
872 struct gve_rss_config rss_config;
873 };
874
875 enum gve_service_task_flags_bit {
876 GVE_PRIV_FLAGS_DO_RESET = 1,
877 GVE_PRIV_FLAGS_RESET_IN_PROGRESS = 2,
878 GVE_PRIV_FLAGS_PROBE_IN_PROGRESS = 3,
879 GVE_PRIV_FLAGS_DO_REPORT_STATS = 4,
880 };
881
882 enum gve_state_flags_bit {
883 GVE_PRIV_FLAGS_ADMIN_QUEUE_OK = 1,
884 GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK = 2,
885 GVE_PRIV_FLAGS_DEVICE_RINGS_OK = 3,
886 GVE_PRIV_FLAGS_NAPI_ENABLED = 4,
887 };
888
889 enum gve_ethtool_flags_bit {
890 GVE_PRIV_FLAGS_REPORT_STATS = 0,
891 };
892
893 static inline bool gve_get_do_reset(struct gve_priv *priv)
894 {
895 return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
896 }
897
898 static inline void gve_set_do_reset(struct gve_priv *priv)
899 {
900 set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
901 }
902
903 static inline void gve_clear_do_reset(struct gve_priv *priv)
904 {
905 clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
906 }
907
908 static inline bool gve_get_reset_in_progress(struct gve_priv *priv)
909 {
910 return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS,
911 &priv->service_task_flags);
912 }
913
914 static inline void gve_set_reset_in_progress(struct gve_priv *priv)
915 {
916 set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
917 }
918
919 static inline void gve_clear_reset_in_progress(struct gve_priv *priv)
920 {
921 clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
922 }
923
924 static inline bool gve_get_probe_in_progress(struct gve_priv *priv)
925 {
926 return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS,
927 &priv->service_task_flags);
928 }
929
930 static inline void gve_set_probe_in_progress(struct gve_priv *priv)
931 {
932 set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
933 }
934
935 static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
936 {
937 clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
938 }
939
940 static inline bool gve_get_do_report_stats(struct gve_priv *priv)
941 {
942 return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS,
943 &priv->service_task_flags);
944 }
945
946 static inline void gve_set_do_report_stats(struct gve_priv *priv)
947 {
948 set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
949 }
950
951 static inline void gve_clear_do_report_stats(struct gve_priv *priv)
952 {
953 clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
954 }
955
956 static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
957 {
958 return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
959 }
960
961 static inline void gve_set_admin_queue_ok(struct gve_priv *priv)
962 {
963 set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
964 }
965
966 static inline void gve_clear_admin_queue_ok(struct gve_priv *priv)
967 {
968 clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
969 }
970
971 static inline bool gve_get_device_resources_ok(struct gve_priv *priv)
972 {
973 return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
974 }
975
976 static inline void gve_set_device_resources_ok(struct gve_priv *priv)
977 {
978 set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
979 }
980
981 static inline void gve_clear_device_resources_ok(struct gve_priv *priv)
982 {
983 clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
984 }
985
986 static inline bool gve_get_device_rings_ok(struct gve_priv *priv)
987 {
988 return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
989 }
990
991 static inline void gve_set_device_rings_ok(struct gve_priv *priv)
992 {
993 set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
994 }
995
996 static inline void gve_clear_device_rings_ok(struct gve_priv *priv)
997 {
998 clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
999 }
1000
1001 static inline bool gve_get_napi_enabled(struct gve_priv *priv)
1002 {
1003 return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
1004 }
1005
1006 static inline void gve_set_napi_enabled(struct gve_priv *priv)
1007 {
1008 set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
1009 }
1010
1011 static inline void gve_clear_napi_enabled(struct gve_priv *priv)
1012 {
1013 clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
1014 }
1015
1016 static inline bool gve_get_report_stats(struct gve_priv *priv)
1017 {
1018 return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
1019 }
1020
1021 static inline void gve_clear_report_stats(struct gve_priv *priv)
1022 {
1023 clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
1024 }
1025
1026 /* Returns the address of the ntfy_blocks irq doorbell
1027 */
1028 static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
1029 struct gve_notify_block *block)
1030 {
1031 return &priv->db_bar2[be32_to_cpu(*block->irq_db_index)];
1032 }
1033
1034 /* Returns the index into ntfy_blocks of the given tx ring's block
1035 */
1036 static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
1037 {
1038 return queue_idx;
1039 }
1040
1041 /* Returns the index into ntfy_blocks of the given rx ring's block
1042 */
1043 static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
1044 {
1045 return (priv->num_ntfy_blks / 2) + queue_idx;
1046 }
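/* The notification blocks are split evenly between TX and RX: TX queue i maps
 * to block i and RX queue j maps to block (num_ntfy_blks / 2) + j, so with 16
 * blocks, for example, RX queue 3 uses block 11.
 */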
1047
1048 static inline bool gve_is_qpl(struct gve_priv *priv)
1049 {
1050 return priv->queue_format == GVE_GQI_QPL_FORMAT ||
1051 priv->queue_format == GVE_DQO_QPL_FORMAT;
1052 }
1053
1054 /* Returns the number of tx queue page lists */
1055 static inline u32 gve_num_tx_qpls(const struct gve_tx_queue_config *tx_cfg,
1056 bool is_qpl)
1057 {
1058 if (!is_qpl)
1059 return 0;
1060 return tx_cfg->num_queues + tx_cfg->num_xdp_queues;
1061 }
1062
1063 /* Returns the number of rx queue page lists */
1064 static inline u32 gve_num_rx_qpls(const struct gve_rx_queue_config *rx_cfg,
1065 bool is_qpl)
1066 {
1067 if (!is_qpl)
1068 return 0;
1069 return rx_cfg->num_queues;
1070 }
1071
1072 static inline u32 gve_tx_qpl_id(struct gve_priv *priv, int tx_qid)
1073 {
1074 return tx_qid;
1075 }
1076
1077 static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
1078 {
1079 return priv->tx_cfg.max_queues + rx_qid;
1080 }
1081
1082 static inline u32 gve_get_rx_qpl_id(const struct gve_tx_queue_config *tx_cfg,
1083 int rx_qid)
1084 {
1085 return tx_cfg->max_queues + rx_qid;
1086 }
1087
1088 static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
1089 {
1090 return gve_tx_qpl_id(priv, 0);
1091 }
1092
1093 static inline u32 gve_rx_start_qpl_id(const struct gve_tx_queue_config *tx_cfg)
1094 {
1095 return gve_get_rx_qpl_id(tx_cfg, 0);
1096 }
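/* QPL id layout implied by the helpers above: ids [0, tx_cfg.max_queues) are
 * TX QPLs and ids starting at tx_cfg.max_queues are RX QPLs; gve_qpl_dma_dir()
 * below uses that boundary to pick DMA_TO_DEVICE vs DMA_FROM_DEVICE.
 */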
1097
1098 static inline u32 gve_get_rx_pages_per_qpl_dqo(u32 rx_desc_cnt)
1099 {
1100 /* For DQO, page count should be more than ring size for
1101 * out-of-order completions. Set it to twice the ring size.
1102 */
1103 return 2 * rx_desc_cnt;
1104 }
1105
1106 /* Returns the correct dma direction for tx and rx qpls */
1107 static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
1108 int id)
1109 {
1110 if (id < gve_rx_start_qpl_id(&priv->tx_cfg))
1111 return DMA_TO_DEVICE;
1112 else
1113 return DMA_FROM_DEVICE;
1114 }
1115
1116 static inline bool gve_is_gqi(struct gve_priv *priv)
1117 {
1118 return priv->queue_format == GVE_GQI_RDA_FORMAT ||
1119 priv->queue_format == GVE_GQI_QPL_FORMAT;
1120 }
1121
1122 static inline u32 gve_num_tx_queues(struct gve_priv *priv)
1123 {
1124 return priv->tx_cfg.num_queues + priv->tx_cfg.num_xdp_queues;
1125 }
1126
1127 static inline u32 gve_xdp_tx_queue_id(struct gve_priv *priv, u32 queue_id)
1128 {
1129 return priv->tx_cfg.num_queues + queue_id;
1130 }
1131
1132 static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv)
1133 {
1134 return gve_xdp_tx_queue_id(priv, 0);
1135 }
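/* TX queue numbering: regular TX queues occupy indices [0, num_queues) and
 * XDP TX queues follow at [num_queues, num_queues + num_xdp_queues), which is
 * why gve_num_tx_queues() above is simply the sum of the two counts.
 */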
1136
1137 static inline bool gve_supports_xdp_xmit(struct gve_priv *priv)
1138 {
1139 switch (priv->queue_format) {
1140 case GVE_GQI_QPL_FORMAT:
1141 return true;
1142 default:
1143 return false;
1144 }
1145 }
1146
1147 /* gqi napi handler defined in gve_main.c */
1148 int gve_napi_poll(struct napi_struct *napi, int budget);
1149
1150 /* buffers */
1151 int gve_alloc_page(struct gve_priv *priv, struct device *dev,
1152 struct page **page, dma_addr_t *dma,
1153 enum dma_data_direction, gfp_t gfp_flags);
1154 void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
1155 enum dma_data_direction);
1156 /* qpls */
1157 struct gve_queue_page_list *gve_alloc_queue_page_list(struct gve_priv *priv,
1158 u32 id, int pages);
1159 void gve_free_queue_page_list(struct gve_priv *priv,
1160 struct gve_queue_page_list *qpl,
1161 u32 id);
1162 /* tx handling */
1163 netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
1164 int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
1165 u32 flags);
1166 int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
1167 void *data, int len, void *frame_p);
1168 void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
1169 bool gve_tx_poll(struct gve_notify_block *block, int budget);
1170 bool gve_xdp_poll(struct gve_notify_block *block, int budget);
1171 int gve_xsk_tx_poll(struct gve_notify_block *block, int budget);
1172 int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
1173 struct gve_tx_alloc_rings_cfg *cfg);
1174 void gve_tx_free_rings_gqi(struct gve_priv *priv,
1175 struct gve_tx_alloc_rings_cfg *cfg);
1176 void gve_tx_start_ring_gqi(struct gve_priv *priv, int idx);
1177 void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx);
1178 u32 gve_tx_load_event_counter(struct gve_priv *priv,
1179 struct gve_tx_ring *tx);
1180 bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
1181 /* rx handling */
1182 void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
1183 int gve_rx_poll(struct gve_notify_block *block, int budget);
1184 bool gve_rx_work_pending(struct gve_rx_ring *rx);
1185 int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
1186 struct gve_rx_alloc_rings_cfg *cfg,
1187 struct gve_rx_ring *rx,
1188 int idx);
1189 void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
1190 struct gve_rx_alloc_rings_cfg *cfg);
1191 int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
1192 struct gve_rx_alloc_rings_cfg *cfg);
1193 void gve_rx_free_rings_gqi(struct gve_priv *priv,
1194 struct gve_rx_alloc_rings_cfg *cfg);
1195 void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx);
1196 void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx);
1197 u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hplit);
1198 bool gve_header_split_supported(const struct gve_priv *priv);
1199 int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split);
1200 /* rx buffer handling */
1201 int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs);
1202 void gve_free_page_dqo(struct gve_priv *priv, struct gve_rx_buf_state_dqo *bs,
1203 bool free_page);
1204 struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx);
1205 bool gve_buf_state_is_allocated(struct gve_rx_ring *rx,
1206 struct gve_rx_buf_state_dqo *buf_state);
1207 void gve_free_buf_state(struct gve_rx_ring *rx,
1208 struct gve_rx_buf_state_dqo *buf_state);
1209 struct gve_rx_buf_state_dqo *gve_dequeue_buf_state(struct gve_rx_ring *rx,
1210 struct gve_index_list *list);
1211 void gve_enqueue_buf_state(struct gve_rx_ring *rx, struct gve_index_list *list,
1212 struct gve_rx_buf_state_dqo *buf_state);
1213 struct gve_rx_buf_state_dqo *gve_get_recycled_buf_state(struct gve_rx_ring *rx);
1214 void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
1215 struct gve_rx_buf_state_dqo *buf_state);
1216 void gve_free_to_page_pool(struct gve_rx_ring *rx,
1217 struct gve_rx_buf_state_dqo *buf_state,
1218 bool allow_direct);
1219 int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx,
1220 struct gve_rx_buf_state_dqo *buf_state);
1221 void gve_free_qpl_page_dqo(struct gve_rx_buf_state_dqo *buf_state);
1222 void gve_reuse_buffer(struct gve_rx_ring *rx,
1223 struct gve_rx_buf_state_dqo *buf_state);
1224 void gve_free_buffer(struct gve_rx_ring *rx,
1225 struct gve_rx_buf_state_dqo *buf_state);
1226 int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc);
1227 struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
1228 struct gve_rx_ring *rx,
1229 bool xdp);
1230
1231 /* Reset */
1232 void gve_schedule_reset(struct gve_priv *priv);
1233 int gve_reset(struct gve_priv *priv, bool attempt_teardown);
1234 void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
1235 struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
1236 struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
1237 int gve_adjust_config(struct gve_priv *priv,
1238 struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
1239 struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
1240 int gve_adjust_queues(struct gve_priv *priv,
1241 struct gve_rx_queue_config new_rx_config,
1242 struct gve_tx_queue_config new_tx_config,
1243 bool reset_rss);
1244 /* flow steering rule */
1245 int gve_get_flow_rule_entry(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
1246 int gve_get_flow_rule_ids(struct gve_priv *priv, struct ethtool_rxnfc *cmd, u32 *rule_locs);
1247 int gve_add_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
1248 int gve_del_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
1249 int gve_flow_rules_reset(struct gve_priv *priv);
1250 /* RSS config */
1251 int gve_init_rss_config(struct gve_priv *priv, u16 num_queues);
1252 /* report stats handling */
1253 void gve_handle_report_stats(struct gve_priv *priv);
1254 /* exported by ethtool.c */
1255 extern const struct ethtool_ops gve_ethtool_ops;
1256 /* needed by ethtool */
1257 extern char gve_driver_name[];
1258 extern const char gve_version_str[];
1259 #endif /* _GVE_H_ */
1260