/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
 * Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2024 Google LLC
 */

#ifndef _GVE_H_
#define _GVE_H_

#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/ethtool_netlink.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/pci.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/u64_stats_sync.h>
#include <net/page_pool/helpers.h>
#include <net/xdp.h>

#include "gve_desc.h"
#include "gve_desc_dqo.h"

#ifndef PCI_VENDOR_ID_GOOGLE
#define PCI_VENDOR_ID_GOOGLE 0x1ae0
#endif

#define PCI_DEV_ID_GVNIC 0x0042

#define GVE_REGISTER_BAR 0
#define GVE_DOORBELL_BAR 2

/* Driver can alloc up to 2 segments for the header and 2 for the payload. */
#define GVE_TX_MAX_IOVEC 4
/* 1 for management, 1 for rx, 1 for tx */
#define GVE_MIN_MSIX 3

/* Number of gve tx/rx stats in stats report. */
#define GVE_TX_STATS_REPORT_NUM 6
#define GVE_RX_STATS_REPORT_NUM 2

/* Interval to schedule a stats report update, 20000ms. */
#define GVE_STATS_REPORT_TIMER_PERIOD 20000

/* Number of NIC tx/rx stats in stats report. */
#define NIC_TX_STATS_REPORT_NUM 0
#define NIC_RX_STATS_REPORT_NUM 4

#define GVE_ADMINQ_BUFFER_SIZE 4096

#define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))

/* PTYPEs are always 10 bits. */
#define GVE_NUM_PTYPES 1024

/* Default minimum ring size */
#define GVE_DEFAULT_MIN_TX_RING_SIZE 256
#define GVE_DEFAULT_MIN_RX_RING_SIZE 512

#define GVE_DEFAULT_RX_BUFFER_SIZE 2048

#define GVE_MAX_RX_BUFFER_SIZE 4096

#define GVE_XDP_RX_BUFFER_SIZE_DQO 4096

#define GVE_DEFAULT_RX_BUFFER_OFFSET 2048

#define GVE_PAGE_POOL_SIZE_MULTIPLIER 4

#define GVE_FLOW_RULES_CACHE_SIZE \
	(GVE_ADMINQ_BUFFER_SIZE / sizeof(struct gve_adminq_queried_flow_rule))
#define GVE_FLOW_RULE_IDS_CACHE_SIZE \
	(GVE_ADMINQ_BUFFER_SIZE / sizeof(((struct gve_adminq_queried_flow_rule *)0)->location))
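
/* Note on the sizing idiom above: each cache size divides one
 * GVE_ADMINQ_BUFFER_SIZE (4096-byte) adminq buffer by the size of a cached
 * element. The ((struct gve_adminq_queried_flow_rule *)0)->location
 * expression is the usual "sizeof of a member without an instance" trick;
 * sizeof() only inspects the member's type and never dereferences the null
 * pointer.
 */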

#define GVE_RSS_KEY_SIZE 40
#define GVE_RSS_INDIR_SIZE 128

#define GVE_XDP_ACTIONS 5

#define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182

#define GVE_DEFAULT_HEADER_BUFFER_SIZE 128

#define DQO_QPL_DEFAULT_TX_PAGES 512

/* Maximum TSO size supported on DQO */
#define GVE_DQO_TX_MAX 0x3FFFF

#define GVE_TX_BUF_SHIFT_DQO 11

/* 2K buffers for DQO-QPL */
#define GVE_TX_BUF_SIZE_DQO BIT(GVE_TX_BUF_SHIFT_DQO)
#define GVE_TX_BUFS_PER_PAGE_DQO (PAGE_SIZE >> GVE_TX_BUF_SHIFT_DQO)
#define GVE_MAX_TX_BUFS_PER_PKT (DIV_ROUND_UP(GVE_DQO_TX_MAX, GVE_TX_BUF_SIZE_DQO))
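
/* Worked example for the math above, assuming 4K pages:
 * GVE_TX_BUF_SIZE_DQO = BIT(11) = 2048 bytes, so
 * GVE_TX_BUFS_PER_PAGE_DQO = 4096 >> 11 = 2 buffers per page, and
 * GVE_MAX_TX_BUFS_PER_PKT = DIV_ROUND_UP(0x3FFFF, 2048) = 128 buffers
 * for a maximally-sized TSO packet.
 */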

/* If the number of free/recyclable buffers is less than this threshold, the
 * driver allocs and uses a non-qpl page on the receive path of DQO QPL to
 * free up buffers.
 * The value is set large enough to post at least 3 64K LRO packets via 2K
 * buffers to the NIC.
 */
#define GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD 96

/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
	struct gve_rx_desc *desc_ring; /* the descriptor ring */
	dma_addr_t bus; /* the bus for the desc_ring */
	u8 seqno; /* the next expected seqno for this desc */
};

/* The page info for a single slot in the RX data queue */
struct gve_rx_slot_page_info {
	/* netmem is used for DQO RDA mode
	 * page is used in all other modes
	 */
	union {
		struct page *page;
		netmem_ref netmem;
	};
	void *page_address;
	u32 page_offset; /* offset to write to in page */
	unsigned int buf_size;
	int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
	u16 pad; /* adjustment for rx padding */
	u8 can_flip; /* tracks if the networking stack is using the page */
};

/* A list of pages registered with the device during setup and used by a queue
 * as buffers
 */
struct gve_queue_page_list {
	u32 id; /* unique id */
	u32 num_entries;
	struct page **pages; /* list of num_entries pages */
	dma_addr_t *page_buses; /* the dma addrs of the pages */
};

/* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */
struct gve_rx_data_queue {
	union gve_rx_data_slot *data_ring; /* read by NIC */
	dma_addr_t data_bus; /* dma mapping of the slots */
	struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
	struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
	u8 raw_addressing; /* use raw_addressing? */
};

struct gve_priv;

/* RX buffer queue for posting buffers to HW.
 * Each RX (completion) queue has a corresponding buffer queue.
 */
struct gve_rx_buf_queue_dqo {
	struct gve_rx_desc_dqo *desc_ring;
	dma_addr_t bus;
	u32 head; /* Pointer to start cleaning buffers at. */
	u32 tail; /* Last posted buffer index + 1 */
	u32 mask; /* Mask for indices to the size of the ring */
};
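
/* Note: ring sizes are powers of two, so free-running indices are reduced
 * to ring slots with a mask rather than a modulo. A minimal sketch (usage
 * hypothetical):
 *
 *	desc = &bufq->desc_ring[bufq->tail & bufq->mask];
 */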

/* RX completion queue to receive packets from HW. */
struct gve_rx_compl_queue_dqo {
	struct gve_rx_compl_desc_dqo *desc_ring;
	dma_addr_t bus;

	/* Number of slots which did not have a buffer posted yet. We should not
	 * post more buffers than the queue size to avoid HW overrunning the
	 * queue.
	 */
	int num_free_slots;

	/* HW uses a "generation bit" to notify SW of new descriptors. When a
	 * descriptor's generation bit is different from the current generation,
	 * that descriptor is ready to be consumed by SW.
	 */
	u8 cur_gen_bit;

	/* Pointer into desc_ring where the next completion descriptor will be
	 * received.
	 */
	u32 head;
	u32 mask; /* Mask for indices to the size of the ring */
};
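
/* Illustrative sketch of the generation-bit protocol described above
 * (hypothetical helper names; assumes the completion descriptor exposes
 * its generation flag): SW consumes descriptors while their generation
 * bit differs from cur_gen_bit, and flips cur_gen_bit on every wrap.
 *
 *	while (compl_desc_gen_bit(&complq->desc_ring[complq->head]) !=
 *	       complq->cur_gen_bit) {
 *		process_compl_desc(&complq->desc_ring[complq->head]);
 *		complq->head = (complq->head + 1) & complq->mask;
 *		if (complq->head == 0)
 *			complq->cur_gen_bit ^= 1;
 *	}
 */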

struct gve_header_buf {
	u8 *data;
	dma_addr_t addr;
};

/* Stores state for tracking buffers posted to HW */
struct gve_rx_buf_state_dqo {
	/* The page posted to HW. */
	struct gve_rx_slot_page_info page_info;

	/* XSK buffer */
	struct xdp_buff *xsk_buff;

	/* The DMA address corresponding to `page_info`. */
	dma_addr_t addr;

	/* Last offset into the page when it only had a single reference, at
	 * which point every other offset is free to be reused.
	 */
	u32 last_single_ref_offset;

	/* Linked list index to next element in the list, or -1 if none */
	s16 next;
};

/* `head` and `tail` are indices into an array, or -1 if empty. */
struct gve_index_list {
	s16 head;
	s16 tail;
};
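
/* Illustrative sketch (hypothetical helper, not the driver's API): these
 * lists chain gve_rx_buf_state_dqo entries through their s16 `next`
 * fields instead of pointers. Popping the head, given the backing
 * buf_states array, looks roughly like:
 *
 *	static s16 index_list_pop(struct gve_rx_buf_state_dqo *buf_states,
 *				  struct gve_index_list *list)
 *	{
 *		s16 idx = list->head;
 *
 *		if (idx == -1)
 *			return -1;
 *		list->head = buf_states[idx].next;
 *		if (list->head == -1)
 *			list->tail = -1;
 *		return idx;
 *	}
 *
 * The driver's real entry points for this are gve_enqueue_buf_state() and
 * gve_dequeue_buf_state(), declared near the end of this header.
 */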

/* A single received packet split across multiple buffers may be
 * reconstructed using the information in this structure.
 */
struct gve_rx_ctx {
	/* head and tail of skb chain for the current packet or NULL if none */
	struct sk_buff *skb_head;
	struct sk_buff *skb_tail;
	u32 total_size;
	u8 frag_cnt;
	bool drop_pkt;
};

struct gve_rx_cnts {
	u32 ok_pkt_bytes;
	u16 ok_pkt_cnt;
	u16 total_pkt_cnt;
	u16 cont_pkt_cnt;
	u16 desc_err_pkt_cnt;
};

/* Contains datapath state used to represent an RX queue. */
struct gve_rx_ring {
	struct gve_priv *gve;

	u16 packet_buffer_size; /* Size of buffer posted to NIC */
	u16 packet_buffer_truesize; /* Total size of RX buffer */
	u16 rx_headroom;

	union {
		/* GQI fields */
		struct {
			struct gve_rx_desc_queue desc;
			struct gve_rx_data_queue data;

			/* threshold for posting new buffs and descs */
			u32 db_threshold;

			u32 qpl_copy_pool_mask;
			u32 qpl_copy_pool_head;
			struct gve_rx_slot_page_info *qpl_copy_pool;
		};

		/* DQO fields. */
		struct {
			struct gve_rx_buf_queue_dqo bufq;
			struct gve_rx_compl_queue_dqo complq;

			struct gve_rx_buf_state_dqo *buf_states;
			u16 num_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Index into
			 * buf_states, or -1 if empty.
			 */
			s16 free_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indexes into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which are pointing to
			 * valid buffers.
			 *
			 * We use a FIFO here in order to increase the
			 * probability that buffers can be reused by increasing
			 * the time between usages.
			 */
			struct gve_index_list recycled_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indexes into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which have buffers
			 * which cannot be reused yet.
			 */
			struct gve_index_list used_buf_states;

			/* qpl assigned to this queue */
			struct gve_queue_page_list *qpl;

			/* index into queue page list */
			u32 next_qpl_page_idx;

			/* track number of used buffers */
			u16 used_buf_states_cnt;

			/* Address info of the buffers for header-split */
			struct gve_header_buf hdr_bufs;

			struct page_pool *page_pool;
		} dqo;
	};

	u64 rbytes; /* free-running bytes received */
	u64 rx_hsplit_bytes; /* free-running header bytes received */
	u64 rpackets; /* free-running packets received */
	u32 cnt; /* free-running total number of completed packets */
	u32 fill_cnt; /* free-running total number of descs and buffs posted */
	u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
	u64 rx_hsplit_pkt; /* free-running packets with headers split */
	u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
	u64 rx_copied_pkt; /* free-running total number of copied packets */
	u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
	u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
	u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
	/* free-running count of packets left unsplit because the header buffer
	 * overflowed or hdr_len was 0
	 */
	u64 rx_hsplit_unsplit_pkt;
	u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
	u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
	u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */
	u64 rx_frag_alloc_cnt; /* free-running count of rx page allocations */
	u64 xdp_tx_errors;
	u64 xdp_redirect_errors;
	u64 xdp_alloc_fails;
	u64 xdp_actions[GVE_XDP_ACTIONS];
	u32 q_num; /* queue index */
	u32 ntfy_id; /* notification block index */
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	dma_addr_t q_resources_bus; /* dma address for the queue resources */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */

	struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. */

	/* XDP stuff */
	struct xdp_rxq_info xdp_rxq;
	struct xsk_buff_pool *xsk_pool;
	struct page_frag_cache page_cache; /* Page cache to allocate XDP frames */
};

/* A TX desc ring entry */
union gve_tx_desc {
	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
	struct gve_tx_mtd_desc mtd; /* optional metadata descriptor */
	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
};

/* Tracks the memory in the fifo occupied by a segment of a packet */
struct gve_tx_iovec {
	u32 iov_offset; /* offset into this segment */
	u32 iov_len; /* length */
	u32 iov_padding; /* padding associated with this segment */
};

/* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
 * ring entry but only used for a pkt_desc not a seg_desc
 */
struct gve_tx_buffer_state {
	union {
		struct sk_buff *skb; /* skb for this pkt */
		struct xdp_frame *xdp_frame; /* xdp_frame */
	};
	struct {
		u16 size; /* size of xmitted xdp pkt */
		u8 is_xsk; /* xsk buff */
	} xdp;
	union {
		struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
		struct {
			DEFINE_DMA_UNMAP_ADDR(dma);
			DEFINE_DMA_UNMAP_LEN(len);
		};
	};
};

/* A TX buffer - each queue has one */
struct gve_tx_fifo {
	void *base; /* address of base of FIFO */
	u32 size; /* total size */
	atomic_t available; /* how much space is still available */
	u32 head; /* offset to write at */
	struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */
};

/* TX descriptor for DQO format */
union gve_tx_desc_dqo {
	struct gve_tx_pkt_desc_dqo pkt;
	struct gve_tx_tso_context_desc_dqo tso_ctx;
	struct gve_tx_general_context_desc_dqo general_ctx;
};

enum gve_packet_state {
	/* Packet is in free list, available to be allocated.
	 * This should always be zero since state is not explicitly initialized.
	 */
	GVE_PACKET_STATE_UNALLOCATED,
	/* Packet is expecting a regular data completion or miss completion */
	GVE_PACKET_STATE_PENDING_DATA_COMPL,
	/* Packet has received a miss completion and is expecting a
	 * re-injection completion.
	 */
	GVE_PACKET_STATE_PENDING_REINJECT_COMPL,
	/* No valid completion received within the specified timeout. */
	GVE_PACKET_STATE_TIMED_OUT_COMPL,
	/* XSK pending packet has received a packet/reinjection completion, or
	 * has timed out. At this point, the pending packet can be counted by
	 * xsk_tx_complete and freed.
	 */
	GVE_PACKET_STATE_XSK_COMPLETE,
};

enum gve_tx_pending_packet_dqo_type {
	GVE_TX_PENDING_PACKET_DQO_SKB,
	GVE_TX_PENDING_PACKET_DQO_XDP_FRAME,
	GVE_TX_PENDING_PACKET_DQO_XSK,
};

struct gve_tx_pending_packet_dqo {
	union {
		struct sk_buff *skb;
		struct xdp_frame *xdpf;
	};

	/* 0th element corresponds to the linear portion of `skb`, should be
	 * unmapped with `dma_unmap_single`.
	 *
	 * All others correspond to `skb`'s frags and should be unmapped with
	 * `dma_unmap_page`.
	 */
	union {
		struct {
			DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
			DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
		};
		s16 tx_qpl_buf_ids[GVE_MAX_TX_BUFS_PER_PKT];
	};

	u16 num_bufs;

	/* Linked list index to next element in the list, or -1 if none */
	s16 next;

	/* Linked list index to prev element in the list, or -1 if none.
	 * Used for tracking either outstanding miss completions or prematurely
	 * freed packets.
	 */
	s16 prev;

	/* Identifies the current state of the packet as defined in
	 * `enum gve_packet_state`.
	 */
	u8 state : 3;

	/* gve_tx_pending_packet_dqo_type */
	u8 type : 2;

	/* If packet is an outstanding miss completion, then the packet is
	 * freed if the corresponding re-injection completion is not received
	 * before kernel jiffies exceeds timeout_jiffies.
	 */
	unsigned long timeout_jiffies;
};
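
/* Illustrative sketch of the unmap rule documented in the struct above
 * (hypothetical loop; assumes an SKB-type pending packet mapped with raw
 * addressing, and `dev` is the DMA device): buffer 0 is the linear part,
 * all others are frags.
 *
 *	for (i = 0; i < pkt->num_bufs; i++) {
 *		if (i == 0)
 *			dma_unmap_single(dev, dma_unmap_addr(pkt, dma[i]),
 *					 dma_unmap_len(pkt, len[i]),
 *					 DMA_TO_DEVICE);
 *		else
 *			dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]),
 *				       dma_unmap_len(pkt, len[i]),
 *				       DMA_TO_DEVICE);
 *	}
 */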

/* Contains datapath state used to represent a TX queue. */
struct gve_tx_ring {
	/* Cacheline 0 -- Accessed & dirtied during transmit */
	union {
		/* GQI fields */
		struct {
			struct gve_tx_fifo tx_fifo;
			u32 req; /* driver tracked head pointer */
			u32 done; /* driver tracked tail pointer */
		};

		/* DQO fields. */
		struct {
			/* Spinlock for XDP tx traffic */
			spinlock_t xdp_lock;

			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is a consumer list owned by the TX path. When it
			 * runs out, the producer list is stolen from the
			 * completion handling path
			 * (dqo_compl.free_pending_packets).
			 */
			s16 free_pending_packets;

			/* Cached value of `dqo_compl.hw_tx_head` */
			u32 head;
			u32 tail; /* Last posted buffer index + 1 */

			/* Index of the last descriptor with "report event" bit
			 * set.
			 */
			u32 last_re_idx;

			/* free running number of packet buf descriptors posted */
			u16 posted_packet_desc_cnt;
			/* free running number of packet buf descriptors completed */
			u16 completed_packet_desc_cnt;

			/* QPL fields */
			struct {
				/* Linked list of gve_tx_buf_dqo. Index into
				 * tx_qpl_buf_next, or -1 if empty.
				 *
				 * This is a consumer list owned by the TX path. When it
				 * runs out, the producer list is stolen from the
				 * completion handling path
				 * (dqo_compl.free_tx_qpl_buf_head).
				 */
				s16 free_tx_qpl_buf_head;

				/* Free running count of the number of QPL tx buffers
				 * allocated
				 */
				u32 alloc_tx_qpl_buf_cnt;

				/* Cached value of `dqo_compl.free_tx_qpl_buf_cnt` */
				u32 free_tx_qpl_buf_cnt;
			};

			atomic_t xsk_reorder_queue_tail;
		} dqo_tx;
	};

	/* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */
	union {
		/* GQI fields */
		struct {
			/* Spinlock for when cleanup in progress */
			spinlock_t clean_lock;
			/* Spinlock for XDP tx traffic */
			spinlock_t xdp_lock;
		};

		/* DQO fields. */
		struct {
			u32 head; /* Last read on compl_desc */

			/* Tracks the current gen bit of compl_q */
			u8 cur_gen_bit;

			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is the producer list, owned by the completion
			 * handling path. When the consumer list
			 * (dqo_tx.free_pending_packets) runs out, this list
			 * will be stolen.
			 */
			atomic_t free_pending_packets;

			/* Last TX ring index fetched by HW */
			atomic_t hw_tx_head;

			u16 xsk_reorder_queue_head;
			u16 xsk_reorder_queue_tail;

			/* List to track pending packets which received a miss
			 * completion but not a corresponding reinjection.
			 */
			struct gve_index_list miss_completions;

			/* List to track pending packets that were completed
			 * before receiving a valid completion because they
			 * reached a specified timeout.
			 */
			struct gve_index_list timed_out_completions;

			/* QPL fields */
			struct {
				/* Linked list of gve_tx_buf_dqo. Index into
				 * tx_qpl_buf_next, or -1 if empty.
				 *
				 * This is the producer list, owned by the completion
				 * handling path. When the consumer list
				 * (dqo_tx.free_tx_qpl_buf_head) runs out, this list
				 * will be stolen.
				 */
				atomic_t free_tx_qpl_buf_head;

				/* Free running count of the number of tx buffers
				 * freed
				 */
				atomic_t free_tx_qpl_buf_cnt;
			};
		} dqo_compl;
	} ____cacheline_aligned;
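
	/* Illustrative sketch of the consumer/producer split described in
	 * the two unions above (hypothetical snippet): when the TX path's
	 * consumer free list is empty, the producer list is stolen from the
	 * completion path in a single atomic exchange.
	 *
	 *	s16 idx = tx->dqo_tx.free_pending_packets;
	 *
	 *	if (idx == -1)
	 *		idx = atomic_xchg(&tx->dqo_compl.free_pending_packets,
	 *				  -1);
	 */
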
	u64 pkt_done; /* free-running - total packets completed */
	u64 bytes_done; /* free-running - total bytes completed */
	u64 dropped_pkt; /* free-running - total packets dropped */
	u64 dma_mapping_error; /* count of dma mapping errors */

	/* Cacheline 2 -- Read-mostly fields */
	union {
		/* GQI fields */
		struct {
			union gve_tx_desc *desc;

			/* Maps 1:1 to a desc */
			struct gve_tx_buffer_state *info;
		};

		/* DQO fields. */
		struct {
			union gve_tx_desc_dqo *tx_ring;
			struct gve_tx_compl_desc *compl_ring;

			struct gve_tx_pending_packet_dqo *pending_packets;
			s16 num_pending_packets;

			u16 *xsk_reorder_queue;

			u32 complq_mask; /* complq size is complq_mask + 1 */

			/* QPL fields */
			struct {
				/* qpl assigned to this queue */
				struct gve_queue_page_list *qpl;

				/* Each QPL page is divided into TX bounce buffers
				 * of size GVE_TX_BUF_SIZE_DQO. tx_qpl_buf_next is
				 * an array used to manage linked lists of TX buffers.
				 * An entry j at index i implies that the j'th buffer
				 * is next on the list after the i'th.
				 */
				s16 *tx_qpl_buf_next;
				u32 num_tx_qpl_bufs;
			};
		} dqo;
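
	/* Illustrative sketch (hypothetical snippet): resolving a QPL TX
	 * buffer id from tx_qpl_buf_next to its address, given that each
	 * page holds GVE_TX_BUFS_PER_PAGE_DQO buffers of
	 * GVE_TX_BUF_SIZE_DQO bytes.
	 *
	 *	u32 page_idx = buf_id / GVE_TX_BUFS_PER_PAGE_DQO;
	 *	u32 offset = (buf_id % GVE_TX_BUFS_PER_PAGE_DQO) <<
	 *		     GVE_TX_BUF_SHIFT_DQO;
	 *	void *addr = page_address(tx->dqo.qpl->pages[page_idx]) +
	 *		     offset;
	 */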
	} ____cacheline_aligned;
	struct netdev_queue *netdev_txq;
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	struct device *dev;
	u32 mask; /* masks req and done down to queue size */
	u8 raw_addressing; /* use raw_addressing? */

	/* Slow-path fields */
	u32 q_num ____cacheline_aligned; /* queue idx */
	u32 stop_queue; /* count of queue stops */
	u32 wake_queue; /* count of queue wakes */
	u32 queue_timeout; /* count of queue timeouts */
	u32 ntfy_id; /* notification block index */
	u32 last_kick_msec; /* Last time the queue was kicked */
	dma_addr_t bus; /* dma address of the descr ring */
	dma_addr_t q_resources_bus; /* dma address of the queue resources */
	dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */
	struct xsk_buff_pool *xsk_pool;
	u64 xdp_xsk_sent;
	u64 xdp_xmit;
	u64 xdp_xmit_errors;
} ____cacheline_aligned;

/* Wraps the info for one irq including the napi struct and the queues
 * associated with that irq.
 */
struct gve_notify_block {
	__be32 *irq_db_index; /* pointer to idx into Bar2 */
	char name[IFNAMSIZ + 16]; /* name registered with the kernel */
	struct napi_struct napi; /* kernel napi struct for this block */
	struct gve_priv *priv;
	struct gve_tx_ring *tx; /* tx rings on this block */
	struct gve_rx_ring *rx; /* rx rings on this block */
	u32 irq;
};

/* Tracks allowed and current rx queue settings */
struct gve_rx_queue_config {
	u16 max_queues;
	u16 num_queues;
	u16 packet_buffer_size;
};

/* Tracks allowed and current tx queue settings */
struct gve_tx_queue_config {
	u16 max_queues;
	u16 num_queues; /* number of TX queues, excluding XDP queues */
	u16 num_xdp_queues;
};

/* Tracks the available and used qpl IDs */
struct gve_qpl_config {
	u32 qpl_map_size; /* map memory size */
	unsigned long *qpl_id_map; /* bitmap of used qpl ids */
};

struct gve_irq_db {
	__be32 index;
} ____cacheline_aligned;

struct gve_ptype {
	u8 l3_type; /* `gve_l3_type` in gve_adminq.h */
	u8 l4_type; /* `gve_l4_type` in gve_adminq.h */
};

struct gve_ptype_lut {
	struct gve_ptype ptypes[GVE_NUM_PTYPES];
};

/* Parameters for allocating resources for tx queues */
struct gve_tx_alloc_rings_cfg {
	struct gve_tx_queue_config *qcfg;

	u16 num_xdp_rings;

	u16 ring_size;
	bool raw_addressing;

	/* Allocated resources are returned here */
	struct gve_tx_ring *tx;
};

/* Parameters for allocating resources for rx queues */
struct gve_rx_alloc_rings_cfg {
	/* tx config is also needed to determine QPL ids */
	struct gve_rx_queue_config *qcfg_rx;
	struct gve_tx_queue_config *qcfg_tx;

	u16 ring_size;
	u16 packet_buffer_size;
	bool raw_addressing;
	bool enable_header_split;
	bool reset_rss;
	bool xdp;

	/* Allocated resources are returned here */
	struct gve_rx_ring *rx;
};

/* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
 * when the entire configure_device_resources command is zeroed out and the
 * queue_format is not specified.
 */
enum gve_queue_format {
	GVE_QUEUE_FORMAT_UNSPECIFIED = 0x0,
	GVE_GQI_RDA_FORMAT = 0x1,
	GVE_GQI_QPL_FORMAT = 0x2,
	GVE_DQO_RDA_FORMAT = 0x3,
	GVE_DQO_QPL_FORMAT = 0x4,
};

struct gve_flow_spec {
	__be32 src_ip[4];
	__be32 dst_ip[4];
	union {
		struct {
			__be16 src_port;
			__be16 dst_port;
		};
		__be32 spi;
	};
	union {
		u8 tos;
		u8 tclass;
	};
};

struct gve_flow_rule {
	u32 location;
	u16 flow_type;
	u16 action;
	struct gve_flow_spec key;
	struct gve_flow_spec mask;
};

struct gve_flow_rules_cache {
	bool rules_cache_synced; /* False if the driver's rules_cache is outdated */
	struct gve_adminq_queried_flow_rule *rules_cache;
	__be32 *rule_ids_cache;
	/* The total number of queried rules that are stored in the caches */
	u32 rules_cache_num;
	u32 rule_ids_cache_num;
};

struct gve_rss_config {
	u8 *hash_key;
	u32 *hash_lut;
};

struct gve_ptp {
	struct ptp_clock_info info;
	struct ptp_clock *clock;
	struct gve_priv *priv;
};

struct gve_priv {
	struct net_device *dev;
	struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
	struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
	struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
	struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */
	dma_addr_t irq_db_indices_bus;
	struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */
	char mgmt_msix_name[IFNAMSIZ + 16];
	u32 mgmt_msix_idx;
	__be32 *counter_array; /* array of num_event_counters */
	dma_addr_t counter_array_bus;

	u16 num_event_counters;
	u16 tx_desc_cnt; /* num desc per ring */
	u16 rx_desc_cnt; /* num desc per ring */
	u16 max_tx_desc_cnt;
	u16 max_rx_desc_cnt;
	u16 min_tx_desc_cnt;
	u16 min_rx_desc_cnt;
	bool modify_ring_size_enabled;
	bool default_min_ring_size;
	u16 tx_pages_per_qpl; /* Number of pages per qpl for TX queues, as suggested by the NIC */
	u64 max_registered_pages;
	u64 num_registered_pages; /* num pages registered with NIC */
	struct bpf_prog *xdp_prog; /* XDP BPF program */
	u32 rx_copybreak; /* copy packets smaller than this */
	u16 default_num_queues; /* default num queues to set up */

	struct gve_tx_queue_config tx_cfg;
	struct gve_rx_queue_config rx_cfg;
	unsigned long *xsk_pools; /* bitmap of RX queues with XSK pools */
	u32 num_ntfy_blks; /* split between TX and RX so must be even */
	int numa_node;

	struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
	__be32 __iomem *db_bar2; /* "array" of doorbells */
	u32 msg_enable; /* level for netif* netdev print macros */
	struct pci_dev *pdev;

	/* metrics */
	u32 tx_timeo_cnt;

	/* Admin queue - see gve_adminq.h */
	union gve_adminq_command *adminq;
	dma_addr_t adminq_bus_addr;
	struct dma_pool *adminq_pool;
	struct mutex adminq_lock; /* Protects adminq command execution */
	u32 adminq_mask; /* masks prod_cnt to adminq size */
	u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
	u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
	u32 adminq_timeouts; /* free-running count of AQ cmd timeouts */
	/* free-running count of each AQ cmd executed */
	u32 adminq_describe_device_cnt;
	u32 adminq_cfg_device_resources_cnt;
	u32 adminq_register_page_list_cnt;
	u32 adminq_unregister_page_list_cnt;
	u32 adminq_create_tx_queue_cnt;
	u32 adminq_create_rx_queue_cnt;
	u32 adminq_destroy_tx_queue_cnt;
	u32 adminq_destroy_rx_queue_cnt;
	u32 adminq_dcfg_device_resources_cnt;
	u32 adminq_set_driver_parameter_cnt;
	u32 adminq_report_stats_cnt;
	u32 adminq_report_link_speed_cnt;
	u32 adminq_report_nic_timestamp_cnt;
	u32 adminq_get_ptype_map_cnt;
	u32 adminq_verify_driver_compatibility_cnt;
	u32 adminq_query_flow_rules_cnt;
	u32 adminq_cfg_flow_rule_cnt;
	u32 adminq_cfg_rss_cnt;
	u32 adminq_query_rss_cnt;

	/* Global stats */
	u32 interface_up_cnt; /* count of times interface turned up since last reset */
	u32 interface_down_cnt; /* count of times interface turned down since last reset */
	u32 reset_cnt; /* count of resets */
	u32 page_alloc_fail; /* count of page alloc fails */
	u32 dma_mapping_error; /* count of dma mapping errors */
	u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
	u32 suspend_cnt; /* count of times suspended */
	u32 resume_cnt; /* count of times resumed */
	struct workqueue_struct *gve_wq;
	struct work_struct service_task;
	struct work_struct stats_report_task;
	unsigned long service_task_flags;
	unsigned long state_flags;

	struct gve_stats_report *stats_report;
	u64 stats_report_len;
	dma_addr_t stats_report_bus; /* dma address for the stats report */
	unsigned long ethtool_flags;

	unsigned long stats_report_timer_period;
	struct timer_list stats_report_timer;

	/* Gvnic device link speed from hypervisor. */
	u64 link_speed;
	bool up_before_suspend; /* True if dev was up before suspend */

	struct gve_ptype_lut *ptype_lut_dqo;

	/* Must be a power of two. */
	u16 max_rx_buffer_size; /* device limit */

	enum gve_queue_format queue_format;

	/* Interrupt coalescing settings */
	u32 tx_coalesce_usecs;
	u32 rx_coalesce_usecs;

	u16 header_buf_size; /* device configured, header-split supported if non-zero */
	bool header_split_enabled; /* True if the header split is enabled by the user */

	u32 max_flow_rules;
	u32 num_flow_rules;

	struct gve_flow_rules_cache flow_rules_cache;

	u16 rss_key_size;
	u16 rss_lut_size;
	bool cache_rss_config;
	struct gve_rss_config rss_config;

	/* True if the device supports reading the nic clock */
	bool nic_timestamp_supported;
	struct gve_ptp *ptp;
	struct kernel_hwtstamp_config ts_config;
	struct gve_nic_ts_report *nic_ts_report;
	dma_addr_t nic_ts_report_bus;
	u64 last_sync_nic_counter; /* Clock counter from last NIC TS report */
};

enum gve_service_task_flags_bit {
	GVE_PRIV_FLAGS_DO_RESET = 1,
	GVE_PRIV_FLAGS_RESET_IN_PROGRESS = 2,
	GVE_PRIV_FLAGS_PROBE_IN_PROGRESS = 3,
	GVE_PRIV_FLAGS_DO_REPORT_STATS = 4,
};

enum gve_state_flags_bit {
	GVE_PRIV_FLAGS_ADMIN_QUEUE_OK = 1,
	GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK = 2,
	GVE_PRIV_FLAGS_DEVICE_RINGS_OK = 3,
	GVE_PRIV_FLAGS_NAPI_ENABLED = 4,
};

enum gve_ethtool_flags_bit {
	GVE_PRIV_FLAGS_REPORT_STATS = 0,
};

static inline bool gve_get_do_reset(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_set_do_reset(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_clear_do_reset(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline bool gve_get_reset_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_reset_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_reset_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_probe_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_probe_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_do_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS,
			&priv->service_task_flags);
}

static inline void gve_set_do_report_stats(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline void gve_clear_do_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_set_admin_queue_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_clear_admin_queue_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline bool gve_get_device_resources_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_set_device_resources_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_clear_device_resources_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline bool gve_get_device_rings_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_set_device_rings_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_clear_device_rings_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline bool gve_get_napi_enabled(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_set_napi_enabled(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_clear_napi_enabled(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline bool gve_get_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

static inline void gve_clear_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

/* Returns the address of the ntfy_blocks irq doorbell
 */
static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
					       struct gve_notify_block *block)
{
	return &priv->db_bar2[be32_to_cpu(*block->irq_db_index)];
}

/* Returns the index into ntfy_blocks of the given tx ring's block
 */
static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return queue_idx;
}

/* Returns the index into ntfy_blocks of the given rx ring's block
 */
static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return (priv->num_ntfy_blks / 2) + queue_idx;
}
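
/* Worked example of the split above (numbers hypothetical): with
 * num_ntfy_blks == 16, the first 8 blocks serve TX and the last 8 serve
 * RX, so gve_tx_idx_to_ntfy(priv, 3) == 3 while
 * gve_rx_idx_to_ntfy(priv, 3) == 8 + 3 == 11.
 */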

static inline bool gve_is_qpl(struct gve_priv *priv)
{
	return priv->queue_format == GVE_GQI_QPL_FORMAT ||
		priv->queue_format == GVE_DQO_QPL_FORMAT;
}

/* Returns the number of tx queue page lists */
static inline u32 gve_num_tx_qpls(const struct gve_tx_queue_config *tx_cfg,
				  bool is_qpl)
{
	if (!is_qpl)
		return 0;
	return tx_cfg->num_queues + tx_cfg->num_xdp_queues;
}

/* Returns the number of rx queue page lists */
static inline u32 gve_num_rx_qpls(const struct gve_rx_queue_config *rx_cfg,
				  bool is_qpl)
{
	if (!is_qpl)
		return 0;
	return rx_cfg->num_queues;
}

static inline u32 gve_tx_qpl_id(struct gve_priv *priv, int tx_qid)
{
	return tx_qid;
}

static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
{
	return priv->tx_cfg.max_queues + rx_qid;
}

static inline u32 gve_get_rx_qpl_id(const struct gve_tx_queue_config *tx_cfg,
				    int rx_qid)
{
	return tx_cfg->max_queues + rx_qid;
}

static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
{
	return gve_tx_qpl_id(priv, 0);
}

static inline u32 gve_rx_start_qpl_id(const struct gve_tx_queue_config *tx_cfg)
{
	return gve_get_rx_qpl_id(tx_cfg, 0);
}

static inline u32 gve_get_rx_pages_per_qpl_dqo(u32 rx_desc_cnt)
{
	/* For DQO, the page count should be greater than the ring size so
	 * that out-of-order completions can be handled. Set it to two times
	 * the ring size.
	 */
	return 2 * rx_desc_cnt;
}

/* Returns the correct dma direction for tx and rx qpls */
static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
						      int id)
{
	if (id < gve_rx_start_qpl_id(&priv->tx_cfg))
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}
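
/* Worked example of the QPL id layout used by the helpers above (numbers
 * hypothetical): with tx_cfg.max_queues == 16, TX queue 2 gets QPL id 2
 * and RX queue 2 gets QPL id 16 + 2 == 18. gve_qpl_dma_dir() then maps
 * ids below 16 to DMA_TO_DEVICE and the rest to DMA_FROM_DEVICE.
 */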

static inline bool gve_is_gqi(struct gve_priv *priv)
{
	return priv->queue_format == GVE_GQI_RDA_FORMAT ||
		priv->queue_format == GVE_GQI_QPL_FORMAT;
}

static inline u32 gve_num_tx_queues(struct gve_priv *priv)
{
	return priv->tx_cfg.num_queues + priv->tx_cfg.num_xdp_queues;
}

static inline u32 gve_xdp_tx_queue_id(struct gve_priv *priv, u32 queue_id)
{
	return priv->tx_cfg.num_queues + queue_id;
}

static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv)
{
	return gve_xdp_tx_queue_id(priv, 0);
}

static inline bool gve_supports_xdp_xmit(struct gve_priv *priv)
{
	switch (priv->queue_format) {
	case GVE_GQI_QPL_FORMAT:
	case GVE_DQO_RDA_FORMAT:
		return true;
	default:
		return false;
	}
}

/* gqi napi handler defined in gve_main.c */
int gve_napi_poll(struct napi_struct *napi, int budget);

/* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
		   struct page **page, dma_addr_t *dma,
		   enum dma_data_direction, gfp_t gfp_flags);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
		   enum dma_data_direction);
/* qpls */
struct gve_queue_page_list *gve_alloc_queue_page_list(struct gve_priv *priv,
						      u32 id, int pages);
void gve_free_queue_page_list(struct gve_priv *priv,
			      struct gve_queue_page_list *qpl,
			      u32 id);
/* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
int gve_xdp_xmit_gqi(struct net_device *dev, int n, struct xdp_frame **frames,
		     u32 flags);
int gve_xdp_xmit_dqo(struct net_device *dev, int n, struct xdp_frame **frames,
		     u32 flags);
int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
		     void *data, int len, void *frame_p);
void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
int gve_xdp_xmit_one_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
			 struct xdp_frame *xdpf);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
bool gve_xdp_poll(struct gve_notify_block *block, int budget);
int gve_xsk_tx_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
			   struct gve_tx_alloc_rings_cfg *cfg);
void gve_tx_free_rings_gqi(struct gve_priv *priv,
			   struct gve_tx_alloc_rings_cfg *cfg);
void gve_tx_start_ring_gqi(struct gve_priv *priv, int idx);
void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx);
u32 gve_tx_load_event_counter(struct gve_priv *priv,
			      struct gve_tx_ring *tx);
bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
/* rx handling */
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
int gve_rx_poll(struct gve_notify_block *block, int budget);
bool gve_rx_work_pending(struct gve_rx_ring *rx);
int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
			  struct gve_rx_alloc_rings_cfg *cfg,
			  struct gve_rx_ring *rx,
			  int idx);
void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
			  struct gve_rx_alloc_rings_cfg *cfg);
int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
			   struct gve_rx_alloc_rings_cfg *cfg);
void gve_rx_free_rings_gqi(struct gve_priv *priv,
			   struct gve_rx_alloc_rings_cfg *cfg);
void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx);
void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx);
u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hplit);
bool gve_header_split_supported(const struct gve_priv *priv);
int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split);
/* rx buffer handling */
int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs);
void gve_free_page_dqo(struct gve_priv *priv, struct gve_rx_buf_state_dqo *bs,
		       bool free_page);
struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx);
bool gve_buf_state_is_allocated(struct gve_rx_ring *rx,
				struct gve_rx_buf_state_dqo *buf_state);
void gve_free_buf_state(struct gve_rx_ring *rx,
			struct gve_rx_buf_state_dqo *buf_state);
struct gve_rx_buf_state_dqo *gve_dequeue_buf_state(struct gve_rx_ring *rx,
						   struct gve_index_list *list);
void gve_enqueue_buf_state(struct gve_rx_ring *rx, struct gve_index_list *list,
			   struct gve_rx_buf_state_dqo *buf_state);
struct gve_rx_buf_state_dqo *gve_get_recycled_buf_state(struct gve_rx_ring *rx);
void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
			 struct gve_rx_buf_state_dqo *buf_state);
void gve_free_to_page_pool(struct gve_rx_ring *rx,
			   struct gve_rx_buf_state_dqo *buf_state,
			   bool allow_direct);
int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx,
			   struct gve_rx_buf_state_dqo *buf_state);
void gve_free_qpl_page_dqo(struct gve_rx_buf_state_dqo *buf_state);
void gve_reuse_buffer(struct gve_rx_ring *rx,
		      struct gve_rx_buf_state_dqo *buf_state);
void gve_free_buffer(struct gve_rx_ring *rx,
		     struct gve_rx_buf_state_dqo *buf_state);
int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc);
struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
					  struct gve_rx_ring *rx,
					  bool xdp);

/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
			     struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
			     struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
int gve_adjust_config(struct gve_priv *priv,
		      struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
		      struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
int gve_adjust_queues(struct gve_priv *priv,
		      struct gve_rx_queue_config new_rx_config,
		      struct gve_tx_queue_config new_tx_config,
		      bool reset_rss);
/* flow steering rule */
int gve_get_flow_rule_entry(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
int gve_get_flow_rule_ids(struct gve_priv *priv, struct ethtool_rxnfc *cmd, u32 *rule_locs);
int gve_add_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
int gve_del_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
int gve_flow_rules_reset(struct gve_priv *priv);
/* RSS config */
int gve_init_rss_config(struct gve_priv *priv, u16 num_queues);
/* PTP and timestamping */
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
int gve_clock_nic_ts_read(struct gve_priv *priv);
int gve_init_clock(struct gve_priv *priv);
void gve_teardown_clock(struct gve_priv *priv);
#else /* CONFIG_PTP_1588_CLOCK */
static inline int gve_clock_nic_ts_read(struct gve_priv *priv)
{
	return -EOPNOTSUPP;
}

static inline int gve_init_clock(struct gve_priv *priv)
{
	return 0;
}

static inline void gve_teardown_clock(struct gve_priv *priv) { }
#endif /* CONFIG_PTP_1588_CLOCK */
/* report stats handling */
void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
extern const struct ethtool_ops gve_ethtool_ops;
/* needed by ethtool */
extern char gve_driver_name[];
extern const char gve_version_str[];
#endif /* _GVE_H_ */