/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
 * Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#ifndef _GVE_H_
#define _GVE_H_

#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/u64_stats_sync.h>
#include <net/xdp.h>

#include "gve_desc.h"
#include "gve_desc_dqo.h"

#ifndef PCI_VENDOR_ID_GOOGLE
#define PCI_VENDOR_ID_GOOGLE	0x1ae0
#endif

#define PCI_DEV_ID_GVNIC	0x0042

#define GVE_REGISTER_BAR	0
#define GVE_DOORBELL_BAR	2

/* Driver can alloc up to 2 segments for the header and 2 for the payload. */
#define GVE_TX_MAX_IOVEC 4
/* 1 for management, 1 for rx, 1 for tx */
#define GVE_MIN_MSIX 3

/* Numbers of gve tx/rx stats in stats report. */
#define GVE_TX_STATS_REPORT_NUM	6
#define GVE_RX_STATS_REPORT_NUM	2

/* Interval to schedule a stats report update, 20000ms. */
#define GVE_STATS_REPORT_TIMER_PERIOD	20000

/* Numbers of NIC tx/rx stats in stats report. */
#define NIC_TX_STATS_REPORT_NUM	0
#define NIC_RX_STATS_REPORT_NUM	4

#define GVE_ADMINQ_BUFFER_SIZE 4096

#define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))

/* PTYPEs are always 10 bits. */
#define GVE_NUM_PTYPES	1024

#define GVE_DEFAULT_RX_BUFFER_SIZE 2048

#define GVE_DEFAULT_RX_BUFFER_OFFSET 2048

#define GVE_XDP_ACTIONS 5

#define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182

#define DQO_QPL_DEFAULT_TX_PAGES 512
#define DQO_QPL_DEFAULT_RX_PAGES 2048

/* Maximum TSO size supported on DQO */
#define GVE_DQO_TX_MAX	0x3FFFF

#define GVE_TX_BUF_SHIFT_DQO 11

/* 2K buffers for DQO-QPL */
#define GVE_TX_BUF_SIZE_DQO BIT(GVE_TX_BUF_SHIFT_DQO)
#define GVE_TX_BUFS_PER_PAGE_DQO (PAGE_SIZE >> GVE_TX_BUF_SHIFT_DQO)
#define GVE_MAX_TX_BUFS_PER_PKT (DIV_ROUND_UP(GVE_DQO_TX_MAX, GVE_TX_BUF_SIZE_DQO))
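
/* Worked example of the DQO-QPL TX buffer sizing above (illustrative note,
 * assuming 4K pages): GVE_TX_BUF_SIZE_DQO = BIT(11) = 2048 bytes, so each
 * QPL page holds PAGE_SIZE >> 11 = 2 bounce buffers, and a maximal
 * 0x3FFFF-byte TSO packet needs DIV_ROUND_UP(0x3FFFF, 2048) = 128 buffers
 * (GVE_MAX_TX_BUFS_PER_PKT).
 */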

/* If the number of free/recyclable buffers is less than this threshold, the
 * driver allocs and uses a non-qpl page on the receive path of DQO QPL to
 * free up buffers.
 * Value is set big enough to post at least 3 64K LRO packets via 2K buffers
 * to the NIC.
 */
#define GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD 96

/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
	struct gve_rx_desc *desc_ring; /* the descriptor ring */
	dma_addr_t bus; /* the bus for the desc_ring */
	u8 seqno; /* the next expected seqno for this desc */
};

/* The page info for a single slot in the RX data queue */
struct gve_rx_slot_page_info {
	struct page *page;
	void *page_address;
	u32 page_offset; /* offset to write to in page */
	int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
	u16 pad; /* adjustment for rx padding */
	u8 can_flip; /* tracks if the networking stack is using the page */
};

/* A list of pages registered with the device during setup and used by a queue
 * as buffers
 */
struct gve_queue_page_list {
	u32 id; /* unique id */
	u32 num_entries;
	struct page **pages; /* list of num_entries pages */
	dma_addr_t *page_buses; /* the dma addrs of the pages */
};

/* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */
struct gve_rx_data_queue {
	union gve_rx_data_slot *data_ring; /* read by NIC */
	dma_addr_t data_bus; /* dma mapping of the slots */
	struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
	struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
	u8 raw_addressing; /* use raw_addressing? */
};

struct gve_priv;

/* RX buffer queue for posting buffers to HW.
 * Each RX (completion) queue has a corresponding buffer queue.
 */
struct gve_rx_buf_queue_dqo {
	struct gve_rx_desc_dqo *desc_ring;
	dma_addr_t bus;
	u32 head; /* Pointer to start cleaning buffers at. */
	u32 tail; /* Last posted buffer index + 1 */
	u32 mask; /* Mask for indices to the size of the ring */
};

/* RX completion queue to receive packets from HW. */
struct gve_rx_compl_queue_dqo {
	struct gve_rx_compl_desc_dqo *desc_ring;
	dma_addr_t bus;

	/* Number of slots which did not have a buffer posted yet. We should not
	 * post more buffers than the queue size to avoid HW overrunning the
	 * queue.
	 */
	int num_free_slots;

	/* HW uses a "generation bit" to notify SW of new descriptors. When a
	 * descriptor's generation bit is different from the current generation,
	 * that descriptor is ready to be consumed by SW.
	 */
	u8 cur_gen_bit;

	/* Pointer into desc_ring where the next completion descriptor will be
	 * received.
	 */
	u32 head;
	u32 mask; /* Mask for indices to the size of the ring */
};

/* Stores state for tracking buffers posted to HW */
struct gve_rx_buf_state_dqo {
	/* The page posted to HW. */
	struct gve_rx_slot_page_info page_info;

	/* The DMA address corresponding to `page_info`. */
	dma_addr_t addr;

	/* Last offset into the page when it only had a single reference, at
	 * which point every other offset is free to be reused.
	 */
	u32 last_single_ref_offset;

	/* Linked list index to next element in the list, or -1 if none */
	s16 next;
};

/* `head` and `tail` are indices into an array, or -1 if empty. */
struct gve_index_list {
	s16 head;
	s16 tail;
};
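
/* Illustrative sketch (not part of this header) of the s16 index-list
 * convention above: elements live in a flat array and link to each other by
 * array index, with -1 as the NULL sentinel. Popping from a singly linked
 * free list and doing a FIFO append to a gve_index_list look roughly like:
 *
 *	s16 i = rx->dqo.free_buf_states;		// pop head of free list
 *	rx->dqo.free_buf_states = rx->dqo.buf_states[i].next;
 *
 *	struct gve_index_list *list = &rx->dqo.recycled_buf_states;
 *	if (list->head == -1)				// FIFO append at tail
 *		list->head = i;
 *	else
 *		rx->dqo.buf_states[list->tail].next = i;
 *	list->tail = i;
 *	rx->dqo.buf_states[i].next = -1;
 */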

/* A single received packet split across multiple buffers may be
 * reconstructed using the information in this structure.
 */
struct gve_rx_ctx {
	/* head and tail of skb chain for the current packet or NULL if none */
	struct sk_buff *skb_head;
	struct sk_buff *skb_tail;
	u32 total_size;
	u8 frag_cnt;
	bool drop_pkt;
};

struct gve_rx_cnts {
	u32 ok_pkt_bytes;
	u16 ok_pkt_cnt;
	u16 total_pkt_cnt;
	u16 cont_pkt_cnt;
	u16 desc_err_pkt_cnt;
};

/* Contains datapath state used to represent an RX queue. */
struct gve_rx_ring {
	struct gve_priv *gve;
	union {
		/* GQI fields */
		struct {
			struct gve_rx_desc_queue desc;
			struct gve_rx_data_queue data;

			/* threshold for posting new buffs and descs */
			u32 db_threshold;
			u16 packet_buffer_size;

			u32 qpl_copy_pool_mask;
			u32 qpl_copy_pool_head;
			struct gve_rx_slot_page_info *qpl_copy_pool;
		};

		/* DQO fields. */
		struct {
			struct gve_rx_buf_queue_dqo bufq;
			struct gve_rx_compl_queue_dqo complq;

			struct gve_rx_buf_state_dqo *buf_states;
			u16 num_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Index into
			 * buf_states, or -1 if empty.
			 */
			s16 free_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indexes into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which are pointing to
			 * valid buffers.
			 *
			 * We use a FIFO here in order to increase the
			 * probability that buffers can be reused by increasing
			 * the time between usages.
			 */
			struct gve_index_list recycled_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indexes into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which have buffers
			 * which cannot be reused yet.
			 */
			struct gve_index_list used_buf_states;

			/* qpl assigned to this queue */
			struct gve_queue_page_list *qpl;

			/* index into queue page list */
			u32 next_qpl_page_idx;

			/* track number of used buffers */
			u16 used_buf_states_cnt;
		} dqo;
	};

	u64 rbytes; /* free-running bytes received */
	u64 rpackets; /* free-running packets received */
	u32 cnt; /* free-running total number of completed packets */
	u32 fill_cnt; /* free-running total number of descs and buffs posted */
	u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
	u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
	u64 rx_copied_pkt; /* free-running total number of copied packets */
	u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
	u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
	u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
	u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
	u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
	u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */
	u64 rx_frag_alloc_cnt; /* free-running count of rx page allocations */
	u64 xdp_tx_errors;
	u64 xdp_redirect_errors;
	u64 xdp_alloc_fails;
	u64 xdp_actions[GVE_XDP_ACTIONS];
	u32 q_num; /* queue index */
	u32 ntfy_id; /* notification block index */
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	dma_addr_t q_resources_bus; /* dma address for the queue resources */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */

	struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. */

	/* XDP stuff */
	struct xdp_rxq_info xdp_rxq;
	struct xdp_rxq_info xsk_rxq;
	struct xsk_buff_pool *xsk_pool;
	struct page_frag_cache page_cache; /* Page cache to allocate XDP frames */
};
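
/* Note on the free-running counters above (illustrative, not from this
 * header): cnt and fill_cnt increment without wrapping to the ring size;
 * the ring slot is recovered by masking, e.g. for a 1024-entry ring
 * (mask == 1023) a fill_cnt of 1030 refers to slot 1030 & 1023 == 6.
 */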

/* A TX desc ring entry */
union gve_tx_desc {
	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
	struct gve_tx_mtd_desc mtd; /* optional metadata descriptor */
	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
};

/* Tracks the memory in the fifo occupied by a segment of a packet */
struct gve_tx_iovec {
	u32 iov_offset; /* offset into this segment */
	u32 iov_len; /* length */
	u32 iov_padding; /* padding associated with this segment */
};

/* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
 * ring entry but only used for a pkt_desc not a seg_desc
 */
struct gve_tx_buffer_state {
	union {
		struct sk_buff *skb; /* skb for this pkt */
		struct xdp_frame *xdp_frame; /* xdp_frame */
	};
	struct {
		u16 size; /* size of xmitted xdp pkt */
		u8 is_xsk; /* xsk buff */
	} xdp;
	union {
		struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
		struct {
			DEFINE_DMA_UNMAP_ADDR(dma);
			DEFINE_DMA_UNMAP_LEN(len);
		};
	};
};

/* A TX buffer - each queue has one */
struct gve_tx_fifo {
	void *base; /* address of base of FIFO */
	u32 size; /* total size */
	atomic_t available; /* how much space is still available */
	u32 head; /* offset to write at */
	struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */
};

/* TX descriptor for DQO format */
union gve_tx_desc_dqo {
	struct gve_tx_pkt_desc_dqo pkt;
	struct gve_tx_tso_context_desc_dqo tso_ctx;
	struct gve_tx_general_context_desc_dqo general_ctx;
};

enum gve_packet_state {
	/* Packet is in free list, available to be allocated.
	 * This should always be zero since state is not explicitly initialized.
	 */
	GVE_PACKET_STATE_UNALLOCATED,
	/* Packet is expecting a regular data completion or miss completion */
	GVE_PACKET_STATE_PENDING_DATA_COMPL,
	/* Packet has received a miss completion and is expecting a
	 * re-injection completion.
	 */
	GVE_PACKET_STATE_PENDING_REINJECT_COMPL,
	/* No valid completion received within the specified timeout. */
	GVE_PACKET_STATE_TIMED_OUT_COMPL,
};
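
/* Rough lifecycle implied by the states above (illustrative summary, not a
 * normative spec): a pending packet moves from UNALLOCATED to
 * PENDING_DATA_COMPL when posted; a regular data completion returns it to
 * the free list, while a miss completion moves it to
 * PENDING_REINJECT_COMPL to wait for the re-injection completion. If that
 * completion never arrives before timeout_jiffies, the packet is marked
 * TIMED_OUT_COMPL and freed by the timeout handling path.
 */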

struct gve_tx_pending_packet_dqo {
	struct sk_buff *skb; /* skb for this packet */

	/* 0th element corresponds to the linear portion of `skb`, should be
	 * unmapped with `dma_unmap_single`.
	 *
	 * All others correspond to `skb`'s frags and should be unmapped with
	 * `dma_unmap_page`.
	 */
	union {
		struct {
			DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
			DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
		};
		s16 tx_qpl_buf_ids[GVE_MAX_TX_BUFS_PER_PKT];
	};

	u16 num_bufs;

	/* Linked list index to next element in the list, or -1 if none */
	s16 next;

	/* Linked list index to prev element in the list, or -1 if none.
	 * Used for tracking either outstanding miss completions or prematurely
	 * freed packets.
	 */
	s16 prev;

	/* Identifies the current state of the packet as defined in
	 * `enum gve_packet_state`.
	 */
	u8 state;

	/* If packet is an outstanding miss completion, then the packet is
	 * freed if the corresponding re-injection completion is not received
	 * before kernel jiffies exceeds timeout_jiffies.
	 */
	unsigned long timeout_jiffies;
};

/* Contains datapath state used to represent a TX queue. */
struct gve_tx_ring {
	/* Cacheline 0 -- Accessed & dirtied during transmit */
	union {
		/* GQI fields */
		struct {
			struct gve_tx_fifo tx_fifo;
			u32 req; /* driver tracked head pointer */
			u32 done; /* driver tracked tail pointer */
		};

		/* DQO fields. */
		struct {
			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is a consumer list owned by the TX path. When it
			 * runs out, the producer list is stolen from the
			 * completion handling path
			 * (dqo_compl.free_pending_packets).
			 */
			s16 free_pending_packets;

			/* Cached value of `dqo_compl.hw_tx_head` */
			u32 head;
			u32 tail; /* Last posted buffer index + 1 */

			/* Index of the last descriptor with "report event" bit
			 * set.
			 */
			u32 last_re_idx;

			/* free running number of packet buf descriptors posted */
			u16 posted_packet_desc_cnt;
			/* free running number of packet buf descriptors completed */
			u16 completed_packet_desc_cnt;

			/* QPL fields */
			struct {
				/* Linked list of gve_tx_buf_dqo. Index into
				 * tx_qpl_buf_next, or -1 if empty.
				 *
				 * This is a consumer list owned by the TX path. When it
				 * runs out, the producer list is stolen from the
				 * completion handling path
				 * (dqo_compl.free_tx_qpl_buf_head).
				 */
				s16 free_tx_qpl_buf_head;

				/* Free running count of the number of QPL tx buffers
				 * allocated
				 */
				u32 alloc_tx_qpl_buf_cnt;

				/* Cached value of `dqo_compl.free_tx_qpl_buf_cnt` */
				u32 free_tx_qpl_buf_cnt;
			};
		} dqo_tx;
	};

	/* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */
	union {
		/* GQI fields */
		struct {
			/* Spinlock for when cleanup in progress */
			spinlock_t clean_lock;
			/* Spinlock for XDP tx traffic */
			spinlock_t xdp_lock;
		};

		/* DQO fields. */
		struct {
			u32 head; /* Last read on compl_desc */

			/* Tracks the current gen bit of compl_q */
			u8 cur_gen_bit;

			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is the producer list, owned by the completion
			 * handling path. When the consumer list
			 * (dqo_tx.free_pending_packets) runs out, this list
			 * will be stolen.
			 */
			atomic_t free_pending_packets;

			/* Last TX ring index fetched by HW */
			atomic_t hw_tx_head;

			/* List to track pending packets which received a miss
			 * completion but not a corresponding reinjection.
			 */
			struct gve_index_list miss_completions;

			/* List to track pending packets that were completed
			 * before receiving a valid completion because they
			 * reached a specified timeout.
			 */
			struct gve_index_list timed_out_completions;

			/* QPL fields */
			struct {
				/* Linked list of gve_tx_buf_dqo. Index into
				 * tx_qpl_buf_next, or -1 if empty.
				 *
				 * This is the producer list, owned by the completion
				 * handling path. When the consumer list
				 * (dqo_tx.free_tx_qpl_buf_head) runs out, this list
				 * will be stolen.
				 */
				atomic_t free_tx_qpl_buf_head;

				/* Free running count of the number of tx buffers
				 * freed
				 */
				atomic_t free_tx_qpl_buf_cnt;
			};
		} dqo_compl;
	} ____cacheline_aligned;
	u64 pkt_done; /* free-running - total packets completed */
	u64 bytes_done; /* free-running - total bytes completed */
	u64 dropped_pkt; /* free-running - total packets dropped */
	u64 dma_mapping_error; /* count of dma mapping errors */

	/* Cacheline 2 -- Read-mostly fields */
	union {
		/* GQI fields */
		struct {
			union gve_tx_desc *desc;

			/* Maps 1:1 to a desc */
			struct gve_tx_buffer_state *info;
		};

		/* DQO fields. */
		struct {
			union gve_tx_desc_dqo *tx_ring;
			struct gve_tx_compl_desc *compl_ring;

			struct gve_tx_pending_packet_dqo *pending_packets;
			s16 num_pending_packets;

			u32 complq_mask; /* complq size is complq_mask + 1 */

			/* QPL fields */
			struct {
				/* qpl assigned to this queue */
				struct gve_queue_page_list *qpl;

				/* Each QPL page is divided into TX bounce buffers
				 * of size GVE_TX_BUF_SIZE_DQO. tx_qpl_buf_next is
				 * an array to manage linked lists of TX buffers.
				 * An entry j at index i implies that j'th buffer
				 * is next on the list after i
				 */
				s16 *tx_qpl_buf_next;
				u32 num_tx_qpl_bufs;
			};
		} dqo;
	} ____cacheline_aligned;
	struct netdev_queue *netdev_txq;
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	struct device *dev;
	u32 mask; /* masks req and done down to queue size */
	u8 raw_addressing; /* use raw_addressing? */

	/* Slow-path fields */
	u32 q_num ____cacheline_aligned; /* queue idx */
	u32 stop_queue; /* count of queue stops */
	u32 wake_queue; /* count of queue wakes */
	u32 queue_timeout; /* count of queue timeouts */
	u32 ntfy_id; /* notification block index */
	u32 last_kick_msec; /* Last time the queue was kicked */
	dma_addr_t bus; /* dma address of the descr ring */
	dma_addr_t q_resources_bus; /* dma address of the queue resources */
	dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */
	struct xsk_buff_pool *xsk_pool;
	u32 xdp_xsk_wakeup;
	u32 xdp_xsk_done;
	u64 xdp_xsk_sent;
	u64 xdp_xmit;
	u64 xdp_xmit_errors;
} ____cacheline_aligned;
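
/* Worked example of the tx_qpl_buf_next scheme above (illustrative only,
 * assuming 4K pages): each QPL page is split into GVE_TX_BUFS_PER_PAGE_DQO
 * (= 2) bounce buffers of GVE_TX_BUF_SIZE_DQO bytes, so buffer id b lives
 * in QPL page b / GVE_TX_BUFS_PER_PAGE_DQO at byte offset
 * (b % GVE_TX_BUFS_PER_PAGE_DQO) * GVE_TX_BUF_SIZE_DQO. A free list such
 * as dqo_tx.free_tx_qpl_buf_head is then walked as b = tx_qpl_buf_next[b]
 * until -1.
 */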

/* Wraps the info for one irq including the napi struct and the queues
 * associated with that irq.
 */
struct gve_notify_block {
	__be32 *irq_db_index; /* pointer to idx into Bar2 */
	char name[IFNAMSIZ + 16]; /* name registered with the kernel */
	struct napi_struct napi; /* kernel napi struct for this block */
	struct gve_priv *priv;
	struct gve_tx_ring *tx; /* tx rings on this block */
	struct gve_rx_ring *rx; /* rx rings on this block */
};

/* Tracks allowed and current queue settings */
struct gve_queue_config {
	u16 max_queues;
	u16 num_queues; /* current */
};

/* Tracks the available and used qpl IDs */
struct gve_qpl_config {
	u32 qpl_map_size; /* map memory size */
	unsigned long *qpl_id_map; /* bitmap of used qpl ids */
};

struct gve_options_dqo_rda {
	u16 tx_comp_ring_entries; /* number of tx_comp descriptors */
	u16 rx_buff_ring_entries; /* number of rx_buff descriptors */
};

struct gve_irq_db {
	__be32 index;
} ____cacheline_aligned;

struct gve_ptype {
	u8 l3_type; /* `gve_l3_type` in gve_adminq.h */
	u8 l4_type; /* `gve_l4_type` in gve_adminq.h */
};

struct gve_ptype_lut {
	struct gve_ptype ptypes[GVE_NUM_PTYPES];
};

/* Parameters for allocating queue page lists */
struct gve_qpls_alloc_cfg {
	struct gve_qpl_config *qpl_cfg;
	struct gve_queue_config *tx_cfg;
	struct gve_queue_config *rx_cfg;

	u16 num_xdp_queues;
	bool raw_addressing;
	bool is_gqi;

	/* Allocated resources are returned here */
	struct gve_queue_page_list *qpls;
};

/* Parameters for allocating resources for tx queues */
struct gve_tx_alloc_rings_cfg {
	struct gve_queue_config *qcfg;

	/* qpls and qpl_cfg must already be allocated */
	struct gve_queue_page_list *qpls;
	struct gve_qpl_config *qpl_cfg;

	u16 ring_size;
	u16 start_idx;
	u16 num_rings;
	bool raw_addressing;

	/* Allocated resources are returned here */
	struct gve_tx_ring *tx;
};

/* Parameters for allocating resources for rx queues */
struct gve_rx_alloc_rings_cfg {
	/* tx config is also needed to determine QPL ids */
	struct gve_queue_config *qcfg;
	struct gve_queue_config *qcfg_tx;

	/* qpls and qpl_cfg must already be allocated */
	struct gve_queue_page_list *qpls;
	struct gve_qpl_config *qpl_cfg;

	u16 ring_size;
	bool raw_addressing;
	bool enable_header_split;

	/* Allocated resources are returned here */
	struct gve_rx_ring *rx;
};

/* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
 * when the entire configure_device_resources command is zeroed out and the
 * queue_format is not specified.
 */
enum gve_queue_format {
	GVE_QUEUE_FORMAT_UNSPECIFIED	= 0x0,
	GVE_GQI_RDA_FORMAT		= 0x1,
	GVE_GQI_QPL_FORMAT		= 0x2,
	GVE_DQO_RDA_FORMAT		= 0x3,
	GVE_DQO_QPL_FORMAT		= 0x4,
};

struct gve_priv {
	struct net_device *dev;
	struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
	struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
	struct gve_queue_page_list *qpls; /* array of num qpls */
	struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
	struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */
	dma_addr_t irq_db_indices_bus;
	struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */
	char mgmt_msix_name[IFNAMSIZ + 16];
	u32 mgmt_msix_idx;
	__be32 *counter_array; /* array of num_event_counters */
	dma_addr_t counter_array_bus;

	u16 num_event_counters;
	u16 tx_desc_cnt; /* num desc per ring */
	u16 rx_desc_cnt; /* num desc per ring */
	u16 tx_pages_per_qpl; /* Suggested number of pages per qpl for TX queues by NIC */
	u16 rx_pages_per_qpl; /* Suggested number of pages per qpl for RX queues by NIC */
	u16 rx_data_slot_cnt; /* rx buffer length */
	u64 max_registered_pages;
	u64 num_registered_pages; /* num pages registered with NIC */
	struct bpf_prog *xdp_prog; /* XDP BPF program */
	u32 rx_copybreak; /* copy packets smaller than this */
	u16 default_num_queues; /* default num queues to set up */

	u16 num_xdp_queues;
	struct gve_queue_config tx_cfg;
	struct gve_queue_config rx_cfg;
	struct gve_qpl_config qpl_cfg; /* map used QPL ids */
	u32 num_ntfy_blks; /* split between TX and RX so must be even */

	struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
	__be32 __iomem *db_bar2; /* "array" of doorbells */
	u32 msg_enable; /* level for netif* netdev print macros */
	struct pci_dev *pdev;

	/* metrics */
	u32 tx_timeo_cnt;

	/* Admin queue - see gve_adminq.h */
	union gve_adminq_command *adminq;
	dma_addr_t adminq_bus_addr;
	struct dma_pool *adminq_pool;
	u32 adminq_mask; /* masks prod_cnt to adminq size */
	u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
	u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
	u32 adminq_timeouts; /* free-running count of AQ cmds timeouts */
	/* free-running count of per AQ cmd executed */
	u32 adminq_describe_device_cnt;
	u32 adminq_cfg_device_resources_cnt;
	u32 adminq_register_page_list_cnt;
	u32 adminq_unregister_page_list_cnt;
	u32 adminq_create_tx_queue_cnt;
	u32 adminq_create_rx_queue_cnt;
	u32 adminq_destroy_tx_queue_cnt;
	u32 adminq_destroy_rx_queue_cnt;
	u32 adminq_dcfg_device_resources_cnt;
	u32 adminq_set_driver_parameter_cnt;
	u32 adminq_report_stats_cnt;
	u32 adminq_report_link_speed_cnt;
	u32 adminq_get_ptype_map_cnt;
	u32 adminq_verify_driver_compatibility_cnt;

	/* Global stats */
	u32 interface_up_cnt; /* count of times interface turned up since last reset */
	u32 interface_down_cnt; /* count of times interface turned down since last reset */
	u32 reset_cnt; /* count of reset */
	u32 page_alloc_fail; /* count of page alloc fails */
	u32 dma_mapping_error; /* count of dma mapping errors */
	u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
	u32 suspend_cnt; /* count of times suspended */
	u32 resume_cnt; /* count of times resumed */
	struct workqueue_struct *gve_wq;
	struct work_struct service_task;
	struct work_struct stats_report_task;
	unsigned long service_task_flags;
	unsigned long state_flags;

	struct gve_stats_report *stats_report;
	u64 stats_report_len;
	dma_addr_t stats_report_bus; /* dma address for the stats report */
	unsigned long ethtool_flags;

	unsigned long stats_report_timer_period;
	struct timer_list stats_report_timer;

	/* Gvnic device link speed from hypervisor. */
	u64 link_speed;
	bool up_before_suspend; /* True if dev was up before suspend */

	struct gve_options_dqo_rda options_dqo_rda;
	struct gve_ptype_lut *ptype_lut_dqo;

	/* Must be a power of two. */
	int data_buffer_size_dqo;

	enum gve_queue_format queue_format;

	/* Interrupt coalescing settings */
	u32 tx_coalesce_usecs;
	u32 rx_coalesce_usecs;
};

enum gve_service_task_flags_bit {
	GVE_PRIV_FLAGS_DO_RESET			= 1,
	GVE_PRIV_FLAGS_RESET_IN_PROGRESS	= 2,
	GVE_PRIV_FLAGS_PROBE_IN_PROGRESS	= 3,
	GVE_PRIV_FLAGS_DO_REPORT_STATS		= 4,
};

enum gve_state_flags_bit {
	GVE_PRIV_FLAGS_ADMIN_QUEUE_OK		= 1,
	GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK	= 2,
	GVE_PRIV_FLAGS_DEVICE_RINGS_OK		= 3,
	GVE_PRIV_FLAGS_NAPI_ENABLED		= 4,
};

enum gve_ethtool_flags_bit {
	GVE_PRIV_FLAGS_REPORT_STATS		= 0,
};

static inline bool gve_get_do_reset(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_set_do_reset(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_clear_do_reset(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline bool gve_get_reset_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_reset_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_reset_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_probe_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_probe_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_do_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS,
			&priv->service_task_flags);
}

static inline void gve_set_do_report_stats(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline void gve_clear_do_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_set_admin_queue_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_clear_admin_queue_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline bool gve_get_device_resources_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_set_device_resources_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_clear_device_resources_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline bool gve_get_device_rings_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_set_device_rings_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_clear_device_rings_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline bool gve_get_napi_enabled(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_set_napi_enabled(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_clear_napi_enabled(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline bool gve_get_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

static inline void gve_clear_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

/* Returns the address of the ntfy_blocks irq doorbell
 */
static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
					       struct gve_notify_block *block)
{
	return &priv->db_bar2[be32_to_cpu(*block->irq_db_index)];
}

/* Returns the index into ntfy_blocks of the given tx ring's block
 */
static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return queue_idx;
}

/* Returns the index into ntfy_blocks of the given rx ring's block
 */
static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return (priv->num_ntfy_blks / 2) + queue_idx;
}
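
/* Worked example of the notification block layout above (illustrative
 * only): with num_ntfy_blks == 16, TX queues 0..7 map to blocks 0..7 and
 * RX queues 0..7 map to blocks 8..15, which is why num_ntfy_blks must be
 * split evenly between TX and RX.
 */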

static inline bool gve_is_qpl(struct gve_priv *priv)
{
	return priv->queue_format == GVE_GQI_QPL_FORMAT ||
		priv->queue_format == GVE_DQO_QPL_FORMAT;
}

/* Returns the number of tx queue page lists */
static inline u32 gve_num_tx_qpls(const struct gve_queue_config *tx_cfg,
				  int num_xdp_queues,
				  bool is_qpl)
{
	if (!is_qpl)
		return 0;
	return tx_cfg->num_queues + num_xdp_queues;
}

/* Returns the number of XDP tx queue page lists
 */
static inline u32 gve_num_xdp_qpls(struct gve_priv *priv)
{
	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
		return 0;

	return priv->num_xdp_queues;
}

/* Returns the number of rx queue page lists */
static inline u32 gve_num_rx_qpls(const struct gve_queue_config *rx_cfg,
				  bool is_qpl)
{
	if (!is_qpl)
		return 0;
	return rx_cfg->num_queues;
}

static inline u32 gve_tx_qpl_id(struct gve_priv *priv, int tx_qid)
{
	return tx_qid;
}

static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
{
	return priv->tx_cfg.max_queues + rx_qid;
}

/* Returns the index into priv->qpls where a certain rx queue's QPL resides */
static inline u32 gve_get_rx_qpl_id(const struct gve_queue_config *tx_cfg, int rx_qid)
{
	return tx_cfg->max_queues + rx_qid;
}

static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
{
	return gve_tx_qpl_id(priv, 0);
}

/* Returns the index into priv->qpls where the first rx queue's QPL resides */
static inline u32 gve_rx_start_qpl_id(const struct gve_queue_config *tx_cfg)
{
	return gve_get_rx_qpl_id(tx_cfg, 0);
}

/* Returns a pointer to the next available tx qpl in the list of qpls */
static inline
struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_tx_alloc_rings_cfg *cfg,
					      int tx_qid)
{
	/* QPL already in use */
	if (test_bit(tx_qid, cfg->qpl_cfg->qpl_id_map))
		return NULL;
	set_bit(tx_qid, cfg->qpl_cfg->qpl_id_map);
	return &cfg->qpls[tx_qid];
}

/* Returns a pointer to the next available rx qpl in the list of qpls */
static inline
struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_rx_alloc_rings_cfg *cfg,
					      int rx_qid)
{
	int id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx_qid);

	/* QPL already in use */
	if (test_bit(id, cfg->qpl_cfg->qpl_id_map))
		return NULL;
	set_bit(id, cfg->qpl_cfg->qpl_id_map);
	return &cfg->qpls[id];
}

/* Unassigns the qpl with the given id */
static inline void gve_unassign_qpl(struct gve_qpl_config *qpl_cfg, int id)
{
	clear_bit(id, qpl_cfg->qpl_id_map);
}

/* Returns the correct dma direction for tx and rx qpls */
static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
						       int id)
{
	if (id < gve_rx_start_qpl_id(&priv->tx_cfg))
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}
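
/* Worked example of the QPL id layout above (illustrative only): ids
 * 0..tx_cfg.max_queues-1 belong to TX queues and are mapped DMA_TO_DEVICE,
 * while rx queue rx_qid uses id tx_cfg.max_queues + rx_qid and is mapped
 * DMA_FROM_DEVICE. E.g. with tx_cfg.max_queues == 16, RX queue 2 uses QPL
 * id 18, and gve_qpl_dma_dir(priv, 18) returns DMA_FROM_DEVICE.
 */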

static inline bool gve_is_gqi(struct gve_priv *priv)
{
	return priv->queue_format == GVE_GQI_RDA_FORMAT ||
		priv->queue_format == GVE_GQI_QPL_FORMAT;
}

static inline u32 gve_num_tx_queues(struct gve_priv *priv)
{
	return priv->tx_cfg.num_queues + priv->num_xdp_queues;
}

static inline u32 gve_xdp_tx_queue_id(struct gve_priv *priv, u32 queue_id)
{
	return priv->tx_cfg.num_queues + queue_id;
}

static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv)
{
	return gve_xdp_tx_queue_id(priv, 0);
}

/* gqi napi handler defined in gve_main.c */
int gve_napi_poll(struct napi_struct *napi, int budget);

/* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
		   struct page **page, dma_addr_t *dma,
		   enum dma_data_direction, gfp_t gfp_flags);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
		   enum dma_data_direction);
/* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		 u32 flags);
int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
		     void *data, int len, void *frame_p);
void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
bool gve_xdp_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
			   struct gve_tx_alloc_rings_cfg *cfg);
void gve_tx_free_rings_gqi(struct gve_priv *priv,
			   struct gve_tx_alloc_rings_cfg *cfg);
void gve_tx_start_ring_gqi(struct gve_priv *priv, int idx);
void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx);
u32 gve_tx_load_event_counter(struct gve_priv *priv,
			      struct gve_tx_ring *tx);
bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
/* rx handling */
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
int gve_rx_poll(struct gve_notify_block *block, int budget);
bool gve_rx_work_pending(struct gve_rx_ring *rx);
int gve_rx_alloc_rings(struct gve_priv *priv);
int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
			   struct gve_rx_alloc_rings_cfg *cfg);
void gve_rx_free_rings_gqi(struct gve_priv *priv,
			   struct gve_rx_alloc_rings_cfg *cfg);
void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx);
void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx);
/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
int gve_adjust_queues(struct gve_priv *priv,
		      struct gve_queue_config new_rx_config,
		      struct gve_queue_config new_tx_config);
/* report stats handling */
void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
extern const struct ethtool_ops gve_ethtool_ops;
/* needed by ethtool */
extern char gve_driver_name[];
extern const char gve_version_str[];
#endif /* _GVE_H_ */