1 /* SPDX-License-Identifier: (GPL-2.0 OR MIT) 2 * Google virtual Ethernet (gve) driver 3 * 4 * Copyright (C) 2015-2021 Google, Inc. 5 */ 6 7 #ifndef _GVE_H_ 8 #define _GVE_H_ 9 10 #include <linux/dma-mapping.h> 11 #include <linux/dmapool.h> 12 #include <linux/ethtool_netlink.h> 13 #include <linux/netdevice.h> 14 #include <linux/pci.h> 15 #include <linux/u64_stats_sync.h> 16 #include <net/xdp.h> 17 18 #include "gve_desc.h" 19 #include "gve_desc_dqo.h" 20 21 #ifndef PCI_VENDOR_ID_GOOGLE 22 #define PCI_VENDOR_ID_GOOGLE 0x1ae0 23 #endif 24 25 #define PCI_DEV_ID_GVNIC 0x0042 26 27 #define GVE_REGISTER_BAR 0 28 #define GVE_DOORBELL_BAR 2 29 30 /* Driver can alloc up to 2 segments for the header and 2 for the payload. */ 31 #define GVE_TX_MAX_IOVEC 4 32 /* 1 for management, 1 for rx, 1 for tx */ 33 #define GVE_MIN_MSIX 3 34 35 /* Numbers of gve tx/rx stats in stats report. */ 36 #define GVE_TX_STATS_REPORT_NUM 6 37 #define GVE_RX_STATS_REPORT_NUM 2 38 39 /* Interval to schedule a stats report update, 20000ms. */ 40 #define GVE_STATS_REPORT_TIMER_PERIOD 20000 41 42 /* Numbers of NIC tx/rx stats in stats report. */ 43 #define NIC_TX_STATS_REPORT_NUM 0 44 #define NIC_RX_STATS_REPORT_NUM 4 45 46 #define GVE_ADMINQ_BUFFER_SIZE 4096 47 48 #define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1)) 49 50 /* PTYPEs are always 10 bits. */ 51 #define GVE_NUM_PTYPES 1024 52 53 /* Default minimum ring size */ 54 #define GVE_DEFAULT_MIN_TX_RING_SIZE 256 55 #define GVE_DEFAULT_MIN_RX_RING_SIZE 512 56 57 #define GVE_DEFAULT_RX_BUFFER_SIZE 2048 58 59 #define GVE_MAX_RX_BUFFER_SIZE 4096 60 61 #define GVE_DEFAULT_RX_BUFFER_OFFSET 2048 62 63 #define GVE_XDP_ACTIONS 5 64 65 #define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182 66 67 #define GVE_DEFAULT_HEADER_BUFFER_SIZE 128 68 69 #define DQO_QPL_DEFAULT_TX_PAGES 512 70 71 /* Maximum TSO size supported on DQO */ 72 #define GVE_DQO_TX_MAX 0x3FFFF 73 74 #define GVE_TX_BUF_SHIFT_DQO 11 75 76 /* 2K buffers for DQO-QPL */ 77 #define GVE_TX_BUF_SIZE_DQO BIT(GVE_TX_BUF_SHIFT_DQO) 78 #define GVE_TX_BUFS_PER_PAGE_DQO (PAGE_SIZE >> GVE_TX_BUF_SHIFT_DQO) 79 #define GVE_MAX_TX_BUFS_PER_PKT (DIV_ROUND_UP(GVE_DQO_TX_MAX, GVE_TX_BUF_SIZE_DQO)) 80 81 /* If number of free/recyclable buffers are less than this threshold; driver 82 * allocs and uses a non-qpl page on the receive path of DQO QPL to free 83 * up buffers. 84 * Value is set big enough to post at least 3 64K LRO packet via 2K buffer to NIC. 85 */ 86 #define GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD 96 87 88 /* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */ 89 struct gve_rx_desc_queue { 90 struct gve_rx_desc *desc_ring; /* the descriptor ring */ 91 dma_addr_t bus; /* the bus for the desc_ring */ 92 u8 seqno; /* the next expected seqno for this desc*/ 93 }; 94 95 /* The page info for a single slot in the RX data queue */ 96 struct gve_rx_slot_page_info { 97 struct page *page; 98 void *page_address; 99 u32 page_offset; /* offset to write to in page */ 100 int pagecnt_bias; /* expected pagecnt if only the driver has a ref */ 101 u16 pad; /* adjustment for rx padding */ 102 u8 can_flip; /* tracks if the networking stack is using the page */ 103 }; 104 105 /* A list of pages registered with the device during setup and used by a queue 106 * as buffers 107 */ 108 struct gve_queue_page_list { 109 u32 id; /* unique id */ 110 u32 num_entries; 111 struct page **pages; /* list of num_entries pages */ 112 dma_addr_t *page_buses; /* the dma addrs of the pages */ 113 }; 114 115 /* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */ 116 struct gve_rx_data_queue { 117 union gve_rx_data_slot *data_ring; /* read by NIC */ 118 dma_addr_t data_bus; /* dma mapping of the slots */ 119 struct gve_rx_slot_page_info *page_info; /* page info of the buffers */ 120 struct gve_queue_page_list *qpl; /* qpl assigned to this queue */ 121 u8 raw_addressing; /* use raw_addressing? */ 122 }; 123 124 struct gve_priv; 125 126 /* RX buffer queue for posting buffers to HW. 127 * Each RX (completion) queue has a corresponding buffer queue. 128 */ 129 struct gve_rx_buf_queue_dqo { 130 struct gve_rx_desc_dqo *desc_ring; 131 dma_addr_t bus; 132 u32 head; /* Pointer to start cleaning buffers at. */ 133 u32 tail; /* Last posted buffer index + 1 */ 134 u32 mask; /* Mask for indices to the size of the ring */ 135 }; 136 137 /* RX completion queue to receive packets from HW. */ 138 struct gve_rx_compl_queue_dqo { 139 struct gve_rx_compl_desc_dqo *desc_ring; 140 dma_addr_t bus; 141 142 /* Number of slots which did not have a buffer posted yet. We should not 143 * post more buffers than the queue size to avoid HW overrunning the 144 * queue. 145 */ 146 int num_free_slots; 147 148 /* HW uses a "generation bit" to notify SW of new descriptors. When a 149 * descriptor's generation bit is different from the current generation, 150 * that descriptor is ready to be consumed by SW. 151 */ 152 u8 cur_gen_bit; 153 154 /* Pointer into desc_ring where the next completion descriptor will be 155 * received. 156 */ 157 u32 head; 158 u32 mask; /* Mask for indices to the size of the ring */ 159 }; 160 161 struct gve_header_buf { 162 u8 *data; 163 dma_addr_t addr; 164 }; 165 166 /* Stores state for tracking buffers posted to HW */ 167 struct gve_rx_buf_state_dqo { 168 /* The page posted to HW. */ 169 struct gve_rx_slot_page_info page_info; 170 171 /* The DMA address corresponding to `page_info`. */ 172 dma_addr_t addr; 173 174 /* Last offset into the page when it only had a single reference, at 175 * which point every other offset is free to be reused. 176 */ 177 u32 last_single_ref_offset; 178 179 /* Linked list index to next element in the list, or -1 if none */ 180 s16 next; 181 }; 182 183 /* `head` and `tail` are indices into an array, or -1 if empty. */ 184 struct gve_index_list { 185 s16 head; 186 s16 tail; 187 }; 188 189 /* A single received packet split across multiple buffers may be 190 * reconstructed using the information in this structure. 191 */ 192 struct gve_rx_ctx { 193 /* head and tail of skb chain for the current packet or NULL if none */ 194 struct sk_buff *skb_head; 195 struct sk_buff *skb_tail; 196 u32 total_size; 197 u8 frag_cnt; 198 bool drop_pkt; 199 }; 200 201 struct gve_rx_cnts { 202 u32 ok_pkt_bytes; 203 u16 ok_pkt_cnt; 204 u16 total_pkt_cnt; 205 u16 cont_pkt_cnt; 206 u16 desc_err_pkt_cnt; 207 }; 208 209 /* Contains datapath state used to represent an RX queue. */ 210 struct gve_rx_ring { 211 struct gve_priv *gve; 212 union { 213 /* GQI fields */ 214 struct { 215 struct gve_rx_desc_queue desc; 216 struct gve_rx_data_queue data; 217 218 /* threshold for posting new buffs and descs */ 219 u32 db_threshold; 220 u16 packet_buffer_size; 221 222 u32 qpl_copy_pool_mask; 223 u32 qpl_copy_pool_head; 224 struct gve_rx_slot_page_info *qpl_copy_pool; 225 }; 226 227 /* DQO fields. */ 228 struct { 229 struct gve_rx_buf_queue_dqo bufq; 230 struct gve_rx_compl_queue_dqo complq; 231 232 struct gve_rx_buf_state_dqo *buf_states; 233 u16 num_buf_states; 234 235 /* Linked list of gve_rx_buf_state_dqo. Index into 236 * buf_states, or -1 if empty. 237 */ 238 s16 free_buf_states; 239 240 /* Linked list of gve_rx_buf_state_dqo. Indexes into 241 * buf_states, or -1 if empty. 242 * 243 * This list contains buf_states which are pointing to 244 * valid buffers. 245 * 246 * We use a FIFO here in order to increase the 247 * probability that buffers can be reused by increasing 248 * the time between usages. 249 */ 250 struct gve_index_list recycled_buf_states; 251 252 /* Linked list of gve_rx_buf_state_dqo. Indexes into 253 * buf_states, or -1 if empty. 254 * 255 * This list contains buf_states which have buffers 256 * which cannot be reused yet. 257 */ 258 struct gve_index_list used_buf_states; 259 260 /* qpl assigned to this queue */ 261 struct gve_queue_page_list *qpl; 262 263 /* index into queue page list */ 264 u32 next_qpl_page_idx; 265 266 /* track number of used buffers */ 267 u16 used_buf_states_cnt; 268 269 /* Address info of the buffers for header-split */ 270 struct gve_header_buf hdr_bufs; 271 } dqo; 272 }; 273 274 u64 rbytes; /* free-running bytes received */ 275 u64 rx_hsplit_bytes; /* free-running header bytes received */ 276 u64 rpackets; /* free-running packets received */ 277 u32 cnt; /* free-running total number of completed packets */ 278 u32 fill_cnt; /* free-running total number of descs and buffs posted */ 279 u32 mask; /* masks the cnt and fill_cnt to the size of the ring */ 280 u64 rx_hsplit_pkt; /* free-running packets with headers split */ 281 u64 rx_copybreak_pkt; /* free-running count of copybreak packets */ 282 u64 rx_copied_pkt; /* free-running total number of copied packets */ 283 u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */ 284 u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */ 285 u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */ 286 /* free-running count of unsplit packets due to header buffer overflow or hdr_len is 0 */ 287 u64 rx_hsplit_unsplit_pkt; 288 u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */ 289 u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */ 290 u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */ 291 u64 rx_frag_alloc_cnt; /* free-running count of rx page allocations */ 292 u64 xdp_tx_errors; 293 u64 xdp_redirect_errors; 294 u64 xdp_alloc_fails; 295 u64 xdp_actions[GVE_XDP_ACTIONS]; 296 u32 q_num; /* queue index */ 297 u32 ntfy_id; /* notification block index */ 298 struct gve_queue_resources *q_resources; /* head and tail pointer idx */ 299 dma_addr_t q_resources_bus; /* dma address for the queue resources */ 300 struct u64_stats_sync statss; /* sync stats for 32bit archs */ 301 302 struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. */ 303 304 /* XDP stuff */ 305 struct xdp_rxq_info xdp_rxq; 306 struct xdp_rxq_info xsk_rxq; 307 struct xsk_buff_pool *xsk_pool; 308 struct page_frag_cache page_cache; /* Page cache to allocate XDP frames */ 309 }; 310 311 /* A TX desc ring entry */ 312 union gve_tx_desc { 313 struct gve_tx_pkt_desc pkt; /* first desc for a packet */ 314 struct gve_tx_mtd_desc mtd; /* optional metadata descriptor */ 315 struct gve_tx_seg_desc seg; /* subsequent descs for a packet */ 316 }; 317 318 /* Tracks the memory in the fifo occupied by a segment of a packet */ 319 struct gve_tx_iovec { 320 u32 iov_offset; /* offset into this segment */ 321 u32 iov_len; /* length */ 322 u32 iov_padding; /* padding associated with this segment */ 323 }; 324 325 /* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc 326 * ring entry but only used for a pkt_desc not a seg_desc 327 */ 328 struct gve_tx_buffer_state { 329 union { 330 struct sk_buff *skb; /* skb for this pkt */ 331 struct xdp_frame *xdp_frame; /* xdp_frame */ 332 }; 333 struct { 334 u16 size; /* size of xmitted xdp pkt */ 335 u8 is_xsk; /* xsk buff */ 336 } xdp; 337 union { 338 struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */ 339 struct { 340 DEFINE_DMA_UNMAP_ADDR(dma); 341 DEFINE_DMA_UNMAP_LEN(len); 342 }; 343 }; 344 }; 345 346 /* A TX buffer - each queue has one */ 347 struct gve_tx_fifo { 348 void *base; /* address of base of FIFO */ 349 u32 size; /* total size */ 350 atomic_t available; /* how much space is still available */ 351 u32 head; /* offset to write at */ 352 struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */ 353 }; 354 355 /* TX descriptor for DQO format */ 356 union gve_tx_desc_dqo { 357 struct gve_tx_pkt_desc_dqo pkt; 358 struct gve_tx_tso_context_desc_dqo tso_ctx; 359 struct gve_tx_general_context_desc_dqo general_ctx; 360 }; 361 362 enum gve_packet_state { 363 /* Packet is in free list, available to be allocated. 364 * This should always be zero since state is not explicitly initialized. 365 */ 366 GVE_PACKET_STATE_UNALLOCATED, 367 /* Packet is expecting a regular data completion or miss completion */ 368 GVE_PACKET_STATE_PENDING_DATA_COMPL, 369 /* Packet has received a miss completion and is expecting a 370 * re-injection completion. 371 */ 372 GVE_PACKET_STATE_PENDING_REINJECT_COMPL, 373 /* No valid completion received within the specified timeout. */ 374 GVE_PACKET_STATE_TIMED_OUT_COMPL, 375 }; 376 377 struct gve_tx_pending_packet_dqo { 378 struct sk_buff *skb; /* skb for this packet */ 379 380 /* 0th element corresponds to the linear portion of `skb`, should be 381 * unmapped with `dma_unmap_single`. 382 * 383 * All others correspond to `skb`'s frags and should be unmapped with 384 * `dma_unmap_page`. 385 */ 386 union { 387 struct { 388 DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]); 389 DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]); 390 }; 391 s16 tx_qpl_buf_ids[GVE_MAX_TX_BUFS_PER_PKT]; 392 }; 393 394 u16 num_bufs; 395 396 /* Linked list index to next element in the list, or -1 if none */ 397 s16 next; 398 399 /* Linked list index to prev element in the list, or -1 if none. 400 * Used for tracking either outstanding miss completions or prematurely 401 * freed packets. 402 */ 403 s16 prev; 404 405 /* Identifies the current state of the packet as defined in 406 * `enum gve_packet_state`. 407 */ 408 u8 state; 409 410 /* If packet is an outstanding miss completion, then the packet is 411 * freed if the corresponding re-injection completion is not received 412 * before kernel jiffies exceeds timeout_jiffies. 413 */ 414 unsigned long timeout_jiffies; 415 }; 416 417 /* Contains datapath state used to represent a TX queue. */ 418 struct gve_tx_ring { 419 /* Cacheline 0 -- Accessed & dirtied during transmit */ 420 union { 421 /* GQI fields */ 422 struct { 423 struct gve_tx_fifo tx_fifo; 424 u32 req; /* driver tracked head pointer */ 425 u32 done; /* driver tracked tail pointer */ 426 }; 427 428 /* DQO fields. */ 429 struct { 430 /* Linked list of gve_tx_pending_packet_dqo. Index into 431 * pending_packets, or -1 if empty. 432 * 433 * This is a consumer list owned by the TX path. When it 434 * runs out, the producer list is stolen from the 435 * completion handling path 436 * (dqo_compl.free_pending_packets). 437 */ 438 s16 free_pending_packets; 439 440 /* Cached value of `dqo_compl.hw_tx_head` */ 441 u32 head; 442 u32 tail; /* Last posted buffer index + 1 */ 443 444 /* Index of the last descriptor with "report event" bit 445 * set. 446 */ 447 u32 last_re_idx; 448 449 /* free running number of packet buf descriptors posted */ 450 u16 posted_packet_desc_cnt; 451 /* free running number of packet buf descriptors completed */ 452 u16 completed_packet_desc_cnt; 453 454 /* QPL fields */ 455 struct { 456 /* Linked list of gve_tx_buf_dqo. Index into 457 * tx_qpl_buf_next, or -1 if empty. 458 * 459 * This is a consumer list owned by the TX path. When it 460 * runs out, the producer list is stolen from the 461 * completion handling path 462 * (dqo_compl.free_tx_qpl_buf_head). 463 */ 464 s16 free_tx_qpl_buf_head; 465 466 /* Free running count of the number of QPL tx buffers 467 * allocated 468 */ 469 u32 alloc_tx_qpl_buf_cnt; 470 471 /* Cached value of `dqo_compl.free_tx_qpl_buf_cnt` */ 472 u32 free_tx_qpl_buf_cnt; 473 }; 474 } dqo_tx; 475 }; 476 477 /* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */ 478 union { 479 /* GQI fields */ 480 struct { 481 /* Spinlock for when cleanup in progress */ 482 spinlock_t clean_lock; 483 /* Spinlock for XDP tx traffic */ 484 spinlock_t xdp_lock; 485 }; 486 487 /* DQO fields. */ 488 struct { 489 u32 head; /* Last read on compl_desc */ 490 491 /* Tracks the current gen bit of compl_q */ 492 u8 cur_gen_bit; 493 494 /* Linked list of gve_tx_pending_packet_dqo. Index into 495 * pending_packets, or -1 if empty. 496 * 497 * This is the producer list, owned by the completion 498 * handling path. When the consumer list 499 * (dqo_tx.free_pending_packets) is runs out, this list 500 * will be stolen. 501 */ 502 atomic_t free_pending_packets; 503 504 /* Last TX ring index fetched by HW */ 505 atomic_t hw_tx_head; 506 507 /* List to track pending packets which received a miss 508 * completion but not a corresponding reinjection. 509 */ 510 struct gve_index_list miss_completions; 511 512 /* List to track pending packets that were completed 513 * before receiving a valid completion because they 514 * reached a specified timeout. 515 */ 516 struct gve_index_list timed_out_completions; 517 518 /* QPL fields */ 519 struct { 520 /* Linked list of gve_tx_buf_dqo. Index into 521 * tx_qpl_buf_next, or -1 if empty. 522 * 523 * This is the producer list, owned by the completion 524 * handling path. When the consumer list 525 * (dqo_tx.free_tx_qpl_buf_head) is runs out, this list 526 * will be stolen. 527 */ 528 atomic_t free_tx_qpl_buf_head; 529 530 /* Free running count of the number of tx buffers 531 * freed 532 */ 533 atomic_t free_tx_qpl_buf_cnt; 534 }; 535 } dqo_compl; 536 } ____cacheline_aligned; 537 u64 pkt_done; /* free-running - total packets completed */ 538 u64 bytes_done; /* free-running - total bytes completed */ 539 u64 dropped_pkt; /* free-running - total packets dropped */ 540 u64 dma_mapping_error; /* count of dma mapping errors */ 541 542 /* Cacheline 2 -- Read-mostly fields */ 543 union { 544 /* GQI fields */ 545 struct { 546 union gve_tx_desc *desc; 547 548 /* Maps 1:1 to a desc */ 549 struct gve_tx_buffer_state *info; 550 }; 551 552 /* DQO fields. */ 553 struct { 554 union gve_tx_desc_dqo *tx_ring; 555 struct gve_tx_compl_desc *compl_ring; 556 557 struct gve_tx_pending_packet_dqo *pending_packets; 558 s16 num_pending_packets; 559 560 u32 complq_mask; /* complq size is complq_mask + 1 */ 561 562 /* QPL fields */ 563 struct { 564 /* qpl assigned to this queue */ 565 struct gve_queue_page_list *qpl; 566 567 /* Each QPL page is divided into TX bounce buffers 568 * of size GVE_TX_BUF_SIZE_DQO. tx_qpl_buf_next is 569 * an array to manage linked lists of TX buffers. 570 * An entry j at index i implies that j'th buffer 571 * is next on the list after i 572 */ 573 s16 *tx_qpl_buf_next; 574 u32 num_tx_qpl_bufs; 575 }; 576 } dqo; 577 } ____cacheline_aligned; 578 struct netdev_queue *netdev_txq; 579 struct gve_queue_resources *q_resources; /* head and tail pointer idx */ 580 struct device *dev; 581 u32 mask; /* masks req and done down to queue size */ 582 u8 raw_addressing; /* use raw_addressing? */ 583 584 /* Slow-path fields */ 585 u32 q_num ____cacheline_aligned; /* queue idx */ 586 u32 stop_queue; /* count of queue stops */ 587 u32 wake_queue; /* count of queue wakes */ 588 u32 queue_timeout; /* count of queue timeouts */ 589 u32 ntfy_id; /* notification block index */ 590 u32 last_kick_msec; /* Last time the queue was kicked */ 591 dma_addr_t bus; /* dma address of the descr ring */ 592 dma_addr_t q_resources_bus; /* dma address of the queue resources */ 593 dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */ 594 struct u64_stats_sync statss; /* sync stats for 32bit archs */ 595 struct xsk_buff_pool *xsk_pool; 596 u32 xdp_xsk_wakeup; 597 u32 xdp_xsk_done; 598 u64 xdp_xsk_sent; 599 u64 xdp_xmit; 600 u64 xdp_xmit_errors; 601 } ____cacheline_aligned; 602 603 /* Wraps the info for one irq including the napi struct and the queues 604 * associated with that irq. 605 */ 606 struct gve_notify_block { 607 __be32 *irq_db_index; /* pointer to idx into Bar2 */ 608 char name[IFNAMSIZ + 16]; /* name registered with the kernel */ 609 struct napi_struct napi; /* kernel napi struct for this block */ 610 struct gve_priv *priv; 611 struct gve_tx_ring *tx; /* tx rings on this block */ 612 struct gve_rx_ring *rx; /* rx rings on this block */ 613 }; 614 615 /* Tracks allowed and current queue settings */ 616 struct gve_queue_config { 617 u16 max_queues; 618 u16 num_queues; /* current */ 619 }; 620 621 /* Tracks the available and used qpl IDs */ 622 struct gve_qpl_config { 623 u32 qpl_map_size; /* map memory size */ 624 unsigned long *qpl_id_map; /* bitmap of used qpl ids */ 625 }; 626 627 struct gve_irq_db { 628 __be32 index; 629 } ____cacheline_aligned; 630 631 struct gve_ptype { 632 u8 l3_type; /* `gve_l3_type` in gve_adminq.h */ 633 u8 l4_type; /* `gve_l4_type` in gve_adminq.h */ 634 }; 635 636 struct gve_ptype_lut { 637 struct gve_ptype ptypes[GVE_NUM_PTYPES]; 638 }; 639 640 /* Parameters for allocating queue page lists */ 641 struct gve_qpls_alloc_cfg { 642 struct gve_qpl_config *qpl_cfg; 643 struct gve_queue_config *tx_cfg; 644 struct gve_queue_config *rx_cfg; 645 646 u16 num_xdp_queues; 647 bool raw_addressing; 648 bool is_gqi; 649 650 /* Allocated resources are returned here */ 651 struct gve_queue_page_list *qpls; 652 }; 653 654 /* Parameters for allocating resources for tx queues */ 655 struct gve_tx_alloc_rings_cfg { 656 struct gve_queue_config *qcfg; 657 658 /* qpls and qpl_cfg must already be allocated */ 659 struct gve_queue_page_list *qpls; 660 struct gve_qpl_config *qpl_cfg; 661 662 u16 ring_size; 663 u16 start_idx; 664 u16 num_rings; 665 bool raw_addressing; 666 667 /* Allocated resources are returned here */ 668 struct gve_tx_ring *tx; 669 }; 670 671 /* Parameters for allocating resources for rx queues */ 672 struct gve_rx_alloc_rings_cfg { 673 /* tx config is also needed to determine QPL ids */ 674 struct gve_queue_config *qcfg; 675 struct gve_queue_config *qcfg_tx; 676 677 /* qpls and qpl_cfg must already be allocated */ 678 struct gve_queue_page_list *qpls; 679 struct gve_qpl_config *qpl_cfg; 680 681 u16 ring_size; 682 u16 packet_buffer_size; 683 bool raw_addressing; 684 bool enable_header_split; 685 686 /* Allocated resources are returned here */ 687 struct gve_rx_ring *rx; 688 }; 689 690 /* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value 691 * when the entire configure_device_resources command is zeroed out and the 692 * queue_format is not specified. 693 */ 694 enum gve_queue_format { 695 GVE_QUEUE_FORMAT_UNSPECIFIED = 0x0, 696 GVE_GQI_RDA_FORMAT = 0x1, 697 GVE_GQI_QPL_FORMAT = 0x2, 698 GVE_DQO_RDA_FORMAT = 0x3, 699 GVE_DQO_QPL_FORMAT = 0x4, 700 }; 701 702 struct gve_priv { 703 struct net_device *dev; 704 struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */ 705 struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */ 706 struct gve_queue_page_list *qpls; /* array of num qpls */ 707 struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */ 708 struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */ 709 dma_addr_t irq_db_indices_bus; 710 struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */ 711 char mgmt_msix_name[IFNAMSIZ + 16]; 712 u32 mgmt_msix_idx; 713 __be32 *counter_array; /* array of num_event_counters */ 714 dma_addr_t counter_array_bus; 715 716 u16 num_event_counters; 717 u16 tx_desc_cnt; /* num desc per ring */ 718 u16 rx_desc_cnt; /* num desc per ring */ 719 u16 max_tx_desc_cnt; 720 u16 max_rx_desc_cnt; 721 u16 min_tx_desc_cnt; 722 u16 min_rx_desc_cnt; 723 bool modify_ring_size_enabled; 724 bool default_min_ring_size; 725 u16 tx_pages_per_qpl; /* Suggested number of pages per qpl for TX queues by NIC */ 726 u64 max_registered_pages; 727 u64 num_registered_pages; /* num pages registered with NIC */ 728 struct bpf_prog *xdp_prog; /* XDP BPF program */ 729 u32 rx_copybreak; /* copy packets smaller than this */ 730 u16 default_num_queues; /* default num queues to set up */ 731 732 u16 num_xdp_queues; 733 struct gve_queue_config tx_cfg; 734 struct gve_queue_config rx_cfg; 735 struct gve_qpl_config qpl_cfg; /* map used QPL ids */ 736 u32 num_ntfy_blks; /* spilt between TX and RX so must be even */ 737 738 struct gve_registers __iomem *reg_bar0; /* see gve_register.h */ 739 __be32 __iomem *db_bar2; /* "array" of doorbells */ 740 u32 msg_enable; /* level for netif* netdev print macros */ 741 struct pci_dev *pdev; 742 743 /* metrics */ 744 u32 tx_timeo_cnt; 745 746 /* Admin queue - see gve_adminq.h*/ 747 union gve_adminq_command *adminq; 748 dma_addr_t adminq_bus_addr; 749 struct dma_pool *adminq_pool; 750 u32 adminq_mask; /* masks prod_cnt to adminq size */ 751 u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */ 752 u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */ 753 u32 adminq_timeouts; /* free-running count of AQ cmds timeouts */ 754 /* free-running count of per AQ cmd executed */ 755 u32 adminq_describe_device_cnt; 756 u32 adminq_cfg_device_resources_cnt; 757 u32 adminq_register_page_list_cnt; 758 u32 adminq_unregister_page_list_cnt; 759 u32 adminq_create_tx_queue_cnt; 760 u32 adminq_create_rx_queue_cnt; 761 u32 adminq_destroy_tx_queue_cnt; 762 u32 adminq_destroy_rx_queue_cnt; 763 u32 adminq_dcfg_device_resources_cnt; 764 u32 adminq_set_driver_parameter_cnt; 765 u32 adminq_report_stats_cnt; 766 u32 adminq_report_link_speed_cnt; 767 u32 adminq_get_ptype_map_cnt; 768 u32 adminq_verify_driver_compatibility_cnt; 769 770 /* Global stats */ 771 u32 interface_up_cnt; /* count of times interface turned up since last reset */ 772 u32 interface_down_cnt; /* count of times interface turned down since last reset */ 773 u32 reset_cnt; /* count of reset */ 774 u32 page_alloc_fail; /* count of page alloc fails */ 775 u32 dma_mapping_error; /* count of dma mapping errors */ 776 u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */ 777 u32 suspend_cnt; /* count of times suspended */ 778 u32 resume_cnt; /* count of times resumed */ 779 struct workqueue_struct *gve_wq; 780 struct work_struct service_task; 781 struct work_struct stats_report_task; 782 unsigned long service_task_flags; 783 unsigned long state_flags; 784 785 struct gve_stats_report *stats_report; 786 u64 stats_report_len; 787 dma_addr_t stats_report_bus; /* dma address for the stats report */ 788 unsigned long ethtool_flags; 789 790 unsigned long stats_report_timer_period; 791 struct timer_list stats_report_timer; 792 793 /* Gvnic device link speed from hypervisor. */ 794 u64 link_speed; 795 bool up_before_suspend; /* True if dev was up before suspend */ 796 797 struct gve_ptype_lut *ptype_lut_dqo; 798 799 /* Must be a power of two. */ 800 u16 data_buffer_size_dqo; 801 u16 max_rx_buffer_size; /* device limit */ 802 803 enum gve_queue_format queue_format; 804 805 /* Interrupt coalescing settings */ 806 u32 tx_coalesce_usecs; 807 u32 rx_coalesce_usecs; 808 809 u16 header_buf_size; /* device configured, header-split supported if non-zero */ 810 bool header_split_enabled; /* True if the header split is enabled by the user */ 811 }; 812 813 enum gve_service_task_flags_bit { 814 GVE_PRIV_FLAGS_DO_RESET = 1, 815 GVE_PRIV_FLAGS_RESET_IN_PROGRESS = 2, 816 GVE_PRIV_FLAGS_PROBE_IN_PROGRESS = 3, 817 GVE_PRIV_FLAGS_DO_REPORT_STATS = 4, 818 }; 819 820 enum gve_state_flags_bit { 821 GVE_PRIV_FLAGS_ADMIN_QUEUE_OK = 1, 822 GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK = 2, 823 GVE_PRIV_FLAGS_DEVICE_RINGS_OK = 3, 824 GVE_PRIV_FLAGS_NAPI_ENABLED = 4, 825 }; 826 827 enum gve_ethtool_flags_bit { 828 GVE_PRIV_FLAGS_REPORT_STATS = 0, 829 }; 830 831 static inline bool gve_get_do_reset(struct gve_priv *priv) 832 { 833 return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags); 834 } 835 836 static inline void gve_set_do_reset(struct gve_priv *priv) 837 { 838 set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags); 839 } 840 841 static inline void gve_clear_do_reset(struct gve_priv *priv) 842 { 843 clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags); 844 } 845 846 static inline bool gve_get_reset_in_progress(struct gve_priv *priv) 847 { 848 return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, 849 &priv->service_task_flags); 850 } 851 852 static inline void gve_set_reset_in_progress(struct gve_priv *priv) 853 { 854 set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags); 855 } 856 857 static inline void gve_clear_reset_in_progress(struct gve_priv *priv) 858 { 859 clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags); 860 } 861 862 static inline bool gve_get_probe_in_progress(struct gve_priv *priv) 863 { 864 return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, 865 &priv->service_task_flags); 866 } 867 868 static inline void gve_set_probe_in_progress(struct gve_priv *priv) 869 { 870 set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags); 871 } 872 873 static inline void gve_clear_probe_in_progress(struct gve_priv *priv) 874 { 875 clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags); 876 } 877 878 static inline bool gve_get_do_report_stats(struct gve_priv *priv) 879 { 880 return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, 881 &priv->service_task_flags); 882 } 883 884 static inline void gve_set_do_report_stats(struct gve_priv *priv) 885 { 886 set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags); 887 } 888 889 static inline void gve_clear_do_report_stats(struct gve_priv *priv) 890 { 891 clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags); 892 } 893 894 static inline bool gve_get_admin_queue_ok(struct gve_priv *priv) 895 { 896 return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags); 897 } 898 899 static inline void gve_set_admin_queue_ok(struct gve_priv *priv) 900 { 901 set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags); 902 } 903 904 static inline void gve_clear_admin_queue_ok(struct gve_priv *priv) 905 { 906 clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags); 907 } 908 909 static inline bool gve_get_device_resources_ok(struct gve_priv *priv) 910 { 911 return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags); 912 } 913 914 static inline void gve_set_device_resources_ok(struct gve_priv *priv) 915 { 916 set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags); 917 } 918 919 static inline void gve_clear_device_resources_ok(struct gve_priv *priv) 920 { 921 clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags); 922 } 923 924 static inline bool gve_get_device_rings_ok(struct gve_priv *priv) 925 { 926 return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags); 927 } 928 929 static inline void gve_set_device_rings_ok(struct gve_priv *priv) 930 { 931 set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags); 932 } 933 934 static inline void gve_clear_device_rings_ok(struct gve_priv *priv) 935 { 936 clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags); 937 } 938 939 static inline bool gve_get_napi_enabled(struct gve_priv *priv) 940 { 941 return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags); 942 } 943 944 static inline void gve_set_napi_enabled(struct gve_priv *priv) 945 { 946 set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags); 947 } 948 949 static inline void gve_clear_napi_enabled(struct gve_priv *priv) 950 { 951 clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags); 952 } 953 954 static inline bool gve_get_report_stats(struct gve_priv *priv) 955 { 956 return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags); 957 } 958 959 static inline void gve_clear_report_stats(struct gve_priv *priv) 960 { 961 clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags); 962 } 963 964 /* Returns the address of the ntfy_blocks irq doorbell 965 */ 966 static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv, 967 struct gve_notify_block *block) 968 { 969 return &priv->db_bar2[be32_to_cpu(*block->irq_db_index)]; 970 } 971 972 /* Returns the index into ntfy_blocks of the given tx ring's block 973 */ 974 static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx) 975 { 976 return queue_idx; 977 } 978 979 /* Returns the index into ntfy_blocks of the given rx ring's block 980 */ 981 static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx) 982 { 983 return (priv->num_ntfy_blks / 2) + queue_idx; 984 } 985 986 static inline bool gve_is_qpl(struct gve_priv *priv) 987 { 988 return priv->queue_format == GVE_GQI_QPL_FORMAT || 989 priv->queue_format == GVE_DQO_QPL_FORMAT; 990 } 991 992 /* Returns the number of tx queue page lists */ 993 static inline u32 gve_num_tx_qpls(const struct gve_queue_config *tx_cfg, 994 int num_xdp_queues, 995 bool is_qpl) 996 { 997 if (!is_qpl) 998 return 0; 999 return tx_cfg->num_queues + num_xdp_queues; 1000 } 1001 1002 /* Returns the number of XDP tx queue page lists 1003 */ 1004 static inline u32 gve_num_xdp_qpls(struct gve_priv *priv) 1005 { 1006 if (priv->queue_format != GVE_GQI_QPL_FORMAT) 1007 return 0; 1008 1009 return priv->num_xdp_queues; 1010 } 1011 1012 /* Returns the number of rx queue page lists */ 1013 static inline u32 gve_num_rx_qpls(const struct gve_queue_config *rx_cfg, 1014 bool is_qpl) 1015 { 1016 if (!is_qpl) 1017 return 0; 1018 return rx_cfg->num_queues; 1019 } 1020 1021 static inline u32 gve_tx_qpl_id(struct gve_priv *priv, int tx_qid) 1022 { 1023 return tx_qid; 1024 } 1025 1026 static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid) 1027 { 1028 return priv->tx_cfg.max_queues + rx_qid; 1029 } 1030 1031 /* Returns the index into priv->qpls where a certain rx queue's QPL resides */ 1032 static inline u32 gve_get_rx_qpl_id(const struct gve_queue_config *tx_cfg, int rx_qid) 1033 { 1034 return tx_cfg->max_queues + rx_qid; 1035 } 1036 1037 static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv) 1038 { 1039 return gve_tx_qpl_id(priv, 0); 1040 } 1041 1042 /* Returns the index into priv->qpls where the first rx queue's QPL resides */ 1043 static inline u32 gve_rx_start_qpl_id(const struct gve_queue_config *tx_cfg) 1044 { 1045 return gve_get_rx_qpl_id(tx_cfg, 0); 1046 } 1047 1048 static inline u32 gve_get_rx_pages_per_qpl_dqo(u32 rx_desc_cnt) 1049 { 1050 /* For DQO, page count should be more than ring size for 1051 * out-of-order completions. Set it to two times of ring size. 1052 */ 1053 return 2 * rx_desc_cnt; 1054 } 1055 1056 /* Returns a pointer to the next available tx qpl in the list of qpls */ 1057 static inline 1058 struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_tx_alloc_rings_cfg *cfg, 1059 int tx_qid) 1060 { 1061 /* QPL already in use */ 1062 if (test_bit(tx_qid, cfg->qpl_cfg->qpl_id_map)) 1063 return NULL; 1064 set_bit(tx_qid, cfg->qpl_cfg->qpl_id_map); 1065 return &cfg->qpls[tx_qid]; 1066 } 1067 1068 /* Returns a pointer to the next available rx qpl in the list of qpls */ 1069 static inline 1070 struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_rx_alloc_rings_cfg *cfg, 1071 int rx_qid) 1072 { 1073 int id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx_qid); 1074 /* QPL already in use */ 1075 if (test_bit(id, cfg->qpl_cfg->qpl_id_map)) 1076 return NULL; 1077 set_bit(id, cfg->qpl_cfg->qpl_id_map); 1078 return &cfg->qpls[id]; 1079 } 1080 1081 /* Unassigns the qpl with the given id */ 1082 static inline void gve_unassign_qpl(struct gve_qpl_config *qpl_cfg, int id) 1083 { 1084 clear_bit(id, qpl_cfg->qpl_id_map); 1085 } 1086 1087 /* Returns the correct dma direction for tx and rx qpls */ 1088 static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv, 1089 int id) 1090 { 1091 if (id < gve_rx_start_qpl_id(&priv->tx_cfg)) 1092 return DMA_TO_DEVICE; 1093 else 1094 return DMA_FROM_DEVICE; 1095 } 1096 1097 static inline bool gve_is_gqi(struct gve_priv *priv) 1098 { 1099 return priv->queue_format == GVE_GQI_RDA_FORMAT || 1100 priv->queue_format == GVE_GQI_QPL_FORMAT; 1101 } 1102 1103 static inline u32 gve_num_tx_queues(struct gve_priv *priv) 1104 { 1105 return priv->tx_cfg.num_queues + priv->num_xdp_queues; 1106 } 1107 1108 static inline u32 gve_xdp_tx_queue_id(struct gve_priv *priv, u32 queue_id) 1109 { 1110 return priv->tx_cfg.num_queues + queue_id; 1111 } 1112 1113 static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv) 1114 { 1115 return gve_xdp_tx_queue_id(priv, 0); 1116 } 1117 1118 /* gqi napi handler defined in gve_main.c */ 1119 int gve_napi_poll(struct napi_struct *napi, int budget); 1120 1121 /* buffers */ 1122 int gve_alloc_page(struct gve_priv *priv, struct device *dev, 1123 struct page **page, dma_addr_t *dma, 1124 enum dma_data_direction, gfp_t gfp_flags); 1125 void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma, 1126 enum dma_data_direction); 1127 /* tx handling */ 1128 netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev); 1129 int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, 1130 u32 flags); 1131 int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx, 1132 void *data, int len, void *frame_p); 1133 void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid); 1134 bool gve_tx_poll(struct gve_notify_block *block, int budget); 1135 bool gve_xdp_poll(struct gve_notify_block *block, int budget); 1136 int gve_tx_alloc_rings_gqi(struct gve_priv *priv, 1137 struct gve_tx_alloc_rings_cfg *cfg); 1138 void gve_tx_free_rings_gqi(struct gve_priv *priv, 1139 struct gve_tx_alloc_rings_cfg *cfg); 1140 void gve_tx_start_ring_gqi(struct gve_priv *priv, int idx); 1141 void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx); 1142 u32 gve_tx_load_event_counter(struct gve_priv *priv, 1143 struct gve_tx_ring *tx); 1144 bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx); 1145 /* rx handling */ 1146 void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx); 1147 int gve_rx_poll(struct gve_notify_block *block, int budget); 1148 bool gve_rx_work_pending(struct gve_rx_ring *rx); 1149 int gve_rx_alloc_rings(struct gve_priv *priv); 1150 int gve_rx_alloc_rings_gqi(struct gve_priv *priv, 1151 struct gve_rx_alloc_rings_cfg *cfg); 1152 void gve_rx_free_rings_gqi(struct gve_priv *priv, 1153 struct gve_rx_alloc_rings_cfg *cfg); 1154 void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx); 1155 void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx); 1156 u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hplit); 1157 bool gve_header_split_supported(const struct gve_priv *priv); 1158 int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split); 1159 /* Reset */ 1160 void gve_schedule_reset(struct gve_priv *priv); 1161 int gve_reset(struct gve_priv *priv, bool attempt_teardown); 1162 void gve_get_curr_alloc_cfgs(struct gve_priv *priv, 1163 struct gve_qpls_alloc_cfg *qpls_alloc_cfg, 1164 struct gve_tx_alloc_rings_cfg *tx_alloc_cfg, 1165 struct gve_rx_alloc_rings_cfg *rx_alloc_cfg); 1166 int gve_adjust_config(struct gve_priv *priv, 1167 struct gve_qpls_alloc_cfg *qpls_alloc_cfg, 1168 struct gve_tx_alloc_rings_cfg *tx_alloc_cfg, 1169 struct gve_rx_alloc_rings_cfg *rx_alloc_cfg); 1170 int gve_adjust_queues(struct gve_priv *priv, 1171 struct gve_queue_config new_rx_config, 1172 struct gve_queue_config new_tx_config); 1173 /* report stats handling */ 1174 void gve_handle_report_stats(struct gve_priv *priv); 1175 /* exported by ethtool.c */ 1176 extern const struct ethtool_ops gve_ethtool_ops; 1177 /* needed by ethtool */ 1178 extern char gve_driver_name[]; 1179 extern const char gve_version_str[]; 1180 #endif /* _GVE_H_ */ 1181