/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
 * Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#ifndef _GVE_H_
#define _GVE_H_

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/u64_stats_sync.h>

#include "gve_desc.h"
#include "gve_desc_dqo.h"

#ifndef PCI_VENDOR_ID_GOOGLE
#define PCI_VENDOR_ID_GOOGLE	0x1ae0
#endif

#define PCI_DEV_ID_GVNIC	0x0042

#define GVE_REGISTER_BAR	0
#define GVE_DOORBELL_BAR	2

/* Driver can alloc up to 2 segments for the header and 2 for the payload. */
#define GVE_TX_MAX_IOVEC 4
/* 1 for management, 1 for rx, 1 for tx */
#define GVE_MIN_MSIX 3

/* Numbers of gve tx/rx stats in stats report. */
#define GVE_TX_STATS_REPORT_NUM	6
#define GVE_RX_STATS_REPORT_NUM	2

/* Interval to schedule a stats report update, 20000ms. */
#define GVE_STATS_REPORT_TIMER_PERIOD	20000

/* Numbers of NIC tx/rx stats in stats report. */
#define NIC_TX_STATS_REPORT_NUM	0
#define NIC_RX_STATS_REPORT_NUM	4

#define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))

/* PTYPEs are always 10 bits. */
#define GVE_NUM_PTYPES	1024

#define GVE_RX_BUFFER_SIZE_DQO 2048

/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
	struct gve_rx_desc *desc_ring; /* the descriptor ring */
	dma_addr_t bus; /* the bus for the desc_ring */
	u8 seqno; /* the next expected seqno for this desc */
};

/* The page info for a single slot in the RX data queue */
struct gve_rx_slot_page_info {
	struct page *page;
	void *page_address;
	u32 page_offset; /* offset to write to in page */
	int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
	u8 can_flip;
};

/* A list of pages registered with the device during setup and used by a queue
 * as buffers
 */
struct gve_queue_page_list {
	u32 id; /* unique id */
	u32 num_entries;
	struct page **pages; /* list of num_entries pages */
	dma_addr_t *page_buses; /* the dma addrs of the pages */
};

/* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */
struct gve_rx_data_queue {
	union gve_rx_data_slot *data_ring; /* read by NIC */
	dma_addr_t data_bus; /* dma mapping of the slots */
	struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
	struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
	u8 raw_addressing; /* use raw_addressing? */
};

struct gve_priv;

/* RX buffer queue for posting buffers to HW.
 * Each RX (completion) queue has a corresponding buffer queue.
 */
struct gve_rx_buf_queue_dqo {
	struct gve_rx_desc_dqo *desc_ring;
	dma_addr_t bus;
	u32 head; /* Pointer to start cleaning buffers at. */
	u32 tail; /* Last posted buffer index + 1 */
	u32 mask; /* Mask for indices to the size of the ring */
};

/* RX completion queue to receive packets from HW. */
struct gve_rx_compl_queue_dqo {
	struct gve_rx_compl_desc_dqo *desc_ring;
	dma_addr_t bus;

	/* Number of slots which did not have a buffer posted yet. We should not
	 * post more buffers than the queue size to avoid HW overrunning the
	 * queue.
	 */
	int num_free_slots;

	/* HW uses a "generation bit" to notify SW of new descriptors. When a
	 * descriptor's generation bit is different from the current generation,
	 * that descriptor is ready to be consumed by SW.
	 */
	u8 cur_gen_bit;

	/* Pointer into desc_ring where the next completion descriptor will be
	 * received.
	 */
	u32 head;
	u32 mask; /* Mask for indices to the size of the ring */
};
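
/* A minimal sketch of how a consumer might use the generation bit described
 * above (illustrative only, not driver code; it assumes the completion
 * descriptor exposes the bit as a `generation` field, which comes from
 * gve_desc_dqo.h rather than this header):
 *
 *	struct gve_rx_compl_desc_dqo *desc = &complq->desc_ring[complq->head];
 *
 *	if (desc->generation == complq->cur_gen_bit)
 *		return;				// nothing new posted by HW
 *	// ... process the completion descriptor ...
 *	complq->head = (complq->head + 1) & complq->mask;
 *	if (complq->head == 0)			// wrapped around the ring
 *		complq->cur_gen_bit ^= 1;	// HW writes the opposite bit next pass
 */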

/* Stores state for tracking buffers posted to HW */
struct gve_rx_buf_state_dqo {
	/* The page posted to HW. */
	struct gve_rx_slot_page_info page_info;

	/* The DMA address corresponding to `page_info`. */
	dma_addr_t addr;

	/* Last offset into the page when it only had a single reference, at
	 * which point every other offset is free to be reused.
	 */
	u32 last_single_ref_offset;

	/* Linked list index to next element in the list, or -1 if none */
	s16 next;
};

/* `head` and `tail` are indices into an array, or -1 if empty. */
struct gve_index_list {
	s16 head;
	s16 tail;
};

/* Contains datapath state used to represent an RX queue. */
struct gve_rx_ring {
	struct gve_priv *gve;
	union {
		/* GQI fields */
		struct {
			struct gve_rx_desc_queue desc;
			struct gve_rx_data_queue data;

			/* threshold for posting new buffs and descs */
			u32 db_threshold;
		};

		/* DQO fields. */
		struct {
			struct gve_rx_buf_queue_dqo bufq;
			struct gve_rx_compl_queue_dqo complq;

			struct gve_rx_buf_state_dqo *buf_states;
			u16 num_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Index into
			 * buf_states, or -1 if empty.
			 */
			s16 free_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indexes into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which are pointing to
			 * valid buffers.
			 *
			 * We use a FIFO here in order to increase the
			 * probability that buffers can be reused by increasing
			 * the time between usages.
			 */
			struct gve_index_list recycled_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indexes into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which have buffers
			 * which cannot be reused yet.
			 */
			struct gve_index_list used_buf_states;
		} dqo;
	};

	u64 rbytes; /* free-running bytes received */
	u64 rpackets; /* free-running packets received */
	u32 cnt; /* free-running total number of completed packets */
	u32 fill_cnt; /* free-running total number of descs and buffs posted */
	u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
	u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
	u64 rx_copied_pkt; /* free-running total number of copied packets */
	u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
	u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
	u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
	u32 q_num; /* queue index */
	u32 ntfy_id; /* notification block index */
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	dma_addr_t q_resources_bus; /* dma address for the queue resources */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */

	/* head and tail of skb chain for the current packet or NULL if none */
	struct sk_buff *skb_head;
	struct sk_buff *skb_tail;
};
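
/* The free/recycled/used lists in the struct above are singly linked through
 * the `next` field of each gve_rx_buf_state_dqo, with gve_index_list holding
 * only the head and tail indices. A minimal sketch of appending an entry to
 * the tail of such a list (illustrative only; the helper name is made up and
 * is not part of the driver):
 *
 *	static void gve_index_list_push_tail(struct gve_rx_ring *rx,
 *					     struct gve_index_list *list,
 *					     s16 idx)
 *	{
 *		rx->dqo.buf_states[idx].next = -1;
 *		if (list->head == -1)		// list was empty
 *			list->head = idx;
 *		else
 *			rx->dqo.buf_states[list->tail].next = idx;
 *		list->tail = idx;
 *	}
 *
 * Popping from the head works the same way in reverse, which gives the FIFO
 * ordering the recycled_buf_states comment relies on.
 */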

/* A TX desc ring entry */
union gve_tx_desc {
	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
};

/* Tracks the memory in the fifo occupied by a segment of a packet */
struct gve_tx_iovec {
	u32 iov_offset; /* offset into this segment */
	u32 iov_len; /* length */
	u32 iov_padding; /* padding associated with this segment */
};

/* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
 * ring entry but only used for a pkt_desc not a seg_desc
 */
struct gve_tx_buffer_state {
	struct sk_buff *skb; /* skb for this pkt */
	union {
		struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
		struct {
			DEFINE_DMA_UNMAP_ADDR(dma);
			DEFINE_DMA_UNMAP_LEN(len);
		};
	};
};

/* A TX buffer - each queue has one */
struct gve_tx_fifo {
	void *base; /* address of base of FIFO */
	u32 size; /* total size */
	atomic_t available; /* how much space is still available */
	u32 head; /* offset to write at */
	struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */
};

/* TX descriptor for DQO format */
union gve_tx_desc_dqo {
	struct gve_tx_pkt_desc_dqo pkt;
	struct gve_tx_tso_context_desc_dqo tso_ctx;
	struct gve_tx_general_context_desc_dqo general_ctx;
};

enum gve_packet_state {
	/* Packet is in free list, available to be allocated.
	 * This should always be zero since state is not explicitly initialized.
	 */
	GVE_PACKET_STATE_UNALLOCATED,
	/* Packet is expecting a regular data completion or miss completion */
	GVE_PACKET_STATE_PENDING_DATA_COMPL,
	/* Packet has received a miss completion and is expecting a
	 * re-injection completion.
	 */
	GVE_PACKET_STATE_PENDING_REINJECT_COMPL,
	/* No valid completion received within the specified timeout. */
	GVE_PACKET_STATE_TIMED_OUT_COMPL,
};

struct gve_tx_pending_packet_dqo {
	struct sk_buff *skb; /* skb for this packet */

	/* 0th element corresponds to the linear portion of `skb`, should be
	 * unmapped with `dma_unmap_single`.
	 *
	 * All others correspond to `skb`'s frags and should be unmapped with
	 * `dma_unmap_page`.
	 */
	DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
	DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
	u16 num_bufs;

	/* Linked list index to next element in the list, or -1 if none */
	s16 next;

	/* Linked list index to prev element in the list, or -1 if none.
	 * Used for tracking either outstanding miss completions or prematurely
	 * freed packets.
	 */
	s16 prev;

	/* Identifies the current state of the packet as defined in
	 * `enum gve_packet_state`.
	 */
	u8 state;

	/* If packet is an outstanding miss completion, then the packet is
	 * freed if the corresponding re-injection completion is not received
	 * before kernel jiffies exceeds timeout_jiffies.
	 */
	unsigned long timeout_jiffies;
};
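
/* Per the comment above, dma[0]/len[0] come from dma_map_single() on the
 * skb's linear data while the remaining entries come from dma_map_page() on
 * its frags, so they must be unmapped with the matching calls. A minimal
 * unmap sketch (illustrative only; the helper name and the `dev` argument
 * are assumptions, not part of this header):
 *
 *	static void unmap_pending_packet(struct device *dev,
 *					 struct gve_tx_pending_packet_dqo *pkt)
 *	{
 *		int i;
 *
 *		dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]),
 *				 dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE);
 *		for (i = 1; i < pkt->num_bufs; i++)
 *			dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]),
 *				       dma_unmap_len(pkt, len[i]), DMA_TO_DEVICE);
 *	}
 */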

/* Contains datapath state used to represent a TX queue. */
struct gve_tx_ring {
	/* Cacheline 0 -- Accessed & dirtied during transmit */
	union {
		/* GQI fields */
		struct {
			struct gve_tx_fifo tx_fifo;
			u32 req; /* driver tracked head pointer */
			u32 done; /* driver tracked tail pointer */
		};

		/* DQO fields. */
		struct {
			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is a consumer list owned by the TX path. When it
			 * runs out, the producer list is stolen from the
			 * completion handling path
			 * (dqo_compl.free_pending_packets).
			 */
			s16 free_pending_packets;

			/* Cached value of `dqo_compl.hw_tx_head` */
			u32 head;
			u32 tail; /* Last posted buffer index + 1 */

			/* Index of the last descriptor with "report event" bit
			 * set.
			 */
			u32 last_re_idx;
		} dqo_tx;
	};

	/* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */
	union {
		/* GQI fields */
		struct {
			/* Spinlock for when cleanup in progress */
			spinlock_t clean_lock;
		};

		/* DQO fields. */
		struct {
			u32 head; /* Last read on compl_desc */

			/* Tracks the current gen bit of compl_q */
			u8 cur_gen_bit;

			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is the producer list, owned by the completion
			 * handling path. When the consumer list
			 * (dqo_tx.free_pending_packets) is exhausted, this list
			 * will be stolen.
			 */
			atomic_t free_pending_packets;

			/* Last TX ring index fetched by HW */
			atomic_t hw_tx_head;

			/* List to track pending packets which received a miss
			 * completion but not a corresponding reinjection.
			 */
			struct gve_index_list miss_completions;

			/* List to track pending packets that were completed
			 * before receiving a valid completion because they
			 * reached a specified timeout.
			 */
			struct gve_index_list timed_out_completions;
		} dqo_compl;
	} ____cacheline_aligned;
	u64 pkt_done; /* free-running - total packets completed */
	u64 bytes_done; /* free-running - total bytes completed */
	u64 dropped_pkt; /* free-running - total packets dropped */
	u64 dma_mapping_error; /* count of dma mapping errors */

	/* Cacheline 2 -- Read-mostly fields */
	union {
		/* GQI fields */
		struct {
			union gve_tx_desc *desc;

			/* Maps 1:1 to a desc */
			struct gve_tx_buffer_state *info;
		};

		/* DQO fields. */
		struct {
			union gve_tx_desc_dqo *tx_ring;
			struct gve_tx_compl_desc *compl_ring;

			struct gve_tx_pending_packet_dqo *pending_packets;
			s16 num_pending_packets;

			u32 complq_mask; /* complq size is complq_mask + 1 */
		} dqo;
	} ____cacheline_aligned;
	struct netdev_queue *netdev_txq;
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	struct device *dev;
	u32 mask; /* masks req and done down to queue size */
	u8 raw_addressing; /* use raw_addressing? */

	/* Slow-path fields */
	u32 q_num ____cacheline_aligned; /* queue idx */
	u32 stop_queue; /* count of queue stops */
	u32 wake_queue; /* count of queue wakes */
	u32 queue_timeout; /* count of queue timeouts */
	u32 ntfy_id; /* notification block index */
	u32 last_kick_msec; /* Last time the queue was kicked */
	dma_addr_t bus; /* dma address of the descr ring */
	dma_addr_t q_resources_bus; /* dma address of the queue resources */
	dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */
} ____cacheline_aligned;
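
/* The two free_pending_packets lists in the struct above form a lock-free
 * handoff: the TX path consumes from dqo_tx.free_pending_packets while the
 * completion path appends freed entries to dqo_compl.free_pending_packets.
 * A minimal sketch of the "steal" mentioned in those comments (illustrative
 * only; the helper name is made up and is not part of the driver):
 *
 *	static s16 refill_free_list(struct gve_tx_ring *tx)
 *	{
 *		if (tx->dqo_tx.free_pending_packets == -1) {
 *			// Take the entire producer list in one atomic swap.
 *			tx->dqo_tx.free_pending_packets =
 *				atomic_xchg(&tx->dqo_compl.free_pending_packets, -1);
 *		}
 *		return tx->dqo_tx.free_pending_packets;	// still -1 if both lists are empty
 *	}
 */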

/* Wraps the info for one irq including the napi struct and the queues
 * associated with that irq.
 */
struct gve_notify_block {
	__be32 irq_db_index; /* idx into Bar2 - set by device, must be 1st */
	char name[IFNAMSIZ + 16]; /* name registered with the kernel */
	struct napi_struct napi; /* kernel napi struct for this block */
	struct gve_priv *priv;
	struct gve_tx_ring *tx; /* tx rings on this block */
	struct gve_rx_ring *rx; /* rx rings on this block */
} ____cacheline_aligned;

/* Tracks allowed and current queue settings */
struct gve_queue_config {
	u16 max_queues;
	u16 num_queues; /* current */
};

/* Tracks the available and used qpl IDs */
struct gve_qpl_config {
	u32 qpl_map_size; /* map memory size */
	unsigned long *qpl_id_map; /* bitmap of used qpl ids */
};

struct gve_options_dqo_rda {
	u16 tx_comp_ring_entries; /* number of tx_comp descriptors */
	u16 rx_buff_ring_entries; /* number of rx_buff descriptors */
};

struct gve_ptype {
	u8 l3_type; /* `gve_l3_type` in gve_adminq.h */
	u8 l4_type; /* `gve_l4_type` in gve_adminq.h */
};

struct gve_ptype_lut {
	struct gve_ptype ptypes[GVE_NUM_PTYPES];
};

/* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
 * when the entire configure_device_resources command is zeroed out and the
 * queue_format is not specified.
 */
enum gve_queue_format {
	GVE_QUEUE_FORMAT_UNSPECIFIED	= 0x0,
	GVE_GQI_RDA_FORMAT		= 0x1,
	GVE_GQI_QPL_FORMAT		= 0x2,
	GVE_DQO_RDA_FORMAT		= 0x3,
};

struct gve_priv {
	struct net_device *dev;
	struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
	struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
	struct gve_queue_page_list *qpls; /* array of num qpls */
	struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
	dma_addr_t ntfy_block_bus;
	struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */
	char mgmt_msix_name[IFNAMSIZ + 16];
	u32 mgmt_msix_idx;
	__be32 *counter_array; /* array of num_event_counters */
	dma_addr_t counter_array_bus;

	u16 num_event_counters;
	u16 tx_desc_cnt; /* num desc per ring */
	u16 rx_desc_cnt; /* num desc per ring */
	u16 tx_pages_per_qpl; /* tx buffer length */
	u16 rx_data_slot_cnt; /* rx buffer length */
	u64 max_registered_pages;
	u64 num_registered_pages; /* num pages registered with NIC */
	u32 rx_copybreak; /* copy packets smaller than this */
	u16 default_num_queues; /* default num queues to set up */

	struct gve_queue_config tx_cfg;
	struct gve_queue_config rx_cfg;
	struct gve_qpl_config qpl_cfg; /* map used QPL ids */
	u32 num_ntfy_blks; /* split between TX and RX so must be even */

	struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
	__be32 __iomem *db_bar2; /* "array" of doorbells */
	u32 msg_enable;	/* level for netif* netdev print macros */
	struct pci_dev *pdev;

	/* metrics */
	u32 tx_timeo_cnt;

	/* Admin queue - see gve_adminq.h */
	union gve_adminq_command *adminq;
	dma_addr_t adminq_bus_addr;
	u32 adminq_mask; /* masks prod_cnt to adminq size */
	u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
	u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
	u32 adminq_timeouts; /* free-running count of AQ cmds timeouts */
	/* free-running count of per AQ cmd executed */
	u32 adminq_describe_device_cnt;
	u32 adminq_cfg_device_resources_cnt;
	u32 adminq_register_page_list_cnt;
	u32 adminq_unregister_page_list_cnt;
	u32 adminq_create_tx_queue_cnt;
	u32 adminq_create_rx_queue_cnt;
	u32 adminq_destroy_tx_queue_cnt;
	u32 adminq_destroy_rx_queue_cnt;
	u32 adminq_dcfg_device_resources_cnt;
	u32 adminq_set_driver_parameter_cnt;
	u32 adminq_report_stats_cnt;
	u32 adminq_report_link_speed_cnt;
	u32 adminq_get_ptype_map_cnt;

	/* Global stats */
	u32 interface_up_cnt; /* count of times interface turned up since last reset */
	u32 interface_down_cnt; /* count of times interface turned down since last reset */
	u32 reset_cnt; /* count of resets */
	u32 page_alloc_fail; /* count of page alloc fails */
	u32 dma_mapping_error; /* count of dma mapping errors */
	u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
	struct workqueue_struct *gve_wq;
	struct work_struct service_task;
	struct work_struct stats_report_task;
	unsigned long service_task_flags;
	unsigned long state_flags;

	struct gve_stats_report *stats_report;
	u64 stats_report_len;
	dma_addr_t stats_report_bus; /* dma address for the stats report */
	unsigned long ethtool_flags;

	unsigned long stats_report_timer_period;
	struct timer_list stats_report_timer;

	/* Gvnic device link speed from hypervisor. */
	u64 link_speed;

	struct gve_options_dqo_rda options_dqo_rda;
	struct gve_ptype_lut *ptype_lut_dqo;

	/* Must be a power of two. */
	int data_buffer_size_dqo;

	enum gve_queue_format queue_format;
};

enum gve_service_task_flags_bit {
	GVE_PRIV_FLAGS_DO_RESET			= 1,
	GVE_PRIV_FLAGS_RESET_IN_PROGRESS	= 2,
	GVE_PRIV_FLAGS_PROBE_IN_PROGRESS	= 3,
	GVE_PRIV_FLAGS_DO_REPORT_STATS		= 4,
};

enum gve_state_flags_bit {
	GVE_PRIV_FLAGS_ADMIN_QUEUE_OK		= 1,
	GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK	= 2,
	GVE_PRIV_FLAGS_DEVICE_RINGS_OK		= 3,
	GVE_PRIV_FLAGS_NAPI_ENABLED		= 4,
};

enum gve_ethtool_flags_bit {
	GVE_PRIV_FLAGS_REPORT_STATS		= 0,
};

static inline bool gve_get_do_reset(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_set_do_reset(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_clear_do_reset(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline bool gve_get_reset_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_reset_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_reset_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_probe_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_probe_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_do_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS,
			&priv->service_task_flags);
}

static inline void gve_set_do_report_stats(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline void gve_clear_do_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_set_admin_queue_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_clear_admin_queue_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline bool gve_get_device_resources_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_set_device_resources_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_clear_device_resources_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline bool gve_get_device_rings_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_set_device_rings_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_clear_device_rings_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline bool gve_get_napi_enabled(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_set_napi_enabled(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_clear_napi_enabled(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline bool gve_get_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

static inline void gve_clear_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

/* Returns the address of the ntfy_blocks irq doorbell
 */
static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
					       struct gve_notify_block *block)
{
	return &priv->db_bar2[be32_to_cpu(block->irq_db_index)];
}

/* Returns the index into ntfy_blocks of the given tx ring's block
 */
static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return queue_idx;
}

/* Returns the index into ntfy_blocks of the given rx ring's block
 */
static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return (priv->num_ntfy_blks / 2) + queue_idx;
}
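
/* This mapping is why num_ntfy_blks must be split evenly between TX and RX:
 * TX queue i uses block i and RX queue i uses block (num_ntfy_blks / 2) + i.
 * As a worked example (illustrative values only), with num_ntfy_blks == 8,
 * TX queues 0-3 map to blocks 0-3 and RX queues 0-3 map to blocks 4-7, e.g.
 * gve_tx_idx_to_ntfy(priv, 2) == 2 and gve_rx_idx_to_ntfy(priv, 2) == 6.
 */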

/* Returns the number of tx queue page lists
 */
static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
{
	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
		return 0;

	return priv->tx_cfg.num_queues;
}

/* Returns the number of rx queue page lists
 */
static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
{
	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
		return 0;

	return priv->rx_cfg.num_queues;
}

/* Returns a pointer to the next available tx qpl in the list of qpls
 */
static inline
struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv)
{
	int id = find_first_zero_bit(priv->qpl_cfg.qpl_id_map,
				     priv->qpl_cfg.qpl_map_size);

	/* we are out of tx qpls */
	if (id >= gve_num_tx_qpls(priv))
		return NULL;

	set_bit(id, priv->qpl_cfg.qpl_id_map);
	return &priv->qpls[id];
}

/* Returns a pointer to the next available rx qpl in the list of qpls
 */
static inline
struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv)
{
	int id = find_next_zero_bit(priv->qpl_cfg.qpl_id_map,
				    priv->qpl_cfg.qpl_map_size,
				    gve_num_tx_qpls(priv));

	/* we are out of rx qpls */
	if (id == gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv))
		return NULL;

	set_bit(id, priv->qpl_cfg.qpl_id_map);
	return &priv->qpls[id];
}

/* Unassigns the qpl with the given id
 */
static inline void gve_unassign_qpl(struct gve_priv *priv, int id)
{
	clear_bit(id, priv->qpl_cfg.qpl_id_map);
}
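
/* The qpl_id_map bitmap is shared by both directions: ids
 * [0, gve_num_tx_qpls()) belong to TX queues and the RX ids follow directly
 * after, which is why gve_assign_tx_qpl() searches from bit 0 and
 * gve_assign_rx_qpl() searches from gve_num_tx_qpls(). An illustrative usage
 * sketch for a QPL-format device (the surrounding error handling is an
 * assumption, not driver code):
 *
 *	struct gve_queue_page_list *qpl = gve_assign_rx_qpl(priv);
 *
 *	if (!qpl)
 *		return -ENOMEM;			// every rx qpl id is already in use
 *	// ... hand qpl->pages / qpl->page_buses to the ring being set up ...
 *	gve_unassign_qpl(priv, qpl->id);	// release the id at teardown
 */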

/* Returns the correct dma direction for tx and rx qpls
 */
static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
						      int id)
{
	if (id < gve_num_tx_qpls(priv))
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

static inline bool gve_is_gqi(struct gve_priv *priv)
{
	return priv->queue_format == GVE_GQI_RDA_FORMAT ||
		priv->queue_format == GVE_GQI_QPL_FORMAT;
}

/* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
		   struct page **page, dma_addr_t *dma,
		   enum dma_data_direction);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
		   enum dma_data_direction);
/* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings(struct gve_priv *priv);
void gve_tx_free_rings_gqi(struct gve_priv *priv);
u32 gve_tx_load_event_counter(struct gve_priv *priv,
			      struct gve_tx_ring *tx);
bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
/* rx handling */
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
int gve_rx_poll(struct gve_notify_block *block, int budget);
bool gve_rx_work_pending(struct gve_rx_ring *rx);
int gve_rx_alloc_rings(struct gve_priv *priv);
void gve_rx_free_rings_gqi(struct gve_priv *priv);
/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
int gve_adjust_queues(struct gve_priv *priv,
		      struct gve_queue_config new_rx_config,
		      struct gve_queue_config new_tx_config);
/* report stats handling */
void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
extern const struct ethtool_ops gve_ethtool_ops;
/* needed by ethtool */
extern const char gve_version_str[];
#endif /* _GVE_H_ */