/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2023-2024 Google LLC
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _GVE_FBSD_H
#define _GVE_FBSD_H

#include "gve_desc.h"
#include "gve_plat.h"
#include "gve_register.h"

#ifndef PCI_VENDOR_ID_GOOGLE
#define PCI_VENDOR_ID_GOOGLE	0x1ae0
#endif

#define PCI_DEV_ID_GVNIC	0x0042
#define GVE_REGISTER_BAR	0
#define GVE_DOORBELL_BAR	2

/* Driver can alloc up to 2 segments for the header and 2 for the payload. */
#define GVE_TX_MAX_DESCS	4
#define GVE_TX_BUFRING_ENTRIES	4096

#define ADMINQ_SIZE	PAGE_SIZE

#define GVE_DEFAULT_RX_BUFFER_SIZE	2048
/* Each RX bounce buffer page can fit two packet buffers. */
#define GVE_DEFAULT_RX_BUFFER_OFFSET	(PAGE_SIZE / 2)
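
/*
 * Worked example (illustrative; assumes the common 4096-byte PAGE_SIZE):
 * each bounce-buffer page then holds two GVE_DEFAULT_RX_BUFFER_SIZE
 * (2048-byte) packet buffers, one at offset 0 and one at
 * GVE_DEFAULT_RX_BUFFER_OFFSET == 4096 / 2 == 2048.
 */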

/* PTYPEs are always 10 bits. */
#define GVE_NUM_PTYPES	1024

/*
 * Number of descriptors per queue page list.
 * Page count AKA QPL size can be derived by dividing the number of elements in
 * a page by the number of descriptors available.
 */
#define GVE_QPL_DIVISOR	16

/* Ring Size Limits */
#define GVE_DEFAULT_MIN_RX_RING_SIZE	512
#define GVE_DEFAULT_MIN_TX_RING_SIZE	256

static MALLOC_DEFINE(M_GVE, "gve", "gve allocations");

struct gve_dma_handle {
	bus_addr_t bus_addr;
	void *cpu_addr;
	bus_dma_tag_t tag;
	bus_dmamap_t map;
};
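
/*
 * Usage sketch (illustrative only, error handling elided): a handle is
 * typically filled in by gve_dma_alloc_coherent() and released with
 * gve_dma_free_coherent(), both declared near the bottom of this header.
 *
 *	struct gve_dma_handle dma;
 *
 *	if (gve_dma_alloc_coherent(priv, PAGE_SIZE, PAGE_SIZE, &dma) != 0)
 *		return (ENOMEM);
 *	... use dma.cpu_addr (driver view) and dma.bus_addr (device view) ...
 *	gve_dma_free_coherent(&dma);
 */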

union gve_tx_desc {
	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
	struct gve_tx_mtd_desc mtd; /* optional metadata descriptor */
	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
};

/* Tracks the memory in the fifo occupied by a segment of a packet */
struct gve_tx_iovec {
	uint32_t iov_offset; /* offset into this segment */
	uint32_t iov_len; /* length */
	uint32_t iov_padding; /* padding associated with this segment */
};

/* Tracks allowed and current queue settings */
struct gve_queue_config {
	uint16_t max_queues;
	uint16_t num_queues; /* current */
};

struct gve_irq_db {
	__be32 index;
} __aligned(CACHE_LINE_SIZE);

/*
 * GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
 * when the entire configure_device_resources command is zeroed out and the
 * queue_format is not specified.
 */
enum gve_queue_format {
	GVE_QUEUE_FORMAT_UNSPECIFIED	= 0x0,
	GVE_GQI_RDA_FORMAT		= 0x1,
	GVE_GQI_QPL_FORMAT		= 0x2,
	GVE_DQO_RDA_FORMAT		= 0x3,
	GVE_DQO_QPL_FORMAT		= 0x4,
};

enum gve_state_flags_bit {
	GVE_STATE_FLAG_ADMINQ_OK,
	GVE_STATE_FLAG_RESOURCES_OK,
	GVE_STATE_FLAG_QPLREG_OK,
	GVE_STATE_FLAG_RX_RINGS_OK,
	GVE_STATE_FLAG_TX_RINGS_OK,
	GVE_STATE_FLAG_QUEUES_UP,
	GVE_STATE_FLAG_LINK_UP,
	GVE_STATE_FLAG_DO_RESET,
	GVE_STATE_FLAG_IN_RESET,
	GVE_NUM_STATE_FLAGS /* Not part of the enum space */
};

BITSET_DEFINE(gve_state_flags, GVE_NUM_STATE_FLAGS);

#define GVE_DEVICE_STATUS_RESET		(0x1 << 1)
#define GVE_DEVICE_STATUS_LINK_STATUS	(0x1 << 2)

#define GVE_RING_LOCK(ring)	mtx_lock(&(ring)->ring_mtx)
#define GVE_RING_TRYLOCK(ring)	mtx_trylock(&(ring)->ring_mtx)
#define GVE_RING_UNLOCK(ring)	mtx_unlock(&(ring)->ring_mtx)
#define GVE_RING_ASSERT(ring)	mtx_assert(&(ring)->ring_mtx, MA_OWNED)
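
/*
 * Typical usage sketch for the ring lock macros above (illustrative only;
 * `tx` stands for any ring with an embedded ring_mtx):
 *
 *	GVE_RING_LOCK(tx);
 *	... touch state protected by the ring mutex ...
 *	GVE_RING_UNLOCK(tx);
 *
 * Functions that expect the caller to already hold the lock can state
 * that requirement with GVE_RING_ASSERT(tx).
 */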

#define GVE_IFACE_LOCK_INIT(lock)	sx_init(&lock, "gve interface lock")
#define GVE_IFACE_LOCK_DESTROY(lock)	sx_destroy(&lock)
#define GVE_IFACE_LOCK_LOCK(lock)	sx_xlock(&lock)
#define GVE_IFACE_LOCK_UNLOCK(lock)	sx_unlock(&lock)
#define GVE_IFACE_LOCK_ASSERT(lock)	sx_assert(&lock, SA_XLOCKED)

struct gve_queue_page_list {
	uint32_t id;
	uint32_t num_dmas;
	uint32_t num_pages;
	vm_offset_t kva;
	vm_page_t *pages;
	struct gve_dma_handle *dmas;
};

struct gve_irq {
	struct resource *res;
	void *cookie;
};

struct gve_rx_slot_page_info {
	void *page_address;
	vm_page_t page;
	uint32_t page_offset;
	uint16_t pad;
};

/*
 * A single received packet split across multiple buffers may be
 * reconstructed using the information in this structure.
 */
struct gve_rx_ctx {
	/* head and tail of mbuf chain for the current packet */
	struct mbuf *mbuf_head;
	struct mbuf *mbuf_tail;
	uint32_t total_size;
	uint8_t frag_cnt;
	bool is_tcp;
	bool drop_pkt;
};

struct gve_ring_com {
	struct gve_priv *priv;
	uint32_t id;

	/*
	 * BAR2 offset for this ring's doorbell and the
	 * counter-array offset for this ring's counter.
	 * Acquired from the device individually for each
	 * queue in the queue_create adminq command.
	 */
	struct gve_queue_resources *q_resources;
	struct gve_dma_handle q_resources_mem;

	/* Byte offset into BAR2 where this ring's 4-byte irq doorbell lies. */
	uint32_t irq_db_offset;
	/* Byte offset into BAR2 where this ring's 4-byte doorbell lies. */
	uint32_t db_offset;
	/*
	 * Index, not byte-offset, into the counter array where this ring's
	 * 4-byte counter lies.
	 */
	uint32_t counter_idx;

	/*
	 * The index of the MSIX vector that was assigned to
	 * this ring in `gve_alloc_irqs`.
	 *
	 * It is passed to the device in the queue_create adminq
	 * command.
	 *
	 * It also serves as the index into `priv->irq_db_indices`
	 * where this ring's irq doorbell's BAR2 offset,
	 * `irq_db_idx`, can be found.
	 */
	int ntfy_id;

	/*
	 * The fixed bounce buffer for this ring.
	 * Once allocated, it has to be offered to the device
	 * via the register-page-list adminq command.
	 */
	struct gve_queue_page_list *qpl;

	struct task cleanup_task;
	struct taskqueue *cleanup_tq;
} __aligned(CACHE_LINE_SIZE);
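
/*
 * Illustrative sketch (not verbatim driver code) of how the fields above
 * are used: the ring doorbell is written through BAR2 at db_offset with
 * gve_db_bar_write_4(), declared later in this header, and the ring's
 * event counter is read out of the counter array at counter_idx.
 *
 *	struct gve_ring_com *com = &rx->com;
 *
 *	gve_db_bar_write_4(priv, com->db_offset, rx->fill_cnt);
 *	cnt = be32toh(priv->counters[com->counter_idx]);
 */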

struct gve_rxq_stats {
	counter_u64_t rbytes;
	counter_u64_t rpackets;
	counter_u64_t rx_dropped_pkt;
	counter_u64_t rx_copybreak_cnt;
	counter_u64_t rx_frag_flip_cnt;
	counter_u64_t rx_frag_copy_cnt;
	counter_u64_t rx_dropped_pkt_desc_err;
	counter_u64_t rx_dropped_pkt_buf_post_fail;
	counter_u64_t rx_dropped_pkt_mbuf_alloc_fail;
	counter_u64_t rx_mbuf_dmamap_err;
	counter_u64_t rx_mbuf_mclget_null;
};

#define NUM_RX_STATS (sizeof(struct gve_rxq_stats) / sizeof(counter_u64_t))
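
/*
 * Because the stats structs are plain arrays of counter_u64_t, they can be
 * allocated and freed in bulk with the helpers declared at the bottom of
 * this header. Illustrative sketch:
 *
 *	gve_alloc_counters((counter_u64_t *)&rx->stats, NUM_RX_STATS);
 *	...
 *	gve_free_counters((counter_u64_t *)&rx->stats, NUM_RX_STATS);
 */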

union gve_rx_qpl_buf_id_dqo {
	struct {
		uint16_t buf_id:11; /* Index into rx->dqo.bufs */
		uint8_t frag_num:5; /* Which frag in the QPL page */
	};
	uint16_t all;
} __packed;
_Static_assert(sizeof(union gve_rx_qpl_buf_id_dqo) == 2,
    "gve: bad dqo qpl rx buf id length");

struct gve_rx_buf_dqo {
	union {
		/* RDA */
		struct {
			struct mbuf *mbuf;
			bus_dmamap_t dmamap;
			uint64_t addr;
			bool mapped;
		};
		/* QPL */
		struct {
			uint8_t num_nic_frags; /* number of pending completions */
			uint8_t next_idx; /* index of the next frag to post */
			/* for chaining rx->dqo.used_bufs */
			STAILQ_ENTRY(gve_rx_buf_dqo) stailq_entry;
		};
	};
	/* for chaining rx->dqo.free_bufs */
	SLIST_ENTRY(gve_rx_buf_dqo) slist_entry;
};

/* power-of-2 sized receive ring */
struct gve_rx_ring {
	struct gve_ring_com com;
	struct gve_dma_handle desc_ring_mem;
	uint32_t cnt; /* free-running total number of completed packets */
	uint32_t fill_cnt; /* free-running total number of descs and buffs posted */

	union {
		/* GQI-only fields */
		struct {
			struct gve_dma_handle data_ring_mem;

			/* accessed in the GQ receive hot path */
			struct gve_rx_desc *desc_ring;
			union gve_rx_data_slot *data_ring;
			struct gve_rx_slot_page_info *page_info;
			uint32_t mask; /* masks the cnt and fill_cnt to the size of the ring */
			uint8_t seq_no; /* helps traverse the descriptor ring */
		};

		/* DQO-only fields */
		struct {
			struct gve_dma_handle compl_ring_mem;

			struct gve_rx_compl_desc_dqo *compl_ring;
			struct gve_rx_desc_dqo *desc_ring;
			struct gve_rx_buf_dqo *bufs; /* Parking place for posted buffers */
			bus_dma_tag_t buf_dmatag; /* To dmamap posted mbufs with */

			uint32_t buf_cnt; /* Size of the bufs array */
			uint32_t mask; /* One less than the sizes of the desc and compl rings */
			uint32_t head; /* The index at which to post the next buffer */
			uint32_t tail; /* The index at which to receive the next compl */
			uint8_t cur_gen_bit; /* Gets flipped on every cycle of the compl ring */
			SLIST_HEAD(, gve_rx_buf_dqo) free_bufs;

			/*
			 * Only used in QPL mode. Pages referred to by mbufs handed to
			 * if_input stay parked here until their wire count drops back
			 * to 1. Pages are moved here once they have no pending
			 * completions.
			 */
			STAILQ_HEAD(, gve_rx_buf_dqo) used_bufs;
		} dqo;
	};

	struct lro_ctrl lro;
	struct gve_rx_ctx ctx;
	struct gve_rxq_stats stats;

} __aligned(CACHE_LINE_SIZE);

/*
 * A contiguous representation of the pages composing the Tx bounce buffer.
 * The xmit taskqueue and the completion taskqueue both use it concurrently.
 * Both operate on `available`: the xmit tq lowers it and the completion tq
 * raises it. `head` is the last location written at and so only the xmit tq
 * uses it.
 */
struct gve_tx_fifo {
	vm_offset_t base; /* address of base of FIFO */
	uint32_t size; /* total size */
	volatile int available; /* how much space is still available */
	uint32_t head; /* offset to write at */
};
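
/*
 * Simplified sketch of the accounting described above (illustrative only;
 * the real allocator also deals with wrap-around padding and updates
 * `available` atomically since both taskqueues touch it):
 *
 *	// xmit taskqueue: carve `bytes` out of the FIFO
 *	if (fifo->available < bytes)
 *		return (ENOBUFS);
 *	offset = fifo->head;
 *	fifo->head = (fifo->head + bytes) % fifo->size;
 *	fifo->available -= bytes;
 *
 *	// completion taskqueue: return the space once the NIC is done
 *	fifo->available += bytes;
 */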

struct gve_tx_buffer_state {
	struct mbuf *mbuf;
	struct gve_tx_iovec iov[GVE_TX_MAX_DESCS];
};

struct gve_txq_stats {
	counter_u64_t tbytes;
	counter_u64_t tpackets;
	counter_u64_t tso_packet_cnt;
	counter_u64_t tx_dropped_pkt;
	counter_u64_t tx_delayed_pkt_nospace_device;
	counter_u64_t tx_dropped_pkt_nospace_bufring;
	counter_u64_t tx_delayed_pkt_nospace_descring;
	counter_u64_t tx_delayed_pkt_nospace_compring;
	counter_u64_t tx_delayed_pkt_nospace_qpl_bufs;
	counter_u64_t tx_delayed_pkt_tsoerr;
	counter_u64_t tx_dropped_pkt_vlan;
	counter_u64_t tx_mbuf_collapse;
	counter_u64_t tx_mbuf_defrag;
	counter_u64_t tx_mbuf_defrag_err;
	counter_u64_t tx_mbuf_dmamap_enomem_err;
	counter_u64_t tx_mbuf_dmamap_err;
};

#define NUM_TX_STATS (sizeof(struct gve_txq_stats) / sizeof(counter_u64_t))

struct gve_tx_pending_pkt_dqo {
	struct mbuf *mbuf;
	union {
		/* RDA */
		bus_dmamap_t dmamap;
		/* QPL */
		struct {
			/*
			 * A linked list of entries from qpl_bufs that served
			 * as the bounce buffer for this packet.
			 */
			int32_t qpl_buf_head;
			uint32_t num_qpl_bufs;
		};
	};
	uint8_t state; /* the gve_packet_state enum */
	int next; /* To chain the free_pending_pkts lists */
};

/* power-of-2 sized transmit ring */
struct gve_tx_ring {
	struct gve_ring_com com;
	struct gve_dma_handle desc_ring_mem;

	struct task xmit_task;
	struct taskqueue *xmit_tq;
	bool stopped;

	/* Accessed when writing descriptors */
	struct buf_ring *br;
	struct mtx ring_mtx;

	uint32_t req; /* free-running total number of packets written to the nic */
	uint32_t done; /* free-running total number of completed packets */

	union {
		/* GQI specific stuff */
		struct {
			union gve_tx_desc *desc_ring;
			struct gve_tx_buffer_state *info;

			struct gve_tx_fifo fifo;

			uint32_t mask; /* masks the req and done to the size of the ring */
		};

		/* DQO specific stuff */
		struct {
			struct gve_dma_handle compl_ring_mem;

			/* Accessed when writing descriptors */
			struct {
				union gve_tx_desc_dqo *desc_ring;
				uint32_t desc_mask; /* masks head and tail to the size of desc_ring */
				uint32_t desc_head; /* last desc read by NIC, cached value of hw_tx_head */
				uint32_t desc_tail; /* last desc written by driver */
				uint32_t last_re_idx; /* desc which last had "report event" set */

				/*
				 * The head index of a singly linked list of pending-packet
				 * objects used to park mbufs until the NIC sends completions.
				 * Once this list is depleted, the "_prd"-suffixed producer
				 * list, grown by the completion taskqueue, is stolen.
				 */
				int32_t free_pending_pkts_csm;

				/*
				 * The head index of a singly linked list of QPL page fragments
				 * into which mbuf payload is copied for the NIC to see.
				 * Once this list is depleted, the "_prd"-suffixed producer
				 * list, grown by the completion taskqueue, is stolen.
				 *
				 * Only used in QPL mode. int32_t because atomic_swap_16 doesn't exist.
				 */
				int32_t free_qpl_bufs_csm;
				uint32_t qpl_bufs_consumed; /* Allows quickly checking for buf availability */
				uint32_t qpl_bufs_produced_cached; /* Cached value of qpl_bufs_produced */

				/* DMA params for mapping Tx mbufs. Only used in RDA mode. */
				bus_dma_tag_t buf_dmatag;
			} __aligned(CACHE_LINE_SIZE);

			/* Accessed when processing completions */
			struct {
				struct gve_tx_compl_desc_dqo *compl_ring;
				uint32_t compl_mask; /* masks head to the size of compl_ring */
				uint32_t compl_head; /* last completion read by driver */
				uint8_t cur_gen_bit; /* NIC flips a bit on every pass */
				uint32_t hw_tx_head; /* last desc read by NIC */

				/*
				 * The completion taskqueue moves pending-packet objects to this
				 * list after freeing the mbuf. The "_prd" denotes that this is
				 * a producer list. The transmit taskqueue steals this list once
				 * its consumer list, with the "_csm" suffix, is depleted.
				 */
				int32_t free_pending_pkts_prd;

				/*
				 * The completion taskqueue moves the QPL pages corresponding to a
				 * completed packet into this list. The "_prd" denotes that this is
				 * a producer list. The transmit taskqueue steals this list once
				 * its consumer list, with the "_csm" suffix, is depleted.
				 *
				 * Only used in QPL mode. int32_t because atomic_swap_16 doesn't exist.
				 */
				int32_t free_qpl_bufs_prd;
				uint32_t qpl_bufs_produced;
			} __aligned(CACHE_LINE_SIZE);

			/* Accessed by both the completion and xmit loops */
			struct {
				/* completion tags index into this array */
				struct gve_tx_pending_pkt_dqo *pending_pkts;
				uint16_t num_pending_pkts;

				/*
				 * Represents QPL page fragments. An index into this array
				 * always represents the same QPL page fragment. The value
				 * is also an index into this array and serves as a means
				 * to chain buffers into linked lists whose heads are
				 * either free_qpl_bufs_prd or free_qpl_bufs_csm or
				 * qpl_bufs_head.
				 */
				int32_t *qpl_bufs;
			} __aligned(CACHE_LINE_SIZE);
		} dqo;
	};
	struct gve_txq_stats stats;
} __aligned(CACHE_LINE_SIZE);
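
/*
 * Illustrative sketch (not verbatim driver code) of the producer/consumer
 * hand-off described in the comments above, shown for the QPL buffer free
 * lists; the pending-packet free lists follow the same pattern. A head
 * value of -1 is assumed here to mean "empty list".
 *
 *	// xmit taskqueue: consumer list empty, steal the producer list
 *	if (tx->dqo.free_qpl_bufs_csm == -1)
 *		tx->dqo.free_qpl_bufs_csm = (int32_t)atomic_swap_32(
 *		    (uint32_t *)&tx->dqo.free_qpl_bufs_prd, (uint32_t)-1);
 *
 *	// pop the head of the consumer list; qpl_bufs[] chains the entries
 *	buf = tx->dqo.free_qpl_bufs_csm;
 *	tx->dqo.free_qpl_bufs_csm = tx->dqo.qpl_bufs[buf];
 */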

enum gve_packet_state {
	/*
	 * Packet does not yet have a dmamap created.
	 * This should always be zero since state is not explicitly initialized.
	 */
	GVE_PACKET_STATE_UNALLOCATED,
	/* Packet has a dmamap and is in the free list, available to be allocated. */
	GVE_PACKET_STATE_FREE,
	/* Packet is expecting a regular data completion */
	GVE_PACKET_STATE_PENDING_DATA_COMPL,
};

struct gve_ptype {
	uint8_t l3_type; /* `gve_l3_type` in gve_adminq.h */
	uint8_t l4_type; /* `gve_l4_type` in gve_adminq.h */
};

struct gve_ptype_lut {
	struct gve_ptype ptypes[GVE_NUM_PTYPES];
};
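
/*
 * Lookup sketch (illustrative): the 10-bit packet type reported in a DQO
 * completion indexes straight into the table; GVE_L4_TYPE_TCP here is
 * assumed to come from the `gve_l4_type` enum in gve_adminq.h.
 *
 *	const struct gve_ptype *ptype =
 *	    &priv->ptype_lut_dqo->ptypes[ptype_id & (GVE_NUM_PTYPES - 1)];
 *	if (ptype->l4_type == GVE_L4_TYPE_TCP)
 *		is_tcp = true;
 */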

struct gve_priv {
	if_t ifp;
	device_t dev;
	struct ifmedia media;

	uint8_t mac[ETHER_ADDR_LEN];

	struct gve_dma_handle aq_mem;

	struct resource *reg_bar; /* BAR0 */
	struct resource *db_bar; /* BAR2 */
	struct resource *msix_table;

	uint32_t mgmt_msix_idx;
	uint32_t rx_copybreak;

	uint16_t num_event_counters;
	uint16_t default_num_queues;
	uint16_t tx_desc_cnt;
	uint16_t max_tx_desc_cnt;
	uint16_t min_tx_desc_cnt;
	uint16_t rx_desc_cnt;
	uint16_t max_rx_desc_cnt;
	uint16_t min_rx_desc_cnt;
	uint16_t rx_pages_per_qpl;
	uint64_t max_registered_pages;
	uint64_t num_registered_pages;
	uint32_t supported_features;
	uint16_t max_mtu;
	bool modify_ringsize_enabled;

	struct gve_dma_handle counter_array_mem;
	__be32 *counters;
	struct gve_dma_handle irqs_db_mem;
	struct gve_irq_db *irq_db_indices;

	enum gve_queue_format queue_format;
	struct gve_queue_config tx_cfg;
	struct gve_queue_config rx_cfg;
	uint32_t num_queues;

	struct gve_irq *irq_tbl;
	struct gve_tx_ring *tx;
	struct gve_rx_ring *rx;

	struct gve_ptype_lut *ptype_lut_dqo;

	/*
	 * Admin queue - see gve_adminq.h
	 * Since AQ cmds do not run in steady state, 32 bit counters suffice
	 */
	struct gve_adminq_command *adminq;
	vm_paddr_t adminq_bus_addr;
	uint32_t adminq_mask; /* masks prod_cnt to adminq size */
	uint32_t adminq_prod_cnt; /* free-running count of AQ cmds executed */
	uint32_t adminq_cmd_fail; /* free-running count of AQ cmds failed */
	uint32_t adminq_timeouts; /* free-running count of AQ cmd timeouts */
	/* free-running count of each distinct AQ cmd executed */
	uint32_t adminq_describe_device_cnt;
	uint32_t adminq_cfg_device_resources_cnt;
	uint32_t adminq_register_page_list_cnt;
	uint32_t adminq_unregister_page_list_cnt;
	uint32_t adminq_create_tx_queue_cnt;
	uint32_t adminq_create_rx_queue_cnt;
	uint32_t adminq_destroy_tx_queue_cnt;
	uint32_t adminq_destroy_rx_queue_cnt;
	uint32_t adminq_dcfg_device_resources_cnt;
	uint32_t adminq_set_driver_parameter_cnt;
	uint32_t adminq_verify_driver_compatibility_cnt;
	uint32_t adminq_get_ptype_map_cnt;

	uint32_t interface_up_cnt;
	uint32_t interface_down_cnt;
	uint32_t reset_cnt;

	struct task service_task;
	struct taskqueue *service_tq;

	struct gve_state_flags state_flags;
	struct sx gve_iface_lock;
};

static inline bool
gve_get_state_flag(struct gve_priv *priv, int pos)
{
	return (BIT_ISSET(GVE_NUM_STATE_FLAGS, pos, &priv->state_flags));
}

static inline void
gve_set_state_flag(struct gve_priv *priv, int pos)
{
	BIT_SET_ATOMIC(GVE_NUM_STATE_FLAGS, pos, &priv->state_flags);
}

static inline void
gve_clear_state_flag(struct gve_priv *priv, int pos)
{
	BIT_CLR_ATOMIC(GVE_NUM_STATE_FLAGS, pos, &priv->state_flags);
}
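
/*
 * Example usage of the state-flag helpers above (illustrative only):
 *
 *	gve_set_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
 *	if (gve_get_state_flag(priv, GVE_STATE_FLAG_LINK_UP))
 *		if_link_state_change(priv->ifp, LINK_STATE_UP);
 *	gve_clear_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
 */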

static inline bool
gve_is_gqi(struct gve_priv *priv)
{
	return (priv->queue_format == GVE_GQI_QPL_FORMAT);
}

static inline bool
gve_is_qpl(struct gve_priv *priv)
{
	return (priv->queue_format == GVE_GQI_QPL_FORMAT ||
	    priv->queue_format == GVE_DQO_QPL_FORMAT);
}

/* Defined in gve_main.c */
void gve_schedule_reset(struct gve_priv *priv);
int gve_adjust_tx_queues(struct gve_priv *priv, uint16_t new_queue_cnt);
int gve_adjust_rx_queues(struct gve_priv *priv, uint16_t new_queue_cnt);
int gve_adjust_ring_sizes(struct gve_priv *priv, uint16_t new_desc_cnt, bool is_rx);

/* Register access functions defined in gve_utils.c */
uint32_t gve_reg_bar_read_4(struct gve_priv *priv, bus_size_t offset);
void gve_reg_bar_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val);
void gve_db_bar_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val);
void gve_db_bar_dqo_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val);

/* QPL (Queue Page List) functions defined in gve_qpl.c */
struct gve_queue_page_list *gve_alloc_qpl(struct gve_priv *priv, uint32_t id,
    int npages, bool single_kva);
void gve_free_qpl(struct gve_priv *priv, struct gve_queue_page_list *qpl);
int gve_register_qpls(struct gve_priv *priv);
int gve_unregister_qpls(struct gve_priv *priv);
void gve_mextadd_free(struct mbuf *mbuf);

/* TX functions defined in gve_tx.c */
int gve_alloc_tx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx);
void gve_free_tx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx);
int gve_create_tx_rings(struct gve_priv *priv);
int gve_destroy_tx_rings(struct gve_priv *priv);
int gve_tx_intr(void *arg);
int gve_xmit_ifp(if_t ifp, struct mbuf *mbuf);
void gve_qflush(if_t ifp);
void gve_xmit_tq(void *arg, int pending);
void gve_tx_cleanup_tq(void *arg, int pending);

/* TX functions defined in gve_tx_dqo.c */
int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int i);
void gve_tx_free_ring_dqo(struct gve_priv *priv, int i);
void gve_clear_tx_ring_dqo(struct gve_priv *priv, int i);
int gve_tx_intr_dqo(void *arg);
int gve_xmit_dqo(struct gve_tx_ring *tx, struct mbuf **mbuf_ptr);
int gve_xmit_dqo_qpl(struct gve_tx_ring *tx, struct mbuf *mbuf);
void gve_tx_cleanup_tq_dqo(void *arg, int pending);

/* RX functions defined in gve_rx.c */
int gve_alloc_rx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx);
void gve_free_rx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx);
int gve_create_rx_rings(struct gve_priv *priv);
int gve_destroy_rx_rings(struct gve_priv *priv);
int gve_rx_intr(void *arg);
void gve_rx_cleanup_tq(void *arg, int pending);

/* RX functions defined in gve_rx_dqo.c */
int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int i);
void gve_rx_free_ring_dqo(struct gve_priv *priv, int i);
void gve_rx_prefill_buffers_dqo(struct gve_rx_ring *rx);
void gve_clear_rx_ring_dqo(struct gve_priv *priv, int i);
int gve_rx_intr_dqo(void *arg);
void gve_rx_cleanup_tq_dqo(void *arg, int pending);

/* DMA functions defined in gve_utils.c */
int gve_dma_alloc_coherent(struct gve_priv *priv, int size, int align,
    struct gve_dma_handle *dma);
void gve_dma_free_coherent(struct gve_dma_handle *dma);
int gve_dmamap_create(struct gve_priv *priv, int size, int align,
    struct gve_dma_handle *dma);
void gve_dmamap_destroy(struct gve_dma_handle *dma);

/* IRQ functions defined in gve_utils.c */
void gve_free_irqs(struct gve_priv *priv);
int gve_alloc_irqs(struct gve_priv *priv);
void gve_unmask_all_queue_irqs(struct gve_priv *priv);
void gve_mask_all_queue_irqs(struct gve_priv *priv);

/* Sysctl functions defined in gve_sysctl.c */
extern bool gve_disable_hw_lro;
extern char gve_queue_format[8];
extern char gve_version[8];
void gve_setup_sysctl(struct gve_priv *priv);
void gve_accum_stats(struct gve_priv *priv, uint64_t *rpackets,
    uint64_t *rbytes, uint64_t *rx_dropped_pkt, uint64_t *tpackets,
    uint64_t *tbytes, uint64_t *tx_dropped_pkt);

/* Stats functions defined in gve_utils.c */
void gve_alloc_counters(counter_u64_t *stat, int num_stats);
void gve_free_counters(counter_u64_t *stat, int num_stats);

#endif /* _GVE_FBSD_H */