/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2023-2024 Google LLC
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _GVE_FBSD_H
#define _GVE_FBSD_H

#include "gve_desc.h"
#include "gve_plat.h"
#include "gve_register.h"

#ifndef PCI_VENDOR_ID_GOOGLE
#define PCI_VENDOR_ID_GOOGLE 0x1ae0
#endif

#define PCI_DEV_ID_GVNIC 0x0042
#define GVE_REGISTER_BAR 0
#define GVE_DOORBELL_BAR 2

/* Driver can alloc up to 2 segments for the header and 2 for the payload. */
#define GVE_TX_MAX_DESCS 4
#define GVE_TX_BUFRING_ENTRIES 4096

#define GVE_TX_TIMEOUT_PKT_SEC 5
#define GVE_TX_TIMEOUT_CHECK_CADENCE_SEC 5
/*
 * If the driver finds timed out packets on a tx queue it first kicks it and
 * records the time. If the driver again finds a timeout on the same queue
 * before the end of the cooldown period, only then will it reset. Thus, for a
 * reset to be able to occur at all, the cooldown must be at least as long
 * as the tx timeout checking cadence multiplied by the number of queues.
 */
#define GVE_TX_TIMEOUT_MAX_TX_QUEUES 16
#define GVE_TX_TIMEOUT_KICK_COOLDOWN_SEC \
    (2 * GVE_TX_TIMEOUT_CHECK_CADENCE_SEC * GVE_TX_TIMEOUT_MAX_TX_QUEUES)
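/*
 * Illustrative arithmetic (editorial note, not upstream text): with the
 * defaults above the cooldown works out to 2 * 5 * 16 = 160 seconds, which
 * satisfies the constraint stated in the comment (5 s cadence * 16 queues =
 * 80 s) with a factor-of-two margin.
 */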

#define GVE_TIMESTAMP_INVALID -1

#define ADMINQ_SIZE PAGE_SIZE

#define GVE_DEFAULT_RX_BUFFER_SIZE 2048
#define GVE_4K_RX_BUFFER_SIZE_DQO 4096
/* Each RX bounce buffer page can fit two packet buffers. */
#define GVE_DEFAULT_RX_BUFFER_OFFSET (PAGE_SIZE / 2)
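/*
 * Illustrative note (editorial, assumes a 4 KB PAGE_SIZE): the offset above
 * then evaluates to 4096 / 2 = 2048 bytes, i.e. the second buffer in a bounce
 * buffer page starts exactly GVE_DEFAULT_RX_BUFFER_SIZE bytes past the first.
 */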

/* PTYPEs are always 10 bits. */
#define GVE_NUM_PTYPES 1024

/*
 * Number of descriptors per queue page list.
 * Page count AKA QPL size can be derived by dividing the number of elements in
 * a page by the number of descriptors available.
 */
#define GVE_QPL_DIVISOR 16

/* Ring Size Limits */
#define GVE_DEFAULT_MIN_RX_RING_SIZE 512
#define GVE_DEFAULT_MIN_TX_RING_SIZE 256

static MALLOC_DEFINE(M_GVE, "gve", "gve allocations");

_Static_assert(MCLBYTES >= GVE_DEFAULT_RX_BUFFER_SIZE,
    "gve: bad MCLBYTES length");
_Static_assert(MJUMPAGESIZE >= GVE_4K_RX_BUFFER_SIZE_DQO,
    "gve: bad MJUMPAGESIZE length");

struct gve_dma_handle {
        bus_addr_t bus_addr;
        void *cpu_addr;
        bus_dma_tag_t tag;
        bus_dmamap_t map;
};
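/*
 * Illustrative sketch (editorial, not driver code): a gve_dma_handle is
 * typically populated by gve_dma_alloc_coherent(), declared later in this
 * header; the device is handed dma->bus_addr while the driver writes through
 * dma->cpu_addr. Error handling and the real call sites are simplified here.
 *
 *      struct gve_dma_handle dma;
 *
 *      if (gve_dma_alloc_coherent(priv, PAGE_SIZE, PAGE_SIZE, &dma) != 0)
 *              return (ENOMEM);
 *      memset(dma.cpu_addr, 0, PAGE_SIZE);     // CPU-side view
 *      // hand dma.bus_addr to the device, e.g. in an adminq command
 *      ...
 *      gve_dma_free_coherent(&dma);
 */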

union gve_tx_desc {
        struct gve_tx_pkt_desc pkt; /* first desc for a packet */
        struct gve_tx_mtd_desc mtd; /* optional metadata descriptor */
        struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
};

/* Tracks the memory in the fifo occupied by a segment of a packet */
struct gve_tx_iovec {
        uint32_t iov_offset; /* offset into this segment */
        uint32_t iov_len; /* length */
        uint32_t iov_padding; /* padding associated with this segment */
};

/* Tracks allowed and current queue settings */
struct gve_queue_config {
        uint16_t max_queues;
        uint16_t num_queues; /* current */
};

struct gve_irq_db {
        __be32 index;
} __aligned(CACHE_LINE_SIZE);

/*
 * GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
 * when the entire configure_device_resources command is zeroed out and the
 * queue_format is not specified.
 */
enum gve_queue_format {
        GVE_QUEUE_FORMAT_UNSPECIFIED = 0x0,
        GVE_GQI_RDA_FORMAT = 0x1,
        GVE_GQI_QPL_FORMAT = 0x2,
        GVE_DQO_RDA_FORMAT = 0x3,
        GVE_DQO_QPL_FORMAT = 0x4,
};

enum gve_state_flags_bit {
        GVE_STATE_FLAG_ADMINQ_OK,
        GVE_STATE_FLAG_RESOURCES_OK,
        GVE_STATE_FLAG_QPLREG_OK,
        GVE_STATE_FLAG_RX_RINGS_OK,
        GVE_STATE_FLAG_TX_RINGS_OK,
        GVE_STATE_FLAG_QUEUES_UP,
        GVE_STATE_FLAG_LINK_UP,
        GVE_STATE_FLAG_DO_RESET,
        GVE_STATE_FLAG_IN_RESET,
        GVE_NUM_STATE_FLAGS /* Not part of the enum space */
};

BITSET_DEFINE(gve_state_flags, GVE_NUM_STATE_FLAGS);

#define GVE_DEVICE_STATUS_RESET (0x1 << 1)
#define GVE_DEVICE_STATUS_LINK_STATUS (0x1 << 2)

#define GVE_RING_LOCK(ring) mtx_lock(&(ring)->ring_mtx)
#define GVE_RING_TRYLOCK(ring) mtx_trylock(&(ring)->ring_mtx)
#define GVE_RING_UNLOCK(ring) mtx_unlock(&(ring)->ring_mtx)
#define GVE_RING_ASSERT(ring) mtx_assert(&(ring)->ring_mtx, MA_OWNED)
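/*
 * Illustrative sketch (editorial, not driver code): the per-ring mutex is
 * taken around descriptor writes; the trylock variant lets a caller defer
 * work to the ring's taskqueue instead of blocking. gve_handle_ring() is a
 * hypothetical caller, not a real driver function.
 *
 *      static void
 *      gve_handle_ring(struct gve_tx_ring *tx)
 *      {
 *              if (GVE_RING_TRYLOCK(tx) == 0) {
 *                      taskqueue_enqueue(tx->xmit_tq, &tx->xmit_task);
 *                      return;
 *              }
 *              GVE_RING_ASSERT(tx);
 *              // ... write descriptors ...
 *              GVE_RING_UNLOCK(tx);
 *      }
 */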

#define GVE_IFACE_LOCK_INIT(lock) sx_init(&lock, "gve interface lock")
#define GVE_IFACE_LOCK_DESTROY(lock) sx_destroy(&lock)
#define GVE_IFACE_LOCK_LOCK(lock) sx_xlock(&lock)
#define GVE_IFACE_LOCK_UNLOCK(lock) sx_unlock(&lock)
#define GVE_IFACE_LOCK_ASSERT(lock) sx_assert(&lock, SA_XLOCKED)

struct gve_queue_page_list {
        uint32_t id;
        uint32_t num_dmas;
        uint32_t num_pages;
        vm_offset_t kva;
        vm_page_t *pages;
        struct gve_dma_handle *dmas;
};

struct gve_irq {
        struct resource *res;
        void *cookie;
};

struct gve_rx_slot_page_info {
        void *page_address;
        vm_page_t page;
        uint32_t page_offset;
        uint16_t pad;
};

/*
 * A single received packet split across multiple buffers may be
 * reconstructed using the information in this structure.
 */
struct gve_rx_ctx {
        /* head and tail of mbuf chain for the current packet */
        struct mbuf *mbuf_head;
        struct mbuf *mbuf_tail;
        uint32_t total_size;
        uint8_t frag_cnt;
        bool is_tcp;
        bool drop_pkt;
};

struct gve_ring_com {
        struct gve_priv *priv;
        uint32_t id;

        /*
         * BAR2 offset for this ring's doorbell and the
         * counter-array offset for this ring's counter.
         * Acquired from the device individually for each
         * queue in the queue_create adminq command.
         */
        struct gve_queue_resources *q_resources;
        struct gve_dma_handle q_resources_mem;

        /* Byte offset into BAR2 where this ring's 4-byte irq doorbell lies. */
        uint32_t irq_db_offset;
        /* Byte offset into BAR2 where this ring's 4-byte doorbell lies. */
        uint32_t db_offset;
        /*
         * Index, not byte-offset, into the counter array where this ring's
         * 4-byte counter lies.
         */
        uint32_t counter_idx;

        /*
         * The index of the MSIX vector that was assigned to
         * this ring in `gve_alloc_irqs`.
         *
         * It is passed to the device in the queue_create adminq
         * command.
         *
         * Additionally, this also serves as the index into
         * `priv->irq_db_indices` where this ring's irq doorbell's
         * BAR2 offset, `irq_db_idx`, can be found.
         */
        int ntfy_id;

        /*
         * The fixed bounce buffer for this ring.
         * Once allocated, has to be offered to the device
         * over the register-page-list adminq command.
         */
        struct gve_queue_page_list *qpl;

        struct task cleanup_task;
        struct taskqueue *cleanup_tq;
} __aligned(CACHE_LINE_SIZE);

struct gve_rxq_stats {
        counter_u64_t rbytes;
        counter_u64_t rpackets;
        counter_u64_t rx_dropped_pkt;
        counter_u64_t rx_copybreak_cnt;
        counter_u64_t rx_frag_flip_cnt;
        counter_u64_t rx_frag_copy_cnt;
        counter_u64_t rx_dropped_pkt_desc_err;
        counter_u64_t rx_dropped_pkt_buf_post_fail;
        counter_u64_t rx_dropped_pkt_mbuf_alloc_fail;
        counter_u64_t rx_mbuf_dmamap_err;
        counter_u64_t rx_mbuf_mclget_null;
};

#define NUM_RX_STATS (sizeof(struct gve_rxq_stats) / sizeof(counter_u64_t))

union gve_rx_qpl_buf_id_dqo {
        struct {
                uint16_t buf_id:11; /* Index into rx->dqo.bufs */
                uint8_t frag_num:5; /* Which frag in the QPL page */
        };
        uint16_t all;
} __packed;
_Static_assert(sizeof(union gve_rx_qpl_buf_id_dqo) == 2,
    "gve: bad dqo qpl rx buf id length");
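/*
 * Illustrative packing example (editorial, assuming the little-endian
 * bitfield layout the driver is built for): buf_id occupies the low 11 bits
 * of `all` and frag_num the next 5, so buf_id = 37 with frag_num = 2
 * round-trips as all = 37 | (2 << 11) = 0x1025.
 *
 *      union gve_rx_qpl_buf_id_dqo composed = {
 *              .buf_id = 37,
 *              .frag_num = 2,
 *      };
 *      // composed.all == 0x1025 on the targeted ABI
 */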

struct gve_rx_buf_dqo {
        union {
                /* RDA */
                struct {
                        struct mbuf *mbuf;
                        bus_dmamap_t dmamap;
                        uint64_t addr;
                        bool mapped;
                };
                /* QPL */
                struct {
                        uint8_t num_nic_frags; /* number of pending completions */
                        uint8_t next_idx; /* index of the next frag to post */
                        /* for chaining rx->dqo.used_bufs */
                        STAILQ_ENTRY(gve_rx_buf_dqo) stailq_entry;
                };
        };
        /* for chaining rx->dqo.free_bufs */
        SLIST_ENTRY(gve_rx_buf_dqo) slist_entry;
};

/* power-of-2 sized receive ring */
struct gve_rx_ring {
        struct gve_ring_com com;
        struct gve_dma_handle desc_ring_mem;
        uint32_t cnt; /* free-running total number of completed packets */
        uint32_t fill_cnt; /* free-running total number of descs and buffs posted */

        union {
                /* GQI-only fields */
                struct {
                        struct gve_dma_handle data_ring_mem;

                        /* accessed in the GQ receive hot path */
                        struct gve_rx_desc *desc_ring;
                        union gve_rx_data_slot *data_ring;
                        struct gve_rx_slot_page_info *page_info;
                        uint32_t mask; /* masks the cnt and fill_cnt to the size of the ring */
                        uint8_t seq_no; /* helps traverse the descriptor ring */
                };

                /* DQO-only fields */
                struct {
                        struct gve_dma_handle compl_ring_mem;

                        struct gve_rx_compl_desc_dqo *compl_ring;
                        struct gve_rx_desc_dqo *desc_ring;
                        struct gve_rx_buf_dqo *bufs; /* Parking place for posted buffers */
                        bus_dma_tag_t buf_dmatag; /* To dmamap posted mbufs with */

                        uint32_t buf_cnt; /* Size of the bufs array */
                        uint32_t mask; /* One less than the sizes of the desc and compl rings */
                        uint32_t head; /* The index at which to post the next buffer */
                        uint32_t tail; /* The index at which to receive the next compl */
                        uint8_t cur_gen_bit; /* Gets flipped on every cycle of the compl ring */
                        SLIST_HEAD(, gve_rx_buf_dqo) free_bufs;

                        /*
                         * Only used in QPL mode. Pages referred to by if_input-ed mbufs
                         * stay parked here until their wire count comes back to 1.
                         * Pages are moved here once they have no pending completions.
                         */
                        STAILQ_HEAD(, gve_rx_buf_dqo) used_bufs;
                } dqo;
        };

        struct lro_ctrl lro;
        struct gve_rx_ctx ctx;
        struct gve_rxq_stats stats;

} __aligned(CACHE_LINE_SIZE);

/*
 * A contiguous representation of the pages composing the Tx bounce buffer.
 * The xmit taskqueue and the completion taskqueue both simultaneously use it.
 * Both operate on `available`: the xmit tq lowers it and the completion tq
 * raises it. `head` is the last location written to, so only the xmit tq
 * uses it.
 */
struct gve_tx_fifo {
        vm_offset_t base; /* address of base of FIFO */
        uint32_t size; /* total size */
        volatile int available; /* how much space is still available */
        uint32_t head; /* offset to write at */
};
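/*
 * Illustrative sketch (editorial, not the driver's actual FIFO code, which
 * also has to handle wraparound padding): the xmit tq consumes space by
 * advancing `head` modulo `size` and lowering `available`; the completion tq
 * only raises `available` once the NIC is done with the bytes. Names other
 * than the struct fields are hypothetical.
 *
 *      static bool
 *      example_fifo_alloc(struct gve_tx_fifo *fifo, uint32_t bytes)
 *      {
 *              if (fifo->available < (int)bytes)
 *                      return (false); // caller backs off and retries later
 *              fifo->head = (fifo->head + bytes) % fifo->size;
 *              atomic_subtract_int((volatile u_int *)&fifo->available, bytes);
 *              return (true);
 *      }
 *
 *      // completion path, once the NIC has consumed the bytes:
 *      // atomic_add_int((volatile u_int *)&fifo->available, bytes);
 */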

struct gve_tx_buffer_state {
        struct mbuf *mbuf;

        /*
         * Time at which the xmit tq places descriptors for mbuf's payload on a
         * tx queue. This timestamp is invalidated when the mbuf is freed and
         * must be checked for validity when read.
         */
        int64_t enqueue_time_sec;

        struct gve_tx_iovec iov[GVE_TX_MAX_DESCS];
};

struct gve_txq_stats {
        counter_u64_t tbytes;
        counter_u64_t tpackets;
        counter_u64_t tso_packet_cnt;
        counter_u64_t tx_dropped_pkt;
        counter_u64_t tx_delayed_pkt_nospace_device;
        counter_u64_t tx_dropped_pkt_nospace_bufring;
        counter_u64_t tx_delayed_pkt_nospace_descring;
        counter_u64_t tx_delayed_pkt_nospace_compring;
        counter_u64_t tx_delayed_pkt_nospace_qpl_bufs;
        counter_u64_t tx_delayed_pkt_tsoerr;
        counter_u64_t tx_dropped_pkt_vlan;
        counter_u64_t tx_mbuf_collapse;
        counter_u64_t tx_mbuf_defrag;
        counter_u64_t tx_mbuf_defrag_err;
        counter_u64_t tx_mbuf_dmamap_enomem_err;
        counter_u64_t tx_mbuf_dmamap_err;
        counter_u64_t tx_timeout;
};

#define NUM_TX_STATS (sizeof(struct gve_txq_stats) / sizeof(counter_u64_t))

struct gve_tx_pending_pkt_dqo {
        struct mbuf *mbuf;

        /*
         * Time at which the xmit tq places descriptors for mbuf's payload on a
         * tx queue. This timestamp is invalidated when the mbuf is freed and
         * must be checked for validity when read.
         */
        int64_t enqueue_time_sec;

        union {
                /* RDA */
                bus_dmamap_t dmamap;
                /* QPL */
                struct {
                        /*
                         * A linked list of entries from qpl_bufs that served
                         * as the bounce buffer for this packet.
                         */
                        int32_t qpl_buf_head;
                        uint32_t num_qpl_bufs;
                };
        };
        uint8_t state; /* the gve_packet_state enum */
        int next; /* To chain the free_pending_pkts lists */
};

/* power-of-2 sized transmit ring */
struct gve_tx_ring {
        struct gve_ring_com com;
        struct gve_dma_handle desc_ring_mem;

        struct task xmit_task;
        struct taskqueue *xmit_tq;
        bool stopped;

        /* Accessed when writing descriptors */
        struct buf_ring *br;
        struct mtx ring_mtx;

        uint32_t req; /* free-running total number of packets written to the nic */
        uint32_t done; /* free-running total number of completed packets */

        int64_t last_kicked; /* always-valid timestamp in seconds for the last queue kick */

        union {
                /* GQI specific stuff */
                struct {
                        union gve_tx_desc *desc_ring;
                        struct gve_tx_buffer_state *info;

                        struct gve_tx_fifo fifo;

                        uint32_t mask; /* masks the req and done to the size of the ring */
                };

                /* DQO specific stuff */
                struct {
                        struct gve_dma_handle compl_ring_mem;

                        /* Accessed when writing descriptors */
                        struct {
                                union gve_tx_desc_dqo *desc_ring;
                                uint32_t desc_mask; /* masks head and tail to the size of desc_ring */
                                uint32_t desc_head; /* last desc read by NIC, cached value of hw_tx_head */
                                uint32_t desc_tail; /* last desc written by driver */
                                uint32_t last_re_idx; /* desc which last had "report event" set */

                                /*
                                 * The head index of a singly linked list containing pending packet objects
                                 * to park mbufs until the NIC sends completions. Once this list is depleted,
                                 * the "_prd" suffixed producer list, grown by the completion taskqueue,
                                 * is stolen.
                                 */
                                int32_t free_pending_pkts_csm;

                                /*
                                 * The head index of a singly linked list representing QPL page fragments
                                 * to copy mbuf payload into for the NIC to see. Once this list is depleted,
                                 * the "_prd" suffixed producer list, grown by the completion taskqueue,
                                 * is stolen.
                                 *
                                 * Only used in QPL mode. int32_t because atomic_swap_16 doesn't exist.
                                 */
                                int32_t free_qpl_bufs_csm;
                                uint32_t qpl_bufs_consumed; /* Allows quickly checking for buf availability */
                                uint32_t qpl_bufs_produced_cached; /* Cached value of qpl_bufs_produced */

                                /* DMA params for mapping Tx mbufs. Only used in RDA mode. */
                                bus_dma_tag_t buf_dmatag;
                        } __aligned(CACHE_LINE_SIZE);

                        /* Accessed when processing completions */
                        struct {
                                struct gve_tx_compl_desc_dqo *compl_ring;
                                uint32_t compl_mask; /* masks head to the size of compl_ring */
                                uint32_t compl_head; /* last completion read by driver */
                                uint8_t cur_gen_bit; /* NIC flips a bit on every pass */
                                uint32_t hw_tx_head; /* last desc read by NIC */

                                /*
                                 * The completion taskqueue moves pending-packet objects to this
                                 * list after freeing the mbuf. The "_prd" denotes that this is
                                 * a producer list. The transmit taskqueue steals this list once
                                 * its consumer list, with the "_csm" suffix, is depleted.
                                 */
                                int32_t free_pending_pkts_prd;

                                /*
                                 * The completion taskqueue moves the QPL pages corresponding to a
                                 * completed packet into this list. It is only used in QPL mode.
                                 * The "_prd" denotes that this is a producer list. The transmit
                                 * taskqueue steals this list once its consumer list, with the "_csm"
                                 * suffix, is depleted.
                                 *
                                 * Only used in QPL mode. int32_t because atomic_swap_16 doesn't exist.
                                 */
                                int32_t free_qpl_bufs_prd;
                                uint32_t qpl_bufs_produced;
                        } __aligned(CACHE_LINE_SIZE);

                        /* Accessed by both the completion and xmit loops */
                        struct {
                                /* completion tags index into this array */
                                struct gve_tx_pending_pkt_dqo *pending_pkts;
                                uint16_t num_pending_pkts;

                                /*
                                 * Represents QPL page fragments. An index into this array
                                 * always represents the same QPL page fragment. The value
                                 * is also an index into this array and serves as a means
                                 * to chain buffers into linked lists whose heads are
                                 * either free_qpl_bufs_prd or free_qpl_bufs_csm or
                                 * qpl_bufs_head.
                                 */
                                int32_t *qpl_bufs;
                        } __aligned(CACHE_LINE_SIZE);
                } dqo;
        };
        struct gve_txq_stats stats;
} __aligned(CACHE_LINE_SIZE);
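/*
 * Illustrative sketch (editorial, not the driver's actual code) of the
 * "_prd"/"_csm" split described above: the xmit taskqueue consumes only the
 * _csm list and, when it runs dry, steals everything the completion taskqueue
 * has produced with a single atomic swap. example_refill_qpl_bufs() and the
 * use of -1 as the empty-list sentinel are assumptions made for this sketch.
 *
 *      static void
 *      example_refill_qpl_bufs(struct gve_tx_ring *tx)
 *      {
 *              if (tx->dqo.free_qpl_bufs_csm != -1)
 *                      return;         // consumer list not yet depleted
 *              // Take the whole producer list and leave it empty.
 *              tx->dqo.free_qpl_bufs_csm = (int32_t)atomic_swap_32(
 *                  (volatile uint32_t *)&tx->dqo.free_qpl_bufs_prd,
 *                  (uint32_t)-1);
 *      }
 */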

enum gve_packet_state {
        /*
         * Packet does not yet have a dmamap created.
         * This should always be zero since state is not explicitly initialized.
         */
        GVE_PACKET_STATE_UNALLOCATED,
        /* Packet has a dmamap and is in free list, available to be allocated. */
        GVE_PACKET_STATE_FREE,
        /* Packet is expecting a regular data completion */
        GVE_PACKET_STATE_PENDING_DATA_COMPL,
};

struct gve_ptype {
        uint8_t l3_type; /* `gve_l3_type` in gve_adminq.h */
        uint8_t l4_type; /* `gve_l4_type` in gve_adminq.h */
};

struct gve_ptype_lut {
        struct gve_ptype ptypes[GVE_NUM_PTYPES];
};

struct gve_priv {
        if_t ifp;
        device_t dev;
        struct ifmedia media;

        uint8_t mac[ETHER_ADDR_LEN];

        struct gve_dma_handle aq_mem;

        struct resource *reg_bar; /* BAR0 */
        struct resource *db_bar; /* BAR2 */
        struct resource *msix_table;

        uint32_t mgmt_msix_idx;
        uint32_t rx_copybreak;

        uint16_t num_event_counters;
        uint16_t default_num_queues;
        uint16_t tx_desc_cnt;
        uint16_t max_tx_desc_cnt;
        uint16_t min_tx_desc_cnt;
        uint16_t rx_desc_cnt;
        uint16_t max_rx_desc_cnt;
        uint16_t min_rx_desc_cnt;
        uint16_t rx_pages_per_qpl;
        uint64_t max_registered_pages;
        uint64_t num_registered_pages;
        uint32_t supported_features;
        uint16_t max_mtu;
        bool modify_ringsize_enabled;

        struct gve_dma_handle counter_array_mem;
        __be32 *counters;
        struct gve_dma_handle irqs_db_mem;
        struct gve_irq_db *irq_db_indices;

        enum gve_queue_format queue_format;
        struct gve_queue_config tx_cfg;
        struct gve_queue_config rx_cfg;
        uint32_t num_queues;

        struct gve_irq *irq_tbl;
        struct gve_tx_ring *tx;
        struct gve_rx_ring *rx;

        struct gve_ptype_lut *ptype_lut_dqo;

        /*
         * Admin queue - see gve_adminq.h
         * Since AQ cmds do not run in steady state, 32 bit counters suffice
         */
        struct gve_adminq_command *adminq;
        vm_paddr_t adminq_bus_addr;
        uint32_t adminq_mask; /* masks prod_cnt to adminq size */
        uint32_t adminq_prod_cnt; /* free-running count of AQ cmds executed */
        uint32_t adminq_cmd_fail; /* free-running count of AQ cmds failed */
        uint32_t adminq_timeouts; /* free-running count of AQ cmd timeouts */
        /* free-running count of each distinct AQ cmd executed */
        uint32_t adminq_describe_device_cnt;
        uint32_t adminq_cfg_device_resources_cnt;
        uint32_t adminq_register_page_list_cnt;
        uint32_t adminq_unregister_page_list_cnt;
        uint32_t adminq_create_tx_queue_cnt;
        uint32_t adminq_create_rx_queue_cnt;
        uint32_t adminq_destroy_tx_queue_cnt;
        uint32_t adminq_destroy_rx_queue_cnt;
        uint32_t adminq_dcfg_device_resources_cnt;
        uint32_t adminq_set_driver_parameter_cnt;
        uint32_t adminq_verify_driver_compatibility_cnt;
        uint32_t adminq_get_ptype_map_cnt;

        uint32_t interface_up_cnt;
        uint32_t interface_down_cnt;
        uint32_t reset_cnt;

        struct task service_task;
        struct taskqueue *service_tq;

        struct gve_state_flags state_flags;
        struct sx gve_iface_lock;

        struct callout tx_timeout_service;
        /* The index of the tx queue that the timer service will check on its next invocation */
        uint16_t check_tx_queue_idx;

        uint16_t rx_buf_size_dqo;
};

static inline bool
gve_get_state_flag(struct gve_priv *priv, int pos)
{
        return (BIT_ISSET(GVE_NUM_STATE_FLAGS, pos, &priv->state_flags));
}

static inline void
gve_set_state_flag(struct gve_priv *priv, int pos)
{
        BIT_SET_ATOMIC(GVE_NUM_STATE_FLAGS, pos, &priv->state_flags);
}

static inline void
gve_clear_state_flag(struct gve_priv *priv, int pos)
{
        BIT_CLR_ATOMIC(GVE_NUM_STATE_FLAGS, pos, &priv->state_flags);
}
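/*
 * Illustrative usage (editorial, not a real driver function): the
 * BITSET-backed flags are read with the plain accessor and modified with the
 * atomic ones, so e.g. a hypothetical error path could request a reset like
 * this:
 *
 *      if (gve_get_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP) &&
 *          !gve_get_state_flag(priv, GVE_STATE_FLAG_IN_RESET))
 *              gve_set_state_flag(priv, GVE_STATE_FLAG_DO_RESET);
 */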

static inline bool
gve_is_gqi(struct gve_priv *priv)
{
        return (priv->queue_format == GVE_GQI_QPL_FORMAT);
}

static inline bool
gve_is_qpl(struct gve_priv *priv)
{
        return (priv->queue_format == GVE_GQI_QPL_FORMAT ||
            priv->queue_format == GVE_DQO_QPL_FORMAT);
}

static inline bool
gve_is_4k_rx_buf(struct gve_priv *priv)
{
        return (priv->rx_buf_size_dqo == GVE_4K_RX_BUFFER_SIZE_DQO);
}

static inline bus_size_t
gve_rx_dqo_mbuf_segment_size(struct gve_priv *priv)
{
        return (gve_is_4k_rx_buf(priv) ? MJUMPAGESIZE : MCLBYTES);
}

/* Defined in gve_main.c */
void gve_schedule_reset(struct gve_priv *priv);
int gve_adjust_tx_queues(struct gve_priv *priv, uint16_t new_queue_cnt);
int gve_adjust_rx_queues(struct gve_priv *priv, uint16_t new_queue_cnt);
int gve_adjust_ring_sizes(struct gve_priv *priv, uint16_t new_desc_cnt, bool is_rx);

/* Register access functions defined in gve_utils.c */
uint32_t gve_reg_bar_read_4(struct gve_priv *priv, bus_size_t offset);
void gve_reg_bar_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val);
void gve_db_bar_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val);
void gve_db_bar_dqo_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val);

/* QPL (Queue Page List) functions defined in gve_qpl.c */
struct gve_queue_page_list *gve_alloc_qpl(struct gve_priv *priv, uint32_t id,
    int npages, bool single_kva);
void gve_free_qpl(struct gve_priv *priv, struct gve_queue_page_list *qpl);
int gve_register_qpls(struct gve_priv *priv);
int gve_unregister_qpls(struct gve_priv *priv);
void gve_mextadd_free(struct mbuf *mbuf);

/* TX functions defined in gve_tx.c */
int gve_alloc_tx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx);
void gve_free_tx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx);
int gve_create_tx_rings(struct gve_priv *priv);
int gve_destroy_tx_rings(struct gve_priv *priv);
int gve_check_tx_timeout_gqi(struct gve_priv *priv, struct gve_tx_ring *tx);
int gve_tx_intr(void *arg);
int gve_xmit_ifp(if_t ifp, struct mbuf *mbuf);
void gve_qflush(if_t ifp);
void gve_xmit_tq(void *arg, int pending);
void gve_tx_cleanup_tq(void *arg, int pending);

/* TX functions defined in gve_tx_dqo.c */
int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int i);
void gve_tx_free_ring_dqo(struct gve_priv *priv, int i);
void gve_clear_tx_ring_dqo(struct gve_priv *priv, int i);
int gve_check_tx_timeout_dqo(struct gve_priv *priv, struct gve_tx_ring *tx);
int gve_tx_intr_dqo(void *arg);
int gve_xmit_dqo(struct gve_tx_ring *tx, struct mbuf **mbuf_ptr);
int gve_xmit_dqo_qpl(struct gve_tx_ring *tx, struct mbuf *mbuf);
void gve_tx_cleanup_tq_dqo(void *arg, int pending);

/* RX functions defined in gve_rx.c */
int gve_alloc_rx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx);
void gve_free_rx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx);
int gve_create_rx_rings(struct gve_priv *priv);
int gve_destroy_rx_rings(struct gve_priv *priv);
int gve_rx_intr(void *arg);
void gve_rx_cleanup_tq(void *arg, int pending);

/* RX functions defined in gve_rx_dqo.c */
int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int i);
void gve_rx_free_ring_dqo(struct gve_priv *priv, int i);
void gve_rx_prefill_buffers_dqo(struct gve_rx_ring *rx);
void gve_clear_rx_ring_dqo(struct gve_priv *priv, int i);
int gve_rx_intr_dqo(void *arg);
void gve_rx_cleanup_tq_dqo(void *arg, int pending);

/* DMA functions defined in gve_utils.c */
int gve_dma_alloc_coherent(struct gve_priv *priv, int size, int align,
    struct gve_dma_handle *dma);
void gve_dma_free_coherent(struct gve_dma_handle *dma);
int gve_dmamap_create(struct gve_priv *priv, int size, int align,
    struct gve_dma_handle *dma);
void gve_dmamap_destroy(struct gve_dma_handle *dma);

/* IRQ functions defined in gve_utils.c */
void gve_free_irqs(struct gve_priv *priv);
int gve_alloc_irqs(struct gve_priv *priv);
void gve_unmask_all_queue_irqs(struct gve_priv *priv);
void gve_mask_all_queue_irqs(struct gve_priv *priv);

/* Miscellaneous functions defined in gve_utils.c */
void gve_invalidate_timestamp(int64_t *timestamp_sec);
int64_t gve_seconds_since(int64_t *timestamp_sec);
void gve_set_timestamp(int64_t *timestamp_sec);
bool gve_timestamp_valid(int64_t *timestamp_sec);
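/*
 * Illustrative sketch (editorial, not the actual timeout-check code): the
 * timestamp helpers above pair with the enqueue_time_sec fields and the
 * timeout constants near the top of this header roughly as follows, where
 * `info` stands for a gve_tx_buffer_state or pending-packet entry.
 *
 *      gve_set_timestamp(&info->enqueue_time_sec);     // when descs are posted
 *      ...
 *      if (gve_timestamp_valid(&info->enqueue_time_sec) &&
 *          gve_seconds_since(&info->enqueue_time_sec) > GVE_TX_TIMEOUT_PKT_SEC) {
 *              // packet looks timed out; kick the queue, or schedule a reset
 *              // if a kick already happened within GVE_TX_TIMEOUT_KICK_COOLDOWN_SEC
 *      }
 *      gve_invalidate_timestamp(&info->enqueue_time_sec); // when the mbuf is freed
 */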

/* Sysctl functions defined in gve_sysctl.c */
extern bool gve_disable_hw_lro;
extern bool gve_allow_4k_rx_buffers;
extern char gve_queue_format[8];
extern char gve_version[8];
void gve_setup_sysctl(struct gve_priv *priv);
void gve_accum_stats(struct gve_priv *priv, uint64_t *rpackets,
    uint64_t *rbytes, uint64_t *rx_dropped_pkt, uint64_t *tpackets,
    uint64_t *tbytes, uint64_t *tx_dropped_pkt);

/* Stats functions defined in gve_utils.c */
void gve_alloc_counters(counter_u64_t *stat, int num_stats);
void gve_free_counters(counter_u64_t *stat, int num_stats);

#endif /* _GVE_FBSD_H */