Lines Matching +full:com +full:- +full:offset
1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 2023-2024 Google LLC
40 struct gve_queue_page_list *qpl = tx->com.qpl; in gve_tx_fifo_init()
41 struct gve_tx_fifo *fifo = &tx->fifo; in gve_tx_fifo_init()
43 fifo->size = qpl->num_pages * PAGE_SIZE; in gve_tx_fifo_init()
44 fifo->base = qpl->kva; in gve_tx_fifo_init()
45 atomic_store_int(&fifo->available, fifo->size); in gve_tx_fifo_init()
46 fifo->head = 0; in gve_tx_fifo_init()
54 struct gve_tx_ring *tx = &priv->tx[i]; in gve_tx_free_ring_gqi()
56 if (tx->desc_ring != NULL) { in gve_tx_free_ring_gqi()
57 gve_dma_free_coherent(&tx->desc_ring_mem); in gve_tx_free_ring_gqi()
58 tx->desc_ring = NULL; in gve_tx_free_ring_gqi()
61 if (tx->info != NULL) { in gve_tx_free_ring_gqi()
62 free(tx->info, M_GVE); in gve_tx_free_ring_gqi()
63 tx->info = NULL; in gve_tx_free_ring_gqi()
70 struct gve_tx_ring *tx = &priv->tx[i]; in gve_tx_free_ring()
71 struct gve_ring_com *com = &tx->com; in gve_tx_free_ring() local
74 gve_free_counters((counter_u64_t *)&tx->stats, NUM_TX_STATS); in gve_tx_free_ring()
76 if (mtx_initialized(&tx->ring_mtx)) in gve_tx_free_ring()
77 mtx_destroy(&tx->ring_mtx); in gve_tx_free_ring()
79 if (com->q_resources != NULL) { in gve_tx_free_ring()
80 gve_dma_free_coherent(&com->q_resources_mem); in gve_tx_free_ring()
81 com->q_resources = NULL; in gve_tx_free_ring()
84 if (tx->br != NULL) { in gve_tx_free_ring()
85 buf_ring_free(tx->br, M_DEVBUF); in gve_tx_free_ring()
86 tx->br = NULL; in gve_tx_free_ring()
98 struct gve_tx_ring *tx = &priv->tx[i]; in gve_tx_alloc_ring_gqi()
99 struct gve_ring_com *com = &tx->com; in gve_tx_alloc_ring_gqi() local
103 sizeof(union gve_tx_desc) * priv->tx_desc_cnt, in gve_tx_alloc_ring_gqi()
104 CACHE_LINE_SIZE, &tx->desc_ring_mem); in gve_tx_alloc_ring_gqi()
106 device_printf(priv->dev, in gve_tx_alloc_ring_gqi()
110 tx->desc_ring = tx->desc_ring_mem.cpu_addr; in gve_tx_alloc_ring_gqi()
112 com->qpl = &priv->qpls[i]; in gve_tx_alloc_ring_gqi()
113 if (com->qpl == NULL) { in gve_tx_alloc_ring_gqi()
114 device_printf(priv->dev, "No QPL left for tx ring %d\n", i); in gve_tx_alloc_ring_gqi()
123 tx->info = malloc( in gve_tx_alloc_ring_gqi()
124 sizeof(struct gve_tx_buffer_state) * priv->tx_desc_cnt, in gve_tx_alloc_ring_gqi()
136 struct gve_tx_ring *tx = &priv->tx[i]; in gve_tx_alloc_ring()
137 struct gve_ring_com *com = &tx->com; in gve_tx_alloc_ring() local
141 com->priv = priv; in gve_tx_alloc_ring()
142 com->id = i; in gve_tx_alloc_ring()
152 mtx_init(&tx->ring_mtx, mtx_name, NULL, MTX_DEF); in gve_tx_alloc_ring()
154 tx->br = buf_ring_alloc(GVE_TX_BUFRING_ENTRIES, M_DEVBUF, in gve_tx_alloc_ring()
155 M_WAITOK, &tx->ring_mtx); in gve_tx_alloc_ring()
157 gve_alloc_counters((counter_u64_t *)&tx->stats, NUM_TX_STATS); in gve_tx_alloc_ring()
160 PAGE_SIZE, &com->q_resources_mem); in gve_tx_alloc_ring()
162 device_printf(priv->dev, in gve_tx_alloc_ring()
166 com->q_resources = com->q_resources_mem.cpu_addr; in gve_tx_alloc_ring()
181 priv->tx = malloc(sizeof(struct gve_tx_ring) * priv->tx_cfg.num_queues, in gve_alloc_tx_rings()
184 for (i = 0; i < priv->tx_cfg.num_queues; i++) { in gve_alloc_tx_rings()
194 while (i--) in gve_alloc_tx_rings()
196 free(priv->tx, M_GVE); in gve_alloc_tx_rings()
205 for (i = 0; i < priv->tx_cfg.num_queues; i++) in gve_free_tx_rings()
208 free(priv->tx, M_GVE); in gve_free_tx_rings()
214 struct gve_ring_com *com = &tx->com; in gve_tx_clear_desc_ring() local
217 for (i = 0; i < com->priv->tx_desc_cnt; i++) { in gve_tx_clear_desc_ring()
218 tx->desc_ring[i] = (union gve_tx_desc){}; in gve_tx_clear_desc_ring()
219 tx->info[i] = (struct gve_tx_buffer_state){}; in gve_tx_clear_desc_ring()
222 bus_dmamap_sync(tx->desc_ring_mem.tag, tx->desc_ring_mem.map, in gve_tx_clear_desc_ring()
229 struct gve_tx_ring *tx = &priv->tx[i]; in gve_clear_tx_ring()
230 struct gve_tx_fifo *fifo = &tx->fifo; in gve_clear_tx_ring()
232 tx->req = 0; in gve_clear_tx_ring()
233 tx->done = 0; in gve_clear_tx_ring()
234 tx->mask = priv->tx_desc_cnt - 1; in gve_clear_tx_ring()
236 atomic_store_int(&fifo->available, fifo->size); in gve_clear_tx_ring()
237 fifo->head = 0; in gve_clear_tx_ring()
245 struct gve_tx_ring *tx = &priv->tx[i]; in gve_start_tx_ring()
246 struct gve_ring_com *com = &tx->com; in gve_start_tx_ring() local
248 atomic_store_bool(&tx->stopped, false); in gve_start_tx_ring()
250 NET_TASK_INIT(&com->cleanup_task, 0, gve_tx_cleanup_tq, tx); in gve_start_tx_ring()
252 NET_TASK_INIT(&com->cleanup_task, 0, gve_tx_cleanup_tq_dqo, tx); in gve_start_tx_ring()
253 com->cleanup_tq = taskqueue_create_fast("gve tx", M_WAITOK, in gve_start_tx_ring()
254 taskqueue_thread_enqueue, &com->cleanup_tq); in gve_start_tx_ring()
255 taskqueue_start_threads(&com->cleanup_tq, 1, PI_NET, "%s txq %d", in gve_start_tx_ring()
256 device_get_nameunit(priv->dev), i); in gve_start_tx_ring()
258 TASK_INIT(&tx->xmit_task, 0, gve_xmit_tq, tx); in gve_start_tx_ring()
259 tx->xmit_tq = taskqueue_create_fast("gve tx xmit", in gve_start_tx_ring()
260 M_WAITOK, taskqueue_thread_enqueue, &tx->xmit_tq); in gve_start_tx_ring()
261 taskqueue_start_threads(&tx->xmit_tq, 1, PI_NET, "%s txq %d xmit", in gve_start_tx_ring()
262 device_get_nameunit(priv->dev), i); in gve_start_tx_ring()
268 struct gve_ring_com *com; in gve_create_tx_rings() local
276 for (i = 0; i < priv->tx_cfg.num_queues; i++) { in gve_create_tx_rings()
283 err = gve_adminq_create_tx_queues(priv, priv->tx_cfg.num_queues); in gve_create_tx_rings()
287 bus_dmamap_sync(priv->irqs_db_mem.tag, priv->irqs_db_mem.map, in gve_create_tx_rings()
290 for (i = 0; i < priv->tx_cfg.num_queues; i++) { in gve_create_tx_rings()
291 tx = &priv->tx[i]; in gve_create_tx_rings()
292 com = &tx->com; in gve_create_tx_rings()
294 com->irq_db_offset = 4 * be32toh(priv->irq_db_indices[com->ntfy_id].index); in gve_create_tx_rings()
296 bus_dmamap_sync(com->q_resources_mem.tag, com->q_resources_mem.map, in gve_create_tx_rings()
298 com->db_offset = 4 * be32toh(com->q_resources->db_index); in gve_create_tx_rings()
299 com->counter_idx = be32toh(com->q_resources->counter_index); in gve_create_tx_rings()
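The matched gve_create_tx_rings() lines above convert device-reported, big-endian 32-bit register indices into byte offsets by multiplying by 4; the doorbell BAR is addressed as an array of 4-byte registers, which is why the later gve_db_bar_write_4() calls in this listing take those offsets. A minimal userspace sketch of that arithmetic, using a hand-rolled decode in place of the kernel's be32toh() (the demo_* names are illustrative only, not the driver's):

#include <stdint.h>
#include <stdio.h>

/* Portable stand-in for be32toh(): decode a big-endian 32-bit field. */
static uint32_t
demo_be32_decode(const uint8_t b[4])
{
        return (((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
            ((uint32_t)b[2] << 8) | (uint32_t)b[3]);
}

int
main(void)
{
        /* A device-written doorbell index of 7, as it sits in memory. */
        uint8_t raw_index[4] = { 0x00, 0x00, 0x00, 0x07 };
        uint32_t index = demo_be32_decode(raw_index);

        /* 4-byte doorbell registers: index 7 lives at byte offset 28. */
        printf("doorbell byte offset = %u\n", (unsigned)(4 * index));
        return (0);
}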
311 struct gve_tx_ring *tx = &priv->tx[i]; in gve_stop_tx_ring()
312 struct gve_ring_com *com = &tx->com; in gve_stop_tx_ring() local
314 if (com->cleanup_tq != NULL) { in gve_stop_tx_ring()
315 taskqueue_quiesce(com->cleanup_tq); in gve_stop_tx_ring()
316 taskqueue_free(com->cleanup_tq); in gve_stop_tx_ring()
317 com->cleanup_tq = NULL; in gve_stop_tx_ring()
320 if (tx->xmit_tq != NULL) { in gve_stop_tx_ring()
321 taskqueue_quiesce(tx->xmit_tq); in gve_stop_tx_ring()
322 taskqueue_free(tx->xmit_tq); in gve_stop_tx_ring()
323 tx->xmit_tq = NULL; in gve_stop_tx_ring()
333 for (i = 0; i < priv->tx_cfg.num_queues; i++) in gve_destroy_tx_rings()
337 err = gve_adminq_destroy_tx_queues(priv, priv->tx_cfg.num_queues); in gve_destroy_tx_rings()
350 struct gve_priv *priv = tx->com.priv; in gve_tx_intr()
351 struct gve_ring_com *com = &tx->com; in gve_tx_intr() local
353 if (__predict_false((if_getdrvflags(priv->ifp) & IFF_DRV_RUNNING) == 0)) in gve_tx_intr()
356 gve_db_bar_write_4(priv, com->irq_db_offset, GVE_IRQ_MASK); in gve_tx_intr()
357 taskqueue_enqueue(com->cleanup_tq, &com->cleanup_task); in gve_tx_intr()
364 bus_dmamap_sync(priv->counter_array_mem.tag, priv->counter_array_mem.map, in gve_tx_load_event_counter()
366 uint32_t counter = priv->counters[tx->com.counter_idx]; in gve_tx_load_event_counter()
373 atomic_add_int(&fifo->available, bytes); in gve_tx_free_fifo()
380 struct gve_priv *priv = tx->com.priv; in gve_tx_cleanup_tq()
382 uint32_t todo = nic_done - tx->done; in gve_tx_cleanup_tq()
386 if (__predict_false((if_getdrvflags(priv->ifp) & IFF_DRV_RUNNING) == 0)) in gve_tx_cleanup_tq()
390 uint32_t idx = tx->done & tx->mask; in gve_tx_cleanup_tq()
391 struct gve_tx_buffer_state *info = &tx->info[idx]; in gve_tx_cleanup_tq()
392 struct mbuf *mbuf = info->mbuf; in gve_tx_cleanup_tq()
394 tx->done++; in gve_tx_cleanup_tq()
398 info->mbuf = NULL; in gve_tx_cleanup_tq()
400 counter_u64_add_protected(tx->stats.tbytes, mbuf->m_pkthdr.len); in gve_tx_cleanup_tq()
401 counter_u64_add_protected(tx->stats.tpackets, 1); in gve_tx_cleanup_tq()
406 space_freed += info->iov[i].iov_len + info->iov[i].iov_padding; in gve_tx_cleanup_tq()
407 info->iov[i].iov_len = 0; in gve_tx_cleanup_tq()
408 info->iov[i].iov_padding = 0; in gve_tx_cleanup_tq()
412 gve_tx_free_fifo(&tx->fifo, space_freed); in gve_tx_cleanup_tq()
414 gve_db_bar_write_4(priv, tx->com.irq_db_offset, in gve_tx_cleanup_tq()
425 todo = nic_done - tx->done; in gve_tx_cleanup_tq()
427 gve_db_bar_write_4(priv, tx->com.irq_db_offset, GVE_IRQ_MASK); in gve_tx_cleanup_tq()
428 taskqueue_enqueue(tx->com.cleanup_tq, &tx->com.cleanup_task); in gve_tx_cleanup_tq()
431 if (atomic_load_bool(&tx->stopped) && space_freed) { in gve_tx_cleanup_tq()
432 atomic_store_bool(&tx->stopped, false); in gve_tx_cleanup_tq()
433 taskqueue_enqueue(tx->xmit_tq, &tx->xmit_task); in gve_tx_cleanup_tq()
441 uint64_t last_page = (iov_offset + iov_len - 1) / PAGE_SIZE; in gve_dma_sync_for_device()
447 dma = &(qpl->dmas[page]); in gve_dma_sync_for_device()
448 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); in gve_dma_sync_for_device()
455 mtd_desc->type_flags = GVE_TXD_MTD | GVE_MTD_SUBTYPE_PATH; in gve_tx_fill_mtd_desc()
456 mtd_desc->path_state = GVE_MTD_PATH_STATE_DEFAULT | GVE_MTD_PATH_HASH_L4; in gve_tx_fill_mtd_desc()
457 mtd_desc->path_hash = htobe32(mbuf->m_pkthdr.flowid); in gve_tx_fill_mtd_desc()
458 mtd_desc->reserved0 = 0; in gve_tx_fill_mtd_desc()
459 mtd_desc->reserved1 = 0; in gve_tx_fill_mtd_desc()
469 pkt_desc->type_flags = GVE_TXD_TSO | GVE_TXF_L4CSUM; in gve_tx_fill_pkt_desc()
470 pkt_desc->l4_csum_offset = csum_offset >> 1; in gve_tx_fill_pkt_desc()
471 pkt_desc->l4_hdr_offset = l4_hdr_offset >> 1; in gve_tx_fill_pkt_desc()
473 pkt_desc->type_flags = GVE_TXD_STD | GVE_TXF_L4CSUM; in gve_tx_fill_pkt_desc()
474 pkt_desc->l4_csum_offset = csum_offset >> 1; in gve_tx_fill_pkt_desc()
475 pkt_desc->l4_hdr_offset = l4_hdr_offset >> 1; in gve_tx_fill_pkt_desc()
477 pkt_desc->type_flags = GVE_TXD_STD; in gve_tx_fill_pkt_desc()
478 pkt_desc->l4_csum_offset = 0; in gve_tx_fill_pkt_desc()
479 pkt_desc->l4_hdr_offset = 0; in gve_tx_fill_pkt_desc()
481 pkt_desc->desc_cnt = desc_cnt; in gve_tx_fill_pkt_desc()
482 pkt_desc->len = htobe16(pkt_len); in gve_tx_fill_pkt_desc()
483 pkt_desc->seg_len = htobe16(first_seg_len); in gve_tx_fill_pkt_desc()
484 pkt_desc->seg_addr = htobe64(addr); in gve_tx_fill_pkt_desc()
492 seg_desc->type_flags = GVE_TXD_SEG; in gve_tx_fill_seg_desc()
495 seg_desc->type_flags |= GVE_TXSF_IPV6; in gve_tx_fill_seg_desc()
496 seg_desc->l3_offset = l3_off >> 1; in gve_tx_fill_seg_desc()
497 seg_desc->mss = htobe16(tso_mss); in gve_tx_fill_seg_desc()
499 seg_desc->seg_len = htobe16(len); in gve_tx_fill_seg_desc()
500 seg_desc->seg_addr = htobe64(addr); in gve_tx_fill_seg_desc()
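The right-shifts in the matched gve_tx_fill_pkt_desc() and gve_tx_fill_seg_desc() lines encode the checksum, L4-header and L3 offsets in 2-byte units, while lengths and the segment address are stored big-endian via htobe16()/htobe64(). The sketch below illustrates those conventions with a simplified stand-in layout (demo_pkt_desc is not the driver's real descriptor, and the flag bits are omitted):

#include <stdint.h>
#include <stdio.h>

struct demo_pkt_desc {
        uint8_t type_flags;
        uint8_t l4_csum_offset;         /* byte offset / 2 */
        uint8_t l4_hdr_offset;          /* byte offset / 2 */
        uint8_t desc_cnt;
        uint8_t len_be[2];              /* big-endian packet length */
        uint8_t seg_len_be[2];          /* big-endian first-segment length */
        uint8_t seg_addr_be[8];         /* big-endian offset of the data */
};

/* Write v into p in big-endian byte order. */
static void
demo_put_be16(uint8_t *p, uint16_t v)
{
        p[0] = (uint8_t)(v >> 8);
        p[1] = (uint8_t)v;
}

static void
demo_put_be64(uint8_t *p, uint64_t v)
{
        for (int i = 0; i < 8; i++)
                p[i] = (uint8_t)(v >> (56 - 8 * i));
}

static void
demo_fill_pkt_desc(struct demo_pkt_desc *d, int csum_offset, int l4_hdr_offset,
    uint8_t desc_cnt, uint16_t pkt_len, uint16_t first_seg_len, uint64_t addr)
{
        d->type_flags = 0;                               /* flags omitted */
        d->l4_csum_offset = (uint8_t)(csum_offset >> 1); /* bytes -> 2B units */
        d->l4_hdr_offset = (uint8_t)(l4_hdr_offset >> 1);
        d->desc_cnt = desc_cnt;
        demo_put_be16(d->len_be, pkt_len);
        demo_put_be16(d->seg_len_be, first_seg_len);
        demo_put_be64(d->seg_addr_be, addr);
}

int
main(void)
{
        struct demo_pkt_desc d;

        /* IPv4+TCP: L4 header at byte 34, TCP checksum field at byte 50. */
        demo_fill_pkt_desc(&d, 50, 34, 3, 1500, 82, 4096);
        printf("csum=%u hdr=%u len_be=%02x%02x\n", d.l4_csum_offset,
            d.l4_hdr_offset, d.len_be[0], d.len_be[1]);
        return (0);
}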
506 return (tx->mask + 1 - (tx->req - tx->done)); in gve_tx_avail()
512 return (atomic_load_int(&fifo->available) >= bytes); in gve_tx_fifo_can_alloc()
519 gve_tx_fifo_can_alloc(&tx->fifo, bytes_required)); in gve_can_tx()
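The matched gve_tx_avail() and gve_can_tx() lines lean on the usual power-of-two ring idiom: req and done are free-running 32-bit counters, the ring holds mask + 1 entries, and unsigned wraparound keeps req - done equal to the number of outstanding descriptors, so mask + 1 - (req - done) is the free space. A small sketch of that accounting (the demo_* names are illustrative, not the driver's):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct demo_ring {
        uint32_t req;   /* descriptors posted, free-running counter */
        uint32_t done;  /* descriptors completed, free-running counter */
        uint32_t mask;  /* ring size - 1; ring size is a power of two */
};

/* Same shape as the matched gve_tx_avail() return expression. */
static uint32_t
demo_ring_avail(const struct demo_ring *r)
{
        return (r->mask + 1 - (r->req - r->done));
}

int
main(void)
{
        /* A 256-entry ring whose counters have already wrapped past 2^32. */
        struct demo_ring r = {
                .req = 0x00000005, .done = 0xfffffffbU, .mask = 255 };

        /* req - done wraps to 10 in-flight entries, leaving 246 slots. */
        assert(demo_ring_avail(&r) == 246);

        /* The slot for the next posted descriptor is req & mask. */
        printf("avail=%u next slot=%u\n", (unsigned)demo_ring_avail(&r),
            (unsigned)(r.req & r.mask));
        return (0);
}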
525 return (fifo->head + bytes < fifo->size) ? 0 : fifo->size - fifo->head; in gve_tx_fifo_pad_alloc_one_frag()
535 pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->fifo, first_seg_len); in gve_fifo_bytes_required()
537 align_hdr_pad = roundup2(first_seg_len, CACHE_LINE_SIZE) - first_seg_len; in gve_fifo_bytes_required()
566 iov[0].iov_offset = fifo->head; in gve_tx_alloc_fifo()
568 fifo->head += bytes; in gve_tx_alloc_fifo()
570 if (fifo->head > fifo->size) { in gve_tx_alloc_fifo()
576 overflow = fifo->head - fifo->size; in gve_tx_alloc_fifo()
577 iov[0].iov_len -= overflow; in gve_tx_alloc_fifo()
581 fifo->head = overflow; in gve_tx_alloc_fifo()
584 /* Re-align to a cacheline boundary */ in gve_tx_alloc_fifo()
585 aligned_head = roundup2(fifo->head, CACHE_LINE_SIZE); in gve_tx_alloc_fifo()
586 padding = aligned_head - fifo->head; in gve_tx_alloc_fifo()
587 iov[nfrags - 1].iov_padding = padding; in gve_tx_alloc_fifo()
588 atomic_add_int(&fifo->available, -(bytes + padding)); in gve_tx_alloc_fifo()
589 fifo->head = aligned_head; in gve_tx_alloc_fifo()
591 if (fifo->head == fifo->size) in gve_tx_alloc_fifo()
592 fifo->head = 0; in gve_tx_alloc_fifo()
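The matched gve_tx_alloc_fifo() lines implement a bump allocator over the bounce-copy FIFO: an allocation that runs past the end of the buffer is split into two iovecs, and the new head is then rounded up to a cacheline boundary, with both the bytes and the padding charged against the available count so the completion path (gve_tx_free_fifo() above) can give them back. A minimal userspace sketch of that pattern, assuming simplified demo_* types and a 64-byte cacheline:

#include <stdint.h>
#include <stdio.h>

#define DEMO_CACHELINE          64
#define DEMO_ROUNDUP2(x, a)     (((x) + ((a) - 1)) & ~((uint32_t)(a) - 1))

struct demo_iov {
        uint32_t iov_offset;
        uint32_t iov_len;
        uint32_t iov_padding;
};

struct demo_fifo {
        uint32_t head;          /* next free byte */
        uint32_t size;          /* total FIFO bytes */
        int32_t  available;     /* bytes not yet reclaimed by completions */
};

/* Returns the number of iovecs used: 1 normally, 2 when the request wraps. */
static int
demo_fifo_alloc(struct demo_fifo *fifo, uint32_t bytes, struct demo_iov iov[2])
{
        uint32_t aligned_head, padding, overflow;
        int nfrags = 1;

        iov[0].iov_offset = fifo->head;
        iov[0].iov_len = bytes;
        fifo->head += bytes;

        if (fifo->head > fifo->size) {
                /* Split the allocation across the end of the FIFO. */
                nfrags = 2;
                overflow = fifo->head - fifo->size;
                iov[0].iov_len -= overflow;
                iov[1].iov_offset = 0;
                iov[1].iov_len = overflow;
                fifo->head = overflow;
        }

        /* Re-align the head to a cacheline boundary; charge the padding. */
        aligned_head = DEMO_ROUNDUP2(fifo->head, DEMO_CACHELINE);
        padding = aligned_head - fifo->head;
        iov[nfrags - 1].iov_padding = padding;
        fifo->available -= (int32_t)(bytes + padding);
        fifo->head = aligned_head;
        if (fifo->head == fifo->size)
                fifo->head = 0;

        return (nfrags);
}

int
main(void)
{
        /* head sits near the end of a 4 KiB FIFO from earlier allocations. */
        struct demo_fifo fifo = { .head = 4000, .size = 4096, .available = 4096 };
        struct demo_iov iov[2] = { { 0 }, { 0 } };
        int nfrags = demo_fifo_alloc(&fifo, 200, iov);

        /* The 200-byte request wraps: 96 bytes at 4000, then 104 at 0. */
        printf("nfrags=%d first=%u@%u second=%u@%u pad=%u head=%u\n", nfrags,
            (unsigned)iov[0].iov_len, (unsigned)iov[0].iov_offset,
            (unsigned)iov[1].iov_len, (unsigned)iov[1].iov_offset,
            (unsigned)iov[nfrags - 1].iov_padding, (unsigned)fifo.head);
        return (0);
}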
602 int csum_flags, csum_offset, mtd_desc_nr, offset, copy_offset; in gve_xmit() local
609 uint32_t idx = tx->req & tx->mask; in gve_xmit()
621 info = &tx->info[idx]; in gve_xmit()
622 csum_flags = mbuf->m_pkthdr.csum_flags; in gve_xmit()
623 pkt_len = mbuf->m_pkthdr.len; in gve_xmit()
628 tso_mss = is_tso ? mbuf->m_pkthdr.tso_segsz : 0; in gve_xmit()
631 KASSERT(eh->ether_type != ETHERTYPE_VLAN, in gve_xmit()
632 ("VLAN-tagged packets not supported")); in gve_xmit()
634 is_ipv6 = ntohs(eh->ether_type) == ETHERTYPE_IPV6; in gve_xmit()
636 mbuf_next = m_getptr(mbuf, l3_off, &offset); in gve_xmit()
639 ip6 = (struct ip6_hdr *)(mtodo(mbuf_next, offset)); in gve_xmit()
641 is_tcp = (ip6->ip6_nxt == IPPROTO_TCP); in gve_xmit()
642 is_udp = (ip6->ip6_nxt == IPPROTO_UDP); in gve_xmit()
643 mbuf_next = m_getptr(mbuf, l4_off, &offset); in gve_xmit()
644 } else if (ntohs(eh->ether_type) == ETHERTYPE_IP) { in gve_xmit()
645 ip = (struct ip *)(mtodo(mbuf_next, offset)); in gve_xmit()
646 l4_off = l3_off + (ip->ip_hl << 2); in gve_xmit()
647 is_tcp = (ip->ip_p == IPPROTO_TCP); in gve_xmit()
648 is_udp = (ip->ip_p == IPPROTO_UDP); in gve_xmit()
649 mbuf_next = m_getptr(mbuf, l4_off, &offset); in gve_xmit()
654 th = (struct tcphdr *)(mtodo(mbuf_next, offset)); in gve_xmit()
655 l4_data_off = l4_off + (th->th_off << 2); in gve_xmit()
669 * spec-stipulated minimum of 182B. in gve_xmit()
679 counter_u64_add_protected(tx->stats.tx_delayed_pkt_nospace_device, 1); in gve_xmit()
685 info->mbuf = mbuf; in gve_xmit()
691 pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->fifo, first_seg_len); in gve_xmit()
692 hdr_nfrags = gve_tx_alloc_fifo(&tx->fifo, first_seg_len + pad_bytes, in gve_xmit()
693 &info->iov[0]); in gve_xmit()
695 payload_nfrags = gve_tx_alloc_fifo(&tx->fifo, pkt_len - first_seg_len, in gve_xmit()
696 &info->iov[payload_iov]); in gve_xmit()
698 pkt_desc = &tx->desc_ring[idx].pkt; in gve_xmit()
701 info->iov[hdr_nfrags - 1].iov_offset, has_csum_flag, csum_offset, in gve_xmit()
705 (char *)tx->fifo.base + info->iov[hdr_nfrags - 1].iov_offset); in gve_xmit()
706 gve_dma_sync_for_device(tx->com.qpl, in gve_xmit()
707 info->iov[hdr_nfrags - 1].iov_offset, in gve_xmit()
708 info->iov[hdr_nfrags - 1].iov_len); in gve_xmit()
712 next_idx = (tx->req + 1) & tx->mask; in gve_xmit()
713 mtd_desc = &tx->desc_ring[next_idx].mtd; in gve_xmit()
718 next_idx = (tx->req + 1 + mtd_desc_nr + i - payload_iov) & tx->mask; in gve_xmit()
719 seg_desc = &tx->desc_ring[next_idx].seg; in gve_xmit()
721 gve_tx_fill_seg_desc(seg_desc, is_tso, info->iov[i].iov_len, in gve_xmit()
722 info->iov[i].iov_offset, is_ipv6, l3_off, tso_mss); in gve_xmit()
724 m_copydata(mbuf, copy_offset, info->iov[i].iov_len, in gve_xmit()
725 (char *)tx->fifo.base + info->iov[i].iov_offset); in gve_xmit()
726 gve_dma_sync_for_device(tx->com.qpl, in gve_xmit()
727 info->iov[i].iov_offset, info->iov[i].iov_len); in gve_xmit()
728 copy_offset += info->iov[i].iov_len; in gve_xmit()
731 tx->req += (1 + mtd_desc_nr + payload_nfrags); in gve_xmit()
734 counter_u64_add_protected(tx->stats.tso_packet_cnt, 1); in gve_xmit()
744 if (gve_is_gqi(tx->com.priv)) in gve_xmit_mbuf()
747 if (gve_is_qpl(tx->com.priv)) in gve_xmit_mbuf()
759 * Has the side-effect of stopping the xmit queue by setting tx->stopped
767 atomic_store_bool(&tx->stopped, true); in gve_xmit_retry_enobuf_mbuf()
774 * iteration creating the room will either see a tx->stopped value in gve_xmit_retry_enobuf_mbuf()
788 atomic_store_bool(&tx->stopped, false); in gve_xmit_retry_enobuf_mbuf()
796 struct gve_priv *priv = tx->com.priv; in gve_xmit_br()
797 struct ifnet *ifp = priv->ifp; in gve_xmit_br()
802 (mbuf = drbr_peek(ifp, tx->br)) != NULL) { in gve_xmit_br()
816 drbr_advance(ifp, tx->br); in gve_xmit_br()
819 drbr_putback(ifp, tx->br, mbuf); in gve_xmit_br()
823 drbr_advance(ifp, tx->br); in gve_xmit_br()
826 bus_dmamap_sync(tx->desc_ring_mem.tag, tx->desc_ring_mem.map, in gve_xmit_br()
830 gve_db_bar_write_4(priv, tx->com.db_offset, tx->req); in gve_xmit_br()
832 gve_db_bar_dqo_write_4(priv, tx->com.db_offset, in gve_xmit_br()
833 tx->dqo.desc_tail); in gve_xmit_br()
853 return (ntohs(eh->ether_type) == ETHERTYPE_VLAN); in is_vlan_tagged_pkt()
865 if (__predict_false((if_getdrvflags(priv->ifp) & IFF_DRV_RUNNING) == 0)) in gve_xmit_ifp()
869 i = mbuf->m_pkthdr.flowid % priv->tx_cfg.num_queues; in gve_xmit_ifp()
871 i = curcpu % priv->tx_cfg.num_queues; in gve_xmit_ifp()
872 tx = &priv->tx[i]; in gve_xmit_ifp()
876 counter_u64_add_protected(tx->stats.tx_dropped_pkt_vlan, 1); in gve_xmit_ifp()
877 counter_u64_add_protected(tx->stats.tx_dropped_pkt, 1); in gve_xmit_ifp()
883 is_br_empty = drbr_empty(ifp, tx->br); in gve_xmit_ifp()
884 err = drbr_enqueue(ifp, tx->br, mbuf); in gve_xmit_ifp()
886 if (!atomic_load_bool(&tx->stopped)) in gve_xmit_ifp()
887 taskqueue_enqueue(tx->xmit_tq, &tx->xmit_task); in gve_xmit_ifp()
889 counter_u64_add_protected(tx->stats.tx_dropped_pkt_nospace_bufring, 1); in gve_xmit_ifp()
890 counter_u64_add_protected(tx->stats.tx_dropped_pkt, 1); in gve_xmit_ifp()
902 } else if (!atomic_load_bool(&tx->stopped)) in gve_xmit_ifp()
903 taskqueue_enqueue(tx->xmit_tq, &tx->xmit_task); in gve_xmit_ifp()
915 for (i = 0; i < priv->tx_cfg.num_queues; ++i) { in gve_qflush()
916 tx = &priv->tx[i]; in gve_qflush()
917 if (drbr_empty(ifp, tx->br) == 0) { in gve_qflush()
919 drbr_flush(ifp, tx->br); in gve_qflush()