/* Excerpts from the TX (transmit) path of the FreeBSD gve(4) driver. */
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2023-2024 Google LLC
 * ...
 */
gve_tx_fifo_init(struct gve_priv *priv, struct gve_tx_ring *tx)
{
        struct gve_queue_page_list *qpl = tx->com.qpl;
        struct gve_tx_fifo *fifo = &tx->fifo;

        fifo->size = qpl->num_pages * PAGE_SIZE;
        fifo->base = qpl->kva;
        atomic_store_int(&fifo->available, fifo->size);
        fifo->head = 0;
        /* ... */
}
/* in gve_tx_free_ring_gqi() */
        struct gve_tx_ring *tx = &priv->tx[i];
        struct gve_ring_com *com = &tx->com;

        if (tx->desc_ring != NULL) {
                gve_dma_free_coherent(&tx->desc_ring_mem);
                tx->desc_ring = NULL;
        }

        if (tx->info != NULL) {
                free(tx->info, M_GVE);
                tx->info = NULL;
        }

        if (com->qpl != NULL) {
                gve_free_qpl(priv, com->qpl);
                com->qpl = NULL;
        }
/* in gve_tx_free_ring() */
        struct gve_tx_ring *tx = &priv->tx[i];
        struct gve_ring_com *com = &tx->com;

        /* ... */
        gve_free_counters((counter_u64_t *)&tx->stats, NUM_TX_STATS);

        if (mtx_initialized(&tx->ring_mtx))
                mtx_destroy(&tx->ring_mtx);

        if (com->q_resources != NULL) {
                gve_dma_free_coherent(&com->q_resources_mem);
                com->q_resources = NULL;
        }

        if (tx->br != NULL) {
                buf_ring_free(tx->br, M_DEVBUF);
                tx->br = NULL;
        }
/* in gve_tx_alloc_ring_gqi() */
        struct gve_tx_ring *tx = &priv->tx[i];
        struct gve_ring_com *com = &tx->com;

        /* ... */
            sizeof(union gve_tx_desc) * priv->tx_desc_cnt,
            CACHE_LINE_SIZE, &tx->desc_ring_mem);
        /* ... */
                device_printf(priv->dev,
                    "Failed to alloc desc ring for tx ring %d", i);
        /* ... */
        tx->desc_ring = tx->desc_ring_mem.cpu_addr;

        com->qpl = gve_alloc_qpl(priv, i, priv->tx_desc_cnt / GVE_QPL_DIVISOR,
            /* ... */);
        if (com->qpl == NULL) {
                device_printf(priv->dev,
                    "Failed to alloc QPL for tx ring %d\n", i);
                /* ... */
        }

        err = gve_tx_fifo_init(priv, tx);
        /* ... */

        tx->info = malloc(
            sizeof(struct gve_tx_buffer_state) * priv->tx_desc_cnt,
            /* ... */);
/* in gve_tx_alloc_ring() */
        struct gve_tx_ring *tx = &priv->tx[i];
        struct gve_ring_com *com = &tx->com;

        /* ... */
        com->priv = priv;
        com->id = i;

        /* ... */
        mtx_init(&tx->ring_mtx, mtx_name, NULL, MTX_DEF);

        tx->br = buf_ring_alloc(GVE_TX_BUFRING_ENTRIES, M_DEVBUF,
            M_WAITOK, &tx->ring_mtx);

        gve_alloc_counters((counter_u64_t *)&tx->stats, NUM_TX_STATS);

        /* ... */
            PAGE_SIZE, &com->q_resources_mem);
        /* ... */
                device_printf(priv->dev,
                    "Failed to alloc queue resources for tx ring %d", i);
        /* ... */
        com->q_resources = com->q_resources_mem.cpu_addr;

        tx->last_kicked = 0;
/* in gve_alloc_tx_rings() */
        KASSERT(priv->tx != NULL, ("priv->tx is NULL!"));
gve_tx_clear_desc_ring(struct gve_tx_ring *tx)
{
        struct gve_ring_com *com = &tx->com;
        /* ... */

        for (i = 0; i < com->priv->tx_desc_cnt; i++) {
                tx->desc_ring[i] = (union gve_tx_desc){};
                tx->info[i] = (struct gve_tx_buffer_state){};
                gve_invalidate_timestamp(&tx->info[i].enqueue_time_sec);
        }

        bus_dmamap_sync(tx->desc_ring_mem.tag, tx->desc_ring_mem.map,
            /* ... */);
}
/* in gve_clear_tx_ring() */
        struct gve_tx_ring *tx = &priv->tx[i];
        struct gve_tx_fifo *fifo = &tx->fifo;

        tx->req = 0;
        tx->done = 0;
        tx->mask = priv->tx_desc_cnt - 1;

        atomic_store_int(&fifo->available, fifo->size);
        fifo->head = 0;

        gve_tx_clear_desc_ring(tx);
/* in gve_start_tx_ring() */
        struct gve_tx_ring *tx = &priv->tx[i];
        struct gve_ring_com *com = &tx->com;

        atomic_store_bool(&tx->stopped, false);
        /* ... */
                NET_TASK_INIT(&com->cleanup_task, 0, gve_tx_cleanup_tq, tx);
        /* ... */
                NET_TASK_INIT(&com->cleanup_task, 0, gve_tx_cleanup_tq_dqo, tx);
        com->cleanup_tq = taskqueue_create_fast("gve tx", M_WAITOK,
            taskqueue_thread_enqueue, &com->cleanup_tq);
        taskqueue_start_threads(&com->cleanup_tq, 1, PI_NET, "%s txq %d",
            device_get_nameunit(priv->dev), i);

        TASK_INIT(&tx->xmit_task, 0, gve_xmit_tq, tx);
        tx->xmit_tq = taskqueue_create_fast("gve tx xmit",
            M_WAITOK, taskqueue_thread_enqueue, &tx->xmit_tq);
        taskqueue_start_threads(&tx->xmit_tq, 1, PI_NET, "%s txq %d xmit",
            device_get_nameunit(priv->dev), i);
/* in gve_create_tx_rings() */
        struct gve_tx_ring *tx;
        /* ... */

        for (i = 0; i < priv->tx_cfg.num_queues; i++) {
                /* ... */
        }

        err = gve_adminq_create_tx_queues(priv, priv->tx_cfg.num_queues);
        /* ... */

        bus_dmamap_sync(priv->irqs_db_mem.tag, priv->irqs_db_mem.map,
            /* ... */);

        for (i = 0; i < priv->tx_cfg.num_queues; i++) {
                tx = &priv->tx[i];
                com = &tx->com;

                com->irq_db_offset = 4 * be32toh(priv->irq_db_indices[com->ntfy_id].index);

                bus_dmamap_sync(com->q_resources_mem.tag, com->q_resources_mem.map,
                    /* ... */);
                com->db_offset = 4 * be32toh(com->q_resources->db_index);
                com->counter_idx = be32toh(com->q_resources->counter_index);
        }
/* in gve_stop_tx_ring() */
        struct gve_tx_ring *tx = &priv->tx[i];
        struct gve_ring_com *com = &tx->com;

        if (com->cleanup_tq != NULL) {
                taskqueue_quiesce(com->cleanup_tq);
                taskqueue_free(com->cleanup_tq);
                com->cleanup_tq = NULL;
        }

        if (tx->xmit_tq != NULL) {
                taskqueue_quiesce(tx->xmit_tq);
                taskqueue_free(tx->xmit_tq);
                tx->xmit_tq = NULL;
        }
/* in gve_destroy_tx_rings() */
        for (i = 0; i < priv->tx_cfg.num_queues; i++)
                /* ... */

        err = gve_adminq_destroy_tx_queues(priv, priv->tx_cfg.num_queues);
gve_check_tx_timeout_gqi(struct gve_priv *priv, struct gve_tx_ring *tx)
{
        /* ... */
        for (pkt_idx = 0; pkt_idx < priv->tx_desc_cnt; pkt_idx++) {
                info = &tx->info[pkt_idx];

                if (!gve_timestamp_valid(&info->enqueue_time_sec))
                        /* ... */

                /* ... */
                    gve_seconds_since(&info->enqueue_time_sec) >
                /* ... */
        }
        /* ... */
}
/* in gve_tx_intr() */
        struct gve_tx_ring *tx = arg;
        struct gve_priv *priv = tx->com.priv;
        struct gve_ring_com *com = &tx->com;

        if (__predict_false((if_getdrvflags(priv->ifp) & IFF_DRV_RUNNING) == 0))
                /* ... */

        gve_db_bar_write_4(priv, com->irq_db_offset, GVE_IRQ_MASK);
        taskqueue_enqueue(com->cleanup_tq, &com->cleanup_task);
gve_tx_load_event_counter(struct gve_priv *priv, struct gve_tx_ring *tx)
{
        bus_dmamap_sync(priv->counter_array_mem.tag, priv->counter_array_mem.map,
            /* ... */);
        uint32_t counter = priv->counters[tx->com.counter_idx];
        /* ... */
}
/* in gve_tx_free_fifo() */
        atomic_add_int(&fifo->available, bytes);
/* in gve_tx_cleanup_tq() */
        struct gve_tx_ring *tx = arg;
        struct gve_priv *priv = tx->com.priv;
        uint32_t nic_done = gve_tx_load_event_counter(priv, tx);
        uint32_t todo = nic_done - tx->done;
        /* ... */

        if (__predict_false((if_getdrvflags(priv->ifp) & IFF_DRV_RUNNING) == 0))
                /* ... */

        /* ... */
                uint32_t idx = tx->done & tx->mask;
                struct gve_tx_buffer_state *info = &tx->info[idx];
                struct mbuf *mbuf = info->mbuf;

                tx->done++;
                /* ... */

                gve_invalidate_timestamp(&info->enqueue_time_sec);

                info->mbuf = NULL;

                /* ... */
                counter_u64_add_protected(tx->stats.tbytes, mbuf->m_pkthdr.len);
                counter_u64_add_protected(tx->stats.tpackets, 1);
                /* ... */

                /* ... */
                        space_freed += info->iov[i].iov_len + info->iov[i].iov_padding;
                        info->iov[i].iov_len = 0;
                        info->iov[i].iov_padding = 0;
                /* ... */

        gve_tx_free_fifo(&tx->fifo, space_freed);

        gve_db_bar_write_4(priv, tx->com.irq_db_offset,
            /* ... */);
        /* ... */

        nic_done = gve_tx_load_event_counter(priv, tx);
        todo = nic_done - tx->done;
        /* ... */
                gve_db_bar_write_4(priv, tx->com.irq_db_offset, GVE_IRQ_MASK);
                taskqueue_enqueue(tx->com.cleanup_tq, &tx->com.cleanup_task);
        /* ... */

        if (atomic_load_bool(&tx->stopped) && space_freed) {
                atomic_store_bool(&tx->stopped, false);
                taskqueue_enqueue(tx->xmit_tq, &tx->xmit_task);
        }
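/*
 * Illustrative sketch (not part of the driver): the "process, re-enable, then
 * re-check" step at the end of gve_tx_cleanup_tq() above. Completions that
 * land after the last counter read but before the interrupt is re-enabled may
 * never raise a new interrupt, so the event counter is read once more and the
 * cleanup task is re-queued by hand if anything is pending. The toy_* names
 * and the standalone program below are hypothetical stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t toy_nic_done;   /* event counter the NIC updates via DMA */
static uint32_t toy_done;       /* completions the host has processed */

static void
toy_cleanup_pass(uint32_t late_completions)
{
        uint32_t nic_done = toy_nic_done;

        while (toy_done != nic_done)    /* reclaim one descriptor per step */
                toy_done++;

        /* Completions that arrive while the interrupt is still masked: */
        toy_nic_done += late_completions;

        /* (interrupt acked/re-enabled here in the real driver) */

        /* Re-check closes the race window for exactly those completions. */
        nic_done = toy_nic_done;
        if (nic_done != toy_done)
                printf("re-queue cleanup: %u completions pending\n",
                    nic_done - toy_done);
}

int
main(void)
{
        toy_nic_done = 10;
        toy_cleanup_pass(0);    /* nothing slips in: no re-queue needed */
        toy_cleanup_pass(2);    /* 2 slip in: prints "re-queue cleanup: 2 ..." */
        return (0);
}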
/* in gve_dma_sync_for_device() */
        uint64_t last_page = (iov_offset + iov_len - 1) / PAGE_SIZE;
        /* ... */
                dma = &(qpl->dmas[page]);
                bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
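/*
 * Illustrative sketch (not part of the driver): the page-range arithmetic used
 * by gve_dma_sync_for_device() above. A FIFO fragment that starts near the end
 * of one QPL page and spills into the next must sync both pages. The 4 KiB
 * page size and the offsets below are assumptions chosen for the example.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_PAGE_SIZE 4096ULL

int
main(void)
{
        uint64_t iov_offset = 8000;     /* hypothetical FIFO byte offset */
        uint64_t iov_len = 300;         /* hypothetical fragment length */

        uint64_t first_page = iov_offset / TOY_PAGE_SIZE;
        uint64_t last_page = (iov_offset + iov_len - 1) / TOY_PAGE_SIZE;

        /* Prints "sync pages 1..2": the fragment straddles a page boundary. */
        printf("sync pages %" PRIu64 "..%" PRIu64 "\n", first_page, last_page);
        return (0);
}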
/* in gve_tx_fill_mtd_desc() */
        mtd_desc->type_flags = GVE_TXD_MTD | GVE_MTD_SUBTYPE_PATH;
        mtd_desc->path_state = GVE_MTD_PATH_STATE_DEFAULT | GVE_MTD_PATH_HASH_L4;
        mtd_desc->path_hash = htobe32(mbuf->m_pkthdr.flowid);
        mtd_desc->reserved0 = 0;
        mtd_desc->reserved1 = 0;
/* in gve_tx_fill_pkt_desc() */
        /* ... */
                pkt_desc->type_flags = GVE_TXD_TSO | GVE_TXF_L4CSUM;
                pkt_desc->l4_csum_offset = csum_offset >> 1;
                pkt_desc->l4_hdr_offset = l4_hdr_offset >> 1;
        /* ... */
                pkt_desc->type_flags = GVE_TXD_STD | GVE_TXF_L4CSUM;
                pkt_desc->l4_csum_offset = csum_offset >> 1;
                pkt_desc->l4_hdr_offset = l4_hdr_offset >> 1;
        /* ... */
                pkt_desc->type_flags = GVE_TXD_STD;
                pkt_desc->l4_csum_offset = 0;
                pkt_desc->l4_hdr_offset = 0;
        /* ... */
        pkt_desc->desc_cnt = desc_cnt;
        pkt_desc->len = htobe16(pkt_len);
        pkt_desc->seg_len = htobe16(first_seg_len);
        pkt_desc->seg_addr = htobe64(addr);
/* in gve_tx_fill_seg_desc() */
        seg_desc->type_flags = GVE_TXD_SEG;
        /* ... */
                        seg_desc->type_flags |= GVE_TXSF_IPV6;
                seg_desc->l3_offset = l3_off >> 1;
                seg_desc->mss = htobe16(tso_mss);
        /* ... */
        seg_desc->seg_len = htobe16(len);
        seg_desc->seg_addr = htobe64(addr);
gve_tx_avail(struct gve_tx_ring *tx)
{
        return (tx->mask + 1 - (tx->req - tx->done));
}

/* in gve_tx_fifo_can_alloc() */
        return (atomic_load_int(&fifo->available) >= bytes);

gve_can_tx(struct gve_tx_ring *tx, int bytes_required)
{
        return (gve_tx_avail(tx) >= (GVE_TX_MAX_DESCS + 1) &&
            gve_tx_fifo_can_alloc(&tx->fifo, bytes_required));
}

/* in gve_tx_fifo_pad_alloc_one_frag() */
        return (fifo->head + bytes < fifo->size) ? 0 : fifo->size - fifo->head;

gve_fifo_bytes_required(struct gve_tx_ring *tx, uint16_t first_seg_len,
    /* ... */)
{
        /* ... */
        pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->fifo, first_seg_len);
        /* ... */
        align_hdr_pad = roundup2(first_seg_len, CACHE_LINE_SIZE) - first_seg_len;
        /* ... */
}
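/*
 * Illustrative sketch (not part of the driver): the descriptor-space check in
 * gve_tx_avail() above relies on unsigned wraparound, so "req - done" is the
 * in-flight descriptor count even after the 32-bit counters roll over. The
 * toy_* names below are hypothetical, chosen only for this standalone example.
 */
#include <stdint.h>
#include <stdio.h>

struct toy_ring {
        uint32_t mask;  /* descriptor count - 1 (count is a power of two) */
        uint32_t req;   /* descriptors posted, monotonically increasing */
        uint32_t done;  /* descriptors completed, monotonically increasing */
};

static uint32_t
toy_avail(const struct toy_ring *r)
{
        /* Same arithmetic as gve_tx_avail(). */
        return (r->mask + 1 - (r->req - r->done));
}

int
main(void)
{
        /* Counters just past the 2^32 boundary: req wrapped, done has not. */
        struct toy_ring r = { .mask = 1023, .req = 5, .done = 4294967291u };

        /* req - done == 10 in flight, so 1014 descriptors remain: prints 1014. */
        printf("%u\n", toy_avail(&r));
        return (0);
}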
/* in gve_tx_alloc_fifo() */
        /* ... */
            ("Allocating gve tx fifo when there is no room"));
        /* ... */
        iov[0].iov_offset = fifo->head;
        /* ... */
        fifo->head += bytes;

        if (fifo->head > fifo->size) {
                /* ... */
                overflow = fifo->head - fifo->size;
                iov[0].iov_len -= overflow;
                /* ... */
                fifo->head = overflow;
        }

        /* Re-align to a cacheline boundary */
        aligned_head = roundup2(fifo->head, CACHE_LINE_SIZE);
        padding = aligned_head - fifo->head;
        iov[nfrags - 1].iov_padding = padding;
        atomic_add_int(&fifo->available, -(bytes + padding));
        fifo->head = aligned_head;

        if (fifo->head == fifo->size)
                fifo->head = 0;
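/*
 * Illustrative sketch (not part of the driver): a worked example of the
 * wraparound split performed by gve_tx_alloc_fifo() above. The simplified
 * types and names (toy_fifo, toy_iovec, toy_alloc) are hypothetical; the real
 * allocator also asserts there is room and atomically decrements "available".
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_CACHE_LINE 64u

struct toy_iovec {
        uint32_t iov_offset;
        uint32_t iov_len;
        uint32_t iov_padding;
};

struct toy_fifo {
        uint32_t size;  /* total FIFO bytes; a multiple of the cache line */
        uint32_t head;  /* next free offset */
};

static int
toy_alloc(struct toy_fifo *f, uint32_t bytes, struct toy_iovec iov[2])
{
        uint32_t aligned_head, overflow;
        int nfrags = 1;

        iov[0].iov_offset = f->head;
        iov[0].iov_len = bytes;
        f->head += bytes;

        if (f->head > f->size) {
                /* Did not fit contiguously: second fragment restarts at 0. */
                overflow = f->head - f->size;
                iov[0].iov_len -= overflow;
                iov[1].iov_offset = 0;
                iov[1].iov_len = overflow;
                nfrags = 2;
                f->head = overflow;
        }

        /* Pad the last fragment so the next allocation starts aligned. */
        aligned_head = (f->head + TOY_CACHE_LINE - 1) & ~(TOY_CACHE_LINE - 1);
        iov[nfrags - 1].iov_padding = aligned_head - f->head;
        f->head = (aligned_head == f->size) ? 0 : aligned_head;
        return (nfrags);
}

int
main(void)
{
        struct toy_fifo f = { .size = 8192, .head = 8000 };
        struct toy_iovec iov[2] = { {0}, {0} };
        int n = toy_alloc(&f, 500, iov);

        /* Prints: 2 frags: (8000,192,+0) (0,308,+12), head=320 */
        printf("%d frags: (%u,%u,+%u) (%u,%u,+%u), head=%u\n", n,
            iov[0].iov_offset, iov[0].iov_len, iov[0].iov_padding,
            iov[1].iov_offset, iov[1].iov_len, iov[1].iov_padding, f.head);
        return (0);
}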
/* The only error this returns is ENOBUFS, when the tx fifo is short of space. */
/* ... */
gve_xmit(struct gve_tx_ring *tx, struct mbuf *mbuf)
{
        /* ... */
        uint32_t idx = tx->req & tx->mask;
        /* ... */
        info = &tx->info[idx];
        csum_flags = mbuf->m_pkthdr.csum_flags;
        pkt_len = mbuf->m_pkthdr.len;
        /* ... */
        tso_mss = is_tso ? mbuf->m_pkthdr.tso_segsz : 0;
        /* ... */
        KASSERT(eh->ether_type != ETHERTYPE_VLAN,
            ("VLAN-tagged packets not supported"));
        /* ... */
        is_ipv6 = ntohs(eh->ether_type) == ETHERTYPE_IPV6;
        /* ... */
                is_tcp = (ip6->ip6_nxt == IPPROTO_TCP);
                is_udp = (ip6->ip6_nxt == IPPROTO_UDP);
        /* ... */
        } else if (ntohs(eh->ether_type) == ETHERTYPE_IP) {
                /* ... */
                l4_off = l3_off + (ip->ip_hl << 2);
                is_tcp = (ip->ip_p == IPPROTO_TCP);
                is_udp = (ip->ip_p == IPPROTO_UDP);
        /* ... */
                        l4_data_off = l4_off + (th->th_off << 2);
        /* ... */

        /*
         * ...
         * spec-stipulated minimum of 182B.
         */
        /* ... */
        bytes_required = gve_fifo_bytes_required(tx, first_seg_len, pkt_len);
        if (__predict_false(!gve_can_tx(tx, bytes_required))) {
                /* ... */
                counter_u64_add_protected(tx->stats.tx_delayed_pkt_nospace_device, 1);
                /* ... */
        }

        info->mbuf = mbuf;
        /* ... */
        gve_set_timestamp(&info->enqueue_time_sec);
        /* ... */
        pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->fifo, first_seg_len);
        hdr_nfrags = gve_tx_alloc_fifo(&tx->fifo, first_seg_len + pad_bytes,
            &info->iov[0]);
        KASSERT(hdr_nfrags > 0, ("Number of header fragments for gve tx is 0"));
        payload_nfrags = gve_tx_alloc_fifo(&tx->fifo, pkt_len - first_seg_len,
            &info->iov[payload_iov]);

        pkt_desc = &tx->desc_ring[idx].pkt;
        /* ... */
            info->iov[hdr_nfrags - 1].iov_offset, has_csum_flag, csum_offset,
        /* ... */
            (char *)tx->fifo.base + info->iov[hdr_nfrags - 1].iov_offset);
        gve_dma_sync_for_device(tx->com.qpl,
            info->iov[hdr_nfrags - 1].iov_offset,
            info->iov[hdr_nfrags - 1].iov_len);
        /* ... */
                next_idx = (tx->req + 1) & tx->mask;
                mtd_desc = &tx->desc_ring[next_idx].mtd;
        /* ... */
                next_idx = (tx->req + 1 + mtd_desc_nr + i - payload_iov) & tx->mask;
                seg_desc = &tx->desc_ring[next_idx].seg;

                gve_tx_fill_seg_desc(seg_desc, is_tso, info->iov[i].iov_len,
                    info->iov[i].iov_offset, is_ipv6, l3_off, tso_mss);

                m_copydata(mbuf, copy_offset, info->iov[i].iov_len,
                    (char *)tx->fifo.base + info->iov[i].iov_offset);
                gve_dma_sync_for_device(tx->com.qpl,
                    info->iov[i].iov_offset, info->iov[i].iov_len);
                copy_offset += info->iov[i].iov_len;
        /* ... */

        tx->req += (1 + mtd_desc_nr + payload_nfrags);
        /* ... */
                counter_u64_add_protected(tx->stats.tso_packet_cnt, 1);
        /* ... */
}
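/*
 * Illustrative sketch (not part of the driver): descriptor accounting for one
 * GQI packet as posted by gve_xmit() above. A packet consumes one packet
 * descriptor, an optional metadata (MTD) descriptor, and one segment
 * descriptor per payload FIFO fragment; tx->req then advances by their sum.
 * The ring size and counts below are hypothetical example values.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        uint32_t mask = 1023;           /* 1024-entry descriptor ring */
        uint32_t req = 2045;            /* descriptors posted so far */
        int mtd_desc_nr = 1;            /* MTD descriptor present (RSS hash) */
        int payload_nfrags = 2;         /* payload wrapped: two FIFO fragments */

        /* Packet descriptor goes at req & mask; the others follow it. */
        printf("pkt desc at %u\n", req & mask);
        printf("mtd desc at %u\n", (req + 1) & mask);
        for (int i = 0; i < payload_nfrags; i++)
                /* The second segment index wraps back to slot 0 here. */
                printf("seg desc at %u\n", (req + 1 + mtd_desc_nr + i) & mask);

        /* req advances past everything just written. */
        printf("new req = %u\n", req + 1 + mtd_desc_nr + payload_nfrags);
        return (0);
}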
gve_xmit_mbuf(struct gve_tx_ring *tx,
    /* ... */)
{
        if (gve_is_gqi(tx->com.priv))
                return (gve_xmit(tx, *mbuf));

        if (gve_is_qpl(tx->com.priv))
                return (gve_xmit_dqo_qpl(tx, *mbuf));

        /* ... */
        return (gve_xmit_dqo(tx, mbuf));
}
/*
 * Has the side-effect of stopping the xmit queue by setting tx->stopped
 */
/* ... */
gve_xmit_retry_enobuf_mbuf(struct gve_tx_ring *tx,
    /* ... */)
{
        /* ... */
        atomic_store_bool(&tx->stopped, true);

        /*
         * Room made in the queue BEFORE the barrier will be seen by the
         * ...
         *
         * If room is made in the queue AFTER the barrier, the cleanup tq
         * iteration creating the room will either see a tx->stopped value
         * ...
         * implies a retry on the waiting pkt.
         * ...
         * implies a retry on the waiting pkt.
         */
        /* ... */

        err = gve_xmit_mbuf(tx, mbuf);
        /* ... */
                atomic_store_bool(&tx->stopped, false);
        /* ... */
}
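/*
 * Illustrative sketch (not part of the driver): the stop/retry handshake the
 * comment above describes, distilled to C11 atomics. The producer publishes
 * "stopped", issues a full fence, then retries once; the cleanup side frees
 * space, and if it observes "stopped" it wakes the producer. With seq_cst
 * fences on both sides, at least one side is guaranteed to see the other's
 * update, so the queue cannot stall while space is available. All toy_*
 * names are hypothetical stand-ins for the ring, FIFO, and taskqueue calls.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool toy_stopped;
static atomic_int toy_free_slots;       /* stand-in for ring/FIFO space */

/* Producer path on ENOBUFS (cf. gve_xmit_retry_enobuf_mbuf() above). */
static bool
toy_post_or_stop(void)
{
        atomic_store(&toy_stopped, true);
        atomic_thread_fence(memory_order_seq_cst);

        /* Space freed before the fence is visible to this one retry ... */
        if (atomic_load(&toy_free_slots) > 0) {
                atomic_fetch_sub(&toy_free_slots, 1);
                atomic_store(&toy_stopped, false);
                return (true);
        }
        /* ... space freed after it will find toy_stopped set (see below). */
        return (false);
}

/* Cleanup path once completions have freed space. */
static void
toy_cleanup_freed_space(int slots)
{
        atomic_fetch_add(&toy_free_slots, slots);
        atomic_thread_fence(memory_order_seq_cst);
        if (atomic_load(&toy_stopped)) {
                atomic_store(&toy_stopped, false);
                printf("wake producer for a retry\n");
        }
}

int
main(void)
{
        if (!toy_post_or_stop())                /* no space: producer parks */
                toy_cleanup_freed_space(8);     /* prints the wake message */
        return (0);
}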
gve_xmit_br(struct gve_tx_ring *tx)
{
        struct gve_priv *priv = tx->com.priv;
        struct ifnet *ifp = priv->ifp;
        /* ... */
            (mbuf = drbr_peek(ifp, tx->br)) != NULL) {
                err = gve_xmit_mbuf(tx, &mbuf);

                /*
                 * We need to stop this taskqueue when we can't xmit the pkt due
                 * ...
                 * queue forever.
                 */
                /* ... */
                        err = gve_xmit_retry_enobuf_mbuf(tx, &mbuf);
                /* ... */
                                drbr_advance(ifp, tx->br);
                        /* ... */
                                drbr_putback(ifp, tx->br, mbuf);
                        /* ... */

                drbr_advance(ifp, tx->br);
        }

        bus_dmamap_sync(tx->desc_ring_mem.tag, tx->desc_ring_mem.map,
            /* ... */);
        /* ... */
                gve_db_bar_write_4(priv, tx->com.db_offset, tx->req);
        /* ... */
                gve_db_bar_dqo_write_4(priv, tx->com.db_offset,
                    tx->dqo.desc_tail);
}
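/*
 * Illustrative sketch (not part of the driver): the peek/advance/putback drain
 * pattern used by gve_xmit_br() above. drbr_peek() leaves the packet at the
 * head of the buf_ring, so a transient failure (ENOBUFS) can put it back
 * (here, simply not advance) instead of losing it; only a successful transmit
 * advances the ring. The toy_* queue is a hypothetical single-threaded
 * stand-in for the real buf_ring, and the transmit budget mimics a full ring.
 */
#include <stdbool.h>
#include <stdio.h>

#define TOY_QLEN 8

struct toy_q {
        int pkt[TOY_QLEN];
        unsigned head, tail;            /* consume at head, produce at tail */
};

static bool
toy_transmit(int pkt, int *budget)
{
        if (*budget == 0)
                return (false);         /* "ENOBUFS": descriptor ring is full */
        (*budget)--;
        printf("sent pkt %d\n", pkt);
        return (true);
}

static void
toy_drain(struct toy_q *q, int budget)
{
        while (q->head != q->tail) {
                int pkt = q->pkt[q->head % TOY_QLEN];   /* "peek" */

                if (!toy_transmit(pkt, &budget))
                        break;          /* leave it queued: the "putback" */
                q->head++;              /* success: the "advance" */
        }
        printf("%u packet(s) left queued\n", q->tail - q->head);
}

int
main(void)
{
        struct toy_q q = { .pkt = { 1, 2, 3 }, .head = 0, .tail = 3 };

        toy_drain(&q, 2);       /* sends 1 and 2, leaves 3 queued */
        return (0);
}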
/* in gve_xmit_tq() */
        struct gve_tx_ring *tx = (struct gve_tx_ring *)arg;

        GVE_RING_LOCK(tx);
        gve_xmit_br(tx);
        GVE_RING_UNLOCK(tx);

/* in is_vlan_tagged_pkt() */
        return (ntohs(eh->ether_type) == ETHERTYPE_VLAN);
/* in gve_xmit_ifp() */
        struct gve_tx_ring *tx;
        /* ... */

        if (__predict_false((if_getdrvflags(priv->ifp) & IFF_DRV_RUNNING) == 0))
                /* ... */

        /* ... */
                i = mbuf->m_pkthdr.flowid % priv->tx_cfg.num_queues;
        /* ... */
                i = curcpu % priv->tx_cfg.num_queues;
        tx = &priv->tx[i];

        /* ... */
                counter_u64_add_protected(tx->stats.tx_dropped_pkt_vlan, 1);
                counter_u64_add_protected(tx->stats.tx_dropped_pkt, 1);
        /* ... */

        is_br_empty = drbr_empty(ifp, tx->br);
        err = drbr_enqueue(ifp, tx->br, mbuf);
        /* ... */
                if (!atomic_load_bool(&tx->stopped))
                        taskqueue_enqueue(tx->xmit_tq, &tx->xmit_task);
                /* ... */
                counter_u64_add_protected(tx->stats.tx_dropped_pkt_nospace_bufring, 1);
                counter_u64_add_protected(tx->stats.tx_dropped_pkt, 1);
        /* ... */

        if (is_br_empty && (GVE_RING_TRYLOCK(tx) != 0)) {
                gve_xmit_br(tx);
                GVE_RING_UNLOCK(tx);
        } else if (!atomic_load_bool(&tx->stopped))
                taskqueue_enqueue(tx->xmit_tq, &tx->xmit_task);
/* in gve_qflush() */
        struct gve_tx_ring *tx;
        /* ... */

        for (i = 0; i < priv->tx_cfg.num_queues; ++i) {
                tx = &priv->tx[i];
                if (drbr_empty(ifp, tx->br) == 0) {
                        GVE_RING_LOCK(tx);
                        drbr_flush(ifp, tx->br);
                        GVE_RING_UNLOCK(tx);
                }
        }
        /* ... */