Lines matching: queue, pkt, tx
1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
38 gve_unmap_packet(struct gve_tx_ring *tx,
41 bus_dmamap_sync(tx->dqo.buf_dmatag, pending_pkt->dmamap,
43 bus_dmamap_unload(tx->dqo.buf_dmatag, pending_pkt->dmamap);
49 pending_pkt->qpl_buf_head = -1;
50 pending_pkt->num_qpl_bufs = 0;
54 gve_free_tx_mbufs_dqo(struct gve_tx_ring *tx)
59 for (i = 0; i < tx->dqo.num_pending_pkts; i++) {
60 pending_pkt = &tx->dqo.pending_pkts[i];
61 if (!pending_pkt->mbuf)
64 if (gve_is_qpl(tx->com.priv))
67 gve_unmap_packet(tx, pending_pkt);
69 m_freem(pending_pkt->mbuf);
70 pending_pkt->mbuf = NULL;
77 struct gve_tx_ring *tx = &priv->tx[i];
78 struct gve_ring_com *com = &tx->com;
81 if (tx->dqo.desc_ring != NULL) {
82 gve_dma_free_coherent(&tx->desc_ring_mem);
83 tx->dqo.desc_ring = NULL;
86 if (tx->dqo.compl_ring != NULL) {
87 gve_dma_free_coherent(&tx->dqo.compl_ring_mem);
88 tx->dqo.compl_ring = NULL;
91 if (tx->dqo.pending_pkts != NULL) {
92 gve_free_tx_mbufs_dqo(tx);
94 if (!gve_is_qpl(priv) && tx->dqo.buf_dmatag) {
95 for (j = 0; j < tx->dqo.num_pending_pkts; j++)
96 if (tx->dqo.pending_pkts[j].state !=
98 bus_dmamap_destroy(tx->dqo.buf_dmatag,
99 tx->dqo.pending_pkts[j].dmamap);
102 free(tx->dqo.pending_pkts, M_GVE);
103 tx->dqo.pending_pkts = NULL;
106 if (!gve_is_qpl(priv) && tx->dqo.buf_dmatag)
107 bus_dma_tag_destroy(tx->dqo.buf_dmatag);
109 if (gve_is_qpl(priv) && tx->dqo.qpl_bufs != NULL) {
110 free(tx->dqo.qpl_bufs, M_GVE);
111 tx->dqo.qpl_bufs = NULL;
114 if (com->qpl != NULL) {
115 gve_free_qpl(priv, com->qpl);
116 com->qpl = NULL;
121 gve_tx_alloc_rda_fields_dqo(struct gve_tx_ring *tx)
123 struct gve_priv *priv = tx->com.priv;
128 * DMA tag for mapping Tx mbufs
133 bus_get_dma_tag(priv->dev), /* parent */
144 &tx->dqo.buf_dmatag);
146 device_printf(priv->dev, "%s: bus_dma_tag_create failed: %d\n",
151 for (j = 0; j < tx->dqo.num_pending_pkts; j++) {
152 err = bus_dmamap_create(tx->dqo.buf_dmatag, 0,
153 &tx->dqo.pending_pkts[j].dmamap);
155 device_printf(priv->dev,
156 "err in creating pending pkt dmamap %d: %d",
160 tx->dqo.pending_pkts[j].state = GVE_PACKET_STATE_FREE;
169 struct gve_tx_ring *tx = &priv->tx[i];
175 sizeof(union gve_tx_desc_dqo) * priv->tx_desc_cnt,
176 CACHE_LINE_SIZE, &tx->desc_ring_mem);
178 device_printf(priv->dev,
179 "Failed to alloc desc ring for tx ring %d", i);
182 tx->dqo.desc_ring = tx->desc_ring_mem.cpu_addr;
186 sizeof(struct gve_tx_compl_desc_dqo) * priv->tx_desc_cnt,
187 CACHE_LINE_SIZE, &tx->dqo.compl_ring_mem);
189 device_printf(priv->dev,
190 "Failed to alloc compl ring for tx ring %d", i);
193 tx->dqo.compl_ring = tx->dqo.compl_ring_mem.cpu_addr;
199 * descriptors which may be written to the completion queue.
202 * completion queue.
204 num_pending_pkts = priv->tx_desc_cnt;
209 num_pending_pkts -= num_pending_pkts / GVE_TX_MIN_RE_INTERVAL;
211 tx->dqo.num_pending_pkts = num_pending_pkts;
212 tx->dqo.pending_pkts = malloc(
219 tx->com.qpl = gve_alloc_qpl(priv, i, GVE_TX_NUM_QPL_PAGES_DQO,
221 if (tx->com.qpl == NULL) {
222 device_printf(priv->dev,
223 "Failed to alloc QPL for tx ring %d", i);
229 tx->com.qpl->num_pages;
231 tx->dqo.qpl_bufs = malloc(
232 sizeof(*tx->dqo.qpl_bufs) * qpl_buf_cnt,
235 gve_tx_alloc_rda_fields_dqo(tx);
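
Lines 204-209 size the pending-packet array: one slot per Tx descriptor, minus tx_desc_cnt / GVE_TX_MIN_RE_INTERVAL, presumably so the completion ring (sized like the descriptor ring) is never overrun by packet completions plus the periodic report-event completions. A tiny standalone version of that arithmetic; the interval value below is a placeholder, not the driver's constant.

#include <stdint.h>
#include <stdio.h>

/* Placeholder value; the real interval is defined in the gve headers. */
#define GVE_TX_MIN_RE_INTERVAL 32

/* Mirrors the sizing at lines 204-209: one pending-packet slot per
 * descriptor, reduced by the fraction tied to the report-event interval. */
static uint32_t
pending_pkts_for_ring(uint32_t tx_desc_cnt)
{
	uint32_t num_pending_pkts = tx_desc_cnt;

	num_pending_pkts -= num_pending_pkts / GVE_TX_MIN_RE_INTERVAL;
	return (num_pending_pkts);
}

int
main(void)
{
	printf("%u descriptors -> %u pending-packet slots\n",
	    1024u, (unsigned)pending_pkts_for_ring(1024));
	return (0);
}
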
247 uint32_t hash = mbuf->m_pkthdr.flowid;
250 metadata->version = GVE_TX_METADATA_VERSION_DQO;
254 path_hash &= (1 << 15) - 1;
258 metadata->path_hash = path_hash;
263 gve_tx_fill_pkt_desc_dqo(struct gve_tx_ring *tx,
269 &tx->dqo.desc_ring[*desc_idx].pkt;
283 len -= cur_len;
284 *desc_idx = (*desc_idx + 1) & tx->dqo.desc_mask;
299 .flex0 = metadata->bytes[0],
300 .flex5 = metadata->bytes[5],
301 .flex6 = metadata->bytes[6],
302 .flex7 = metadata->bytes[7],
303 .flex8 = metadata->bytes[8],
304 .flex9 = metadata->bytes[9],
305 .flex10 = metadata->bytes[10],
306 .flex11 = metadata->bytes[11],
308 desc->tso_total_len = mbuf->m_pkthdr.len - header_len;
309 desc->mss = mbuf->m_pkthdr.tso_segsz;
317 .flex0 = metadata->bytes[0],
318 .flex1 = metadata->bytes[1],
319 .flex2 = metadata->bytes[2],
320 .flex3 = metadata->bytes[3],
321 .flex4 = metadata->bytes[4],
322 .flex5 = metadata->bytes[5],
323 .flex6 = metadata->bytes[6],
324 .flex7 = metadata->bytes[7],
325 .flex8 = metadata->bytes[8],
326 .flex9 = metadata->bytes[9],
327 .flex10 = metadata->bytes[10],
328 .flex11 = metadata->bytes[11],
335 if (__predict_false((m)->m_len < (len))) { \
352 KASSERT(eh->ether_type != ETHERTYPE_VLAN,
353 ("VLAN-tagged packets not supported"));
357 if (ntohs(eh->ether_type) == ETHERTYPE_IPV6) {
367 if (ntohs(eh->ether_type) == ETHERTYPE_IP) {
372 l4_off = l3_off + (ip->ip_hl << 2);
373 csum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
379 *header_len = l4_off + (th->th_off << 2);
382 * Hardware requires the th->th_sum to not include the TCP payload,
385 th->th_sum = csum;
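
The TSO prep above seeds th_sum with a pseudo-header checksum (via in_pseudo() at line 373) that deliberately leaves out the payload length, since the comment at line 382 notes the hardware does not want the TCP payload included. A self-contained userspace sketch of that folding, using host-order inputs for clarity; this is only an illustration of the one's-complement arithmetic, not the kernel's in_pseudo().

#include <stdint.h>
#include <stdio.h>

/*
 * One's-complement sum of the IPv4 source address, destination address and
 * protocol, folded to 16 bits: a TCP pseudo-header checksum with the length
 * term omitted, which is what the TSO path stores in th_sum. The kernel
 * operates on network-order fields; the folding itself is the same.
 */
static uint16_t
pseudo_hdr_csum_no_len(uint32_t src, uint32_t dst, uint16_t proto)
{
	uint64_t sum = 0;

	sum += (src >> 16) + (src & 0xffff);
	sum += (dst >> 16) + (dst & 0xffff);
	sum += proto;
	while (sum >> 16)
		sum = (sum >> 16) + (sum & 0xffff);
	return ((uint16_t)sum);
}

int
main(void)
{
	uint32_t src = 0xc0a80001;	/* 192.168.0.1 */
	uint32_t dst = 0xc0a80002;	/* 192.168.0.2 */

	printf("pseudo-header csum: 0x%04x\n",
	    (unsigned)pseudo_hdr_csum_no_len(src, dst, 6 /* IPPROTO_TCP */));
	return (0);
}
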
391 gve_tx_fill_ctx_descs(struct gve_tx_ring *tx, struct mbuf *mbuf,
408 tx->stats.tx_delayed_pkt_tsoerr, 1);
413 tso_desc = &tx->dqo.desc_ring[*desc_idx].tso_ctx;
416 *desc_idx = (*desc_idx + 1) & tx->dqo.desc_mask;
418 counter_u64_add_protected(tx->stats.tso_packet_cnt, 1);
422 gen_desc = &tx->dqo.desc_ring[*desc_idx].general_ctx;
424 *desc_idx = (*desc_idx + 1) & tx->dqo.desc_mask;
429 gve_map_mbuf_dqo(struct gve_tx_ring *tx,
436 err = bus_dmamap_load_mbuf_sg(tx->dqo.buf_dmatag, dmamap,
448 tx->stats.tx_mbuf_collapse, 1);
457 tx->stats.tx_mbuf_defrag, 1);
465 tx->stats.tx_mbuf_defrag_err, 1);
474 return (gve_map_mbuf_dqo(tx, mbuf, dmamap,
480 tx->stats.tx_mbuf_dmamap_enomem_err, 1);
491 counter_u64_add_protected(tx->stats.tx_mbuf_dmamap_err, 1);
497 num_avail_desc_ring_slots(const struct gve_tx_ring *tx)
499 uint32_t num_used = (tx->dqo.desc_tail - tx->dqo.desc_head) &
500 tx->dqo.desc_mask;
502 return (tx->dqo.desc_mask - num_used);
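
num_avail_desc_ring_slots() relies on the ring size being a power of two: the masked subtraction handles wraparound, and returning desc_mask (size - 1) minus the in-use count keeps one slot permanently unused so head == tail can only mean "empty". A small standalone illustration of the same arithmetic, with hypothetical field names.

#include <assert.h>
#include <stdint.h>

/* Hypothetical stand-in for the ring indices used in the driver. */
struct ring_idx {
	uint32_t head;	/* consumer: last index reclaimed */
	uint32_t tail;	/* producer: next index to write */
	uint32_t mask;	/* ring size - 1; ring size is a power of two */
};

static uint32_t
avail_slots(const struct ring_idx *r)
{
	uint32_t used = (r->tail - r->head) & r->mask;

	/* One slot is sacrificed so that head == tail always means empty. */
	return (r->mask - used);
}

int
main(void)
{
	struct ring_idx r = { .head = 1020, .tail = 4, .mask = 1023 };

	/* tail wrapped past the end: (4 - 1020) & 1023 == 8 slots in use. */
	assert(avail_slots(&r) == 1023 - 8);
	return (0);
}
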
506 gve_alloc_pending_packet(struct gve_tx_ring *tx)
508 int32_t index = tx->dqo.free_pending_pkts_csm;
515 if (__predict_false(index == -1)) {
516 tx->dqo.free_pending_pkts_csm = atomic_swap_32(
517 &tx->dqo.free_pending_pkts_prd, -1);
519 index = tx->dqo.free_pending_pkts_csm;
520 if (__predict_false(index == -1))
524 pending_pkt = &tx->dqo.pending_pkts[index];
527 tx->dqo.free_pending_pkts_csm = pending_pkt->next;
528 pending_pkt->state = GVE_PACKET_STATE_PENDING_DATA_COMPL;
530 gve_set_timestamp(&pending_pkt->enqueue_time_sec);
536 gve_free_pending_packet(struct gve_tx_ring *tx,
539 int index = pending_pkt - tx->dqo.pending_pkts;
542 pending_pkt->state = GVE_PACKET_STATE_FREE;
544 gve_invalidate_timestamp(&pending_pkt->enqueue_time_sec);
548 old_head = atomic_load_acq_32(&tx->dqo.free_pending_pkts_prd);
550 pending_pkt->next = old_head;
551 if (atomic_cmpset_32(&tx->dqo.free_pending_pkts_prd,
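
gve_alloc_pending_packet() and gve_free_pending_packet() split the free list in two: a consumer-private head (free_pending_pkts_csm) that the hot allocation path pops without atomics, and a producer head (free_pending_pkts_prd) that frees are pushed onto with a compare-and-set; when the consumer list runs dry, the allocator grabs the entire producer list with one atomic swap. A userspace sketch of that pattern using C11 atomics and an index-linked array; names and sizes are illustrative, not the driver's.

#include <stdatomic.h>
#include <stdint.h>

#define NSLOTS 8

static int32_t next[NSLOTS];		/* next[i] links free slots */
static int32_t free_csm = -1;		/* consumer-private list head */
static _Atomic int32_t free_prd = -1;	/* producer list head (shared) */

/* Transmit side: pop a slot, refilling from the producer list if empty. */
static int32_t
alloc_slot(void)
{
	int32_t idx = free_csm;

	if (idx == -1) {
		/* Take the whole producer list in one shot. */
		free_csm = atomic_exchange(&free_prd, -1);
		idx = free_csm;
		if (idx == -1)
			return (-1);
	}
	free_csm = next[idx];
	return (idx);
}

/* Completion side: push a slot onto the producer list with a CAS loop. */
static void
free_slot(int32_t idx)
{
	int32_t old_head = atomic_load(&free_prd);

	do {
		next[idx] = old_head;
	} while (!atomic_compare_exchange_weak(&free_prd, &old_head, idx));
}

int
main(void)
{
	/* Seed all slots onto the consumer list: 0 -> 1 -> ... -> -1. */
	for (int32_t i = 0; i < NSLOTS - 1; i++)
		next[i] = i + 1;
	next[NSLOTS - 1] = -1;
	free_csm = 0;

	int32_t idx = alloc_slot();	/* pops slot 0 */
	free_slot(idx);			/* returns it via the producer list */
	return (0);
}
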
558 * Has the side-effect of retrieving the value of the last desc index
559 * processed by the NIC. hw_tx_head is written to by the completions-processing
560 * taskqueue upon receiving descriptor-completions.
563 gve_tx_has_desc_room_dqo(struct gve_tx_ring *tx, int needed_descs)
565 if (needed_descs <= num_avail_desc_ring_slots(tx))
568 tx->dqo.desc_head = atomic_load_acq_32(&tx->dqo.hw_tx_head);
569 if (needed_descs > num_avail_desc_ring_slots(tx)) {
572 tx->stats.tx_delayed_pkt_nospace_descring, 1);
581 gve_tx_request_desc_compl(struct gve_tx_ring *tx, uint32_t desc_idx)
586 last_desc_idx = (desc_idx - 1) & tx->dqo.desc_mask;
588 (last_desc_idx - tx->dqo.last_re_idx) & tx->dqo.desc_mask;
592 tx->dqo.desc_ring[last_desc_idx].pkt.report_event = true;
593 tx->dqo.last_re_idx = last_desc_idx;
598 gve_tx_have_enough_qpl_bufs(struct gve_tx_ring *tx, int num_bufs)
600 uint32_t available = tx->dqo.qpl_bufs_produced_cached -
601 tx->dqo.qpl_bufs_consumed;
606 tx->dqo.qpl_bufs_produced_cached = atomic_load_acq_32(
607 &tx->dqo.qpl_bufs_produced);
608 available = tx->dqo.qpl_bufs_produced_cached -
609 tx->dqo.qpl_bufs_consumed;
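
gve_tx_has_desc_room_dqo() and gve_tx_have_enough_qpl_bufs() share the same cheap-path/slow-path shape: first test against a locally cached copy of the other side's progress (desc_head, qpl_bufs_produced_cached), and only on failure refresh that cache with an acquire load of the shared counter and re-check. A minimal sketch of the pattern with C11 atomics; the names are placeholders.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Shared counter advanced by the completion path (producer). */
static _Atomic uint32_t bufs_produced;

/* Transmit-path private state (consumer). */
static uint32_t produced_cached;
static uint32_t consumed;

static bool
have_enough(uint32_t want)
{
	uint32_t available = produced_cached - consumed;

	if (want <= available)
		return (true);

	/* Slow path: refresh the cache with acquire semantics and re-check. */
	produced_cached = atomic_load_explicit(&bufs_produced,
	    memory_order_acquire);
	available = produced_cached - consumed;
	return (want <= available);
}

int
main(void)
{
	atomic_store_explicit(&bufs_produced, 64, memory_order_release);
	return (have_enough(16) ? 0 : 1);
}
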
617 gve_tx_alloc_qpl_buf(struct gve_tx_ring *tx)
619 int32_t buf = tx->dqo.free_qpl_bufs_csm;
621 if (__predict_false(buf == -1)) {
622 tx->dqo.free_qpl_bufs_csm = atomic_swap_32(
623 &tx->dqo.free_qpl_bufs_prd, -1);
624 buf = tx->dqo.free_qpl_bufs_csm;
625 if (__predict_false(buf == -1))
626 return (-1);
629 tx->dqo.free_qpl_bufs_csm = tx->dqo.qpl_bufs[buf];
630 tx->dqo.qpl_bufs_consumed++;
635 * Tx buffer i corresponds to
640 gve_tx_buf_get_addr_dqo(struct gve_tx_ring *tx,
643 int page_id = index >> (PAGE_SHIFT - GVE_TX_BUF_SHIFT_DQO);
644 int offset = (index & (GVE_TX_BUFS_PER_PAGE_DQO - 1)) <<
647 *va = (char *)tx->com.qpl->dmas[page_id].cpu_addr + offset;
648 *dma_addr = tx->com.qpl->dmas[page_id].bus_addr + offset;
652 gve_get_page_dma_handle(struct gve_tx_ring *tx, int32_t index)
654 int page_id = index >> (PAGE_SHIFT - GVE_TX_BUF_SHIFT_DQO);
656 return (&tx->com.qpl->dmas[page_id]);
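
gve_tx_buf_get_addr_dqo() converts a flat buffer index into a (QPL page, offset) pair with shifts: the high bits of the index select the page, the low bits select which fixed-size buffer within that page. A standalone illustration of that arithmetic, assuming a PAGE_SHIFT of 12 and a 2 KiB Tx buffer (shift of 11); the real constants come from the driver headers.

#include <assert.h>
#include <stdint.h>

/* Assumed values for illustration; the driver defines the real ones. */
#define PAGE_SHIFT		12	/* 4 KiB pages */
#define TX_BUF_SHIFT		11	/* 2 KiB Tx buffers */
#define TX_BUFS_PER_PAGE	(1 << (PAGE_SHIFT - TX_BUF_SHIFT))

static void
buf_index_to_page_offset(int32_t index, int *page_id, int *offset)
{
	/* High bits: which QPL page. Low bits: which buffer in that page. */
	*page_id = index >> (PAGE_SHIFT - TX_BUF_SHIFT);
	*offset = (index & (TX_BUFS_PER_PAGE - 1)) << TX_BUF_SHIFT;
}

int
main(void)
{
	int page, off;

	/* Buffer 5, two buffers per page: page 2, second half of the page. */
	buf_index_to_page_offset(5, &page, &off);
	assert(page == 2 && off == 2048);
	return (0);
}
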
660 gve_tx_copy_mbuf_and_write_pkt_descs(struct gve_tx_ring *tx,
661 struct mbuf *mbuf, struct gve_tx_pending_pkt_dqo *pkt,
665 int32_t pkt_len = mbuf->m_pkthdr.len;
668 int32_t prev_buf = -1;
674 MPASS(pkt->num_qpl_bufs == 0);
675 MPASS(pkt->qpl_buf_head == -1);
678 buf = gve_tx_alloc_qpl_buf(tx);
680 MPASS(buf != -1);
682 gve_tx_buf_get_addr_dqo(tx, buf, &va, &addr);
683 copy_len = MIN(GVE_TX_BUF_SIZE_DQO, pkt_len - copy_offset);
687 dma = gve_get_page_dma_handle(tx, buf);
688 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
690 gve_tx_fill_pkt_desc_dqo(tx, desc_idx,
696 if (prev_buf == -1)
697 pkt->qpl_buf_head = buf;
699 tx->dqo.qpl_bufs[prev_buf] = buf;
702 pkt->num_qpl_bufs++;
705 tx->dqo.qpl_bufs[buf] = -1;
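
The copy path above carves the mbuf into GVE_TX_BUF_SIZE_DQO-sized chunks and threads the buffers it consumes into a per-packet singly linked list stored in the same qpl_bufs[] index array that backs the free list, terminated with -1. A userspace sketch of that chaining, with a hypothetical copy_into_buf() standing in for the driver's mbuf copy and its gve_tx_fill_pkt_desc_dqo() call, and a trivial allocator in place of gve_tx_alloc_qpl_buf().

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BUF_SIZE	2048	/* stand-in for GVE_TX_BUF_SIZE_DQO */
#define NBUFS		16

static int32_t qpl_bufs[NBUFS];	/* doubles as free list and per-pkt chains */
static int32_t next_free;	/* trivial allocator for this sketch */

struct pending_pkt {
	int32_t qpl_buf_head;	/* first buffer of this packet, -1 if none */
	int32_t num_qpl_bufs;
};

static int32_t
alloc_buf(void)
{
	assert(next_free < NBUFS);
	return (next_free++);
}

/* Pretend to copy one chunk; the driver copies mbuf data into the QPL
 * buffer and writes a packet descriptor here. */
static void
copy_into_buf(int32_t buf, int32_t off, int32_t len)
{
	printf("buf %d <- bytes [%d, %d)\n", buf, off, off + len);
}

static void
chain_bufs_for_pkt(struct pending_pkt *pkt, int32_t pkt_len)
{
	int32_t prev_buf = -1, buf = -1;

	pkt->qpl_buf_head = -1;
	pkt->num_qpl_bufs = 0;
	for (int32_t off = 0; off < pkt_len; off += BUF_SIZE) {
		int32_t copy_len = pkt_len - off < BUF_SIZE ?
		    pkt_len - off : BUF_SIZE;

		buf = alloc_buf();
		copy_into_buf(buf, off, copy_len);

		/* Link this buffer onto the packet's chain. */
		if (prev_buf == -1)
			pkt->qpl_buf_head = buf;
		else
			qpl_bufs[prev_buf] = buf;
		prev_buf = buf;
		pkt->num_qpl_bufs++;
	}
	if (buf != -1)
		qpl_bufs[buf] = -1;	/* terminate the chain */
}

int
main(void)
{
	struct pending_pkt pkt;

	chain_bufs_for_pkt(&pkt, 5000);	/* needs 3 buffers */
	assert(pkt.num_qpl_bufs == 3 && pkt.qpl_buf_head == 0);
	return (0);
}
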
709 gve_xmit_dqo_qpl(struct gve_tx_ring *tx, struct mbuf *mbuf)
711 uint32_t desc_idx = tx->dqo.desc_tail;
712 struct gve_tx_pending_pkt_dqo *pkt;
721 csum_flags = mbuf->m_pkthdr.csum_flags;
726 nsegs = howmany(mbuf->m_pkthdr.len, GVE_TX_BUF_SIZE_DQO);
731 if (__predict_false(!gve_tx_has_desc_room_dqo(tx, total_descs_needed)))
734 if (!gve_tx_have_enough_qpl_bufs(tx, nsegs)) {
737 tx->stats.tx_delayed_pkt_nospace_qpl_bufs, 1);
742 pkt = gve_alloc_pending_packet(tx);
743 if (pkt == NULL) {
746 tx->stats.tx_delayed_pkt_nospace_compring, 1);
750 completion_tag = pkt - tx->dqo.pending_pkts;
751 pkt->mbuf = mbuf;
753 err = gve_tx_fill_ctx_descs(tx, mbuf, is_tso, &desc_idx);
757 gve_tx_copy_mbuf_and_write_pkt_descs(tx, mbuf, pkt,
761 tx->dqo.desc_tail = desc_idx;
767 gve_tx_request_desc_compl(tx, desc_idx);
769 tx->req += total_descs_needed; /* tx->req is just a sysctl counter */
773 pkt->mbuf = NULL;
774 gve_free_pending_packet(tx, pkt);
779 gve_xmit_dqo(struct gve_tx_ring *tx, struct mbuf **mbuf_ptr)
782 uint32_t desc_idx = tx->dqo.desc_tail;
783 struct gve_tx_pending_pkt_dqo *pkt;
794 csum_flags = mbuf->m_pkthdr.csum_flags;
800 * This mbuf might end up needing more than 1 pkt desc.
808 if (__predict_false(!gve_tx_has_desc_room_dqo(tx, total_descs_needed)))
811 pkt = gve_alloc_pending_packet(tx);
812 if (pkt == NULL) {
815 tx->stats.tx_delayed_pkt_nospace_compring, 1);
819 completion_tag = pkt - tx->dqo.pending_pkts;
821 err = gve_map_mbuf_dqo(tx, mbuf_ptr, pkt->dmamap,
826 pkt->mbuf = mbuf;
832 !gve_tx_has_desc_room_dqo(tx, total_descs_needed))) {
837 err = gve_tx_fill_ctx_descs(tx, mbuf, is_tso, &desc_idx);
841 bus_dmamap_sync(tx->dqo.buf_dmatag, pkt->dmamap, BUS_DMASYNC_PREWRITE);
843 gve_tx_fill_pkt_desc_dqo(tx, &desc_idx,
845 completion_tag, /*eop=*/i == (nsegs - 1),
850 tx->dqo.desc_tail = desc_idx;
856 gve_tx_request_desc_compl(tx, desc_idx);
858 tx->req += total_descs_needed; /* tx->req is just a sysctl counter */
862 gve_unmap_packet(tx, pkt);
864 pkt->mbuf = NULL;
865 gve_free_pending_packet(tx, pkt);
870 gve_reap_qpl_bufs_dqo(struct gve_tx_ring *tx,
871 struct gve_tx_pending_pkt_dqo *pkt)
873 int32_t buf = pkt->qpl_buf_head;
879 for (i = 0; i < pkt->num_qpl_bufs; i++) {
880 dma = gve_get_page_dma_handle(tx, buf);
881 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_POSTWRITE);
883 buf = tx->dqo.qpl_bufs[buf];
885 MPASS(buf == -1);
889 old_head = atomic_load_32(&tx->dqo.free_qpl_bufs_prd);
890 tx->dqo.qpl_bufs[buf] = old_head;
894 * is visible only after the linked list from this pkt is
897 if (atomic_cmpset_rel_32(&tx->dqo.free_qpl_bufs_prd,
898 old_head, pkt->qpl_buf_head))
905 atomic_add_rel_32(&tx->dqo.qpl_bufs_produced, pkt->num_qpl_bufs);
907 gve_clear_qpl_pending_pkt(pkt);
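
gve_reap_qpl_bufs_dqo() gives the whole per-packet chain back in one step: it points the chain's tail at the current producer head, publishes the chain's head with a release compare-and-set, and then bumps qpl_bufs_produced with release ordering so that the transmit path's acquire load (line 606) can only observe the new count after the list links are visible, as the comment at line 894 indicates. A sketch of that splice using C11 atomics; structure and names are illustrative.

#include <stdatomic.h>
#include <stdint.h>

#define NBUFS 16

static int32_t qpl_bufs[NBUFS];			/* index-linked list storage */
static _Atomic int32_t free_bufs_prd = -1;	/* producer free-list head */
static _Atomic uint32_t bufs_produced;		/* counts returned buffers */

/* Splice a chain head..tail (already linked through qpl_bufs[]) onto the
 * producer free list, then advertise the returned buffers. */
static void
return_chain(int32_t head, int32_t tail, uint32_t count)
{
	int32_t old_head = atomic_load_explicit(&free_bufs_prd,
	    memory_order_relaxed);

	do {
		qpl_bufs[tail] = old_head;
	} while (!atomic_compare_exchange_weak_explicit(&free_bufs_prd,
	    &old_head, head, memory_order_release, memory_order_relaxed));

	/* Release: the count becomes visible only after the links above. */
	atomic_fetch_add_explicit(&bufs_produced, count, memory_order_release);
}

int
main(void)
{
	/* Chain 3 -> 4 -> 5, then return it in one splice. */
	qpl_bufs[3] = 4;
	qpl_bufs[4] = 5;
	return_chain(3, 5, 3);
	return (0);
}
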
912 struct gve_tx_ring *tx, uint16_t compl_tag)
917 if (__predict_false(compl_tag >= tx->dqo.num_pending_pkts)) {
918 device_printf(priv->dev, "Invalid TX completion tag: %d\n",
923 pending_pkt = &tx->dqo.pending_pkts[compl_tag];
926 if (__predict_false(pending_pkt->state !=
928 device_printf(priv->dev,
933 pkt_len = pending_pkt->mbuf->m_pkthdr.len;
936 gve_reap_qpl_bufs_dqo(tx, pending_pkt);
938 gve_unmap_packet(tx, pending_pkt);
940 m_freem(pending_pkt->mbuf);
941 pending_pkt->mbuf = NULL;
942 gve_free_pending_packet(tx, pending_pkt);
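
The completion tag is nothing more than the pending packet's index in pending_pkts[]: the transmit paths compute it by pointer subtraction (lines 750 and 819) and the device echoes it back in the packet completion, where gve_handle_packet_completion() bounds-checks it and verifies the slot's state before freeing. A compact sketch of that round trip; the state enum and field names are placeholders.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

enum pkt_state { PKT_FREE, PKT_PENDING_DATA_COMPL };

struct pending_pkt {
	enum pkt_state state;
	void *mbuf;
};

#define NUM_PENDING_PKTS 128
static struct pending_pkt pending_pkts[NUM_PENDING_PKTS];

/* Transmit side: the tag handed to the device is just the array index. */
static uint16_t
tag_for(struct pending_pkt *pkt)
{
	return ((uint16_t)(pkt - pending_pkts));
}

/* Completion side: validate the echoed tag before touching the slot. */
static struct pending_pkt *
lookup_completion(uint16_t compl_tag)
{
	if (compl_tag >= NUM_PENDING_PKTS) {
		fprintf(stderr, "invalid completion tag: %u\n", compl_tag);
		return (NULL);
	}
	if (pending_pkts[compl_tag].state != PKT_PENDING_DATA_COMPL) {
		fprintf(stderr, "unexpected state for tag %u\n", compl_tag);
		return (NULL);
	}
	return (&pending_pkts[compl_tag]);
}

int
main(void)
{
	struct pending_pkt *pkt = &pending_pkts[7];

	pkt->state = PKT_PENDING_DATA_COMPL;
	assert(lookup_completion(tag_for(pkt)) == pkt);
	return (0);
}
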
947 gve_check_tx_timeout_dqo(struct gve_priv *priv, struct gve_tx_ring *tx)
954 for (pkt_idx = 0; pkt_idx < tx->dqo.num_pending_pkts; pkt_idx++) {
955 pending_pkt = &tx->dqo.pending_pkts[pkt_idx];
957 if (!gve_timestamp_valid(&pending_pkt->enqueue_time_sec))
961 gve_seconds_since(&pending_pkt->enqueue_time_sec) >
972 struct gve_tx_ring *tx = arg;
973 struct gve_priv *priv = tx->com.priv;
974 struct gve_ring_com *com = &tx->com;
976 if (__predict_false((if_getdrvflags(priv->ifp) & IFF_DRV_RUNNING) == 0))
980 taskqueue_enqueue(com->cleanup_tq, &com->cleanup_task);
985 gve_tx_clear_desc_ring_dqo(struct gve_tx_ring *tx)
987 struct gve_ring_com *com = &tx->com;
990 for (i = 0; i < com->priv->tx_desc_cnt; i++)
991 tx->dqo.desc_ring[i] = (union gve_tx_desc_dqo){};
993 bus_dmamap_sync(tx->desc_ring_mem.tag, tx->desc_ring_mem.map,
998 gve_tx_clear_compl_ring_dqo(struct gve_tx_ring *tx)
1000 struct gve_ring_com *com = &tx->com;
1004 entries = com->priv->tx_desc_cnt;
1006 tx->dqo.compl_ring[i] = (struct gve_tx_compl_desc_dqo){};
1008 bus_dmamap_sync(tx->dqo.compl_ring_mem.tag, tx->dqo.compl_ring_mem.map,
1015 struct gve_tx_ring *tx = &priv->tx[i];
1018 tx->dqo.desc_head = 0;
1019 tx->dqo.desc_tail = 0;
1020 tx->dqo.desc_mask = priv->tx_desc_cnt - 1;
1021 tx->dqo.last_re_idx = 0;
1023 tx->dqo.compl_head = 0;
1024 tx->dqo.compl_mask = priv->tx_desc_cnt - 1;
1025 atomic_store_32(&tx->dqo.hw_tx_head, 0);
1026 tx->dqo.cur_gen_bit = 0;
1028 gve_free_tx_mbufs_dqo(tx);
1030 for (j = 0; j < tx->dqo.num_pending_pkts; j++) {
1031 if (gve_is_qpl(tx->com.priv))
1032 gve_clear_qpl_pending_pkt(&tx->dqo.pending_pkts[j]);
1034 &tx->dqo.pending_pkts[j].enqueue_time_sec);
1035 tx->dqo.pending_pkts[j].next =
1036 (j == tx->dqo.num_pending_pkts - 1) ? -1 : j + 1;
1037 tx->dqo.pending_pkts[j].state = GVE_PACKET_STATE_FREE;
1039 tx->dqo.free_pending_pkts_csm = 0;
1040 atomic_store_rel_32(&tx->dqo.free_pending_pkts_prd, -1);
1044 tx->com.qpl->num_pages;
1046 for (j = 0; j < qpl_buf_cnt - 1; j++)
1047 tx->dqo.qpl_bufs[j] = j + 1;
1048 tx->dqo.qpl_bufs[j] = -1;
1050 tx->dqo.free_qpl_bufs_csm = 0;
1051 atomic_store_32(&tx->dqo.free_qpl_bufs_prd, -1);
1052 atomic_store_32(&tx->dqo.qpl_bufs_produced, qpl_buf_cnt);
1053 tx->dqo.qpl_bufs_produced_cached = qpl_buf_cnt;
1054 tx->dqo.qpl_bufs_consumed = 0;
1057 gve_tx_clear_desc_ring_dqo(tx);
1058 gve_tx_clear_compl_ring_dqo(tx);
1075 gve_tx_cleanup_dqo(struct gve_priv *priv, struct gve_tx_ring *tx, int budget)
1086 bus_dmamap_sync(tx->dqo.compl_ring_mem.tag,
1087 tx->dqo.compl_ring_mem.map,
1090 compl_desc = &tx->dqo.compl_ring[tx->dqo.compl_head];
1092 tx->dqo.cur_gen_bit)
1095 type = compl_desc->type;
1098 tx_head = le16toh(compl_desc->tx_head);
1099 atomic_store_rel_32(&tx->dqo.hw_tx_head, tx_head);
1101 compl_tag = le16toh(compl_desc->completion_tag);
1103 tx, compl_tag);
1107 tx->dqo.compl_head = (tx->dqo.compl_head + 1) &
1108 tx->dqo.compl_mask;
1110 tx->dqo.cur_gen_bit ^= tx->dqo.compl_head == 0;
1116 * the queue.
1119 if (atomic_load_bool(&tx->stopped) && work_done) {
1120 atomic_store_bool(&tx->stopped, false);
1121 taskqueue_enqueue(tx->xmit_tq, &tx->xmit_task);
1124 tx->done += work_done; /* tx->done is just a sysctl counter */
1126 counter_u64_add_protected(tx->stats.tbytes, bytes_done);
1127 counter_u64_add_protected(tx->stats.tpackets, pkts_done);
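
The cleanup loop decides whether a completion descriptor is new by comparing its generation bit with cur_gen_bit: judging from the fragments at lines 1092 and 1110, a slot counts as new only when its generation differs from the driver's current bit, and the driver flips that bit each time compl_head wraps to zero. A standalone sketch of such a generation-bit consumer, with a made-up descriptor layout.

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8	/* power of two; mask is RING_SIZE - 1 */

/* Made-up completion descriptor: just a generation bit and a tag. */
struct compl_desc {
	uint8_t generation;
	uint16_t completion_tag;
};

static struct compl_desc compl_ring[RING_SIZE];	/* zeroed at start */
static uint32_t compl_head;
static uint8_t cur_gen_bit;

/* Process up to 'budget' new completions; returns how many were consumed. */
static int
process_completions(int budget)
{
	int work_done = 0;

	while (work_done < budget) {
		struct compl_desc *desc = &compl_ring[compl_head];

		/* A slot whose generation matches our current bit has not
		 * been (re)written by the device on this pass. */
		if (desc->generation == cur_gen_bit)
			break;

		printf("completion tag %u\n", (unsigned)desc->completion_tag);

		compl_head = (compl_head + 1) & (RING_SIZE - 1);
		/* Expect the opposite generation on the next pass. */
		cur_gen_bit ^= (compl_head == 0);
		work_done++;
	}
	return (work_done);
}

int
main(void)
{
	/* Simulate the device writing two completions on its first pass,
	 * which carries the opposite of the initial generation bit. */
	compl_ring[0] = (struct compl_desc){ .generation = 1, .completion_tag = 5 };
	compl_ring[1] = (struct compl_desc){ .generation = 1, .completion_tag = 9 };
	return (process_completions(4) == 2 ? 0 : 1);
}
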
1136 struct gve_tx_ring *tx = arg;
1137 struct gve_priv *priv = tx->com.priv;
1139 if (__predict_false((if_getdrvflags(priv->ifp) & IFF_DRV_RUNNING) == 0))
1142 if (gve_tx_cleanup_dqo(priv, tx, /*budget=*/1024)) {
1143 taskqueue_enqueue(tx->com.cleanup_tq, &tx->com.cleanup_task);
1147 gve_db_bar_dqo_write_4(priv, tx->com.irq_db_offset,