Lines Matching full:tx
38 gve_tx_fifo_init(struct gve_priv *priv, struct gve_tx_ring *tx) in gve_tx_fifo_init() argument
40 struct gve_queue_page_list *qpl = tx->com.qpl; in gve_tx_fifo_init()
41 struct gve_tx_fifo *fifo = &tx->fifo; in gve_tx_fifo_init()
54 struct gve_tx_ring *tx = &priv->tx[i]; in gve_tx_free_ring_gqi() local
55 struct gve_ring_com *com = &tx->com; in gve_tx_free_ring_gqi()
57 if (tx->desc_ring != NULL) { in gve_tx_free_ring_gqi()
58 gve_dma_free_coherent(&tx->desc_ring_mem); in gve_tx_free_ring_gqi()
59 tx->desc_ring = NULL; in gve_tx_free_ring_gqi()
62 if (tx->info != NULL) { in gve_tx_free_ring_gqi()
63 free(tx->info, M_GVE); in gve_tx_free_ring_gqi()
64 tx->info = NULL; in gve_tx_free_ring_gqi()
76 struct gve_tx_ring *tx = &priv->tx[i]; in gve_tx_free_ring() local
77 struct gve_ring_com *com = &tx->com; in gve_tx_free_ring()
80 gve_free_counters((counter_u64_t *)&tx->stats, NUM_TX_STATS); in gve_tx_free_ring()
82 if (mtx_initialized(&tx->ring_mtx)) in gve_tx_free_ring()
83 mtx_destroy(&tx->ring_mtx); in gve_tx_free_ring()
90 if (tx->br != NULL) { in gve_tx_free_ring()
91 buf_ring_free(tx->br, M_DEVBUF); in gve_tx_free_ring()
92 tx->br = NULL; in gve_tx_free_ring()
104 struct gve_tx_ring *tx = &priv->tx[i]; in gve_tx_alloc_ring_gqi() local
105 struct gve_ring_com *com = &tx->com; in gve_tx_alloc_ring_gqi()
110 CACHE_LINE_SIZE, &tx->desc_ring_mem); in gve_tx_alloc_ring_gqi()
113 "Failed to alloc desc ring for tx ring %d", i); in gve_tx_alloc_ring_gqi()
116 tx->desc_ring = tx->desc_ring_mem.cpu_addr; in gve_tx_alloc_ring_gqi()
122 "Failed to alloc QPL for tx ring %d\n", i); in gve_tx_alloc_ring_gqi()
127 err = gve_tx_fifo_init(priv, tx); in gve_tx_alloc_ring_gqi()
131 tx->info = malloc( in gve_tx_alloc_ring_gqi()
144 struct gve_tx_ring *tx = &priv->tx[i]; in gve_tx_alloc_ring() local
145 struct gve_ring_com *com = &tx->com; in gve_tx_alloc_ring()
160 mtx_init(&tx->ring_mtx, mtx_name, NULL, MTX_DEF); in gve_tx_alloc_ring()
162 tx->br = buf_ring_alloc(GVE_TX_BUFRING_ENTRIES, M_DEVBUF, in gve_tx_alloc_ring()
163 M_WAITOK, &tx->ring_mtx); in gve_tx_alloc_ring()
165 gve_alloc_counters((counter_u64_t *)&tx->stats, NUM_TX_STATS); in gve_tx_alloc_ring()
171 "Failed to alloc queue resources for tx ring %d", i); in gve_tx_alloc_ring()
176 tx->last_kicked = 0; in gve_tx_alloc_ring()
191 KASSERT(priv->tx != NULL, ("priv->tx is NULL!")); in gve_alloc_tx_rings()
215 gve_tx_clear_desc_ring(struct gve_tx_ring *tx) in gve_tx_clear_desc_ring() argument
217 struct gve_ring_com *com = &tx->com; in gve_tx_clear_desc_ring()
221 tx->desc_ring[i] = (union gve_tx_desc){}; in gve_tx_clear_desc_ring()
222 tx->info[i] = (struct gve_tx_buffer_state){}; in gve_tx_clear_desc_ring()
223 gve_invalidate_timestamp(&tx->info[i].enqueue_time_sec); in gve_tx_clear_desc_ring()
226 bus_dmamap_sync(tx->desc_ring_mem.tag, tx->desc_ring_mem.map, in gve_tx_clear_desc_ring()
233 struct gve_tx_ring *tx = &priv->tx[i]; in gve_clear_tx_ring() local
234 struct gve_tx_fifo *fifo = &tx->fifo; in gve_clear_tx_ring()
236 tx->req = 0; in gve_clear_tx_ring()
237 tx->done = 0; in gve_clear_tx_ring()
238 tx->mask = priv->tx_desc_cnt - 1; in gve_clear_tx_ring()
243 gve_tx_clear_desc_ring(tx); in gve_clear_tx_ring()
249 struct gve_tx_ring *tx = &priv->tx[i]; in gve_start_tx_ring() local
250 struct gve_ring_com *com = &tx->com; in gve_start_tx_ring()
252 atomic_store_bool(&tx->stopped, false); in gve_start_tx_ring()
254 NET_TASK_INIT(&com->cleanup_task, 0, gve_tx_cleanup_tq, tx); in gve_start_tx_ring()
256 NET_TASK_INIT(&com->cleanup_task, 0, gve_tx_cleanup_tq_dqo, tx); in gve_start_tx_ring()
257 com->cleanup_tq = taskqueue_create_fast("gve tx", M_WAITOK, in gve_start_tx_ring()
262 TASK_INIT(&tx->xmit_task, 0, gve_xmit_tq, tx); in gve_start_tx_ring()
263 tx->xmit_tq = taskqueue_create_fast("gve tx xmit", in gve_start_tx_ring()
264 M_WAITOK, taskqueue_thread_enqueue, &tx->xmit_tq); in gve_start_tx_ring()
265 taskqueue_start_threads(&tx->xmit_tq, 1, PI_NET, "%s txq %d xmit", in gve_start_tx_ring()
273 struct gve_tx_ring *tx; in gve_create_tx_rings() local
295 tx = &priv->tx[i]; in gve_create_tx_rings()
296 com = &tx->com; in gve_create_tx_rings()
315 struct gve_tx_ring *tx = &priv->tx[i]; in gve_stop_tx_ring() local
316 struct gve_ring_com *com = &tx->com; in gve_stop_tx_ring()
324 if (tx->xmit_tq != NULL) { in gve_stop_tx_ring()
325 taskqueue_quiesce(tx->xmit_tq); in gve_stop_tx_ring()
326 taskqueue_free(tx->xmit_tq); in gve_stop_tx_ring()
327 tx->xmit_tq = NULL; in gve_stop_tx_ring()
351 gve_check_tx_timeout_gqi(struct gve_priv *priv, struct gve_tx_ring *tx) in gve_check_tx_timeout_gqi() argument
360 info = &tx->info[pkt_idx]; in gve_check_tx_timeout_gqi()
377 struct gve_tx_ring *tx = arg; in gve_tx_intr() local
378 struct gve_priv *priv = tx->com.priv; in gve_tx_intr()
379 struct gve_ring_com *com = &tx->com; in gve_tx_intr()
390 gve_tx_load_event_counter(struct gve_priv *priv, struct gve_tx_ring *tx) in gve_tx_load_event_counter() argument
394 uint32_t counter = priv->counters[tx->com.counter_idx]; in gve_tx_load_event_counter()
407 struct gve_tx_ring *tx = arg; in gve_tx_cleanup_tq() local
408 struct gve_priv *priv = tx->com.priv; in gve_tx_cleanup_tq()
409 uint32_t nic_done = gve_tx_load_event_counter(priv, tx); in gve_tx_cleanup_tq()
410 uint32_t todo = nic_done - tx->done; in gve_tx_cleanup_tq()
418 uint32_t idx = tx->done & tx->mask; in gve_tx_cleanup_tq()
419 struct gve_tx_buffer_state *info = &tx->info[idx]; in gve_tx_cleanup_tq()
422 tx->done++; in gve_tx_cleanup_tq()
431 counter_u64_add_protected(tx->stats.tbytes, mbuf->m_pkthdr.len); in gve_tx_cleanup_tq()
432 counter_u64_add_protected(tx->stats.tpackets, 1); in gve_tx_cleanup_tq()
443 gve_tx_free_fifo(&tx->fifo, space_freed); in gve_tx_cleanup_tq()
445 gve_db_bar_write_4(priv, tx->com.irq_db_offset, in gve_tx_cleanup_tq()
455 nic_done = gve_tx_load_event_counter(priv, tx); in gve_tx_cleanup_tq()
456 todo = nic_done - tx->done; in gve_tx_cleanup_tq()
458 gve_db_bar_write_4(priv, tx->com.irq_db_offset, GVE_IRQ_MASK); in gve_tx_cleanup_tq()
459 taskqueue_enqueue(tx->com.cleanup_tq, &tx->com.cleanup_task); in gve_tx_cleanup_tq()
462 if (atomic_load_bool(&tx->stopped) && space_freed) { in gve_tx_cleanup_tq()
463 atomic_store_bool(&tx->stopped, false); in gve_tx_cleanup_tq()
464 taskqueue_enqueue(tx->xmit_tq, &tx->xmit_task); in gve_tx_cleanup_tq()
535 gve_tx_avail(struct gve_tx_ring *tx) in gve_tx_avail() argument
537 return (tx->mask + 1 - (tx->req - tx->done)); in gve_tx_avail()
547 gve_can_tx(struct gve_tx_ring *tx, int bytes_required) in gve_can_tx() argument
549 return (gve_tx_avail(tx) >= (GVE_TX_MAX_DESCS + 1) && in gve_can_tx()
550 gve_tx_fifo_can_alloc(&tx->fifo, bytes_required)); in gve_can_tx()
560 gve_fifo_bytes_required(struct gve_tx_ring *tx, uint16_t first_seg_len, in gve_fifo_bytes_required() argument
566 pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->fifo, first_seg_len); in gve_fifo_bytes_required()
593 ("Allocating gve tx fifo when there is no room")); in gve_tx_alloc_fifo()
628 /* Only error this returns is ENOBUFS when the tx fifo is short of space */
630 gve_xmit(struct gve_tx_ring *tx, struct mbuf *mbuf) in gve_xmit() argument
640 uint32_t idx = tx->req & tx->mask; in gve_xmit()
652 info = &tx->info[idx]; in gve_xmit()
707 bytes_required = gve_fifo_bytes_required(tx, first_seg_len, pkt_len); in gve_xmit()
708 if (__predict_false(!gve_can_tx(tx, bytes_required))) { in gve_xmit()
710 counter_u64_add_protected(tx->stats.tx_delayed_pkt_nospace_device, 1); in gve_xmit()
724 pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->fifo, first_seg_len); in gve_xmit()
725 hdr_nfrags = gve_tx_alloc_fifo(&tx->fifo, first_seg_len + pad_bytes, in gve_xmit()
727 KASSERT(hdr_nfrags > 0, ("Number of header fragments for gve tx is 0")); in gve_xmit()
728 payload_nfrags = gve_tx_alloc_fifo(&tx->fifo, pkt_len - first_seg_len, in gve_xmit()
731 pkt_desc = &tx->desc_ring[idx].pkt; in gve_xmit()
738 (char *)tx->fifo.base + info->iov[hdr_nfrags - 1].iov_offset); in gve_xmit()
739 gve_dma_sync_for_device(tx->com.qpl, in gve_xmit()
745 next_idx = (tx->req + 1) & tx->mask; in gve_xmit()
746 mtd_desc = &tx->desc_ring[next_idx].mtd; in gve_xmit()
751 next_idx = (tx->req + 1 + mtd_desc_nr + i - payload_iov) & tx->mask; in gve_xmit()
752 seg_desc = &tx->desc_ring[next_idx].seg; in gve_xmit()
758 (char *)tx->fifo.base + info->iov[i].iov_offset); in gve_xmit()
759 gve_dma_sync_for_device(tx->com.qpl, in gve_xmit()
764 tx->req += (1 + mtd_desc_nr + payload_nfrags); in gve_xmit()
767 counter_u64_add_protected(tx->stats.tso_packet_cnt, 1); in gve_xmit()
774 gve_xmit_mbuf(struct gve_tx_ring *tx, in gve_xmit_mbuf() argument
777 if (gve_is_gqi(tx->com.priv)) in gve_xmit_mbuf()
778 return (gve_xmit(tx, *mbuf)); in gve_xmit_mbuf()
780 if (gve_is_qpl(tx->com.priv)) in gve_xmit_mbuf()
781 return (gve_xmit_dqo_qpl(tx, *mbuf)); in gve_xmit_mbuf()
788 return (gve_xmit_dqo(tx, mbuf)); in gve_xmit_mbuf()
792 * Has the side-effect of stopping the xmit queue by setting tx->stopped
795 gve_xmit_retry_enobuf_mbuf(struct gve_tx_ring *tx, in gve_xmit_retry_enobuf_mbuf() argument
800 atomic_store_bool(&tx->stopped, true); in gve_xmit_retry_enobuf_mbuf()
807 * iteration creating the room will either see a tx->stopped value in gve_xmit_retry_enobuf_mbuf()
819 err = gve_xmit_mbuf(tx, mbuf); in gve_xmit_retry_enobuf_mbuf()
821 atomic_store_bool(&tx->stopped, false); in gve_xmit_retry_enobuf_mbuf()
827 gve_xmit_br(struct gve_tx_ring *tx) in gve_xmit_br() argument
829 struct gve_priv *priv = tx->com.priv; in gve_xmit_br()
835 (mbuf = drbr_peek(ifp, tx->br)) != NULL) { in gve_xmit_br()
836 err = gve_xmit_mbuf(tx, &mbuf); in gve_xmit_br()
845 err = gve_xmit_retry_enobuf_mbuf(tx, &mbuf); in gve_xmit_br()
849 drbr_advance(ifp, tx->br); in gve_xmit_br()
852 drbr_putback(ifp, tx->br, mbuf); in gve_xmit_br()
856 drbr_advance(ifp, tx->br); in gve_xmit_br()
859 bus_dmamap_sync(tx->desc_ring_mem.tag, tx->desc_ring_mem.map, in gve_xmit_br()
863 gve_db_bar_write_4(priv, tx->com.db_offset, tx->req); in gve_xmit_br()
865 gve_db_bar_dqo_write_4(priv, tx->com.db_offset, in gve_xmit_br()
866 tx->dqo.desc_tail); in gve_xmit_br()
873 struct gve_tx_ring *tx = (struct gve_tx_ring *)arg; in gve_xmit_tq() local
875 GVE_RING_LOCK(tx); in gve_xmit_tq()
876 gve_xmit_br(tx); in gve_xmit_tq()
877 GVE_RING_UNLOCK(tx); in gve_xmit_tq()
893 struct gve_tx_ring *tx; in gve_xmit_ifp() local
905 tx = &priv->tx[i]; in gve_xmit_ifp()
909 counter_u64_add_protected(tx->stats.tx_dropped_pkt_vlan, 1); in gve_xmit_ifp()
910 counter_u64_add_protected(tx->stats.tx_dropped_pkt, 1); in gve_xmit_ifp()
916 is_br_empty = drbr_empty(ifp, tx->br); in gve_xmit_ifp()
917 err = drbr_enqueue(ifp, tx->br, mbuf); in gve_xmit_ifp()
919 if (!atomic_load_bool(&tx->stopped)) in gve_xmit_ifp()
920 taskqueue_enqueue(tx->xmit_tq, &tx->xmit_task); in gve_xmit_ifp()
922 counter_u64_add_protected(tx->stats.tx_dropped_pkt_nospace_bufring, 1); in gve_xmit_ifp()
923 counter_u64_add_protected(tx->stats.tx_dropped_pkt, 1); in gve_xmit_ifp()
932 if (is_br_empty && (GVE_RING_TRYLOCK(tx) != 0)) { in gve_xmit_ifp()
933 gve_xmit_br(tx); in gve_xmit_ifp()
934 GVE_RING_UNLOCK(tx); in gve_xmit_ifp()
935 } else if (!atomic_load_bool(&tx->stopped)) in gve_xmit_ifp()
936 taskqueue_enqueue(tx->xmit_tq, &tx->xmit_task); in gve_xmit_ifp()
945 struct gve_tx_ring *tx; in gve_qflush() local
949 tx = &priv->tx[i]; in gve_qflush()
950 if (drbr_empty(ifp, tx->br) == 0) { in gve_qflush()
951 GVE_RING_LOCK(tx); in gve_qflush()
952 drbr_flush(ifp, tx->br); in gve_qflush()
953 GVE_RING_UNLOCK(tx); in gve_qflush()
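
Note on the ring-occupancy arithmetic matched above (gve_tx_avail() at lines 535-537 and gve_can_tx() at lines 547-550): the ring keeps a free-running producer count tx->req and a free-running consumer count tx->done, and the free slot count is (tx->mask + 1) - (tx->req - tx->done). The standalone sketch below only illustrates why that unsigned subtraction stays correct across counter wraparound; the toy_tx_ring struct, the GVE_TX_MAX_DESCS placeholder value, and main() are hypothetical stand-ins, not the driver's definitions from gve.h.

/*
 * Minimal sketch of the descriptor-availability check visible in the
 * gve_tx_avail()/gve_can_tx() matches above. All names and values here
 * are illustrative stand-ins, not the driver's own definitions.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GVE_TX_MAX_DESCS 4	/* placeholder; the real value lives in gve.h */

struct toy_tx_ring {
	uint32_t mask;	/* ring size - 1; ring size is a power of two */
	uint32_t req;	/* free-running count of descriptors posted */
	uint32_t done;	/* free-running count of descriptors completed */
};

/* Mirrors gve_tx_avail(): free slots = ring size - descriptors in flight. */
static uint32_t
toy_tx_avail(const struct toy_tx_ring *tx)
{
	return (tx->mask + 1 - (tx->req - tx->done));
}

/*
 * Mirrors the descriptor half of gve_can_tx(): a packet may consume up to
 * GVE_TX_MAX_DESCS + 1 descriptors, so transmission is allowed only while
 * at least that many slots remain free.
 */
static bool
toy_can_tx(const struct toy_tx_ring *tx)
{
	return (toy_tx_avail(tx) >= GVE_TX_MAX_DESCS + 1);
}

int
main(void)
{
	struct toy_tx_ring tx = {
		.mask = 255,			/* 256-entry ring */
		.req  = 1U,			/* producer wrapped past UINT32_MAX */
		.done = UINT32_MAX - 9U,	/* consumer has not wrapped yet */
	};

	/* req - done underflows to 11 in uint32_t arithmetic, so 245 slots remain. */
	printf("in flight: %u, avail: %u, can_tx: %d\n",
	    tx.req - tx.done, toy_tx_avail(&tx), toy_can_tx(&tx));
	return (0);
}

Because both counters are only ever incremented and the difference is taken in unsigned arithmetic, the accounting needs no explicit wraparound handling; the same property is what lets gve_tx_cleanup_tq() (line 410 above) compute todo = nic_done - tx->done directly.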