
Searched refs:tx_ring (Results 1 – 25 of 90) sorted by relevance


/freebsd/sys/dev/ena/
ena_datapath.c
49 static inline int ena_get_tx_req_id(struct ena_ring *tx_ring,
58 static int ena_check_and_collapse_mbuf(struct ena_ring *tx_ring,
73 struct ena_ring *tx_ring; in ena_cleanup() local
80 tx_ring = que->tx_ring; in ena_cleanup()
86 atomic_store_8(&tx_ring->cleanup_running, 1); in ena_cleanup()
98 atomic_store_8(&tx_ring->first_interrupt, 1); in ena_cleanup()
103 txc = ena_tx_cleanup(tx_ring); in ena_cleanup()
116 counter_u64_add(tx_ring->tx_stats.unmask_interrupt_num, 1); in ena_cleanup()
118 atomic_store_8(&tx_ring->cleanup_running, 0); in ena_cleanup()
124 struct ena_ring *tx_ring = (struct ena_ring *)arg; in ena_deferred_mq_start() local
[all …]
ena.c
399 txr = &adapter->tx_ring[i]; in ena_init_io_rings_basic()
413 que->tx_ring = txr; in ena_init_io_rings_basic()
431 txr = &adapter->tx_ring[i]; in ena_init_io_rings_advanced()
474 struct ena_ring *txr = &adapter->tx_ring[qid]; in ena_free_io_ring_resources()
569 validate_tx_req_id(struct ena_ring *tx_ring, uint16_t req_id, int tx_req_id_rc) in validate_tx_req_id() argument
571 struct ena_adapter *adapter = tx_ring->adapter; in validate_tx_req_id()
579 req_id, tx_ring->qid); in validate_tx_req_id()
583 req_id, tx_ring->qid); in validate_tx_req_id()
584 counter_u64_add(tx_ring->tx_stats.bad_req_id, 1); in validate_tx_req_id()
595 ena_release_all_tx_dmamap(struct ena_ring *tx_ring) in ena_release_all_tx_dmamap() argument
[all …]
ena_netmap.c
327 ctx.ring = &ctx.adapter->tx_ring[kring->ring_id]; in ena_netmap_txsync()
347 struct ena_ring *tx_ring = ctx->ring; in ena_netmap_tx_frames() local
369 tx_ring->acum_pkts++; in ena_netmap_tx_frames()
375 ena_ring_tx_doorbell(tx_ring); in ena_netmap_tx_frames()
389 struct ena_ring *tx_ring; in ena_netmap_tx_frame() local
404 tx_ring = ctx->ring; in ena_netmap_tx_frame()
406 req_id = tx_ring->free_tx_ids[ctx->nt]; in ena_netmap_tx_frame()
407 tx_info = &tx_ring->tx_buffer_info[req_id]; in ena_netmap_tx_frame()
428 if (tx_ring->acum_pkts == ENA_DB_THRESHOLD || in ena_netmap_tx_frame()
430 ena_ring_tx_doorbell(tx_ring); in ena_netmap_tx_frame()
[all …]
ena.h
222 struct ena_ring *tx_ring; member
486 struct ena_ring tx_ring[ENA_MAX_NUM_IO_QUEUES]
588 int validate_tx_req_id(struct ena_ring *tx_ring, uint16_t req_id, int tx_req_id_rc);
628 ena_ring_tx_doorbell(struct ena_ring *tx_ring) in ena_ring_tx_doorbell() argument
630 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); in ena_ring_tx_doorbell()
631 counter_u64_add(tx_ring->tx_stats.doorbells, 1); in ena_ring_tx_doorbell()
632 tx_ring->acum_pkts = 0; in ena_ring_tx_doorbell()
/freebsd/sys/ofed/drivers/infiniband/ulp/sdp/
sdp_tx.c
54 if (!callout_pending(&ssk->tx_ring.timer)) in sdp_xmit_poll()
55 callout_reset(&ssk->tx_ring.timer, SDP_TX_POLL_TIMEOUT, in sdp_xmit_poll()
59 if (force || (++ssk->tx_ring.poll_cnt & (SDP_TX_POLL_MODER - 1)) == 0) in sdp_xmit_poll()
87 mseq = ring_head(ssk->tx_ring); in sdp_post_send()
123 tx_req = &ssk->tx_ring.buffer[mseq & (SDP_TX_SIZE - 1)]; in sdp_post_send()
159 atomic_inc(&ssk->tx_ring.head); in sdp_post_send()
160 atomic_dec(&ssk->tx_ring.credits); in sdp_post_send()
172 struct sdp_tx_ring *tx_ring = &ssk->tx_ring; in sdp_send_completion() local
174 if (unlikely(mseq != ring_tail(*tx_ring))) { in sdp_send_completion()
176 mseq, ring_tail(*tx_ring)); in sdp_send_completion()
[all …]
sdp.h
150 #define tx_ring_posted(ssk) (ring_posted(ssk->tx_ring) + \
151 (ssk->tx_ring.rdma_inflight ? ssk->tx_ring.rdma_inflight->busy : 0))
153 #define tx_ring_posted(ssk) ring_posted(ssk->tx_ring)
280 #define tx_credits(ssk) (atomic_read(&ssk->tx_ring.credits))
360 struct sdp_tx_ring tx_ring; member
459 ib_req_notify_cq(ssk->tx_ring.cq, IB_CQ_NEXT_COMP); in sdp_arm_tx_cq()
/freebsd/sys/dev/rtwn/pci/
rtwn_pci_attach.c
267 struct rtwn_tx_ring *tx_ring = &pc->tx_ring[qid]; in rtwn_pci_alloc_tx_list() local
274 size, 1, size, 0, NULL, NULL, &tx_ring->desc_dmat); in rtwn_pci_alloc_tx_list()
280 error = bus_dmamem_alloc(tx_ring->desc_dmat, &tx_ring->desc, in rtwn_pci_alloc_tx_list()
281 BUS_DMA_NOWAIT | BUS_DMA_ZERO, &tx_ring->desc_map); in rtwn_pci_alloc_tx_list()
286 error = bus_dmamap_load(tx_ring->desc_dmat, tx_ring->desc_map, in rtwn_pci_alloc_tx_list()
287 tx_ring->desc, size, rtwn_pci_dma_map_addr, &tx_ring->paddr, in rtwn_pci_alloc_tx_list()
293 bus_dmamap_sync(tx_ring->desc_dmat, tx_ring->desc_map, in rtwn_pci_alloc_tx_list()
298 MJUMPAGESIZE, 1, MJUMPAGESIZE, 0, NULL, NULL, &tx_ring->data_dmat); in rtwn_pci_alloc_tx_list()
305 struct rtwn_tx_data *tx_data = &tx_ring->tx_data[i]; in rtwn_pci_alloc_tx_list()
306 void *tx_desc = (uint8_t *)tx_ring->desc + sc->txdesc_len * i; in rtwn_pci_alloc_tx_list()
[all …]
/freebsd/sys/dev/al_eth/
al_eth.c
985 struct al_eth_ring *ring = &adapter->tx_ring[i]; in al_eth_init_rings()
1144 al_eth_tx_do_cleanup(struct al_eth_ring *tx_ring) in al_eth_tx_do_cleanup() argument
1148 int qid = tx_ring->ring_id; in al_eth_tx_do_cleanup()
1150 total_done = al_eth_comp_tx_get(tx_ring->dma_q); in al_eth_tx_do_cleanup()
1151 device_printf_dbg(tx_ring->dev, in al_eth_tx_do_cleanup()
1153 next_to_clean = tx_ring->next_to_clean; in al_eth_tx_do_cleanup()
1159 tx_info = &tx_ring->tx_buffer_info[next_to_clean]; in al_eth_tx_do_cleanup()
1168 device_printf_dbg(tx_ring->dev, in al_eth_tx_do_cleanup()
1172 bus_dmamap_unload(tx_ring->dma_buf_tag, tx_info->dma_map); in al_eth_tx_do_cleanup()
1176 next_to_clean = AL_ETH_TX_RING_IDX_NEXT(tx_ring, next_to_clean); in al_eth_tx_do_cleanup()
[all …]
/freebsd/sys/contrib/dev/athk/ath11k/
dp_tx.c
96 struct dp_tx_ring *tx_ring; in ath11k_dp_tx() local
128 tx_ring = &dp->tx_ring[ti.ring_id]; in ath11k_dp_tx()
130 spin_lock_bh(&tx_ring->tx_idr_lock); in ath11k_dp_tx()
131 ret = idr_alloc(&tx_ring->txbuf_idr, skb, 0, in ath11k_dp_tx()
133 spin_unlock_bh(&tx_ring->tx_idr_lock); in ath11k_dp_tx()
232 hal_ring_id = tx_ring->tcl_data_ring.ring_id; in ath11k_dp_tx()
283 spin_lock_bh(&tx_ring->tx_idr_lock); in ath11k_dp_tx()
284 idr_remove(&tx_ring->txbuf_idr, in ath11k_dp_tx()
286 spin_unlock_bh(&tx_ring->tx_idr_lock); in ath11k_dp_tx()
296 struct dp_tx_ring *tx_ring) in ath11k_dp_tx_free_txbuf() argument
[all …]
dp.c
363 ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring); in ath11k_dp_srng_common_cleanup()
364 ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring); in ath11k_dp_srng_common_cleanup()
407 ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring, in ath11k_dp_srng_common_setup()
416 ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring, in ath11k_dp_srng_common_setup()
425 srng = &ab->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id]; in ath11k_dp_srng_common_setup()
430 dp->tx_ring[i].tcl_data_ring.ring_id); in ath11k_dp_srng_common_setup()
1049 spin_lock_bh(&dp->tx_ring[i].tx_idr_lock); in ath11k_dp_free()
1050 idr_for_each(&dp->tx_ring[i].txbuf_idr, in ath11k_dp_free()
1052 idr_destroy(&dp->tx_ring[i].txbuf_idr); in ath11k_dp_free()
1053 spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock); in ath11k_dp_free()
[all …]
/freebsd/sys/contrib/dev/rtw89/
pci.c
95 struct rtw89_pci_tx_ring *tx_ring) in rtw89_pci_release_fwcmd()
97 struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring; in rtw89_pci_release_fwcmd()
144 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12]; in rtw89_pci_sync_skb_for_cpu()
147 cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring); in rtw89_pci_sync_skb_for_cpu()
462 struct rtw89_pci_tx_ring *tx_ring, in rtw89_pci_release_txwd_skb()
477 tx_ring->tx_acked++; in rtw89_pci_release_txwd_skb()
484 tx_ring->tx_retry_lmt++; in rtw89_pci_release_rpp()
487 tx_ring->tx_life_time++; in rtw89_pci_release_rpp()
490 tx_ring->tx_mac_id_drop++; in rtw89_pci_release_rpp()
501 static void rtw89_pci_reclaim_txbd(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring) in rtw89_pci_release_rpp()
69 rtw89_pci_txbd_recalc(struct rtw89_dev * rtwdev,struct rtw89_pci_tx_ring * tx_ring) rtw89_pci_txbd_recalc() argument
118 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12]; rtw89_pci_reclaim_tx_fwcmd() local
369 rtw89_pci_tx_status(struct rtw89_dev * rtwdev,struct rtw89_pci_tx_ring * tx_ring,struct sk_buff * skb,u8 tx_status) rtw89_pci_tx_status() argument
408 rtw89_pci_reclaim_txbd(struct rtw89_dev * rtwdev,struct rtw89_pci_tx_ring * tx_ring) rtw89_pci_reclaim_txbd() argument
430 rtw89_pci_release_busy_txwd(struct rtw89_dev * rtwdev,struct rtw89_pci_tx_ring * tx_ring) rtw89_pci_release_busy_txwd() argument
446 rtw89_pci_release_txwd_skb(struct rtw89_dev * rtwdev,struct rtw89_pci_tx_ring * tx_ring,struct rtw89_pci_tx_wd * txwd,u16 seq,u8 tx_status) rtw89_pci_release_txwd_skb() argument
483 struct rtw89_pci_tx_ring *tx_ring; rtw89_pci_release_rpp() local
507 rtw89_pci_release_pending_txwd_skb(struct rtw89_dev * rtwdev,struct rtw89_pci_tx_ring * tx_ring) rtw89_pci_release_pending_txwd_skb() argument
946 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12]; __rtw89_pci_check_and_reclaim_tx_fwcmd_resource() local
962 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; __rtw89_pci_check_and_reclaim_tx_resource_noio() local
978 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; __rtw89_pci_check_and_reclaim_tx_resource() local
1040 __rtw89_pci_tx_kick_off(struct rtw89_dev * rtwdev,struct rtw89_pci_tx_ring * tx_ring) __rtw89_pci_tx_kick_off() argument
1055 rtw89_pci_tx_bd_ring_update(struct rtw89_dev * rtwdev,struct rtw89_pci_tx_ring * tx_ring,int n_txbd) rtw89_pci_tx_bd_ring_update() argument
1071 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; rtw89_pci_ops_tx_kick_off() local
1084 struct rtw89_pci_tx_ring *tx_ring; rtw89_pci_tx_kick_off_pending() local
1099 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; __pci_flush_txch() local
1199 rtw89_pci_txwd_submit(struct rtw89_dev * rtwdev,struct rtw89_pci_tx_ring * tx_ring,struct rtw89_pci_tx_wd * txwd,struct rtw89_core_tx_request * tx_req) rtw89_pci_txwd_submit() argument
1267 rtw89_pci_fwcmd_submit(struct rtw89_dev * rtwdev,struct rtw89_pci_tx_ring * tx_ring,struct rtw89_pci_tx_bd_32 * txbd,struct rtw89_core_tx_request * tx_req) rtw89_pci_fwcmd_submit() argument
1303 rtw89_pci_txbd_submit(struct rtw89_dev * rtwdev,struct rtw89_pci_tx_ring * tx_ring,struct rtw89_pci_tx_bd_32 * txbd,struct rtw89_core_tx_request * tx_req) rtw89_pci_txbd_submit() argument
1350 struct rtw89_pci_tx_ring *tx_ring; rtw89_pci_tx_write() local
1436 struct rtw89_pci_tx_ring *tx_ring; rtw89_pci_reset_trx_rings() local
1484 rtw89_pci_release_tx_ring(struct rtw89_dev * rtwdev,struct rtw89_pci_tx_ring * tx_ring) rtw89_pci_release_tx_ring() argument
1575 struct rtw89_pci_tx_ring *tx_ring; rtw89_pci_switch_bd_idx_addr() local
2840 rtw89_pci_free_tx_wd_ring(struct rtw89_dev * rtwdev,struct pci_dev * pdev,struct rtw89_pci_tx_ring * tx_ring) rtw89_pci_free_tx_wd_ring() argument
2855 rtw89_pci_free_tx_ring(struct rtw89_dev * rtwdev,struct pci_dev * pdev,struct rtw89_pci_tx_ring * tx_ring) rtw89_pci_free_tx_ring() argument
2874 struct rtw89_pci_tx_ring *tx_ring; rtw89_pci_free_tx_rings() local
2966 rtw89_pci_alloc_tx_wd_ring(struct rtw89_dev * rtwdev,struct pci_dev * pdev,struct rtw89_pci_tx_ring * tx_ring,enum rtw89_tx_channel txch) rtw89_pci_alloc_tx_wd_ring() argument
3017 rtw89_pci_alloc_tx_ring(struct rtw89_dev * rtwdev,struct pci_dev * pdev,struct rtw89_pci_tx_ring * tx_ring,u32 desc_size,u32 len,enum rtw89_tx_channel txch) rtw89_pci_alloc_tx_ring() argument
3068 struct rtw89_pci_tx_ring *tx_ring; rtw89_pci_alloc_tx_rings() local
[all …]
/freebsd/sys/dev/qlxge/
qls_hw.c
639 txr_done = ha->tx_ring[txr_idx].txr_done; in qls_hw_tx_done()
640 txr_next = ha->tx_ring[txr_idx].txr_next; in qls_hw_tx_done()
643 ha->tx_ring[txr_idx].txr_free = NUM_TX_DESCRIPTORS; in qls_hw_tx_done()
645 ha->tx_ring[txr_idx].txr_free = txr_done - txr_next; in qls_hw_tx_done()
647 ha->tx_ring[txr_idx].txr_free = NUM_TX_DESCRIPTORS + in qls_hw_tx_done()
651 if (ha->tx_ring[txr_idx].txr_free <= QLA_TX_MIN_FREE) in qls_hw_tx_done()
685 if (ha->tx_ring[txr_idx].txr_free <= (NUM_TX_DESCRIPTORS >> 2)) { in qls_hw_send()
689 ha->tx_ring[txr_idx].txr_free); in qls_hw_send()
694 tx_mac = (q81_tx_mac_t *)&ha->tx_ring[txr_idx].wq_vaddr[txr_next]; in qls_hw_send()
705 ha->tx_ring[txr_idx].tx_tso_frames++; in qls_hw_send()
[all …]
qls_isr.c
54 txb = &ha->tx_ring[txr_idx].tx_buf[tx_idx]; in qls_tx_comp()
66 ha->tx_ring[txr_idx].txr_done++; in qls_tx_comp()
68 if (ha->tx_ring[txr_idx].txr_done == NUM_TX_DESCRIPTORS) in qls_tx_comp()
69 ha->tx_ring[txr_idx].txr_done = 0; in qls_tx_comp()
/freebsd/sys/contrib/dev/athk/ath12k/
dp_tx.c
146 struct dp_tx_ring *tx_ring; in ath12k_dp_tx() local
179 tx_ring = &dp->tx_ring[ti.ring_id]; in ath12k_dp_tx()
291 hal_ring_id = tx_ring->tcl_data_ring.ring_id; in ath12k_dp_tx()
350 struct dp_tx_ring *tx_ring) in ath12k_dp_tx_free_txbuf() argument
373 struct dp_tx_ring *tx_ring, in ath12k_dp_tx_htt_tx_complete_buf() argument
413 struct dp_tx_ring *tx_ring) in ath12k_dp_tx_process_htt_tx_complete() argument
435 ath12k_dp_tx_htt_tx_complete_buf(ab, msdu, tx_ring, &ts); in ath12k_dp_tx_process_htt_tx_complete()
439 ath12k_dp_tx_free_txbuf(ab, msdu, mac_id, tx_ring); in ath12k_dp_tx_process_htt_tx_complete()
541 int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id; in ath12k_dp_tx_completion_handler()
546 struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id]; in ath12k_dp_tx_completion_handler() local
[all …]
/freebsd/sys/contrib/dev/rtw88/
pci.c
140 struct rtw_pci_tx_ring *tx_ring) in rtw_pci_free_tx_ring_skbs() argument
148 skb_queue_walk_safe(&tx_ring->queue, skb, tmp) { in rtw_pci_free_tx_ring_skbs()
149 __skb_unlink(skb, &tx_ring->queue); in rtw_pci_free_tx_ring_skbs()
159 struct rtw_pci_tx_ring *tx_ring) in rtw_pci_free_tx_ring() argument
162 u8 *head = tx_ring->r.head; in rtw_pci_free_tx_ring()
163 u32 len = tx_ring->r.len; in rtw_pci_free_tx_ring()
164 int ring_sz = len * tx_ring->r.desc_size; in rtw_pci_free_tx_ring()
166 rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring); in rtw_pci_free_tx_ring()
169 dma_free_coherent(&pdev->dev, ring_sz, head, tx_ring->r.dma); in rtw_pci_free_tx_ring()
170 tx_ring in rtw_pci_free_tx_ring()
209 struct rtw_pci_tx_ring *tx_ring; rtw_pci_free_trx_ring() local
225 rtw_pci_init_tx_ring(struct rtw_dev * rtwdev,struct rtw_pci_tx_ring * tx_ring,u8 desc_size,u32 len) rtw_pci_init_tx_ring() argument
364 struct rtw_pci_tx_ring *tx_ring; rtw_pci_init_trx_ring() local
585 struct rtw_pci_tx_ring *tx_ring; rtw_pci_dma_release() local
651 struct rtw_pci_tx_ring *tx_ring; rtw_pci_deep_ps_enter() local
[all …]
/freebsd/sys/dev/et/
if_et.c
859 struct et_txdesc_ring *tx_ring; in et_dma_alloc() local
878 tx_ring = &sc->sc_tx_ring; in et_dma_alloc()
880 &tx_ring->tr_dtag, (uint8_t **)&tx_ring->tr_desc, &tx_ring->tr_dmap, in et_dma_alloc()
881 &tx_ring->tr_paddr, "TX ring"); in et_dma_alloc()
1038 struct et_txdesc_ring *tx_ring; in et_dma_free() local
1112 tx_ring = &sc->sc_tx_ring; in et_dma_free()
1113 et_dma_ring_free(sc, &tx_ring->tr_dtag, (void *)&tx_ring->tr_desc, in et_dma_free()
1114 tx_ring->tr_dmap, &tx_ring->tr_paddr); in et_dma_free()
1371 struct et_txdesc_ring *tx_ring; in et_start_locked() local
1422 tx_ring = &sc->sc_tx_ring; in et_start_locked()
[all …]
/freebsd/sys/dev/mlx4/mlx4_ib/
mlx4_ib_mad.c
558 tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr); in mlx4_ib_send_to_slave()
559 if (tun_qp->tx_ring[tun_tx_ix].ah) in mlx4_ib_send_to_slave()
560 ib_destroy_ah(tun_qp->tx_ring[tun_tx_ix].ah, 0); in mlx4_ib_send_to_slave()
561 tun_qp->tx_ring[tun_tx_ix].ah = ah; in mlx4_ib_send_to_slave()
563 tun_qp->tx_ring[tun_tx_ix].buf.map, in mlx4_ib_send_to_slave()
607 tun_qp->tx_ring[tun_tx_ix].buf.map, in mlx4_ib_send_to_slave()
611 list.addr = tun_qp->tx_ring[tun_tx_ix].buf.map; in mlx4_ib_send_to_slave()
633 tun_qp->tx_ring[tun_tx_ix].ah = NULL; in mlx4_ib_send_to_slave()
1383 sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr); in mlx4_ib_send_to_wire()
1384 kfree(sqp->tx_ring[wire_tx_ix].ah); in mlx4_ib_send_to_wire()
[all …]
/freebsd/sys/dev/axgbe/
if_axgbe_pci.c
808 channel->tx_ring = NULL; in axgbe_alloc_channels()
1687 struct xgbe_ring *tx_ring; in axgbe_if_tx_queues_alloc() local
1701 tx_ring = (struct xgbe_ring*)malloc(ntxqs * in axgbe_if_tx_queues_alloc()
1704 if (tx_ring == NULL) { in axgbe_if_tx_queues_alloc()
1709 channel->tx_ring = tx_ring; in axgbe_if_tx_queues_alloc()
1711 for (j = 0; j < ntxqs; j++, tx_ring++) { in axgbe_if_tx_queues_alloc()
1712 tx_ring->rdata = in axgbe_if_tx_queues_alloc()
1717 tx_ring->rdesc = (struct xgbe_ring_desc *)va[i*ntxqs + j]; in axgbe_if_tx_queues_alloc()
1718 tx_ring->rdesc_paddr = pa[i*ntxqs + j]; in axgbe_if_tx_queues_alloc()
1719 tx_ring->rdesc_count = scctx->isc_ntxd[j]; in axgbe_if_tx_queues_alloc()
[all …]
/freebsd/sys/dev/ixl/
ixl_txrx.c
149 ixl_is_tx_desc_done(struct tx_ring *txr, int idx) in ixl_is_tx_desc_done()
280 ixl_tso_setup(struct tx_ring *txr, if_pkt_info_t pi) in ixl_tso_setup()
343 struct tx_ring *txr = &que->txr; in ixl_isc_txd_encap()
417 struct tx_ring *txr = &vsi->tx_queues[txqid].txr; in ixl_isc_txd_flush()
437 struct tx_ring *txr = &que->txr; in ixl_init_tx_ring()
455 struct tx_ring *txr = &que->txr; in ixl_get_tx_head()
467 struct tx_ring *txr = &que->txr; in ixl_isc_txd_credits_update_hwb()
488 struct tx_ring *txr = &tx_que->txr; in ixl_isc_txd_credits_update_dwb()
788 struct tx_ring *txr = &tx_que->txr; in ixl_init_tx_rsqs()
812 struct tx_ring *txr = &tx_que->txr; in ixl_init_tx_cidx()
[all …]
/freebsd/sys/dev/igc/
igc_txrx.c
58 static int igc_tx_ctx_setup(struct tx_ring *, if_pkt_info_t, uint32_t *,
60 static int igc_tso_setup(struct tx_ring *, if_pkt_info_t, uint32_t *,
85 struct tx_ring *txr; in igc_dump_rs()
122 igc_tso_setup(struct tx_ring *txr, if_pkt_info_t pi, uint32_t *cmd_type_len, in igc_tso_setup()
183 igc_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi, uint32_t *cmd_type_len, in igc_tx_ctx_setup()
267 struct tx_ring *txr = &que->txr; in igc_isc_txd_encap()
329 struct tx_ring *txr = &que->txr; in igc_isc_txd_flush()
340 struct tx_ring *txr = &que->txr; in igc_isc_txd_credits_update()
/freebsd/sys/dev/iavf/
iavf_txrx_iflib.c
105 iavf_is_tx_desc_done(struct tx_ring *txr, int idx) in iavf_is_tx_desc_done()
259 iavf_tso_setup(struct tx_ring *txr, if_pkt_info_t pi) in iavf_tso_setup()
325 struct tx_ring *txr = &que->txr; in iavf_isc_txd_encap()
413 struct tx_ring *txr = &vsi->tx_queues[txqid].txr; in iavf_isc_txd_flush()
430 struct tx_ring *txr = &que->txr; in iavf_init_tx_ring()
452 struct tx_ring *txr = &que->txr; in iavf_get_tx_head()
479 struct tx_ring *txr = &que->txr; in iavf_isc_txd_credits_update_hwb()
515 struct tx_ring *txr = &tx_que->txr; in iavf_isc_txd_credits_update_dwb()
/freebsd/sys/dev/e1000/
igb_txrx.c
56 static int igb_tx_ctx_setup(struct tx_ring *, if_pkt_info_t, uint32_t *,
58 static int igb_tso_setup(struct tx_ring *, if_pkt_info_t, uint32_t *,
85 igb_tso_setup(struct tx_ring *txr, if_pkt_info_t pi, uint32_t *cmd_type_len, in igb_tso_setup()
150 igb_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi, uint32_t *cmd_type_len, in igb_tx_ctx_setup()
239 struct tx_ring *txr = &que->txr; in igb_isc_txd_encap()
305 struct tx_ring *txr = &que->txr; in igb_isc_txd_flush()
316 struct tx_ring *txr = &que->txr; in igb_isc_txd_credits_update()
/freebsd/sys/dev/xen/netback/
netback.c
157 const netif_tx_back_ring_t *tx_ring,
319 netif_tx_back_ring_t tx_ring; member
545 &xnb->ring_configs[XNB_RING_TYPE_TX].back_ring.tx_ring; in xnb_dump_rings()
729 BACK_RING_INIT(&ring->back_ring.tx_ring, in xnb_connect_ring()
1432 txb = &xnb->ring_configs[XNB_RING_TYPE_TX].back_ring.tx_ring; in xnb_intr()
1475 xnb_ring2pkt(struct xnb_pkt *pkt, const netif_tx_back_ring_t *tx_ring, in xnb_ring2pkt() argument
1495 if (RING_HAS_UNCONSUMED_REQUESTS_2(tx_ring, idx)) { in xnb_ring2pkt()
1496 netif_tx_request_t *tx = RING_GET_REQUEST(tx_ring, idx); in xnb_ring2pkt()
1507 RING_HAS_UNCONSUMED_REQUESTS_2(tx_ring, idx)) { in xnb_ring2pkt()
1509 (netif_extra_info_t*) RING_GET_REQUEST(tx_ring, idx); in xnb_ring2pkt()
[all …]
/freebsd/sys/dev/neta/
if_mvnetavar.h
216 KASSERT(mtx_owned(&(sc)->tx_ring[(q)].ring_mtx),\
290 struct mvneta_tx_ring tx_ring[MVNETA_TX_QNUM_MAX]; member
319 (&(sc)->tx_ring[(q)])
/freebsd/sys/dev/mlx4/mlx4_en/
mlx4_en_netdev.c
1266 struct mlx4_en_tx_ring *tx_ring; in mlx4_en_start_port() local
1360 tx_ring = priv->tx_ring[i]; in mlx4_en_start_port()
1362 err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn, in mlx4_en_start_port()
1374 for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE) in mlx4_en_start_port()
1375 *((u32 *) (tx_ring->buf + j)) = INIT_OWNER_BIT; in mlx4_en_start_port()
1435 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]); in mlx4_en_start_port()
1538 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]); in mlx4_en_stop_port()
1544 mlx4_en_free_tx_buf(dev, priv->tx_ring[i]); in mlx4_en_stop_port()
1580 ring = priv->tx_ring[i]; in mlx4_en_restart()
1619 priv->tx_ring[i]->bytes = 0; in mlx4_en_clear_stats()
[all …]
