/linux/drivers/net/ethernet/netronome/nfp/nfd3/rings.c:
  in nfp_nfd3_xsk_tx_bufs_free():
    11  static void nfp_nfd3_xsk_tx_bufs_free(struct nfp_net_tx_ring *tx_ring)
    16  while (tx_ring->rd_p != tx_ring->wr_p) {
    17  idx = D_IDX(tx_ring, tx_ring->rd_p);
    18  txbuf = &tx_ring->txbufs[idx];
    22  tx_ring->qcp_rd_p++;
    23  tx_ring->rd_p++;
    25  if (tx_ring->r_vec->xsk_pool) {
    29  xsk_tx_completed(tx_ring->r_vec->xsk_pool, 1);
  in nfp_nfd3_tx_ring_reset():
    42  nfp_nfd3_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
    47  while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
  [all …]
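The two nfd3 hits above show the free-running ring-pointer idiom these drivers share: rd_p and wr_p increment without bound and are masked into the descriptor array only on access (D_IDX), so rd_p != wr_p by itself means entries are still in flight. A minimal standalone sketch of that drain loop, with hypothetical types and names rather than the actual nfp code:

    #include <stdint.h>

    #define RING_CNT 256                    /* ring size must be a power of two */
    #define D_IDX(p) ((p) & (RING_CNT - 1)) /* mask a free-running pointer into the array */

    struct ring {
            uint32_t rd_p;                  /* free-running read pointer, only incremented */
            uint32_t wr_p;                  /* free-running write pointer */
            void *bufs[RING_CNT];
    };

    /* Drain every posted-but-uncompleted entry, as the nfd3 reset path does. */
    static void ring_drain(struct ring *r)
    {
            while (r->rd_p != r->wr_p) {
                    void *buf = r->bufs[D_IDX(r->rd_p)];

                    (void)buf;              /* a real driver would unmap and free it here */
                    r->rd_p++;              /* wraps safely: arithmetic is unsigned */
            }
    }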
/linux/drivers/net/ethernet/netronome/nfp/nfdk/rings.c:
  in nfp_nfdk_tx_ring_reset():
    11  nfp_nfdk_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
    16  while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
    23  rd_idx = D_IDX(tx_ring, tx_ring->rd_p);
    24  txbuf = &tx_ring->ktxbufs[rd_idx];
    28  n_descs = D_BLOCK_CPL(tx_ring->rd_p);
    57  tx_ring->rd_p += n_descs;
    60  memset(tx_ring->txds, 0, tx_ring->size);
    61  tx_ring->data_pending = 0;
    62  tx_ring->wr_p = 0;
    63  tx_ring->rd_p = 0;
  [all …]
/linux/drivers/net/ethernet/amazon/ena/ena_xdp.c:
  in validate_xdp_req_id():
    8   static int validate_xdp_req_id(struct ena_ring *tx_ring, u16 req_id)
    12  tx_info = &tx_ring->tx_buffer_info[req_id];
    16  return handle_invalid_req_id(tx_ring, req_id, tx_info, true);
  in ena_xdp_tx_map_frame():
    19  static int ena_xdp_tx_map_frame(struct ena_ring *tx_ring,
    24  struct ena_adapter *adapter = tx_ring->adapter;
    35  if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
    37  push_len = min_t(u32, size, tx_ring->tx_max_header_size);
    48  dma = dma_map_single(tx_ring->dev,
    52  if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
    68  ena_increase_stat(&tx_ring->tx_stats.dma_mapping_err, 1,
  [all …]
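ena_xdp_tx_map_frame() above shows the standard streaming-DMA pattern: map the frame with dma_map_single(), then validate the handle with dma_mapping_error() before it is written into any descriptor, bumping an error stat on failure. A kernel-context sketch of just that pattern; the function and parameter names are illustrative, not the ena API:

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    /* Map a buffer for device TX and validate the mapping before use. */
    static int example_map_frame(struct device *dev, void *data, size_t len,
                                 dma_addr_t *dma_out)
    {
            dma_addr_t dma = dma_map_single(dev, data, len, DMA_TO_DEVICE);

            if (unlikely(dma_mapping_error(dev, dma)))
                    return -ENOMEM; /* a real driver would also bump a dma_mapping_err counter */

            *dma_out = dma;
            return 0;
    }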
/linux/drivers/net/ethernet/amazon/ena/ena_netdev.c:
  in ena_tx_timeout():
    54  struct ena_ring *tx_ring;
    63  tx_ring = &adapter->tx_ring[txqueue];
    65  time_since_last_napi = jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies);
    66  napi_scheduled = !!(tx_ring->napi->state & NAPIF_STATE_SCHED);
  in ena_init_io_rings():
    193 txr = &adapter->tx_ring[i];
    223 rxr->xdp_ring = &adapter->tx_ring[i + adapter->num_io_queues];
  in ena_setup_tx_resources():
    236 struct ena_ring *tx_ring = &adapter->tx_ring[qid];
    240 if (tx_ring->tx_buffer_info) {
    246 size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
    249 tx_ring->tx_buffer_info = vzalloc_node(size, node);
  [all …]
/linux/drivers/net/ethernet/netronome/nfp/nfp_net_dp.h:
  in nfp_net_tx_full():
    51  static inline int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt)
    53  return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt);
  in nfp_net_tx_xmit_more_flush():
    56  static inline void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
    59  nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
    60  tx_ring->wr_ptr_add = 0;
  in nfp_net_read_tx_cmpl():
    64  nfp_net_read_tx_cmpl(struct nfp_net_tx_ring *tx_ring, struct nfp_net_dp *dp)
    66  if (tx_ring->txrwb)
    67  return *tx_ring->txrwb;
    68  return nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
    100 struct nfp_net_tx_ring *tx_ring, unsigned int idx);
  [all …]
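nfp_net_tx_full() above is the payoff of the free-running pointer scheme: with unsigned arithmetic, wr_p - rd_p is the in-flight descriptor count even after both counters wrap past UINT32_MAX, so no modulo or special case is needed. A runnable sketch of the same check under those assumptions:

    #include <assert.h>
    #include <stdint.h>

    /* Ring is "full" when fewer than dcnt of the cnt slots remain free
     * (cf. nfp_net_tx_full()). wr_p and rd_p are free-running u32 counters.
     */
    static int ring_full(uint32_t wr_p, uint32_t rd_p, uint32_t cnt, uint32_t dcnt)
    {
            return (wr_p - rd_p) >= (cnt - dcnt);
    }

    int main(void)
    {
            /* 4 descriptors in flight in a 256-slot ring: not full */
            assert(!ring_full(100, 96, 256, 4));
            /* counters that wrapped past UINT32_MAX still subtract correctly */
            assert(ring_full(3, UINT32_MAX - 250, 256, 4));
            return 0;
    }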
/linux/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c:
  in __nfp_tx_q_show():
    83  struct nfp_net_tx_ring *tx_ring;
    90  tx_ring = r_vec->xdp_ring;
    92  tx_ring = r_vec->tx_ring;
    93  if (!r_vec->nfp_net || !tx_ring)
    99  d_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
    100 d_wr_p = nfp_qcp_wr_ptr_read(tx_ring->qcp_q);
    103 tx_ring->idx, tx_ring->qcidx,
    104 tx_ring == r_vec->tx_ring ? "" : "xdp",
    105 tx_ring->cnt, &tx_ring->dma, tx_ring->txds,
    106 tx_ring->rd_p, tx_ring->wr_p, d_rd_p, d_wr_p);
  [all …]
/linux/drivers/net/ethernet/intel/iavf/iavf_txrx.c:
  in iavf_clean_tx_ring():
    79  static void iavf_clean_tx_ring(struct iavf_ring *tx_ring)
    85  if (!tx_ring->tx_bi)
    89  for (i = 0; i < tx_ring->count; i++)
    90  iavf_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
    92  bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;
    93  memset(tx_ring->tx_bi, 0, bi_size);
    96  memset(tx_ring->desc, 0, tx_ring->size);
    98  tx_ring->next_to_use = 0;
    99  tx_ring->next_to_clean = 0;
    101 if (!tx_ring->netdev)
  [all …]
/linux/drivers/infiniband/hw/hfi1/ipoib_tx.c:
  in hfi1_ipoib_used():
    49  return hfi1_ipoib_txreqs(txq->tx_ring.sent_txreqs,
    50  txq->tx_ring.complete_txreqs);
  in hfi1_ipoib_stop_txq():
    56  if (atomic_inc_return(&txq->tx_ring.stops) == 1)
  in hfi1_ipoib_wake_txq():
    63  if (atomic_dec_and_test(&txq->tx_ring.stops))
  in hfi1_ipoib_ring_hwat():
    70  txq->tx_ring.max_items - 1);
  in hfi1_ipoib_ring_lwat():
    76  txq->tx_ring.max_items) >> 1;
  in hfi1_ipoib_check_queue_depth():
    81  ++txq->tx_ring.sent_txreqs;
    83  !atomic_xchg(&txq->tx_ring.ring_full, 1)) {
  in hfi1_ipoib_check_queue_stopped():
    108 atomic_xchg(&txq->tx_ring.ring_full, 0)) {
  in hfi1_ipoib_drain_tx_ring():
    136 struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring;
  [all …]
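The hfi1 hits show a nesting stop/wake scheme: stops is an atomic counter, only the 0-to-1 transition (atomic_inc_return() == 1) actually stops the netdev queue, and only the 1-to-0 transition (atomic_dec_and_test()) wakes it, so independent reasons to pause the queue can overlap safely. A standalone C11 sketch of the same counting discipline, with printf standing in for the netif calls:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int stops;

    static void stop_txq(void)
    {
            /* only the first stopper touches the queue */
            if (atomic_fetch_add(&stops, 1) + 1 == 1)
                    printf("netif_stop_subqueue()\n");
    }

    static void wake_txq(void)
    {
            /* only the last waker restarts it */
            if (atomic_fetch_sub(&stops, 1) - 1 == 0)
                    printf("netif_wake_subqueue()\n");
    }

    int main(void)
    {
            stop_txq();     /* stops the queue */
            stop_txq();     /* nested: no-op */
            wake_txq();     /* one stopper remains: no-op */
            wake_txq();     /* wakes the queue */
            return 0;
    }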
/linux/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c:
  in mcp251xfd_get_tx_obj_next():
    21  mcp251xfd_tx_obj *mcp251xfd_get_tx_obj_next(struct mcp251xfd_tx_ring *tx_ring)
    25  tx_head = mcp251xfd_get_tx_head(tx_ring);
    27  return &tx_ring->obj[tx_head];
  in mcp251xfd_tx_failure_drop():
    135 struct mcp251xfd_tx_ring *tx_ring,
    143 tx_ring->head--;
    145 tx_head = mcp251xfd_get_tx_head(tx_ring);
  in mcp251xfd_tx_obj_write_sync():
    159 struct mcp251xfd_tx_ring *tx_ring = priv->tx;
    164 mcp251xfd_tx_failure_drop(priv, tx_ring, err);
  in mcp251xfd_tx_busy():
    174 struct mcp251xfd_tx_ring *tx_ring)
    176 if (mcp251xfd_get_tx_free(tx_ring) > 0)
  [all …]
/linux/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c:
  in mcp251xfd_get_tef_len():
    119 const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
    120 const u8 shift = tx_ring->obj_num_shift_to_u8;
    134 mcp251xfd_get_tx_free(tx_ring) == 0) {
    135 *len_p = tx_ring->obj_num;
    150 BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(chip_tx_tail));
    151 BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(tail));
    152 BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(len));
    174 mcp251xfd_get_tx_free(tx_ring) == 0)
    175 len = tx_ring->obj_num;
  in mcp251xfd_tef_obj_read():
    187 const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
  [all …]
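The BUILD_BUG_ON() lines in mcp251xfd_get_tef_len() pin the widths of the fields its index arithmetic mixes: if a later patch widens any one of them, the driver stops compiling instead of silently truncating. A small kernel-context sketch of the idiom; the struct and field names are hypothetical:

    #include <linux/build_bug.h>
    #include <linux/types.h>

    struct example_counters {
            u8 head;        /* free-running producer index */
            u8 tail;        /* free-running consumer index */
    };

    /* head - tail is only correct modulo 256 if both fields stay the
     * same width, so pin that at compile time.
     */
    static u8 example_fill_level(const struct example_counters *c)
    {
            BUILD_BUG_ON(sizeof(c->head) != sizeof(c->tail));
            return c->head - c->tail;
    }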
/linux/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h:
  in i40e_update_tx_stats():
    46  static inline void i40e_update_tx_stats(struct i40e_ring *tx_ring,
    50  u64_stats_update_begin(&tx_ring->syncp);
    51  tx_ring->stats.bytes += total_bytes;
    52  tx_ring->stats.packets += total_packets;
    53  u64_stats_update_end(&tx_ring->syncp);
    54  tx_ring->q_vector->tx.total_bytes += total_bytes;
    55  tx_ring->q_vector->tx.total_packets += total_packets;
  in i40e_arm_wb():
    66  static inline void i40e_arm_wb(struct i40e_ring *tx_ring,
    70  if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
    76  unsigned int j = i40e_get_tx_pending(tx_ring, false);
  [all …]
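i40e_update_tx_stats() above is the u64_stats_sync discipline: on 32-bit kernels a concurrent reader could tear a 64-bit counter, so the writer brackets its updates with u64_stats_update_begin()/end() and readers retry on the sequence count. A hedged sketch of the writer side; the struct name is illustrative:

    #include <linux/u64_stats_sync.h>

    struct example_ring_stats {
            u64 packets;
            u64 bytes;
            struct u64_stats_sync syncp;
    };

    /* Hot-path writer: the begin/end pair guarantees a reader sees
     * packets and bytes from the same update.
     */
    static void example_update_stats(struct example_ring_stats *s,
                                     unsigned int pkts, unsigned int bytes)
    {
            u64_stats_update_begin(&s->syncp);
            s->packets += pkts;
            s->bytes += bytes;
            u64_stats_update_end(&s->syncp);
    }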
/linux/drivers/net/ethernet/intel/i40e/i40e_txrx.c:
  in i40e_fdir():
    23  static void i40e_fdir(struct i40e_ring *tx_ring,
    27  struct i40e_pf *pf = tx_ring->vsi->back;
    32  i = tx_ring->next_to_use;
    33  fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
    36  tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
  in i40e_program_fdir_filter():
    88  struct i40e_ring *tx_ring;
    100 tx_ring = vsi->tx_rings[0];
    101 dev = tx_ring->dev;
    104 for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
    116 i = tx_ring->next_to_use;
  [all …]
/linux/drivers/net/ethernet/intel/i40e/i40e_xsk.c:
  in i40e_clean_xdp_tx_buffer():
    612 static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
    616 tx_ring->xdp_tx_active--;
    617 dma_unmap_single(tx_ring->dev,
  in i40e_clean_xdp_tx_irq():
    630 bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring)
    632 struct xsk_buff_pool *bp = tx_ring->xsk_pool;
    634 u32 head_idx = i40e_get_head(tx_ring);
    638 if (head_idx < tx_ring->next_to_clean)
    639 head_idx += tx_ring->count;
    640 completed_frames = head_idx - tx_ring->next_to_clean;
    645 if (likely(!tx_ring->xdp_tx_active)) {
  [all …]
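i40e_clean_xdp_tx_irq() above derives completions from a hardware head pointer rather than per-descriptor writeback: when the head index has wrapped below next_to_clean, it is biased by the ring size before subtracting. A runnable sketch of that arithmetic:

    #include <assert.h>
    #include <stdint.h>

    /* Number of descriptors the hardware has consumed since the last
     * clean, following the wrap handling in i40e_clean_xdp_tx_irq().
     */
    static uint32_t completed_frames(uint32_t head_idx, uint32_t next_to_clean,
                                     uint32_t count)
    {
            if (head_idx < next_to_clean)
                    head_idx += count;      /* head wrapped past the ring end */
            return head_idx - next_to_clean;
    }

    int main(void)
    {
            assert(completed_frames(10, 5, 512) == 5);      /* no wrap */
            assert(completed_frames(3, 510, 512) == 5);     /* wrapped */
            return 0;
    }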
/linux/drivers/net/ethernet/freescale/enetc/enetc.c:
  in enetc_rx_ring_from_xdp_tx_ring():
    95  struct enetc_bdr *tx_ring)
    97  int index = &priv->tx_ring[tx_ring->index] - priv->xdp_tx_ring;
  in enetc_unmap_tx_buff():
    119 static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
    127 dma_unmap_page(tx_ring->dev, tx_swbd->dma,
    131 dma_unmap_single(tx_ring->dev, tx_swbd->dma,
  in enetc_free_tx_frame():
    136 static void enetc_free_tx_frame(struct enetc_bdr *tx_ring,
    143 enetc_unmap_tx_buff(tx_ring, tx_swbd);
  in enetc_update_tx_ring_tail():
    155 static void enetc_update_tx_ring_tail(struct enetc_bdr *tx_ring)
    158 enetc_wr_reg_hot(tx_ring->tpir, tx_ring->next_to_use);
  in enetc_unwind_tx_frame():
    221 static void enetc_unwind_tx_frame(struct enetc_bdr *tx_ring, int count, int i)
  [all …]
/linux/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c:
  in qlcnic_82xx_change_filter():
    271 u16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
    280 producer = tx_ring->producer;
    281 hwdesc = &tx_ring->desc_head[tx_ring->producer];
    297 tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
  in qlcnic_send_filter():
    304 struct qlcnic_host_tx_ring *tx_ring)
    338 vlan_id, tx_ring);
    353 qlcnic_change_filter(adapter, &src_addr, vlan_id, tx_ring);
  in qlcnic_tx_encap_pkt():
    373 struct qlcnic_host_tx_ring *tx_ring)
    377 u32 producer = tx_ring->producer;
    407 hwdesc = &tx_ring->desc_head[producer];
  [all …]
/linux/drivers/net/ethernet/mscc/ocelot_fdma.c:
  in ocelot_fdma_tx_ring_free():
    69  struct ocelot_fdma_tx_ring *tx_ring = &fdma->tx_ring;
    71  if (tx_ring->next_to_use >= tx_ring->next_to_clean)
    73  (tx_ring->next_to_use - tx_ring->next_to_clean) - 1;
    75  return tx_ring->next_to_clean - tx_ring->next_to_use - 1;
  in ocelot_fdma_tx_ring_empty():
    80  struct ocelot_fdma_tx_ring *tx_ring = &fdma->tx_ring;
    82  return tx_ring->next_to_clean == tx_ring->next_to_use;
  in ocelot_fdma_tx_cleanup():
    484 struct ocelot_fdma_tx_ring *tx_ring;
    495 tx_ring = &fdma->tx_ring;
    501 ntc = tx_ring->next_to_clean;
    502 dcb = &tx_ring->dcbs[ntc];
  [all …]
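ocelot_fdma_tx_ring_free() uses bounded indices instead of free-running pointers, so the free-space computation needs two cases, and one slot is kept permanently unused so that next_to_use == next_to_clean can unambiguously mean "empty" rather than "full". A runnable sketch, assuming a fixed ring size:

    #include <assert.h>

    #define RING_SIZE 128

    /* Free entries between bounded producer (ntu) and consumer (ntc)
     * indices, in the style of ocelot_fdma_tx_ring_free().
     */
    static int tx_ring_free(int ntu, int ntc)
    {
            if (ntu >= ntc)
                    return RING_SIZE - (ntu - ntc) - 1;
            return ntc - ntu - 1;
    }

    int main(void)
    {
            assert(tx_ring_free(0, 0) == RING_SIZE - 1);    /* empty */
            assert(tx_ring_free(5, 6) == 0);                /* full */
            assert(tx_ring_free(10, 4) == RING_SIZE - 7);
            return 0;
    }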
/linux/drivers/net/ethernet/intel/igc/igc_xdp.c:
  in igc_xdp_set_prog():
    31  igc_disable_tx_ring(adapter->tx_ring[i]);
    48  igc_enable_tx_ring(adapter->tx_ring[i]);
  in igc_xdp_enable_pool():
    61  struct igc_ring *rx_ring, *tx_ring;
    90  tx_ring = adapter->tx_ring[queue_id];
    96  igc_disable_tx_ring(tx_ring);
    101 set_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags);
    106 igc_enable_tx_ring(tx_ring);
  in igc_xdp_disable_pool():
    120 struct igc_ring *rx_ring, *tx_ring;
    136 tx_ring = adapter->tx_ring[queue_id];
    142 igc_disable_tx_ring(tx_ring);
  [all …]
/linux/drivers/net/ethernet/intel/igc/igc_dump.c:
  in igc_rings_dump():
    118 struct igc_ring *tx_ring;
    138 tx_ring = adapter->tx_ring[n];
    139 buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
    142 n, tx_ring->next_to_use, tx_ring->next_to_clean,
    167 tx_ring = adapter->tx_ring[n];
    170 tx_ring->queue_index);
    174 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
    178 tx_desc = IGC_TX_DESC(tx_ring, i);
    179 buffer_info = &tx_ring->tx_buffer_info[i];
    181 if (i == tx_ring->next_to_use &&
  [all …]
/linux/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c:
  in ixgbevf_check_tx_hang():
    214 static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring)
    216 u32 tx_done = ixgbevf_get_tx_completed(tx_ring);
    217 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
    218 u32 tx_pending = ixgbevf_get_tx_pending(tx_ring);
    220 clear_check_for_tx_hang(tx_ring);
    230 &tx_ring->state);
    233 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state);
    236 tx_ring->tx_stats.tx_done_old = tx_done;
  in ixgbevf_clean_tx_irq():
    269 struct ixgbevf_ring *tx_ring, int napi_budget)
    275 unsigned int budget = tx_ring->count / 2;
  [all …]
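ixgbevf_check_tx_hang() is a two-strike heuristic: the ring is reported hung only when the completed count has not advanced since the previous check while descriptors are still pending; any progress re-records tx_done_old and disarms the check. A standalone sketch of the same state machine, with illustrative names:

    #include <stdbool.h>
    #include <stdint.h>

    struct hang_state {
            uint32_t tx_done_old;   /* completed count at the previous check */
            bool armed;             /* set after the first no-progress check */
    };

    static bool check_tx_hang(struct hang_state *st, uint32_t tx_done,
                              uint32_t tx_pending)
    {
            if (st->tx_done_old == tx_done && tx_pending) {
                    /* no progress: arm on the first hit, report on the second */
                    if (st->armed)
                            return true;
                    st->armed = true;
                    return false;
            }

            /* progress was made: remember the new count and disarm */
            st->tx_done_old = tx_done;
            st->armed = false;
            return false;
    }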
/linux/drivers/net/ethernet/intel/igb/igb_xsk.c:
  in igb_txrx_ring_disable():
    36  struct igb_ring *tx_ring = adapter->tx_ring[qid];
    40  set_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags);
    42  wr32(E1000_TXDCTL(tx_ring->reg_idx), 0);
    50  igb_clean_tx_ring(tx_ring);
    54  memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats));
  in igb_txrx_ring_enable():
    59  struct igb_ring *tx_ring = adapter->tx_ring[qid];
    62  igb_configure_tx_ring(adapter, tx_ring);
    67  clear_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags);
  in igb_xmit_zc():
    465 bool igb_xmit_zc(struct igb_ring *tx_ring, struct xsk_buff_pool *xsk_pool)
    467 unsigned int budget = igb_desc_unused(tx_ring);
  [all …]
/linux/drivers/net/ethernet/agere/et131x.c:
    359 struct tx_ring {
    489 struct tx_ring tx_ring;
  in et131x_config_tx_dma_regs():
    1639 struct tx_ring *tx_ring = &adapter->tx_ring;
    1642 writel(upper_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_hi);
    1643 writel(lower_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_lo);
    1649 writel(upper_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_hi);
    1650 writel(lower_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_lo);
    1652 *tx_ring->tx_status = 0;
    1655 tx_ring->send_idx = 0;
  in et131x_init_send():
    1752 struct tx_ring *tx_ring = &adapter->tx_ring;
  [all …]
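et131x_config_tx_dma_regs() shows how a 64-bit descriptor-ring DMA address is handed to a device that exposes two 32-bit registers, via the kernel's upper_32_bits()/lower_32_bits() helpers. A kernel-context sketch; the register pointers are illustrative:

    #include <linux/io.h>
    #include <linux/kernel.h>

    /* Program a 64-bit DMA address into a hi/lo register pair. */
    static void example_program_ring_base(void __iomem *base_hi,
                                          void __iomem *base_lo,
                                          dma_addr_t pa)
    {
            writel(upper_32_bits(pa), base_hi);     /* bits 63:32 */
            writel(lower_32_bits(pa), base_lo);     /* bits 31:0 */
    }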
/linux/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c:
  in fm10k_setup_tx_resources():
    15  int fm10k_setup_tx_resources(struct fm10k_ring *tx_ring)
    17  struct device *dev = tx_ring->dev;
    20  size = sizeof(struct fm10k_tx_buffer) * tx_ring->count;
    22  tx_ring->tx_buffer = vzalloc(size);
    23  if (!tx_ring->tx_buffer)
    26  u64_stats_init(&tx_ring->syncp);
    29  tx_ring->size = tx_ring->count * sizeof(struct fm10k_tx_desc);
    30  tx_ring->size = ALIGN(tx_ring->size, 4096);
    32  tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
    33  &tx_ring->dma, GFP_KERNEL);
  [all …]
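fm10k_setup_tx_resources() splits ring allocation in two: vzalloc() for the CPU-only per-descriptor bookkeeping array, and dma_alloc_coherent() for the device-visible descriptor ring, with the ring size rounded up to a 4 KiB boundary. A kernel-context sketch of the same split; every struct name here is hypothetical:

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/vmalloc.h>

    struct example_desc {
            __le64 buffer_addr;
            __le64 flags;
    };

    struct example_buffer {
            struct sk_buff *skb;    /* CPU-side state, never seen by the device */
    };

    struct example_ring {
            struct device *dev;
            unsigned int count;
            struct example_buffer *tx_buffer;
            struct example_desc *desc;
            dma_addr_t dma;
            unsigned int size;
    };

    static int example_setup_tx_resources(struct example_ring *ring)
    {
            /* software bookkeeping: plain vmalloc'ed, zeroed memory */
            ring->tx_buffer = vzalloc(ring->count * sizeof(*ring->tx_buffer));
            if (!ring->tx_buffer)
                    return -ENOMEM;

            /* hardware descriptor ring: page-aligned coherent DMA memory */
            ring->size = ALIGN(ring->count * sizeof(struct example_desc), 4096);
            ring->desc = dma_alloc_coherent(ring->dev, ring->size, &ring->dma,
                                            GFP_KERNEL);
            if (!ring->desc) {
                    vfree(ring->tx_buffer);
                    return -ENOMEM;
            }

            return 0;
    }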
/linux/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c:
  in ixgbe_clean_xdp_tx_buffer():
    448 static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring,
    452 dma_unmap_single(tx_ring->dev,
  in ixgbe_clean_xdp_tx_irq():
    459 struct ixgbe_ring *tx_ring, int napi_budget)
    461 u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
    463 struct xsk_buff_pool *pool = tx_ring->xsk_pool;
    468 tx_bi = &tx_ring->tx_buffer_info[ntc];
    469 tx_desc = IXGBE_TX_DESC(tx_ring, ntc);
    479 ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
    488 if (unlikely(ntc == tx_ring->count)) {
    490 tx_bi = tx_ring->tx_buffer_info;
  [all …]
/linux/drivers/net/ethernet/apm/xgene-v2/main.c:
  in xge_start_xmit():
    174 struct xge_desc_ring *tx_ring;
    182 tx_ring = pdata->tx_ring;
    183 tail = tx_ring->tail;
    185 raw_desc = &tx_ring->raw_desc[tail];
    208 tx_ring->pkt_info[tail].skb = skb;
    209 tx_ring->pkt_info[tail].dma_addr = dma_addr;
    210 tx_ring->pkt_info[tail].pkt_buf = pkt_buf;
    220 tx_ring->tail = (tail + 1) & (XGENE_ENET_NUM_DESC - 1);
  in xge_txc_poll():
    238 struct xge_desc_ring *tx_ring;
    246 tx_ring = pdata->tx_ring;
  [all …]
/linux/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c:
    94  struct prestera_tx_ring tx_ring;
  in prestera_sdma_tx_recycle_work_fn():
    494 struct prestera_tx_ring *tx_ring;
    500 tx_ring = &sdma->tx_ring;
    503 struct prestera_sdma_buf *buf = &tx_ring->bufs[b];
  in prestera_sdma_tx_init():
    525 struct prestera_tx_ring *tx_ring = &sdma->tx_ring;
    532 tx_ring->bufs = kmalloc_array(bnum, sizeof(*head), GFP_KERNEL);
    533 if (!tx_ring->bufs)
    536 tail = &tx_ring->bufs[bnum - 1];
    537 head = &tx_ring->bufs[0];
    541 tx_ring->max_burst = PRESTERA_SDMA_TX_MAX_BURST;
  [all …]