| /linux/drivers/infiniband/hw/hfi1/ |
| H A D | ipoib_tx.c | 29 struct hfi1_ipoib_txq *txq; member 47 static u64 hfi1_ipoib_used(struct hfi1_ipoib_txq *txq) in hfi1_ipoib_used() argument 49 return hfi1_ipoib_txreqs(txq->tx_ring.sent_txreqs, in hfi1_ipoib_used() 50 txq->tx_ring.complete_txreqs); in hfi1_ipoib_used() 53 static void hfi1_ipoib_stop_txq(struct hfi1_ipoib_txq *txq) in hfi1_ipoib_stop_txq() argument 55 trace_hfi1_txq_stop(txq); in hfi1_ipoib_stop_txq() 56 if (atomic_inc_return(&txq->tx_ring.stops) == 1) in hfi1_ipoib_stop_txq() 57 netif_stop_subqueue(txq->priv->netdev, txq->q_idx); in hfi1_ipoib_stop_txq() 60 static void hfi1_ipoib_wake_txq(struct hfi1_ipoib_txq *txq) in hfi1_ipoib_wake_txq() argument 62 trace_hfi1_txq_wake(txq); in hfi1_ipoib_wake_txq() [all …]
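
The hfi1 excerpt above shows a reference-counted stop/wake scheme: any number of independent throttling conditions may "stop" the queue, but the netdev subqueue is only actually stopped on the 0 -> 1 transition of the `stops` counter and woken on the 1 -> 0 transition. A minimal standalone sketch of that pattern, with C11 atomics standing in for the kernel's `atomic_t`; all names here are illustrative, not the hfi1 driver's API:

```c
#include <stdatomic.h>
#include <stdio.h>

struct demo_txq {
	atomic_int stops;	/* mirrors txq->tx_ring.stops */
};

static void demo_stop_txq(struct demo_txq *txq)
{
	/* atomic_fetch_add returns the old value; +1 gives the new one,
	 * matching the kernel's atomic_inc_return() == 1 check above */
	if (atomic_fetch_add(&txq->stops, 1) + 1 == 1)
		printf("netif_stop_subqueue()\n");	/* first stopper acts */
}

static void demo_wake_txq(struct demo_txq *txq)
{
	if (atomic_fetch_sub(&txq->stops, 1) - 1 == 0)
		printf("netif_wake_subqueue()\n");	/* last stopper wakes */
}

int main(void)
{
	struct demo_txq txq = { .stops = 0 };

	demo_stop_txq(&txq);	/* ring full      -> stops subqueue */
	demo_stop_txq(&txq);	/* flow throttled -> no-op          */
	demo_wake_txq(&txq);	/* throttle clear -> still stopped  */
	demo_wake_txq(&txq);	/* ring drained   -> wakes subqueue */
	return 0;
}
```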
|
| /linux/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/ |
| H A D | tx.c | 81 struct iwl_txq *txq) in iwl_pcie_txq_inc_wr_ptr() argument 84 int txq_id = txq->id; in iwl_pcie_txq_inc_wr_ptr() 86 lockdep_assert_held(&txq->lock); in iwl_pcie_txq_inc_wr_ptr() 109 txq->need_update = true; in iwl_pcie_txq_inc_wr_ptr() 118 IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr); in iwl_pcie_txq_inc_wr_ptr() 119 if (!txq->block) in iwl_pcie_txq_inc_wr_ptr() 121 txq->write_ptr | (txq_id << 8)); in iwl_pcie_txq_inc_wr_ptr() 130 struct iwl_txq *txq = trans_pcie->txqs.txq[i]; in iwl_pcie_txq_check_wrptrs() local 135 spin_lock_bh(&txq->lock); in iwl_pcie_txq_check_wrptrs() 136 if (txq->need_update) { in iwl_pcie_txq_check_wrptrs() [all …]
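
The `iwl_pcie_txq_inc_wr_ptr()` lines show a deferred doorbell: the TX path marks the queue dirty (`need_update`) under its lock, and `iwl_pcie_txq_check_wrptrs()` later publishes `write_ptr | (txq_id << 8)` once, however many frames have accumulated. A toy userspace model of that batching, with pthread mutexes standing in for spinlocks and all names illustrative:

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NQUEUES 4

struct demo_txq {
	pthread_mutex_t lock;
	int id;
	int write_ptr;
	bool need_update;
};

static struct demo_txq txqs[NQUEUES];

static void inc_wr_ptr(struct demo_txq *txq)
{
	pthread_mutex_lock(&txq->lock);
	txq->write_ptr++;
	txq->need_update = true;	/* defer the doorbell write */
	pthread_mutex_unlock(&txq->lock);
}

static void check_wrptrs(void)
{
	for (int i = 0; i < NQUEUES; i++) {
		struct demo_txq *txq = &txqs[i];

		pthread_mutex_lock(&txq->lock);
		if (txq->need_update) {
			/* the driver writes this value to a doorbell register */
			printf("doorbell: 0x%x\n",
			       txq->write_ptr | (txq->id << 8));
			txq->need_update = false;
		}
		pthread_mutex_unlock(&txq->lock);
	}
}

int main(void)
{
	for (int i = 0; i < NQUEUES; i++) {
		pthread_mutex_init(&txqs[i].lock, NULL);
		txqs[i].id = i;
	}
	inc_wr_ptr(&txqs[1]);
	inc_wr_ptr(&txqs[1]);
	check_wrptrs();		/* one doorbell write for two enqueues */
	return 0;
}
```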
|
| H A D | tx-gen2.c | 297 struct iwl_txq *txq, in iwl_txq_gen2_build_tx_amsdu() argument 304 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); in iwl_txq_gen2_build_tx_amsdu() 305 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); in iwl_txq_gen2_build_tx_amsdu() 310 tb_phys = iwl_txq_get_first_tb_dma(txq, idx); in iwl_txq_gen2_build_tx_amsdu() 346 memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); in iwl_txq_gen2_build_tx_amsdu() 385 struct iwl_txq *txq, in iwl_txq_gen2_build_tx() argument 393 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); in iwl_txq_gen2_build_tx() 394 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); in iwl_txq_gen2_build_tx() 400 tb_phys = iwl_txq_get_first_tb_dma(txq, idx); in iwl_txq_gen2_build_tx() 403 memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); in iwl_txq_gen2_build_tx() [all …]
|
| /linux/drivers/net/ethernet/huawei/hinic/ |
| H A D | hinic_tx.c | 77 static void hinic_txq_clean_stats(struct hinic_txq *txq) in hinic_txq_clean_stats() argument 79 struct hinic_txq_stats *txq_stats = &txq->txq_stats; in hinic_txq_clean_stats() 96 void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats) in hinic_txq_get_stats() argument 98 struct hinic_txq_stats *txq_stats = &txq->txq_stats; in hinic_txq_get_stats() 116 static void txq_stats_init(struct hinic_txq *txq) in txq_stats_init() argument 118 struct hinic_txq_stats *txq_stats = &txq->txq_stats; in txq_stats_init() 121 hinic_txq_clean_stats(txq); in txq_stats_init() 499 struct hinic_txq *txq; in hinic_lb_xmit_frame() local 502 txq = &nic_dev->txqs[q_id]; in hinic_lb_xmit_frame() 503 qp = container_of(txq->sq, struct hinic_qp, sq); in hinic_lb_xmit_frame() [all …]
|
| /linux/drivers/net/ethernet/huawei/hinic3/ |
| H A D | hinic3_tx.c | 36 static void hinic3_txq_stats_init(struct hinic3_txq *txq) in hinic3_txq_stats_init() argument 38 struct hinic3_txq_stats *txq_stats = &txq->txq_stats; in hinic3_txq_stats_init() 49 struct hinic3_txq *txq; in hinic3_alloc_txqs() local 56 txq = &nic_dev->txqs[q_id]; in hinic3_alloc_txqs() 57 txq->netdev = netdev; in hinic3_alloc_txqs() 58 txq->q_id = q_id; in hinic3_alloc_txqs() 59 txq->q_depth = nic_dev->q_params.sq_depth; in hinic3_alloc_txqs() 60 txq->q_mask = nic_dev->q_params.sq_depth - 1; in hinic3_alloc_txqs() 61 txq->dev = &pdev->dev; in hinic3_alloc_txqs() 63 hinic3_txq_stats_init(txq); in hinic3_alloc_txqs() [all …]
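
The hinic3 allocation above sets `q_mask = sq_depth - 1`, which only works when the ring depth is a power of two: masking then replaces a modulo on every index advance. A standalone illustration (names are mine, not hinic3's):

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t q_depth = 1024;	/* must be a power of two */
	uint16_t q_mask = q_depth - 1;

	assert((q_depth & (q_depth - 1)) == 0);

	uint16_t prod = 1023;
	prod = (prod + 1) & q_mask;	/* wraps to 0 without '%' */
	printf("next producer index: %u\n", prod);
	return 0;
}
```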
|
| /linux/drivers/net/wireless/ath/ath9k/ |
| H A D | xmit.c | 56 static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, 59 int tx_flags, struct ath_txq *txq, 62 struct ath_txq *txq, struct list_head *bf_q, 65 static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, 72 struct ath_txq *txq, 106 void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq) in ath_txq_unlock_complete() argument 107 __releases(&txq->axq_lock) in ath_txq_unlock_complete() 114 skb_queue_splice_init(&txq->complete_q, &q); in ath_txq_unlock_complete() 115 spin_unlock_bh(&txq->axq_lock); in ath_txq_unlock_complete() 134 struct ath_txq *txq = tid->txq; in ath9k_wake_tx_queue() local [all …]
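
`ath_txq_unlock_complete()` above splices the per-queue completion list into a local list while holding the lock, then processes completions after dropping it, keeping the locked section short. A userspace rendering of the idiom with a singly linked list and a pthread mutex (illustrative names; the driver uses skb queues and a BH spinlock):

```c
#include <pthread.h>
#include <stdio.h>

struct node { int id; struct node *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *complete_q;	/* filled by the completion ("IRQ") side */

static void unlock_complete(void)
{
	struct node *q;

	pthread_mutex_lock(&lock);
	q = complete_q;		/* splice: steal the whole list at once */
	complete_q = NULL;
	pthread_mutex_unlock(&lock);

	while (q) {		/* heavy per-packet work, lock not held */
		printf("complete %d\n", q->id);
		q = q->next;
	}
}

int main(void)
{
	struct node n2 = { 2, NULL }, n1 = { 1, &n2 };

	complete_q = &n1;	/* single-threaded demo; real code locks here */
	unlock_complete();
	return 0;
}
```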
|
| /linux/drivers/net/ethernet/marvell/ |
| H A D | mv643xx_eth.c | 179 #define IS_TSO_HEADER(txq, addr) \ argument 180 ((addr >= txq->tso_hdrs_dma) && \ 181 (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE)) 408 struct tx_queue txq[8]; member 446 static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq) in txq_to_mp() argument 448 return container_of(txq, struct mv643xx_eth_private, txq[txq->index]); in txq_to_mp() 467 static void txq_reset_hw_ptr(struct tx_queue *txq) in txq_reset_hw_ptr() argument 469 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_reset_hw_ptr() 472 addr = (u32)txq->tx_desc_dma; in txq_reset_hw_ptr() 473 addr += txq->tx_curr_desc * sizeof(struct tx_desc); in txq_reset_hw_ptr() [all …]
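
`txq_to_mp()` above recovers the parent private struct from a pointer to one element of its embedded `txq[8]` array, by using the element's own `->index` inside `offsetof()`. A userspace re-creation of the trick; note that a variable array index inside `offsetof` is a GCC/Clang extension, which is exactly what the kernel relies on here (struct names are mine):

```c
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct tx_queue {
	int index;
};

struct eth_private {
	int magic;
	struct tx_queue txq[8];
};

static struct eth_private *txq_to_priv(struct tx_queue *txq)
{
	/* offsetof(type, txq[txq->index]) subtracts exactly the right
	 * amount because txq->index records which array slot 'txq' is */
	return container_of(txq, struct eth_private, txq[txq->index]);
}

int main(void)
{
	struct eth_private priv = { .magic = 42 };

	for (int i = 0; i < 8; i++)
		priv.txq[i].index = i;

	printf("magic via txq[5]: %d\n", txq_to_priv(&priv.txq[5])->magic);
	return 0;
}
```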
|
| H A D | mvneta.c | 135 #define MVNETA_CPU_TXQ_ACCESS(txq) BIT(txq + 8) argument 774 static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq) in mvneta_txq_inc_get() argument 776 txq->txq_get_index++; in mvneta_txq_inc_get() 777 if (txq->txq_get_index == txq->size) in mvneta_txq_inc_get() 778 txq->txq_get_index = 0; in mvneta_txq_inc_get() 782 static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq) in mvneta_txq_inc_put() argument 784 txq->txq_put_index++; in mvneta_txq_inc_put() 785 if (txq->txq_put_index == txq->size) in mvneta_txq_inc_put() 786 txq->txq_put_index = 0; in mvneta_txq_inc_put() 964 struct mvneta_tx_queue *txq, in mvneta_txq_pend_desc_add() argument [all …]
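
In contrast to the mask-based wrap, mvneta's `txq_inc_get`/`txq_inc_put` above advance their indices with a compare-and-reset, which also works when the ring size is not a power of two. Standalone form of the same logic (names are mine):

```c
#include <stdio.h>

struct ring {
	int size;
	int put;
};

static void ring_inc_put(struct ring *r)
{
	r->put++;
	if (r->put == r->size)	/* wrap without requiring power-of-two size */
		r->put = 0;
}

int main(void)
{
	struct ring r = { .size = 3, .put = 2 };

	ring_inc_put(&r);
	printf("put after wrap: %d\n", r.put);	/* 0 */
	return 0;
}
```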
|
| /linux/drivers/net/ethernet/qlogic/qede/ |
| H A D | qede_fp.c | 77 int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len) in qede_free_tx_pkt() argument 79 u16 idx = txq->sw_tx_cons; in qede_free_tx_pkt() 80 struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb; in qede_free_tx_pkt() 85 bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD; in qede_free_tx_pkt() 91 idx, txq->sw_tx_cons, txq->sw_tx_prod); in qede_free_tx_pkt() 97 first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt() 105 qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt() 115 qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt() 121 qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt() 125 txq->sw_tx_ring.skbs[idx].skb = NULL; in qede_free_tx_pkt() [all …]
|
| H A D | qede_main.c | 525 struct qede_fastpath *fp, struct qede_tx_queue *txq) in qede_txq_fp_log_metadata() argument 527 struct qed_chain *p_chain = &txq->tx_pbl; in qede_txq_fp_log_metadata() 532 fp->id, fp->sb_info->igu_sb_id, txq->index, txq->ndev_txq_id, txq->cos, in qede_txq_fp_log_metadata() 538 le16_to_cpu(*txq->hw_cons_ptr), txq->sw_tx_prod, txq->sw_tx_cons, in qede_txq_fp_log_metadata() 543 qede_tx_log_print(struct qede_dev *edev, struct qede_fastpath *fp, struct qede_tx_queue *txq) in qede_tx_log_print() argument 555 sb_dbg.igu_prod, sb_dbg.igu_cons, sb_dbg.pi[TX_PI(txq->cos)]); in qede_tx_log_print() 560 txq->index, le16_to_cpu(*txq->hw_cons_ptr), in qede_tx_log_print() 561 qed_chain_get_cons_idx(&txq->tx_pbl), in qede_tx_log_print() 562 qed_chain_get_prod_idx(&txq->tx_pbl), jiffies); in qede_tx_log_print() 566 txq->index, fp->sb_info->igu_sb_id, in qede_tx_log_print() [all …]
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/en/ |
| H A D | selq.h | 30 static inline u16 mlx5e_txq_to_ch_ix(u16 txq, u16 num_channels) in mlx5e_txq_to_ch_ix() argument 32 while (unlikely(txq >= num_channels)) in mlx5e_txq_to_ch_ix() 33 txq -= num_channels; in mlx5e_txq_to_ch_ix() 34 return txq; in mlx5e_txq_to_ch_ix() 37 static inline u16 mlx5e_txq_to_ch_ix_htb(u16 txq, u16 num_channels) in mlx5e_txq_to_ch_ix_htb() argument 39 if (unlikely(txq >= num_channels)) { in mlx5e_txq_to_ch_ix_htb() 40 if (unlikely(txq >= num_channels << 3)) in mlx5e_txq_to_ch_ix_htb() 41 txq %= num_channels; in mlx5e_txq_to_ch_ix_htb() 44 txq -= num_channels; in mlx5e_txq_to_ch_ix_htb() 45 while (txq >= num_channels); in mlx5e_txq_to_ch_ix_htb() [all …]
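
The two mlx5e helpers above fold a queue index back into the channel range. The first assumes `txq` is at most a few multiples of `num_channels` and subtracts in a loop; the HTB variant falls back to `%` once the index exceeds 8x the channel count, paying for a divide only when the loop would be slow. A standalone re-statement of both strategies:

```c
#include <stdint.h>
#include <stdio.h>

static uint16_t txq_to_ch(uint16_t txq, uint16_t num_channels)
{
	while (txq >= num_channels)	/* cheap when txq is close to range */
		txq -= num_channels;
	return txq;
}

static uint16_t txq_to_ch_htb(uint16_t txq, uint16_t num_channels)
{
	if (txq >= num_channels) {
		if (txq >= (uint16_t)(num_channels << 3))
			txq %= num_channels;	/* far out: one divide */
		else
			do {
				txq -= num_channels;
			} while (txq >= num_channels);
	}
	return txq;
}

int main(void)
{
	printf("%u %u\n", txq_to_ch(10, 4), txq_to_ch_htb(100, 4)); /* 2 0 */
	return 0;
}
```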
|
| /linux/drivers/net/ethernet/atheros/alx/ |
| H A D | main.c | 53 static void alx_free_txbuf(struct alx_tx_queue *txq, int entry) in alx_free_txbuf() argument 55 struct alx_buffer *txb = &txq->bufs[entry]; in alx_free_txbuf() 58 dma_unmap_single(txq->dev, in alx_free_txbuf() 149 return alx->qnapi[r_idx]->txq; in alx_tx_queue_mapping() 152 static struct netdev_queue *alx_get_tx_queue(const struct alx_tx_queue *txq) in alx_get_tx_queue() argument 154 return netdev_get_tx_queue(txq->netdev, txq->queue_idx); in alx_get_tx_queue() 157 static inline int alx_tpd_avail(struct alx_tx_queue *txq) in alx_tpd_avail() argument 159 if (txq->write_idx >= txq->read_idx) in alx_tpd_avail() 160 return txq->count + txq->read_idx - txq->write_idx - 1; in alx_tpd_avail() 161 return txq->read_idx - txq->write_idx - 1; in alx_tpd_avail() [all …]
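
`alx_tpd_avail()` above is the classic producer/consumer free-space computation for a descriptor ring: one slot is always kept empty so that `write_idx == read_idx` unambiguously means "empty". The same arithmetic, standalone:

```c
#include <stdio.h>

static int tpd_avail(int count, int write_idx, int read_idx)
{
	if (write_idx >= read_idx)
		return count + read_idx - write_idx - 1;
	return read_idx - write_idx - 1;
}

int main(void)
{
	/* 16-entry ring: an empty ring advertises 15 usable slots */
	printf("%d\n", tpd_avail(16, 0, 0));	/* 15 */
	printf("%d\n", tpd_avail(16, 14, 2));	/* 3  */
	printf("%d\n", tpd_avail(16, 2, 14));	/* 11 */
	return 0;
}
```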
|
| /linux/include/trace/events/ |
| H A D | qdisc.h | 16 TP_PROTO(struct Qdisc *qdisc, const struct netdev_queue *txq, 19 TP_ARGS(qdisc, txq, packets, skb), 23 __field(const struct netdev_queue *, txq ) 35 __entry->txq = txq; 38 __entry->ifindex = txq->dev ? txq->dev->ifindex : 0; 41 __entry->txq_state = txq->state; 51 TP_PROTO(struct Qdisc *qdisc, const struct netdev_queue *txq, struct sk_buff *skb), 53 TP_ARGS(qdisc, txq, skb), [all …] |

| /linux/drivers/net/ethernet/freescale/ |
| H A D | fec_main.c | 311 #define IS_TSO_HEADER(txq, addr) \ argument 312 ((addr >= txq->tso_hdrs_dma) && \ 313 (addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE)) 337 static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq) in fec_enet_get_free_txdesc_num() argument 341 entries = (((const char *)txq->dirty_tx - in fec_enet_get_free_txdesc_num() 342 (const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1; in fec_enet_get_free_txdesc_num() 344 return entries >= 0 ? entries : entries + txq->bd.ring_size; in fec_enet_get_free_txdesc_num() 360 struct fec_enet_priv_tx_q *txq; in fec_dump() local 366 txq = fep->tx_queue[0]; in fec_dump() 367 bdp = txq->bd.base; in fec_dump() [all …]
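
`fec_enet_get_free_txdesc_num()` above measures free descriptors by byte pointer difference, shifting by log2 of the descriptor size instead of dividing, and folding a negative result back into range. The same arithmetic on plain integers (names are mine; like the kernel, this relies on arithmetic right shift of negative values, which common compilers provide):

```c
#include <stdio.h>

static int free_txdesc(int ring_size, int dsize_log2,
		       long dirty_off, long cur_off)
{
	int entries = (int)((dirty_off - cur_off) >> dsize_log2) - 1;

	return entries >= 0 ? entries : entries + ring_size;
}

int main(void)
{
	/* 8-byte descriptors (log2 = 3), 64-entry ring:
	 * dirty at byte 256 (slot 32), cur at byte 128 (slot 16) */
	printf("%d\n", free_txdesc(64, 3, 256, 128));	/* 15 */
	/* cur wrapped past dirty: negative diff folds back into range */
	printf("%d\n", free_txdesc(64, 3, 128, 256));	/* 47 */
	return 0;
}
```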
|
| /linux/drivers/net/ethernet/chelsio/cxgb4vf/ |
| H A D | sge.c | 1133 static void txq_stop(struct sge_eth_txq *txq) in txq_stop() argument 1135 netif_tx_stop_queue(txq->txq); in txq_stop() 1136 txq->q.stops++; in txq_stop() 1164 struct sge_eth_txq *txq; in t4vf_eth_xmit() local 1195 txq = &adapter->sge.ethtxq[pi->first_qset + qidx]; in t4vf_eth_xmit() 1205 reclaim_completed_tx(adapter, &txq->q, true); in t4vf_eth_xmit() 1214 credits = txq_avail(&txq->q) - ndesc; in t4vf_eth_xmit() 1223 txq_stop(txq); in t4vf_eth_xmit() 1237 txq->mapping_err++; in t4vf_eth_xmit() 1252 txq_stop(txq); in t4vf_eth_xmit() [all …]
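
The `t4vf_eth_xmit()` lines above reserve descriptors up front: the path reclaims completed work, computes remaining credits, and stops the software queue when a frame would not fit. A simplified skeleton of that control flow (all names illustrative; the real driver stops below a threshold while still sending the current frame):

```c
#include <stdbool.h>
#include <stdio.h>

struct demo_txq {
	int avail;	/* free hardware descriptors */
	bool stopped;
};

static int demo_xmit(struct demo_txq *txq, int ndesc)
{
	int credits = txq->avail - ndesc;

	if (credits < 0) {
		txq->stopped = true;	/* txq_stop(): halt the netdev queue */
		return -1;		/* NETDEV_TX_BUSY in the driver */
	}
	txq->avail = credits;
	if (credits == 0)
		txq->stopped = true;	/* stop early; completions rearm it */
	return 0;
}

int main(void)
{
	struct demo_txq txq = { .avail = 5, .stopped = false };

	printf("%d\n", demo_xmit(&txq, 4));	/* fits: 0 */
	printf("%d\n", demo_xmit(&txq, 4));	/* 1 left: -1, queue stops */
	return 0;
}
```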
|
| /linux/net/sched/ |
| H A D | sch_generic.c | 41 const struct netdev_queue *txq) in qdisc_maybe_clear_missed() argument 55 if (!netif_xmit_frozen_or_stopped(txq)) in qdisc_maybe_clear_missed() 76 const struct netdev_queue *txq = q->dev_queue; in __skb_dequeue_bad_txq() local 88 txq = skb_get_tx_queue(txq->dev, skb); in __skb_dequeue_bad_txq() 89 if (!netif_xmit_frozen_or_stopped(txq)) { in __skb_dequeue_bad_txq() 100 qdisc_maybe_clear_missed(q, txq); in __skb_dequeue_bad_txq() 182 const struct netdev_queue *txq, in try_bulk_dequeue_skb() argument 185 int bytelimit = qdisc_avail_bulklimit(txq) - skb->len; in try_bulk_dequeue_skb() 205 * all skbs in the chain are for the same txq 236 const struct netdev_queue *txq = q->dev_queue; in dequeue_skb() local 320 sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, struct net_device *dev, struct netdev_queue *txq, spinlock_t *root_lock, bool validate) in sch_direct_xmit() argument 396 struct netdev_queue *txq; in qdisc_restart() local 456 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in netif_freeze_queues() local 482 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in netif_unfreeze_queues() local 516 struct netdev_queue *txq; in dev_watchdog() local 1185 struct netdev_queue *txq; in attach_default_qdiscs() local [all …] |
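
`sch_direct_xmit()`/`qdisc_restart()` above implement the core qdisc TX loop: pull a packet from the qdisc, hand it to the device only if the device queue is not frozen or stopped, otherwise put it back for later. A toy single-threaded rendering of that decision, with a flag standing in for `netif_xmit_frozen_or_stopped()` and all names illustrative:

```c
#include <stdbool.h>
#include <stdio.h>

struct pkt { int id; struct pkt *next; };

struct qdisc { struct pkt *head; };

static struct pkt *qdisc_dequeue(struct qdisc *q)
{
	struct pkt *p = q->head;

	if (p)
		q->head = p->next;
	return p;
}

static void qdisc_requeue(struct qdisc *q, struct pkt *p)
{
	p->next = q->head;	/* back to the front, preserves ordering */
	q->head = p;
}

static bool txq_stopped;	/* netif_xmit_frozen_or_stopped() stand-in */

static bool qdisc_restart(struct qdisc *q)
{
	struct pkt *p = qdisc_dequeue(q);

	if (!p)
		return false;		/* nothing to send */
	if (txq_stopped) {
		qdisc_requeue(q, p);	/* driver ring full: try again later */
		return false;
	}
	printf("xmit pkt %d\n", p->id);	/* ndo_start_xmit() in the kernel */
	return true;
}

int main(void)
{
	struct pkt p2 = { 2, NULL }, p1 = { 1, &p2 };
	struct qdisc q = { &p1 };

	qdisc_restart(&q);	/* xmit pkt 1 */
	txq_stopped = true;
	qdisc_restart(&q);	/* pkt 2 requeued, nothing sent */
	txq_stopped = false;
	qdisc_restart(&q);	/* xmit pkt 2 */
	return 0;
}
```
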
| /linux/drivers/net/wireless/ath/ath5k/ |
| H A D | base.c | 769 struct ath5k_txq *txq, int padsize, in ath5k_txbuf_setup() argument 875 spin_lock_bh(&txq->lock); in ath5k_txbuf_setup() 876 list_add_tail(&bf->list, &txq->q); in ath5k_txbuf_setup() 877 txq->txq_len++; in ath5k_txbuf_setup() 878 if (txq->link == NULL) /* is this first packet? */ in ath5k_txbuf_setup() 879 ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr); in ath5k_txbuf_setup() 881 *txq->link = bf->daddr; in ath5k_txbuf_setup() 883 txq->link = &ds->ds_link; in ath5k_txbuf_setup() 884 ath5k_hw_start_tx_dma(ah, txq->qnum); in ath5k_txbuf_setup() 885 spin_unlock_bh(&txq->lock); in ath5k_txbuf_setup() [all …]
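
`ath5k_txbuf_setup()` above chains DMA descriptors: the new buffer's address is written to the queue's hardware TXDP register if it is the first packet, or patched into the previous descriptor's link field otherwise, after which the tail link pointer advances. A pointer-only model of that chaining (names and the `hw_txdp` field are illustrative):

```c
#include <stdint.h>
#include <stdio.h>

struct desc { uint32_t ds_link; /* next descriptor's DMA address */ };

struct demo_txq {
	uint32_t *link;		/* where the next buffer's addr goes */
	uint32_t hw_txdp;	/* stands in for the TXDP register */
};

static void txq_add(struct demo_txq *txq, struct desc *ds, uint32_t daddr)
{
	if (!txq->link)			/* first packet: point HW at it */
		txq->hw_txdp = daddr;
	else
		*txq->link = daddr;	/* patch the previous descriptor */
	txq->link = &ds->ds_link;	/* tail is now this descriptor */
}

int main(void)
{
	struct demo_txq txq = { 0 };
	struct desc d1 = { 0 }, d2 = { 0 };

	txq_add(&txq, &d1, 0x1000);
	txq_add(&txq, &d2, 0x2000);
	/* prints TXDP=0x1000 d1.link=0x2000 */
	printf("TXDP=0x%x d1.link=0x%x\n", txq.hw_txdp, d1.ds_link);
	return 0;
}
```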
|
| /linux/net/core/ |
| H A D | netpoll.c | 63 struct netdev_queue *txq) in netpoll_start_xmit() argument 82 status = netdev_start_xmit(skb, dev, txq, false); in netpoll_start_xmit() 95 while ((skb = skb_dequeue(&npinfo->txq))) { in queue_process() 97 struct netdev_queue *txq; in queue_process() local 112 txq = netdev_get_tx_queue(dev, q_index); in queue_process() 113 HARD_TX_LOCK(dev, txq, smp_processor_id()); in queue_process() 114 if (netif_xmit_frozen_or_stopped(txq) || in queue_process() 115 !dev_xmit_complete(netpoll_start_xmit(skb, dev, txq))) { in queue_process() 116 skb_queue_head(&npinfo->txq, skb); in queue_process() 117 HARD_TX_UNLOCK(dev, txq); in queue_process() [all …]
|
| /linux/drivers/net/ethernet/chelsio/cxgb3/ |
| H A D | sge.c | 176 return container_of(q, struct sge_qset, txq[qidx]); in txq_to_qset() 654 memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET); in t3_reset_qset() 691 if (q->txq[i].desc) { in t3_free_qset() 693 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0); in t3_free_qset() 695 if (q->txq[i].sdesc) { in t3_free_qset() 696 free_tx_desc(adapter, &q->txq[i], in t3_free_qset() 697 q->txq[i].in_use); in t3_free_qset() 698 kfree(q->txq[i].sdesc); in t3_free_qset() 701 q->txq[i].size * in t3_free_qset() 703 q->txq[i].desc, q->txq[i].phys_addr); in t3_free_qset() [all …]
|
| /linux/drivers/net/wireless/intel/iwlwifi/mld/ |
| H A D | tx.h | 46 iwl_mld_txq_from_mac80211(struct ieee80211_txq *txq) in iwl_mld_txq_from_mac80211() argument 48 return (void *)txq->drv_priv; in iwl_mld_txq_from_mac80211() 52 void iwl_mld_remove_txq(struct iwl_mld *mld, struct ieee80211_txq *txq); 56 void iwl_mld_tx_from_txq(struct iwl_mld *mld, struct ieee80211_txq *txq); 60 int iwl_mld_ensure_queue(struct iwl_mld *mld, struct ieee80211_txq *txq); 75 struct ieee80211_txq *txq);
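
`iwl_mld_txq_from_mac80211()` above is the standard mac80211 pattern of carving driver-private state out of a flexible `drv_priv[]` region at the end of the stack-owned object; mac80211 sizes that region from the driver's declared need. A self-contained imitation of the layout and the cast (struct names are mine, and the demo allocates by hand where mac80211 would):

```c
#include <stdio.h>
#include <stdlib.h>

struct ieee80211_txq_demo {
	int tid;
	/* flexible tail region owned by the driver */
	char drv_priv[] __attribute__((aligned(8)));
};

struct drv_txq {
	int in_flight;
};

static struct drv_txq *drv_txq_from_mac80211(struct ieee80211_txq_demo *txq)
{
	return (void *)txq->drv_priv;	/* same cast as the snippet above */
}

int main(void)
{
	struct ieee80211_txq_demo *txq =
		malloc(sizeof(*txq) + sizeof(struct drv_txq));

	txq->tid = 0;
	drv_txq_from_mac80211(txq)->in_flight = 3;
	printf("%d\n", drv_txq_from_mac80211(txq)->in_flight);
	free(txq);
	return 0;
}
```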
|
| /linux/drivers/net/wireless/intel/iwlegacy/ |
| H A D | common.c | 364 il->txq[il->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB; in il_send_cmd_sync() 2709 il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq) in il_txq_update_write_ptr() argument 2712 int txq_id = txq->q.id; in il_txq_update_write_ptr() 2714 if (txq->need_update == 0) in il_txq_update_write_ptr() 2732 il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8)); in il_txq_update_write_ptr() 2740 _il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8)); in il_txq_update_write_ptr() 2741 txq->need_update = 0; in il_txq_update_write_ptr() 2751 struct il_tx_queue *txq = &il->txq[txq_id]; in il_tx_queue_unmap() local 2752 struct il_queue *q = &txq->q; in il_tx_queue_unmap() 2758 il->ops->txq_free_tfd(il, txq); in il_tx_queue_unmap() [all …]
|
| /linux/drivers/net/wireless/ath/ath10k/ |
| H A D | txrx.c | 50 struct ieee80211_txq *txq; in ath10k_txrx_tx_unref() local 76 txq = skb_cb->txq; in ath10k_txrx_tx_unref() 78 if (txq) { in ath10k_txrx_tx_unref() 79 artxq = (void *)txq->drv_priv; in ath10k_txrx_tx_unref() 89 if (txq && txq->sta && skb_cb->airtime_est) in ath10k_txrx_tx_unref() 90 ieee80211_sta_register_airtime(txq->sta, txq->tid, in ath10k_txrx_tx_unref() 138 if (txq) in ath10k_txrx_tx_unref() 139 status.sta = txq->sta; in ath10k_txrx_tx_unref()
|
| /linux/drivers/net/ethernet/chelsio/cxgb4/ |
| H A D | sge.c | 1220 netif_tx_stop_queue(q->txq); in eth_txq_stop() 1411 if (!q->in_use || !__netif_tx_trylock(eq->txq)) in t4_sge_eth_txq_egress_update() 1427 if (netif_tx_queue_stopped(eq->txq) && hw_in_use < (q->size / 2)) { in t4_sge_eth_txq_egress_update() 1428 netif_tx_wake_queue(eq->txq); in t4_sge_eth_txq_egress_update() 1432 __netif_tx_unlock(eq->txq); in t4_sge_eth_txq_egress_update() 1819 struct sge_eth_txq *txq; in cxgb4_vf_eth_xmit() local 1845 txq = &adapter->sge.ethtxq[pi->first_qset + qidx]; in cxgb4_vf_eth_xmit() 1850 reclaim_completed_tx(adapter, &txq->q, -1, true); in cxgb4_vf_eth_xmit() 1858 credits = txq_avail(&txq->q) - ndesc; in cxgb4_vf_eth_xmit() 1866 eth_txq_stop(txq); in cxgb4_vf_eth_xmit() [all …]
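
`t4_sge_eth_txq_egress_update()` above wakes the stopped queue only once hardware occupancy drops below half the ring, a hysteresis that avoids stop/wake ping-pong right at the full boundary. Reduced to its arithmetic (illustrative names):

```c
#include <stdbool.h>
#include <stdio.h>

struct demo_eq {
	int size;
	int hw_in_use;
	bool stopped;
};

static void egress_update(struct demo_eq *eq, int completed)
{
	eq->hw_in_use -= completed;
	/* wake well below full, not at the first free slot */
	if (eq->stopped && eq->hw_in_use < eq->size / 2) {
		eq->stopped = false;
		printf("netif_tx_wake_queue()\n");
	}
}

int main(void)
{
	struct demo_eq eq = { .size = 64, .hw_in_use = 64, .stopped = true };

	egress_update(&eq, 16);	/* 48 in use: still stopped */
	egress_update(&eq, 20);	/* 28 in use: wakes */
	return 0;
}
```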
|
| /linux/drivers/net/ethernet/brocade/bna/ |
| H A D | bna_tx_rx.c | 2870 struct bna_txq *txq; in bna_tx_sm_started_entry() local 2873 list_for_each_entry(txq, &tx->txq_q, qe) { in bna_tx_sm_started_entry() 2874 txq->tcb->priority = txq->priority; in bna_tx_sm_started_entry() 2876 bna_ib_start(tx->bna, &txq->ib, is_regular); in bna_tx_sm_started_entry() 3089 struct bna_txq *txq = NULL; in bna_bfi_tx_enet_start() local 3099 txq = txq ? list_next_entry(txq, qe) in bna_bfi_tx_enet_start() 3101 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt); in bna_bfi_tx_enet_start() 3102 cfg_req->q_cfg[i].q.priority = txq->priority; in bna_bfi_tx_enet_start() 3105 txq->ib.ib_seg_host_addr.lsb; in bna_bfi_tx_enet_start() 3107 txq->ib.ib_seg_host_addr.msb; in bna_bfi_tx_enet_start() [all …]
|
| /linux/drivers/net/ethernet/intel/idpf/ |
| H A D | idpf_txrx.c | 59 static void idpf_tx_buf_clean(struct idpf_tx_queue *txq) in idpf_tx_buf_clean() argument 64 .dev = txq->dev, in idpf_tx_buf_clean() 72 for (u32 i = 0; i < txq->buf_pool_size; i++) in idpf_tx_buf_clean() 73 libeth_tx_complete_any(&txq->tx_buf[i], &cp); in idpf_tx_buf_clean() 82 static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq) in idpf_tx_buf_rel_all() argument 85 if (!txq->tx_buf) in idpf_tx_buf_rel_all() 88 if (idpf_queue_has(XSK, txq)) in idpf_tx_buf_rel_all() 89 idpf_xsksq_clean(txq); in idpf_tx_buf_rel_all() 91 idpf_tx_buf_clean(txq); in idpf_tx_buf_rel_all() 93 kfree(txq->tx_buf); in idpf_tx_buf_rel_all() [all …]
|