
Searched refs: txq (Results 1 – 25 of 249), sorted by relevance


/linux/drivers/net/wwan/t7xx/
t7xx_hif_dpmaif_tx.c
54 struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num]; in t7xx_dpmaif_update_drb_rd_idx() local
58 if (!txq->que_started) in t7xx_dpmaif_update_drb_rd_idx()
61 old_sw_rd_idx = txq->drb_rd_idx; in t7xx_dpmaif_update_drb_rd_idx()
71 drb_cnt = txq->drb_size_cnt - old_sw_rd_idx + new_hw_rd_idx; in t7xx_dpmaif_update_drb_rd_idx()
73 spin_lock_irqsave(&txq->tx_lock, flags); in t7xx_dpmaif_update_drb_rd_idx()
74 txq->drb_rd_idx = new_hw_rd_idx; in t7xx_dpmaif_update_drb_rd_idx()
75 spin_unlock_irqrestore(&txq->tx_lock, flags); in t7xx_dpmaif_update_drb_rd_idx()
83 struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num]; in t7xx_dpmaif_release_tx_buffer() local
90 drb_skb_base = txq->drb_skb_base; in t7xx_dpmaif_release_tx_buffer()
91 drb_base = txq->drb_base; in t7xx_dpmaif_release_tx_buffer()
[all …]
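
Note: the excerpt above advances the software DRB read index to match the hardware's and counts how many slots were consumed, with wraparound. A minimal standalone sketch of that ring-distance arithmetic (helper name and types are illustrative, not from the driver):

#include <stdio.h>

/* Slots consumed between old_rd and new_rd on a ring of ring_size
 * entries, accounting for wraparound -- mirrors the drb_cnt computation
 * in t7xx_dpmaif_update_drb_rd_idx(). */
static unsigned int ring_consumed(unsigned int old_rd, unsigned int new_rd,
				  unsigned int ring_size)
{
	if (new_rd >= old_rd)
		return new_rd - old_rd;
	return ring_size - old_rd + new_rd;	/* index wrapped past the end */
}

int main(void)
{
	printf("%u\n", ring_consumed(5, 9, 16));	/* 4 */
	printf("%u\n", ring_consumed(14, 2, 16));	/* 4, across the wrap */
	return 0;
}
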
t7xx_hif_dpmaif.c
236 tx_q = &dpmaif_ctrl->txq[tx_idx]; in t7xx_dpmaif_rxtx_sw_allocs()
261 tx_q = &dpmaif_ctrl->txq[i]; in t7xx_dpmaif_rxtx_sw_allocs()
289 tx_q = &dpmaif_ctrl->txq[i]; in t7xx_dpmaif_sw_release()
304 struct dpmaif_tx_queue *txq; in t7xx_dpmaif_start() local
343 txq = &dpmaif_ctrl->txq[i]; in t7xx_dpmaif_start()
344 txq->que_started = true; in t7xx_dpmaif_start()
346 hw_init_para.drb_base_addr[i] = txq->drb_bus_addr; in t7xx_dpmaif_start()
347 hw_init_para.drb_size_cnt[i] = txq->drb_size_cnt; in t7xx_dpmaif_start()
433 struct dpmaif_tx_queue *txq; in t7xx_dpmaif_start_txrx_qs() local
437 txq = &dpmaif_ctrl->txq[que_cnt]; in t7xx_dpmaif_start_txrx_qs()
[all …]
/linux/drivers/infiniband/hw/hfi1/
ipoib_tx.c
29 struct hfi1_ipoib_txq *txq; member
47 static u64 hfi1_ipoib_used(struct hfi1_ipoib_txq *txq) in hfi1_ipoib_used() argument
49 return hfi1_ipoib_txreqs(txq->tx_ring.sent_txreqs, in hfi1_ipoib_used()
50 txq->tx_ring.complete_txreqs); in hfi1_ipoib_used()
53 static void hfi1_ipoib_stop_txq(struct hfi1_ipoib_txq *txq) in hfi1_ipoib_stop_txq() argument
55 trace_hfi1_txq_stop(txq); in hfi1_ipoib_stop_txq()
56 if (atomic_inc_return(&txq->tx_ring.stops) == 1) in hfi1_ipoib_stop_txq()
57 netif_stop_subqueue(txq->priv->netdev, txq->q_idx); in hfi1_ipoib_stop_txq()
60 static void hfi1_ipoib_wake_txq(struct hfi1_ipoib_txq *txq) in hfi1_ipoib_wake_txq() argument
62 trace_hfi1_txq_wake(txq); in hfi1_ipoib_wake_txq()
[all …]
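
Note: hfi1 counts overlapping stop requests in an atomic so only the first stopper actually stops the subqueue (and, presumably, only the last waker restarts it; the wake path is truncated above). A hedged userspace analogue of that refcounted stop/wake pattern:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int stops;	/* outstanding stop requests */

/* Only the 0 -> 1 transition stops the queue (the driver calls
 * netif_stop_subqueue() here). */
static void txq_stop(void)
{
	if (atomic_fetch_add(&stops, 1) + 1 == 1)
		puts("queue stopped");
}

/* Only the 1 -> 0 transition wakes it (assumed symmetric wake side;
 * the driver would call netif_wake_subqueue()). */
static void txq_wake(void)
{
	if (atomic_fetch_sub(&stops, 1) - 1 == 0)
		puts("queue woken");
}

int main(void)
{
	txq_stop();	/* stops the queue */
	txq_stop();	/* nested: no-op */
	txq_wake();	/* one stopper still outstanding */
	txq_wake();	/* wakes the queue */
	return 0;
}
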
/linux/drivers/net/wireless/intel/iwlwifi/pcie/
tx.c
79 struct iwl_txq *txq) in iwl_pcie_txq_inc_wr_ptr() argument
83 int txq_id = txq->id; in iwl_pcie_txq_inc_wr_ptr()
85 lockdep_assert_held(&txq->lock); in iwl_pcie_txq_inc_wr_ptr()
108 txq->need_update = true; in iwl_pcie_txq_inc_wr_ptr()
117 IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr); in iwl_pcie_txq_inc_wr_ptr()
118 if (!txq->block) in iwl_pcie_txq_inc_wr_ptr()
120 txq->write_ptr | (txq_id << 8)); in iwl_pcie_txq_inc_wr_ptr()
129 struct iwl_txq *txq = trans_pcie->txqs.txq[i]; in iwl_pcie_txq_check_wrptrs() local
134 spin_lock_bh(&txq->lock); in iwl_pcie_txq_check_wrptrs()
135 if (txq->need_update) { in iwl_pcie_txq_check_wrptrs()
[all …]
tx-gen2.c
298 struct iwl_txq *txq, in iwl_txq_gen2_build_tx_amsdu() argument
305 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); in iwl_txq_gen2_build_tx_amsdu()
306 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); in iwl_txq_gen2_build_tx_amsdu()
311 tb_phys = iwl_txq_get_first_tb_dma(txq, idx); in iwl_txq_gen2_build_tx_amsdu()
347 memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); in iwl_txq_gen2_build_tx_amsdu()
386 struct iwl_txq *txq, in iwl_txq_gen2_build_tx() argument
394 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); in iwl_txq_gen2_build_tx()
395 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); in iwl_txq_gen2_build_tx()
401 tb_phys = iwl_txq_get_first_tb_dma(txq, idx); in iwl_txq_gen2_build_tx()
404 memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); in iwl_txq_gen2_build_tx()
[all …]
internal.h
315 struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES]; member
642 int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
666 iwl_txq_get_first_tb_dma(struct iwl_txq *txq, int idx) in iwl_txq_get_first_tb_dma() argument
668 return txq->first_tb_dma + in iwl_txq_get_first_tb_dma()
678 struct iwl_txq *txq, int idx) in iwl_txq_get_tfd() argument
683 idx = iwl_txq_get_cmd_index(txq, idx); in iwl_txq_get_tfd()
685 return (u8 *)txq->tfds + trans_pcie->txqs.tfd.size * idx; in iwl_txq_get_tfd()
700 static inline void iwl_txq_stop(struct iwl_trans *trans, struct iwl_txq *txq) in iwl_txq_stop() argument
704 if (!test_and_set_bit(txq->id, trans_pcie->txqs.queue_stopped)) { in iwl_txq_stop()
705 iwl_op_mode_queue_full(trans->op_mode, txq->id); in iwl_txq_stop()
[all …]
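
Note: iwl_txq_stop() relies on test_and_set_bit() so only the caller that flips the queue_stopped bit from 0 to 1 notifies the op mode; concurrent callers see the bit already set and return. The same idempotent-transition idiom in portable C11 (stand-in names, not the driver's):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_ulong queue_stopped;	/* one bit per queue id */

/* Returns the bit's previous value, like test_and_set_bit(). */
static bool test_and_set_queue(unsigned int qid)
{
	unsigned long mask = 1UL << qid;

	return atomic_fetch_or(&queue_stopped, mask) & mask;
}

int main(void)
{
	if (!test_and_set_queue(3))
		puts("queue 3: first stopper, notify op mode");	/* printed once */
	if (!test_and_set_queue(3))
		puts("never reached: bit was already set");
	return 0;
}
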
/linux/drivers/net/ethernet/huawei/hinic/
hinic_tx.c
77 static void hinic_txq_clean_stats(struct hinic_txq *txq) in hinic_txq_clean_stats() argument
79 struct hinic_txq_stats *txq_stats = &txq->txq_stats; in hinic_txq_clean_stats()
96 void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats) in hinic_txq_get_stats() argument
98 struct hinic_txq_stats *txq_stats = &txq->txq_stats; in hinic_txq_get_stats()
116 static void txq_stats_init(struct hinic_txq *txq) in txq_stats_init() argument
118 struct hinic_txq_stats *txq_stats = &txq->txq_stats; in txq_stats_init()
121 hinic_txq_clean_stats(txq); in txq_stats_init()
499 struct hinic_txq *txq; in hinic_lb_xmit_frame() local
502 txq = &nic_dev->txqs[q_id]; in hinic_lb_xmit_frame()
503 qp = container_of(txq->sq, struct hinic_qp, sq); in hinic_lb_xmit_frame()
[all …]
/linux/drivers/net/ethernet/marvell/
mv643xx_eth.c
179 #define IS_TSO_HEADER(txq, addr) \ argument
180 ((addr >= txq->tso_hdrs_dma) && \
181 (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
408 struct tx_queue txq[8]; member
446 static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq) in txq_to_mp() argument
448 return container_of(txq, struct mv643xx_eth_private, txq[txq->index]); in txq_to_mp()
467 static void txq_reset_hw_ptr(struct tx_queue *txq) in txq_reset_hw_ptr() argument
469 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_reset_hw_ptr()
472 addr = (u32)txq->tx_desc_dma; in txq_reset_hw_ptr()
473 addr += txq->tx_curr_desc * sizeof(struct tx_desc); in txq_reset_hw_ptr()
[all …]
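
Note: IS_TSO_HEADER() classifies a DMA address by range: TSO headers live in one contiguous preallocated region of tx_ring_size * TSO_HEADER_SIZE bytes starting at tso_hdrs_dma, so an address inside that window must not be unmapped like an ordinary fragment mapping. A standalone expansion of the bounds check (the size constant here is illustrative; the real one comes from net/tso.h):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TSO_HEADER_SIZE 128u	/* illustrative per-slot size */

/* True if addr falls inside the ring's preallocated TSO-header region,
 * mirroring the IS_TSO_HEADER() macro above. */
static bool is_tso_header(uint64_t tso_hdrs_dma, unsigned int ring_size,
			  uint64_t addr)
{
	return addr >= tso_hdrs_dma &&
	       addr < tso_hdrs_dma + (uint64_t)ring_size * TSO_HEADER_SIZE;
}

int main(void)
{
	uint64_t base = 0x1000;

	printf("%d\n", is_tso_header(base, 256, 0x1080));	/* 1: inside */
	printf("%d\n", is_tso_header(base, 256, 0x0fff));	/* 0: below base */
	return 0;
}
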
mvneta.c
135 #define MVNETA_CPU_TXQ_ACCESS(txq) BIT(txq + 8) argument
774 static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq) in mvneta_txq_inc_get() argument
776 txq->txq_get_index++; in mvneta_txq_inc_get()
777 if (txq->txq_get_index == txq->size) in mvneta_txq_inc_get()
778 txq->txq_get_index = 0; in mvneta_txq_inc_get()
782 static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq) in mvneta_txq_inc_put() argument
784 txq->txq_put_index++; in mvneta_txq_inc_put()
785 if (txq->txq_put_index == txq->size) in mvneta_txq_inc_put()
786 txq->txq_put_index = 0; in mvneta_txq_inc_put()
964 struct mvneta_tx_queue *txq, in mvneta_txq_pend_desc_add() argument
[all …]
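
Note: mvneta_txq_inc_get()/mvneta_txq_inc_put() advance a ring index with a compare-and-reset instead of a modulo: one branch, no division, and it works for non-power-of-two ring sizes. The pattern in isolation:

#include <stdio.h>

/* Advance a ring index, wrapping to zero at size (the mvneta idiom). */
static void ring_inc(unsigned int *idx, unsigned int size)
{
	if (++*idx == size)
		*idx = 0;
}

int main(void)
{
	unsigned int put = 126, size = 128;

	ring_inc(&put, size);	/* 127 */
	ring_inc(&put, size);	/* wraps to 0 */
	printf("put=%u\n", put);
	return 0;
}
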
/linux/drivers/net/ethernet/qlogic/qede/
qede_fp.c
76 int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len) in qede_free_tx_pkt() argument
78 u16 idx = txq->sw_tx_cons; in qede_free_tx_pkt()
79 struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb; in qede_free_tx_pkt()
84 bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD; in qede_free_tx_pkt()
90 idx, txq->sw_tx_cons, txq->sw_tx_prod); in qede_free_tx_pkt()
96 first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt()
104 qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt()
114 qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt()
120 qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt()
124 txq->sw_tx_ring.skbs[idx].skb = NULL; in qede_free_tx_pkt()
[all …]
qede_main.c
544 struct qede_fastpath *fp, struct qede_tx_queue *txq) in qede_txq_fp_log_metadata() argument
546 struct qed_chain *p_chain = &txq->tx_pbl; in qede_txq_fp_log_metadata()
551 fp->id, fp->sb_info->igu_sb_id, txq->index, txq->ndev_txq_id, txq->cos, in qede_txq_fp_log_metadata()
557 le16_to_cpu(*txq->hw_cons_ptr), txq->sw_tx_prod, txq->sw_tx_cons, in qede_txq_fp_log_metadata()
562 qede_tx_log_print(struct qede_dev *edev, struct qede_fastpath *fp, struct qede_tx_queue *txq) in qede_tx_log_print() argument
574 sb_dbg.igu_prod, sb_dbg.igu_cons, sb_dbg.pi[TX_PI(txq->cos)]); in qede_tx_log_print()
579 txq->index, le16_to_cpu(*txq->hw_cons_ptr), in qede_tx_log_print()
580 qed_chain_get_cons_idx(&txq->tx_pbl), in qede_tx_log_print()
581 qed_chain_get_prod_idx(&txq->tx_pbl), jiffies); in qede_tx_log_print()
585 txq->index, fp->sb_info->igu_sb_id, in qede_tx_log_print()
[all …]
/linux/drivers/net/ethernet/mellanox/mlx5/core/en/
selq.h
30 static inline u16 mlx5e_txq_to_ch_ix(u16 txq, u16 num_channels) in mlx5e_txq_to_ch_ix() argument
32 while (unlikely(txq >= num_channels)) in mlx5e_txq_to_ch_ix()
33 txq -= num_channels; in mlx5e_txq_to_ch_ix()
34 return txq; in mlx5e_txq_to_ch_ix()
37 static inline u16 mlx5e_txq_to_ch_ix_htb(u16 txq, u16 num_channels) in mlx5e_txq_to_ch_ix_htb() argument
39 if (unlikely(txq >= num_channels)) { in mlx5e_txq_to_ch_ix_htb()
40 if (unlikely(txq >= num_channels << 3)) in mlx5e_txq_to_ch_ix_htb()
41 txq %= num_channels; in mlx5e_txq_to_ch_ix_htb()
44 txq -= num_channels; in mlx5e_txq_to_ch_ix_htb()
45 while (txq >= num_channels); in mlx5e_txq_to_ch_ix_htb()
[all …]
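
Note: both selq helpers fold a txq number into [0, num_channels) while keeping integer division off the hot path: repeated subtraction when txq is close to num_channels, '%' only once txq is at least num_channels << 3 away. A standalone rendering of that trade-off, matching the excerpted logic:

#include <stdint.h>
#include <stdio.h>

static uint16_t txq_to_ch_ix(uint16_t txq, uint16_t num_channels)
{
	if (txq >= num_channels) {
		if (txq >= num_channels << 3)
			txq %= num_channels;	/* far away: one division */
		else
			do
				txq -= num_channels;	/* close: cheap subtractions */
			while (txq >= num_channels);
	}
	return txq;
}

int main(void)
{
	printf("%u\n", txq_to_ch_ix(5, 4));	/* 1 */
	printf("%u\n", txq_to_ch_ix(1000, 4));	/* 0 */
	return 0;
}
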
/linux/include/trace/events/
qdisc.h
16 TP_PROTO(struct Qdisc *qdisc, const struct netdev_queue *txq,
19 TP_ARGS(qdisc, txq, packets, skb),
23 __field(const struct netdev_queue *, txq )
35 __entry->txq = txq;
38 __entry->ifindex = txq->dev ? txq->dev->ifindex : 0;
41 __entry->txq_state = txq->state;
51 TP_PROTO(struct Qdisc *qdisc, const struct netdev_queue *txq, struct sk_buff *skb),
53 TP_ARGS(qdisc, txq, sk
[all …]
/linux/drivers/net/ethernet/freescale/
fec_main.c
307 #define IS_TSO_HEADER(txq, addr) \ argument
308 ((addr >= txq->tso_hdrs_dma) && \
309 (addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))
333 static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq) in fec_enet_get_free_txdesc_num() argument
337 entries = (((const char *)txq->dirty_tx - in fec_enet_get_free_txdesc_num()
338 (const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1; in fec_enet_get_free_txdesc_num()
340 return entries >= 0 ? entries : entries + txq->bd.ring_size; in fec_enet_get_free_txdesc_num()
356 struct fec_enet_priv_tx_q *txq; in fec_dump() local
362 txq = fep->tx_queue[0]; in fec_dump()
363 bdp = txq->bd.base; in fec_dump()
[all …]
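
Note: fec_enet_get_free_txdesc_num() measures free descriptors as the distance from the last cleaned descriptor (dirty_tx) to the producer cursor (bd.cur), shifted down by log2 of the descriptor size, minus the one slot kept empty so a full ring is distinguishable from an empty one; a negative result means the producer has wrapped, so the ring size is added back. The same computation on plain indices (a hedged simplification of the byte-pointer arithmetic):

#include <stdio.h>

/* Free slots between consumer (dirty) and producer (cur), keeping one
 * slot unused so full != empty. */
static int free_txdesc(int dirty, int cur, int ring_size)
{
	int entries = dirty - cur - 1;

	return entries >= 0 ? entries : entries + ring_size;
}

int main(void)
{
	printf("%d\n", free_txdesc(10, 4, 16));	/* 5 free */
	printf("%d\n", free_txdesc(2, 12, 16));	/* 5 free, across the wrap */
	return 0;
}
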
/linux/drivers/net/ethernet/chelsio/cxgb4vf/
sge.c
1133 static void txq_stop(struct sge_eth_txq *txq) in txq_stop() argument
1135 netif_tx_stop_queue(txq->txq); in txq_stop()
1136 txq->q.stops++; in txq_stop()
1164 struct sge_eth_txq *txq; in t4vf_eth_xmit() local
1195 txq = &adapter->sge.ethtxq[pi->first_qset + qidx]; in t4vf_eth_xmit()
1205 reclaim_completed_tx(adapter, &txq->q, true); in t4vf_eth_xmit()
1214 credits = txq_avail(&txq->q) - ndesc; in t4vf_eth_xmit()
1223 txq_stop(txq); in t4vf_eth_xmit()
1237 txq->mapping_err++; in t4vf_eth_xmit()
1252 txq_stop(txq); in t4vf_eth_xmit()
[all …]
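
Note: t4vf_eth_xmit() practices up-front admission control: reclaim completed descriptors, compute the credits left after this packet, and stop the software queue before the hardware ring can overflow. A sketch of that check (struct and names are illustrative, and the real driver's stop threshold is more involved):

#include <stdbool.h>
#include <stdio.h>

struct sketch_txq {
	int avail;	/* descriptors still free */
	bool stopped;
};

/* Admit a packet needing ndesc descriptors; stop the queue when it no
 * longer fits (the driver calls netif_tx_stop_queue() via txq_stop()). */
static bool txq_admit(struct sketch_txq *q, int ndesc)
{
	int credits = q->avail - ndesc;

	if (credits < 0) {
		q->stopped = true;
		return false;
	}
	q->avail = credits;
	return true;
}

int main(void)
{
	struct sketch_txq q = { .avail = 3, .stopped = false };

	printf("%d\n", txq_admit(&q, 2));	/* 1: fits, one credit left */
	printf("%d\n", txq_admit(&q, 2));	/* 0: queue stopped */
	return 0;
}
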
/linux/drivers/bluetooth/
btintel_pcie.c
102 static void btintel_pcie_prepare_tx(struct txq *txq, u16 tfd_index, in btintel_pcie_prepare_tx() argument
108 tfd = &txq->tfds[tfd_index]; in btintel_pcie_prepare_tx()
111 buf = &txq->bufs[tfd_index]; in btintel_pcie_prepare_tx()
125 struct txq *txq = &data->txq; in btintel_pcie_send_sync() local
129 if (tfd_index > txq->count) in btintel_pcie_send_sync()
135 btintel_pcie_prepare_tx(txq, tfd_index, skb); in btintel_pcie_send_sync()
137 tfd_index = (tfd_index + 1) % txq->count; in btintel_pcie_send_sync()
500 struct txq *txq; in btintel_pcie_msix_tx_handle() local
509 txq = &data->txq; in btintel_pcie_msix_tx_handle()
515 urbd0 = &txq->urbd0s[cr_tia]; in btintel_pcie_msix_tx_handle()
[all …]
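
Note: btintel_pcie advances its TFD index with a modulo ((tfd_index + 1) % txq->count) where other drivers on this page use compare-and-reset; for power-of-two counts the compiler reduces '%' to a mask, otherwise it costs a division. Both spellings side by side:

#include <stdio.h>

int main(void)
{
	unsigned int count = 32, idx = 31;

	/* modulo style, as in btintel_pcie_send_sync() */
	unsigned int a = (idx + 1) % count;

	/* compare-and-reset style, as in the mvneta helpers above */
	unsigned int b = idx + 1;

	if (b == count)
		b = 0;

	printf("%u %u\n", a, b);	/* both wrap to 0 */
	return 0;
}
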
/linux/drivers/net/wireless/ath/ath5k/
base.c
769 struct ath5k_txq *txq, int padsize, in ath5k_txbuf_setup() argument
875 spin_lock_bh(&txq->lock); in ath5k_txbuf_setup()
876 list_add_tail(&bf->list, &txq->q); in ath5k_txbuf_setup()
877 txq->txq_len++; in ath5k_txbuf_setup()
878 if (txq->link == NULL) /* is this first packet? */ in ath5k_txbuf_setup()
879 ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr); in ath5k_txbuf_setup()
881 *txq->link = bf->daddr; in ath5k_txbuf_setup()
883 txq->link = &ds->ds_link; in ath5k_txbuf_setup()
884 ath5k_hw_start_tx_dma(ah, txq->qnum); in ath5k_txbuf_setup()
885 spin_unlock_bh(&txq->lock); in ath5k_txbuf_setup()
[all …]
/linux/drivers/net/ethernet/intel/idpf/
idpf_txrx.h
116 #define IDPF_DESC_UNUSED(txq) \ argument
117 ((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \
118 (txq)->next_to_clean - (txq)->next_to_use - 1)
120 #define IDPF_TX_BUF_RSV_UNUSED(txq) ((txq)->stash->buf_stack.top) argument
121 #define IDPF_TX_BUF_RSV_LOW(txq) (IDPF_TX_BUF_RSV_UNUSED(txq) < \ argument
122 (txq)->desc_count >> 2)
128 #define IDPF_TX_COMPLQ_PENDING(txq) \ argument
129 (((txq)->num_completions_pending >= (txq)->complq->num_completions ? \
131 (txq)->num_completions_pending - (txq)->complq->num_completions)
135 #define IDPF_TX_ADJ_COMPL_TAG_GEN(txq) \ argument
[all …]
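
Note: IDPF_DESC_UNUSED() is the reserved-slot ring arithmetic in macro form: when next_to_clean is already ahead of next_to_use the distance is direct, otherwise the full descriptor count is added first, and one slot always stays empty. Expanded as a function for readability:

#include <stdio.h>

/* Direct expansion of IDPF_DESC_UNUSED() with ntc = next_to_clean and
 * ntu = next_to_use. */
static int desc_unused(int ntc, int ntu, int desc_count)
{
	return (ntc > ntu ? 0 : desc_count) + ntc - ntu - 1;
}

int main(void)
{
	printf("%d\n", desc_unused(8, 3, 16));	/* 4: clean ahead of use */
	printf("%d\n", desc_unused(2, 10, 16));	/* 7: wrapped */
	return 0;
}
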
idpf_txrx.c
76 static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq) in idpf_tx_buf_rel_all() argument
82 .dev = txq->dev, in idpf_tx_buf_rel_all()
89 if (!txq->tx_buf) in idpf_tx_buf_rel_all()
93 for (i = 0; i < txq->desc_count; i++) in idpf_tx_buf_rel_all()
94 libeth_tx_complete(&txq->tx_buf[i], &cp); in idpf_tx_buf_rel_all()
96 kfree(txq->tx_buf); in idpf_tx_buf_rel_all()
97 txq->tx_buf = NULL; in idpf_tx_buf_rel_all()
99 if (!idpf_queue_has(FLOW_SCH_EN, txq)) in idpf_tx_buf_rel_all()
102 buf_stack = &txq->stash->buf_stack; in idpf_tx_buf_rel_all()
110 hash_for_each_safe(txq->stash->sched_buf_hash, tag, tmp, stash, in idpf_tx_buf_rel_all()
[all …]
/linux/drivers/net/ethernet/chelsio/cxgb3/
sge.c
176 return container_of(q, struct sge_qset, txq[qidx]); in txq_to_qset()
654 memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET); in t3_reset_qset()
691 if (q->txq[i].desc) { in t3_free_qset()
693 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0); in t3_free_qset()
695 if (q->txq[i].sdesc) { in t3_free_qset()
696 free_tx_desc(adapter, &q->txq[i], in t3_free_qset()
697 q->txq[i].in_use); in t3_free_qset()
698 kfree(q->txq[i].sdesc); in t3_free_qset()
701 q->txq[i].size * in t3_free_qset()
703 q->txq[i].desc, q->txq[i].phys_addr); in t3_free_qset()
[all …]
/linux/net/core/
netpoll.c
70 struct netdev_queue *txq) in netpoll_start_xmit() argument
89 status = netdev_start_xmit(skb, dev, txq, false); in netpoll_start_xmit()
102 while ((skb = skb_dequeue(&npinfo->txq))) { in queue_process()
104 struct netdev_queue *txq; in queue_process() local
119 txq = netdev_get_tx_queue(dev, q_index); in queue_process()
120 HARD_TX_LOCK(dev, txq, smp_processor_id()); in queue_process()
121 if (netif_xmit_frozen_or_stopped(txq) || in queue_process()
122 !dev_xmit_complete(netpoll_start_xmit(skb, dev, txq))) { in queue_process()
123 skb_queue_head(&npinfo->txq, skb); in queue_process()
124 HARD_TX_UNLOCK(dev, txq); in queue_process()
[all …]
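
Note: queue_process() pops a packet, takes the per-queue TX lock, and on any failure (queue frozen/stopped, or the driver did not complete the transmit) pushes the packet back at the head of npinfo->txq so retry order is preserved. The head-requeue shape in miniature (a loose userspace analogue, not the kernel API):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the frozen/stopped + netdev_start_xmit() checks. */
static bool try_xmit(int pkt, bool queue_busy)
{
	(void)pkt;	/* payload unused in this sketch */
	return !queue_busy;
}

int main(void)
{
	int queue[4] = { 1, 2, 3, 4 };
	int head = 0, tail = 4;
	bool busy = true;

	while (head < tail) {
		int pkt = queue[head++];	/* dequeue from the head */

		if (!try_xmit(pkt, busy)) {
			--head;		/* requeue at the head, keep ordering */
			busy = false;	/* pretend the queue drained */
			continue;
		}
		printf("sent %d\n", pkt);
	}
	return 0;
}
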
/linux/drivers/net/wireless/ath/ath10k/
txrx.c
50 struct ieee80211_txq *txq; in ath10k_txrx_tx_unref() local
76 txq = skb_cb->txq; in ath10k_txrx_tx_unref()
78 if (txq) { in ath10k_txrx_tx_unref()
79 artxq = (void *)txq->drv_priv; in ath10k_txrx_tx_unref()
89 if (txq && txq->sta && skb_cb->airtime_est) in ath10k_txrx_tx_unref()
90 ieee80211_sta_register_airtime(txq->sta, txq->tid, in ath10k_txrx_tx_unref()
138 if (txq) in ath10k_txrx_tx_unref()
139 status.sta = txq->sta; in ath10k_txrx_tx_unref()
/linux/drivers/net/ethernet/brocade/bna/
bna_tx_rx.c
2870 struct bna_txq *txq; in bna_tx_sm_started_entry() local
2873 list_for_each_entry(txq, &tx->txq_q, qe) { in bna_tx_sm_started_entry()
2874 txq->tcb->priority = txq->priority; in bna_tx_sm_started_entry()
2876 bna_ib_start(tx->bna, &txq->ib, is_regular); in bna_tx_sm_started_entry()
3089 struct bna_txq *txq = NULL; in bna_bfi_tx_enet_start() local
3099 txq = txq ? list_next_entry(txq, qe) in bna_bfi_tx_enet_start()
3101 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt); in bna_bfi_tx_enet_start()
3102 cfg_req->q_cfg[i].q.priority = txq->priority; in bna_bfi_tx_enet_start()
3105 txq->ib.ib_seg_host_addr.lsb; in bna_bfi_tx_enet_start()
3107 txq->ib.ib_seg_host_addr.msb; in bna_bfi_tx_enet_start()
[all …]
/linux/drivers/net/ethernet/microsoft/mana/
mana_en.c
249 struct mana_txq *txq; in mana_start_xmit() local
259 txq = &apc->tx_qp[txq_idx].txq; in mana_start_xmit()
260 gdma_sq = txq->gdma_sq; in mana_start_xmit()
262 tx_stats = &txq->stats; in mana_start_xmit()
265 pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame; in mana_start_xmit()
267 if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) { in mana_start_xmit()
268 pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset; in mana_start_xmit()
271 pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset; in mana_start_xmit()
398 skb_queue_tail(&txq->pending_skbs, skb); in mana_start_xmit()
411 (void)skb_dequeue_tail(&txq->pending_skbs); in mana_start_xmit()
[all …]
/linux/drivers/net/ethernet/alacritech/
slicoss.c
113 static unsigned int slic_get_free_tx_descs(struct slic_tx_queue *txq) in slic_get_free_tx_descs() argument
117 return slic_get_free_queue_descs(txq->put_idx, txq->done_idx, txq->len); in slic_get_free_tx_descs()
344 struct slic_tx_queue *txq = &sdev->txq; in slic_xmit_complete() local
359 txq->done_idx = idx; in slic_xmit_complete()
360 buff = &txq->txbuffs[idx]; in slic_xmit_complete()
387 (slic_get_free_tx_descs(txq) >= SLIC_MIN_TX_WAKEUP_DESCS)) in slic_xmit_complete()
838 struct slic_tx_queue *txq = &sdev->txq; in slic_init_tx_queue() local
844 txq->len = SLIC_NUM_TX_DESCS; in slic_init_tx_queue()
845 txq->put_idx = 0; in slic_init_tx_queue()
846 txq->done_idx = 0; in slic_init_tx_queue()
[all …]
