Lines matching refs:txq — every reference to the TX-queue structure in the qede driver's fast path, grouped by enclosing function; the leading number on each hit is the line in the indexed source file.
76 int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len) in qede_free_tx_pkt() argument
78 u16 idx = txq->sw_tx_cons; in qede_free_tx_pkt()
79 struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb; in qede_free_tx_pkt()
84 bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD; in qede_free_tx_pkt()
90 idx, txq->sw_tx_cons, txq->sw_tx_prod); in qede_free_tx_pkt()
96 first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt()
104 qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt()
114 qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt()
120 qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt()
124 txq->sw_tx_ring.skbs[idx].skb = NULL; in qede_free_tx_pkt()
125 txq->sw_tx_ring.skbs[idx].flags = 0; in qede_free_tx_pkt()
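The qede_free_tx_pkt() hits above show the completion side of the TX ring: the packet at the software consumer index is looked up, its buffer descriptors are consumed from the hardware chain, and the slot is cleared for reuse. A minimal standalone sketch of that bookkeeping, with hypothetical names (sw_slot, tx_ring), not the qed_chain API:

    /* Model of the consumer-side bookkeeping: a software ring mirrors the
     * hardware BD chain, and completing one packet means freeing the skb
     * stand-in and clearing the matching slot. */
    #include <stdio.h>
    #include <stdlib.h>

    #define RING_SIZE 8 /* must track the hardware chain's element count */

    struct sw_slot {
            void *pkt;           /* stands in for the skb pointer */
            unsigned int flags;  /* e.g. a TSO-split marker */
    };

    struct tx_ring {
            struct sw_slot slots[RING_SIZE];
            unsigned short cons; /* software consumer index */
    };

    /* Mirrors the skb = sw_tx_ring.skbs[idx].skb / ... = NULL sequence. */
    static void ring_complete_one(struct tx_ring *r)
    {
            unsigned short idx = r->cons;

            free(r->slots[idx].pkt);
            r->slots[idx].pkt = NULL;    /* slot is reusable by the producer */
            r->slots[idx].flags = 0;
            r->cons = (r->cons + 1) % RING_SIZE;
    }

    int main(void)
    {
            struct tx_ring r = { .cons = 0 };

            r.slots[0].pkt = malloc(64); /* pretend a packet was queued */
            ring_complete_one(&r);
            printf("cons advanced to %u\n", r.cons);
            return 0;
    }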
131 static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq, in qede_free_failed_tx_pkt() argument
135 u16 idx = txq->sw_tx_prod; in qede_free_failed_tx_pkt()
136 struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb; in qede_free_failed_tx_pkt()
141 qed_chain_set_prod(&txq->tx_pbl, in qede_free_failed_tx_pkt()
142 le16_to_cpu(txq->tx_db.data.bd_prod), first_bd); in qede_free_failed_tx_pkt()
144 first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl); in qede_free_failed_tx_pkt()
148 qed_chain_produce(&txq->tx_pbl); in qede_free_failed_tx_pkt()
153 dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd), in qede_free_failed_tx_pkt()
159 qed_chain_produce(&txq->tx_pbl); in qede_free_failed_tx_pkt()
161 dma_unmap_page(txq->dev, in qede_free_failed_tx_pkt()
167 qed_chain_set_prod(&txq->tx_pbl, in qede_free_failed_tx_pkt()
168 le16_to_cpu(txq->tx_db.data.bd_prod), first_bd); in qede_free_failed_tx_pkt()
172 txq->sw_tx_ring.skbs[idx].skb = NULL; in qede_free_failed_tx_pkt()
173 txq->sw_tx_ring.skbs[idx].flags = 0; in qede_free_failed_tx_pkt()
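qede_free_failed_tx_pkt() is the unwind path: when mapping fails partway through a packet, the producer is rewound to the first BD and everything already DMA-mapped is unmapped. A compilable sketch of the same unwind shape, assuming hypothetical map_frag()/unmap_frag() helpers:

    /* If mapping fragment i fails, everything in [0, i) was already mapped
     * and must be unmapped before the producer index is rewound, so the
     * caller sees the ring exactly as it found it. */
    #include <stdbool.h>
    #include <stddef.h>

    struct frag { void *addr; size_t len; bool mapped; };

    static bool map_frag(struct frag *f)
    {
            if (!f->addr)
                    return false;   /* simulate a mapping failure */
            f->mapped = true;
            return true;
    }

    static void unmap_frag(struct frag *f) { f->mapped = false; }

    static bool map_all(struct frag *frags, int n)
    {
            int i;

            for (i = 0; i < n; i++)
                    if (!map_frag(&frags[i]))
                            goto unwind;
            return true;

    unwind:
            while (--i >= 0)        /* undo only what actually succeeded */
                    unmap_frag(&frags[i]);
            return false;
    }

    int main(void)
    {
            char a, b;
            struct frag frags[3] = {
                    { &a, 1, false }, { &b, 1, false }, { NULL, 1, false },
            };

            return map_all(frags, 3) ? 0 : 1; /* fails on frag 2, unwinds 0..1 */
    }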
243 static int map_frag_to_bd(struct qede_tx_queue *txq, in map_frag_to_bd() argument
249 mapping = skb_frag_dma_map(txq->dev, frag, 0, in map_frag_to_bd()
251 if (unlikely(dma_mapping_error(txq->dev, mapping))) in map_frag_to_bd()
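map_frag_to_bd() shows the map-then-check idiom: a DMA mapping can fail, so the returned handle is validated with dma_mapping_error() before it is written into a descriptor. A userspace model with stand-in mapping functions (fake_dma_map()/fake_dma_error() are inventions for illustration, not kernel API):

    #include <stdint.h>
    #include <stdio.h>

    #define BAD_DMA ((uint64_t)-1)

    static uint64_t fake_dma_map(const void *cpu_addr, size_t len)
    {
            (void)len;
            return cpu_addr ? (uint64_t)(uintptr_t)cpu_addr : BAD_DMA;
    }

    static int fake_dma_error(uint64_t handle) { return handle == BAD_DMA; }

    int main(void)
    {
            char frag[256];
            uint64_t mapping = fake_dma_map(frag, sizeof(frag));

            if (fake_dma_error(mapping)) { /* mirrors dma_mapping_error() */
                    fprintf(stderr, "mapping failed, drop/unwind the packet\n");
                    return 1;
            }
            printf("BD addr = 0x%llx\n", (unsigned long long)mapping);
            return 0;
    }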
288 static inline void qede_update_tx_producer(struct qede_tx_queue *txq) in qede_update_tx_producer() argument
295 writel(txq->tx_db.raw, txq->doorbell_addr); in qede_update_tx_producer()
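qede_update_tx_producer() rings the doorbell by writing the packed producer value to an MMIO register. The essential rule is ordering: all descriptor writes must be globally visible before the doorbell store. A C11 model using a release store; the real code relies on writel() and the kernel's MMIO ordering guarantees rather than C atomics:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t bd_ring[8];       /* stand-in for the BD memory */
    static _Atomic uint32_t doorbell; /* stand-in for the MMIO register */

    static void update_producer(uint32_t prod)
    {
            /* Release semantics: all earlier BD stores are ordered before
             * the doorbell store that makes them visible to the "device". */
            atomic_store_explicit(&doorbell, prod, memory_order_release);
    }

    int main(void)
    {
            bd_ring[0] = 0xdeadbeef; /* fill the descriptor first ... */
            update_producer(1);      /* ... then ring the doorbell */
            printf("doorbell = %u\n", atomic_load(&doorbell));
            return 0;
    }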
304 static int qede_xdp_xmit(struct qede_tx_queue *txq, dma_addr_t dma, u16 pad, in qede_xdp_xmit() argument
311 if (unlikely(qed_chain_get_elem_used(&txq->tx_pbl) >= in qede_xdp_xmit()
312 txq->num_tx_buffers)) { in qede_xdp_xmit()
313 txq->stopped_cnt++; in qede_xdp_xmit()
317 bd = qed_chain_produce(&txq->tx_pbl); in qede_xdp_xmit()
329 xdp = txq->sw_tx_ring.xdp + txq->sw_tx_prod; in qede_xdp_xmit()
334 txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers; in qede_xdp_xmit()
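qede_xdp_xmit() first checks that the chain still has room, counting a stop event otherwise, and advances the software producer with a modulo wrap. A sketch of that admission check using free-running indices (names illustrative, not the qed_chain API):

    #include <stdio.h>

    #define NUM_TX_BUFFERS 4

    struct ring {
            unsigned int prod, cons; /* free-running indices */
            unsigned int stopped_cnt;
    };

    static unsigned int elems_used(const struct ring *r)
    {
            return r->prod - r->cons; /* wrap-safe for unsigned */
    }

    static int ring_xmit(struct ring *r)
    {
            if (elems_used(r) >= NUM_TX_BUFFERS) {
                    r->stopped_cnt++;  /* mirrors txq->stopped_cnt++ */
                    return -1;         /* ring full, drop or defer */
            }
            r->prod++;  /* the slot index is prod % NUM_TX_BUFFERS,
                         * matching the driver's modulo advance */
            return 0;
    }

    int main(void)
    {
            struct ring r = {0};
            int i;

            for (i = 0; i < 6; i++)
                    printf("xmit %d -> %d\n", i, ring_xmit(&r));
            printf("stopped %u times\n", r.stopped_cnt);
            return 0;
    }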
387 int qede_txq_has_work(struct qede_tx_queue *txq) in qede_txq_has_work() argument
393 hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); in qede_txq_has_work()
394 if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1) in qede_txq_has_work()
397 return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl); in qede_txq_has_work()
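qede_txq_has_work() compares the device-written completion index (read through hw_cons_ptr) against the driver's chain consumer; both are 16-bit and wrap, so the safe test is pure (in)equality rather than ordering. The real code also special-cases hw_bd_cons + 1, a chain-implementation detail not modeled in this hypothetical sketch:

    #include <stdint.h>
    #include <stdio.h>

    struct txq_model {
            volatile uint16_t hw_cons; /* written by the "device" */
            uint16_t sw_cons;          /* advanced by the driver */
    };

    static int txq_has_work(struct txq_model *t)
    {
            uint16_t hw = t->hw_cons;  /* snapshot once per poll */

            return hw != t->sw_cons;   /* wrap-safe: equality test only */
    }

    int main(void)
    {
            struct txq_model t = { .hw_cons = 3, .sw_cons = 1 };

            while (txq_has_work(&t)) {
                    printf("completing BD %u\n", t.sw_cons);
                    t.sw_cons++;       /* uint16_t arithmetic wraps naturally */
            }
            return 0;
    }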
400 static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq) in qede_xdp_tx_int() argument
402 struct sw_tx_xdp *xdp_info, *xdp_arr = txq->sw_tx_ring.xdp; in qede_xdp_tx_int()
407 hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); in qede_xdp_tx_int()
410 while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) { in qede_xdp_tx_int()
411 xdp_info = xdp_arr + txq->sw_tx_cons; in qede_xdp_tx_int()
426 qed_chain_consume(&txq->tx_pbl); in qede_xdp_tx_int()
427 txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers; in qede_xdp_tx_int()
428 txq->xmit_pkts++; in qede_xdp_tx_int()
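qede_xdp_tx_int() drains completions one slot at a time until the driver's consumer catches up with the hardware's, recycling each slot's page and bumping the stats counter. A standalone model that keeps the driver's split between a free-running chain index and a modulo slot index:

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_TX_BUFFERS 8

    struct xdp_txq {
            void *pages[NUM_TX_BUFFERS]; /* models sw_tx_ring.xdp */
            uint16_t hw_cons;            /* device-written completion index */
            uint16_t chain_cons;         /* driver's free-running consumer */
            uint16_t sw_cons;            /* slot index, kept in [0, N) */
            unsigned long xmit_pkts;
    };

    static void recycle(void *page) { (void)page; /* back to the page pool */ }

    static void xdp_tx_int(struct xdp_txq *q)
    {
            uint16_t hw = q->hw_cons;    /* one snapshot per invocation */

            while (hw != q->chain_cons) {
                    recycle(q->pages[q->sw_cons]);
                    q->pages[q->sw_cons] = NULL;
                    q->chain_cons++;                                /* wraps at 2^16 */
                    q->sw_cons = (q->sw_cons + 1) % NUM_TX_BUFFERS; /* slot wrap */
                    q->xmit_pkts++;
            }
    }

    int main(void)
    {
            struct xdp_txq q = { .hw_cons = 3 };

            xdp_tx_int(&q);
            printf("completed %lu frames\n", q.xmit_pkts);
            return 0;
    }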
432 static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq) in qede_tx_int() argument
439 netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id); in qede_tx_int()
441 hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); in qede_tx_int()
444 while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) { in qede_tx_int()
447 rc = qede_free_tx_pkt(edev, txq, &len); in qede_tx_int()
451 qed_chain_get_cons_idx(&txq->tx_pbl)); in qede_tx_int()
457 txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers; in qede_tx_int()
458 txq->xmit_pkts++; in qede_tx_int()
489 (qed_chain_get_elem_left(&txq->tx_pbl) in qede_tx_int()
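qede_tx_int() frees completed packets and, per the elem_left test on the last hit, wakes a stopped netdev queue only once enough descriptors are free again, which gives the stop/wake logic hysteresis. A sketch of that interplay (threshold and names illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    #define RING_SIZE   64
    #define WAKE_THRESH 18  /* e.g. MAX_SKB_FRAGS plus a few spare BDs */

    struct txq {
            unsigned int prod, cons; /* free-running */
            bool stopped;
    };

    static unsigned int elems_left(const struct txq *q)
    {
            return RING_SIZE - (q->prod - q->cons);
    }

    static void tx_int(struct txq *q, unsigned int completed)
    {
            q->cons += completed;    /* free the completed descriptors */
            if (q->stopped && elems_left(q) >= WAKE_THRESH) {
                    q->stopped = false; /* netif_tx_wake_queue() goes here */
                    printf("queue woken, %u BDs free\n", elems_left(q));
            }
    }

    int main(void)
    {
            struct txq q = { .prod = RING_SIZE, .cons = 0, .stopped = true };

            tx_int(&q, 4);  /* not enough room yet, stays stopped */
            tx_int(&q, 20); /* crosses the threshold, wakes the queue */
            return 0;
    }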
1405 if (qede_txq_has_work(&fp->txq[cos])) in qede_poll_is_more_work()
1430 if (qede_txq_has_work(&fp->txq[cos])) in qede_poll()
1431 qede_tx_int(edev, &fp->txq[cos]); in qede_poll()
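The qede_poll() and qede_poll_is_more_work() hits show the NAPI poll walking one TX queue per traffic class (fp->txq[cos]) and servicing only the queues that actually have pending completions. A trivial model of that per-CoS loop:

    #include <stdio.h>

    #define NUM_TC 4

    struct cos_txq { unsigned int pending; };

    static int txq_has_work(const struct cos_txq *q) { return q->pending != 0; }
    static void tx_int(struct cos_txq *q)            { q->pending = 0; }

    int main(void)
    {
            struct cos_txq txq[NUM_TC] = { {2}, {0}, {5}, {0} };
            int cos;

            for (cos = 0; cos < NUM_TC; cos++)
                    if (txq_has_work(&txq[cos])) {
                            printf("servicing CoS %d\n", cos);
                            tx_int(&txq[cos]);
                    }
            return 0;
    }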
1482 struct qede_tx_queue *txq; in qede_start_xmit() local
1499 txq = QEDE_NDEV_TXQ_ID_TO_TXQ(edev, txq_index); in qede_start_xmit()
1502 WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1)); in qede_start_xmit()
1509 txq->tx_mem_alloc_err++; in qede_start_xmit()
1518 idx = txq->sw_tx_prod; in qede_start_xmit()
1519 txq->sw_tx_ring.skbs[idx].skb = skb; in qede_start_xmit()
1521 qed_chain_produce(&txq->tx_pbl); in qede_start_xmit()
1530 mapping = dma_map_single(txq->dev, skb->data, in qede_start_xmit()
1532 if (unlikely(dma_mapping_error(txq->dev, mapping))) { in qede_start_xmit()
1534 qede_free_failed_tx_pkt(txq, first_bd, 0, false); in qede_start_xmit()
1535 qede_update_tx_producer(txq); in qede_start_xmit()
1546 qed_chain_produce(&txq->tx_pbl); in qede_start_xmit()
1551 qed_chain_produce(&txq->tx_pbl); in qede_start_xmit()
1584 if (unlikely(txq->is_legacy)) in qede_start_xmit()
1641 txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD; in qede_start_xmit()
1651 qede_free_failed_tx_pkt(txq, first_bd, 0, false); in qede_start_xmit()
1652 qede_update_tx_producer(txq); in qede_start_xmit()
1665 rc = map_frag_to_bd(txq, in qede_start_xmit()
1669 qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split); in qede_start_xmit()
1670 qede_update_tx_producer(txq); in qede_start_xmit()
1685 qed_chain_produce(&txq->tx_pbl); in qede_start_xmit()
1689 rc = map_frag_to_bd(txq, in qede_start_xmit()
1693 qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split); in qede_start_xmit()
1694 qede_update_tx_producer(txq); in qede_start_xmit()
1709 txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers; in qede_start_xmit()
1712 txq->tx_db.data.bd_prod = in qede_start_xmit()
1713 cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl)); in qede_start_xmit()
1716 qede_update_tx_producer(txq); in qede_start_xmit()
1718 if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl) in qede_start_xmit()
1721 qede_update_tx_producer(txq); in qede_start_xmit()
1724 txq->stopped_cnt++; in qede_start_xmit()
1733 if ((qed_chain_get_elem_left(&txq->tx_pbl) >= in qede_start_xmit()
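Taken together, the qede_start_xmit() hits trace the transmit skeleton: claim a slot and first BD, map the linear part, map each fragment, unwind on failure (still ringing the doorbell so already-queued packets go out), then update the producer and stop the queue if descriptors run low. A condensed standalone model of that flow, with invented map_part()/start_xmit() names, not the driver's API:

    #include <stdbool.h>
    #include <stdio.h>

    #define RING_SIZE   16
    #define STOP_THRESH 4   /* stop when fewer BDs than a worst-case packet */

    struct txq { unsigned int prod, cons; bool stopped; };

    static unsigned int elems_left(const struct txq *q)
    {
            return RING_SIZE - (q->prod - q->cons);
    }

    static bool map_part(int part) { return part != 2; /* frag 2 "fails" */ }

    static int start_xmit(struct txq *q, int nparts)
    {
            unsigned int first = q->prod;
            int i;

            for (i = 0; i < nparts; i++) {
                    if (!map_part(i)) {
                            q->prod = first; /* unwind: give the BDs back */
                            /* ring the doorbell anyway so prior packets fly */
                            return -1;
                    }
                    q->prod++;               /* one BD per mapped part */
            }
            /* doorbell write would go here (see the producer-update sketch) */
            if (elems_left(q) < STOP_THRESH)
                    q->stopped = true;       /* netif_tx_stop_queue() here */
            return 0;
    }

    int main(void)
    {
            struct txq q = {0};

            printf("xmit ok: %d\n",  start_xmit(&q, 2));
            printf("xmit bad: %d\n", start_xmit(&q, 3)); /* frag 2 fails */
            return 0;
    }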