Lines matching refs: tx_q (idpf Tx path, grouped by the function each match falls in)
195 static int idpf_tx_buf_alloc_all(struct idpf_tx_queue *tx_q) in idpf_tx_buf_alloc_all() argument
204 buf_size = sizeof(struct idpf_tx_buf) * tx_q->desc_count; in idpf_tx_buf_alloc_all()
205 tx_q->tx_buf = kzalloc(buf_size, GFP_KERNEL); in idpf_tx_buf_alloc_all()
206 if (!tx_q->tx_buf) in idpf_tx_buf_alloc_all()
209 if (!idpf_queue_has(FLOW_SCH_EN, tx_q)) in idpf_tx_buf_alloc_all()
212 buf_stack = &tx_q->stash->buf_stack; in idpf_tx_buf_alloc_all()
217 buf_stack->bufs = kcalloc(tx_q->desc_count, sizeof(*buf_stack->bufs), in idpf_tx_buf_alloc_all()
222 buf_stack->size = tx_q->desc_count; in idpf_tx_buf_alloc_all()
223 buf_stack->top = tx_q->desc_count; in idpf_tx_buf_alloc_all()
225 for (i = 0; i < tx_q->desc_count; i++) { in idpf_tx_buf_alloc_all()
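The matches above trace the software-side buffer setup: one tx_buf entry per descriptor, plus a refill stack (stash->buf_stack) that only exists when flow scheduling is enabled and that starts full (size == top == desc_count). Below is a minimal kernel-style sketch of that pattern; the ex_-prefixed names are illustrative stand-ins, not the driver's types, and later sketches reuse this ex_tx_queue with a few extra assumed fields.

/* Illustrative stand-ins; assumes <linux/slab.h> and <linux/skbuff.h>. */
struct ex_tx_buf { struct sk_buff *skb; };

struct ex_buf_stack {
        struct ex_tx_buf **bufs;
        u16 size;
        u16 top;
};

struct ex_tx_queue {
        struct ex_tx_buf *tx_buf;
        struct ex_buf_stack stack;
        u16 desc_count;
        bool flow_sch_en;
        /* later sketches assume further fields (ring, indices, stats, ...) */
};

static int ex_tx_buf_alloc_all(struct ex_tx_queue *txq)
{
        /* One software buffer per descriptor slot. */
        txq->tx_buf = kcalloc(txq->desc_count, sizeof(*txq->tx_buf),
                              GFP_KERNEL);
        if (!txq->tx_buf)
                return -ENOMEM;

        /* The reserve stack is only needed for flow scheduling. */
        if (!txq->flow_sch_en)
                return 0;

        txq->stack.bufs = kcalloc(txq->desc_count, sizeof(*txq->stack.bufs),
                                  GFP_KERNEL);
        if (!txq->stack.bufs)
                return -ENOMEM;

        /* The stack starts full: every slot is available for stashing. */
        txq->stack.size = txq->desc_count;
        txq->stack.top = txq->desc_count;

        return 0;
}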
243 struct idpf_tx_queue *tx_q) in idpf_tx_desc_alloc() argument
245 struct device *dev = tx_q->dev; in idpf_tx_desc_alloc()
248 err = idpf_tx_buf_alloc_all(tx_q); in idpf_tx_desc_alloc()
252 tx_q->size = tx_q->desc_count * sizeof(*tx_q->base_tx); in idpf_tx_desc_alloc()
255 tx_q->size = ALIGN(tx_q->size, 4096); in idpf_tx_desc_alloc()
256 tx_q->desc_ring = dmam_alloc_coherent(dev, tx_q->size, &tx_q->dma, in idpf_tx_desc_alloc()
258 if (!tx_q->desc_ring) { in idpf_tx_desc_alloc()
260 tx_q->size); in idpf_tx_desc_alloc()
265 tx_q->next_to_use = 0; in idpf_tx_desc_alloc()
266 tx_q->next_to_clean = 0; in idpf_tx_desc_alloc()
267 idpf_queue_set(GEN_CHK, tx_q); in idpf_tx_desc_alloc()
272 idpf_tx_desc_rel(tx_q); in idpf_tx_desc_alloc()
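Ring setup itself is captured by the next group: allocate the software buffers, size the descriptor ring from desc_count, round up to 4 KiB, take a device-managed coherent DMA buffer, and unwind through one release helper on any failure. A hedged sketch of that sequence, assuming the illustrative ex_tx_queue above also carries dev, size, dma, desc_ring, base_tx and the two ring indices; ex_tx_desc_rel() stands in for idpf_tx_desc_rel().

static int ex_tx_desc_alloc(struct ex_tx_queue *txq)
{
        struct device *dev = txq->dev;
        int err;

        err = ex_tx_buf_alloc_all(txq);
        if (err)
                goto err_alloc;

        /* One descriptor per slot, rounded up to a 4 KiB boundary. */
        txq->size = ALIGN(txq->desc_count * sizeof(*txq->base_tx), 4096);

        /* Device-managed: freed automatically when the device detaches. */
        txq->desc_ring = dmam_alloc_coherent(dev, txq->size, &txq->dma,
                                             GFP_KERNEL);
        if (!txq->desc_ring) {
                dev_err(dev, "failed to allocate Tx descriptor ring\n");
                err = -ENOMEM;
                goto err_alloc;
        }

        /* A fresh ring starts with both producer and consumer at slot 0. */
        txq->next_to_use = 0;
        txq->next_to_clean = 0;

        return 0;

err_alloc:
        ex_tx_desc_rel(txq);    /* single unwind path, as in the driver */

        return err;
}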
1635 static void idpf_tx_handle_sw_marker(struct idpf_tx_queue *tx_q) in idpf_tx_handle_sw_marker() argument
1637 struct idpf_netdev_priv *priv = netdev_priv(tx_q->netdev); in idpf_tx_handle_sw_marker()
1641 idpf_queue_clear(SW_MARKER, tx_q); in idpf_tx_handle_sw_marker()
1767 static bool idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end, in idpf_tx_splitq_clean() argument
1774 u32 ntc = tx_q->next_to_clean; in idpf_tx_splitq_clean()
1776 .dev = tx_q->dev, in idpf_tx_splitq_clean()
1783 tx_desc = &tx_q->flex_tx[ntc]; in idpf_tx_splitq_clean()
1784 next_pending_desc = &tx_q->flex_tx[end]; in idpf_tx_splitq_clean()
1785 tx_buf = &tx_q->tx_buf[ntc]; in idpf_tx_splitq_clean()
1803 if (IDPF_TX_BUF_RSV_UNUSED(tx_q) < tx_buf->nr_frags) { in idpf_tx_splitq_clean()
1808 idpf_stash_flow_sch_buffers(tx_q, tx_buf); in idpf_tx_splitq_clean()
1811 idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, in idpf_tx_splitq_clean()
1813 idpf_stash_flow_sch_buffers(tx_q, tx_buf); in idpf_tx_splitq_clean()
1820 idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, in idpf_tx_splitq_clean()
1829 idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, tx_desc, tx_buf); in idpf_tx_splitq_clean()
1833 tx_q->next_to_clean = ntc; in idpf_tx_splitq_clean()
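The clean loop walks from next_to_clean to the hardware-reported end, stashing flow-scheduled buffers while the reserve stack has room, and repeatedly bumps its cursor with a wrap back to slot 0. A small sketch of that bump-and-wrap step; the driver does this through idpf_tx_splitq_clean_bump_ntc(), and the pointer-to-pointer form plus the ex_tx_desc/flex_tx names here are just for the sketch.

static void ex_clean_bump_ntc(struct ex_tx_queue *txq, u32 *ntc,
                              struct ex_tx_desc **desc,
                              struct ex_tx_buf **buf)
{
        /* Advance the consumer; wrap all three cursors at the ring end. */
        if (unlikely(++(*ntc) == txq->desc_count)) {
                *ntc = 0;
                *desc = &txq->flex_tx[0];
                *buf = &txq->tx_buf[0];
        } else {
                (*desc)++;
                (*buf)++;
        }
}

Once the walk completes, the updated index is written back to tx_q->next_to_clean, which is the last match in the group.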
1987 struct idpf_tx_queue *tx_q; in idpf_tx_clean_complq() local
2007 tx_q = complq->txq_grp->txqs[rel_tx_qid]; in idpf_tx_clean_complq()
2016 idpf_tx_splitq_clean(tx_q, hw_head, budget, in idpf_tx_clean_complq()
2020 idpf_tx_handle_rs_completion(tx_q, tx_desc, in idpf_tx_clean_complq()
2024 idpf_tx_handle_sw_marker(tx_q); in idpf_tx_clean_complq()
2027 netdev_err(tx_q->netdev, in idpf_tx_clean_complq()
2032 u64_stats_update_begin(&tx_q->stats_sync); in idpf_tx_clean_complq()
2033 u64_stats_add(&tx_q->q_stats.packets, cleaned_stats.packets); in idpf_tx_clean_complq()
2034 u64_stats_add(&tx_q->q_stats.bytes, cleaned_stats.bytes); in idpf_tx_clean_complq()
2035 tx_q->cleaned_pkts += cleaned_stats.packets; in idpf_tx_clean_complq()
2036 tx_q->cleaned_bytes += cleaned_stats.bytes; in idpf_tx_clean_complq()
2038 u64_stats_update_end(&tx_q->stats_sync); in idpf_tx_clean_complq()
2064 struct idpf_tx_queue *tx_q = complq->txq_grp->txqs[i]; in idpf_tx_clean_complq() local
2069 if (!tx_q->cleaned_bytes) in idpf_tx_clean_complq()
2072 *cleaned += tx_q->cleaned_pkts; in idpf_tx_clean_complq()
2075 nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx); in idpf_tx_clean_complq()
2077 dont_wake = !complq_ok || IDPF_TX_BUF_RSV_LOW(tx_q) || in idpf_tx_clean_complq()
2079 !netif_carrier_ok(tx_q->netdev); in idpf_tx_clean_complq()
2081 __netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes, in idpf_tx_clean_complq()
2082 IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH, in idpf_tx_clean_complq()
2088 tx_q->cleaned_bytes = 0; in idpf_tx_clean_complq()
2089 tx_q->cleaned_pkts = 0; in idpf_tx_clean_complq()
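Completion cleaning then folds each batch into the queue's statistics under the u64_stats seqcount and keeps running cleaned_pkts/cleaned_bytes totals; the per-queue loop at the end of the group feeds those totals to __netif_txq_completed_wake() and resets them. A sketch of the accounting step, assuming q_stats holds u64_stats_t counters guarded by a struct u64_stats_sync.

static void ex_record_cleaned(struct ex_tx_queue *txq, u32 pkts, u32 bytes)
{
        u64_stats_update_begin(&txq->stats_sync);
        u64_stats_add(&txq->q_stats.packets, pkts);
        u64_stats_add(&txq->q_stats.bytes, bytes);
        /* Running totals consumed later by __netif_txq_completed_wake(). */
        txq->cleaned_pkts += pkts;
        txq->cleaned_bytes += bytes;
        u64_stats_update_end(&txq->stats_sync);
}

Note how dont_wake in the wake loop combines completion-queue pressure, a low buffer reserve and carrier state, so the stack queue is only restarted when it can actually take more frames.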
2142 static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q, in idpf_tx_maybe_stop_splitq() argument
2145 if (idpf_tx_maybe_stop_common(tx_q, descs_needed)) in idpf_tx_maybe_stop_splitq()
2152 if (unlikely(IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) > in idpf_tx_maybe_stop_splitq()
2153 IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq))) in idpf_tx_maybe_stop_splitq()
2159 if (unlikely(IDPF_TX_BUF_RSV_LOW(tx_q))) in idpf_tx_maybe_stop_splitq()
2165 netif_stop_subqueue(tx_q->netdev, tx_q->idx); in idpf_tx_maybe_stop_splitq()
2168 u64_stats_update_begin(&tx_q->stats_sync); in idpf_tx_maybe_stop_splitq()
2169 u64_stats_inc(&tx_q->q_stats.q_busy); in idpf_tx_maybe_stop_splitq()
2170 u64_stats_update_end(&tx_q->stats_sync); in idpf_tx_maybe_stop_splitq()
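The stop helper checks three resources before admitting a frame (free descriptors, completion-queue headroom, and the buffer reserve) and, when any of them is short, parks the subqueue and counts a q_busy event. A sketch with the three checks left as hypothetical ex_* predicates:

static int ex_maybe_stop_splitq(struct ex_tx_queue *txq, u32 descs_needed)
{
        /* ex_*() predicates are placeholders for the driver's three checks. */
        if (likely(!ex_low_on_descs(txq, descs_needed) &&
                   !ex_complq_near_overflow(txq) &&
                   !ex_buf_reserve_low(txq)))
                return 0;

        /* Out of resources: stop this stack queue until cleaning catches up. */
        netif_stop_subqueue(txq->netdev, txq->idx);

        u64_stats_update_begin(&txq->stats_sync);
        u64_stats_inc(&txq->q_stats.q_busy);
        u64_stats_update_end(&txq->stats_sync);

        return -EBUSY;
}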
2185 void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val, in idpf_tx_buf_hw_update() argument
2190 nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx); in idpf_tx_buf_hw_update()
2191 tx_q->next_to_use = val; in idpf_tx_buf_hw_update()
2193 if (idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED)) { in idpf_tx_buf_hw_update()
2194 u64_stats_update_begin(&tx_q->stats_sync); in idpf_tx_buf_hw_update()
2195 u64_stats_inc(&tx_q->q_stats.q_busy); in idpf_tx_buf_hw_update()
2196 u64_stats_update_end(&tx_q->stats_sync); in idpf_tx_buf_hw_update()
2208 writel(val, tx_q->tail); in idpf_tx_buf_hw_update()
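Tail updates follow the usual xmit_more pattern: publish the new producer index, then only touch the doorbell register when no further frame is promised or the stack queue has been stopped. A sketch, assuming tail is the queue's __iomem doorbell address and netdev/idx identify the stack queue.

static void ex_tx_buf_hw_update(struct ex_tx_queue *txq, u32 val,
                                bool xmit_more)
{
        struct netdev_queue *nq;

        nq = netdev_get_tx_queue(txq->netdev, txq->idx);
        txq->next_to_use = val;

        /* Batch doorbells: only ring when no more frames are coming,
         * or when the queue stopped and nothing else would ring it.
         */
        if (!xmit_more || netif_xmit_stopped(nq))
                writel(val, txq->tail);
}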
2336 static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q, in idpf_tx_splitq_map() argument
2343 u16 i = tx_q->next_to_use; in idpf_tx_splitq_map()
2357 tx_desc = &tx_q->flex_tx[i]; in idpf_tx_splitq_map()
2359 dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE); in idpf_tx_splitq_map()
2365 (tx_q->compl_tag_cur_gen << tx_q->compl_tag_gen_s) | i; in idpf_tx_splitq_map()
2370 if (dma_mapping_error(tx_q->dev, dma)) in idpf_tx_splitq_map()
2371 return idpf_tx_dma_map_error(tx_q, skb, first, i); in idpf_tx_splitq_map()
2429 if (unlikely(++i == tx_q->desc_count)) { in idpf_tx_splitq_map()
2430 tx_buf = tx_q->tx_buf; in idpf_tx_splitq_map()
2431 tx_desc = &tx_q->flex_tx[0]; in idpf_tx_splitq_map()
2433 tx_q->compl_tag_cur_gen = in idpf_tx_splitq_map()
2434 IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q); in idpf_tx_splitq_map()
2476 if (unlikely(++i == tx_q->desc_count)) { in idpf_tx_splitq_map()
2477 tx_buf = tx_q->tx_buf; in idpf_tx_splitq_map()
2478 tx_desc = &tx_q->flex_tx[0]; in idpf_tx_splitq_map()
2480 tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q); in idpf_tx_splitq_map()
2489 dma = skb_frag_dma_map(tx_q->dev, frag, 0, size, in idpf_tx_splitq_map()
2502 i = idpf_tx_splitq_bump_ntu(tx_q, i); in idpf_tx_splitq_map()
2504 tx_q->txq_grp->num_completions_pending++; in idpf_tx_splitq_map()
2507 nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx); in idpf_tx_splitq_map()
2510 idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more()); in idpf_tx_splitq_map()
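The mapping routine is the densest group: DMA-map the linear part with dma_map_single(), each page fragment with skb_frag_dma_map(), bail out through the DMA-error path on failure, and wrap the ring index (adjusting the completion-tag generation in the driver) whenever it reaches desc_count. A reduced sketch of that walk; descriptor writing, the completion tag itself, and unmap-on-error are deliberately left out.

static int ex_map_skb(struct ex_tx_queue *txq, struct sk_buff *skb)
{
        u16 i = txq->next_to_use;
        unsigned int f;
        dma_addr_t dma;

        /* Linear (header) part first. */
        dma = dma_map_single(txq->dev, skb->data, skb_headlen(skb),
                             DMA_TO_DEVICE);
        if (dma_mapping_error(txq->dev, dma))
                return -ENOMEM;    /* driver: idpf_tx_dma_map_error() */

        if (unlikely(++i == txq->desc_count))
                i = 0;    /* ring wrap; the driver also flips the tag gen */

        /* Then every page fragment. */
        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

                dma = skb_frag_dma_map(txq->dev, frag, 0,
                                       skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(txq->dev, dma))
                        return -ENOMEM;

                if (unlikely(++i == txq->desc_count))
                        i = 0;
        }

        txq->next_to_use = i;

        return 0;
}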
2722 netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb) in idpf_tx_drop_skb() argument
2724 u64_stats_update_begin(&tx_q->stats_sync); in idpf_tx_drop_skb()
2725 u64_stats_inc(&tx_q->q_stats.skb_drops); in idpf_tx_drop_skb()
2726 u64_stats_update_end(&tx_q->stats_sync); in idpf_tx_drop_skb()
2728 idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false); in idpf_tx_drop_skb()
2743 struct idpf_tx_queue *tx_q) in idpf_tx_splitq_frame() argument
2750 count = idpf_tx_desc_count_required(tx_q, skb); in idpf_tx_splitq_frame()
2752 return idpf_tx_drop_skb(tx_q, skb); in idpf_tx_splitq_frame()
2756 return idpf_tx_drop_skb(tx_q, skb); in idpf_tx_splitq_frame()
2760 if (idpf_tx_maybe_stop_splitq(tx_q, count)) { in idpf_tx_splitq_frame()
2761 idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false); in idpf_tx_splitq_frame()
2769 idpf_tx_splitq_get_ctx_desc(tx_q); in idpf_tx_splitq_frame()
2782 u64_stats_update_begin(&tx_q->stats_sync); in idpf_tx_splitq_frame()
2783 u64_stats_inc(&tx_q->q_stats.lso_pkts); in idpf_tx_splitq_frame()
2784 u64_stats_update_end(&tx_q->stats_sync); in idpf_tx_splitq_frame()
2788 first = &tx_q->tx_buf[tx_q->next_to_use]; in idpf_tx_splitq_frame()
2800 if (idpf_queue_has(FLOW_SCH_EN, tx_q)) { in idpf_tx_splitq_frame()
2808 if (!(tx_q->next_to_use % IDPF_TX_SPLITQ_RE_MIN_GAP)) { in idpf_tx_splitq_frame()
2810 tx_q->txq_grp->num_completions_pending++; in idpf_tx_splitq_frame()
2824 idpf_tx_splitq_map(tx_q, &tx_params, first); in idpf_tx_splitq_frame()
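Per-frame transmit glue, as the group shows: work out how many descriptors the skb needs, drop or stop early when that cannot be satisfied (ringing the doorbell so already-queued work is not stranded), add a context descriptor plus an lso_pkts count for TSO, then hand off to the mapping routine. A compressed sketch with the ex_* helpers standing in for the driver's own:

static netdev_tx_t ex_tx_splitq_frame(struct sk_buff *skb,
                                      struct ex_tx_queue *txq)
{
        unsigned int count = ex_desc_count_required(txq, skb);

        if (ex_maybe_stop_splitq(txq, count)) {
                /* No room: keep next_to_use, but still update the tail so
                 * descriptors queued by earlier frames reach hardware.
                 */
                ex_tx_buf_hw_update(txq, txq->next_to_use, false);
                return NETDEV_TX_BUSY;
        }

        if (skb_is_gso(skb)) {
                /* TSO consumes an extra context descriptor. */
                ex_fill_tso_ctx_desc(txq, skb);

                u64_stats_update_begin(&txq->stats_sync);
                u64_stats_inc(&txq->q_stats.lso_pkts);
                u64_stats_update_end(&txq->stats_sync);
        }

        /* Maps and posts the data descriptors; error handling omitted. */
        ex_map_skb(txq, skb);

        return NETDEV_TX_OK;
}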
2839 struct idpf_tx_queue *tx_q; in idpf_tx_start() local
2847 tx_q = vport->txqs[skb_get_queue_mapping(skb)]; in idpf_tx_start()
2852 if (skb_put_padto(skb, tx_q->tx_min_pkt_len)) { in idpf_tx_start()
2853 idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false); in idpf_tx_start()
2859 return idpf_tx_splitq_frame(skb, tx_q); in idpf_tx_start()
2861 return idpf_tx_singleq_frame(skb, tx_q); in idpf_tx_start()
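Finally, the entry point picks the Tx queue straight from the skb's queue mapping, pads runt frames up to the queue's tx_min_pkt_len (skb_put_padto() frees the skb itself on failure, hence the plain return), and dispatches to the splitq or singleq frame path. A sketch, with ex_vport, its splitq_en flag and ex_tx_singleq_frame() as illustrative stand-ins:

static netdev_tx_t ex_tx_start(struct sk_buff *skb, struct ex_vport *vport)
{
        struct ex_tx_queue *txq = vport->txqs[skb_get_queue_mapping(skb)];

        /* Hardware requires a minimum frame size; pad short packets. */
        if (skb_put_padto(skb, txq->tx_min_pkt_len)) {
                /* skb already freed; flush anything previously queued. */
                ex_tx_buf_hw_update(txq, txq->next_to_use, false);
                return NETDEV_TX_OK;
        }

        return vport->splitq_en ? ex_tx_splitq_frame(skb, txq) :
                                  ex_tx_singleq_frame(skb, txq);
}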