
1 // SPDX-License-Identifier: GPL-2.0
27 * ice_prgm_fdir_fltr - Program a Flow Director filter
47 return -ENOENT; in ice_prgm_fdir_fltr()
48 tx_ring = vsi->tx_rings[0]; in ice_prgm_fdir_fltr()
49 if (!tx_ring || !tx_ring->desc) in ice_prgm_fdir_fltr()
50 return -ENOENT; in ice_prgm_fdir_fltr()
51 dev = tx_ring->dev; in ice_prgm_fdir_fltr()
54 for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) { in ice_prgm_fdir_fltr()
56 return -EAGAIN; in ice_prgm_fdir_fltr()
64 return -EINVAL; in ice_prgm_fdir_fltr()
67 i = tx_ring->next_to_use; in ice_prgm_fdir_fltr()
68 first = &tx_ring->tx_buf[i]; in ice_prgm_fdir_fltr()
73 i = (i < tx_ring->count) ? i : 0; in ice_prgm_fdir_fltr()
75 tx_buf = &tx_ring->tx_buf[i]; in ice_prgm_fdir_fltr()
78 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in ice_prgm_fdir_fltr()
84 tx_desc->buf_addr = cpu_to_le64(dma); in ice_prgm_fdir_fltr()
88 tx_buf->type = ICE_TX_BUF_DUMMY; in ice_prgm_fdir_fltr()
89 tx_buf->raw_buf = raw_packet; in ice_prgm_fdir_fltr()
91 tx_desc->cmd_type_offset_bsz = in ice_prgm_fdir_fltr()
100 first->next_to_watch = tx_desc; in ice_prgm_fdir_fltr()
102 writel(tx_ring->next_to_use, tx_ring->tail); in ice_prgm_fdir_fltr()
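
ice_prgm_fdir_fltr() only posts the filter once ICE_DESC_UNUSED() reports at least two free slots, then advances next_to_use with wrap-around and writes it to the tail register. A minimal standalone model of that ring-occupancy arithmetic (field names mirror the driver, but this is an illustrative sketch, not the driver code):

#include <stdio.h>

struct ring {
	unsigned int count;		/* number of descriptors in the ring */
	unsigned int next_to_use;	/* producer index                    */
	unsigned int next_to_clean;	/* consumer index                    */
};

/* Free descriptors, minus one so the producer never catches the consumer. */
static unsigned int ring_unused(const struct ring *r)
{
	return (r->next_to_clean > r->next_to_use ? 0 : r->count) +
	       r->next_to_clean - r->next_to_use - 1;
}

int main(void)
{
	struct ring r = { .count = 64, .next_to_use = 62, .next_to_clean = 3 };

	/* 3 + 64 - 62 - 1 = 4 descriptors can still be posted */
	printf("unused = %u\n", ring_unused(&r));

	/* advance the producer with wrap-around, as the driver does */
	r.next_to_use = (r.next_to_use + 1 < r.count) ? r.next_to_use + 1 : 0;
	printf("next_to_use = %u\n", r.next_to_use);
	return 0;
}
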
108 * ice_unmap_and_free_tx_buf - Release a Tx buffer
116 dma_unmap_page(ring->dev, in ice_unmap_and_free_tx_buf()
121 switch (tx_buf->type) { in ice_unmap_and_free_tx_buf()
123 devm_kfree(ring->dev, tx_buf->raw_buf); in ice_unmap_and_free_tx_buf()
126 dev_kfree_skb_any(tx_buf->skb); in ice_unmap_and_free_tx_buf()
129 page_frag_free(tx_buf->raw_buf); in ice_unmap_and_free_tx_buf()
132 xdp_return_frame(tx_buf->xdpf); in ice_unmap_and_free_tx_buf()
136 tx_buf->next_to_watch = NULL; in ice_unmap_and_free_tx_buf()
137 tx_buf->type = ICE_TX_BUF_EMPTY; in ice_unmap_and_free_tx_buf()
144 return netdev_get_tx_queue(ring->netdev, ring->q_index); in txring_txq()
148 * ice_clean_tx_ring - Free any empty Tx buffers
156 if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) { in ice_clean_tx_ring()
162 if (!tx_ring->tx_buf) in ice_clean_tx_ring()
166 for (i = 0; i < tx_ring->count; i++) in ice_clean_tx_ring()
167 ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]); in ice_clean_tx_ring()
170 memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count); in ice_clean_tx_ring()
172 size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc), in ice_clean_tx_ring()
175 memset(tx_ring->desc, 0, size); in ice_clean_tx_ring()
177 tx_ring->next_to_use = 0; in ice_clean_tx_ring()
178 tx_ring->next_to_clean = 0; in ice_clean_tx_ring()
180 if (!tx_ring->netdev) in ice_clean_tx_ring()
188 * ice_free_tx_ring - Free Tx resources per queue
198 devm_kfree(tx_ring->dev, tx_ring->tx_buf); in ice_free_tx_ring()
199 tx_ring->tx_buf = NULL; in ice_free_tx_ring()
201 if (tx_ring->desc) { in ice_free_tx_ring()
202 size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc), in ice_free_tx_ring()
204 dmam_free_coherent(tx_ring->dev, size, in ice_free_tx_ring()
205 tx_ring->desc, tx_ring->dma); in ice_free_tx_ring()
206 tx_ring->desc = NULL; in ice_free_tx_ring()
211 * ice_clean_tx_irq - Reclaim resources after transmit completes
221 struct ice_vsi *vsi = tx_ring->vsi; in ice_clean_tx_irq()
222 s16 i = tx_ring->next_to_clean; in ice_clean_tx_irq()
229 tx_buf = &tx_ring->tx_buf[i]; in ice_clean_tx_irq()
231 i -= tx_ring->count; in ice_clean_tx_irq()
233 prefetch(&vsi->state); in ice_clean_tx_irq()
236 struct ice_tx_desc *eop_desc = tx_buf->next_to_watch; in ice_clean_tx_irq()
243 prefetchw(&tx_buf->skb->users); in ice_clean_tx_irq()
249 if (!(eop_desc->cmd_type_offset_bsz & in ice_clean_tx_irq()
254 tx_buf->next_to_watch = NULL; in ice_clean_tx_irq()
257 total_bytes += tx_buf->bytecount; in ice_clean_tx_irq()
258 total_pkts += tx_buf->gso_segs; in ice_clean_tx_irq()
261 napi_consume_skb(tx_buf->skb, napi_budget); in ice_clean_tx_irq()
264 dma_unmap_single(tx_ring->dev, in ice_clean_tx_irq()
270 tx_buf->type = ICE_TX_BUF_EMPTY; in ice_clean_tx_irq()
280 i -= tx_ring->count; in ice_clean_tx_irq()
281 tx_buf = tx_ring->tx_buf; in ice_clean_tx_irq()
287 dma_unmap_page(tx_ring->dev, in ice_clean_tx_irq()
301 i -= tx_ring->count; in ice_clean_tx_irq()
302 tx_buf = tx_ring->tx_buf; in ice_clean_tx_irq()
309 budget--; in ice_clean_tx_irq()
312 i += tx_ring->count; in ice_clean_tx_irq()
313 tx_ring->next_to_clean = i; in ice_clean_tx_irq()
319 if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) && in ice_clean_tx_irq()
326 !test_bit(ICE_VSI_DOWN, vsi->state)) { in ice_clean_tx_irq()
328 ++tx_ring->ring_stats->tx_stats.restart_q; in ice_clean_tx_irq()
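
The cleanup loops in ice_clean_tx_irq() (and in ice_clean_ctrl_tx_irq() near the end of the file) bias the running index by -count up front, so the wrap check becomes a cheap test against zero instead of a compare with the ring size. A small standalone trace of that bookkeeping with toy values:

/* Illustrative sketch of the biased ring index, not the driver code. */
#include <stdio.h>

int main(void)
{
	const int count = 8;
	int ntc = 6;		/* next_to_clean as stored in the ring */
	int i = ntc - count;	/* biased copy: runs from -count to -1 */
	int cleaned = 0;

	while (cleaned < 5) {	/* pretend five descriptors completed  */
		cleaned++;
		i++;
		if (!i)		/* zero means "one past the last slot" */
			i -= count;
	}

	ntc = i + count;	/* un-bias before writing it back      */
	printf("next_to_clean = %d\n", ntc);	/* (6 + 5) %% 8 = 3    */
	return 0;
}
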
336 * ice_setup_tx_ring - Allocate the Tx descriptors
343 struct device *dev = tx_ring->dev; in ice_setup_tx_ring()
347 return -ENOMEM; in ice_setup_tx_ring()
350 WARN_ON(tx_ring->tx_buf); in ice_setup_tx_ring()
351 tx_ring->tx_buf = in ice_setup_tx_ring()
352 devm_kcalloc(dev, sizeof(*tx_ring->tx_buf), tx_ring->count, in ice_setup_tx_ring()
354 if (!tx_ring->tx_buf) in ice_setup_tx_ring()
355 return -ENOMEM; in ice_setup_tx_ring()
358 size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc), in ice_setup_tx_ring()
360 tx_ring->desc = dmam_alloc_coherent(dev, size, &tx_ring->dma, in ice_setup_tx_ring()
362 if (!tx_ring->desc) { in ice_setup_tx_ring()
368 tx_ring->next_to_use = 0; in ice_setup_tx_ring()
369 tx_ring->next_to_clean = 0; in ice_setup_tx_ring()
370 tx_ring->ring_stats->tx_stats.prev_pkt = -1; in ice_setup_tx_ring()
374 devm_kfree(dev, tx_ring->tx_buf); in ice_setup_tx_ring()
375 tx_ring->tx_buf = NULL; in ice_setup_tx_ring()
376 return -ENOMEM; in ice_setup_tx_ring()
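
The descriptor area is sized as the descriptor count times the descriptor size, rounded up to a page-sized boundary before dmam_alloc_coherent(); the Rx setup and the free paths repeat the same computation. A worked example of that rounding, with a 4 KiB boundary and a 16-byte Tx descriptor assumed for illustration:

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long desc_sz = 16;	/* assumed Tx descriptor size */
	unsigned long counts[] = { 64, 512, 1000 };

	for (int i = 0; i < 3; i++) {
		unsigned long raw = counts[i] * desc_sz;
		unsigned long aligned = ALIGN_UP(raw, 4096);

		/* 1024 -> 4096, 8192 -> 8192, 16000 -> 16384 bytes */
		printf("count %4lu: %5lu -> %5lu bytes\n",
		       counts[i], raw, aligned);
	}
	return 0;
}
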
380 * ice_clean_rx_ring - Free Rx buffers
385 struct xdp_buff *xdp = &rx_ring->xdp; in ice_clean_rx_ring()
386 struct device *dev = rx_ring->dev; in ice_clean_rx_ring()
391 if (!rx_ring->rx_buf) in ice_clean_rx_ring()
394 if (rx_ring->xsk_pool) { in ice_clean_rx_ring()
399 if (xdp->data) { in ice_clean_rx_ring()
401 xdp->data = NULL; in ice_clean_rx_ring()
405 for (i = 0; i < rx_ring->count; i++) { in ice_clean_rx_ring()
406 struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i]; in ice_clean_rx_ring()
408 if (!rx_buf->page) in ice_clean_rx_ring()
414 dma_sync_single_range_for_cpu(dev, rx_buf->dma, in ice_clean_rx_ring()
415 rx_buf->page_offset, in ice_clean_rx_ring()
416 rx_ring->rx_buf_len, in ice_clean_rx_ring()
420 dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring), in ice_clean_rx_ring()
422 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias); in ice_clean_rx_ring()
424 rx_buf->page = NULL; in ice_clean_rx_ring()
425 rx_buf->page_offset = 0; in ice_clean_rx_ring()
429 if (rx_ring->xsk_pool) in ice_clean_rx_ring()
430 memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf))); in ice_clean_rx_ring()
432 memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf))); in ice_clean_rx_ring()
435 size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), in ice_clean_rx_ring()
437 memset(rx_ring->desc, 0, size); in ice_clean_rx_ring()
439 rx_ring->next_to_alloc = 0; in ice_clean_rx_ring()
440 rx_ring->next_to_clean = 0; in ice_clean_rx_ring()
441 rx_ring->first_desc = 0; in ice_clean_rx_ring()
442 rx_ring->next_to_use = 0; in ice_clean_rx_ring()
446 * ice_free_rx_ring - Free Rx resources
456 if (rx_ring->vsi->type == ICE_VSI_PF) in ice_free_rx_ring()
457 if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) in ice_free_rx_ring()
458 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in ice_free_rx_ring()
459 WRITE_ONCE(rx_ring->xdp_prog, NULL); in ice_free_rx_ring()
460 if (rx_ring->xsk_pool) { in ice_free_rx_ring()
461 kfree(rx_ring->xdp_buf); in ice_free_rx_ring()
462 rx_ring->xdp_buf = NULL; in ice_free_rx_ring()
464 kfree(rx_ring->rx_buf); in ice_free_rx_ring()
465 rx_ring->rx_buf = NULL; in ice_free_rx_ring()
468 if (rx_ring->desc) { in ice_free_rx_ring()
469 size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), in ice_free_rx_ring()
471 dmam_free_coherent(rx_ring->dev, size, in ice_free_rx_ring()
472 rx_ring->desc, rx_ring->dma); in ice_free_rx_ring()
473 rx_ring->desc = NULL; in ice_free_rx_ring()
478 * ice_setup_rx_ring - Allocate the Rx descriptors
485 struct device *dev = rx_ring->dev; in ice_setup_rx_ring()
489 return -ENOMEM; in ice_setup_rx_ring()
492 WARN_ON(rx_ring->rx_buf); in ice_setup_rx_ring()
493 rx_ring->rx_buf = in ice_setup_rx_ring()
494 kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL); in ice_setup_rx_ring()
495 if (!rx_ring->rx_buf) in ice_setup_rx_ring()
496 return -ENOMEM; in ice_setup_rx_ring()
499 size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), in ice_setup_rx_ring()
501 rx_ring->desc = dmam_alloc_coherent(dev, size, &rx_ring->dma, in ice_setup_rx_ring()
503 if (!rx_ring->desc) { in ice_setup_rx_ring()
509 rx_ring->next_to_use = 0; in ice_setup_rx_ring()
510 rx_ring->next_to_clean = 0; in ice_setup_rx_ring()
511 rx_ring->first_desc = 0; in ice_setup_rx_ring()
513 if (ice_is_xdp_ena_vsi(rx_ring->vsi)) in ice_setup_rx_ring()
514 WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog); in ice_setup_rx_ring()
519 kfree(rx_ring->rx_buf); in ice_setup_rx_ring()
520 rx_ring->rx_buf = NULL; in ice_setup_rx_ring()
521 return -ENOMEM; in ice_setup_rx_ring()
525 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
554 spin_lock(&xdp_ring->tx_lock); in ice_run_xdp()
557 spin_unlock(&xdp_ring->tx_lock); in ice_run_xdp()
562 if (xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog)) in ice_run_xdp()
567 bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); in ice_run_xdp()
571 trace_xdp_exception(rx_ring->netdev, xdp_prog, act); in ice_run_xdp()
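
The lines above are the usual XDP verdict handling: pass the frame to the stack, transmit it on the XDP Tx ring (taking the ring lock when rings are shared across CPUs), redirect it, or drop it, with unknown verdicts warned about and treated like aborted ones. A standalone model of that dispatch with stub actions, not the driver code:

#include <stdio.h>

enum xdp_verdict { XDP_ABORTED, XDP_DROP, XDP_PASS, XDP_TX, XDP_REDIRECT };

static unsigned int handle_verdict(enum xdp_verdict act)
{
	switch (act) {
	case XDP_PASS:
		return 0;	/* build an skb and hand it to the stack   */
	case XDP_TX:
		/* lock the XDP Tx ring if it is shared, queue the frame   */
		return 1;
	case XDP_REDIRECT:
		/* xdp_do_redirect(); the flush happens at end of the poll */
		return 2;
	default:
		/* bpf_warn_invalid_xdp_action(), then fall through        */
	case XDP_ABORTED:
		/* trace_xdp_exception(), then fall through                */
	case XDP_DROP:
		return 3;	/* recycle the Rx buffer                   */
	}
}

int main(void)
{
	printf("XDP_TX handled as %u\n", handle_verdict(XDP_TX));
	return 0;
}
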
581 * ice_xmit_xdp_ring - submit frame to XDP ring for transmission
591 xdp.data = xdpf->data; in ice_xmit_xdp_ring()
592 xdp.data_end = xdp.data + xdpf->len; in ice_xmit_xdp_ring()
593 xdp.frame_sz = xdpf->frame_sz; in ice_xmit_xdp_ring()
594 xdp.flags = xdpf->flags; in ice_xmit_xdp_ring()
600 * ice_xdp_xmit - submit packets to XDP ring for transmission
608 * For error cases, a negative errno code is returned and no-frames
617 struct ice_vsi *vsi = np->vsi; in ice_xdp_xmit()
622 if (test_bit(ICE_VSI_DOWN, vsi->state)) in ice_xdp_xmit()
623 return -ENETDOWN; in ice_xdp_xmit()
626 return -ENXIO; in ice_xdp_xmit()
629 return -EINVAL; in ice_xdp_xmit()
632 queue_index %= vsi->num_xdp_txq; in ice_xdp_xmit()
633 xdp_ring = vsi->xdp_rings[queue_index]; in ice_xdp_xmit()
634 spin_lock(&xdp_ring->tx_lock); in ice_xdp_xmit()
637 if (unlikely(queue_index >= vsi->num_xdp_txq)) in ice_xdp_xmit()
638 return -ENXIO; in ice_xdp_xmit()
639 xdp_ring = vsi->xdp_rings[queue_index]; in ice_xdp_xmit()
642 tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use]; in ice_xdp_xmit()
653 tx_buf->rs_idx = ice_set_rs_bit(xdp_ring); in ice_xdp_xmit()
658 spin_unlock(&xdp_ring->tx_lock); in ice_xdp_xmit()
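
The queue choice in ice_xdp_xmit() appears in both branches above: with one XDP ring per CPU the sending CPU maps directly to a ring, while with fewer rings than CPUs the CPU id is folded onto a ring with a modulo and the ring's tx_lock serializes access. A tiny illustration of that mapping with made-up counts:

#include <stdio.h>

int main(void)
{
	unsigned int num_xdp_txq = 4;	/* rings available for XDP */

	for (unsigned int cpu = 0; cpu < 6; cpu++) {
		unsigned int queue_index = cpu % num_xdp_txq;

		/* CPUs 4 and 5 share rings 0 and 1, so locking is needed */
		printf("cpu %u -> xdp ring %u\n", cpu, queue_index);
	}
	return 0;
}
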
664 * ice_alloc_mapped_page - recycle or make a new page
674 struct page *page = bi->page; in ice_alloc_mapped_page()
684 rx_ring->ring_stats->rx_stats.alloc_page_failed++; in ice_alloc_mapped_page()
689 dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring), in ice_alloc_mapped_page()
695 if (dma_mapping_error(rx_ring->dev, dma)) { in ice_alloc_mapped_page()
697 rx_ring->ring_stats->rx_stats.alloc_page_failed++; in ice_alloc_mapped_page()
701 bi->dma = dma; in ice_alloc_mapped_page()
702 bi->page = page; in ice_alloc_mapped_page()
703 bi->page_offset = rx_ring->rx_offset; in ice_alloc_mapped_page()
704 page_ref_add(page, USHRT_MAX - 1); in ice_alloc_mapped_page()
705 bi->pagecnt_bias = USHRT_MAX; in ice_alloc_mapped_page()
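
The page_ref_add(page, USHRT_MAX - 1) / pagecnt_bias pairing stocks up on page references once, so the hot path can hand a reference to the stack by decrementing a local counter instead of touching the atomic page refcount; ice_can_reuse_rx_page() later treats a count-minus-bias difference larger than one as "someone else still holds this page". A standalone trace of that accounting with plain integers (not the kernel page APIs):

#include <stdio.h>
#include <limits.h>

int main(void)
{
	unsigned int page_count = 1;	/* fresh page from the allocator      */
	unsigned int pagecnt_bias;	/* references still owned by the ring */
	unsigned int snapshot;

	/* ice_alloc_mapped_page(): take a large batch of references once. */
	page_count += USHRT_MAX - 1;	/* page_ref_add(page, USHRT_MAX - 1) */
	pagecnt_bias = USHRT_MAX;

	/* Frame 1: ice_get_rx_buf() snapshots the count, then spends one
	 * reference by decrementing the bias; a difference of exactly one
	 * is the reference just handed out, so the page may be reused.
	 */
	snapshot = page_count;
	pagecnt_bias--;
	printf("frame 1 reusable: %d\n", snapshot - pagecnt_bias <= 1);

	/* Frame 2, while the stack still holds frame 1's reference: the
	 * difference grows to two and the page is not reused.
	 */
	snapshot = page_count;
	pagecnt_bias--;
	printf("frame 2 reusable: %d\n", snapshot - pagecnt_bias <= 1);
	return 0;
}
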
711 * ice_alloc_rx_bufs - Replace used receive buffers
726 u16 ntu = rx_ring->next_to_use; in ice_alloc_rx_bufs()
730 if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) || in ice_alloc_rx_bufs()
736 bi = &rx_ring->rx_buf[ntu]; in ice_alloc_rx_bufs()
744 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, in ice_alloc_rx_bufs()
745 bi->page_offset, in ice_alloc_rx_bufs()
746 rx_ring->rx_buf_len, in ice_alloc_rx_bufs()
750 * because each write-back erases this info. in ice_alloc_rx_bufs()
752 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); in ice_alloc_rx_bufs()
757 if (unlikely(ntu == rx_ring->count)) { in ice_alloc_rx_bufs()
759 bi = rx_ring->rx_buf; in ice_alloc_rx_bufs()
764 rx_desc->wb.status_error0 = 0; in ice_alloc_rx_bufs()
766 cleaned_count--; in ice_alloc_rx_bufs()
769 if (rx_ring->next_to_use != ntu) in ice_alloc_rx_bufs()
776 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
790 rx_buf->page_offset ^= size; in ice_rx_buf_adjust_pg_offset()
793 rx_buf->page_offset += size; in ice_rx_buf_adjust_pg_offset()
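
The two branches above correspond to page size: with 4 KiB pages each page is split into two buffers and the offset is toggled with an XOR, while with larger pages the offset simply advances so several buffers fit per page. A tiny worked example with assumed buffer sizes:

#include <stdio.h>

int main(void)
{
	unsigned int truesize = 2048;
	unsigned int offset = 0;

	offset ^= truesize;	/* 0    -> 2048: second half of the page */
	offset ^= truesize;	/* 2048 -> 0:    back to the first half  */
	printf("offset after two flips: %u\n", offset);

	/* >= 8 KiB pages: several buffers per page, so just move forward */
	unsigned int big_offset = 0, rx_buf_len = 3072;

	big_offset += rx_buf_len;	/* 0 -> 3072, then 6144, ...     */
	printf("large-page offset: %u\n", big_offset);
	return 0;
}
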
798 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
809 unsigned int pagecnt_bias = rx_buf->pagecnt_bias; in ice_can_reuse_rx_page()
810 struct page *page = rx_buf->page; in ice_can_reuse_rx_page()
812 /* avoid re-using remote and pfmemalloc pages */ in ice_can_reuse_rx_page()
817 if (unlikely(rx_buf->pgcnt - pagecnt_bias > 1)) in ice_can_reuse_rx_page()
821 (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_3072) in ice_can_reuse_rx_page()
822 if (rx_buf->page_offset > ICE_LAST_OFFSET) in ice_can_reuse_rx_page()
831 page_ref_add(page, USHRT_MAX - 1); in ice_can_reuse_rx_page()
832 rx_buf->pagecnt_bias = USHRT_MAX; in ice_can_reuse_rx_page()
839 * ice_add_xdp_frag - Add contents of Rx buffer to xdp buf as a frag
845 * This function will add the data contained in rx_buf->page to the xdp buf.
858 sinfo->nr_frags = 0; in ice_add_xdp_frag()
859 sinfo->xdp_frags_size = 0; in ice_add_xdp_frag()
863 if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) { in ice_add_xdp_frag()
865 return -ENOMEM; in ice_add_xdp_frag()
868 __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page, in ice_add_xdp_frag()
869 rx_buf->page_offset, size); in ice_add_xdp_frag()
870 sinfo->xdp_frags_size += size; in ice_add_xdp_frag()
874 rx_ring->nr_frags = sinfo->nr_frags; in ice_add_xdp_frag()
876 if (page_is_pfmemalloc(rx_buf->page)) in ice_add_xdp_frag()
883 * ice_reuse_rx_page - page flip buffer and store it back on the ring
892 u16 nta = rx_ring->next_to_alloc; in ice_reuse_rx_page()
895 new_buf = &rx_ring->rx_buf[nta]; in ice_reuse_rx_page()
899 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in ice_reuse_rx_page()
905 new_buf->dma = old_buf->dma; in ice_reuse_rx_page()
906 new_buf->page = old_buf->page; in ice_reuse_rx_page()
907 new_buf->page_offset = old_buf->page_offset; in ice_reuse_rx_page()
908 new_buf->pagecnt_bias = old_buf->pagecnt_bias; in ice_reuse_rx_page()
912 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
926 rx_buf = &rx_ring->rx_buf[ntc]; in ice_get_rx_buf()
927 rx_buf->pgcnt = page_count(rx_buf->page); in ice_get_rx_buf()
928 prefetchw(rx_buf->page); in ice_get_rx_buf()
933 dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, in ice_get_rx_buf()
934 rx_buf->page_offset, size, in ice_get_rx_buf()
938 rx_buf->pagecnt_bias--; in ice_get_rx_buf()
944 * ice_build_skb - Build skb around an existing buffer
955 u8 metasize = xdp->data - xdp->data_meta; in ice_build_skb()
962 nr_frags = sinfo->nr_frags; in ice_build_skb()
965 /* Prefetch first cache line of first page. If xdp->data_meta in ice_build_skb()
966 * is unused, this points exactly as xdp->data, otherwise we in ice_build_skb()
970 net_prefetch(xdp->data_meta); in ice_build_skb()
972 skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz); in ice_build_skb()
979 skb_record_rx_queue(skb, rx_ring->q_index); in ice_build_skb()
982 skb_reserve(skb, xdp->data - xdp->data_hard_start); in ice_build_skb()
983 __skb_put(skb, xdp->data_end - xdp->data); in ice_build_skb()
989 sinfo->xdp_frags_size, in ice_build_skb()
990 nr_frags * xdp->frame_sz, in ice_build_skb()
997 * ice_construct_skb - Allocate skb and populate it
1008 unsigned int size = xdp->data_end - xdp->data; in ice_construct_skb()
1016 net_prefetch(xdp->data); in ice_construct_skb()
1020 nr_frags = sinfo->nr_frags; in ice_construct_skb()
1024 skb = napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE); in ice_construct_skb()
1028 rx_buf = &rx_ring->rx_buf[rx_ring->first_desc]; in ice_construct_skb()
1029 skb_record_rx_queue(skb, rx_ring->q_index); in ice_construct_skb()
1033 headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE); in ice_construct_skb()
1036 memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, in ice_construct_skb()
1040 size -= headlen; in ice_construct_skb()
1046 if (unlikely(nr_frags >= MAX_SKB_FRAGS - 1)) { in ice_construct_skb()
1050 skb_add_rx_frag(skb, 0, rx_buf->page, in ice_construct_skb()
1051 rx_buf->page_offset + headlen, size, in ice_construct_skb()
1052 xdp->frame_sz); in ice_construct_skb()
1057 * as-is in ice_construct_skb()
1059 rx_buf->act = ICE_SKB_CONSUMED; in ice_construct_skb()
1065 memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0], in ice_construct_skb()
1068 xdp_update_skb_shared_info(skb, skinfo->nr_frags + nr_frags, in ice_construct_skb()
1069 sinfo->xdp_frags_size, in ice_construct_skb()
1070 nr_frags * xdp->frame_sz, in ice_construct_skb()
1078 * ice_put_rx_buf - Clean up used buffer and either recycle or free
1096 dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, in ice_put_rx_buf()
1099 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias); in ice_put_rx_buf()
1103 rx_buf->page = NULL; in ice_put_rx_buf()
1107 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
1121 unsigned int offset = rx_ring->rx_offset; in ice_clean_rx_irq()
1122 struct xdp_buff *xdp = &rx_ring->xdp; in ice_clean_rx_irq()
1123 u32 cached_ntc = rx_ring->first_desc; in ice_clean_rx_irq()
1126 u32 ntc = rx_ring->next_to_clean; in ice_clean_rx_irq()
1127 u32 cnt = rx_ring->count; in ice_clean_rx_irq()
1133 xdp_prog = READ_ONCE(rx_ring->xdp_prog); in ice_clean_rx_irq()
1135 xdp_ring = rx_ring->xdp_ring; in ice_clean_rx_irq()
1136 cached_ntu = xdp_ring->next_to_use; in ice_clean_rx_irq()
1154 * hardware wrote DD then it will be non-zero in ice_clean_rx_irq()
1157 if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits)) in ice_clean_rx_irq()
1167 if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) { in ice_clean_rx_irq()
1168 struct ice_vsi *ctrl_vsi = rx_ring->vsi; in ice_clean_rx_irq()
1170 if (rx_desc->wb.rxdid == FDIR_DESC_RXDID && in ice_clean_rx_irq()
1171 ctrl_vsi->vf) in ice_clean_rx_irq()
1175 rx_ring->first_desc = ntc; in ice_clean_rx_irq()
1179 size = le16_to_cpu(rx_desc->wb.pkt_len) & in ice_clean_rx_irq()
1185 if (!xdp->data) { in ice_clean_rx_irq()
1188 hard_start = page_address(rx_buf->page) + rx_buf->page_offset - in ice_clean_rx_irq()
1203 if (rx_buf->act == ICE_XDP_PASS) in ice_clean_rx_irq()
1208 xdp->data = NULL; in ice_clean_rx_irq()
1209 rx_ring->first_desc = ntc; in ice_clean_rx_irq()
1210 rx_ring->nr_frags = 0; in ice_clean_rx_irq()
1219 rx_ring->ring_stats->rx_stats.alloc_page_failed++; in ice_clean_rx_irq()
1220 rx_buf->act = ICE_XDP_CONSUMED; in ice_clean_rx_irq()
1224 xdp->data = NULL; in ice_clean_rx_irq()
1225 rx_ring->first_desc = ntc; in ice_clean_rx_irq()
1226 rx_ring->nr_frags = 0; in ice_clean_rx_irq()
1229 xdp->data = NULL; in ice_clean_rx_irq()
1230 rx_ring->first_desc = ntc; in ice_clean_rx_irq()
1231 rx_ring->nr_frags = 0; in ice_clean_rx_irq()
1234 if (unlikely(ice_test_staterr(rx_desc->wb.status_error0, in ice_clean_rx_irq()
1247 total_rx_bytes += skb->len; in ice_clean_rx_irq()
1260 first = rx_ring->first_desc; in ice_clean_rx_irq()
1262 struct ice_rx_buf *buf = &rx_ring->rx_buf[cached_ntc]; in ice_clean_rx_irq()
1264 if (buf->act & (ICE_XDP_TX | ICE_XDP_REDIR)) { in ice_clean_rx_irq()
1265 ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz); in ice_clean_rx_irq()
1266 xdp_xmit |= buf->act; in ice_clean_rx_irq()
1267 } else if (buf->act & ICE_XDP_CONSUMED) { in ice_clean_rx_irq()
1268 buf->pagecnt_bias++; in ice_clean_rx_irq()
1269 } else if (buf->act == ICE_XDP_PASS) { in ice_clean_rx_irq()
1270 ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz); in ice_clean_rx_irq()
1277 rx_ring->next_to_clean = ntc; in ice_clean_rx_irq()
1284 if (rx_ring->ring_stats) in ice_clean_rx_irq()
1305 ring_stats = tx_ring->ring_stats; in __ice_update_sample()
1308 packets += ring_stats->stats.pkts; in __ice_update_sample()
1309 bytes += ring_stats->stats.bytes; in __ice_update_sample()
1317 ring_stats = rx_ring->ring_stats; in __ice_update_sample()
1320 packets += ring_stats->stats.pkts; in __ice_update_sample()
1321 bytes += ring_stats->stats.bytes; in __ice_update_sample()
1325 dim_update_sample(q_vector->total_events, packets, bytes, sample); in __ice_update_sample()
1326 sample->comp_ctr = 0; in __ice_update_sample()
1333 if (ktime_ms_delta(sample->time, rc->dim.start_sample.time) >= 1000) in __ice_update_sample()
1334 rc->dim.state = DIM_START_MEASURE; in __ice_update_sample()
1338 * ice_net_dim - Update net DIM algorithm
1344 * This function is a no-op if the ring is not configured to dynamic ITR.
1348 struct ice_ring_container *tx = &q_vector->tx; in ice_net_dim()
1349 struct ice_ring_container *rx = &q_vector->rx; in ice_net_dim()
1355 net_dim(&tx->dim, dim_sample); in ice_net_dim()
1362 net_dim(&rx->dim, dim_sample); in ice_net_dim()
1367 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
1375 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this in ice_buildreg_itr()
1384 (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)); in ice_buildreg_itr()
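
The comment above describes the trick: the ITR value arrives in microseconds, the hardware field counts 2-microsecond ticks, so one left shift by (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S) both divides by the granularity and places the value in the register field. A worked example; the field offsets below are assumptions for illustration:

#include <stdio.h>

#define GLINT_DYN_CTL_INTERVAL_S	5	/* assumed field offset */
#define ICE_ITR_GRAN_S			1	/* 2-usec granularity   */

int main(void)
{
	unsigned int itr_usecs = 50;	/* desired throttle interval */

	unsigned int combined = itr_usecs << (GLINT_DYN_CTL_INTERVAL_S -
					      ICE_ITR_GRAN_S);
	unsigned int two_step = (itr_usecs >> ICE_ITR_GRAN_S) <<
				GLINT_DYN_CTL_INTERVAL_S;

	/* both print 800: 50 us = 25 hardware ticks placed at bit 5 */
	printf("combined %u, two-step %u\n", combined, two_step);
	return 0;
}
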
1388 * ice_enable_interrupt - re-enable MSI-X interrupt
1391 * If the VSI is down, the interrupt will not be re-enabled. Also,
1397 struct ice_vsi *vsi = q_vector->vsi; in ice_enable_interrupt()
1398 bool wb_en = q_vector->wb_on_itr; in ice_enable_interrupt()
1401 if (test_bit(ICE_DOWN, vsi->state)) in ice_enable_interrupt()
1412 q_vector->wb_on_itr = false; in ice_enable_interrupt()
1425 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val); in ice_enable_interrupt()
1429 * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
1432 * We need to tell hardware to write-back completed descriptors even when
1438 * This sets the write-back frequency to whatever was set previously for the
1444 struct ice_vsi *vsi = q_vector->vsi; in ice_set_wb_on_itr()
1447 if (q_vector->wb_on_itr) in ice_set_wb_on_itr()
1452 * be static in non-adaptive mode (user configured) in ice_set_wb_on_itr()
1454 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), in ice_set_wb_on_itr()
1459 q_vector->wb_on_itr = true; in ice_set_wb_on_itr()
1463 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
1484 ice_for_each_tx_ring(tx_ring, q_vector->tx) { in ice_napi_poll()
1485 struct xsk_buff_pool *xsk_pool = READ_ONCE(tx_ring->xsk_pool); in ice_napi_poll()
1504 if (unlikely(q_vector->num_ring_rx > 1)) in ice_napi_poll()
1509 budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1); in ice_napi_poll()
1514 ice_for_each_rx_ring(rx_ring, q_vector->rx) { in ice_napi_poll()
1515 struct xsk_buff_pool *xsk_pool = READ_ONCE(rx_ring->xsk_pool); in ice_napi_poll()
1518 /* A dedicated path for zero-copy allows making a single in ice_napi_poll()
1522 cleaned = rx_ring->xsk_pool ? in ice_napi_poll()
1534 * cache-lines will still continue even if we're polling. in ice_napi_poll()
1540 /* Exit the polling mode, but don't re-enable interrupts if stack might in ice_napi_poll()
1541 * poll us due to busy-polling in ice_napi_poll()
1550 return min_t(int, work_done, budget - 1); in ice_napi_poll()
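
ice_napi_poll() splits the NAPI budget evenly across the Rx rings on the vector with a floor of one, and on the completion path clamps its return value to budget - 1 so the core never mistakes a completed poll for a full-budget one. A worked example of that arithmetic with illustrative values:

#include <stdio.h>

static int max_int(int a, int b) { return a > b ? a : b; }
static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int budget = 64;

	for (int num_ring_rx = 1; num_ring_rx <= 3; num_ring_rx++) {
		int budget_per_ring = max_int(budget / num_ring_rx, 1);

		/* 64, 32, 21 descriptors per ring for 1, 2, 3 rings */
		printf("%d ring(s): %d per ring\n",
		       num_ring_rx, budget_per_ring);
	}

	int work_done = 64;	/* hypothetically cleaned everything */
	printf("returned: %d\n", min_int(work_done, budget - 1));	/* 63 */
	return 0;
}
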
1554 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
1558 * Returns -EBUSY if a stop is needed, else 0
1568 return -EBUSY; in __ice_maybe_stop_tx()
1570 /* A reprieve! - use start_queue because it doesn't call schedule */ in __ice_maybe_stop_tx()
1572 ++tx_ring->ring_stats->tx_stats.restart_q; in __ice_maybe_stop_tx()
1577 * ice_maybe_stop_tx - 1st level check for Tx stop conditions
1592 * ice_tx_map - Build the Tx descriptor
1606 u16 i = tx_ring->next_to_use; in ice_tx_map()
1615 td_tag = off->td_l2tag1; in ice_tx_map()
1616 td_cmd = off->td_cmd; in ice_tx_map()
1617 td_offset = off->td_offset; in ice_tx_map()
1618 skb = first->skb; in ice_tx_map()
1620 data_len = skb->data_len; in ice_tx_map()
1625 if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) { in ice_tx_map()
1627 td_tag = first->vid; in ice_tx_map()
1630 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in ice_tx_map()
1634 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { in ice_tx_map()
1637 if (dma_mapping_error(tx_ring->dev, dma)) in ice_tx_map()
1645 max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1); in ice_tx_map()
1646 tx_desc->buf_addr = cpu_to_le64(dma); in ice_tx_map()
1652 tx_desc->cmd_type_offset_bsz = in ice_tx_map()
1659 if (i == tx_ring->count) { in ice_tx_map()
1665 size -= max_data; in ice_tx_map()
1668 tx_desc->buf_addr = cpu_to_le64(dma); in ice_tx_map()
1674 tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset, in ice_tx_map()
1680 if (i == tx_ring->count) { in ice_tx_map()
1686 data_len -= size; in ice_tx_map()
1688 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, in ice_tx_map()
1691 tx_buf = &tx_ring->tx_buf[i]; in ice_tx_map()
1692 tx_buf->type = ICE_TX_BUF_FRAG; in ice_tx_map()
1696 skb_tx_timestamp(first->skb); in ice_tx_map()
1699 if (i == tx_ring->count) in ice_tx_map()
1704 tx_desc->cmd_type_offset_bsz = in ice_tx_map()
1716 first->next_to_watch = tx_desc; in ice_tx_map()
1718 tx_ring->next_to_use = i; in ice_tx_map()
1723 kick = __netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount, in ice_tx_map()
1727 writel(i, tx_ring->tail); in ice_tx_map()
1734 tx_buf = &tx_ring->tx_buf[i]; in ice_tx_map()
1739 i = tx_ring->count; in ice_tx_map()
1740 i--; in ice_tx_map()
1743 tx_ring->next_to_use = i; in ice_tx_map()
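
The max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1) adjustment in ice_tx_map() extends the first chunk of a large buffer so it ends exactly on the next read-request boundary: for a power-of-two boundary A, (-addr & (A - 1)) is the number of bytes from addr up to that boundary. A worked example, with 4096 standing in for ICE_MAX_READ_REQ_SIZE:

#include <stdio.h>

int main(void)
{
	unsigned long boundary = 4096;
	unsigned long addrs[] = { 0x12340000, 0x12340a00, 0x12340fff };

	for (int i = 0; i < 3; i++) {
		unsigned long pad = (-addrs[i]) & (boundary - 1);

		/* 0, 1536, 1 bytes to the next 4 KiB boundary */
		printf("addr %#lx: %lu bytes to next boundary\n",
		       addrs[i], pad);
	}
	return 0;
}
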
1747 * ice_tx_csum - Enable Tx checksum offloads
1757 struct sk_buff *skb = first->skb; in ice_tx_csum()
1772 if (skb->ip_summed != CHECKSUM_PARTIAL) in ice_tx_csum()
1786 l2_len = ip.hdr - skb->data; in ice_tx_csum()
1792 if (ip.v4->version == 4) in ice_tx_csum()
1793 first->tx_flags |= ICE_TX_FLAGS_IPV4; in ice_tx_csum()
1794 else if (ip.v6->version == 6) in ice_tx_csum()
1795 first->tx_flags |= ICE_TX_FLAGS_IPV6; in ice_tx_csum()
1797 if (skb->encapsulation) { in ice_tx_csum()
1802 if (first->tx_flags & ICE_TX_FLAGS_IPV4) { in ice_tx_csum()
1803 tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ? in ice_tx_csum()
1806 l4_proto = ip.v4->protocol; in ice_tx_csum()
1807 } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) { in ice_tx_csum()
1812 l4_proto = ip.v6->nexthdr; in ice_tx_csum()
1813 ret = ipv6_skip_exthdr(skb, exthdr - skb->data, in ice_tx_csum()
1816 return -1; in ice_tx_csum()
1823 first->tx_flags |= ICE_TX_FLAGS_TUNNEL; in ice_tx_csum()
1827 first->tx_flags |= ICE_TX_FLAGS_TUNNEL; in ice_tx_csum()
1831 first->tx_flags |= ICE_TX_FLAGS_TUNNEL; in ice_tx_csum()
1835 if (first->tx_flags & ICE_TX_FLAGS_TSO) in ice_tx_csum()
1836 return -1; in ice_tx_csum()
1843 tunnel |= ((l4.hdr - ip.hdr) / 4) << in ice_tx_csum()
1850 tunnel |= ((ip.hdr - l4.hdr) / 2) << in ice_tx_csum()
1853 gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL; in ice_tx_csum()
1855 if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena && in ice_tx_csum()
1856 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) in ice_tx_csum()
1860 off->cd_tunnel_params |= tunnel; in ice_tx_csum()
1865 off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX; in ice_tx_csum()
1872 first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6); in ice_tx_csum()
1873 if (ip.v4->version == 4) in ice_tx_csum()
1874 first->tx_flags |= ICE_TX_FLAGS_IPV4; in ice_tx_csum()
1875 if (ip.v6->version == 6) in ice_tx_csum()
1876 first->tx_flags |= ICE_TX_FLAGS_IPV6; in ice_tx_csum()
1880 if (first->tx_flags & ICE_TX_FLAGS_IPV4) { in ice_tx_csum()
1881 l4_proto = ip.v4->protocol; in ice_tx_csum()
1885 if (first->tx_flags & ICE_TX_FLAGS_TSO) in ice_tx_csum()
1890 } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) { in ice_tx_csum()
1893 l4_proto = ip.v6->nexthdr; in ice_tx_csum()
1895 ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, in ice_tx_csum()
1898 return -1; in ice_tx_csum()
1902 l3_len = l4.hdr - ip.hdr; in ice_tx_csum()
1910 l4_len = l4.tcp->doff; in ice_tx_csum()
1927 if (first->tx_flags & ICE_TX_FLAGS_TSO) in ice_tx_csum()
1928 return -1; in ice_tx_csum()
1933 off->td_cmd |= cmd; in ice_tx_csum()
1934 off->td_offset |= offset; in ice_tx_csum()
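
The checksum path encodes header lengths into the descriptor offset field rather than raw byte counts: the MAC header in 2-byte words, the IP and L4 headers in 4-byte dwords, which is why the code divides by 2 and 4 and can use tcp->doff directly. A sketch of that packing; the bit positions below are assumptions for illustration, not values taken from the driver headers:

#include <stdio.h>

#define LEN_MACLEN_S	0	/* assumed bit positions */
#define LEN_IPLEN_S	7
#define LEN_L4LEN_S	14

int main(void)
{
	unsigned int l2_len = 14;	/* Ethernet header, no VLAN    */
	unsigned int l3_len = 20;	/* IPv4 header without options */
	unsigned int l4_len = 20;	/* TCP header without options  */

	unsigned int offset = (l2_len / 2) << LEN_MACLEN_S |
			      (l3_len / 4) << LEN_IPLEN_S |
			      (l4_len / 4) << LEN_L4LEN_S;

	/* 7 words, 5 dwords, 5 dwords packed together: prints 0x14287 */
	printf("td_offset = %#x\n", offset);
	return 0;
}
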
1939 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
1944 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
1949 struct sk_buff *skb = first->skb; in ice_tx_prepare_vlan_flags()
1952 if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol)) in ice_tx_prepare_vlan_flags()
1960 first->vid = skb_vlan_tag_get(skb); in ice_tx_prepare_vlan_flags()
1961 if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2) in ice_tx_prepare_vlan_flags()
1962 first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN; in ice_tx_prepare_vlan_flags()
1964 first->tx_flags |= ICE_TX_FLAGS_HW_VLAN; in ice_tx_prepare_vlan_flags()
1971 * ice_tso - computes mss and TSO length to prepare for TSO
1980 struct sk_buff *skb = first->skb; in ice_tso()
1997 if (skb->ip_summed != CHECKSUM_PARTIAL) in ice_tso()
2016 if (ip.v4->version == 4) { in ice_tso()
2017 ip.v4->tot_len = 0; in ice_tso()
2018 ip.v4->check = 0; in ice_tso()
2020 ip.v6->payload_len = 0; in ice_tso()
2023 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | in ice_tso()
2029 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && in ice_tso()
2030 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { in ice_tso()
2031 l4.udp->len = 0; in ice_tso()
2034 l4_start = (u8)(l4.hdr - skb->data); in ice_tso()
2037 paylen = skb->len - l4_start; in ice_tso()
2038 csum_replace_by_diff(&l4.udp->check, in ice_tso()
2047 if (ip.v4->version == 4) { in ice_tso()
2048 ip.v4->tot_len = 0; in ice_tso()
2049 ip.v4->check = 0; in ice_tso()
2051 ip.v6->payload_len = 0; in ice_tso()
2056 l4_start = (u8)(l4.hdr - skb->data); in ice_tso()
2059 paylen = skb->len - l4_start; in ice_tso()
2061 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { in ice_tso()
2062 csum_replace_by_diff(&l4.udp->check, in ice_tso()
2065 off->header_len = (u8)sizeof(l4.udp) + l4_start; in ice_tso()
2067 csum_replace_by_diff(&l4.tcp->check, in ice_tso()
2070 off->header_len = (u8)((l4.tcp->doff * 4) + l4_start); in ice_tso()
2074 first->gso_segs = skb_shinfo(skb)->gso_segs; in ice_tso()
2075 first->bytecount += (first->gso_segs - 1) * off->header_len; in ice_tso()
2077 cd_tso_len = skb->len - off->header_len; in ice_tso()
2078 cd_mss = skb_shinfo(skb)->gso_size; in ice_tso()
2081 off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | in ice_tso()
2085 first->tx_flags |= ICE_TX_FLAGS_TSO; in ice_tso()
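
The TSO bookkeeping above grows first->bytecount by (gso_segs - 1) * header_len, because every extra segment the hardware emits repeats the protocol headers, while the context descriptor carries only the payload length (skb->len minus the headers). A worked example with assumed frame sizes:

#include <stdio.h>

int main(void)
{
	unsigned int skb_len = 64000 + 54;	/* payload plus eth/IP/TCP headers */
	unsigned int header_len = 54;		/* 14 + 20 + 20                    */
	unsigned int gso_size = 1448;		/* MSS                             */
	unsigned int payload = skb_len - header_len;
	unsigned int gso_segs = (payload + gso_size - 1) / gso_size;

	unsigned int bytecount = skb_len + (gso_segs - 1) * header_len;
	unsigned int cd_tso_len = skb_len - header_len;

	/* 45 segments: 66430 bytes counted for stats/BQL, 64000 TSO payload */
	printf("segs %u, bytecount %u, cd_tso_len %u\n",
	       gso_segs, bytecount, cd_tso_len);
	return 0;
}
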
2090 * ice_txd_use_count - estimate the number of descriptors needed for Tx
2095 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
2106 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
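
The full driver comment here describes estimating descriptors per buffer by dividing by 12 KiB without a division: divide by 4 KiB with a shift, then by 3 via a multiply-by-85 and another shift, and add one to round up. The slight underestimate right at multiples of 12 KiB is intentional, since the last descriptor has up to 4 KiB - 1 of slack. A standalone sketch of that arithmetic, assuming the (size * 85) >> 20 form:

#include <stdio.h>

static unsigned int txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + 1;	/* ~ size / 12K, rounded up */
}

int main(void)
{
	unsigned int sizes[] = { 1514, 12288, 12289, 65536 };

	for (int i = 0; i < 4; i++) {
		unsigned int est = txd_use_count(sizes[i]);
		unsigned int exact = sizes[i] / (12 * 1024) + 1;

		/* right at 12288 the estimate is one lower than the naive
		 * division, matching the "wiggle room" note above
		 */
		printf("size %6u: estimate %u, size/12K + 1 = %u\n",
		       sizes[i], est, exact);
	}
	return 0;
}
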
2123 * ice_xmit_desc_count - calculate number of Tx descriptors needed
2130 const skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; in ice_xmit_desc_count()
2131 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; in ice_xmit_desc_count()
2137 if (!nr_frags--) in ice_xmit_desc_count()
2147 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
2165 nr_frags = skb_shinfo(skb)->nr_frags; in __ice_chk_linearize()
2166 if (nr_frags < (ICE_MAX_BUF_TXD - 1)) in __ice_chk_linearize()
2172 nr_frags -= ICE_MAX_BUF_TXD - 2; in __ice_chk_linearize()
2173 frag = &skb_shinfo(skb)->frags[0]; in __ice_chk_linearize()
2181 sum = 1 - skb_shinfo(skb)->gso_size; in __ice_chk_linearize()
2193 for (stale = &skb_shinfo(skb)->frags[0];; stale++) { in __ice_chk_linearize()
2205 int align_pad = -(skb_frag_off(stale)) & in __ice_chk_linearize()
2206 (ICE_MAX_READ_REQ_SIZE - 1); in __ice_chk_linearize()
2208 sum -= align_pad; in __ice_chk_linearize()
2209 stale_size -= align_pad; in __ice_chk_linearize()
2212 sum -= ICE_MAX_DATA_PER_TXD_ALIGNED; in __ice_chk_linearize()
2213 stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED; in __ice_chk_linearize()
2221 if (!nr_frags--) in __ice_chk_linearize()
2224 sum -= stale_size; in __ice_chk_linearize()
2231 * ice_chk_linearize - Check if there are more than 8 fragments per packet
2235 * Note: Our HW can't scatter-gather more than 8 fragments to build
2253 * ice_tstamp - set up context descriptor for hardware timestamp
2266 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) in ice_tstamp()
2270 if (first->tx_flags & ICE_TX_FLAGS_TSO) in ice_tstamp()
2274 idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb); in ice_tstamp()
2276 tx_ring->vsi->back->ptp.tx_hwtstamp_skipped++; in ice_tstamp()
2280 off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | in ice_tstamp()
2283 first->tx_flags |= ICE_TX_FLAGS_TSYN; in ice_tstamp()
2287 * ice_xmit_frame_ring - Sends buffer on Tx ring
2297 struct ice_vsi *vsi = tx_ring->vsi; in ice_xmit_frame_ring()
2312 count = ice_txd_use_count(skb->len); in ice_xmit_frame_ring()
2313 tx_ring->ring_stats->tx_stats.tx_linearize++; in ice_xmit_frame_ring()
2324 tx_ring->ring_stats->tx_stats.tx_busy++; in ice_xmit_frame_ring()
2334 first = &tx_ring->tx_buf[tx_ring->next_to_use]; in ice_xmit_frame_ring()
2335 first->skb = skb; in ice_xmit_frame_ring()
2336 first->type = ICE_TX_BUF_SKB; in ice_xmit_frame_ring()
2337 first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN); in ice_xmit_frame_ring()
2338 first->gso_segs = 1; in ice_xmit_frame_ring()
2339 first->tx_flags = 0; in ice_xmit_frame_ring()
2343 if (first->tx_flags & ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN) { in ice_xmit_frame_ring()
2347 offload.cd_l2tag2 = first->vid; in ice_xmit_frame_ring()
2362 if (unlikely((skb->priority == TC_PRIO_CONTROL || in ice_xmit_frame_ring()
2363 eth->h_proto == htons(ETH_P_LLDP)) && in ice_xmit_frame_ring()
2364 vsi->type == ICE_VSI_PF && in ice_xmit_frame_ring()
2365 vsi->port_info->qos_cfg.is_sw_lldp)) in ice_xmit_frame_ring()
2371 if (ice_is_switchdev_running(vsi->back) && vsi->type != ICE_VSI_SF) in ice_xmit_frame_ring()
2376 u16 i = tx_ring->next_to_use; in ice_xmit_frame_ring()
2381 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in ice_xmit_frame_ring()
2384 cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params); in ice_xmit_frame_ring()
2385 cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2); in ice_xmit_frame_ring()
2386 cdesc->rsvd = cpu_to_le16(0); in ice_xmit_frame_ring()
2387 cdesc->qw1 = cpu_to_le64(offload.cd_qw1); in ice_xmit_frame_ring()
2400 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
2409 struct ice_vsi *vsi = np->vsi; in ice_start_xmit()
2412 tx_ring = vsi->tx_rings[skb->queue_mapping]; in ice_start_xmit()
2424 * ice_get_dscp_up - return the UP/TC value for a SKB
2425 * @dcbcfg: DCB config that contains DSCP to UP/TC mapping
2434 if (skb->protocol == htons(ETH_P_IP)) in ice_get_dscp_up()
2436 else if (skb->protocol == htons(ETH_P_IPV6)) in ice_get_dscp_up()
2439 return dcbcfg->dscp_map[dscp]; in ice_get_dscp_up()
2449 dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; in ice_select_queue()
2450 if (dcbcfg->pfc_mode == ICE_QOS_MODE_DSCP) in ice_select_queue()
2451 skb->priority = ice_get_dscp_up(dcbcfg, skb); in ice_select_queue()
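
When DCB runs in DSCP mode, the transmit priority comes from a 64-entry DSCP-to-UP table: the DSCP value is the upper six bits of the IPv4 TOS byte (or the IPv6 traffic class), i.e. the DS field shifted right by two. A worked example with a made-up mapping:

#include <stdio.h>

int main(void)
{
	unsigned char dscp_map[64] = { 0 };	/* stand-in for dcbcfg->dscp_map */

	dscp_map[46] = 5;	/* e.g. map EF (DSCP 46) to UP/TC 5 */

	unsigned char tos = 0xb8;	/* IPv4 TOS byte carrying EF */
	unsigned char dscp = tos >> 2;	/* 0xb8 >> 2 = 46            */

	printf("DSCP %u -> UP %u\n", dscp, dscp_map[dscp]);
	return 0;
}
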
2457 * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
2462 struct ice_vsi *vsi = tx_ring->vsi; in ice_clean_ctrl_tx_irq()
2463 s16 i = tx_ring->next_to_clean; in ice_clean_ctrl_tx_irq()
2468 tx_buf = &tx_ring->tx_buf[i]; in ice_clean_ctrl_tx_irq()
2470 i -= tx_ring->count; in ice_clean_ctrl_tx_irq()
2473 struct ice_tx_desc *eop_desc = tx_buf->next_to_watch; in ice_clean_ctrl_tx_irq()
2483 if (!(eop_desc->cmd_type_offset_bsz & in ice_clean_ctrl_tx_irq()
2488 tx_buf->next_to_watch = NULL; in ice_clean_ctrl_tx_irq()
2489 tx_desc->buf_addr = 0; in ice_clean_ctrl_tx_irq()
2490 tx_desc->cmd_type_offset_bsz = 0; in ice_clean_ctrl_tx_irq()
2497 i -= tx_ring->count; in ice_clean_ctrl_tx_irq()
2498 tx_buf = tx_ring->tx_buf; in ice_clean_ctrl_tx_irq()
2504 dma_unmap_single(tx_ring->dev, in ice_clean_ctrl_tx_irq()
2508 if (tx_buf->type == ICE_TX_BUF_DUMMY) in ice_clean_ctrl_tx_irq()
2509 devm_kfree(tx_ring->dev, tx_buf->raw_buf); in ice_clean_ctrl_tx_irq()
2512 tx_buf->type = ICE_TX_BUF_EMPTY; in ice_clean_ctrl_tx_irq()
2513 tx_buf->tx_flags = 0; in ice_clean_ctrl_tx_irq()
2514 tx_buf->next_to_watch = NULL; in ice_clean_ctrl_tx_irq()
2516 tx_desc->buf_addr = 0; in ice_clean_ctrl_tx_irq()
2517 tx_desc->cmd_type_offset_bsz = 0; in ice_clean_ctrl_tx_irq()
2524 i -= tx_ring->count; in ice_clean_ctrl_tx_irq()
2525 tx_buf = tx_ring->tx_buf; in ice_clean_ctrl_tx_irq()
2529 budget--; in ice_clean_ctrl_tx_irq()
2532 i += tx_ring->count; in ice_clean_ctrl_tx_irq()
2533 tx_ring->next_to_clean = i; in ice_clean_ctrl_tx_irq()
2535 /* re-enable interrupt if needed */ in ice_clean_ctrl_tx_irq()
2536 ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]); in ice_clean_ctrl_tx_irq()