Lines Matching +full:line +full:- +full:orders
1 // SPDX-License-Identifier: GPL-2.0
26 * ice_prgm_fdir_fltr - Program a Flow Director filter
46 return -ENOENT; in ice_prgm_fdir_fltr()
47 tx_ring = vsi->tx_rings[0]; in ice_prgm_fdir_fltr()
48 if (!tx_ring || !tx_ring->desc) in ice_prgm_fdir_fltr()
49 return -ENOENT; in ice_prgm_fdir_fltr()
50 dev = tx_ring->dev; in ice_prgm_fdir_fltr()
53 for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) { in ice_prgm_fdir_fltr()
55 return -EAGAIN; in ice_prgm_fdir_fltr()
63 return -EINVAL; in ice_prgm_fdir_fltr()
66 i = tx_ring->next_to_use; in ice_prgm_fdir_fltr()
67 first = &tx_ring->tx_buf[i]; in ice_prgm_fdir_fltr()
72 i = (i < tx_ring->count) ? i : 0; in ice_prgm_fdir_fltr()
74 tx_buf = &tx_ring->tx_buf[i]; in ice_prgm_fdir_fltr()
77 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in ice_prgm_fdir_fltr()
83 tx_desc->buf_addr = cpu_to_le64(dma); in ice_prgm_fdir_fltr()
87 tx_buf->type = ICE_TX_BUF_DUMMY; in ice_prgm_fdir_fltr()
88 tx_buf->raw_buf = raw_packet; in ice_prgm_fdir_fltr()
90 tx_desc->cmd_type_offset_bsz = in ice_prgm_fdir_fltr()
99 first->next_to_watch = tx_desc; in ice_prgm_fdir_fltr()
101 writel(tx_ring->next_to_use, tx_ring->tail); in ice_prgm_fdir_fltr()
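The fragments above show the driver's basic descriptor-post pattern: claim the slot at next_to_use, fill the buffer address and the packed cmd/type/offset/size word, remember the end-of-packet descriptor in next_to_watch, then publish the new producer index through the tail register. A minimal userspace sketch of just that index bookkeeping follows; demo_ring, demo_desc and RING_COUNT are illustrative stand-ins, not the driver's structures.

#include <stdint.h>
#include <stdio.h>

#define RING_COUNT 8 /* hypothetical descriptor count */

struct demo_desc { uint64_t buf_addr; uint64_t cmd_type_offset_bsz; };

struct demo_ring {
	struct demo_desc desc[RING_COUNT];
	uint16_t next_to_use;   /* producer index */
	uint16_t next_to_clean; /* consumer index */
};

/* Post one buffer: fill the slot at next_to_use, then advance and wrap. */
static uint16_t demo_post(struct demo_ring *r, uint64_t dma, uint64_t ctob)
{
	uint16_t i = r->next_to_use;

	r->desc[i].buf_addr = dma;             /* endianness conversion omitted */
	r->desc[i].cmd_type_offset_bsz = ctob;

	i++;
	r->next_to_use = (i < RING_COUNT) ? i : 0; /* same wrap test as above */
	/* a real driver would now write next_to_use to the MMIO tail register */
	return r->next_to_use;
}

int main(void)
{
	struct demo_ring r = { .next_to_use = RING_COUNT - 1 };

	printf("tail after post: %u\n", (unsigned int)demo_post(&r, 0x1000, 0x1));
	return 0;
}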
107 * ice_unmap_and_free_tx_buf - Release a Tx buffer
115 dma_unmap_page(ring->dev, in ice_unmap_and_free_tx_buf()
120 switch (tx_buf->type) { in ice_unmap_and_free_tx_buf()
122 devm_kfree(ring->dev, tx_buf->raw_buf); in ice_unmap_and_free_tx_buf()
125 dev_kfree_skb_any(tx_buf->skb); in ice_unmap_and_free_tx_buf()
128 page_frag_free(tx_buf->raw_buf); in ice_unmap_and_free_tx_buf()
131 xdp_return_frame(tx_buf->xdpf); in ice_unmap_and_free_tx_buf()
135 tx_buf->next_to_watch = NULL; in ice_unmap_and_free_tx_buf()
136 tx_buf->type = ICE_TX_BUF_EMPTY; in ice_unmap_and_free_tx_buf()
143 return netdev_get_tx_queue(ring->netdev, ring->q_index); in txring_txq()
147 * ice_clean_tstamp_ring - clean time stamp ring
152 struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring; in ice_clean_tstamp_ring()
155 if (!tstamp_ring->desc) in ice_clean_tstamp_ring()
158 size = ALIGN(tstamp_ring->count * sizeof(struct ice_ts_desc), in ice_clean_tstamp_ring()
160 memset(tstamp_ring->desc, 0, size); in ice_clean_tstamp_ring()
161 tstamp_ring->next_to_use = 0; in ice_clean_tstamp_ring()
165 * ice_free_tstamp_ring - free time stamp resources per queue
170 struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring; in ice_free_tstamp_ring()
173 if (!tstamp_ring->desc) in ice_free_tstamp_ring()
177 size = ALIGN(tstamp_ring->count * sizeof(struct ice_ts_desc), in ice_free_tstamp_ring()
179 dmam_free_coherent(tx_ring->dev, size, tstamp_ring->desc, in ice_free_tstamp_ring()
180 tstamp_ring->dma); in ice_free_tstamp_ring()
181 tstamp_ring->desc = NULL; in ice_free_tstamp_ring()
185 * ice_free_tx_tstamp_ring - free time stamp resources per Tx ring
191 kfree_rcu(tx_ring->tstamp_ring, rcu); in ice_free_tx_tstamp_ring()
192 tx_ring->tstamp_ring = NULL; in ice_free_tx_tstamp_ring()
193 tx_ring->flags &= ~ICE_TX_FLAGS_TXTIME; in ice_free_tx_tstamp_ring()
197 * ice_clean_tx_ring - Free any empty Tx buffers
205 if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) { in ice_clean_tx_ring()
211 if (!tx_ring->tx_buf) in ice_clean_tx_ring()
215 for (i = 0; i < tx_ring->count; i++) in ice_clean_tx_ring()
216 ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]); in ice_clean_tx_ring()
219 memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count); in ice_clean_tx_ring()
221 size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc), in ice_clean_tx_ring()
224 memset(tx_ring->desc, 0, size); in ice_clean_tx_ring()
226 tx_ring->next_to_use = 0; in ice_clean_tx_ring()
227 tx_ring->next_to_clean = 0; in ice_clean_tx_ring()
229 if (!tx_ring->netdev) in ice_clean_tx_ring()
240 * ice_free_tx_ring - Free Tx resources per queue
250 devm_kfree(tx_ring->dev, tx_ring->tx_buf); in ice_free_tx_ring()
251 tx_ring->tx_buf = NULL; in ice_free_tx_ring()
253 if (tx_ring->desc) { in ice_free_tx_ring()
254 size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc), in ice_free_tx_ring()
256 dmam_free_coherent(tx_ring->dev, size, in ice_free_tx_ring()
257 tx_ring->desc, tx_ring->dma); in ice_free_tx_ring()
258 tx_ring->desc = NULL; in ice_free_tx_ring()
263 * ice_clean_tx_irq - Reclaim resources after transmit completes
273 struct ice_vsi *vsi = tx_ring->vsi; in ice_clean_tx_irq()
274 s16 i = tx_ring->next_to_clean; in ice_clean_tx_irq()
281 tx_buf = &tx_ring->tx_buf[i]; in ice_clean_tx_irq()
283 i -= tx_ring->count; in ice_clean_tx_irq()
285 prefetch(&vsi->state); in ice_clean_tx_irq()
288 struct ice_tx_desc *eop_desc = tx_buf->next_to_watch; in ice_clean_tx_irq()
295 prefetchw(&tx_buf->skb->users); in ice_clean_tx_irq()
301 if (!(eop_desc->cmd_type_offset_bsz & in ice_clean_tx_irq()
306 tx_buf->next_to_watch = NULL; in ice_clean_tx_irq()
309 total_bytes += tx_buf->bytecount; in ice_clean_tx_irq()
310 total_pkts += tx_buf->gso_segs; in ice_clean_tx_irq()
313 napi_consume_skb(tx_buf->skb, napi_budget); in ice_clean_tx_irq()
316 dma_unmap_single(tx_ring->dev, in ice_clean_tx_irq()
322 tx_buf->type = ICE_TX_BUF_EMPTY; in ice_clean_tx_irq()
332 i -= tx_ring->count; in ice_clean_tx_irq()
333 tx_buf = tx_ring->tx_buf; in ice_clean_tx_irq()
339 dma_unmap_page(tx_ring->dev, in ice_clean_tx_irq()
353 i -= tx_ring->count; in ice_clean_tx_irq()
354 tx_buf = tx_ring->tx_buf; in ice_clean_tx_irq()
361 budget--; in ice_clean_tx_irq()
364 i += tx_ring->count; in ice_clean_tx_irq()
365 tx_ring->next_to_clean = i; in ice_clean_tx_irq()
371 if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) && in ice_clean_tx_irq()
378 !test_bit(ICE_VSI_DOWN, vsi->state)) { in ice_clean_tx_irq()
380 ++tx_ring->ring_stats->tx_stats.restart_q; in ice_clean_tx_irq()
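The cleanup loop above keeps its running index biased by -count ("i -= tx_ring->count"), so wrapping is a cheap test against zero instead of a compare with the ring size, and the real index is recovered by adding count back at the end. A small sketch of just that trick, with hypothetical values:

#include <stdint.h>
#include <stdio.h>

#define RING_COUNT 4 /* hypothetical ring size */

int main(void)
{
	int16_t i = 1;           /* pretend next_to_clean == 1 */

	i -= RING_COUNT;         /* bias into the negative range: -3 */
	for (int step = 0; step < 6; step++) {
		i++;
		if (!i)                  /* ran off the end of the ring */
			i -= RING_COUNT; /* wrap back to the start, still biased */
		printf("slot %d\n", i + RING_COUNT); /* un-bias for display */
	}
	return 0;
}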
388 * ice_alloc_tstamp_ring - allocate the Time Stamp ring
400 return -ENOMEM; in ice_alloc_tstamp_ring()
402 tstamp_ring->tx_ring = tx_ring; in ice_alloc_tstamp_ring()
403 tx_ring->tstamp_ring = tstamp_ring; in ice_alloc_tstamp_ring()
404 tstamp_ring->desc = NULL; in ice_alloc_tstamp_ring()
405 tstamp_ring->count = ice_calc_ts_ring_count(tx_ring); in ice_alloc_tstamp_ring()
406 tx_ring->flags |= ICE_TX_FLAGS_TXTIME; in ice_alloc_tstamp_ring()
411 * ice_setup_tstamp_ring - allocate the Time Stamp ring
418 struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring; in ice_setup_tstamp_ring()
419 struct device *dev = tx_ring->dev; in ice_setup_tstamp_ring()
423 size = ALIGN(tstamp_ring->count * sizeof(struct ice_ts_desc), in ice_setup_tstamp_ring()
425 tstamp_ring->desc = dmam_alloc_coherent(dev, size, &tstamp_ring->dma, in ice_setup_tstamp_ring()
427 if (!tstamp_ring->desc) { in ice_setup_tstamp_ring()
430 return -ENOMEM; in ice_setup_tstamp_ring()
433 tstamp_ring->next_to_use = 0; in ice_setup_tstamp_ring()
438 * ice_alloc_setup_tstamp_ring - Allocate and setup the Time Stamp ring
445 struct device *dev = tx_ring->dev; in ice_alloc_setup_tstamp_ring()
451 tx_ring->q_index); in ice_alloc_setup_tstamp_ring()
458 tx_ring->q_index); in ice_alloc_setup_tstamp_ring()
466 * ice_setup_tx_ring - Allocate the Tx descriptors
473 struct device *dev = tx_ring->dev; in ice_setup_tx_ring()
477 return -ENOMEM; in ice_setup_tx_ring()
480 WARN_ON(tx_ring->tx_buf); in ice_setup_tx_ring()
481 tx_ring->tx_buf = in ice_setup_tx_ring()
482 devm_kcalloc(dev, sizeof(*tx_ring->tx_buf), tx_ring->count, in ice_setup_tx_ring()
484 if (!tx_ring->tx_buf) in ice_setup_tx_ring()
485 return -ENOMEM; in ice_setup_tx_ring()
488 size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc), in ice_setup_tx_ring()
490 tx_ring->desc = dmam_alloc_coherent(dev, size, &tx_ring->dma, in ice_setup_tx_ring()
492 if (!tx_ring->desc) { in ice_setup_tx_ring()
498 tx_ring->next_to_use = 0; in ice_setup_tx_ring()
499 tx_ring->next_to_clean = 0; in ice_setup_tx_ring()
500 tx_ring->ring_stats->tx_stats.prev_pkt = -1; in ice_setup_tx_ring()
504 devm_kfree(dev, tx_ring->tx_buf); in ice_setup_tx_ring()
505 tx_ring->tx_buf = NULL; in ice_setup_tx_ring()
506 return -ENOMEM; in ice_setup_tx_ring()
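The ice_setup_tx_ring fragments show the two allocations every ring needs: an array of software buffer-tracking entries and a descriptor area whose size is rounded up to a 4 KiB multiple before the coherent DMA allocation. A rough userspace model of that sizing, assuming plain libc allocators in place of devm_kcalloc()/dmam_alloc_coherent() and made-up DESC_SIZE/RING_COUNT values:

#include <stdio.h>
#include <stdlib.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))
#define DESC_SIZE  16   /* hypothetical bytes per descriptor */
#define RING_COUNT 512  /* hypothetical descriptor count */

int main(void)
{
	/* Round the descriptor area up to a 4 KiB multiple, mirroring the
	 * ALIGN(count * sizeof(desc), 4096) pattern in the fragments. */
	size_t size = ALIGN_UP((size_t)RING_COUNT * DESC_SIZE, 4096);
	void *descs = aligned_alloc(4096, size);          /* ~ dmam_alloc_coherent() */
	void *bufs = calloc(RING_COUNT, sizeof(void *));  /* ~ devm_kcalloc() */

	if (!descs || !bufs) {
		/* the driver frees the buffer array and returns -ENOMEM here */
		free(descs);
		free(bufs);
		return 1;
	}
	printf("descriptor area: %zu bytes\n", size);
	free(descs);
	free(bufs);
	return 0;
}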
510 * ice_clean_rx_ring - Free Rx buffers
515 struct xdp_buff *xdp = &rx_ring->xdp; in ice_clean_rx_ring()
516 struct device *dev = rx_ring->dev; in ice_clean_rx_ring()
521 if (!rx_ring->rx_buf) in ice_clean_rx_ring()
524 if (rx_ring->xsk_pool) { in ice_clean_rx_ring()
529 if (xdp->data) { in ice_clean_rx_ring()
531 xdp->data = NULL; in ice_clean_rx_ring()
535 for (i = 0; i < rx_ring->count; i++) { in ice_clean_rx_ring()
536 struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i]; in ice_clean_rx_ring()
538 if (!rx_buf->page) in ice_clean_rx_ring()
544 dma_sync_single_range_for_cpu(dev, rx_buf->dma, in ice_clean_rx_ring()
545 rx_buf->page_offset, in ice_clean_rx_ring()
546 rx_ring->rx_buf_len, in ice_clean_rx_ring()
550 dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring), in ice_clean_rx_ring()
552 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias); in ice_clean_rx_ring()
554 rx_buf->page = NULL; in ice_clean_rx_ring()
555 rx_buf->page_offset = 0; in ice_clean_rx_ring()
559 if (rx_ring->xsk_pool) in ice_clean_rx_ring()
560 memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf))); in ice_clean_rx_ring()
562 memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf))); in ice_clean_rx_ring()
565 size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), in ice_clean_rx_ring()
567 memset(rx_ring->desc, 0, size); in ice_clean_rx_ring()
569 rx_ring->next_to_alloc = 0; in ice_clean_rx_ring()
570 rx_ring->next_to_clean = 0; in ice_clean_rx_ring()
571 rx_ring->first_desc = 0; in ice_clean_rx_ring()
572 rx_ring->next_to_use = 0; in ice_clean_rx_ring()
576 * ice_free_rx_ring - Free Rx resources
586 if (rx_ring->vsi->type == ICE_VSI_PF) in ice_free_rx_ring()
587 if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) in ice_free_rx_ring()
588 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in ice_free_rx_ring()
589 WRITE_ONCE(rx_ring->xdp_prog, NULL); in ice_free_rx_ring()
590 if (rx_ring->xsk_pool) { in ice_free_rx_ring()
591 kfree(rx_ring->xdp_buf); in ice_free_rx_ring()
592 rx_ring->xdp_buf = NULL; in ice_free_rx_ring()
594 kfree(rx_ring->rx_buf); in ice_free_rx_ring()
595 rx_ring->rx_buf = NULL; in ice_free_rx_ring()
598 if (rx_ring->desc) { in ice_free_rx_ring()
599 size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), in ice_free_rx_ring()
601 dmam_free_coherent(rx_ring->dev, size, in ice_free_rx_ring()
602 rx_ring->desc, rx_ring->dma); in ice_free_rx_ring()
603 rx_ring->desc = NULL; in ice_free_rx_ring()
608 * ice_setup_rx_ring - Allocate the Rx descriptors
615 struct device *dev = rx_ring->dev; in ice_setup_rx_ring()
619 return -ENOMEM; in ice_setup_rx_ring()
622 WARN_ON(rx_ring->rx_buf); in ice_setup_rx_ring()
623 rx_ring->rx_buf = in ice_setup_rx_ring()
624 kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL); in ice_setup_rx_ring()
625 if (!rx_ring->rx_buf) in ice_setup_rx_ring()
626 return -ENOMEM; in ice_setup_rx_ring()
629 size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), in ice_setup_rx_ring()
631 rx_ring->desc = dmam_alloc_coherent(dev, size, &rx_ring->dma, in ice_setup_rx_ring()
633 if (!rx_ring->desc) { in ice_setup_rx_ring()
639 rx_ring->next_to_use = 0; in ice_setup_rx_ring()
640 rx_ring->next_to_clean = 0; in ice_setup_rx_ring()
641 rx_ring->first_desc = 0; in ice_setup_rx_ring()
643 if (ice_is_xdp_ena_vsi(rx_ring->vsi)) in ice_setup_rx_ring()
644 WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog); in ice_setup_rx_ring()
649 kfree(rx_ring->rx_buf); in ice_setup_rx_ring()
650 rx_ring->rx_buf = NULL; in ice_setup_rx_ring()
651 return -ENOMEM; in ice_setup_rx_ring()
655 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
683 spin_lock(&xdp_ring->tx_lock); in ice_run_xdp()
686 spin_unlock(&xdp_ring->tx_lock); in ice_run_xdp()
691 if (xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog)) in ice_run_xdp()
696 bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); in ice_run_xdp()
700 trace_xdp_exception(rx_ring->netdev, xdp_prog, act); in ice_run_xdp()
710 * ice_xmit_xdp_ring - submit frame to XDP ring for transmission
720 xdp.data = xdpf->data; in ice_xmit_xdp_ring()
721 xdp.data_end = xdp.data + xdpf->len; in ice_xmit_xdp_ring()
722 xdp.frame_sz = xdpf->frame_sz; in ice_xmit_xdp_ring()
723 xdp.flags = xdpf->flags; in ice_xmit_xdp_ring()
729 * ice_xdp_xmit - submit packets to XDP ring for transmission
737 * For error cases, a negative errno code is returned and no-frames
746 struct ice_vsi *vsi = np->vsi; in ice_xdp_xmit()
751 if (test_bit(ICE_VSI_DOWN, vsi->state)) in ice_xdp_xmit()
752 return -ENETDOWN; in ice_xdp_xmit()
755 return -ENXIO; in ice_xdp_xmit()
758 return -EINVAL; in ice_xdp_xmit()
761 queue_index %= vsi->num_xdp_txq; in ice_xdp_xmit()
762 xdp_ring = vsi->xdp_rings[queue_index]; in ice_xdp_xmit()
763 spin_lock(&xdp_ring->tx_lock); in ice_xdp_xmit()
766 if (unlikely(queue_index >= vsi->num_xdp_txq)) in ice_xdp_xmit()
767 return -ENXIO; in ice_xdp_xmit()
768 xdp_ring = vsi->xdp_rings[queue_index]; in ice_xdp_xmit()
771 tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use]; in ice_xdp_xmit()
782 tx_buf->rs_idx = ice_set_rs_bit(xdp_ring); in ice_xdp_xmit()
787 spin_unlock(&xdp_ring->tx_lock); in ice_xdp_xmit()
793 * ice_alloc_mapped_page - recycle or make a new page
803 struct page *page = bi->page; in ice_alloc_mapped_page()
813 rx_ring->ring_stats->rx_stats.alloc_page_failed++; in ice_alloc_mapped_page()
818 dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring), in ice_alloc_mapped_page()
824 if (dma_mapping_error(rx_ring->dev, dma)) { in ice_alloc_mapped_page()
826 rx_ring->ring_stats->rx_stats.alloc_page_failed++; in ice_alloc_mapped_page()
830 bi->dma = dma; in ice_alloc_mapped_page()
831 bi->page = page; in ice_alloc_mapped_page()
832 bi->page_offset = rx_ring->rx_offset; in ice_alloc_mapped_page()
833 page_ref_add(page, USHRT_MAX - 1); in ice_alloc_mapped_page()
834 bi->pagecnt_bias = USHRT_MAX; in ice_alloc_mapped_page()
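The page_ref_add(page, USHRT_MAX - 1) / pagecnt_bias = USHRT_MAX pair above is the usual page-reuse trick: take a large batch of page references once, then hand buffers to the stack by decrementing a driver-local bias instead of touching the atomic refcount per packet. A userspace model of that accounting, where page_refs and the demo_* helpers stand in for page_count()/page_ref_add() and are not real kernel API:

#include <stdint.h>
#include <stdio.h>

struct demo_rx_buf {
	unsigned int page_refs;  /* models page_count(page) */
	uint16_t pagecnt_bias;   /* references still owned by the driver */
};

static void demo_alloc(struct demo_rx_buf *b)
{
	b->page_refs = 1;                /* reference from the allocation */
	b->page_refs += UINT16_MAX - 1;  /* batch grab, like page_ref_add() */
	b->pagecnt_bias = UINT16_MAX;
}

static int demo_can_reuse(const struct demo_rx_buf *b)
{
	/* reusable as long as nobody outside the driver still holds the page */
	return (b->page_refs - b->pagecnt_bias) <= 1;
}

int main(void)
{
	struct demo_rx_buf buf;

	demo_alloc(&buf);
	buf.pagecnt_bias--;  /* one buffer from this page handed to the stack */
	printf("reusable: %d\n", demo_can_reuse(&buf));
	return 0;
}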
840 * ice_init_ctrl_rx_descs - Initialize Rx descriptors for control vsi.
847 u32 ntu = rx_ring->next_to_use; in ice_init_ctrl_rx_descs()
857 if (unlikely(ntu == rx_ring->count)) { in ice_init_ctrl_rx_descs()
862 rx_desc->wb.status_error0 = 0; in ice_init_ctrl_rx_descs()
863 count--; in ice_init_ctrl_rx_descs()
866 if (rx_ring->next_to_use != ntu) in ice_init_ctrl_rx_descs()
871 * ice_alloc_rx_bufs - Replace used receive buffers
886 u16 ntu = rx_ring->next_to_use; in ice_alloc_rx_bufs()
890 if (!rx_ring->netdev || !cleaned_count) in ice_alloc_rx_bufs()
895 bi = &rx_ring->rx_buf[ntu]; in ice_alloc_rx_bufs()
903 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, in ice_alloc_rx_bufs()
904 bi->page_offset, in ice_alloc_rx_bufs()
905 rx_ring->rx_buf_len, in ice_alloc_rx_bufs()
909 * because each write-back erases this info. in ice_alloc_rx_bufs()
911 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); in ice_alloc_rx_bufs()
916 if (unlikely(ntu == rx_ring->count)) { in ice_alloc_rx_bufs()
918 bi = rx_ring->rx_buf; in ice_alloc_rx_bufs()
923 rx_desc->wb.status_error0 = 0; in ice_alloc_rx_bufs()
925 cleaned_count--; in ice_alloc_rx_bufs()
928 if (rx_ring->next_to_use != ntu) in ice_alloc_rx_bufs()
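The refill fragments follow the usual Rx replenish shape: program the DMA address into the read-format descriptor, clear the writeback status so the next DD check starts clean, advance and wrap next_to_use, and only touch the tail register if something actually changed. A compact model with an invented demo_rx_desc layout and a printf in place of the MMIO tail write:

#include <stdint.h>
#include <stdio.h>

#define RING_COUNT 4 /* hypothetical */

struct demo_rx_desc {
	uint64_t pkt_addr;       /* read format: where hardware should DMA */
	uint64_t status_error0;  /* writeback format: DD bit lands here */
};

int main(void)
{
	struct demo_rx_desc ring[RING_COUNT] = { 0 };
	const uint64_t dma[3] = { 0x1000, 0x2000, 0x3000 };
	uint16_t ntu = 2, start = 2;

	for (int n = 0; n < 3; n++) {
		ring[ntu].pkt_addr = dma[n];
		ring[ntu].status_error0 = 0;  /* each writeback erases this */
		if (++ntu == RING_COUNT)
			ntu = 0;              /* wrap to the first descriptor */
	}
	if (ntu != start)
		printf("write tail = %u\n", (unsigned int)ntu);
	return 0;
}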
935 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
949 rx_buf->page_offset ^= size; in ice_rx_buf_adjust_pg_offset()
951 /* move offset up to the next cache line */ in ice_rx_buf_adjust_pg_offset()
952 rx_buf->page_offset += size; in ice_rx_buf_adjust_pg_offset()
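The two branches above implement the page-split reuse scheme: with two buffers per 4 KiB page the offset simply flips between the halves (XOR with the truesize), while configurations with bigger buffers walk the offset forward instead. In the driver that choice is made at compile time on PAGE_SIZE; the sketch below folds it into a runtime check purely for illustration:

#include <stdio.h>

#define DEMO_PAGE_SIZE 4096u

static void demo_flip_offset(unsigned int *page_offset, unsigned int truesize)
{
	if (truesize == DEMO_PAGE_SIZE / 2)
		*page_offset ^= truesize;  /* flip between 0 and 2048 */
	else
		*page_offset += truesize;  /* move to the next chunk */
}

int main(void)
{
	unsigned int off = 0;

	demo_flip_offset(&off, 2048);
	printf("%u\n", off);  /* 2048: second half of the page */
	demo_flip_offset(&off, 2048);
	printf("%u\n", off);  /* 0: back to the first half */
	return 0;
}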
957 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
968 unsigned int pagecnt_bias = rx_buf->pagecnt_bias; in ice_can_reuse_rx_page()
969 struct page *page = rx_buf->page; in ice_can_reuse_rx_page()
971 /* avoid re-using remote and pfmemalloc pages */ in ice_can_reuse_rx_page()
976 if (unlikely(rx_buf->pgcnt - pagecnt_bias > 1)) in ice_can_reuse_rx_page()
980 (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_3072) in ice_can_reuse_rx_page()
981 if (rx_buf->page_offset > ICE_LAST_OFFSET) in ice_can_reuse_rx_page()
990 page_ref_add(page, USHRT_MAX - 1); in ice_can_reuse_rx_page()
991 rx_buf->pagecnt_bias = USHRT_MAX; in ice_can_reuse_rx_page()
998 * ice_add_xdp_frag - Add contents of Rx buffer to xdp buf as a frag
1004 * This function will add the data contained in rx_buf->page to the xdp buf.
1017 sinfo->nr_frags = 0; in ice_add_xdp_frag()
1018 sinfo->xdp_frags_size = 0; in ice_add_xdp_frag()
1022 if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) in ice_add_xdp_frag()
1023 return -ENOMEM; in ice_add_xdp_frag()
1025 __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page, in ice_add_xdp_frag()
1026 rx_buf->page_offset, size); in ice_add_xdp_frag()
1027 sinfo->xdp_frags_size += size; in ice_add_xdp_frag()
1029 if (page_is_pfmemalloc(rx_buf->page)) in ice_add_xdp_frag()
1036 * ice_reuse_rx_page - page flip buffer and store it back on the ring
1045 u16 nta = rx_ring->next_to_alloc; in ice_reuse_rx_page()
1048 new_buf = &rx_ring->rx_buf[nta]; in ice_reuse_rx_page()
1052 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in ice_reuse_rx_page()
1058 new_buf->dma = old_buf->dma; in ice_reuse_rx_page()
1059 new_buf->page = old_buf->page; in ice_reuse_rx_page()
1060 new_buf->page_offset = old_buf->page_offset; in ice_reuse_rx_page()
1061 new_buf->pagecnt_bias = old_buf->pagecnt_bias; in ice_reuse_rx_page()
1065 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
1079 rx_buf = &rx_ring->rx_buf[ntc]; in ice_get_rx_buf()
1080 prefetchw(rx_buf->page); in ice_get_rx_buf()
1085 dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, in ice_get_rx_buf()
1086 rx_buf->page_offset, size, in ice_get_rx_buf()
1090 rx_buf->pagecnt_bias--; in ice_get_rx_buf()
1096 * ice_get_pgcnts - grab page_count() for gathered fragments
1107 u32 idx = rx_ring->first_desc; in ice_get_pgcnts()
1109 u32 cnt = rx_ring->count; in ice_get_pgcnts()
1112 rx_buf = &rx_ring->rx_buf[idx]; in ice_get_pgcnts()
1113 rx_buf->pgcnt = page_count(rx_buf->page); in ice_get_pgcnts()
1121 * ice_build_skb - Build skb around an existing buffer
1132 u8 metasize = xdp->data - xdp->data_meta; in ice_build_skb()
1139 nr_frags = sinfo->nr_frags; in ice_build_skb()
1142 /* Prefetch first cache line of first page. If xdp->data_meta in ice_build_skb()
1143 * is unused, this points exactly as xdp->data, otherwise we in ice_build_skb()
1147 net_prefetch(xdp->data_meta); in ice_build_skb()
1149 skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz); in ice_build_skb()
1156 skb_record_rx_queue(skb, rx_ring->q_index); in ice_build_skb()
1159 skb_reserve(skb, xdp->data - xdp->data_hard_start); in ice_build_skb()
1160 __skb_put(skb, xdp->data_end - xdp->data); in ice_build_skb()
1165 xdp_update_skb_frags_info(skb, nr_frags, sinfo->xdp_frags_size, in ice_build_skb()
1166 nr_frags * xdp->frame_sz, in ice_build_skb()
1173 * ice_construct_skb - Allocate skb and populate it
1184 unsigned int size = xdp->data_end - xdp->data; in ice_construct_skb()
1191 /* prefetch first cache line of first page */ in ice_construct_skb()
1192 net_prefetch(xdp->data); in ice_construct_skb()
1196 nr_frags = sinfo->nr_frags; in ice_construct_skb()
1200 skb = napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE); in ice_construct_skb()
1204 rx_buf = &rx_ring->rx_buf[rx_ring->first_desc]; in ice_construct_skb()
1205 skb_record_rx_queue(skb, rx_ring->q_index); in ice_construct_skb()
1209 headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE); in ice_construct_skb()
1212 memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, in ice_construct_skb()
1216 size -= headlen; in ice_construct_skb()
1222 if (unlikely(nr_frags >= MAX_SKB_FRAGS - 1)) { in ice_construct_skb()
1226 skb_add_rx_frag(skb, 0, rx_buf->page, in ice_construct_skb()
1227 rx_buf->page_offset + headlen, size, in ice_construct_skb()
1228 xdp->frame_sz); in ice_construct_skb()
1233 * as-is in ice_construct_skb()
1235 rx_buf->pagecnt_bias++; in ice_construct_skb()
1241 memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0], in ice_construct_skb()
1244 xdp_update_skb_frags_info(skb, skinfo->nr_frags + nr_frags, in ice_construct_skb()
1245 sinfo->xdp_frags_size, in ice_construct_skb()
1246 nr_frags * xdp->frame_sz, in ice_construct_skb()
1254 * ice_put_rx_buf - Clean up used buffer and either recycle or free
1272 dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, in ice_put_rx_buf()
1275 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias); in ice_put_rx_buf()
1279 rx_buf->page = NULL; in ice_put_rx_buf()
1283 * ice_put_rx_mbuf - ice_put_rx_buf() caller, for all buffers in frame
1299 u32 idx = rx_ring->first_desc; in ice_put_rx_mbuf()
1300 u32 cnt = rx_ring->count; in ice_put_rx_mbuf()
1306 xdp_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags; in ice_put_rx_mbuf()
1309 buf = &rx_ring->rx_buf[idx]; in ice_put_rx_mbuf()
1314 * buffer. For these, we need to keep the pagecnt_bias as-is. in ice_put_rx_mbuf()
1319 ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz); in ice_put_rx_mbuf()
1321 buf->pagecnt_bias++; in ice_put_rx_mbuf()
1326 xdp->data = NULL; in ice_put_rx_mbuf()
1327 rx_ring->first_desc = ntc; in ice_put_rx_mbuf()
1331 * ice_clean_ctrl_rx_irq - Clean descriptors from flow director Rx ring
1339 u32 ntc = rx_ring->next_to_clean; in ice_clean_ctrl_rx_irq()
1341 u32 cnt = rx_ring->count; in ice_clean_ctrl_rx_irq()
1344 struct ice_vsi *ctrl_vsi = rx_ring->vsi; in ice_clean_ctrl_rx_irq()
1351 if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits)) in ice_clean_ctrl_rx_irq()
1356 if (ctrl_vsi->vf) in ice_clean_ctrl_rx_irq()
1364 rx_ring->first_desc = ntc; in ice_clean_ctrl_rx_irq()
1365 rx_ring->next_to_clean = ntc; in ice_clean_ctrl_rx_irq()
1370 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
1384 unsigned int offset = rx_ring->rx_offset; in ice_clean_rx_irq()
1385 struct xdp_buff *xdp = &rx_ring->xdp; in ice_clean_rx_irq()
1388 u32 ntc = rx_ring->next_to_clean; in ice_clean_rx_irq()
1390 u32 cnt = rx_ring->count; in ice_clean_rx_irq()
1394 xdp_prog = READ_ONCE(rx_ring->xdp_prog); in ice_clean_rx_irq()
1396 xdp_ring = rx_ring->xdp_ring; in ice_clean_rx_irq()
1397 cached_ntu = xdp_ring->next_to_use; in ice_clean_rx_irq()
1415 * hardware wrote DD then it will be non-zero in ice_clean_rx_irq()
1418 if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits)) in ice_clean_rx_irq()
1429 size = le16_to_cpu(rx_desc->wb.pkt_len) & in ice_clean_rx_irq()
1439 if (!xdp->data) { in ice_clean_rx_irq()
1442 hard_start = page_address(rx_buf->page) + rx_buf->page_offset - in ice_clean_rx_irq()
1473 rx_ring->ring_stats->rx_stats.alloc_buf_failed++; in ice_clean_rx_irq()
1482 if (unlikely(ice_test_staterr(rx_desc->wb.status_error0, in ice_clean_rx_irq()
1495 total_rx_bytes += skb->len; in ice_clean_rx_irq()
1508 rx_ring->next_to_clean = ntc; in ice_clean_rx_irq()
1515 if (rx_ring->ring_stats) in ice_clean_rx_irq()
1536 ring_stats = tx_ring->ring_stats; in __ice_update_sample()
1539 packets += ring_stats->stats.pkts; in __ice_update_sample()
1540 bytes += ring_stats->stats.bytes; in __ice_update_sample()
1548 ring_stats = rx_ring->ring_stats; in __ice_update_sample()
1551 packets += ring_stats->stats.pkts; in __ice_update_sample()
1552 bytes += ring_stats->stats.bytes; in __ice_update_sample()
1556 dim_update_sample(q_vector->total_events, packets, bytes, sample); in __ice_update_sample()
1557 sample->comp_ctr = 0; in __ice_update_sample()
1564 if (ktime_ms_delta(sample->time, rc->dim.start_sample.time) >= 1000) in __ice_update_sample()
1565 rc->dim.state = DIM_START_MEASURE; in __ice_update_sample()
1569 * ice_net_dim - Update net DIM algorithm
1575 * This function is a no-op if the ring is not configured to dynamic ITR.
1579 struct ice_ring_container *tx = &q_vector->tx; in ice_net_dim()
1580 struct ice_ring_container *rx = &q_vector->rx; in ice_net_dim()
1586 net_dim(&tx->dim, &dim_sample); in ice_net_dim()
1593 net_dim(&rx->dim, &dim_sample); in ice_net_dim()
1598 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
1606 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this in ice_buildreg_itr()
1615 (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)); in ice_buildreg_itr()
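The shift by (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S) above converts and positions the ITR value in one go: the driver keeps the value in microseconds while the hardware counts 2 us ticks, so shifting by the field offset minus the granularity shift lands it in the interval field without a divide. The sketch below uses invented DEMO_* bit positions purely to show the arithmetic; the real layout comes from the hardware headers.

#include <stdio.h>

#define DEMO_INTENA     0x1u  /* hypothetical: enable the interrupt */
#define DEMO_CLEARPBA   0x2u  /* hypothetical: clear the pending-bit array */
#define DEMO_ITR_INDX_S 3     /* hypothetical: ITR index field offset */
#define DEMO_INTERVAL_S 5     /* hypothetical: interval field offset */
#define DEMO_ITR_GRAN_S 1     /* assuming the usual 2 us ITR granularity */

static unsigned int demo_buildreg_itr(unsigned int itr_idx, unsigned int itr_us)
{
	return DEMO_INTENA | DEMO_CLEARPBA |
	       (itr_idx << DEMO_ITR_INDX_S) |
	       (itr_us << (DEMO_INTERVAL_S - DEMO_ITR_GRAN_S));
}

int main(void)
{
	/* 50 us becomes 25 hardware ticks placed at the interval offset */
	printf("0x%08x\n", demo_buildreg_itr(0, 50));
	return 0;
}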
1619 * ice_enable_interrupt - re-enable MSI-X interrupt
1622 * If the VSI is down, the interrupt will not be re-enabled. Also,
1628 struct ice_vsi *vsi = q_vector->vsi; in ice_enable_interrupt()
1629 bool wb_en = q_vector->wb_on_itr; in ice_enable_interrupt()
1632 if (test_bit(ICE_DOWN, vsi->state)) in ice_enable_interrupt()
1643 q_vector->wb_on_itr = false; in ice_enable_interrupt()
1656 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val); in ice_enable_interrupt()
1660 * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
1663 * We need to tell hardware to write-back completed descriptors even when
1664 * interrupts are disabled. Descriptors will be written back on cache line
1666 * descriptors may not be written back if they don't fill a cache line until
1669 * This sets the write-back frequency to whatever was set previously for the
1675 struct ice_vsi *vsi = q_vector->vsi; in ice_set_wb_on_itr()
1678 if (q_vector->wb_on_itr) in ice_set_wb_on_itr()
1683 * be static in non-adaptive mode (user configured) in ice_set_wb_on_itr()
1685 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), in ice_set_wb_on_itr()
1690 q_vector->wb_on_itr = true; in ice_set_wb_on_itr()
1694 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
1715 ice_for_each_tx_ring(tx_ring, q_vector->tx) { in ice_napi_poll()
1716 struct xsk_buff_pool *xsk_pool = READ_ONCE(tx_ring->xsk_pool); in ice_napi_poll()
1735 if (unlikely(q_vector->num_ring_rx > 1)) in ice_napi_poll()
1740 budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1); in ice_napi_poll()
1745 ice_for_each_rx_ring(rx_ring, q_vector->rx) { in ice_napi_poll()
1746 struct xsk_buff_pool *xsk_pool = READ_ONCE(rx_ring->xsk_pool); in ice_napi_poll()
1749 /* A dedicated path for zero-copy allows making a single in ice_napi_poll()
1753 cleaned = rx_ring->xsk_pool ? in ice_napi_poll()
1765 * cache-lines will still continue even if we're polling. in ice_napi_poll()
1771 /* Exit the polling mode, but don't re-enable interrupts if stack might in ice_napi_poll()
1772 * poll us due to busy-polling in ice_napi_poll()
1781 return min_t(int, work_done, budget - 1); in ice_napi_poll()
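The poll fragments show two NAPI conventions at work: the budget is divided across the Rx rings sharing the vector (never below one per ring), and on full completion the function returns strictly less than the budget so napi_complete_done() can re-arm the interrupt; if any ring exhausted its slice, the whole budget is returned to stay in polling mode. A stand-alone sketch of that control flow, with demo_poll and its arguments as stand-ins:

#include <stdio.h>

static int demo_poll(int budget, int num_rx_rings, const int *work_per_ring)
{
	int budget_per_ring = budget / num_rx_rings;
	int work_done = 0, clean_complete = 1;

	if (budget_per_ring < 1)
		budget_per_ring = 1;  /* always give each ring a chance */

	for (int i = 0; i < num_rx_rings; i++) {
		int cleaned = work_per_ring[i];

		if (cleaned >= budget_per_ring)
			clean_complete = 0;  /* this ring still has packets */
		work_done += cleaned;
	}

	if (!clean_complete)
		return budget;  /* keep polling */
	return work_done < budget ? work_done : budget - 1;
}

int main(void)
{
	int light[2] = { 3, 5 }, heavy[2] = { 32, 10 };

	printf("%d %d\n", demo_poll(64, 2, light), demo_poll(64, 2, heavy));
	return 0;
}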
1785 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
1789 * Returns -EBUSY if a stop is needed, else 0
1799 return -EBUSY; in __ice_maybe_stop_tx()
1801 /* A reprieve! - use start_queue because it doesn't call schedule */ in __ice_maybe_stop_tx()
1803 ++tx_ring->ring_stats->tx_stats.restart_q; in __ice_maybe_stop_tx()
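__ice_maybe_stop_tx is the standard stop/restart dance: stop the queue first, issue a full memory barrier so the completion path observes the stopped state, then re-check the free-descriptor count in case cleanup freed space in the meantime (the "reprieve" in the comment). A userspace sketch with stubbed-out queue and ring state; __sync_synchronize() stands in for smp_mb():

#include <stdbool.h>
#include <stdio.h>

static bool queue_stopped;
static int ring_unused = 2;  /* pretend only two descriptors are free */

static void demo_stop_queue(void)  { queue_stopped = true; }
static void demo_start_queue(void) { queue_stopped = false; }
static int  demo_desc_unused(void) { return ring_unused; }

static int demo_maybe_stop_tx(int needed)
{
	if (demo_desc_unused() >= needed)
		return 0;

	demo_stop_queue();
	__sync_synchronize();  /* smp_mb() in the kernel version */

	if (demo_desc_unused() < needed)
		return -1;         /* really out of room (-EBUSY) */

	demo_start_queue();    /* space appeared between the checks */
	return 0;
}

int main(void)
{
	printf("%d\n", demo_maybe_stop_tx(4));  /* needs 4, only 2 free: stop */
	return 0;
}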
1808 * ice_maybe_stop_tx - 1st level check for Tx stop conditions
1823 * ice_tx_map - Build the Tx descriptor
1837 u16 i = tx_ring->next_to_use; in ice_tx_map()
1846 td_tag = off->td_l2tag1; in ice_tx_map()
1847 td_cmd = off->td_cmd; in ice_tx_map()
1848 td_offset = off->td_offset; in ice_tx_map()
1849 skb = first->skb; in ice_tx_map()
1851 data_len = skb->data_len; in ice_tx_map()
1856 if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) { in ice_tx_map()
1858 td_tag = first->vid; in ice_tx_map()
1861 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in ice_tx_map()
1865 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { in ice_tx_map()
1868 if (dma_mapping_error(tx_ring->dev, dma)) in ice_tx_map()
1876 max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1); in ice_tx_map()
1877 tx_desc->buf_addr = cpu_to_le64(dma); in ice_tx_map()
1883 tx_desc->cmd_type_offset_bsz = in ice_tx_map()
1890 if (i == tx_ring->count) { in ice_tx_map()
1896 size -= max_data; in ice_tx_map()
1899 tx_desc->buf_addr = cpu_to_le64(dma); in ice_tx_map()
1905 tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset, in ice_tx_map()
1911 if (i == tx_ring->count) { in ice_tx_map()
1917 data_len -= size; in ice_tx_map()
1919 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, in ice_tx_map()
1922 tx_buf = &tx_ring->tx_buf[i]; in ice_tx_map()
1923 tx_buf->type = ICE_TX_BUF_FRAG; in ice_tx_map()
1927 skb_tx_timestamp(first->skb); in ice_tx_map()
1930 if (i == tx_ring->count) in ice_tx_map()
1935 tx_desc->cmd_type_offset_bsz = in ice_tx_map()
1947 first->next_to_watch = tx_desc; in ice_tx_map()
1949 tx_ring->next_to_use = i; in ice_tx_map()
1954 kick = __netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount, in ice_tx_map()
1960 struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring; in ice_tx_map()
1961 u32 tstamp_count = tstamp_ring->count; in ice_tx_map()
1962 u32 j = tstamp_ring->next_to_use; in ice_tx_map()
1967 ts = ktime_to_timespec64(first->skb->tstamp); in ice_tx_map()
1971 ts_desc->tx_desc_idx_tstamp = ice_build_tstamp_desc(i, tstamp); in ice_tx_map()
1975 u32 fetch = tstamp_count - tx_ring->count; in ice_tx_map()
1987 ts_desc->tx_desc_idx_tstamp = in ice_tx_map()
1991 tstamp_ring->next_to_use = j; in ice_tx_map()
1992 writel_relaxed(j, tstamp_ring->tail); in ice_tx_map()
1994 writel_relaxed(i, tx_ring->tail); in ice_tx_map()
2001 tx_buf = &tx_ring->tx_buf[i]; in ice_tx_map()
2006 i = tx_ring->count; in ice_tx_map()
2007 i--; in ice_tx_map()
2010 tx_ring->next_to_use = i; in ice_tx_map()
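The mapping fragments split any buffer larger than one descriptor into chunks: the first chunk is padded up to the next PCIe read-request boundary ("-dma & (ICE_MAX_READ_REQ_SIZE - 1)"), the rest are fixed-size aligned slices. The constants below are illustrative stand-ins chosen to match the 16K - 1 and 12K figures mentioned later in the file:

#include <stdint.h>
#include <stdio.h>

#define MAX_PER_TXD         (16 * 1024 - 1)  /* per-descriptor byte limit */
#define MAX_PER_TXD_ALIGNED (12 * 1024)      /* slice size when splitting */
#define MAX_READ_REQ        4096             /* read-request alignment */

int main(void)
{
	uint64_t dma = 0x12345100;  /* deliberately unaligned start address */
	unsigned int size = 40000;
	unsigned int max_data = MAX_PER_TXD_ALIGNED;

	/* pad the first slice so later slices start read-request aligned */
	max_data += (unsigned int)(-dma & (MAX_READ_REQ - 1));

	while (size > MAX_PER_TXD) {
		printf("desc: dma=0x%llx len=%u\n",
		       (unsigned long long)dma, max_data);
		dma += max_data;
		size -= max_data;
		max_data = MAX_PER_TXD_ALIGNED;
	}
	printf("desc: dma=0x%llx len=%u\n", (unsigned long long)dma, size);
	return 0;
}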
2014 * ice_tx_csum - Enable Tx checksum offloads
2023 const struct ice_tx_ring *tx_ring = off->tx_ring; in ice_tx_csum()
2025 struct sk_buff *skb = first->skb; in ice_tx_csum()
2040 if (skb->ip_summed != CHECKSUM_PARTIAL) in ice_tx_csum()
2054 l2_len = ip.hdr - skb->data; in ice_tx_csum()
2060 if (ip.v4->version == 4) in ice_tx_csum()
2061 first->tx_flags |= ICE_TX_FLAGS_IPV4; in ice_tx_csum()
2062 else if (ip.v6->version == 6) in ice_tx_csum()
2063 first->tx_flags |= ICE_TX_FLAGS_IPV6; in ice_tx_csum()
2065 if (skb->encapsulation) { in ice_tx_csum()
2070 if (first->tx_flags & ICE_TX_FLAGS_IPV4) { in ice_tx_csum()
2071 tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ? in ice_tx_csum()
2074 l4_proto = ip.v4->protocol; in ice_tx_csum()
2075 } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) { in ice_tx_csum()
2080 l4_proto = ip.v6->nexthdr; in ice_tx_csum()
2081 ret = ipv6_skip_exthdr(skb, exthdr - skb->data, in ice_tx_csum()
2084 return -1; in ice_tx_csum()
2091 first->tx_flags |= ICE_TX_FLAGS_TUNNEL; in ice_tx_csum()
2095 first->tx_flags |= ICE_TX_FLAGS_TUNNEL; in ice_tx_csum()
2099 first->tx_flags |= ICE_TX_FLAGS_TUNNEL; in ice_tx_csum()
2103 if (first->tx_flags & ICE_TX_FLAGS_TSO) in ice_tx_csum()
2104 return -1; in ice_tx_csum()
2111 tunnel |= ((l4.hdr - ip.hdr) / 4) << in ice_tx_csum()
2118 tunnel |= ((ip.hdr - l4.hdr) / 2) << in ice_tx_csum()
2121 gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL; in ice_tx_csum()
2123 if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena && in ice_tx_csum()
2124 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) in ice_tx_csum()
2128 off->cd_tunnel_params |= tunnel; in ice_tx_csum()
2133 off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX; in ice_tx_csum()
2140 first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6); in ice_tx_csum()
2141 if (ip.v4->version == 4) in ice_tx_csum()
2142 first->tx_flags |= ICE_TX_FLAGS_IPV4; in ice_tx_csum()
2143 if (ip.v6->version == 6) in ice_tx_csum()
2144 first->tx_flags |= ICE_TX_FLAGS_IPV6; in ice_tx_csum()
2148 if (first->tx_flags & ICE_TX_FLAGS_IPV4) { in ice_tx_csum()
2149 l4_proto = ip.v4->protocol; in ice_tx_csum()
2153 if (first->tx_flags & ICE_TX_FLAGS_TSO) in ice_tx_csum()
2158 } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) { in ice_tx_csum()
2161 l4_proto = ip.v6->nexthdr; in ice_tx_csum()
2163 ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, in ice_tx_csum()
2166 return -1; in ice_tx_csum()
2170 l3_len = l4.hdr - ip.hdr; in ice_tx_csum()
2173 if ((tx_ring->netdev->features & NETIF_F_HW_CSUM) && in ice_tx_csum()
2174 !(first->tx_flags & ICE_TX_FLAGS_TSO) && in ice_tx_csum()
2177 u16 csum_start = (skb->csum_start - skb->mac_header) / 2; in ice_tx_csum()
2178 u16 csum_offset = skb->csum_offset / 2; in ice_tx_csum()
2189 off->cd_qw1 |= ICE_TX_DESC_DTYPE_CTX; in ice_tx_csum()
2190 off->cd_gcs_params = gcs_params; in ice_tx_csum()
2192 off->td_offset |= offset; in ice_tx_csum()
2193 off->td_cmd |= cmd; in ice_tx_csum()
2202 l4_len = l4.tcp->doff; in ice_tx_csum()
2219 if (first->tx_flags & ICE_TX_FLAGS_TSO) in ice_tx_csum()
2220 return -1; in ice_tx_csum()
2225 off->td_cmd |= cmd; in ice_tx_csum()
2226 off->td_offset |= offset; in ice_tx_csum()
2231 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
2241 struct sk_buff *skb = first->skb; in ice_tx_prepare_vlan_flags()
2244 if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol)) in ice_tx_prepare_vlan_flags()
2252 first->vid = skb_vlan_tag_get(skb); in ice_tx_prepare_vlan_flags()
2253 if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2) in ice_tx_prepare_vlan_flags()
2254 first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN; in ice_tx_prepare_vlan_flags()
2256 first->tx_flags |= ICE_TX_FLAGS_HW_VLAN; in ice_tx_prepare_vlan_flags()
2263 * ice_tso - computes mss and TSO length to prepare for TSO
2272 struct sk_buff *skb = first->skb; in ice_tso()
2289 if (skb->ip_summed != CHECKSUM_PARTIAL) in ice_tso()
2308 if (ip.v4->version == 4) { in ice_tso()
2309 ip.v4->tot_len = 0; in ice_tso()
2310 ip.v4->check = 0; in ice_tso()
2312 ip.v6->payload_len = 0; in ice_tso()
2315 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | in ice_tso()
2321 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && in ice_tso()
2322 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { in ice_tso()
2323 l4.udp->len = 0; in ice_tso()
2326 l4_start = (u8)(l4.hdr - skb->data); in ice_tso()
2329 paylen = skb->len - l4_start; in ice_tso()
2330 csum_replace_by_diff(&l4.udp->check, in ice_tso()
2339 if (ip.v4->version == 4) { in ice_tso()
2340 ip.v4->tot_len = 0; in ice_tso()
2341 ip.v4->check = 0; in ice_tso()
2343 ip.v6->payload_len = 0; in ice_tso()
2348 l4_start = (u8)(l4.hdr - skb->data); in ice_tso()
2351 paylen = skb->len - l4_start; in ice_tso()
2353 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { in ice_tso()
2354 csum_replace_by_diff(&l4.udp->check, in ice_tso()
2357 off->header_len = (u8)sizeof(l4.udp) + l4_start; in ice_tso()
2359 csum_replace_by_diff(&l4.tcp->check, in ice_tso()
2362 off->header_len = (u8)((l4.tcp->doff * 4) + l4_start); in ice_tso()
2366 first->gso_segs = skb_shinfo(skb)->gso_segs; in ice_tso()
2367 first->bytecount += (first->gso_segs - 1) * off->header_len; in ice_tso()
2369 cd_tso_len = skb->len - off->header_len; in ice_tso()
2370 cd_mss = skb_shinfo(skb)->gso_size; in ice_tso()
2373 off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | in ice_tso()
2377 first->tx_flags |= ICE_TX_FLAGS_TSO; in ice_tso()
2382 * ice_txd_use_count - estimate the number of descriptors needed for Tx
2387 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
2398 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
2399 * segment. For our purposes this is accurate out to 1M which is orders of
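The comment above describes replacing the divide by 12K with shifts and one multiply: shift right by 12 to divide by 4K, then multiply by 85 and shift right by 8 to approximate the divide by 3, and add one descriptor for the skb head pointer. The sketch shows the arithmetic with a few worked values; the helper name is a stand-in.

#include <stdio.h>

static unsigned int demo_txd_use_count(unsigned int size)
{
	/* (size / 4K) * (85 / 256) ~= size / 12K, slightly underestimating,
	 * which the 4K - 1 of per-descriptor slack absorbs */
	return ((size >> 12) * 85 >> 8) + 1;
}

int main(void)
{
	printf("%u %u %u\n", demo_txd_use_count(1500),
	       demo_txd_use_count(12 * 1024), demo_txd_use_count(64 * 1024));
	return 0;
}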
2415 * ice_xmit_desc_count - calculate number of Tx descriptors needed
2422 const skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; in ice_xmit_desc_count()
2423 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; in ice_xmit_desc_count()
2429 if (!nr_frags--) in ice_xmit_desc_count()
2439 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
2457 nr_frags = skb_shinfo(skb)->nr_frags; in __ice_chk_linearize()
2458 if (nr_frags < (ICE_MAX_BUF_TXD - 1)) in __ice_chk_linearize()
2464 nr_frags -= ICE_MAX_BUF_TXD - 2; in __ice_chk_linearize()
2465 frag = &skb_shinfo(skb)->frags[0]; in __ice_chk_linearize()
2473 sum = 1 - skb_shinfo(skb)->gso_size; in __ice_chk_linearize()
2485 for (stale = &skb_shinfo(skb)->frags[0];; stale++) { in __ice_chk_linearize()
2497 int align_pad = -(skb_frag_off(stale)) & in __ice_chk_linearize()
2498 (ICE_MAX_READ_REQ_SIZE - 1); in __ice_chk_linearize()
2500 sum -= align_pad; in __ice_chk_linearize()
2501 stale_size -= align_pad; in __ice_chk_linearize()
2504 sum -= ICE_MAX_DATA_PER_TXD_ALIGNED; in __ice_chk_linearize()
2505 stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED; in __ice_chk_linearize()
2513 if (!nr_frags--) in __ice_chk_linearize()
2516 sum -= stale_size; in __ice_chk_linearize()
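The loop above keeps a sliding sum over the fragment list: starting from a deficit of (gso_size - 1), it adds the newest fragment, checks that the window never goes negative, and subtracts the stale fragment on the way out; fragments larger than one descriptor are first broken down by the read-request alignment, which is omitted below. A simplified model of the window test, with demo constants standing in for the descriptor-per-packet limit:

#include <stdbool.h>
#include <stdio.h>

#define DEMO_MAX_BUF_PER_PKT 8  /* hypothetical descriptors per packet */
#define DEMO_WINDOW          (DEMO_MAX_BUF_PER_PKT - 2)

/* true if the skb would need linearizing: some group of DEMO_WINDOW
 * consecutive fragments cannot carry one full gso_size segment */
static bool demo_needs_linearize(const int *frag, int nr_frags, int gso_size)
{
	if (nr_frags < DEMO_MAX_BUF_PER_PKT - 1)
		return false;  /* few enough frags, hardware always copes */

	for (int start = 0; start + DEMO_WINDOW <= nr_frags; start++) {
		int sum = 1 - gso_size;

		for (int k = 0; k < DEMO_WINDOW; k++)
			sum += frag[start + k];
		if (sum < 0)
			return true;
	}
	return false;
}

int main(void)
{
	int frags[9] = { 100, 100, 100, 100, 100, 100, 100, 100, 100 };

	printf("%d %d\n", demo_needs_linearize(frags, 9, 1400),
	       demo_needs_linearize(frags, 9, 500));
	return 0;
}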
2523 * ice_chk_linearize - Check if there are more than 8 fragments per packet
2527 * Note: Our HW can't scatter-gather more than 8 fragments to build
2545 * ice_tstamp - set up context descriptor for hardware timestamp
2558 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) in ice_tstamp()
2562 if (first->tx_flags & ICE_TX_FLAGS_TSO) in ice_tstamp()
2566 idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb); in ice_tstamp()
2568 tx_ring->vsi->back->ptp.tx_hwtstamp_skipped++; in ice_tstamp()
2572 off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | in ice_tstamp()
2575 first->tx_flags |= ICE_TX_FLAGS_TSYN; in ice_tstamp()
2579 * ice_xmit_frame_ring - Sends buffer on Tx ring
2589 struct ice_vsi *vsi = tx_ring->vsi; in ice_xmit_frame_ring()
2604 count = ice_txd_use_count(skb->len); in ice_xmit_frame_ring()
2605 tx_ring->ring_stats->tx_stats.tx_linearize++; in ice_xmit_frame_ring()
2610 * + 4 desc gap to avoid the cache line where head is, in ice_xmit_frame_ring()
2616 tx_ring->ring_stats->tx_stats.tx_busy++; in ice_xmit_frame_ring()
2626 first = &tx_ring->tx_buf[tx_ring->next_to_use]; in ice_xmit_frame_ring()
2627 first->skb = skb; in ice_xmit_frame_ring()
2628 first->type = ICE_TX_BUF_SKB; in ice_xmit_frame_ring()
2629 first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN); in ice_xmit_frame_ring()
2630 first->gso_segs = 1; in ice_xmit_frame_ring()
2631 first->tx_flags = 0; in ice_xmit_frame_ring()
2635 if (first->tx_flags & ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN) { in ice_xmit_frame_ring()
2639 offload.cd_l2tag2 = first->vid; in ice_xmit_frame_ring()
2655 if ((ice_is_switchdev_running(vsi->back) || in ice_xmit_frame_ring()
2656 ice_lag_is_switchdev_running(vsi->back)) && in ice_xmit_frame_ring()
2657 vsi->type != ICE_VSI_SF) in ice_xmit_frame_ring()
2659 else if (unlikely((skb->priority == TC_PRIO_CONTROL || in ice_xmit_frame_ring()
2660 eth->h_proto == htons(ETH_P_LLDP)) && in ice_xmit_frame_ring()
2661 vsi->type == ICE_VSI_PF && in ice_xmit_frame_ring()
2662 vsi->port_info->qos_cfg.is_sw_lldp)) in ice_xmit_frame_ring()
2671 u16 i = tx_ring->next_to_use; in ice_xmit_frame_ring()
2676 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in ice_xmit_frame_ring()
2679 cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params); in ice_xmit_frame_ring()
2680 cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2); in ice_xmit_frame_ring()
2681 cdesc->gcs = cpu_to_le16(offload.cd_gcs_params); in ice_xmit_frame_ring()
2682 cdesc->qw1 = cpu_to_le64(offload.cd_qw1); in ice_xmit_frame_ring()
2695 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
2704 struct ice_vsi *vsi = np->vsi; in ice_start_xmit()
2707 tx_ring = vsi->tx_rings[skb->queue_mapping]; in ice_start_xmit()
2719 * ice_get_dscp_up - return the UP/TC value for a SKB
2729 if (skb->protocol == htons(ETH_P_IP)) in ice_get_dscp_up()
2731 else if (skb->protocol == htons(ETH_P_IPV6)) in ice_get_dscp_up()
2734 return dcbcfg->dscp_map[dscp]; in ice_get_dscp_up()
2744 dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; in ice_select_queue()
2745 if (dcbcfg->pfc_mode == ICE_QOS_MODE_DSCP) in ice_select_queue()
2746 skb->priority = ice_get_dscp_up(dcbcfg, skb); in ice_select_queue()
2752 * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
2757 struct ice_vsi *vsi = tx_ring->vsi; in ice_clean_ctrl_tx_irq()
2758 s16 i = tx_ring->next_to_clean; in ice_clean_ctrl_tx_irq()
2763 tx_buf = &tx_ring->tx_buf[i]; in ice_clean_ctrl_tx_irq()
2765 i -= tx_ring->count; in ice_clean_ctrl_tx_irq()
2768 struct ice_tx_desc *eop_desc = tx_buf->next_to_watch; in ice_clean_ctrl_tx_irq()
2778 if (!(eop_desc->cmd_type_offset_bsz & in ice_clean_ctrl_tx_irq()
2783 tx_buf->next_to_watch = NULL; in ice_clean_ctrl_tx_irq()
2784 tx_desc->buf_addr = 0; in ice_clean_ctrl_tx_irq()
2785 tx_desc->cmd_type_offset_bsz = 0; in ice_clean_ctrl_tx_irq()
2792 i -= tx_ring->count; in ice_clean_ctrl_tx_irq()
2793 tx_buf = tx_ring->tx_buf; in ice_clean_ctrl_tx_irq()
2799 dma_unmap_single(tx_ring->dev, in ice_clean_ctrl_tx_irq()
2803 if (tx_buf->type == ICE_TX_BUF_DUMMY) in ice_clean_ctrl_tx_irq()
2804 devm_kfree(tx_ring->dev, tx_buf->raw_buf); in ice_clean_ctrl_tx_irq()
2807 tx_buf->type = ICE_TX_BUF_EMPTY; in ice_clean_ctrl_tx_irq()
2808 tx_buf->tx_flags = 0; in ice_clean_ctrl_tx_irq()
2809 tx_buf->next_to_watch = NULL; in ice_clean_ctrl_tx_irq()
2811 tx_desc->buf_addr = 0; in ice_clean_ctrl_tx_irq()
2812 tx_desc->cmd_type_offset_bsz = 0; in ice_clean_ctrl_tx_irq()
2819 i -= tx_ring->count; in ice_clean_ctrl_tx_irq()
2820 tx_buf = tx_ring->tx_buf; in ice_clean_ctrl_tx_irq()
2824 budget--; in ice_clean_ctrl_tx_irq()
2827 i += tx_ring->count; in ice_clean_ctrl_tx_irq()
2828 tx_ring->next_to_clean = i; in ice_clean_ctrl_tx_irq()
2830 /* re-enable interrupt if needed */ in ice_clean_ctrl_tx_irq()
2831 ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]); in ice_clean_ctrl_tx_irq()