// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock.h>
#include <net/xdp.h>

#include "i40e.h"
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"

/**
 * i40e_xsk_umem_dma_map - DMA maps all UMEM memory for the netdev
 * @vsi: Current VSI
 * @umem: UMEM to DMA map
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_xsk_umem_dma_map(struct i40e_vsi *vsi, struct xdp_umem *umem)
{
	struct i40e_pf *pf = vsi->back;
	struct device *dev;
	unsigned int i, j;
	dma_addr_t dma;

	dev = &pf->pdev->dev;
	for (i = 0; i < umem->npgs; i++) {
		dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR);
		if (dma_mapping_error(dev, dma))
			goto out_unmap;

		umem->pages[i].dma = dma;
	}

	return 0;

out_unmap:
	for (j = 0; j < i; j++) {
		dma_unmap_page_attrs(dev, umem->pages[j].dma, PAGE_SIZE,
				     DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR);
		umem->pages[j].dma = 0;
	}

	return -ENOMEM;
}

/**
 * i40e_xsk_umem_dma_unmap - DMA unmaps all UMEM memory for the netdev
 * @vsi: Current VSI
 * @umem: UMEM to DMA unmap
 **/
static void i40e_xsk_umem_dma_unmap(struct i40e_vsi *vsi, struct xdp_umem *umem)
{
	struct i40e_pf *pf = vsi->back;
	struct device *dev;
	unsigned int i;

	dev = &pf->pdev->dev;

	for (i = 0; i < umem->npgs; i++) {
		dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
				     DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR);

		umem->pages[i].dma = 0;
	}
}

/**
 * i40e_xsk_umem_enable - Enable/associate a UMEM to a certain ring/qid
 * @vsi: Current VSI
 * @umem: UMEM
 * @qid: Rx ring to associate UMEM to
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
				u16 qid)
{
	struct net_device *netdev = vsi->netdev;
	struct xdp_umem_fq_reuse *reuseq;
	bool if_running;
	int err;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	if (qid >= vsi->num_queue_pairs)
		return -EINVAL;

	if (qid >= netdev->real_num_rx_queues ||
	    qid >= netdev->real_num_tx_queues)
		return -EINVAL;

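	/* Prepare a buffer reuse queue sized to the Rx ring, so that
	 * buffers still sitting on the ring can be handed back to the
	 * UMEM when the ring is torn down (see i40e_xsk_clean_rx_ring()).
	 */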
	reuseq = xsk_reuseq_prepare(vsi->rx_rings[0]->count);
	if (!reuseq)
		return -ENOMEM;

	xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));

	err = i40e_xsk_umem_dma_map(vsi, umem);
	if (err)
		return err;

	set_bit(qid, vsi->af_xdp_zc_qps);

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;

		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;

		/* Kick start the NAPI context so that receiving will start */
		err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
		if (err)
			return err;
	}

	return 0;
}

/**
 * i40e_xsk_umem_disable - Disassociate a UMEM from a certain ring/qid
 * @vsi: Current VSI
 * @qid: Rx ring to disassociate UMEM from
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
{
	struct net_device *netdev = vsi->netdev;
	struct xdp_umem *umem;
	bool if_running;
	int err;

	umem = xdp_get_umem_from_qid(netdev, qid);
	if (!umem)
		return -EINVAL;

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;
	}

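	/* At this point the queue pair is disabled (or the interface is
	 * down altogether), so nothing is using the UMEM and it can be
	 * unmapped safely.
	 */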
	clear_bit(qid, vsi->af_xdp_zc_qps);
	i40e_xsk_umem_dma_unmap(vsi, umem);

	if (if_running) {
		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;
	}

	return 0;
}

/**
 * i40e_xsk_umem_setup - Enable/disassociate a UMEM to/from a ring/qid
 * @vsi: Current VSI
 * @umem: UMEM to enable/associate to a ring, or NULL to disable
 * @qid: Rx ring to (dis)associate UMEM to/from
 *
 * This function associates a UMEM to a certain ring, or disassociates it.
 *
 * Returns 0 on success, <0 on failure
 **/
int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
			u16 qid)
{
	return umem ? i40e_xsk_umem_enable(vsi, umem, qid) :
		i40e_xsk_umem_disable(vsi, qid);
}

/**
 * i40e_run_xdp_zc - Executes an XDP program on an xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 *
 * This function runs the XDP program on the xdp_buff and returns the
 * resulting verdict.
 *
 * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR}
 **/
static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
{
	int err, result = I40E_XDP_PASS;
	struct i40e_ring *xdp_ring;
	struct bpf_prog *xdp_prog;
	u32 act;

	rcu_read_lock();
	/* NB! xdp_prog will always be !NULL, due to the fact that
	 * this path is enabled by setting an XDP program.
	 */
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	act = bpf_prog_run_xdp(xdp_prog, xdp);
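	/* The XDP program may have moved xdp->data, e.g. with
	 * bpf_xdp_adjust_head(), so fold that offset into the UMEM
	 * handle before the buffer is passed on.
	 */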
	xdp->handle += xdp->data - xdp->data_hard_start;
	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* fall through */
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		/* fallthrough -- handle aborts by dropping packet */
	case XDP_DROP:
		result = I40E_XDP_CONSUMED;
		break;
	}
	rcu_read_unlock();
	return result;
}

/**
 * i40e_alloc_buffer_zc - Allocates an i40e_rx_buffer
 * @rx_ring: Rx ring
 * @bi: Rx buffer to populate
 *
 * This function allocates an Rx buffer. The buffer can come from fill
 * queue, or via the recycle queue (next_to_alloc).
 *
 * Returns true for a successful allocation, false otherwise
 **/
static bool i40e_alloc_buffer_zc(struct i40e_ring *rx_ring,
				 struct i40e_rx_buffer *bi)
{
	struct xdp_umem *umem = rx_ring->xsk_umem;
	void *addr = bi->addr;
	u64 handle, hr;

	if (addr) {
		rx_ring->rx_stats.page_reuse_count++;
		return true;
	}

	if (!xsk_umem_peek_addr(umem, &handle)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	hr = umem->headroom + XDP_PACKET_HEADROOM;

	bi->dma = xdp_umem_get_dma(umem, handle);
	bi->dma += hr;

	bi->addr = xdp_umem_get_data(umem, handle);
	bi->addr += hr;

	bi->handle = handle + umem->headroom;

	xsk_umem_discard_addr(umem);
	return true;
}

/**
 * i40e_alloc_buffer_slow_zc - Allocates an i40e_rx_buffer
 * @rx_ring: Rx ring
 * @bi: Rx buffer to populate
 *
 * This function allocates an Rx buffer. The buffer can come from fill
 * queue, or via the reuse queue.
 *
 * Returns true for a successful allocation, false otherwise
 **/
static bool i40e_alloc_buffer_slow_zc(struct i40e_ring *rx_ring,
				      struct i40e_rx_buffer *bi)
{
	struct xdp_umem *umem = rx_ring->xsk_umem;
	u64 handle, hr;

	if (!xsk_umem_peek_addr_rq(umem, &handle)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

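	/* Handles recycled through the reuse queue still carry the
	 * headroom offset, so mask the handle down to its chunk base
	 * address first.
	 */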
	handle &= rx_ring->xsk_umem->chunk_mask;

	hr = umem->headroom + XDP_PACKET_HEADROOM;

	bi->dma = xdp_umem_get_dma(umem, handle);
	bi->dma += hr;

	bi->addr = xdp_umem_get_data(umem, handle);
	bi->addr += hr;

	bi->handle = handle + umem->headroom;

	xsk_umem_discard_addr_rq(umem);
	return true;
}

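/* The alloc() argument below is always a compile-time constant
 * (i40e_alloc_buffer_zc or i40e_alloc_buffer_slow_zc), so with
 * __always_inline the compiler can be expected to replace the
 * indirect call with a direct one in each wrapper.
 */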
static __always_inline bool
__i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count,
			   bool alloc(struct i40e_ring *rx_ring,
				      struct i40e_rx_buffer *bi))
{
	u16 ntu = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;
	bool ok = true;

	rx_desc = I40E_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_bi[ntu];
	do {
		if (!alloc(rx_ring, bi)) {
			ok = false;
			goto no_buffers;
		}

		dma_sync_single_range_for_device(rx_ring->dev, bi->dma, 0,
						 rx_ring->rx_buf_len,
						 DMA_BIDIRECTIONAL);

		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);

		rx_desc++;
		bi++;
		ntu++;

		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = I40E_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_bi;
			ntu = 0;
		}

		rx_desc->wb.qword1.status_error_len = 0;
		count--;
	} while (count);

no_buffers:
	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	return ok;
}

/**
 * i40e_alloc_rx_buffers_zc - Allocates a number of Rx buffers
 * @rx_ring: Rx ring
 * @count: The number of buffers to allocate
 *
 * This function allocates a number of Rx buffers from the reuse queue
 * or fill ring and places them on the Rx ring.
 *
 * Returns true for a successful allocation, false otherwise
 **/
bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
{
	return __i40e_alloc_rx_buffers_zc(rx_ring, count,
					  i40e_alloc_buffer_slow_zc);
}

/**
 * i40e_alloc_rx_buffers_fast_zc - Allocates a number of Rx buffers
 * @rx_ring: Rx ring
 * @count: The number of buffers to allocate
 *
 * This function allocates a number of Rx buffers from the fill ring
 * or the internal recycle mechanism and places them on the Rx ring.
 *
 * Returns true for a successful allocation, false otherwise
 **/
static bool i40e_alloc_rx_buffers_fast_zc(struct i40e_ring *rx_ring, u16 count)
{
	return __i40e_alloc_rx_buffers_zc(rx_ring, count,
					  i40e_alloc_buffer_zc);
}

/**
 * i40e_get_rx_buffer_zc - Return the current Rx buffer
 * @rx_ring: Rx ring
 * @size: The size of the rx buffer (read from descriptor)
 *
 * This function returns the current, received Rx buffer, and also
 * does DMA synchronization on the Rx buffer.
 *
 * Returns the received Rx buffer
 **/
static struct i40e_rx_buffer *i40e_get_rx_buffer_zc(struct i40e_ring *rx_ring,
						    const unsigned int size)
{
	struct i40e_rx_buffer *bi;

	bi = &rx_ring->rx_bi[rx_ring->next_to_clean];

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      bi->dma, 0,
				      size,
				      DMA_BIDIRECTIONAL);

	return bi;
}

/**
 * i40e_reuse_rx_buffer_zc - Recycle an Rx buffer
 * @rx_ring: Rx ring
 * @old_bi: The Rx buffer to recycle
 *
 * This function recycles a finished Rx buffer, and places it on the
 * recycle queue (next_to_alloc).
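 * This happens for frames that are not forwarded (XDP_DROP and
 * XDP_ABORTED), after an XDP_PASS frame has been copied into an skb,
 * and for programming status descriptors.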
 **/
static void i40e_reuse_rx_buffer_zc(struct i40e_ring *rx_ring,
				    struct i40e_rx_buffer *old_bi)
{
	struct i40e_rx_buffer *new_bi = &rx_ring->rx_bi[rx_ring->next_to_alloc];
	unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask;
	u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
	u16 nta = rx_ring->next_to_alloc;

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	new_bi->dma = old_bi->dma & mask;
	new_bi->dma += hr;

	new_bi->addr = (void *)((unsigned long)old_bi->addr & mask);
	new_bi->addr += hr;

	new_bi->handle = old_bi->handle & mask;
	new_bi->handle += rx_ring->xsk_umem->headroom;

	old_bi->addr = NULL;
}

/**
 * i40e_zca_free - Free callback for MEM_TYPE_ZERO_COPY allocations
 * @alloc: Zero-copy allocator
 * @handle: Buffer handle
 **/
void i40e_zca_free(struct zero_copy_allocator *alloc, unsigned long handle)
{
	struct i40e_rx_buffer *bi;
	struct i40e_ring *rx_ring;
	u64 hr, mask;
	u16 nta;

	rx_ring = container_of(alloc, struct i40e_ring, zca);
	hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
	mask = rx_ring->xsk_umem->chunk_mask;

	nta = rx_ring->next_to_alloc;
	bi = &rx_ring->rx_bi[nta];

	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	handle &= mask;

	bi->dma = xdp_umem_get_dma(rx_ring->xsk_umem, handle);
	bi->dma += hr;

	bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle);
	bi->addr += hr;

	bi->handle = (u64)handle + rx_ring->xsk_umem->headroom;
}

/**
 * i40e_construct_skb_zc - Create skbuff from zero-copy Rx buffer
 * @rx_ring: Rx ring
 * @bi: Rx buffer
 * @xdp: xdp_buff
 *
 * This function allocates a new skb from a zero-copy Rx buffer.
 *
 * Returns the skb, or NULL on failure.
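 * The frame data is copied out of the UMEM, so the zero-copy buffer
 * itself is recycled via i40e_reuse_rx_buffer_zc() afterwards.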
 **/
static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
					     struct i40e_rx_buffer *bi,
					     struct xdp_buff *xdp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
	unsigned int datasize = xdp->data_end - xdp->data;
	struct sk_buff *skb;

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
			       xdp->data_end - xdp->data_hard_start,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
	if (metasize)
		skb_metadata_set(skb, metasize);

	i40e_reuse_rx_buffer_zc(rx_ring, bi);
	return skb;
}

/**
 * i40e_inc_ntc - Advance the next_to_clean index
 * @rx_ring: Rx ring
 **/
static void i40e_inc_ntc(struct i40e_ring *rx_ring)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;
	prefetch(I40E_RX_DESC(rx_ring, ntc));
}

/**
 * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
 * @rx_ring: Rx ring
 * @budget: NAPI budget
 *
 * Returns amount of work completed
 **/
int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	unsigned int xdp_res, xdp_xmit = 0;
	bool failure = false;
	struct sk_buff *skb;
	struct xdp_buff xdp;

	xdp.rxq = &rx_ring->xdp_rxq;

	while (likely(total_rx_packets < (unsigned int)budget)) {
		struct i40e_rx_buffer *bi;
		union i40e_rx_desc *rx_desc;
		unsigned int size;
		u64 qword;

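		/* Replenish used Rx descriptors in batches of
		 * I40E_RX_BUFFER_WRITE to amortize the allocation cost
		 * and the tail bump.
		 */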
		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
			failure = failure ||
				  !i40e_alloc_rx_buffers_fast_zc(rx_ring,
								 cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */
		dma_rmb();

		bi = i40e_clean_programming_status(rx_ring, rx_desc,
						   qword);
		if (unlikely(bi)) {
			i40e_reuse_rx_buffer_zc(rx_ring, bi);
			cleaned_count++;
			continue;
		}

		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
		if (!size)
			break;

		bi = i40e_get_rx_buffer_zc(rx_ring, size);
		xdp.data = bi->addr;
		xdp.data_meta = xdp.data;
		xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
		xdp.data_end = xdp.data + size;
		xdp.handle = bi->handle;

		xdp_res = i40e_run_xdp_zc(rx_ring, &xdp);
		if (xdp_res) {
			if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
				xdp_xmit |= xdp_res;
				bi->addr = NULL;
			} else {
				i40e_reuse_rx_buffer_zc(rx_ring, bi);
			}

			total_rx_bytes += size;
			total_rx_packets++;

			cleaned_count++;
			i40e_inc_ntc(rx_ring);
			continue;
		}

		/* XDP_PASS path */

		/* NB! We are not checking for errors using
		 * i40e_test_staterr with
		 * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is due to that
		 * SBP is *not* set in PRT_SBPVSI (default not set).
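		 * With SBP cleared, the hardware drops bad packets
		 * instead of posting them to the host, so there is
		 * nothing to check for on this path.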
		 */
		skb = i40e_construct_skb_zc(rx_ring, bi, &xdp);
		if (!skb) {
			rx_ring->rx_stats.alloc_buff_failed++;
			break;
		}

		cleaned_count++;
		i40e_inc_ntc(rx_ring);

		if (eth_skb_pad(skb))
			continue;

		total_rx_bytes += skb->len;
		total_rx_packets++;

		i40e_process_skb_fields(rx_ring, rx_desc, skb);
		napi_gro_receive(&rx_ring->q_vector->napi, skb);
	}

	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);

	if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) {
		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(rx_ring->xsk_umem);
		else
			xsk_clear_rx_need_wakeup(rx_ring->xsk_umem);

		return (int)total_rx_packets;
	}
	return failure ? budget : (int)total_rx_packets;
}

/**
 * i40e_xmit_zc - Performs zero-copy Tx AF_XDP
 * @xdp_ring: XDP Tx ring
 * @budget: NAPI budget
 *
 * Returns true if the work is finished.
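 * Moves at most @budget descriptors from the AF_XDP Tx ring to the
 * hardware XDP Tx ring.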
 **/
static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
{
	struct i40e_tx_desc *tx_desc = NULL;
	struct i40e_tx_buffer *tx_bi;
	bool work_done = true;
	struct xdp_desc desc;
	dma_addr_t dma;

	while (budget-- > 0) {
		if (unlikely(!I40E_DESC_UNUSED(xdp_ring))) {
			xdp_ring->tx_stats.tx_busy++;
			work_done = false;
			break;
		}

		if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
			break;

		dma = xdp_umem_get_dma(xdp_ring->xsk_umem, desc.addr);

		dma_sync_single_for_device(xdp_ring->dev, dma, desc.len,
					   DMA_BIDIRECTIONAL);

		tx_bi = &xdp_ring->tx_bi[xdp_ring->next_to_use];
		tx_bi->bytecount = desc.len;

		tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use);
		tx_desc->buffer_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz =
			build_ctob(I40E_TX_DESC_CMD_ICRC
				   | I40E_TX_DESC_CMD_EOP,
				   0, desc.len, 0);

		xdp_ring->next_to_use++;
		if (xdp_ring->next_to_use == xdp_ring->count)
			xdp_ring->next_to_use = 0;
	}

	if (tx_desc) {
		/* Request an interrupt for the last frame and bump tail ptr.
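		 * Setting RS only on the last descriptor of the batch
		 * keeps the completion write-backs down to one per batch.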
		 */
		tx_desc->cmd_type_offset_bsz |= (I40E_TX_DESC_CMD_RS <<
						 I40E_TXD_QW1_CMD_SHIFT);
		i40e_xdp_ring_update_tail(xdp_ring);

		xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
		if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem))
			xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem);
	}

	return !!budget && work_done;
}

/**
 * i40e_clean_xdp_tx_buffer - Frees and unmaps an XDP Tx entry
 * @tx_ring: XDP Tx ring
 * @tx_bi: Tx buffer info to clean
 **/
static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
				     struct i40e_tx_buffer *tx_bi)
{
	xdp_return_frame(tx_bi->xdpf);
	dma_unmap_single(tx_ring->dev,
			 dma_unmap_addr(tx_bi, dma),
			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_bi, len, 0);
}

/**
 * i40e_clean_xdp_tx_irq - Completes AF_XDP entries, and cleans XDP entries
 * @vsi: Current VSI
 * @tx_ring: XDP Tx ring
 * @napi_budget: NAPI budget
 *
 * Returns true if cleanup/transmission is done.
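 * Completed frames are found via the head write-back index returned by
 * i40e_get_head(), rather than via per-descriptor DD bits.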
 **/
bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi,
			   struct i40e_ring *tx_ring, int napi_budget)
{
	unsigned int ntc, total_bytes = 0, budget = vsi->work_limit;
	u32 i, completed_frames, frames_ready, xsk_frames = 0;
	struct xdp_umem *umem = tx_ring->xsk_umem;
	u32 head_idx = i40e_get_head(tx_ring);
	bool work_done = true, xmit_done;
	struct i40e_tx_buffer *tx_bi;

	if (head_idx < tx_ring->next_to_clean)
		head_idx += tx_ring->count;
	frames_ready = head_idx - tx_ring->next_to_clean;

	if (frames_ready == 0) {
		goto out_xmit;
	} else if (frames_ready > budget) {
		completed_frames = budget;
		work_done = false;
	} else {
		completed_frames = frames_ready;
	}

	ntc = tx_ring->next_to_clean;

	for (i = 0; i < completed_frames; i++) {
		tx_bi = &tx_ring->tx_bi[ntc];

		if (tx_bi->xdpf)
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;
		total_bytes += tx_bi->bytecount;

		if (++ntc >= tx_ring->count)
			ntc = 0;
	}

	tx_ring->next_to_clean += completed_frames;
	if (unlikely(tx_ring->next_to_clean >= tx_ring->count))
		tx_ring->next_to_clean -= tx_ring->count;

	if (xsk_frames)
		xsk_umem_complete_tx(umem, xsk_frames);

	i40e_arm_wb(tx_ring, vsi, budget);
	i40e_update_tx_stats(tx_ring, completed_frames, total_bytes);

out_xmit:
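	/* Update the Tx need_wakeup flag: if all Tx work is completed,
	 * user space has to kick us via the wakeup mechanism when it has
	 * more to send; otherwise NAPI is still running and will pick up
	 * new descriptors on its own.
	 */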
	if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem)) {
		if (tx_ring->next_to_clean == tx_ring->next_to_use)
			xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
		else
			xsk_clear_tx_need_wakeup(tx_ring->xsk_umem);
	}

	xmit_done = i40e_xmit_zc(tx_ring, budget);

	return work_done && xmit_done;
}

/**
 * i40e_xsk_wakeup - Implements the ndo_xsk_wakeup
 * @dev: the netdevice
 * @queue_id: queue id to wake up
 * @flags: ignored in our case since we have Rx and Tx in the same NAPI.
 *
 * Returns <0 for errors, 0 otherwise.
 **/
int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_ring *ring;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!i40e_enabled_xdp_vsi(vsi))
		return -ENXIO;

	if (queue_id >= vsi->num_queue_pairs)
		return -ENXIO;

	if (!vsi->xdp_rings[queue_id]->xsk_umem)
		return -ENXIO;

	ring = vsi->xdp_rings[queue_id];

	/* The idea here is that if NAPI is running, mark a miss, so
	 * it will run again. If not, trigger an interrupt and
	 * schedule the NAPI from interrupt context. If NAPI would be
	 * scheduled here, the interrupt affinity would not be
	 * honored.
	 */
	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi))
		i40e_force_wb(vsi, ring->q_vector);

	return 0;
}

/**
 * i40e_xsk_clean_rx_ring - Clean the Rx ring on shutdown
 * @rx_ring: Rx ring
 **/
void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
{
	u16 i;

	for (i = 0; i < rx_ring->count; i++) {
		struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];

		if (!rx_bi->addr)
			continue;

		xsk_umem_fq_reuse(rx_ring->xsk_umem, rx_bi->handle);
		rx_bi->addr = NULL;
	}
}

/**
 * i40e_xsk_clean_tx_ring - Clean the XDP Tx ring on shutdown
 * @tx_ring: XDP Tx ring
 **/
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	struct xdp_umem *umem = tx_ring->xsk_umem;
	struct i40e_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		tx_bi = &tx_ring->tx_bi[ntc];

		if (tx_bi->xdpf)
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		ntc++;
		if (ntc >= tx_ring->count)
			ntc = 0;
	}

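	/* Entries without an xdpf attached came straight off the AF_XDP
	 * Tx ring; return their completions to the UMEM completion ring.
	 */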
	if (xsk_frames)
		xsk_umem_complete_tx(umem, xsk_frames);
}

/**
 * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have AF_XDP UMEM attached
 * @vsi: Current VSI
 *
 * Returns true if any of the Rx rings has an AF_XDP UMEM attached
 **/
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
{
	struct net_device *netdev = vsi->netdev;
	int i;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (xdp_get_umem_from_qid(netdev, i))
			return true;
	}

	return false;
}