// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"

void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring)
{
	memset(rx_ring->rx_bi_zc, 0,
	       sizeof(*rx_ring->rx_bi_zc) * rx_ring->count);
}

static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
{
	return &rx_ring->rx_bi_zc[idx];
}

/**
 * i40e_realloc_rx_xdp_bi - reallocate SW ring for either XSK or normal buffer
 * @rx_ring: Current rx ring
 * @pool_present: is pool for XSK present
 *
 * Try to allocate a new SW ring and return -ENOMEM if the allocation fails.
 * On success, substitute the old ring with the newly allocated one.
 * Returns 0 on success, negative on failure
 */
static int i40e_realloc_rx_xdp_bi(struct i40e_ring *rx_ring, bool pool_present)
{
	size_t elem_size = pool_present ? sizeof(*rx_ring->rx_bi_zc) :
					  sizeof(*rx_ring->rx_bi);
	void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);

	if (!sw_ring)
		return -ENOMEM;

	if (pool_present) {
		kfree(rx_ring->rx_bi);
		rx_ring->rx_bi = NULL;
		rx_ring->rx_bi_zc = sw_ring;
	} else {
		kfree(rx_ring->rx_bi_zc);
		rx_ring->rx_bi_zc = NULL;
		rx_ring->rx_bi = sw_ring;
	}
	return 0;
}

/**
 * i40e_realloc_rx_bi_zc - reallocate rx SW rings
 * @vsi: Current VSI
 * @zc: is zero copy set
 *
 * Reallocate the SW ring for all rx_rings that might be used by XSK.
 * XDP requires more memory than rx_buf provides.
 * Returns 0 on success, negative on failure
 */
int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc)
{
	struct i40e_ring *rx_ring;
	unsigned long q;

	for_each_set_bit(q, vsi->af_xdp_zc_qps, vsi->alloc_queue_pairs) {
		rx_ring = vsi->rx_rings[q];
		if (i40e_realloc_rx_xdp_bi(rx_ring, zc))
			return -ENOMEM;
	}
	return 0;
}

/**
 * i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a
 * certain ring/qid
 * @vsi: Current VSI
 * @pool: buffer pool
 * @qid: Rx ring to associate buffer pool with
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,
				struct xsk_buff_pool *pool,
				u16 qid)
{
	struct net_device *netdev = vsi->netdev;
	bool if_running;
	int err;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	if (qid >= vsi->num_queue_pairs)
		return -EINVAL;

	if (qid >= netdev->real_num_rx_queues ||
	    qid >= netdev->real_num_tx_queues)
		return -EINVAL;

	err = xsk_pool_dma_map(pool, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR);
	if (err)
		return err;

	set_bit(qid, vsi->af_xdp_zc_qps);

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;

		err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], true);
		if (err)
			return err;

		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;

		/* Kick start the NAPI context so that receiving will start */
		err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
		if (err)
			return err;
	}

	return 0;
}

/**
 * i40e_xsk_pool_disable - Disassociate an AF_XDP buffer pool from a
 * certain ring/qid
 * @vsi: Current VSI
 * @qid: Rx ring to disassociate buffer pool from
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
{
	struct net_device *netdev = vsi->netdev;
	struct xsk_buff_pool *pool;
	bool if_running;
	int err;

	pool = xsk_get_pool_from_qid(netdev, qid);
	if (!pool)
		return -EINVAL;

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;
	}

	clear_bit(qid, vsi->af_xdp_zc_qps);
	xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR);

	if (if_running) {
		err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], false);
		if (err)
			return err;
		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;
	}

	return 0;
}

/**
 * i40e_xsk_pool_setup - Enable/disassociate an AF_XDP buffer pool to/from
 * a ring/qid
 * @vsi: Current VSI
 * @pool: Buffer pool to enable/associate to a ring, or NULL to disable
 * @qid: Rx ring to associate the buffer pool with, or disassociate it from
 *
 * This function attaches a buffer pool to, or detaches one from, a certain
 * ring.
 *
 * Returns 0 on success, <0 on failure
 **/
int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
			u16 qid)
{
	return pool ?
		i40e_xsk_pool_enable(vsi, pool, qid) :
		i40e_xsk_pool_disable(vsi, qid);
}

/**
 * i40e_run_xdp_zc - Executes an XDP program on an xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 *
 * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR, EXIT}
 **/
static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp,
			   struct bpf_prog *xdp_prog)
{
	int err, result = I40E_XDP_PASS;
	struct i40e_ring *xdp_ring;
	u32 act;

	act = bpf_prog_run_xdp(xdp_prog, xdp);

	if (likely(act == XDP_REDIRECT)) {
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		if (!err)
			return I40E_XDP_REDIR;
		if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
			result = I40E_XDP_EXIT;
		else
			result = I40E_XDP_CONSUMED;
		goto out_failure;
	}

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
		if (result == I40E_XDP_CONSUMED)
			goto out_failure;
		break;
	case XDP_DROP:
		result = I40E_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		result = I40E_XDP_CONSUMED;
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
	}
	return result;
}
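
/**
 * i40e_alloc_rx_buffers_zc - Allocates Rx buffers from the AF_XDP pool
 * @rx_ring: Rx ring
 * @count: number of buffers to allocate
 *
 * Grabs buffers from the buffer pool in a batch and writes their DMA
 * addresses into the Rx descriptor ring. Allocation stops at the end of
 * the ring, so a single call fills at most up to the wrap-around point.
 *
 * Returns true if all @count buffers were allocated, false otherwise
 **/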
bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
{
	u16 ntu = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct xdp_buff **xdp;
	u32 nb_buffs, i;
	dma_addr_t dma;

	rx_desc = I40E_RX_DESC(rx_ring, ntu);
	xdp = i40e_rx_bi(rx_ring, ntu);

	nb_buffs = min_t(u16, count, rx_ring->count - ntu);
	nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);
	if (!nb_buffs)
		return false;

	i = nb_buffs;

	while (i--) {
		dma = xsk_buff_xdp_get_dma(*xdp);
		rx_desc->read.pkt_addr = cpu_to_le64(dma);
		rx_desc->read.hdr_addr = 0;

		rx_desc++;
		xdp++;
	}

	ntu += nb_buffs;
	if (ntu == rx_ring->count) {
		rx_desc = I40E_RX_DESC(rx_ring, 0);
		ntu = 0;
	}

	/* clear the status bits for the next_to_use descriptor */
	rx_desc->wb.qword1.status_error_len = 0;
	i40e_release_rx_desc(rx_ring, ntu);

	return count == nb_buffs;
}

/**
 * i40e_construct_skb_zc - Create skbuff from zero-copy Rx buffer
 * @rx_ring: Rx ring
 * @xdp: xdp_buff
 *
 * This function allocates a new skb from a zero-copy Rx buffer.
 *
 * Returns the skb, or NULL on failure.
 **/
static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
					     struct xdp_buff *xdp)
{
	unsigned int totalsize = xdp->data_end - xdp->data_meta;
	unsigned int metasize = xdp->data - xdp->data_meta;
	struct skb_shared_info *sinfo = NULL;
	struct sk_buff *skb;
	u32 nr_frags = 0;

	if (unlikely(xdp_buff_has_frags(xdp))) {
		sinfo = xdp_get_shared_info_from_buff(xdp);
		nr_frags = sinfo->nr_frags;
	}
	net_prefetch(xdp->data_meta);

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		goto out;

	memcpy(__skb_put(skb, totalsize), xdp->data_meta,
	       ALIGN(totalsize, sizeof(long)));

	if (metasize) {
		skb_metadata_set(skb, metasize);
		__skb_pull(skb, metasize);
	}

	if (likely(!xdp_buff_has_frags(xdp)))
		goto out;

	for (int i = 0; i < nr_frags; i++) {
		struct skb_shared_info *skinfo = skb_shinfo(skb);
		skb_frag_t *frag = &sinfo->frags[i];
		struct page *page;
		void *addr;

		page = dev_alloc_page();
		if (!page) {
			dev_kfree_skb(skb);
			return NULL;
		}
		addr = page_to_virt(page);

		/* copy the frag payload, not the struct page itself */
		memcpy(addr, skb_frag_address(frag), skb_frag_size(frag));

		__skb_fill_page_desc_noacc(skinfo, skinfo->nr_frags++,
					   addr, 0, skb_frag_size(frag));
	}

out:
	xsk_buff_free(xdp);
	return skb;
}

/**
 * i40e_handle_xdp_result_zc - Handles the verdict from an XDP program
 * @rx_ring: Rx ring
 * @xdp_buff: xdp_buff the XDP program was run on
 * @rx_desc: Rx descriptor the buffer was received on
 * @rx_packets: set to the number of packets counted for this buffer
 * @rx_bytes: set to the number of bytes counted for this buffer
 * @xdp_res: verdict from the XDP program, any of I40E_XDP_{PASS, CONSUMED,
 *	     TX, REDIR, EXIT}
 * @failure: set to true when the Rx loop should bail out
 **/
static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
				      struct xdp_buff *xdp_buff,
				      union i40e_rx_desc *rx_desc,
				      unsigned int *rx_packets,
				      unsigned int *rx_bytes,
				      unsigned int xdp_res,
				      bool *failure)
{
	struct sk_buff *skb;

	*rx_packets = 1;
	*rx_bytes = xdp_get_buff_len(xdp_buff);

	if (likely(xdp_res == I40E_XDP_REDIR) || xdp_res == I40E_XDP_TX)
		return;

	if (xdp_res == I40E_XDP_EXIT) {
		*failure = true;
		return;
	}

	if (xdp_res == I40E_XDP_CONSUMED) {
		xsk_buff_free(xdp_buff);
		return;
	}

	if (xdp_res == I40E_XDP_PASS) {
		/* NB! We are not checking for errors using
		 * i40e_test_staterr with
		 * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is because
		 * SBP is *not* set in PRT_SBPVSI (default not set).
		 */
		skb = i40e_construct_skb_zc(rx_ring, xdp_buff);
		if (!skb) {
			rx_ring->rx_stats.alloc_buff_failed++;
			*rx_packets = 0;
			*rx_bytes = 0;
			return;
		}

		if (eth_skb_pad(skb)) {
			*rx_packets = 0;
			*rx_bytes = 0;
			return;
		}

		i40e_process_skb_fields(rx_ring, rx_desc, skb);
		napi_gro_receive(&rx_ring->q_vector->napi, skb);
		return;
	}

	/* Should never get here, as all valid cases have been handled
	 * already.
	 */
	WARN_ON_ONCE(1);
}
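
/**
 * i40e_add_xsk_frag - Appends a buffer to a multi-buffer packet
 * @rx_ring: Rx ring
 * @first: xdp_buff of the first buffer in the packet
 * @xdp: xdp_buff of the buffer to append
 * @size: number of bytes in the appended buffer
 *
 * Records @xdp as an additional fragment in the shared info of @first.
 *
 * Returns 0 on success, -ENOMEM when MAX_SKB_FRAGS would be exceeded; in
 * the latter case the whole packet, including @first, is freed.
 **/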
static int
i40e_add_xsk_frag(struct i40e_ring *rx_ring, struct xdp_buff *first,
		  struct xdp_buff *xdp, const unsigned int size)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first);

	if (!xdp_buff_has_frags(first)) {
		sinfo->nr_frags = 0;
		sinfo->xdp_frags_size = 0;
		xdp_buff_set_frags_flag(first);
	}

	if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
		xsk_buff_free(first);
		return -ENOMEM;
	}

	__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
				   virt_to_page(xdp->data_hard_start),
				   XDP_PACKET_HEADROOM, size);
	sinfo->xdp_frags_size += size;
	xsk_buff_add_frag(xdp);

	return 0;
}

/**
 * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
 * @rx_ring: Rx ring
 * @budget: NAPI budget
 *
 * Returns amount of work completed
 **/
int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 next_to_process = rx_ring->next_to_process;
	u16 next_to_clean = rx_ring->next_to_clean;
	unsigned int xdp_res, xdp_xmit = 0;
	struct xdp_buff *first = NULL;
	u32 count = rx_ring->count;
	struct bpf_prog *xdp_prog;
	u32 entries_to_alloc;
	bool failure = false;

	if (next_to_process != next_to_clean)
		first = *i40e_rx_bi(rx_ring, next_to_clean);

	/* NB! xdp_prog will always be !NULL, because this path is only
	 * enabled by setting an XDP program.
	 */
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);

	while (likely(total_rx_packets < (unsigned int)budget)) {
		union i40e_rx_desc *rx_desc;
		unsigned int rx_packets;
		unsigned int rx_bytes;
		struct xdp_buff *bi;
		unsigned int size;
		u64 qword;

		rx_desc = I40E_RX_DESC(rx_ring, next_to_process);
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */
		dma_rmb();

		if (i40e_rx_is_programming_status(qword)) {
			i40e_clean_programming_status(rx_ring,
						      rx_desc->raw.qword[0],
						      qword);
			bi = *i40e_rx_bi(rx_ring, next_to_process);
			xsk_buff_free(bi);
			if (++next_to_process == count)
				next_to_process = 0;
			continue;
		}

		size = FIELD_GET(I40E_RXD_QW1_LENGTH_PBUF_MASK, qword);
		if (!size)
			break;

		bi = *i40e_rx_bi(rx_ring, next_to_process);
		xsk_buff_set_size(bi, size);
		xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);

		if (!first)
			first = bi;
		else if (i40e_add_xsk_frag(rx_ring, first, bi, size))
			break;

		if (++next_to_process == count)
			next_to_process = 0;

		if (i40e_is_non_eop(rx_ring, rx_desc))
			continue;

		xdp_res = i40e_run_xdp_zc(rx_ring, first, xdp_prog);
		i40e_handle_xdp_result_zc(rx_ring, first, rx_desc, &rx_packets,
					  &rx_bytes, xdp_res, &failure);
		next_to_clean = next_to_process;
		if (failure)
			break;
		total_rx_packets += rx_packets;
		total_rx_bytes += rx_bytes;
		xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR);
		first = NULL;
	}

	rx_ring->next_to_clean = next_to_clean;
	rx_ring->next_to_process = next_to_process;

	entries_to_alloc = I40E_DESC_UNUSED(rx_ring);
	if (entries_to_alloc >= I40E_RX_BUFFER_WRITE)
		failure |= !i40e_alloc_rx_buffers_zc(rx_ring,
						     entries_to_alloc);

	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);

	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
		if (failure || next_to_clean == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);

		return (int)total_rx_packets;
	}
	return failure ? budget : (int)total_rx_packets;
}

/**
 * i40e_xmit_pkt - Posts one AF_XDP descriptor to the HW Tx ring
 * @xdp_ring: XDP Tx ring
 * @desc: AF_XDP descriptor to post
 * @total_bytes: running total of bytes posted, updated by this function
 **/
static void i40e_xmit_pkt(struct i40e_ring *xdp_ring, struct xdp_desc *desc,
			  unsigned int *total_bytes)
{
	u32 cmd = I40E_TX_DESC_CMD_ICRC | xsk_is_eop_desc(desc);
	struct i40e_tx_desc *tx_desc;
	dma_addr_t dma;

	dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);
	xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);

	tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
	tx_desc->buffer_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = build_ctob(cmd, 0, desc->len, 0);

	*total_bytes += desc->len;
}

/**
 * i40e_xmit_pkt_batch - Posts PKTS_PER_BATCH descriptors to the HW Tx ring
 * @xdp_ring: XDP Tx ring
 * @desc: first AF_XDP descriptor of the batch to post
 * @total_bytes: running total of bytes posted, updated by this function
 **/
static void i40e_xmit_pkt_batch(struct i40e_ring *xdp_ring, struct xdp_desc *desc,
				unsigned int *total_bytes)
{
	u16 ntu = xdp_ring->next_to_use;
	struct i40e_tx_desc *tx_desc;
	dma_addr_t dma;
	u32 i;

	loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
		u32 cmd = I40E_TX_DESC_CMD_ICRC | xsk_is_eop_desc(&desc[i]);

		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc[i].addr);
		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc[i].len);

		tx_desc = I40E_TX_DESC(xdp_ring, ntu++);
		tx_desc->buffer_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz = build_ctob(cmd, 0, desc[i].len, 0);

		*total_bytes += desc[i].len;
	}

	xdp_ring->next_to_use = ntu;
}
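
/**
 * i40e_fill_tx_hw_ring - Posts a batch of AF_XDP descriptors to the HW ring
 * @xdp_ring: XDP Tx ring
 * @descs: AF_XDP descriptors to post
 * @nb_pkts: number of descriptors to post
 * @total_bytes: running total of bytes posted, updated by this function
 *
 * PKTS_PER_BATCH is a power of two, so the mask arithmetic below splits
 * nb_pkts into a multiple of the batch size plus a remainder; e.g. if
 * PKTS_PER_BATCH were 4, nb_pkts == 10 would give batched == 8 and
 * leftover == 2. Full batches go through the unrolled path, the rest is
 * sent one packet at a time.
 **/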
static void i40e_fill_tx_hw_ring(struct i40e_ring *xdp_ring, struct xdp_desc *descs, u32 nb_pkts,
				 unsigned int *total_bytes)
{
	u32 batched, leftover, i;

	batched = nb_pkts & ~(PKTS_PER_BATCH - 1);
	leftover = nb_pkts & (PKTS_PER_BATCH - 1);
	for (i = 0; i < batched; i += PKTS_PER_BATCH)
		i40e_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
	for (i = batched; i < batched + leftover; i++)
		i40e_xmit_pkt(xdp_ring, &descs[i], total_bytes);
}

/**
 * i40e_set_rs_bit - Sets the RS bit on the last posted Tx descriptor
 * @xdp_ring: XDP Tx ring
 *
 * Requests that the hardware reports completion for the descriptors
 * posted so far, so that i40e_clean_xdp_tx_irq() can free them.
 **/
static void i40e_set_rs_bit(struct i40e_ring *xdp_ring)
{
	u16 ntu = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;
	struct i40e_tx_desc *tx_desc;

	tx_desc = I40E_TX_DESC(xdp_ring, ntu);
	tx_desc->cmd_type_offset_bsz |= cpu_to_le64(I40E_TX_DESC_CMD_RS << I40E_TXD_QW1_CMD_SHIFT);
}

/**
 * i40e_xmit_zc - Performs zero-copy Tx AF_XDP
 * @xdp_ring: XDP Tx ring
 * @budget: NAPI budget
 *
 * Returns true if the work is finished.
 **/
static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
{
	struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
	u32 nb_pkts, nb_processed = 0;
	unsigned int total_bytes = 0;

	nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
	if (!nb_pkts)
		return true;

	if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
		nb_processed = xdp_ring->count - xdp_ring->next_to_use;
		i40e_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
		xdp_ring->next_to_use = 0;
	}

	i40e_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
			     &total_bytes);

	/* Request an interrupt for the last frame and bump tail ptr. */
	i40e_set_rs_bit(xdp_ring);
	i40e_xdp_ring_update_tail(xdp_ring);

	i40e_update_tx_stats(xdp_ring, nb_pkts, total_bytes);

	return nb_pkts < budget;
}

/**
 * i40e_clean_xdp_tx_buffer - Frees and unmaps an XDP Tx entry
 * @tx_ring: XDP Tx ring
 * @tx_bi: Tx buffer info to clean
 **/
static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
				     struct i40e_tx_buffer *tx_bi)
{
	xdp_return_frame(tx_bi->xdpf);
	tx_ring->xdp_tx_active--;
	dma_unmap_single(tx_ring->dev,
			 dma_unmap_addr(tx_bi, dma),
			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_bi, len, 0);
}

/**
 * i40e_clean_xdp_tx_irq - Completes AF_XDP entries, and cleans XDP entries
 * @vsi: Current VSI
 * @tx_ring: XDP Tx ring
 *
 * Returns true if cleanup/transmission is done.
 **/
bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring)
{
	struct xsk_buff_pool *bp = tx_ring->xsk_pool;
	u32 i, completed_frames, xsk_frames = 0;
	u32 head_idx = i40e_get_head(tx_ring);
	struct i40e_tx_buffer *tx_bi;
	unsigned int ntc;

	if (head_idx < tx_ring->next_to_clean)
		head_idx += tx_ring->count;
	completed_frames = head_idx - tx_ring->next_to_clean;

	if (completed_frames == 0)
		goto out_xmit;

	if (likely(!tx_ring->xdp_tx_active)) {
		xsk_frames = completed_frames;
		goto skip;
	}

	ntc = tx_ring->next_to_clean;

	for (i = 0; i < completed_frames; i++) {
		tx_bi = &tx_ring->tx_bi[ntc];

		if (tx_bi->xdpf) {
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
			tx_bi->xdpf = NULL;
		} else {
			xsk_frames++;
		}

		if (++ntc >= tx_ring->count)
			ntc = 0;
	}

skip:
	tx_ring->next_to_clean += completed_frames;
	if (unlikely(tx_ring->next_to_clean >= tx_ring->count))
		tx_ring->next_to_clean -= tx_ring->count;

	if (xsk_frames)
		xsk_tx_completed(bp, xsk_frames);

	i40e_arm_wb(tx_ring, vsi, completed_frames);

out_xmit:
	if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
		xsk_set_tx_need_wakeup(tx_ring->xsk_pool);

	return i40e_xmit_zc(tx_ring, I40E_DESC_UNUSED(tx_ring));
}

/**
 * i40e_xsk_wakeup - Implements the ndo_xsk_wakeup
 * @dev: the netdevice
 * @queue_id: queue id to wake up
 * @flags: ignored in our case since we have Rx and Tx in the same NAPI.
 *
 * Returns <0 for errors, 0 otherwise.
 **/
int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *ring;

	if (test_bit(__I40E_CONFIG_BUSY, pf->state))
		return -EAGAIN;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!i40e_enabled_xdp_vsi(vsi))
		return -EINVAL;

	if (queue_id >= vsi->num_queue_pairs)
		return -EINVAL;

	if (!vsi->xdp_rings[queue_id]->xsk_pool)
		return -EINVAL;

	ring = vsi->xdp_rings[queue_id];

	/* The idea here is that if NAPI is running, mark a miss, so
	 * it will run again. If not, trigger an interrupt and
	 * schedule the NAPI from interrupt context. If NAPI would be
	 * scheduled here, the interrupt affinity would not be
	 * honored.
	 */
	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi))
		i40e_force_wb(vsi, ring->q_vector);

	return 0;
}
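
/**
 * i40e_xsk_clean_rx_ring - Clean the XDP Rx ring on shutdown
 * @rx_ring: Rx ring
 *
 * Returns all buffers still sitting on the ring to the AF_XDP buffer pool.
 **/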
void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
{
	u16 ntc = rx_ring->next_to_clean;
	u16 ntu = rx_ring->next_to_use;

	while (ntc != ntu) {
		struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, ntc);

		xsk_buff_free(rx_bi);
		ntc++;
		if (ntc >= rx_ring->count)
			ntc = 0;
	}
}

/**
 * i40e_xsk_clean_tx_ring - Clean the XDP Tx ring on shutdown
 * @tx_ring: XDP Tx ring
 **/
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	struct xsk_buff_pool *bp = tx_ring->xsk_pool;
	struct i40e_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		tx_bi = &tx_ring->tx_bi[ntc];

		if (tx_bi->xdpf)
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		ntc++;
		if (ntc >= tx_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_tx_completed(bp, xsk_frames);
}

/**
 * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have an AF_XDP
 * buffer pool attached
 * @vsi: vsi
 *
 * Returns true if any of the Rx rings has an AF_XDP buffer pool attached
 **/
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
{
	struct net_device *netdev = vsi->netdev;
	int i;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (xsk_get_pool_from_qid(netdev, i))
			return true;
	}

	return false;
}