// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. */

#include <linux/bpf_trace.h>
#include <linux/stringify.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>

#include "i40e.h"
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"

int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring)
{
	unsigned long sz = sizeof(*rx_ring->rx_bi_zc) * rx_ring->count;

	rx_ring->rx_bi_zc = kzalloc(sz, GFP_KERNEL);
	return rx_ring->rx_bi_zc ? 0 : -ENOMEM;
}

void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring)
{
	memset(rx_ring->rx_bi_zc, 0,
	       sizeof(*rx_ring->rx_bi_zc) * rx_ring->count);
}

static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
{
	return &rx_ring->rx_bi_zc[idx];
}

/**
 * i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a
 * certain ring/qid
 * @vsi: Current VSI
 * @pool: buffer pool
 * @qid: Rx ring to associate buffer pool with
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,
				struct xsk_buff_pool *pool,
				u16 qid)
{
	struct net_device *netdev = vsi->netdev;
	bool if_running;
	int err;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	if (qid >= vsi->num_queue_pairs)
		return -EINVAL;

	if (qid >= netdev->real_num_rx_queues ||
	    qid >= netdev->real_num_tx_queues)
		return -EINVAL;

	err = xsk_pool_dma_map(pool, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR);
	if (err)
		return err;

	set_bit(qid, vsi->af_xdp_zc_qps);
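
	/* The af_xdp_zc_qps bit is consulted when the queue pair is
	 * brought back up below (via the i40e_xsk_pool() helper), so it
	 * must be set before the restart for the pool to be picked up.
	 */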

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;

		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;

		/* Kick start the NAPI context so that receiving will start */
		err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
		if (err)
			return err;
	}

	return 0;
}

/**
 * i40e_xsk_pool_disable - Disassociate an AF_XDP buffer pool from a
 * certain ring/qid
 * @vsi: Current VSI
 * @qid: Rx ring to disassociate buffer pool from
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
{
	struct net_device *netdev = vsi->netdev;
	struct xsk_buff_pool *pool;
	bool if_running;
	int err;

	pool = xsk_get_pool_from_qid(netdev, qid);
	if (!pool)
		return -EINVAL;

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;
	}
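
	/* The queue pair is quiesced at this point, so it is safe to
	 * clear the zero-copy marking and tear down the DMA mappings.
	 */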
	clear_bit(qid, vsi->af_xdp_zc_qps);
	xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR);

	if (if_running) {
		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;
	}

	return 0;
}

/**
 * i40e_xsk_pool_setup - Enable/disassociate an AF_XDP buffer pool to/from
 * a ring/qid
 * @vsi: Current VSI
 * @pool: Buffer pool to enable/associate to a ring, or NULL to disable
 * @qid: Rx ring to (dis)associate buffer pool with
 *
 * This function enables or disables a buffer pool for a certain ring/qid.
 *
 * Returns 0 on success, <0 on failure
 **/
int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
			u16 qid)
{
	return pool ? i40e_xsk_pool_enable(vsi, pool, qid) :
		i40e_xsk_pool_disable(vsi, qid);
}

/**
 * i40e_run_xdp_zc - Executes an XDP program on an xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 *
 * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR, EXIT}
 **/
static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
{
	int err, result = I40E_XDP_PASS;
	struct i40e_ring *xdp_ring;
	struct bpf_prog *xdp_prog;
	u32 act;

	/* NB! xdp_prog will always be !NULL, due to the fact that
	 * this path is enabled by setting an XDP program.
	 */
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	act = bpf_prog_run_xdp(xdp_prog, xdp);
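
	/* XDP_REDIRECT is the dominant verdict for AF_XDP, hence the
	 * likely() fast path ahead of the switch below. A redirect that
	 * fails with -ENOBUFS while need_wakeup is in use maps to
	 * I40E_XDP_EXIT, which tells the NAPI loop to stop early so that
	 * user space can catch up.
	 */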
	if (likely(act == XDP_REDIRECT)) {
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		if (!err)
			return I40E_XDP_REDIR;
		if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
			result = I40E_XDP_EXIT;
		else
			result = I40E_XDP_CONSUMED;
		goto out_failure;
	}

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
		if (result == I40E_XDP_CONSUMED)
			goto out_failure;
		break;
	case XDP_DROP:
		result = I40E_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		result = I40E_XDP_CONSUMED;
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough; /* handle aborts by dropping packet */
	}
	return result;
}

bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
{
	u16 ntu = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct xdp_buff **xdp;
	u32 nb_buffs, i;
	dma_addr_t dma;

	rx_desc = I40E_RX_DESC(rx_ring, ntu);
	xdp = i40e_rx_bi(rx_ring, ntu);
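
	/* Cap the batch at the descriptors left before the end of the
	 * ring so that a single xsk_buff_alloc_batch() call never has
	 * to wrap; any remainder is picked up on a later call.
	 */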
	nb_buffs = min_t(u16, count, rx_ring->count - ntu);
	nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);
	if (!nb_buffs)
		return false;

	i = nb_buffs;
	while (i--) {
		dma = xsk_buff_xdp_get_dma(*xdp);
		rx_desc->read.pkt_addr = cpu_to_le64(dma);
		rx_desc->read.hdr_addr = 0;

		rx_desc++;
		xdp++;
	}

	ntu += nb_buffs;
	if (ntu == rx_ring->count) {
		rx_desc = I40E_RX_DESC(rx_ring, 0);
		ntu = 0;
	}

	/* clear the status bits for the next_to_use descriptor */
	rx_desc->wb.qword1.status_error_len = 0;
	i40e_release_rx_desc(rx_ring, ntu);

	return count == nb_buffs;
}

/**
 * i40e_construct_skb_zc - Create skbuff from zero-copy Rx buffer
 * @rx_ring: Rx ring
 * @xdp: xdp_buff
 *
 * This function allocates a new skb from a zero-copy Rx buffer.
 *
 * Returns the skb, or NULL on failure.
 **/
static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
					     struct xdp_buff *xdp)
{
	unsigned int totalsize = xdp->data_end - xdp->data_meta;
	unsigned int metasize = xdp->data - xdp->data_meta;
	struct sk_buff *skb;

	net_prefetch(xdp->data_meta);

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		goto out;
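
	/* Copy packet data and XDP metadata in one memcpy, then hide
	 * the metadata behind skb->data with __skb_pull(): the stack
	 * sees only the frame, while skb_metadata_set() records the
	 * metadata size.
	 */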
	memcpy(__skb_put(skb, totalsize), xdp->data_meta,
	       ALIGN(totalsize, sizeof(long)));

	if (metasize) {
		skb_metadata_set(skb, metasize);
		__skb_pull(skb, metasize);
	}

out:
	xsk_buff_free(xdp);
	return skb;
}

static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
				      struct xdp_buff *xdp_buff,
				      union i40e_rx_desc *rx_desc,
				      unsigned int *rx_packets,
				      unsigned int *rx_bytes,
				      unsigned int size,
				      unsigned int xdp_res,
				      bool *failure)
{
	struct sk_buff *skb;
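
	/* Default to reporting one completed packet of @size bytes; the
	 * branches below zero these out when the buffer is dropped or
	 * an skb cannot be constructed.
	 */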
	*rx_packets = 1;
	*rx_bytes = size;

	if (likely(xdp_res == I40E_XDP_REDIR) || xdp_res == I40E_XDP_TX)
		return;

	if (xdp_res == I40E_XDP_EXIT) {
		*failure = true;
		return;
	}

	if (xdp_res == I40E_XDP_CONSUMED) {
		xsk_buff_free(xdp_buff);
		return;
	}

	if (xdp_res == I40E_XDP_PASS) {
		/* NB! We are not checking for errors using
		 * i40e_test_staterr with
		 * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is because SBP is
		 * *not* set in PRT_SBPVSI (default not set).
		 */
		skb = i40e_construct_skb_zc(rx_ring, xdp_buff);
		if (!skb) {
			rx_ring->rx_stats.alloc_buff_failed++;
			*rx_packets = 0;
			*rx_bytes = 0;
			return;
		}

		if (eth_skb_pad(skb)) {
			*rx_packets = 0;
			*rx_bytes = 0;
			return;
		}

		*rx_bytes = skb->len;
		i40e_process_skb_fields(rx_ring, rx_desc, skb);
		napi_gro_receive(&rx_ring->q_vector->napi, skb);
		return;
	}

	/* Should never get here, as all valid cases have been handled
	 * already.
	 */
	WARN_ON_ONCE(1);
}

/**
 * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
 * @rx_ring: Rx ring
 * @budget: NAPI budget
 *
 * Returns amount of work completed
 **/
int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 next_to_clean = rx_ring->next_to_clean;
	u16 count_mask = rx_ring->count - 1;
	unsigned int xdp_res, xdp_xmit = 0;
	bool failure = false;
	u16 cleaned_count;

	while (likely(total_rx_packets < (unsigned int)budget)) {
		union i40e_rx_desc *rx_desc;
		unsigned int rx_packets;
		unsigned int rx_bytes;
		struct xdp_buff *bi;
		unsigned int size;
		u64 qword;

		rx_desc = I40E_RX_DESC(rx_ring, next_to_clean);
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */
		dma_rmb();
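
		/* Programming status descriptors (e.g. flow director
		 * results) carry no packet data: free the staged buffer
		 * and advance to the next descriptor.
		 */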
		if (i40e_rx_is_programming_status(qword)) {
			i40e_clean_programming_status(rx_ring,
						      rx_desc->raw.qword[0],
						      qword);
			bi = *i40e_rx_bi(rx_ring, next_to_clean);
			xsk_buff_free(bi);
			next_to_clean = (next_to_clean + 1) & count_mask;
			continue;
		}

		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
		if (!size)
			break;

		bi = *i40e_rx_bi(rx_ring, next_to_clean);
		xsk_buff_set_size(bi, size);
		xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);

		xdp_res = i40e_run_xdp_zc(rx_ring, bi);
		i40e_handle_xdp_result_zc(rx_ring, bi, rx_desc, &rx_packets,
					  &rx_bytes, size, xdp_res, &failure);
		if (failure)
			break;
		total_rx_packets += rx_packets;
		total_rx_bytes += rx_bytes;
		xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR);
		next_to_clean = (next_to_clean + 1) & count_mask;
	}

	rx_ring->next_to_clean = next_to_clean;
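
	/* The free-descriptor count is the distance from next_to_clean
	 * back to next_to_use, minus one so the ring never appears
	 * completely full; the mask works because the ring size is a
	 * power of two.
	 */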
	cleaned_count = (next_to_clean - rx_ring->next_to_use - 1) & count_mask;

	if (cleaned_count >= I40E_RX_BUFFER_WRITE)
		failure |= !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);

	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);

	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
		if (failure || next_to_clean == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);

		return (int)total_rx_packets;
	}
	return failure ? budget : (int)total_rx_packets;
}

static void i40e_xmit_pkt(struct i40e_ring *xdp_ring, struct xdp_desc *desc,
			  unsigned int *total_bytes)
{
	struct i40e_tx_desc *tx_desc;
	dma_addr_t dma;

	dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);
	xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);

	tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
	tx_desc->buffer_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC | I40E_TX_DESC_CMD_EOP,
						  0, desc->len, 0);

	*total_bytes += desc->len;
}

static void i40e_xmit_pkt_batch(struct i40e_ring *xdp_ring, struct xdp_desc *desc,
				unsigned int *total_bytes)
{
	u16 ntu = xdp_ring->next_to_use;
	struct i40e_tx_desc *tx_desc;
	dma_addr_t dma;
	u32 i;

	loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc[i].addr);
		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc[i].len);

		tx_desc = I40E_TX_DESC(xdp_ring, ntu++);
		tx_desc->buffer_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC |
							  I40E_TX_DESC_CMD_EOP,
							  0, desc[i].len, 0);

		*total_bytes += desc[i].len;
	}

	xdp_ring->next_to_use = ntu;
}

static void i40e_fill_tx_hw_ring(struct i40e_ring *xdp_ring, struct xdp_desc *descs, u32 nb_pkts,
				 unsigned int *total_bytes)
{
	u32 batched, leftover, i;
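
	/* Round nb_pkts down to a multiple of PKTS_PER_BATCH and push
	 * those through the unrolled batch routine; the leftover tail
	 * is sent one packet at a time.
	 */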
	batched = nb_pkts & ~(PKTS_PER_BATCH - 1);
	leftover = nb_pkts & (PKTS_PER_BATCH - 1);
	for (i = 0; i < batched; i += PKTS_PER_BATCH)
		i40e_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
	for (i = batched; i < batched + leftover; i++)
		i40e_xmit_pkt(xdp_ring, &descs[i], total_bytes);
}

static void i40e_set_rs_bit(struct i40e_ring *xdp_ring)
{
	u16 ntu = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;
	struct i40e_tx_desc *tx_desc;

	tx_desc = I40E_TX_DESC(xdp_ring, ntu);
	tx_desc->cmd_type_offset_bsz |= cpu_to_le64(I40E_TX_DESC_CMD_RS << I40E_TXD_QW1_CMD_SHIFT);
}

/**
 * i40e_xmit_zc - Performs zero-copy Tx AF_XDP
 * @xdp_ring: XDP Tx ring
 * @budget: NAPI budget
 *
 * Returns true if the work is finished.
 **/
static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
{
	struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
	u32 nb_pkts, nb_processed = 0;
	unsigned int total_bytes = 0;

	nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
	if (!nb_pkts)
		return true;

	if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
		nb_processed = xdp_ring->count - xdp_ring->next_to_use;
		i40e_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
		xdp_ring->next_to_use = 0;
	}

	i40e_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
			     &total_bytes);

	/* Request an interrupt for the last frame and bump tail ptr. */
	i40e_set_rs_bit(xdp_ring);
	i40e_xdp_ring_update_tail(xdp_ring);

	i40e_update_tx_stats(xdp_ring, nb_pkts, total_bytes);

	return nb_pkts < budget;
}

/**
 * i40e_clean_xdp_tx_buffer - Frees and unmaps an XDP Tx entry
 * @tx_ring: XDP Tx ring
 * @tx_bi: Tx buffer info to clean
 **/
static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
				     struct i40e_tx_buffer *tx_bi)
{
	xdp_return_frame(tx_bi->xdpf);
	tx_ring->xdp_tx_active--;
	dma_unmap_single(tx_ring->dev,
			 dma_unmap_addr(tx_bi, dma),
			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_bi, len, 0);
}

/**
 * i40e_clean_xdp_tx_irq - Completes AF_XDP entries, and cleans XDP entries
 * @vsi: Current VSI
 * @tx_ring: XDP Tx ring
 *
 * Returns true if cleanup/transmission is done.
 **/
bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring)
{
	struct xsk_buff_pool *bp = tx_ring->xsk_pool;
	u32 i, completed_frames, xsk_frames = 0;
	u32 head_idx = i40e_get_head(tx_ring);
	struct i40e_tx_buffer *tx_bi;
	unsigned int ntc;
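
	/* Completion state comes from the head index that
	 * i40e_get_head() reads back from memory (head write-back),
	 * rather than from polling each descriptor for its DD bit.
	 */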
	if (head_idx < tx_ring->next_to_clean)
		head_idx += tx_ring->count;
	completed_frames = head_idx - tx_ring->next_to_clean;

	if (completed_frames == 0)
		goto out_xmit;

	if (likely(!tx_ring->xdp_tx_active)) {
		xsk_frames = completed_frames;
		goto skip;
	}

	ntc = tx_ring->next_to_clean;

	for (i = 0; i < completed_frames; i++) {
		tx_bi = &tx_ring->tx_bi[ntc];

		if (tx_bi->xdpf) {
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
			tx_bi->xdpf = NULL;
		} else {
			xsk_frames++;
		}

		if (++ntc >= tx_ring->count)
			ntc = 0;
	}

skip:
	tx_ring->next_to_clean += completed_frames;
	if (unlikely(tx_ring->next_to_clean >= tx_ring->count))
		tx_ring->next_to_clean -= tx_ring->count;

	if (xsk_frames)
		xsk_tx_completed(bp, xsk_frames);

	i40e_arm_wb(tx_ring, vsi, completed_frames);

out_xmit:
	if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
		xsk_set_tx_need_wakeup(tx_ring->xsk_pool);

	return i40e_xmit_zc(tx_ring, I40E_DESC_UNUSED(tx_ring));
}

/**
 * i40e_xsk_wakeup - Implements the ndo_xsk_wakeup
 * @dev: the netdevice
 * @queue_id: queue id to wake up
 * @flags: ignored in our case since we have Rx and Tx in the same NAPI.
 *
 * Returns <0 for errors, 0 otherwise.
 **/
int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *ring;

	if (test_bit(__I40E_CONFIG_BUSY, pf->state))
		return -EAGAIN;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!i40e_enabled_xdp_vsi(vsi))
		return -EINVAL;

	if (queue_id >= vsi->num_queue_pairs)
		return -EINVAL;

	if (!vsi->xdp_rings[queue_id]->xsk_pool)
		return -EINVAL;

	ring = vsi->xdp_rings[queue_id];

	/* The idea here is that if NAPI is running, mark a miss, so
	 * it will run again. If not, trigger an interrupt and
	 * schedule the NAPI from interrupt context. If NAPI would be
	 * scheduled here, the interrupt affinity would not be
	 * honored.
	 */
	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi))
		i40e_force_wb(vsi, ring->q_vector);

	return 0;
}

void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
{
	u16 count_mask = rx_ring->count - 1;
	u16 ntc = rx_ring->next_to_clean;
	u16 ntu = rx_ring->next_to_use;

	for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
		struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, ntc);

		xsk_buff_free(rx_bi);
	}
}

/**
 * i40e_xsk_clean_tx_ring - Clean the XDP Tx ring on shutdown
 * @tx_ring: XDP Tx ring
 **/
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	struct xsk_buff_pool *bp = tx_ring->xsk_pool;
	struct i40e_tx_buffer *tx_bi;
	u32 xsk_frames = 0;
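
	/* Entries between next_to_clean and next_to_use were never
	 * completed by the hardware: unmap and free the XDP frames, and
	 * only count the AF_XDP descriptors so that xsk_tx_completed()
	 * can return them to the pool's completion queue below.
	 */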
	while (ntc != ntu) {
		tx_bi = &tx_ring->tx_bi[ntc];

		if (tx_bi->xdpf)
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		ntc++;
		if (ntc >= tx_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_tx_completed(bp, xsk_frames);
}

/**
 * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have an AF_XDP
 * buffer pool attached
 * @vsi: vsi
 *
 * Returns true if any of the Rx rings has an AF_XDP buffer pool attached
 **/
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
{
	struct net_device *netdev = vsi->netdev;
	int i;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (xsk_get_pool_from_qid(netdev, i))
			return true;
	}

	return false;
}