/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2025 Intel Corporation */

#ifndef _IDPF_XDP_H_
#define _IDPF_XDP_H_

#include <net/libeth/xdp.h>

#include "idpf_txrx.h"

int idpf_xdp_rxq_info_init_all(const struct idpf_vport *vport);
void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport);
void idpf_xdp_copy_prog_to_rqs(const struct idpf_vport *vport,
			       struct bpf_prog *xdp_prog);

int idpf_xdpsqs_get(const struct idpf_vport *vport);
void idpf_xdpsqs_put(const struct idpf_vport *vport);

bool idpf_xdp_tx_flush_bulk(struct libeth_xdp_tx_bulk *bq, u32 flags);

/**
 * idpf_xdp_tx_xmit - produce a single HW Tx descriptor out of XDP desc
 * @desc: XDP descriptor to pull the DMA address and length from
 * @i: descriptor index on the queue to fill
 * @sq: XDP queue to produce the HW Tx descriptor on
 * @priv: &xsk_tx_metadata_ops on XSk xmit or %NULL
 */
static inline void idpf_xdp_tx_xmit(struct libeth_xdp_tx_desc desc, u32 i,
				    const struct libeth_xdpsq *sq, u64 priv)
{
	struct idpf_flex_tx_desc *tx_desc = sq->descs;
	u32 cmd;

	cmd = FIELD_PREP(IDPF_FLEX_TXD_QW1_DTYPE_M,
			 IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2);
	if (desc.flags & LIBETH_XDP_TX_LAST)
		cmd |= FIELD_PREP(IDPF_FLEX_TXD_QW1_CMD_M,
				  IDPF_TX_DESC_CMD_EOP);
	if (priv && (desc.flags & LIBETH_XDP_TX_CSUM))
		cmd |= FIELD_PREP(IDPF_FLEX_TXD_QW1_CMD_M,
				  IDPF_TX_FLEX_DESC_CMD_CS_EN);

	tx_desc = &tx_desc[i];
	tx_desc->buf_addr = cpu_to_le64(desc.addr);
#ifdef __LIBETH_WORD_ACCESS
	*(u64 *)&tx_desc->qw1 = ((u64)desc.len << 48) | cmd;
#else
	tx_desc->qw1.buf_size = cpu_to_le16(desc.len);
	tx_desc->qw1.cmd_dtype = cpu_to_le16(cmd);
#endif
}
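
/*
 * idpf_xdp_tx_xmit() is not called directly: it is the per-descriptor fill
 * callback consumed by the libeth_xdp bulk-send machinery. A minimal sketch
 * of how the driver's .c file could generate idpf_xdp_tx_flush_bulk()
 * declared above, assuming the LIBETH_XDP_DEFINE_*() helpers from
 * <net/libeth/xdp.h> (the prep callback name is illustrative):
 *
 *	LIBETH_XDP_DEFINE_START();
 *	LIBETH_XDP_DEFINE_FLUSH_TX(idpf_xdp_tx_flush_bulk, idpf_xdp_tx_prep,
 *				   idpf_xdp_tx_xmit);
 *	LIBETH_XDP_DEFINE_END();
 */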

/**
 * idpf_xdpsq_set_rs - set the RS bit on the last filled descriptor
 * @xdpsq: XDP Tx queue to set the RS bit for
 *
 * Request a completion writeback for the whole batch by setting the RS
 * ("report status") bit on the most recently filled Tx descriptor.
 */
static inline void idpf_xdpsq_set_rs(const struct idpf_tx_queue *xdpsq)
{
	u32 ntu, cmd;

	ntu = xdpsq->next_to_use;
	if (unlikely(!ntu))
		ntu = xdpsq->desc_count;

	cmd = FIELD_PREP(IDPF_FLEX_TXD_QW1_CMD_M, IDPF_TX_DESC_CMD_RS);
#ifdef __LIBETH_WORD_ACCESS
	*(u64 *)&xdpsq->flex_tx[ntu - 1].q.qw1 |= cmd;
#else
	xdpsq->flex_tx[ntu - 1].q.qw1.cmd_dtype |= cpu_to_le16(cmd);
#endif
}

/**
 * idpf_xdpsq_update_tail - bump the XDPSQ tail register
 * @xdpsq: XDP Tx queue to ring the doorbell for
 *
 * Make sure all the descriptor writes are visible to the HW before
 * notifying it of the new tail.
 */
static inline void idpf_xdpsq_update_tail(const struct idpf_tx_queue *xdpsq)
{
	dma_wmb();
	writel_relaxed(xdpsq->next_to_use, xdpsq->tail);
}

/**
 * idpf_xdp_tx_finalize - finalize sending over XDPSQ
 * @_xdpsq: XDP Tx queue
 * @sent: whether any frames were sent
 * @flush: whether to update RS bit and the tail register
 *
 * Set the RS bit ("end of batch"), bump the tail, and queue the cleanup timer.
 * To be called after a NAPI polling loop, at the end of .ndo_xdp_xmit() etc.
 */
static inline void idpf_xdp_tx_finalize(void *_xdpsq, bool sent, bool flush)
{
	struct idpf_tx_queue *xdpsq = _xdpsq;

	/* Kick the HW only when a flush was requested and something was
	 * actually queued, or when the ring is one descriptor short of full.
	 */
	if ((!flush || unlikely(!sent)) &&
	    likely(xdpsq->desc_count - 1 != xdpsq->pending))
		return;

	libeth_xdpsq_lock(&xdpsq->xdp_lock);

	idpf_xdpsq_set_rs(xdpsq);
	idpf_xdpsq_update_tail(xdpsq);

	libeth_xdpsq_queue_timer(xdpsq->timer);

	libeth_xdpsq_unlock(&xdpsq->xdp_lock);
}

void idpf_xdp_set_features(const struct idpf_vport *vport);

int idpf_xdp(struct net_device *dev, struct netdev_bpf *xdp);

#endif /* _IDPF_XDP_H_ */
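
/*
 * Usage sketch (illustrative only; @bq and @xdpsq are hypothetical locals):
 * Tx paths batch frames into a libeth_xdp_tx_bulk and finalize once per
 * NAPI poll rather than once per frame, so the RS bit and the tail bump
 * are amortized over the whole batch:
 *
 *	sent = idpf_xdp_tx_flush_bulk(&bq, 0);
 *	idpf_xdp_tx_finalize(xdpsq, sent, true);
 */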