// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2025 Intel Corporation */

#include "idpf.h"
#include "idpf_virtchnl.h"
#include "xdp.h"
#include "xsk.h"

static int idpf_rxq_for_each(const struct idpf_vport *vport,
			     int (*fn)(struct idpf_rx_queue *rxq, void *arg),
			     void *arg)
{
	bool splitq = idpf_is_queue_model_split(vport->rxq_model);

	if (!vport->rxq_grps)
		return -ENETDOWN;

	for (u32 i = 0; i < vport->num_rxq_grp; i++) {
		const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
		u32 num_rxq;

		if (splitq)
			num_rxq = rx_qgrp->splitq.num_rxq_sets;
		else
			num_rxq = rx_qgrp->singleq.num_rxq;

		for (u32 j = 0; j < num_rxq; j++) {
			struct idpf_rx_queue *q;
			int err;

			if (splitq)
				q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
			else
				q = rx_qgrp->singleq.rxqs[j];

			err = fn(q, arg);
			if (err)
				return err;
		}
	}

	return 0;
}

static int __idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq, void *arg)
{
	const struct idpf_vport *vport = rxq->q_vector->vport;
	bool split = idpf_is_queue_model_split(vport->rxq_model);
	const struct page_pool *pp;
	int err;

	err = __xdp_rxq_info_reg(&rxq->xdp_rxq, vport->netdev, rxq->idx,
				 rxq->q_vector->napi.napi_id,
				 rxq->rx_buf_size);
	if (err)
		return err;

	pp = split ? rxq->bufq_sets[0].bufq.pp : rxq->pp;
	xdp_rxq_info_attach_page_pool(&rxq->xdp_rxq, pp);

	if (!split)
		return 0;

	rxq->xdpsqs = &vport->txqs[vport->xdp_txq_offset];
	rxq->num_xdp_txq = vport->num_xdp_txq;

	return 0;
}
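/**
 * idpf_xdp_rxq_info_init - register XDP Rx queue info for a single queue
 * @rxq: Rx queue to register
 *
 * Thin wrapper around __idpf_xdp_rxq_info_init() for callers that need to
 * (re)register one queue at a time, e.g. on XSk pool setup.
 *
 * Return: 0 on success, -errno on failure.
 */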
int idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq)
{
	return __idpf_xdp_rxq_info_init(rxq, NULL);
}

int idpf_xdp_rxq_info_init_all(const struct idpf_vport *vport)
{
	return idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_init, NULL);
}

static int __idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, void *arg)
{
	if (idpf_is_queue_model_split((size_t)arg)) {
		rxq->xdpsqs = NULL;
		rxq->num_xdp_txq = 0;
	}

	xdp_rxq_info_detach_mem_model(&rxq->xdp_rxq);
	xdp_rxq_info_unreg(&rxq->xdp_rxq);

	return 0;
}

void idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, u32 model)
{
	__idpf_xdp_rxq_info_deinit(rxq, (void *)(size_t)model);
}

void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport)
{
	idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_deinit,
			  (void *)(size_t)vport->rxq_model);
}

static int idpf_xdp_rxq_assign_prog(struct idpf_rx_queue *rxq, void *arg)
{
	struct bpf_prog *prog = arg;
	struct bpf_prog *old;

	if (prog)
		bpf_prog_inc(prog);

	old = rcu_replace_pointer(rxq->xdp_prog, prog, lockdep_rtnl_is_held());
	if (old)
		bpf_prog_put(old);

	return 0;
}

void idpf_xdp_copy_prog_to_rqs(const struct idpf_vport *vport,
			       struct bpf_prog *xdp_prog)
{
	idpf_rxq_for_each(vport, idpf_xdp_rxq_assign_prog, xdp_prog);
}
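/*
 * XDP Tx queues ("XDPSQs") occupy the tail of vport->txqs[], starting at
 * vport->xdp_txq_offset. They run without interrupts (NOIRQ): pending
 * completions are reaped on the Tx path once the free-descriptor threshold
 * is crossed, or by the deferred libeth timer armed in idpf_xdpsqs_get().
 */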
static void idpf_xdp_tx_timer(struct work_struct *work);

int idpf_xdpsqs_get(const struct idpf_vport *vport)
{
	struct libeth_xdpsq_timer **timers __free(kvfree) = NULL;
	struct net_device *dev;
	u32 sqs;

	if (!idpf_xdp_enabled(vport))
		return 0;

	timers = kvcalloc(vport->num_xdp_txq, sizeof(*timers), GFP_KERNEL);
	if (!timers)
		return -ENOMEM;

	for (u32 i = 0; i < vport->num_xdp_txq; i++) {
		timers[i] = kzalloc_node(sizeof(*timers[i]), GFP_KERNEL,
					 cpu_to_mem(i));
		if (!timers[i]) {
			for (int j = i - 1; j >= 0; j--)
				kfree(timers[j]);

			return -ENOMEM;
		}
	}

	dev = vport->netdev;
	sqs = vport->xdp_txq_offset;

	for (u32 i = sqs; i < vport->num_txq; i++) {
		struct idpf_tx_queue *xdpsq = vport->txqs[i];

		xdpsq->complq = xdpsq->txq_grp->complq;
		kfree(xdpsq->refillq);
		xdpsq->refillq = NULL;

		idpf_queue_clear(FLOW_SCH_EN, xdpsq);
		idpf_queue_clear(FLOW_SCH_EN, xdpsq->complq);
		idpf_queue_set(NOIRQ, xdpsq);
		idpf_queue_set(XDP, xdpsq);
		idpf_queue_set(XDP, xdpsq->complq);

		xdpsq->timer = timers[i - sqs];
		libeth_xdpsq_get(&xdpsq->xdp_lock, dev, vport->xdpsq_share);
		libeth_xdpsq_init_timer(xdpsq->timer, xdpsq, &xdpsq->xdp_lock,
					idpf_xdp_tx_timer);

		xdpsq->pending = 0;
		xdpsq->xdp_tx = 0;
		xdpsq->thresh = libeth_xdp_queue_threshold(xdpsq->desc_count);
	}

	return 0;
}

void idpf_xdpsqs_put(const struct idpf_vport *vport)
{
	struct net_device *dev;
	u32 sqs;

	if (!idpf_xdp_enabled(vport))
		return;

	dev = vport->netdev;
	sqs = vport->xdp_txq_offset;

	for (u32 i = sqs; i < vport->num_txq; i++) {
		struct idpf_tx_queue *xdpsq = vport->txqs[i];

		if (!idpf_queue_has_clear(XDP, xdpsq))
			continue;

		libeth_xdpsq_deinit_timer(xdpsq->timer);
		libeth_xdpsq_put(&xdpsq->xdp_lock, dev);

		kfree(xdpsq->timer);
		xdpsq->refillq = NULL;
		idpf_queue_clear(NOIRQ, xdpsq);
	}
}
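/*
 * idpf_xdp_parse_cqe() parses one 4-byte Tx completion descriptor: the low
 * 16 bits carry the queue ID, completion type and generation bit, the high
 * 16 bits the SQ head pointer. Where the platform allows it
 * (__LIBETH_WORD_ACCESS), the whole descriptor is fetched with a single
 * 32-bit load instead of two 16-bit ones. Returns the head index on success,
 * -ENODATA when the generation bit does not match the expected one (no new
 * completions), or -EINVAL for anything other than a plain RS completion.
 */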
static int idpf_xdp_parse_cqe(const struct idpf_splitq_4b_tx_compl_desc *desc,
			      bool gen)
{
	u32 val;

#ifdef __LIBETH_WORD_ACCESS
	val = *(const u32 *)desc;
#else
	val = ((u32)le16_to_cpu(desc->q_head_compl_tag.q_head) << 16) |
	      le16_to_cpu(desc->qid_comptype_gen);
#endif
	if (!!(val & IDPF_TXD_COMPLQ_GEN_M) != gen)
		return -ENODATA;

	if (unlikely((val & GENMASK(IDPF_TXD_COMPLQ_GEN_S - 1, 0)) !=
		     FIELD_PREP(IDPF_TXD_COMPLQ_COMPL_TYPE_M,
				IDPF_TXD_COMPLT_RS)))
		return -EINVAL;

	return upper_16_bits(val);
}

u32 idpf_xdpsq_poll(struct idpf_tx_queue *xdpsq, u32 budget)
{
	struct idpf_compl_queue *cq = xdpsq->complq;
	u32 tx_ntc = xdpsq->next_to_clean;
	u32 tx_cnt = xdpsq->desc_count;
	u32 ntc = cq->next_to_clean;
	u32 cnt = cq->desc_count;
	u32 done_frames;
	bool gen;

	gen = idpf_queue_has(GEN_CHK, cq);

	for (done_frames = 0; done_frames < budget; ) {
		int ret;

		ret = idpf_xdp_parse_cqe(&cq->comp_4b[ntc], gen);
		if (ret >= 0) {
			done_frames = ret > tx_ntc ? ret - tx_ntc :
				      ret + tx_cnt - tx_ntc;
			goto next;
		}

		switch (ret) {
		case -ENODATA:
			goto out;
		case -EINVAL:
			break;
		}

next:
		if (unlikely(++ntc == cnt)) {
			ntc = 0;
			gen = !gen;
			idpf_queue_change(GEN_CHK, cq);
		}
	}

out:
	cq->next_to_clean = ntc;

	return done_frames;
}
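/*
 * idpf_xdpsq_complete() pairs the completion-queue poll above with the
 * actual buffer cleanup: every completed Tx buffer is released through
 * libeth_xdp_complete_tx(), which batches frame frees via the
 * xdp_frame_bulk API, and the SQ counters are updated afterwards.
 */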
static u32 idpf_xdpsq_complete(void *_xdpsq, u32 budget)
{
	struct libeth_xdpsq_napi_stats ss = { };
	struct idpf_tx_queue *xdpsq = _xdpsq;
	u32 tx_ntc = xdpsq->next_to_clean;
	u32 tx_cnt = xdpsq->desc_count;
	struct xdp_frame_bulk bq;
	struct libeth_cq_pp cp = {
		.dev	= xdpsq->dev,
		.bq	= &bq,
		.xss	= &ss,
		.napi	= true,
	};
	u32 done_frames;

	done_frames = idpf_xdpsq_poll(xdpsq, budget);
	if (unlikely(!done_frames))
		return 0;

	xdp_frame_bulk_init(&bq);

	for (u32 i = 0; likely(i < done_frames); i++) {
		libeth_xdp_complete_tx(&xdpsq->tx_buf[tx_ntc], &cp);

		if (unlikely(++tx_ntc == tx_cnt))
			tx_ntc = 0;
	}

	xdp_flush_frame_bulk(&bq);

	xdpsq->next_to_clean = tx_ntc;
	xdpsq->pending -= done_frames;
	xdpsq->xdp_tx -= cp.xdp_tx;

	return done_frames;
}

static u32 idpf_xdp_tx_prep(void *_xdpsq, struct libeth_xdpsq *sq)
{
	struct idpf_tx_queue *xdpsq = _xdpsq;
	u32 free;

	libeth_xdpsq_lock(&xdpsq->xdp_lock);

	free = xdpsq->desc_count - xdpsq->pending;
	if (free < xdpsq->thresh)
		free += idpf_xdpsq_complete(xdpsq, xdpsq->thresh);

	*sq = (struct libeth_xdpsq){
		.sqes		= xdpsq->tx_buf,
		.descs		= xdpsq->desc_ring,
		.count		= xdpsq->desc_count,
		.lock		= &xdpsq->xdp_lock,
		.ntu		= &xdpsq->next_to_use,
		.pending	= &xdpsq->pending,
		.xdp_tx		= &xdpsq->xdp_tx,
	};

	return free;
}
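/*
 * The macros below expand into the driver's XDP_TX flush
 * (idpf_xdp_tx_flush_bulk), ndo_xdp_xmit flush (idpf_xdp_xmit_flush_bulk)
 * and deferred-completion timer callback (idpf_xdp_tx_timer), all generated
 * by libeth_xdp from idpf_xdp_tx_prep() and the idpf_xdp_tx_xmit()
 * descriptor-fill helper.
 */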
LIBETH_XDP_DEFINE_START();
LIBETH_XDP_DEFINE_TIMER(static idpf_xdp_tx_timer, idpf_xdpsq_complete);
LIBETH_XDP_DEFINE_FLUSH_TX(idpf_xdp_tx_flush_bulk, idpf_xdp_tx_prep,
			   idpf_xdp_tx_xmit);
LIBETH_XDP_DEFINE_FLUSH_XMIT(static idpf_xdp_xmit_flush_bulk, idpf_xdp_tx_prep,
			     idpf_xdp_tx_xmit);
LIBETH_XDP_DEFINE_END();

int idpf_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		  u32 flags)
{
	const struct idpf_netdev_priv *np = netdev_priv(dev);
	const struct idpf_vport *vport = np->vport;

	if (unlikely(!netif_carrier_ok(dev) || !vport->link_up))
		return -ENETDOWN;

	return libeth_xdp_xmit_do_bulk(dev, n, frames, flags,
				       &vport->txqs[vport->xdp_txq_offset],
				       vport->num_xdp_txq,
				       idpf_xdp_xmit_flush_bulk,
				       idpf_xdp_tx_finalize);
}

static int idpf_xdpmo_rx_hash(const struct xdp_md *ctx, u32 *hash,
			      enum xdp_rss_hash_type *rss_type)
{
	const struct libeth_xdp_buff *xdp = (typeof(xdp))ctx;
	struct idpf_xdp_rx_desc desc __uninitialized;
	const struct idpf_rx_queue *rxq;
	struct libeth_rx_pt pt;

	rxq = libeth_xdp_buff_to_rq(xdp, typeof(*rxq), xdp_rxq);

	idpf_xdp_get_qw0(&desc, xdp->desc);

	pt = rxq->rx_ptype_lkup[idpf_xdp_rx_pt(&desc)];
	if (!libeth_rx_pt_has_hash(rxq->xdp_rxq.dev, pt))
		return -ENODATA;

	idpf_xdp_get_qw2(&desc, xdp->desc);

	return libeth_xdpmo_rx_hash(hash, rss_type, idpf_xdp_rx_hash(&desc),
				    pt);
}

static const struct xdp_metadata_ops idpf_xdpmo = {
	.xmo_rx_hash		= idpf_xdpmo_rx_hash,
};
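/*
 * XDP features (and the Rx-hash metadata ops above) are advertised only for
 * the split queue model. XDP_REDIRECT support is announced separately from
 * idpf_xdp_setup_prog() via libeth_xdp_set_redirect() once a program is
 * actually installed, hence the "noredir" variant here.
 */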
void idpf_xdp_set_features(const struct idpf_vport *vport)
{
	if (!idpf_is_queue_model_split(vport->rxq_model))
		return;

	libeth_xdp_set_features_noredir(vport->netdev, &idpf_xdpmo);
}

static int idpf_xdp_setup_prog(struct idpf_vport *vport,
			       const struct netdev_bpf *xdp)
{
	const struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
	struct bpf_prog *old, *prog = xdp->prog;
	struct idpf_vport_config *cfg;
	int ret;

	cfg = vport->adapter->vport_config[vport->idx];

	if (test_bit(IDPF_REMOVE_IN_PROG, vport->adapter->flags) ||
	    !test_bit(IDPF_VPORT_REG_NETDEV, cfg->flags) ||
	    !!vport->xdp_prog == !!prog) {
		if (np->state == __IDPF_VPORT_UP)
			idpf_xdp_copy_prog_to_rqs(vport, prog);

		old = xchg(&vport->xdp_prog, prog);
		if (old)
			bpf_prog_put(old);

		cfg->user_config.xdp_prog = prog;

		return 0;
	}

	if (!vport->num_xdp_txq && vport->num_txq == cfg->max_q.max_txq) {
		NL_SET_ERR_MSG_MOD(xdp->extack,
				   "No Tx queues available for XDP, please decrease the number of regular SQs");
		return -ENOSPC;
	}

	old = cfg->user_config.xdp_prog;
	cfg->user_config.xdp_prog = prog;

	ret = idpf_initiate_soft_reset(vport, IDPF_SR_Q_CHANGE);
	if (ret) {
		NL_SET_ERR_MSG_MOD(xdp->extack,
				   "Could not reopen the vport after XDP setup");

		cfg->user_config.xdp_prog = old;
		old = prog;
	}

	if (old)
		bpf_prog_put(old);

	libeth_xdp_set_redirect(vport->netdev, vport->xdp_prog);

	return ret;
}
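/*
 * idpf_xdp() is the .ndo_bpf entry point: all commands are serialized under
 * the vport control lock, and anything but the split Tx queue model is
 * rejected up front.
 */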
int idpf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct idpf_vport *vport;
	int ret;

	idpf_vport_ctrl_lock(dev);
	vport = idpf_netdev_to_vport(dev);

	if (!idpf_is_queue_model_split(vport->txq_model))
		goto notsupp;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		ret = idpf_xdp_setup_prog(vport, xdp);
		break;
	case XDP_SETUP_XSK_POOL:
		ret = idpf_xsk_pool_setup(vport, xdp);
		break;
	default:
notsupp:
		ret = -EOPNOTSUPP;
		break;
	}

	idpf_vport_ctrl_unlock(dev);

	return ret;
}