// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2025 Intel Corporation */

#include <net/libeth/xdp.h>

#include "idpf.h"
#include "xdp.h"

/**
 * idpf_rxq_for_each - iterate over all Rx queues of a vport
 * @vport: vport whose Rx queues to walk
 * @fn: callback to invoke for each Rx queue
 * @arg: opaque argument passed through to @fn
 *
 * Walks every Rx queue group, handling both the split and single queue
 * models.
 *
 * Return: 0 on success, -ENETDOWN if the queue groups are not allocated,
 * or the first non-zero error returned by @fn.
 */
static int idpf_rxq_for_each(const struct idpf_vport *vport,
			     int (*fn)(struct idpf_rx_queue *rxq, void *arg),
			     void *arg)
{
	bool splitq = idpf_is_queue_model_split(vport->rxq_model);

	if (!vport->rxq_grps)
		return -ENETDOWN;

	for (u32 i = 0; i < vport->num_rxq_grp; i++) {
		const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
		u32 num_rxq;

		if (splitq)
			num_rxq = rx_qgrp->splitq.num_rxq_sets;
		else
			num_rxq = rx_qgrp->singleq.num_rxq;

		for (u32 j = 0; j < num_rxq; j++) {
			struct idpf_rx_queue *q;
			int err;

			if (splitq)
				q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
			else
				q = rx_qgrp->singleq.rxqs[j];

			err = fn(q, arg);
			if (err)
				return err;
		}
	}

	return 0;
}

/**
 * __idpf_xdp_rxq_info_init - register XDP Rx queue info for one Rx queue
 * @rxq: Rx queue to register
 * @arg: unused
 *
 * Registers the queue's &xdp_rxq_info, attaches the backing page_pool (the
 * buffer queue's pool in the split model, the Rx queue's own pool otherwise)
 * and, for the split model, wires up the XDP Tx queue array.
 *
 * Return: 0 on success, -errno on failure.
 */
static int __idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq, void *arg)
{
	const struct idpf_vport *vport = rxq->q_vector->vport;
	bool split = idpf_is_queue_model_split(vport->rxq_model);
	const struct page_pool *pp;
	int err;

	err = __xdp_rxq_info_reg(&rxq->xdp_rxq, vport->netdev, rxq->idx,
				 rxq->q_vector->napi.napi_id,
				 rxq->rx_buf_size);
	if (err)
		return err;

	pp = split ? rxq->bufq_sets[0].bufq.pp : rxq->pp;
	xdp_rxq_info_attach_page_pool(&rxq->xdp_rxq, pp);

	if (!split)
		return 0;

	rxq->xdpsqs = &vport->txqs[vport->xdp_txq_offset];
	rxq->num_xdp_txq = vport->num_xdp_txq;

	return 0;
}

/**
 * idpf_xdp_rxq_info_init_all - initialize XDP Rx queue info for all queues
 * @vport: vport to initialize
 *
 * Return: 0 on success, -errno on failure.
 */
int idpf_xdp_rxq_info_init_all(const struct idpf_vport *vport)
{
	return idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_init, NULL);
}

/**
 * __idpf_xdp_rxq_info_deinit - unregister XDP Rx queue info for one Rx queue
 * @rxq: Rx queue to unregister
 * @arg: the vport's Rx queue model, passed as a pointer-sized integer
 *
 * Return: always 0.
 */
static int __idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, void *arg)
{
	if (idpf_is_queue_model_split((size_t)arg)) {
		rxq->xdpsqs = NULL;
		rxq->num_xdp_txq = 0;
	}

	xdp_rxq_info_detach_mem_model(&rxq->xdp_rxq);
	xdp_rxq_info_unreg(&rxq->xdp_rxq);

	return 0;
}

/**
 * idpf_xdp_rxq_info_deinit_all - deinit XDP Rx queue info for all queues
 * @vport: vport to deinitialize
 */
void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport)
{
	idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_deinit,
			  (void *)(size_t)vport->rxq_model);
}

/**
 * idpf_xdpsqs_get - prepare the vport's XDP Tx queues
 * @vport: vport whose XDP send queues to set up
 *
 * Converts the regular Tx queues starting at @vport->xdp_txq_offset into
 * XDP send queues: allocates per-queue completion timers, drops the refill
 * queues and flow scheduling (unused in XDP mode), marks the queues and
 * their completion queues as XDP/no-IRQ, and initializes the SQ locks.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
int idpf_xdpsqs_get(const struct idpf_vport *vport)
{
	struct libeth_xdpsq_timer **timers __free(kvfree) = NULL;
	struct net_device *dev;
	u32 sqs;

	if (!idpf_xdp_enabled(vport))
		return 0;

	timers = kvcalloc(vport->num_xdp_txq, sizeof(*timers), GFP_KERNEL);
	if (!timers)
		return -ENOMEM;

	for (u32 i = 0; i < vport->num_xdp_txq; i++) {
		/* Place each timer on the memory node of its CPU */
		timers[i] = kzalloc_node(sizeof(*timers[i]), GFP_KERNEL,
					 cpu_to_mem(i));
		if (!timers[i]) {
			for (int j = i - 1; j >= 0; j--)
				kfree(timers[j]);

			return -ENOMEM;
		}
	}

	dev = vport->netdev;
	sqs = vport->xdp_txq_offset;

	for (u32 i = sqs; i < vport->num_txq; i++) {
		struct idpf_tx_queue *xdpsq = vport->txqs[i];

		xdpsq->complq = xdpsq->txq_grp->complq;
		kfree(xdpsq->refillq);
		xdpsq->refillq = NULL;

		idpf_queue_clear(FLOW_SCH_EN, xdpsq);
		idpf_queue_clear(FLOW_SCH_EN, xdpsq->complq);
		idpf_queue_set(NOIRQ, xdpsq);
		idpf_queue_set(XDP, xdpsq);
		idpf_queue_set(XDP, xdpsq->complq);

		xdpsq->timer = timers[i - sqs];
		libeth_xdpsq_get(&xdpsq->xdp_lock, dev, vport->xdpsq_share);

		xdpsq->pending = 0;
		xdpsq->xdp_tx = 0;
		xdpsq->thresh = libeth_xdp_queue_threshold(xdpsq->desc_count);
	}

	return 0;
}

/**
 * idpf_xdpsqs_put - release the vport's XDP Tx queues
 * @vport: vport whose XDP send queues to tear down
 *
 * Reverts idpf_xdpsqs_get(): releases the SQ locks and frees the completion
 * timers of every queue still flagged as XDP.
 */
void idpf_xdpsqs_put(const struct idpf_vport *vport)
{
	struct net_device *dev;
	u32 sqs;

	if (!idpf_xdp_enabled(vport))
		return;

	dev = vport->netdev;
	sqs = vport->xdp_txq_offset;

	for (u32 i = sqs; i < vport->num_txq; i++) {
		struct idpf_tx_queue *xdpsq = vport->txqs[i];

		/* Tests and clears the XDP flag in one go */
		if (!idpf_queue_has_clear(XDP, xdpsq))
			continue;

		libeth_xdpsq_put(&xdpsq->xdp_lock, dev);

		kfree(xdpsq->timer);
		xdpsq->refillq = NULL;
		idpf_queue_clear(NOIRQ, xdpsq);
	}
}