xref: /linux/drivers/net/ethernet/intel/idpf/xdp.c (revision 705457e7211f22c49b410eb25e83cef8a61bd560)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2025 Intel Corporation */

#include <net/libeth/xdp.h>

#include "idpf.h"
#include "idpf_virtchnl.h"
#include "xdp.h"

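/**
 * idpf_rxq_for_each - iterate over every Rx queue of a vport
 * @vport: vport whose Rx queues are walked
 * @fn: callback invoked for each Rx queue
 * @arg: opaque argument passed through to @fn
 *
 * Handles both the split and single queue models.
 *
 * Return: 0 on success, -ENETDOWN if the queue groups are not allocated,
 * or the first non-zero error returned by @fn.
 */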
static int idpf_rxq_for_each(const struct idpf_vport *vport,
			     int (*fn)(struct idpf_rx_queue *rxq, void *arg),
			     void *arg)
{
	bool splitq = idpf_is_queue_model_split(vport->rxq_model);

	if (!vport->rxq_grps)
		return -ENETDOWN;

	for (u32 i = 0; i < vport->num_rxq_grp; i++) {
		const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
		u32 num_rxq;

		if (splitq)
			num_rxq = rx_qgrp->splitq.num_rxq_sets;
		else
			num_rxq = rx_qgrp->singleq.num_rxq;

		for (u32 j = 0; j < num_rxq; j++) {
			struct idpf_rx_queue *q;
			int err;

			if (splitq)
				q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
			else
				q = rx_qgrp->singleq.rxqs[j];

			err = fn(q, arg);
			if (err)
				return err;
		}
	}

	return 0;
}

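/**
 * __idpf_xdp_rxq_info_init - register XDP Rx queue info for one queue
 * @rxq: Rx queue to register
 * @arg: unused callback argument
 *
 * Registers the queue's &xdp_rxq_info and attaches its page_pool as the
 * memory model. For the split queue model, also points the queue at the
 * vport's XDPSQ array.
 *
 * Return: 0 on success, negative errno on failure.
 */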
static int __idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq, void *arg)
{
	const struct idpf_vport *vport = rxq->q_vector->vport;
	bool split = idpf_is_queue_model_split(vport->rxq_model);
	const struct page_pool *pp;
	int err;

	err = __xdp_rxq_info_reg(&rxq->xdp_rxq, vport->netdev, rxq->idx,
				 rxq->q_vector->napi.napi_id,
				 rxq->rx_buf_size);
	if (err)
		return err;

	pp = split ? rxq->bufq_sets[0].bufq.pp : rxq->pp;
	xdp_rxq_info_attach_page_pool(&rxq->xdp_rxq, pp);

	if (!split)
		return 0;

	rxq->xdpsqs = &vport->txqs[vport->xdp_txq_offset];
	rxq->num_xdp_txq = vport->num_xdp_txq;

	return 0;
}

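/**
 * idpf_xdp_rxq_info_init_all - register XDP Rx queue info for all queues
 * @vport: vport to initialize
 *
 * Return: 0 on success, negative errno on failure.
 */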
int idpf_xdp_rxq_info_init_all(const struct idpf_vport *vport)
{
	return idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_init, NULL);
}

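/**
 * __idpf_xdp_rxq_info_deinit - unregister XDP Rx queue info for one queue
 * @rxq: Rx queue to unregister
 * @arg: queue model of the vport, passed as a casted pointer value
 *
 * Return: always 0.
 */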
static int __idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, void *arg)
{
	if (idpf_is_queue_model_split((size_t)arg)) {
		rxq->xdpsqs = NULL;
		rxq->num_xdp_txq = 0;
	}

	xdp_rxq_info_detach_mem_model(&rxq->xdp_rxq);
	xdp_rxq_info_unreg(&rxq->xdp_rxq);

	return 0;
}

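/**
 * idpf_xdp_rxq_info_deinit_all - unregister XDP Rx queue info for all queues
 * @vport: vport to deinitialize
 */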
void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport)
{
	idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_deinit,
			  (void *)(size_t)vport->rxq_model);
}

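/**
 * idpf_xdp_rxq_assign_prog - assign an XDP program to one Rx queue
 * @rxq: Rx queue to update
 * @arg: XDP program to assign, %NULL to remove the current one
 *
 * Takes a reference on the new program and drops the reference held by
 * the queue on the old one. Relies on the RTNL lock for synchronization.
 *
 * Return: always 0.
 */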
static int idpf_xdp_rxq_assign_prog(struct idpf_rx_queue *rxq, void *arg)
{
	struct bpf_prog *prog = arg;
	struct bpf_prog *old;

	if (prog)
		bpf_prog_inc(prog);

	old = rcu_replace_pointer(rxq->xdp_prog, prog, lockdep_rtnl_is_held());
	if (old)
		bpf_prog_put(old);

	return 0;
}

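/**
 * idpf_xdp_copy_prog_to_rqs - propagate an XDP program to all Rx queues
 * @vport: vport to update
 * @xdp_prog: XDP program to assign, %NULL to remove
 */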
void idpf_xdp_copy_prog_to_rqs(const struct idpf_vport *vport,
			       struct bpf_prog *xdp_prog)
{
	idpf_rxq_for_each(vport, idpf_xdp_rxq_assign_prog, xdp_prog);
}

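/**
 * idpf_xdpsqs_get - set up the Tx queues reserved for XDP
 * @vport: vport to set up XDPSQs for
 *
 * Allocates a libeth XDPSQ timer for each XDP Tx queue on the memory node
 * of its CPU, marks the queues and their completion queues as XDP ones,
 * initializes the queue sharing locks and computes the completion
 * thresholds.
 *
 * Return: 0 on success or if XDP is not enabled, -ENOMEM on allocation
 * failure.
 */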
int idpf_xdpsqs_get(const struct idpf_vport *vport)
{
	struct libeth_xdpsq_timer **timers __free(kvfree) = NULL;
	struct net_device *dev;
	u32 sqs;

	if (!idpf_xdp_enabled(vport))
		return 0;

	timers = kvcalloc(vport->num_xdp_txq, sizeof(*timers), GFP_KERNEL);
	if (!timers)
		return -ENOMEM;

	for (u32 i = 0; i < vport->num_xdp_txq; i++) {
		timers[i] = kzalloc_node(sizeof(*timers[i]), GFP_KERNEL,
					 cpu_to_mem(i));
		if (!timers[i]) {
			for (int j = i - 1; j >= 0; j--)
				kfree(timers[j]);

			return -ENOMEM;
		}
	}

	dev = vport->netdev;
	sqs = vport->xdp_txq_offset;

	for (u32 i = sqs; i < vport->num_txq; i++) {
		struct idpf_tx_queue *xdpsq = vport->txqs[i];

		xdpsq->complq = xdpsq->txq_grp->complq;
		kfree(xdpsq->refillq);
		xdpsq->refillq = NULL;

		idpf_queue_clear(FLOW_SCH_EN, xdpsq);
		idpf_queue_clear(FLOW_SCH_EN, xdpsq->complq);
		idpf_queue_set(NOIRQ, xdpsq);
		idpf_queue_set(XDP, xdpsq);
		idpf_queue_set(XDP, xdpsq->complq);

		xdpsq->timer = timers[i - sqs];
		libeth_xdpsq_get(&xdpsq->xdp_lock, dev, vport->xdpsq_share);

		xdpsq->pending = 0;
		xdpsq->xdp_tx = 0;
		xdpsq->thresh = libeth_xdp_queue_threshold(xdpsq->desc_count);
	}

	return 0;
}

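/**
 * idpf_xdpsqs_put - tear down the Tx queues reserved for XDP
 * @vport: vport to tear down XDPSQs for
 *
 * Reverts idpf_xdpsqs_get(): releases the queue sharing locks and frees
 * the timers. The timer pointers are reassigned on the next
 * idpf_xdpsqs_get() call.
 */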
void idpf_xdpsqs_put(const struct idpf_vport *vport)
{
	struct net_device *dev;
	u32 sqs;

	if (!idpf_xdp_enabled(vport))
		return;

	dev = vport->netdev;
	sqs = vport->xdp_txq_offset;

	for (u32 i = sqs; i < vport->num_txq; i++) {
		struct idpf_tx_queue *xdpsq = vport->txqs[i];

		if (!idpf_queue_has_clear(XDP, xdpsq))
			continue;

		libeth_xdpsq_put(&xdpsq->xdp_lock, dev);

		kfree(xdpsq->timer);
		xdpsq->refillq = NULL;
		idpf_queue_clear(NOIRQ, xdpsq);
	}
}

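/**
 * idpf_xdp_setup_prog - install or remove an XDP program on a vport
 * @vport: vport to configure
 * @xdp: command data carrying the program and the extack
 *
 * If the XDP enabled/disabled state does not change (or the netdev is not
 * registered), only the program pointers are swapped. Otherwise, the queue
 * layout has to change, so a soft reset reopens the vport with the new
 * configuration.
 *
 * Return: 0 on success, -ENOSPC if no Tx queues can be reserved for XDP,
 * or the error from the soft reset.
 */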
static int idpf_xdp_setup_prog(struct idpf_vport *vport,
			       const struct netdev_bpf *xdp)
{
	const struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
	struct bpf_prog *old, *prog = xdp->prog;
	struct idpf_vport_config *cfg;
	int ret;

	cfg = vport->adapter->vport_config[vport->idx];

	if (test_bit(IDPF_REMOVE_IN_PROG, vport->adapter->flags) ||
	    !test_bit(IDPF_VPORT_REG_NETDEV, cfg->flags) ||
	    !!vport->xdp_prog == !!prog) {
		if (np->state == __IDPF_VPORT_UP)
			idpf_xdp_copy_prog_to_rqs(vport, prog);

		old = xchg(&vport->xdp_prog, prog);
		if (old)
			bpf_prog_put(old);

		cfg->user_config.xdp_prog = prog;

		return 0;
	}

	if (!vport->num_xdp_txq && vport->num_txq == cfg->max_q.max_txq) {
		NL_SET_ERR_MSG_MOD(xdp->extack,
				   "No Tx queues available for XDP, please decrease the number of regular SQs");
		return -ENOSPC;
	}

	old = cfg->user_config.xdp_prog;
	cfg->user_config.xdp_prog = prog;

	ret = idpf_initiate_soft_reset(vport, IDPF_SR_Q_CHANGE);
	if (ret) {
		NL_SET_ERR_MSG_MOD(xdp->extack,
				   "Could not reopen the vport after XDP setup");

		cfg->user_config.xdp_prog = old;
		old = prog;
	}

	if (old)
		bpf_prog_put(old);

	return ret;
}

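/**
 * idpf_xdp - handle the netdev XDP (BPF) commands
 * @dev: network device
 * @xdp: XDP command
 *
 * XDP is supported only with the split Tx queue model; any other
 * configuration or command is rejected with -EOPNOTSUPP.
 *
 * Return: 0 on success, negative errno on failure.
 */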
int idpf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct idpf_vport *vport;
	int ret;

	idpf_vport_ctrl_lock(dev);
	vport = idpf_netdev_to_vport(dev);

	if (!idpf_is_queue_model_split(vport->txq_model))
		goto notsupp;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		ret = idpf_xdp_setup_prog(vport, xdp);
		break;
	default:
notsupp:
		ret = -EOPNOTSUPP;
		break;
	}

	idpf_vport_ctrl_unlock(dev);

	return ret;
}