xref: /linux/drivers/net/ethernet/intel/idpf/xdp.c (revision 88ca0c738c4159ce87893782b6dd964b5aa01f6e)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2025 Intel Corporation */

#include "idpf.h"
#include "idpf_virtchnl.h"
#include "xdp.h"

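/**
 * idpf_rxq_for_each - invoke a callback on every Rx queue of a vport
 * @vport: vport whose Rx queues are walked
 * @fn: callback invoked for each Rx queue
 * @arg: opaque argument passed through to @fn
 *
 * Walks all Rx queue groups regardless of the queue model (split or single
 * queue) and stops at the first callback error.
 *
 * Return: 0 on success, -ENETDOWN if the queue groups are not allocated,
 *	   or the first non-zero value returned by @fn.
 */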
static int idpf_rxq_for_each(const struct idpf_vport *vport,
			     int (*fn)(struct idpf_rx_queue *rxq, void *arg),
			     void *arg)
{
	bool splitq = idpf_is_queue_model_split(vport->rxq_model);

	if (!vport->rxq_grps)
		return -ENETDOWN;

	for (u32 i = 0; i < vport->num_rxq_grp; i++) {
		const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
		u32 num_rxq;

		if (splitq)
			num_rxq = rx_qgrp->splitq.num_rxq_sets;
		else
			num_rxq = rx_qgrp->singleq.num_rxq;

		for (u32 j = 0; j < num_rxq; j++) {
			struct idpf_rx_queue *q;
			int err;

			if (splitq)
				q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
			else
				q = rx_qgrp->singleq.rxqs[j];

			err = fn(q, arg);
			if (err)
				return err;
		}
	}

	return 0;
}

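/**
 * __idpf_xdp_rxq_info_init - register XDP Rx queue info for one Rx queue
 * @rxq: Rx queue to register
 * @arg: unused, required by the idpf_rxq_for_each() callback signature
 *
 * Registers the queue in the XDP core, attaches the backing page_pool as
 * the memory model and, for the split queue model, points the queue at the
 * vport's XDP Tx queues used for %XDP_TX.
 *
 * Return: 0 on success, -errno on failure.
 */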
static int __idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq, void *arg)
{
	const struct idpf_vport *vport = rxq->q_vector->vport;
	bool split = idpf_is_queue_model_split(vport->rxq_model);
	const struct page_pool *pp;
	int err;

	err = __xdp_rxq_info_reg(&rxq->xdp_rxq, vport->netdev, rxq->idx,
				 rxq->q_vector->napi.napi_id,
				 rxq->rx_buf_size);
	if (err)
		return err;

	pp = split ? rxq->bufq_sets[0].bufq.pp : rxq->pp;
	xdp_rxq_info_attach_page_pool(&rxq->xdp_rxq, pp);

	if (!split)
		return 0;

	rxq->xdpsqs = &vport->txqs[vport->xdp_txq_offset];
	rxq->num_xdp_txq = vport->num_xdp_txq;

	return 0;
}

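/**
 * idpf_xdp_rxq_info_init_all - initialize XDP Rx queue info for a vport
 * @vport: vport whose Rx queues are registered
 *
 * Return: 0 on success, -errno on failure.
 */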
int idpf_xdp_rxq_info_init_all(const struct idpf_vport *vport)
{
	return idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_init, NULL);
}

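/**
 * __idpf_xdp_rxq_info_deinit - unregister XDP Rx queue info for one Rx queue
 * @rxq: Rx queue to unregister
 * @arg: queue model of the vport, passed as a pointer-sized integer
 *
 * Return: always 0.
 */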
static int __idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, void *arg)
{
	if (idpf_is_queue_model_split((size_t)arg)) {
		rxq->xdpsqs = NULL;
		rxq->num_xdp_txq = 0;
	}

	xdp_rxq_info_detach_mem_model(&rxq->xdp_rxq);
	xdp_rxq_info_unreg(&rxq->xdp_rxq);

	return 0;
}

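/**
 * idpf_xdp_rxq_info_deinit_all - unregister XDP Rx queue info for a vport
 * @vport: vport whose Rx queues are unregistered
 */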
void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport)
{
	idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_deinit,
			  (void *)(size_t)vport->rxq_model);
}

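/**
 * idpf_xdp_rxq_assign_prog - assign an XDP program to one Rx queue
 * @rxq: Rx queue to update
 * @arg: XDP program to assign, %NULL to clear the current one
 *
 * Takes an extra reference on the new program and drops the reference held
 * on the previously assigned one, if any.
 *
 * Return: always 0.
 */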
static int idpf_xdp_rxq_assign_prog(struct idpf_rx_queue *rxq, void *arg)
{
	struct bpf_prog *prog = arg;
	struct bpf_prog *old;

	if (prog)
		bpf_prog_inc(prog);

	old = rcu_replace_pointer(rxq->xdp_prog, prog, lockdep_rtnl_is_held());
	if (old)
		bpf_prog_put(old);

	return 0;
}

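/**
 * idpf_xdp_copy_prog_to_rqs - propagate an XDP program to all Rx queues
 * @vport: vport to update
 * @xdp_prog: XDP program to assign, %NULL to clear the current one
 */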
void idpf_xdp_copy_prog_to_rqs(const struct idpf_vport *vport,
			       struct bpf_prog *xdp_prog)
{
	idpf_rxq_for_each(vport, idpf_xdp_rxq_assign_prog, xdp_prog);
}

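/* Deferred XDP SQ completion; defined below via LIBETH_XDP_DEFINE_TIMER(). */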
static void idpf_xdp_tx_timer(struct work_struct *work);

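/**
 * idpf_xdpsqs_get - set up the XDP Tx queues of a vport
 * @vport: vport to set up
 *
 * Allocates the completion timers and switches the Tx queues starting at
 * @vport->xdp_txq_offset to XDP mode: queue-based scheduling, no IRQ, no
 * refill queue, plus the libeth SQ lock, timer and cleaning threshold.
 *
 * Return: 0 on success or when XDP is not enabled, -ENOMEM on allocation
 *	   failure.
 */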
int idpf_xdpsqs_get(const struct idpf_vport *vport)
{
	struct libeth_xdpsq_timer **timers __free(kvfree) = NULL;
	struct net_device *dev;
	u32 sqs;

	if (!idpf_xdp_enabled(vport))
		return 0;

	timers = kvcalloc(vport->num_xdp_txq, sizeof(*timers), GFP_KERNEL);
	if (!timers)
		return -ENOMEM;

	for (u32 i = 0; i < vport->num_xdp_txq; i++) {
		timers[i] = kzalloc_node(sizeof(*timers[i]), GFP_KERNEL,
					 cpu_to_mem(i));
		if (!timers[i]) {
			for (int j = i - 1; j >= 0; j--)
				kfree(timers[j]);

			return -ENOMEM;
		}
	}

	dev = vport->netdev;
	sqs = vport->xdp_txq_offset;

	for (u32 i = sqs; i < vport->num_txq; i++) {
		struct idpf_tx_queue *xdpsq = vport->txqs[i];

		xdpsq->complq = xdpsq->txq_grp->complq;
		kfree(xdpsq->refillq);
		xdpsq->refillq = NULL;

		idpf_queue_clear(FLOW_SCH_EN, xdpsq);
		idpf_queue_clear(FLOW_SCH_EN, xdpsq->complq);
		idpf_queue_set(NOIRQ, xdpsq);
		idpf_queue_set(XDP, xdpsq);
		idpf_queue_set(XDP, xdpsq->complq);

		xdpsq->timer = timers[i - sqs];
		libeth_xdpsq_get(&xdpsq->xdp_lock, dev, vport->xdpsq_share);
		libeth_xdpsq_init_timer(xdpsq->timer, xdpsq, &xdpsq->xdp_lock,
					idpf_xdp_tx_timer);

		xdpsq->pending = 0;
		xdpsq->xdp_tx = 0;
		xdpsq->thresh = libeth_xdp_queue_threshold(xdpsq->desc_count);
	}

	return 0;
}

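/**
 * idpf_xdpsqs_put - release the XDP Tx queues of a vport
 * @vport: vport to release the queues of
 *
 * Reverses idpf_xdpsqs_get(): stops and frees the completion timers,
 * releases the libeth SQ locks and clears the XDP-related queue flags.
 */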
void idpf_xdpsqs_put(const struct idpf_vport *vport)
{
	struct net_device *dev;
	u32 sqs;

	if (!idpf_xdp_enabled(vport))
		return;

	dev = vport->netdev;
	sqs = vport->xdp_txq_offset;

	for (u32 i = sqs; i < vport->num_txq; i++) {
		struct idpf_tx_queue *xdpsq = vport->txqs[i];

		if (!idpf_queue_has_clear(XDP, xdpsq))
			continue;

		libeth_xdpsq_deinit_timer(xdpsq->timer);
		libeth_xdpsq_put(&xdpsq->xdp_lock, dev);

		kfree(xdpsq->timer);
		xdpsq->refillq = NULL;
		idpf_queue_clear(NOIRQ, xdpsq);
	}
}

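/**
 * idpf_xdp_parse_cqe - parse one XDP SQ completion descriptor
 * @desc: 4-byte completion descriptor to parse
 * @gen: expected generation bit of the descriptor
 *
 * Return: the reported queue head on success, -ENODATA if the generation
 *	   bit does not match the expected one (descriptor not yet written
 *	   by HW), -EINVAL if the completion is not a plain
 *	   %IDPF_TXD_COMPLT_RS one.
 */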
static int idpf_xdp_parse_cqe(const struct idpf_splitq_4b_tx_compl_desc *desc,
			      bool gen)
{
	u32 val;

#ifdef __LIBETH_WORD_ACCESS
	val = *(const u32 *)desc;
#else
	val = ((u32)le16_to_cpu(desc->q_head_compl_tag.q_head) << 16) |
	      le16_to_cpu(desc->qid_comptype_gen);
#endif
	if (!!(val & IDPF_TXD_COMPLQ_GEN_M) != gen)
		return -ENODATA;

	if (unlikely((val & GENMASK(IDPF_TXD_COMPLQ_GEN_S - 1, 0)) !=
		     FIELD_PREP(IDPF_TXD_COMPLQ_COMPL_TYPE_M,
				IDPF_TXD_COMPLT_RS)))
		return -EINVAL;

	return upper_16_bits(val);
}

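/**
 * idpf_xdpsq_poll - poll the completion queue of an XDP Tx queue
 * @xdpsq: XDP Tx queue whose completion queue is polled
 * @budget: maximum number of completions to process
 *
 * Walks the completion ring until the generation bit of the next descriptor
 * no longer matches (no new completions) or the budget is exhausted,
 * skipping malformed descriptors, and converts the last reported queue head
 * into the number of descriptors completed since the previous poll.
 *
 * Return: number of Tx descriptors completed by HW.
 */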
static u32 idpf_xdpsq_poll(struct idpf_tx_queue *xdpsq, u32 budget)
{
	struct idpf_compl_queue *cq = xdpsq->complq;
	u32 tx_ntc = xdpsq->next_to_clean;
	u32 tx_cnt = xdpsq->desc_count;
	u32 ntc = cq->next_to_clean;
	u32 cnt = cq->desc_count;
	u32 done_frames;
	bool gen;

	gen = idpf_queue_has(GEN_CHK, cq);

	for (done_frames = 0; done_frames < budget; ) {
		int ret;

		ret = idpf_xdp_parse_cqe(&cq->comp_4b[ntc], gen);
		if (ret >= 0) {
			done_frames = ret > tx_ntc ? ret - tx_ntc :
						     ret + tx_cnt - tx_ntc;
			goto next;
		}

		switch (ret) {
		case -ENODATA:
			goto out;
		case -EINVAL:
			break;
		}

next:
		if (unlikely(++ntc == cnt)) {
			ntc = 0;
			gen = !gen;
			idpf_queue_change(GEN_CHK, cq);
		}
	}

out:
	cq->next_to_clean = ntc;

	return done_frames;
}

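/**
 * idpf_xdpsq_complete - clean up an XDP Tx queue after HW completions
 * @_xdpsq: XDP Tx queue to clean, untyped to match the libeth callback
 * @budget: maximum number of descriptors to clean
 *
 * Polls the completion queue and returns the buffers of all completed
 * descriptors via the libeth bulk helpers, then updates the queue counters.
 *
 * Return: number of descriptors cleaned.
 */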
static u32 idpf_xdpsq_complete(void *_xdpsq, u32 budget)
{
	struct libeth_xdpsq_napi_stats ss = { };
	struct idpf_tx_queue *xdpsq = _xdpsq;
	u32 tx_ntc = xdpsq->next_to_clean;
	u32 tx_cnt = xdpsq->desc_count;
	struct xdp_frame_bulk bq;
	struct libeth_cq_pp cp = {
		.dev	= xdpsq->dev,
		.bq	= &bq,
		.xss	= &ss,
		.napi	= true,
	};
	u32 done_frames;

	done_frames = idpf_xdpsq_poll(xdpsq, budget);
	if (unlikely(!done_frames))
		return 0;

	xdp_frame_bulk_init(&bq);

	for (u32 i = 0; likely(i < done_frames); i++) {
		libeth_xdp_complete_tx(&xdpsq->tx_buf[tx_ntc], &cp);

		if (unlikely(++tx_ntc == tx_cnt))
			tx_ntc = 0;
	}

	xdp_flush_frame_bulk(&bq);

	xdpsq->next_to_clean = tx_ntc;
	xdpsq->pending -= done_frames;
	xdpsq->xdp_tx -= cp.xdp_tx;

	return done_frames;
}

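/**
 * idpf_xdp_tx_prep - prepare an XDP Tx queue for sending a bulk of frames
 * @_xdpsq: XDP Tx queue to prepare, untyped to match the libeth callback
 * @sq: libeth XDP SQ structure to fill with the ring pointers
 *
 * Locks the queue and, if the number of free descriptors dropped below the
 * threshold, tries to complete pending descriptors first.
 *
 * Return: number of free descriptors on the ring.
 */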
static u32 idpf_xdp_tx_prep(void *_xdpsq, struct libeth_xdpsq *sq)
{
	struct idpf_tx_queue *xdpsq = _xdpsq;
	u32 free;

	libeth_xdpsq_lock(&xdpsq->xdp_lock);

	free = xdpsq->desc_count - xdpsq->pending;
	if (free < xdpsq->thresh)
		free += idpf_xdpsq_complete(xdpsq, xdpsq->thresh);

	*sq = (struct libeth_xdpsq){
		.sqes		= xdpsq->tx_buf,
		.descs		= xdpsq->desc_ring,
		.count		= xdpsq->desc_count,
		.lock		= &xdpsq->xdp_lock,
		.ntu		= &xdpsq->next_to_use,
		.pending	= &xdpsq->pending,
		.xdp_tx		= &xdpsq->xdp_tx,
	};

	return free;
}

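/*
 * Instantiate the XDP SQ cleanup timer and the XDP_TX / .ndo_xdp_xmit bulk
 * flush helpers from the libeth_xdp templates, wiring in idpf_xdp_tx_prep()
 * and idpf_xdp_tx_xmit().
 */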
LIBETH_XDP_DEFINE_START();
LIBETH_XDP_DEFINE_TIMER(static idpf_xdp_tx_timer, idpf_xdpsq_complete);
LIBETH_XDP_DEFINE_FLUSH_TX(idpf_xdp_tx_flush_bulk, idpf_xdp_tx_prep,
			   idpf_xdp_tx_xmit);
LIBETH_XDP_DEFINE_FLUSH_XMIT(static idpf_xdp_xmit_flush_bulk, idpf_xdp_tx_prep,
			     idpf_xdp_tx_xmit);
LIBETH_XDP_DEFINE_END();

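/**
 * idpf_xdp_xmit - send frames queued by the stack (.ndo_xdp_xmit)
 * @dev: netdev to send the frames on
 * @n: number of frames to send
 * @frames: frames to send
 * @flags: transmit flags, e.g. %XDP_XMIT_FLUSH
 *
 * Return: number of frames queued for sending, -ENETDOWN if the carrier or
 *	   the vport link is down, or an error from the libeth xmit helper.
 */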
int idpf_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		  u32 flags)
{
	const struct idpf_netdev_priv *np = netdev_priv(dev);
	const struct idpf_vport *vport = np->vport;

	if (unlikely(!netif_carrier_ok(dev) || !vport->link_up))
		return -ENETDOWN;

	return libeth_xdp_xmit_do_bulk(dev, n, frames, flags,
				       &vport->txqs[vport->xdp_txq_offset],
				       vport->num_xdp_txq,
				       idpf_xdp_xmit_flush_bulk,
				       idpf_xdp_tx_finalize);
}

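/**
 * idpf_xdpmo_rx_hash - get the RSS hash of an Rx frame (xmo_rx_hash)
 * @ctx: XDP buff the frame was received into
 * @hash: destination for the hash value
 * @rss_type: destination for the hash type
 *
 * Reads the parsed packet type and the hash from the Rx descriptor attached
 * to the XDP buff.
 *
 * Return: 0 on success, -ENODATA if the descriptor carries no hash for this
 *	   packet type.
 */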
static int idpf_xdpmo_rx_hash(const struct xdp_md *ctx, u32 *hash,
			      enum xdp_rss_hash_type *rss_type)
{
	const struct libeth_xdp_buff *xdp = (typeof(xdp))ctx;
	struct idpf_xdp_rx_desc desc __uninitialized;
	const struct idpf_rx_queue *rxq;
	struct libeth_rx_pt pt;

	rxq = libeth_xdp_buff_to_rq(xdp, typeof(*rxq), xdp_rxq);

	idpf_xdp_get_qw0(&desc, xdp->desc);

	pt = rxq->rx_ptype_lkup[idpf_xdp_rx_pt(&desc)];
	if (!libeth_rx_pt_has_hash(rxq->xdp_rxq.dev, pt))
		return -ENODATA;

	idpf_xdp_get_qw2(&desc, xdp->desc);

	return libeth_xdpmo_rx_hash(hash, rss_type, idpf_xdp_rx_hash(&desc),
				    pt);
}

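/* Rx metadata callbacks backing the bpf_xdp_metadata_*() kfuncs */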
static const struct xdp_metadata_ops idpf_xdpmo = {
	.xmo_rx_hash		= idpf_xdpmo_rx_hash,
};

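/**
 * idpf_xdp_set_features - advertise XDP capabilities on a vport's netdev
 * @vport: vport to configure
 *
 * Only the split queue model supports XDP; %XDP_REDIRECT target support is
 * toggled separately when a program is (un)installed.
 */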
void idpf_xdp_set_features(const struct idpf_vport *vport)
{
	if (!idpf_is_queue_model_split(vport->rxq_model))
		return;

	libeth_xdp_set_features_noredir(vport->netdev, &idpf_xdpmo);
}

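/**
 * idpf_xdp_setup_prog - install or remove an XDP program on a vport
 * @vport: vport to configure
 * @xdp: netdev BPF command carrying the program and the extack
 *
 * If the on/off state of XDP does not change (or the netdev is not fully
 * registered), the program is simply swapped on the existing queues.
 * Otherwise, the new program is stored in the persistent vport config and a
 * soft reset is initiated so that the queues get re-created with (or
 * without) the XDP resources.
 *
 * Return: 0 on success, -ENOSPC if no Tx queues can be reserved for XDP,
 *	   or an error code from the soft reset.
 */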
static int idpf_xdp_setup_prog(struct idpf_vport *vport,
			       const struct netdev_bpf *xdp)
{
	const struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
	struct bpf_prog *old, *prog = xdp->prog;
	struct idpf_vport_config *cfg;
	int ret;

	cfg = vport->adapter->vport_config[vport->idx];

	if (test_bit(IDPF_REMOVE_IN_PROG, vport->adapter->flags) ||
	    !test_bit(IDPF_VPORT_REG_NETDEV, cfg->flags) ||
	    !!vport->xdp_prog == !!prog) {
		if (np->state == __IDPF_VPORT_UP)
			idpf_xdp_copy_prog_to_rqs(vport, prog);

		old = xchg(&vport->xdp_prog, prog);
		if (old)
			bpf_prog_put(old);

		cfg->user_config.xdp_prog = prog;

		return 0;
	}

	if (!vport->num_xdp_txq && vport->num_txq == cfg->max_q.max_txq) {
		NL_SET_ERR_MSG_MOD(xdp->extack,
				   "No Tx queues available for XDP, please decrease the number of regular SQs");
		return -ENOSPC;
	}

	old = cfg->user_config.xdp_prog;
	cfg->user_config.xdp_prog = prog;

	ret = idpf_initiate_soft_reset(vport, IDPF_SR_Q_CHANGE);
	if (ret) {
		NL_SET_ERR_MSG_MOD(xdp->extack,
				   "Could not reopen the vport after XDP setup");

		cfg->user_config.xdp_prog = old;
		old = prog;
	}

	if (old)
		bpf_prog_put(old);

	libeth_xdp_set_redirect(vport->netdev, vport->xdp_prog);

	return ret;
}

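/**
 * idpf_xdp - handle netdev BPF commands (.ndo_bpf)
 * @dev: netdev the command was issued for
 * @xdp: netdev BPF command
 *
 * Return: 0 on success, -EOPNOTSUPP if the command or the Tx queue model is
 *	   not supported, -errno on failure.
 */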
int idpf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct idpf_vport *vport;
	int ret;

	idpf_vport_ctrl_lock(dev);
	vport = idpf_netdev_to_vport(dev);

	if (!idpf_is_queue_model_split(vport->txq_model))
		goto notsupp;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		ret = idpf_xdp_setup_prog(vport, xdp);
		break;
	default:
notsupp:
		ret = -EOPNOTSUPP;
		break;
	}

	idpf_vport_ctrl_unlock(dev);

	return ret;
}
455