xref: /linux/drivers/net/ethernet/intel/idpf/xdp.c (revision 07fdad3a93756b872da7b53647715c48d0f4a2d0)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2025 Intel Corporation */

#include "idpf.h"
#include "idpf_virtchnl.h"
#include "xdp.h"
#include "xsk.h"

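/**
 * idpf_rxq_for_each - iterate over all Rx queues of a vport
 * @vport: vport whose Rx queues to walk
 * @fn: callback invoked for each Rx queue
 * @arg: opaque argument passed to @fn
 *
 * Handles both the split and the single queue models.
 *
 * Return: 0 on success, -ENETDOWN if the Rx queue groups are not allocated,
 *         or the first non-zero value returned by @fn.
 */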
static int idpf_rxq_for_each(const struct idpf_vport *vport,
			     int (*fn)(struct idpf_rx_queue *rxq, void *arg),
			     void *arg)
{
	bool splitq = idpf_is_queue_model_split(vport->rxq_model);

	if (!vport->rxq_grps)
		return -ENETDOWN;

	for (u32 i = 0; i < vport->num_rxq_grp; i++) {
		const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
		u32 num_rxq;

		if (splitq)
			num_rxq = rx_qgrp->splitq.num_rxq_sets;
		else
			num_rxq = rx_qgrp->singleq.num_rxq;

		for (u32 j = 0; j < num_rxq; j++) {
			struct idpf_rx_queue *q;
			int err;

			if (splitq)
				q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
			else
				q = rx_qgrp->singleq.rxqs[j];

			err = fn(q, arg);
			if (err)
				return err;
		}
	}

	return 0;
}

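/**
 * __idpf_xdp_rxq_info_init - register XDP Rx queue info for one Rx queue
 * @rxq: Rx queue to register
 * @arg: unused callback argument
 *
 * Registers the queue's &xdp_rxq_info, attaches either the XSk pool or the
 * page_pool as its memory model and, for the split queue model, caches the
 * pointers to the vport's XDPSQs in the Rx queue.
 *
 * Return: 0 on success, -errno on failure.
 */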
static int __idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq, void *arg)
{
	const struct idpf_vport *vport = rxq->q_vector->vport;
	bool split = idpf_is_queue_model_split(vport->rxq_model);
	int err;

	err = __xdp_rxq_info_reg(&rxq->xdp_rxq, vport->netdev, rxq->idx,
				 rxq->q_vector->napi.napi_id,
				 rxq->rx_buf_size);
	if (err)
		return err;

	if (idpf_queue_has(XSK, rxq)) {
		err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
						 MEM_TYPE_XSK_BUFF_POOL,
						 rxq->pool);
		if (err)
			goto unreg;
	} else {
		const struct page_pool *pp;

		pp = split ? rxq->bufq_sets[0].bufq.pp : rxq->pp;
		xdp_rxq_info_attach_page_pool(&rxq->xdp_rxq, pp);
	}

	if (!split)
		return 0;

	rxq->xdpsqs = &vport->txqs[vport->xdp_txq_offset];
	rxq->num_xdp_txq = vport->num_xdp_txq;

	return 0;

unreg:
	xdp_rxq_info_unreg(&rxq->xdp_rxq);

	return err;
}

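/**
 * idpf_xdp_rxq_info_init - initialize XDP Rx queue info for one Rx queue
 * @rxq: Rx queue to initialize
 *
 * Return: 0 on success, -errno on failure.
 */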
int idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq)
{
	return __idpf_xdp_rxq_info_init(rxq, NULL);
}

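/**
 * idpf_xdp_rxq_info_init_all - initialize XDP Rx queue info for all Rx queues
 * @vport: vport whose Rx queues to initialize
 *
 * Return: 0 on success, -errno on failure.
 */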
int idpf_xdp_rxq_info_init_all(const struct idpf_vport *vport)
{
	return idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_init, NULL);
}

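/**
 * __idpf_xdp_rxq_info_deinit - unregister XDP Rx queue info for one Rx queue
 * @rxq: Rx queue to clean up
 * @arg: queue model of the owning vport, cast to a pointer
 *
 * Return: always 0.
 */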
static int __idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, void *arg)
{
	if (idpf_is_queue_model_split((size_t)arg)) {
		rxq->xdpsqs = NULL;
		rxq->num_xdp_txq = 0;
	}

	if (!idpf_queue_has(XSK, rxq))
		xdp_rxq_info_detach_mem_model(&rxq->xdp_rxq);

	xdp_rxq_info_unreg(&rxq->xdp_rxq);

	return 0;
}

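/**
 * idpf_xdp_rxq_info_deinit - deinitialize XDP Rx queue info for one Rx queue
 * @rxq: Rx queue to clean up
 * @model: Rx queue model of the owning vport
 */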
void idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, u32 model)
{
	__idpf_xdp_rxq_info_deinit(rxq, (void *)(size_t)model);
}

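/**
 * idpf_xdp_rxq_info_deinit_all - deinitialize XDP Rx queue info for all queues
 * @vport: vport whose Rx queues to clean up
 */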
void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport)
{
	idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_deinit,
			  (void *)(size_t)vport->rxq_model);
}

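/**
 * idpf_xdp_rxq_assign_prog - install an XDP program on one Rx queue
 * @rxq: Rx queue to install the program on
 * @arg: &struct bpf_prog to install, %NULL to remove the current one
 *
 * Takes an extra reference on the new program and releases the reference
 * held on the previously installed one, if any.
 *
 * Return: always 0.
 */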
static int idpf_xdp_rxq_assign_prog(struct idpf_rx_queue *rxq, void *arg)
{
	struct bpf_prog *prog = arg;
	struct bpf_prog *old;

	if (prog)
		bpf_prog_inc(prog);

	old = rcu_replace_pointer(rxq->xdp_prog, prog, lockdep_rtnl_is_held());
	if (old)
		bpf_prog_put(old);

	return 0;
}

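/**
 * idpf_xdp_copy_prog_to_rqs - propagate an XDP program to all Rx queues
 * @vport: vport whose Rx queues to update
 * @xdp_prog: XDP program to install, %NULL to remove the current one
 */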
void idpf_xdp_copy_prog_to_rqs(const struct idpf_vport *vport,
			       struct bpf_prog *xdp_prog)
{
	idpf_rxq_for_each(vport, idpf_xdp_rxq_assign_prog, xdp_prog);
}

static void idpf_xdp_tx_timer(struct work_struct *work);

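/**
 * idpf_xdpsqs_get - prepare the vport's Tx queues reserved for XDP
 * @vport: vport to prepare the XDPSQs for
 *
 * Allocates the completion cleanup timers and switches the reserved Tx
 * queues to the XDP mode: no refill queues, no flow scheduling and no
 * interrupts, with the libeth XDPSQ locks and thresholds set up.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */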
int idpf_xdpsqs_get(const struct idpf_vport *vport)
{
	struct libeth_xdpsq_timer **timers __free(kvfree) = NULL;
	struct net_device *dev;
	u32 sqs;

	if (!idpf_xdp_enabled(vport))
		return 0;

	timers = kvcalloc(vport->num_xdp_txq, sizeof(*timers), GFP_KERNEL);
	if (!timers)
		return -ENOMEM;

	for (u32 i = 0; i < vport->num_xdp_txq; i++) {
		timers[i] = kzalloc_node(sizeof(*timers[i]), GFP_KERNEL,
					 cpu_to_mem(i));
		if (!timers[i]) {
			for (int j = i - 1; j >= 0; j--)
				kfree(timers[j]);

			return -ENOMEM;
		}
	}

	dev = vport->netdev;
	sqs = vport->xdp_txq_offset;

	for (u32 i = sqs; i < vport->num_txq; i++) {
		struct idpf_tx_queue *xdpsq = vport->txqs[i];

		xdpsq->complq = xdpsq->txq_grp->complq;
		kfree(xdpsq->refillq);
		xdpsq->refillq = NULL;

		idpf_queue_clear(FLOW_SCH_EN, xdpsq);
		idpf_queue_clear(FLOW_SCH_EN, xdpsq->complq);
		idpf_queue_set(NOIRQ, xdpsq);
		idpf_queue_set(XDP, xdpsq);
		idpf_queue_set(XDP, xdpsq->complq);

		xdpsq->timer = timers[i - sqs];
		libeth_xdpsq_get(&xdpsq->xdp_lock, dev, vport->xdpsq_share);
		libeth_xdpsq_init_timer(xdpsq->timer, xdpsq, &xdpsq->xdp_lock,
					idpf_xdp_tx_timer);

		xdpsq->pending = 0;
		xdpsq->xdp_tx = 0;
		xdpsq->thresh = libeth_xdp_queue_threshold(xdpsq->desc_count);
	}

	return 0;
}

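/**
 * idpf_xdpsqs_put - release the vport's Tx queues reserved for XDP
 * @vport: vport to release the XDPSQs of
 *
 * Reverts idpf_xdpsqs_get(): stops the cleanup timers, releases the libeth
 * XDPSQ locks and frees the timer structures.
 */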
void idpf_xdpsqs_put(const struct idpf_vport *vport)
{
	struct net_device *dev;
	u32 sqs;

	if (!idpf_xdp_enabled(vport))
		return;

	dev = vport->netdev;
	sqs = vport->xdp_txq_offset;

	for (u32 i = sqs; i < vport->num_txq; i++) {
		struct idpf_tx_queue *xdpsq = vport->txqs[i];

		if (!idpf_queue_has_clear(XDP, xdpsq))
			continue;

		libeth_xdpsq_deinit_timer(xdpsq->timer);
		libeth_xdpsq_put(&xdpsq->xdp_lock, dev);

		kfree(xdpsq->timer);
		xdpsq->refillq = NULL;
		idpf_queue_clear(NOIRQ, xdpsq);
	}
}

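/**
 * idpf_xdp_parse_cqe - parse one 4-byte Tx completion descriptor
 * @desc: completion descriptor to parse
 * @gen: expected generation bit value
 *
 * Return: SQ head index reported by the descriptor on success, -ENODATA if
 *         the generation bit does not match (the descriptor has not been
 *         written yet), -EINVAL if the completion type is not
 *         %IDPF_TXD_COMPLT_RS.
 */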
static int idpf_xdp_parse_cqe(const struct idpf_splitq_4b_tx_compl_desc *desc,
			      bool gen)
{
	u32 val;

#ifdef __LIBETH_WORD_ACCESS
	val = *(const u32 *)desc;
#else
	val = ((u32)le16_to_cpu(desc->q_head_compl_tag.q_head) << 16) |
	      le16_to_cpu(desc->qid_comptype_gen);
#endif
	if (!!(val & IDPF_TXD_COMPLQ_GEN_M) != gen)
		return -ENODATA;

	if (unlikely((val & GENMASK(IDPF_TXD_COMPLQ_GEN_S - 1, 0)) !=
		     FIELD_PREP(IDPF_TXD_COMPLQ_COMPL_TYPE_M,
				IDPF_TXD_COMPLT_RS)))
		return -EINVAL;

	return upper_16_bits(val);
}

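/**
 * idpf_xdpsq_poll - poll the completion queue of an XDPSQ
 * @xdpsq: XDPSQ whose completion queue to poll
 * @budget: maximum number of completion descriptors to process
 *
 * Return: number of Tx descriptors completed by HW and ready to be cleaned.
 */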
u32 idpf_xdpsq_poll(struct idpf_tx_queue *xdpsq, u32 budget)
{
	struct idpf_compl_queue *cq = xdpsq->complq;
	u32 tx_ntc = xdpsq->next_to_clean;
	u32 tx_cnt = xdpsq->desc_count;
	u32 ntc = cq->next_to_clean;
	u32 cnt = cq->desc_count;
	u32 done_frames;
	bool gen;

	gen = idpf_queue_has(GEN_CHK, cq);

	for (done_frames = 0; done_frames < budget; ) {
		int ret;

		ret = idpf_xdp_parse_cqe(&cq->comp_4b[ntc], gen);
		if (ret >= 0) {
			done_frames = ret > tx_ntc ? ret - tx_ntc :
						     ret + tx_cnt - tx_ntc;
			goto next;
		}

		switch (ret) {
		case -ENODATA:
			goto out;
		case -EINVAL:
			break;
		}

next:
		if (unlikely(++ntc == cnt)) {
			ntc = 0;
			gen = !gen;
			idpf_queue_change(GEN_CHK, cq);
		}
	}

out:
	cq->next_to_clean = ntc;

	return done_frames;
}

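/**
 * idpf_xdpsq_complete - clean an XDPSQ after polling its completion queue
 * @_xdpsq: XDPSQ to clean (typeless to fit the libeth_xdp callback format)
 * @budget: maximum number of descriptors to clean
 *
 * Return: number of Tx descriptors cleaned.
 */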
static u32 idpf_xdpsq_complete(void *_xdpsq, u32 budget)
{
	struct libeth_xdpsq_napi_stats ss = { };
	struct idpf_tx_queue *xdpsq = _xdpsq;
	u32 tx_ntc = xdpsq->next_to_clean;
	u32 tx_cnt = xdpsq->desc_count;
	struct xdp_frame_bulk bq;
	struct libeth_cq_pp cp = {
		.dev	= xdpsq->dev,
		.bq	= &bq,
		.xss	= &ss,
		.napi	= true,
	};
	u32 done_frames;

	done_frames = idpf_xdpsq_poll(xdpsq, budget);
	if (unlikely(!done_frames))
		return 0;

	xdp_frame_bulk_init(&bq);

	for (u32 i = 0; likely(i < done_frames); i++) {
		libeth_xdp_complete_tx(&xdpsq->tx_buf[tx_ntc], &cp);

		if (unlikely(++tx_ntc == tx_cnt))
			tx_ntc = 0;
	}

	xdp_flush_frame_bulk(&bq);

	xdpsq->next_to_clean = tx_ntc;
	xdpsq->pending -= done_frames;
	xdpsq->xdp_tx -= cp.xdp_tx;

	return done_frames;
}

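/**
 * idpf_xdp_tx_prep - lock an XDPSQ and fill in its parameters for libeth_xdp
 * @_xdpsq: XDPSQ to prepare (typeless to fit the libeth_xdp callback format)
 * @sq: filled with the queue parameters needed for sending frames
 *
 * Cleans the queue first if the number of free descriptors has fallen below
 * the threshold.
 *
 * Return: number of free descriptors in the queue.
 */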
static u32 idpf_xdp_tx_prep(void *_xdpsq, struct libeth_xdpsq *sq)
{
	struct idpf_tx_queue *xdpsq = _xdpsq;
	u32 free;

	libeth_xdpsq_lock(&xdpsq->xdp_lock);

	free = xdpsq->desc_count - xdpsq->pending;
	if (free < xdpsq->thresh)
		free += idpf_xdpsq_complete(xdpsq, xdpsq->thresh);

	*sq = (struct libeth_xdpsq){
		.sqes		= xdpsq->tx_buf,
		.descs		= xdpsq->desc_ring,
		.count		= xdpsq->desc_count,
		.lock		= &xdpsq->xdp_lock,
		.ntu		= &xdpsq->next_to_use,
		.pending	= &xdpsq->pending,
		.xdp_tx		= &xdpsq->xdp_tx,
	};

	return free;
}

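/*
 * Generate the XDPSQ cleanup timer callback and the XDP_TX / .ndo_xdp_xmit
 * bulk-flush helpers from the libeth_xdp templates.
 */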
LIBETH_XDP_DEFINE_START();
LIBETH_XDP_DEFINE_TIMER(static idpf_xdp_tx_timer, idpf_xdpsq_complete);
LIBETH_XDP_DEFINE_FLUSH_TX(idpf_xdp_tx_flush_bulk, idpf_xdp_tx_prep,
			   idpf_xdp_tx_xmit);
LIBETH_XDP_DEFINE_FLUSH_XMIT(static idpf_xdp_xmit_flush_bulk, idpf_xdp_tx_prep,
			     idpf_xdp_tx_xmit);
LIBETH_XDP_DEFINE_END();

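/**
 * idpf_xdp_xmit - send XDP frames queued by the stack (.ndo_xdp_xmit)
 * @dev: network device
 * @n: number of frames to send
 * @frames: frames to send
 * @flags: transmit flags (%XDP_XMIT_FLUSH etc.)
 *
 * Return: number of frames sent on success, negative errno otherwise.
 */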
int idpf_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		  u32 flags)
{
	const struct idpf_netdev_priv *np = netdev_priv(dev);
	const struct idpf_vport *vport = np->vport;

	if (unlikely(!netif_carrier_ok(dev) || !vport->link_up))
		return -ENETDOWN;

	return libeth_xdp_xmit_do_bulk(dev, n, frames, flags,
				       &vport->txqs[vport->xdp_txq_offset],
				       vport->num_xdp_txq,
				       idpf_xdp_xmit_flush_bulk,
				       idpf_xdp_tx_finalize);
}

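/**
 * idpf_xdpmo_rx_hash - XDP metadata op: get the Rx hash from the descriptor
 * @ctx: XDP buff to get the hash from
 * @hash: filled with the Rx hash value
 * @rss_type: filled with the decoded RSS hash type
 *
 * Return: 0 on success, -ENODATA if the descriptor carries no valid hash.
 */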
static int idpf_xdpmo_rx_hash(const struct xdp_md *ctx, u32 *hash,
			      enum xdp_rss_hash_type *rss_type)
{
	const struct libeth_xdp_buff *xdp = (typeof(xdp))ctx;
	struct idpf_xdp_rx_desc desc __uninitialized;
	const struct idpf_rx_queue *rxq;
	struct libeth_rx_pt pt;

	rxq = libeth_xdp_buff_to_rq(xdp, typeof(*rxq), xdp_rxq);

	idpf_xdp_get_qw0(&desc, xdp->desc);

	pt = rxq->rx_ptype_lkup[idpf_xdp_rx_pt(&desc)];
	if (!libeth_rx_pt_has_hash(rxq->xdp_rxq.dev, pt))
		return -ENODATA;

	idpf_xdp_get_qw2(&desc, xdp->desc);

	return libeth_xdpmo_rx_hash(hash, rss_type, idpf_xdp_rx_hash(&desc),
				    pt);
}

static const struct xdp_metadata_ops idpf_xdpmo = {
	.xmo_rx_hash		= idpf_xdpmo_rx_hash,
};

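/**
 * idpf_xdp_set_features - advertise the netdev's XDP and XSk features
 * @vport: vport to advertise the features for
 *
 * XDP is supported only with the split queue model.
 */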
void idpf_xdp_set_features(const struct idpf_vport *vport)
{
	if (!idpf_is_queue_model_split(vport->rxq_model))
		return;

	libeth_xdp_set_features_noredir(vport->netdev, &idpf_xdpmo,
					idpf_get_max_tx_bufs(vport->adapter),
					libeth_xsktmo);
}

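/**
 * idpf_xdp_setup_prog - attach or detach an XDP program
 * @vport: vport to configure
 * @xdp: XDP command payload carrying the program and the extack
 *
 * Swaps the program in place when the XDP enabled/disabled state does not
 * change (or the vport is being removed), otherwise stores the new program
 * in the vport config and performs a soft reset to redistribute the queues
 * between the stack and XDP.
 *
 * Return: 0 on success, -errno on failure.
 */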
static int idpf_xdp_setup_prog(struct idpf_vport *vport,
			       const struct netdev_bpf *xdp)
{
	const struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
	struct bpf_prog *old, *prog = xdp->prog;
	struct idpf_vport_config *cfg;
	int ret;

	cfg = vport->adapter->vport_config[vport->idx];

	if (test_bit(IDPF_REMOVE_IN_PROG, vport->adapter->flags) ||
	    !test_bit(IDPF_VPORT_REG_NETDEV, cfg->flags) ||
	    !!vport->xdp_prog == !!prog) {
		if (np->state == __IDPF_VPORT_UP)
			idpf_xdp_copy_prog_to_rqs(vport, prog);

		old = xchg(&vport->xdp_prog, prog);
		if (old)
			bpf_prog_put(old);

		cfg->user_config.xdp_prog = prog;

		return 0;
	}

	if (!vport->num_xdp_txq && vport->num_txq == cfg->max_q.max_txq) {
		NL_SET_ERR_MSG_MOD(xdp->extack,
				   "No Tx queues available for XDP, please decrease the number of regular SQs");
		return -ENOSPC;
	}

	old = cfg->user_config.xdp_prog;
	cfg->user_config.xdp_prog = prog;

	ret = idpf_initiate_soft_reset(vport, IDPF_SR_Q_CHANGE);
	if (ret) {
		NL_SET_ERR_MSG_MOD(xdp->extack,
				   "Could not reopen the vport after XDP setup");

		cfg->user_config.xdp_prog = old;
		old = prog;
	}

	if (old)
		bpf_prog_put(old);

	libeth_xdp_set_redirect(vport->netdev, vport->xdp_prog);

	return ret;
}

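/**
 * idpf_xdp - handle XDP-related netdev commands (.ndo_bpf)
 * @dev: network device
 * @xdp: XDP command
 *
 * Return: 0 on success, -EOPNOTSUPP if the command or the Tx queue model is
 *         not supported, -errno on failure.
 */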
int idpf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct idpf_vport *vport;
	int ret;

	idpf_vport_ctrl_lock(dev);
	vport = idpf_netdev_to_vport(dev);

	if (!idpf_is_queue_model_split(vport->txq_model))
		goto notsupp;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		ret = idpf_xdp_setup_prog(vport, xdp);
		break;
	case XDP_SETUP_XSK_POOL:
		ret = idpf_xsk_pool_setup(vport, xdp);
		break;
	default:
notsupp:
		ret = -EOPNOTSUPP;
		break;
	}

	idpf_vport_ctrl_unlock(dev);

	return ret;
}
487