xref: /linux/drivers/net/ethernet/intel/idpf/xdp.c (revision 9100a28c8bb4270744942cf834efcd80f1acda7d)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2025 Intel Corporation */

#include "idpf.h"
#include "idpf_ptp.h"
#include "idpf_virtchnl.h"
#include "xdp.h"
#include "xsk.h"

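/**
 * idpf_rxq_for_each - invoke a function on each Rx queue of a vport
 * @rsrc: queue and vector resources containing the Rx queues to walk
 * @fn: function to invoke on each queue
 * @arg: argument passed through to @fn
 *
 * Walks the Rx queue groups in both the split and single queue models.
 *
 * Return: 0 on success, -ENETDOWN if the queue groups are not allocated,
 * or the first non-zero error returned by @fn.
 */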
static int idpf_rxq_for_each(const struct idpf_q_vec_rsrc *rsrc,
			     int (*fn)(struct idpf_rx_queue *rxq, void *arg),
			     void *arg)
{
	bool splitq = idpf_is_queue_model_split(rsrc->rxq_model);

	if (!rsrc->rxq_grps)
		return -ENETDOWN;

	for (u32 i = 0; i < rsrc->num_rxq_grp; i++) {
		const struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
		u32 num_rxq;

		if (splitq)
			num_rxq = rx_qgrp->splitq.num_rxq_sets;
		else
			num_rxq = rx_qgrp->singleq.num_rxq;

		for (u32 j = 0; j < num_rxq; j++) {
			struct idpf_rx_queue *q;
			int err;

			if (splitq)
				q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
			else
				q = rx_qgrp->singleq.rxqs[j];

			err = fn(q, arg);
			if (err)
				return err;
		}
	}

	return 0;
}

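/**
 * __idpf_xdp_rxq_info_init - register XDP Rx queue info for one Rx queue
 * @rxq: Rx queue to register
 * @arg: unused callback argument
 *
 * Register the queue with the XDP Rx info API, then attach the memory
 * model: the XSk buffer pool when the queue is in XSk mode, the backing
 * page_pool otherwise. For the split queue model, also cache the vport's
 * XDP Tx queue array in the Rx queue.
 *
 * Return: 0 on success, -errno on registration failure.
 */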
static int __idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq, void *arg)
{
	const struct idpf_vport *vport = rxq->q_vector->vport;
	const struct idpf_q_vec_rsrc *rsrc;
	u32 frag_size = 0;
	bool split;
	int err;

	if (idpf_queue_has(XSK, rxq))
		frag_size = rxq->bufq_sets[0].bufq.truesize;

	err = __xdp_rxq_info_reg(&rxq->xdp_rxq, vport->netdev, rxq->idx,
				 rxq->q_vector->napi.napi_id,
				 frag_size);
	if (err)
		return err;

	rsrc = &vport->dflt_qv_rsrc;
	split = idpf_is_queue_model_split(rsrc->rxq_model);

	if (idpf_queue_has(XSK, rxq)) {
		err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
						 MEM_TYPE_XSK_BUFF_POOL,
						 rxq->pool);
		if (err)
			goto unreg;
	} else {
		const struct page_pool *pp;

		pp = split ? rxq->bufq_sets[0].bufq.pp : rxq->pp;
		xdp_rxq_info_attach_page_pool(&rxq->xdp_rxq, pp);
	}

	if (!split)
		return 0;

	rxq->xdpsqs = &vport->txqs[rsrc->xdp_txq_offset];
	rxq->num_xdp_txq = vport->num_xdp_txq;

	return 0;

unreg:
	xdp_rxq_info_unreg(&rxq->xdp_rxq);

	return err;
}

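/**
 * idpf_xdp_rxq_info_init - register XDP Rx queue info for a single queue
 * @rxq: Rx queue to register
 *
 * Return: 0 on success, -errno on failure.
 */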
int idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq)
{
	return __idpf_xdp_rxq_info_init(rxq, NULL);
}

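/**
 * idpf_xdp_rxq_info_init_all - register XDP Rx queue info for all Rx queues
 * @rsrc: queue and vector resources containing the Rx queues
 *
 * Return: 0 on success, -errno on failure.
 */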
int idpf_xdp_rxq_info_init_all(const struct idpf_q_vec_rsrc *rsrc)
{
	return idpf_rxq_for_each(rsrc, __idpf_xdp_rxq_info_init, NULL);
}

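/**
 * __idpf_xdp_rxq_info_deinit - unregister XDP Rx queue info for one Rx queue
 * @rxq: Rx queue to unregister
 * @arg: queue model, passed as an integer cast to a pointer
 *
 * Detach the memory model for non-XSk queues and unregister the queue from
 * the XDP Rx info API. For the split queue model, also drop the cached XDP
 * Tx queue array.
 *
 * Return: always 0.
 */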
static int __idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, void *arg)
{
	if (idpf_is_queue_model_split((size_t)arg)) {
		rxq->xdpsqs = NULL;
		rxq->num_xdp_txq = 0;
	}

	if (!idpf_queue_has(XSK, rxq))
		xdp_rxq_info_detach_mem_model(&rxq->xdp_rxq);

	xdp_rxq_info_unreg(&rxq->xdp_rxq);

	return 0;
}

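/**
 * idpf_xdp_rxq_info_deinit - unregister XDP Rx queue info for a single queue
 * @rxq: Rx queue to unregister
 * @model: queue model of the vport (single or split)
 */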
void idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, u32 model)
{
	__idpf_xdp_rxq_info_deinit(rxq, (void *)(size_t)model);
}

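/**
 * idpf_xdp_rxq_info_deinit_all - unregister XDP Rx queue info for all queues
 * @rsrc: queue and vector resources containing the Rx queues
 */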
void idpf_xdp_rxq_info_deinit_all(const struct idpf_q_vec_rsrc *rsrc)
{
	idpf_rxq_for_each(rsrc, __idpf_xdp_rxq_info_deinit,
			  (void *)(size_t)rsrc->rxq_model);
}

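/**
 * idpf_xdp_rxq_assign_prog - assign an XDP program to one Rx queue
 * @rxq: Rx queue to assign the program to
 * @arg: &struct bpf_prog to install, %NULL to remove the current one
 *
 * Takes a reference on the new program and drops the one held on the old.
 *
 * Return: always 0.
 */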
static int idpf_xdp_rxq_assign_prog(struct idpf_rx_queue *rxq, void *arg)
{
	struct bpf_prog *prog = arg;
	struct bpf_prog *old;

	if (prog)
		bpf_prog_inc(prog);

	old = rcu_replace_pointer(rxq->xdp_prog, prog, lockdep_rtnl_is_held());
	if (old)
		bpf_prog_put(old);

	return 0;
}

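/**
 * idpf_xdp_copy_prog_to_rqs - propagate an XDP program to all Rx queues
 * @rsrc: queue and vector resources containing the Rx queues
 * @xdp_prog: XDP program to install, %NULL to remove the current one
 */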
void idpf_xdp_copy_prog_to_rqs(const struct idpf_q_vec_rsrc *rsrc,
			       struct bpf_prog *xdp_prog)
{
	idpf_rxq_for_each(rsrc, idpf_xdp_rxq_assign_prog, xdp_prog);
}

static void idpf_xdp_tx_timer(struct work_struct *work);

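/**
 * idpf_xdpsqs_get - initialize the XDP Tx queues of a vport
 * @vport: vport whose XDPSQs to set up
 *
 * Allocate the per-queue completion timers, then convert the tail of the
 * vport's Tx queue array into XDPSQs: mark the queues and their completion
 * queues as %XDP and %NOIRQ, take the lock references (shared when several
 * queues map to one CPU) and set up the cleaning timers and thresholds.
 *
 * Return: 0 on success or when XDP is not enabled, -ENOMEM on allocation
 * failure.
 */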
int idpf_xdpsqs_get(const struct idpf_vport *vport)
{
	struct libeth_xdpsq_timer **timers __free(kvfree) = NULL;
	struct net_device *dev;
	u32 sqs;

	if (!idpf_xdp_enabled(vport))
		return 0;

	timers = kvzalloc_objs(*timers, vport->num_xdp_txq);
	if (!timers)
		return -ENOMEM;

	for (u32 i = 0; i < vport->num_xdp_txq; i++) {
		timers[i] = kzalloc_node(sizeof(*timers[i]), GFP_KERNEL,
					 cpu_to_mem(i));
		if (!timers[i]) {
			for (int j = i - 1; j >= 0; j--)
				kfree(timers[j]);

			return -ENOMEM;
		}
	}

	dev = vport->netdev;
	sqs = vport->dflt_qv_rsrc.xdp_txq_offset;

	for (u32 i = sqs; i < vport->num_txq; i++) {
		struct idpf_tx_queue *xdpsq = vport->txqs[i];

		xdpsq->complq = xdpsq->txq_grp->complq;
		kfree(xdpsq->refillq);
		xdpsq->refillq = NULL;

		idpf_queue_clear(FLOW_SCH_EN, xdpsq);
		idpf_queue_clear(FLOW_SCH_EN, xdpsq->complq);
		idpf_queue_set(NOIRQ, xdpsq);
		idpf_queue_set(XDP, xdpsq);
		idpf_queue_set(XDP, xdpsq->complq);

		xdpsq->timer = timers[i - sqs];
		libeth_xdpsq_get(&xdpsq->xdp_lock, dev, vport->xdpsq_share);
		libeth_xdpsq_init_timer(xdpsq->timer, xdpsq, &xdpsq->xdp_lock,
					idpf_xdp_tx_timer);

		xdpsq->pending = 0;
		xdpsq->xdp_tx = 0;
		xdpsq->thresh = libeth_xdp_queue_threshold(xdpsq->desc_count);
	}

	return 0;
}

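/**
 * idpf_xdpsqs_put - release the XDP Tx queues of a vport
 * @vport: vport whose XDPSQs to tear down
 *
 * Undo what idpf_xdpsqs_get() did: stop and free the cleaning timers, drop
 * the lock references and clear the XDP-specific queue flags.
 */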
void idpf_xdpsqs_put(const struct idpf_vport *vport)
{
	struct net_device *dev;
	u32 sqs;

	if (!idpf_xdp_enabled(vport))
		return;

	dev = vport->netdev;
	sqs = vport->dflt_qv_rsrc.xdp_txq_offset;

	for (u32 i = sqs; i < vport->num_txq; i++) {
		struct idpf_tx_queue *xdpsq = vport->txqs[i];

		if (!idpf_queue_has_clear(XDP, xdpsq))
			continue;

		libeth_xdpsq_deinit_timer(xdpsq->timer);
		libeth_xdpsq_put(&xdpsq->xdp_lock, dev);

		kfree(xdpsq->timer);
		xdpsq->refillq = NULL;
		idpf_queue_clear(NOIRQ, xdpsq);
	}
}

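/**
 * idpf_xdp_parse_cqe - parse one 4-byte Tx completion descriptor
 * @desc: completion descriptor to parse
 * @gen: expected generation bit for the current pass over the ring
 *
 * Return: the reported queue head (the new next-to-clean) on an RS
 * completion, -ENODATA if the generation bit does not match (descriptor
 * not yet written back), -EINVAL on an unexpected completion type.
 */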
static int idpf_xdp_parse_cqe(const struct idpf_splitq_4b_tx_compl_desc *desc,
			      bool gen)
{
	u32 val;

#ifdef __LIBETH_WORD_ACCESS
	val = *(const u32 *)desc;
#else
	/* Build the same u32: queue head in the upper 16 bits, QID,
	 * completion type and generation bit in the lower 16.
	 */
	val = ((u32)le16_to_cpu(desc->q_head_compl_tag.q_head) << 16) |
	      le16_to_cpu(desc->qid_comptype_gen);
#endif
	if (!!(val & IDPF_TXD_COMPLQ_GEN_M) != gen)
		return -ENODATA;

	if (unlikely((val & GENMASK(IDPF_TXD_COMPLQ_GEN_S - 1, 0)) !=
		     FIELD_PREP(IDPF_TXD_COMPLQ_COMPL_TYPE_M,
				IDPF_TXD_COMPLT_RS)))
		return -EINVAL;

	return upper_16_bits(val);
}

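/**
 * idpf_xdpsq_poll - advance the completion queue of an XDPSQ
 * @xdpsq: XDP Tx queue whose completion queue to poll
 * @budget: number of frames after which to stop polling
 *
 * Walk the completion ring until the first descriptor still owned by the
 * HW (generation mismatch), tracking the last reported queue head.
 *
 * Return: number of Tx descriptors completed by the HW, 0 if none.
 */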
u32 idpf_xdpsq_poll(struct idpf_tx_queue *xdpsq, u32 budget)
{
	struct idpf_compl_queue *cq = xdpsq->complq;
	u32 tx_ntc = xdpsq->next_to_clean;
	u32 tx_cnt = xdpsq->desc_count;
	u32 ntc = cq->next_to_clean;
	u32 cnt = cq->desc_count;
	u32 done_frames;
	bool gen;

	gen = idpf_queue_has(GEN_CHK, cq);

	for (done_frames = 0; done_frames < budget; ) {
		int ret;

		ret = idpf_xdp_parse_cqe(&cq->comp_4b[ntc], gen);
		if (ret >= 0) {
			/* @ret is the reported queue head; its distance from
			 * the current next-to-clean is the number of frames
			 * finished so far.
			 */
			done_frames = ret > tx_ntc ? ret - tx_ntc :
						     ret + tx_cnt - tx_ntc;
			goto next;
		}

		switch (ret) {
		case -ENODATA:
			goto out;
		case -EINVAL:
			break;
		}

next:
		if (unlikely(++ntc == cnt)) {
			/* The expected generation bit flips on each wrap */
			ntc = 0;
			gen = !gen;
			idpf_queue_change(GEN_CHK, cq);
		}
	}

out:
	cq->next_to_clean = ntc;

	return done_frames;
}

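/**
 * idpf_xdpsq_complete - clean an XDP Tx queue after HW completions
 * @_xdpsq: XDP Tx queue to clean, passed as a void pointer for the timer
 * @budget: maximum number of frames to complete
 *
 * Poll the completion queue, free the sent buffers in bulk and update the
 * queue's accounting (next-to-clean, pending and in-flight XDP frames).
 *
 * Return: number of frames completed.
 */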
static u32 idpf_xdpsq_complete(void *_xdpsq, u32 budget)
{
	struct libeth_xdpsq_napi_stats ss = { };
	struct idpf_tx_queue *xdpsq = _xdpsq;
	u32 tx_ntc = xdpsq->next_to_clean;
	u32 tx_cnt = xdpsq->desc_count;
	struct xdp_frame_bulk bq;
	struct libeth_cq_pp cp = {
		.dev	= xdpsq->dev,
		.bq	= &bq,
		.xss	= &ss,
		.napi	= true,
	};
	u32 done_frames;

	done_frames = idpf_xdpsq_poll(xdpsq, budget);
	if (unlikely(!done_frames))
		return 0;

	xdp_frame_bulk_init(&bq);

	for (u32 i = 0; likely(i < done_frames); i++) {
		libeth_xdp_complete_tx(&xdpsq->tx_buf[tx_ntc], &cp);

		if (unlikely(++tx_ntc == tx_cnt))
			tx_ntc = 0;
	}

	xdp_flush_frame_bulk(&bq);

	xdpsq->next_to_clean = tx_ntc;
	xdpsq->pending -= done_frames;
	xdpsq->xdp_tx -= cp.xdp_tx;

	return done_frames;
}

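/**
 * idpf_xdp_tx_prep - prepare an XDPSQ for transmitting frames
 * @_xdpsq: XDP Tx queue to prepare, passed as a void pointer
 * @sq: libeth XDPSQ abstraction to fill in
 *
 * Lock the queue, reclaim completed descriptors if the number of free ones
 * fell below the threshold, and export the queue internals to libeth_xdp.
 *
 * Return: number of free descriptors available for sending.
 */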
static u32 idpf_xdp_tx_prep(void *_xdpsq, struct libeth_xdpsq *sq)
{
	struct idpf_tx_queue *xdpsq = _xdpsq;
	u32 free;

	libeth_xdpsq_lock(&xdpsq->xdp_lock);

	free = xdpsq->desc_count - xdpsq->pending;
	if (free < xdpsq->thresh)
		free += idpf_xdpsq_complete(xdpsq, xdpsq->thresh);

	*sq = (struct libeth_xdpsq){
		.sqes		= xdpsq->tx_buf,
		.descs		= xdpsq->desc_ring,
		.count		= xdpsq->desc_count,
		.lock		= &xdpsq->xdp_lock,
		.ntu		= &xdpsq->next_to_use,
		.pending	= &xdpsq->pending,
		.xdp_tx		= &xdpsq->xdp_tx,
	};

	return free;
}

/* Generate the cleaning timer and the XDP_TX / .ndo_xdp_xmit flush helpers
 * on top of the prep and complete callbacks above.
 */
LIBETH_XDP_DEFINE_START();
LIBETH_XDP_DEFINE_TIMER(static idpf_xdp_tx_timer, idpf_xdpsq_complete);
LIBETH_XDP_DEFINE_FLUSH_TX(idpf_xdp_tx_flush_bulk, idpf_xdp_tx_prep,
			   idpf_xdp_tx_xmit);
LIBETH_XDP_DEFINE_FLUSH_XMIT(static idpf_xdp_xmit_flush_bulk, idpf_xdp_tx_prep,
			     idpf_xdp_tx_xmit);
LIBETH_XDP_DEFINE_END();

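/**
 * idpf_xdp_xmit - send XDP frames queued by the stack (.ndo_xdp_xmit)
 * @dev: network device
 * @n: number of frames in @frames
 * @frames: XDP frames to transmit
 * @flags: transmit flags (%XDP_XMIT_FLUSH etc.)
 *
 * Return: number of frames sent, -ENETDOWN if the carrier or link is down.
 */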
int idpf_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		  u32 flags)
{
	const struct idpf_netdev_priv *np = netdev_priv(dev);
	const struct idpf_vport *vport = np->vport;
	u32 xdp_txq_offset;

	if (unlikely(!netif_carrier_ok(dev) || !vport->link_up))
		return -ENETDOWN;

	xdp_txq_offset = vport->dflt_qv_rsrc.xdp_txq_offset;

	return libeth_xdp_xmit_do_bulk(dev, n, frames, flags,
				       &vport->txqs[xdp_txq_offset],
				       vport->num_xdp_txq,
				       idpf_xdp_xmit_flush_bulk,
				       idpf_xdp_tx_finalize);
}

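/**
 * idpf_xdpmo_rx_hash - XDP metadata op for the Rx hash
 * @ctx: XDP buffer, really a &struct libeth_xdp_buff
 * @hash: destination for the hash value
 * @rss_type: destination for the RSS hash type
 *
 * Return: 0 on success, -ENODATA if the descriptor carries no hash.
 */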
static int idpf_xdpmo_rx_hash(const struct xdp_md *ctx, u32 *hash,
			      enum xdp_rss_hash_type *rss_type)
{
	const struct libeth_xdp_buff *xdp = (typeof(xdp))ctx;
	struct idpf_xdp_rx_desc desc __uninitialized;
	const struct idpf_rx_queue *rxq;
	struct libeth_rx_pt pt;

	rxq = libeth_xdp_buff_to_rq(xdp, typeof(*rxq), xdp_rxq);

	idpf_xdp_get_qw0(&desc, xdp->desc);

	pt = rxq->rx_ptype_lkup[idpf_xdp_rx_pt(&desc)];
	if (!libeth_rx_pt_has_hash(rxq->xdp_rxq.dev, pt))
		return -ENODATA;

	idpf_xdp_get_qw2(&desc, xdp->desc);

	return libeth_xdpmo_rx_hash(hash, rss_type, idpf_xdp_rx_hash(&desc),
				    pt);
}

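/**
 * idpf_xdpmo_rx_timestamp - XDP metadata op for the Rx HW timestamp
 * @ctx: XDP buffer, really a &struct libeth_xdp_buff
 * @timestamp: destination for the timestamp in nanoseconds
 *
 * Extend the 32-bit timestamp from the descriptor to 64 bits using the
 * cached PHC time.
 *
 * Return: 0 on success, -ENODATA if PTP is disabled on the queue or the
 * descriptor carries no valid timestamp.
 */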
static int idpf_xdpmo_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
{
	const struct libeth_xdp_buff *xdp = (typeof(xdp))ctx;
	struct idpf_xdp_rx_desc desc __uninitialized;
	const struct idpf_rx_queue *rxq;
	u64 cached_time, ts_ns;
	u32 ts_high;

	rxq = libeth_xdp_buff_to_rq(xdp, typeof(*rxq), xdp_rxq);

	if (!idpf_queue_has(PTP, rxq))
		return -ENODATA;

	idpf_xdp_get_qw1(&desc, xdp->desc);

	if (!(idpf_xdp_rx_ts_low(&desc) & VIRTCHNL2_RX_FLEX_TSTAMP_VALID))
		return -ENODATA;

	cached_time = READ_ONCE(rxq->cached_phc_time);

	idpf_xdp_get_qw3(&desc, xdp->desc);

	ts_high = idpf_xdp_rx_ts_high(&desc);
	ts_ns = idpf_ptp_tstamp_extend_32b_to_64b(cached_time, ts_high);

	*timestamp = ts_ns;
	return 0;
}

static const struct xdp_metadata_ops idpf_xdpmo = {
	.xmo_rx_hash		= idpf_xdpmo_rx_hash,
	.xmo_rx_timestamp	= idpf_xdpmo_rx_timestamp,
};

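/**
 * idpf_xdp_set_features - set the netdev XDP feature bits for a vport
 * @vport: vport to configure
 *
 * XDP is advertised on the split queue model only.
 */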
void idpf_xdp_set_features(const struct idpf_vport *vport)
{
	if (!idpf_is_queue_model_split(vport->dflt_qv_rsrc.rxq_model))
		return;

	libeth_xdp_set_features_noredir(vport->netdev, &idpf_xdpmo,
					idpf_get_max_tx_bufs(vport->adapter),
					libeth_xsktmo);
}

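/**
 * idpf_xdp_setup_prog - install or remove an XDP program
 * @vport: vport to configure
 * @xdp: XDP program and extack from the %XDP_SETUP_PROG command
 *
 * When the on/off state does not change (or the netdev is being removed or
 * was never registered), only swap the program pointers. Otherwise, save
 * the new program in the vport config and perform a soft reset to
 * reallocate the queues, rolling the config back if the reset fails.
 *
 * Return: 0 on success, -ENOSPC if no Tx queues are left for XDP, or the
 * error from the soft reset.
 */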
static int idpf_xdp_setup_prog(struct idpf_vport *vport,
			       const struct netdev_bpf *xdp)
{
	const struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
	const struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
	struct bpf_prog *old, *prog = xdp->prog;
	struct idpf_vport_config *cfg;
	int ret;

	cfg = vport->adapter->vport_config[vport->idx];

	if (test_bit(IDPF_REMOVE_IN_PROG, vport->adapter->flags) ||
	    !test_bit(IDPF_VPORT_REG_NETDEV, cfg->flags) ||
	    !!vport->xdp_prog == !!prog) {
		if (test_bit(IDPF_VPORT_UP, np->state))
			idpf_xdp_copy_prog_to_rqs(rsrc, prog);

		old = xchg(&vport->xdp_prog, prog);
		if (old)
			bpf_prog_put(old);

		cfg->user_config.xdp_prog = prog;

		return 0;
	}

	if (!vport->num_xdp_txq && vport->num_txq == cfg->max_q.max_txq) {
		NL_SET_ERR_MSG_MOD(xdp->extack,
				   "No Tx queues available for XDP, please decrease the number of regular SQs");
		return -ENOSPC;
	}

	old = cfg->user_config.xdp_prog;
	cfg->user_config.xdp_prog = prog;

	ret = idpf_initiate_soft_reset(vport, IDPF_SR_Q_CHANGE);
	if (ret) {
		NL_SET_ERR_MSG_MOD(xdp->extack,
				   "Could not reopen the vport after XDP setup");

		cfg->user_config.xdp_prog = old;
		old = prog;
	}

	if (old)
		bpf_prog_put(old);

	libeth_xdp_set_redirect(vport->netdev, vport->xdp_prog);

	return ret;
}

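/**
 * idpf_xdp - handle the XDP commands for a netdev (.ndo_bpf)
 * @dev: network device
 * @xdp: command and its parameters
 *
 * Return: 0 on success, -EOPNOTSUPP for unsupported commands or queue
 * models, -errno otherwise.
 */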
int idpf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct idpf_vport *vport;
	int ret;

	idpf_vport_ctrl_lock(dev);
	vport = idpf_netdev_to_vport(dev);

	/* XDP is supported on the split Tx queue model only */
	if (!idpf_is_queue_model_split(vport->dflt_qv_rsrc.txq_model))
		goto notsupp;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		ret = idpf_xdp_setup_prog(vport, xdp);
		break;
	case XDP_SETUP_XSK_POOL:
		ret = idpf_xsk_pool_setup(vport, xdp);
		break;
	default:
notsupp:
		ret = -EOPNOTSUPP;
		break;
	}

	idpf_vport_ctrl_unlock(dev);

	return ret;
}