// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2025 Intel Corporation */

#include "idpf.h"
#include "idpf_virtchnl.h"
#include "xdp.h"
#include "xsk.h"

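/**
 * idpf_rxq_for_each - invoke a callback on each Rx queue of a vport
 * @vport: vport to iterate over
 * @fn: callback invoked for each Rx queue
 * @arg: opaque argument passed through to @fn
 *
 * Walks every Rx queue group in both the split and single queue models,
 * stopping at the first callback failure.
 *
 * Return: 0 on success, -ENETDOWN if the queue groups are not allocated,
 * or the first non-zero error returned by @fn.
 */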
static int idpf_rxq_for_each(const struct idpf_vport *vport,
			     int (*fn)(struct idpf_rx_queue *rxq, void *arg),
			     void *arg)
{
	bool splitq = idpf_is_queue_model_split(vport->rxq_model);

	if (!vport->rxq_grps)
		return -ENETDOWN;

	for (u32 i = 0; i < vport->num_rxq_grp; i++) {
		const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
		u32 num_rxq;

		if (splitq)
			num_rxq = rx_qgrp->splitq.num_rxq_sets;
		else
			num_rxq = rx_qgrp->singleq.num_rxq;

		for (u32 j = 0; j < num_rxq; j++) {
			struct idpf_rx_queue *q;
			int err;

			if (splitq)
				q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
			else
				q = rx_qgrp->singleq.rxqs[j];

			err = fn(q, arg);
			if (err)
				return err;
		}
	}

	return 0;
}

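/**
 * __idpf_xdp_rxq_info_init - register one Rx queue with the XDP core
 * @rxq: Rx queue to register
 * @arg: unused, present to match the idpf_rxq_for_each() callback type
 *
 * Registers the queue's &xdp_rxq_info and attaches the matching memory
 * model: the XSk buffer pool when the queue is in XSk mode, the page_pool
 * otherwise (taken from the first buffer queue in the split model). For
 * the split model, the XDPSQ array is also exposed to the Rx queue.
 *
 * Return: 0 on success, negative error code on failure.
 */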
static int __idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq, void *arg)
{
	const struct idpf_vport *vport = rxq->q_vector->vport;
	bool split = idpf_is_queue_model_split(vport->rxq_model);
	int err;

	err = __xdp_rxq_info_reg(&rxq->xdp_rxq, vport->netdev, rxq->idx,
				 rxq->q_vector->napi.napi_id,
				 rxq->rx_buf_size);
	if (err)
		return err;

	if (idpf_queue_has(XSK, rxq)) {
		err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
						 MEM_TYPE_XSK_BUFF_POOL,
						 rxq->pool);
		if (err)
			goto unreg;
	} else {
		const struct page_pool *pp;

		pp = split ? rxq->bufq_sets[0].bufq.pp : rxq->pp;
		xdp_rxq_info_attach_page_pool(&rxq->xdp_rxq, pp);
	}

	if (!split)
		return 0;

	rxq->xdpsqs = &vport->txqs[vport->xdp_txq_offset];
	rxq->num_xdp_txq = vport->num_xdp_txq;

	return 0;

unreg:
	xdp_rxq_info_unreg(&rxq->xdp_rxq);

	return err;
}

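/**
 * idpf_xdp_rxq_info_init - initialize XDP Rx queue info for one queue
 * @rxq: Rx queue to initialize
 *
 * Return: 0 on success, negative error code on failure.
 */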
int idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq)
{
	return __idpf_xdp_rxq_info_init(rxq, NULL);
}

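/**
 * idpf_xdp_rxq_info_init_all - initialize XDP Rx queue info for a vport
 * @vport: vport whose Rx queues will be registered
 *
 * Return: 0 on success, negative error code on the first failure.
 */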
int idpf_xdp_rxq_info_init_all(const struct idpf_vport *vport)
{
	return idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_init, NULL);
}

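/**
 * __idpf_xdp_rxq_info_deinit - unregister one Rx queue from the XDP core
 * @rxq: Rx queue to unregister
 * @arg: Rx queue model of the vport, cast to a pointer
 *
 * Mirror of __idpf_xdp_rxq_info_init(): drops the XDPSQ references in the
 * split model, detaches the memory model unless it is owned by the XSk
 * pool, and unregisters the queue's &xdp_rxq_info.
 *
 * Return: always 0, to match the idpf_rxq_for_each() callback type.
 */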
static int __idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, void *arg)
{
	if (idpf_is_queue_model_split((size_t)arg)) {
		rxq->xdpsqs = NULL;
		rxq->num_xdp_txq = 0;
	}

	if (!idpf_queue_has(XSK, rxq))
		xdp_rxq_info_detach_mem_model(&rxq->xdp_rxq);

	xdp_rxq_info_unreg(&rxq->xdp_rxq);

	return 0;
}

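/**
 * idpf_xdp_rxq_info_deinit - deinitialize XDP Rx queue info for one queue
 * @rxq: Rx queue to deinitialize
 * @model: Rx queue model of the vport the queue belongs to
 */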
void idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, u32 model)
{
	__idpf_xdp_rxq_info_deinit(rxq, (void *)(size_t)model);
}

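/**
 * idpf_xdp_rxq_info_deinit_all - deinitialize XDP Rx queue info for a vport
 * @vport: vport whose Rx queues will be unregistered
 */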
void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport)
{
	idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_deinit,
			  (void *)(size_t)vport->rxq_model);
}

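/**
 * idpf_xdp_rxq_assign_prog - attach an XDP program to one Rx queue
 * @rxq: Rx queue to update
 * @arg: XDP program to attach, %NULL to detach
 *
 * Each queue holds its own reference: the new program is bumped before
 * being published via RCU, and the reference of the replaced program, if
 * any, is dropped.
 *
 * Return: always 0, to match the idpf_rxq_for_each() callback type.
 */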
static int idpf_xdp_rxq_assign_prog(struct idpf_rx_queue *rxq, void *arg)
{
	struct bpf_prog *prog = arg;
	struct bpf_prog *old;

	if (prog)
		bpf_prog_inc(prog);

	old = rcu_replace_pointer(rxq->xdp_prog, prog, lockdep_rtnl_is_held());
	if (old)
		bpf_prog_put(old);

	return 0;
}

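/**
 * idpf_xdp_copy_prog_to_rqs - attach an XDP program to all Rx queues
 * @vport: vport whose Rx queues will be updated
 * @xdp_prog: XDP program to attach, %NULL to detach
 */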
void idpf_xdp_copy_prog_to_rqs(const struct idpf_vport *vport,
			       struct bpf_prog *xdp_prog)
{
	idpf_rxq_for_each(vport, idpf_xdp_rxq_assign_prog, xdp_prog);
}

static void idpf_xdp_tx_timer(struct work_struct *work);

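/**
 * idpf_xdpsqs_get - set up XDPSQs for a vport
 * @vport: vport to set up the XDPSQs for
 *
 * Converts the Tx queues starting at the XDP Tx queue offset into XDPSQs:
 * allocates a node-local completion timer for each one, drops the skb-mode
 * refill queues, marks the queues and their completion queues as XDP and
 * IRQ-less, and initializes the libeth XDPSQ locks, timers, and completion
 * thresholds.
 *
 * Return: 0 on success (or when XDP is not enabled), -ENOMEM on allocation
 * failure.
 */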
int idpf_xdpsqs_get(const struct idpf_vport *vport)
{
	struct libeth_xdpsq_timer **timers __free(kvfree) = NULL;
	struct net_device *dev;
	u32 sqs;

	if (!idpf_xdp_enabled(vport))
		return 0;

	timers = kvcalloc(vport->num_xdp_txq, sizeof(*timers), GFP_KERNEL);
	if (!timers)
		return -ENOMEM;

	for (u32 i = 0; i < vport->num_xdp_txq; i++) {
		timers[i] = kzalloc_node(sizeof(*timers[i]), GFP_KERNEL,
					 cpu_to_mem(i));
		if (!timers[i]) {
			for (int j = i - 1; j >= 0; j--)
				kfree(timers[j]);

			return -ENOMEM;
		}
	}

	dev = vport->netdev;
	sqs = vport->xdp_txq_offset;

	for (u32 i = sqs; i < vport->num_txq; i++) {
		struct idpf_tx_queue *xdpsq = vport->txqs[i];

		xdpsq->complq = xdpsq->txq_grp->complq;
		kfree(xdpsq->refillq);
		xdpsq->refillq = NULL;

		idpf_queue_clear(FLOW_SCH_EN, xdpsq);
		idpf_queue_clear(FLOW_SCH_EN, xdpsq->complq);
		idpf_queue_set(NOIRQ, xdpsq);
		idpf_queue_set(XDP, xdpsq);
		idpf_queue_set(XDP, xdpsq->complq);

		xdpsq->timer = timers[i - sqs];
		libeth_xdpsq_get(&xdpsq->xdp_lock, dev, vport->xdpsq_share);
		libeth_xdpsq_init_timer(xdpsq->timer, xdpsq, &xdpsq->xdp_lock,
					idpf_xdp_tx_timer);

		xdpsq->pending = 0;
		xdpsq->xdp_tx = 0;
		xdpsq->thresh = libeth_xdp_queue_threshold(xdpsq->desc_count);
	}

	return 0;
}

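/**
 * idpf_xdpsqs_put - tear down the XDPSQs of a vport
 * @vport: vport to tear down the XDPSQs for
 *
 * Reverse of idpf_xdpsqs_get(): stops and frees the completion timers,
 * releases the libeth XDPSQ locks, and clears the XDP queue flags.
 */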
void idpf_xdpsqs_put(const struct idpf_vport *vport)
{
	struct net_device *dev;
	u32 sqs;

	if (!idpf_xdp_enabled(vport))
		return;

	dev = vport->netdev;
	sqs = vport->xdp_txq_offset;

	for (u32 i = sqs; i < vport->num_txq; i++) {
		struct idpf_tx_queue *xdpsq = vport->txqs[i];

		if (!idpf_queue_has_clear(XDP, xdpsq))
			continue;

		libeth_xdpsq_deinit_timer(xdpsq->timer);
		libeth_xdpsq_put(&xdpsq->xdp_lock, dev);

		kfree(xdpsq->timer);
		xdpsq->refillq = NULL;
		idpf_queue_clear(NOIRQ, xdpsq);
	}
}

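/**
 * idpf_xdp_parse_cqe - parse one 4-byte Tx completion descriptor
 * @desc: completion descriptor to parse
 * @gen: expected generation bit for the current completion ring pass
 *
 * The descriptor is read as a single u32: one load when the platform
 * allows whole-word access, two LE16 loads otherwise.
 *
 * Return: the SQ head reported by HW on success, -ENODATA if the
 * generation bit does not match @gen (the descriptor has not been written
 * back yet), -EINVAL if the completion type is not an RS completion.
 */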
static int idpf_xdp_parse_cqe(const struct idpf_splitq_4b_tx_compl_desc *desc,
			      bool gen)
{
	u32 val;

#ifdef __LIBETH_WORD_ACCESS
	val = *(const u32 *)desc;
#else
	val = ((u32)le16_to_cpu(desc->q_head_compl_tag.q_head) << 16) |
	      le16_to_cpu(desc->qid_comptype_gen);
#endif
	if (!!(val & IDPF_TXD_COMPLQ_GEN_M) != gen)
		return -ENODATA;

	if (unlikely((val & GENMASK(IDPF_TXD_COMPLQ_GEN_S - 1, 0)) !=
		     FIELD_PREP(IDPF_TXD_COMPLQ_COMPL_TYPE_M,
				IDPF_TXD_COMPLT_RS)))
		return -EINVAL;

	return upper_16_bits(val);
}

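/**
 * idpf_xdpsq_poll - poll the completion queue of an XDPSQ
 * @xdpsq: XDPSQ to poll the completions for
 * @budget: maximum number of completions to process
 *
 * Walks the completion ring, toggling the generation bit on each wrap,
 * and converts the most recent head written back by HW into the number of
 * frames finished since the previous clean.
 *
 * Return: number of frames ready to be completed.
 */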
u32 idpf_xdpsq_poll(struct idpf_tx_queue *xdpsq, u32 budget)
{
	struct idpf_compl_queue *cq = xdpsq->complq;
	u32 tx_ntc = xdpsq->next_to_clean;
	u32 tx_cnt = xdpsq->desc_count;
	u32 ntc = cq->next_to_clean;
	u32 cnt = cq->desc_count;
	u32 done_frames;
	bool gen;

	gen = idpf_queue_has(GEN_CHK, cq);

	for (done_frames = 0; done_frames < budget; ) {
		int ret;

		ret = idpf_xdp_parse_cqe(&cq->comp_4b[ntc], gen);
		if (ret >= 0) {
			done_frames = ret > tx_ntc ? ret - tx_ntc :
						     ret + tx_cnt - tx_ntc;
			goto next;
		}

		switch (ret) {
		case -ENODATA:
			goto out;
		case -EINVAL:
			break;
		}

next:
		if (unlikely(++ntc == cnt)) {
			ntc = 0;
			gen = !gen;
			idpf_queue_change(GEN_CHK, cq);
		}
	}

out:
	cq->next_to_clean = ntc;

	return done_frames;
}

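/**
 * idpf_xdpsq_complete - clean an XDPSQ after polling its completion queue
 * @_xdpsq: XDPSQ to clean, passed as void * to match the libeth timer type
 * @budget: maximum number of frames to complete
 *
 * Frees or recycles the buffers of the finished descriptors in bulk and
 * updates the queue's accounting.
 *
 * Return: number of frames completed.
 */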
static u32 idpf_xdpsq_complete(void *_xdpsq, u32 budget)
{
	struct libeth_xdpsq_napi_stats ss = { };
	struct idpf_tx_queue *xdpsq = _xdpsq;
	u32 tx_ntc = xdpsq->next_to_clean;
	u32 tx_cnt = xdpsq->desc_count;
	struct xdp_frame_bulk bq;
	struct libeth_cq_pp cp = {
		.dev	= xdpsq->dev,
		.bq	= &bq,
		.xss	= &ss,
		.napi	= true,
	};
	u32 done_frames;

	done_frames = idpf_xdpsq_poll(xdpsq, budget);
	if (unlikely(!done_frames))
		return 0;

	xdp_frame_bulk_init(&bq);

	for (u32 i = 0; likely(i < done_frames); i++) {
		libeth_xdp_complete_tx(&xdpsq->tx_buf[tx_ntc], &cp);

		if (unlikely(++tx_ntc == tx_cnt))
			tx_ntc = 0;
	}

	xdp_flush_frame_bulk(&bq);

	xdpsq->next_to_clean = tx_ntc;
	xdpsq->pending -= done_frames;
	xdpsq->xdp_tx -= cp.xdp_tx;

	return done_frames;
}

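/**
 * idpf_xdp_tx_prep - prepare an XDPSQ for transmitting frames
 * @_xdpsq: XDPSQ to prepare, passed as void * to match the libeth type
 * @sq: libeth XDPSQ view to fill out
 *
 * Takes the XDPSQ lock, runs a completion cycle if the number of free
 * descriptors dropped below the threshold, and exports the ring state to
 * libeth.
 *
 * Return: number of free descriptors in the ring.
 */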
static u32 idpf_xdp_tx_prep(void *_xdpsq, struct libeth_xdpsq *sq)
{
	struct idpf_tx_queue *xdpsq = _xdpsq;
	u32 free;

	libeth_xdpsq_lock(&xdpsq->xdp_lock);

	free = xdpsq->desc_count - xdpsq->pending;
	if (free < xdpsq->thresh)
		free += idpf_xdpsq_complete(xdpsq, xdpsq->thresh);

	*sq = (struct libeth_xdpsq){
		.sqes		= xdpsq->tx_buf,
		.descs		= xdpsq->desc_ring,
		.count		= xdpsq->desc_count,
		.lock		= &xdpsq->xdp_lock,
		.ntu		= &xdpsq->next_to_use,
		.pending	= &xdpsq->pending,
		.xdp_tx		= &xdpsq->xdp_tx,
	};

	return free;
}

LIBETH_XDP_DEFINE_START();
LIBETH_XDP_DEFINE_TIMER(static idpf_xdp_tx_timer, idpf_xdpsq_complete);
LIBETH_XDP_DEFINE_FLUSH_TX(idpf_xdp_tx_flush_bulk, idpf_xdp_tx_prep,
			   idpf_xdp_tx_xmit);
LIBETH_XDP_DEFINE_FLUSH_XMIT(static idpf_xdp_xmit_flush_bulk, idpf_xdp_tx_prep,
			     idpf_xdp_tx_xmit);
LIBETH_XDP_DEFINE_END();

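/**
 * idpf_xdp_xmit - transmit frames on behalf of .ndo_xdp_xmit
 * @dev: netdev to transmit the frames on
 * @n: number of frames
 * @frames: frames to transmit
 * @flags: transmit flags from the XDP core
 *
 * Return: number of frames queued, or -ENETDOWN if the link or carrier is
 * down.
 */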
int idpf_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		  u32 flags)
{
	const struct idpf_netdev_priv *np = netdev_priv(dev);
	const struct idpf_vport *vport = np->vport;

	if (unlikely(!netif_carrier_ok(dev) || !vport->link_up))
		return -ENETDOWN;

	return libeth_xdp_xmit_do_bulk(dev, n, frames, flags,
				       &vport->txqs[vport->xdp_txq_offset],
				       vport->num_xdp_txq,
				       idpf_xdp_xmit_flush_bulk,
				       idpf_xdp_tx_finalize);
}

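/**
 * idpf_xdpmo_rx_hash - XDP metadata op to fetch the Rx hash
 * @ctx: XDP buff to get the hash from
 * @hash: destination for the hash value
 * @rss_type: destination for the hash type
 *
 * Looks up the packet type from the frame's Rx descriptor and, when the
 * netdev and the packet type carry a hash, converts it to the XDP RSS
 * hash type enum.
 *
 * Return: 0 on success, -ENODATA if the descriptor has no hash.
 */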
static int idpf_xdpmo_rx_hash(const struct xdp_md *ctx, u32 *hash,
			      enum xdp_rss_hash_type *rss_type)
{
	const struct libeth_xdp_buff *xdp = (typeof(xdp))ctx;
	struct idpf_xdp_rx_desc desc __uninitialized;
	const struct idpf_rx_queue *rxq;
	struct libeth_rx_pt pt;

	rxq = libeth_xdp_buff_to_rq(xdp, typeof(*rxq), xdp_rxq);

	idpf_xdp_get_qw0(&desc, xdp->desc);

	pt = rxq->rx_ptype_lkup[idpf_xdp_rx_pt(&desc)];
	if (!libeth_rx_pt_has_hash(rxq->xdp_rxq.dev, pt))
		return -ENODATA;

	idpf_xdp_get_qw2(&desc, xdp->desc);

	return libeth_xdpmo_rx_hash(hash, rss_type, idpf_xdp_rx_hash(&desc),
				    pt);
}

static const struct xdp_metadata_ops idpf_xdpmo = {
	.xmo_rx_hash		= idpf_xdpmo_rx_hash,
};

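/**
 * idpf_xdp_set_features - set the XDP feature bits on a netdev
 * @vport: vport to configure
 *
 * XDP is supported in the split queue model only. The redirect feature is
 * excluded here and announced separately once a program is attached, see
 * idpf_xdp_setup_prog().
 */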
void idpf_xdp_set_features(const struct idpf_vport *vport)
{
	if (!idpf_is_queue_model_split(vport->rxq_model))
		return;

	libeth_xdp_set_features_noredir(vport->netdev, &idpf_xdpmo,
					idpf_get_max_tx_bufs(vport->adapter),
					libeth_xsktmo);
}

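/**
 * idpf_xdp_setup_prog - attach or detach an XDP program
 * @vport: vport to configure
 * @xdp: netdev BPF command carrying the program and the extack
 *
 * If the enabled state does not change (or the netdev is unregistered or
 * being removed), only the program pointers are swapped. Otherwise, the
 * new program is stored in the vport config and a soft reset is
 * initiated, so that the queues are reallocated with the XDPSQs added or
 * removed.
 *
 * Return: 0 on success, -ENOSPC when no Tx queues are left for XDP,
 * negative error code if the reset failed.
 */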
static int idpf_xdp_setup_prog(struct idpf_vport *vport,
			       const struct netdev_bpf *xdp)
{
	const struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
	struct bpf_prog *old, *prog = xdp->prog;
	struct idpf_vport_config *cfg;
	int ret;

	cfg = vport->adapter->vport_config[vport->idx];

	if (test_bit(IDPF_REMOVE_IN_PROG, vport->adapter->flags) ||
	    !test_bit(IDPF_VPORT_REG_NETDEV, cfg->flags) ||
	    !!vport->xdp_prog == !!prog) {
		if (np->state == __IDPF_VPORT_UP)
			idpf_xdp_copy_prog_to_rqs(vport, prog);

		old = xchg(&vport->xdp_prog, prog);
		if (old)
			bpf_prog_put(old);

		cfg->user_config.xdp_prog = prog;

		return 0;
	}

	if (!vport->num_xdp_txq && vport->num_txq == cfg->max_q.max_txq) {
		NL_SET_ERR_MSG_MOD(xdp->extack,
				   "No Tx queues available for XDP, please decrease the number of regular SQs");
		return -ENOSPC;
	}

	old = cfg->user_config.xdp_prog;
	cfg->user_config.xdp_prog = prog;

	ret = idpf_initiate_soft_reset(vport, IDPF_SR_Q_CHANGE);
	if (ret) {
		NL_SET_ERR_MSG_MOD(xdp->extack,
				   "Could not reopen the vport after XDP setup");

		cfg->user_config.xdp_prog = old;
		old = prog;
	}

	if (old)
		bpf_prog_put(old);

	libeth_xdp_set_redirect(vport->netdev, vport->xdp_prog);

	return ret;
}

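/**
 * idpf_xdp - handle the .ndo_bpf callback
 * @dev: netdev to configure
 * @xdp: netdev BPF command
 *
 * Return: 0 on success, -EOPNOTSUPP if the vport does not use the split
 * queue model or the command is unknown, negative error code otherwise.
 */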
int idpf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct idpf_vport *vport;
	int ret;

	idpf_vport_ctrl_lock(dev);
	vport = idpf_netdev_to_vport(dev);

	if (!idpf_is_queue_model_split(vport->txq_model))
		goto notsupp;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		ret = idpf_xdp_setup_prog(vport, xdp);
		break;
	case XDP_SETUP_XSK_POOL:
		ret = idpf_xsk_pool_setup(vport, xdp);
		break;
	default:
notsupp:
		ret = -EOPNOTSUPP;
		break;
	}

	idpf_vport_ctrl_unlock(dev);

	return ret;
}